diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 1a04ca8162ad882483e402256059446fea8d6943..44c6e57303988cf527e9e8f9619071b2d8571f46 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -510,6 +510,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/srbds /sys/devices/system/cpu/vulnerabilities/tsx_async_abort /sys/devices/system/cpu/vulnerabilities/itlb_multihit + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data Date: January 2018 Contact: Linux kernel mailing list Description: Information about CPU vulnerabilities diff --git a/Documentation/accounting/psi.rst b/Documentation/accounting/psi.rst index 860fe651d6453eed1652f721cb048c8b3840be33..5e40b3f437f90c2d202198aa6bfac5707fa7ac63 100644 --- a/Documentation/accounting/psi.rst +++ b/Documentation/accounting/psi.rst @@ -37,11 +37,7 @@ Pressure interface Pressure information for each resource is exported through the respective file in /proc/pressure/ -- cpu, memory, and io. -The format for CPU is as such:: - - some avg10=0.00 avg60=0.00 avg300=0.00 total=0 - -and for memory and IO:: +The format is as such:: some avg10=0.00 avg60=0.00 avg300=0.00 total=0 full avg10=0.00 avg60=0.00 avg300=0.00 total=0 @@ -58,6 +54,9 @@ situation from a state where some tasks are stalled but the CPU is still doing productive work. As such, time spent in this subset of the stall state is tracked separately and exported in the "full" averages. +CPU full is undefined at the system level, but has been reported +since 5.13, so it is set to zero for backward compatibility. + The ratios (in %) are tracked as recent trends over ten, sixty, and three hundred second windows, which gives insight into short term events as well as medium and long term trends. The total absolute stall time diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 608d7c279396b5832a07dea3a1d5ce3c75c24142..5d9b7e552fb0e2112eebc55e94f71e0c75a09bb9 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -997,6 +997,8 @@ All time durations are in microseconds. - nr_periods - nr_throttled - throttled_usec + - nr_bursts + - burst_usec cpu.weight A read-write single value file which exists on non-root @@ -1028,6 +1030,12 @@ All time durations are in microseconds. $PERIOD duration. "max" for $MAX indicates no limit. If only one number is written, $MAX is updated. + cpu.max.burst + A read-write single value file which exists on non-root + cgroups. The default is "0". + + The burst in the range [0, $MAX]. + cpu.pressure A read-only nested-key file which exists on non-root cgroups. @@ -1181,6 +1189,27 @@ PAGE_SIZE multiple when read back. high limit is used and monitored properly, this limit's utility is limited to providing the final safety net. + memory.reclaim + A write-only nested-keyed file which exists for all cgroups. + + This is a simple interface to trigger memory reclaim in the + target cgroup. + + This file accepts a single key, the number of bytes to reclaim. + No nested keys are currently supported. + + Example:: + + echo "1G" > memory.reclaim + + The interface can be later extended with nested keys to + configure the reclaim behavior. For example, specify the + type of memory to reclaim from (anon, file, ..). + + Please note that the kernel can over or under reclaim from + the target cgroup. 
If less bytes are reclaimed than the + specified amount, -EAGAIN is returned. + memory.oom.group A read-write single value file which exists on non-root cgroups. The default value is "0". diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index ca4dbdd9016d5a873b11381dbe75bdff5ee13038..2adec1e6520a68e4d6dd9c52bd86f48158e30957 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -15,3 +15,4 @@ are configurable at compile, boot or run time. tsx_async_abort multihit.rst special-register-buffer-data-sampling.rst + processor_mmio_stale_data.rst diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst new file mode 100644 index 0000000000000000000000000000000000000000..9393c50b5afc9c9fe8b9ac90ed9fe4774e1d1550 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst @@ -0,0 +1,246 @@ +========================================= +Processor MMIO Stale Data Vulnerabilities +========================================= + +Processor MMIO Stale Data Vulnerabilities are a class of memory-mapped I/O +(MMIO) vulnerabilities that can expose data. The sequences of operations for +exposing data range from simple to very complex. Because most of the +vulnerabilities require the attacker to have access to MMIO, many environments +are not affected. System environments using virtualization where MMIO access is +provided to untrusted guests may need mitigation. These vulnerabilities are +not transient execution attacks. However, these vulnerabilities may propagate +stale data into core fill buffers where the data can subsequently be inferred +by an unmitigated transient execution attack. Mitigation for these +vulnerabilities includes a combination of microcode update and software +changes, depending on the platform and usage model. Some of these mitigations +are similar to those used to mitigate Microarchitectural Data Sampling (MDS) or +those used to mitigate Special Register Buffer Data Sampling (SRBDS). + +Data Propagators +================ +Propagators are operations that result in stale data being copied or moved from +one microarchitectural buffer or register to another. Processor MMIO Stale Data +Vulnerabilities are operations that may result in stale data being directly +read into an architectural, software-visible state or sampled from a buffer or +register. + +Fill Buffer Stale Data Propagator (FBSDP) +----------------------------------------- +Stale data may propagate from fill buffers (FB) into the non-coherent portion +of the uncore on some non-coherent writes. Fill buffer propagation by itself +does not make stale data architecturally visible. Stale data must be propagated +to a location where it is subject to reading or sampling. + +Sideband Stale Data Propagator (SSDP) +------------------------------------- +The sideband stale data propagator (SSDP) is limited to the client (including +Intel Xeon server E3) uncore implementation. The sideband response buffer is +shared by all client cores. For non-coherent reads that go to sideband +destinations, the uncore logic returns 64 bytes of data to the core, including +both requested data and unrequested stale data, from a transaction buffer and +the sideband response buffer. As a result, stale data from the sideband +response and transaction buffers may now reside in a core fill buffer. 
+ +Primary Stale Data Propagator (PSDP) +------------------------------------ +The primary stale data propagator (PSDP) is limited to the client (including +Intel Xeon server E3) uncore implementation. Similar to the sideband response +buffer, the primary response buffer is shared by all client cores. For some +processors, MMIO primary reads will return 64 bytes of data to the core fill +buffer including both requested data and unrequested stale data. This is +similar to the sideband stale data propagator. + +Vulnerabilities +=============== +Device Register Partial Write (DRPW) (CVE-2022-21166) +----------------------------------------------------- +Some endpoint MMIO registers incorrectly handle writes that are smaller than +the register size. Instead of aborting the write or only copying the correct +subset of bytes (for example, 2 bytes for a 2-byte write), more bytes than +specified by the write transaction may be written to the register. On +processors affected by FBSDP, this may expose stale data from the fill buffers +of the core that created the write transaction. + +Shared Buffers Data Sampling (SBDS) (CVE-2022-21125) +---------------------------------------------------- +After propagators may have moved data around the uncore and copied stale data +into client core fill buffers, processors affected by MFBDS can leak data from +the fill buffer. It is limited to the client (including Intel Xeon server E3) +uncore implementation. + +Shared Buffers Data Read (SBDR) (CVE-2022-21123) +------------------------------------------------ +It is similar to Shared Buffer Data Sampling (SBDS) except that the data is +directly read into the architectural software-visible state. It is limited to +the client (including Intel Xeon server E3) uncore implementation. + +Affected Processors +=================== +Not all the CPUs are affected by all the variants. For instance, most +processors for the server market (excluding Intel Xeon E3 processors) are +impacted by only Device Register Partial Write (DRPW). + +Below is the list of affected Intel processors [#f1]_: + + =================== ============ ========= + Common name Family_Model Steppings + =================== ============ ========= + HASWELL_X 06_3FH 2,4 + SKYLAKE_L 06_4EH 3 + BROADWELL_X 06_4FH All + SKYLAKE_X 06_55H 3,4,6,7,11 + BROADWELL_D 06_56H 3,4,5 + SKYLAKE 06_5EH 3 + ICELAKE_X 06_6AH 4,5,6 + ICELAKE_D 06_6CH 1 + ICELAKE_L 06_7EH 5 + ATOM_TREMONT_D 06_86H All + LAKEFIELD 06_8AH 1 + KABYLAKE_L 06_8EH 9 to 12 + ATOM_TREMONT 06_96H 1 + ATOM_TREMONT_L 06_9CH 0 + KABYLAKE 06_9EH 9 to 13 + COMETLAKE 06_A5H 2,3,5 + COMETLAKE_L 06_A6H 0,1 + ROCKETLAKE 06_A7H 1 + =================== ============ ========= + +If a CPU is in the affected processor list, but not affected by a variant, it +is indicated by new bits in MSR IA32_ARCH_CAPABILITIES. As described in a later +section, mitigation largely remains the same for all the variants, i.e. to +clear the CPU fill buffers via VERW instruction. + +New bits in MSRs +================ +Newer processors and microcode update on existing affected processors added new +bits to IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate +specific variants of Processor MMIO Stale Data vulnerabilities and mitigation +capability. + +MSR IA32_ARCH_CAPABILITIES +-------------------------- +Bit 13 - SBDR_SSDP_NO - When set, processor is not affected by either the + Shared Buffers Data Read (SBDR) vulnerability or the sideband stale + data propagator (SSDP). 
+Bit 14 - FBSDP_NO - When set, processor is not affected by the Fill Buffer + Stale Data Propagator (FBSDP). +Bit 15 - PSDP_NO - When set, processor is not affected by Primary Stale Data + Propagator (PSDP). +Bit 17 - FB_CLEAR - When set, VERW instruction will overwrite CPU fill buffer + values as part of MD_CLEAR operations. Processors that do not + enumerate MDS_NO (meaning they are affected by MDS) but that do + enumerate support for both L1D_FLUSH and MD_CLEAR implicitly enumerate + FB_CLEAR as part of their MD_CLEAR support. +Bit 18 - FB_CLEAR_CTRL - Processor supports read and write to MSR + IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]. On such processors, the FB_CLEAR_DIS + bit can be set to cause the VERW instruction to not perform the + FB_CLEAR action. Not all processors that support FB_CLEAR will support + FB_CLEAR_CTRL. + +MSR IA32_MCU_OPT_CTRL +--------------------- +Bit 3 - FB_CLEAR_DIS - When set, VERW instruction does not perform the FB_CLEAR +action. This may be useful to reduce the performance impact of FB_CLEAR in +cases where system software deems it warranted (for example, when performance +is more critical, or the untrusted software has no MMIO access). Note that +FB_CLEAR_DIS has no impact on enumeration (for example, it does not change +FB_CLEAR or MD_CLEAR enumeration) and it may not be supported on all processors +that enumerate FB_CLEAR. + +Mitigation +========== +Like MDS, all variants of Processor MMIO Stale Data vulnerabilities have the +same mitigation strategy to force the CPU to clear the affected buffers before +an attacker can extract the secrets. + +This is achieved by using the otherwise unused and obsolete VERW instruction in +combination with a microcode update. The microcode clears the affected CPU +buffers when the VERW instruction is executed. + +Kernel reuses the MDS function to invoke the buffer clearing: + + mds_clear_cpu_buffers() + +On MDS affected CPUs, the kernel already invokes CPU buffer clear on +kernel/userspace, hypervisor/guest and C-state (idle) transitions. No +additional mitigation is needed on such CPUs. + +For CPUs not affected by MDS or TAA, mitigation is needed only for the attacker +with MMIO capability. Therefore, VERW is not required for kernel/userspace. For +virtualization case, VERW is only needed at VMENTER for a guest with MMIO +capability. + +Mitigation points +----------------- +Return to user space +^^^^^^^^^^^^^^^^^^^^ +Same mitigation as MDS when affected by MDS/TAA, otherwise no mitigation +needed. + +C-State transition +^^^^^^^^^^^^^^^^^^ +Control register writes by CPU during C-state transition can propagate data +from fill buffer to uncore buffers. Execute VERW before C-state transition to +clear CPU fill buffers. + +Guest entry point +^^^^^^^^^^^^^^^^^ +Same mitigation as MDS when processor is also affected by MDS/TAA, otherwise +execute VERW at VMENTER only for MMIO capable guests. On CPUs not affected by +MDS/TAA, guest without MMIO access cannot extract secrets using Processor MMIO +Stale Data vulnerabilities, so there is no need to execute VERW for such guests. + +Mitigation control on the kernel command line +--------------------------------------------- +The kernel command line allows to control the Processor MMIO Stale Data +mitigations at boot time with the option "mmio_stale_data=". 
The valid +arguments for this option are: + + ========== ================================================================= + full If the CPU is vulnerable, enable mitigation; CPU buffer clearing + on exit to userspace and when entering a VM. Idle transitions are + protected as well. It does not automatically disable SMT. + full,nosmt Same as full, with SMT disabled on vulnerable CPUs. This is the + complete mitigation. + off Disables mitigation completely. + ========== ================================================================= + +If the CPU is affected and mmio_stale_data=off is not supplied on the kernel +command line, then the kernel selects the appropriate mitigation. + +Mitigation status information +----------------------------- +The Linux kernel provides a sysfs interface to enumerate the current +vulnerability status of the system: whether the system is vulnerable, and +which mitigations are active. The relevant sysfs file is: + + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + +The possible values in this file are: + + .. list-table:: + + * - 'Not affected' + - The processor is not vulnerable + * - 'Vulnerable' + - The processor is vulnerable, but no mitigation enabled + * - 'Vulnerable: Clear CPU buffers attempted, no microcode' + - The processor is vulnerable, but microcode is not updated. The + mitigation is enabled on a best effort basis. + * - 'Mitigation: Clear CPU buffers' + - The processor is vulnerable and the CPU buffer clearing mitigation is + enabled. + +If the processor is vulnerable then the following information is appended to +the above information: + + ======================== =========================================== + 'SMT vulnerable' SMT is enabled + 'SMT disabled' SMT is disabled + 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown + ======================== =========================================== + +References +---------- +.. [#f1] Affected Processors + https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 98199d3ae7419d2707ef840a2877c7a8f5270586..2b04cf8fbab4989c9cedbfa0585436e684fb4c78 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2848,7 +2848,8 @@ [KNL,ACPI] Mark specific memory as reserved. Region of memory to be reserved is from ss to ss+nn. For ARM64, reserved memory must be in the range of - existed memory. + existed memory and do not overlap in-use memory region, + otherwise request will be ignored. Example: Exclude memory from 0x18690000-0x1869ffff memmap=64K$0x18690000 or @@ -2987,6 +2988,7 @@ kvm.nx_huge_pages=off [X86] no_entry_flush [PPC] no_uaccess_flush [PPC] + mmio_stale_data=off [X86] Exceptions: This does not have any effect on @@ -3008,6 +3010,7 @@ Equivalent to: l1tf=flush,nosmt [X86] mds=full,nosmt [X86] tsx_async_abort=full,nosmt [X86] + mmio_stale_data=full,nosmt [X86] mminit_loglevel= [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this @@ -3017,6 +3020,40 @@ log everything. Information is printed at KERN_DEBUG so loglevel=8 may also need to be specified. + mmio_stale_data= + [X86,INTEL] Control mitigation for the Processor + MMIO Stale Data vulnerabilities. + + Processor MMIO Stale Data is a class of + vulnerabilities that may expose data after an MMIO + operation. 
Exposed data could originate or end in + the same CPU buffers as affected by MDS and TAA. + Therefore, similar to MDS and TAA, the mitigation + is to clear the affected CPU buffers. + + This parameter controls the mitigation. The + options are: + + full - Enable mitigation on vulnerable CPUs + + full,nosmt - Enable mitigation and disable SMT on + vulnerable CPUs. + + off - Unconditionally disable mitigation + + On MDS or TAA affected machines, + mmio_stale_data=off can be prevented by an active + MDS or TAA mitigation as these vulnerabilities are + mitigated with the same mechanism so in order to + disable this mitigation, you need to specify + mds=off and tsx_async_abort=off too. + + Not specifying this option is equivalent to + mmio_stale_data=full. + + For details see: + Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst + module.sig_enforce [KNL] When CONFIG_MODULE_SIG is set, this means that modules without (valid) signatures will fail to load. diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index 56bb3afe3794f40d8bb5d8c62fcdcd19d74147f1..773747c1b329b74c0dd17b1eb67248e7dfd34de6 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -787,6 +787,7 @@ bit 1 print system memory info bit 2 print timer info bit 3 print locks info if ``CONFIG_LOCKDEP`` is on bit 4 print ftrace buffer +bit 5 print all printk messages in buffer ===== ============================================ So for example to print tasks and memory info on panic, user can:: diff --git a/Documentation/arm64/cpu-feature-registers.rst b/Documentation/arm64/cpu-feature-registers.rst index 9f9b8fd060892bad7fb370f6124135ff8c2b58f9..749ae970c31955a6ddd54807d4f7a0700cf85295 100644 --- a/Documentation/arm64/cpu-feature-registers.rst +++ b/Documentation/arm64/cpu-feature-registers.rst @@ -275,6 +275,23 @@ infrastructure: | SVEVer | [3-0] | y | +------------------------------+---------+---------+ + 8) ID_AA64MMFR1_EL1 - Memory model feature register 1 + + +------------------------------+---------+---------+ + | Name | bits | visible | + +------------------------------+---------+---------+ + | AFP | [47-44] | y | + +------------------------------+---------+---------+ + + 9) ID_AA64ISAR2_EL1 - Instruction set attribute register 2 + + +------------------------------+---------+---------+ + | Name | bits | visible | + +------------------------------+---------+---------+ + | RPRES | [7-4] | y | + +------------------------------+---------+---------+ + + Appendix I: Example ------------------- diff --git a/Documentation/arm64/elf_hwcaps.rst b/Documentation/arm64/elf_hwcaps.rst index 95e66bd7dd17efdf855ab4268eab7146c4349f38..e88d245d426da330240d12edae7941d905b8ffe8 100644 --- a/Documentation/arm64/elf_hwcaps.rst +++ b/Documentation/arm64/elf_hwcaps.rst @@ -249,6 +249,14 @@ HWCAP2_ECV Functionality implied by ID_AA64MMFR0_EL1.ECV == 0b0001. +HWCAP2_AFP + + Functionality implied by ID_AA64MFR1_EL1.AFP == 0b0001. + +HWCAP2_RPRES + + Functionality implied by ID_AA64ISAR2_EL1.RPRES == 0b0001. + 4. 
Unused AT_HWCAP bits ----------------------- diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst index 5d194615aed01111ef6ed9abd6713b73bd4a0348..2e26d2998722ef64bffd18920e6c57bb7fb5ee95 100644 --- a/Documentation/dev-tools/kfence.rst +++ b/Documentation/dev-tools/kfence.rst @@ -61,6 +61,17 @@ The total memory dedicated to the KFENCE memory pool can be computed as:: Using the default config, and assuming a page size of 4 KiB, results in dedicating 2 MiB to the KFENCE memory pool. +KFENCE allow re-enabling after system startup, but ifndef CONFIG_CONTIG_ALLOC +and KFENCE_NUM_OBJECTS exceeds MAX_ORDER, alloc KFENCE pool after system startup +is not supported. + +For arm64, re-enabling KFENCE is kind of conflict with map the ages in KFENCE +pool itself at page granularity. For the flexibility, scale sample_interval to +control whether arm64 supported to enable kfence after system startup. +Once this is set to -1 in boot parameter, kfence_pool will be allocated from +early memory no matter kfence is enabled or not. Otherwise, re-enabling is not +supported on arm64. + Note: On architectures that support huge pages, KFENCE will ensure that the pool is using pages of size ``PAGE_SIZE``. This will result in additional page tables being allocated. diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml index b29050fd7470a3811530572ef5d4d25922ffee56..6fe2a3d8ee6b86dd3d83b63512834c70fee18fce 100644 --- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml +++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml @@ -44,7 +44,7 @@ patternProperties: properties: reg: description: - Contains the native Ready/Busy IDs. + Contains the chip-select IDs. 
nand-ecc-mode: description: @@ -174,6 +174,6 @@ examples: nand-ecc-mode = "soft"; nand-ecc-algo = "bch"; - /* controller specific properties */ + /* NAND chip specific properties */ }; }; diff --git a/Documentation/devicetree/bindings/spi/spi-mxic.txt b/Documentation/devicetree/bindings/spi/spi-mxic.txt index 529f2dab2648a78b5a82d7b7ffd95bb07e8c51e1..7bcbb229b78bb3c6d03652d63ad5da6ace0304f9 100644 --- a/Documentation/devicetree/bindings/spi/spi-mxic.txt +++ b/Documentation/devicetree/bindings/spi/spi-mxic.txt @@ -8,11 +8,13 @@ Required properties: - reg: should contain 2 entries, one for the registers and one for the direct mapping area - reg-names: should contain "regs" and "dirmap" -- interrupts: interrupt line connected to the SPI controller - clock-names: should contain "ps_clk", "send_clk" and "send_dly_clk" - clocks: should contain 3 entries for the "ps_clk", "send_clk" and "send_dly_clk" clocks +Optional properties: +- interrupts: interrupt line connected to the SPI controller + Example: spi@43c30000 { diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst index 003c865e9c212342ecac53c42eddd6848077de4f..fbcb48bc2a9030caa28ebc04d8c24f305bb505bd 100644 --- a/Documentation/process/stable-kernel-rules.rst +++ b/Documentation/process/stable-kernel-rules.rst @@ -168,7 +168,16 @@ Trees - The finalized and tagged releases of all stable kernels can be found in separate branches per version at: - https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git + https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git + + - The release candidate of all stable kernel versions can be found at: + + https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git/ + + .. warning:: + The -stable-rc tree is a snapshot in time of the stable-queue tree and + will change frequently, hence will be rebased often. It should only be + used for testing purposes (e.g. to be consumed by CI systems). Review committee diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst index 9801d6b284b1ecfdd51b7e1ab5ab5f9bc6da3b40..5723d8c69e3542e1b44054613e2e8e7f9c9abc03 100644 --- a/Documentation/scheduler/sched-bwc.rst +++ b/Documentation/scheduler/sched-bwc.rst @@ -21,33 +21,84 @@ cfs_quota units at each period boundary. As threads consume this bandwidth it is transferred to cpu-local "silos" on a demand basis. The amount transferred within each of these updates is tunable and described as the "slice". +Burst feature +------------- +This feature borrows time now against our future underrun, at the cost of +increased interference against the other system users. All nicely bounded. + +Traditional (UP-EDF) bandwidth control is something like: + + (U = \Sum u_i) <= 1 + +This guaranteeds both that every deadline is met and that the system is +stable. After all, if U were > 1, then for every second of walltime, +we'd have to run more than a second of program time, and obviously miss +our deadline, but the next deadline will be further out still, there is +never time to catch up, unbounded fail. + +The burst feature observes that a workload doesn't always executes the full +quota; this enables one to describe u_i as a statistical distribution. + +For example, have u_i = {x,e}_i, where x is the p(95) and x+e p(100) +(the traditional WCET). This effectively allows u to be smaller, +increasing the efficiency (we can pack more tasks in the system), but at +the cost of missing deadlines when all the odds line up. 
However, it +does maintain stability, since every overrun must be paired with an +underrun as long as our x is above the average. + +That is, suppose we have 2 tasks, both specifying a p(95) value, then we +have a p(95)*p(95) = 90.25% chance both tasks are within their quota and +everything is good. At the same time we have a p(5)p(5) = 0.25% chance +both tasks will exceed their quota at the same time (guaranteed deadline +fail). Somewhere in between there's a threshold where one exceeds and +the other doesn't underrun enough to compensate; this depends on the +specific CDFs. + +At the same time, we can say that the worst case deadline miss will be +\Sum e_i; that is, there is a bounded tardiness (under the assumption +that x+e is indeed WCET). + +The interference when using burst is evaluated by the possibility of +missing the deadline and the average WCET. Test results showed that when +there are many cgroups or the CPU is under-utilized, the interference is +limited. More details are shown in: +https://lore.kernel.org/lkml/5371BD36-55AE-4F71-B9D7-B86DC32E3D2B@linux.alibaba.com/ + Management ---------- -Quota and period are managed within the cpu subsystem via cgroupfs. +Quota, period and burst are managed within the cpu subsystem via cgroupfs. -cpu.cfs_quota_us: the total available run-time within a period (in microseconds) +cpu.cfs_quota_us: run-time replenished within a period (in microseconds) cpu.cfs_period_us: the length of a period (in microseconds) cpu.stat: exports throttling statistics [explained further below] +cpu.cfs_burst_us: the maximum accumulated run-time (in microseconds) The default values are:: cpu.cfs_period_us=100ms - cpu.cfs_quota=-1 + cpu.cfs_quota_us=-1 + cpu.cfs_burst_us=0 A value of -1 for cpu.cfs_quota_us indicates that the group does not have any bandwidth restriction in place, such a group is described as an unconstrained bandwidth group. This represents the traditional work-conserving behavior for CFS. -Writing any (valid) positive value(s) will enact the specified bandwidth limit. -The minimum quota allowed for the quota or period is 1ms. There is also an -upper bound on the period length of 1s. Additional restrictions exist when -bandwidth limits are used in a hierarchical fashion, these are explained in -more detail below. +Writing any (valid) positive value(s) no smaller than cpu.cfs_burst_us will +enact the specified bandwidth limit. The minimum quota allowed for the quota or +period is 1ms. There is also an upper bound on the period length of 1s. +Additional restrictions exist when bandwidth limits are used in a hierarchical +fashion, these are explained in more detail below. Writing any negative value to cpu.cfs_quota_us will remove the bandwidth limit and return the group to an unconstrained state once more. +A value of 0 for cpu.cfs_burst_us indicates that the group cannot accumulate +any unused bandwidth. It leaves the traditional bandwidth control behavior for +CFS unchanged. Writing any (valid) positive value(s) no larger than +cpu.cfs_quota_us into cpu.cfs_burst_us will enact the cap on unused bandwidth +accumulation. + Any updates to a group's bandwidth specification will result in it becoming unthrottled if it is in a constrained state. @@ -67,7 +118,7 @@ for more fine-grained consumption. Statistics ---------- -A group's bandwidth statistics are exported via 3 fields in cpu.stat. +A group's bandwidth statistics are exported via 5 fields in cpu.stat.
cpu.stat: @@ -75,6 +126,9 @@ cpu.stat: - nr_throttled: Number of times the group has been throttled/limited. - throttled_time: The total time duration (in nanoseconds) for which entities of the group have been throttled. +- nr_bursts: Number of periods burst occurs. +- burst_time: Cumulative wall-time (in nanoseconds) that any CPUs has used + above quota in respective periods This interface is read-only. @@ -172,3 +226,15 @@ Examples By using a small period here we are ensuring a consistent latency response at the expense of burst capacity. + +4. Limit a group to 40% of 1 CPU, and allow accumulate up to 20% of 1 CPU + additionally, in case accumulation has been done. + + With 50ms period, 20ms quota will be equivalent to 40% of 1 CPU. + And 10ms burst will be equivalent to 20% of 1 CPU. + + # echo 20000 > cpu.cfs_quota_us /* quota = 20ms */ + # echo 50000 > cpu.cfs_period_us /* period = 50ms */ + # echo 10000 > cpu.cfs_burst_us /* burst = 10ms */ + + Larger buffer setting (no larger than quota) allows greater burst capacity. diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst index d25335993e55309d8c7799178231a251ec890e42..9b52f50a68542b932879f054bdd6b436ee7658a1 100644 --- a/Documentation/sound/hd-audio/models.rst +++ b/Documentation/sound/hd-audio/models.rst @@ -261,6 +261,10 @@ alc-sense-combo huawei-mbx-stereo Enable initialization verbs for Huawei MBX stereo speakers; might be risky, try this at your own risk +alc298-samsung-headphone + Samsung laptops with ALC298 +alc256-samsung-headphone + Samsung laptops with ALC256 ALC66x/67x/892 ============== diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst index 2a5aa48eff6c78dd59ebbf23930f8ce135dd9e87..9df29a935757afe25590f36cc2746eeabf8646bb 100644 --- a/Documentation/trace/events.rst +++ b/Documentation/trace/events.rst @@ -198,6 +198,15 @@ The glob (~) accepts a wild card character (\*,?) and character classes prev_comm ~ "*sh*" prev_comm ~ "ba*sh" +If the field is a pointer that points into user space (for example +"filename" from sys_enter_openat), then you have to append ".ustring" to the +field name:: + + filename.ustring ~ "password" + +As the kernel will have to know how to retrieve the memory that the pointer +is at from user space. + 5.2 Setting filters ------------------- @@ -230,6 +239,16 @@ Currently the caret ('^') for an error always appears at the beginning of the filter string; the error message should still be useful though even without more accurate position info. +5.2.1 Filter limitations +------------------------ + +If a filter is placed on a string pointer ``(char *)`` that does not point +to a string on the ring buffer, but instead points to kernel or user space +memory, then, for safety reasons, at most 1024 bytes of the content is +copied onto a temporary buffer to do the compare. If the copy of the memory +faults (the pointer points to memory that should not be accessed), then the +string compare will be treated as not matching. 
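+
+For example, assuming tracefs is mounted at /sys/kernel/tracing and syscall
+trace events are available, a user space string filter such as the one shown
+in section 5.1 could be applied with::
+
+  # cd /sys/kernel/tracing
+  # echo 'filename.ustring ~ "*password*"' > events/syscalls/sys_enter_openat/filter
+
+The 1024 byte copy limit described above applies to each compare performed by
+such a filter.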
+ 5.3 Clearing filters -------------------- diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 37f724ad5e3992297b4513bc7498bd4e981cdac7..a85e9c625ab50b693337ad1ebbf1f5b36372b68f 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -43,7 +43,7 @@ SYSCALL_DEFINE0(arc_gettls) return task_thread_info(current)->thr_ptr; } -SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) +SYSCALL_DEFINE3(arc_usr_cmpxchg, int __user *, uaddr, int, expected, int, new) { struct pt_regs *regs = current_pt_regs(); u32 uval; diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi index 910eacc8ad3bd25fb1eb0f84b6c9916bc66d3838..a362714ae9fc0307b083dc837c416e59c520ecc9 100644 --- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi +++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi @@ -118,7 +118,7 @@ pinctrl_fwspid_default: fwspid_default { }; pinctrl_fwqspid_default: fwqspid_default { - function = "FWQSPID"; + function = "FWSPID"; groups = "FWQSPID"; }; diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi index 55ec83bde5a61f4ccb085923b7684df37eba8abc..b50229c3102fabe58d02ab4f027de282883726cd 100644 --- a/arch/arm/boot/dts/bcm2711.dtsi +++ b/arch/arm/boot/dts/bcm2711.dtsi @@ -290,6 +290,7 @@ pixelvalve4: pixelvalve@7e216000 { hvs: hvs@7e400000 { compatible = "brcm,bcm2711-hvs"; + reg = <0x7e400000 0x8000>; interrupts = ; }; @@ -432,12 +433,26 @@ cpus: cpus { #size-cells = <0>; enable-method = "brcm,bcm2836-smp"; // for ARM 32-bit + /* Source for d/i-cache-line-size and d/i-cache-sets + * https://developer.arm.com/documentation/100095/0003 + * /Level-1-Memory-System/About-the-L1-memory-system?lang=en + * Source for d/i-cache-size + * https://www.raspberrypi.com/documentation/computers + * /processors.html#bcm2711 + */ cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a72"; reg = <0>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000d8>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + i-cache-size = <0xc000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set + next-level-cache = <&l2>; }; cpu1: cpu@1 { @@ -446,6 +461,13 @@ cpu1: cpu@1 { reg = <1>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000e0>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + i-cache-size = <0xc000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set + next-level-cache = <&l2>; }; cpu2: cpu@2 { @@ -454,6 +476,13 @@ cpu2: cpu@2 { reg = <2>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000e8>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + i-cache-size = <0xc000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set + next-level-cache = <&l2>; }; cpu3: cpu@3 { @@ -462,6 +491,28 @@ cpu3: cpu@3 { reg = <3>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000f0>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + i-cache-size = <0xc000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set + next-level-cache = <&l2>; + }; + + /* Source for d/i-cache-line-size and d/i-cache-sets + 
* https://developer.arm.com/documentation/100095/0003 + * /Level-2-Memory-System/About-the-L2-memory-system?lang=en + * Source for d/i-cache-size + * https://www.raspberrypi.com/documentation/computers + * /processors.html#bcm2711 + */ + l2: l2-cache0 { + compatible = "cache"; + cache-size = <0x100000>; + cache-line-size = <64>; + cache-sets = <1024>; // 1MiB(size)/64(line-size)=16384ways/16-way set + cache-level = <2>; }; }; diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi index 0199ec98cd61690ad964c1bbe782fcf64583b236..5dbdebc4625946a266d2c3f1719256086d5a2a59 100644 --- a/arch/arm/boot/dts/bcm2837.dtsi +++ b/arch/arm/boot/dts/bcm2837.dtsi @@ -40,12 +40,26 @@ cpus: cpus { #size-cells = <0>; enable-method = "brcm,bcm2836-smp"; // for ARM 32-bit + /* Source for d/i-cache-line-size and d/i-cache-sets + * https://developer.arm.com/documentation/ddi0500/e/level-1-memory-system + * /about-the-l1-memory-system?lang=en + * + * Source for d/i-cache-size + * https://magpi.raspberrypi.com/articles/raspberry-pi-3-specs-benchmarks + */ cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a53"; reg = <0>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000d8>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + next-level-cache = <&l2>; }; cpu1: cpu@1 { @@ -54,6 +68,13 @@ cpu1: cpu@1 { reg = <1>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000e0>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + next-level-cache = <&l2>; }; cpu2: cpu@2 { @@ -62,6 +83,13 @@ cpu2: cpu@2 { reg = <2>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000e8>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + next-level-cache = <&l2>; }; cpu3: cpu@3 { @@ -70,6 +98,27 @@ cpu3: cpu@3 { reg = <3>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000f0>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + next-level-cache = <&l2>; + }; + + /* Source for cache-line-size + cache-sets + * https://developer.arm.com/documentation/ddi0500 + * /e/level-2-memory-system/about-the-l2-memory-system?lang=en + * Source for cache-size + * https://datasheets.raspberrypi.com/cm/cm1-and-cm3-datasheet.pdf + */ + l2: l2-cache0 { + compatible = "cache"; + cache-size = <0x80000>; + cache-line-size = <64>; + cache-sets = <512>; // 512KiB(size)/64(line-size)=8192ways/16-way set + cache-level = <2>; }; }; }; diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index 30b72f4318501181c543dd028971d5eccf40c567..f8c0eee7a62b99728989a68d867e3c0d0282528a 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -3448,8 +3448,7 @@ timer14: timer@0 { ti,timer-pwm; }; }; - - target-module@2c000 { /* 0x4882c000, ap 17 02.0 */ + 
timer15_target: target-module@2c000 { /* 0x4882c000, ap 17 02.0 */ compatible = "ti,sysc-omap4-timer", "ti,sysc"; reg = <0x2c000 0x4>, <0x2c010 0x4>; @@ -3477,7 +3476,7 @@ timer15: timer@0 { }; }; - target-module@2e000 { /* 0x4882e000, ap 19 14.0 */ + timer16_target: target-module@2e000 { /* 0x4882e000, ap 19 14.0 */ compatible = "ti,sysc-omap4-timer", "ti,sysc"; reg = <0x2e000 0x4>, <0x2e010 0x4>; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 7ecf8f86ac747ce2c4af9cd1d17d82937509a19a..9989321366560fd2f03d6837c8d63192a1dbf4c3 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1093,20 +1093,20 @@ timer@0 { }; /* Local timers, see ARM architected timer wrap erratum i940 */ -&timer3_target { +&timer15_target { ti,no-reset-on-init; ti,no-idle; timer@0 { - assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>; + assigned-clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>; assigned-clock-parents = <&timer_sys_clk_div>; }; }; -&timer4_target { +&timer16_target { ti,no-reset-on-init; ti,no-idle; timer@0 { - assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>; + assigned-clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>; assigned-clock-parents = <&timer_sys_clk_div>; }; }; diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi index d31a68672bfacb3a2f6575c26790db05b5498d6c..d7d756614edd1f27e125bd7b791a8ae196eba99b 100644 --- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi +++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi @@ -260,7 +260,7 @@ i2c3_hs_bus: i2c3-hs-bus { }; uart3_data: uart3-data { - samsung,pins = "gpa1-4", "gpa1-4"; + samsung,pins = "gpa1-4", "gpa1-5"; samsung,pin-function = ; samsung,pin-pud = ; samsung,pin-drv = ; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index d0e48c10aec2bfc0624baf8417b3a2d7b242a551..572198b6834e6b2aecee01a121fce38a1d97761a 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -118,6 +118,9 @@ &hdmi { status = "okay"; ddc = <&i2c_2>; hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>; + vdd-supply = <&ldo8_reg>; + vdd_osc-supply = <&ldo10_reg>; + vdd_pll-supply = <&ldo8_reg>; }; &i2c_0 { diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts index 4e49d8095b2927d8d31849916d1bee1637824429..741294bd564e7f455baacbc31fba2bc324ef12e5 100644 --- a/arch/arm/boot/dts/exynos5420-smdk5420.dts +++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts @@ -124,6 +124,9 @@ &hdmi { hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&hdmi_hpd_irq>; + vdd-supply = <&ldo6_reg>; + vdd_osc-supply = <&ldo7_reg>; + vdd_pll-supply = <&ldo6_reg>; }; &hsi2c_4 { diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts index 4f88e96d81ddbdd42078409d0a94e43f3e194566..d5c68d1ea707c6610ff1ef2b1c3bb9883ad5e803 100644 --- a/arch/arm/boot/dts/imx53-m53menlo.dts +++ b/arch/arm/boot/dts/imx53-m53menlo.dts @@ -53,6 +53,31 @@ eth { }; }; + lvds-decoder { + compatible = "ti,ds90cf364a", "lvds-decoder"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + lvds_decoder_in: endpoint { + remote-endpoint = <&lvds0_out>; + }; + }; + + port@1 { + reg = <1>; + + lvds_decoder_out: endpoint { + remote-endpoint = <&panel_in>; + }; + }; + }; + }; + panel { compatible = "edt,etm0700g0dh6"; pinctrl-0 = <&pinctrl_display_gpio>; @@ -61,7 +86,7 @@ panel { port { 
panel_in: endpoint { - remote-endpoint = <&lvds0_out>; + remote-endpoint = <&lvds_decoder_out>; }; }; }; @@ -450,7 +475,7 @@ port@2 { reg = <2>; lvds0_out: endpoint { - remote-endpoint = <&panel_in>; + remote-endpoint = <&lvds_decoder_in>; }; }; }; diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi index 62b771c1d5a9a71058dfb478a6f6f14b4641dbaa..f1c60b0cb143edad66f9376f34b0a856bfc86187 100644 --- a/arch/arm/boot/dts/imx7-colibri.dtsi +++ b/arch/arm/boot/dts/imx7-colibri.dtsi @@ -40,7 +40,7 @@ simple-audio-card,cpu { dailink_master: simple-audio-card,codec { sound-dai = <&codec>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; }; }; }; @@ -293,7 +293,7 @@ codec: sgtl5000@a { compatible = "fsl,sgtl5000"; #sound-dai-cells = <0>; reg = <0x0a>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_sai1_mclk>; VDDA-supply = <®_module_3v3_avdd>; diff --git a/arch/arm/boot/dts/imx7-mba7.dtsi b/arch/arm/boot/dts/imx7-mba7.dtsi index 50abf18ad30b20dc530c3b558d19956e98443c2f..887497e3bb4b8ff0e608835bc01bfe86ec07a78f 100644 --- a/arch/arm/boot/dts/imx7-mba7.dtsi +++ b/arch/arm/boot/dts/imx7-mba7.dtsi @@ -250,7 +250,7 @@ &i2c2 { tlv320aic32x4: audio-codec@18 { compatible = "ti,tlv320aic32x4"; reg = <0x18>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; clock-names = "mclk"; ldoin-supply = <®_audio_3v3>; iov-supply = <®_audio_3v3>; diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts index e0751e6ba3c0f7e0d4709a5c1106eaeb7ab4f33b..a31de900139d6d10f0c849756bbf95e83ec5157d 100644 --- a/arch/arm/boot/dts/imx7d-nitrogen7.dts +++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts @@ -288,7 +288,7 @@ &i2c4 { codec: wm8960@1a { compatible = "wlf,wm8960"; reg = <0x1a>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; clock-names = "mclk"; wlf,shared-lrclk; }; diff --git a/arch/arm/boot/dts/imx7d-pico-hobbit.dts b/arch/arm/boot/dts/imx7d-pico-hobbit.dts index 7b2198a9372c621e0276bd509c3b373cf02c4072..d917dc4f2f22759bc546c18248bbff7fcc3d726f 100644 --- a/arch/arm/boot/dts/imx7d-pico-hobbit.dts +++ b/arch/arm/boot/dts/imx7d-pico-hobbit.dts @@ -31,7 +31,7 @@ simple-audio-card,cpu { dailink_master: simple-audio-card,codec { sound-dai = <&sgtl5000>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; }; }; }; @@ -41,7 +41,7 @@ sgtl5000: codec@a { #sound-dai-cells = <0>; reg = <0x0a>; compatible = "fsl,sgtl5000"; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; VDDA-supply = <®_2p5v>; VDDIO-supply = <®_vref_1v8>; }; diff --git a/arch/arm/boot/dts/imx7d-pico-pi.dts b/arch/arm/boot/dts/imx7d-pico-pi.dts index 70bea95c06d83f84737d13478d1d7b37dca2272d..f263e391e24cbb6c44d569cf284dde74020f58ac 100644 --- a/arch/arm/boot/dts/imx7d-pico-pi.dts +++ b/arch/arm/boot/dts/imx7d-pico-pi.dts @@ -31,7 +31,7 @@ simple-audio-card,cpu { dailink_master: simple-audio-card,codec { sound-dai = <&sgtl5000>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; }; }; }; @@ -41,7 +41,7 @@ sgtl5000: codec@a { #sound-dai-cells = <0>; reg = <0x0a>; compatible = "fsl,sgtl5000"; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; VDDA-supply = <®_2p5v>; VDDIO-supply = <®_vref_1v8>; }; diff --git 
a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index ac0751bc1177e1544c3c00b9a9073a7e89252d71..6823b9f1a2a32a960b1940d878f716af6bf857e8 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -378,14 +378,14 @@ &i2c4 { codec: wm8960@1a { compatible = "wlf,wm8960"; reg = <0x1a>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; clock-names = "mclk"; wlf,shared-lrclk; wlf,hp-cfg = <2 2 3>; wlf,gpio-cfg = <1 3>; assigned-clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_SRC>, <&clks IMX7D_PLL_AUDIO_POST_DIV>, - <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; assigned-clock-parents = <&clks IMX7D_PLL_AUDIO_POST_DIV>; assigned-clock-rates = <0>, <884736000>, <12288000>; }; diff --git a/arch/arm/boot/dts/imx7s-warp.dts b/arch/arm/boot/dts/imx7s-warp.dts index d6b4888fa686bcc8b33699c4078429da53fb762f..e035dd5bf4f62ec6f6a687fb97e25c3650fe8e0d 100644 --- a/arch/arm/boot/dts/imx7s-warp.dts +++ b/arch/arm/boot/dts/imx7s-warp.dts @@ -75,7 +75,7 @@ simple-audio-card,cpu { dailink_master: simple-audio-card,codec { sound-dai = <&codec>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; }; }; }; @@ -232,7 +232,7 @@ codec: sgtl5000@a { #sound-dai-cells = <0>; reg = <0x0a>; compatible = "fsl,sgtl5000"; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_sai1_mclk>; VDDA-supply = <&vgen4_reg>; diff --git a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi index 2c19d6e255bdc9c4c15cefcb4e12ecd85c2de28d..6883ccb45600b38a752353ad8118eb57695150c1 100644 --- a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi +++ b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi @@ -158,6 +158,24 @@ &mmc3 { status = "disabled"; }; +/* Unusable as clockevent because if unreliable oscillator, allow to idle */ +&timer1_target { + /delete-property/ti,no-reset-on-init; + /delete-property/ti,no-idle; + timer@0 { + /delete-property/ti,timer-alwon; + }; +}; + +/* Preferred timer for clockevent */ +&timer12_target { + ti,no-reset-on-init; + ti,no-idle; + timer@0 { + /* Always clocked by secure_32k_fck */ + }; +}; + &twl_gpio { ti,use-leds; /* diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts index c2995a280729d27900eeda63af011ce94d8b995a..162d0726b00801c50f4b029ef011f6d24cced686 100644 --- a/arch/arm/boot/dts/omap3-devkit8000.dts +++ b/arch/arm/boot/dts/omap3-devkit8000.dts @@ -14,36 +14,3 @@ aliases { display2 = &tv0; }; }; - -/* Unusable as clocksource because of unreliable oscillator */ -&counter32k { - status = "disabled"; -}; - -/* Unusable as clockevent because if unreliable oscillator, allow to idle */ -&timer1_target { - /delete-property/ti,no-reset-on-init; - /delete-property/ti,no-idle; - timer@0 { - /delete-property/ti,timer-alwon; - }; -}; - -/* Preferred always-on timer for clocksource */ -&timer12_target { - ti,no-reset-on-init; - ti,no-idle; - timer@0 { - /* Always clocked by secure_32k_fck */ - }; -}; - -/* Preferred timer for clockevent */ -&timer2_target { - ti,no-reset-on-init; - ti,no-idle; - timer@0 { - assigned-clocks = <&gpt2_fck>; - assigned-clock-parents = <&sys_ck>; - }; -}; diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi index 74d8e2c8e4b343f55742b49ed8797793b67d2d20..3defd47fd8fabb026f1972afa78680c1f787815d 100644 --- a/arch/arm/boot/dts/qcom-ipq4019.dtsi 
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi @@ -142,7 +142,8 @@ pmu { clocks { sleep_clk: sleep_clk { compatible = "fixed-clock"; - clock-frequency = <32768>; + clock-frequency = <32000>; + clock-output-names = "gcc_sleep_clk_src"; #clock-cells = <0>; }; diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi index 172ea3c70eac200e85f89917ba3c1533ee9ea036..c197927e7435f5434a2a3e97ee15c0b0b51cbbd1 100644 --- a/arch/arm/boot/dts/qcom-msm8960.dtsi +++ b/arch/arm/boot/dts/qcom-msm8960.dtsi @@ -146,7 +146,9 @@ rpm@108000 { reg = <0x108000 0x1000>; qcom,ipc = <&l2cc 0x8 2>; - interrupts = <0 19 0>, <0 21 0>, <0 22 0>; + interrupts = , + , + ; interrupt-names = "ack", "err", "wakeup"; regulators { @@ -192,7 +194,7 @@ gsbi5_serial: serial@16440000 { compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm"; reg = <0x16440000 0x1000>, <0x16400000 0x1000>; - interrupts = <0 154 0x0>; + interrupts = ; clocks = <&gcc GSBI5_UART_CLK>, <&gcc GSBI5_H_CLK>; clock-names = "core", "iface"; status = "disabled"; @@ -318,7 +320,7 @@ spi@16080000 { #address-cells = <1>; #size-cells = <0>; reg = <0x16080000 0x1000>; - interrupts = <0 147 0>; + interrupts = ; spi-max-frequency = <24000000>; cs-gpios = <&msmgpio 8 0>; diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi index 7de8b006ca13aad537e06aa5fa41a57b1f14906a..2f17bf35d7a65f5a68bcd0bc3287cc46ca090053 100644 --- a/arch/arm/boot/dts/rk322x.dtsi +++ b/arch/arm/boot/dts/rk322x.dtsi @@ -640,8 +640,8 @@ hdmi: hdmi@200a0000 { interrupts = ; assigned-clocks = <&cru SCLK_HDMI_PHY>; assigned-clock-parents = <&hdmi_phy>; - clocks = <&cru SCLK_HDMI_HDCP>, <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_CEC>; - clock-names = "isfr", "iahb", "cec"; + clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>, <&cru SCLK_HDMI_CEC>; + clock-names = "iahb", "isfr", "cec"; pinctrl-names = "default"; pinctrl-0 = <&hdmii2c_xfer &hdmi_hpd &hdmi_cec>; resets = <&cru SRST_HDMI_P>; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 0d89ad274268baec38f05ef8d624b3f9863233fe..9051fb4a267d4f9daac2db4d034945605172bbb4 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -990,7 +990,7 @@ i2s: i2s@ff890000 { status = "disabled"; }; - crypto: cypto-controller@ff8a0000 { + crypto: crypto@ff8a0000 { compatible = "rockchip,rk3288-crypto"; reg = <0x0 0xff8a0000 0x0 0x4000>; interrupts = ; diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 2c4952427296efc6ba96428096a6cf71b59e455b..12f57278ba4a53b9ff948f36917f5b672f5669c6 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -413,7 +413,7 @@ hsmc: hsmc@f8014000 { pmecc: ecc-engine@f8014070 { compatible = "atmel,sama5d2-pmecc"; reg = <0xf8014070 0x490>, - <0xf8014500 0x100>; + <0xf8014500 0x200>; }; }; diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi index 1a8f5e8b10e3a2fd472634d852c66b8131eaae2e..66cd473ecb61796b9f919e0b50c1df5ee1fc7df8 100644 --- a/arch/arm/boot/dts/spear1340.dtsi +++ b/arch/arm/boot/dts/spear1340.dtsi @@ -136,9 +136,9 @@ serial@b4100000 { reg = <0xb4100000 0x1000>; interrupts = <0 105 0x4>; status = "disabled"; - dmas = <&dwdma0 12 0 1>, - <&dwdma0 13 1 0>; - dma-names = "tx", "rx"; + dmas = <&dwdma0 13 0 1>, + <&dwdma0 12 1 0>; + dma-names = "rx", "tx"; }; thermal@e07008c4 { diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index c87b881b2c8bb244fc43cdc37c97c6bd0f7657eb..9135533676879e8abb0d24aeb9a6c14b4701261d 
100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi @@ -284,9 +284,9 @@ spi0: spi@e0100000 { #size-cells = <0>; interrupts = <0 31 0x4>; status = "disabled"; - dmas = <&dwdma0 4 0 0>, - <&dwdma0 5 0 0>; - dma-names = "tx", "rx"; + dmas = <&dwdma0 5 0 0>, + <&dwdma0 4 0 0>; + dma-names = "rx", "tx"; }; rtc@e0580000 { diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi index 89abd4cc7e23a381953d70de63e4b40539cae4ea..b21ecb820b133055caf17d8c7be3f584d42365d2 100644 --- a/arch/arm/boot/dts/sun8i-v3s.dtsi +++ b/arch/arm/boot/dts/sun8i-v3s.dtsi @@ -524,6 +524,17 @@ spi0: spi@1c68000 { #size-cells = <0>; }; + gic: interrupt-controller@1c81000 { + compatible = "arm,gic-400"; + reg = <0x01c81000 0x1000>, + <0x01c82000 0x2000>, + <0x01c84000 0x2000>, + <0x01c86000 0x2000>; + interrupt-controller; + #interrupt-cells = <3>; + interrupts = ; + }; + csi1: camera@1cb4000 { compatible = "allwinner,sun8i-v3s-csi"; reg = <0x01cb4000 0x3000>; @@ -535,16 +546,5 @@ csi1: camera@1cb4000 { resets = <&ccu RST_BUS_CSI>; status = "disabled"; }; - - gic: interrupt-controller@1c81000 { - compatible = "arm,gic-400"; - reg = <0x01c81000 0x1000>, - <0x01c82000 0x2000>, - <0x01c84000 0x2000>, - <0x01c86000 0x2000>; - interrupt-controller; - #interrupt-cells = <3>; - interrupts = ; - }; }; }; diff --git a/arch/arm/boot/dts/tegra124-nyan-big.dts b/arch/arm/boot/dts/tegra124-nyan-big.dts index 1d2aac2cb6d038b50db7e48fbcdc2432e7564d13..fdc1d64dfff9dccbd9d3cc69a90e8c803de56dd1 100644 --- a/arch/arm/boot/dts/tegra124-nyan-big.dts +++ b/arch/arm/boot/dts/tegra124-nyan-big.dts @@ -13,12 +13,15 @@ / { "google,nyan-big-rev1", "google,nyan-big-rev0", "google,nyan-big", "google,nyan", "nvidia,tegra124"; - panel: panel { - compatible = "auo,b133xtn01"; - - power-supply = <&vdd_3v3_panel>; - backlight = <&backlight>; - ddc-i2c-bus = <&dpaux>; + host1x@50000000 { + dpaux@545c0000 { + aux-bus { + panel: panel { + compatible = "auo,b133xtn01"; + backlight = <&backlight>; + }; + }; + }; }; mmc@700b0400 { /* SD Card on this bus */ diff --git a/arch/arm/boot/dts/tegra124-nyan-blaze.dts b/arch/arm/boot/dts/tegra124-nyan-blaze.dts index 677babde6460ed1eb39a1e5d2db5fc42c896e1f4..abdf4456826f8f7100519e742fadb01b110e04db 100644 --- a/arch/arm/boot/dts/tegra124-nyan-blaze.dts +++ b/arch/arm/boot/dts/tegra124-nyan-blaze.dts @@ -15,12 +15,15 @@ / { "google,nyan-blaze-rev0", "google,nyan-blaze", "google,nyan", "nvidia,tegra124"; - panel: panel { - compatible = "samsung,ltn140at29-301"; - - power-supply = <&vdd_3v3_panel>; - backlight = <&backlight>; - ddc-i2c-bus = <&dpaux>; + host1x@50000000 { + dpaux@545c0000 { + aux-bus { + panel: panel { + compatible = "samsung,ltn140at29-301"; + backlight = <&backlight>; + }; + }; + }; }; sound { diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts index e6b54ac1ebd1a4252c2386a2370795a0c9326876..84e2d24065e9ae98a635ac29d40aeb3a0a543986 100644 --- a/arch/arm/boot/dts/tegra124-venice2.dts +++ b/arch/arm/boot/dts/tegra124-venice2.dts @@ -48,6 +48,13 @@ sor@54540000 { dpaux@545c0000 { vdd-supply = <&vdd_3v3_panel>; status = "okay"; + + aux-bus { + panel: panel { + compatible = "lg,lp129qe"; + backlight = <&backlight>; + }; + }; }; }; @@ -1079,13 +1086,6 @@ power { }; }; - panel: panel { - compatible = "lg,lp129qe"; - power-supply = <&vdd_3v3_panel>; - backlight = <&backlight>; - ddc-i2c-bus = <&dpaux>; - }; - vdd_mux: regulator@0 { compatible = "regulator-fixed"; regulator-name = "+VDD_MUX"; diff --git 
a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi index dd4d506683de7dcf07c45b173e57077ce79acbf0..7f14f0d005c3e2055103943c4f7ac93997a9ba14 100644 --- a/arch/arm/boot/dts/tegra20-tamonten.dtsi +++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi @@ -183,8 +183,8 @@ uca { }; conf_ata { nvidia,pins = "ata", "atb", "atc", "atd", "ate", - "cdev1", "cdev2", "dap1", "dtb", "gma", - "gmb", "gmc", "gmd", "gme", "gpu7", + "cdev1", "cdev2", "dap1", "dtb", "dtf", + "gma", "gmb", "gmc", "gmd", "gme", "gpu7", "gpv", "i2cp", "irrx", "irtx", "pta", "rm", "slxa", "slxk", "spia", "spib", "uac"; @@ -203,7 +203,7 @@ conf_csus { }; conf_crtp { nvidia,pins = "crtp", "dap2", "dap3", "dap4", - "dtc", "dte", "dtf", "gpu", "sdio1", + "dtc", "dte", "gpu", "sdio1", "slxc", "slxd", "spdi", "spdo", "spig", "uda"; nvidia,pull = ; diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig index e00be9faa23bfa7e1a28a8b8f8d0bd8ba0c60a2b..4393e689f2354cb9a79da77e11c729ac6905873e 100644 --- a/arch/arm/configs/multi_v5_defconfig +++ b/arch/arm/configs/multi_v5_defconfig @@ -187,6 +187,7 @@ CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_PLATFORM_SUPPORT=y CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_VIDEO_ASPEED=m CONFIG_VIDEO_ATMEL_ISI=m diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index c9bf2df85cb904d735bac933817041af206b8479..c46c05548080aaaa0a6366bb0c25e4ee40ae5e70 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -83,6 +83,8 @@ config CRYPTO_AES_ARM_BS depends on KERNEL_MODE_NEON select CRYPTO_SKCIPHER select CRYPTO_LIB_AES + select CRYPTO_AES + select CRYPTO_CBC select CRYPTO_SIMD help Use a faster and more secure NEON based implementation of AES in CBC, diff --git a/arch/arm/include/asm/livepatch.h b/arch/arm/include/asm/livepatch.h index befa1efbbcd124616b9add5f4d65dfbc74f8d4e9..47d8b01618c74743324f061f2cf9034570a8b670 100644 --- a/arch/arm/include/asm/livepatch.h +++ b/arch/arm/include/asm/livepatch.h @@ -23,6 +23,8 @@ #include +#define KLP_ARM_BREAKPOINT_INSTRUCTION 0xe7f001f9 + struct klp_patch; struct klp_func; @@ -47,9 +49,18 @@ int klp_check_calltrace(struct klp_patch *patch, int enable); struct arch_klp_data { u32 old_insns[LJMP_INSN_SIZE]; + + /* + * Saved opcode at the entry of the old func (which maybe replaced + * with breakpoint). + */ + u32 saved_opcode; }; +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +int arch_klp_module_check_calltrace(void *data); #endif diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S index a74289ebc803699955155b4f31bd387c8f23b9bd..5f1b1ce10473aa80c8fae70d130d45723c2bfdf2 100644 --- a/arch/arm/kernel/entry-ftrace.S +++ b/arch/arm/kernel/entry-ftrace.S @@ -22,10 +22,7 @@ * mcount can be thought of as a function called in the middle of a subroutine * call. As such, it needs to be transparent for both the caller and the * callee: the original lr needs to be restored when leaving mcount, and no - * registers should be clobbered. (In the __gnu_mcount_nc implementation, we - * clobber the ip register. This is OK because the ARM calling convention - * allows it to be clobbered in subroutines and doesn't use it to hold - * parameters.) + * registers should be clobbered. 
* * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}" * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c). @@ -70,26 +67,25 @@ .macro __ftrace_regs_caller - sub sp, sp, #8 @ space for PC and CPSR OLD_R0, + str lr, [sp, #-8]! @ store LR as PC and make space for CPSR/OLD_R0, @ OLD_R0 will overwrite previous LR - add ip, sp, #12 @ move in IP the value of SP as it was - @ before the push {lr} of the mcount mechanism + ldr lr, [sp, #8] @ get previous LR - str lr, [sp, #0] @ store LR instead of PC + str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR - ldr lr, [sp, #8] @ get previous LR + str lr, [sp, #-4]! @ store previous LR as LR - str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR + add lr, sp, #16 @ move in LR the value of SP as it was + @ before the push {lr} of the mcount mechanism - stmdb sp!, {ip, lr} - stmdb sp!, {r0-r11, lr} + push {r0-r11, ip, lr} @ stack content at this point: @ 0 4 48 52 56 60 64 68 72 - @ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 | + @ R0 | R1 | ... | IP | SP + 4 | previous LR | LR | PSR | OLD_R0 | - mov r3, sp @ struct pt_regs* + mov r3, sp @ struct pt_regs* ldr r2, =function_trace_op ldr r2, [r2] @ pointer to the current @@ -112,11 +108,9 @@ ftrace_graph_regs_call: #endif @ pop saved regs - ldmia sp!, {r0-r12} @ restore r0 through r12 - ldr ip, [sp, #8] @ restore PC - ldr lr, [sp, #4] @ restore LR - ldr sp, [sp, #0] @ restore SP - mov pc, ip @ return + pop {r0-r11, ip, lr} @ restore r0 through r12 + ldr lr, [sp], #4 @ restore LR + ldr pc, [sp], #12 .endm #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -132,11 +126,9 @@ ftrace_graph_regs_call: bl prepare_ftrace_return @ pop registers saved in ftrace_regs_caller - ldmia sp!, {r0-r12} @ restore r0 through r12 - ldr ip, [sp, #8] @ restore PC - ldr lr, [sp, #4] @ restore LR - ldr sp, [sp, #0] @ restore SP - mov pc, ip @ return + pop {r0-r11, ip, lr} @ restore r0 through r12 + ldr lr, [sp], #4 @ restore LR + ldr pc, [sp], #12 .endm #endif @@ -202,16 +194,17 @@ ftrace_graph_call\suffix: .endm .macro mcount_exit - ldmia sp!, {r0-r3, ip, lr} - ret ip + ldmia sp!, {r0-r3} + ldr lr, [sp, #4] + ldr pc, [sp], #8 .endm ENTRY(__gnu_mcount_nc) UNWIND(.fnstart) #ifdef CONFIG_DYNAMIC_FTRACE - mov ip, lr - ldmia sp!, {lr} - ret ip + push {lr} + ldr lr, [sp, #4] + ldr pc, [sp], #8 #else __mcount #endif diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index 7bd30c0a4280d9a6e029c05c49e8fb2d9b373b00..22f937e6f3ffb12a7e854179b73ea2a77c0eb06b 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -154,22 +154,38 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr) return 0; } -static struct undef_hook kgdb_brkpt_hook = { +static struct undef_hook kgdb_brkpt_arm_hook = { .instr_mask = 0xffffffff, .instr_val = KGDB_BREAKINST, - .cpsr_mask = MODE_MASK, + .cpsr_mask = PSR_T_BIT | MODE_MASK, .cpsr_val = SVC_MODE, .fn = kgdb_brk_fn }; -static struct undef_hook kgdb_compiled_brkpt_hook = { +static struct undef_hook kgdb_brkpt_thumb_hook = { + .instr_mask = 0xffff, + .instr_val = KGDB_BREAKINST & 0xffff, + .cpsr_mask = PSR_T_BIT | MODE_MASK, + .cpsr_val = PSR_T_BIT | SVC_MODE, + .fn = kgdb_brk_fn +}; + +static struct undef_hook kgdb_compiled_brkpt_arm_hook = { .instr_mask = 0xffffffff, .instr_val = KGDB_COMPILED_BREAK, - .cpsr_mask = MODE_MASK, + .cpsr_mask = PSR_T_BIT | MODE_MASK, .cpsr_val = SVC_MODE, .fn = kgdb_compiled_brk_fn }; +static struct undef_hook kgdb_compiled_brkpt_thumb_hook = { + .instr_mask = 0xffff, + .instr_val = 
KGDB_COMPILED_BREAK & 0xffff, + .cpsr_mask = PSR_T_BIT | MODE_MASK, + .cpsr_val = PSR_T_BIT | SVC_MODE, + .fn = kgdb_compiled_brk_fn +}; + static int __kgdb_notify(struct die_args *args, unsigned long cmd) { struct pt_regs *regs = args->regs; @@ -210,8 +226,10 @@ int kgdb_arch_init(void) if (ret != 0) return ret; - register_undef_hook(&kgdb_brkpt_hook); - register_undef_hook(&kgdb_compiled_brkpt_hook); + register_undef_hook(&kgdb_brkpt_arm_hook); + register_undef_hook(&kgdb_brkpt_thumb_hook); + register_undef_hook(&kgdb_compiled_brkpt_arm_hook); + register_undef_hook(&kgdb_compiled_brkpt_thumb_hook); return 0; } @@ -224,8 +242,10 @@ int kgdb_arch_init(void) */ void kgdb_arch_exit(void) { - unregister_undef_hook(&kgdb_brkpt_hook); - unregister_undef_hook(&kgdb_compiled_brkpt_hook); + unregister_undef_hook(&kgdb_brkpt_arm_hook); + unregister_undef_hook(&kgdb_brkpt_thumb_hook); + unregister_undef_hook(&kgdb_compiled_brkpt_arm_hook); + unregister_undef_hook(&kgdb_compiled_brkpt_thumb_hook); unregister_die_notifier(&kgdb_notifier); } diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index da88113d14e98ffadfecaa1ca28c620ffc5d9af2..713ce67fa6e3e287cd8372998831f76a78061514 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include #include #include @@ -73,6 +75,7 @@ struct klp_func_list { struct walk_stackframe_args { int enable; struct klp_func_list *check_funcs; + struct module *mod; int ret; }; @@ -86,16 +89,6 @@ static inline unsigned long klp_size_to_check(unsigned long func_size, return size; } -static inline int klp_compare_address(unsigned long pc, unsigned long func_addr, - const char *func_name, unsigned long check_size) -{ - if (pc >= func_addr && pc < func_addr + check_size) { - pr_err("func %s is in use!\n", func_name); - return -EBUSY; - } - return 0; -} - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; @@ -147,7 +140,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { if (enable) { - if (func->force == KLP_ENFORCEMENT) + if (func->patched || func->force == KLP_ENFORCEMENT) continue; /* * When enable, checking the currently @@ -272,29 +265,18 @@ static void free_list(struct klp_func_list **funcs) } } -int klp_check_calltrace(struct klp_patch *patch, int enable) +static int do_check_calltrace(struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) { struct task_struct *g, *t; struct stackframe frame; - int ret = 0; - struct klp_func_list *check_funcs = NULL; - struct walk_stackframe_args args = { - .ret = 0 - }; - - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - args.check_funcs = check_funcs; for_each_process_thread(g, t) { if (t == current) { frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_stack_pointer; frame.lr = (unsigned long)__builtin_return_address(0); - frame.pc = (unsigned long)klp_check_calltrace; + frame.pc = (unsigned long)do_check_calltrace; } else if (strncmp(t->comm, "migration/", 10) == 0) { /* * current on other CPU @@ -312,21 +294,104 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) frame.lr = 0; /* recovered from the stack */ frame.pc = thread_saved_pc(t); } - if (check_funcs != NULL) { - walk_stackframe(&frame, klp_check_jump_func, &args); - if (args.ret) { 
- ret = args.ret; - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - goto out; - } + walk_stackframe(&frame, fn, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; } } + return 0; +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + struct klp_func_list *check_funcs = NULL; + struct walk_stackframe_args args = { + .enable = enable, + .ret = 0 + }; + + ret = klp_check_activeness_func(patch, enable, &check_funcs); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; + } + if (!check_funcs) + goto out; + + args.check_funcs = check_funcs; + ret = do_check_calltrace(&args, klp_check_jump_func); out: free_list(&check_funcs); return ret; } + +static int check_module_calltrace(struct stackframe *frame, void *data) +{ + struct walk_stackframe_args *args = data; + + if (within_module_core(frame->pc, args->mod)) { + pr_err("module %s is in use!\n", args->mod->name); + return (args->ret = -EBUSY); + } + return 0; +} + +int arch_klp_module_check_calltrace(void *data) +{ + struct walk_stackframe_args args = { + .mod = (struct module *)data, + .ret = 0 + }; + + return do_check_calltrace(&args, check_module_calltrace); +} + +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + u32 *addr = (u32 *)old_func; + + arch_data->saved_opcode = le32_to_cpu(*addr); + patch_text(old_func, KLP_ARM_BREAKPOINT_INSTRUCTION); + return 0; +} + +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + patch_text(old_func, arch_data->saved_opcode); +} + +static int klp_trap_handler(struct pt_regs *regs, unsigned int instr) +{ + void *brk_func = NULL; + unsigned long addr = regs->ARM_pc; + + brk_func = klp_get_brk_func((void *)addr); + if (!brk_func) { + pr_warn("Unrecoverable livepatch detected.\n"); + BUG(); + } + + regs->ARM_pc = (unsigned long)brk_func; + return 0; +} + +static struct undef_hook klp_arm_break_hook = { + .instr_mask = 0x0fffffff, + .instr_val = (KLP_ARM_BREAKPOINT_INSTRUCTION & 0x0fffffff), + .cpsr_mask = MODE_MASK, + .cpsr_val = SVC_MODE, + .fn = klp_trap_handler, +}; + +void arch_klp_init(void) +{ + register_undef_hook(&klp_arm_break_hook); +} + #endif static inline bool offset_in_range(unsigned long pc, unsigned long addr, diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index f8023061075348ce79bb40e47186ba0da30efa01..2d803839aa0676dc2a4593d7870ba6c63446acc8 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -193,7 +193,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr) destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data); /* Check access in reasonable access range for both SWP and SWPB */ - if (!access_ok((address & ~3), 4)) { + if (!access_ok((void __user *)(address & ~3), 4)) { pr_debug("SWP{B} emulation: access to %p not allowed!\n", (void *)address); res = -EFAULT; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 2d9e72ad1b0f923d8afafa49fd4e5cd5d1feadb6..a531afad87fdb984fc8444e9a9e971c7100dff73 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -589,7 +589,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags) if (end < start || flags) return -EINVAL; - if (!access_ok(start, end - start)) + if (!access_ok((void __user *)start, end - start)) return -EFAULT; return __do_cache_op(start, end); diff --git 
a/arch/arm/mach-iop32x/include/mach/entry-macro.S b/arch/arm/mach-iop32x/include/mach/entry-macro.S index 8e6766d4621eb7c6bf53afbd575f2eb5ec6f056f..341e5d9a6616d3287a87dc45ff8fb711566093c5 100644 --- a/arch/arm/mach-iop32x/include/mach/entry-macro.S +++ b/arch/arm/mach-iop32x/include/mach/entry-macro.S @@ -20,7 +20,7 @@ mrc p6, 0, \irqstat, c8, c0, 0 @ Read IINTSRC cmp \irqstat, #0 clzne \irqnr, \irqstat - rsbne \irqnr, \irqnr, #31 + rsbne \irqnr, \irqnr, #32 .endm .macro arch_ret_to_user, tmp1, tmp2 diff --git a/arch/arm/mach-iop32x/include/mach/irqs.h b/arch/arm/mach-iop32x/include/mach/irqs.h index c4e78df428e860e5b2b13b4bccd96eab8ea44ef2..e09ae5f48aec5c558cbf2582480b6f41ad58ff18 100644 --- a/arch/arm/mach-iop32x/include/mach/irqs.h +++ b/arch/arm/mach-iop32x/include/mach/irqs.h @@ -9,6 +9,6 @@ #ifndef __IRQS_H #define __IRQS_H -#define NR_IRQS 32 +#define NR_IRQS 33 #endif diff --git a/arch/arm/mach-iop32x/irq.c b/arch/arm/mach-iop32x/irq.c index 2d48bf1398c10d26c41776b9dfa9d1d6bc505f29..d1e8824cbd824a4620a045328e4b4025318b096f 100644 --- a/arch/arm/mach-iop32x/irq.c +++ b/arch/arm/mach-iop32x/irq.c @@ -32,14 +32,14 @@ static void intstr_write(u32 val) static void iop32x_irq_mask(struct irq_data *d) { - iop32x_mask &= ~(1 << d->irq); + iop32x_mask &= ~(1 << (d->irq - 1)); intctl_write(iop32x_mask); } static void iop32x_irq_unmask(struct irq_data *d) { - iop32x_mask |= 1 << d->irq; + iop32x_mask |= 1 << (d->irq - 1); intctl_write(iop32x_mask); } @@ -65,7 +65,7 @@ void __init iop32x_init_irq(void) machine_is_em7210()) *IOP3XX_PCIIRSR = 0x0f; - for (i = 0; i < NR_IRQS; i++) { + for (i = 1; i < NR_IRQS; i++) { irq_set_chip_and_handler(i, &ext_chip, handle_level_irq); irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE); } diff --git a/arch/arm/mach-iop32x/irqs.h b/arch/arm/mach-iop32x/irqs.h index 69858e4e905d13d37beb484f80459bf3c76cfe6a..e1dfc8b4e7d7e3e503a5f57ea49448b4f91d0189 100644 --- a/arch/arm/mach-iop32x/irqs.h +++ b/arch/arm/mach-iop32x/irqs.h @@ -7,36 +7,40 @@ #ifndef __IOP32X_IRQS_H #define __IOP32X_IRQS_H +/* Interrupts in Linux start at 1, hardware starts at 0 */ + +#define IOP_IRQ(x) ((x) + 1) + /* * IOP80321 chipset interrupts */ -#define IRQ_IOP32X_DMA0_EOT 0 -#define IRQ_IOP32X_DMA0_EOC 1 -#define IRQ_IOP32X_DMA1_EOT 2 -#define IRQ_IOP32X_DMA1_EOC 3 -#define IRQ_IOP32X_AA_EOT 6 -#define IRQ_IOP32X_AA_EOC 7 -#define IRQ_IOP32X_CORE_PMON 8 -#define IRQ_IOP32X_TIMER0 9 -#define IRQ_IOP32X_TIMER1 10 -#define IRQ_IOP32X_I2C_0 11 -#define IRQ_IOP32X_I2C_1 12 -#define IRQ_IOP32X_MESSAGING 13 -#define IRQ_IOP32X_ATU_BIST 14 -#define IRQ_IOP32X_PERFMON 15 -#define IRQ_IOP32X_CORE_PMU 16 -#define IRQ_IOP32X_BIU_ERR 17 -#define IRQ_IOP32X_ATU_ERR 18 -#define IRQ_IOP32X_MCU_ERR 19 -#define IRQ_IOP32X_DMA0_ERR 20 -#define IRQ_IOP32X_DMA1_ERR 21 -#define IRQ_IOP32X_AA_ERR 23 -#define IRQ_IOP32X_MSG_ERR 24 -#define IRQ_IOP32X_SSP 25 -#define IRQ_IOP32X_XINT0 27 -#define IRQ_IOP32X_XINT1 28 -#define IRQ_IOP32X_XINT2 29 -#define IRQ_IOP32X_XINT3 30 -#define IRQ_IOP32X_HPI 31 +#define IRQ_IOP32X_DMA0_EOT IOP_IRQ(0) +#define IRQ_IOP32X_DMA0_EOC IOP_IRQ(1) +#define IRQ_IOP32X_DMA1_EOT IOP_IRQ(2) +#define IRQ_IOP32X_DMA1_EOC IOP_IRQ(3) +#define IRQ_IOP32X_AA_EOT IOP_IRQ(6) +#define IRQ_IOP32X_AA_EOC IOP_IRQ(7) +#define IRQ_IOP32X_CORE_PMON IOP_IRQ(8) +#define IRQ_IOP32X_TIMER0 IOP_IRQ(9) +#define IRQ_IOP32X_TIMER1 IOP_IRQ(10) +#define IRQ_IOP32X_I2C_0 IOP_IRQ(11) +#define IRQ_IOP32X_I2C_1 IOP_IRQ(12) +#define IRQ_IOP32X_MESSAGING IOP_IRQ(13) +#define IRQ_IOP32X_ATU_BIST IOP_IRQ(14) +#define 
IRQ_IOP32X_PERFMON IOP_IRQ(15) +#define IRQ_IOP32X_CORE_PMU IOP_IRQ(16) +#define IRQ_IOP32X_BIU_ERR IOP_IRQ(17) +#define IRQ_IOP32X_ATU_ERR IOP_IRQ(18) +#define IRQ_IOP32X_MCU_ERR IOP_IRQ(19) +#define IRQ_IOP32X_DMA0_ERR IOP_IRQ(20) +#define IRQ_IOP32X_DMA1_ERR IOP_IRQ(21) +#define IRQ_IOP32X_AA_ERR IOP_IRQ(23) +#define IRQ_IOP32X_MSG_ERR IOP_IRQ(24) +#define IRQ_IOP32X_SSP IOP_IRQ(25) +#define IRQ_IOP32X_XINT0 IOP_IRQ(27) +#define IRQ_IOP32X_XINT1 IOP_IRQ(28) +#define IRQ_IOP32X_XINT2 IOP_IRQ(29) +#define IRQ_IOP32X_XINT3 IOP_IRQ(30) +#define IRQ_IOP32X_HPI IOP_IRQ(31) #endif diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c index 6794e2db1ad5f5ae1f0ea7026b73bf5fb7894b38..ecc46c31004f660961a450027a6d1bfd8e86a053 100644 --- a/arch/arm/mach-mmp/sram.c +++ b/arch/arm/mach-mmp/sram.c @@ -72,6 +72,8 @@ static int sram_probe(struct platform_device *pdev) if (!info) return -ENOMEM; + platform_set_drvdata(pdev, info); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no memory resource defined\n"); @@ -107,8 +109,6 @@ static int sram_probe(struct platform_device *pdev) list_add(&info->node, &sram_bank_list); mutex_unlock(&sram_lock); - platform_set_drvdata(pdev, info); - dev_info(&pdev->dev, "initialized\n"); return 0; @@ -127,17 +127,19 @@ static int sram_remove(struct platform_device *pdev) struct sram_bank_info *info; info = platform_get_drvdata(pdev); - if (info == NULL) - return -ENODEV; - mutex_lock(&sram_lock); - list_del(&info->node); - mutex_unlock(&sram_lock); + if (info->sram_size) { + mutex_lock(&sram_lock); + list_del(&info->node); + mutex_unlock(&sram_lock); + + gen_pool_destroy(info->gpool); + iounmap(info->sram_virt); + kfree(info->pool_name); + } - gen_pool_destroy(info->gpool); - iounmap(info->sram_virt); - kfree(info->pool_name); kfree(info); + return 0; } diff --git a/arch/arm/mach-mstar/Kconfig b/arch/arm/mach-mstar/Kconfig index 576d1ab293c8734c587d2dd2d918c21358fbf874..30560fdf87ed224c29d106f930bc12571745098e 100644 --- a/arch/arm/mach-mstar/Kconfig +++ b/arch/arm/mach-mstar/Kconfig @@ -3,6 +3,7 @@ menuconfig ARCH_MSTARV7 depends on ARCH_MULTI_V7 select ARM_GIC select ARM_HEAVY_MB + select HAVE_ARM_ARCH_TIMER select MST_IRQ help Support for newer MStar/Sigmastar SoC families that are diff --git a/arch/arm/mach-s3c/mach-jive.c b/arch/arm/mach-s3c/mach-jive.c index 2a29c3eca559eb6bf1c2448d9a5cfd12d522e216..ae6a1c9ebf78cb01ee4faefa4298f0ed51dd6b78 100644 --- a/arch/arm/mach-s3c/mach-jive.c +++ b/arch/arm/mach-s3c/mach-jive.c @@ -236,11 +236,11 @@ static int __init jive_mtdset(char *options) unsigned long set; if (options == NULL || options[0] == '\0') - return 0; + return 1; if (kstrtoul(options, 10, &set)) { printk(KERN_ERR "failed to parse mtdset=%s\n", options); - return 0; + return 1; } switch (set) { @@ -255,7 +255,7 @@ static int __init jive_mtdset(char *options) "using default.", set); } - return 0; + return 1; } /* parse the mtdset= option given to the kernel command line */ diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index c9e32610f05ee79b5750bc6fd3407441aa424db9..aa4490ccb8f43a2ebf7be4ae79c5538a2326e2ce 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -213,12 +213,14 @@ early_param("ecc", early_ecc); static int __init early_cachepolicy(char *p) { pr_warn("cachepolicy kernel parameter not supported without cp15\n"); + return 0; } early_param("cachepolicy", early_cachepolicy); static int __init noalign_setup(char *__unused) { pr_warn("noalign kernel parameter not supported without cp15\n"); + 
return 1; } __setup("noalign", noalign_setup); diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts index ec19fbf928a142db6d9e853fee7e98f1e31d66b2..12a4b1c03390c0e2be02d6b2f2f921a1f6dc3d05 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts @@ -111,8 +111,8 @@ slic@0 { compatible = "silabs,si3226x"; reg = <0>; spi-max-frequency = <5000000>; - spi-cpha = <1>; - spi-cpol = <1>; + spi-cpha; + spi-cpol; pl022,hierarchy = <0>; pl022,interface = <0>; pl022,slave-tx-disable = <0>; @@ -135,8 +135,8 @@ at25@0 { at25,byte-len = <0x8000>; at25,addr-mode = <2>; at25,page-size = <64>; - spi-cpha = <1>; - spi-cpol = <1>; + spi-cpha; + spi-cpol; pl022,hierarchy = <0>; pl022,interface = <0>; pl022,slave-tx-disable = <0>; diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index 2cfeaf3b0a87685cc270b22f8e6bf862721e529f..8c218689fef70e745061ea92047179cfb53613a3 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -687,7 +687,7 @@ sata_phy1: sata-phy@1 { }; }; - sata: ahci@663f2000 { + sata: sata@663f2000 { compatible = "brcm,iproc-ahci", "generic-ahci"; reg = <0x663f2000 0x1000>; dma-coherent; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi index 07c099b4ed5b565f3832c24d4240eef8a9414bf0..1e0c9415bfcd044507d9ced5e0ff4eeea2fea6f0 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi +++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi @@ -476,7 +476,7 @@ usbphy0: usbphy@0 { }; usb0: usb@ffb00000 { - compatible = "snps,dwc2"; + compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2"; reg = <0xffb00000 0x40000>; interrupts = <0 93 4>; phys = <&usbphy0>; @@ -489,7 +489,7 @@ usb0: usb@ffb00000 { }; usb1: usb@ffb40000 { - compatible = "snps,dwc2"; + compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2"; reg = <0xffb40000 0x40000>; interrupts = <0 94 4>; phys = <&usbphy0>; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts index 2e437f20da39b72d638e9da6d3a7eff9e6540dc3..00e5dbf4b82363094e30a387f082ae6b739c440f 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts @@ -18,6 +18,7 @@ / { aliases { spi0 = &spi0; + ethernet0 = ð0; ethernet1 = ð1; mmc0 = &sdhci0; mmc1 = &sdhci1; @@ -137,7 +138,9 @@ &pcie0 { /* * U-Boot port for Turris Mox has a bug which always expects that "ranges" DT property * contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and - * 2 size cells and also expects that the second range starts at 16 MB offset. If these + * 2 size cells and also expects that the second range starts at 16 MB offset. Also it + * expects that first range uses same address for PCI (child) and CPU (parent) cells (so + * no remapping) and that this address is the lowest from all specified ranges. If these * conditions are not met then U-Boot crashes during loading kernel DTB file. PCIe address * space is 128 MB long, so the best split between MEM and IO is to use fixed 16 MB window * for IO and the rest 112 MB (64+32+16) for MEM, despite that maximal IO size is just 64 kB. 
@@ -146,6 +149,9 @@ &pcie0 { * https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7 * https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf * https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33 + * Bug related to requirement of same child and parent addresses for first range is fixed + * in U-Boot version 2022.04 by following commit: + * https://source.denx.de/u-boot/u-boot/-/commit/1fd54253bca7d43d046bba4853fe5fafd034bc17 */ #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index 2a2015a153627c7e05b976597204b4e694a2fd77..0f4bcd15d8580a2ad0d42f0ca1d05c0a83cbf393 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -495,7 +495,7 @@ pcie0: pcie@d0070000 { * (totaling 127 MiB) for MEM. */ ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x07f00000 /* Port 0 MEM */ - 0x81000000 0 0xefff0000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */ + 0x81000000 0 0x00000000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */ interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pcie_intc 0>, <0 0 0 2 &pcie_intc 1>, diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index ea6e3a11e641b0f4df11600f6ea56ba5a9672995..9beb3c34fcdb5f8ed4eaf38ad92e4d680560f14b 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -3406,10 +3406,10 @@ wcd9340: codec@1{ #clock-cells = <0>; clock-frequency = <9600000>; clock-output-names = "mclk"; - qcom,micbias1-millivolt = <1800>; - qcom,micbias2-millivolt = <1800>; - qcom,micbias3-millivolt = <1800>; - qcom,micbias4-millivolt = <1800>; + qcom,micbias1-microvolt = <1800000>; + qcom,micbias2-microvolt = <1800000>; + qcom,micbias3-microvolt = <1800000>; + qcom,micbias4-microvolt = <1800000>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index 1aec54590a11aba8170d7293680f374521a837b7..a8a47378ba689b09af24dc2b2e6722c769e63715 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -1114,9 +1114,9 @@ apps_rsc: rsc@18200000 { qcom,tcs-offset = <0xd00>; qcom,drv-id = <2>; qcom,tcs-config = , - , - , - ; + , + , + ; rpmhcc: clock-controller { compatible = "qcom,sm8150-rpmh-clk"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts index 6db18808b9c54be1f9c91398b7aa71545c8c446b..dc45ec372ada46bd4b97ba24456b4fe3dac93679 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts @@ -665,8 +665,8 @@ &sdio0 { sd-uhs-sdr104; /* Power supply */ - vqmmc-supply = &vcc1v8_s3; /* IO line */ - vmmc-supply = &vcc_sdio; /* card's power */ + vqmmc-supply = <&vcc1v8_s3>; /* IO line */ + vmmc-supply = <&vcc_sdio>; /* card's power */ #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi index 765b24a2bcbf06d217a9400cca4e7df51c732088..fb0a13cad6c93d8d8dd1033e1efe5d6d0491bdc6 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi @@ -281,7 +281,7 @@ max98357a: max98357a { sound: sound { compatible = "rockchip,rk3399-gru-sound"; - rockchip,cpu = <&i2s0 &i2s2>; + rockchip,cpu = <&i2s0 &spdif>; }; }; @@ -432,10 
+432,6 @@ &i2s0 { status = "okay"; }; -&i2s2 { - status = "okay"; -}; - &io_domains { status = "okay"; @@ -532,6 +528,17 @@ &sdmmc { vqmmc-supply = <&ppvar_sd_card_io>; }; +&spdif { + status = "okay"; + + /* + * SPDIF is routed internally to DP; we either don't use these pins, or + * mux them to something else. + */ + /delete-property/ pinctrl-0; + /delete-property/ pinctrl-names; +}; + &spi1 { status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 4660416c8f382aa97b485023e8682fb8725b00a3..544110aaffc569b183a61820a7fcd895f4a29a46 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -472,6 +472,12 @@ vcc5v0_host_en: vcc5v0-host-en { }; &sdhci { + /* + * Signal integrity isn't great at 200MHz but 100MHz has proven stable + * enough. + */ + max-frequency = <100000000>; + bus-width = <8>; mmc-hs400-1_8v; mmc-hs400-enhanced-strobe; diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index 4b6065dbba55eacb162b730895db83abdf8cbab0..52ba4d07e77123bb5fbcd257e32ed81ee10e0dbf 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -1770,10 +1770,10 @@ hdmi: hdmi@ff940000 { interrupts = ; clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_SFR>, - <&cru PLL_VPLL>, + <&cru SCLK_HDMI_CEC>, <&cru PCLK_VIO_GRF>, - <&cru SCLK_HDMI_CEC>; - clock-names = "iahb", "isfr", "vpll", "grf", "cec"; + <&cru PLL_VPLL>; + clock-names = "iahb", "isfr", "cec", "grf", "vpll"; power-domains = <&power RK3399_PD_HDCP>; reg-io-width = <4>; rockchip,grf = <&grf>; diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index b9662205be9bf95b8c4efb160f457a169d926b21..d04189771c773d08c1ec7f6d4591af77f56fd6a0 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -35,7 +35,10 @@ gic500: interrupt-controller@1800000 { #interrupt-cells = <3>; interrupt-controller; reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ - <0x00 0x01880000 0x00 0x90000>; /* GICR */ + <0x00 0x01880000 0x00 0x90000>, /* GICR */ + <0x00 0x6f000000 0x00 0x2000>, /* GICC */ + <0x00 0x6f010000 0x00 0x1000>, /* GICH */ + <0x00 0x6f020000 0x00 0x2000>; /* GICV */ /* * vcpumntirq: * virtual CPU interface maintenance interrupt diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi index d84c0bc05023373e7cbebdd41c2c21655be79bb0..c6a3fecc7518ef4f90afd09d9ff41a7af848a7cb 100644 --- a/arch/arm64/boot/dts/ti/k3-am65.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi @@ -84,6 +84,7 @@ cbass_main: bus@100000 { <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, <0x00 0x50000000 0x00 0x50000000 0x00 0x8000000>, + <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A53 PERIPHBASE */ <0x00 0x70000000 0x00 0x70000000 0x00 0x200000>, <0x05 0x00000000 0x05 0x00000000 0x01 0x0000000>, <0x07 0x00000000 0x07 0x00000000 0x01 0x0000000>; diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi index 1ab9f9604af6c569788745ccd829df182407f2d5..bef47f96376d997b827a4234f2c2379f791bfe29 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi @@ -47,7 +47,10 @@ gic500: interrupt-controller@1800000 { #interrupt-cells = <3>; interrupt-controller; reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ - <0x00 0x01900000 0x00 
0x100000>; /* GICR */ + <0x00 0x01900000 0x00 0x100000>, /* GICR */ + <0x00 0x6f000000 0x00 0x2000>, /* GICC */ + <0x00 0x6f010000 0x00 0x1000>, /* GICH */ + <0x00 0x6f020000 0x00 0x2000>; /* GICV */ /* vcpumntirq: virtual CPU interface maintenance interrupt */ interrupts = ; diff --git a/arch/arm64/boot/dts/ti/k3-j7200.dtsi b/arch/arm64/boot/dts/ti/k3-j7200.dtsi index 03a9623f0f9562e902184cc68490a27b175aac81..59f5113e657dd62c18ee13bc26709f47bbb23f4d 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200.dtsi @@ -127,6 +127,7 @@ cbass_main: bus@100000 { <0x00 0x00a40000 0x00 0x00a40000 0x00 0x00000800>, /* timesync router */ <0x00 0x01000000 0x00 0x01000000 0x00 0x0d000000>, /* Most peripherals */ <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>, /* MAIN NAVSS */ + <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */ <0x00 0x70000000 0x00 0x70000000 0x00 0x00800000>, /* MSMC RAM */ <0x00 0x18000000 0x00 0x18000000 0x00 0x08000000>, /* PCIe1 DAT0 */ <0x41 0x00000000 0x41 0x00000000 0x01 0x00000000>, /* PCIe1 DAT1 */ diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi index 85526f72b4616c888fc953bc21d4b7f25dc0c48d..0350ddfe2c72384313539d98a23ab50c33dbe4e1 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi @@ -108,7 +108,10 @@ gic500: interrupt-controller@1800000 { #interrupt-cells = <3>; interrupt-controller; reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ - <0x00 0x01900000 0x00 0x100000>; /* GICR */ + <0x00 0x01900000 0x00 0x100000>, /* GICR */ + <0x00 0x6f000000 0x00 0x2000>, /* GICC */ + <0x00 0x6f010000 0x00 0x1000>, /* GICH */ + <0x00 0x6f020000 0x00 0x2000>; /* GICV */ /* vcpumntirq: virtual CPU interface maintenance interrupt */ interrupts = ; diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi index a199227327ed299907f84fb925d9ca52832d7724..ba4fe3f9831586921472504bc00cb3148747c168 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi @@ -136,6 +136,7 @@ cbass_main: bus@100000 { <0x00 0x0e000000 0x00 0x0e000000 0x00 0x01800000>, /* PCIe Core*/ <0x00 0x10000000 0x00 0x10000000 0x00 0x10000000>, /* PCIe DAT */ <0x00 0x64800000 0x00 0x64800000 0x00 0x00800000>, /* C71 */ + <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */ <0x44 0x00000000 0x44 0x00000000 0x00 0x08000000>, /* PCIe2 DAT */ <0x44 0x10000000 0x44 0x10000000 0x00 0x08000000>, /* PCIe3 DAT */ <0x4d 0x80800000 0x4d 0x80800000 0x00 0x00800000>, /* C66_0 */ diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 5e7d86cf5dfa4886dd24db6e03b60a8314214ad6..d025bafcce433cfeee2c7b374915a41de900969d 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -837,7 +837,7 @@ CONFIG_DMADEVICES=y CONFIG_DMA_BCM2835=y CONFIG_DMA_SUN6I=m CONFIG_FSL_EDMA=y -CONFIG_IMX_SDMA=y +CONFIG_IMX_SDMA=m CONFIG_K3_DMA=y CONFIG_MV_XOR=y CONFIG_MV_XOR_V2=y diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 764b64837b34057621fc77c293969132df77b6ae..124da08a10988b797095dd4040b4999bfc622d34 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -45,13 +45,25 @@ config CRYPTO_SM3_ARM64_CE tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)" depends on KERNEL_MODE_NEON select CRYPTO_HASH - select CRYPTO_LIB_SM3 + select CRYPTO_SM3 config CRYPTO_SM4_ARM64_CE tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)" 
depends on KERNEL_MODE_NEON select CRYPTO_ALGAPI - select CRYPTO_LIB_SM4 + select CRYPTO_SM4 + +config CRYPTO_SM4_ARM64_CE_BLK + tristate "SM4 in ECB/CBC/CFB/CTR modes using ARMv8 Crypto Extensions" + depends on KERNEL_MODE_NEON + select CRYPTO_SKCIPHER + select CRYPTO_SM4 + +config CRYPTO_SM4_ARM64_NEON_BLK + tristate "SM4 in ECB/CBC/CFB/CTR modes using NEON instructions" + depends on KERNEL_MODE_NEON + select CRYPTO_SKCIPHER + select CRYPTO_SM4 config CRYPTO_GHASH_ARM64_CE tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions" diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index a5d4b672b6e1079a23d46bbf8ab3fcfe0761d313..5b2bb7e92bad91c507a580151a2b8e047a627193 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -20,9 +20,15 @@ sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o -obj-$(CONFIG_CRYPTO_SM4_ARM64_CE) += sm4-ce.o +obj-$(CONFIG_CRYPTO_SM4_ARM64_CE) += sm4-ce-cipher.o +sm4-ce-cipher-y := sm4-ce-cipher-glue.o sm4-ce-cipher-core.o + +obj-$(CONFIG_CRYPTO_SM4_ARM64_CE_BLK) += sm4-ce.o sm4-ce-y := sm4-ce-glue.o sm4-ce-core.o +obj-$(CONFIG_CRYPTO_SM4_ARM64_NEON_BLK) += sm4-neon.o +sm4-neon-y := sm4-neon-glue.o sm4-neon-core.o + obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o diff --git a/arch/arm64/crypto/sm4-ce-cipher-core.S b/arch/arm64/crypto/sm4-ce-cipher-core.S new file mode 100644 index 0000000000000000000000000000000000000000..4ac6cfbc57970e2449067144aaa4e27c6d4373d4 --- /dev/null +++ b/arch/arm64/crypto/sm4-ce-cipher-core.S @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + + .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8 + .set .Lv\b\().4s, \b + .endr + + .macro sm4e, rd, rn + .inst 0xcec08400 | .L\rd | (.L\rn << 5) + .endm + + /* + * void sm4_ce_do_crypt(const u32 *rk, u32 *out, const u32 *in); + */ + .text +SYM_FUNC_START(sm4_ce_do_crypt) + ld1 {v8.4s}, [x2] + ld1 {v0.4s-v3.4s}, [x0], #64 +CPU_LE( rev32 v8.16b, v8.16b ) + ld1 {v4.4s-v7.4s}, [x0] + sm4e v8.4s, v0.4s + sm4e v8.4s, v1.4s + sm4e v8.4s, v2.4s + sm4e v8.4s, v3.4s + sm4e v8.4s, v4.4s + sm4e v8.4s, v5.4s + sm4e v8.4s, v6.4s + sm4e v8.4s, v7.4s + rev64 v8.4s, v8.4s + ext v8.16b, v8.16b, v8.16b, #8 +CPU_LE( rev32 v8.16b, v8.16b ) + st1 {v8.4s}, [x1] + ret +SYM_FUNC_END(sm4_ce_do_crypt) diff --git a/arch/arm64/crypto/sm4-ce-cipher-glue.c b/arch/arm64/crypto/sm4-ce-cipher-glue.c new file mode 100644 index 0000000000000000000000000000000000000000..76a34ef4abbbf198d1fe62e898c37da4c4d3e365 --- /dev/null +++ b/arch/arm64/crypto/sm4-ce-cipher-glue.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_ALIAS_CRYPTO("sm4"); +MODULE_ALIAS_CRYPTO("sm4-ce"); +MODULE_DESCRIPTION("SM4 symmetric cipher using ARMv8 Crypto Extensions"); +MODULE_AUTHOR("Ard Biesheuvel "); +MODULE_LICENSE("GPL v2"); + +asmlinkage void sm4_ce_do_crypt(const u32 *rk, void *out, const void *in); + +static int sm4_ce_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + + return sm4_expandkey(ctx, key, key_len); +} + +static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!crypto_simd_usable()) { + sm4_crypt_block(ctx->rkey_enc, out, in); + } else { + kernel_neon_begin(); + sm4_ce_do_crypt(ctx->rkey_enc, out, in); + kernel_neon_end(); + } 
+} + +static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!crypto_simd_usable()) { + sm4_crypt_block(ctx->rkey_dec, out, in); + } else { + kernel_neon_begin(); + sm4_ce_do_crypt(ctx->rkey_dec, out, in); + kernel_neon_end(); + } +} + +static struct crypto_alg sm4_ce_alg = { + .cra_name = "sm4", + .cra_driver_name = "sm4-ce", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + .cra_u.cipher = { + .cia_min_keysize = SM4_KEY_SIZE, + .cia_max_keysize = SM4_KEY_SIZE, + .cia_setkey = sm4_ce_setkey, + .cia_encrypt = sm4_ce_encrypt, + .cia_decrypt = sm4_ce_decrypt + } +}; + +static int __init sm4_ce_mod_init(void) +{ + return crypto_register_alg(&sm4_ce_alg); +} + +static void __exit sm4_ce_mod_fini(void) +{ + crypto_unregister_alg(&sm4_ce_alg); +} + +module_cpu_feature_match(SM4, sm4_ce_mod_init); +module_exit(sm4_ce_mod_fini); diff --git a/arch/arm64/crypto/sm4-ce-core.S b/arch/arm64/crypto/sm4-ce-core.S index 4ac6cfbc57970e2449067144aaa4e27c6d4373d4..934e0f093279968362d7ddc2e4c1e7774c34077e 100644 --- a/arch/arm64/crypto/sm4-ce-core.S +++ b/arch/arm64/crypto/sm4-ce-core.S @@ -1,36 +1,660 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4 Cipher Algorithm for ARMv8 with Crypto Extensions + * as specified in + * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html + * + * Copyright (C) 2022, Alibaba Group. + * Copyright (C) 2022 Tianjia Zhang + */ #include #include - .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8 - .set .Lv\b\().4s, \b - .endr - - .macro sm4e, rd, rn - .inst 0xcec08400 | .L\rd | (.L\rn << 5) - .endm - - /* - * void sm4_ce_do_crypt(const u32 *rk, u32 *out, const u32 *in); - */ - .text -SYM_FUNC_START(sm4_ce_do_crypt) - ld1 {v8.4s}, [x2] - ld1 {v0.4s-v3.4s}, [x0], #64 -CPU_LE( rev32 v8.16b, v8.16b ) - ld1 {v4.4s-v7.4s}, [x0] - sm4e v8.4s, v0.4s - sm4e v8.4s, v1.4s - sm4e v8.4s, v2.4s - sm4e v8.4s, v3.4s - sm4e v8.4s, v4.4s - sm4e v8.4s, v5.4s - sm4e v8.4s, v6.4s - sm4e v8.4s, v7.4s - rev64 v8.4s, v8.4s - ext v8.16b, v8.16b, v8.16b, #8 -CPU_LE( rev32 v8.16b, v8.16b ) - st1 {v8.4s}, [x1] - ret -SYM_FUNC_END(sm4_ce_do_crypt) +.arch armv8-a+crypto + +.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 16, 20, 24, 25, 26, 27, 28, 29, 30, 31 + .set .Lv\b\().4s, \b +.endr + +.macro sm4e, vd, vn + .inst 0xcec08400 | (.L\vn << 5) | .L\vd +.endm + +.macro sm4ekey, vd, vn, vm + .inst 0xce60c800 | (.L\vm << 16) | (.L\vn << 5) | .L\vd +.endm + +/* Register macros */ + +#define RTMP0 v16 +#define RTMP1 v17 +#define RTMP2 v18 +#define RTMP3 v19 + +#define RIV v20 + +/* Helper macros. 
*/ + +#define PREPARE \ + ld1 {v24.16b-v27.16b}, [x0], #64; \ + ld1 {v28.16b-v31.16b}, [x0]; + +#define SM4_CRYPT_BLK(b0) \ + rev32 b0.16b, b0.16b; \ + sm4e b0.4s, v24.4s; \ + sm4e b0.4s, v25.4s; \ + sm4e b0.4s, v26.4s; \ + sm4e b0.4s, v27.4s; \ + sm4e b0.4s, v28.4s; \ + sm4e b0.4s, v29.4s; \ + sm4e b0.4s, v30.4s; \ + sm4e b0.4s, v31.4s; \ + rev64 b0.4s, b0.4s; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + rev32 b0.16b, b0.16b; + +#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + sm4e b0.4s, v24.4s; \ + sm4e b1.4s, v24.4s; \ + sm4e b2.4s, v24.4s; \ + sm4e b3.4s, v24.4s; \ + sm4e b0.4s, v25.4s; \ + sm4e b1.4s, v25.4s; \ + sm4e b2.4s, v25.4s; \ + sm4e b3.4s, v25.4s; \ + sm4e b0.4s, v26.4s; \ + sm4e b1.4s, v26.4s; \ + sm4e b2.4s, v26.4s; \ + sm4e b3.4s, v26.4s; \ + sm4e b0.4s, v27.4s; \ + sm4e b1.4s, v27.4s; \ + sm4e b2.4s, v27.4s; \ + sm4e b3.4s, v27.4s; \ + sm4e b0.4s, v28.4s; \ + sm4e b1.4s, v28.4s; \ + sm4e b2.4s, v28.4s; \ + sm4e b3.4s, v28.4s; \ + sm4e b0.4s, v29.4s; \ + sm4e b1.4s, v29.4s; \ + sm4e b2.4s, v29.4s; \ + sm4e b3.4s, v29.4s; \ + sm4e b0.4s, v30.4s; \ + sm4e b1.4s, v30.4s; \ + sm4e b2.4s, v30.4s; \ + sm4e b3.4s, v30.4s; \ + sm4e b0.4s, v31.4s; \ + sm4e b1.4s, v31.4s; \ + sm4e b2.4s, v31.4s; \ + sm4e b3.4s, v31.4s; \ + rev64 b0.4s, b0.4s; \ + rev64 b1.4s, b1.4s; \ + rev64 b2.4s, b2.4s; \ + rev64 b3.4s, b3.4s; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + ext b1.16b, b1.16b, b1.16b, #8; \ + ext b2.16b, b2.16b, b2.16b, #8; \ + ext b3.16b, b3.16b, b3.16b, #8; \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; + +#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + rev32 b4.16b, b4.16b; \ + rev32 b5.16b, b5.16b; \ + rev32 b6.16b, b6.16b; \ + rev32 b7.16b, b7.16b; \ + sm4e b0.4s, v24.4s; \ + sm4e b1.4s, v24.4s; \ + sm4e b2.4s, v24.4s; \ + sm4e b3.4s, v24.4s; \ + sm4e b4.4s, v24.4s; \ + sm4e b5.4s, v24.4s; \ + sm4e b6.4s, v24.4s; \ + sm4e b7.4s, v24.4s; \ + sm4e b0.4s, v25.4s; \ + sm4e b1.4s, v25.4s; \ + sm4e b2.4s, v25.4s; \ + sm4e b3.4s, v25.4s; \ + sm4e b4.4s, v25.4s; \ + sm4e b5.4s, v25.4s; \ + sm4e b6.4s, v25.4s; \ + sm4e b7.4s, v25.4s; \ + sm4e b0.4s, v26.4s; \ + sm4e b1.4s, v26.4s; \ + sm4e b2.4s, v26.4s; \ + sm4e b3.4s, v26.4s; \ + sm4e b4.4s, v26.4s; \ + sm4e b5.4s, v26.4s; \ + sm4e b6.4s, v26.4s; \ + sm4e b7.4s, v26.4s; \ + sm4e b0.4s, v27.4s; \ + sm4e b1.4s, v27.4s; \ + sm4e b2.4s, v27.4s; \ + sm4e b3.4s, v27.4s; \ + sm4e b4.4s, v27.4s; \ + sm4e b5.4s, v27.4s; \ + sm4e b6.4s, v27.4s; \ + sm4e b7.4s, v27.4s; \ + sm4e b0.4s, v28.4s; \ + sm4e b1.4s, v28.4s; \ + sm4e b2.4s, v28.4s; \ + sm4e b3.4s, v28.4s; \ + sm4e b4.4s, v28.4s; \ + sm4e b5.4s, v28.4s; \ + sm4e b6.4s, v28.4s; \ + sm4e b7.4s, v28.4s; \ + sm4e b0.4s, v29.4s; \ + sm4e b1.4s, v29.4s; \ + sm4e b2.4s, v29.4s; \ + sm4e b3.4s, v29.4s; \ + sm4e b4.4s, v29.4s; \ + sm4e b5.4s, v29.4s; \ + sm4e b6.4s, v29.4s; \ + sm4e b7.4s, v29.4s; \ + sm4e b0.4s, v30.4s; \ + sm4e b1.4s, v30.4s; \ + sm4e b2.4s, v30.4s; \ + sm4e b3.4s, v30.4s; \ + sm4e b4.4s, v30.4s; \ + sm4e b5.4s, v30.4s; \ + sm4e b6.4s, v30.4s; \ + sm4e b7.4s, v30.4s; \ + sm4e b0.4s, v31.4s; \ + sm4e b1.4s, v31.4s; \ + sm4e b2.4s, v31.4s; \ + sm4e b3.4s, v31.4s; \ + sm4e b4.4s, v31.4s; \ + sm4e b5.4s, v31.4s; \ + sm4e b6.4s, v31.4s; \ + sm4e b7.4s, v31.4s; \ + rev64 b0.4s, b0.4s; \ + rev64 b1.4s, b1.4s; \ + rev64 b2.4s, 
b2.4s; \ + rev64 b3.4s, b3.4s; \ + rev64 b4.4s, b4.4s; \ + rev64 b5.4s, b5.4s; \ + rev64 b6.4s, b6.4s; \ + rev64 b7.4s, b7.4s; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + ext b1.16b, b1.16b, b1.16b, #8; \ + ext b2.16b, b2.16b, b2.16b, #8; \ + ext b3.16b, b3.16b, b3.16b, #8; \ + ext b4.16b, b4.16b, b4.16b, #8; \ + ext b5.16b, b5.16b, b5.16b, #8; \ + ext b6.16b, b6.16b, b6.16b, #8; \ + ext b7.16b, b7.16b, b7.16b, #8; \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + rev32 b4.16b, b4.16b; \ + rev32 b5.16b, b5.16b; \ + rev32 b6.16b, b6.16b; \ + rev32 b7.16b, b7.16b; + + +.align 3 +SYM_FUNC_START(sm4_ce_expand_key) + /* input: + * x0: 128-bit key + * x1: rkey_enc + * x2: rkey_dec + * x3: fk array + * x4: ck array + */ + ld1 {v0.16b}, [x0]; + rev32 v0.16b, v0.16b; + ld1 {v1.16b}, [x3]; + /* load ck */ + ld1 {v24.16b-v27.16b}, [x4], #64; + ld1 {v28.16b-v31.16b}, [x4]; + + /* input ^ fk */ + eor v0.16b, v0.16b, v1.16b; + + sm4ekey v0.4s, v0.4s, v24.4s; + sm4ekey v1.4s, v0.4s, v25.4s; + sm4ekey v2.4s, v1.4s, v26.4s; + sm4ekey v3.4s, v2.4s, v27.4s; + sm4ekey v4.4s, v3.4s, v28.4s; + sm4ekey v5.4s, v4.4s, v29.4s; + sm4ekey v6.4s, v5.4s, v30.4s; + sm4ekey v7.4s, v6.4s, v31.4s; + + st1 {v0.16b-v3.16b}, [x1], #64; + st1 {v4.16b-v7.16b}, [x1]; + rev64 v7.4s, v7.4s; + rev64 v6.4s, v6.4s; + rev64 v5.4s, v5.4s; + rev64 v4.4s, v4.4s; + rev64 v3.4s, v3.4s; + rev64 v2.4s, v2.4s; + rev64 v1.4s, v1.4s; + rev64 v0.4s, v0.4s; + ext v7.16b, v7.16b, v7.16b, #8; + ext v6.16b, v6.16b, v6.16b, #8; + ext v5.16b, v5.16b, v5.16b, #8; + ext v4.16b, v4.16b, v4.16b, #8; + ext v3.16b, v3.16b, v3.16b, #8; + ext v2.16b, v2.16b, v2.16b, #8; + ext v1.16b, v1.16b, v1.16b, #8; + ext v0.16b, v0.16b, v0.16b, #8; + st1 {v7.16b}, [x2], #16; + st1 {v6.16b}, [x2], #16; + st1 {v5.16b}, [x2], #16; + st1 {v4.16b}, [x2], #16; + st1 {v3.16b}, [x2], #16; + st1 {v2.16b}, [x2], #16; + st1 {v1.16b}, [x2], #16; + st1 {v0.16b}, [x2]; + + ret; +SYM_FUNC_END(sm4_ce_expand_key) + +.align 3 +SYM_FUNC_START(sm4_ce_crypt_block) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + */ + PREPARE; + + ld1 {v0.16b}, [x2]; + SM4_CRYPT_BLK(v0); + st1 {v0.16b}, [x1]; + + ret; +SYM_FUNC_END(sm4_ce_crypt_block) + +.align 3 +SYM_FUNC_START(sm4_ce_crypt) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * w3: nblocks + */ + PREPARE; + +.Lcrypt_loop_blk: + sub w3, w3, #8; + tbnz w3, #31, .Lcrypt_tail8; + + ld1 {v0.16b-v3.16b}, [x2], #64; + ld1 {v4.16b-v7.16b}, [x2], #64; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + st1 {v0.16b-v3.16b}, [x1], #64; + st1 {v4.16b-v7.16b}, [x1], #64; + + cbz w3, .Lcrypt_end; + b .Lcrypt_loop_blk; + +.Lcrypt_tail8: + add w3, w3, #8; + cmp w3, #4; + blt .Lcrypt_tail4; + + sub w3, w3, #4; + + ld1 {v0.16b-v3.16b}, [x2], #64; + SM4_CRYPT_BLK4(v0, v1, v2, v3); + st1 {v0.16b-v3.16b}, [x1], #64; + + cbz w3, .Lcrypt_end; + +.Lcrypt_tail4: + sub w3, w3, #1; + + ld1 {v0.16b}, [x2], #16; + SM4_CRYPT_BLK(v0); + st1 {v0.16b}, [x1], #16; + + cbnz w3, .Lcrypt_tail4; + +.Lcrypt_end: + ret; +SYM_FUNC_END(sm4_ce_crypt) + +.align 3 +SYM_FUNC_START(sm4_ce_cbc_enc) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks + */ + PREPARE; + + ld1 {RIV.16b}, [x3]; + +.Lcbc_enc_loop: + sub w4, w4, #1; + + ld1 {RTMP0.16b}, [x2], #16; + eor RIV.16b, RIV.16b, RTMP0.16b; + + SM4_CRYPT_BLK(RIV); + + st1 {RIV.16b}, [x1], #16; + + cbnz w4, .Lcbc_enc_loop; + + /* store new IV */ + st1 {RIV.16b}, [x3]; + + ret; 
+SYM_FUNC_END(sm4_ce_cbc_enc) + +.align 3 +SYM_FUNC_START(sm4_ce_cbc_dec) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks + */ + PREPARE; + + ld1 {RIV.16b}, [x3]; + +.Lcbc_loop_blk: + sub w4, w4, #8; + tbnz w4, #31, .Lcbc_tail8; + + ld1 {v0.16b-v3.16b}, [x2], #64; + ld1 {v4.16b-v7.16b}, [x2]; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + sub x2, x2, #64; + eor v0.16b, v0.16b, RIV.16b; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v1.16b, v1.16b, RTMP0.16b; + eor v2.16b, v2.16b, RTMP1.16b; + eor v3.16b, v3.16b, RTMP2.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + eor v4.16b, v4.16b, RTMP3.16b; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v5.16b, v5.16b, RTMP0.16b; + eor v6.16b, v6.16b, RTMP1.16b; + eor v7.16b, v7.16b, RTMP2.16b; + + mov RIV.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + cbz w4, .Lcbc_end; + b .Lcbc_loop_blk; + +.Lcbc_tail8: + add w4, w4, #8; + cmp w4, #4; + blt .Lcbc_tail4; + + sub w4, w4, #4; + + ld1 {v0.16b-v3.16b}, [x2]; + + SM4_CRYPT_BLK4(v0, v1, v2, v3); + + eor v0.16b, v0.16b, RIV.16b; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v1.16b, v1.16b, RTMP0.16b; + eor v2.16b, v2.16b, RTMP1.16b; + eor v3.16b, v3.16b, RTMP2.16b; + + mov RIV.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + cbz w4, .Lcbc_end; + +.Lcbc_tail4: + sub w4, w4, #1; + + ld1 {v0.16b}, [x2]; + + SM4_CRYPT_BLK(v0); + + eor v0.16b, v0.16b, RIV.16b; + ld1 {RIV.16b}, [x2], #16; + st1 {v0.16b}, [x1], #16; + + cbnz w4, .Lcbc_tail4; + +.Lcbc_end: + /* store new IV */ + st1 {RIV.16b}, [x3]; + + ret; +SYM_FUNC_END(sm4_ce_cbc_dec) + +.align 3 +SYM_FUNC_START(sm4_ce_cfb_enc) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks + */ + PREPARE; + + ld1 {RIV.16b}, [x3]; + +.Lcfb_enc_loop: + sub w4, w4, #1; + + SM4_CRYPT_BLK(RIV); + + ld1 {RTMP0.16b}, [x2], #16; + eor RIV.16b, RIV.16b, RTMP0.16b; + st1 {RIV.16b}, [x1], #16; + + cbnz w4, .Lcfb_enc_loop; + + /* store new IV */ + st1 {RIV.16b}, [x3]; + + ret; +SYM_FUNC_END(sm4_ce_cfb_enc) + +.align 3 +SYM_FUNC_START(sm4_ce_cfb_dec) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks + */ + PREPARE; + + ld1 {v0.16b}, [x3]; + +.Lcfb_loop_blk: + sub w4, w4, #8; + tbnz w4, #31, .Lcfb_tail8; + + ld1 {v1.16b, v2.16b, v3.16b}, [x2], #48; + ld1 {v4.16b-v7.16b}, [x2]; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + sub x2, x2, #48; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v4.16b, v4.16b, RTMP0.16b; + eor v5.16b, v5.16b, RTMP1.16b; + eor v6.16b, v6.16b, RTMP2.16b; + eor v7.16b, v7.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + mov v0.16b, RTMP3.16b; + + cbz w4, .Lcfb_end; + b .Lcfb_loop_blk; + +.Lcfb_tail8: + add w4, w4, #8; + cmp w4, #4; + blt .Lcfb_tail4; + + sub w4, w4, #4; + + ld1 {v1.16b, v2.16b, v3.16b}, [x2]; + + SM4_CRYPT_BLK4(v0, v1, v2, v3); + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + mov v0.16b, RTMP3.16b; + + cbz w4, .Lcfb_end; + +.Lcfb_tail4: + sub w4, w4, #1; + + SM4_CRYPT_BLK(v0); + + ld1 {RTMP0.16b}, [x2], #16; + eor v0.16b, v0.16b, RTMP0.16b; + st1 {v0.16b}, [x1], #16; + 
+ mov v0.16b, RTMP0.16b; + + cbnz w4, .Lcfb_tail4; + +.Lcfb_end: + /* store new IV */ + st1 {v0.16b}, [x3]; + + ret; +SYM_FUNC_END(sm4_ce_cfb_dec) + +.align 3 +SYM_FUNC_START(sm4_ce_ctr_enc) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: ctr (big endian, 128 bit) + * w4: nblocks + */ + PREPARE; + + ldp x7, x8, [x3]; + rev x7, x7; + rev x8, x8; + +.Lctr_loop_blk: + sub w4, w4, #8; + tbnz w4, #31, .Lctr_tail8; + +#define inc_le128(vctr) \ + mov vctr.d[1], x8; \ + mov vctr.d[0], x7; \ + adds x8, x8, #1; \ + adc x7, x7, xzr; \ + rev64 vctr.16b, vctr.16b; + + /* construct CTRs */ + inc_le128(v0); /* +0 */ + inc_le128(v1); /* +1 */ + inc_le128(v2); /* +2 */ + inc_le128(v3); /* +3 */ + inc_le128(v4); /* +4 */ + inc_le128(v5); /* +5 */ + inc_le128(v6); /* +6 */ + inc_le128(v7); /* +7 */ + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v4.16b, v4.16b, RTMP0.16b; + eor v5.16b, v5.16b, RTMP1.16b; + eor v6.16b, v6.16b, RTMP2.16b; + eor v7.16b, v7.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + cbz w4, .Lctr_end; + b .Lctr_loop_blk; + +.Lctr_tail8: + add w4, w4, #8; + cmp w4, #4; + blt .Lctr_tail4; + + sub w4, w4, #4; + + /* construct CTRs */ + inc_le128(v0); /* +0 */ + inc_le128(v1); /* +1 */ + inc_le128(v2); /* +2 */ + inc_le128(v3); /* +3 */ + + SM4_CRYPT_BLK4(v0, v1, v2, v3); + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + cbz w4, .Lctr_end; + +.Lctr_tail4: + sub w4, w4, #1; + + /* construct CTRs */ + inc_le128(v0); + + SM4_CRYPT_BLK(v0); + + ld1 {RTMP0.16b}, [x2], #16; + eor v0.16b, v0.16b, RTMP0.16b; + st1 {v0.16b}, [x1], #16; + + cbnz w4, .Lctr_tail4; + +.Lctr_end: + /* store new CTR */ + rev x7, x7; + rev x8, x8; + stp x7, x8, [x3]; + + ret; +SYM_FUNC_END(sm4_ce_ctr_enc) diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c index 9c93cfc4841bc3527eef0371fa0f579cb49a1842..496d55c0d01a4617ca3c014382ad3e9ea246f866 100644 --- a/arch/arm64/crypto/sm4-ce-glue.c +++ b/arch/arm64/crypto/sm4-ce-glue.c @@ -1,82 +1,372 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4 Cipher Algorithm, using ARMv8 Crypto Extensions + * as specified in + * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html + * + * Copyright (C) 2022, Alibaba Group. 
+ * Copyright (C) 2022 Tianjia Zhang + */ +#include +#include +#include +#include #include #include -#include #include -#include -#include -#include -#include +#include +#include -MODULE_ALIAS_CRYPTO("sm4"); -MODULE_ALIAS_CRYPTO("sm4-ce"); -MODULE_DESCRIPTION("SM4 symmetric cipher using ARMv8 Crypto Extensions"); -MODULE_AUTHOR("Ard Biesheuvel "); -MODULE_LICENSE("GPL v2"); +#define BYTES2BLKS(nbytes) ((nbytes) >> 4) + +asmlinkage void sm4_ce_expand_key(const u8 *key, u32 *rkey_enc, u32 *rkey_dec, + const u32 *fk, const u32 *ck); +asmlinkage void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src); +asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src, + unsigned int nblks); +asmlinkage void sm4_ce_cbc_enc(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_ce_cbc_dec(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_ce_cfb_enc(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_ce_cfb_dec(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); + +static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (key_len != SM4_KEY_SIZE) + return -EINVAL; + + sm4_ce_expand_key(key, ctx->rkey_enc, ctx->rkey_dec, + crypto_sm4_fk, crypto_sm4_ck); + return 0; +} + +static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey) +{ + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_crypt(rkey, dst, src, nblks); + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int sm4_ecb_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_ecb_do_crypt(req, ctx->rkey_enc); +} + +static int sm4_ecb_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_ecb_do_crypt(req, ctx->rkey_dec); +} + +static int sm4_cbc_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_cbc_enc(ctx->rkey_enc, dst, src, walk.iv, nblks); + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); -asmlinkage void sm4_ce_do_crypt(const u32 *rk, void *out, const void *in); + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} -static int sm4_ce_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int key_len) +static int sm4_cbc_decrypt(struct skcipher_request *req) { - struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_skcipher *tfm = 
crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_cbc_dec(ctx->rkey_dec, dst, src, walk.iv, nblks); + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); + } - return sm4_expandkey(ctx, key, key_len); + return err; } -static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static int sm4_cfb_encrypt(struct skcipher_request *req) { - const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; - if (!crypto_simd_usable()) { - sm4_crypt_block(ctx->rkey_enc, out, in); - } else { kernel_neon_begin(); - sm4_ce_do_crypt(ctx->rkey_enc, out, in); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_cfb_enc(ctx->rkey_enc, dst, src, walk.iv, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + + sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); } + + return err; } -static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static int sm4_cfb_decrypt(struct skcipher_request *req) { - const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; - if (!crypto_simd_usable()) { - sm4_crypt_block(ctx->rkey_dec, out, in); - } else { kernel_neon_begin(); - sm4_ce_do_crypt(ctx->rkey_dec, out, in); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_cfb_dec(ctx->rkey_enc, dst, src, walk.iv, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + + sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); } + + return err; } -static struct crypto_alg sm4_ce_alg = { - .cra_name = "sm4", - .cra_driver_name = "sm4-ce", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = SM4_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct sm4_ctx), - .cra_module = THIS_MODULE, - .cra_u.cipher = { - .cia_min_keysize = SM4_KEY_SIZE, - .cia_max_keysize = SM4_KEY_SIZE, - .cia_setkey = sm4_ce_setkey, - .cia_encrypt = sm4_ce_encrypt, - .cia_decrypt = sm4_ce_decrypt +static int sm4_ctr_crypt(struct skcipher_request *req) +{ + 
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_ctr_enc(ctx->rkey_enc, dst, src, walk.iv, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + + sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static struct skcipher_alg sm4_algs[] = { + { + .base = { + .cra_name = "ecb(sm4)", + .cra_driver_name = "ecb-sm4-ce", + .cra_priority = 400, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_ecb_encrypt, + .decrypt = sm4_ecb_decrypt, + }, { + .base = { + .cra_name = "cbc(sm4)", + .cra_driver_name = "cbc-sm4-ce", + .cra_priority = 400, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_cbc_encrypt, + .decrypt = sm4_cbc_decrypt, + }, { + .base = { + .cra_name = "cfb(sm4)", + .cra_driver_name = "cfb-sm4-ce", + .cra_priority = 400, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_cfb_encrypt, + .decrypt = sm4_cfb_decrypt, + }, { + .base = { + .cra_name = "ctr(sm4)", + .cra_driver_name = "ctr-sm4-ce", + .cra_priority = 400, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_ctr_crypt, + .decrypt = sm4_ctr_crypt, } }; -static int __init sm4_ce_mod_init(void) +static int __init sm4_init(void) { - return crypto_register_alg(&sm4_ce_alg); + return crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); } -static void __exit sm4_ce_mod_fini(void) +static void __exit sm4_exit(void) { - crypto_unregister_alg(&sm4_ce_alg); + crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); } -module_cpu_feature_match(SM4, sm4_ce_mod_init); -module_exit(sm4_ce_mod_fini); +module_cpu_feature_match(SM4, sm4_init); +module_exit(sm4_exit); + +MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR using ARMv8 Crypto Extensions"); +MODULE_ALIAS_CRYPTO("sm4-ce"); +MODULE_ALIAS_CRYPTO("sm4"); +MODULE_ALIAS_CRYPTO("ecb(sm4)"); +MODULE_ALIAS_CRYPTO("cbc(sm4)"); +MODULE_ALIAS_CRYPTO("cfb(sm4)"); +MODULE_ALIAS_CRYPTO("ctr(sm4)"); +MODULE_AUTHOR("Tianjia Zhang "); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/crypto/sm4-neon-core.S b/arch/arm64/crypto/sm4-neon-core.S new file mode 100644 index 
0000000000000000000000000000000000000000..3d5256b354d27fb2f5464fc810f595ab17b1a115 --- /dev/null +++ b/arch/arm64/crypto/sm4-neon-core.S @@ -0,0 +1,487 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4 Cipher Algorithm for ARMv8 NEON + * as specified in + * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html + * + * Copyright (C) 2022, Alibaba Group. + * Copyright (C) 2022 Tianjia Zhang + */ + +#include +#include + +/* Register macros */ + +#define RTMP0 v8 +#define RTMP1 v9 +#define RTMP2 v10 +#define RTMP3 v11 + +#define RX0 v12 +#define RX1 v13 +#define RKEY v14 +#define RIV v15 + +/* Helper macros. */ + +#define PREPARE \ + adr_l x5, crypto_sm4_sbox; \ + ld1 {v16.16b-v19.16b}, [x5], #64; \ + ld1 {v20.16b-v23.16b}, [x5], #64; \ + ld1 {v24.16b-v27.16b}, [x5], #64; \ + ld1 {v28.16b-v31.16b}, [x5]; + +#define transpose_4x4(s0, s1, s2, s3) \ + zip1 RTMP0.4s, s0.4s, s1.4s; \ + zip1 RTMP1.4s, s2.4s, s3.4s; \ + zip2 RTMP2.4s, s0.4s, s1.4s; \ + zip2 RTMP3.4s, s2.4s, s3.4s; \ + zip1 s0.2d, RTMP0.2d, RTMP1.2d; \ + zip2 s1.2d, RTMP0.2d, RTMP1.2d; \ + zip1 s2.2d, RTMP2.2d, RTMP3.2d; \ + zip2 s3.2d, RTMP2.2d, RTMP3.2d; + +#define rotate_clockwise_90(s0, s1, s2, s3) \ + zip1 RTMP0.4s, s1.4s, s0.4s; \ + zip2 RTMP1.4s, s1.4s, s0.4s; \ + zip1 RTMP2.4s, s3.4s, s2.4s; \ + zip2 RTMP3.4s, s3.4s, s2.4s; \ + zip1 s0.2d, RTMP2.2d, RTMP0.2d; \ + zip2 s1.2d, RTMP2.2d, RTMP0.2d; \ + zip1 s2.2d, RTMP3.2d, RTMP1.2d; \ + zip2 s3.2d, RTMP3.2d, RTMP1.2d; + +#define ROUND4(round, s0, s1, s2, s3) \ + dup RX0.4s, RKEY.s[round]; \ + /* rk ^ s1 ^ s2 ^ s3 */ \ + eor RTMP1.16b, s2.16b, s3.16b; \ + eor RX0.16b, RX0.16b, s1.16b; \ + eor RX0.16b, RX0.16b, RTMP1.16b; \ + \ + /* sbox, non-linear part */ \ + movi RTMP3.16b, #64; /* sizeof(sbox) / 4 */ \ + tbl RTMP0.16b, {v16.16b-v19.16b}, RX0.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v20.16b-v23.16b}, RX0.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v24.16b-v27.16b}, RX0.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v28.16b-v31.16b}, RX0.16b; \ + \ + /* linear part */ \ + shl RTMP1.4s, RTMP0.4s, #8; \ + shl RTMP2.4s, RTMP0.4s, #16; \ + shl RTMP3.4s, RTMP0.4s, #24; \ + sri RTMP1.4s, RTMP0.4s, #(32-8); \ + sri RTMP2.4s, RTMP0.4s, #(32-16); \ + sri RTMP3.4s, RTMP0.4s, #(32-24); \ + /* RTMP1 = x ^ rol32(x, 8) ^ rol32(x, 16) */ \ + eor RTMP1.16b, RTMP1.16b, RTMP0.16b; \ + eor RTMP1.16b, RTMP1.16b, RTMP2.16b; \ + /* RTMP3 = x ^ rol32(x, 24) ^ rol32(RTMP1, 2) */ \ + eor RTMP3.16b, RTMP3.16b, RTMP0.16b; \ + shl RTMP2.4s, RTMP1.4s, 2; \ + sri RTMP2.4s, RTMP1.4s, #(32-2); \ + eor RTMP3.16b, RTMP3.16b, RTMP2.16b; \ + /* s0 ^= RTMP3 */ \ + eor s0.16b, s0.16b, RTMP3.16b; + +#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + \ + transpose_4x4(b0, b1, b2, b3); \ + \ + mov x6, 8; \ +4: \ + ld1 {RKEY.4s}, [x0], #16; \ + subs x6, x6, #1; \ + \ + ROUND4(0, b0, b1, b2, b3); \ + ROUND4(1, b1, b2, b3, b0); \ + ROUND4(2, b2, b3, b0, b1); \ + ROUND4(3, b3, b0, b1, b2); \ + \ + bne 4b; \ + \ + rotate_clockwise_90(b0, b1, b2, b3); \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + \ + /* repoint to rkey */ \ + sub x0, x0, #128; + +#define ROUND8(round, s0, s1, s2, s3, t0, t1, t2, t3) \ + /* rk ^ s1 ^ s2 ^ s3 */ \ + dup RX0.4s, RKEY.s[round]; \ + eor RTMP0.16b, s2.16b, s3.16b; \ + mov RX1.16b, RX0.16b; \ + eor RTMP1.16b, t2.16b, t3.16b; \ + eor RX0.16b, RX0.16b, s1.16b; \ + eor RX1.16b, 
RX1.16b, t1.16b; \ + eor RX0.16b, RX0.16b, RTMP0.16b; \ + eor RX1.16b, RX1.16b, RTMP1.16b; \ + \ + /* sbox, non-linear part */ \ + movi RTMP3.16b, #64; /* sizeof(sbox) / 4 */ \ + tbl RTMP0.16b, {v16.16b-v19.16b}, RX0.16b; \ + tbl RTMP1.16b, {v16.16b-v19.16b}, RX1.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + sub RX1.16b, RX1.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v20.16b-v23.16b}, RX0.16b; \ + tbx RTMP1.16b, {v20.16b-v23.16b}, RX1.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + sub RX1.16b, RX1.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v24.16b-v27.16b}, RX0.16b; \ + tbx RTMP1.16b, {v24.16b-v27.16b}, RX1.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + sub RX1.16b, RX1.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v28.16b-v31.16b}, RX0.16b; \ + tbx RTMP1.16b, {v28.16b-v31.16b}, RX1.16b; \ + \ + /* linear part */ \ + shl RX0.4s, RTMP0.4s, #8; \ + shl RX1.4s, RTMP1.4s, #8; \ + shl RTMP2.4s, RTMP0.4s, #16; \ + shl RTMP3.4s, RTMP1.4s, #16; \ + sri RX0.4s, RTMP0.4s, #(32 - 8); \ + sri RX1.4s, RTMP1.4s, #(32 - 8); \ + sri RTMP2.4s, RTMP0.4s, #(32 - 16); \ + sri RTMP3.4s, RTMP1.4s, #(32 - 16); \ + /* RX = x ^ rol32(x, 8) ^ rol32(x, 16) */ \ + eor RX0.16b, RX0.16b, RTMP0.16b; \ + eor RX1.16b, RX1.16b, RTMP1.16b; \ + eor RX0.16b, RX0.16b, RTMP2.16b; \ + eor RX1.16b, RX1.16b, RTMP3.16b; \ + /* RTMP0/1 ^= x ^ rol32(x, 24) ^ rol32(RX, 2) */ \ + shl RTMP2.4s, RTMP0.4s, #24; \ + shl RTMP3.4s, RTMP1.4s, #24; \ + sri RTMP2.4s, RTMP0.4s, #(32 - 24); \ + sri RTMP3.4s, RTMP1.4s, #(32 - 24); \ + eor RTMP0.16b, RTMP0.16b, RTMP2.16b; \ + eor RTMP1.16b, RTMP1.16b, RTMP3.16b; \ + shl RTMP2.4s, RX0.4s, #2; \ + shl RTMP3.4s, RX1.4s, #2; \ + sri RTMP2.4s, RX0.4s, #(32 - 2); \ + sri RTMP3.4s, RX1.4s, #(32 - 2); \ + eor RTMP0.16b, RTMP0.16b, RTMP2.16b; \ + eor RTMP1.16b, RTMP1.16b, RTMP3.16b; \ + /* s0/t0 ^= RTMP0/1 */ \ + eor s0.16b, s0.16b, RTMP0.16b; \ + eor t0.16b, t0.16b, RTMP1.16b; + +#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + rev32 b4.16b, b4.16b; \ + rev32 b5.16b, b5.16b; \ + rev32 b6.16b, b6.16b; \ + rev32 b7.16b, b7.16b; \ + \ + transpose_4x4(b0, b1, b2, b3); \ + transpose_4x4(b4, b5, b6, b7); \ + \ + mov x6, 8; \ +8: \ + ld1 {RKEY.4s}, [x0], #16; \ + subs x6, x6, #1; \ + \ + ROUND8(0, b0, b1, b2, b3, b4, b5, b6, b7); \ + ROUND8(1, b1, b2, b3, b0, b5, b6, b7, b4); \ + ROUND8(2, b2, b3, b0, b1, b6, b7, b4, b5); \ + ROUND8(3, b3, b0, b1, b2, b7, b4, b5, b6); \ + \ + bne 8b; \ + \ + rotate_clockwise_90(b0, b1, b2, b3); \ + rotate_clockwise_90(b4, b5, b6, b7); \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + rev32 b4.16b, b4.16b; \ + rev32 b5.16b, b5.16b; \ + rev32 b6.16b, b6.16b; \ + rev32 b7.16b, b7.16b; \ + \ + /* repoint to rkey */ \ + sub x0, x0, #128; + + +.align 3 +SYM_FUNC_START_LOCAL(__sm4_neon_crypt_blk1_4) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * w3: num blocks (1..4) + */ + PREPARE; + + ld1 {v0.16b}, [x2], #16; + mov v1.16b, v0.16b; + mov v2.16b, v0.16b; + mov v3.16b, v0.16b; + cmp w3, #2; + blt .Lblk4_load_input_done; + ld1 {v1.16b}, [x2], #16; + beq .Lblk4_load_input_done; + ld1 {v2.16b}, [x2], #16; + cmp w3, #3; + beq .Lblk4_load_input_done; + ld1 {v3.16b}, [x2]; + +.Lblk4_load_input_done: + SM4_CRYPT_BLK4(v0, v1, v2, v3); + + st1 {v0.16b}, [x1], #16; + cmp w3, #2; + blt .Lblk4_store_output_done; + st1 {v1.16b}, [x1], #16; + beq .Lblk4_store_output_done; + st1 {v2.16b}, [x1], #16; + cmp w3, #3; + beq 
.Lblk4_store_output_done; + st1 {v3.16b}, [x1]; + +.Lblk4_store_output_done: + ret; +SYM_FUNC_END(__sm4_neon_crypt_blk1_4) + +.align 3 +SYM_FUNC_START(sm4_neon_crypt_blk1_8) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * w3: num blocks (1..8) + */ + cmp w3, #5; + blt __sm4_neon_crypt_blk1_4; + + PREPARE; + + ld1 {v0.16b-v3.16b}, [x2], #64; + ld1 {v4.16b}, [x2], #16; + mov v5.16b, v4.16b; + mov v6.16b, v4.16b; + mov v7.16b, v4.16b; + beq .Lblk8_load_input_done; + ld1 {v5.16b}, [x2], #16; + cmp w3, #7; + blt .Lblk8_load_input_done; + ld1 {v6.16b}, [x2], #16; + beq .Lblk8_load_input_done; + ld1 {v7.16b}, [x2]; + +.Lblk8_load_input_done: + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + cmp w3, #6; + st1 {v0.16b-v3.16b}, [x1], #64; + st1 {v4.16b}, [x1], #16; + blt .Lblk8_store_output_done; + st1 {v5.16b}, [x1], #16; + beq .Lblk8_store_output_done; + st1 {v6.16b}, [x1], #16; + cmp w3, #7; + beq .Lblk8_store_output_done; + st1 {v7.16b}, [x1]; + +.Lblk8_store_output_done: + ret; +SYM_FUNC_END(sm4_neon_crypt_blk1_8) + +.align 3 +SYM_FUNC_START(sm4_neon_crypt_blk8) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * w3: nblocks (multiples of 8) + */ + PREPARE; + +.Lcrypt_loop_blk: + subs w3, w3, #8; + bmi .Lcrypt_end; + + ld1 {v0.16b-v3.16b}, [x2], #64; + ld1 {v4.16b-v7.16b}, [x2], #64; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + st1 {v0.16b-v3.16b}, [x1], #64; + st1 {v4.16b-v7.16b}, [x1], #64; + + b .Lcrypt_loop_blk; + +.Lcrypt_end: + ret; +SYM_FUNC_END(sm4_neon_crypt_blk8) + +.align 3 +SYM_FUNC_START(sm4_neon_cbc_dec_blk8) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks (multiples of 8) + */ + PREPARE; + + ld1 {RIV.16b}, [x3]; + +.Lcbc_loop_blk: + subs w4, w4, #8; + bmi .Lcbc_end; + + ld1 {v0.16b-v3.16b}, [x2], #64; + ld1 {v4.16b-v7.16b}, [x2]; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + sub x2, x2, #64; + eor v0.16b, v0.16b, RIV.16b; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v1.16b, v1.16b, RTMP0.16b; + eor v2.16b, v2.16b, RTMP1.16b; + eor v3.16b, v3.16b, RTMP2.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + eor v4.16b, v4.16b, RTMP3.16b; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v5.16b, v5.16b, RTMP0.16b; + eor v6.16b, v6.16b, RTMP1.16b; + eor v7.16b, v7.16b, RTMP2.16b; + + mov RIV.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + b .Lcbc_loop_blk; + +.Lcbc_end: + /* store new IV */ + st1 {RIV.16b}, [x3]; + + ret; +SYM_FUNC_END(sm4_neon_cbc_dec_blk8) + +.align 3 +SYM_FUNC_START(sm4_neon_cfb_dec_blk8) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks (multiples of 8) + */ + PREPARE; + + ld1 {v0.16b}, [x3]; + +.Lcfb_loop_blk: + subs w4, w4, #8; + bmi .Lcfb_end; + + ld1 {v1.16b, v2.16b, v3.16b}, [x2], #48; + ld1 {v4.16b-v7.16b}, [x2]; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + sub x2, x2, #48; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v4.16b, v4.16b, RTMP0.16b; + eor v5.16b, v5.16b, RTMP1.16b; + eor v6.16b, v6.16b, RTMP2.16b; + eor v7.16b, v7.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + mov v0.16b, RTMP3.16b; + + b .Lcfb_loop_blk; + +.Lcfb_end: + /* store new IV */ + st1 {v0.16b}, [x3]; + + ret; +SYM_FUNC_END(sm4_neon_cfb_dec_blk8) + +.align 3 
+SYM_FUNC_START(sm4_neon_ctr_enc_blk8) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: ctr (big endian, 128 bit) + * w4: nblocks (multiples of 8) + */ + PREPARE; + + ldp x7, x8, [x3]; + rev x7, x7; + rev x8, x8; + +.Lctr_loop_blk: + subs w4, w4, #8; + bmi .Lctr_end; + +#define inc_le128(vctr) \ + mov vctr.d[1], x8; \ + mov vctr.d[0], x7; \ + adds x8, x8, #1; \ + adc x7, x7, xzr; \ + rev64 vctr.16b, vctr.16b; + + /* construct CTRs */ + inc_le128(v0); /* +0 */ + inc_le128(v1); /* +1 */ + inc_le128(v2); /* +2 */ + inc_le128(v3); /* +3 */ + inc_le128(v4); /* +4 */ + inc_le128(v5); /* +5 */ + inc_le128(v6); /* +6 */ + inc_le128(v7); /* +7 */ + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v4.16b, v4.16b, RTMP0.16b; + eor v5.16b, v5.16b, RTMP1.16b; + eor v6.16b, v6.16b, RTMP2.16b; + eor v7.16b, v7.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + b .Lctr_loop_blk; + +.Lctr_end: + /* store new CTR */ + rev x7, x7; + rev x8, x8; + stp x7, x8, [x3]; + + ret; +SYM_FUNC_END(sm4_neon_ctr_enc_blk8) diff --git a/arch/arm64/crypto/sm4-neon-glue.c b/arch/arm64/crypto/sm4-neon-glue.c new file mode 100644 index 0000000000000000000000000000000000000000..03a6a6866a3112f0dc10dc2fc119f7d53c867a1d --- /dev/null +++ b/arch/arm64/crypto/sm4-neon-glue.c @@ -0,0 +1,442 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4 Cipher Algorithm, using ARMv8 NEON + * as specified in + * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html + * + * Copyright (C) 2022, Alibaba Group. + * Copyright (C) 2022 Tianjia Zhang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BYTES2BLKS(nbytes) ((nbytes) >> 4) +#define BYTES2BLK8(nbytes) (((nbytes) >> 4) & ~(8 - 1)) + +asmlinkage void sm4_neon_crypt_blk1_8(const u32 *rkey, u8 *dst, const u8 *src, + unsigned int nblks); +asmlinkage void sm4_neon_crypt_blk8(const u32 *rkey, u8 *dst, const u8 *src, + unsigned int nblks); +asmlinkage void sm4_neon_cbc_dec_blk8(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_neon_cfb_dec_blk8(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_neon_ctr_enc_blk8(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); + +static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_expandkey(ctx, key, key_len); +} + +static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey) +{ + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLK8(nbytes); + if (nblks) { + sm4_neon_crypt_blk8(rkey, dst, src, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_neon_crypt_blk1_8(rkey, dst, src, nblks); + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int 
sm4_ecb_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_ecb_do_crypt(req, ctx->rkey_enc); +} + +static int sm4_ecb_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_ecb_do_crypt(req, ctx->rkey_dec); +} + +static int sm4_cbc_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *iv = walk.iv; + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) { + crypto_xor_cpy(dst, src, iv, SM4_BLOCK_SIZE); + sm4_crypt_block(ctx->rkey_enc, dst, dst); + iv = dst; + src += SM4_BLOCK_SIZE; + dst += SM4_BLOCK_SIZE; + nbytes -= SM4_BLOCK_SIZE; + } + if (iv != walk.iv) + memcpy(walk.iv, iv, SM4_BLOCK_SIZE); + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int sm4_cbc_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLK8(nbytes); + if (nblks) { + sm4_neon_cbc_dec_blk8(ctx->rkey_dec, dst, src, + walk.iv, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + u8 keystream[SM4_BLOCK_SIZE * 8]; + u8 iv[SM4_BLOCK_SIZE]; + int i; + + sm4_neon_crypt_blk1_8(ctx->rkey_dec, keystream, + src, nblks); + + src += ((int)nblks - 2) * SM4_BLOCK_SIZE; + dst += (nblks - 1) * SM4_BLOCK_SIZE; + memcpy(iv, src + SM4_BLOCK_SIZE, SM4_BLOCK_SIZE); + + for (i = nblks - 1; i > 0; i--) { + crypto_xor_cpy(dst, src, + &keystream[i * SM4_BLOCK_SIZE], + SM4_BLOCK_SIZE); + src -= SM4_BLOCK_SIZE; + dst -= SM4_BLOCK_SIZE; + } + crypto_xor_cpy(dst, walk.iv, + keystream, SM4_BLOCK_SIZE); + memcpy(walk.iv, iv, SM4_BLOCK_SIZE); + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int sm4_cfb_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + const u8 *iv = walk.iv; + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) { + sm4_crypt_block(ctx->rkey_enc, keystream, iv); + crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE); + iv = dst; + src += SM4_BLOCK_SIZE; + dst += SM4_BLOCK_SIZE; + nbytes -= SM4_BLOCK_SIZE; + } + if (iv != walk.iv) + memcpy(walk.iv, iv, SM4_BLOCK_SIZE); + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + + err = 
skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int sm4_cfb_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLK8(nbytes); + if (nblks) { + sm4_neon_cfb_dec_blk8(ctx->rkey_enc, dst, src, + walk.iv, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + u8 keystream[SM4_BLOCK_SIZE * 8]; + + memcpy(keystream, walk.iv, SM4_BLOCK_SIZE); + if (nblks > 1) + memcpy(&keystream[SM4_BLOCK_SIZE], src, + (nblks - 1) * SM4_BLOCK_SIZE); + memcpy(walk.iv, src + (nblks - 1) * SM4_BLOCK_SIZE, + SM4_BLOCK_SIZE); + + sm4_neon_crypt_blk1_8(ctx->rkey_enc, keystream, + keystream, nblks); + + crypto_xor_cpy(dst, src, keystream, + nblks * SM4_BLOCK_SIZE); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + + sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int sm4_ctr_crypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLK8(nbytes); + if (nblks) { + sm4_neon_ctr_enc_blk8(ctx->rkey_enc, dst, src, + walk.iv, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + u8 keystream[SM4_BLOCK_SIZE * 8]; + int i; + + for (i = 0; i < nblks; i++) { + memcpy(&keystream[i * SM4_BLOCK_SIZE], + walk.iv, SM4_BLOCK_SIZE); + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + } + sm4_neon_crypt_blk1_8(ctx->rkey_enc, keystream, + keystream, nblks); + + crypto_xor_cpy(dst, src, keystream, + nblks * SM4_BLOCK_SIZE); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + + sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static struct skcipher_alg sm4_algs[] = { + { + .base = { + .cra_name = "ecb(sm4)", + .cra_driver_name = "ecb-sm4-neon", + .cra_priority = 200, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_ecb_encrypt, + .decrypt = sm4_ecb_decrypt, + }, { + .base = { + .cra_name = "cbc(sm4)", + .cra_driver_name = "cbc-sm4-neon", + 
.cra_priority = 200, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_cbc_encrypt, + .decrypt = sm4_cbc_decrypt, + }, { + .base = { + .cra_name = "cfb(sm4)", + .cra_driver_name = "cfb-sm4-neon", + .cra_priority = 200, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_cfb_encrypt, + .decrypt = sm4_cfb_decrypt, + }, { + .base = { + .cra_name = "ctr(sm4)", + .cra_driver_name = "ctr-sm4-neon", + .cra_priority = 200, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_ctr_crypt, + .decrypt = sm4_ctr_crypt, + } +}; + +static int __init sm4_init(void) +{ + return crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); +} + +static void __exit sm4_exit(void) +{ + crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); +} + +module_init(sm4_init); +module_exit(sm4_exit); + +MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR using ARMv8 NEON"); +MODULE_ALIAS_CRYPTO("sm4-neon"); +MODULE_ALIAS_CRYPTO("sm4"); +MODULE_ALIAS_CRYPTO("ecb(sm4)"); +MODULE_ALIAS_CRYPTO("cbc(sm4)"); +MODULE_ALIAS_CRYPTO("cfb(sm4)"); +MODULE_ALIAS_CRYPTO("ctr(sm4)"); +MODULE_AUTHOR("Tianjia Zhang "); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index 12aced900ada4ef79609fb8062285e093c8a69a3..80714a8589a03e8ccb4fef277a57031a0df6a3b8 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -191,9 +191,8 @@ static inline void gic_arch_disable_irqs(void) static inline void gic_arch_restore_irqs(unsigned long flags) { - if (gic_supports_nmi()) - asm volatile ("msr daif, %0" : : "r" (flags >> 32) - : "memory"); + if (gic_supports_nmi() && !(flags & GIC_PRIO_PSR_I_SET)) + gic_arch_enable_irqs(); } #endif /* __ASSEMBLY__ */ #endif /* __ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h index ec7720dbe2c801bc44344d7893cfc510fed7d0d7..1ac8bc293ea2cb85a703ef76529bd879fdd48baa 100644 --- a/arch/arm64/include/asm/brk-imm.h +++ b/arch/arm64/include/asm/brk-imm.h @@ -21,6 +21,7 @@ #define KPROBES_BRK_IMM 0x004 #define UPROBES_BRK_IMM 0x005 #define KPROBES_BRK_SS_IMM 0x006 +#define KLP_BRK_IMM 0x007 #define FAULT_BRK_IMM 0x100 #define KGDB_DYN_DBG_BRK_IMM 0x400 #define KGDB_COMPILED_DBG_BRK_IMM 0x401 diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 663814c0bfc8385d79976162be9a8a9650cbcdbf..2e0066f13ad41b9f954eda94d9d21ce6f94baca9 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -60,6 +60,7 @@ #define ARM_CPU_IMP_FUJITSU 0x46 #define ARM_CPU_IMP_HISI 0x48 #define ARM_CPU_IMP_PHYTIUM 0x70 +#define ARM_CPU_IMP_APPLE 0x61 #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -77,6 +78,10 @@ #define ARM_CPU_PART_CORTEX_A78 0xD41 #define ARM_CPU_PART_CORTEX_X1 0xD44 #define ARM_CPU_PART_CORTEX_A78C 0xD4B +#define ARM_CPU_PART_CORTEX_A510 0xD46 +#define ARM_CPU_PART_CORTEX_A710 0xD47 +#define 
ARM_CPU_PART_CORTEX_X2 0xD48 +#define ARM_CPU_PART_NEOVERSE_N2 0xD49 #define APM_CPU_PART_POTENZA 0x000 @@ -111,6 +116,9 @@ #define PHYTIUM_CPU_PART_2004 0X663 #define PHYTIUM_CPU_PART_2500 0X663 +#define APPLE_CPU_PART_M1_ICESTORM 0x022 +#define APPLE_CPU_PART_M1_FIRESTORM 0x023 + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) @@ -125,6 +133,10 @@ #define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78) #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) +#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) +#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) +#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) +#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) @@ -149,6 +161,8 @@ #define MIDR_FT_2000PLUS MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000PLUS) #define MIDR_FT_2004 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2004) #define MIDR_FT_2500 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2500) +#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) +#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 657c921fd784a7c12a0e45d814c6bc089f9ca0ec..bc015465ecd29f1b239e064d5f8ffac6865ec66b 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -56,6 +56,8 @@ #define BRK64_OPCODE_KPROBES_SS (AARCH64_BREAK_MON | (KPROBES_BRK_SS_IMM << 5)) /* uprobes BRK opcodes with ESR encoding */ #define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5)) +/* klp BRK opcodes with ESR encoding */ +#define BRK64_OPCODE_KLP (AARCH64_BREAK_MON | (KLP_BRK_IMM << 5)) /* AArch32 */ #define DBG_ESR_EVT_BKPT 0x4 diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index be9cb527b309e86a31608850898c85ac35411626..b2cb230ef21eb0909bb32d0c716da99341f2c147 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -106,6 +106,8 @@ #define KERNEL_HWCAP_BTI __khwcap2_feature(BTI) #define KERNEL_HWCAP_MTE __khwcap2_feature(MTE) #define KERNEL_HWCAP_ECV __khwcap2_feature(ECV) +#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP) +#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h index 7b9ea5dcea4d29603acd6d6b2a6c8a014cc88d3d..bcb6c4081978f749a74e40b7c985bc1caa9c2848 100644 --- a/arch/arm64/include/asm/livepatch.h +++ b/arch/arm64/include/asm/livepatch.h @@ -58,9 +58,18 @@ int klp_check_calltrace(struct klp_patch *patch, int enable); 
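For context on the new MIDR_* identifiers above: MIDR_CPU_MODEL() packs an implementer code and a part number into the architected MIDR_EL1 layout, and model checks compare only those two fields. A self-contained sketch of that composition (the EX_-prefixed names are invented here; the bit positions follow the MIDR_EL1 register layout):

    #include <stdint.h>
    #include <stdbool.h>

    /* MIDR_EL1: [31:24] implementer, [23:20] variant, [19:16] architecture,
     * [15:4] part number, [3:0] revision. */
    #define EX_MIDR_IMP_SHIFT   24
    #define EX_MIDR_ARCH_SHIFT  16
    #define EX_MIDR_PART_SHIFT   4

    /* Variant and revision left at zero, architecture field set to 0xf
     * ("implementation defined by the CPUID scheme"). */
    #define EX_MIDR_CPU_MODEL(imp, part)                      \
        (((uint32_t)(imp) << EX_MIDR_IMP_SHIFT) |             \
         (0xfu << EX_MIDR_ARCH_SHIFT) |                       \
         ((uint32_t)(part) << EX_MIDR_PART_SHIFT))

    /* Compare implementer + part number only, ignoring variant/revision. */
    static bool ex_midr_is_model(uint32_t midr, uint32_t model)
    {
        const uint32_t mask = (0xffu << EX_MIDR_IMP_SHIFT) |
                              (0xfffu << EX_MIDR_PART_SHIFT);

        return (midr & mask) == (model & mask);
    }

With the constants added above, EX_MIDR_CPU_MODEL(0x61, 0x023) should reproduce the value behind MIDR_APPLE_M1_FIRESTORM.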
struct arch_klp_data { u32 old_insns[LJMP_INSN_SIZE]; + + /* + * Saved opcode at the entry of the old func (which maybe replaced + * with breakpoint). + */ + u32 saved_opcode; }; +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +int arch_klp_module_check_calltrace(void *data); #endif diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index b293c0757f9b9420a8dfeb7eefe74b7736c92023..098247c4c6685aa12d37f978dd41a92f4aada6f4 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -822,6 +822,7 @@ /* id_aa64mmfr1 */ #define ID_AA64MMFR1_ECBHB_SHIFT 60 +#define ID_AA64MMFR1_AFP_SHIFT 44 #define ID_AA64MMFR1_ETS_SHIFT 36 #define ID_AA64MMFR1_TWED_SHIFT 32 #define ID_AA64MMFR1_XNX_SHIFT 28 diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 7b23b16f21ce39ff25595920e3d561f292c623e5..f03731847d9dfdbed849baceafb61f91745bd1cc 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -76,5 +76,7 @@ #define HWCAP2_BTI (1 << 17) #define HWCAP2_MTE (1 << 18) #define HWCAP2_ECV (1 << 19) +#define HWCAP2_AFP (1 << 20) +#define HWCAP2_RPRES (1 << 21) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 271862babb8a24bc3919f76431537f32c53a999d..f5ce1e3a532fcc78a3af99a9d685b6b8f9c5bc1a 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -208,6 +208,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0), ARM64_FTR_END, }; @@ -311,6 +312,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0), @@ -2379,6 +2381,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), #endif /* CONFIG_ARM64_MTE */ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), + HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), + HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index e658b7a17d9b8d477e9a0b811cf56928b6748b4b..97dab8f4634f71ac4b884d102fa7bb20bdc4e7e2 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -95,6 +95,8 @@ static const char *const hwcap_str[] = { [KERNEL_HWCAP_BTI] = "bti", [KERNEL_HWCAP_MTE] = "mte", [KERNEL_HWCAP_ECV] = "ecv", + [KERNEL_HWCAP_AFP] = "afp", + [KERNEL_HWCAP_RPRES] = "rpres", }; #ifdef CONFIG_AARCH32_EL0 diff --git a/arch/arm64/kernel/ipi_nmi.c 
b/arch/arm64/kernel/ipi_nmi.c index 3b105852fc176c757ca001ba1208b143f2b091fa..2cf28e511b23b4d045c80864d4adf2d0bfb41d84 100644 --- a/arch/arm64/kernel/ipi_nmi.c +++ b/arch/arm64/kernel/ipi_nmi.c @@ -33,12 +33,24 @@ void arm64_send_nmi(cpumask_t *mask) __ipi_send_mask(ipi_nmi_desc, mask); } +static void ipi_cpu_backtrace(void *info) +{ + printk_safe_enter(); + nmi_cpu_backtrace(get_irq_regs()); + printk_safe_exit(); +} + +static void arm64_send_ipi(cpumask_t *mask) +{ + smp_call_function_many(mask, ipi_cpu_backtrace, NULL, false); +} + bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) { if (!ipi_nmi_desc) - return false; - - nmi_trigger_cpumask_backtrace(mask, exclude_self, arm64_send_nmi); + nmi_trigger_cpumask_backtrace(mask, exclude_self, arm64_send_ipi); + else + nmi_trigger_cpumask_backtrace(mask, exclude_self, arm64_send_nmi); return true; } diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 4bc35725af36fa907b706b780f6965443a7c0de1..cda56066d85962238ee520657691d1f9ec147ced 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -67,6 +68,7 @@ struct klp_func_list { struct walk_stackframe_args { int enable; struct klp_func_list *check_funcs; + struct module *mod; int ret; }; @@ -80,16 +82,6 @@ static inline unsigned long klp_size_to_check(unsigned long func_size, return size; } -static inline int klp_compare_address(unsigned long pc, unsigned long func_addr, - const char *func_name, unsigned long check_size) -{ - if (pc >= func_addr && pc < func_addr + check_size) { - pr_err("func %s is in use!\n", func_name); - return -EBUSY; - } - return 0; -} - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; @@ -141,7 +133,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { if (enable) { - if (func->force == KLP_ENFORCEMENT) + if (func->patched || func->force == KLP_ENFORCEMENT) continue; /* * When enable, checking the currently @@ -264,23 +256,11 @@ static void free_list(struct klp_func_list **funcs) } } -int klp_check_calltrace(struct klp_patch *patch, int enable) +static int do_check_calltrace(struct walk_stackframe_args *args, + bool (*fn)(void *, unsigned long)) { struct task_struct *g, *t; struct stackframe frame; - int ret = 0; - struct klp_func_list *check_funcs = NULL; - struct walk_stackframe_args args = { - .enable = enable, - .ret = 0 - }; - - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - args.check_funcs = check_funcs; for_each_process_thread(g, t) { /* @@ -293,7 +273,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) if (t == current) { /* current on this CPU */ frame.fp = (unsigned long)__builtin_frame_address(0); - frame.pc = (unsigned long)klp_check_calltrace; + frame.pc = (unsigned long)do_check_calltrace; } else if (strncmp(t->comm, "migration/", 10) == 0) { /* * current on other CPU @@ -302,29 +282,109 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) * task_comm here, because we can't get the * cpu_curr(task_cpu(t))). This assumes that no * other thread will pretend to be a stopper via - * task_comm.  + * task_comm. 
*/ continue; } else { frame.fp = thread_saved_fp(t); frame.pc = thread_saved_pc(t); } - if (check_funcs != NULL) { - start_backtrace(&frame, frame.fp, frame.pc); - walk_stackframe(t, &frame, klp_check_jump_func, &args); - if (args.ret) { - ret = args.ret; - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - goto out; - } + start_backtrace(&frame, frame.fp, frame.pc); + walk_stackframe(t, &frame, fn, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; } } + return 0; +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + struct klp_func_list *check_funcs = NULL; + struct walk_stackframe_args args = { + .enable = enable, + .ret = 0 + }; + ret = klp_check_activeness_func(patch, enable, &check_funcs); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; + } + if (!check_funcs) + goto out; + + args.check_funcs = check_funcs; + ret = do_check_calltrace(&args, klp_check_jump_func); out: free_list(&check_funcs); return ret; } + +static bool check_module_calltrace(void *data, unsigned long pc) +{ + struct walk_stackframe_args *args = data; + + if (within_module_core(pc, args->mod)) { + pr_err("module %s is in use!\n", args->mod->name); + args->ret = -EBUSY; + return false; + } + return true; +} + +int arch_klp_module_check_calltrace(void *data) +{ + struct walk_stackframe_args args = { + .mod = (struct module *)data, + .ret = 0 + }; + + return do_check_calltrace(&args, check_module_calltrace); +} + +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + u32 insn = BRK64_OPCODE_KLP; + u32 *addr = (u32 *)old_func; + + arch_data->saved_opcode = le32_to_cpu(*addr); + aarch64_insn_patch_text(&old_func, &insn, 1); + return 0; +} + +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + aarch64_insn_patch_text(&old_func, &arch_data->saved_opcode, 1); +} + +static int klp_breakpoint_handler(struct pt_regs *regs, unsigned int esr) +{ + void *brk_func = NULL; + unsigned long addr = instruction_pointer(regs); + + brk_func = klp_get_brk_func((void *)addr); + if (!brk_func) { + pr_warn("Unrecoverable livepatch detected.\n"); + BUG(); + } + + instruction_pointer_set(regs, (unsigned long)brk_func); + return 0; +} + +static struct break_hook klp_break_hook = { + .imm = KLP_BRK_IMM, + .fn = klp_breakpoint_handler, +}; + +void arch_klp_init(void) +{ + register_kernel_break_hook(&klp_break_hook); +} #endif long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index d288bb4a138b70a3ec6ffb3dbd54822866347ed9..e5e2f1e888a29239f7a6b729a4d8846d98735e86 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -447,10 +447,12 @@ int setup_sigframe_layout(struct rt_sigframe_user_layout *user, bool add_all) { int err; - err = sigframe_alloc(user, &user->fpsimd_offset, - sizeof(struct fpsimd_context)); - if (err) - return err; + if (system_supports_fpsimd()) { + err = sigframe_alloc(user, &user->fpsimd_offset, + sizeof(struct fpsimd_context)); + if (err) + return err; + } /* fault information, if valid */ if (add_all || current->thread.fault_code) { diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index b2d73fc0d1ef48091ee428f915cad6054b3c3c21..9e1459534ce54e074bbc580362fac31c297044f6 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ 
b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -248,6 +248,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, IRQCHIP_STATE_PENDING, &val); WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); + } else if (vgic_irq_is_mapped_level(irq)) { + val = vgic_get_phys_line_level(irq); } else { val = irq_is_pending(irq); } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index b19bdd48cc4364b677e63ee68e9b5db7224b3fe4..5023c7e1f7540f153d32f8d107be79939f45df67 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -63,8 +63,34 @@ EXPORT_SYMBOL(memstart_addr); * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4). * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory, * otherwise it is empty. + * + * Memory reservation for the crash kernel is either done early or deferred + * depending on DMA memory zones configs (ZONE_DMA) -- + * + * In the absence of ZONE_DMA configs, arm64_dma_phys_limit is initialized + * here instead of in max_zone_phys(). This lets early reservation of + * crash kernel memory which has a dependency on arm64_dma_phys_limit. + * Reserving memory early for the crash kernel allows linear creation of block + * mappings (greater than page-granularity) for all the memory bank ranges. + * In this scheme a comparatively quicker boot is observed. + * + * If ZONE_DMA configs are defined, crash kernel memory reservation + * is delayed until DMA zone memory range size initialization is performed in + * zone_sizes_init(). The deferral is necessary to steer clear of the DMA zone + * memory range and avoid overlap allocation. So crash kernel memory boundaries + * are not known when mapping all bank memory ranges, which otherwise means it is + * not possible to exclude the crash kernel range from creating block mappings, + * so page-granularity mappings are created for the entire memory range. + * Hence a slightly slower boot is observed. + * + * Note: Page-granularity mappings are necessary for the crash kernel memory + * range for shrinking its size via the /sys/kernel/kexec_crash_size interface.
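Condensed, the policy this comment lays out hinges on one configuration predicate, which the hunks below apply open-coded in arm64_memblock_init() and bootmem_init(); a purely illustrative helper (the name is invented):

    /* Illustration only: the crash kernel can be carved out early, before
     * the linear map is created, exactly when no DMA zone limit needs to
     * be computed first. */
    static inline bool crashkernel_reserved_early(void)
    {
        return !IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32);
    }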
*/ -phys_addr_t arm64_dma_phys_limit __ro_after_init; +#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32) +phys_addr_t __ro_after_init arm64_dma_phys_limit; +#else +phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1; +#endif #ifndef CONFIG_KEXEC_CORE static void __init reserve_crashkernel(void) @@ -173,8 +199,6 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) if (!arm64_dma_phys_limit) arm64_dma_phys_limit = dma32_phys_limit; #endif - if (!arm64_dma_phys_limit) - arm64_dma_phys_limit = PHYS_MASK + 1; max_zone_pfns[ZONE_NORMAL] = max; free_area_init(max_zone_pfns); @@ -298,7 +322,27 @@ static void __init reserve_memmap_regions(void) for (i = 0; i < mbk_memmap_cnt; i++) { base = mbk_memmap_regions[i].base; size = mbk_memmap_regions[i].size; - memblock_reserve(base, size); + + if (!memblock_is_region_memory(base, size)) { + pr_warn("memmap reserve: 0x%08llx - 0x%08llx is not a memory region - ignore\n", + base, base + size); + continue; + } + + if (memblock_is_region_reserved(base, size)) { + pr_warn("memmap reserve: 0x%08llx - 0x%08llx overlaps in-use memory region - ignore\n", + base, base + size); + continue; + } + + if (memblock_reserve(base, size)) { + pr_warn("memmap reserve: 0x%08llx - 0x%08llx failed\n", + base, base + size); + continue; + } + + pr_info("memmap reserved: 0x%08llx - 0x%08llx (%lld MB)", + base, base + size, size >> 20); memblock_mark_memmap(base, size); } } @@ -478,6 +522,9 @@ void __init arm64_memblock_init(void) reserve_elfcorehdr(); + if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) + reserve_crashkernel(); + high_memory = __va(memblock_end_of_DRAM() - 1) + 1; } @@ -533,7 +580,8 @@ void __init bootmem_init(void) * request_standard_resources() depends on crashkernel's memory being * reserved, so do it here. */ - reserve_crashkernel(); + if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)) + reserve_crashkernel(); reserve_quick_kexec(); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index c21d911aa755b543af7502a84fa5a96adca2bd38..e767653540407334bc782a5331578933297ca8ad 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -62,6 +62,7 @@ static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused; static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused; static DEFINE_SPINLOCK(swapper_pgdir_lock); +static DEFINE_MUTEX(fixmap_lock); void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) { @@ -315,6 +316,12 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, } BUG_ON(p4d_bad(p4d)); + /* + * No need for locking during early boot. And it doesn't work as + * expected with KASLR enabled. 
+ */ + if (system_state != SYSTEM_BOOTING) + mutex_lock(&fixmap_lock); pudp = pud_set_fixmap_offset(p4dp, addr); do { pud_t old_pud = READ_ONCE(*pudp); @@ -345,6 +352,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, } while (pudp++, addr = next, addr != end); pud_clear_fixmap(); + if (system_state != SYSTEM_BOOTING) + mutex_unlock(&fixmap_lock); } static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, @@ -553,17 +562,6 @@ static void __init map_mem(pgd_t *pgdp) PAGE_KERNEL, NO_CONT_MAPPINGS); memblock_clear_nomap(kernel_start, kernel_end - kernel_start); -#ifdef CONFIG_KEXEC_CORE - if (crashk_res.end) { - __map_memblock(pgdp, crashk_res.start, - crashk_res.end + 1, - PAGE_KERNEL, - NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); - memblock_clear_nomap(crashk_res.start, - resource_size(&crashk_res)); - } -#endif - #ifdef CONFIG_KFENCE /* * Map the __kfence_pool at page granularity now. @@ -576,6 +574,22 @@ static void __init map_mem(pgd_t *pgdp) memblock_clear_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE); } #endif + + /* + * Use page-level mappings here so that we can shrink the region + * in page granularity and put back unused memory to buddy system + * through /sys/kernel/kexec_crash_size interface. + */ +#ifdef CONFIG_KEXEC_CORE + if (crashk_res.end) { + __map_memblock(pgdp, crashk_res.start, + crashk_res.end + 1, + PAGE_KERNEL, + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); + memblock_clear_nomap(crashk_res.start, + resource_size(&crashk_res)); + } +#endif } void mark_rodata_ro(void) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 064577ff9ff591983209225ac7d89e584de5394d..9c6cab71ba98b4c859f918ea10cd18f29ab84f7b 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1040,15 +1040,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) goto out_off; } - /* 1. Initial fake pass to compute ctx->idx. */ - - /* Fake pass to fill in ctx->offset. */ - if (build_body(&ctx, extra_pass)) { + /* + * 1. Initial fake pass to compute ctx->idx and ctx->offset. + * + * BPF line info needs ctx->offset[i] to be the offset of + * instruction[i] in jited image, so build prologue first. 
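To make the ordering concrete: the offsets recorded while emitting the body are measured in AArch64 instructions from the start of the JITed image, so they are only meaningful if the prologue has already been counted. A toy, self-contained calculation with invented numbers (only AARCH64_INSN_SIZE == 4 is real):

    #include <stdio.h>

    #define AARCH64_INSN_SIZE 4   /* every AArch64 instruction is 4 bytes */

    int main(void)
    {
        /* Invented example: a 24-instruction prologue followed by the body.
         * With the prologue built first, the first BPF insn is recorded at
         * instruction index 24, i.e. byte offset 96 in the image, which is
         * what the line info must point at.  With the old ordering the
         * recorded offset would have ignored the prologue entirely. */
        int prologue_insns = 24;
        int first_body_insn = prologue_insns + 0;

        printf("byte offset = %d\n", first_body_insn * AARCH64_INSN_SIZE);
        return 0;
    }

The later hunk in this file then performs exactly that instructions-to-bytes conversion, scaling every ctx.offset[] entry by AARCH64_INSN_SIZE before handing the array to the BPF core.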
+ */ + if (build_prologue(&ctx, was_classic)) { prog = orig_prog; goto out_off; } - if (build_prologue(&ctx, was_classic)) { + if (build_body(&ctx, extra_pass)) { prog = orig_prog; goto out_off; } @@ -1121,6 +1124,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) prog->jited_len = prog_size; if (!prog->is_func || extra_pass) { + int i; + + /* offset[prog->len] is the size of program */ + for (i = 0; i <= prog->len; i++) + ctx.offset[i] *= AARCH64_INSN_SIZE; bpf_prog_fill_jited_linfo(prog, ctx.offset + 1); out_off: kfree(ctx.offset); diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c index 35318a635a5fae7b6de24723f6f43338bab85b67..75e1f9df5f60449c6ec65c3e97ed3bd5ca2b07e2 100644 --- a/arch/csky/kernel/perf_callchain.c +++ b/arch/csky/kernel/perf_callchain.c @@ -49,7 +49,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, { struct stackframe buftail; unsigned long lr = 0; - unsigned long *user_frame_tail = (unsigned long *)fp; + unsigned long __user *user_frame_tail = (unsigned long __user *)fp; /* Check accessibility of one struct frame_tail beyond */ if (!access_ok(user_frame_tail, sizeof(buftail))) diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c index 0ca49b5e3dd378145dda5f3fbbf52b5587ed0275..243228b0aa075e4d9862905afeb88824f924f265 100644 --- a/arch/csky/kernel/signal.c +++ b/arch/csky/kernel/signal.c @@ -136,7 +136,7 @@ static inline void __user *get_sigframe(struct ksignal *ksig, static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { - struct rt_sigframe *frame; + struct rt_sigframe __user *frame; int err = 0; struct csky_vdso *vdso = current->mm->context.vdso; diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c index 59f7dfe50a4d011b437c69c4db5ed440cbe11545..a055616942a1eee821fb7207f9558856fbac8896 100644 --- a/arch/m68k/coldfire/device.c +++ b/arch/m68k/coldfire/device.c @@ -480,7 +480,7 @@ static struct platform_device mcf_i2c5 = { #endif /* MCFI2C_BASE5 */ #endif /* IS_ENABLED(CONFIG_I2C_IMX) */ -#if IS_ENABLED(CONFIG_MCF_EDMA) +#ifdef MCFEDMA_BASE static const struct dma_slave_map mcf_edma_map[] = { { "dreq0", "rx-tx", MCF_EDMA_FILTER_PARAM(0) }, @@ -552,7 +552,7 @@ static struct platform_device mcf_edma = { .platform_data = &mcf_edma_data, } }; -#endif /* IS_ENABLED(CONFIG_MCF_EDMA) */ +#endif /* MCFEDMA_BASE */ #ifdef MCFSDHC_BASE static struct mcf_esdhc_platform_data mcf_esdhc_data = { @@ -610,7 +610,7 @@ static struct platform_device *mcf_devices[] __initdata = { &mcf_i2c5, #endif #endif -#if IS_ENABLED(CONFIG_MCF_EDMA) +#ifdef MCFEDMA_BASE &mcf_edma, #endif #ifdef MCFSDHC_BASE diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 304b04ffea2faf4104cab64cfba5036ad010abf3..7c5d92e2915ca585ec076644770b22986a477aee 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -167,27 +167,27 @@ extern long __user_bad(void); #define __get_user(x, ptr) \ ({ \ - unsigned long __gu_val = 0; \ long __gu_err; \ switch (sizeof(*(ptr))) { \ case 1: \ - __get_user_asm("lbu", (ptr), __gu_val, __gu_err); \ + __get_user_asm("lbu", (ptr), x, __gu_err); \ break; \ case 2: \ - __get_user_asm("lhu", (ptr), __gu_val, __gu_err); \ + __get_user_asm("lhu", (ptr), x, __gu_err); \ break; \ case 4: \ - __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ + __get_user_asm("lw", (ptr), x, __gu_err); \ break; \ - case 8: \ - __gu_err = __copy_from_user(&__gu_val, ptr, 8); \ - if 
(__gu_err) \ - __gu_err = -EFAULT; \ + case 8: { \ + __u64 __x = 0; \ + __gu_err = raw_copy_from_user(&__x, ptr, 8) ? \ + -EFAULT : 0; \ + (x) = (typeof(x))(typeof((x) - (x)))__x; \ break; \ + } \ default: \ /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\ } \ - x = (__force __typeof__(*(ptr))) __gu_val; \ __gu_err; \ }) diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S index ea5b5a83f1e11b82fea00297ef50d966950a2d37..011d1d678840aa513166e0cfbf209bb975dc6a90 100644 --- a/arch/mips/dec/int-handler.S +++ b/arch/mips/dec/int-handler.S @@ -131,7 +131,7 @@ */ mfc0 t0,CP0_CAUSE # get pending interrupts mfc0 t1,CP0_STATUS -#ifdef CONFIG_32BIT +#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) lw t2,cpu_fpu_mask #endif andi t0,ST0_IM # CAUSE.CE may be non-zero! @@ -139,7 +139,7 @@ beqz t0,spurious -#ifdef CONFIG_32BIT +#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) and t2,t0 bnez t2,fpu # handle FPU immediately #endif @@ -280,7 +280,7 @@ handle_it: j dec_irq_dispatch nop -#ifdef CONFIG_32BIT +#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) fpu: lw t0,fpu_kstat_irq nop diff --git a/arch/mips/dec/prom/Makefile b/arch/mips/dec/prom/Makefile index d95016016b42bef365d7b8bb6348888df9e386f2..2bad87551203b2714529f7bbd38a832bf2b30a1e 100644 --- a/arch/mips/dec/prom/Makefile +++ b/arch/mips/dec/prom/Makefile @@ -6,4 +6,4 @@ lib-y += init.o memory.o cmdline.o identify.o console.o -lib-$(CONFIG_32BIT) += locore.o +lib-$(CONFIG_CPU_R3000) += locore.o diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c index eaad0ed4b523bbc0ee70c58c994f91a9c59d0818..99b9b29750db3cd116467991a823777e8bada15c 100644 --- a/arch/mips/dec/setup.c +++ b/arch/mips/dec/setup.c @@ -746,7 +746,8 @@ void __init arch_init_irq(void) dec_interrupt[DEC_IRQ_HALT] = -1; /* Register board interrupts: FPU and cascade. */ - if (dec_interrupt[DEC_IRQ_FPU] >= 0 && cpu_has_fpu) { + if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT) && + dec_interrupt[DEC_IRQ_FPU] >= 0 && cpu_has_fpu) { struct irq_desc *desc_fpu; int irq_fpu; diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h index 62c7dfb90e06c35afa168fa999e408f3b240004d..1e1247add1cf802b9abe3a5216b3a785f83e5988 100644 --- a/arch/mips/include/asm/dec/prom.h +++ b/arch/mips/include/asm/dec/prom.h @@ -43,16 +43,11 @@ */ #define REX_PROM_MAGIC 0x30464354 -#ifdef CONFIG_64BIT - -#define prom_is_rex(magic) 1 /* KN04 and KN05 are REX PROMs. */ - -#else /* !CONFIG_64BIT */ - -#define prom_is_rex(magic) ((magic) == REX_PROM_MAGIC) - -#endif /* !CONFIG_64BIT */ - +/* KN04 and KN05 are REX PROMs, so only do the check for R3k systems. 
*/ +static inline bool prom_is_rex(u32 magic) +{ + return !IS_ENABLED(CONFIG_CPU_R3000) || magic == REX_PROM_MAGIC; +} /* * 3MIN/MAXINE PROM entry points for DS5000/1xx's, DS5000/xx's and diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 139b4050259fab5b6971f3c0d153cb66b288e887..71153c369f2948ec25ea296bd410468bc2960d6f 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -15,6 +15,7 @@ #define __HAVE_ARCH_PMD_ALLOC_ONE #define __HAVE_ARCH_PUD_ALLOC_ONE +#define __HAVE_ARCH_PGD_FREE #include static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, @@ -49,6 +50,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) extern void pgd_init(unsigned long page); extern pgd_t *pgd_alloc(struct mm_struct *mm); +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_pages((unsigned long)pgd, PGD_ORDER); +} + #define __pte_free_tlb(tlb,pte,address) \ do { \ pgtable_pte_page_dtor(pte); \ diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 535eb49e5904a9380ac7e3dbeda77b5644a251b5..b258dc96841a380d38d6a113a60758427e800cc1 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -351,6 +351,9 @@ asmlinkage void start_secondary(void) cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; + set_cpu_sibling_map(cpu); + set_cpu_core_map(cpu); + cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); @@ -362,9 +365,6 @@ asmlinkage void start_secondary(void) /* The CPU is running and counters synchronised, now mark it online */ set_cpu_online(cpu, true); - set_cpu_sibling_map(cpu); - set_cpu_core_map(cpu); - calculate_cpu_foreign_map(); /* diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index dd34f1b32b7976ca316f50235448788d20b49160..0e3c8d761a451b7d67239107b72fa86b32b60d9b 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -310,11 +310,9 @@ static int __init plat_setup_devices(void) static int __init setup_kmac(char *s) { printk(KERN_INFO "korina mac = %s\n", s); - if (!mac_pton(s, korina_dev0_data.mac)) { + if (!mac_pton(s, korina_dev0_data.mac)) printk(KERN_ERR "Invalid mac\n"); - return -EINVAL; - } - return 0; + return 1; } __setup("kmac=", setup_kmac); diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 010ba5f1d7dd6b45445e51faef8a1b0809d2f3a6..54500e81efe59a040531fc863b2658b5cb7981f4 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -70,9 +70,7 @@ static inline void set_fs(mm_segment_t fs) * versions are void (ie, don't return a value as such). 
*/ -#define get_user __get_user \ - -#define __get_user(x, ptr) \ +#define get_user(x, ptr) \ ({ \ long __gu_err = 0; \ __get_user_check((x), (ptr), __gu_err); \ @@ -85,6 +83,14 @@ static inline void set_fs(mm_segment_t fs) (void)0; \ }) +#define __get_user(x, ptr) \ +({ \ + long __gu_err = 0; \ + const __typeof__(*(ptr)) __user *__p = (ptr); \ + __get_user_err((x), __p, (__gu_err)); \ + __gu_err; \ +}) + #define __get_user_check(x, ptr, err) \ ({ \ const __typeof__(*(ptr)) __user *__p = (ptr); \ @@ -165,12 +171,18 @@ do { \ : "r"(addr), "i"(-EFAULT) \ : "cc") -#define put_user __put_user \ +#define put_user(x, ptr) \ +({ \ + long __pu_err = 0; \ + __put_user_check((x), (ptr), __pu_err); \ + __pu_err; \ +}) #define __put_user(x, ptr) \ ({ \ long __pu_err = 0; \ - __put_user_err((x), (ptr), __pu_err); \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + __put_user_err((x), __p, __pu_err); \ __pu_err; \ }) diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index a741abbed6fbf5ae8738730e4abeebe6c1627cb0..8a386e6c07df19905d7dd7413d75a11f7818be58 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -89,6 +89,7 @@ extern __must_check long strnlen_user(const char __user *s, long n); /* Optimized macros */ #define __get_user_asm(val, insn, addr, err) \ { \ + unsigned long __gu_val; \ __asm__ __volatile__( \ " movi %0, %3\n" \ "1: " insn " %1, 0(%2)\n" \ @@ -97,14 +98,20 @@ extern __must_check long strnlen_user(const char __user *s, long n); " .section __ex_table,\"a\"\n" \ " .word 1b, 2b\n" \ " .previous" \ - : "=&r" (err), "=r" (val) \ + : "=&r" (err), "=r" (__gu_val) \ : "r" (addr), "i" (-EFAULT)); \ + val = (__force __typeof__(*(addr)))__gu_val; \ } -#define __get_user_unknown(val, size, ptr, err) do { \ +extern void __get_user_unknown(void); + +#define __get_user_8(val, ptr, err) do { \ + u64 __val = 0; \ err = 0; \ - if (__copy_from_user(&(val), ptr, size)) { \ + if (raw_copy_from_user(&(__val), ptr, sizeof(val))) { \ err = -EFAULT; \ + } else { \ + val = (typeof(val))(typeof((val) - (val)))__val; \ } \ } while (0) @@ -120,8 +127,11 @@ do { \ case 4: \ __get_user_asm(val, "ldw", ptr, err); \ break; \ + case 8: \ + __get_user_8(val, ptr, err); \ + break; \ default: \ - __get_user_unknown(val, size, ptr, err); \ + __get_user_unknown(); \ break; \ } \ } while (0) @@ -130,9 +140,7 @@ do { \ ({ \ long __gu_err = -EFAULT; \ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ - unsigned long __gu_val = 0; \ - __get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\ - (x) = (__force __typeof__(x))__gu_val; \ + __get_user_common(x, sizeof(*(ptr)), __gu_ptr, __gu_err); \ __gu_err; \ }) @@ -140,11 +148,9 @@ do { \ ({ \ long __gu_err = -EFAULT; \ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ - unsigned long __gu_val = 0; \ if (access_ok( __gu_ptr, sizeof(*__gu_ptr))) \ - __get_user_common(__gu_val, sizeof(*__gu_ptr), \ + __get_user_common(x, sizeof(*__gu_ptr), \ __gu_ptr, __gu_err); \ - (x) = (__force __typeof__(x))__gu_val; \ __gu_err; \ }) diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c index cf2dca2ac7c37d8f49af4a0a725a03509cd37629..e45491d1d3e4425c6c6d288959f64b46773f1360 100644 --- a/arch/nios2/kernel/signal.c +++ b/arch/nios2/kernel/signal.c @@ -36,10 +36,10 @@ struct rt_sigframe { static inline int rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw, - struct ucontext *uc, int *pr2) + struct ucontext __user *uc, int *pr2) { int temp; - unsigned long *gregs = uc->uc_mcontext.gregs; + 
unsigned long __user *gregs = uc->uc_mcontext.gregs; int err; /* Always make any pending restarted system calls return -EINTR */ @@ -102,10 +102,11 @@ asmlinkage int do_rt_sigreturn(struct switch_stack *sw) { struct pt_regs *regs = (struct pt_regs *)(sw + 1); /* Verify, can we follow the stack back */ - struct rt_sigframe *frame = (struct rt_sigframe *) regs->sp; + struct rt_sigframe __user *frame; sigset_t set; int rval; + frame = (struct rt_sigframe __user *) regs->sp; if (!access_ok(frame, sizeof(*frame))) goto badframe; @@ -124,10 +125,10 @@ asmlinkage int do_rt_sigreturn(struct switch_stack *sw) return 0; } -static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) +static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs) { struct switch_stack *sw = (struct switch_stack *)regs - 1; - unsigned long *gregs = uc->uc_mcontext.gregs; + unsigned long __user *gregs = uc->uc_mcontext.gregs; int err = 0; err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); @@ -162,8 +163,9 @@ static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) return err; } -static inline void *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, - size_t frame_size) +static inline void __user *get_sigframe(struct ksignal *ksig, + struct pt_regs *regs, + size_t frame_size) { unsigned long usp; @@ -174,13 +176,13 @@ static inline void *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, usp = sigsp(usp, ksig); /* Verify, is it 32 or 64 bit aligned */ - return (void *)((usp - frame_size) & -8UL); + return (void __user *)((usp - frame_size) & -8UL); } static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { - struct rt_sigframe *frame; + struct rt_sigframe __user *frame; int err = 0; frame = get_sigframe(ksig, regs, sizeof(*frame)); diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h index 8ecc1f0c0483d5a4a60dd44c08b6e49b9d3b776e..d0e090a2c000da4a94ee55fa84e2956ae96c3821 100644 --- a/arch/parisc/include/asm/traps.h +++ b/arch/parisc/include/asm/traps.h @@ -17,6 +17,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err); const char *trap_name(unsigned long code); void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long address); +int handle_nadtlb_fault(struct pt_regs *regs); #endif #endif diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 269b737d26299cf09dda74abb22266f457df0766..bce47e0fb692cb77cfc803d95b41939d2d8799c0 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -661,6 +661,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs) by hand. 
Technically we need to emulate: fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */ + if (code == 17 && handle_nadtlb_fault(regs)) + return; fault_address = regs->ior; fault_space = regs->isr; break; diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 237d20dd5622de53fb207ec5888f972c7237a9e3..286cec4d86d7b0165a7b81ffcfdc48512a568acf 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -340,7 +340,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop) : "r" (val), "r" (regs->ior), "r" (regs->isr) : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); - return 0; + return ret; } static int emulate_std(struct pt_regs *regs, int frreg, int flop) { @@ -397,7 +397,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) __asm__ __volatile__ ( " mtsp %4, %%sr1\n" " zdep %2, 29, 2, %%r19\n" -" dep %%r0, 31, 2, %2\n" +" dep %%r0, 31, 2, %3\n" " mtsar %%r19\n" " zvdepi -2, 32, %%r19\n" "1: ldw 0(%%sr1,%3),%%r20\n" @@ -409,7 +409,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) " andcm %%r21, %%r19, %%r21\n" " or %1, %%r20, %1\n" " or %2, %%r21, %2\n" -"3: stw %1,0(%%sr1,%1)\n" +"3: stw %1,0(%%sr1,%3)\n" "4: stw %%r1,4(%%sr1,%3)\n" "5: stw %2,8(%%sr1,%3)\n" " copy %%r0, %0\n" @@ -596,7 +596,6 @@ void handle_unaligned(struct pt_regs *regs) ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */ break; } -#ifdef CONFIG_PA20 switch (regs->iir & OPCODE2_MASK) { case OPCODE_FLDD_L: @@ -607,22 +606,23 @@ void handle_unaligned(struct pt_regs *regs) flop=1; ret = emulate_std(regs, R2(regs->iir),1); break; +#ifdef CONFIG_PA20 case OPCODE_LDD_L: ret = emulate_ldd(regs, R2(regs->iir),0); break; case OPCODE_STD_L: ret = emulate_std(regs, R2(regs->iir),0); break; - } #endif + } switch (regs->iir & OPCODE3_MASK) { case OPCODE_FLDW_L: flop=1; - ret = emulate_ldw(regs, R2(regs->iir),0); + ret = emulate_ldw(regs, R2(regs->iir), 1); break; case OPCODE_LDW_M: - ret = emulate_ldw(regs, R2(regs->iir),1); + ret = emulate_ldw(regs, R2(regs->iir), 0); break; case OPCODE_FSTW_L: diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 716960f5d92ea460a4b1084954e7bb69ac6cc476..5faa3cff47387ac289fe5792e260c4b1bde0bd2b 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -424,3 +424,92 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, goto no_context; pagefault_out_of_memory(); } + +/* Handle non-access data TLB miss faults. + * + * For probe instructions, accesses to userspace are considered allowed + * if they lie in a valid VMA and the access type matches. We are not + * allowed to handle MM faults here so there may be situations where an + * actual access would fail even though a probe was successful. 
+ */ +int +handle_nadtlb_fault(struct pt_regs *regs) +{ + unsigned long insn = regs->iir; + int breg, treg, xreg, val = 0; + struct vm_area_struct *vma, *prev_vma; + struct task_struct *tsk; + struct mm_struct *mm; + unsigned long address; + unsigned long acc_type; + + switch (insn & 0x380) { + case 0x280: + /* FDC instruction */ + fallthrough; + case 0x380: + /* PDC and FIC instructions */ + if (printk_ratelimit()) { + pr_warn("BUG: nullifying cache flush/purge instruction\n"); + show_regs(regs); + } + if (insn & 0x20) { + /* Base modification */ + breg = (insn >> 21) & 0x1f; + xreg = (insn >> 16) & 0x1f; + if (breg && xreg) + regs->gr[breg] += regs->gr[xreg]; + } + regs->gr[0] |= PSW_N; + return 1; + + case 0x180: + /* PROBE instruction */ + treg = insn & 0x1f; + if (regs->isr) { + tsk = current; + mm = tsk->mm; + if (mm) { + /* Search for VMA */ + address = regs->ior; + mmap_read_lock(mm); + vma = find_vma_prev(mm, address, &prev_vma); + mmap_read_unlock(mm); + + /* + * Check if access to the VMA is okay. + * We don't allow for stack expansion. + */ + acc_type = (insn & 0x40) ? VM_WRITE : VM_READ; + if (vma + && address >= vma->vm_start + && (vma->vm_flags & acc_type) == acc_type) + val = 1; + } + } + if (treg) + regs->gr[treg] = val; + regs->gr[0] |= PSW_N; + return 1; + + case 0x300: + /* LPA instruction */ + if (insn & 0x20) { + /* Base modification */ + breg = (insn >> 21) & 0x1f; + xreg = (insn >> 16) & 0x1f; + if (breg && xreg) + regs->gr[breg] += regs->gr[xreg]; + } + treg = insn & 0x1f; + if (treg) + regs->gr[treg] = 0; + regs->gr[0] |= PSW_N; + return 1; + + default: + break; + } + + return 0; +} diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 35bf7634e095ec34071c5b2d0bbe2b4e6de73d87..a7c353ea01665c5cacc4a7947c5579888c407daa 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -172,7 +172,7 @@ else CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5)) CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4) endif -else +else ifdef CONFIG_PPC_BOOK3E_64 CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 endif diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts new file mode 100644 index 0000000000000000000000000000000000000000..73f8c998c64dfefa6859cd5bcda8ddafcdc51f38 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * T1040RDB-REV-A Device Tree Source + * + * Copyright 2014 - 2015 Freescale Semiconductor Inc. 
+ * + */ + +#include "t1040rdb.dts" + +/ { + model = "fsl,T1040RDB-REV-A"; + compatible = "fsl,T1040RDB-REV-A"; +}; + +&seville_port0 { + label = "ETH5"; +}; + +&seville_port2 { + label = "ETH7"; +}; + +&seville_port4 { + label = "ETH9"; +}; + +&seville_port6 { + label = "ETH11"; +}; diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb.dts b/arch/powerpc/boot/dts/fsl/t1040rdb.dts index af0c8a6f561385ce9fcf9f24a4effccf9df21fe2..b6733e7e65805e47fc52d345040dd190ca0ea2e7 100644 --- a/arch/powerpc/boot/dts/fsl/t1040rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1040rdb.dts @@ -119,7 +119,7 @@ &seville_port0 { managed = "in-band-status"; phy-handle = <&phy_qsgmii_0>; phy-mode = "qsgmii"; - label = "ETH5"; + label = "ETH3"; status = "okay"; }; @@ -135,7 +135,7 @@ &seville_port2 { managed = "in-band-status"; phy-handle = <&phy_qsgmii_2>; phy-mode = "qsgmii"; - label = "ETH7"; + label = "ETH5"; status = "okay"; }; @@ -151,7 +151,7 @@ &seville_port4 { managed = "in-band-status"; phy-handle = <&phy_qsgmii_4>; phy-mode = "qsgmii"; - label = "ETH9"; + label = "ETH7"; status = "okay"; }; @@ -167,7 +167,7 @@ &seville_port6 { managed = "in-band-status"; phy-handle = <&phy_qsgmii_6>; phy-mode = "qsgmii"; - label = "ETH11"; + label = "ETH9"; status = "okay"; }; diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 58635960403c058b6c0d29f5e7b77c015c004303..0182b291248ace8e4cb2bbbe7ce3422524de322c 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -344,25 +344,37 @@ static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr) */ static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr) { - __asm__ __volatile__("stbcix %0,0,%1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + stbcix %0,0,%1; \ + .machine pop;" : : "r" (val), "r" (paddr) : "memory"); } static inline void __raw_rm_writew(u16 val, volatile void __iomem *paddr) { - __asm__ __volatile__("sthcix %0,0,%1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + sthcix %0,0,%1; \ + .machine pop;" : : "r" (val), "r" (paddr) : "memory"); } static inline void __raw_rm_writel(u32 val, volatile void __iomem *paddr) { - __asm__ __volatile__("stwcix %0,0,%1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + stwcix %0,0,%1; \ + .machine pop;" : : "r" (val), "r" (paddr) : "memory"); } static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr) { - __asm__ __volatile__("stdcix %0,0,%1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + stdcix %0,0,%1; \ + .machine pop;" : : "r" (val), "r" (paddr) : "memory"); } @@ -374,7 +386,10 @@ static inline void __raw_rm_writeq_be(u64 val, volatile void __iomem *paddr) static inline u8 __raw_rm_readb(volatile void __iomem *paddr) { u8 ret; - __asm__ __volatile__("lbzcix %0,0, %1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + lbzcix %0,0, %1; \ + .machine pop;" : "=r" (ret) : "r" (paddr) : "memory"); return ret; } @@ -382,7 +397,10 @@ static inline u8 __raw_rm_readb(volatile void __iomem *paddr) static inline u16 __raw_rm_readw(volatile void __iomem *paddr) { u16 ret; - __asm__ __volatile__("lhzcix %0,0, %1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + lhzcix %0,0, %1; \ + .machine pop;" : "=r" (ret) : "r" (paddr) : "memory"); return ret; } @@ -390,7 +408,10 @@ static inline u16 __raw_rm_readw(volatile void __iomem *paddr) static inline u32 __raw_rm_readl(volatile void __iomem *paddr) { u32 ret; - __asm__ __volatile__("lwzcix %0,0, %1" + __asm__ 
__volatile__(".machine push; \ + .machine power6; \ + lwzcix %0,0, %1; \ + .machine pop;" : "=r" (ret) : "r" (paddr) : "memory"); return ret; } @@ -398,7 +419,10 @@ static inline u32 __raw_rm_readl(volatile void __iomem *paddr) static inline u64 __raw_rm_readq(volatile void __iomem *paddr) { u64 ret; - __asm__ __volatile__("ldcix %0,0, %1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + ldcix %0,0, %1; \ + .machine pop;" : "=r" (ret) : "r" (paddr) : "memory"); return ret; } diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h index fea12c6b915cf190cc5e78dfad0bc103546f0035..bafbfaba190fbe5a5d013a03db00eb41f53a6a18 100644 --- a/arch/powerpc/include/asm/livepatch.h +++ b/arch/powerpc/include/asm/livepatch.h @@ -75,6 +75,11 @@ extern void livepatch_branch_stub_end(void); #ifdef PPC64_ELF_ABI_v1 extern void livepatch_branch_trampoline(void); extern void livepatch_branch_trampoline_end(void); +extern void livepatch_brk_trampoline(void); +void livepatch_create_btramp(struct ppc64_klp_btramp_entry *entry, unsigned long addr); +#else +static inline void livepatch_create_btramp(struct ppc64_klp_btramp_entry *entry, + unsigned long addr) {} #endif /* PPC64_ELF_ABI_v1 */ int livepatch_create_branch(unsigned long pc, @@ -89,6 +94,12 @@ struct arch_klp_data { #else unsigned long trampoline; #endif /* PPC64_ELF_ABI_v1 */ + + /* + * Saved opcode at the entry of the old func (which maybe replaced + * with breakpoint). + */ + u32 saved_opcode; }; #elif defined(CONFIG_PPC32) @@ -97,11 +108,25 @@ struct arch_klp_data { #define LJMP_INSN_SIZE 4 struct arch_klp_data { u32 old_insns[LJMP_INSN_SIZE]; + + /* + * Saved opcode at the entry of the old func (which maybe replaced + * with breakpoint). + */ + u32 saved_opcode; }; #endif /* CONFIG_PPC64 */ +#ifdef PPC64_ELF_ABI_v1 +struct klp_func_node; +void arch_klp_set_brk_func(struct klp_func_node *func_node, void *new_func); +#endif +int klp_brk_handler(struct pt_regs *regs); +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +int arch_klp_module_check_calltrace(void *data); #endif /* CONFIG_LIVEPATCH_FTRACE */ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index f53bfefb4a5773e15d2b36d4b9ee0bb3046e0831..6b808bcdecd52d4fb143ef77a319fa42e612ed2b 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -229,8 +229,11 @@ extern long __get_user_bad(void); */ #define __get_user_atomic_128_aligned(kaddr, uaddr, err) \ __asm__ __volatile__( \ + ".machine push\n" \ + ".machine altivec\n" \ "1: lvx 0,0,%1 # get user\n" \ " stvx 0,0,%2 # put kernel\n" \ + ".machine pop\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %0,%3\n" \ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 4b6720e8163292c73abb0fcb297ecc03ab0fe6ec..32c617ba6901a992677fce86cc586c45d5c2929c 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -95,7 +95,7 @@ obj-$(CONFIG_44x) += cpu_setup_44x.o obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o obj-$(CONFIG_PPC_DOORBELL) += dbell.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o -obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch_$(BITS).o +obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch.o livepatch_$(BITS).o extra-$(CONFIG_PPC64) := head_64.o extra-$(CONFIG_PPC_BOOK3S_32) := head_book3s_32.o diff --git 
a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 71ff3a4f10a6bd8c57dc0a1428cf13bfdf1bd1dc..ad3281b092be41a3b4cfb069bc332165cb66d518 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -1068,5 +1068,41 @@ _GLOBAL(livepatch_branch_trampoline) blr _GLOBAL(livepatch_branch_trampoline_end) nop + +/* + * This function is the trampoline of livepatch brk handler. + * + * brk -> traps + * - klp_brk_handler + * - set R11 to new_func address + * - set NIP to livepatch_brk_trampoline address + * see arch/powerpc/kernel/livepatch.c + */ +_GLOBAL(livepatch_brk_trampoline) + mflr r0 + std r0, 16(r1) + std r2, 24(r1) + stdu r1, -STACK_FRAME_OVERHEAD(r1) + + /* Call NEW_FUNC */ + ld r12, 0(r11) /* load new func address to R12 */ +#ifdef PPC64_ELF_ABI_v1 + ld r2, 8(r11) /* set up new R2 */ +#endif + mtctr r12 /* load R12(new func address) to CTR */ + bctrl /* call new func */ + + /* + * Now we are returning from the patched function to the original + * caller A. We are free to use r11, r12 and we can use r2 until we + * restore it. + */ + addi r1, r1, STACK_FRAME_OVERHEAD + ld r2, 24(r1) + ld r0, 16(r1) + mtlr r0 + + /* Return to original caller of live patched function */ + blr #endif #endif /* CONFIG_LIVEPATCH_WO_FTRACE */ diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 617eba82531cb9916c5d34c7a5f087849a724601..d89cf802d9aa771788b550cf9359571b77c5a4a8 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -669,7 +669,7 @@ static void __init kvm_use_magic_page(void) on_each_cpu(kvm_map_magic_page, &features, 1); /* Quick self-test to see if the mapping works */ - if (!fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) { + if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) { kvm_patching_worked = false; return; } diff --git a/arch/powerpc/kernel/livepatch.c b/arch/powerpc/kernel/livepatch.c new file mode 100644 index 0000000000000000000000000000000000000000..b8afcc7b99399f7b0774c2e2ae67a0cc0739b728 --- /dev/null +++ b/arch/powerpc/kernel/livepatch.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * livepatch.c - powerpc-specific Kernel Live Patching Core + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)old_func); + + arch_data->saved_opcode = ppc_inst_val(insn); + patch_instruction((struct ppc_inst *)old_func, ppc_inst(BREAKPOINT_INSTRUCTION)); + return 0; +} + +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + patch_instruction((struct ppc_inst *)old_func, ppc_inst(arch_data->saved_opcode)); +} + +int klp_brk_handler(struct pt_regs *regs) +{ + void *brk_func = NULL; + unsigned long addr = regs->nip; + + if (user_mode(regs)) + return 0; + + brk_func = klp_get_brk_func((void *)addr); + if (!brk_func) + return 0; + +#ifdef PPC64_ELF_ABI_v1 + /* + * Only static trampoline can be used here to prevent + * resource release caused by rollback. + */ + regs->gpr[PT_R11] = (unsigned long)brk_func; + regs->nip = ppc_function_entry((void *)livepatch_brk_trampoline); +#else + regs->nip = (unsigned long)brk_func; +#endif + + return 1; +} diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index a3cf41af073eb5be777c4e66a77ae5fb3f9e253b..603f1d61cc23f744ace46b4fbc7974eb58805178 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -70,6 +70,7 @@ struct stackframe { struct walk_stackframe_args { int enable; struct klp_func_list *check_funcs; + struct module *mod; int ret; }; @@ -83,16 +84,6 @@ static inline unsigned long klp_size_to_check(unsigned long func_size, return size; } -static inline int klp_compare_address(unsigned long pc, unsigned long func_addr, - const char *func_name, unsigned long check_size) -{ - if (pc >= func_addr && pc < func_addr + check_size) { - pr_err("func %s is in use!\n", func_name); - return -EBUSY; - } - return 0; -} - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; @@ -144,7 +135,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { if (enable) { - if (func->force == KLP_ENFORCEMENT) + if (func->patched || func->force == KLP_ENFORCEMENT) continue; /* * When enable, checking the currently @@ -299,23 +290,12 @@ static void free_list(struct klp_func_list **funcs) } } -int klp_check_calltrace(struct klp_patch *patch, int enable) +static int do_check_calltrace(struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) { struct task_struct *g, *t; struct stackframe frame; unsigned long *stack; - int ret = 0; - struct klp_func_list *check_funcs = NULL; - struct walk_stackframe_args args = { - .ret = 0 - }; - - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - args.check_funcs = check_funcs; for_each_process_thread(g, t) { if (t == current) { @@ -354,21 +334,61 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) frame.sp = (unsigned long)stack; frame.pc = stack[STACK_FRAME_LR_SAVE]; - if (check_funcs != NULL) { - klp_walk_stackframe(&frame, klp_check_jump_func, t, &args); - if (args.ret) { - ret = args.ret; - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - goto out; - } + klp_walk_stackframe(&frame, fn, t, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return 
args->ret; } } + return 0; +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + struct klp_func_list *check_funcs = NULL; + struct walk_stackframe_args args = { + .ret = 0 + }; + + ret = klp_check_activeness_func(patch, enable, &check_funcs); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; + } + if (!check_funcs) + goto out; + + args.check_funcs = check_funcs; + ret = do_check_calltrace(&args, klp_check_jump_func); out: free_list(&check_funcs); return ret; } + +static int check_module_calltrace(struct stackframe *frame, void *data) +{ + struct walk_stackframe_args *args = data; + + if (within_module_core(frame->pc, args->mod)) { + pr_err("module %s is in use!\n", args->mod->name); + return (args->ret = -EBUSY); + } + return 0; +} + +int arch_klp_module_check_calltrace(void *data) +{ + struct walk_stackframe_args args = { + .mod = (struct module *)data, + .ret = 0 + }; + + return do_check_calltrace(&args, check_module_calltrace); +} + #endif #ifdef CONFIG_LIVEPATCH_WO_FTRACE diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 0098ad48f9180c8431996dcc515884f8ea16560d..f008b3beb0017ba3b605cb3ba0f59ab08320f3e7 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -76,6 +76,7 @@ struct stackframe { struct walk_stackframe_args { int enable; struct klp_func_list *check_funcs; + struct module *mod; int ret; }; @@ -89,16 +90,6 @@ static inline unsigned long klp_size_to_check(unsigned long func_size, return size; } -static inline int klp_compare_address(unsigned long pc, unsigned long func_addr, - const char *func_name, unsigned long check_size) -{ - if (pc >= func_addr && pc < func_addr + check_size) { - pr_err("func %s is in use!\n", func_name); - return -EBUSY; - } - return 0; -} - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; @@ -153,7 +144,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, /* Check func address in stack */ if (enable) { - if (func->force == KLP_ENFORCEMENT) + if (func->patched || func->force == KLP_ENFORCEMENT) continue; /* * When enable, checking the currently @@ -349,22 +340,12 @@ static void free_list(struct klp_func_list **funcs) } } -int klp_check_calltrace(struct klp_patch *patch, int enable) +static int do_check_calltrace(struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) { struct task_struct *g, *t; struct stackframe frame; unsigned long *stack; - int ret = 0; - struct klp_func_list *check_funcs = NULL; - struct walk_stackframe_args args; - - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } - args.check_funcs = check_funcs; - args.ret = 0; for_each_process_thread(g, t) { if (t == current) { @@ -406,23 +387,63 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) frame.sp = (unsigned long)stack; frame.pc = stack[STACK_FRAME_LR_SAVE]; frame.nip = 0; - if (check_funcs != NULL) { - klp_walk_stackframe(&frame, klp_check_jump_func, t, &args); - if (args.ret) { - ret = args.ret; - pr_debug("%s FAILED when %s\n", __func__, - enable ? "enabling" : "disabling"); - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - goto out; - } + klp_walk_stackframe(&frame, fn, t, args); + if (args->ret) { + pr_debug("%s FAILED when %s\n", __func__, + args->enable ? 
"enabling" : "disabling"); + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; } } + return 0; +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + struct klp_func_list *check_funcs = NULL; + struct walk_stackframe_args args; + + ret = klp_check_activeness_func(patch, enable, &check_funcs); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; + } + if (!check_funcs) + goto out; + + args.check_funcs = check_funcs; + args.ret = 0; + args.enable = enable; + ret = do_check_calltrace(&args, klp_check_jump_func); out: free_list(&check_funcs); return ret; } + +static int check_module_calltrace(struct stackframe *frame, void *data) +{ + struct walk_stackframe_args *args = data; + + if (within_module_core(frame->pc, args->mod)) { + pr_err("module %s is in use!\n", args->mod->name); + return (args->ret = -EBUSY); + } + return 0; +} + +int arch_klp_module_check_calltrace(void *data) +{ + struct walk_stackframe_args args = { + .mod = (struct module *)data, + .ret = 0 + }; + + return do_check_calltrace(&args, check_module_calltrace); +} + #endif #ifdef CONFIG_LIVEPATCH_WO_FTRACE diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 7a143ab7d433d3700858699b9413713acc6b2966..ef093691f6063c3b9f4c192d57d6f63de20600ec 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -835,16 +835,15 @@ static int livepatch_create_bstub(struct ppc64_klp_bstub_entry *entry, return 0; } - if (entry->magic != BRANCH_STUB_MAGIC) { - stub_start = ppc_function_entry((void *)livepatch_branch_stub); - stub_end = ppc_function_entry((void *)livepatch_branch_stub_end); - stub_size = stub_end - stub_start; - memcpy(entry->jump, (u32 *)stub_start, stub_size); - - entry->jump[0] |= PPC_HA(reladdr); - entry->jump[1] |= PPC_LO(reladdr); - entry->magic = BRANCH_STUB_MAGIC; - } + + stub_start = ppc_function_entry((void *)livepatch_branch_stub); + stub_end = ppc_function_entry((void *)livepatch_branch_stub_end); + stub_size = stub_end - stub_start; + memcpy(entry->jump, (u32 *)stub_start, stub_size); + + entry->jump[0] |= PPC_HA(reladdr); + entry->jump[1] |= PPC_LO(reladdr); + entry->magic = BRANCH_STUB_MAGIC; entry->trampoline = addr; pr_debug("Create livepatch branch stub 0x%px with reladdr 0x%lx r2 0x%lx to trampoline 0x%lx\n", @@ -854,9 +853,8 @@ static int livepatch_create_bstub(struct ppc64_klp_bstub_entry *entry, } #ifdef PPC64_ELF_ABI_v1 -static void livepatch_create_btramp(struct ppc64_klp_btramp_entry *entry, - unsigned long addr, - struct module *me) +void livepatch_create_btramp(struct ppc64_klp_btramp_entry *entry, + unsigned long addr) { unsigned long reladdr, tramp_start, tramp_end, tramp_size; @@ -894,7 +892,7 @@ int livepatch_create_branch(unsigned long pc, { #ifdef PPC64_ELF_ABI_v1 /* Create trampoline to addr(new func) */ - livepatch_create_btramp((struct ppc64_klp_btramp_entry *)trampoline, addr, me); + livepatch_create_btramp((struct ppc64_klp_btramp_entry *)trampoline, addr); #else trampoline = addr; #endif diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c old mode 100644 new mode 100755 index f6e51be47c6e4d7150d7b148dac6c9fe786edc88..81125c8220089252b073db9a17d07fd88eef242e --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -74,10 +74,13 @@ long arch_ptrace(struct task_struct *child, long request, unsigned int fpidx = index - PT_FPR0; flush_fp_to_thread(child); - 
if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&tmp, &child->thread.TS_FPR(fpidx), - sizeof(long)); - else + if (fpidx < (PT_FPSCR - PT_FPR0)) { + if (IS_ENABLED(CONFIG_PPC32)) + // On 32-bit the index we are passed refers to 32-bit words + tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx]; + else + memcpy(&tmp, &child->thread.TS_FPR(fpidx), sizeof(long)); + } else tmp = child->thread.fp_state.fpscr; } ret = put_user(tmp, datalp); @@ -107,10 +110,13 @@ long arch_ptrace(struct task_struct *child, long request, unsigned int fpidx = index - PT_FPR0; flush_fp_to_thread(child); - if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&child->thread.TS_FPR(fpidx), &data, - sizeof(long)); - else + if (fpidx < (PT_FPSCR - PT_FPR0)) { + if (IS_ENABLED(CONFIG_PPC32)) + // On 32-bit the index we are passed refers to 32-bit words + ((u32 *)child->thread.fp_state.fpr)[fpidx] = data; + else + memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); + } else child->thread.fp_state.fpscr = data; ret = 0; } @@ -478,4 +484,7 @@ void __init pt_regs_check(void) * real registers. */ BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long)); + + // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible + BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX)); } diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 069d451240fa4c961fc85fed91a3eb5777d57676..d2f6b2e30b6ae0c2db3b57d30a9071574136ef09 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -67,6 +67,9 @@ #include #include #include +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +#include +#endif #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) int (*__debugger)(struct pt_regs *regs) __read_mostly; @@ -1491,6 +1494,11 @@ void program_check_exception(struct pt_regs *regs) if (kprobe_handler(regs)) goto bail; +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + if (klp_brk_handler(regs)) + goto bail; +#endif + /* trap exception */ if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 527c205d5a5f5f4f05d7416ef83ede53d5479d23..38b7a3491aac080a26aef0120c09fc32db7176af 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -5752,8 +5752,11 @@ static int kvmppc_book3s_init_hv(void) if (r) return r; - if (kvmppc_radix_possible()) + if (kvmppc_radix_possible()) { r = kvmppc_radix_init(); + if (r) + return r; + } /* * POWER9 chips before version 2.02 can't have some threads in diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 543db9157f3b1578ed4c3da17a4819f278c79876..ef8077a739b8813c6fcb4be11b616a560c06615f 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1500,7 +1500,7 @@ int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, { enum emulation_result emulated = EMULATE_DONE; - if (vcpu->arch.mmio_vsx_copy_nums > 2) + if (vcpu->arch.mmio_vmx_copy_nums > 2) return EMULATE_FAIL; while (vcpu->arch.mmio_vmx_copy_nums) { @@ -1597,7 +1597,7 @@ int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, unsigned int index = rs & KVM_MMIO_REG_MASK; enum emulation_result emulated = EMULATE_DONE; - if (vcpu->arch.mmio_vsx_copy_nums > 2) + if (vcpu->arch.mmio_vmx_copy_nums > 2) return EMULATE_FAIL; vcpu->arch.io_gpr = rs; diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 0edebbbffcdcaa00f41f86f4e1d8e56f8e9feae9..2d19655328f12c7d7b9c3141b7251b71874c3444 100644 --- a/arch/powerpc/lib/sstep.c +++ 
b/arch/powerpc/lib/sstep.c @@ -108,9 +108,9 @@ static nokprobe_inline long address_ok(struct pt_regs *regs, { if (!user_mode(regs)) return 1; - if (__access_ok(ea, nb)) + if (access_ok((void __user *)ea, nb)) return 1; - if (__access_ok(ea, 1)) + if (access_ok((void __user *)ea, 1)) /* Access overlaps the end of the user region */ regs->dar = TASK_SIZE_MAX - 1; else @@ -949,7 +949,10 @@ NOKPROBE_SYMBOL(emulate_dcbz); #define __put_user_asmx(x, addr, err, op, cr) \ __asm__ __volatile__( \ + ".machine push\n" \ + ".machine power8\n" \ "1: " op " %2,0,%3\n" \ + ".machine pop\n" \ " mfcr %1\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ @@ -962,7 +965,10 @@ NOKPROBE_SYMBOL(emulate_dcbz); #define __get_user_asmx(x, addr, err, op) \ __asm__ __volatile__( \ + ".machine push\n" \ + ".machine power8\n" \ "1: "op" %1,0,%2\n" \ + ".machine pop\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %0,%3\n" \ @@ -3187,7 +3193,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) __put_user_asmx(op->val, ea, err, "stbcx.", cr); break; case 2: - __put_user_asmx(op->val, ea, err, "stbcx.", cr); + __put_user_asmx(op->val, ea, err, "sthcx.", cr); break; #endif case 4: diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index cf8770b1a692ec4a2349fc0645ca1375252df7bb..f3e4d069e0ba7b822cee880ecf4e016addf4b2b0 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -83,13 +83,12 @@ void __init kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte) { unsigned long k_cur; - phys_addr_t pa = __pa(kasan_early_shadow_page); for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_off_k(k_cur); pte_t *ptep = pte_offset_kernel(pmd, k_cur); - if ((pte_val(*ptep) & PTE_RPN_MASK) != pa) + if (pte_page(*ptep) != virt_to_page(lm_alias(kasan_early_shadow_page))) continue; __set_pte_at(&init_mm, k_cur, ptep, pte, 0); diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 23bfe00811aea0427a0154b30d3c2f3297c3c6b3..eb882413dd17ecec70514bc83e3f09bc241d16c4 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -739,7 +739,9 @@ static int __init parse_numa_properties(void) of_node_put(cpu); } - node_set_online(nid); + /* node_set_online() is an UB if 'nid' is negative */ + if (likely(nid >= 0)) + node_set_online(nid); } get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells); diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 7b25548ec42b0db3ae872adce803de2a75ef9e03..e8074d7f2401b7eef06c27dd1124bc092b0274eb 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -1457,7 +1457,11 @@ static int trace_imc_event_init(struct perf_event *event) event->hw.idx = -1; - event->pmu->task_ctx_nr = perf_hw_context; + /* + * There can only be a single PMU for perf_hw_context events which is assigned to + * core PMU. Hence use "perf_sw_context" for trace_imc. 
+ */ + event->pmu->task_ctx_nr = perf_sw_context; event->destroy = reset_global_refc; return 0; } diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c index f2ba837249d694ab47a843ecff366191b64ea4cb..04a6abf14c2958ca5f3f98a3bba203152be5104c 100644 --- a/arch/powerpc/platforms/8xx/pic.c +++ b/arch/powerpc/platforms/8xx/pic.c @@ -153,6 +153,7 @@ int __init mpc8xx_pic_init(void) if (mpc8xx_pic_host == NULL) { printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); ret = -ENOMEM; + goto out; } ret = 0; diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c index 72c25295c1c2b4e9bd89824dbf99d5646a1d5624..69c344c8884f36b120075ecf2c9123481299756a 100644 --- a/arch/powerpc/platforms/powernv/rng.c +++ b/arch/powerpc/platforms/powernv/rng.c @@ -43,7 +43,11 @@ static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val) unsigned long parity; /* Calculate the parity of the value */ - asm ("popcntd %0,%1" : "=r" (parity) : "r" (val)); + asm (".machine push; \ + .machine power7; \ + popcntd %0,%1; \ + .machine pop;" + : "=r" (parity) : "r" (val)); /* xor our value with the previous mask */ val ^= rng->mask; diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c index 8963eaffb1b7b54234cecaedea9f8fd42677470e..39186ad6b3c3a96f7291ed77782b0f028350656a 100644 --- a/arch/powerpc/sysdev/fsl_gtm.c +++ b/arch/powerpc/sysdev/fsl_gtm.c @@ -86,7 +86,7 @@ static LIST_HEAD(gtms); */ struct gtm_timer *gtm_get_timer16(void) { - struct gtm *gtm = NULL; + struct gtm *gtm; int i; list_for_each_entry(gtm, >ms, list_node) { @@ -103,7 +103,7 @@ struct gtm_timer *gtm_get_timer16(void) spin_unlock_irq(>m->lock); } - if (gtm) + if (!list_empty(>ms)) return ERR_PTR(-EBUSY); return ERR_PTR(-ENODEV); } diff --git a/arch/riscv/include/asm/module.lds.h b/arch/riscv/include/asm/module.lds.h index 4254ff2ff04943f7df7053d6d8263edaba7a3f36..1075beae1ac64521e4c98eadf9acc785deb93d3e 100644 --- a/arch/riscv/include/asm/module.lds.h +++ b/arch/riscv/include/asm/module.lds.h @@ -2,8 +2,8 @@ /* Copyright (C) 2017 Andes Technology Corporation */ #ifdef CONFIG_MODULE_SECTIONS SECTIONS { - .plt (NOLOAD) : { BYTE(0) } - .got (NOLOAD) : { BYTE(0) } - .got.plt (NOLOAD) : { BYTE(0) } + .plt : { BYTE(0) } + .got : { BYTE(0) } + .got.plt : { BYTE(0) } } #endif diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index a390711129de6471524141120419213e228a47e2..d79ae9d98999f6b9dce68aaf00412fe08635e31b 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -11,11 +11,17 @@ #include #include +#ifdef CONFIG_KASAN +#define KASAN_STACK_ORDER 1 +#else +#define KASAN_STACK_ORDER 0 +#endif + /* thread information allocation */ #ifdef CONFIG_64BIT -#define THREAD_SIZE_ORDER (2) +#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER) #else -#define THREAD_SIZE_ORDER (1) +#define THREAD_SIZE_ORDER (1 + KASAN_STACK_ORDER) #endif #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 62de075fc60c0a617b7230e7063a6f44e0917824..bc49d5f2302b605ca72e28d43b71f35259ec8996 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -44,6 +44,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o +obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o + obj-$(CONFIG_RISCV_BASE_PMU) += perf_event.o obj-$(CONFIG_PERF_EVENTS) += 
perf_callchain.o obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 3a4f24a5b32b24fceaf5e8b2e88c2557852ba9cf..3b3e25fa26d4bf9346bad77a6bd192fb70b452e1 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -98,7 +98,7 @@ _save_context: .option pop #ifdef CONFIG_TRACE_IRQFLAGS - call trace_hardirqs_off + call __trace_hardirqs_off #endif #ifdef CONFIG_CONTEXT_TRACKING @@ -131,7 +131,7 @@ skip_context_tracking: andi t0, s1, SR_PIE beqz t0, 1f #ifdef CONFIG_TRACE_IRQFLAGS - call trace_hardirqs_on + call __trace_hardirqs_on #endif csrs CSR_STATUS, SR_IE @@ -222,7 +222,7 @@ ret_from_exception: REG_L s0, PT_STATUS(sp) csrc CSR_STATUS, SR_IE #ifdef CONFIG_TRACE_IRQFLAGS - call trace_hardirqs_off + call __trace_hardirqs_off #endif #ifdef CONFIG_RISCV_M_MODE /* the MPP value is too large to be used as an immediate arg for addi */ @@ -258,10 +258,10 @@ restore_all: REG_L s1, PT_STATUS(sp) andi t0, s1, SR_PIE beqz t0, 1f - call trace_hardirqs_on + call __trace_hardirqs_on j 2f 1: - call trace_hardirqs_off + call __trace_hardirqs_off 2: #endif REG_L a0, PT_STATUS(sp) diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 104fba889cf767cb159840954de99f985f7f0929..c3310a68ac463d092ea8362c9df8fbc07de80ae1 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -13,6 +13,19 @@ #include #include +/* + * The auipc+jalr instruction pair can reach any PC-relative offset + * in the range [-2^31 - 2^11, 2^31 - 2^11) + */ +static bool riscv_insn_valid_32bit_offset(ptrdiff_t val) +{ +#ifdef CONFIG_32BIT + return true; +#else + return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11)); +#endif +} + static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) { if (v != (u32)v) { @@ -95,7 +108,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, ptrdiff_t offset = (void *)v - (void *)location; s32 hi20; - if (offset != (s32)offset) { + if (!riscv_insn_valid_32bit_offset(offset)) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", me->name, (long long)v, location); @@ -197,10 +210,9 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; - s32 fill_v = offset; u32 hi20, lo12; - if (offset != fill_v) { + if (!riscv_insn_valid_32bit_offset(offset)) { /* Only emit the plt entry if offset over 32-bit range */ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { offset = module_emit_plt_entry(me, v); @@ -224,10 +236,9 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; - s32 fill_v = offset; u32 hi20, lo12; - if (offset != fill_v) { + if (!riscv_insn_valid_32bit_offset(offset)) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", me->name, (long long)v, location); diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c index ad3001cbdf6186778e09d33f285070c5d3f06ad9..fb02811df71434ce30f2db7dbcf80a49f1bf9c64 100644 --- a/arch/riscv/kernel/perf_callchain.c +++ b/arch/riscv/kernel/perf_callchain.c @@ -19,8 +19,8 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, { struct stackframe buftail; unsigned long ra = 0; - unsigned long *user_frame_tail = - (unsigned long *)(fp - sizeof(struct stackframe)); + unsigned long __user *user_frame_tail = + (unsigned long 
__user *)(fp - sizeof(struct stackframe)); /* Check accessibility of one struct frame_tail beyond */ if (!access_ok(user_frame_tail, sizeof(buftail))) @@ -77,7 +77,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, bool fill_callchain(unsigned long pc, void *entry) { - return perf_callchain_store(entry, pc); + return perf_callchain_store(entry, pc) == 0; } void notrace walk_stackframe(struct task_struct *task, diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..095ac976d7da1092cbbec8d1a2c11282236d7c36 --- /dev/null +++ b/arch/riscv/kernel/trace_irq.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Changbin Du + */ + +#include +#include +#include "trace_irq.h" + +/* + * trace_hardirqs_on/off require the caller to setup frame pointer properly. + * Otherwise, CALLER_ADDR1 might trigger an pagging exception in kernel. + * Here we add one extra level so they can be safely called by low + * level entry code which $fp is used for other purpose. + */ + +void __trace_hardirqs_on(void) +{ + trace_hardirqs_on(); +} +NOKPROBE_SYMBOL(__trace_hardirqs_on); + +void __trace_hardirqs_off(void) +{ + trace_hardirqs_off(); +} +NOKPROBE_SYMBOL(__trace_hardirqs_off); diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..99fe67377e5ed6795b1a45bf6b2a0d8af6fef41d --- /dev/null +++ b/arch/riscv/kernel/trace_irq.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022 Changbin Du + */ +#ifndef __TRACE_IRQ_H +#define __TRACE_IRQ_H + +void __trace_hardirqs_on(void); +void __trace_hardirqs_off(void); + +#endif /* __TRACE_IRQ_H */ diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index 7ebaef10ea1b69e1557c9d08fec4d288ac31e842..ac7a25298a04af665ff0552d99a95e1671013ddd 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -24,6 +24,9 @@ obj-$(CONFIG_KASAN) += kasan_init.o ifdef CONFIG_KASAN KASAN_SANITIZE_kasan_init.o := n KASAN_SANITIZE_init.o := n +ifdef CONFIG_DEBUG_VIRTUAL +KASAN_SANITIZE_physaddr.o := n +endif endif obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 883c3be43ea983020b6e4c82e418089215f1a2e6..2db442701ee28f528602f9d7aca5d7174938322f 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -21,8 +21,7 @@ asmlinkage void __init kasan_early_init(void) for (i = 0; i < PTRS_PER_PTE; ++i) set_pte(kasan_early_shadow_pte + i, - mk_pte(virt_to_page(kasan_early_shadow_page), - PAGE_KERNEL)); + pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL)); for (i = 0; i < PTRS_PER_PMD; ++i) set_pmd(kasan_early_shadow_pmd + i, diff --git a/arch/s390/include/asm/extable.h b/arch/s390/include/asm/extable.h index 3beb294fd553148486014d1a63057807066a20ac..ce0db8172aad1a876f8bf9444c49d6b76cab47e3 100644 --- a/arch/s390/include/asm/extable.h +++ b/arch/s390/include/asm/extable.h @@ -69,8 +69,13 @@ static inline void swap_ex_entry_fixup(struct exception_table_entry *a, { a->fixup = b->fixup + delta; b->fixup = tmp.fixup - delta; - a->handler = b->handler + delta; - b->handler = tmp.handler - delta; + a->handler = b->handler; + if (a->handler) + a->handler += delta; + b->handler = tmp.handler; + if (b->handler) + b->handler -= delta; } +#define swap_ex_entry_fixup swap_ex_entry_fixup #endif diff --git a/arch/sparc/kernel/signal_32.c 
b/arch/sparc/kernel/signal_32.c index 741d0701003af072bf2be08ac6ff8529da81dd6a..1da36dd34990b59b1d26ff18c01bbb046924802d 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -65,7 +65,7 @@ struct rt_signal_frame { */ static inline bool invalid_frame_pointer(void __user *fp, int fplen) { - if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen)) + if ((((unsigned long) fp) & 15) || !access_ok(fp, fplen)) return true; return false; diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index bef7ab3816742bfc1f968e4424c5316e4204eb09..472a916fd93e09e30417bb5992318c1525911396 100644 --- a/arch/sw_64/Kconfig +++ b/arch/sw_64/Kconfig @@ -3,23 +3,17 @@ config SW64 bool default y select AUDIT_ARCH - select VIRT_IO - select HAVE_AOUT select HAVE_IDE select HAVE_OPROFILE -# select HAVE_SYSCALL_WRAPPERS - select HAVE_IRQ_WORK select HAVE_PCSPKR_PLATFORM select HAVE_PERF_EVENTS - select HAVE_GENERIC_HARDIRQS + select HAVE_GENERIC_GUP select GENERIC_CLOCKEVENTS select GENERIC_IRQ_PROBE select GENERIC_IRQ_LEGACY - select GENERIC_IDLE_LOOP select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION select ARCH_HAVE_NMI_SAFE_CMPXCHG - select ARCH_SUPPORTS_MSI select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_NO_PREEMPT select ARCH_USE_CMPXCHG_LOCKREF @@ -27,18 +21,16 @@ config SW64 select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA select ARCH_SUPPORTS_NUMA_BALANCING - select ARCH_WANTS_PROT_NUMA_PROT_NONE select HAVE_ARCH_TRANSPARENT_HUGEPAGE - select HAVE_GENERIC_RCU_GUP select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_SECCOMP_FILTER - select GENERIC_SIGALTSTACK select OLD_SIGACTION select OLD_SIGSUSPEND select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select HAVE_ARCH_KGDB select ARCH_HAS_PHYS_TO_DMA + select SWIOTLB select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select NO_BOOTMEM @@ -46,32 +38,32 @@ config SW64 select ARCH_USE_QUEUED_SPINLOCKS select COMMON_CLK select HANDLE_DOMAIN_IRQ - select ARCH_INLINE_READ_LOCK if !PREEMPT - select ARCH_INLINE_READ_LOCK_BH if !PREEMPT - select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT - select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_READ_UNLOCK if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT - select ARCH_INLINE_WRITE_LOCK if !PREEMPT - select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT - select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT - select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT - select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT - select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_LOCK if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT + select ARCH_INLINE_READ_LOCK + select ARCH_INLINE_READ_LOCK_BH + select ARCH_INLINE_READ_LOCK_IRQ + select ARCH_INLINE_READ_LOCK_IRQSAVE + select ARCH_INLINE_READ_UNLOCK + select ARCH_INLINE_READ_UNLOCK_BH + select ARCH_INLINE_READ_UNLOCK_IRQ + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE + select ARCH_INLINE_WRITE_LOCK + select 
ARCH_INLINE_WRITE_LOCK_BH + select ARCH_INLINE_WRITE_LOCK_IRQ + select ARCH_INLINE_WRITE_LOCK_IRQSAVE + select ARCH_INLINE_WRITE_UNLOCK + select ARCH_INLINE_WRITE_UNLOCK_BH + select ARCH_INLINE_WRITE_UNLOCK_IRQ + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + select ARCH_INLINE_SPIN_TRYLOCK + select ARCH_INLINE_SPIN_TRYLOCK_BH + select ARCH_INLINE_SPIN_LOCK + select ARCH_INLINE_SPIN_LOCK_BH + select ARCH_INLINE_SPIN_LOCK_IRQ + select ARCH_INLINE_SPIN_LOCK_IRQSAVE + select ARCH_INLINE_SPIN_UNLOCK + select ARCH_INLINE_SPIN_UNLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK_IRQ + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_HAS_SG_CHAIN select IRQ_FORCED_THREADING @@ -95,12 +87,15 @@ config SW64 select HAVE_PERF_USER_STACK_DUMP select HAVE_PERF_REGS select ARCH_SUPPORTS_ACPI - select ACPI if ARCH_SUPPORTS_ACPI - select ACPI_REDUCED_HARDWARE_ONLY if ACPI + select ACPI + select ACPI_REDUCED_HARDWARE_ONLY select GENERIC_TIME_VSYSCALL select SET_FS select PCI_MSI_ARCH_FALLBACKS select DMA_OPS if PCI + select HAVE_REGS_AND_STACK_ACCESS_API + select ARCH_HAS_PTE_SPECIAL + select HARDIRQS_SW_RESEND config LOCKDEP_SUPPORT def_bool y @@ -249,22 +244,252 @@ config LOCK_MEMB bool "Insert mem barrier before lock instruction" default y +menu "CPU Frequency scaling" + +config CPU_FREQ + bool "CPU Frequency scaling" + select SRCU + help + CPU Frequency scaling allows you to change the clock speed of + CPUs on the fly. This is a nice method to save power, because + the lower the CPU clock speed, the less power the CPU consumes. + + Note that this driver doesn't automatically change the CPU + clock speed, you need to either enable a dynamic cpufreq governor + (see below) after boot, or use a userspace tool. + + For details, take a look at . + + If in doubt, say N. + +if CPU_FREQ + +config SW64_CPUFREQ + bool "sw64 CPU Frequency interface for Chip3 Asic" + depends on SW64_CHIP3 + default y + help + Turns on the interface for SW64_CPU Frequency. + +config SW64_CPUAUTOPLUG + bool "sw64 CPU Autoplug interface" + depends on SW64_CPUFREQ + default y + help + Turns on the interface for SW64_CPU CPUAUTOPLUG. + +config CPU_FREQ_GOV_ATTR_SET + bool + +config CPU_FREQ_GOV_COMMON + select CPU_FREQ_GOV_ATTR_SET + select IRQ_WORK + bool + +config CPU_FREQ_BOOST_SW + bool + depends on THERMAL + +config CPU_FREQ_STAT + bool "CPU frequency transition statistics" + help + Export CPU frequency statistics information through sysfs. + + If in doubt, say N. + choice - prompt "DMA Mapping Type" - depends on SW64 && PCI + prompt "Default CPUFreq governor" + default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ + default CPU_FREQ_DEFAULT_GOV_PERFORMANCE + help + This option sets which CPUFreq governor shall be loaded at + startup. If in doubt, select 'performance'. + +config CPU_FREQ_DEFAULT_GOV_PERFORMANCE + bool "performance" + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'performance' as default. This sets + the frequency statically to the highest frequency supported by + the CPU. -config DIRECT_DMA - bool "Direct DMA Mapping" - depends on SW64 && PCI +config CPU_FREQ_DEFAULT_GOV_POWERSAVE + bool "powersave" + select CPU_FREQ_GOV_POWERSAVE + help + Use the CPUFreq governor 'powersave' as default. This sets + the frequency statically to the lowest frequency supported by + the CPU. 
-config SWIOTLB - bool "Software IO TLB" - depends on SW64 && PCI +config CPU_FREQ_DEFAULT_GOV_USERSPACE + bool "userspace" + select CPU_FREQ_GOV_USERSPACE + help + Use the CPUFreq governor 'userspace' as default. This allows + you to set the CPU frequency manually or when a userspace + program shall be able to set the CPU dynamically without having + to enable the userspace governor manually. + +config CPU_FREQ_DEFAULT_GOV_ONDEMAND + bool "ondemand" + select CPU_FREQ_GOV_ONDEMAND + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'ondemand' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the ondemand + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE + bool "conservative" + select CPU_FREQ_GOV_CONSERVATIVE + select CPU_FREQ_GOV_PERFORMANCE help - Software IO TLB + Use the CPUFreq governor 'conservative' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the conservative + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL + bool "schedutil" + depends on SMP + select CPU_FREQ_GOV_SCHEDUTIL + select CPU_FREQ_GOV_PERFORMANCE + help + Use the 'schedutil' CPUFreq governor by default. If unsure, + have a look at the help section of that governor. The fallback + governor will be 'performance'. endchoice +config CPU_FREQ_GOV_PERFORMANCE + tristate "'performance' governor" + help + This cpufreq governor sets the frequency statically to the + highest available CPU frequency. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_performance. + + If in doubt, say Y. + +config CPU_FREQ_GOV_POWERSAVE + tristate "'powersave' governor" + help + This cpufreq governor sets the frequency statically to the + lowest available CPU frequency. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_powersave. + + If in doubt, say Y. + +config CPU_FREQ_GOV_USERSPACE + tristate "'userspace' governor for userspace frequency scaling" + help + Enable this cpufreq governor when you either want to set the + CPU frequency manually or when a userspace program shall + be able to set the CPU dynamically, like on LART + . + + To compile this driver as a module, choose M here: the + module will be called cpufreq_userspace. + + For details, take a look at . + + If in doubt, say Y. + +config CPU_FREQ_GOV_ONDEMAND + tristate "'ondemand' cpufreq policy governor" + select CPU_FREQ_GOV_COMMON + help + 'ondemand' - This driver adds a dynamic cpufreq policy governor. + The governor does a periodic polling and + changes frequency based on the CPU utilization. + The support for this governor depends on CPU capability to + do fast frequency switching (i.e, very low latency frequency + transitions). + + To compile this driver as a module, choose M here: the + module will be called cpufreq_ondemand. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. 
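The governor choices above only set a boot-time default; at run time the active governor, and (with the 'userspace' governor) the target frequency, are driven through the generic cpufreq sysfs files. A minimal sketch, not part of this patch, assuming the standard per-CPU cpufreq sysfs layout, root privileges, and an arbitrary 1.2 GHz target that the platform driver may or may not accept::

    /* Sketch only: select the "userspace" governor and request a frequency
     * through the generic cpufreq sysfs interface.  Whether the value is
     * accepted depends on the low-level driver (here the SW64 one added
     * by this patch).
     */
    #include <stdio.h>

    static int write_str(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fprintf(f, "%s\n", val);
            return fclose(f);
    }

    int main(void)
    {
            /* Pick the governor at run time, as the help text describes. */
            write_str("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor",
                      "userspace");
            /* With the userspace governor, scaling_setspeed takes a target in kHz. */
            write_str("/sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed",
                      "1200000");
            return 0;
    }
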
+ +config CPU_FREQ_GOV_CONSERVATIVE + tristate "'conservative' cpufreq governor" + depends on CPU_FREQ + select CPU_FREQ_GOV_COMMON + help + 'conservative' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + + If you have a desktop machine then you should really be considering + the 'ondemand' governor instead, however if you are using a laptop, + PDA or even an AMD64 based computer (due to the unacceptable + step-by-step latency issues between the minimum and maximum frequency + transitions in the CPU) you will probably want to use this governor. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_conservative. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_SCHEDUTIL + bool "'schedutil' cpufreq policy governor" + depends on CPU_FREQ && SMP + select CPU_FREQ_GOV_ATTR_SET + select IRQ_WORK + help + This governor makes decisions based on the utilization data provided + by the scheduler. It sets the CPU frequency to be proportional to + the utilization/capacity ratio coming from the scheduler. If the + utilization is frequency-invariant, the new frequency is also + proportional to the maximum available frequency. If that is not the + case, it is proportional to the current frequency of the CPU. The + frequency tipping point is at utilization/capacity equal to 80% in + both cases. + + If in doubt, say N. + +comment "CPU frequency scaling drivers" + +config CPUFREQ_DT + tristate "Generic DT based cpufreq driver" + depends on HAVE_CLK && OF + # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y: + depends on !CPU_THERMAL || THERMAL + select CPUFREQ_DT_PLATDEV + select PM_OPP + help + This adds a generic DT based cpufreq driver for frequency management. + It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) + systems. + + If in doubt, say N. + +config CPUFREQ_DT_PLATDEV + bool + help + This adds a generic DT based cpufreq platdev driver for frequency + management. This creates a 'cpufreq-dt' platform device, on the + supported platforms. + + If in doubt, say N. + +endif +endmenu + # clear all implied options (don't want default values for those): # Most of these machines have ISA slots; not exactly sure which don't, # and this doesn't activate hordes of code, so do it always. @@ -294,8 +519,7 @@ config PCI VESA. If you have PCI, say Y, otherwise N. config PCI_DOMAINS - bool - default y + def_bool PCI config PCI_SYSCALL def_bool PCI @@ -434,9 +658,6 @@ config ARCH_DISCONTIGMEM_ENABLE or have huge holes in the physical address space for other reasons. See for more. 
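The 80% tipping point in the CPU_FREQ_GOV_SCHEDUTIL help text above corresponds to applying a 1.25 margin to the utilization/capacity ratio, so the reference frequency is already requested slightly before utilization saturates. A minimal sketch of that mapping, consistent with the help text but not taken from this patch (the in-tree governor lives in kernel/sched/cpufreq_schedutil.c); the numbers in main() are illustrative only::

    #include <stdio.h>

    /*
     * Sketch of the proportional scaling the schedutil help text describes:
     * the requested frequency scales with util/max, and the 1.25 margin
     * puts the tipping point at util/max == 80%.  "ref_khz" stands for the
     * maximum frequency when utilization is frequency-invariant, otherwise
     * the current frequency.
     */
    static unsigned long next_freq(unsigned long ref_khz, unsigned long util,
                                   unsigned long max)
    {
            return (ref_khz + (ref_khz >> 2)) * util / max;
    }

    int main(void)
    {
            /* Assumed example values: 2.0 GHz reference, capacity scale 1024. */
            printf("util 512/1024 -> %lu kHz\n", next_freq(2000000, 512, 1024));
            printf("util 819/1024 -> %lu kHz\n", next_freq(2000000, 819, 1024));
            return 0;
    }

At roughly 80% utilization the sketch already requests the full 2.0 GHz, which is the behaviour the help text summarizes as the "tipping point".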
-source "kernel/Kconfig.preempt" - - config NUMA bool "NUMA Support" depends on SMP && !FLATMEM diff --git a/arch/sw_64/Makefile b/arch/sw_64/Makefile index 341fe6a0d9c8c262590b201fa520b41a015aaacf..7d86e80362f69b24cbce973da7da1179ded57f03 100644 --- a/arch/sw_64/Makefile +++ b/arch/sw_64/Makefile @@ -31,6 +31,7 @@ cflags-y += $(call cc-option, -fno-jump-tables) cflags-y += $(cpuflags-y) KBUILD_CFLAGS += $(cflags-y) +KBUILD_DEFCONFIG = defconfig head-y := arch/sw_64/kernel/head.o diff --git a/arch/sw_64/boot/dts/chip3.dts b/arch/sw_64/boot/dts/chip3.dts index ce61dfe6e7bd7a3ddfd435c09f1508b2d6699efa..be2e91ee32796e9245207f4651aa55a5fc0acca6 100644 --- a/arch/sw_64/boot/dts/chip3.dts +++ b/arch/sw_64/boot/dts/chip3.dts @@ -103,8 +103,21 @@ rtc: pcf8523@68 { compatible = "nxp,pcf8523"; reg = <0x68>; }; + + lm75: at30tse752a@48 { + compatible = "microchip,tcn75"; + reg = <0x48>; + }; }; + pvt: pvt@0x8030 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw64,pvt-vol"; + reg = <0x8030 0x0 0x0 0x7c00>; + status = "okay"; + }; + spi: spi@0x8032 { #address-cells = <2>; #size-cells = <2>; @@ -131,7 +144,7 @@ partitions { #size-cells = <1>; partition@0 { - label = "test"; + label = "spares0"; reg = <0 0x400000>; }; }; @@ -153,7 +166,7 @@ partitions { #size-cells = <1>; partition@0 { - label = "test"; + label = "spares1"; reg = <0 0x400000>; }; }; @@ -171,6 +184,30 @@ lpc: lpc@0x8037 { }; + ipmi-kcs@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + device_type = "ipmi"; + compatible = "ipmi-kcs"; + reg = <0x8037 0x10000ca2 0x0 0x10>; + reg-size = <1>; + reg-spacing = <1>; + reg-shift = <0>; + status = "disabled"; + }; + + ipmi-bt@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + device_type = "ipmi"; + compatible = "ipmi-bt"; + reg = <0x8037 0x100000e4 0x0 0x10>; + reg-size = <1>; + reg-spacing = <1>; + reg-shift = <0>; + status = "disabled"; + }; + gpio: gpio@8036 { #address-cells = <2>; #size-cells = <2>; diff --git a/arch/sw_64/chip/chip3/chip.c b/arch/sw_64/chip/chip3/chip.c index adb4d325fc9176a9ea58907a520b2a46b867b311..4d2f99cc64025db6a254f1ae2eade4e976c21f2c 100644 --- a/arch/sw_64/chip/chip3/chip.c +++ b/arch/sw_64/chip/chip3/chip.c @@ -1,16 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include #include -#include -#include + #include #include #include -#include #include #include -#include #include "../../../../drivers/pci/pci.h" static u64 read_longtime(struct clocksource *cs) @@ -58,14 +54,9 @@ static struct clocksource clocksource_longtime = { static u64 read_vtime(struct clocksource *cs) { u64 result; - unsigned long node; unsigned long vtime_addr = PAGE_OFFSET | IO_BASE | LONG_TIME; - if (is_in_guest()) - result = rdio64(vtime_addr); - else - result = sw64_io_read(node, LONG_TIME); - + result = rdio64(vtime_addr); return result; } @@ -116,7 +107,7 @@ static int chip3_get_cpu_nums(void) static unsigned long chip3_get_vt_node_mem(int nodeid) { - return *(unsigned long *)MMSIZE; + return *(unsigned long *)MMSIZE & MMSIZE_MASK; } static unsigned long chip3_get_node_mem(int nodeid) @@ -133,6 +124,19 @@ static unsigned long chip3_get_node_mem(int nodeid) return node_mem; } +static void chip3_setup_vt_core_start(struct cpumask *cpumask) +{ + int i; + unsigned long coreonline; + + coreonline = sw64_io_read(0, CORE_ONLINE); + + for (i = 0; i < 64 ; i++) { + if (coreonline & (1UL << i)) + cpumask_set_cpu(i, cpumask); + } +} + static void chip3_setup_core_start(struct cpumask *cpumask) { int i, j, cpus; @@ -408,14 +412,17 @@ static int chip3_map_irq(const struct 
pci_dev *dev, u8 slot, u8 pin) { struct pci_controller *hose = dev->sysdata; - return hose->int_irq; + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return hose->service_irq; + else + return hose->int_irq; } extern struct pci_controller *hose_head, **hose_tail; static void sw6_handle_intx(unsigned int offset) { struct pci_controller *hose; - unsigned long value; + unsigned long value, pme_value, aer_value; hose = hose_head; for (hose = hose_head; hose; hose = hose->next) { @@ -427,6 +434,18 @@ static void sw6_handle_intx(unsigned int offset) value = value | (1UL << 62); write_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7), value); } + + pme_value = read_piu_ior0(hose->node, hose->index, PMEINTCONFIG); + aer_value = read_piu_ior0(hose->node, hose->index, AERERRINTCONFIG); + if ((pme_value >> 63) || (aer_value >> 63)) { + handle_irq(hose->service_irq); + + if (pme_value >> 63) + write_piu_ior0(hose->node, hose->index, PMEINTCONFIG, pme_value); + if (aer_value >> 63) + write_piu_ior0(hose->node, hose->index, AERERRINTCONFIG, aer_value); + } + if (hose->iommu_enable) { value = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); if (value >> 63) @@ -450,76 +469,6 @@ static void chip3_device_interrupt(unsigned long irq_info) } } -static void set_devint_wken(int node, int val) -{ - sw64_io_write(node, DEVINT_WKEN, val); - sw64_io_write(node, DEVINTWK_INTEN, 0x0); -} - -static void clear_rc_status(int node, int rc) -{ - unsigned int val, status; - - val = 0x10000; - do { - write_rc_conf(node, rc, RC_STATUS, val); - mb(); - status = read_rc_conf(node, rc, RC_STATUS); - } while (status >> 16); -} - -static void chip3_suspend(int wake) -{ - unsigned long val; - unsigned int val_32; - unsigned long rc_start; - int node, rc, index, cpus; - - cpus = chip3_get_cpu_nums(); - for (node = 0; node < cpus; node++) { - rc = -1; - rc_start = sw64_io_read(node, IO_START); - index = ffs(rc_start); - while (index) { - rc += index; - if (wake) { - val_32 = read_rc_conf(node, rc, RC_CONTROL); - val_32 &= ~0x8; - write_rc_conf(node, rc, RC_CONTROL, val_32); - - set_devint_wken(node, 0x0); - val = 0x8000000000000000UL; - write_piu_ior0(node, rc, PMEINTCONFIG, val); - write_piu_ior0(node, rc, PMEMSICONFIG, val); - - clear_rc_status(node, rc); - } else { - val_32 = read_rc_conf(node, rc, RC_CONTROL); - val_32 |= 0x8; - write_rc_conf(node, rc, RC_CONTROL, val_32); - - clear_rc_status(node, rc); - set_devint_wken(node, 0x1f0); -#ifdef CONFIG_PCI_MSI //USE MSI - val_32 = read_rc_conf(node, rc, RC_COMMAND); - val_32 |= 0x400; - write_rc_conf(node, rc, RC_COMMAND, val_32); - val_32 = read_rc_conf(node, rc, RC_MSI_CONTROL); - val_32 |= 0x10000; - write_rc_conf(node, rc, RC_MSI_CONTROL, val_32); - val = 0x4000000000000000UL; - write_piu_ior0(node, rc, PMEMSICONFIG, val); -#else //USE INT - val = 0x4000000000000400UL; - write_piu_ior0(node, rc, PMEINTCONFIG, val); -#endif - } - rc_start = rc_start >> index; - index = ffs(rc_start); - } - } -} - static void chip3_hose_init(struct pci_controller *hose) { unsigned long pci_io_base; @@ -534,10 +483,7 @@ static void chip3_hose_init(struct pci_controller *hose) hose->ep_config_space_base = PAGE_OFFSET | pci_io_base | PCI_EP_CFG; hose->rc_config_space_base = PAGE_OFFSET | pci_io_base | PCI_RC_CFG; - if (is_in_host()) - hose->mem_space->start = pci_io_base + PCI_32BIT_MEMIO; - else - hose->mem_space->start = pci_io_base + PCI_32BIT_VT_MEMIO; + hose->mem_space->start = pci_io_base + PCI_32BIT_MEMIO; hose->mem_space->end = hose->mem_space->start + 
PCI_32BIT_MEMIO_SIZE - 1; hose->mem_space->name = "pci memory space"; hose->mem_space->flags = IORESOURCE_MEM; @@ -574,6 +520,7 @@ static void chip3_hose_init(struct pci_controller *hose) static void chip3_init_ops_fixup(void) { if (is_guest_or_emul()) { + sw64_chip_init->early_init.setup_core_start = chip3_setup_vt_core_start; sw64_chip_init->early_init.get_node_mem = chip3_get_vt_node_mem; sw64_chip_init->pci_init.check_pci_linkup = chip3_check_pci_vt_linkup; } @@ -603,7 +550,6 @@ static struct sw64_chip_init_ops chip3_chip_init_ops = { static struct sw64_chip_ops chip3_chip_ops = { .get_cpu_num = chip3_get_cpu_nums, - .suspend = chip3_suspend, .fixup = chip3_ops_fixup, }; @@ -782,14 +728,16 @@ static void chip3_pci_fixup_root_complex(struct pci_dev *dev) } dev->class &= 0xff; - dev->class |= PCI_CLASS_BRIDGE_HOST << 8; + dev->class |= PCI_CLASS_BRIDGE_PCI << 8; for (i = 0; i < PCI_NUM_RESOURCES; i++) { dev->resource[i].start = 0; dev->resource[i].end = 0; - dev->resource[i].flags = 0; + dev->resource[i].flags = IORESOURCE_PCI_FIXED; } } atomic_inc(&dev->enable_cnt); + + dev->no_msi = 1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JN, PCI_DEVICE_ID_CHIP3, chip3_pci_fixup_root_complex); diff --git a/arch/sw_64/chip/chip3/cpufreq_debugfs.c b/arch/sw_64/chip/chip3/cpufreq_debugfs.c index 3b152f84454fc7ba277830ae612ed3c58a42346e..13696360ef0294dd02cea16aa70cc31d2f6cb043 100644 --- a/arch/sw_64/chip/chip3/cpufreq_debugfs.c +++ b/arch/sw_64/chip/chip3/cpufreq_debugfs.c @@ -1,15 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include -#include -#include -#include #include #include -#include + #include -#include #include #define CLK_PRT 0x1UL diff --git a/arch/sw_64/chip/chip3/i2c-lib.c b/arch/sw_64/chip/chip3/i2c-lib.c index 581f2b3d81a1f847fb2bcc919cec7063a3519d46..ddf0a187ab5ab8f1096fa16337702387b0b9e85f 100644 --- a/arch/sw_64/chip/chip3/i2c-lib.c +++ b/arch/sw_64/chip/chip3/i2c-lib.c @@ -14,14 +14,10 @@ * of the interrupt mode. 
*/ -#include -#include #include #include #include #include -#include -#include #define CPLD_BUSNR 2 diff --git a/arch/sw_64/chip/chip3/irq_chip.c b/arch/sw_64/chip/chip3/irq_chip.c index ee43e87c554b5d20e33a6bd4e027edb7672bc995..24dfa1e1a89894f05781842a308d8ebc3fdc9f72 100644 --- a/arch/sw_64/chip/chip3/irq_chip.c +++ b/arch/sw_64/chip/chip3/irq_chip.c @@ -1,18 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include -#include -#include + #include static void fake_irq_mask(struct irq_data *data) diff --git a/arch/sw_64/chip/chip3/msi.c b/arch/sw_64/chip/chip3/msi.c index 0c6d415e082e4588a81cdbc7a5267b1e74948b8b..43688c96ccabeda7f5152da9f82d0c87f3d47abf 100644 --- a/arch/sw_64/chip/chip3/msi.c +++ b/arch/sw_64/chip/chip3/msi.c @@ -1,15 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include -#include -#include #include #include #include + #include -#include -#include -#include static struct irq_domain *msi_default_domain; static DEFINE_RAW_SPINLOCK(vector_lock); diff --git a/arch/sw_64/chip/chip3/pci-quirks.c b/arch/sw_64/chip/chip3/pci-quirks.c index e70c211df68f0fae28891ae2cff0b49a73687f6a..22887d269fe38c6d2328b2f72618b087ce63dad7 100644 --- a/arch/sw_64/chip/chip3/pci-quirks.c +++ b/arch/sw_64/chip/chip3/pci-quirks.c @@ -1,9 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include + #include -#include static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait_usec, int delay_usec) @@ -232,9 +231,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, enable_sw_dca); void __init reserve_mem_for_pci(void) { int ret; - unsigned long base; - - base = is_in_host() ? PCI_32BIT_MEMIO : PCI_32BIT_VT_MEMIO; + unsigned long base = PCI_32BIT_MEMIO; ret = add_memmap_region(base, PCI_32BIT_MEMIO_SIZE, memmap_pci); if (ret) { diff --git a/arch/sw_64/chip/chip3/vt_msi.c b/arch/sw_64/chip/chip3/vt_msi.c index 31f49d3c3511af86ab6794eeedb8cb4a18702f33..428757642342e01491332a682e01668563ed5557 100644 --- a/arch/sw_64/chip/chip3/vt_msi.c +++ b/arch/sw_64/chip/chip3/vt_msi.c @@ -1,14 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include -#include #include #include -#include -#include -#include -#include #define QEMU_MSIX_MSG_ADDR (0x8000fee00000UL) diff --git a/arch/sw_64/configs/openeuler_defconfig b/arch/sw_64/configs/openeuler_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..0e77721dae36685d5aa67b3482581fc50fe5ddc6 --- /dev/null +++ b/arch/sw_64/configs/openeuler_defconfig @@ -0,0 +1,4312 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/sw_64 5.10.0 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="sw_64sw6b-sunway-linux-gnu-gcc (GCC) 7.1.0" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=70100 +CONFIG_LD_VERSION=226010000 +CONFIG_CLANG_VERSION=0 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_IRQ_WORK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-xuelang" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +# CONFIG_AUDIT is not set +CONFIG_HAVE_ARCH_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_LEGACY=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_HZ_PERIODIC=y +# CONFIG_NO_HZ_IDLE is not set +# CONFIG_NO_HZ is not set +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_PREEMPT_NONE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 + +# +# Scheduler features +# + +# +# Intelligent aware scheduler +# +# CONFIG_IAS_SMART_IDLE is not set +# CONFIG_IAS_SMART_LOAD_TRACKING is not set +# end of Intelligent aware scheduler +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_CC_HAS_INT128=y +# CONFIG_NUMA_BALANCING is not set +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_HUGETLB is not set +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +# CONFIG_CGROUP_PERF is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +# CONFIG_CGROUP_FILES is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +# CONFIG_USER_NS is not set +CONFIG_PID_NS=y +CONFIG_NET_NS=y +# CONFIG_SCHED_STEAL is not set +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +CONFIG_INITRAMFS_FILE_METADATA="" +# CONFIG_BOOT_CONFIG is 
not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +# CONFIG_BPF_SYSCALL is not set +# CONFIG_USERFAULTFD is not set +CONFIG_KCMP=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_MEMCG_SYSFS_ON is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_PROFILING is not set +# end of General setup + +CONFIG_SW64=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_ZONE_DMA32=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_GENERIC_ISA_DMA=y +CONFIG_NONCACHE_PAGE=y +CONFIG_AUDIT_ARCH=y +CONFIG_SYS_HAS_EARLY_PRINTK=y + +# +# System setup +# + +# +# Machine Configuration +# +CONFIG_SUBARCH_C3B=y +CONFIG_SW64_CHIP3=y +# CONFIG_SW64_FPGA is not set +# CONFIG_SW64_SIM is not set +CONFIG_SW64_ASIC=y +# CONFIG_SW64_CHIP3_ASIC_DEBUG is not set +CONFIG_CPUFREQ_DEBUGFS=y +CONFIG_PLATFORM_XUELANG=y +# end of Machine Configuration + +# CONFIG_LOCK_MEMB is not set +# CONFIG_DIRECT_DMA is not set +CONFIG_SWIOTLB=y +CONFIG_ISA=y +CONFIG_ISA_DMA_API=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_SYSCALL=y +CONFIG_IOMMU_HELPER=y +CONFIG_PHYSICAL_START=0x900000 +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +CONFIG_SECCOMP=y +CONFIG_GENERIC_HWEIGHT=y +# CONFIG_LOCK_FIXUP is not set +CONFIG_SMP=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_NR_CPUS=64 +CONFIG_HOTPLUG_CPU=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +CONFIG_NUMA=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_NODES_SHIFT=7 +# CONFIG_RELOCATABLE is not set +CONFIG_HZ=100 +# CONFIG_PCIEPORTBUS is not set +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_MSI_ARCH_FALLBACKS=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCIE_BUS_TUNE_OFF is not set 
+CONFIG_PCIE_BUS_DEFAULT=y +# CONFIG_PCIE_BUS_SAFE is not set +# CONFIG_PCIE_BUS_PERFORMANCE is not set +# CONFIG_PCIE_BUS_PEER2PEER is not set +# CONFIG_HOTPLUG_PCI is not set + +# +# PCI controller drivers +# +# CONFIG_PCI_FTPCI100 is not set +# CONFIG_PCI_HOST_GENERIC is not set +# CONFIG_PCIE_XILINX is not set + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCI_MESON is not set +# end of DesignWare PCI Core Support + +# +# Mobiveil PCIe Core Support +# +# end of Mobiveil PCIe Core Support + +# +# Cadence PCIe controllers support +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence PCIe controllers support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ELFCORE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_SCRIPT=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Library optimization options +# +CONFIG_DEEP_CLEAR_PAGE=y +CONFIG_DEEP_COPY_PAGE=y +CONFIG_DEEP_COPY_USER=y +CONFIG_DEEP_MEMCPY=y +CONFIG_DEEP_MEMSET=y +# end of Library optimization options +# end of System setup + +# +# Boot options +# +CONFIG_SW64_IRQ_CHIP=y +CONFIG_USE_OF=y +# CONFIG_SW64_BUILTIN_DTB is not set +CONFIG_EFI=y +CONFIG_DMI=y +# CONFIG_CMDLINE_BOOL is not set +CONFIG_FORCE_MAX_ZONEORDER=16 +# end of Boot options + +# +# Firmware Drivers +# +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=m +# CONFIG_ISCSI_IBFT is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +# end of EFI (Extensible Firmware Interface) Support + +# CONFIG_EFI_CUSTOM_SSDT_OVERLAYS is not set + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# +# Power management options +# +# CONFIG_SUSPEND is not set +# CONFIG_HIBERNATION is not set +# CONFIG_PM is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +# CONFIG_ACPI_DEBUGGER is not set +# CONFIG_ACPI_SPCR_TABLE is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_CUSTOM_DSDT_FILE="" +# CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_PCI_SLOT is not set +# CONFIG_ACPI_CONTAINER is not set +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y + +# +# CPU Idle +# +# CONFIG_CPU_IDLE is not set +# end of CPU Idle +# end of Power management options + +CONFIG_DUMMY_CONSOLE=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_KVM_VFIO=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_SW64_HOST=y +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y + +# +# General 
architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_SET_FS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +# CONFIG_JUMP_LABEL is not set +CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_ISA_BUS_API=y +CONFIG_OLD_SIGSUSPEND=y +CONFIG_OLD_SIGACTION=y +# CONFIG_COMPAT_32BIT_TIME is not set +CONFIG_ARCH_NO_PREEMPT=y +CONFIG_ARCH_HAS_PHYS_TO_DMA=y +# CONFIG_LOCK_EVENT_COUNTS is not set + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# end of GCOV-based kernel profiling +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=y +# CONFIG_BLK_DEV_ZONED is not set +# CONFIG_BLK_DEV_THROTTLING is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_MQ_RDMA=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +# CONFIG_IOSCHED_BFQ is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_ASN1=m +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y 
+CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_SPARSEMEM=y +CONFIG_NEED_MULTIPLE_NODES=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_MEMORY_ISOLATION=y +# CONFIG_MEMORY_HOTPLUG is not set +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_COMPACTION=y +# CONFIG_PAGE_REPORTING is not set +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +# CONFIG_KSM is not set +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +# CONFIG_CLEANCACHE is not set +# CONFIG_FRONTSWAP is not set +# CONFIG_SHRINK_PAGECACHE is not set +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +CONFIG_CMA_AREAS=7 +# CONFIG_ZPOOL is not set +# CONFIG_ZBUD is not set +# CONFIG_ZSMALLOC is not set +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +# CONFIG_IDLE_PAGE_TRACKING is not set +CONFIG_HMM_MIRROR=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_BENCHMARK is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set + +# +# Data Access Monitoring +# +# CONFIG_DAMON is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=m +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +# CONFIG_SMC is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y 
+CONFIG_IP_MULTIPLE_TABLES=y +# CONFIG_IP_ROUTE_MULTIPATH is not set +# CONFIG_IP_ROUTE_VERBOSE is not set +CONFIG_IP_ROUTE_CLASSID=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +# CONFIG_NET_IPGRE is not set +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set +# CONFIG_IP_PIMSM_V1 is not set +# CONFIG_IP_PIMSM_V2 is not set +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +CONFIG_INET_UDP_DIAG=m +# CONFIG_INET_RAW_DIAG is not set +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +# CONFIG_TCP_CONG_HSTCP is not set +# CONFIG_TCP_CONG_HYBLA is not set +# CONFIG_TCP_CONG_VEGAS is not set +# CONFIG_TCP_CONG_NV is not set +# CONFIG_TCP_CONG_SCALABLE is not set +# CONFIG_TCP_CONG_LP is not set +# CONFIG_TCP_CONG_VENO is not set +# CONFIG_TCP_CONG_YEAH is not set +# CONFIG_TCP_CONG_ILLINOIS is not set +# CONFIG_TCP_CONG_DCTCP is not set +# CONFIG_TCP_CONG_CDG is not set +# CONFIG_TCP_CONG_BBR is not set +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +# CONFIG_TCP_COMP is not set +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_NETLABEL is not set +# CONFIG_MPTCP is not set +# CONFIG_NETWORK_SECMARK is not set +CONFIG_NET_PTP_CLASSIFY=y +# CONFIG_NETWORK_PHY_TIMESTAMPING is not set +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m 
+CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +# CONFIG_NF_TABLES_INET is not set +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +# CONFIG_NFT_FLOW_OFFLOAD is not set +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +# CONFIG_NFT_XFRM is not set +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=m + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m 
+CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +# CONFIG_IP_VS_IPV6 is not set +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +# CONFIG_IP_VS_PROTO_TCP is not set +# CONFIG_IP_VS_PROTO_UDP is not set +# CONFIG_IP_VS_PROTO_ESP is not set +# CONFIG_IP_VS_PROTO_AH is not set +# CONFIG_IP_VS_PROTO_SCTP is not set + +# +# IPVS scheduler +# +# CONFIG_IP_VS_RR is not set +# CONFIG_IP_VS_WRR is not set +# CONFIG_IP_VS_LC is not set +# CONFIG_IP_VS_WLC is not set +# CONFIG_IP_VS_FO is not set +# CONFIG_IP_VS_OVF is not set +# CONFIG_IP_VS_LBLC is not set +# CONFIG_IP_VS_LBLCR is not set +# CONFIG_IP_VS_DH is not set +# CONFIG_IP_VS_SH is not set +# CONFIG_IP_VS_MH is not set +# CONFIG_IP_VS_SED is not set +# CONFIG_IP_VS_NQ is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +# CONFIG_IP_VS_NFCT is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +# CONFIG_NF_SOCKET_IPV6 is not set +# CONFIG_NF_TPROXY_IPV6 is not set +# CONFIG_NF_TABLES_IPV6 is not set +# CONFIG_NF_FLOW_TABLE_IPV6 is not set +# CONFIG_NF_DUP_IPV6 is not set +# CONFIG_NF_REJECT_IPV6 is not set +# CONFIG_NF_LOG_IPV6 is not set +# CONFIG_IP6_NF_IPTABLES is not set +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NF_LOG_BRIDGE=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +# CONFIG_BRIDGE_EBT_IP6 is not set +CONFIG_BRIDGE_EBT_LIMIT=m 
+CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_RDS is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_L2TP is not set +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +# CONFIG_BRIDGE_VLAN_FILTERING is not set +# CONFIG_BRIDGE_MRP is not set +CONFIG_HAVE_NET_DSA=y +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +# CONFIG_6LOWPAN is not set +# CONFIG_IEEE802154 is not set +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +# CONFIG_DEFAULT_FQ_CODEL is not set +# CONFIG_DEFAULT_SFQ is not set +CONFIG_DEFAULT_PFIFO_FAST=y +CONFIG_DEFAULT_NET_SCH="pfifo_fast" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_IPSET is not set +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +# CONFIG_NET_ACT_IPT is not set +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +# CONFIG_NET_ACT_CT is not set +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +# CONFIG_NET_TC_SKB_EXT is not set +CONFIG_NET_SCH_FIFO=y +# CONFIG_DCB is not set +# CONFIG_DNS_RESOLVER is not set +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m 
+CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +# CONFIG_VIRTIO_VSOCKETS is not set +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=m +# CONFIG_MPLS_ROUTING is not set +CONFIG_NET_NSH=m +# CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_JIT=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +CONFIG_FIB_RULES=y +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +# CONFIG_CEPH_LIB is not set +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +CONFIG_FAILOVER=y +CONFIG_ETHTOOL_NETLINK=y +CONFIG_HAVE_EBPF_JIT=y + +# +# Device Drivers +# + +# +# PCI controller drivers +# + +# +# DesignWare PCI Core Support +# +# end of DesignWare PCI Core Support + +# +# Mobiveil PCIe Core Support +# +# end of Mobiveil PCIe Core Support + +# +# Cadence PCIe controllers support +# +# end of Cadence PCIe controllers support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# end of PCI switch controller drivers + +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_MOXTET is not set +# CONFIG_MHI_BUS is not set +# end of Bus devices + +CONFIG_CONNECTOR=m +# CONFIG_GNSS is not set +CONFIG_MTD=y +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_OF_PARTS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_GEN_PROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_GEOMETRY is not set 
+CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_OTP is not set +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_CFI_UTIL=y +CONFIG_MTD_RAM=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +# CONFIG_MTD_PHYSMAP_COMPAT is not set +CONFIG_MTD_PHYSMAP_OF=y +# CONFIG_MTD_PCI is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +CONFIG_MTD_PLATRAM=y +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +# CONFIG_MTD_UBI is not set +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +# CONFIG_ISAPNP is not set +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +CONFIG_CDROM=y +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=5000000 +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +# CONFIG_NVME_TCP is not set +CONFIG_NVME_TARGET=y +# CONFIG_NVME_TARGET_PASSTHRU is not set +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +# CONFIG_NVME_TARGET_TCP is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +# CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_PCI_ENDPOINT_TEST is not 
set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_PVPANIC is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +# CONFIG_CB710_CORE is not set + +# +# Texas Instruments shared transport line discipline +# +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set +# CONFIG_ALTERA_STAPL is not set +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_HABANA_AI is not set +# CONFIG_UACCE is not set +# end of Misc devices + +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=y +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AHA152X is not set +# CONFIG_SCSI_AHA1542 is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=m +CONFIG_MEGARAID_MAILBOX=m +CONFIG_MEGARAID_LEGACY=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_SMARTPQI is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +# CONFIG_LIBFC is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_FDOMAIN_ISA is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_GENERIC_NCR5380 is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_FAS is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_FC is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +# CONFIG_SCSI_VIRTIO 
is not set +# CONFIG_SCSI_CHELSIO_FCOE is not set +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=y +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +# CONFIG_SATA_AHCI_PLATFORM is not set +# CONFIG_AHCI_CEVA is not set +# CONFIG_AHCI_QORIQ is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +# CONFIG_BCACHE_ASYNC_REGISTRATION is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +# CONFIG_ISCSI_TARGET_CXGB4 is not set +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +# CONFIG_BONDING is not set +# CONFIG_DUMMY is not set +# CONFIG_WIREGUARD is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +# CONFIG_IFB is not set +# CONFIG_NET_TEAM is not set +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +# CONFIG_IPVLAN is not set +# CONFIG_VXLAN is not set +# CONFIG_GENEVE is not set +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_MACSEC is not set +# CONFIG_NETCONSOLE is not set +CONFIG_TUN=y +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +# CONFIG_VETH is not set +CONFIG_VIRTIO_NET=y +# CONFIG_NLMON is not set +# CONFIG_VSOCKMON is not set +# CONFIG_ARCNET is not set + +# +# Distributed Switch Architecture drivers +# +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +CONFIG_NET_VENDOR_ADAPTEC=y +# CONFIG_ADAPTEC_STARFIRE is not set +CONFIG_NET_VENDOR_AGERE=y +# CONFIG_ET131X is not set +CONFIG_NET_VENDOR_ALACRITECH=y +# CONFIG_SLICOSS is not set +CONFIG_NET_VENDOR_ALTEON=y +# CONFIG_ACENIC is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +# CONFIG_ENA_ETHERNET is not set +# CONFIG_NET_VENDOR_AMD is not set +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_NET_VENDOR_ARC is not set 
+CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +# CONFIG_ATL1 is not set +# CONFIG_ATL1E is not set +# CONFIG_ATL1C is not set +# CONFIG_ALX is not set +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +# CONFIG_TIGON3 is not set +# CONFIG_BNX2X is not set +# CONFIG_SYSTEMPORT is not set +# CONFIG_BNXT is not set +CONFIG_NET_VENDOR_BROCADE=y +# CONFIG_BNA is not set +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +CONFIG_CAVIUM_PTP=y +# CONFIG_LIQUIDIO is not set +# CONFIG_LIQUIDIO_VF is not set +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4VF is not set +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +# CONFIG_CHELSIO_IPSEC_INLINE is not set +# CONFIG_CHELSIO_TLS_DEVICE is not set +# CONFIG_NET_VENDOR_CIRRUS is not set +CONFIG_NET_VENDOR_CISCO=y +# CONFIG_ENIC is not set +CONFIG_NET_VENDOR_CORTINA=y +# CONFIG_GEMINI_ETHERNET is not set +# CONFIG_DNET is not set +CONFIG_NET_VENDOR_DEC=y +# CONFIG_NET_TULIP is not set +CONFIG_NET_VENDOR_DLINK=y +# CONFIG_DL2K is not set +# CONFIG_SUNDANCE is not set +CONFIG_NET_VENDOR_EMULEX=y +# CONFIG_BE2NET is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_GOOGLE=y +# CONFIG_GVE is not set +CONFIG_NET_VENDOR_HUAWEI=y +# CONFIG_BMA is not set +CONFIG_NET_VENDOR_I825XX=y +CONFIG_NET_VENDOR_INTEL=y +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=y +CONFIG_IAVF=y +CONFIG_I40EVF=y +# CONFIG_ICE is not set +# CONFIG_FM10K is not set +# CONFIG_IGC is not set +CONFIG_NET_VENDOR_NETSWIFT=y +# CONFIG_JME is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=y +CONFIG_MLX4_CORE=y +CONFIG_MLX4_DEBUG=y +CONFIG_MLX4_CORE_GEN2=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_ACCEL=y +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +# CONFIG_MLX5_CORE_IPOIB is not set +# CONFIG_MLX5_FPGA_IPSEC is not set +# CONFIG_MLX5_IPSEC is not set +# CONFIG_MLX5_FPGA_TLS is not set +# CONFIG_MLX5_TLS is not set +CONFIG_MLXSW_CORE=y +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_PCI=y +CONFIG_MLXSW_I2C=y +CONFIG_MLXSW_MINIMAL=y +CONFIG_MLXFW=y +# CONFIG_NET_VENDOR_MICREL is not set +CONFIG_NET_VENDOR_MICROCHIP=y +# CONFIG_ENC28J60 is not set +# CONFIG_ENCX24J600 is not set +# CONFIG_LAN743X is not set +CONFIG_NET_VENDOR_MICROSEMI=y +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_VXGE is not set +CONFIG_NET_VENDOR_NETRONOME=y +# CONFIG_NFP is not set +CONFIG_NET_VENDOR_NI=y +# CONFIG_NI_XGE_MANAGEMENT_ENET is not set +CONFIG_NET_VENDOR_NVIDIA=y +# CONFIG_FORCEDETH is not set +CONFIG_NET_VENDOR_OKI=y +# CONFIG_ETHOC is not set +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +# CONFIG_QLA3XXX is not set +# CONFIG_QLCNIC is not set +# CONFIG_NETXEN_NIC is not set +# 
CONFIG_QED is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +CONFIG_NET_VENDOR_RAMAXEL=y +CONFIG_NET_VENDOR_RDC=y +# CONFIG_R6040 is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_R8169 is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +CONFIG_NET_VENDOR_SILAN=y +# CONFIG_SC92031 is not set +CONFIG_NET_VENDOR_SIS=y +# CONFIG_SIS900 is not set +# CONFIG_SIS190 is not set +# CONFIG_NET_VENDOR_SMSC is not set +CONFIG_NET_VENDOR_SOCIONEXT=y +# CONFIG_NET_VENDOR_STMICRO is not set +CONFIG_NET_VENDOR_SUN=y +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_CASSINI is not set +# CONFIG_NIU is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +CONFIG_NET_VENDOR_TEHUTI=y +# CONFIG_TEHUTI is not set +CONFIG_NET_VENDOR_TI=y +# CONFIG_TI_CPSW_PHY_SEL is not set +# CONFIG_TLAN is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLIB=m +CONFIG_SWPHY=y +CONFIG_FIXED_PHY=m + +# +# MII PHY device drivers +# +# CONFIG_AMD_PHY is not set +# CONFIG_ADIN_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AX88796B_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_BCM54140_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM84881_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_CORTINA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_MARVELL_PHY is not set +# CONFIG_MARVELL_10G_PHY is not set +# CONFIG_MICREL_PHY is not set +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROCHIP_T1_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_RENESAS_PHY is not set +# CONFIG_ROCKCHIP_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_DP83822_PHY is not set +# CONFIG_DP83TC811_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +# CONFIG_DP83869_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_MDIO_DEVICE=m +CONFIG_MDIO_BUS=m +CONFIG_OF_MDIO=m +CONFIG_MDIO_DEVRES=m +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MDIO_HISI_FEMAC is not set +# CONFIG_MDIO_MVUSB is not set +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_THUNDER is not set + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +# CONFIG_PCS_XPCS is not set +# end of PCS device drivers + +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +CONFIG_USB_NET_DRIVERS=y +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_LAN78XX 
is not set +# CONFIG_USB_USBNET is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +# CONFIG_SERIO_I8042 is not set +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_16550A_VARIANTS=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_PCI is not set +CONFIG_SERIAL_8250_NR_UARTS=4 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set +# CONFIG_SERIAL_8250_DW is not set +CONFIG_SERIAL_8250_SUNWAY=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# 
CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +# CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_N_GSM is not set +# CONFIG_NOZOMI is not set +# CONFIG_NULL_TTY is not set +# CONFIG_TRACE_SINK is not set +CONFIG_HVC_DRIVER=y +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_DTLK is not set +# CONFIG_APPLICOM is not set +CONFIG_DEVMEM=y +# CONFIG_DEVKMEM is not set +# CONFIG_RAW_DRIVER is not set +CONFIG_DEVPORT=y +# CONFIG_TCG_TPM is not set +# CONFIG_XILLYBUS is not set +# end of Character devices + +# CONFIG_RANDOM_TRUST_BOOTLOADER is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_MUX_MLXCPLD is not set +# end of Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_ALGOBIT=y + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE_PLATFORM is not set +CONFIG_I2C_SUNWAY_SW6=y +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_RK3X is not set +# CONFIG_I2C_SIMTEC is not set +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_DIOLAN_U2C is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +# CONFIG_I2C_TINY_USB is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_ISA is not set +# end of I2C Hardware Bus support + +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_MEM=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_NXP_FLEXSPI is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_ROCKCHIP is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# 
CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set +CONFIG_SPI_CHIP3=y + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +CONFIG_SPI_SPIDEV=y +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set +# CONFIG_NTP_PPS is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +# CONFIG_PPS_CLIENT_LDISC is not set +# CONFIG_PPS_CLIENT_GPIO is not set + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y + +# +# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. +# +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# end of PTP clock support + +# CONFIG_PINCTRL is not set +# CONFIG_GPIOLIB is not set +# CONFIG_W1 is not set +# CONFIG_POWER_RESET is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_PDA_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_SMB347 is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_BD99954 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM1177 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set +# CONFIG_SENSORS_ASPEED is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_DRIVETEMP is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC4151 is 
not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_MR75203 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_TMP513 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set + +# +# ACPI drivers +# +# CONFIG_SENSORS_ACPI_POWER is not set +# CONFIG_THERMAL is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y +CONFIG_SSB=y +CONFIG_SSB_SPROM=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +# CONFIG_SSB_DRIVER_PCICORE is not set +CONFIG_BCMA_POSSIBLE=y +# 
CONFIG_BCMA is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_GATEWORKS_GSC is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_LPC_CHIP3 is not set +# CONFIG_SUNWAY_SUPERIO_AST2400 is not set +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK808 is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_ABX500_CORE is not set +# CONFIG_MFD_STMPE is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS80031 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_LOCHNAGAR is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD70528 is not set +# CONFIG_MFD_ROHM_BD71828 is not set +# CONFIG_MFD_STPMIC1 is not set +# CONFIG_MFD_STMFX is not set +# CONFIG_MFD_INTEL_M10_BMC is not set +# end of Multifunction device 
drivers + +# CONFIG_REGULATOR is not set +# CONFIG_RC_CORE is not set +# CONFIG_MEDIA_CEC_SUPPORT is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 +CONFIG_DRM=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DEBUG_MM is not set +# CONFIG_DRM_DEBUG_SELFTEST is not set +CONFIG_DRM_KMS_HELPER=y +CONFIG_DRM_KMS_FB_HELPER=y +# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set +# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=y +CONFIG_DRM_TTM_DMA_PAGE_POOL=y +CONFIG_DRM_VRAM_HELPER=y +CONFIG_DRM_TTM_HELPER=y +CONFIG_DRM_GEM_SHMEM_HELPER=y + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + +CONFIG_DRM_RADEON=y +# CONFIG_DRM_RADEON_USERPTR is not set +# CONFIG_DRM_AMDGPU is not set +# CONFIG_DRM_NOUVEAU is not set +# CONFIG_DRM_VGEM is not set +# CONFIG_DRM_VKMS is not set +# CONFIG_DRM_UDL is not set +CONFIG_DRM_AST=y +# CONFIG_DRM_MGAG200 is not set +# CONFIG_DRM_RCAR_DW_HDMI is not set +# CONFIG_DRM_RCAR_LVDS is not set +# CONFIG_DRM_QXL is not set +# CONFIG_DRM_BOCHS is not set +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_CHRONTEL_CH7033 is not set +# CONFIG_DRM_DISPLAY_CONNECTOR is not set +# CONFIG_DRM_LONTIUM_LT9611 is not set +# CONFIG_DRM_LVDS_CODEC is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NWL_MIPI_DSI is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +# end of Display Interface Bridges + +# CONFIG_DRM_ETNAVIV is not set +# CONFIG_DRM_ARCPGU is not set +# CONFIG_DRM_MXSFB is not set +# CONFIG_DRM_CIRRUS_QEMU is not set +# CONFIG_DRM_GM12U320 is not set +# 
CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_MODE_HELPERS=y +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +# CONFIG_FB_EFI is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +CONFIG_LOGO_LINUX_MONO=y +CONFIG_LOGO_LINUX_VGA16=y +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not 
set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_BETOP_FF is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_GLORIOUS is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_VIVALDI is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set +# end of USB HID support + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +# end of I2C HID support +# end of HID support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +# CONFIG_USB_ULPI_BUS is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set +CONFIG_USB_AUTOSUSPEND_DELAY=2 +# CONFIG_USB_MON is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +# CONFIG_USB_XHCI_PLATFORM is not set +# CONFIG_USB_EHCI_HCD is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# 
CONFIG_USB_MAX3421_HCD is not set +# CONFIG_USB_OHCI_HCD is not set +# CONFIG_USB_UHCI_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_SSB is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_CDNS3 is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_APPLE_MFI_FASTCHARGE is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +# CONFIG_TYPEC is not set +# CONFIG_USB_ROLE_SWITCH is not set +# CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +# CONFIG_INFINIBAND_CXGB4 is not set +# CONFIG_INFINIBAND_EFA is not set +# CONFIG_INFINIBAND_I40IW is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_OCRDMA is not set +# CONFIG_RDMA_RXE is not set +# CONFIG_RDMA_SIW is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +# CONFIG_INFINIBAND_SRP is not set +# CONFIG_INFINIBAND_SRPT is not set +# 
CONFIG_INFINIBAND_ISER is not set +# CONFIG_INFINIBAND_ISERT is not set +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +# CONFIG_RTC_NVMEM is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_HYM8563 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_ISL12026 is not set +# CONFIG_RTC_DRV_X1205 is not set +CONFIG_RTC_DRV_PCF8523=y +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_SW64_VIRT=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set +# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_EFI is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_DMADEVICES is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# end of DMABUF options + +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=y +# 
CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +# CONFIG_VFIO is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_PCI_LEGACY is not set +# CONFIG_VIRTIO_BALLOON is not set +# CONFIG_VIRTIO_INPUT is not set +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set +CONFIG_VIRTIO_DMA_SHARED_BUFFER=y +# CONFIG_VDPA is not set + +# +# Microsoft Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +CONFIG_STAGING=y +# CONFIG_COMEDI is not set +# CONFIG_RTS5208 is not set +CONFIG_FB_SM750=y +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +# end of Android + +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_PI433 is not set + +# +# Gasket devices +# +# end of Gasket devices + +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_KPC2000 is not set +# CONFIG_QLGE is not set +# CONFIG_GOLDFISH is not set +CONFIG_HAVE_CLK=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# CONFIG_MICROCHIP_PIT64B is not set +# end of Clock Source drivers + +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_OF_IOMMU=y +CONFIG_SUNWAY_IOMMU=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +CONFIG_SW64_INTC=y +CONFIG_IRQCHIP=y +# CONFIG_AL_FIC is not set +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set + +# +# PHY Subsystem 
+# +# CONFIG_GENERIC_PHY is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_SALVO is not set +# CONFIG_PHY_FSL_IMX8MQ_USB is not set +# CONFIG_PHY_MIXEL_MIPI_DPHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +# CONFIG_RAS is not set +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set +# CONFIG_DAX is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# end of Device Drivers + +# +# File systems +# +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +# CONFIG_XFS_QUOTA is not set +# CONFIG_XFS_POSIX_ACL is not set +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=y +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +# CONFIG_QUOTA_NETLINK_INTERFACE is not set +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +# CONFIG_QFMT_V1 is not set +# CONFIG_QFMT_V2 is not set +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set +# CONFIG_VIRTIO_FS is not set +# CONFIG_OVERLAY_FS is not set + +# +# Caches +# +CONFIG_FSCACHE=y +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +# CONFIG_CACHEFILES is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +CONFIG_FAT_DEFAULT_UTF8=y +# CONFIG_EXFAT_FS is not set +CONFIG_NTFS_FS=y +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +CONFIG_HUGETLBFS=y 
+CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=m +# end of Pseudo filesystems + +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V2=y +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=y +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=y +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_SUNRPC_SWAP=y +# CONFIG_SUNRPC_DEBUG is not set +CONFIG_SUNRPC_XPRT_RDMA=m +# CONFIG_CEPH_FS is not set +# CONFIG_CIFS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_ENCRYPTED_KEYS is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +# CONFIG_SECURITY_NETWORK_XFRM is not set +CONFIG_SECURITY_PATH=y +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_STATIC_USERMODEHELPER is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +CONFIG_INTEGRITY=y 
+# CONFIG_INTEGRITY_SIGNATURE is not set +# CONFIG_IMA is not set +# CONFIG_EVM is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options + +# CONFIG_SECURITY_BOOT_INIT is not set +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Public-key cryptography +# +# CONFIG_CRYPTO_RSA is not set +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +# CONFIG_CRYPTO_SM2 is not set +# CONFIG_CRYPTO_CURVE25519 is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +CONFIG_CRYPTO_GCM=y +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_OFB is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set +# CONFIG_CRYPTO_KEYWRAP is not set +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ESSIV=m + +# +# Hash modes +# +# CONFIG_CRYPTO_CMAC is not set +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32 is not set +# CONFIG_CRYPTO_XXHASH is not set +# CONFIG_CRYPTO_BLAKE2B is not set +# CONFIG_CRYPTO_BLAKE2S is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=y +# CONFIG_CRYPTO_POLY1305 is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_SM4 is not 
set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_AES=y +# CONFIG_CRYPTO_LIB_BLAKE2S is not set +# CONFIG_CRYPTO_LIB_CHACHA is not set +# CONFIG_CRYPTO_LIB_CURVE25519 is not set +CONFIG_CRYPTO_LIB_DES=y +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 +# CONFIG_CRYPTO_LIB_POLY1305 is not set +# CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_LIB_SHA256=y +# CONFIG_CRYPTO_HW is not set +# CONFIG_ASYMMETRIC_KEY_TYPE is not set + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# CONFIG_PGP_PRELOAD_PUBLIC_KEYS is not set +# end of Certificates for signature checking + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +# CONFIG_CORDIC is not set +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +# CONFIG_CRC_CCITT is not set +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_IRQ_POLL=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +# CONFIG_PRINTK_TIME is not set +# 
CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DYNAMIC_DEBUG_CORE is not set +CONFIG_SYMBOLIC_ERRNAME=y +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=2048 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +# CONFIG_OPTIMIZE_INLINING is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B is not set +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +# CONFIG_MAGIC_SYSRQ is not set +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_UBSAN is not set +# end of Generic Kernel Debugging Instruments + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# end of Memory Debugging + +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Oops, Lockups and Hangs +# +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SOFTLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_SAMPLES is not set + +# +# sw_64 Debugging +# +CONFIG_EARLY_PRINTK=y +# CONFIG_UNA_PRINT is not set +CONFIG_MATHEMU=y +CONFIG_STACKTRACE_SUPPORT=y +# CONFIG_SW64_RRU is not set +# CONFIG_SW64_RRK is not set +# end of sw_64 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_SORT is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_MEMTEST is not set +# end of Kernel Testing and Coverage +# end of Kernel hacking + +CONFIG_KABI_SIZE_ALIGN_CHECKS=y +CONFIG_KABI_RESERVE=y diff --git a/arch/sw_64/defconfig b/arch/sw_64/defconfig deleted file mode 100644 index d641ca0c108aa4c590f393b64d8bbadf961a7b35..0000000000000000000000000000000000000000 --- a/arch/sw_64/defconfig +++ /dev/null @@ -1,73 +0,0 @@ -CONFIG_EXPERIMENTAL=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_VERBOSE_MCHECK=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y 
-CONFIG_XFRM_USER=m -CONFIG_NET_KEY=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -# CONFIG_IPV6 is not set -CONFIG_NETFILTER=y -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_FILTER=m -CONFIG_VLAN_8021Q=m -CONFIG_PNP=y -CONFIG_ISAPNP=y -CONFIG_BLK_DEV_FD=y -CONFIG_BLK_DEV_LOOP=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_IDE_GENERIC=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_ALI15X3=y -CONFIG_BLK_DEV_CMD64X=y -CONFIG_BLK_DEV_CY82C693=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_SCSI_AIC7XXX=m -CONFIG_AIC7XXX_CMDS_PER_DEVICE=253 -# CONFIG_AIC7XXX_DEBUG_ENABLE is not set -CONFIG_NETDEVICES=y -CONFIG_DUMMY=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_VENDOR_3COM=y -CONFIG_VORTEX=y -CONFIG_NET_TULIP=y -CONFIG_DE2104X=m -CONFIG_TULIP=y -CONFIG_TULIP_MMIO=y -CONFIG_NET_PCI=y -CONFIG_YELLOWFIN=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_RTC=y -CONFIG_EXT2_FS=y -CONFIG_REISERFS_FS=m -CONFIG_ISO9660_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_NFS_FS=m -CONFIG_NFS_V3=y -CONFIG_NFSD=m -CONFIG_NFSD_V3=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_INFO=y -CONFIG_SW64_LEGACY_START_ADDRESS=y -CONFIG_MATHEMU=y -CONFIG_CRYPTO_HMAC=y diff --git a/arch/sw_64/include/asm/Kbuild b/arch/sw_64/include/asm/Kbuild index ab266af1a06d6a7f0a535a57b4c617bba78e11be..e276ba366e6890ee1543eb1fc50d2acccd15a592 100644 --- a/arch/sw_64/include/asm/Kbuild +++ b/arch/sw_64/include/asm/Kbuild @@ -20,3 +20,4 @@ generic-y += qspinlock.h generic-y += mcs_spinlock.h generic-y += clkdev.h generic-y += scatterlist.h +generic-y += user.h diff --git a/arch/sw_64/include/asm/a.out-core.h b/arch/sw_64/include/asm/a.out-core.h deleted file mode 100644 index 39dc16142955da86a69b4ac2ffa37a707957d668..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/a.out-core.h +++ /dev/null @@ -1,80 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_SW64_OUT_CORE_H -#define _ASM_SW64_OUT_CORE_H - -#ifdef __KERNEL__ - -#include - -/* - * Fill in the user structure for an ECOFF core dump. - */ -static inline void aout_dump_thread(struct pt_regs *pt, struct user *dump) -{ - /* switch stack follows right below pt_regs: */ - struct switch_stack *sw = ((struct switch_stack *) pt) - 1; - - dump->magic = CMAGIC; - dump->start_code = current->mm->start_code; - dump->start_data = current->mm->start_data; - dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); - dump->u_tsize = ((current->mm->end_code - dump->start_code) - >> PAGE_SHIFT); - dump->u_dsize = ((current->mm->brk + PAGE_SIZE - 1 - dump->start_data) - >> PAGE_SHIFT); - dump->u_ssize = (current->mm->start_stack - dump->start_stack - + PAGE_SIZE - 1) >> PAGE_SHIFT; - - /* - * We store the registers in an order/format that makes life easier - * for gdb. 
- */ - dump->regs[EF_V0] = pt->r0; - dump->regs[EF_T0] = pt->r1; - dump->regs[EF_T1] = pt->r2; - dump->regs[EF_T2] = pt->r3; - dump->regs[EF_T3] = pt->r4; - dump->regs[EF_T4] = pt->r5; - dump->regs[EF_T5] = pt->r6; - dump->regs[EF_T6] = pt->r7; - dump->regs[EF_T7] = pt->r8; - dump->regs[EF_S0] = sw->r9; - dump->regs[EF_S1] = sw->r10; - dump->regs[EF_S2] = sw->r11; - dump->regs[EF_S3] = sw->r12; - dump->regs[EF_S4] = sw->r13; - dump->regs[EF_S5] = sw->r14; - dump->regs[EF_S6] = sw->r15; - dump->regs[EF_A3] = pt->r19; - dump->regs[EF_A4] = pt->r20; - dump->regs[EF_A5] = pt->r21; - dump->regs[EF_T8] = pt->r22; - dump->regs[EF_T9] = pt->r23; - dump->regs[EF_T10] = pt->r24; - dump->regs[EF_T11] = pt->r25; - dump->regs[EF_RA] = pt->r26; - dump->regs[EF_T12] = pt->r27; - dump->regs[EF_AT] = pt->r28; - dump->regs[EF_SP] = rdusp(); - dump->regs[EF_PS] = pt->ps; - dump->regs[EF_PC] = pt->pc; - dump->regs[EF_GP] = pt->gp; - dump->regs[EF_A0] = pt->r16; - dump->regs[EF_A1] = pt->r17; - dump->regs[EF_A2] = pt->r18; - memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_SW64_OUT_CORE_H */ diff --git a/arch/sw_64/include/asm/a.out.h b/arch/sw_64/include/asm/a.out.h deleted file mode 100644 index 4f2004a7fa8e0d71e75673ee1621210677d58d64..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/a.out.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_A_OUT_H -#define _ASM_SW64_A_OUT_H - -#include - -/* Assume that start addresses below 4G belong to a TASO application. - * Unfortunately, there is no proper bit in the exec header to check. - * Worse, we have to notice the start address before swapping to use - * /sbin/loader, which of course is _not_ a TASO application. - */ -#define SET_AOUT_PERSONALITY(BFPM, EX) \ - set_personality(((BFPM->taso || EX.ah.entry < 0x100000000L \ - ? ADDR_LIMIT_32BIT : 0) | PER_OSF4)) - -#endif /* _ASM_SW64_A_OUT_H */ diff --git a/arch/sw_64/include/asm/checksum.h b/arch/sw_64/include/asm/checksum.h index 0bb933350dc67b3a62d5e4adef7cff3ed7213ced..284c1678f51ea084f71ee6a3747ce49854c512eb 100644 --- a/arch/sw_64/include/asm/checksum.h +++ b/arch/sw_64/include/asm/checksum.h @@ -4,9 +4,33 @@ #include +#define extll(x, y, z) \ + ({__asm__ __volatile__("extll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define exthl(x, y, z) \ + ({__asm__ __volatile__("exthl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskll(x, y, z) \ + ({__asm__ __volatile__("maskll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskhl(x, y, z) \ + ({__asm__ __volatile__("maskhl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define insll(x, y, z) \ + ({__asm__ __volatile__("insll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define inshl(x, y, z) \ + ({__asm__ __volatile__("inshl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + /* - * This is a version of ip_compute_csum() optimized for IP headers, - * which always checksum on 4 octet boundaries. + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. 
*/ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); @@ -55,7 +79,7 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len); extern __sum16 ip_compute_csum(const void *buff, int len); /* - * Fold a partial checksum without adding pseudo headers + * Fold a partial checksum without adding pseudo headers */ static inline __sum16 csum_fold(__wsum csum) @@ -71,4 +95,32 @@ static inline __sum16 csum_fold(__wsum csum) extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len, __u8 proto, __wsum sum); + +static inline unsigned short from64to16(unsigned long x) +{ + /* + * Using extract instructions is a bit more efficient + * than the original shift/bitmask version. + */ + + union { + unsigned long ul; + unsigned int ui[2]; + unsigned short us[4]; + } in_v, tmp_v, out_v; + + in_v.ul = x; + tmp_v.ul = (unsigned long)in_v.ui[0] + (unsigned long)in_v.ui[1]; + + /* + * Since the bits of tmp_v.sh[3] are going to always be zero, + * we don't have to bother to add that in. + */ + out_v.ul = (unsigned long)tmp_v.us[0] + (unsigned long)tmp_v.us[1] + + (unsigned long)tmp_v.us[2]; + + /* Similarly, out_v.us[2] is always zero for the final add. */ + return out_v.us[0] + out_v.us[1]; +} + #endif diff --git a/arch/sw_64/include/asm/chip3_io.h b/arch/sw_64/include/asm/chip3_io.h index 1028842f7a817bce1be5b4ab67870384aac72826..14d02c080607403805e357207ec8fa1de9eb6376 100644 --- a/arch/sw_64/include/asm/chip3_io.h +++ b/arch/sw_64/include/asm/chip3_io.h @@ -19,7 +19,6 @@ #define PCI_LEGACY_IO (0x1UL << 32) #define PCI_LEGACY_IO_SIZE (0x100000000UL) #define PCI_MEM_UNPRE 0x0UL -#define PCI_32BIT_VT_MEMIO (0xc0000000UL) #define PCI_32BIT_MEMIO (0xe0000000UL) #define PCI_32BIT_MEMIO_SIZE (0x20000000UL) #define PCI_64BIT_MEMIO (0x1UL << 39) @@ -70,6 +69,9 @@ #define DLI_PHY_CTL (0x10UL << 24) #define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) +#define PME_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) +#define AER_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) + /*-----------------------addr-----------------------*/ /* CAB0 REG */ enum { diff --git a/arch/sw_64/include/asm/clock.h b/arch/sw_64/include/asm/clock.h new file mode 100644 index 0000000000000000000000000000000000000000..06ad4bcd6ad3f2599a15a8acfc18dd08eb3685e8 --- /dev/null +++ b/arch/sw_64/include/asm/clock.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_CLOCK_H +#define _ASM_SW64_CLOCK_H + +#include +#include +#include +#include +#include + +struct clk; + +extern struct cpufreq_frequency_table sw64_clockmod_table[]; + +extern char curruent_policy[CPUFREQ_NAME_LEN]; + +struct clk_ops { + void (*init)(struct clk *clk); + void (*enable)(struct clk *clk); + void (*disable)(struct clk *clk); + void (*recalc)(struct clk *clk); + int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id); + long (*round_rate)(struct clk *clk, unsigned long rate); +}; + +struct clk { + struct list_head node; + const char *name; + int id; + struct module *owner; + + struct clk *parent; + const struct clk_ops *ops; + + struct kref kref; + + unsigned long rate; + unsigned long flags; +}; + +#define CLK_ALWAYS_ENABLED (1 << 0) +#define CLK_RATE_PROPAGATES (1 << 1) + +int clk_init(void); + +int sw64_set_rate(int index, unsigned long rate); + +struct clk *sw64_clk_get(struct device *dev, const char *id); + +unsigned long sw64_clk_get_rate(struct clk *clk); + +void sw64_update_clockevents(unsigned long cpu, u32 freq); + +void sw64_store_policy(struct cpufreq_policy 
*policy); +#endif /* _ASM_SW64_CLOCK_H */ diff --git a/arch/sw_64/include/asm/cputime.h b/arch/sw_64/include/asm/cputime.h index bada5a01d887b7aa58bf9c1349197bc5ba206934..cdd46b05e22840bbbe033ca200951269afa0b98f 100644 --- a/arch/sw_64/include/asm/cputime.h +++ b/arch/sw_64/include/asm/cputime.h @@ -2,6 +2,8 @@ #ifndef _ASM_SW64_CPUTIME_H #define _ASM_SW64_CPUTIME_H -#include +typedef u64 __nocast cputime64_t; + +#define jiffies64_to_cputime64(__jif) ((__force cputime64_t)(__jif)) #endif /* _ASM_SW64_CPUTIME_H */ diff --git a/arch/sw_64/include/asm/early_ioremap.h b/arch/sw_64/include/asm/early_ioremap.h index 6f6fc6218cb38562da1469cc677687ac1eddf634..930c6bf36ad3c69a0be9dbe175649fc17d780d84 100644 --- a/arch/sw_64/include/asm/early_ioremap.h +++ b/arch/sw_64/include/asm/early_ioremap.h @@ -14,7 +14,7 @@ early_ioremap(unsigned long phys_addr, unsigned long size) y = (unsigned long) phys_to_virt(__pa(phys_addr)); } else { y = phys_addr; - y += PAGE_OFFSET; + y |= PAGE_OFFSET; } return (void __iomem *) y; diff --git a/arch/sw_64/include/asm/hw_init.h b/arch/sw_64/include/asm/hw_init.h index 9a56590ef653e4d7107f0f581da5e25d1187f455..f60a58570a9219c5d796d0383843b6c62d1eb93b 100644 --- a/arch/sw_64/include/asm/hw_init.h +++ b/arch/sw_64/include/asm/hw_init.h @@ -2,6 +2,7 @@ #ifndef _ASM_SW64_HW_INIT_H #define _ASM_SW64_HW_INIT_H #include +#include #define MMSIZE __va(0x2040) @@ -96,26 +97,17 @@ static inline bool icache_is_vivt_no_ictag(void) return (cpu_desc.arch_var == 0x3 && cpu_desc.arch_rev == 0x1); } -enum RUNMODE { - HOST_MODE = 0, - GUEST_MODE = 1, - EMUL_MODE = 2, -}; - -static inline bool is_in_host(void) -{ - return !cpu_desc.run_mode; -} +#define EMUL_FLAG (0x1UL << 63) +#define MMSIZE_MASK (EMUL_FLAG - 1) -static inline bool is_in_guest(void) -{ - return cpu_desc.run_mode == GUEST_MODE; -} +DECLARE_STATIC_KEY_TRUE(run_mode_host_key); +DECLARE_STATIC_KEY_FALSE(run_mode_guest_key); +DECLARE_STATIC_KEY_FALSE(run_mode_emul_key); -static inline bool is_guest_or_emul(void) -{ - return !!cpu_desc.run_mode; -} +#define is_in_host() static_branch_likely(&run_mode_host_key) +#define is_in_guest() static_branch_unlikely(&run_mode_guest_key) +#define is_in_emul() static_branch_unlikely(&run_mode_emul_key) +#define is_guest_or_emul() !static_branch_likely(&run_mode_host_key) #define CPU_SW3231 0x31 #define CPU_SW831 0x32 @@ -176,5 +168,6 @@ static inline bool is_guest_or_emul(void) #define CACHE_INDEX_BITS_MASK (0x3fUL << CACHE_INDEX_BITS_SHIFT) #define CACHE_INDEX_BITS(val) \ (((val) & CACHE_INDEX_BITS_MASK) >> CACHE_INDEX_BITS_SHIFT) +#define current_cpu_data cpu_data[smp_processor_id()] #endif /* HW_INIT_H */ diff --git a/arch/sw_64/include/asm/kvm_host.h b/arch/sw_64/include/asm/kvm_host.h index 913a2e9789c11c4764a934e68a55469c35af5417..e4ebb993153ccc619036b9035942eafb1c937a45 100644 --- a/arch/sw_64/include/asm/kvm_host.h +++ b/arch/sw_64/include/asm/kvm_host.h @@ -46,7 +46,8 @@ struct kvm_arch_memory_slot { }; struct kvm_arch { - struct swvm_mem mem; + unsigned long host_phys_addr; + unsigned long size; }; diff --git a/arch/sw_64/include/asm/mmu_context.h b/arch/sw_64/include/asm/mmu_context.h index 6b2ab3224ec9a2b2028fb99722a5bbc3ac2e6605..e3d7ae7c873e10b70b161c8dd4dee8d3c278283a 100644 --- a/arch/sw_64/include/asm/mmu_context.h +++ b/arch/sw_64/include/asm/mmu_context.h @@ -48,7 +48,6 @@ __reload_thread(struct pcb_struct *pcb) */ #ifdef CONFIG_SUBARCH_C3B -#define MAX_ASN 1023 #define WIDTH_HARDWARE_ASN 10 #endif @@ -89,7 +88,7 @@ __get_new_mm_context(struct mm_struct *mm, long cpu) 
unsigned long asn = cpu_last_asn(cpu); unsigned long next = asn + 1; - if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) { + if ((asn & HARDWARE_ASN_MASK) >= HARDWARE_ASN_MASK) { tbiap(); next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION; } diff --git a/arch/sw_64/include/asm/numa.h b/arch/sw_64/include/asm/numa.h index 47071007e8ff6ae62b8bb328684baa5a8b92380a..4ea8b8de248af170f1cc9107e20d078f17a29f37 100644 --- a/arch/sw_64/include/asm/numa.h +++ b/arch/sw_64/include/asm/numa.h @@ -4,6 +4,7 @@ #define _ASM_SW64_NUMA_H #include +#include #ifdef CONFIG_NUMA extern nodemask_t numa_nodes_parsed __initdata; diff --git a/arch/sw_64/include/asm/pci.h b/arch/sw_64/include/asm/pci.h index 7e0c03da1d17a7113f9d288f3df081d279ba88c0..ed875e0c31626233b4e5d3e3a3f6d25a11bdd65c 100644 --- a/arch/sw_64/include/asm/pci.h +++ b/arch/sw_64/include/asm/pci.h @@ -15,7 +15,6 @@ struct pci_dev; struct pci_bus; struct resource; -struct pci_iommu_arena; struct sunway_iommu; struct page; @@ -42,13 +41,11 @@ struct pci_controller { unsigned long node; DECLARE_BITMAP(piu_msiconfig, 256); int int_irq; + int service_irq; /* For compatibility with current (as of July 2003) pciutils - and XFree86. Eventually will be removed. */ + * and XFree86. Eventually will be removed. + */ unsigned int need_domain_info; - - struct pci_iommu_arena *sg_pci; - struct pci_iommu_arena *sg_isa; - bool iommu_enable; struct sunway_iommu *pci_iommu; int first_busno; @@ -66,27 +63,23 @@ struct pci_controller { #define PCIBIOS_MIN_IO 0 #define PCIBIOS_MIN_MEM 0 -extern void pcibios_set_master(struct pci_dev *dev); +/* generic pci stuff */ +#include + extern void __init sw64_init_pci(void); extern void __init sw64_device_interrupt(unsigned long vector); extern void __init sw64_init_irq(void); extern void __init sw64_init_arch(void); -extern unsigned char sw64_swizzle(struct pci_dev *dev, u8 *pinp); extern struct pci_ops sw64_pci_ops; extern int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); extern struct pci_controller *hose_head; -/* TODO: integrate with include/asm-generic/pci.h ? */ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ - return channel ? 15 : 14; -} - #ifdef CONFIG_SUNWAY_IOMMU extern struct syscore_ops iommu_cpu_syscore_ops; #endif -#define pci_domain_nr(bus) 0 +#ifdef CONFIG_PCI_DOMAINS +static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } static inline int pci_proc_domain(struct pci_bus *bus) { @@ -94,6 +87,7 @@ static inline int pci_proc_domain(struct pci_bus *bus) return hose->need_domain_info; } +#endif #ifdef CONFIG_NUMA static inline int __pcibus_to_node(const struct pci_bus *bus) diff --git a/arch/sw_64/include/asm/pgtable.h b/arch/sw_64/include/asm/pgtable.h index 32fde38a2be0b196c33bdc33111bdb049e0a17a5..590f15508e28bcee9693531f09ae942dc7bb8adb 100644 --- a/arch/sw_64/include/asm/pgtable.h +++ b/arch/sw_64/include/asm/pgtable.h @@ -26,10 +26,18 @@ struct vm_area_struct; * hook is made available. 
*/ #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) -#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + set_pte(ptep, pteval); +} #define set_pmd(pmdptr, pmdval) ((*(pmdptr)) = (pmdval)) -#define set_pmd_at(mm, addr, pmdp, pmdval) set_pmd(pmdp, pmdval) +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval) +{ + set_pmd(pmdp, pmdval); +} /* PGDIR_SHIFT determines what a forth-level page table entry can map */ #define PGDIR_SHIFT (PAGE_SHIFT + 3 * (PAGE_SHIFT - 3)) @@ -81,6 +89,7 @@ struct vm_area_struct; #define _PAGE_PHU 0x0020 /* used for 256M page size bit */ #define _PAGE_PSE 0x0040 /* used for 8M page size bit */ #define _PAGE_PROTNONE 0x0080 /* used for numa page balancing */ +#define _PAGE_SPECIAL 0x0100 #define _PAGE_KRE 0x0400 /* xxx - see below on the "accessed" bit */ #define _PAGE_URE 0x0800 /* xxx */ #define _PAGE_KWE 0x4000 /* used to do the dirty bit in software */ @@ -115,7 +124,7 @@ struct vm_area_struct; #define _PTE_FLAGS_BITS (64 - _PFN_BITS) #define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS) -#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS) +#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS | _PAGE_SPECIAL) #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PHU) /* @@ -448,6 +457,11 @@ static inline int pte_young(pte_t pte) return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_special(pte_t pte) +{ + return pte_val(pte) & _PAGE_SPECIAL; +} + static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; @@ -491,6 +505,12 @@ static inline pte_t pte_mkhuge(pte_t pte) return pte; } +static inline pte_t pte_mkspecial(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} + #ifdef CONFIG_NUMA_BALANCING /* * See the comment in include/asm-generic/pgtable.h diff --git a/arch/sw_64/include/asm/ptrace.h b/arch/sw_64/include/asm/ptrace.h index 1dde5e6cba8ad46753d0db8cccb539d854777d7e..74349a05b9e446f36cc2f06cdb0980a3ba43de2c 100644 --- a/arch/sw_64/include/asm/ptrace.h +++ b/arch/sw_64/include/asm/ptrace.h @@ -3,7 +3,11 @@ #define _ASM_SW64_PTRACE_H #include - +#include +#include +#include +#include +#include #define arch_has_single_step() (1) #define user_mode(regs) (((regs)->ps & 8) != 0) @@ -41,6 +45,8 @@ static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset) return *(unsigned long *)((unsigned long)regs + offset); } extern int regs_query_register_offset(const char *name); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); static inline unsigned long regs_return_value(struct pt_regs *regs) { diff --git a/arch/sw_64/include/asm/topology.h b/arch/sw_64/include/asm/topology.h index 79af6349fe80bfca3a264a95581b66198770ee22..f8242d00290b8cecef3a59582b69b09756b74cc8 100644 --- a/arch/sw_64/include/asm/topology.h +++ b/arch/sw_64/include/asm/topology.h @@ -32,28 +32,21 @@ static inline int rcid_to_package(int rcid) #ifdef CONFIG_NUMA -#ifndef CONFIG_USE_PERCPU_NUMA_NODE_ID -extern int cpuid_to_nid(int cpuid); -static inline int cpu_to_node(int cpu) -{ - int node; - - node = cpuid_to_nid(cpu); - -#ifdef DEBUG_NUMA - BUG_ON(node < 0); -#endif - - return node; -} - -static inline void set_cpu_numa_node(int cpu, int node) { } -#endif /* CONFIG_USE_PERCPU_NUMA_NODE_ID */ - +#ifndef CONFIG_DEBUG_PER_CPU_MAPS +extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +/* 
Returns a pointer to the cpumask of CPUs on Node 'node'. */ +#define cpumask_of_node(node) ((node) == NUMA_NO_NODE ? \ + cpu_all_mask : \ + node_to_cpumask_map[node]) +#else extern const struct cpumask *cpumask_of_node(int node); +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ + extern void numa_add_cpu(unsigned int cpu); extern void numa_remove_cpu(unsigned int cpu); extern void numa_store_cpu_info(unsigned int cpu); +extern int __node_distance(int from, int to); +#define node_distance(a, b) __node_distance(a, b) #define parent_node(node) (node) #define cpumask_of_pcibus(bus) (cpu_online_mask) #else /* !CONFIG_NUMA */ diff --git a/arch/sw_64/include/asm/unistd.h b/arch/sw_64/include/asm/unistd.h index c1778adf4fbab44f1b577293d8eb3cd1536a44d0..6d1b8d1e201167d56c5e6ca8e0ecff89160b7af1 100644 --- a/arch/sw_64/include/asm/unistd.h +++ b/arch/sw_64/include/asm/unistd.h @@ -4,7 +4,7 @@ #include -#define NR_SYSCALLS 519 +#define NR_SYSCALLS __NR_syscalls #define NR_syscalls NR_SYSCALLS #define __ARCH_WANT_NEW_STAT @@ -22,5 +22,6 @@ #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_CLONE3 #endif /* _ASM_SW64_UNISTD_H */ diff --git a/arch/sw_64/include/asm/user.h b/arch/sw_64/include/asm/user.h deleted file mode 100644 index a6ff58097ea3ca993bce1287cf064c58cfe7dae8..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/user.h +++ /dev/null @@ -1,53 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_USER_H -#define _ASM_SW64_USER_H - -#include -#include - -#include -#include - -/* - * Core file format: The core file is written in such a way that gdb - * can understand it and provide useful information to the user (under - * linux we use the `trad-core' bfd). The file contents are as follows: - * - * upage: 1 page consisting of a user struct that tells gdb - * what is present in the file. Directly after this is a - * copy of the task_struct, which is currently not used by gdb, - * but it may come in handy at some point. All of the registers - * are stored as part of the upage. The upage should always be - * only one page long. - * data: The data segment follows next. We use current->end_text to - * current->brk to pick up all of the user variables, plus any memory - * that may have been sbrk'ed. No attempt is made to determine if a - * page is demand-zero or if a page is totally unused, we just cover - * the entire range. All of the addresses are rounded in such a way - * that an integral number of pages is written. - * stack: We need the stack information in order to get a meaningful - * backtrace. We need to write the data from usp to - * current->start_stack, so we round each of these in order to be able - * to write an integer number of pages. 
- */ -struct user { - unsigned long regs[EF_SIZE/8+32]; /* integer and fp regs */ - size_t u_tsize; /* text size (pages) */ - size_t u_dsize; /* data size (pages) */ - size_t u_ssize; /* stack size (pages) */ - unsigned long start_code; /* text starting address */ - unsigned long start_data; /* data starting address */ - unsigned long start_stack; /* stack starting address */ - long signal; /* signal causing core dump */ - unsigned long u_ar0; /* help gdb find registers */ - unsigned long magic; /* identifies a core file */ - char u_comm[32]; /* user command name */ -}; - -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_DATA_START_ADDR (u.start_data) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) - -#endif /* _ASM_SW64_USER_H */ diff --git a/arch/sw_64/include/asm/vdso.h b/arch/sw_64/include/asm/vdso.h index 8ecd5add42ad89871fbc23edbc4a165777f4da01..7a2e23c648f3d48c50ab72709e9d2091aee9410c 100644 --- a/arch/sw_64/include/asm/vdso.h +++ b/arch/sw_64/include/asm/vdso.h @@ -41,8 +41,8 @@ struct vdso_data { u64 xtime_sec; u64 xtime_nsec; - u32 wall_to_mono_sec; - u32 wall_to_mono_nsec; + u64 wall_to_mono_sec; + u64 wall_to_mono_nsec; u32 cs_shift; u32 cs_mult; u64 cs_cycle_last; diff --git a/arch/sw_64/include/asm/vga.h b/arch/sw_64/include/asm/vga.h index 3ca5c397b9460c5fab43e3c0ceb3cd4cc363a738..28adb8b8b7f191e40cba3f1d0aef200b80aa3a6c 100644 --- a/arch/sw_64/include/asm/vga.h +++ b/arch/sw_64/include/asm/vga.h @@ -55,29 +55,29 @@ extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count); extern struct pci_controller *pci_vga_hose; -# define __is_port_vga(a) \ +#define __is_port_vga(a) \ (((a) >= 0x3b0) && ((a) < 0x3e0) && \ ((a) != 0x3b3) && ((a) != 0x3d3)) -# define __is_mem_vga(a) \ +#define __is_mem_vga(a) \ (((a) >= 0xa0000) && ((a) <= 0xc0000)) -# define FIXUP_IOADDR_VGA(a) do { \ +#define FIXUP_IOADDR_VGA(a) do { \ if (pci_vga_hose && __is_port_vga(a)) \ (a) += pci_vga_hose->io_space->start; \ } while (0) -# define FIXUP_MEMADDR_VGA(a) do { \ +#define FIXUP_MEMADDR_VGA(a) do { \ if (pci_vga_hose && __is_mem_vga(a)) \ (a) += pci_vga_hose->mem_space->start; \ } while (0) #else /* CONFIG_VGA_HOSE */ -# define pci_vga_hose 0 -# define __is_port_vga(a) 0 -# define __is_mem_vga(a) 0 -# define FIXUP_IOADDR_VGA(a) -# define FIXUP_MEMADDR_VGA(a) +#define pci_vga_hose 0 +#define __is_port_vga(a) 0 +#define __is_mem_vga(a) 0 +#define FIXUP_IOADDR_VGA(a) +#define FIXUP_MEMADDR_VGA(a) #endif /* CONFIG_VGA_HOSE */ #define VGA_MAP_MEM(x, s) ((unsigned long)ioremap(x, s)) diff --git a/arch/sw_64/include/uapi/asm/a.out.h b/arch/sw_64/include/uapi/asm/a.out.h deleted file mode 100644 index addb648b8ed67d754d963458778223411aeb0150..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/a.out.h +++ /dev/null @@ -1,88 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_A_OUT_H -#define _UAPI_ASM_SW64_A_OUT_H - -#include - -/* - * ECOFF header structs. ECOFF files consist of: - * - a file header (struct filehdr), - * - an a.out header (struct aouthdr), - * - one or more section headers (struct scnhdr). - * The filhdr's "f_nscns" field contains the - * number of section headers. - */ - -struct filehdr { - /* "file" header */ - __u16 f_magic, f_nscns; - __u32 f_timdat; - __u64 f_symptr; - __u32 f_nsyms; - __u16 f_opthdr, f_flags; -}; - -struct aouthdr { - __u64 info; /* after that it looks quite normal.. 
*/ - __u64 tsize; - __u64 dsize; - __u64 bsize; - __u64 entry; - __u64 text_start; /* with a few additions that actually make sense */ - __u64 data_start; - __u64 bss_start; - __u32 gprmask, fprmask; /* bitmask of general & floating point regs used in binary */ - __u64 gpvalue; -}; - -struct scnhdr { - char s_name[8]; - __u64 s_paddr; - __u64 s_vaddr; - __u64 s_size; - __u64 s_scnptr; - __u64 s_relptr; - __u64 s_lnnoptr; - __u16 s_nreloc; - __u16 s_nlnno; - __u32 s_flags; -}; - -struct exec { - /* "file" header */ - struct filehdr fh; - struct aouthdr ah; -}; - -/* - * Define's so that the kernel exec code can access the a.out header - * fields... - */ -#define a_info ah.info -#define a_text ah.tsize -#define a_data ah.dsize -#define a_bss ah.bsize -#define a_entry ah.entry -#define a_textstart ah.text_start -#define a_datastart ah.data_start -#define a_bssstart ah.bss_start -#define a_gprmask ah.gprmask -#define a_fprmask ah.fprmask -#define a_gpvalue ah.gpvalue - -#define N_TXTADDR(x) ((x).a_textstart) -#define N_DATADDR(x) ((x).a_datastart) -#define N_BSSADDR(x) ((x).a_bssstart) -#define N_DRSIZE(x) 0 -#define N_TRSIZE(x) 0 -#define N_SYMSIZE(x) 0 - -#define AOUTHSZ sizeof(struct aouthdr) -#define SCNHSZ sizeof(struct scnhdr) -#define SCNROUND 16 - -#define N_TXTOFF(x) \ - ((long) N_MAGIC(x) == ZMAGIC ? 0 : \ - (sizeof(struct exec) + (x).fh.f_nscns * SCNHSZ + SCNROUND - 1) & ~(SCNROUND - 1)) - -#endif /* _UAPI_ASM_SW64_A_OUT_H */ diff --git a/arch/sw_64/include/uapi/asm/auxvec.h b/arch/sw_64/include/uapi/asm/auxvec.h index 59854f3ac501952bd55aa91f823e47af1de89fbb..309a8294be7a839fac7c55fe5959d8b1ad1404fc 100644 --- a/arch/sw_64/include/uapi/asm/auxvec.h +++ b/arch/sw_64/include/uapi/asm/auxvec.h @@ -1,28 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_AUXVEC_H #define _UAPI_ASM_SW64_AUXVEC_H -/* Reserve these numbers for any future use of a VDSO. */ -#if 1 -#define AT_SYSINFO 32 +/* VDSO location. */ #define AT_SYSINFO_EHDR 33 -#endif -/* - * More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the - * value is -1, then the cache doesn't exist. Otherwise: - * - * bit 0-3: Cache set-associativity; 0 means fully associative. - * bit 4-7: Log2 of cacheline size. - * bit 8-31: Size of the entire cache >> 8. - * bit 32-63: Reserved. 
- */ - -#define AT_L1I_CACHESHAPE 34 -#define AT_L1D_CACHESHAPE 35 -#define AT_L2_CACHESHAPE 36 -#define AT_L3_CACHESHAPE 37 - -#define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */ +/* entries in ARCH_DLINFO */ +#define AT_VECTOR_SIZE_ARCH 1 #endif /* _UAPI_ASM_SW64_AUXVEC_H */ diff --git a/arch/sw_64/include/uapi/asm/bitsperlong.h b/arch/sw_64/include/uapi/asm/bitsperlong.h index 5d2c677a86b80ea42add009167429e1aa69f65ed..712c823e23d82cc177f74a77fed021a68e35a941 100644 --- a/arch/sw_64/include/uapi/asm/bitsperlong.h +++ b/arch/sw_64/include/uapi/asm/bitsperlong.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_BITSPERLONG_H #define _UAPI_ASM_SW64_BITSPERLONG_H diff --git a/arch/sw_64/include/uapi/asm/byteorder.h b/arch/sw_64/include/uapi/asm/byteorder.h index 1b1698df58ca68cd11eead76be10a6a9d9347c18..ededdd045e96b2dc915be110f6d278a5bbc58654 100644 --- a/arch/sw_64/include/uapi/asm/byteorder.h +++ b/arch/sw_64/include/uapi/asm/byteorder.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_BYTEORDER_H #define _UAPI_ASM_SW64_BYTEORDER_H diff --git a/arch/sw_64/include/uapi/asm/compiler.h b/arch/sw_64/include/uapi/asm/compiler.h index e5cf0fb170fa2626adfb5a13cf764d9c2f778599..64786df0f2668734957147e7ba30dbb52feb8dd1 100644 --- a/arch/sw_64/include/uapi/asm/compiler.h +++ b/arch/sw_64/include/uapi/asm/compiler.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_COMPILER_H #define _UAPI_ASM_SW64_COMPILER_H diff --git a/arch/sw_64/include/uapi/asm/console.h b/arch/sw_64/include/uapi/asm/console.h index 91246b759ecf1c940906a7b0914e821aff23a0ec..a40cd7aeb31f9e21216131f2db0a635d41b57e09 100644 --- a/arch/sw_64/include/uapi/asm/console.h +++ b/arch/sw_64/include/uapi/asm/console.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_CONSOLE_H #define _UAPI_ASM_SW64_CONSOLE_H diff --git a/arch/sw_64/include/uapi/asm/errno.h b/arch/sw_64/include/uapi/asm/errno.h index 04b07f30c787d9ab27d00bfd0b82d3f6889e3677..0d8438f6bd402aea665d96c8f7d983271ad91d24 100644 --- a/arch/sw_64/include/uapi/asm/errno.h +++ b/arch/sw_64/include/uapi/asm/errno.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_ERRNO_H #define _UAPI_ASM_SW64_ERRNO_H diff --git a/arch/sw_64/include/uapi/asm/fcntl.h b/arch/sw_64/include/uapi/asm/fcntl.h index 29c3aece8b555dbb265f6db7693ad3827b5bc70d..99e1a31c5e8606000808bd62c6a6db25c0982501 100644 --- a/arch/sw_64/include/uapi/asm/fcntl.h +++ b/arch/sw_64/include/uapi/asm/fcntl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_FCNTL_H #define _UAPI_ASM_SW64_FCNTL_H diff --git a/arch/sw_64/include/uapi/asm/fpu.h b/arch/sw_64/include/uapi/asm/fpu.h index 9b25f97e6a3a0416c533ad8d4995c29b393d671c..035ca65b1ba38a711348e5bb85c1b22f5d699eef 100644 --- a/arch/sw_64/include/uapi/asm/fpu.h +++ b/arch/sw_64/include/uapi/asm/fpu.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_FPU_H #define _UAPI_ASM_SW64_FPU_H diff --git 
a/arch/sw_64/include/uapi/asm/gentrap.h b/arch/sw_64/include/uapi/asm/gentrap.h index 4345058291fbc6d6735b22cd73b1a140e4085538..3786b8b52add336464589167bf99296d59e74c65 100644 --- a/arch/sw_64/include/uapi/asm/gentrap.h +++ b/arch/sw_64/include/uapi/asm/gentrap.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_GENTRAP_H #define _UAPI_ASM_SW64_GENTRAP_H diff --git a/arch/sw_64/include/uapi/asm/hmcall.h b/arch/sw_64/include/uapi/asm/hmcall.h index 524101102fb8a2bb2b5010fc7d9cdb4fd3d95f8b..f10378ba99c8042db62524c013ba64a44a5c2d48 100644 --- a/arch/sw_64/include/uapi/asm/hmcall.h +++ b/arch/sw_64/include/uapi/asm/hmcall.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_HMCALL_H #define _UAPI_ASM_SW64_HMCALL_H diff --git a/arch/sw_64/include/uapi/asm/ioctl.h b/arch/sw_64/include/uapi/asm/ioctl.h index d62f10a6fa643c6361476039bb04b7303ecbb760..fb5267b034fca832f44e0c0794b5d0248f0e86b8 100644 --- a/arch/sw_64/include/uapi/asm/ioctl.h +++ b/arch/sw_64/include/uapi/asm/ioctl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_IOCTL_H #define _UAPI_ASM_SW64_IOCTL_H diff --git a/arch/sw_64/include/uapi/asm/ioctls.h b/arch/sw_64/include/uapi/asm/ioctls.h index eab34173f222fd2bab60af216bb66717d7cddb73..db8e456290e6592da5e15e7d68b4449729b9ffb9 100644 --- a/arch/sw_64/include/uapi/asm/ioctls.h +++ b/arch/sw_64/include/uapi/asm/ioctls.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_IOCTLS_H #define _UAPI_ASM_SW64_IOCTLS_H @@ -52,20 +52,20 @@ #define TIOCMBIS 0x5416 #define TIOCMBIC 0x5417 #define TIOCMSET 0x5418 -# define TIOCM_LE 0x001 -# define TIOCM_DTR 0x002 -# define TIOCM_RTS 0x004 -# define TIOCM_ST 0x008 -# define TIOCM_SR 0x010 -# define TIOCM_CTS 0x020 -# define TIOCM_CAR 0x040 -# define TIOCM_RNG 0x080 -# define TIOCM_DSR 0x100 -# define TIOCM_CD TIOCM_CAR -# define TIOCM_RI TIOCM_RNG -# define TIOCM_OUT1 0x2000 -# define TIOCM_OUT2 0x4000 -# define TIOCM_LOOP 0x8000 +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 #define TIOCGSOFTCAR 0x5419 #define TIOCSSOFTCAR 0x541A @@ -74,14 +74,14 @@ #define TIOCGSERIAL 0x541E #define TIOCSSERIAL 0x541F #define TIOCPKT 0x5420 -# define TIOCPKT_DATA 0 -# define TIOCPKT_FLUSHREAD 1 -# define TIOCPKT_FLUSHWRITE 2 -# define TIOCPKT_STOP 4 -# define TIOCPKT_START 8 -# define TIOCPKT_NOSTOP 16 -# define TIOCPKT_DOSTOP 32 -# define TIOCPKT_IOCTL 64 +#define TIOCPKT_DATA 0 +#define TIOCPKT_FLUSHREAD 1 +#define TIOCPKT_FLUSHWRITE 2 +#define TIOCPKT_STOP 4 +#define TIOCPKT_START 8 +#define TIOCPKT_NOSTOP 16 +#define TIOCPKT_DOSTOP 32 +#define TIOCPKT_IOCTL 64 #define TIOCNOTTY 0x5422 @@ -113,7 +113,7 @@ #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ #define TIOCSERGETLSR 0x5459 /* Get line status register */ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ -# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#define TIOCSER_TEMT 0x01 /* Transmitter 
physically empty */ #define TIOCSERGETMULTI 0x545A /* Get multiport config */ #define TIOCSERSETMULTI 0x545B /* Set multiport config */ diff --git a/arch/sw_64/include/uapi/asm/ipcbuf.h b/arch/sw_64/include/uapi/asm/ipcbuf.h index f063105ba09f307c72e25f1374e3453497481eca..553cdb37052db1d6c8e90d9bc043de5a55f8fc7e 100644 --- a/arch/sw_64/include/uapi/asm/ipcbuf.h +++ b/arch/sw_64/include/uapi/asm/ipcbuf.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_IPCBUF_H #define _UAPI_ASM_SW64_IPCBUF_H diff --git a/arch/sw_64/include/uapi/asm/kvm.h b/arch/sw_64/include/uapi/asm/kvm.h index 47877b56e980facf32d680898c389a8d879ea75a..ff1b6e7f096f77405394fa307a4f64b304598e33 100644 --- a/arch/sw_64/include/uapi/asm/kvm.h +++ b/arch/sw_64/include/uapi/asm/kvm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_KVM_H #define _UAPI_ASM_SW64_KVM_H @@ -114,16 +114,4 @@ struct kvm_sync_regs { struct kvm_sregs { }; - -struct swvm_mem_bank { - unsigned long guest_phys_addr; - unsigned long host_phys_addr; - unsigned long host_addr; - unsigned long size; -}; - -struct swvm_mem { - struct swvm_mem_bank membank[SWVM_NUM_NUMA_MEMBANKS]; -}; - #endif /* _UAPI_ASM_SW64_KVM_H */ diff --git a/arch/sw_64/include/uapi/asm/kvm_para.h b/arch/sw_64/include/uapi/asm/kvm_para.h index 405840b0e1d8f827e7b1db8166f1cc12079a98e9..3c0f9fa712abfc725b4d5925192cdf5ef914943c 100644 --- a/arch/sw_64/include/uapi/asm/kvm_para.h +++ b/arch/sw_64/include/uapi/asm/kvm_para.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_KVM_PARA_H #define _UAPI_ASM_SW64_KVM_PARA_H diff --git a/arch/sw_64/include/uapi/asm/mman.h b/arch/sw_64/include/uapi/asm/mman.h index f9ac285702a522819102af9e940d32ff9401a2b0..57970e1e3a2cb6514b5650b012738b74f4c82238 100644 --- a/arch/sw_64/include/uapi/asm/mman.h +++ b/arch/sw_64/include/uapi/asm/mman.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_MMAN_H #define _UAPI_ASM_SW64_MMAN_H diff --git a/arch/sw_64/include/uapi/asm/msgbuf.h b/arch/sw_64/include/uapi/asm/msgbuf.h index d61eea10813d65698f7edeef2a756a6782f833a2..b938df3664a0c6adc0a0774a9c974274326f22a4 100644 --- a/arch/sw_64/include/uapi/asm/msgbuf.h +++ b/arch/sw_64/include/uapi/asm/msgbuf.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_MSGBUF_H #define _UAPI_ASM_SW64_MSGBUF_H diff --git a/arch/sw_64/include/uapi/asm/param.h b/arch/sw_64/include/uapi/asm/param.h index 75eeac6a7dc85c0428f5c18e570e5427b6c87fe2..16c4934c937e5af2058c331029d9b96fb6b8a16a 100644 --- a/arch/sw_64/include/uapi/asm/param.h +++ b/arch/sw_64/include/uapi/asm/param.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_PARAM_H #define _UAPI_ASM_SW64_PARAM_H diff --git a/arch/sw_64/include/uapi/asm/poll.h b/arch/sw_64/include/uapi/asm/poll.h index 5e2de318205050bc66f0a72ff8b102d6d2042865..114d0344e37749982a1da54bed06916150b61979 100644 --- a/arch/sw_64/include/uapi/asm/poll.h +++ b/arch/sw_64/include/uapi/asm/poll.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 
#ifndef _UAPI_ASM_SW64_POLL_H #define _UAPI_ASM_SW64_POLL_H diff --git a/arch/sw_64/include/uapi/asm/posix_types.h b/arch/sw_64/include/uapi/asm/posix_types.h index fb7badf78c3ccb2db967989760a2f9fd9deb8358..182741aaa06e583deb69acbf1dacf4fbf1e1f6c6 100644 --- a/arch/sw_64/include/uapi/asm/posix_types.h +++ b/arch/sw_64/include/uapi/asm/posix_types.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_POSIX_TYPES_H #define _UAPI_ASM_SW64_POSIX_TYPES_H diff --git a/arch/sw_64/include/uapi/asm/ptrace.h b/arch/sw_64/include/uapi/asm/ptrace.h index 7cf7bf5a75b4ccf0fc3e005d5364e31f038b3dac..96cb10891bea7499009052806f7a6e9b22fc5dc4 100644 --- a/arch/sw_64/include/uapi/asm/ptrace.h +++ b/arch/sw_64/include/uapi/asm/ptrace.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_PTRACE_H #define _UAPI_ASM_SW64_PTRACE_H diff --git a/arch/sw_64/include/uapi/asm/reg.h b/arch/sw_64/include/uapi/asm/reg.h index a19dc4cbf744afb04377a8c66cd2f5a09cc155cb..e692e45a4936f4e492fca3f85aee3344fb572646 100644 --- a/arch/sw_64/include/uapi/asm/reg.h +++ b/arch/sw_64/include/uapi/asm/reg.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_REG_H #define _UAPI_ASM_SW64_REG_H diff --git a/arch/sw_64/include/uapi/asm/regdef.h b/arch/sw_64/include/uapi/asm/regdef.h index 5031abc0947af4aeebde27cbfb3017be2885c9a8..ad4475b7943517e62b8f0935c442a260bed9fa22 100644 --- a/arch/sw_64/include/uapi/asm/regdef.h +++ b/arch/sw_64/include/uapi/asm/regdef.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_REGDEF_H #define _UAPI_ASM_SW64_REGDEF_H diff --git a/arch/sw_64/include/uapi/asm/resource.h b/arch/sw_64/include/uapi/asm/resource.h index ff7dc683c195984e3815b73a92a2fb37cfe27290..fecca2214849d99ad460c547ff93a8c5d2f4e1c9 100644 --- a/arch/sw_64/include/uapi/asm/resource.h +++ b/arch/sw_64/include/uapi/asm/resource.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_RESOURCE_H #define _UAPI_ASM_SW64_RESOURCE_H diff --git a/arch/sw_64/include/uapi/asm/sembuf.h b/arch/sw_64/include/uapi/asm/sembuf.h index f574390bcd5782840e0e7e3a1a83cd9519cdd090..08b0876e739c580407e95fe7c24d2dcc90159d29 100644 --- a/arch/sw_64/include/uapi/asm/sembuf.h +++ b/arch/sw_64/include/uapi/asm/sembuf.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SEMBUF_H #define _UAPI_ASM_SW64_SEMBUF_H diff --git a/arch/sw_64/include/uapi/asm/setup.h b/arch/sw_64/include/uapi/asm/setup.h index fefd57415a3b7a75b25929706033025597571013..10ce5dba9c3066a3334f8ecfe59f569403024178 100644 --- a/arch/sw_64/include/uapi/asm/setup.h +++ b/arch/sw_64/include/uapi/asm/setup.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SETUP_H #define _UAPI_ASM_SW64_SETUP_H diff --git a/arch/sw_64/include/uapi/asm/shmbuf.h b/arch/sw_64/include/uapi/asm/shmbuf.h index 66d8cb5b2ba30f67745280f5ac7cae640d7c705b..4572337bee0224582ecb78e07619a77d624f6fff 100644 --- a/arch/sw_64/include/uapi/asm/shmbuf.h +++ b/arch/sw_64/include/uapi/asm/shmbuf.h @@ -1,4 +1,4 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SHMBUF_H #define _UAPI_ASM_SW64_SHMBUF_H diff --git a/arch/sw_64/include/uapi/asm/sigcontext.h b/arch/sw_64/include/uapi/asm/sigcontext.h index c2b7cff884ebbbdf9b902588e2468a91ad450c44..facbf34e920d4f57b2ba51069e760920f8cde730 100644 --- a/arch/sw_64/include/uapi/asm/sigcontext.h +++ b/arch/sw_64/include/uapi/asm/sigcontext.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SIGCONTEXT_H #define _UAPI_ASM_SW64_SIGCONTEXT_H diff --git a/arch/sw_64/include/uapi/asm/siginfo.h b/arch/sw_64/include/uapi/asm/siginfo.h index b50afbf15f7cd2ec201cdd2ab190b9da7c14026b..4a58eea9b67c9ab254f7142aeb37bf70758b5d8a 100644 --- a/arch/sw_64/include/uapi/asm/siginfo.h +++ b/arch/sw_64/include/uapi/asm/siginfo.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SIGINFO_H #define _UAPI_ASM_SW64_SIGINFO_H diff --git a/arch/sw_64/include/uapi/asm/signal.h b/arch/sw_64/include/uapi/asm/signal.h index 71471c8c762481b596cd33b6de27908151edbb9b..5bad0adae93f3ab8ff40abe9e3842bc45a6c7503 100644 --- a/arch/sw_64/include/uapi/asm/signal.h +++ b/arch/sw_64/include/uapi/asm/signal.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SIGNAL_H #define _UAPI_ASM_SW64_SIGNAL_H diff --git a/arch/sw_64/include/uapi/asm/socket.h b/arch/sw_64/include/uapi/asm/socket.h index abfa2108522c9a68e22304e0da71fe202973374c..d47041ebe08a2991423ef3ff1e238f6081479e04 100644 --- a/arch/sw_64/include/uapi/asm/socket.h +++ b/arch/sw_64/include/uapi/asm/socket.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SOCKET_H #define _UAPI_ASM_SW64_SOCKET_H +#include #include /* For setsockopt(2) */ @@ -51,13 +52,9 @@ #define SO_GET_FILTER SO_ATTACH_FILTER #define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP #define SO_PEERSEC 30 #define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 19 @@ -66,9 +63,6 @@ #define SO_MARK 36 -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - #define SO_RXQ_OVFL 40 #define SO_WIFI_STATUS 41 @@ -124,4 +118,28 @@ #define SO_DETACH_REUSEPORT_BPF 68 +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? 
SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + #endif /* _UAPI_ASM_SW64_SOCKET_H */ diff --git a/arch/sw_64/include/uapi/asm/sockios.h b/arch/sw_64/include/uapi/asm/sockios.h index 1f30fb881065ad6b38820d71cdf5ea0ed6486b3c..88e89dcf8300ea66394003769c351ed91c14cd55 100644 --- a/arch/sw_64/include/uapi/asm/sockios.h +++ b/arch/sw_64/include/uapi/asm/sockios.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SOCKIOS_H #define _UAPI_ASM_SW64_SOCKIOS_H diff --git a/arch/sw_64/include/uapi/asm/stat.h b/arch/sw_64/include/uapi/asm/stat.h index b1c1c5e3db22c25229a7b65236ef43daf171d8e0..d2b21128c56947bfeef183113ba6a51e149a91a1 100644 --- a/arch/sw_64/include/uapi/asm/stat.h +++ b/arch/sw_64/include/uapi/asm/stat.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_STAT_H #define _UAPI_ASM_SW64_STAT_H diff --git a/arch/sw_64/include/uapi/asm/statfs.h b/arch/sw_64/include/uapi/asm/statfs.h index 3b8d1e3300a91cf01f09b47603bd95d6d4fad4f7..b92d719238d182997e104abe30a06ae4766b654c 100644 --- a/arch/sw_64/include/uapi/asm/statfs.h +++ b/arch/sw_64/include/uapi/asm/statfs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_STATFS_H #define _UAPI_ASM_SW64_STATFS_H diff --git a/arch/sw_64/include/uapi/asm/swab.h b/arch/sw_64/include/uapi/asm/swab.h index a3d67645aa524c025c6e02c9e35dc8e3841c73c3..275661b346ac202afed612cee1f75bc1a0b6209e 100644 --- a/arch/sw_64/include/uapi/asm/swab.h +++ b/arch/sw_64/include/uapi/asm/swab.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SWAB_H #define _UAPI_ASM_SW64_SWAB_H diff --git a/arch/sw_64/include/uapi/asm/sysinfo.h b/arch/sw_64/include/uapi/asm/sysinfo.h index 9d2112f8bc4d4a1c314828013dd09f1419dd96c2..667405c3447cd841cda75d7e8cf624166db70d2c 100644 --- a/arch/sw_64/include/uapi/asm/sysinfo.h +++ b/arch/sw_64/include/uapi/asm/sysinfo.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * include/asm/sysinfo.h */ diff --git a/arch/sw_64/include/uapi/asm/termbits.h b/arch/sw_64/include/uapi/asm/termbits.h index bcb9adb11e81d4ce995205e6a0e52075c8a5242c..83de6ff63234f69a7ddf3b169c4e204961640d3d 100644 --- a/arch/sw_64/include/uapi/asm/termbits.h +++ b/arch/sw_64/include/uapi/asm/termbits.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_TERMBITS_H #define _UAPI_ASM_SW64_TERMBITS_H diff --git a/arch/sw_64/include/uapi/asm/termios.h b/arch/sw_64/include/uapi/asm/termios.h index d44e218b29b5587dd7d63aab6dcc97362aa2c5da..62f4b40551b241ff1ca7fd9c1c25e65415b976c4 100644 --- a/arch/sw_64/include/uapi/asm/termios.h +++ b/arch/sw_64/include/uapi/asm/termios.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_TERMIOS_H #define _UAPI_ASM_SW64_TERMIOS_H diff --git a/arch/sw_64/include/uapi/asm/types.h b/arch/sw_64/include/uapi/asm/types.h index 9c605ea7bba92a69d3ac8e77c7915896f12a5734..750b5181c3de747134d88a697ffd944cc2c78a24 100644 --- 
a/arch/sw_64/include/uapi/asm/types.h +++ b/arch/sw_64/include/uapi/asm/types.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_TYPES_H #define _UAPI_ASM_SW64_TYPES_H diff --git a/arch/sw_64/include/uapi/asm/unistd.h b/arch/sw_64/include/uapi/asm/unistd.h index 225358536dc9eb9cf358fb2477ab3de5d4056add..be844b2be9d5591b183e6fc54ae2d682a83c89d0 100644 --- a/arch/sw_64/include/uapi/asm/unistd.h +++ b/arch/sw_64/include/uapi/asm/unistd.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_UNISTD_H #define _UAPI_ASM_SW64_UNISTD_H @@ -9,9 +9,4 @@ #include -/* sw64 doesn't have protection keys. */ -#define __IGNORE_pkey_mprotect -#define __IGNORE_pkey_alloc -#define __IGNORE_pkey_free - #endif /* _UAPI_ASM_SW64_UNISTD_H */ diff --git a/arch/sw_64/kernel/Makefile b/arch/sw_64/kernel/Makefile index d9e2fcbc1e91aca998bc955be5ac596195d8a431..f6a2813b0466292d3337eeaef73f5aedb32399c9 100644 --- a/arch/sw_64/kernel/Makefile +++ b/arch/sw_64/kernel/Makefile @@ -29,8 +29,7 @@ obj-$(CONFIG_SUSPEND) += suspend_asm.o suspend.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_HIBERNATION) += hibernate_asm.o hibernate.o obj-$(CONFIG_AUDIT) += audit.o -obj-$(CONFIG_DIRECT_DMA) += pci_common.o -obj-$(CONFIG_SWIOTLB) += dma_swiotlb.o +obj-$(CONFIG_PCI) += pci_common.o obj-$(CONFIG_RELOCATABLE) += relocate.o obj-$(CONFIG_DEBUG_FS) += unaligned.o segvdbg.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o @@ -45,6 +44,8 @@ endif # Core logic support obj-$(CONFIG_SW64) += core.o timer.o +obj-$(CONFIG_SW64_CPUFREQ) += platform.o clock.o +obj-$(CONFIG_SW64_CPUAUTOPLUG) += cpuautoplug.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o diff --git a/arch/sw_64/kernel/acpi.c b/arch/sw_64/kernel/acpi.c index 1c1afe8e812e67e45e52d4fdf12fbe24d73c1c37..a0b5c4a57a07e698a8fd6e05c171e16c14a16c7e 100644 --- a/arch/sw_64/kernel/acpi.c +++ b/arch/sw_64/kernel/acpi.c @@ -2,25 +2,8 @@ #include #include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include + #include int acpi_disabled = 1; diff --git a/arch/sw_64/kernel/asm-offsets.c b/arch/sw_64/kernel/asm-offsets.c index 44e7fa77265edd327dbde23ad011556714bebfd6..bea12d2d96fe1ab3766e80cab22f185afa21e53d 100644 --- a/arch/sw_64/kernel/asm-offsets.c +++ b/arch/sw_64/kernel/asm-offsets.c @@ -5,17 +5,16 @@ * and format the required data. */ -#include #include #include -#include #include #include -#include + #include +#include + #include "traps.c" -#include void foo(void) { DEFINE(TI_TASK, offsetof(struct thread_info, task)); diff --git a/arch/sw_64/kernel/audit.c b/arch/sw_64/kernel/audit.c index adc4622211d21ce8e9f0114199949f87e7bcdcea..dcf58deee3e2018e0efa6cb355e8802aef04de35 100644 --- a/arch/sw_64/kernel/audit.c +++ b/arch/sw_64/kernel/audit.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include #include + #include static unsigned int dir_class[] = { diff --git a/arch/sw_64/kernel/cacheinfo.c b/arch/sw_64/kernel/cacheinfo.c index 5193d7544b5933d20474115a9f90eaeaddaf29b5..87d3f4bcd10f12efdea915910fbeb2ab9f25c50d 100644 --- a/arch/sw_64/kernel/cacheinfo.c +++ b/arch/sw_64/kernel/cacheinfo.c @@ -14,9 +14,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ -#include #include -#include + #include /* Populates leaf and increments to next leaf */ diff --git a/arch/sw_64/kernel/clock.c b/arch/sw_64/kernel/clock.c new file mode 100644 index 0000000000000000000000000000000000000000..f31f596a00521e4b8ea193eb8adc69896f272c92 --- /dev/null +++ b/arch/sw_64/kernel/clock.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/setup.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define CLK_PRT 0x1UL +#define CORE_CLK0_V (0x1UL << 1) +#define CORE_CLK0_R (0x1UL << 2) +#define CORE_CLK2_V (0x1UL << 15) +#define CORE_CLK2_R (0x1UL << 16) + +#define CLK_LV1_SEL_PRT 0x1UL +#define CLK_LV1_SEL_MUXA (0x1UL << 2) +#define CLK_LV1_SEL_MUXB (0x1UL << 3) + +#define CORE_PLL0_CFG_SHIFT 4 +#define CORE_PLL2_CFG_SHIFT 18 + +char curruent_policy[CPUFREQ_NAME_LEN]; + +/* Minimum CLK support */ +enum { + DC_0, DC_1, DC_2, DC_3, DC_4, DC_5, DC_6, DC_7, DC_8, + DC_9, DC_10, DC_11, DC_12, DC_13, DC_14, DC_15, DC_16, DC_RESV +}; + +static int cpu_freq[14] = { + 0, 1200, 1800, 1900, + 1950, 2000, 2050, 2100, + 2150, 2200, 2250, 2300, + 2350, 2400 }; + +struct cpufreq_frequency_table sw64_clockmod_table[] = { + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {0, DC_1, 0}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {0, DC_2, 0}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {0, DC_3, 0}, + {0, DC_4, 0}, + {0, DC_5, 0}, + {0, DC_6, 0}, + {0, DC_7, 0}, + {0, DC_8, 0}, + {0, DC_9, 0}, + {0, DC_10, 0}, + {0, DC_11, 0}, + {0, DC_12, 0}, + {0, DC_13, 0}, +{-1, DC_RESV, CPUFREQ_TABLE_END}, +}; +EXPORT_SYMBOL_GPL(sw64_clockmod_table); + +static struct clk cpu_clk = { + .name = "cpu_clk", + .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES, + .rate = 2400000000, +}; + +struct clk *sw64_clk_get(struct device *dev, const char *id) +{ + return &cpu_clk; +} +EXPORT_SYMBOL(sw64_clk_get); + +unsigned long sw64_clk_get_rate(struct clk *clk) +{ + if (!clk) + return 0; + + return (unsigned long)clk->rate; +} +EXPORT_SYMBOL(sw64_clk_get_rate); + +void sw64_store_policy(struct cpufreq_policy *policy) +{ + memcpy(curruent_policy, policy->governor->name, CPUFREQ_NAME_LEN); +} 
+EXPORT_SYMBOL_GPL(sw64_store_policy); + +int sw64_set_rate(int index, unsigned long rate) +{ + unsigned int i, val; + + rate /= 1000000; + + for (i = 0; i < sizeof(cpu_freq)/sizeof(int); i++) { + if (rate == cpu_freq[i]) { + index = i; + break; + } + } + + if (index < 0) + return -EINVAL; + + sw64_io_write(0, CLK_CTL, CORE_CLK2_R | CORE_CLK2_V | CLK_PRT); + sw64_io_write(1, CLK_CTL, CORE_CLK2_R | CORE_CLK2_V | CLK_PRT); + val = sw64_io_read(0, CLK_CTL); + + sw64_io_write(0, CLK_CTL, val | index << CORE_PLL2_CFG_SHIFT); + sw64_io_write(1, CLK_CTL, val | index << CORE_PLL2_CFG_SHIFT); + + udelay(1); + + sw64_io_write(0, CLK_CTL, CORE_CLK2_V | CLK_PRT + | index << CORE_PLL2_CFG_SHIFT); + sw64_io_write(1, CLK_CTL, CORE_CLK2_V | CLK_PRT + | index << CORE_PLL2_CFG_SHIFT); + val = sw64_io_read(0, CLK_CTL); + + /* LV1 select PLL1/PLL2 */ + sw64_io_write(0, CLU_LV1_SEL, CLK_LV1_SEL_MUXA | CLK_LV1_SEL_PRT); + sw64_io_write(1, CLU_LV1_SEL, CLK_LV1_SEL_MUXA | CLK_LV1_SEL_PRT); + + /* Set CLK_CTL PLL0 */ + sw64_io_write(0, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V); + sw64_io_write(1, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V); + + sw64_io_write(0, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + sw64_io_write(1, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + + udelay(1); + + sw64_io_write(0, CLK_CTL, val | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + sw64_io_write(1, CLK_CTL, val | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + + /* LV1 select PLL0/PLL1 */ + sw64_io_write(0, CLU_LV1_SEL, CLK_LV1_SEL_MUXB | CLK_LV1_SEL_PRT); + sw64_io_write(1, CLU_LV1_SEL, CLK_LV1_SEL_MUXB | CLK_LV1_SEL_PRT); + + return index; +} +EXPORT_SYMBOL_GPL(sw64_set_rate); diff --git a/arch/sw_64/kernel/core.c b/arch/sw_64/kernel/core.c index 4a35c1dc1e1934b65496e0030e7bb8cb3e095233..e26b3a5faab2fe5252d8189cd8582098d3e74d44 100644 --- a/arch/sw_64/kernel/core.c +++ b/arch/sw_64/kernel/core.c @@ -1,27 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef CONFIG_NUMA -#include -#endif -#include "pci_impl.h" -#ifdef CONFIG_NUMA #ifdef CONFIG_DISCONTIGMEM +#ifdef CONFIG_NUMA int pa_to_nid(unsigned long pa) { int i = 0; @@ -43,30 +25,11 @@ int pa_to_nid(unsigned long pa) return 0; } EXPORT_SYMBOL(pa_to_nid); -#endif /* CONFIG_DISCONTIGMEM */ - -#ifndef CONFIG_USE_PERCPU_NUMA_NODE_ID -extern int cpu_to_node_map[NR_CPUS]; -int cpuid_to_nid(int cpuid) -{ - return cpu_to_node_map[cpuid]; -} -EXPORT_SYMBOL(cpuid_to_nid); -#endif /* CONFIG_USE_PERCPU_NUMA_NODE_ID */ #else /* !CONFIG_NUMA */ -#ifdef CONFIG_DISCONTIGMEM int pa_to_nid(unsigned long pa) { return 0; } EXPORT_SYMBOL(pa_to_nid); -#endif /* CONFIG_DISCONTIGMEM */ - -#ifndef CONFIG_USE_PERCPU_NUMA_NODE_ID -int cpuid_to_nid(int cpuid) -{ - return 0; -} -EXPORT_SYMBOL(cpuid_to_nid); -#endif /* CONFIG_USE_PERCPU_NUMA_NODE_ID */ #endif /* CONFIG_NUMA */ +#endif /* CONFIG_DISCONTIGMEM */ diff --git a/arch/sw_64/kernel/cpuautoplug.c b/arch/sw_64/kernel/cpuautoplug.c new file mode 100644 index 0000000000000000000000000000000000000000..de6f77086185abdc164ffc7cdac79fa89f5f28ef --- /dev/null +++ b/arch/sw_64/kernel/cpuautoplug.c @@ -0,0 +1,496 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/setup.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +int 
autoplug_enabled; +int autoplug_verbose; +int autoplug_adjusting; + +DEFINE_PER_CPU(int, cpu_adjusting); + +struct cpu_autoplug_info { + cputime64_t prev_idle; + cputime64_t prev_wall; + struct delayed_work work; + unsigned int sampling_rate; + int maxcpus; /* max cpus for autoplug */ + int mincpus; /* min cpus for autoplug */ + int dec_reqs; /* continuous core-decreasing requests */ + int inc_reqs; /* continuous core-increasing requests */ +}; + +struct cpu_autoplug_info ap_info; + +static ssize_t enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", autoplug_enabled); +} + + +static ssize_t enabled_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > 1 || n < 0) + return -EINVAL; + + autoplug_enabled = n; + + return count; +} + +static ssize_t verbose_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", autoplug_verbose); +} + +static ssize_t verbose_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > 1 || n < 0) + return -EINVAL; + + autoplug_verbose = n; + + return count; +} + +static ssize_t maxcpus_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", ap_info.maxcpus); +} + +static ssize_t maxcpus_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > num_possible_cpus() || n < ap_info.mincpus) + return -EINVAL; + + ap_info.maxcpus = n; + + return count; +} + +static ssize_t mincpus_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", ap_info.mincpus); +} + +static ssize_t mincpus_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > ap_info.maxcpus || n < 1) + return -EINVAL; + + ap_info.mincpus = n; + + return count; +} + +static ssize_t sampling_rate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", ap_info.sampling_rate); +} + +#define SAMPLING_RATE_MAX 1000 +#define SAMPLING_RATE_MIN 600 + +static ssize_t sampling_rate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[6]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > SAMPLING_RATE_MAX || n < SAMPLING_RATE_MIN) + return -EINVAL; + + ap_info.sampling_rate = n; + + return count; +} + +static ssize_t available_value_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "enabled: 0-1\nverbose: 0-1\nmaxcpus:" + "1-%d\nmincpus: 1-%d\nsampling_rate: %d-%d\n", + num_possible_cpus(), num_possible_cpus(), + SAMPLING_RATE_MIN, SAMPLING_RATE_MAX); +} + +static DEVICE_ATTR_RW(enabled); +static DEVICE_ATTR_RW(verbose); +static DEVICE_ATTR_RW(maxcpus); +static DEVICE_ATTR_RW(mincpus); +static DEVICE_ATTR_RW(sampling_rate); +static DEVICE_ATTR(available_value, 0644, available_value_show, NULL); + +static struct attribute *cpuclass_default_attrs[] = { + &dev_attr_enabled.attr, + &dev_attr_verbose.attr, + &dev_attr_maxcpus.attr, + &dev_attr_mincpus.attr, 
+ &dev_attr_sampling_rate.attr, + &dev_attr_available_value.attr, + NULL +}; + +static struct attribute_group cpuclass_attr_group = { + .attrs = cpuclass_default_attrs, + .name = "cpuautoplug", +}; + +#ifndef MODULE +static int __init setup_autoplug(char *str) +{ + if (!strcmp(str, "off")) + autoplug_enabled = 0; + else if (!strcmp(str, "on")) + autoplug_enabled = 1; + else + return 0; + return 1; +} + +__setup("autoplug=", setup_autoplug); +#endif + +static cputime64_t calc_busy_time(unsigned int cpu) +{ + cputime64_t busy_time; + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + busy_time += 1; + + return busy_time; +} + +static inline cputime64_t get_idle_time_jiffy(cputime64_t *wall) +{ + unsigned int cpu; + cputime64_t idle_time = 0; + cputime64_t cur_wall_time; + cputime64_t busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + for_each_online_cpu(cpu) { + busy_time = calc_busy_time(cpu); + + idle_time += cur_wall_time - busy_time; + } + + if (wall) + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + + return (cputime64_t)jiffies_to_usecs(idle_time); +} + +static inline cputime64_t sw64_get_idle_time(cputime64_t *wall) +{ + unsigned int cpu; + u64 idle_time = 0; + + for_each_online_cpu(cpu) { + idle_time += get_cpu_idle_time_us(cpu, wall); + if (idle_time == -1ULL) + return get_idle_time_jiffy(wall); + } + + return idle_time; +} + +static cputime64_t get_min_busy_time(cputime64_t arr[], int size) +{ + int loop, min_idx; + cputime64_t min_time = arr[0]; + + for (loop = 1; loop < size; loop++) { + if (arr[loop] > 0) { + if (arr[loop] < min_time) { + min_time = arr[loop]; + min_idx = loop; + } + } + } + return min_idx; +} + +static int find_min_busy_cpu(void) +{ + int nr_all_cpus = num_possible_cpus(); + unsigned int cpus, target_cpu; + cputime64_t busy_time; + cputime64_t b_time[nr_all_cpus]; + + memset(b_time, 0, sizeof(b_time)); + for_each_online_cpu(cpus) { + busy_time = calc_busy_time(cpus); + b_time[cpus] = busy_time; + } + target_cpu = get_min_busy_time(b_time, nr_all_cpus); + pr_info("The target_cpu is %d, the cpu_num is %d\n", + target_cpu, num_online_cpus() - 1); + return target_cpu; +} + +static void increase_cores(int cur_cpus) +{ + if (cur_cpus == ap_info.maxcpus) + return; + + cur_cpus = cpumask_next_zero(0, cpu_online_mask); + + struct device *dev = get_cpu_device(cur_cpus); + + per_cpu(cpu_adjusting, dev->id) = 1; + lock_device_hotplug(); + cpu_device_up(dev); + pr_info("The target_cpu is %d, After cpu_up, the cpu_num is %d\n", + dev->id, num_online_cpus()); + get_cpu_device(dev->id)->offline = false; + unlock_device_hotplug(); + per_cpu(cpu_adjusting, dev->id) = 0; +} + +static void decrease_cores(int cur_cpus) +{ + if (cur_cpus == ap_info.mincpus) + return; + + cur_cpus = find_min_busy_cpu(); + + struct device *dev = get_cpu_device(cur_cpus); + + if (dev->id > 0) { + per_cpu(cpu_adjusting, dev->id) = -1; + lock_device_hotplug(); + cpu_device_down(dev); + get_cpu_device(dev->id)->offline = true; + unlock_device_hotplug(); + per_cpu(cpu_adjusting, dev->id) = 0; + } +} + +#define INC_THRESHOLD 80 +#define DEC_THRESHOLD 40 + +static void do_autoplug_timer(struct work_struct *work) +{ + cputime64_t cur_wall_time = 0, cur_idle_time; + unsigned long idle_time, 
wall_time; + int delay, load; + int nr_cur_cpus = num_online_cpus(); + int nr_all_cpus = num_possible_cpus(); + int inc_req = 1, dec_req = 2; + + ap_info.maxcpus = + setup_max_cpus > nr_cpu_ids ? nr_cpu_ids : setup_max_cpus; + ap_info.mincpus = ap_info.maxcpus / 4; + + if (strcmp(curruent_policy, "performance") == 0) { + ap_info.mincpus = ap_info.maxcpus; + } else if (strcmp(curruent_policy, "powersave") == 0) { + ap_info.maxcpus = ap_info.mincpus; + } else if (strcmp(curruent_policy, "ondemand") == 0) { + ap_info.sampling_rate = 500; + inc_req = 0; + dec_req = 2; + } else if (strcmp(curruent_policy, "conservative") == 0) { + inc_req = 1; + dec_req = 3; + ap_info.sampling_rate = 1000; /* 1s */ + } + + BUG_ON(smp_processor_id() != 0); + delay = msecs_to_jiffies(ap_info.sampling_rate); + if (!autoplug_enabled || system_state != SYSTEM_RUNNING) + goto out; + + autoplug_adjusting = 1; + + if (nr_cur_cpus > ap_info.maxcpus) { + decrease_cores(nr_cur_cpus); + autoplug_adjusting = 0; + goto out; + } + if (nr_cur_cpus < ap_info.mincpus) { + increase_cores(nr_cur_cpus); + autoplug_adjusting = 0; + goto out; + } + + cur_idle_time = sw64_get_idle_time(&cur_wall_time); + if (cur_wall_time == 0) + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + wall_time = (unsigned int)(cur_wall_time - ap_info.prev_wall); + ap_info.prev_wall = cur_wall_time; + + idle_time = (unsigned int)(cur_idle_time - ap_info.prev_idle); + idle_time += wall_time * (nr_all_cpus - nr_cur_cpus); + ap_info.prev_wall = cur_idle_time; + + if (unlikely(!wall_time || wall_time * nr_all_cpus < idle_time)) { + autoplug_adjusting = 0; + goto out; + } + + load = 100 * (wall_time * nr_all_cpus - idle_time) / wall_time; + + if (load < (nr_cur_cpus - 1) * 100 - DEC_THRESHOLD) { + ap_info.inc_reqs = 0; + if (ap_info.dec_reqs < dec_req) + ap_info.dec_reqs++; + else { + ap_info.dec_reqs = 0; + decrease_cores(nr_cur_cpus); + } + } else { + ap_info.dec_reqs = 0; + if (load > (nr_cur_cpus - 1) * 100 + INC_THRESHOLD) { + if (ap_info.inc_reqs < inc_req) + ap_info.inc_reqs++; + else { + ap_info.inc_reqs = 0; + increase_cores(nr_cur_cpus); + } + } + } + + autoplug_adjusting = 0; +out: + schedule_delayed_work_on(0, &ap_info.work, delay); +} + +static struct platform_device_id platform_device_ids[] = { + { + .name = "sw64_cpuautoplug", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "sw64_cpuautoplug", + .owner = THIS_MODULE, + }, + .id_table = platform_device_ids, +}; + +static int __init cpuautoplug_init(void) +{ + int i, ret, delay; + + ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, + &cpuclass_attr_group); + if (ret) + return ret; + + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("cpuautoplug: SW64 CPU autoplug driver.\n"); + + ap_info.maxcpus = + setup_max_cpus > nr_cpu_ids ? 
nr_cpu_ids : setup_max_cpus; + ap_info.mincpus = 16; + ap_info.dec_reqs = 0; + ap_info.inc_reqs = 0; + ap_info.sampling_rate = 720; /* 720ms */ + if (setup_max_cpus == 0) { /* boot with npsmp */ + ap_info.maxcpus = 1; + autoplug_enabled = 0; + } + if (setup_max_cpus > num_possible_cpus()) + ap_info.maxcpus = num_possible_cpus(); + + pr_info("mincpu = %d, maxcpu = %d, autoplug_enabled = %d, rate = %d\n", + ap_info.mincpus, ap_info.maxcpus, autoplug_enabled, + ap_info.sampling_rate); + + for_each_possible_cpu(i) + per_cpu(cpu_adjusting, i) = 0; +#ifndef MODULE + delay = msecs_to_jiffies(ap_info.sampling_rate * 24); +#else + delay = msecs_to_jiffies(ap_info.sampling_rate * 8); +#endif + INIT_DEFERRABLE_WORK(&ap_info.work, do_autoplug_timer); + schedule_delayed_work_on(0, &ap_info.work, delay); + + if (!autoplug_enabled) + cancel_delayed_work_sync(&ap_info.work); + + return ret; +} + +static void __exit cpuautoplug_exit(void) +{ + cancel_delayed_work_sync(&ap_info.work); + platform_driver_unregister(&platform_driver); + sysfs_remove_group(&cpu_subsys.dev_root->kobj, &cpuclass_attr_group); +} + +late_initcall(cpuautoplug_init); +module_exit(cpuautoplug_exit); + +MODULE_DESCRIPTION("cpuautoplug driver for SW64"); diff --git a/arch/sw_64/kernel/crash_dump.c b/arch/sw_64/kernel/crash_dump.c index f3836afe3e25528267a14bcf1424f01f887ac9d6..4484673823b8e6065d9efb5f2299a21df67d421a 100644 --- a/arch/sw_64/kernel/crash_dump.c +++ b/arch/sw_64/kernel/crash_dump.c @@ -14,8 +14,6 @@ * published by the Free Software Foundation. */ -#include -#include #include #include diff --git a/arch/sw_64/kernel/dup_print.c b/arch/sw_64/kernel/dup_print.c index ac0a95d4d30ba3f8b955936bd19c2ab720d8bba7..1aa7710b5092b78a51e82c5268257ba3d8647994 100644 --- a/arch/sw_64/kernel/dup_print.c +++ b/arch/sw_64/kernel/dup_print.c @@ -1,11 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 -#include #include -#include #include -#include #include -#include + +#include #ifdef CONFIG_SW64_RRK @@ -20,7 +18,7 @@ unsigned long sw64_printk_offset; * For output the kernel message on the console * with full-system emulator. 
*/ -#define QEMU_PRINTF_BUFF_BASE (0x805000040000ULL | PAGE_OFFSET) +#define QEMU_PRINTF_BUFF_BASE (IO_BASE | MCU_BASE | 0x40000UL | PAGE_OFFSET) int sw64_printk(const char *fmt, va_list args) { @@ -39,7 +37,7 @@ int sw64_printk(const char *fmt, va_list args) printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); } else { printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); - if (is_guest_or_emul()) { + if (is_in_emul()) { unsigned long write_addr = QEMU_PRINTF_BUFF_BASE; *(unsigned long *)write_addr = (unsigned long)((((unsigned long)sw64_printk_buf) & 0xffffffffUL) | ((unsigned long)printed_len << 32)); diff --git a/arch/sw_64/kernel/early_printk.c b/arch/sw_64/kernel/early_printk.c index f4d5f2d5c876e6b2f08d0e0c745af100ab3cbbb4..62902175217a6f8f74cad4cdbb3156039009c341 100644 --- a/arch/sw_64/kernel/early_printk.c +++ b/arch/sw_64/kernel/early_printk.c @@ -1,9 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include -#include -#include + #include static unsigned long early_serial_base; /* ttyS0 */ diff --git a/arch/sw_64/kernel/entry.S b/arch/sw_64/kernel/entry.S index 753eb31a76c6b648b1476f60633d467b13b7a4c5..6c40d2015439460ae0fb98449521722452afdcc9 100644 --- a/arch/sw_64/kernel/entry.S +++ b/arch/sw_64/kernel/entry.S @@ -22,7 +22,7 @@ */ #define SAVE_ALL \ - subl $sp, PT_REGS_PS, $sp; \ + ldi $sp, -PT_REGS_PS($sp); \ stl $0, PT_REGS_R0($sp); \ stl $1, PT_REGS_R1($sp); \ stl $2, PT_REGS_R2($sp); \ @@ -66,7 +66,7 @@ ldl $26, PT_REGS_R26($sp); \ ldl $27, PT_REGS_R27($sp); \ ldl $28, PT_REGS_R28($sp); \ - addl $sp, PT_REGS_PS, $sp + ldi $sp, PT_REGS_PS($sp) /* * Non-syscall kernel entry points. @@ -659,6 +659,7 @@ sw64_\name: fork_like fork fork_like vfork fork_like clone +fork_like clone3 .align 4 .globl sys_sigreturn diff --git a/arch/sw_64/kernel/ftrace.c b/arch/sw_64/kernel/ftrace.c index 413562b5d9be939a480a3eb092aa451136fea662..42efca28d3864c84e625e38c803b88499673c3d4 100644 --- a/arch/sw_64/kernel/ftrace.c +++ b/arch/sw_64/kernel/ftrace.c @@ -10,13 +10,8 @@ */ #include -#include -#include -#include -#include #include -#include #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); diff --git a/arch/sw_64/kernel/insn.c b/arch/sw_64/kernel/insn.c index 71d3832d1fe325b88d204757686ba796638d55b4..e8dd41b6b7c420a056c181bcc4e6a6ff8385cbaf 100644 --- a/arch/sw_64/kernel/insn.c +++ b/arch/sw_64/kernel/insn.c @@ -14,22 +14,9 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include #include -#include -#include -#include #include -#include -#include - - //static DEFINE_RAW_SPINLOCK(patch_lock); int __kprobes sw64_insn_read(void *addr, u32 *insnp) diff --git a/arch/sw_64/kernel/irq.c b/arch/sw_64/kernel/irq.c index 6cd26af15b230027f4bd9ea78a7c9ee4c8afc6d9..126fe2f70495e10c9cc313dc2cdecb0e6b65516d 100644 --- a/arch/sw_64/kernel/irq.c +++ b/arch/sw_64/kernel/irq.c @@ -12,24 +12,9 @@ */ #include -#include -#include -#include -#include -#include -#include #include -#include #include -#include #include -#include -#include -#include - -#include -#include -#include volatile unsigned long irq_err_count; DEFINE_PER_CPU(unsigned long, irq_pmi_count); diff --git a/arch/sw_64/kernel/irq_sw64.c b/arch/sw_64/kernel/irq_sw64.c index 376e8397ba3578453576c2dfee7e1813f1966465..8ab845d153eb15ddf1978e069680757d2cdd4136 100644 --- a/arch/sw_64/kernel/irq_sw64.c +++ b/arch/sw_64/kernel/irq_sw64.c @@ -3,18 +3,11 @@ * SW64 specific irq code. 
*/ -#include #include -#include -#include #include -#include -#include + #include #include -#include -#include -#include asmlinkage void do_entInt(unsigned long type, unsigned long vector, diff --git a/arch/sw_64/kernel/jump_label.c b/arch/sw_64/kernel/jump_label.c index a67d16eb3076f9fa50b2dfdc5cd89b6218feaa37..f3bc40370e4de9b77889343338b509d6bdcad8c6 100644 --- a/arch/sw_64/kernel/jump_label.c +++ b/arch/sw_64/kernel/jump_label.c @@ -1,8 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 -#include #include -#include + #include #include diff --git a/arch/sw_64/kernel/kgdb.c b/arch/sw_64/kernel/kgdb.c index c1100ef8fcdd8df15ab2f7ab70cca8c015bbb601..491f287eede9b6bc02370ce07ceaaddedd3b968c 100644 --- a/arch/sw_64/kernel/kgdb.c +++ b/arch/sw_64/kernel/kgdb.c @@ -20,11 +20,8 @@ * along with this program. If not, see . */ -#include #include #include -#include -#include struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { { "r0", 8, offsetof(struct pt_regs, r0)}, @@ -142,12 +139,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) pr_info("AFTER SET PC IS %lx\n", instruction_pointer(regs)); } -static void kgdb_call_nmi_hook(void *ignored) +void kgdb_call_nmi_hook(void *ignored) { kgdb_nmicallback(raw_smp_processor_id(), NULL); } -void kgdb_roundup_cpus(unsigned long flags) +void kgdb_roundup_cpus(void) { local_irq_enable(); smp_call_function(kgdb_call_nmi_hook, NULL, 0); @@ -231,6 +228,6 @@ void kgdb_arch_exit(void) * sw64 instructions are always in LE. * Break instruction is encoded in LE format */ -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { .gdb_bpt_instr = {0x80, 00, 00, 00} }; diff --git a/arch/sw_64/kernel/kprobes/decode-insn.c b/arch/sw_64/kernel/kprobes/decode-insn.c index e3ab856d60840f096371f1267ebc901d3ca6ed99..d376a7e2bee41a167270f4dd54b1c0a4755d462d 100644 --- a/arch/sw_64/kernel/kprobes/decode-insn.c +++ b/arch/sw_64/kernel/kprobes/decode-insn.c @@ -12,12 +12,8 @@ * General Public License for more details. */ -#include #include -#include -#include -#include -#include + #include "common.h" static bool __kprobes sw64_insn_is_steppable(u32 insn) diff --git a/arch/sw_64/kernel/kprobes/kprobes.c b/arch/sw_64/kernel/kprobes/kprobes.c index 85400f96f9916d5a3bb65f4cd052686c80cc3216..59f040eaa3e17f0f30c26e84f86332363c4a6e58 100644 --- a/arch/sw_64/kernel/kprobes/kprobes.c +++ b/arch/sw_64/kernel/kprobes/kprobes.c @@ -5,13 +5,9 @@ */ #include -#include -#include #include #include -#include -#include #include "common.h" static u32 breakpoint_insn = BREAK_KPROBE; diff --git a/arch/sw_64/kernel/kvm_cma.c b/arch/sw_64/kernel/kvm_cma.c index dc61e2e369e8e91815fce34007979a120969f75d..054dec95b996c0c622501b6244c94a7ebff86142 100644 --- a/arch/sw_64/kernel/kvm_cma.c +++ b/arch/sw_64/kernel/kvm_cma.c @@ -10,12 +10,8 @@ #include #include -#include #include #include -#include -#include -#include #include #include diff --git a/arch/sw_64/kernel/machine_kexec.c b/arch/sw_64/kernel/machine_kexec.c index c778bc1374afb46e05bfabeae3040efd2f408168..c9ca7a728bd458f323575ceae67657b379d6d925 100644 --- a/arch/sw_64/kernel/machine_kexec.c +++ b/arch/sw_64/kernel/machine_kexec.c @@ -5,18 +5,13 @@ * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. 
*/ -#include #include #include -#include #include #include #include + #include -#include -#include -#include -#include extern void *kexec_control_page; extern const unsigned char relocate_new_kernel[]; diff --git a/arch/sw_64/kernel/module.c b/arch/sw_64/kernel/module.c index c75d8a2e43090ff0874907c18f33dda2b3a1c759..2904bb750eb5d0b272e7017bb580bdfdb11c4033 100644 --- a/arch/sw_64/kernel/module.c +++ b/arch/sw_64/kernel/module.c @@ -1,17 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include -#include -#include -#include -#include #include -#if 0 -#define DEBUGP printk -#else #define DEBUGP(fmt...) -#endif /* Allocate the GOT at the end of the core sections. */ diff --git a/arch/sw_64/kernel/msi.c b/arch/sw_64/kernel/msi.c index 644e4010af8a15f69f294b4078325d9eaa82e396..ee1bda3c644741915f6a6cc376d8cf0fd0f41f38 100644 --- a/arch/sw_64/kernel/msi.c +++ b/arch/sw_64/kernel/msi.c @@ -1,14 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include #include -#include -#include -#include -#include -#include - int msi_compose_msg(unsigned int irq, struct msi_msg *msg) { @@ -22,26 +15,8 @@ void sw64_irq_noop(struct irq_data *d) { } -void destroy_irq(unsigned int irq) -{ -#if 0 - int pos; - - irq_init_desc(irq); - - if (irq < RC1_FIRST_MSI_VECTOR) { - pos = irq - RC0_FIRST_MSI_VECTOR; - clear_bit(pos, msi0_irq_in_use); - } else { - pos = irq - RC1_FIRST_MSI_VECTOR; - clear_bit(pos, msi1_irq_in_use); - } -#endif -} - void arch_teardown_msi_irq(unsigned int irq) { - destroy_irq(irq); } static int __init msi_init(void) diff --git a/arch/sw_64/kernel/pci-noop.c b/arch/sw_64/kernel/pci-noop.c index 4ef694e629e8512321b28ba755667055c5f494ce..a0aa2e5bb675d2c181d6711a3973778f61cbf5d7 100644 --- a/arch/sw_64/kernel/pci-noop.c +++ b/arch/sw_64/kernel/pci-noop.c @@ -6,16 +6,8 @@ */ #include -#include #include -#include -#include -#include -#include -#include #include -#include -#include /* * The PCI controller list. diff --git a/arch/sw_64/kernel/pci-sysfs.c b/arch/sw_64/kernel/pci-sysfs.c index 584243922df99e151ac74578f445409ebc93f41d..504fd4a0075491ef16e4b6ccd2a939f526098e57 100644 --- a/arch/sw_64/kernel/pci-sysfs.c +++ b/arch/sw_64/kernel/pci-sysfs.c @@ -10,9 +10,6 @@ * drivers/pci/pci-sysfs.c */ -#include -#include -#include #include static int hose_mmap_page_range(struct pci_controller *hose, diff --git a/arch/sw_64/kernel/pci.c b/arch/sw_64/kernel/pci.c index 36616d31f32fb28274db34b11a9d2abcb6eca493..44264e3da18fff12a9b8230fd23b9822b2d801cf 100644 --- a/arch/sw_64/kernel/pci.c +++ b/arch/sw_64/kernel/pci.c @@ -1,38 +1,16 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/sw_64/kernel/pci.c - * Modified by Suweiqiang 2013-9-30 - */ - -#include #include +#include #include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include #include -#include + #include -#include #include "pci_impl.h" unsigned long rc_linkup; -/* Indicate whether we respect the PCI setup left by console. */ -/* - * Make this long-lived so that we know when shutting down - * whether we probed only or not. - */ -int pci_probe_only; - /* * raw_pci_read/write - Platform-specific PCI config space access. */ @@ -58,12 +36,12 @@ int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, return -EINVAL; } +#ifdef CONFIG_ACPI struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) { - struct pci_bus *bus; - - return bus; + return NULL; } +#endif /* * The PCI controller list. 
@@ -86,6 +64,14 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_i #define MB (1024*KB) #define GB (1024*MB) +resource_size_t pcibios_default_alignment(void) +{ + if (is_in_guest()) + return PAGE_SIZE; + else + return 0; +} + resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { @@ -106,7 +92,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, } else if (res->flags & IORESOURCE_MEM) { /* Make sure we start at our min on all hoses */ if (start - hose->mem_space->start < PCIBIOS_MIN_MEM) - start = PCIBIOS_MIN_MEM + hose->mem_space->start; //0xc0000000- 0xffffffff + start = PCIBIOS_MIN_MEM + hose->mem_space->start; /* * The following holds at least for the Low Cost * Sw_64 implementation of the PCI interface: @@ -153,7 +139,6 @@ pcibios_init(void) sw64_init_pci(); return 0; } - subsys_initcall(pcibios_init); char *pcibios_setup(char *str) @@ -164,20 +149,13 @@ char *pcibios_setup(char *str) void pcibios_fixup_bus(struct pci_bus *bus) { /* Propagate hose info into the subordinate devices. */ - struct pci_controller *hose = bus->sysdata; struct pci_dev *dev = bus->self; - if (!dev || bus->number == hose->first_busno) { - /* Root bus. */ - unsigned long end; - + if (!dev || bus->number == hose->first_busno) { bus->resource[0] = hose->io_space; bus->resource[1] = hose->mem_space; bus->resource[2] = hose->pre_mem_space; - } else if (pci_probe_only && - (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { - pci_read_bridge_bases(bus); } } @@ -195,21 +173,6 @@ struct pci_dev *sw64_gendev_to_pci(struct device *dev) return NULL; } -/* - * If we set up a device for bus mastering, we need to check the latency - * timer as certain firmware forgets to set it properly. 
- */ -void pcibios_set_master(struct pci_dev *dev) -{ - u8 lat; - - pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); - if (lat >= 16) - return; - pr_info("PCI: Setting latency timer of device %s to 64\n", pci_name(dev)); - pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); -} - void __init pcibios_claim_one_bus(struct pci_bus *b) { struct pci_dev *dev; @@ -223,7 +186,7 @@ void __init pcibios_claim_one_bus(struct pci_bus *b) if (r->parent || !r->start || !r->flags) continue; - if (pci_probe_only || (r->flags & IORESOURCE_PCI_FIXED)) { + if (r->flags & IORESOURCE_PCI_FIXED) { if (pci_claim_resource(dev, i) == 0) continue; @@ -270,13 +233,11 @@ void __init common_init_pci(void) hose->busn_space->start = last_bus; init_busnr = (0xff << 16) + ((last_bus + 1) << 8) + (last_bus); write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); - if (is_in_host()) { - offset = hose->mem_space->start - PCI_32BIT_MEMIO; + offset = hose->mem_space->start - PCI_32BIT_MEMIO; + if (is_in_host()) hose->first_busno = last_bus + 1; - } else { - offset = hose->mem_space->start - PCI_32BIT_VT_MEMIO; + else hose->first_busno = last_bus; - } pci_add_resource_offset(&bridge->windows, hose->mem_space, offset); pci_add_resource_offset(&bridge->windows, hose->io_space, hose->io_space->start); pci_add_resource_offset(&bridge->windows, hose->pre_mem_space, 0); @@ -285,7 +246,7 @@ void __init common_init_pci(void) bridge->sysdata = hose; bridge->busnr = hose->busn_space->start; bridge->ops = &sw64_pci_ops; - bridge->swizzle_irq = sw64_swizzle; + bridge->swizzle_irq = pci_common_swizzle; bridge->map_irq = sw64_map_irq; ret = pci_scan_root_bus_bridge(bridge); @@ -613,11 +574,6 @@ int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) return sw64_chip_init->pci_init.map_irq(dev, slot, pin); } -unsigned char sw64_swizzle(struct pci_dev *dev, u8 *pinp) -{ - return PCI_SLOT(dev->devfn); -} - static void __init sw64_init_host(unsigned long node, unsigned long index) { @@ -649,6 +605,16 @@ sw64_init_host(unsigned long node, unsigned long index) } } +static void set_devint_wken(int node) +{ + unsigned long val; + + /* enable INTD wakeup */ + val = 0x80; + sw64_io_write(node, DEVINT_WKEN, val); + sw64_io_write(node, DEVINTWK_INTEN, val); +} + void __init sw64_init_arch(void) { if (IS_ENABLED(CONFIG_PCI)) { @@ -661,6 +627,7 @@ void __init sw64_init_arch(void) cpu_num = sw64_chip->get_cpu_num(); for (node = 0; node < cpu_num; node++) { + set_devint_wken(node); rc_enable = sw64_chip_init->pci_init.get_rc_enable(node); if (rc_enable == 0) { printk("PCIe is disabled on node %ld\n", node); @@ -702,11 +669,13 @@ static void __init sw64_init_intx(struct pci_controller *hose) val_node = next_node_in(node, node_online_map); else val_node = node; - irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 1, val_node); + irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 2, val_node); WARN_ON(irq < 0); irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq); irq_set_status_flags(irq, IRQ_LEVEL); hose->int_irq = irq; + irq_set_chip_and_handler(irq + 1, &dummy_irq_chip, handle_level_irq); + hose->service_irq = irq + 1; rcid = cpu_to_rcid(0); printk_once(KERN_INFO "INTx are directed to node %d core %d.\n", @@ -714,6 +683,9 @@ static void __init sw64_init_intx(struct pci_controller *hose) int_conf = 1UL << 62 | rcid; /* rebase all intx on the first logical cpu */ if (sw64_chip_init->pci_init.set_intx) sw64_chip_init->pci_init.set_intx(node, index, int_conf); + + write_piu_ior0(node, index, PMEINTCONFIG, PME_ENABLE_INTD_CORE0); + 
write_piu_ior0(node, index, AERERRINTCONFIG, AER_ENABLE_INTD_CORE0); } void __init sw64_init_irq(void) diff --git a/arch/sw_64/kernel/pci_common.c b/arch/sw_64/kernel/pci_common.c index c8c4bf08a4589afbe26772b5e8c824e632dd825e..f996baca9d935a7881a510c1aaf190a2f0975a66 100644 --- a/arch/sw_64/kernel/pci_common.c +++ b/arch/sw_64/kernel/pci_common.c @@ -3,210 +3,113 @@ * linux/arch/sw_64/kernel/pci_iommu.c */ -#include -#include #include -#include -#include #include -#include -#include #include -#include -#include #include #include -#include -#include -#include -#include - -#include "pci_impl.h" - -#define DEBUG_ALLOC 0 -#if DEBUG_ALLOC > 0 -# define DBGA(args...) printk(KERN_DEBUG args) -#else -# define DBGA(args...) -#endif -#if DEBUG_ALLOC > 1 -# define DBGA2(args...) printk(KERN_DEBUG args) -#else -# define DBGA2(args...) -#endif - -#define DEBUG_NODIRECT 0 - -#define ISA_DMA_MASK 0x00ffffff - -/* - * Map a single buffer of the indicated size for PCI DMA in streaming - * mode. The 32-bit PCI bus mastering address to use is returned. - * Once the device is given the dma address, the device owns this memory - * until either pci_unmap_single or pci_dma_sync_single is performed. - */ - -static dma_addr_t -pci_direct_map_single_1(struct pci_dev *pdev, void *cpu_addr) -{ - struct pci_controller *hose = pdev->sysdata; - unsigned long paddr; - unsigned long dma_offset; - - if (hose == NULL) { - pr_err("%s: hose does not exist!\n", __func__); - return 0; - } - - dma_offset = read_piu_ior0(hose->node, hose->index, EPDMABAR); - paddr = __pa(cpu_addr) + dma_offset; - return paddr; -} - -/* Helper for generic DMA-mapping functions. */ -static struct pci_dev *sw64_direct_gendev_to_pci(struct device *dev) -{ - if (dev && dev->bus == &pci_bus_type) - return to_pci_dev(dev); - - /* This assumes ISA bus master with dma_mask 0xffffff. */ - return NULL; -} static dma_addr_t sw64_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { - struct pci_dev *pdev = sw64_direct_gendev_to_pci(dev); + dma_addr_t dma_addr = page_to_phys(page) + offset; - if (dir == PCI_DMA_NONE) - BUG(); + if (unlikely(swiotlb_force == SWIOTLB_FORCE)) + return swiotlb_map(dev, dma_addr, size, dir, attrs); - return pci_direct_map_single_1(pdev, (char *)page_address(page) + offset); -} + if (unlikely(!dma_capable(dev, dma_addr, size, true))) { + if (swiotlb_force != SWIOTLB_NO_FORCE) + return swiotlb_map(dev, dma_addr, size, dir, attrs); -/* - * Unmap a single streaming mode DMA translation. The DMA_ADDR and - * SIZE must match what was provided for in a previous pci_map_single - * call. All other usages are undefined. After this call, reads by - * the cpu to the buffer are guaranteed to see whatever the device - * wrote there. - */ + dev_WARN_ONCE(dev, 1, + "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", + &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); + return DMA_MAPPING_ERROR; + } -static inline void sw64_direct_unmap_page(struct device *dev, dma_addr_t dma_addr, + return dma_addr; +} + +static inline void sw64_direct_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { + if (unlikely(is_swiotlb_buffer(addr))) + swiotlb_tbl_unmap_single(dev, addr, size, size, dir, attrs); } -/* Allocate and map kernel buffer using consistent mode DMA for PCI - * device. 
Returns non-NULL cpu-view pointer to the buffer if - * successful and sets *DMA_ADDRP to the pci side dma address as well, - * else DMA_ADDRP is undefined. - */ +static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) +{ + return phys + size - 1 <= + min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); +} static void *sw64_direct_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp, unsigned long attrs) { - struct pci_dev *pdev = sw64_direct_gendev_to_pci(dev); - void *cpu_addr; - long order = get_order(size); - - gfp &= ~GFP_DMA; - -#ifdef CONFIG_ZONE_DMA - if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) - gfp |= GFP_DMA; -#endif - -try_again: - cpu_addr = (void *)__get_free_pages(gfp, order); - if (!cpu_addr) { - pr_info("pci_alloc_consistent: get_free_pages failed from %ps\n", - __builtin_return_address(0)); - /* ??? Really atomic allocation? Otherwise we could play - * with vmalloc and sg if we can't find contiguous memory. - */ - return NULL; + struct page *page; + void *ret; + u64 dma_limit; + + size = PAGE_ALIGN(size); + if (attrs & DMA_ATTR_NO_WARN) + gfp |= __GFP_NOWARN; + + dma_limit = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); + if (dma_limit <= DMA_BIT_MASK(32)) + gfp |= GFP_DMA32; + + /* we always manually zero the memory once we are done */ + gfp &= ~__GFP_ZERO; +again: + page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size)); + if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { + dma_free_contiguous(dev, page, size); + page = NULL; + + if (IS_ENABLED(CONFIG_ZONE_DMA32) && + dma_limit < DMA_BIT_MASK(64) && + !(gfp & (GFP_DMA32 | GFP_DMA))) { + gfp |= GFP_DMA32; + goto again; + } } - memset(cpu_addr, 0, size); - *dma_addrp = pci_direct_map_single_1(pdev, cpu_addr); - if (*dma_addrp == 0) { - free_pages((unsigned long)cpu_addr, order); - if (gfp & GFP_DMA) - return NULL; - /* The address doesn't fit required mask and we - * do not have iommu. Try again with GFP_DMA. - */ - gfp |= GFP_DMA; - goto try_again; - } + if (!page) + return NULL; - DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n", - size, cpu_addr, *dma_addrp, __builtin_return_address(0)); + ret = page_address(page); + memset(ret, 0, size); + *dma_addrp = page_to_phys(page); - return cpu_addr; + return ret; } -/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must - * be values that were returned from pci_alloc_consistent. SIZE must - * be the same as what as passed into pci_alloc_consistent. - * References to the memory and mappings associated with CPU_ADDR or - * DMA_ADDR past this call are illegal. 
- */ - static void sw64_direct_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { - struct pci_dev *pdev = sw64_direct_gendev_to_pci(dev); - - pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); - free_pages((unsigned long)cpu_addr, get_order(size)); - DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n", - dma_addr, size, __builtin_return_address(0)); -} -#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG))) -#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG)) - -static dma_addr_t sw64_phys_to_dma(struct device *dev, phys_addr_t pa) -{ - unsigned long dma_offset; - struct pci_dev *pdev = sw64_gendev_to_pci(dev); - struct pci_controller *hose = pdev->sysdata; - - if (hose == NULL) { - pr_err("%s: hose does not exist!\n", __func__); - return 0; + if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { + /* cpu_addr is a struct page cookie, not a kernel address */ + dma_free_contiguous(dev, cpu_addr, size); + return; } - dma_offset = read_piu_ior0(hose->node, hose->index, EPDMABAR); - return pa + dma_offset; + free_pages((unsigned long)cpu_addr, get_order(size)); } -static bool -check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, - const char *caller) +static void sw64_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) { - if (unlikely(dev && !dma_capable(dev, dma_addr, size, true))) { - if (!dev->dma_mask) { - dev_err(dev, - "%s: call on device without dma_mask\n", - caller); - return false; - } + struct scatterlist *sg; + int i; - if (*dev->dma_mask >= DMA_BIT_MASK(32)) { - dev_err(dev, - "%s: overflow %pad+%zu of device mask %llx\n", - caller, &dma_addr, size, *dev->dma_mask); - } - return false; - } - return true; + for_each_sg(sgl, sg, nents, i) + sw64_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir, + attrs); } static int sw64_direct_map_sg(struct device *dev, struct scatterlist *sgl, @@ -216,58 +119,16 @@ static int sw64_direct_map_sg(struct device *dev, struct scatterlist *sgl, struct scatterlist *sg; for_each_sg(sgl, sg, nents, i) { - BUG_ON(!sg_page(sg)); - - sg_dma_address(sg) = sw64_phys_to_dma(dev, sg_phys(sg)); - if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__)) - return 0; + sg_dma_address(sg) = sw64_direct_map_page(dev, sg_page(sg), + sg->offset, sg->length, dir, attrs); + if (sg->dma_address == DMA_MAPPING_ERROR) + goto out_unmap; sg_dma_len(sg) = sg->length; } - return nents; -} - -/* Unmap a set of streaming mode DMA translations. Again, cpu read - * rules concerning calls here are the same as for pci_unmap_single() - * above. - */ - -static inline void sw64_direct_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ -} - -/* Return whether the given PCI device DMA address mask can be - * supported properly. - */ - -static int sw64_direct_supported(struct device *dev, u64 mask) -{ - struct pci_dev *pdev = sw64_direct_gendev_to_pci(dev); - struct pci_controller *hose; - - if ((max_low_pfn << PAGE_SHIFT) - 1 <= mask) - return 1; - - /* Check that we have a scatter-gather arena that fits. */ - hose = pdev->sysdata; - if (hose == NULL) { - pr_err("%s: hose does not exist!\n", __func__); - return 0; - } - - /* As last resort try ZONE_DMA. */ - if (MAX_DMA_ADDRESS - PAGE_OFFSET - 1 <= mask) - return 1; - - /* - * Upstream PCI/PCIe bridges or SoC interconnects may not carry - * as many DMA address bits as the device itself supports. 
- */ - if (dev->bus_dma_limit && mask > dev->bus_dma_limit) - return 0; +out_unmap: + sw64_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); return 0; } @@ -278,7 +139,7 @@ const struct dma_map_ops sw64_dma_direct_ops = { .unmap_page = sw64_direct_unmap_page, .map_sg = sw64_direct_map_sg, .unmap_sg = sw64_direct_unmap_sg, - .dma_supported = sw64_direct_supported, + .dma_supported = dma_direct_supported, }; const struct dma_map_ops *dma_ops = &sw64_dma_direct_ops; diff --git a/arch/sw_64/kernel/pci_impl.h b/arch/sw_64/kernel/pci_impl.h index 0cb6d1b1d1e3799cd9ae46dbd52f7fc6be792c63..8e541f28f4ce9a9c28ca8fc7c072243f0ef40249 100644 --- a/arch/sw_64/kernel/pci_impl.h +++ b/arch/sw_64/kernel/pci_impl.h @@ -8,60 +8,11 @@ struct pci_dev; struct pci_controller; -struct pci_iommu_arena; - -/* - * We can't just blindly use 64K for machines with EISA busses; they - * may also have PCI-PCI bridges present, and then we'd configure the - * bridge incorrectly. - * - * Also, we start at 0x8000 or 0x9000, in hopes to get all devices' - * IO space areas allocated *before* 0xC000; this is because certain - * BIOSes (Millennium for one) use PCI Config space "mechanism #2" - * accesses to probe the bus. If a device's registers appear at 0xC000, - * it may see an INx/OUTx at that address during BIOS emulation of the - * VGA BIOS, and some cards, notably Adaptec 2940UW, take mortal offense. - */ - -#define EISA_DEFAULT_IO_BASE 0x9000 /* start above 8th slot */ -#define DEFAULT_IO_BASE 0x0 /* start at 8th slot */ - -/* - * We try to make the DEFAULT_MEM_BASE addresses *always* have more than - * a single bit set. This is so that devices like the broken Myrinet card - * will always have a PCI memory address that will never match a IDSEL - * address in PCI Config space, which can cause problems with early rev cards. - */ - -#define DEFAULT_MEM_BASE 0 - -/* - * A PCI IOMMU allocation arena. There are typically two of these - * regions per bus. - * ??? The 8400 has a 32-byte pte entry, and the entire table apparently - * lives directly on the host bridge (no tlb?). We don't support this - * machine, but if we ever did, we'd need to parameterize all this quite - * a bit further. Probably with per-bus operation tables. - */ - -struct pci_iommu_arena { - spinlock_t lock; - struct pci_controller *hose; -#define IOMMU_INVALID_PTE 0x2 /* 32:63 bits MBZ */ -#define IOMMU_RESERVED_PTE 0xface - unsigned long *ptes; - dma_addr_t dma_base; - unsigned int size; - unsigned int next_entry; - unsigned int align_entry; -}; - /* The hose list. */ extern struct pci_controller *hose_head, **hose_tail; extern void common_init_pci(void); -#define common_swizzle pci_common_swizzle extern struct pci_controller *alloc_pci_controller(void); extern struct resource *alloc_resource(void); diff --git a/arch/sw_64/kernel/perf_event.c b/arch/sw_64/kernel/perf_event.c index dac979d4b09aa679934062ef87cf1456df251573..d2975e17f666b743d98ed82d50848efad1401296 100644 --- a/arch/sw_64/kernel/perf_event.c +++ b/arch/sw_64/kernel/perf_event.c @@ -6,18 +6,6 @@ */ #include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include /* For tracking PMCs and the hw events they monitor on each CPU. 
*/ struct cpu_hw_events { diff --git a/arch/sw_64/kernel/perf_regs.c b/arch/sw_64/kernel/perf_regs.c index 8eec2179eb863157aaa80f07cd94ba9a719e079d..4c12a2cdf912020c4e49df19f4c9fa99e2ffae36 100644 --- a/arch/sw_64/kernel/perf_regs.c +++ b/arch/sw_64/kernel/perf_regs.c @@ -1,11 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include #include -#include -#include -#include u64 perf_reg_value(struct pt_regs *regs, int idx) { diff --git a/arch/sw_64/kernel/platform.c b/arch/sw_64/kernel/platform.c new file mode 100644 index 0000000000000000000000000000000000000000..f4c880acaa40da8c366675b1b31047d980e3a932 --- /dev/null +++ b/arch/sw_64/kernel/platform.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/setup.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +#include + +static struct platform_device sw64_cpufreq_device = { + .name = "sw64_cpufreq", + .id = -1, +}; + +static int __init sw64_cpufreq_init(void) +{ + return platform_device_register(&sw64_cpufreq_device); +} + +arch_initcall(sw64_cpufreq_init); diff --git a/arch/sw_64/kernel/process.c b/arch/sw_64/kernel/process.c index 8fd493776bec5da719152799f2b7820ae04afdf5..4192d50f5b0ebaa1bafbf940f860b0d1846343c4 100644 --- a/arch/sw_64/kernel/process.c +++ b/arch/sw_64/kernel/process.c @@ -3,42 +3,16 @@ * This file handles the architecture-dependent parts of process handling. */ -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include #include #include -#include -#include #include -#include -#include #include -#include -#include -#include -#include #include -#include #include "proto.h" -#include "pci_impl.h" /* * Power off function, if any diff --git a/arch/sw_64/kernel/ptrace.c b/arch/sw_64/kernel/ptrace.c index 5f29c500c8b1913c9952f3d7b6077bffbe3c7dad..b06c98e9944b18cd1ef4943f528d0f5ad377dd68 100644 --- a/arch/sw_64/kernel/ptrace.c +++ b/arch/sw_64/kernel/ptrace.c @@ -5,48 +5,16 @@ /* mangled further by Bob Manson (manson@santafe.edu) */ /* more mutilation by David Mosberger (davidm@azstarnet.com) */ -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include #include -#include -#include -#include -#include +#include + #include "proto.h" #define CREATE_TRACE_POINTS #include -#define DEBUG DBG_MEM -#undef DEBUG - -#define DEBUG 0 - -#ifdef DEBUG -enum { - DBG_MEM = (1 << 0), - DBG_BPT = (1 << 1), - DBG_MEM_ALL = (1 << 2) -}; -#define DBG(fac, args) \ -{ \ - if ((fac) & DEBUG) \ - printk args; \ -} -#else -#define DBG(fac, args) -#endif - #define BREAKINST 0x00000080 /* sys_call bpt */ /* @@ -243,15 +211,12 @@ ptrace_set_bpt(struct task_struct *child) if (displ) /* guard against unoptimized code */ task_thread_info(child)->bpt_addr[nsaved++] = pc + 4 + displ; - DBG(DBG_BPT, ("execing branch\n")); /*call ret jmp*/ } else if (op_code >= 0x1 && op_code <= 0x3) { reg_b = (insn >> 16) & 0x1f; task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b); - DBG(DBG_BPT, ("execing jump\n")); } else { task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; - DBG(DBG_BPT, ("execing normal insn\n")); } /* install breakpoints: */ @@ -261,8 +226,6 @@ ptrace_set_bpt(struct task_struct *child) if (res < 0) return res; task_thread_info(child)->bpt_insn[i] = insn; - DBG(DBG_BPT, (" -> next_pc=%lx\n", - task_thread_info(child)->bpt_addr[i])); res = write_int(child, task_thread_info(child)->bpt_addr[i], BREAKINST); if (res < 0) @@ -451,7 +414,6 @@ 
long arch_ptrace(struct task_struct *child, long request, case PTRACE_PEEKUSR: force_successful_syscall_return(); ret = get_reg(child, addr); - DBG(DBG_MEM, ("peek $%lu->%#lx\n", addr, ret)); break; /* When I and D space are separate, this will have to be fixed. */ @@ -461,7 +423,6 @@ long arch_ptrace(struct task_struct *child, long request, break; case PTRACE_POKEUSR: /* write the specified register */ - DBG(DBG_MEM, ("poke $%lu<-%#lx\n", addr, data)); ret = put_reg(child, addr, data); break; case PTRACE_GETREGS: @@ -647,8 +608,8 @@ struct pt_regs_offset { int offset; }; -#define REG_OFFSET_NAME(reg, r) { \ - .name = #reg, \ +#define REG_OFFSET_NAME(r) { \ + .name = #r, \ .offset = offsetof(struct pt_regs, r) \ } @@ -658,37 +619,38 @@ struct pt_regs_offset { } static const struct pt_regs_offset regoffset_table[] = { - REG_OFFSET_NAME(r0, r0), - REG_OFFSET_NAME(r1, r1), - REG_OFFSET_NAME(r2, r2), - REG_OFFSET_NAME(r3, r3), - REG_OFFSET_NAME(r4, r4), - REG_OFFSET_NAME(r5, r5), - REG_OFFSET_NAME(r6, r6), - REG_OFFSET_NAME(r7, r7), - REG_OFFSET_NAME(r8, r8), - REG_OFFSET_NAME(r19, r19), - REG_OFFSET_NAME(r20, r20), - REG_OFFSET_NAME(r21, r21), - REG_OFFSET_NAME(r22, r22), - REG_OFFSET_NAME(r23, r23), - REG_OFFSET_NAME(r24, r24), - REG_OFFSET_NAME(r25, r25), - REG_OFFSET_NAME(r26, r26), - REG_OFFSET_NAME(r27, r27), - REG_OFFSET_NAME(r28, r28), - REG_OFFSET_NAME(hae, hae), - REG_OFFSET_NAME(trap_a0, trap_a0), - REG_OFFSET_NAME(trap_a1, trap_a1), - REG_OFFSET_NAME(trap_a2, trap_a2), - REG_OFFSET_NAME(ps, ps), - REG_OFFSET_NAME(pc, pc), - REG_OFFSET_NAME(gp, gp), - REG_OFFSET_NAME(r16, r16), - REG_OFFSET_NAME(r17, r17), - REG_OFFSET_NAME(r18, r18), + REG_OFFSET_NAME(r0), + REG_OFFSET_NAME(r1), + REG_OFFSET_NAME(r2), + REG_OFFSET_NAME(r3), + REG_OFFSET_NAME(r4), + REG_OFFSET_NAME(r5), + REG_OFFSET_NAME(r6), + REG_OFFSET_NAME(r7), + REG_OFFSET_NAME(r8), + REG_OFFSET_NAME(r19), + REG_OFFSET_NAME(r20), + REG_OFFSET_NAME(r21), + REG_OFFSET_NAME(r22), + REG_OFFSET_NAME(r23), + REG_OFFSET_NAME(r24), + REG_OFFSET_NAME(r25), + REG_OFFSET_NAME(r26), + REG_OFFSET_NAME(r27), + REG_OFFSET_NAME(r28), + REG_OFFSET_NAME(hae), + REG_OFFSET_NAME(trap_a0), + REG_OFFSET_NAME(trap_a1), + REG_OFFSET_NAME(trap_a2), + REG_OFFSET_NAME(ps), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(gp), + REG_OFFSET_NAME(r16), + REG_OFFSET_NAME(r17), + REG_OFFSET_NAME(r18), REG_OFFSET_END, }; + /** * regs_query_register_offset() - query register offset from its name * @name: the name of a register @@ -705,3 +667,29 @@ int regs_query_register_offset(const char *name) return roff->offset; return -EINVAL; } + +static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + unsigned long ksp = kernel_stack_pointer(regs); + + return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs:pt_regs which contains kernel stack pointer. + * @n:stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specifined by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. 
+ */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long addr; + + addr = kernel_stack_pointer(regs) + n * sizeof(long); + if (!regs_within_kernel_stack(regs, addr)) + return 0; + return *(unsigned long *)addr; +} diff --git a/arch/sw_64/kernel/relocate.c b/arch/sw_64/kernel/relocate.c index 36b16d84d5ab38150eafe5cec4614a25bcc8b669..fe403f9c70c74f35be4fa575ac3d3e743ff5b0e5 100644 --- a/arch/sw_64/kernel/relocate.c +++ b/arch/sw_64/kernel/relocate.c @@ -9,21 +9,12 @@ * Copyright (C) 2019 He Sheng * Authors: He Sheng (hesheng05@gmail.com) */ -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include #include #include +#include + #define INITRD_ADDR 0x3000000UL #define KTEXT_MAX 0xffffffffa0000000UL #define RELOCATED(x) ((void *)((unsigned long)x + offset)) diff --git a/arch/sw_64/kernel/segvdbg.c b/arch/sw_64/kernel/segvdbg.c index aee4b38630724595e58827cd3a797429578078ed..5b8a638bf8b93f6aa217da97efb78647e2177256 100644 --- a/arch/sw_64/kernel/segvdbg.c +++ b/arch/sw_64/kernel/segvdbg.c @@ -9,9 +9,7 @@ */ #include -#include -#include -#include + #include extern bool segv_debug_enabled; diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c index cc33a6f3b4f96f804ce972548393b4263fe2baea..ca19445ac8836cb4bcee32ac383c9ed567b217db 100644 --- a/arch/sw_64/kernel/setup.c +++ b/arch/sw_64/kernel/setup.c @@ -9,31 +9,13 @@ * Bootup setup stuff. */ -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include #include -#include -#include -#include -#include -#include -#include #include -#include -#include #include #include -#include -#include #ifdef CONFIG_MAGIC_SYSRQ #include #include @@ -41,26 +23,12 @@ #ifdef CONFIG_DEBUG_FS #include #endif -#include -#include -#include #include #include -#include -#include #include #include -#include -#include + #include -#include -#include -#include -#include -#include -#include -#include -#include #include #include @@ -74,14 +42,15 @@ #define DBGDCONT(args...) #endif + DEFINE_PER_CPU(unsigned long, hard_node_id) = { 0 }; #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) struct cma *sw64_kvm_cma; EXPORT_SYMBOL(sw64_kvm_cma); -static phys_addr_t size_cmdline; -static phys_addr_t base_cmdline; +static phys_addr_t kvm_mem_size; +static phys_addr_t kvm_mem_base; struct gen_pool *sw64_kvm_pool; EXPORT_SYMBOL(sw64_kvm_pool); @@ -133,6 +102,9 @@ static struct resource bss_resource = { struct cpuinfo_sw64 cpu_data[NR_CPUS]; EXPORT_SYMBOL(cpu_data); +DEFINE_STATIC_KEY_TRUE(run_mode_host_key); +DEFINE_STATIC_KEY_FALSE(run_mode_guest_key); +DEFINE_STATIC_KEY_FALSE(run_mode_emul_key); struct cpu_desc_t cpu_desc; struct socket_desc_t socket_desc[MAX_NUMSOCKETS]; int memmap_nr; @@ -175,7 +147,8 @@ static void __init kexec_control_page_init(void) { phys_addr_t addr; - addr = memblock_alloc_base(KEXEC_CONTROL_PAGE_SIZE, PAGE_SIZE, KTEXT_MAX); + addr = memblock_phys_alloc_range(KEXEC_CONTROL_PAGE_SIZE, PAGE_SIZE, + 0, KTEXT_MAX); kexec_control_page = (void *)(__START_KERNEL_map + addr); } @@ -356,7 +329,7 @@ static void * __init move_initrd(unsigned long mem_limit) static int __init memmap_range_valid(phys_addr_t base, phys_addr_t size) { - if (phys_to_virt(base + size - 1) < phys_to_virt(PFN_PHYS(max_low_pfn))) + if ((base + size) <= memblock_end_of_DRAM()) return true; else return false; @@ -367,6 +340,7 @@ void __init process_memmap(void) static int i; // Make it static so we won't start over again every time. 
int ret; phys_addr_t base, size; + unsigned long dma_end __maybe_unused = virt_to_phys((void *)MAX_DMA_ADDRESS); if (!memblock_initialized) return; @@ -378,24 +352,27 @@ void __init process_memmap(void) case memmap_reserved: if (!memmap_range_valid(base, size)) { pr_err("reserved memmap region [mem %#018llx-%#018llx] extends beyond end of memory (%#018llx)\n", - base, base + size - 1, PFN_PHYS(max_low_pfn)); + base, base + size - 1, memblock_end_of_DRAM()); } else { pr_info("reserved memmap region [mem %#018llx-%#018llx]\n", base, base + size - 1); - ret = memblock_remove(base, size); + ret = memblock_mark_nomap(base, size); if (ret) pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", base, base + size - 1); + else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (base < dma_end)) + pr_warn("memmap region [mem %#018llx-%#018llx] overlapped with DMA32 region\n", + base, base + size - 1); } break; case memmap_pci: if (!memmap_range_valid(base, size)) { pr_info("pci memmap region [mem %#018llx-%#018llx] extends beyond end of memory (%#018llx)\n", - base, base + size - 1, PFN_PHYS(max_low_pfn)); + base, base + size - 1, memblock_end_of_DRAM()); } else { pr_info("pci memmap region [mem %#018llx-%#018llx]\n", base, base + size - 1); - ret = memblock_remove(base, size); + ret = memblock_mark_nomap(base, size); if (ret) pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", base, base + size - 1); @@ -403,10 +380,12 @@ void __init process_memmap(void) break; case memmap_initrd: if (!memmap_range_valid(base, size)) { - base = (unsigned long) move_initrd(PFN_PHYS(max_low_pfn)); + phys_addr_t old_base = base; + + base = (unsigned long) move_initrd(memblock_end_of_DRAM()); if (!base) { pr_err("initrd memmap region [mem %#018llx-%#018llx] extends beyond end of memory (%#018llx)\n", - base, base + size - 1, PFN_PHYS(max_low_pfn)); + old_base, old_base + size - 1, memblock_end_of_DRAM()); } else { memmap_map[i].addr = base; pr_info("initrd memmap region [mem %#018llx-%#018llx]\n", @@ -490,7 +469,6 @@ insert_ram_resource(u64 start, u64 end, bool reserved) static int __init request_standard_resources(void) { - int i; struct memblock_region *mblk; extern char _text[], _etext[]; @@ -498,17 +476,12 @@ static int __init request_standard_resources(void) extern char __bss_start[], __bss_stop[]; for_each_mem_region(mblk) { - insert_ram_resource(mblk->base, mblk->base + mblk->size - 1, 0); - } - - for (i = 0; i < memmap_nr; i++) { - switch (memmap_map[i].type) { - case memmap_crashkernel: - break; - default: - insert_ram_resource(memmap_map[i].addr, - memmap_map[i].addr + memmap_map[i].size - 1, 1); - } + if (!memblock_is_nomap(mblk)) + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 0); + else + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 1); } code_resource.start = __pa_symbol(_text); @@ -639,10 +612,25 @@ static void __init setup_cpu_info(void) cpu_desc.arch_rev = CPUID_ARCH_REV(val); cpu_desc.pa_bits = CPUID_PA_BITS(val); cpu_desc.va_bits = CPUID_VA_BITS(val); - cpu_desc.run_mode = HOST_MODE; - if (*(unsigned long *)MMSIZE) - cpu_desc.run_mode = GUEST_MODE; + if (*(unsigned long *)MMSIZE) { + static_branch_disable(&run_mode_host_key); + if (*(unsigned long *)MMSIZE & EMUL_FLAG) { + pr_info("run mode: emul\n"); + static_branch_disable(&run_mode_guest_key); + static_branch_enable(&run_mode_emul_key); + + } else { + pr_info("run mode: guest\n"); + static_branch_enable(&run_mode_guest_key); + static_branch_disable(&run_mode_emul_key); + } + } else { + pr_info("run mode: 
host\n"); + static_branch_enable(&run_mode_host_key); + static_branch_disable(&run_mode_guest_key); + static_branch_disable(&run_mode_emul_key); + } for (i = 0; i < VENDOR_ID_MAX; i++) { val = cpuid(GET_VENDOR_ID, i); @@ -729,17 +717,17 @@ static int __init early_kvm_reserved_mem(char *p) return -EINVAL; } - size_cmdline = memparse(p, &p); + kvm_mem_size = memparse(p, &p); if (*p != '@') return -EINVAL; - base_cmdline = memparse(p + 1, &p); + kvm_mem_base = memparse(p + 1, &p); return 0; } early_param("kvm_mem", early_kvm_reserved_mem); void __init sw64_kvm_reserve(void) { - kvm_cma_declare_contiguous(base_cmdline, size_cmdline, 0, + kvm_cma_declare_contiguous(kvm_mem_base, kvm_mem_size, 0, PAGE_SIZE, 0, "sw64_kvm_cma", &sw64_kvm_cma); } #endif @@ -747,6 +735,7 @@ void __init sw64_kvm_reserve(void) void __init setup_arch(char **cmdline_p) { + jump_label_init(); setup_cpu_info(); sw64_chip->fixup(); sw64_chip_init->fixup(); @@ -754,7 +743,6 @@ setup_arch(char **cmdline_p) show_socket_mem_layout(); sw64_chip_init->early_init.setup_core_start(&core_start); - jump_label_init(); setup_sched_clock(); #ifdef CONFIG_GENERIC_SCHED_CLOCK sw64_sched_clock_init(); @@ -938,6 +926,7 @@ c_start(struct seq_file *f, loff_t *pos) static void * c_next(struct seq_file *f, void *v, loff_t *pos) { + (*pos)++; return NULL; } @@ -1011,14 +1000,14 @@ static int __init sw64_kvm_pool_init(void) if (!sw64_kvm_cma) goto out; - kvm_pool_virt = (unsigned long)base_cmdline; + kvm_pool_virt = (unsigned long)kvm_mem_base; sw64_kvm_pool = gen_pool_create(PAGE_SHIFT, -1); if (!sw64_kvm_pool) goto out; - status = gen_pool_add_virt(sw64_kvm_pool, kvm_pool_virt, base_cmdline, - size_cmdline, -1); + status = gen_pool_add_virt(sw64_kvm_pool, kvm_pool_virt, kvm_mem_base, + kvm_mem_size, -1); if (status < 0) { pr_err("failed to add memory chunks to sw64 kvm pool\n"); gen_pool_destroy(sw64_kvm_pool); @@ -1027,13 +1016,14 @@ static int __init sw64_kvm_pool_init(void) } gen_pool_set_algo(sw64_kvm_pool, gen_pool_best_fit, NULL); - base_page = pfn_to_page(base_cmdline >> PAGE_SHIFT); - end_page = pfn_to_page((base_cmdline + size_cmdline) >> PAGE_SHIFT); + base_page = pfn_to_page(kvm_mem_base >> PAGE_SHIFT); + end_page = pfn_to_page((kvm_mem_base + kvm_mem_size - 1) >> PAGE_SHIFT); p = base_page; while (page_ref_count(p) == 0 && (unsigned long)p <= (unsigned long)end_page) { set_page_count(p, 1); + page_mapcount_reset(p); SetPageReserved(p); p++; } diff --git a/arch/sw_64/kernel/signal.c b/arch/sw_64/kernel/signal.c index 74e98063c874a128557012798c5b99e1abbf5c44..dd0d8ff4242085478b28177789f8fc8aa1c2b838 100644 --- a/arch/sw_64/kernel/signal.c +++ b/arch/sw_64/kernel/signal.c @@ -7,24 +7,11 @@ * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson */ -#include -#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include +#include -#include #include #include @@ -37,6 +24,21 @@ asmlinkage void ret_from_sys_call(void); +SYSCALL_DEFINE2(odd_sigprocmask, int, how, unsigned long, newmask) +{ + sigset_t oldmask; + sigset_t mask; + unsigned long res; + + siginitset(&mask, newmask & _BLOCKABLE); + res = sigprocmask(how, &mask, &oldmask); + if (!res) { + force_successful_syscall_return(); + res = oldmask.sig[0]; + } + return res; +} + /* * Do a signal return; undo the signal stack. 
*/ @@ -48,7 +50,6 @@ asmlinkage void ret_from_sys_call(void); struct rt_sigframe { struct siginfo info; struct ucontext uc; - unsigned int retcode[3]; }; /* @@ -59,17 +60,12 @@ struct rt_sigframe { extern char compile_time_assert [offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 1 : -1]; -#define INSN_MOV_R30_R16 0x47fe0410 -#define INSN_LDI_R0 0x201f0000 -#define INSN_CALLSYS 0x00000083 - static long restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { unsigned long usp; struct switch_stack *sw = (struct switch_stack *)regs - 1; - unsigned long *ctx_fp = (unsigned long *)¤t->thread.ctx_fp; - long i, err = __get_user(regs->pc, &sc->sc_pc); + long err = __get_user(regs->pc, &sc->sc_pc); current->restart_block.fn = do_no_restart_syscall; @@ -108,8 +104,8 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) err |= __get_user(usp, sc->sc_regs+30); wrusp(usp); /* simd-fp */ - for (i = 0; i < 31 * 4; i++) - err |= __get_user(ctx_fp[i], sc->sc_fpregs + i); + err |= __copy_from_user(¤t->thread.ctx_fp, + &sc->sc_fpregs, sizeof(struct context_fpregs)); err |= __get_user(current->thread.fpcr, &sc->sc_fpcr); return err; @@ -140,8 +136,8 @@ do_sigreturn(struct sigcontext __user *sc) /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt(current)) { - send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *) regs->pc, 0, - current); + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void __user *)regs->pc, 0); } return; @@ -171,8 +167,8 @@ do_rt_sigreturn(struct rt_sigframe __user *frame) /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt(current)) { - send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *) regs->pc, 0, - current); + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void __user *)regs->pc, 0); } return; @@ -196,8 +192,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, unsigned long sp) { struct switch_stack *sw = (struct switch_stack *)regs - 1; - unsigned long *ctx_fp = (unsigned long *)¤t->thread.ctx_fp; - long i, err = 0; + long err = 0; err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack); err |= __put_user(mask, &sc->sc_mask); @@ -237,8 +232,8 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, err |= __put_user(sp, sc->sc_regs+30); err |= __put_user(0, sc->sc_regs+31); /* simd-fp */ - for (i = 0; i < 31 * 4; i++) - err |= __put_user(ctx_fp[i], sc->sc_fpregs + i); + err |= __copy_to_user(&sc->sc_fpregs, + ¤t->thread.ctx_fp, sizeof(struct context_fpregs)); err |= __put_user(current->thread.fpcr, &sc->sc_fpcr); err |= __put_user(regs->trap_a0, &sc->sc_traparg_a0); @@ -259,7 +254,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) if (!access_ok(frame, sizeof(*frame))) return -EFAULT; - err |= copy_siginfo_to_user(&frame->info, &ksig->info); + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err |= copy_siginfo_to_user(&frame->info, &ksig->info); /* Create the ucontext. 
*/ err |= __put_user(0, &frame->uc.uc_flags); @@ -277,15 +273,19 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) */ r26 = VDSO_SYMBOL(current->mm->context.vdso, rt_sigreturn); - if (err) - return -EFAULT; - /* "Return" to the handler */ regs->r26 = r26; regs->r27 = regs->pc = (unsigned long) ksig->ka.sa.sa_handler; regs->r16 = ksig->sig; /* a0: signal number */ - regs->r17 = (unsigned long) &frame->info; /* a1: siginfo pointer */ - regs->r18 = (unsigned long) &frame->uc; /* a2: ucontext pointer */ + if (ksig->ka.sa.sa_flags & SA_SIGINFO) { + /* a1: siginfo pointer, a2: ucontext pointer */ + regs->r17 = (unsigned long) &frame->info; + regs->r18 = (unsigned long) &frame->uc; + } else { + /* a1: exception code, a2: sigcontext pointer */ + regs->r17 = 0; + regs->r18 = (unsigned long) &frame->uc.uc_mcontext; + } wrusp((unsigned long) frame); #if DEBUG_SIG diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c index 7d9c5c90f1ac1b946d09bb43240388db4542be0c..fb915d1660691e19d92b9dd648a932b978eed6a7 100644 --- a/arch/sw_64/kernel/smp.c +++ b/arch/sw_64/kernel/smp.c @@ -4,41 +4,18 @@ */ #include -#include -#include -#include #include #include -#include -#include -#include #include -#include -#include #include -#include #include -#include -#include -#include #include -#include -#include - -#include -#include -#include -#include -#include #include #include -#include -#include -#include #include #include -#include + #include "proto.h" struct smp_rcb_struct *smp_rcb; diff --git a/arch/sw_64/kernel/stacktrace.c b/arch/sw_64/kernel/stacktrace.c index bb501c14565b42bb307b7ee2c0c543c51723e465..41cdff5b49416a9cb5e8677ad7f9cacdacb2c7ab 100644 --- a/arch/sw_64/kernel/stacktrace.c +++ b/arch/sw_64/kernel/stacktrace.c @@ -6,7 +6,6 @@ */ #include #include -#include #include #include diff --git a/arch/sw_64/kernel/suspend.c b/arch/sw_64/kernel/suspend.c index b2b07ac3042b8c171ed39f361f3125c6426aec20..369bc1e19b85713cb0ebe9a0719fd3a7a68ec358 100644 --- a/arch/sw_64/kernel/suspend.c +++ b/arch/sw_64/kernel/suspend.c @@ -1,17 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include -#include -#include -#include -#include -#include + #include -#include -#include -#include -#include -#include #include struct processor_state suspend_state; @@ -43,20 +33,14 @@ void sw64_suspend_enter(void) */ disable_local_timer(); -#ifdef CONFIG_PCI - if (sw64_chip->suspend) - sw64_chip->suspend(0); -#endif + #ifdef CONFIG_SW64_SUSPEND_DEEPSLEEP_BOOTCORE sw64_suspend_deep_sleep(&suspend_state); #else mtinten(); asm("halt"); #endif -#ifdef CONFIG_PCI - if (sw64_chip->suspend) - sw64_chip->suspend(1); -#endif + disable_local_timer(); } diff --git a/arch/sw_64/kernel/syscalls/syscall.tbl b/arch/sw_64/kernel/syscalls/syscall.tbl index 37b1e3f9f9e2a3a8174a4a8530913fb6c2039f14..b9b93d70124dc5cba0547ad65b256b3c4be5e090 100644 --- a/arch/sw_64/kernel/syscalls/syscall.tbl +++ b/arch/sw_64/kernel/syscalls/syscall.tbl @@ -55,7 +55,7 @@ 45 common open sys_open #46 is unused 47 common getxgid sys_getxgid -48 common sigprocmask sys_sigprocmask +48 common odd_sigprocmask sys_odd_sigprocmask #49 is unused #50 is unused 51 common acct sys_acct @@ -211,13 +211,13 @@ 201 common msgget sys_msgget 202 common msgrcv sys_msgrcv 203 common msgsnd sys_msgsnd -204 common semctl sys_semctl +204 common semctl sys_old_semctl 205 common semget sys_semget 206 common semop sys_semop #207 is unused 208 common lchown sys_lchown 209 common shmat sys_shmat -210 common shmctl sys_shmctl +210 common shmctl sys_old_shmctl 211 
common shmdt sys_shmdt 212 common shmget sys_shmget #213 is unused @@ -289,15 +289,15 @@ 279 common fsmount sys_fsmount 280 common fspick sys_fspick 281 common pidfd_open sys_pidfd_open -282 common clone3 sys_clone3 -283 common close_range sys_close_range -284 common openat2 sys_openat2 -285 common pidfd_getfd sys_pidfd_getfd -286 common faccessat2 sys_faccessat2 -287 common process_madvise sys_process_madvise -#288 is unused -#289 is unused -#290 is unused +282 common clone3 sw64_clone3 +283 common close_range sys_close_range +284 common openat2 sys_openat2 +285 common pidfd_getfd sys_pidfd_getfd +286 common faccessat2 sys_faccessat2 +287 common process_madvise sys_process_madvise +288 common pkey_mprotect sys_pkey_mprotect +289 common pkey_alloc sys_pkey_alloc +290 common pkey_free sys_pkey_free #291 is unused #292 is unused #293 is unused @@ -306,7 +306,7 @@ #296 is unused #297 is unused 298 common getpriority sys_getpriority -#299 is unused +299 common sigprocmask sys_sigprocmask 300 common bdflush sys_bdflush #301 is unused 302 common mount sys_mount @@ -377,7 +377,7 @@ 367 common getcwd sys_getcwd 368 common capget sys_capget 369 common capset sys_capset -370 common sendfile sys_sendfile +370 common sendfile sys_sendfile64 371 common setresgid sys_setresgid 372 common getresgid sys_getresgid 373 common dipc sys_ni_syscall diff --git a/arch/sw_64/kernel/tc.c b/arch/sw_64/kernel/tc.c index c047d457e55abfec92e29975430d3a181c7aceec..f2de5ac3d9dc440ca8685e33354c5e0b35919f91 100644 --- a/arch/sw_64/kernel/tc.c +++ b/arch/sw_64/kernel/tc.c @@ -5,9 +5,6 @@ #include -#include -#include -#include #include /* diff --git a/arch/sw_64/kernel/time.c b/arch/sw_64/kernel/time.c index 0815d06b03d4d1023f47fe049adbcfb6a9dafce2..15035a01e48a8856b73b40d2b2349f38d19223e1 100644 --- a/arch/sw_64/kernel/time.c +++ b/arch/sw_64/kernel/time.c @@ -1,34 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - #include -#include -#include #include -#include -#include + +#include #include "proto.h" @@ -237,8 +214,8 @@ static int __init sched_clock_debug_init(void) if (!sw64_debugfs_dir) return -ENODEV; - sched_clock_status = debugfs_create_file_unsafe("use_tc_as_sched_clock", - 0666, sw64_debugfs_dir, NULL, + sched_clock_status = debugfs_create_file("tc_sched_clock", + 0644, sw64_debugfs_dir, NULL, &sched_clock_status_fops); if (!sched_clock_status) diff --git a/arch/sw_64/kernel/timer.c b/arch/sw_64/kernel/timer.c index c29e7d1b664bd37a538a72266dce876f12b8b894..268537d5e483956070ef05c21b873f0ddb4debfb 100644 --- a/arch/sw_64/kernel/timer.c +++ b/arch/sw_64/kernel/timer.c @@ -4,20 +4,9 @@ * Description: percpu local timer, based on arch/x86/kernel/apic/apic.c */ -#include -#include -#include -#include #include -#include -#include -#include #include -#include -#include -#include -#include -#include + #include #include @@ -98,6 +87,14 @@ static int timer_set_oneshot(struct clock_event_device *evt) return 0; } +void sw64_update_clockevents(unsigned long cpu, u32 freq) +{ + struct clock_event_device *swevt = &per_cpu(timer_events, cpu); + + if (cpu == smp_processor_id()) + clockevents_update_freq(swevt, freq); +} + /* * Setup the local timer for this CPU. Copy the initilized values * of the boot CPU and register the clock event in the framework. 
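[Editorial aside, not part of the patch: the timer.c hunk above adds a sw64_update_clockevents() helper that refreshes the per-cpu clock_event_device frequency, and an earlier hunk registers a "sw64_cpufreq" platform device, but the matching cpufreq driver is not shown in this series. The following is a minimal, hypothetical sketch of how such a driver might wire the helper up through a cpufreq transition notifier; the notifier name, the registration path, and the kHz-to-Hz conversion (assuming the local timer ticks at the core frequency) are all assumptions made for illustration.]

/*
 * Illustrative sketch only -- not part of the patch above.
 * Keeps the per-cpu clock_event_device in sync with core frequency
 * changes by calling the sw64_update_clockevents() helper introduced
 * in arch/sw_64/kernel/timer.c. Names and units are assumptions.
 */
#include <linux/cpufreq.h>
#include <linux/notifier.h>
#include <linux/init.h>

/* Declared by the patch in arch/sw_64/kernel/timer.c. */
extern void sw64_update_clockevents(unsigned long cpu, u32 freq);

static int sw64_clockevent_freq_notify(struct notifier_block *nb,
					unsigned long event, void *data)
{
	struct cpufreq_freqs *freqs = data;

	/* Only act once the new core frequency is actually in effect. */
	if (event != CPUFREQ_POSTCHANGE)
		return NOTIFY_OK;

	/*
	 * cpufreq reports kHz; clockevents_update_freq() wants Hz, so
	 * convert here (assuming the timer counts at core frequency).
	 */
	sw64_update_clockevents(freqs->policy->cpu, freqs->new * 1000U);

	return NOTIFY_OK;
}

static struct notifier_block sw64_clockevent_freq_nb = {
	.notifier_call = sw64_clockevent_freq_notify,
};

static int __init sw64_clockevent_freq_init(void)
{
	return cpufreq_register_notifier(&sw64_clockevent_freq_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
late_initcall(sw64_clockevent_freq_init);

[Note that the helper itself only updates the event device when called on the target CPU, so a real driver would likely invoke it via smp_call_function_single() or from the affected CPU's frequency-change path.]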
diff --git a/arch/sw_64/kernel/topology.c b/arch/sw_64/kernel/topology.c index e6df862705831fc0cb51093589a2290aaec8381c..964d6a83d901e10b7f2e35f0276c6a27c856a05f 100644 --- a/arch/sw_64/kernel/topology.c +++ b/arch/sw_64/kernel/topology.c @@ -1,20 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include #include @@ -138,16 +125,10 @@ void remove_cpu_topology(int cpu) } #ifdef CONFIG_ACPI -static bool __init acpi_cpu_is_threaded(int cpu) -{ - return 0; -} - static int __init parse_acpi_topology(void) { return 0; } - #else static inline int __init parse_acpi_topology(void) { diff --git a/arch/sw_64/kernel/traps.c b/arch/sw_64/kernel/traps.c index c736a67ef7b8b49b51fca652b512b16aaf2a1a85..99cee58e886dc0cc21af1c22e0eed02643ff81f9 100644 --- a/arch/sw_64/kernel/traps.c +++ b/arch/sw_64/kernel/traps.c @@ -9,29 +9,16 @@ * This file initializes the trap entry points */ -#include -#include -#include -#include -#include -#include #include -#include -#include -#include #include #include #include #include -#include -#include #include -#include #include #include #include -#include #include "proto.h" @@ -193,7 +180,7 @@ do_entArith(unsigned long summary, unsigned long write_mask, } die_if_kernel("Arithmetic fault", regs, 0, NULL); - send_sig_fault(SIGFPE, si_code, (void __user *) regs->pc, 0, current); + force_sig_fault(SIGFPE, si_code, (void __user *)regs->pc, 0); } asmlinkage void @@ -226,13 +213,11 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs) if (ptrace_cancel_bpt(current)) regs->pc -= 4; /* make pc point to former bpt */ - send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc, 0, - current); + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc, 0); return; case 1: /* bugcheck */ - send_sig_fault(SIGTRAP, TRAP_UNK, (void __user *)regs->pc, 0, - current); + force_sig_fault(SIGTRAP, TRAP_UNK, (void __user *)regs->pc, 0); return; case 2: /* gentrap */ @@ -293,8 +278,7 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs) break; } - send_sig_fault(signo, code, (void __user *)regs->pc, 0, - current); + force_sig_fault(signo, code, (void __user *)regs->pc, regs->r16); return; case 4: /* opDEC */ @@ -337,8 +321,7 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs) break; } - send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0, - current); + force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0); } /* @@ -456,10 +439,9 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg, __asm__ __volatile__( " zap %6, 2, %1\n" " srl %6, 8, %2\n" - " stb %1, 0x0(%5)\n" - " stb %2, 0x1(%5)\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" "3:\n" - ".section __ex_table, \"a\"\n" " .long 1b - .\n" " ldi %2, 3b-1b(%0)\n" @@ -990,20 +972,16 @@ do_entUnaUser(void __user *va, unsigned long opcode, sw64_read_simd_fp_m_s(reg, fp); if ((unsigned long)va<<61 == 0) { __asm__ __volatile__( - "1: bis %4, %4, %1\n" - "2: bis %5, %5, %2\n" - "3: stl %1, 0(%3)\n" - "4: stl %2, 8(%3)\n" - "5:\n" + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" ".section __ex_table, \"a\"\n\t" " .long 1b - .\n" - " ldi %1, 5b-1b(%0)\n" + " ldi $31, 3b-1b(%0)\n" " .long 2b - .\n" - " ldi %2, 5b-2b(%0)\n" - " .long 3b - .\n" - " ldi $31, 5b-3b(%0)\n" - " .long 4b - .\n" - " ldi $31, 5b-4b(%0)\n" + " ldi $31, 3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "r"(fp[0]), "r"(fp[1]), 
"0"(0)); @@ -1123,20 +1101,16 @@ do_entUnaUser(void __user *va, unsigned long opcode, sw64_read_simd_fp_m_d(reg, fp); if ((unsigned long)va<<61 == 0) { __asm__ __volatile__( - "1: bis %4, %4, %1\n" - "2: bis %5, %5, %2\n" - "3: stl %1, 0(%3)\n" - "4: stl %2, 8(%3)\n" - "5:\n" + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" ".section __ex_table, \"a\"\n\t" " .long 1b - .\n" - " ldi %1, 5b-1b(%0)\n" + " ldi $31, 3b-1b(%0)\n" " .long 2b - .\n" - " ldi %2, 5b-2b(%0)\n" - " .long 3b - .\n" - " ldi $31, 5b-3b(%0)\n" - " .long 4b - .\n" - " ldi $31, 5b-4b(%0)\n" + " ldi $31, 3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "r"(fp[0]), "r"(fp[1]), "0"(0)); @@ -1148,20 +1122,16 @@ do_entUnaUser(void __user *va, unsigned long opcode, __asm__ __volatile__( - "1: bis %4, %4, %1\n" - "2: bis %5, %5, %2\n" - "3: stl %1, 0(%3)\n" - "4: stl %2, 8(%3)\n" - "5:\n" + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" ".section __ex_table, \"a\"\n\t" " .long 1b - .\n" - " ldi %1, 5b-1b(%0)\n" + " ldi $31, 3b-1b(%0)\n" " .long 2b - .\n" - " ldi %2, 5b-2b(%0)\n" - " .long 3b - .\n" - " ldi $31, 5b-3b(%0)\n" - " .long 4b - .\n" - " ldi $31, 5b-4b(%0)\n" + " ldi $31, 3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(vb), "r"(fp[2]), "r"(fp[3]), "0"(0)); @@ -1489,10 +1459,9 @@ do_entUnaUser(void __user *va, unsigned long opcode, __asm__ __volatile__( " zap %6, 2, %1\n" " srl %6, 8, %2\n" - " stb %1, 0x0(%5)\n" - " stb %2, 0x1(%5)\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" "3:\n" - ".section __ex_table, \"a\"\n" " .long 1b - .\n" " ldi %2, 3b-1b(%0)\n" @@ -1628,12 +1597,12 @@ do_entUnaUser(void __user *va, unsigned long opcode, si_code = SEGV_MAPERR; up_read(&mm->mmap_lock); } - send_sig_fault(SIGBUS, si_code, va, 0, current); + force_sig_fault(SIGSEGV, si_code, va, 0); return; give_sigbus: regs->pc -= 4; - send_sig_fault(SIGBUS, BUS_ADRALN, va, 0, current); + force_sig_fault(SIGBUS, BUS_ADRALN, va, 0); } void diff --git a/arch/sw_64/kernel/unaligned.c b/arch/sw_64/kernel/unaligned.c index 4ec1187d6cd00db8213bcccec5d952364beea632..a1bbdab4a26681883c630dc4ce06d42df907cb00 100644 --- a/arch/sw_64/kernel/unaligned.c +++ b/arch/sw_64/kernel/unaligned.c @@ -12,9 +12,6 @@ * for more details. 
*/ -#include -#include -#include #include #include diff --git a/arch/sw_64/kernel/uprobes.c b/arch/sw_64/kernel/uprobes.c index d10464d0dcdd3dff06af4da1d696b1bb523cc1d1..786f2e38a59f831cb832840d5a27e2f50788a5c3 100644 --- a/arch/sw_64/kernel/uprobes.c +++ b/arch/sw_64/kernel/uprobes.c @@ -1,14 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include -#include -#include #include #include -#include - #define UPROBE_TRAP_NR ULONG_MAX /** diff --git a/arch/sw_64/kernel/vdso.c b/arch/sw_64/kernel/vdso.c index 32ed952748f056620f2b8e636916464b55d6423b..b4126cbaa4bda220635ac284a6fe526ee02a923b 100644 --- a/arch/sw_64/kernel/vdso.c +++ b/arch/sw_64/kernel/vdso.c @@ -14,20 +14,11 @@ * */ -#include -#include #include -#include -#include -#include #include -#include -#include #include #include -#include -#include #include extern char vdso_start, vdso_end; diff --git a/arch/sw_64/kernel/vdso/so2s.sh b/arch/sw_64/kernel/vdso/so2s.sh index 8f23ac544d1b3ebffa66e0fc19c28c6256a8e716..e1763af8e7301a0ec8ca7e9f901c6bf438c5920d 100755 --- a/arch/sw_64/kernel/vdso/so2s.sh +++ b/arch/sw_64/kernel/vdso/so2s.sh @@ -1,5 +1,4 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0+ -# Copyright 2020 Palmer Dabbelt -grep -v "LINUX" | sed 's/\([0-9a-f]*\) T \([a-z0-9_]*\)/.globl\t\2\n\2:\n.quad\t0x\1/' +grep "__vdso_" | sed 's/\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_.*\)*/.globl\t\2\n\2:\n.quad\t0x\1/' diff --git a/arch/sw_64/kernel/vdso/vdso.S b/arch/sw_64/kernel/vdso/vdso.S index ce5448d00cf7bee127be338a01f351be03c56ebc..edd9be27db9d5b90652553bf48196b4f0f999e3a 100644 --- a/arch/sw_64/kernel/vdso/vdso.S +++ b/arch/sw_64/kernel/vdso/vdso.S @@ -15,9 +15,7 @@ * */ -#include #include -#include #include __PAGE_ALIGNED_DATA diff --git a/arch/sw_64/kernel/vdso/vdso.lds.S b/arch/sw_64/kernel/vdso/vdso.lds.S index 67a635d6dfafaa09c776a076bcfc0cae15acc365..de1782ccb7b678c44497377f2a7985355b94a4a4 100644 --- a/arch/sw_64/kernel/vdso/vdso.lds.S +++ b/arch/sw_64/kernel/vdso/vdso.lds.S @@ -79,7 +79,7 @@ PHDRS */ VERSION { - LINUX_2.6.39 { + LINUX_2.6 { global: __vdso_rt_sigreturn; __vdso_gettimeofday; diff --git a/arch/sw_64/kernel/vdso/vgettimeofday.c b/arch/sw_64/kernel/vdso/vgettimeofday.c index 6ba9ff6e33d505ddd67e0aacbaeaa268e829dce5..b9c9a137f9d3438d500db5f511008652e1f371d6 100644 --- a/arch/sw_64/kernel/vdso/vgettimeofday.c +++ b/arch/sw_64/kernel/vdso/vgettimeofday.c @@ -13,10 +13,26 @@ */ #include -#include + #include #include -#include + +static __always_inline int syscall_fallback(clockid_t clkid, struct timespec64 *ts) +{ + register int r0 asm("$0"); + register unsigned long r19 asm("$19"); + asm volatile( + " mov %0, $16\n" + " mov %1, $17\n" + " ldi $0, %2\n" + " sys_call 0x83\n" + :: "r"(clkid), "r"(ts), "i"(__NR_clock_gettime) + : "$0", "$16", "$17", "$19"); + if (unlikely(r19)) + return -r0; + else + return r0; +} static __always_inline int do_realtime_coarse(struct timespec64 *ts, const struct vdso_data *data) @@ -38,8 +54,8 @@ static __always_inline int do_monotonic_coarse(struct timespec64 *ts, const struct vdso_data *data) { u32 start_seq; - u32 to_mono_sec; - u32 to_mono_nsec; + u64 to_mono_sec; + u64 to_mono_nsec; do { start_seq = vdso_data_read_begin(data); @@ -107,8 +123,8 @@ static __always_inline int do_monotonic(struct timespec64 *ts, { u32 start_seq; u64 ns; - u32 to_mono_sec; - u32 to_mono_nsec; + u64 to_mono_sec; + u64 to_mono_nsec; do { start_seq = vdso_data_read_begin(data); @@ -170,10 +186,9 @@ int __vdso_clock_gettime(clockid_t clkid, struct timespec64 *ts) ret = do_monotonic(ts, data); 
break; default: - ret = -ENOSYS; - break; + /* fall back to a syscall */ + ret = syscall_fallback(clkid, ts); } - /* If we return -ENOSYS libc should fall back to a syscall. */ return ret; } diff --git a/arch/sw_64/kernel/vdso/vrt_sigreturn.S b/arch/sw_64/kernel/vdso/vrt_sigreturn.S index c07eb7244d0c4adc0740c7cda1609a0743c02759..6aa7aa300b4d119a844ea1fcd75a1985b1685573 100644 --- a/arch/sw_64/kernel/vdso/vrt_sigreturn.S +++ b/arch/sw_64/kernel/vdso/vrt_sigreturn.S @@ -20,10 +20,50 @@ #include #include +#define RT_SIGFRAME_SIZE 1600 +#define RT_SIGFRAME_MCTX 176 + .text + .macro SIGCONTEXT_REGS_I base, from = 0 + .cfi_offset \from, \base + (4 + \from) * 8 + .if 30 - \from + SIGCONTEXT_REGS_I \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_F base, from = 32 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + (\from - 32) * 32 + .if 62 - \from + SIGCONTEXT_REGS_F \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_V base, from = 67 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + ((\from - 67) & 0x1f) * 32 + (((\from - 67) >> 5) + 1) * 8 + .if 161 - \from + SIGCONTEXT_REGS_V \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS base + SIGCONTEXT_REGS_I \base + SIGCONTEXT_REGS_F \base + SIGCONTEXT_REGS_V \base + .cfi_offset 63, \base + (4 + 32 + 1) * 8 + 32 * 32 + .cfi_offset 64, \base + 2 * 8 + .endm + + .cfi_startproc + .cfi_return_column 64 + .cfi_signal_frame + SIGCONTEXT_REGS -RT_SIGFRAME_SIZE + RT_SIGFRAME_MCTX + .cfi_def_cfa_offset RT_SIGFRAME_SIZE + + nop ENTRY(__vdso_rt_sigreturn) mov $sp, $16 ldi $0, __NR_rt_sigreturn sys_call 0x83 ENDPROC(__vdso_rt_sigreturn) + .cfi_endproc diff --git a/arch/sw_64/kvm/emulate.c b/arch/sw_64/kvm/emulate.c index 1552119e63463edd0596e527e8436c23d60cba49..bcc06c0dd618757c844a54cfc86c0bc8c9beab58 100644 --- a/arch/sw_64/kvm/emulate.c +++ b/arch/sw_64/kvm/emulate.c @@ -32,6 +32,7 @@ void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, struct kvm_run *run) vcpu->arch.mmio_decode.rt = ra; break; case 0x23: /* LDL */ + case 0x24: /* LDL_U */ run->mmio.is_write = 0; run->mmio.len = 8; vcpu->arch.mmio_decode.rt = ra; @@ -52,6 +53,7 @@ void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, struct kvm_run *run) run->mmio.len = 4; break; case 0x2b: /* STL */ + case 0x2c: /* STL_U */ run->mmio.is_write = 1; *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra); run->mmio.len = 8; diff --git a/arch/sw_64/kvm/kvm-sw64.c b/arch/sw_64/kvm/kvm-sw64.c index 1481c3dbb211caf9f4c4090f18386fc8895de617..d651d26a957a4df5e7c29e8e3c0e99e561cd1606 100644 --- a/arch/sw_64/kvm/kvm-sw64.c +++ b/arch/sw_64/kvm/kvm-sw64.c @@ -5,31 +5,18 @@ * linhn */ -#include #include -#include #include #include -#include -#include #include #include -#include -#include #include #include -#include -#include -#include -#include #include -#include #include -#include #include "../kernel/pci_impl.h" - #include "vmem.c" bool set_msi_flag; @@ -37,7 +24,6 @@ unsigned long sw64_kvm_last_vpn[NR_CPUS]; #define cpu_last_vpn(cpuid) sw64_kvm_last_vpn[cpuid] #ifdef CONFIG_SUBARCH_C3B -#define MAX_VPN 255 #define WIDTH_HARDWARE_VPN 8 #endif @@ -80,7 +66,7 @@ static unsigned long __get_new_vpn_context(struct kvm_vcpu *vcpu, long cpu) unsigned long vpn = cpu_last_vpn(cpu); unsigned long next = vpn + 1; - if ((vpn & HARDWARE_VPN_MASK) >= MAX_VPN) { + if ((vpn & HARDWARE_VPN_MASK) >= HARDWARE_VPN_MASK) { tbia(); next = (vpn & ~HARDWARE_VPN_MASK) + VPN_FIRST_VERSION + 1; /* bypass 0 */ } @@ -298,6 +284,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, ret = 
vm_mmap(vm_file, mem->userspace_addr, mem->memory_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, 0); + if ((long)ret < 0) + return ret; + vma = find_vma(current->mm, mem->userspace_addr); if (!vma) return -ENOMEM; @@ -309,11 +298,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, vma->vm_ops = &vmem_vm_ops; vma->vm_ops->open(vma); - remap_pfn_range(vma, mem->userspace_addr, - addr >> PAGE_SHIFT, - mem->memory_size, vma->vm_page_prot); - - if ((long)ret < 0) + ret = vmem_vm_insert_page(vma); + if ((int)ret < 0) return ret; } else { info = vm_file->private_data; @@ -322,9 +308,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, pr_info("guest phys addr = %#lx, size = %#lx\n", addr, vma->vm_end - vma->vm_start); - kvm->arch.mem.membank[0].guest_phys_addr = 0; - kvm->arch.mem.membank[0].host_phys_addr = (u64)addr; - kvm->arch.mem.membank[0].size = round_up(mem->memory_size, 8<<20); + kvm->arch.host_phys_addr = (u64)addr; + kvm->arch.size = round_up(mem->memory_size, 8<<20); memset((void *)(PAGE_OFFSET + addr), 0, 0x2000000); @@ -343,7 +328,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* For guest kernel "sys_call HMC_whami", indicate virtual cpu id */ vcpu->arch.vcb.whami = vcpu->vcpu_id; vcpu->arch.vcb.vcpu_irq_disabled = 1; - vcpu->arch.vcb.pcbb = vcpu->kvm->arch.mem.membank[0].host_phys_addr; vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */ return 0; @@ -351,11 +335,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) { - unsigned long addr = vcpu->kvm->arch.mem.membank[0].host_phys_addr; + unsigned long addr = vcpu->kvm->arch.host_phys_addr; vcpu->arch.vcb.whami = vcpu->vcpu_id; vcpu->arch.vcb.vcpu_irq_disabled = 1; - vcpu->arch.vcb.pcbb = vcpu->kvm->arch.mem.membank[0].host_phys_addr; vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */ vcpu->arch.power_off = 0; memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending)); @@ -448,7 +431,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_ void _debug_printk_vcpu(struct kvm_vcpu *vcpu) { unsigned long pc = vcpu->arch.regs.pc; - unsigned long offset = vcpu->kvm->arch.mem.membank[0].host_phys_addr; + unsigned long offset = vcpu->kvm->arch.host_phys_addr; unsigned long pc_phys = PAGE_OFFSET | ((pc & 0x7fffffffUL) + offset); unsigned int insn; int opc, ra, disp16; @@ -482,7 +465,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) /* vpn will update later when vcpu is running */ if (vcpu->arch.vcb.vpcr == 0) { vcpu->arch.vcb.vpcr - = get_vpcr(vcpu->kvm->arch.mem.membank[0].host_phys_addr, vcpu->kvm->arch.mem.membank[0].size, 0); + = get_vpcr(vcpu->kvm->arch.host_phys_addr, vcpu->kvm->arch.size, 0); vcpu->arch.vcb.upcr = 0x7; } diff --git a/arch/sw_64/kvm/vmem.c b/arch/sw_64/kvm/vmem.c index b8a585ec1ad1058074980ad6e531fd749321ace0..c6f9d6cdf03b120b32d3e39ac017e412838f9efb 100644 --- a/arch/sw_64/kvm/vmem.c +++ b/arch/sw_64/kvm/vmem.c @@ -28,6 +28,35 @@ static bool addr_in_pool(struct gen_pool *pool, return found; } +static int vmem_vm_insert_page(struct vm_area_struct *vma) +{ + unsigned long addr, uaddr; + struct page *vmem_page; + struct vmem_info *info; + size_t size; + int ret; + + info = vma->vm_private_data; + addr = info->start; + size = info->size; + uaddr = vma->vm_start; + + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP; + vmem_page = pfn_to_page(addr >> PAGE_SHIFT); + do { + ret = vm_insert_page(vma, uaddr, vmem_page); + if (ret < 0) { + pr_info("vm_insert_page failed: 
%d\n", ret); + return ret; + } + vmem_page++; + uaddr += PAGE_SIZE; + size -= PAGE_SIZE; + } while (size > 0); + + return 0; +} + static void vmem_vm_open(struct vm_area_struct *vma) { struct vmem_info *info = vma->vm_private_data; @@ -83,6 +112,7 @@ static int vmem_mmap(struct file *flip, struct vm_area_struct *vma) unsigned long addr; static struct vmem_info *info; size_t size = vma->vm_end - vma->vm_start; + int ret; if (!(vma->vm_flags & VM_SHARED)) { pr_err("%s: mapping must be shared\n", __func__); @@ -114,10 +144,9 @@ static int vmem_mmap(struct file *flip, struct vm_area_struct *vma) /*to do if size bigger than vm_mem_size*/ pr_info("sw64_vmem: vm_start=%#lx, size= %#lx\n", vma->vm_start, size); - /*remap_pfn_range - remap kernel memory to userspace*/ - if (remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, size, - vma->vm_page_prot)) - return -EAGAIN; + vmem_vm_insert_page(vma); + if (ret < 0) + return ret; return 0; } diff --git a/arch/sw_64/lib/checksum.c b/arch/sw_64/lib/checksum.c index 561bbac59f8dbfd58691ab42d681a0712415104b..d1314caa15bf44591ec1f6121017a77da16c6e2e 100644 --- a/arch/sw_64/lib/checksum.c +++ b/arch/sw_64/lib/checksum.c @@ -7,31 +7,7 @@ #include #include #include - -static inline unsigned short from64to16(unsigned long x) -{ - /* Using extract instructions is a bit more efficient - * than the original shift/bitmask version. - */ - - union { - unsigned long ul; - unsigned int ui[2]; - unsigned short us[4]; - } in_v, tmp_v, out_v; - - in_v.ul = x; - tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1]; - - /* Since the bits of tmp_v.sh[3] are going to always be zero, - *we don't have to bother to add that in. - */ - out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1] - + (unsigned long) tmp_v.us[2]; - - /* Similarly, out_v.us[2] is always zero for the final add. */ - return out_v.us[0] + out_v.us[1]; -} +#include /* * computes the checksum of the TCP/UDP pseudo-header @@ -69,73 +45,61 @@ EXPORT_SYMBOL(csum_tcpudp_nofold); /* * Do a 64-bit checksum on an arbitrary memory area.. - * - * This isn't a great routine, but it's not _horrible_ either. The - * inner loop could be unrolled a bit further, and there are better - * ways to do the carry, but this is reasonable. */ static inline unsigned long do_csum(const unsigned char *buff, int len) { - int odd, count; - unsigned long result = 0; - - if (len <= 0) - goto out; - odd = 1 & (unsigned long) buff; - if (odd) { - result = *buff << 8; - len--; - buff++; - } - count = len >> 1; /* nr of 16-bit words.. */ - if (count) { - if (2 & (unsigned long) buff) { - result += *(unsigned short *) buff; - count--; - len -= 2; - buff += 2; - } - count >>= 1; /* nr of 32-bit words.. */ - if (count) { - if (4 & (unsigned long) buff) { - result += *(unsigned int *) buff; - count--; - len -= 4; - buff += 4; - } - count >>= 1; /* nr of 64-bit words.. 
*/ - if (count) { - unsigned long carry = 0; - - do { - unsigned long w = *(unsigned long *) buff; - - count--; - buff += 8; - result += carry; - result += w; - carry = (w > result); - } while (count); - result += carry; - result = (result & 0xffffffff) + (result >> 32); - } - if (len & 4) { - result += *(unsigned int *) buff; - buff += 4; - } + const unsigned long *dst = (unsigned long *)buff; + unsigned long doff = 7 & (unsigned long) dst; + unsigned long checksum = 0; + unsigned long word, patch; + unsigned long partial_dest, second_dest; + + len -= 8; + + if (!doff) { + while (len > 0) { + word = *dst; + checksum += word; + checksum += (checksum < word); + dst++; + len -= 8; } - if (len & 2) { - result += *(unsigned short *) buff; - buff += 2; + + len += 8; + word = *dst; + + if (len != 8) + maskll(word, len, word); + + checksum += word; + checksum += (checksum < word); + } else { + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; } + + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); } - if (len & 1) - result += *buff; - result = from64to16(result); - if (odd) - result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); -out: - return result; + + return from64to16(checksum); } /* diff --git a/arch/sw_64/lib/csum_partial_copy.c b/arch/sw_64/lib/csum_partial_copy.c index 678d9aa78d159929bd1663b7540e32288f70aa31..441ae5575de58d930ee8e57d8c6da572845aadd5 100644 --- a/arch/sw_64/lib/csum_partial_copy.c +++ b/arch/sw_64/lib/csum_partial_copy.c @@ -11,6 +11,7 @@ #include #include #include +#include #define ldl_u(x, y) \ @@ -37,25 +38,6 @@ static inline void sthl_u(unsigned long data, unsigned long *dst) *((char *)dst + 8 - doff + i) = *((char *)&data + 8 - doff + i); } -#define extll(x, y, z) \ - __asm__ __volatile__("extll %1, %2, %0":"=r" (z):"r" (x), "r" (y)) - -#define exthl(x, y, z) \ - __asm__ __volatile__("exthl %1, %2, %0":"=r" (z):"r" (x), "r" (y)) - -#define maskll(x, y, z) \ - __asm__ __volatile__("maskll %1, %2, %0":"=r" (z):"r" (x), "r" (y)) - -#define maskhl(x, y, z) \ - __asm__ __volatile__("maskhl %1, %2, %0":"=r" (z):"r" (x), "r" (y)) - -#define insll(x, y, z) \ - __asm__ __volatile__("insll %1, %2, %0":"=r" (z):"r" (x), "r" (y)) - -#define inshl(x, y, z) \ - __asm__ __volatile__("inshl %1, %2, %0":"=r" (z):"r" (x), "r" (y)) - - #define __get_word(insn, x, ptr) \ ({ \ long __guu_err; \ @@ -71,286 +53,84 @@ static inline void sthl_u(unsigned long data, unsigned long *dst) __guu_err; \ }) -static inline unsigned short from64to16(unsigned long x) -{ - /* Using extract instructions is a bit more efficient - * than the original shift/bitmask version. - */ - - union { - unsigned long ul; - unsigned int ui[2]; - unsigned short us[4]; - } in_v, tmp_v, out_v; - - in_v.ul = x; - tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1]; - - /* Since the bits of tmp_v.sh[3] are going to always be zero, - * we don't have to bother to add that in. - */ - out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1] - + (unsigned long) tmp_v.us[2]; - - /* Similarly, out_v.us[2] is always zero for the final add. 
*/ - return out_v.us[0] + out_v.us[1]; -} - -/* - * Ok. This isn't fun, but this is the EASY case. - */ -static inline unsigned long -csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst, - long len) -{ - unsigned long checksum = ~0U; - unsigned long carry = 0; - - while (len >= 0) { - unsigned long word; - - if (__get_word(ldl, word, src)) - return 0; - checksum += carry; - src++; - checksum += word; - len -= 8; - carry = checksum < word; - *dst = word; - dst++; - } - len += 8; - checksum += carry; - if (len) { - int i = 0; - unsigned long word; - - if (__get_word(ldl, word, src)) - return 0; - maskll(word, len, word); - checksum += word; - carry = checksum < word; - for (; i < len; i++) - *((char *)dst + i) = *((char *)&word + i); - checksum += carry; - } - return checksum; -} - -/* - * This is even less fun, but this is still reasonably - * easy. - */ static inline unsigned long csum_partial_cfu_dest_aligned(const unsigned long __user *src, - unsigned long *dst, unsigned long soff, long len) + unsigned long *dst, long len) { - unsigned long first; - unsigned long word, carry; - unsigned long lastsrc = 7+len+(unsigned long)src; - unsigned long checksum = ~0U; - - if (__get_word(ldl_u, first, src)) - return 0; - carry = 0; - while (len >= 0) { - unsigned long second; - - if (__get_word(ldl_u, second, src+1)) - return 0; - extll(first, soff, word); - len -= 8; - src++; - exthl(second, soff, first); - checksum += carry; - word |= first; - first = second; - checksum += word; - *dst = word; - dst++; - carry = checksum < word; - } - len += 8; - checksum += carry; - if (len) { - int i = 0; - unsigned long second; - - if (__get_word(ldl_u, second, lastsrc)) - return 0; - extll(first, soff, word); - exthl(second, soff, first); - word |= first; - maskll(word, len, word); - checksum += word; - carry = checksum < word; - for (; i < len; i++) - *((char *)dst + i) = *((char *)&word + i); - checksum += carry; - } - return checksum; -} - -/* - * This is slightly less fun than the above.. - */ -static inline unsigned long -csum_partial_cfu_src_aligned(const unsigned long __user *src, - unsigned long *dst, unsigned long doff, - long len, unsigned long partial_dest) -{ - unsigned long carry = 0; unsigned long word; - unsigned long second_dest; - int i; unsigned long checksum = ~0U; + int err = 0; - if (len >= 0) { - if (__get_word(ldl, word, src)) - return 0; - checksum += carry; + err = __copy_from_user(dst, src, len+8); + while (len > 0) { + word = *dst; checksum += word; - carry = checksum < word; - stll_u(word, dst); - len -= 8; - src++; + checksum += (checksum < word); dst++; - - inshl(word, doff, partial_dest); - while (len >= 0) { - if (__get_word(ldl, word, src)) - return 0; - len -= 8; - insll(word, doff, second_dest); - checksum += carry; - stl_u(partial_dest | second_dest, dst); - src++; - checksum += word; - inshl(word, doff, partial_dest); - carry = checksum < word; - dst++; - } - sthl_u(word, dst - 1); + len -= 8; } len += 8; + word = *dst; - if (__get_word(ldl, word, src)) - return 0; - maskll(word, len, word); - checksum += carry; + if (len != 8) + maskll(word, len, word); checksum += word; - carry = checksum < word; - for (i = 0; i < len; i++) - *((char *)dst + i) = *((char *)&word + i); + checksum += (checksum < word); - checksum += carry; return checksum; } -/* - * This is so totally un-fun that it's frightening. Don't - * look at this too closely, you'll go blind. 
- */ static inline unsigned long -csum_partial_cfu_unaligned(const unsigned long __user *src, - unsigned long *dst, unsigned long soff, unsigned long doff, - long len, unsigned long partial_dest) +csum_partial_cfu_dest_unaligned(const unsigned long __user *src, + unsigned long *dst, unsigned long doff, long len) { - unsigned long carry = 0; - unsigned long first; - unsigned long second, word; - unsigned long second_dest; - int i; + unsigned long word, patch; + unsigned long partial_dest, second_dest; unsigned long checksum = ~0U; + int err = 0; - if (__get_word(ldl_u, first, src)) - return 0; - if (len >= 0) { - extll(first, soff, word); - if (__get_word(ldl_u, second, src+1)) - return 0; - exthl(second, soff, first); - word |= first; - checksum += carry; - checksum += word; - carry = checksum < word; - stll_u(word, dst); - sthl_u(word, dst); - len -= 8; - src++; - dst++; + err = __copy_from_user(dst, src, len+8); - if (__get_word(ldl_u, first, src)) - return 0; - ldl_u(partial_dest, dst); - maskll(partial_dest, doff, partial_dest); - while (len >= 0) { - if (__get_word(ldl_u, second, src+1)) - return 0; - extll(first, soff, word); - checksum += carry; - len -= 8; - exthl(second, soff, first); - src++; - word |= first; - first = second; - insll(word, doff, second_dest); - checksum += word; - stl_u(partial_dest | second_dest, dst); - carry = checksum < word; - inshl(word, doff, partial_dest); - dst++; - } - sthl_u(word, dst - 1); + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; } - len += 8; - checksum += carry; - if (__get_word(ldl_u, second, src+1)) - return 0; - extll(first, soff, word); - exthl(second, soff, first); - word |= first; - maskll(word, len, word); - checksum += word; - carry = checksum < word; - for (i = 0; i < len; i++) - *((char *)dst + i) = *((char *)&word + i); + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); - checksum += carry; return checksum; } static __wsum __csum_and_copy(const void __user *src, void *dst, int len) { unsigned long checksum; - unsigned long soff = 7 & (unsigned long) src; unsigned long doff = 7 & (unsigned long) dst; if (!doff) { - if (!soff) - checksum = csum_partial_cfu_aligned( - (const unsigned long __user *) src, - (unsigned long *) dst, len-8); - else - checksum = csum_partial_cfu_dest_aligned( - (const unsigned long __user *) src, - (unsigned long *) dst, - soff, len-8); + checksum = csum_partial_cfu_dest_aligned( + (const unsigned long __user *) src, + (unsigned long *) dst, len-8); } else { - unsigned long partial_dest; - - ldl_u(partial_dest, dst); - if (!soff) - checksum = csum_partial_cfu_src_aligned( - (const unsigned long __user *) src, - (unsigned long *) dst, - doff, len-8, partial_dest); - else - checksum = csum_partial_cfu_unaligned( - (const unsigned long __user *) src, - (unsigned long *) dst, - soff, doff, len-8, partial_dest); + checksum = csum_partial_cfu_dest_aligned( + (const unsigned long __user *) src, + (unsigned long *) dst, len-8); } return (__force __wsum)from64to16(checksum); } diff --git a/arch/sw_64/lib/deep-memcpy.S b/arch/sw_64/lib/deep-memcpy.S index 
e847ec3d08df6f3976ca101dc15ba7225be1e4b0..83c726d42778ef7d85758236e9d7cac601b8548d 100644 --- a/arch/sw_64/lib/deep-memcpy.S +++ b/arch/sw_64/lib/deep-memcpy.S @@ -1,240 +1,309 @@ /* SPDX-License-Identifier: GPL-2.0 */ + #include - .set noreorder - .set noat - .align 4 +#define NC_STORE_THRESHOLD 2048 + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x60($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23) + +#define RESTORE_SIMD_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + ldi $sp, 0x60($sp) + +#define SAVE_SIMD_U_REGS \ + ldi $sp, -0x120($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + vstd $f4, 0x40($23); \ + vstd $f5, 0x60($23); \ + vstd $f10, 0x80($23); \ + vstd $f11, 0xa0($23); \ + vstd $f20, 0xc0($23); \ + vstd $f21, 0xe0($23) + +#define RESTORE_SIMD_U_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + vldd $f4, 0x40($23); \ + vldd $f5, 0x60($23); \ + vldd $f10, 0x80($23); \ + vldd $f11, 0xa0($23); \ + vldd $f20, 0xc0($23); \ + vldd $f21, 0xe0($23); \ + ldi $sp, 0x120($sp) + + .set noat + .align 4 .globl memcpy .ent memcpy - memcpy: .frame $30, 0, $26, 0 .prologue 0 - subl $sp, 0xa0, $sp - ldi $4, 0x40($sp) - stl $4, 0($sp) - bic $4, 0x1f, $4 - vstd $f4, 0($4) - vstd $f5, 0x20($4) - mov $16, $0 - ble $18, $nomoredata - xor $16, $17, $1 - and $1, 7, $1 - - bne $1, $misaligned - + ble $18, $out and $16, 7, $1 - beq $1, $both_0mod8 + beq $1, $dest_aligned_8 -$head_align: - ldbu $1, 0($17) + .align 4 +$byte_loop_head: + ldbu $2, 0($17) subl $18, 1, $18 addl $17, 1, $17 - stb $1, 0($16) + stb $2, 0($16) addl $16, 1, $16 + ble $18, $out and $16, 7, $1 - ble $18, $nomoredata - bne $1, $head_align + bne $1, $byte_loop_head -$both_0mod8: - cmple $18, 127, $1 - bne $1, $no_unroll - and $16, 63, $1 - beq $1, $do_unroll - -$single_head_quad: - ldl $1, 0($17) +$dest_aligned_8: + and $17, 7, $4 + subl $18, 16, $18 + blt $18, $quad_end + subl $18, 64, $18 + blt $18, $simd_end + and $16, 31, $1 + beq $1, $dest_aligned_32 + bne $4, $quad_u_loop_head + + .align 5 +$quad_loop_head: + ldl $2, 0($17) subl $18, 8, $18 addl $17, 8, $17 - - stl $1, 0($16) + stl $2, 0($16) addl $16, 8, $16 - and $16, 63, $1 - bne $1, $single_head_quad - -$do_unroll: - addl $16, 64, $7 - cmple $18, 127, $1 - bne $1, $tail_quads - -#JJ - and $17, 31, $1 - bne $1, $unroll_body - -$unroll_body_simd: - ldwe $f31,128*5($17) - vldd $f4, 0($17) - vldd $f5, 32($17) - vstd_nc $f4, 0($16) - vstd_nc $f5, 32($16) + and $16, 31, $1 + blt $18, $simd_end + beq $16, $dest_aligned_32 + br $31, $quad_loop_head + +$dest_aligned_32: + and $17, 31, $5 + bne $5, $prep_simd_u_loop + +$prep_simd_loop: + SAVE_SIMD_REGS + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_loop + + .align 5 +$simd_loop_nc: + fillcs 128 * 5($17) + vldd $f1, 0($17) + vldd $f2, 32($17) + subl $18, 64, $18 + addl $17, 64, $17 + vstd_nc $f1, 0($16) + vstd_nc $f2, 32($16) addl $16, 64, $16 + bge $18, $simd_loop_nc + memb # required for _nc store instructions + br $31, $simd_loop_end + + .align 5 +$simd_loop: + fillcs 128 * 5($17) + vldd $f1, 0($17) + vldd $f2, 32($17) subl $18, 64, $18 addl $17, 64, $17 - cmple $18, 63, $1 - beq $1, $unroll_body_simd - memb - br $no_unroll -#endJJ - -$unroll_body: - #wh64 ($7) - #e_fillcs 0($7) - - ldl $6, 0($17) - #e_fillcs 256($17) - - ldl $4, 8($17) - ldl $5, 16($17) - addl $7, 64, $7 - - ldl $3, 24($17) - addl 
$16, 64, $1 - + vstd $f1, 0($16) + vstd $f2, 32($16) + addl $16, 64, $16 + bge $18, $simd_loop + +$simd_loop_end: + addl $18, 64, $1 + cmplt $1, 32, $1 + bne $1, $no_more_simd + vldd $f1, 0($17) + subl $18, 32, $18 addl $17, 32, $17 - stl_nc $6, 0($16) - - stl_nc $4, 8($16) - stl_nc $5, 16($16) - subl $18, 192, $2 - - stl_nc $3, 24($16) + vstd $f1, 0($16) addl $16, 32, $16 - ldl $6, 0($17) - ldwe $f31, 4*128($17) - #e_fillcs 288($17) - ldl $4, 8($17) - #cmovlt $2, $1, $7 - sellt $2, $1, $7, $7 +$no_more_simd: + RESTORE_SIMD_REGS - ldl $5, 16($17) - ldl $3, 24($17) - addl $16, 32, $16 - subl $18, 64, $18 - - addl $17, 32, $17 - stl_nc $6, -32($16) - stl_nc $4, -24($16) - cmple $18, 63, $1 - - stl_nc $5, -16($16) - stl_nc $3, -8($16) - beq $1, $unroll_body +$simd_end: + addl $18, 64, $18 + blt $18, $quad_end + bne $4, $prep_quad_u_loop_tail - memb - -$tail_quads: -$no_unroll: .align 4 - subl $18, 8, $18 - blt $18, $less_than_8 - -$move_a_quad: - ldl $1, 0($17) +$quad_loop_tail: + ldl $2, 0($17) + ldl $3, 8($17) + subl $18, 16, $18 + addl $17, 16, $17 + stl $2, 0($16) + stl $3, 8($16) + addl $16, 16, $16 + bge $18, $quad_loop_tail + +$quad_end: + addl $18, 16, $18 + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + bne $4, $move_one_quad_u + +$move_one_quad: + ldl $2, 0($17) subl $18, 8, $18 addl $17, 8, $17 - - stl $1, 0($16) + stl $2, 0($16) addl $16, 8, $16 - bge $18, $move_a_quad + ble $18, $out -$less_than_8: .align 4 - addl $18, 8, $18 - ble $18, $nomoredata - - -$tail_bytes: +$byte_loop_tail: + ldbu $2, 0($17) subl $18, 1, $18 - ldbu $1, 0($17) addl $17, 1, $17 - - stb $1, 0($16) + stb $2, 0($16) addl $16, 1, $16 - bgt $18, $tail_bytes - - ldi $4, 0x40($sp) - bic $4, 0x1f, $4 - vldd $f4, 0($4) - vldd $f5, 0x20($4) - ldl $4, 0($sp) - addl $sp, 0xa0, $sp + bgt $18, $byte_loop_tail +$out: ret $31, ($26), 1 -$misaligned: - mov $0, $4 - and $0, 7, $1 - beq $1, $dest_0mod8 - -$aligndest: - ble $18, $nomoredata - ldbu $1, 0($17) - subl $18, 1, $18 - addl $17, 1, $17 - stb $1, 0($4) - addl $4, 1, $4 - and $4, 7, $1 - bne $1, $aligndest - -$dest_0mod8: + .align 5 +$quad_u_loop_head: + ldl_u $2, 0($17) + ldl_u $3, 7($17) subl $18, 8, $18 - blt $18, $misalign_tail - ldl_u $3, 0($17) - -$mis_quad: - ldl_u $16, 8($17) - #extql $3, $17, $3 - fillde 256($17) - and $17, 7, $1 - sll $1, 3, $1 - srl $3, $1, $3 - - #extqh $16, $17, $1 - subl $1, 64, $1 - negl $1, $1 - sll $16, $1, $1 - - bis $3, $1, $1 + addl $17, 8, $17 + extll $2, $4, $2 + exthl $3, $4, $3 + bis $2, $3, $2 + stl $2, 0($16) + addl $16, 8, $16 + blt $18, $simd_end + beq $16, $dest_aligned_32 + br $31, $quad_u_loop_head + +$prep_simd_u_loop: + SAVE_SIMD_U_REGS + andnot $17, 31, $3 + ldi $2, 256($31) + sll $5, 3, $1 + subl $2, $1, $2 + sll $1, 29, $1 + sll $2, 29, $2 + ifmovd $1, $f1 + ifmovd $2, $f2 + vldd $f4, 0($3) + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_u_loop + + .align 5 +$simd_u_loop_nc: + vldd $f5, 32($3) + fillcs 128 * 5($3) + srlow $f4, $f1, $f10 + sllow $f5, $f2, $f11 + vlogfc $f10, $f11, $f31, $f10 + vldd $f4, 64($3) + srlow $f5, $f1, $f20 + sllow $f4, $f2, $f21 + vlogfc $f20, $f21, $f31, $f20 + vstd_nc $f10, 0($16) + vstd_nc $f20, 32($16) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + bge $18, $simd_u_loop_nc + memb # required for _nc store instructions + br $31, $simd_u_loop_end + + .align 5 +$simd_u_loop: + vldd $f5, 32($3) + fillcs 128 * 5($3) + srlow $f4, $f1, $f10 + sllow $f5, $f2, $f11 + vlogfc $f10, $f11, $f31, $f10 + vldd $f4, 64($3) + srlow $f5, $f1, $f20 + sllow 
$f4, $f2, $f21 + vlogfc $f20, $f21, $f31, $f20 + vstd $f10, 0($16) + vstd $f20, 32($16) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + bge $18, $simd_u_loop + +$simd_u_loop_end: + addl $18, 64, $1 + cmplt $1, 32, $1 + bne $1, $no_more_simd_u + vldd $f5, 32($3) + srlow $f4, $f1, $f10 + sllow $f5, $f2, $f11 + vlogfc $f10, $f11, $f31, $f10 + vstd $f10, 0($16) + subl $18, 32, $18 + addl $3, 32, $3 + addl $16, 32, $16 +$no_more_simd_u: + RESTORE_SIMD_U_REGS + bis $3, $5, $17 + br $31, $simd_end + +$prep_quad_u_loop_tail: + ldl_u $2, 0($17) + .align 5 +$quad_u_loop_tail: + ldl_u $3, 8($17) + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + stl $22, 0($16) + ldl_u $2, 16($17) + extll $3, $4, $24 + exthl $2, $4, $25 + bis $24, $25, $24 + stl $24, 8($16) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + bge $18, $quad_u_loop_tail + br $31, $quad_end + +$move_one_quad_u: + ldl_u $2, 0($17) + ldl_u $3, 8($17) subl $18, 8, $18 addl $17, 8, $17 - fillde 128($4) - stl $1, 0($4) - mov $16, $3 - - addl $4, 8, $4 - bge $18, $mis_quad - -$misalign_tail: - addl $18, 8, $18 - ble $18, $nomoredata - -$misalign_byte: - ldbu $1, 0($17) - subl $18, 1, $18 - addl $17, 1, $17 - - stb $1, 0($4) - addl $4, 1, $4 - bgt $18, $misalign_byte - - -$nomoredata: - ldi $4, 0x40($sp) - bic $4, 0x1f, $4 - vldd $f4, 0($4) - vldd $f5, 0x20($4) - ldl $4, 0($sp) - addl $sp, 0xa0, $sp - - ret $31, ($26), 1 + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + stl $22, 0($16) + addl $16, 8, $16 + ble $18, $out + br $31, $byte_loop_tail .end memcpy - EXPORT_SYMBOL(memcpy) + EXPORT_SYMBOL(memcpy) __memcpy = memcpy .globl __memcpy diff --git a/arch/sw_64/lib/deep-memset.S b/arch/sw_64/lib/deep-memset.S index 4efba2062e119c58849b20af93f1394f5c944a3c..ed2171c56d4dd554de7161a0fbd453df3d4800f2 100644 --- a/arch/sw_64/lib/deep-memset.S +++ b/arch/sw_64/lib/deep-memset.S @@ -27,6 +27,8 @@ #include +#define NC_STORE_THRESHOLD 2048 + .set noat .set noreorder .text @@ -35,6 +37,7 @@ .globl __memset .globl ___memset .globl __memsetw + .globl __constant_c_memset .ent ___memset ___memset: .frame $30, 0, $26, 0 @@ -56,6 +59,7 @@ __constant_c_memset: bne $5, $tail_loop /* loop until SRC is 8 bytes aligned */ + .align 5 $head_loop: and $16, 0x7, $1 beq $1, $mod8_aligned @@ -68,6 +72,7 @@ $head_loop: $mod8_aligned: /* set 8 bytes each time */ + .align 5 $mod8_loop: and $16, 0x1f, $1 beq $1, $mod32_aligned @@ -86,23 +91,39 @@ $mod32_aligned: ifmovd $17, $f10 vcpyf $f10, $f10 + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $mod32_loop + /* set 64 bytes each time */ -$mod32_loop: + .align 5 +$mod32_loop_nc: subl $18, 64, $18 blt $18, $mod32_tail vstd_nc $f10, 0($16) vstd_nc $f10, 32($16) addl $16, 64, $16 + br $31, $mod32_loop_nc + memb # required for _nc store instructions + + .align 5 +$mod32_loop: + subl $18, 64, $18 + blt $18, $mod32_tail + vstd $f10, 0($16) + vstd $f10, 32($16) + addl $16, 64, $16 br $31, $mod32_loop $mod32_tail: vldd $f10, 0($4) addl $sp, 64, $sp addl $18, 64, $18 + .align 5 $mod32_tail_loop: subl $18, 8, $18 blt $18, $tail - stl_nc $17, 0($16) + stl $17, 0($16) addl $16, 8, $16 br $31, $mod32_tail_loop @@ -110,6 +131,7 @@ $tail: addl $18, 8, $18 /* set one byte each time */ + .align 5 $tail_loop: beq $18, $out stb $17, 0($16) @@ -119,7 +141,6 @@ $tail_loop: /* done, return */ $out: - memb # required for _nc store instructions ret .end ___memset diff --git a/arch/sw_64/lib/fls.c b/arch/sw_64/lib/fls.c index 
e960b1c06782e483d91522e73d81c5ce8f405c67..aa4231f7e472dc1fdca58ea2d63631b64cc2fc4f 100644 --- a/arch/sw_64/lib/fls.c +++ b/arch/sw_64/lib/fls.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include /* This is fls(x)-1, except zero is held to zero. This allows most * efficient input into extbl, plus it allows easy handling of fls(0)=0. diff --git a/arch/sw_64/lib/iomap.c b/arch/sw_64/lib/iomap.c index 30d24923624d0968bc10467d797ed4548df30ef5..39e3d5498ae617f0b240ff76cafd17c6715250e0 100644 --- a/arch/sw_64/lib/iomap.c +++ b/arch/sw_64/lib/iomap.c @@ -3,10 +3,8 @@ * Sw_64 IO and memory functions. */ -#include -#include -#include #include + #include /* diff --git a/arch/sw_64/lib/udelay.c b/arch/sw_64/lib/udelay.c index 595887caa7b3cca41de7a32dd7fd0b7e98e7d23a..48356ab8872f89f6f3fb75189c4fa9760f14b1bc 100644 --- a/arch/sw_64/lib/udelay.c +++ b/arch/sw_64/lib/udelay.c @@ -6,11 +6,6 @@ */ #include -#include /* for udelay's use of smp_processor_id */ -#include -#include -#include -#include /* * Use only for very small delays (< 1 msec). diff --git a/arch/sw_64/math-emu/math.c b/arch/sw_64/math-emu/math.c index 3903b421b8f48273239da06c063eafff57f39956..9f281d82ad83cc76632901f9f2f96aaac7a17b47 100644 --- a/arch/sw_64/math-emu/math.c +++ b/arch/sw_64/math-emu/math.c @@ -8,16 +8,12 @@ * fire3 2008-12-27 Add SIMD floating emulation code for SW64 */ -#include -#include -#include -#include -#include - - #include +#include + #include "sfp-util.h" + #include #include #include diff --git a/arch/sw_64/mm/fault.c b/arch/sw_64/mm/fault.c index c68be4a40d23a039dc202545fd317259c054f135..b580450893bae48eb19bc7de17d8cdc9bfbf0f9b 100644 --- a/arch/sw_64/mm/fault.c +++ b/arch/sw_64/mm/fault.c @@ -3,28 +3,11 @@ * Copyright (C) 1995 Linus Torvalds */ -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include -#include + +#include __read_mostly bool segv_debug_enabled; diff --git a/arch/sw_64/mm/hugetlbpage.c b/arch/sw_64/mm/hugetlbpage.c index 3c03709d441c3e27e5fd6fe0e3fdf0b44597c9d5..2a40225af4d810361068bc5a6d8dcbad9b846e6e 100644 --- a/arch/sw_64/mm/hugetlbpage.c +++ b/arch/sw_64/mm/hugetlbpage.c @@ -3,18 +3,13 @@ * SW64 Huge TLB Page Support for Kernel. */ -#include -#include #include #include #include -#include #include -#include + #include #include -#include -#include /* * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index d0e934356dd5000c92179f711f9fba560ded09f2..7fcd3d834ba53c5c4b138f29f4cf20872b800ffd 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -5,36 +5,13 @@ /* 2.3.x zone allocator, 1999 Andrea Arcangeli */ -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include -#include -#include -#include -#include #include -#include #include #include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include extern void die_if_kernel(char *, struct pt_regs *, long); @@ -193,15 +170,27 @@ void __init sw64_memblock_init(void) memblock_remove(1ULL << MAX_PHYSMEM_BITS, PHYS_ADDR_MAX); - /* Make sure kernel text is in memory range. 
 */
-	memblock_add(__pa_symbol(_text), (unsigned long)(_end - _text));
-	memblock_reserve(__pa_symbol(_text), _end - _text);
-	max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
 	memblock_allow_resize();
 	memblock_initialized = true;
 	process_memmap();
+
+	/* Make sure kernel text is in memory range. */
+	memblock_add(__pa_symbol(_text), _end - _text);
+	memblock_reserve(__pa_symbol(_text), _end - _text);
+
+	/* Make sure initrd is in memory range. */
+	if (sunway_boot_params->initrd_start) {
+		phys_addr_t base = __pa(sunway_boot_params->initrd_start);
+		phys_addr_t size = sunway_boot_params->initrd_size;
+
+		memblock_add(base, size);
+		memblock_reserve(base, size);
+	}
+
+	/* end of DRAM range may have been changed */
+	max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
 }
 
 #ifndef CONFIG_NUMA
@@ -323,14 +312,13 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 #endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		    bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, params);
 	if (ret)
 		printk("%s: Problem encountered in __add_pages() as ret=%d\n", __func__, ret);
 
diff --git a/arch/sw_64/mm/numa.c b/arch/sw_64/mm/numa.c
index 97288d91d7bb22fd2f5240e31b783a2301a4b1b3..7cb13587e465d821f8fb56a6ea8e782091921d11 100644
--- a/arch/sw_64/mm/numa.c
+++ b/arch/sw_64/mm/numa.c
@@ -3,27 +3,11 @@
  * DISCONTIGMEM NUMA sw64 support.
  */
-#include
-#include
-#include
 #include
-#include
-#include
-#include
-#include
 #include
-#include
-#ifdef CONFIG_PCI
-#include
-#endif
 #include
 #include
-#include
-#include
-#include
-#include
-#include
 #include
 
 int cpu_to_node_map[NR_CPUS];
@@ -417,24 +401,32 @@ void numa_store_cpu_info(unsigned int cpu)
 	set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
 }
 
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
 /*
  * Returns a pointer to the bitmask of CPUs on Node 'node'.
  */
 const struct cpumask *cpumask_of_node(int node)
 {
-	if (node == NUMA_NO_NODE)
+	if (node == NUMA_NO_NODE) {
+		pr_warn("%s: NUMA_NO_NODE\n", __func__);
 		return cpu_all_mask;
+	}
 
-	if (WARN_ON(node < 0 || node >= nr_node_ids))
+	if (WARN_ON(node < 0 || node >= nr_node_ids)) {
+		pr_warn("%s: invalid node %d\n", __func__, node);
 		return cpu_none_mask;
+	}
 
-	if (WARN_ON(node_to_cpumask_map[node] == NULL))
+	if (WARN_ON(node_to_cpumask_map[node] == NULL)) {
+		pr_warn("%s: uninitialized node %d\n", __func__, node);
 		return cpu_online_mask;
+	}
 
 	return node_to_cpumask_map[node];
 }
 EXPORT_SYMBOL(cpumask_of_node);
+#endif
 
 static void numa_update_cpu(unsigned int cpu, bool remove)
 {
diff --git a/arch/sw_64/mm/physaddr.c b/arch/sw_64/mm/physaddr.c
index d5cf83e671ae3cb2ae87919e9edf8ae0c3e4cc31..fbb489ae4db5a75a6423ea45b32681614e1d1504 100644
--- a/arch/sw_64/mm/physaddr.c
+++ b/arch/sw_64/mm/physaddr.c
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include
 #include
-#include
 #include
 
 unsigned long __phys_addr(unsigned long x)
diff --git a/arch/sw_64/mm/thp.c b/arch/sw_64/mm/thp.c
index 68260dd0e837926664ca9918ddb5ffb3a4f430f4..833bb59f79d0e9f01fb9813eec8fc5cb24df9da3 100644
--- a/arch/sw_64/mm/thp.c
+++ b/arch/sw_64/mm/thp.c
@@ -1,13 +1,4 @@
 // SPDX-License-Identifier: GPL-2.0
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
 #include
 #include
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 6a22ead31c5b8159537e12abd2ce1f263a56d5ee..66d5452c2bcb6f2405efc3ebbcd0ce4d640f53b7 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -223,7 +223,7 @@ void mconsole_go(struct mc_request *req)
 
 void mconsole_stop(struct mc_request *req)
 {
-	deactivate_fd(req->originating_fd, MCONSOLE_IRQ);
+	block_signals();
 	os_set_fd_block(req->originating_fd, 1);
 	mconsole_reply(req, "stopped", 0, 0);
 	for (;;) {
@@ -246,6 +246,7 @@ void mconsole_stop(struct mc_request *req)
 	}
 	os_set_fd_block(req->originating_fd, 0);
 	mconsole_reply(req, "", 0, 0);
+	unblock_signals();
 }
 
 static DEFINE_SPINLOCK(mc_devices_lock);
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 61c4be815462d1d9295610aedd2179276a31ca87..f9c94b618ad46df4fb371aa94d4056ff321a6980 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -6370,7 +6370,7 @@ CONFIG_HSU_DMA=y
 #
 # DMA Clients
 #
 CONFIG_ASYNC_TX_DMA=y
-CONFIG_DMATEST=y
+CONFIG_DMATEST=m
 CONFIG_DMA_ENGINE_RAID=y
 
 #
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index c084899e9582546d96cb49997e936ef1a8d89939..cc3b79c06685303983984ef191973f12ebe7ca90 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -472,7 +472,7 @@ static u64 pt_config_filters(struct perf_event *event)
 			pt->filters.filter[range].msr_b = filter->msr_b;
 		}
 
-		rtit_ctl |= filter->config << pt_address_ranges[range].reg_off;
+		rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off;
 	}
 
 	return rtit_ctl;
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 1e9b13636f17b58150e5bf1045a1b2d9a4589644..4da419226377d02572ce6504b6811a29e842ed08 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -419,5 +419,6 @@
 #define X86_BUG_TAA			X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
 #define X86_BUG_ITLB_MULTIHIT		X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS			X86_BUG(24) /* CPU may
leak RNG bits if not mitigated */ +#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 4e5af2b00d89ba9651d3e5a84ef754ce96b68ce1..70b9bc5403c5e175081d3ba7c351af1b7ffa78d5 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -531,9 +531,11 @@ static inline void __fpregs_load_activate(void) * The FPU context is only stored/restored for a user task and * PF_KTHREAD is used to distinguish between kernel and user threads. */ -static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) +static inline void switch_fpu_prepare(struct task_struct *prev, int cpu) { - if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) { + struct fpu *old_fpu = &prev->thread.fpu; + + if (static_cpu_has(X86_FEATURE_FPU) && !(prev->flags & PF_KTHREAD)) { if (!copy_fpregs_to_fpstate(old_fpu)) old_fpu->last_cpu = -1; else @@ -552,10 +554,11 @@ static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) * Load PKRU from the FPU context if available. Delay loading of the * complete FPU state until the return to userland. */ -static inline void switch_fpu_finish(struct fpu *new_fpu) +static inline void switch_fpu_finish(struct task_struct *next) { u32 pkru_val = init_pkru_value; struct pkru_state *pk; + struct fpu *next_fpu = &next->thread.fpu; if (!static_cpu_has(X86_FEATURE_FPU)) return; @@ -569,7 +572,7 @@ static inline void switch_fpu_finish(struct fpu *new_fpu) * PKRU state is switched eagerly because it needs to be valid before we * return to userland e.g. for a copy_to_user() operation. */ - if (!(current->flags & PF_KTHREAD)) { + if (!(next->flags & PF_KTHREAD)) { /* * If the PKRU bit in xsave.header.xfeatures is not set, * then the PKRU component was in init state, which means @@ -578,7 +581,7 @@ static inline void switch_fpu_finish(struct fpu *new_fpu) * in memory is not valid. This means pkru_val has to be * set to 0 and not to init_pkru_value. */ - pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU); + pk = get_xsave_addr(&next_fpu->state.xsave, XFEATURE_PKRU); pkru_val = pk ? pk->pkru : 0; } __write_pkru(pkru_val); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4ab8f866e39d779e47f598a3c3ba5272bcb9300d..a3bf158f5b1237a568071d97e116b5eea0edcced 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1286,6 +1286,7 @@ struct kvm_x86_ops { int (*mem_enc_op)(struct kvm *kvm, void __user *argp); int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); + void (*guest_memory_reclaimed)(struct kvm *kvm); int (*get_msr_feature)(struct kvm_msr_entry *entry); diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h index e23c2da3c323ae83d3f0a243bc904e3606405f01..b510f935ec111284a06d964153f5b58a666582b9 100644 --- a/arch/x86/include/asm/livepatch.h +++ b/arch/x86/include/asm/livepatch.h @@ -37,9 +37,23 @@ int klp_check_calltrace(struct klp_patch *patch, int enable); #define JMP_E9_INSN_SIZE 5 struct arch_klp_data { unsigned char old_code[JMP_E9_INSN_SIZE]; +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + /* + * Saved opcode at the entry of the old func (which maybe replaced + * with breakpoint). 
+ */ + unsigned char saved_opcode; +#endif }; long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +int arch_klp_check_breakpoint(struct arch_klp_data *arch_data, void *old_func); +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); +int klp_int3_handler(struct pt_regs *regs); +int arch_klp_module_check_calltrace(void *data); +#endif #endif diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 5de2040b73a77d9f09682bb76a6955d0522a0196..2b0af5eb5131f85eba11877f9e37546ccd3a5bdf 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -114,6 +114,30 @@ * Not susceptible to * TSX Async Abort (TAA) vulnerabilities. */ +#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* + * Not susceptible to SBDR and SSDP + * variants of Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FBSDP_NO BIT(14) /* + * Not susceptible to FBSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_PSDP_NO BIT(15) /* + * Not susceptible to PSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FB_CLEAR BIT(17) /* + * VERW clears CPU fill buffer + * even on MDS_NO CPUs. + */ +#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* + * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] + * bit available to control VERW + * behavior. + */ #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* @@ -131,6 +155,7 @@ /* SRBDS support */ #define MSR_IA32_MCU_OPT_CTRL 0x00000123 #define RNGDS_MITG_DIS BIT(0) +#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 4d0f5386e637ba09361f36abddc7fb8ac0fe3bde..e247151c3dcf241347e1f4db5b39daba3217a815 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -255,6 +255,8 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); DECLARE_STATIC_KEY_FALSE(mds_user_clear); DECLARE_STATIC_KEY_FALSE(mds_idle_clear); +DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); + #include /** diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 14cd3186dc77dc7a929a47026bfa8c3152f89308..55562a9b7f92e9d6bc5f5db958dee01753f8321c 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1340,6 +1340,17 @@ static int __init disable_acpi_pci(const struct dmi_system_id *d) return 0; } +static int __init disable_acpi_xsdt(const struct dmi_system_id *d) +{ + if (!acpi_force) { + pr_notice("%s detected: force use of acpi=rsdt\n", d->ident); + acpi_gbl_do_not_use_xsdt = TRUE; + } else { + pr_notice("Warning: DMI blacklist says broken, but acpi XSDT forced\n"); + } + return 0; +} + static int __init dmi_disable_acpi(const struct dmi_system_id *d) { if (!acpi_force) { @@ -1464,6 +1475,19 @@ static const struct dmi_system_id acpi_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, + /* + * Boxes that need ACPI XSDT use disabled due to corrupted tables + */ + { + .callback = disable_acpi_xsdt, + .ident = "Advantech DAC-BJ01", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "NEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "Bearlake CRB Board"), + DMI_MATCH(DMI_BIOS_VERSION, "V1.12"), + DMI_MATCH(DMI_BIOS_DATE, "02/01/2011"), + }, + }, {} }; diff --git 
a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 78b9514a3844051916aec16ba69d8a8fe1937417..2a21046846b6fd005a59e6a4defbbd789f9b2948 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -41,8 +41,10 @@ static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); static void __init mds_select_mitigation(void); -static void __init mds_print_mitigation(void); +static void __init md_clear_update_mitigation(void); +static void __init md_clear_select_mitigation(void); static void __init taa_select_mitigation(void); +static void __init mmio_select_mitigation(void); static void __init srbds_select_mitigation(void); /* The base value of the SPEC_CTRL MSR that always has to be preserved. */ @@ -77,6 +79,10 @@ EXPORT_SYMBOL_GPL(mds_user_clear); DEFINE_STATIC_KEY_FALSE(mds_idle_clear); EXPORT_SYMBOL_GPL(mds_idle_clear); +/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ +DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); +EXPORT_SYMBOL_GPL(mmio_stale_data_clear); + void __init check_bugs(void) { identify_boot_cpu(); @@ -109,16 +115,9 @@ void __init check_bugs(void) spectre_v2_select_mitigation(); ssb_select_mitigation(); l1tf_select_mitigation(); - mds_select_mitigation(); - taa_select_mitigation(); + md_clear_select_mitigation(); srbds_select_mitigation(); - /* - * As MDS and TAA mitigations are inter-related, print MDS - * mitigation until after TAA mitigation selection is done. - */ - mds_print_mitigation(); - arch_smt_update(); #ifdef CONFIG_X86_32 @@ -258,14 +257,6 @@ static void __init mds_select_mitigation(void) } } -static void __init mds_print_mitigation(void) -{ - if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) - return; - - pr_info("%s\n", mds_strings[mds_mitigation]); -} - static int __init mds_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_MDS)) @@ -320,7 +311,7 @@ static void __init taa_select_mitigation(void) /* TSX previously disabled by tsx=off */ if (!boot_cpu_has(X86_FEATURE_RTM)) { taa_mitigation = TAA_MITIGATION_TSX_DISABLED; - goto out; + return; } if (cpu_mitigations_off()) { @@ -334,7 +325,7 @@ static void __init taa_select_mitigation(void) */ if (taa_mitigation == TAA_MITIGATION_OFF && mds_mitigation == MDS_MITIGATION_OFF) - goto out; + return; if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) taa_mitigation = TAA_MITIGATION_VERW; @@ -366,18 +357,6 @@ static void __init taa_select_mitigation(void) if (taa_nosmt || cpu_mitigations_auto_nosmt()) cpu_smt_disable(false); - - /* - * Update MDS mitigation, if necessary, as the mds_user_clear is - * now enabled for TAA mitigation. 
- */ - if (mds_mitigation == MDS_MITIGATION_OFF && - boot_cpu_has_bug(X86_BUG_MDS)) { - mds_mitigation = MDS_MITIGATION_FULL; - mds_select_mitigation(); - } -out: - pr_info("%s\n", taa_strings[taa_mitigation]); } static int __init tsx_async_abort_parse_cmdline(char *str) @@ -401,6 +380,151 @@ static int __init tsx_async_abort_parse_cmdline(char *str) } early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); +#undef pr_fmt +#define pr_fmt(fmt) "MMIO Stale Data: " fmt + +enum mmio_mitigations { + MMIO_MITIGATION_OFF, + MMIO_MITIGATION_UCODE_NEEDED, + MMIO_MITIGATION_VERW, +}; + +/* Default mitigation for Processor MMIO Stale Data vulnerabilities */ +static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; +static bool mmio_nosmt __ro_after_init = false; + +static const char * const mmio_strings[] = { + [MMIO_MITIGATION_OFF] = "Vulnerable", + [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", + [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", +}; + +static void __init mmio_select_mitigation(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || + cpu_mitigations_off()) { + mmio_mitigation = MMIO_MITIGATION_OFF; + return; + } + + if (mmio_mitigation == MMIO_MITIGATION_OFF) + return; + + ia32_cap = x86_read_arch_cap_msr(); + + /* + * Enable CPU buffer clear mitigation for host and VMM, if also affected + * by MDS or TAA. Otherwise, enable mitigation for VMM only. + */ + if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && + boot_cpu_has(X86_FEATURE_RTM))) + static_branch_enable(&mds_user_clear); + else + static_branch_enable(&mmio_stale_data_clear); + + /* + * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can + * be propagated to uncore buffers, clearing the Fill buffers on idle + * is required irrespective of SMT state. + */ + if (!(ia32_cap & ARCH_CAP_FBSDP_NO)) + static_branch_enable(&mds_idle_clear); + + /* + * Check if the system has the right microcode. + * + * CPU Fill buffer clear mitigation is enumerated by either an explicit + * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS + * affected systems. + */ + if ((ia32_cap & ARCH_CAP_FB_CLEAR) || + (boot_cpu_has(X86_FEATURE_MD_CLEAR) && + boot_cpu_has(X86_FEATURE_FLUSH_L1D) && + !(ia32_cap & ARCH_CAP_MDS_NO))) + mmio_mitigation = MMIO_MITIGATION_VERW; + else + mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; + + if (mmio_nosmt || cpu_mitigations_auto_nosmt()) + cpu_smt_disable(false); +} + +static int __init mmio_stale_data_parse_cmdline(char *str) +{ + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + return 0; + + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) { + mmio_mitigation = MMIO_MITIGATION_OFF; + } else if (!strcmp(str, "full")) { + mmio_mitigation = MMIO_MITIGATION_VERW; + } else if (!strcmp(str, "full,nosmt")) { + mmio_mitigation = MMIO_MITIGATION_VERW; + mmio_nosmt = true; + } + + return 0; +} +early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); + +#undef pr_fmt +#define pr_fmt(fmt) "" fmt + +static void __init md_clear_update_mitigation(void) +{ + if (cpu_mitigations_off()) + return; + + if (!static_key_enabled(&mds_user_clear)) + goto out; + + /* + * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data + * mitigation, if necessary. 
+ */ + if (mds_mitigation == MDS_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MDS)) { + mds_mitigation = MDS_MITIGATION_FULL; + mds_select_mitigation(); + } + if (taa_mitigation == TAA_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_TAA)) { + taa_mitigation = TAA_MITIGATION_VERW; + taa_select_mitigation(); + } + if (mmio_mitigation == MMIO_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { + mmio_mitigation = MMIO_MITIGATION_VERW; + mmio_select_mitigation(); + } +out: + if (boot_cpu_has_bug(X86_BUG_MDS)) + pr_info("MDS: %s\n", mds_strings[mds_mitigation]); + if (boot_cpu_has_bug(X86_BUG_TAA)) + pr_info("TAA: %s\n", taa_strings[taa_mitigation]); + if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); +} + +static void __init md_clear_select_mitigation(void) +{ + mds_select_mitigation(); + taa_select_mitigation(); + mmio_select_mitigation(); + + /* + * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update + * and print their mitigation after MDS, TAA and MMIO Stale Data + * mitigation selection is done. + */ + md_clear_update_mitigation(); +} + #undef pr_fmt #define pr_fmt(fmt) "SRBDS: " fmt @@ -462,11 +586,13 @@ static void __init srbds_select_mitigation(void) return; /* - * Check to see if this is one of the MDS_NO systems supporting - * TSX that are only exposed to SRBDS when TSX is enabled. + * Check to see if this is one of the MDS_NO systems supporting TSX that + * are only exposed to SRBDS when TSX is enabled or when CPU is affected + * by Processor MMIO Stale Data vulnerability. */ ia32_cap = x86_read_arch_cap_msr(); - if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM)) + if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && + !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; @@ -1072,6 +1198,8 @@ static void update_indir_branch_cond(void) /* Update the static key controlling the MDS CPU buffer clear in idle */ static void update_mds_branch_idle(void) { + u64 ia32_cap = x86_read_arch_cap_msr(); + /* * Enable the idle clearing if SMT is active on CPUs which are * affected only by MSBDS and not any other MDS variant. @@ -1083,14 +1211,17 @@ static void update_mds_branch_idle(void) if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) return; - if (sched_smt_active()) + if (sched_smt_active()) { static_branch_enable(&mds_idle_clear); - else + } else if (mmio_mitigation == MMIO_MITIGATION_OFF || + (ia32_cap & ARCH_CAP_FBSDP_NO)) { static_branch_disable(&mds_idle_clear); + } } #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" +#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" void cpu_bugs_smt_update(void) { @@ -1135,6 +1266,16 @@ void cpu_bugs_smt_update(void) break; } + switch (mmio_mitigation) { + case MMIO_MITIGATION_VERW: + case MMIO_MITIGATION_UCODE_NEEDED: + if (sched_smt_active()) + pr_warn_once(MMIO_MSG_SMT); + break; + case MMIO_MITIGATION_OFF: + break; + } + mutex_unlock(&spec_ctrl_mutex); } @@ -1704,6 +1845,20 @@ static ssize_t tsx_async_abort_show_state(char *buf) sched_smt_active() ? "vulnerable" : "disabled"); } +static ssize_t mmio_stale_data_show_state(char *buf) +{ + if (mmio_mitigation == MMIO_MITIGATION_OFF) + return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + return sysfs_emit(buf, "%s; SMT Host state unknown\n", + mmio_strings[mmio_mitigation]); + } + + return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], + sched_smt_active() ? "vulnerable" : "disabled"); +} + static char *stibp_state(void) { if (spectre_v2_in_eibrs_mode(spectre_v2_enabled)) @@ -1804,6 +1959,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_SRBDS: return srbds_show_state(buf); + case X86_BUG_MMIO_STALE_DATA: + return mmio_stale_data_show_state(buf); + default: break; } @@ -1855,4 +2013,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char * { return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); } + +ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 9c8fc6f513ed3f9b9142a1c253c65f3760345814..4917c2698ac1f2a7aebb34a62716d8d3520c9e35 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1098,18 +1098,42 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { X86_FEATURE_ANY, issues) #define SRBDS BIT(0) +/* CPU is affected by X86_BUG_MMIO_STALE_DATA */ +#define MMIO BIT(1) +/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */ +#define MMIO_SBDS BIT(2) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_X, BIT(2) | BIT(4), MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x5), MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO), VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_X, BIT(3) | BIT(4) | BIT(6) | + BIT(7) | BIT(0xB), MMIO), + VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO), VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), - VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS), - VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0x8), SRBDS), + 
VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0x8), SRBDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x1, 0x1), MMIO), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO), + VULNBL_INTEL_STEPPINGS(COMETLAKE, BIT(2) | BIT(3) | BIT(5), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO), + VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPINGS(0x1, 0x1), MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPINGS(0x0, 0x0), MMIO | MMIO_SBDS), {} }; @@ -1130,6 +1154,13 @@ u64 x86_read_arch_cap_msr(void) return ia32_cap; } +static bool arch_cap_mmio_immune(u64 ia32_cap) +{ + return (ia32_cap & ARCH_CAP_FBSDP_NO && + ia32_cap & ARCH_CAP_PSDP_NO && + ia32_cap & ARCH_CAP_SBDR_SSDP_NO); +} + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) { u64 ia32_cap = x86_read_arch_cap_msr(); @@ -1183,12 +1214,27 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) /* * SRBDS affects CPUs which support RDRAND or RDSEED and are listed * in the vulnerability blacklist. + * + * Some of the implications and mitigation of Shared Buffers Data + * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as + * SRBDS. */ if ((cpu_has(c, X86_FEATURE_RDRAND) || cpu_has(c, X86_FEATURE_RDSEED)) && - cpu_matches(cpu_vuln_blacklist, SRBDS)) + cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS)) setup_force_cpu_bug(X86_BUG_SRBDS); + /* + * Processor MMIO Stale Data bug enumeration + * + * Affected CPU list is generally enough to enumerate the vulnerability, + * but for virtualization case check for ARCH_CAP MSR bits also, VMM may + * not want the guest to enumerate the bug. + */ + if (cpu_matches(cpu_vuln_blacklist, MMIO) && + !arch_cap_mmio_immune(ia32_cap)) + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c index 97fb7efce224332c81fb713933b8ddc3a5a067c9..3aaa709b5054702c7936a978729ea3d282c75b73 100644 --- a/arch/x86/kernel/cpu/sgx/encl.c +++ b/arch/x86/kernel/cpu/sgx/encl.c @@ -12,6 +12,116 @@ #include "encls.h" #include "sgx.h" +#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd)) +/* + * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to + * determine the page index associated with the first PCMD entry + * within a PCMD page. + */ +#define PCMD_FIRST_MASK GENMASK(4, 0) + +/** + * reclaimer_writing_to_pcmd() - Query if any enclave page associated with + * a PCMD page is in process of being reclaimed. + * @encl: Enclave to which PCMD page belongs + * @start_addr: Address of enclave page using first entry within the PCMD page + * + * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is + * stored. The PCMD data of a reclaimed enclave page contains enough + * information for the processor to verify the page at the time + * it is loaded back into the Enclave Page Cache (EPC). 
+ * + * The backing storage to which enclave pages are reclaimed is laid out as + * follows: + * Encrypted enclave pages:SECS page:PCMD pages + * + * Each PCMD page contains the PCMD metadata of + * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages. + * + * A PCMD page can only be truncated if it is (a) empty, and (b) not in the + * process of getting data (and thus soon being non-empty). (b) is tested with + * a check if an enclave page sharing the PCMD page is in the process of being + * reclaimed. + * + * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it + * intends to reclaim that enclave page - it means that the PCMD page + * associated with that enclave page is about to get some data and thus + * even if the PCMD page is empty, it should not be truncated. + * + * Context: Enclave mutex (&sgx_encl->lock) must be held. + * Return: 1 if the reclaimer is about to write to the PCMD page + * 0 if the reclaimer has no intention to write to the PCMD page + */ +static int reclaimer_writing_to_pcmd(struct sgx_encl *encl, + unsigned long start_addr) +{ + int reclaimed = 0; + int i; + + /* + * PCMD_FIRST_MASK is based on number of PCMD entries within + * PCMD page being 32. + */ + BUILD_BUG_ON(PCMDS_PER_PAGE != 32); + + for (i = 0; i < PCMDS_PER_PAGE; i++) { + struct sgx_encl_page *entry; + unsigned long addr; + + addr = start_addr + i * PAGE_SIZE; + + /* + * Stop when reaching the SECS page - it does not + * have a page_array entry and its reclaim is + * started and completed with enclave mutex held so + * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED + * flag. + */ + if (addr == encl->base + encl->size) + break; + + entry = xa_load(&encl->page_array, PFN_DOWN(addr)); + if (!entry) + continue; + + /* + * VA page slot ID uses same bit as the flag so it is important + * to ensure that the page is not already in backing store. + */ + if (entry->epc_page && + (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) { + reclaimed = 1; + break; + } + } + + return reclaimed; +} + +/* + * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's + * follow right after the EPC data in the backing storage. In addition to the + * visible enclave pages, there's one extra page slot for SECS, before PCMD + * structs. + */ +static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl, + unsigned long page_index) +{ + pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs); + + return epc_end_off + page_index * sizeof(struct sgx_pcmd); +} + +/* + * Free a page from the backing storage in the given page index. + */ +static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index) +{ + struct inode *inode = file_inode(encl->backing); + + shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1); +} + /* * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC * Pages" in the SDM. @@ -22,9 +132,12 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page, { unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK; struct sgx_encl *encl = encl_page->encl; + pgoff_t page_index, page_pcmd_off; + unsigned long pcmd_first_page; struct sgx_pageinfo pginfo; struct sgx_backing b; - pgoff_t page_index; + bool pcmd_page_empty; + u8 *pcmd_page; int ret; if (secs_page) @@ -32,14 +145,21 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page, else page_index = PFN_DOWN(encl->size); + /* + * Address of enclave page using the first entry within the PCMD page. 
+ */ + pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base; + + page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index); + ret = sgx_encl_get_backing(encl, page_index, &b); if (ret) return ret; pginfo.addr = encl_page->desc & PAGE_MASK; pginfo.contents = (unsigned long)kmap_atomic(b.contents); - pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) + - b.pcmd_offset; + pcmd_page = kmap_atomic(b.pcmd); + pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset; if (secs_page) pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page); @@ -55,11 +175,25 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page, ret = -EFAULT; } - kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset)); + memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd)); + set_page_dirty(b.pcmd); + + /* + * The area for the PCMD in the page was zeroed above. Check if the + * whole page is now empty meaning that all PCMD's have been zeroed: + */ + pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE); + + kunmap_atomic(pcmd_page); kunmap_atomic((void *)(unsigned long)pginfo.contents); sgx_encl_put_backing(&b, false); + sgx_encl_truncate_backing_page(encl, page_index); + + if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) + sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off)); + return ret; } @@ -577,7 +711,7 @@ static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl, int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, struct sgx_backing *backing) { - pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5); + pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index); struct page *contents; struct page *pcmd; @@ -585,7 +719,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, if (IS_ERR(contents)) return PTR_ERR(contents); - pcmd = sgx_encl_get_backing_page(encl, pcmd_index); + pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off)); if (IS_ERR(pcmd)) { put_page(contents); return PTR_ERR(pcmd); @@ -594,9 +728,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, backing->page_index = page_index; backing->contents = contents; backing->pcmd = pcmd; - backing->pcmd_offset = - (page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) * - sizeof(struct sgx_pcmd); + backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1); return 0; } diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 629c4994f1654cd059daa7116b511a3578e22b66..7f57110f958e1cc6e853a697b619117528d7e05b 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -995,8 +995,10 @@ early_param("memmap", parse_memmap_opt); */ void __init e820__reserve_setup_data(void) { + struct setup_indirect *indirect; struct setup_data *data; - u64 pa_data; + u64 pa_data, pa_next; + u32 len; pa_data = boot_params.hdr.setup_data; if (!pa_data) @@ -1004,6 +1006,14 @@ void __init e820__reserve_setup_data(void) while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); + if (!data) { + pr_warn("e820: failed to memremap setup_data entry\n"); + return; + } + + len = sizeof(*data); + pa_next = data->next; + e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); /* @@ -1015,18 +1025,27 @@ void __init e820__reserve_setup_data(void) sizeof(*data) + data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { - 
e820__range_update(((struct setup_indirect *)data->data)->addr, - ((struct setup_indirect *)data->data)->len, - E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); - e820__range_update_kexec(((struct setup_indirect *)data->data)->addr, - ((struct setup_indirect *)data->data)->len, - E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); + if (data->type == SETUP_INDIRECT) { + len += data->len; + early_memunmap(data, sizeof(*data)); + data = early_memremap(pa_data, len); + if (!data) { + pr_warn("e820: failed to memremap indirect setup_data\n"); + return; + } + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) { + e820__range_update(indirect->addr, indirect->len, + E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); + e820__range_update_kexec(indirect->addr, indirect->len, + E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); + } } - pa_data = data->next; - early_memunmap(data, sizeof(*data)); + pa_data = pa_next; + early_memunmap(data, len); } e820__update_table(e820_table); diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c index 64b6da95af984868962777bb4978b3fa8a4eff16..e2e89bebcbc32840357788cbd803805e5f652c62 100644 --- a/arch/x86/kernel/kdebugfs.c +++ b/arch/x86/kernel/kdebugfs.c @@ -88,11 +88,13 @@ create_setup_data_node(struct dentry *parent, int no, static int __init create_setup_data_nodes(struct dentry *parent) { + struct setup_indirect *indirect; struct setup_data_node *node; struct setup_data *data; - int error; + u64 pa_data, pa_next; struct dentry *d; - u64 pa_data; + int error; + u32 len; int no = 0; d = debugfs_create_dir("setup_data", parent); @@ -112,12 +114,29 @@ static int __init create_setup_data_nodes(struct dentry *parent) error = -ENOMEM; goto err_dir; } - - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { - node->paddr = ((struct setup_indirect *)data->data)->addr; - node->type = ((struct setup_indirect *)data->data)->type; - node->len = ((struct setup_indirect *)data->data)->len; + pa_next = data->next; + + if (data->type == SETUP_INDIRECT) { + len = sizeof(*data) + data->len; + memunmap(data); + data = memremap(pa_data, len, MEMREMAP_WB); + if (!data) { + kfree(node); + error = -ENOMEM; + goto err_dir; + } + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) { + node->paddr = indirect->addr; + node->type = indirect->type; + node->len = indirect->len; + } else { + node->paddr = pa_data; + node->type = data->type; + node->len = data->len; + } } else { node->paddr = pa_data; node->type = data->type; @@ -125,7 +144,7 @@ static int __init create_setup_data_nodes(struct dentry *parent) } create_setup_data_node(d, no, node); - pa_data = data->next; + pa_data = pa_next; memunmap(data); no++; diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c index d0a19121c6a4f1f4bb1f62e05f12d8350d4710a1..257892fcefa794803d8eaf2d3d1810ebb278957b 100644 --- a/arch/x86/kernel/ksysfs.c +++ b/arch/x86/kernel/ksysfs.c @@ -91,26 +91,41 @@ static int get_setup_data_paddr(int nr, u64 *paddr) static int __init get_setup_data_size(int nr, size_t *size) { - int i = 0; + u64 pa_data = boot_params.hdr.setup_data, pa_next; + struct setup_indirect *indirect; struct setup_data *data; - u64 pa_data = boot_params.hdr.setup_data; + int i = 0; + u32 len; while (pa_data) { data = memremap(pa_data, sizeof(*data), MEMREMAP_WB); if (!data) return -ENOMEM; + pa_next = data->next; + if (nr == i) { - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != 
SETUP_INDIRECT) - *size = ((struct setup_indirect *)data->data)->len; - else + if (data->type == SETUP_INDIRECT) { + len = sizeof(*data) + data->len; + memunmap(data); + data = memremap(pa_data, len, MEMREMAP_WB); + if (!data) + return -ENOMEM; + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) + *size = indirect->len; + else + *size = data->len; + } else { *size = data->len; + } memunmap(data); return 0; } - pa_data = data->next; + pa_data = pa_next; memunmap(data); i++; } @@ -120,9 +135,11 @@ static int __init get_setup_data_size(int nr, size_t *size) static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { + struct setup_indirect *indirect; + struct setup_data *data; int nr, ret; u64 paddr; - struct setup_data *data; + u32 len; ret = kobj_to_setup_data_nr(kobj, &nr); if (ret) @@ -135,10 +152,20 @@ static ssize_t type_show(struct kobject *kobj, if (!data) return -ENOMEM; - if (data->type == SETUP_INDIRECT) - ret = sprintf(buf, "0x%x\n", ((struct setup_indirect *)data->data)->type); - else + if (data->type == SETUP_INDIRECT) { + len = sizeof(*data) + data->len; + memunmap(data); + data = memremap(paddr, len, MEMREMAP_WB); + if (!data) + return -ENOMEM; + + indirect = (struct setup_indirect *)data->data; + + ret = sprintf(buf, "0x%x\n", indirect->type); + } else { ret = sprintf(buf, "0x%x\n", data->type); + } + memunmap(data); return ret; } @@ -149,9 +176,10 @@ static ssize_t setup_data_data_read(struct file *fp, char *buf, loff_t off, size_t count) { + struct setup_indirect *indirect; + struct setup_data *data; int nr, ret = 0; u64 paddr, len; - struct setup_data *data; void *p; ret = kobj_to_setup_data_nr(kobj, &nr); @@ -165,10 +193,27 @@ static ssize_t setup_data_data_read(struct file *fp, if (!data) return -ENOMEM; - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { - paddr = ((struct setup_indirect *)data->data)->addr; - len = ((struct setup_indirect *)data->data)->len; + if (data->type == SETUP_INDIRECT) { + len = sizeof(*data) + data->len; + memunmap(data); + data = memremap(paddr, len, MEMREMAP_WB); + if (!data) + return -ENOMEM; + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) { + paddr = indirect->addr; + len = indirect->len; + } else { + /* + * Even though this is technically undefined, return + * the data as though it is a normal setup_data struct. + * This will at least allow it to be inspected. + */ + paddr += sizeof(*data); + len = data->len; + } } else { paddr += sizeof(*data); len = data->len; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index dc5512a789241b83fc29cc31579eca8aa8e76395..9ada27e1cb4d822888ec7f94fc1cc113df3cf96a 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -532,7 +532,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) { ipi_bitmap <<= min - apic_id; min = apic_id; - } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { + } else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) { max = apic_id < max ? 
max : apic_id; } else { ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index fe34183826d336824d0a24ef5f6fdb31405cb56e..d134169488b63b000eced609c3f66e97a8f27057 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -31,6 +31,10 @@ #include #include +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +#include +#endif + #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY /* * The instruction set on x86 is CISC. @@ -66,17 +70,6 @@ static inline unsigned long klp_size_to_check(unsigned long func_size, return size; } -static inline int klp_compare_address(unsigned long stack_addr, - unsigned long func_addr, const char *func_name, - unsigned long check_size) -{ - if (stack_addr >= func_addr && stack_addr < func_addr + check_size) { - pr_err("func %s is in use!\n", func_name); - return -EBUSY; - } - return 0; -} - static bool check_jump_insn(unsigned long func_addr) { int len = JMP_E9_INSN_SIZE; @@ -137,7 +130,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, /* Check func address in stack */ if (enable) { - if (func->force == KLP_ENFORCEMENT) + if (func->patched || func->force == KLP_ENFORCEMENT) continue; /* * When enable, checking the currently @@ -253,8 +246,10 @@ static void klp_print_stack_trace(void *trace_ptr, int trace_len) #endif #define MAX_STACK_ENTRIES 100 -static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc) +static bool check_func_list(void *data, int *ret, unsigned long pc) { + struct klp_func_list *funcs = (struct klp_func_list *)data; + while (funcs != NULL) { *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, klp_size_to_check(funcs->func_size, funcs->force)); @@ -267,7 +262,7 @@ static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long } static int klp_check_stack(void *trace_ptr, int trace_len, - struct klp_func_list *check_funcs) + bool (*fn)(void *, int *, unsigned long), void *data) { #ifdef CONFIG_ARCH_STACKWALK unsigned long *trace = trace_ptr; @@ -284,7 +279,7 @@ static int klp_check_stack(void *trace_ptr, int trace_len, for (i = 0; i < trace->nr_entries; i++) { address = trace->entries[i]; #endif - if (!check_func_list(check_funcs, &ret, address)) { + if (!fn(data, &ret, address)) { #ifdef CONFIG_ARCH_STACKWALK klp_print_stack_trace(trace_ptr, trace_len); #else @@ -308,11 +303,10 @@ static void free_list(struct klp_func_list **funcs) } } -int klp_check_calltrace(struct klp_patch *patch, int enable) +static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) { struct task_struct *g, *t; int ret = 0; - struct klp_func_list *check_funcs = NULL; static unsigned long trace_entries[MAX_STACK_ENTRIES]; #ifdef CONFIG_ARCH_STACKWALK int trace_len; @@ -320,11 +314,6 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) struct stack_trace trace; #endif - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) { - pr_err("collect active functions failed, ret=%d\n", ret); - goto out; - } for_each_process_thread(g, t) { if (!strncmp(t->comm, "migration/", 10)) continue; @@ -334,10 +323,10 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) if (ret < 0) { pr_err("%s:%d has an unreliable stack, ret=%d\n", t->comm, t->pid, ret); - goto out; + return ret; } trace_len = ret; - ret = klp_check_stack(trace_entries, trace_len, check_funcs); + ret = klp_check_stack(trace_entries, trace_len, fn, data); #else trace.skip = 0; 
trace.nr_entries = 0; @@ -348,21 +337,125 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) if (ret) { pr_err("%s: %s:%d has an unreliable stack, ret=%d\n", __func__, t->comm, t->pid, ret); - goto out; + return ret; } - ret = klp_check_stack(&trace, 0, check_funcs); + ret = klp_check_stack(&trace, 0, fn, data); #endif if (ret) { pr_err("%s:%d check stack failed, ret=%d\n", t->comm, t->pid, ret); - goto out; + return ret; } } + return 0; +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + struct klp_func_list *check_funcs = NULL; + + ret = klp_check_activeness_func(patch, enable, &check_funcs); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; + } + + if (!check_funcs) + goto out; + + ret = do_check_calltrace(check_func_list, (void *)check_funcs); + out: free_list(&check_funcs); return ret; } + +static bool check_module_calltrace(void *data, int *ret, unsigned long pc) +{ + struct module *mod = (struct module *)data; + + if (within_module_core(pc, mod)) { + pr_err("module %s is in use!\n", mod->name); + *ret = -EBUSY; + return false; + } + return true; +} + +int arch_klp_module_check_calltrace(void *data) +{ + return do_check_calltrace(check_module_calltrace, data); +} + +int arch_klp_check_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + int ret; + unsigned char opcode; + + ret = copy_from_kernel_nofault(&opcode, old_func, INT3_INSN_SIZE); + if (ret) + return ret; + + /* Another subsystem puts a breakpoint, reject patching at this time */ + if (opcode == INT3_INSN_OPCODE) + return -EBUSY; + + return 0; +} + +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + unsigned char int3 = INT3_INSN_OPCODE; + int ret; + + ret = copy_from_kernel_nofault(&arch_data->saved_opcode, old_func, + INT3_INSN_SIZE); + if (ret) + return ret; + + text_poke(old_func, &int3, INT3_INSN_SIZE); + /* arch_klp_code_modify_post_process() will do text_poke_sync() */ + + return 0; +} + +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + unsigned char opcode; + int ret; + + ret = copy_from_kernel_nofault(&opcode, old_func, INT3_INSN_SIZE); + if (ret) { + pr_warn("%s: failed to read opcode, ret=%d\n", __func__, ret); + return; + } + + /* instruction have been recovered at arch_klp_unpatch_func() */ + if (opcode != INT3_INSN_OPCODE) + return; + + text_poke(old_func, &arch_data->saved_opcode, INT3_INSN_SIZE); + /* arch_klp_code_modify_post_process() will do text_poke_sync() */ +} + +int klp_int3_handler(struct pt_regs *regs) +{ + unsigned long addr = regs->ip - INT3_INSN_SIZE; + void *brk_func; + + if (user_mode(regs)) + return 0; + + brk_func = klp_get_brk_func((void *)addr); + if (!brk_func) + return 0; + + int3_emulate_jmp(regs, (unsigned long)brk_func); + return 1; +} +NOKPROBE_SYMBOL(klp_int3_handler); #endif #ifdef CONFIG_LIVEPATCH_WO_FTRACE @@ -386,23 +479,37 @@ void arch_klp_code_modify_post_process(void) long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) { - return copy_from_kernel_nofault(arch_data->old_code, - old_func, JMP_E9_INSN_SIZE); + long ret; + + /* Prevent text modification */ + mutex_lock(&text_mutex); + ret = copy_from_kernel_nofault(arch_data->old_code, + old_func, JMP_E9_INSN_SIZE); + mutex_unlock(&text_mutex); + + return ret; } int arch_klp_patch_func(struct klp_func *func) { struct klp_func_node *func_node; unsigned long ip, new_addr; - void *new; + unsigned char *new; func_node = func->func_node; ip = (unsigned 
long)func->old_func; list_add_rcu(&func->stack_node, &func_node->func_stack); new_addr = (unsigned long)func->new_func; /* replace the text with the new text */ - new = klp_jmp_code(ip, new_addr); + new = (unsigned char *)klp_jmp_code(ip, new_addr); +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + /* update jmp offset */ + text_poke((void *)(ip + 1), new + 1, JMP_E9_INSN_SIZE - 1); + /* update jmp opcode */ + text_poke((void *)ip, new, 1); +#else text_poke((void *)ip, new, JMP_E9_INSN_SIZE); +#endif return 0; } diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c old mode 100644 new mode 100755 index 035cd1e1ede5c879db831f5d0c67ce1017957200..ca588a3ac01bcbf8a44c5c3eae0de0914d473ea0 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "process.h" @@ -918,8 +919,8 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) */ unsigned long get_wchan(struct task_struct *p) { - unsigned long start, bottom, top, sp, fp, ip, ret = 0; - int count = 0; + struct unwind_state state; + unsigned long addr = 0; if (p == current || p->state == TASK_RUNNING) return 0; @@ -927,49 +928,19 @@ unsigned long get_wchan(struct task_struct *p) if (!try_get_task_stack(p)) return 0; - start = (unsigned long)task_stack_page(p); - if (!start) - goto out; - - /* - * Layout of the stack page: - * - * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long) - * PADDING - * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING - * stack - * ----------- bottom = start - * - * The tasks stack pointer points at the location where the - * framepointer is stored. The data on the stack is: - * ... IP FP ... IP FP - * - * We need to read FP and IP, so we need to adjust the upper - * bound by another unsigned long. - */ - top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; - top -= 2 * sizeof(unsigned long); - bottom = start; - - sp = READ_ONCE(p->thread.sp); - if (sp < bottom || sp > top) - goto out; - - fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp); - do { - if (fp < bottom || fp > top) - goto out; - ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long))); - if (!in_sched_functions(ip)) { - ret = ip; - goto out; - } - fp = READ_ONCE_NOCHECK(*(unsigned long *)fp); - } while (count++ < 16 && p->state != TASK_RUNNING); - -out: + for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state); + unwind_next_frame(&state)) { + addr = unwind_get_return_address(&state); + if (!addr) + break; + if (in_sched_functions(addr)) + continue; + break; + } + put_task_stack(p); - return ret; + + return addr; } long do_arch_prctl_common(struct task_struct *task, int option, diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 4f2f54e1281c3f1d35d4166c8835f71bf7954f99..98bf8fd189025d02168436669b28f11cfad81b73 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -159,14 +159,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; - struct fpu *prev_fpu = &prev->fpu; - struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) - switch_fpu_prepare(prev_fpu, cpu); + switch_fpu_prepare(prev_p, cpu); /* * Save away %gs. 
No need to save %fs, as it was saved on the @@ -213,7 +211,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) this_cpu_write(current_task, next_p); - switch_fpu_finish(next_fpu); + switch_fpu_finish(next_p); /* Load the Intel cache allocation PQR MSR. */ resctrl_sched_in(); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index df342bedea88afc1e8571d6e20024bf477569c82..ad3f82a18de9df1e151e41ea3c5f184a493b9cac 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -535,15 +535,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread; struct thread_struct *next = &next_p->thread; - struct fpu *prev_fpu = &prev->fpu; - struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(irq_count) != -1); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) - switch_fpu_prepare(prev_fpu, cpu); + switch_fpu_prepare(prev_p, cpu); /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). @@ -595,7 +593,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) this_cpu_write(current_task, next_p); this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p)); - switch_fpu_finish(next_fpu); + switch_fpu_finish(next_p); /* Reload sp0. */ update_task_stack(next_p); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 62df2aa1ac32ae0f6a82964e906dacf6f52431c3..85979c1a404e933654642d7f07ba062976e9cd2b 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -372,21 +372,41 @@ static void __init parse_setup_data(void) static void __init memblock_x86_reserve_range_setup_data(void) { + struct setup_indirect *indirect; struct setup_data *data; - u64 pa_data; + u64 pa_data, pa_next; + u32 len; pa_data = boot_params.hdr.setup_data; while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); + if (!data) { + pr_warn("setup: failed to memremap setup_data entry\n"); + return; + } + + len = sizeof(*data); + pa_next = data->next; + memblock_reserve(pa_data, sizeof(*data) + data->len); - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) - memblock_reserve(((struct setup_indirect *)data->data)->addr, - ((struct setup_indirect *)data->data)->len); + if (data->type == SETUP_INDIRECT) { + len += data->len; + early_memunmap(data, sizeof(*data)); + data = early_memremap(pa_data, len); + if (!data) { + pr_warn("setup: failed to memremap indirect setup_data\n"); + return; + } - pa_data = data->next; - early_memunmap(data, sizeof(*data)); + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) + memblock_reserve(indirect->addr, indirect->len); + } + + pa_data = pa_next; + early_memunmap(data, len); } } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5da01819fb479858989a0f5a6c18b4a58fafde0a..696ec85164e626ef32fb2c81d10cacc2cbdeff18 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -62,6 +62,10 @@ #include #include +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +#include +#endif + #ifdef CONFIG_X86_64 #include #include @@ -654,10 +658,17 @@ static bool do_int3(struct pt_regs *regs) if (kprobe_int3_handler(regs)) return true; #endif + +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + if (klp_int3_handler(regs)) + return true; +#endif + res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP); return res == 
NOTIFY_STOP; } +NOKPROBE_SYMBOL(do_int3); static void do_int3_user(struct pt_regs *regs) { diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index e82151ba95c091ffce968e3422536a5484ba500c..a63df19ef4dad2d1d75d022b86e4559a107fa131 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1718,11 +1718,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, goto exception; } - if (!seg_desc.p) { - err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; - goto exception; - } - dpl = seg_desc.dpl; switch (seg) { @@ -1762,6 +1757,10 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; + if (!seg_desc.p) { + err_vec = NP_VECTOR; + goto exception; + } old_desc = seg_desc; seg_desc.type |= 2; /* busy */ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, @@ -1786,6 +1785,11 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, break; } + if (!seg_desc.p) { + err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; + goto exception; + } + if (seg_desc.s) { /* mark segment as accessed */ if (!(seg_desc.type & 1)) { diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 328f37e4fd3a723a48b198d56141aa2d99be1dbc..d806139377bc684aca5fef4593c34ec86f8151fc 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -207,7 +207,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, struct kvm_vcpu *vcpu = synic_to_vcpu(synic); int ret; - if (!synic->active && !host) + if (!synic->active && (!host || data)) return 1; trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); @@ -253,6 +253,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, case HV_X64_MSR_EOM: { int i; + if (!synic->active) + break; + for (i = 0; i < ARRAY_SIZE(synic->sint); i++) kvm_hv_notify_acked_sint(vcpu, i); break; @@ -636,7 +639,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); - if (!synic->active && !host) + if (!synic->active && (!host || config)) return 1; trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id, @@ -660,7 +663,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); - if (!synic->active && !host) + if (!synic->active && (!host || count)) return 1; trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id, diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 70dcb723a0f9d7887a178d1cedb4860b71a9fbd7..d62390885a9d40c00b90933007e6e656a1d5464b 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2232,10 +2232,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8) { - struct kvm_lapic *apic = vcpu->arch.apic; - - apic_set_tpr(apic, ((cr8 & 0x0f) << 4) - | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4)); + apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4); } u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c old mode 100644 new mode 100755 index c2516ddc3cbec958a78473aa46c927d8885018ec..9506cfcb86be765024b7ffc7504d727807727914 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3631,12 +3631,23 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t 
addr) walk_shadow_page_lockless_end(vcpu); } +static u32 alloc_apf_token(struct kvm_vcpu *vcpu) +{ + /* make sure the token value is not 0 */ + u32 id = vcpu->arch.apf.id; + + if (id << 12 == 0) + vcpu->arch.apf.id = 1; + + return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; +} + static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, gfn_t gfn) { struct kvm_arch_async_pf arch; - arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; + arch.token = alloc_apf_token(vcpu); arch.gfn = gfn; arch.direct_map = vcpu->arch.mmu->direct_map; arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu); @@ -5165,14 +5176,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) uint i; if (pcid == kvm_get_active_pcid(vcpu)) { - mmu->invlpg(vcpu, gva, mmu->root_hpa); + if (mmu->invlpg) + mmu->invlpg(vcpu, gva, mmu->root_hpa); tlb_flush = true; } for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { if (VALID_PAGE(mmu->prev_roots[i].hpa) && pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) { - mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); + if (mmu->invlpg) + mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); tlb_flush = true; } } diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index f8829134bf3413c8141e1c74cabb888853a90a15..c6daeeff1d9c9d958e08375c8baaedc295f7069f 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -34,9 +34,8 @@ #define PT_HAVE_ACCESSED_DIRTY(mmu) true #ifdef CONFIG_X86_64 #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL - #define CMPXCHG cmpxchg + #define CMPXCHG "cmpxchgq" #else - #define CMPXCHG cmpxchg64 #define PT_MAX_FULL_LEVELS 2 #endif #elif PTTYPE == 32 @@ -52,7 +51,7 @@ #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT #define PT_HAVE_ACCESSED_DIRTY(mmu) true - #define CMPXCHG cmpxchg + #define CMPXCHG "cmpxchgl" #elif PTTYPE == PTTYPE_EPT #define pt_element_t u64 #define guest_walker guest_walkerEPT @@ -65,7 +64,9 @@ #define PT_GUEST_DIRTY_SHIFT 9 #define PT_GUEST_ACCESSED_SHIFT 8 #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad) - #define CMPXCHG cmpxchg64 + #ifdef CONFIG_X86_64 + #define CMPXCHG "cmpxchgq" + #endif #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL #else #error Invalid PTTYPE value @@ -147,43 +148,39 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, pt_element_t __user *ptep_user, unsigned index, pt_element_t orig_pte, pt_element_t new_pte) { - int npages; - pt_element_t ret; - pt_element_t *table; - struct page *page; - - npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page); - if (likely(npages == 1)) { - table = kmap_atomic(page); - ret = CMPXCHG(&table[index], orig_pte, new_pte); - kunmap_atomic(table); - - kvm_release_page_dirty(page); - } else { - struct vm_area_struct *vma; - unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK; - unsigned long pfn; - unsigned long paddr; - - mmap_read_lock(current->mm); - vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE); - if (!vma || !(vma->vm_flags & VM_PFNMAP)) { - mmap_read_unlock(current->mm); - return -EFAULT; - } - pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - paddr = pfn << PAGE_SHIFT; - table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB); - if (!table) { - mmap_read_unlock(current->mm); - return -EFAULT; - } - ret = CMPXCHG(&table[index], orig_pte, new_pte); - memunmap(table); - mmap_read_unlock(current->mm); - } + int r = -EFAULT; + + if (!user_access_begin(ptep_user, 
sizeof(pt_element_t))) + return -EFAULT; + +#ifdef CMPXCHG + asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n" + "mov $0, %[r]\n" + "setnz %b[r]\n" + "2:" + _ASM_EXTABLE_UA(1b, 2b) + : [ptr] "+m" (*ptep_user), + [old] "+a" (orig_pte), + [r] "+q" (r) + : [new] "r" (new_pte) + : "memory"); +#else + asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n" + "movl $0, %[r]\n" + "jz 2f\n" + "incl %[r]\n" + "2:" + _ASM_EXTABLE_UA(1b, 2b) + : [ptr] "+m" (*ptep_user), + [old] "+A" (orig_pte), + [r] "+rm" (r) + : [new_lo] "b" ((u32)new_pte), + [new_hi] "c" ((u32)(new_pte >> 32)) + : "memory"); +#endif - return (ret != orig_pte); + user_access_end(); + return r; } static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 7e08efb06839379328323af53ad7b887dbbbc284..073514bbb5f715684d8ef584583d6dc27a732c9c 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -902,6 +902,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) continue; + if (!is_shadow_present_pte(iter.old_spte)) + continue; + if (spte_ad_need_write_protect(iter.old_spte)) { if (is_writable_pte(iter.old_spte)) new_spte = iter.old_spte & ~PT_WRITABLE_MASK; diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index a8b5533cf601d686b21bd8e11dd874ae70f8c1a6..3e5cb74c0b5386f4b81a3e7845d1353d1e92dbbc 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -806,7 +806,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, { struct kvm_kernel_irq_routing_entry *e; struct kvm_irq_routing_table *irq_rt; - int idx, ret = -EINVAL; + int idx, ret = 0; if (!kvm_arch_has_assigned_device(kvm) || !irq_remapping_cap(IRQ_POSTING_CAP)) @@ -817,7 +817,13 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, idx = srcu_read_lock(&kvm->irq_srcu); irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); - WARN_ON(guest_irq >= irq_rt->nr_rt_entries); + + if (guest_irq >= irq_rt->nr_rt_entries || + hlist_empty(&irq_rt->map[guest_irq])) { + pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", + guest_irq, irq_rt->nr_rt_entries); + goto out; + } hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { struct vcpu_data vcpu_info; diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 6c82ef22985d98e8820638c70e396f58345c7a0e..7828b36d67c1f89846549b55b7186481719037ae 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1177,6 +1177,14 @@ void sev_hardware_teardown(void) sev_flush_asids(); } +void sev_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!sev_guest(kvm)) + return; + + wbinvd_on_all_cpus(); +} + void pre_sev_run(struct vcpu_svm *svm, int cpu) { struct svm_cpu_data *sd = per_cpu(svm_data, cpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7773a765f548923888359b18aa92a96abc6cc35b..2124fe54abfb5e2ff655a8793c9813ebade5096a 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4325,6 +4325,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .mem_enc_op = svm_mem_enc_op, .mem_enc_reg_region = svm_register_enc_region, .mem_enc_unreg_region = svm_unregister_enc_region, + .guest_memory_reclaimed = sev_guest_memory_reclaimed, .can_emulate_instruction = svm_can_emulate_instruction, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 2c007241fbf5397054c00aa9f17c5da79f4cf0fc..c707d689b60ee264741e04ad2800a5c8225ab517 100644 --- 
a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -491,6 +491,8 @@ int svm_register_enc_region(struct kvm *kvm, struct kvm_enc_region *range); int svm_unregister_enc_region(struct kvm *kvm, struct kvm_enc_region *range); +void sev_guest_memory_reclaimed(struct kvm *kvm); + void pre_sev_run(struct vcpu_svm *svm, int cpu); int __init sev_hardware_setup(void); void sev_hardware_teardown(void); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index e208e54f5cad9156103e2e71a92b8d08547c5039..79889d27aa5b337a44890e30d8b0855f72b50947 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -226,6 +226,9 @@ static const struct { #define L1D_CACHE_ORDER 4 static void *vmx_l1d_flush_pages; +/* Control for disabling CPU Fill buffer clear */ +static bool __read_mostly vmx_fb_clear_ctrl_available; + static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) { struct page *page; @@ -357,6 +360,60 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); } +static void vmx_setup_fb_clear_ctrl(void) +{ + u64 msr; + + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) && + !boot_cpu_has_bug(X86_BUG_MDS) && + !boot_cpu_has_bug(X86_BUG_TAA)) { + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); + if (msr & ARCH_CAP_FB_CLEAR_CTRL) + vmx_fb_clear_ctrl_available = true; + } +} + +static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx) +{ + u64 msr; + + if (!vmx->disable_fb_clear) + return; + + rdmsrl(MSR_IA32_MCU_OPT_CTRL, msr); + msr |= FB_CLEAR_DIS; + wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr); + /* Cache the MSR value to avoid reading it later */ + vmx->msr_ia32_mcu_opt_ctrl = msr; +} + +static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx) +{ + if (!vmx->disable_fb_clear) + return; + + vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS; + wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); +} + +static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) +{ + vmx->disable_fb_clear = vmx_fb_clear_ctrl_available; + + /* + * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS + * at VMEntry. Skip the MSR read/write when a guest has no use case to + * execute VERW. 
+ */ + if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) || + ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO))) + vmx->disable_fb_clear = false; +} + static const struct kernel_param_ops vmentry_l1d_flush_ops = { .set = vmentry_l1d_flush_set, .get = vmentry_l1d_flush_get, @@ -2259,6 +2316,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ret = kvm_set_msr_common(vcpu, msr_info); } + /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */ + if (msr_index == MSR_IA32_ARCH_CAPABILITIES) + vmx_update_fb_clear_dis(vcpu, vmx); + return ret; } @@ -4531,6 +4592,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vpid_sync_context(vmx->vpid); if (init_event) vmx_clear_hlt(vcpu); + + vmx_update_fb_clear_dis(vcpu, vmx); } static void enable_irq_window(struct kvm_vcpu *vcpu) @@ -6710,6 +6773,11 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, vmx_l1d_flush(vcpu); else if (static_branch_unlikely(&mds_user_clear)) mds_clear_cpu_buffers(); + else if (static_branch_unlikely(&mmio_stale_data_clear) && + kvm_arch_has_assigned_device(vcpu->kvm)) + mds_clear_cpu_buffers(); + + vmx_disable_fb_clear(vmx); if (vcpu->arch.cr2 != native_read_cr2()) native_write_cr2(vcpu->arch.cr2); @@ -6719,6 +6787,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, vcpu->arch.cr2 = native_read_cr2(); + vmx_enable_fb_clear(vmx); + /* * VMEXIT disables interrupts (host state), but tracing and lockdep * have them in state 'on' as recorded before entering guest mode. @@ -8105,6 +8175,8 @@ static int __init vmx_init(void) return r; } + vmx_setup_fb_clear_ctrl(); + for_each_possible_cpu(cpu) { INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index c0b52498e4bb465c77c9f3a84b8cb32e778e158a..05eca210a5fffea266c134b109c6469f7212f897 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -325,6 +325,8 @@ struct vcpu_vmx { u64 msr_ia32_feature_control; u64 msr_ia32_feature_control_valid_bits; u64 ept_pointer; + u64 msr_ia32_mcu_opt_ctrl; + bool disable_fb_clear; struct pt_desc pt_desc; struct lbr_desc lbr_desc; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7da272aea5c4b203a5d65d353a909ef15702b2b0..1f857bc5ac6eccf9ca083c1ed079f3500e5d5a36 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1459,6 +1459,9 @@ static u64 kvm_get_arch_capabilities(void) */ } + /* Guests don't need to know "Fill buffer clear control" exists */ + data &= ~ARCH_CAP_FB_CLEAR_CTRL; + return data; } @@ -7349,7 +7352,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); -static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) +static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r) { if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { @@ -7418,25 +7421,23 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) } /* - * Decode to be emulated instruction. Return EMULATION_OK if success. + * Decode an instruction for emulation. The caller is responsible for handling + * code breakpoints. 
Note, manually detecting code breakpoints is unnecessary + * (and wrong) when emulating on an intercepted fault-like exception[*], as + * code breakpoints have higher priority and thus have already been done by + * hardware. + * + * [*] Except #MC, which is higher priority, but KVM should never emulate in + * response to a machine check. */ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, void *insn, int insn_len) { - int r = EMULATION_OK; struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; + int r; init_emulate_ctxt(vcpu); - /* - * We will reenter on the same instruction since we do not set - * complete_userspace_io. This does not handle watchpoints yet, - * those would be handled in the emulate_ops. - */ - if (!(emulation_type & EMULTYPE_SKIP) && - kvm_vcpu_check_breakpoint(vcpu, &r)) - return r; - ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; r = x86_decode_insn(ctxt, insn, insn_len); @@ -7471,6 +7472,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, if (!(emulation_type & EMULTYPE_NO_DECODE)) { kvm_clear_exception_queue(vcpu); + /* + * Return immediately if RIP hits a code breakpoint, such #DBs + * are fault-like and are higher priority than any faults on + * the code fetch itself. + */ + if (!(emulation_type & EMULTYPE_SKIP) && + kvm_vcpu_check_code_breakpoint(vcpu, &r)) + return r; + r = x86_decode_emulated_instruction(vcpu, emulation_type, insn, insn_len); if (r != EMULATION_OK) { @@ -8895,6 +8905,14 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); } +void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!kvm_x86_ops.guest_memory_reclaimed) + return; + + kvm_x86_ops.guest_memory_reclaimed(kvm); +} + void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) { if (!lapic_in_kernel(vcpu)) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 60ade7dd71bd99b99ca9cee5b2cd95a506212d9c..7ce9b8dd875773bc480a7cbd26bb1ebfa4c97ccd 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -614,6 +614,7 @@ static bool memremap_is_efi_data(resource_size_t phys_addr, static bool memremap_is_setup_data(resource_size_t phys_addr, unsigned long size) { + struct setup_indirect *indirect; struct setup_data *data; u64 paddr, paddr_next; @@ -626,6 +627,10 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, data = memremap(paddr, sizeof(*data), MEMREMAP_WB | MEMREMAP_DEC); + if (!data) { + pr_warn("failed to memremap setup_data entry\n"); + return false; + } paddr_next = data->next; len = data->len; @@ -635,10 +640,21 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, return true; } - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { - paddr = ((struct setup_indirect *)data->data)->addr; - len = ((struct setup_indirect *)data->data)->len; + if (data->type == SETUP_INDIRECT) { + memunmap(data); + data = memremap(paddr, sizeof(*data) + len, + MEMREMAP_WB | MEMREMAP_DEC); + if (!data) { + pr_warn("failed to memremap indirect setup_data\n"); + return false; + } + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) { + paddr = indirect->addr; + len = indirect->len; + } } memunmap(data); @@ -659,22 +675,51 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, static bool __init early_memremap_is_setup_data(resource_size_t phys_addr, unsigned long size) { + struct setup_indirect *indirect; struct setup_data *data; u64 
paddr, paddr_next; paddr = boot_params.hdr.setup_data; while (paddr) { - unsigned int len; + unsigned int len, size; if (phys_addr == paddr) return true; data = early_memremap_decrypted(paddr, sizeof(*data)); + if (!data) { + pr_warn("failed to early memremap setup_data entry\n"); + return false; + } + + size = sizeof(*data); paddr_next = data->next; len = data->len; - early_memunmap(data, sizeof(*data)); + if ((phys_addr > paddr) && (phys_addr < (paddr + len))) { + early_memunmap(data, sizeof(*data)); + return true; + } + + if (data->type == SETUP_INDIRECT) { + size += len; + early_memunmap(data, sizeof(*data)); + data = early_memremap_decrypted(paddr, size); + if (!data) { + pr_warn("failed to early memremap indirect setup_data\n"); + return false; + } + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) { + paddr = indirect->addr; + len = indirect->len; + } + } + + early_memunmap(data, size); if ((phys_addr > paddr) && (phys_addr < (paddr + len))) return true; diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index e13b0b49fcdfc181c19f76de1c968b9a344e8a70..d7249f4c90f1b9c48d68f70f51e6d7d03d6a65ca 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c @@ -512,10 +512,7 @@ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id) return ret; } -bool is_xen_pmu(int cpu) -{ - return (get_xenpmu_data() != NULL); -} +bool is_xen_pmu; void xen_pmu_init(int cpu) { @@ -526,7 +523,7 @@ void xen_pmu_init(int cpu) BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE); - if (xen_hvm_domain()) + if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu)) return; xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL); @@ -547,7 +544,8 @@ void xen_pmu_init(int cpu) per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data; per_cpu(xenpmu_shared, cpu).flags = 0; - if (cpu == 0) { + if (!is_xen_pmu) { + is_xen_pmu = true; perf_register_guest_info_callbacks(&xen_guest_cbs); xen_pmu_arch_init(); } diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h index 0e83a160589bc2e6c9149bbbae7e049c08e0e8c0..65c58894fc79f6d94a1ce860112ded726e0801f2 100644 --- a/arch/x86/xen/pmu.h +++ b/arch/x86/xen/pmu.h @@ -4,6 +4,8 @@ #include +extern bool is_xen_pmu; + irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id); #ifdef CONFIG_XEN_HAVE_VPMU void xen_pmu_init(int cpu); @@ -12,7 +14,6 @@ void xen_pmu_finish(int cpu); static inline void xen_pmu_init(int cpu) {} static inline void xen_pmu_finish(int cpu) {} #endif -bool is_xen_pmu(int cpu); bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err); bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err); int pmu_apic_update(uint32_t reg); diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index 8f9e7e2407c873621f50c6954e6cff712437c6bf..35b6d15d874d046cc9c0a4a73aad541dbfcfa963 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -130,7 +130,7 @@ int xen_smp_intr_init_pv(unsigned int cpu) per_cpu(xen_irq_work, cpu).irq = rc; per_cpu(xen_irq_work, cpu).name = callfunc_name; - if (is_xen_pmu(cpu)) { + if (is_xen_pmu) { pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, xen_pmu_irq_handler, diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 7f63aca6a0d340632646ccceb9f31ae63b878402..9dd4efe1bf0bd0108f1c809636aa78776858876e 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -226,8 +226,8 @@ extern unsigned long get_wchan(struct task_struct *p); #define xtensa_set_sr(x, sr) 
\ ({ \ - unsigned int v = (unsigned int)(x); \ - __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: "a"(v)); \ + __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: \ + "a"((unsigned int)(x))); \ }) #define xtensa_get_sr(sr) \ diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c index 61cf6497a646b7ec0273f0c8e823df64cffd9824..0dde21e0d3de4c2836bbce5c7fee361811863ec8 100644 --- a/arch/xtensa/kernel/jump_label.c +++ b/arch/xtensa/kernel/jump_label.c @@ -61,7 +61,7 @@ static void patch_text(unsigned long addr, const void *data, size_t sz) .data = data, }; stop_machine_cpuslocked(patch_text_stop_machine, - &patch, NULL); + &patch, cpu_online_mask); } else { unsigned long flags; diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index b5da3ad76d773106e89327d809ce9d6570d6f869..195ab5219736259e37b2292c4b9c2399b646fdcd 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -1871,7 +1871,11 @@ static void bfq_add_request(struct request *rq) bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq)); bfqq->queued[rq_is_sync(rq)]++; - bfqd->queued++; + /* + * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it + * may be read without holding the lock in bfq_has_work(). + */ + WRITE_ONCE(bfqd->queued, bfqd->queued + 1); if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) { /* @@ -2164,7 +2168,11 @@ static void bfq_remove_request(struct request_queue *q, if (rq->queuelist.prev != &rq->queuelist) list_del_init(&rq->queuelist); bfqq->queued[sync]--; - bfqd->queued--; + /* + * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it + * may be read without holding the lock in bfq_has_work(). + */ + WRITE_ONCE(bfqd->queued, bfqd->queued - 1); elv_rb_del(&bfqq->sort_list, rq); elv_rqhash_del(q, rq); @@ -2534,6 +2542,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) * are likely to increase the throughput. */ bfqq->new_bfqq = new_bfqq; + /* + * The above assignment schedules the following redirections: + * each time some I/O for bfqq arrives, the process that + * generated that I/O is disassociated from bfqq and + * associated with new_bfqq. Here we increases new_bfqq->ref + * in advance, adding the number of processes that are + * expected to be associated with new_bfqq as they happen to + * issue I/O. + */ new_bfqq->ref += process_refs; return new_bfqq; } @@ -2593,6 +2610,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, { struct bfq_queue *in_service_bfqq, *new_bfqq; + /* if a merge has already been setup, then proceed with that first */ + if (bfqq->new_bfqq) + return bfqq->new_bfqq; + /* * Do not perform queue merging if the device is non * rotational and performs internal queueing. 
In fact, such a @@ -2647,9 +2668,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (bfq_too_late_for_merging(bfqq)) return NULL; - if (bfqq->new_bfqq) - return bfqq->new_bfqq; - if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) return NULL; @@ -4652,11 +4670,11 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; /* - * Avoiding lock: a race on bfqd->busy_queues should cause at + * Avoiding lock: a race on bfqd->queued should cause at * most a call to dispatch for nothing */ return !list_empty_careful(&bfqd->dispatch) || - bfq_tot_busy_queues(bfqd) > 0; + READ_ONCE(bfqd->queued); } static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) diff --git a/block/bio.c b/block/bio.c index cd0ca4166164c7f2edde7b08eeb88fbdeb427765..28191e7035f4f46bb04317372c2c22676d219fb6 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1111,6 +1111,9 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter) * fit into the bio, or are requested in @iter, whatever is smaller. If * MM encounters an error pinning the requested pages, it stops. Error * is returned only if 0 pages could be pinned. + * + * It's intended for direct IO, so doesn't do PSI tracking, the caller is + * responsible for setting BIO_WORKINGSET if necessary. */ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) { @@ -1135,6 +1138,9 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) if (is_bvec) bio_set_flag(bio, BIO_NO_PAGE_REF); + + /* don't account direct I/O as memory stall */ + bio_clear_flag(bio, BIO_WORKINGSET); return bio->bi_vcnt ? 0 : ret; } EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages); diff --git a/block/blk-core.c b/block/blk-core.c index 019d583b355cd02bcf9b5c908d2640a8f603cc3a..bbd3d4560458f431f8cf4bf37e59b5bce47be12d 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -398,8 +398,10 @@ void blk_cleanup_queue(struct request_queue *q) del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer); blk_sync_queue(q); - if (queue_is_mq(q)) + if (queue_is_mq(q)) { + blk_mq_cancel_work_sync(q); blk_mq_exit_queue(q); + } /* * In theory, request pool of sched_tags belongs to request queue. 
@@ -517,13 +519,15 @@ static void blk_timeout_work(struct work_struct *work) struct request_queue *blk_alloc_queue(int node_id) { struct request_queue *q; + struct request_queue_wrapper *q_wrapper; int ret; - q = kmem_cache_alloc_node(blk_requestq_cachep, + q_wrapper = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO, node_id); - if (!q) + if (!q_wrapper) return NULL; + q = &q_wrapper->q; q->last_merge = NULL; q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL); @@ -594,7 +598,7 @@ struct request_queue *blk_alloc_queue(int node_id) fail_id: ida_simple_remove(&blk_queue_ida, q->id); fail_q: - kmem_cache_free(blk_requestq_cachep, q); + kmem_cache_free(blk_requestq_cachep, q_wrapper); return NULL; } EXPORT_SYMBOL(blk_alloc_queue); @@ -1796,7 +1800,7 @@ int __init blk_dev_init(void) panic("Failed to create kblockd\n"); blk_requestq_cachep = kmem_cache_create("request_queue", - sizeof(struct request_queue), 0, SLAB_PANIC, NULL); + sizeof(struct request_queue_wrapper), 0, SLAB_PANIC, NULL); blk_debugfs_root = debugfs_create_dir("block", NULL); diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 606bef13f1c210d88dd7c970b3670125cf63c6d8..0aa2069d95d5e470cbee7b14d44faf8b0c934504 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -194,11 +194,18 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) { + unsigned long end = jiffies + HZ; int ret; do { ret = __blk_mq_do_dispatch_sched(hctx); - } while (ret == 1); + if (ret != 1) + break; + if (need_resched() || time_is_before_jiffies(end)) { + blk_mq_delay_run_hw_queue(hctx, 0); + break; + } + } while (1); return ret; } @@ -512,18 +519,16 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q, unsigned int hctx_idx) { struct blk_mq_tag_set *set = q->tag_set; - /* Clear HCTX_SHARED so tags are init'ed */ - unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED; int ret; hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, - set->reserved_tags, flags); + set->reserved_tags, set->flags); if (!hctx->sched_tags) return -ENOMEM; ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); if (ret) { - blk_mq_free_rq_map(hctx->sched_tags, flags); + blk_mq_free_rq_map(hctx->sched_tags, set->flags); hctx->sched_tags = NULL; } @@ -537,16 +542,53 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q) int i; queue_for_each_hw_ctx(q, hctx, i) { - /* Clear HCTX_SHARED so tags are freed */ - unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED; - if (hctx->sched_tags) { - blk_mq_free_rq_map(hctx->sched_tags, flags); + blk_mq_free_rq_map(hctx->sched_tags, hctx->flags); hctx->sched_tags = NULL; } } } +static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue) +{ + struct blk_mq_tag_set *set = queue->tag_set; + int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); + struct blk_mq_hw_ctx *hctx; + int ret, i; + struct request_queue_wrapper *q_wrapper = queue_to_wrapper(queue); + + /* + * Set initial depth at max so that we don't need to reallocate for + * updating nr_requests. 
+ */ + ret = blk_mq_init_bitmaps(&q_wrapper->sched_bitmap_tags, + &q_wrapper->sched_breserved_tags, + MAX_SCHED_RQ, set->reserved_tags, + set->numa_node, alloc_policy); + if (ret) + return ret; + + queue_for_each_hw_ctx(queue, hctx, i) { + hctx->sched_tags->bitmap_tags = + &q_wrapper->sched_bitmap_tags; + hctx->sched_tags->breserved_tags = + &q_wrapper->sched_breserved_tags; + } + + sbitmap_queue_resize(&q_wrapper->sched_bitmap_tags, + queue->nr_requests - set->reserved_tags); + + return 0; +} + +static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue) +{ + struct request_queue_wrapper *q_wrapper = queue_to_wrapper(queue); + + sbitmap_queue_free(&q_wrapper->sched_bitmap_tags); + sbitmap_queue_free(&q_wrapper->sched_breserved_tags); +} + int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) { struct blk_mq_hw_ctx *hctx; @@ -571,12 +613,18 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_sched_alloc_tags(q, hctx, i); if (ret) - goto err; + goto err_free_tags; + } + + if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { + ret = blk_mq_init_sched_shared_sbitmap(q); + if (ret) + goto err_free_tags; } ret = e->ops.init_sched(q, e); if (ret) - goto err; + goto err_free_sbitmap; blk_mq_debugfs_register_sched(q); @@ -596,7 +644,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) return 0; -err: +err_free_sbitmap: + if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) + blk_mq_exit_sched_shared_sbitmap(q); +err_free_tags: blk_mq_sched_free_requests(q); blk_mq_sched_tags_teardown(q); q->elevator = NULL; @@ -622,6 +673,7 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) { struct blk_mq_hw_ctx *hctx; unsigned int i; + unsigned int flags = 0; queue_for_each_hw_ctx(q, hctx, i) { blk_mq_debugfs_unregister_sched_hctx(hctx); @@ -629,10 +681,13 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) e->type->ops.exit_hctx(hctx, i); hctx->sched_data = NULL; } + flags = hctx->flags; } blk_mq_debugfs_unregister_sched(q); if (e->type->ops.exit_sched) e->type->ops.exit_sched(e); blk_mq_sched_tags_teardown(q); + if (blk_mq_is_sbitmap_shared(flags)) + blk_mq_exit_sched_shared_sbitmap(q); q->elevator = NULL; } diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 15f3d611db1048760d71710f290c69e60643d7ce..b228ee0674912b65a6e1b4009f22741b55fc40d4 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -5,6 +5,8 @@ #include "blk-mq.h" #include "blk-mq-tag.h" +#define MAX_SCHED_RQ (16 * BLKDEV_MAX_RQ) + void blk_mq_sched_assign_ioc(struct request *rq); void blk_mq_sched_request_inserted(struct request *rq); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 64c9633d5d5a0ce1fb5c9f770b4c2d8351abaf80..98e4edd03ad4c607998307d375941fd7203c179a 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -13,6 +13,7 @@ #include #include "blk.h" #include "blk-mq.h" +#include "blk-mq-sched.h" #include "blk-mq-tag.h" #define BLK_MQ_DTAG_WAIT_EXPIRE (5 * HZ) @@ -538,39 +539,54 @@ static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, node); } -static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, - int node, int alloc_policy) +int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, + struct sbitmap_queue *breserved_tags, + unsigned int queue_depth, unsigned int reserved, + int node, int alloc_policy) { - unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; + unsigned int depth = queue_depth - reserved; 
bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR; - if (bt_alloc(&tags->__bitmap_tags, depth, round_robin, node)) + if (bt_alloc(bitmap_tags, depth, round_robin, node)) return -ENOMEM; - if (bt_alloc(&tags->__breserved_tags, tags->nr_reserved_tags, - round_robin, node)) + if (bt_alloc(breserved_tags, reserved, round_robin, node)) goto free_bitmap_tags; + return 0; + +free_bitmap_tags: + sbitmap_queue_free(bitmap_tags); + return -ENOMEM; +} + +static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, + int node, int alloc_policy) +{ + int ret; + + ret = blk_mq_init_bitmaps(&tags->__bitmap_tags, + &tags->__breserved_tags, + tags->nr_tags, tags->nr_reserved_tags, + node, alloc_policy); + if (ret) + return ret; + tags->bitmap_tags = &tags->__bitmap_tags; tags->breserved_tags = &tags->__breserved_tags; return 0; -free_bitmap_tags: - sbitmap_queue_free(&tags->__bitmap_tags); - return -ENOMEM; } -int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags) +int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set) { - unsigned int depth = set->queue_depth - set->reserved_tags; int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); - bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR; - int i, node = set->numa_node; + int i, ret; - if (bt_alloc(&set->__bitmap_tags, depth, round_robin, node)) - return -ENOMEM; - if (bt_alloc(&set->__breserved_tags, set->reserved_tags, - round_robin, node)) - goto free_bitmap_tags; + ret = blk_mq_init_bitmaps(&set->__bitmap_tags, &set->__breserved_tags, + set->queue_depth, set->reserved_tags, + set->numa_node, alloc_policy); + if (ret) + return ret; for (i = 0; i < set->nr_hw_queues; i++) { struct blk_mq_tags *tags = set->tags[i]; @@ -580,9 +596,6 @@ int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags) } return 0; -free_bitmap_tags: - sbitmap_queue_free(&set->__bitmap_tags); - return -ENOMEM; } void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set) @@ -645,8 +658,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, */ if (tdepth > tags->nr_tags) { struct blk_mq_tag_set *set = hctx->queue->tag_set; - /* Only sched tags can grow, so clear HCTX_SHARED flag */ - unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED; struct blk_mq_tags *new; bool ret; @@ -657,21 +668,21 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, * We need some sort of upper limit, set it high enough that * no valid use cases should require more. 
*/ - if (tdepth > 16 * BLKDEV_MAX_RQ) + if (tdepth > MAX_SCHED_RQ) return -EINVAL; new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, - tags->nr_reserved_tags, flags); + tags->nr_reserved_tags, set->flags); if (!new) return -ENOMEM; ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth); if (ret) { - blk_mq_free_rq_map(new, flags); + blk_mq_free_rq_map(new, set->flags); return -ENOMEM; } blk_mq_free_rqs(set, *tagsptr, hctx->queue_num); - blk_mq_free_rq_map(*tagsptr, flags); + blk_mq_free_rq_map(*tagsptr, set->flags); *tagsptr = new; } else { /* diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 25f30fa9985704b78d38542353211c3959e8a261..baa36e5f495d447db1580925207abf414859afcc 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -45,11 +45,14 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, unsigned int flags); extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags); +extern int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, + struct sbitmap_queue *breserved_tags, + unsigned int queue_depth, + unsigned int reserved, + int node, int alloc_policy); -extern int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, - unsigned int flags); +extern int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set); extern void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set); - extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag); diff --git a/block/blk-mq.c b/block/blk-mq.c index cedc355218db4f1c3abd2e9a2a8ed0313ad9aac1..83193e44aada9d91e4c58905f7af3a857c8e4085 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1627,8 +1627,16 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, put_cpu(); } + /* + * No need to queue work if there is no io, and this can avoid race + * with blk_cleanup_queue(). 
+ */ + if (!percpu_ref_tryget(&hctx->queue->q_usage_counter)) + return; + kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, msecs_to_jiffies(msecs)); + percpu_ref_put(&hctx->queue->q_usage_counter); } /** @@ -2729,8 +2737,9 @@ static void blk_mq_exit_hctx(struct request_queue *q, blk_mq_dtag_idle(hctx, true); } - blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], - set->queue_depth, flush_rq); + if (blk_queue_init_done(q)) + blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], + set->queue_depth, flush_rq); if (set->ops->exit_request) set->ops->exit_request(set, flush_rq, hctx_idx); @@ -3621,7 +3630,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) atomic_set(&set->active_queues_shared_sbitmap, 0); atomic_set(&set->pending_queues_shared_sbitmap, 0); - if (blk_mq_init_shared_sbitmap(set, set->flags)) { + if (blk_mq_init_shared_sbitmap(set)) { ret = -ENOMEM; goto out_free_mq_rq_maps; } @@ -3671,6 +3680,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) struct blk_mq_tag_set *set = q->tag_set; struct blk_mq_hw_ctx *hctx; int i, ret; + struct request_queue_wrapper *q_wrapper = queue_to_wrapper(q); if (!set) return -EINVAL; @@ -3697,15 +3707,24 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) } else { ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, nr, true); + if (blk_mq_is_sbitmap_shared(set->flags)) { + hctx->sched_tags->bitmap_tags = + &q_wrapper->sched_bitmap_tags; + hctx->sched_tags->breserved_tags = + &q_wrapper->sched_breserved_tags; + } } if (ret) break; if (q->elevator && q->elevator->type->ops.depth_updated) q->elevator->type->ops.depth_updated(hctx); } - - if (!ret) + if (!ret) { q->nr_requests = nr; + if (q->elevator && blk_mq_is_sbitmap_shared(set->flags)) + sbitmap_queue_resize(&q_wrapper->sched_bitmap_tags, + nr - set->reserved_tags); + } blk_mq_unquiesce_queue(q); blk_mq_unfreeze_queue(q); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 66765740902bb6d911719c1fbe07393bc26a692f..0a4fcbda8ab45b9f235995b52b1ef5271f6a0112 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -726,7 +726,7 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head) { struct request_queue *q = container_of(rcu_head, struct request_queue, rcu_head); - kmem_cache_free(blk_requestq_cachep, q); + kmem_cache_free(blk_requestq_cachep, queue_to_wrapper(q)); } /* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */ @@ -790,16 +790,6 @@ static void blk_release_queue(struct kobject *kobj) blk_free_queue_stats(q->stats); - if (queue_is_mq(q)) { - struct blk_mq_hw_ctx *hctx; - int i; - - cancel_delayed_work_sync(&q->requeue_work); - - queue_for_each_hw_ctx(q, hctx, i) - cancel_delayed_work_sync(&hctx->run_work); - } - blk_exit_queue(q); blk_queue_free_zone_bitmaps(q); @@ -965,15 +955,17 @@ void blk_unregister_queue(struct gendisk *disk) */ if (queue_is_mq(q)) blk_mq_unregister_dev(disk_to_dev(disk), q); - - kobject_uevent(&q->kobj, KOBJ_REMOVE); - kobject_del(&q->kobj); blk_trace_remove_sysfs(disk_to_dev(disk)); mutex_lock(&q->sysfs_lock); if (q->elevator) elv_unregister_queue(q); mutex_unlock(&q->sysfs_lock); + + /* Now that we've deleted all child objects, we can delete the queue. 
*/ + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); + mutex_unlock(&q->sysfs_dir_lock); kobject_put(&disk_to_dev(disk)->kobj); diff --git a/block/blk.h b/block/blk.h index 3165c16725d53496bddae46a391b3bef8b4de4ae..b8948fda06e11b4b0b732c2f538728c40accc767 100644 --- a/block/blk.h +++ b/block/blk.h @@ -28,6 +28,19 @@ struct blk_flush_queue { spinlock_t mq_flush_lock; }; +/* + * The wrapper of request_queue to fix kabi while adding members. + */ +struct request_queue_wrapper { + struct request_queue q; + + struct sbitmap_queue sched_bitmap_tags; + struct sbitmap_queue sched_breserved_tags; +}; + +#define queue_to_wrapper(queue) \ + container_of(queue, struct request_queue_wrapper, q) + extern struct kmem_cache *blk_requestq_cachep; extern struct kobj_type blk_queue_ktype; extern struct ida blk_queue_ida; diff --git a/crypto/Kconfig b/crypto/Kconfig index 2d24738065672f8e6655141058311e3c9d1e4878..ef9000c5db4d210e970b6cda6022d2a326f55f2e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -272,7 +272,7 @@ config CRYPTO_ECRDSA config CRYPTO_SM2 tristate "SM2 algorithm" - select CRYPTO_LIB_SM3 + select CRYPTO_SM3 select CRYPTO_AKCIPHER select CRYPTO_MANAGER select MPILIB @@ -1039,9 +1039,12 @@ config CRYPTO_SHA3 http://keccak.noekeon.org/ config CRYPTO_SM3 + tristate + +config CRYPTO_SM3_GENERIC tristate "SM3 digest algorithm" select CRYPTO_HASH - select CRYPTO_LIB_SM3 + select CRYPTO_SM3 help SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3). It is part of the Chinese Commercial Cryptography suite. @@ -1054,7 +1057,7 @@ config CRYPTO_SM3_AVX_X86_64 tristate "SM3 digest algorithm (x86_64/AVX)" depends on X86 && 64BIT select CRYPTO_HASH - select CRYPTO_LIB_SM3 + select CRYPTO_SM3 help SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3). It is part of the Chinese Commercial Cryptography suite. This is @@ -1631,9 +1634,12 @@ config CRYPTO_SERPENT_AVX2_X86_64 config CRYPTO_SM4 + tristate + +config CRYPTO_SM4_GENERIC tristate "SM4 cipher algorithm" select CRYPTO_ALGAPI - select CRYPTO_LIB_SM4 + select CRYPTO_SM4 help SM4 cipher algorithms (OSCCA GB/T 32907-2016). @@ -1662,7 +1668,7 @@ config CRYPTO_SM4_AESNI_AVX_X86_64 select CRYPTO_SKCIPHER select CRYPTO_SIMD select CRYPTO_ALGAPI - select CRYPTO_LIB_SM4 + select CRYPTO_SM4 help SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX). @@ -1683,7 +1689,7 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64 select CRYPTO_SKCIPHER select CRYPTO_SIMD select CRYPTO_ALGAPI - select CRYPTO_LIB_SM4 + select CRYPTO_SM4 select CRYPTO_SM4_AESNI_AVX_X86_64 help SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX2). 
diff --git a/crypto/Makefile b/crypto/Makefile index 982066c6bdfb1438e87c3620a5ee77251ca095a6..58dac31a3367611e0fbe1dd2a377fb71f927a501 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -81,7 +81,8 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o -obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o +obj-$(CONFIG_CRYPTO_SM3) += sm3.o +obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o obj-$(CONFIG_CRYPTO_WP512) += wp512.o CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 @@ -136,7 +137,8 @@ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 -obj-$(CONFIG_CRYPTO_SM4) += sm4_generic.o +obj-$(CONFIG_CRYPTO_SM4) += sm4.o +obj-$(CONFIG_CRYPTO_SM4_GENERIC) += sm4_generic.o obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 967329e0a07b7958a60b334d24d89273868cb5ba..6cf6c4552c1138190d1de0efa231ef023e44f05a 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -248,6 +248,9 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, case OID_sha224: ctx->sinfo->sig->hash_algo = "sha224"; break; + case OID_sm3: + ctx->sinfo->sig->hash_algo = "sm3"; + break; default: printk("Unsupported digest algo: %u\n", ctx->last_oid); return -ENOPKG; @@ -269,6 +272,10 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, ctx->sinfo->sig->pkey_algo = "rsa"; ctx->sinfo->sig->encoding = "pkcs1"; break; + case OID_SM2_with_SM3: + ctx->sinfo->sig->pkey_algo = "sm2"; + ctx->sinfo->sig->encoding = "raw"; + break; default: printk("Unsupported pkey algo: %u\n", ctx->last_oid); return -ENOPKG; diff --git a/crypto/authenc.c b/crypto/authenc.c index 670bf1a01d00e4c3fd1cda495973a78da9ae6db2..17f674a7cdff5434a213f66f66ca5c6976b565de 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -253,7 +253,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req, dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); skcipher_request_set_tfm(skreq, ctx->enc); - skcipher_request_set_callback(skreq, aead_request_flags(req), + skcipher_request_set_callback(skreq, flags, req->base.complete, req->base.data); skcipher_request_set_crypt(skreq, src, dst, req->cryptlen - authsize, req->iv); diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 8ac3e73e8ea65121a9ccfbe9e598d757f426e933..9d804831c8b3f9d1dba390cd6f92aab574af7967 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -476,6 +476,8 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) pos++; if (digest_info) { + if (digest_info->size > dst_len - pos) + goto done; if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size)) goto done; @@ -495,7 +497,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) sg_nents_for_len(req->src, req->src_len + req->dst_len), req_ctx->out_buf + ctx->key_size, - req->dst_len, ctx->key_size); + req->dst_len, 
req->src_len); /* Do the actual verification step. */ if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos, req->dst_len) != 0) @@ -538,7 +540,7 @@ static int pkcs1pad_verify(struct akcipher_request *req) if (WARN_ON(req->dst) || WARN_ON(!req->dst_len) || - !ctx->key_size || req->src_len < ctx->key_size) + !ctx->key_size || req->src_len != ctx->key_size) return -EINVAL; req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL); @@ -621,6 +623,11 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn); + if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) { + err = -EINVAL; + goto err_free_inst; + } + err = -ENAMETOOLONG; hash_name = crypto_attr_alg_name(tb[2]); if (IS_ERR(hash_name)) { diff --git a/lib/crypto/sm3.c b/crypto/sm3.c similarity index 100% rename from lib/crypto/sm3.c rename to crypto/sm3.c diff --git a/lib/crypto/sm4.c b/crypto/sm4.c similarity index 94% rename from lib/crypto/sm4.c rename to crypto/sm4.c index 284e62576d0c621692b368bed91936bfaeb6fe4b..2c44193bc27e4a0369a795ed641a6e32ca33cb6b 100644 --- a/lib/crypto/sm4.c +++ b/crypto/sm4.c @@ -11,7 +11,7 @@ #include #include -static const u32 fk[4] = { +static const u32 ____cacheline_aligned fk[4] = { 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc }; @@ -61,6 +61,14 @@ static const u8 ____cacheline_aligned sbox[256] = { 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48 }; +extern const u32 crypto_sm4_fk[4] __alias(fk); +extern const u32 crypto_sm4_ck[32] __alias(ck); +extern const u8 crypto_sm4_sbox[256] __alias(sbox); + +EXPORT_SYMBOL(crypto_sm4_fk); +EXPORT_SYMBOL(crypto_sm4_ck); +EXPORT_SYMBOL(crypto_sm4_sbox); + static inline u32 sm4_t_non_lin_sub(u32 x) { u32 out; diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index b7f3e8603ad841f8af776d88415a960087a5ca1d..901fa5ca284d2f54f973a25b9529e88090d69193 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -169,6 +169,9 @@ acpi_ns_walk_namespace(acpi_object_type type, if (start_node == ACPI_ROOT_OBJECT) { start_node = acpi_gbl_root_node; + if (!start_node) { + return_ACPI_STATUS(AE_NO_NAMESPACE); + } } /* Null child means "get first node" */ diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index 19e50fcbf4d6f52771f573094f4fa635bcfd5ff6..598fd19b65fa489d1cc4d44da8db7ee072612e1a 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -29,6 +29,7 @@ #undef pr_fmt #define pr_fmt(fmt) "BERT: " fmt +#define ACPI_BERT_PRINT_MAX_LEN 1024 static int bert_disable; @@ -58,8 +59,11 @@ static void __init bert_print_all(struct acpi_bert_region *region, } pr_info_once("Error records from previous boot:\n"); - - cper_estatus_print(KERN_INFO HW_ERR, estatus); + if (region_len < ACPI_BERT_PRINT_MAX_LEN) + cper_estatus_print(KERN_INFO HW_ERR, estatus); + else + pr_info_once("Max print length exceeded, table data is available at:\n" + "/sys/firmware/acpi/tables/data/BERT"); /* * Because the boot error source is "one-time polled" type, @@ -77,7 +81,7 @@ static int __init setup_bert_disable(char *str) { bert_disable = 1; - return 0; + return 1; } __setup("bert_disable", setup_bert_disable); diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 2e0b0fcad9607c3803c6db616be62967e725d512..83efb52a3f31d070cfde3fe9e34166ba3bed64de 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(erst_clear); static int __init setup_erst_disable(char *str) { erst_disable = 1; - return 
0; + return 1; } __setup("erst_disable", setup_erst_disable); diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 6e980fe16772cc194e8c8daa0589c31102d31b52..7bf48c2776fbf5e417ccbb795fc59ffa9e3a2f4b 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -219,7 +219,7 @@ static int __init hest_ghes_dev_register(unsigned int ghes_count) static int __init setup_hest_disable(char *str) { hest_disable = HEST_DISABLED; - return 0; + return 1; } __setup("hest_disable", setup_hest_disable); diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 2376f57b3617aa2f4ff47d4100a85d8ba792effa..be743d177bcbf8ddc5089d6079c651550fb40179 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -66,6 +66,10 @@ MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); static const struct acpi_device_id battery_device_ids[] = { {"PNP0C0A", 0}, + + /* Microsoft Surface Go 3 */ + {"MSHW0146", 0}, + {"", 0}, }; @@ -1171,6 +1175,14 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"), }, }, + { + /* Microsoft Surface Go 3 */ + .callback = battery_notification_delay_quirk, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), + }, + }, {}, }; diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index dc8ac435dea1611fb95c7bb1c07485f8ee932651..e4ef64988761f84240530cd304f17c3b3f5cd4b2 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -804,6 +804,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) cpc_obj = &out_obj->package.elements[0]; if (cpc_obj->type == ACPI_TYPE_INTEGER) { num_ent = cpc_obj->integer.value; + if (num_ent <= 1) { + pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n", + num_ent, pr->id); + goto out_free; + } } else { pr_debug("Unexpected entry type(%d) for NumEntries\n", cpc_obj->type); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 8347eaee679c8e4a63184cb9dabeb8945381a123..3f2e5ea9ab6b7a62b99c225c68cf1b9ae9458b79 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -2064,16 +2064,6 @@ bool acpi_ec_dispatch_gpe(void) if (acpi_any_gpe_status_set(first_ec->gpe)) return true; - /* - * Cancel the SCI wakeup and process all pending events in case there - * are any wakeup ones in there. - * - * Note that if any non-EC GPEs are active at this point, the SCI will - * retrigger after the rearming in acpi_s2idle_wake(), so no events - * should be missed by canceling the wakeup here. - */ - pm_system_cancel_wakeup(); - /* * Dispatch the EC GPE in-band, but do not report wakeup in any case * to allow the caller to process events properly after that. 
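
The __setup() handler changes above (setup_bert_disable, setup_erst_disable, setup_hest_disable, and driver_async_probe further down) follow the kernel convention that a handler returns 1 once it has consumed its boot option; returning 0 marks the option as unhandled, so the core passes it on to init as an unknown argument and warns about it. A minimal illustrative sketch of that pattern follows, using a made-up "example_disable" parameter rather than any handler from this series:

	static bool example_disabled __initdata;

	static int __init setup_example_disable(char *str)
	{
		/* The option takes no value; just record that it was present. */
		example_disabled = true;

		/* Return 1: option consumed, do not hand it to init as unknown. */
		return 1;
	}
	__setup("example_disable", setup_example_disable);
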
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 18bd428f11ac03dc7d8b34df308816ae3451da9f..bd16340088389a5733f5c815234a37b09c019455 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -685,7 +685,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, */ if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) { if (index) - return -EINVAL; + return -ENOENT; ret = acpi_bus_get_device(obj->reference.handle, &device); if (ret) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index e2614ea820bb87dfd55b92ef8a20cc9a6698be34..503935b1deeb1efba7ec001123b6db402d446425 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -1012,15 +1012,21 @@ static bool acpi_s2idle_wake(void) return true; } - /* - * Check non-EC GPE wakeups and if there are none, cancel the - * SCI-related wakeup and dispatch the EC GPE. - */ + /* Check non-EC GPE wakeups and dispatch the EC GPE. */ if (acpi_ec_dispatch_gpe()) { pm_pr_dbg("ACPI non-EC GPE wakeup\n"); return true; } + /* + * Cancel the SCI wakeup and process all pending events in case + * there are any wakeup ones in there. + * + * Note that if any non-EC GPEs are active at this point, the + * SCI will retrigger after the rearming below, so no events + * should be missed by canceling the wakeup here. + */ + pm_system_cancel_wakeup(); acpi_os_wait_events_complete(); /* diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 33474fd969913c513d1acfd1577a0135f1c8a8d9..7b9793cb55c504c6b027d031d7e435677a61cf66 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -409,6 +409,81 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "GA503"), }, }, + /* + * Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 have both a + * working native and video interface. However the default detection + * mechanism first registers the video interface before unregistering + * it again and switching to the native interface during boot. This + * results in a dangling SBIOS request for backlight change for some + * reason, causing the backlight to switch to ~2% once per boot on the + * first power cord connect or disconnect event. Setting the native + * interface explicitly circumvents this buggy behaviour, by avoiding + * the unregistering process. 
+ */ + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, /* * Desktops which falsely report a backlight and which our heuristics diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 8f4ae6e967e3925538b6d747df2d6394b4a76633..47c72447ccd5948280b511db47faaa039921ff58 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -299,11 +299,10 @@ static int amba_remove(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); struct amba_driver *drv = to_amba_driver(dev->driver); - int ret = 0; pm_runtime_get_sync(dev); if (drv->remove) - ret = drv->remove(pcdev); + drv->remove(pcdev); pm_runtime_put_noidle(dev); /* Undo the runtime PM settings in amba_probe() */ @@ -314,7 +313,7 @@ static int amba_remove(struct device *dev) amba_put_disable_pclk(pcdev); dev_pm_domain_detach(dev, true); - return ret; + return 0; } static void amba_shutdown(struct device *dev) diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index fad6c6a873130a5b4af26e117a36ba94ec9fe42f..fef46de2f6b23c29f2c8aba3d26850c1222d542e 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -917,6 +917,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) irqmask &= ~0x10; pci_write_config_byte(dev, 0x5a, irqmask); + /* + * HPT371 chips physically have only one channel, the secondary one, + * but the primary channel registers do exist! Go figure... + * So, we manually disable the non-existing channel here + * (if the BIOS hasn't done this already). + */ + if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { + u8 mcr1; + + pci_read_config_byte(dev, 0x50, &mcr1); + mcr1 &= ~0x04; + pci_write_config_byte(dev, 0x50, mcr1); + } + /* * default to pci clock. make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. 
Needed @@ -948,14 +962,14 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) if ((freq >> 12) != 0xABCDE) { int i; - u8 sr; + u16 sr; u32 total = 0; pr_warn("BIOS has not set timing clocks\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { - pci_read_config_byte(dev, 0x78, &sr); + pci_read_config_word(dev, 0x78, &sr); total += sr & 0x1FF; udelay(15); } diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b574cce98dc3686ce473b1f2cc132b4de8616ed2..9fcc49be499f18a3cdfcf607f79891a345e36111 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -1112,6 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags); skb_data3 = skb->data[3]; paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr)) + return enq_next; ENI_PRV_PADDR(skb) = paddr; /* prepare DMA queue entries */ j = 0; diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 0ddd611b427766f4f4d01eb17334a1e7ff13124b..43a34aee33b82059983637eb259b200ea46f3e64 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1675,6 +1675,8 @@ static int fs_init(struct fs_dev *dev) dev->hw_base = pci_resource_start(pci_dev, 0); dev->base = ioremap(dev->hw_base, 0x1000); + if (!dev->base) + return 1; reset_chip (dev); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 8f1d6569564c4099deea31b1ebfd2371481ee36a..8ecb9f90f467b0ebfe2bd471c9cce3b6a1ddb96e 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -566,6 +566,12 @@ ssize_t __weak cpu_show_srbds(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -575,6 +581,7 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); +static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -586,6 +593,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_tsx_async_abort.attr, &dev_attr_itlb_multihit.attr, &dev_attr_srbds.attr, + &dev_attr_mmio_stale_data.attr, NULL }; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 81ad4f867f02d584eb08a34cd4a082c8e44ba34d..c72f6f5b3297ded6172e63edb59a8c9ce7d5f36a 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -592,6 +592,9 @@ static int really_probe(struct device *dev, struct device_driver *drv) drv->remove(dev); devres_release_all(dev); + arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; driver_sysfs_remove(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); @@ -768,7 +771,7 @@ static int __init save_async_options(char *buf) pr_warn("Too long list of driver names for 'driver_async_probe'!\n"); strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN); - return 0; + return 1; } __setup("driver_async_probe=", save_async_options); @@ -894,6 +897,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) static int 
__device_attach(struct device *dev, bool allow_async) { int ret = 0; + bool async = false; device_lock(dev); if (dev->p->dead) { @@ -932,7 +936,7 @@ static int __device_attach(struct device *dev, bool allow_async) */ dev_dbg(dev, "scheduling asynchronous probe\n"); get_device(dev); - async_schedule_dev(__device_attach_async_helper, dev); + async = true; } else { pm_request_idle(dev); } @@ -942,6 +946,8 @@ static int __device_attach(struct device *dev, bool allow_async) } out_unlock: device_unlock(dev); + if (async) + async_schedule_dev(__device_attach_async_helper, dev); return ret; } @@ -1056,6 +1062,7 @@ static int __driver_attach(struct device *dev, void *data) { struct device_driver *drv = data; int ret; + bool async = false; /* * Lock device and try to bind to it. We drop the error @@ -1092,9 +1099,11 @@ static int __driver_attach(struct device *dev, void *data) if (!dev->driver) { get_device(dev); dev->p->async_driver = drv; - async_schedule_dev(__driver_attach_async_helper, dev); + async = true; } device_unlock(dev); + if (async) + async_schedule_dev(__driver_attach_async_helper, dev); return 0; } @@ -1168,6 +1177,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) devres_release_all(dev); arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 4167e2aef397519645b6be5c1cf42bd6f3571321..1dbaaddf540e162c9147fd6bd24b8f2de9a7b506 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1994,7 +1994,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops) void device_pm_check_callbacks(struct device *dev) { - spin_lock_irq(&dev->power.lock); + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); dev->power.no_pm_callbacks = (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && !dev->bus->suspend && !dev->bus->resume)) && @@ -2003,7 +2005,7 @@ void device_pm_check_callbacks(struct device *dev) (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && !dev->driver->suspend && !dev->driver->resume)); - spin_unlock_irq(&dev->power.lock); + spin_unlock_irqrestore(&dev->power.lock, flags); } bool dev_pm_skip_suspend(struct device *dev) diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index ad5c2de395d1f11690e4b6dd03fc435565a51a85..87c5c421e0f461a307c484821243f82aef75a1b0 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -170,11 +170,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) ret = regmap_write(map, reg, d->mask_buf[i]); if (d->chip->clear_ack) { if (d->chip->ack_invert && !ret) - ret = regmap_write(map, reg, - d->mask_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~d->mask_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", @@ -509,11 +507,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) data->status_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - data->status_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~data->status_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(map->dev, "Failed to ack 0x%x: %d\n", @@ -745,13 +741,9 @@ int 
regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->status_buf[i] & d->mask_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - (d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~(d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, 0); } if (ret != 0) { dev_err(map->dev, "Failed to ack 0x%x: %d\n", diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index f2548049aa0e92137f2f59728d517ffdc6c53cc2..40c53632512b764eee3b720a973b55930096e06c 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -39,6 +39,22 @@ config BLK_DEV_FD To compile this driver as a module, choose M here: the module will be called floppy. +config BLK_DEV_FD_RAWCMD + bool "Support for raw floppy disk commands (DEPRECATED)" + depends on BLK_DEV_FD + help + If you want to use actual physical floppies and expect to do + special low-level hardware accesses to them (access and use + non-standard formats, for example), then enable this. + + Note that the code enabled by this option is rarely used and + might be unstable or insecure, and distros should not enable it. + + Note: FDRAWCMD is deprecated and will be removed from the kernel + in the near future. + + If unsure, say N. + config AMIGA_FLOPPY tristate "Amiga floppy support" depends on AMIGA diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 330f851cb8f0b0acccb15160f7a8f9cc64f723b8..69638146f949cfa339e6c428de6d8af1b0dd1a12 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -177,7 +177,8 @@ void start_new_tl_epoch(struct drbd_connection *connection) void complete_master_bio(struct drbd_device *device, struct bio_and_error *m) { - m->bio->bi_status = errno_to_blk_status(m->error); + if (unlikely(m->error)) + m->bio->bi_status = errno_to_blk_status(m->error); bio_endio(m->bio); dec_ap_bio(device); } diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 8fc23f5026f0640cbb3bac58a824f453ad5a714c..4ef407a33996aaf95e8297374e623fae8c81ba28 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -3067,6 +3067,8 @@ static const char *drive_name(int type, int drive) return "(null)"; } +#ifdef CONFIG_BLK_DEV_FD_RAWCMD + /* raw commands */ static void raw_cmd_done(int flag) { @@ -3271,6 +3273,35 @@ static int raw_cmd_ioctl(int cmd, void __user *param) return ret; } +static int floppy_raw_cmd_ioctl(int type, int drive, int cmd, + void __user *param) +{ + int ret; + + pr_warn_once("Note: FDRAWCMD is deprecated and will be removed from the kernel in the near future.\n"); + + if (type) + return -EINVAL; + if (lock_fdc(drive)) + return -EINTR; + set_floppy(drive); + ret = raw_cmd_ioctl(cmd, param); + if (ret == -EINTR) + return -EINTR; + process_fd_request(); + return ret; +} + +#else /* CONFIG_BLK_DEV_FD_RAWCMD */ + +static int floppy_raw_cmd_ioctl(int type, int drive, int cmd, + void __user *param) +{ + return -EOPNOTSUPP; +} + +#endif + static int invalidate_drive(struct block_device *bdev) { /* invalidate the buffer track to force a reread */ @@ -3459,7 +3490,6 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int { int drive = (long)bdev->bd_disk->private_data; int type = ITYPE(drive_state[drive].fd_device); - int i; int ret; int size; union inparam { @@ -3610,16 +3640,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int outparam = &write_errors[drive]; break; case FDRAWCMD: - 
if (type) - return -EINVAL; - if (lock_fdc(drive)) - return -EINTR; - set_floppy(drive); - i = raw_cmd_ioctl(cmd, (void __user *)param); - if (i == -EINTR) - return -EINTR; - process_fd_request(); - return i; + return floppy_raw_cmd_ioctl(type, drive, cmd, (void __user *)param); case FDTWADDLE: if (lock_fdc(drive)) return -EINTR; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index e354faf7c9e6fca8430bc47d25f2fe93b973a521..9ee97e5933471b6dfec315529e38a38e0b5d33ce 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -797,33 +797,33 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) { - return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset); + return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset); } static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) { - return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); + return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); } static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) { int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); - return sprintf(buf, "%s\n", autoclear ? "1" : "0"); + return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0"); } static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) { int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); - return sprintf(buf, "%s\n", partscan ? "1" : "0"); + return sysfs_emit(buf, "%s\n", partscan ? "1" : "0"); } static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) { int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); - return sprintf(buf, "%s\n", dio ? "1" : "0"); + return sysfs_emit(buf, "%s\n", dio ? "1" : "0"); } LOOP_ATTR_RO(backing_file); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 42acf9587ef38947a68485dd884f5c2507b6cc45..02e2056780ad2287f3cbb6b42514fa5cc9ebf7b3 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -825,9 +825,17 @@ static int virtblk_probe(struct virtio_device *vdev) err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, struct virtio_blk_config, blk_size, &blk_size); - if (!err) + if (!err) { + err = blk_validate_block_size(blk_size); + if (err) { + dev_err(&vdev->dev, + "virtio_blk: invalid block size: 0x%x\n", + blk_size); + goto out_free_tags; + } + blk_queue_logical_block_size(q, blk_size); - else + } else blk_size = queue_logical_block_size(q); /* Use topology information if available */ @@ -869,9 +877,15 @@ static int virtblk_probe(struct virtio_device *vdev) virtio_cread(vdev, struct virtio_blk_config, max_discard_seg, &v); + + /* + * max_discard_seg == 0 is out of spec but we always + * handled it. 
+ */ + if (!v) + v = sg_elems - 2; blk_queue_max_discard_segments(q, - min_not_zero(v, - MAX_DISCARD_SEGMENTS)); + min(v, MAX_DISCARD_SEGMENTS)); blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 74856a58621622dae4582f05a4914dd7bdfe3f05..c41560be39fb6bcacb6a58a8a7b7e789b90c40d6 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -981,6 +981,8 @@ static int btmtksdio_probe(struct sdio_func *func, hdev->manufacturer = 70; set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + sdio_set_drvdata(func, bdev); + err = hci_register_dev(hdev); if (err < 0) { dev_err(&func->dev, "Can't register HCI device\n"); @@ -988,8 +990,6 @@ static int btmtksdio_probe(struct sdio_func *func, return err; } - sdio_set_drvdata(func, bdev); - /* pm_runtime_enable would be done after the firmware is being * downloaded because the core layer probably already enables * runtime PM for this func such as the case host->caps & diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index 9e03402ef1b378c2f613df7697f1708beba1f9b9..e9a44ab3812df06e1714cb237eb15d4b72f3b5bd 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -305,6 +305,8 @@ int hci_uart_register_device(struct hci_uart *hu, if (err) return err; + percpu_init_rwsem(&hu->proto_lock); + err = p->open(hu); if (err) goto err_open; @@ -327,7 +329,6 @@ int hci_uart_register_device(struct hci_uart *hu, INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); - percpu_init_rwsem(&hu->proto_lock); /* Only when vendor specific setup callback is provided, consider * the manufacturer information valid. This avoids filling in the diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c index 626dedd110cbc2463b67cf48599242740a037e21..fca0d0669aa97e78168b20aa503a60fa9cdf72d1 100644 --- a/drivers/bus/mips_cdmm.c +++ b/drivers/bus/mips_cdmm.c @@ -351,6 +351,7 @@ phys_addr_t __weak mips_cdmm_phys_base(void) np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm"); if (np) { err = of_address_to_resource(np, 0, &res); + of_node_put(np); if (!err) return res.start; } diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index dda4a9dfad2e8f609170d2d6dd006ab1676030f8..87cebb34aeccdad84d581343250dd4ea57cc2e57 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -427,7 +427,7 @@ config HW_RANDOM_MESON config HW_RANDOM_CAVIUM tristate "Cavium ThunderX Random Number Generator support" - depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT)) + depends on HW_RANDOM && PCI && ARCH_THUNDER default HW_RANDOM help This driver provides kernel-side support for the Random Number diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c index ecb71c4317a503a8772fb078cfec2950ce3a39d8..8cf0ef501341eafbdeb6ae31d95bc2327f7a0532 100644 --- a/drivers/char/hw_random/atmel-rng.c +++ b/drivers/char/hw_random/atmel-rng.c @@ -114,6 +114,7 @@ static int atmel_trng_probe(struct platform_device *pdev) err_register: clk_disable_unprepare(trng->clk); + atmel_trng_disable(trng); return ret; } diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c index 3de4a6a443ef98ac3369228593147afd7cd1b862..6f66919652bf571b25a8c2762de0fe12398aab67 100644 --- a/drivers/char/hw_random/cavium-rng-vf.c +++ b/drivers/char/hw_random/cavium-rng-vf.c @@ -1,10 +1,7 @@ +// SPDX-License-Identifier: 
GPL-2.0 /* - * Hardware Random Number Generator support for Cavium, Inc. - * Thunder processor family. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. + * Hardware Random Number Generator support. + * Cavium Thunder, Marvell OcteonTx/Tx2 processor families. * * Copyright (C) 2016 Cavium, Inc. */ @@ -15,16 +12,146 @@ #include #include +#include + +/* PCI device IDs */ +#define PCI_DEVID_CAVIUM_RNG_PF 0xA018 +#define PCI_DEVID_CAVIUM_RNG_VF 0xA033 + +#define HEALTH_STATUS_REG 0x38 + +/* RST device info */ +#define PCI_DEVICE_ID_RST_OTX2 0xA085 +#define RST_BOOT_REG 0x1600ULL +#define CLOCK_BASE_RATE 50000000ULL +#define MSEC_TO_NSEC(x) (x * 1000000) + struct cavium_rng { struct hwrng ops; void __iomem *result; + void __iomem *pf_regbase; + struct pci_dev *pdev; + u64 clock_rate; + u64 prev_error; + u64 prev_time; }; +static inline bool is_octeontx(struct pci_dev *pdev) +{ + if (midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_83XX, + MIDR_CPU_VAR_REV(0, 0), + MIDR_CPU_VAR_REV(3, 0)) || + midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_81XX, + MIDR_CPU_VAR_REV(0, 0), + MIDR_CPU_VAR_REV(3, 0)) || + midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX, + MIDR_CPU_VAR_REV(0, 0), + MIDR_CPU_VAR_REV(3, 0))) + return true; + + return false; +} + +static u64 rng_get_coprocessor_clkrate(void) +{ + u64 ret = CLOCK_BASE_RATE * 16; /* Assume 800Mhz as default */ + struct pci_dev *pdev; + void __iomem *base; + + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_RST_OTX2, NULL); + if (!pdev) + goto error; + + base = pci_ioremap_bar(pdev, 0); + if (!base) + goto error_put_pdev; + + /* RST: PNR_MUL * 50Mhz gives clockrate */ + ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT_REG) >> 33) & 0x3F); + + iounmap(base); + +error_put_pdev: + pci_dev_put(pdev); + +error: + return ret; +} + +static int check_rng_health(struct cavium_rng *rng) +{ + u64 cur_err, cur_time; + u64 status, cycles; + u64 time_elapsed; + + + /* Skip checking health for OcteonTx */ + if (!rng->pf_regbase) + return 0; + + status = readq(rng->pf_regbase + HEALTH_STATUS_REG); + if (status & BIT_ULL(0)) { + dev_err(&rng->pdev->dev, "HWRNG: Startup health test failed\n"); + return -EIO; + } + + cycles = status >> 1; + if (!cycles) + return 0; + + cur_time = arch_timer_read_counter(); + + /* RNM_HEALTH_STATUS[CYCLES_SINCE_HEALTH_FAILURE] + * Number of coprocessor cycles times 2 since the last failure. + * This field doesn't get cleared/updated until another failure. + */ + cycles = cycles / 2; + cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */ + + /* Ignore errors that happenned a long time ago, these + * are most likely false positive errors. + */ + if (cur_err > MSEC_TO_NSEC(10)) { + rng->prev_error = 0; + rng->prev_time = 0; + return 0; + } + + if (rng->prev_error) { + /* Calculate time elapsed since last error + * '1' tick of CNTVCT is 10ns, since it runs at 100Mhz. + */ + time_elapsed = (cur_time - rng->prev_time) * 10; + time_elapsed += rng->prev_error; + + /* Check if current error is a new one or the old one itself. + * If error is a new one then consider there is a persistent + * issue with entropy, declare hardware failure. 
+ */ + if (cur_err < time_elapsed) { + dev_err(&rng->pdev->dev, "HWRNG failure detected\n"); + rng->prev_error = cur_err; + rng->prev_time = cur_time; + return -EIO; + } + } + + rng->prev_error = cur_err; + rng->prev_time = cur_time; + return 0; +} + /* Read data from the RNG unit */ static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait) { struct cavium_rng *p = container_of(rng, struct cavium_rng, ops); unsigned int size = max; + int err = 0; + + err = check_rng_health(p); + if (err) + return err; while (size >= 8) { *((u64 *)dat) = readq(p->result); @@ -39,6 +166,39 @@ static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait) return max; } +static int cavium_map_pf_regs(struct cavium_rng *rng) +{ + struct pci_dev *pdev; + + /* Health status is not supported on 83xx, skip mapping PF CSRs */ + if (is_octeontx(rng->pdev)) { + rng->pf_regbase = NULL; + return 0; + } + + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_CAVIUM_RNG_PF, NULL); + if (!pdev) { + dev_err(&pdev->dev, "Cannot find RNG PF device\n"); + return -EIO; + } + + rng->pf_regbase = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!rng->pf_regbase) { + dev_err(&pdev->dev, "Failed to map PF CSR region\n"); + pci_dev_put(pdev); + return -ENOMEM; + } + + pci_dev_put(pdev); + + /* Get co-processor clock rate */ + rng->clock_rate = rng_get_coprocessor_clkrate(); + + return 0; +} + /* Map Cavium RNG to an HWRNG object */ static int cavium_rng_probe_vf(struct pci_dev *pdev, const struct pci_device_id *id) @@ -50,6 +210,8 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, if (!rng) return -ENOMEM; + rng->pdev = pdev; + /* Map the RNG result */ rng->result = pcim_iomap(pdev, 0, 0); if (!rng->result) { @@ -67,6 +229,11 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, pci_set_drvdata(pdev, rng); + /* Health status is available only at PF, hence map PF registers. */ + ret = cavium_map_pf_regs(rng); + if (ret) + return ret; + ret = devm_hwrng_register(&pdev->dev, &rng->ops); if (ret) { dev_err(&pdev->dev, "Error registering device as HWRNG.\n"); @@ -76,10 +243,18 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev, return 0; } +/* Remove the VF */ +static void cavium_rng_remove_vf(struct pci_dev *pdev) +{ + struct cavium_rng *rng; + + rng = pci_get_drvdata(pdev); + iounmap(rng->pf_regbase); +} static const struct pci_device_id cavium_rng_vf_id_table[] = { - { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0}, - {0,}, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_VF) }, + { 0, } }; MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table); @@ -87,8 +262,9 @@ static struct pci_driver cavium_rng_vf_driver = { .name = "cavium_rng_vf", .id_table = cavium_rng_vf_id_table, .probe = cavium_rng_probe_vf, + .remove = cavium_rng_remove_vf, }; module_pci_driver(cavium_rng_vf_driver); MODULE_AUTHOR("Omer Khaliq "); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c index 63d6e68c24d2fc979f54345c2f71ad2e58b36236..b96579222408ba0bd24c4cdf2f063aa1bedf7514 100644 --- a/drivers/char/hw_random/cavium-rng.c +++ b/drivers/char/hw_random/cavium-rng.c @@ -1,10 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * Hardware Random Number Generator support for Cavium Inc. - * Thunder processor family. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
+ * Hardware Random Number Generator support. + * Cavium Thunder, Marvell OcteonTx/Tx2 processor families. * * Copyright (C) 2016 Cavium, Inc. */ @@ -91,4 +88,4 @@ static struct pci_driver cavium_rng_pf_driver = { module_pci_driver(cavium_rng_pf_driver); MODULE_AUTHOR("Omer Khaliq "); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index b0ded41eb865f56483c2fa80dbf7361832680b3c..e8f9621e795410a21fdc6760e8c7b949c67172e7 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -65,15 +65,14 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) out_release: amba_release_regions(dev); out_clk: - clk_disable(rng_clk); + clk_disable_unprepare(rng_clk); return ret; } -static int nmk_rng_remove(struct amba_device *dev) +static void nmk_rng_remove(struct amba_device *dev) { amba_release_regions(dev); - clk_disable(rng_clk); - return 0; + clk_disable_unprepare(rng_clk); } static const struct amba_id nmk_rng_ids[] = { diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 92eda5b2f1341ba46053a67c715b6ecc67be7516..883b4a3410122b84a143e6c73d517597663f5d7d 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -503,7 +503,7 @@ static void panic_halt_ipmi_heartbeat(void) msg.cmd = IPMI_WDOG_RESET_TIMER; msg.data = NULL; msg.data_len = 0; - atomic_add(1, &panic_done_count); + atomic_add(2, &panic_done_count); rv = ipmi_request_supply_msgs(watchdog_user, (struct ipmi_addr *) &addr, 0, @@ -513,7 +513,7 @@ static void panic_halt_ipmi_heartbeat(void) &panic_halt_heartbeat_recv_msg, 1); if (rv) - atomic_sub(1, &panic_done_count); + atomic_sub(2, &panic_done_count); } static struct ipmi_smi_msg panic_halt_smi_msg = { @@ -537,12 +537,12 @@ static void panic_halt_ipmi_set_timeout(void) /* Wait for the messages to be free. */ while (atomic_read(&panic_done_count) != 0) ipmi_poll_interface(watchdog_user); - atomic_add(1, &panic_done_count); + atomic_add(2, &panic_done_count); rv = __ipmi_set_timeout(&panic_halt_smi_msg, &panic_halt_recv_msg, &send_heartbeat_now); if (rv) { - atomic_sub(1, &panic_done_count); + atomic_sub(2, &panic_done_count); pr_warn("Unable to extend the watchdog timeout\n"); } else { if (send_heartbeat_now) diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index ddaeceb7e109105297f6baf53d74138911246fed..ed600473ad7e3e63d4d27a7e634bb96dc59320fa 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -274,14 +274,6 @@ static void tpm_dev_release(struct device *dev) kfree(chip); } -static void tpm_devs_release(struct device *dev) -{ - struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); - - /* release the master device reference */ - put_device(&chip->dev); -} - /** * tpm_class_shutdown() - prepare the TPM device for loss of power. * @dev: device to which the chip is associated. @@ -344,7 +336,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->dev_num = rc; device_initialize(&chip->dev); - device_initialize(&chip->devs); chip->dev.class = tpm_class; chip->dev.class->shutdown_pre = tpm_class_shutdown; @@ -352,29 +343,12 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->dev.parent = pdev; chip->dev.groups = chip->groups; - chip->devs.parent = pdev; - chip->devs.class = tpmrm_class; - chip->devs.release = tpm_devs_release; - /* get extra reference on main device to hold on - * behalf of devs. 
This holds the chip structure - * while cdevs is in use. The corresponding put - * is in the tpm_devs_release (TPM2 only) - */ - if (chip->flags & TPM_CHIP_FLAG_TPM2) - get_device(&chip->dev); - if (chip->dev_num == 0) chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); else chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); - chip->devs.devt = - MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); - rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num); - if (rc) - goto out; - rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); if (rc) goto out; @@ -382,9 +356,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->flags |= TPM_CHIP_FLAG_VIRTUAL; cdev_init(&chip->cdev, &tpm_fops); - cdev_init(&chip->cdevs, &tpmrm_fops); chip->cdev.owner = THIS_MODULE; - chip->cdevs.owner = THIS_MODULE; rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); if (rc) { @@ -396,7 +368,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, return chip; out: - put_device(&chip->devs); put_device(&chip->dev); return ERR_PTR(rc); } @@ -445,14 +416,9 @@ static int tpm_add_char_device(struct tpm_chip *chip) } if (chip->flags & TPM_CHIP_FLAG_TPM2) { - rc = cdev_device_add(&chip->cdevs, &chip->devs); - if (rc) { - dev_err(&chip->devs, - "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", - dev_name(&chip->devs), MAJOR(chip->devs.devt), - MINOR(chip->devs.devt), rc); - return rc; - } + rc = tpm_devs_add(chip); + if (rc) + goto err_del_cdev; } /* Make the chip available. */ @@ -460,6 +426,10 @@ static int tpm_add_char_device(struct tpm_chip *chip) idr_replace(&dev_nums_idr, chip, chip->dev_num); mutex_unlock(&idr_lock); + return 0; + +err_del_cdev: + cdev_device_del(&chip->cdev, &chip->dev); return rc; } @@ -641,7 +611,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) hwrng_unregister(&chip->hwrng); tpm_bios_log_teardown(chip); if (chip->flags & TPM_CHIP_FLAG_TPM2) - cdev_device_del(&chip->cdevs, &chip->devs); + tpm_devs_remove(chip); tpm_del_char_device(chip); } EXPORT_SYMBOL_GPL(tpm_chip_unregister); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 1784530b8387bb46bec8694a49ddb5385833be80..b99e1941c52c98359863ed1bfb9c785ed22ceabe 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -70,7 +70,13 @@ static void tpm_dev_async_work(struct work_struct *work) ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); - if (ret > 0) { + + /* + * If ret is > 0 then tpm_dev_transmit returned the size of the + * response. If ret is < 0 then tpm_dev_transmit failed and + * returned an error code. 
+ */ + if (ret != 0) { priv->response_length = ret; mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 283f78211c3a7bc09092085d12834c48571f93f3..2163c6ee0d364f3f8ee935e5abae8f547b4a886f 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -234,6 +234,8 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, size_t cmdsiz); int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, size_t *bufsiz); +int tpm_devs_add(struct tpm_chip *chip); +void tpm_devs_remove(struct tpm_chip *chip); void tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 97e916856cf3e25445c94256dbe94569f5e84524..ffb35f0154c16c463082962426dcc7bdaa4c3a38 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -58,12 +58,12 @@ int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space) { - mutex_lock(&chip->tpm_mutex); - if (!tpm_chip_start(chip)) { + + if (tpm_try_get_ops(chip) == 0) { tpm2_flush_sessions(chip, space); - tpm_chip_stop(chip); + tpm_put_ops(chip); } - mutex_unlock(&chip->tpm_mutex); + kfree(space->context_buf); kfree(space->session_buf); } @@ -574,3 +574,68 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, dev_err(&chip->dev, "%s: error %d\n", __func__, rc); return rc; } + +/* + * Put the reference to the main device. + */ +static void tpm_devs_release(struct device *dev) +{ + struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); + + /* release the master device reference */ + put_device(&chip->dev); +} + +/* + * Remove the device file for exposed TPM spaces and release the device + * reference. This may also release the reference to the master device. + */ +void tpm_devs_remove(struct tpm_chip *chip) +{ + cdev_device_del(&chip->cdevs, &chip->devs); + put_device(&chip->devs); +} + +/* + * Add a device file to expose TPM spaces. Also take a reference to the + * main device. + */ +int tpm_devs_add(struct tpm_chip *chip) +{ + int rc; + + device_initialize(&chip->devs); + chip->devs.parent = chip->dev.parent; + chip->devs.class = tpmrm_class; + + /* + * Get extra reference on main device to hold on behalf of devs. + * This holds the chip structure while cdevs is in use. The + * corresponding put is in the tpm_devs_release. 
+ */ + get_device(&chip->dev); + chip->devs.release = tpm_devs_release; + chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); + cdev_init(&chip->cdevs, &tpmrm_fops); + chip->cdevs.owner = THIS_MODULE; + + rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); + if (rc) + goto err_put_devs; + + rc = cdev_device_add(&chip->cdevs, &chip->devs); + if (rc) { + dev_err(&chip->devs, + "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", + dev_name(&chip->devs), MAJOR(chip->devs.devt), + MINOR(chip->devs.devt), rc); + goto err_put_devs; + } + + return 0; + +err_put_devs: + put_device(&chip->devs); + + return rc; +} diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 673522874cec43e6e502b83c9795978671c0ef96..3dd4deb60adbf046c88790d591ef5b0115689f48 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1959,6 +1959,13 @@ static void virtcons_remove(struct virtio_device *vdev) list_del(&portdev->list); spin_unlock_irq(&pdrvdata_lock); + /* Device is going away, exit any polling for buffers */ + virtio_break_device(vdev); + if (use_multiport(portdev)) + flush_work(&portdev->control_work); + else + flush_work(&portdev->config_work); + /* Disable interrupts for vqs */ vdev->config->reset(vdev); /* Finish up work that's lined up */ diff --git a/drivers/clk/actions/owl-s700.c b/drivers/clk/actions/owl-s700.c index a2f34d13fb54304357eed076ceb11e31863e76d2..6ea7da1d6d755e25c191fc91d5e2780343c08680 100644 --- a/drivers/clk/actions/owl-s700.c +++ b/drivers/clk/actions/owl-s700.c @@ -162,6 +162,7 @@ static struct clk_div_table hdmia_div_table[] = { static struct clk_div_table rmii_div_table[] = { {0, 4}, {1, 10}, + {0, 0} }; /* divider clocks */ diff --git a/drivers/clk/actions/owl-s900.c b/drivers/clk/actions/owl-s900.c index 790890978424a241c8229c7592c2843b1fdb694d..5144ada2c7e1a46ac2617e0cd1af7f9dcc1c37c7 100644 --- a/drivers/clk/actions/owl-s900.c +++ b/drivers/clk/actions/owl-s900.c @@ -140,7 +140,7 @@ static struct clk_div_table rmii_ref_div_table[] = { static struct clk_div_table usb3_mac_div_table[] = { { 1, 2 }, { 2, 3 }, { 3, 4 }, - { 0, 8 }, + { 0, 0 } }; static struct clk_div_table i2s_div_table[] = { diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c index a092a940baa40bc857ff5f26d3bd1d07c4f06ec8..9d25b23fb99d7f8c706aa4d36e9b18afca2bb3d1 100644 --- a/drivers/clk/at91/sama7g5.c +++ b/drivers/clk/at91/sama7g5.c @@ -606,16 +606,16 @@ static const struct { { .n = "pdmc0_gclk", .id = 68, .r = { .max = 50000000 }, - .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, - .pp_mux_table = { 5, 8, }, + .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, + .pp_mux_table = { 5, 9, }, .pp_count = 2, .pp_chg_id = INT_MIN, }, { .n = "pdmc1_gclk", .id = 69, .r = { .max = 50000000, }, - .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, - .pp_mux_table = { 5, 8, }, + .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, + .pp_mux_table = { 5, 9, }, .pp_count = 2, .pp_chg_id = INT_MIN, }, diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c index a2c6486ef1708bb387fd88b33e8bbd1fd04c730d..f8417ee2961aaabc9c46e9007716d83b25a3c7bc 100644 --- a/drivers/clk/clk-clps711x.c +++ b/drivers/clk/clk-clps711x.c @@ -28,11 +28,13 @@ static const struct clk_div_table spi_div_table[] = { { .val = 1, .div = 8, }, { .val = 2, .div = 2, }, { .val = 3, .div = 1, }, + { /* sentinel */ } }; static const struct clk_div_table timer_div_table[] = { { .val = 0, .div = 256, }, { .val = 1, .div = 1, }, + { /* sentinel */ } 
}; struct clps711x_clk { diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index b8a0e3d23698c259971ebb1366b3f76fe51b9caa..92fc084203b757cef465389105c1dbb4aa88b30b 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -3384,6 +3384,19 @@ static void clk_core_reparent_orphans_nolock(void) __clk_set_parent_after(orphan, parent, NULL); __clk_recalc_accuracies(orphan); __clk_recalc_rates(orphan, 0); + + /* + * __clk_init_parent() will set the initial req_rate to + * 0 if the clock doesn't have clk_ops::recalc_rate and + * is an orphan when it's registered. + * + * 'req_rate' is used by clk_set_rate_range() and + * clk_put() to trigger a clk_set_rate() call whenever + * the boundaries are modified. Let's make sure + * 'req_rate' is set to something non-zero so that + * clk_set_rate_range() doesn't drop the frequency. + */ + orphan->req_rate = orphan->rate; } } } diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index c4e0f1c07192f2d8ccd9c059940c104fe6205291..3f6fd7ef2a68fcca4c38de27afcbfaafa61af218 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -849,7 +849,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) hws[IMX7D_WDOG4_ROOT_CLK] = imx_clk_hw_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0); hws[IMX7D_KPP_ROOT_CLK] = imx_clk_hw_gate4("kpp_root_clk", "ipg_root_clk", base + 0x4aa0, 0); hws[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_hw_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0); - hws[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_hw_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0); hws[IMX7D_WRCLK_ROOT_CLK] = imx_clk_hw_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0); hws[IMX7D_USB_CTRL_CLK] = imx_clk_hw_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0); hws[IMX7D_USB_PHY1_CLK] = imx_clk_hw_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0); diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index 8c38e72d14a79919f51569ee226f4b94558f6722..786e361a4a6a456ef090fa770a27704f4ba5c783 100644 --- a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { }, [JZ4725B_CLK_I2S] = { - "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + "i2s", CGU_CLK_MUX | CGU_CLK_DIV, .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 }, .mux = { CGU_REG_CPCCR, 31, 1 }, .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 }, - .gate = { CGU_REG_CLKGR, 6 }, }, [JZ4725B_CLK_SPI] = { diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c index 703f87622cf5f7042c216a27b5842ec83e146a99..1ebf740380efbd38f6a912e59e3488c4fc76a2e9 100644 --- a/drivers/clk/loongson1/clk-loongson1c.c +++ b/drivers/clk/loongson1/clk-loongson1c.c @@ -37,6 +37,7 @@ static const struct clk_div_table ahb_div_table[] = { [1] = { .val = 1, .div = 4 }, [2] = { .val = 2, .div = 3 }, [3] = { .val = 3, .div = 3 }, + [4] = { /* sentinel */ } }; void __init ls1x_clk_init(void) diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 59a5a0f261f336098748b02453a4b59a9910e7f1..71a0d30cf44dffc7c44708b32c869197360335ee 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -264,7 +264,7 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw, static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) { - u32 cfg, mask; + u32 cfg, mask, d_val, not2d_val, n_minus_m; struct clk_hw *hw = 
&rcg->clkr.hw; int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src); @@ -283,8 +283,17 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) if (ret) return ret; + /* Calculate 2d value */ + d_val = f->n; + + n_minus_m = f->n - f->m; + n_minus_m *= 2; + + d_val = clamp_t(u32, d_val, f->m, n_minus_m); + not2d_val = ~d_val & mask; + ret = regmap_update_bits(rcg->clkr.regmap, - RCG_D_OFFSET(rcg), mask, ~f->n); + RCG_D_OFFSET(rcg), mask, not2d_val); if (ret) return ret; } @@ -639,6 +648,7 @@ static const struct frac_entry frac_table_pixel[] = { { 2, 9 }, { 4, 9 }, { 1, 1 }, + { 2, 3 }, { } }; diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index 108fe27bee10f611f7417f01ffd52aec2c3fd4aa..541016db3c4bbae76a6374eebe383bd39f95bb6d 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -60,11 +60,6 @@ static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = { { P_GPLL0_DIV2, 4 }, }; -static const char * const gcc_xo_gpll0[] = { - "xo", - "gpll0", -}; - static const struct parent_map gcc_xo_gpll0_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, @@ -956,6 +951,11 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = { }, }; +static const struct clk_parent_data gcc_xo_gpll0[] = { + { .fw_name = "xo" }, + { .hw = &gpll0.clkr.hw }, +}; + static const struct freq_tbl ftbl_pcie_axi_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(200000000, P_GPLL0, 4, 0, 0), @@ -969,7 +969,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct clk_init_data){ .name = "pcie0_axi_clk_src", - .parent_names = gcc_xo_gpll0, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, @@ -1016,7 +1016,7 @@ static struct clk_rcg2 pcie1_axi_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct clk_init_data){ .name = "pcie1_axi_clk_src", - .parent_names = gcc_xo_gpll0, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, @@ -1074,7 +1074,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { .name = "sdcc1_apps_clk_src", .parent_names = gcc_xo_gpll0_gpll2_gpll0_out_main_div2, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1330,7 +1330,7 @@ static struct clk_rcg2 nss_ce_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct clk_init_data){ .name = "nss_ce_clk_src", - .parent_names = gcc_xo_gpll0, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, @@ -4329,8 +4329,7 @@ static struct clk_rcg2 pcie0_rchng_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct clk_init_data){ .name = "pcie0_rchng_clk_src", - .parent_hws = (const struct clk_hw *[]) { - &gpll0.clkr.hw }, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c index 144d2ba7a9bef10660884cf4185dc551694260f8..463a444c8a7e4dad1e349a9c658dafd07b947c73 100644 --- a/drivers/clk/qcom/gcc-msm8994.c +++ b/drivers/clk/qcom/gcc-msm8994.c @@ -108,6 +108,7 @@ static struct clk_alpha_pll gpll4_early = { static struct clk_alpha_pll_postdiv gpll4 = { .offset = 0x1dc0, + .width = 4, .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], .clkr.hw.init = &(struct clk_init_data) { diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 4ece326ea233e90e51c7dd4e5eecf48b8d6e424d..cf23cfd7e46743703776d12b0d593a05524df31c 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -1,6 +1,6 @@ // 
SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. */ #include @@ -34,9 +34,14 @@ #define CFG_GDSCR_OFFSET 0x4 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */ -#define EN_REST_WAIT_VAL (0x2 << 20) -#define EN_FEW_WAIT_VAL (0x8 << 16) -#define CLK_DIS_WAIT_VAL (0x2 << 12) +#define EN_REST_WAIT_VAL 0x2 +#define EN_FEW_WAIT_VAL 0x8 +#define CLK_DIS_WAIT_VAL 0x2 + +/* Transition delay shifts */ +#define EN_REST_WAIT_SHIFT 20 +#define EN_FEW_WAIT_SHIFT 16 +#define CLK_DIS_WAIT_SHIFT 12 #define RETAIN_MEM BIT(14) #define RETAIN_PERIPH BIT(13) @@ -341,7 +346,18 @@ static int gdsc_init(struct gdsc *sc) */ mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK | EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK; - val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL; + + if (!sc->en_rest_wait_val) + sc->en_rest_wait_val = EN_REST_WAIT_VAL; + if (!sc->en_few_wait_val) + sc->en_few_wait_val = EN_FEW_WAIT_VAL; + if (!sc->clk_dis_wait_val) + sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL; + + val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT | + sc->en_few_wait_val << EN_FEW_WAIT_SHIFT | + sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT; + ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val); if (ret) return ret; diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h index 5bb396b344d16f814f23c0ce7f717eac859c7b54..762f1b5e1ec51b1a4d09f23630d1988ed8f7144f 100644 --- a/drivers/clk/qcom/gdsc.h +++ b/drivers/clk/qcom/gdsc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. 
*/ #ifndef __QCOM_GDSC_H__ @@ -22,6 +22,9 @@ struct reset_controller_dev; * @cxcs: offsets of branch registers to toggle mem/periph bits in * @cxc_count: number of @cxcs * @pwrsts: Possible powerdomain power states + * @en_rest_wait_val: transition delay value for receiving enr ack signal + * @en_few_wait_val: transition delay value for receiving enf ack signal + * @clk_dis_wait_val: transition delay value for halting clock * @resets: ids of resets associated with this gdsc * @reset_count: number of @resets * @rcdev: reset controller @@ -35,6 +38,9 @@ struct gdsc { unsigned int clamp_io_ctrl; unsigned int *cxcs; unsigned int cxc_count; + unsigned int en_rest_wait_val; + unsigned int en_few_wait_val; + unsigned int clk_dis_wait_val; const u8 pwrsts; /* Powerdomain allowable state bitfields */ #define PWRSTS_OFF BIT(0) diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c index 745f9faa98d8ef00e7bcc3483bfdf4584ba525d8..733a962ff521ac53d5f1310abd740955c90d4872 100644 --- a/drivers/clk/tegra/clk-tegra124-emc.c +++ b/drivers/clk/tegra/clk-tegra124-emc.c @@ -191,6 +191,7 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra) tegra->emc = platform_get_drvdata(pdev); if (!tegra->emc) { + put_device(&pdev->dev); pr_err("%s: cannot find EMC driver\n", __func__); return NULL; } diff --git a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c index 5319cd3804801f03bf181e0807b965ab4ee0c1fd..3bc55ab75314bf8d87e05490d95ae0c1c0c4c174 100644 --- a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c +++ b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c @@ -24,6 +24,7 @@ struct clk_hw *uniphier_clk_register_fixed_rate(struct device *dev, init.name = name; init.ops = &clk_fixed_rate_ops; + init.flags = 0; init.parent_names = NULL; init.num_parents = 0; diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index eb596ff9e7bb30908e82f80c04e8046893ce2b8c..279ddff81ab4955a7106dd3504433fa41f546a4e 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -229,8 +229,10 @@ static int __init parse_pmtmr(char *arg) int ret; ret = kstrtouint(arg, 16, &base); - if (ret) - return ret; + if (ret) { + pr_warn("PMTMR: invalid 'pmtmr=' value: '%s'\n", arg); + return 1; + } pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport, base); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index fabad79baafce28ea848a1b42538bed66b3d6893..df194b05e944c631415207f19aa166f36fddf691 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -494,11 +494,14 @@ static int exynos4_mct_dying_cpu(unsigned int cpu) return 0; } -static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) +static int __init exynos4_timer_resources(struct device_node *np) { - int err, cpu; struct clk *mct_clk, *tick_clk; + reg_base = of_iomap(np, 0); + if (!reg_base) + panic("%s: unable to ioremap mct address space\n", __func__); + tick_clk = of_clk_get_by_name(np, "fin_pll"); if (IS_ERR(tick_clk)) panic("%s: unable to determine tick clock rate\n", __func__); @@ -509,9 +512,32 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * panic("%s: unable to retrieve mct clock instance\n", __func__); clk_prepare_enable(mct_clk); - reg_base = base; - if (!reg_base) - panic("%s: unable to ioremap mct address space\n", __func__); + return 0; +} + +static int __init exynos4_timer_interrupts(struct device_node 
*np, + unsigned int int_type) +{ + int nr_irqs, i, err, cpu; + + mct_int_type = int_type; + + /* This driver uses only one global timer interrupt */ + mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); + + /* + * Find out the number of local irqs specified. The local + * timer irqs are specified after the four global timer + * irqs are specified. + */ + nr_irqs = of_irq_count(np); + if (nr_irqs > ARRAY_SIZE(mct_irqs)) { + pr_err("exynos-mct: too many (%d) interrupts configured in DT\n", + nr_irqs); + nr_irqs = ARRAY_SIZE(mct_irqs); + } + for (i = MCT_L0_IRQ; i < nr_irqs; i++) + mct_irqs[i] = irq_of_parse_and_map(np, i); if (mct_int_type == MCT_INT_PPI) { @@ -522,11 +548,14 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * mct_irqs[MCT_L0_IRQ], err); } else { for_each_possible_cpu(cpu) { - int mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; + int mct_irq; struct mct_clock_event_device *pcpu_mevt = per_cpu_ptr(&percpu_mct_tick, cpu); pcpu_mevt->evt.irq = -1; + if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs)) + break; + mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; irq_set_status_flags(mct_irq, IRQ_NOAUTOEN); if (request_irq(mct_irq, @@ -571,24 +600,13 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * static int __init mct_init_dt(struct device_node *np, unsigned int int_type) { - u32 nr_irqs, i; int ret; - mct_int_type = int_type; - - /* This driver uses only one global timer interrupt */ - mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); - - /* - * Find out the number of local irqs specified. The local - * timer irqs are specified after the four global timer - * irqs are specified. - */ - nr_irqs = of_irq_count(np); - for (i = MCT_L0_IRQ; i < nr_irqs; i++) - mct_irqs[i] = irq_of_parse_and_map(np, i); + ret = exynos4_timer_resources(np); + if (ret) + return ret; - ret = exynos4_timer_resources(np, of_iomap(np, 0)); + ret = exynos4_timer_interrupts(np, int_type); if (ret) return ret; diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c index 59e11ca8ee73e02186335c9707f02b2030b511bc..5c9485cb4e0590bfde56ba72d4c20afdc288fc76 100644 --- a/drivers/clocksource/timer-microchip-pit64b.c +++ b/drivers/clocksource/timer-microchip-pit64b.c @@ -121,7 +121,7 @@ static u64 mchp_pit64b_clksrc_read(struct clocksource *cs) return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); } -static u64 mchp_pit64b_sched_read_clk(void) +static u64 notrace mchp_pit64b_sched_read_clk(void) { return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); } diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index 572da477c6d35c5edc64f16b5d8562cde5311d53..b965f20174e3aadf405ccf4998bd63ba1ec647ae 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c @@ -157,9 +157,9 @@ static __init int timer_of_base_init(struct device_node *np, of_base->base = of_base->name ? of_io_request_and_map(np, of_base->index, of_base->name) : of_iomap(np, of_base->index); - if (IS_ERR(of_base->base)) { - pr_err("Failed to iomap (%s)\n", of_base->name); - return PTR_ERR(of_base->base); + if (IS_ERR_OR_NULL(of_base->base)) { + pr_err("Failed to iomap (%s:%s)\n", np->name, of_base->name); + return of_base->base ? 
PTR_ERR(of_base->base) : -ENOMEM; } return 0; diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index 5c40ca1d4740e7f67f4a674dd08a433f78488ffc..2737407ff0698033daa955a41abd65cba914af6f 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -241,8 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void) bool quirk_unreliable_oscillator = false; /* Quirk unreliable 32 KiHz oscillator with incomplete dts */ - if (of_machine_is_compatible("ti,omap3-beagle-ab4") || - of_machine_is_compatible("timll,omap3-devkit8000")) { + if (of_machine_is_compatible("ti,omap3-beagle-ab4")) { quirk_unreliable_oscillator = true; counter_32k = -ENODEV; } @@ -695,9 +694,9 @@ static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa) return 0; } - if (pa == 0x48034000) /* dra7 dmtimer3 */ + if (pa == 0x4882c000) /* dra7 dmtimer15 */ return dmtimer_percpu_timer_init(np, 0); - else if (pa == 0x48036000) /* dra7 dmtimer4 */ + else if (pa == 0x4882e000) /* dra7 dmtimer16 */ return dmtimer_percpu_timer_init(np, 1); return 0; diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index f1b7e3dd6e5daf38c249f15ec2aeb2baa636369c..7c762e105146420872b0f6f43eeeff68f45d90e8 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -109,3 +109,4 @@ obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o +obj-$(CONFIG_SW64_CPUFREQ) += sw64_cpufreq.o diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index fba9937a406b387164a3836c69af2b1ebf2aa0c1..7fdd30e92e42973bfb549500f77a06a4638fe9b4 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -130,7 +130,7 @@ static void get_krait_bin_format_b(struct device *cpu_dev, } /* Check PVS_BLOW_STATUS */ - pte_efuse = *(((u32 *)buf) + 4); + pte_efuse = *(((u32 *)buf) + 1); pte_efuse &= BIT(21); if (pte_efuse) { dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs); diff --git a/drivers/cpufreq/sw64_cpufreq.c b/drivers/cpufreq/sw64_cpufreq.c new file mode 100644 index 0000000000000000000000000000000000000000..5f49b5175d34f634d13e71c18c87a6e142186359 --- /dev/null +++ b/drivers/cpufreq/sw64_cpufreq.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/setup.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +/* + * Cpufreq driver for the sw64 processors + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include /* set_cpus_allowed() */ +#include +#include +#include + +#include +#include + +static uint nowait; + +static struct clk *cpuclk; + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data); + +static struct notifier_block sw64_cpufreq_notifier_block = { + .notifier_call = sw64_cpu_freq_notifier +}; + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freqs = (struct cpufreq_freqs *)data; + unsigned long cpu; + + for_each_online_cpu(cpu) { + if (val == CPUFREQ_POSTCHANGE) { + sw64_update_clockevents(cpu, freqs->new * 1000); + current_cpu_data.loops_per_jiffy = loops_per_jiffy; + } + } + + return 0; +} + +static unsigned int sw64_cpufreq_get(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + + if (!policy || 
IS_ERR(policy->clk)) { + pr_err("%s: No %s associated to cpu: %d\n", + __func__, policy ? "clk" : "policy", cpu); + return 0; + } + + return sw64_clk_get_rate(policy->clk); +} + +/* + * Here we notify other drivers of the proposed change and the final change. + */ +static int sw64_cpufreq_target(struct cpufreq_policy *policy, + unsigned int index) +{ + unsigned long freq; + + freq = (get_cpu_freq() / 1000) * index / 48; + + sw64_store_policy(policy); + + /* setting the cpu frequency */ + sw64_set_rate(-1, freq * 1000); + + return 0; +} + +static int sw64_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + unsigned long rate; + int i; + + cpuclk = sw64_clk_get(NULL, "cpu_clk"); + if (IS_ERR(cpuclk)) { + pr_err("couldn't get CPU clk\n"); + return PTR_ERR(cpuclk); + } + + rate = get_cpu_freq() / 1000; + + /* clock table init */ + for (i = 0; + (sw64_clockmod_table[i].frequency != CPUFREQ_TABLE_END); + i++) + if (sw64_clockmod_table[i].frequency == 0) + sw64_clockmod_table[i].frequency = (rate * i) / 48; + + sw64_set_rate(-1, rate * 1000); + + policy->clk = cpuclk; + + cpufreq_generic_init(policy, &sw64_clockmod_table[0], 0); + + return 0; +} + +static int sw64_cpufreq_verify(struct cpufreq_policy_data *policy) +{ + return cpufreq_frequency_table_verify(policy, &sw64_clockmod_table[0]); +} + +static int sw64_cpufreq_exit(struct cpufreq_policy *policy) +{ + return 0; +} + +static struct freq_attr *sw64_table_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, NULL, +}; + +static struct cpufreq_driver sw64_cpufreq_driver = { + .name = "sw64", + .init = sw64_cpufreq_cpu_init, + .verify = sw64_cpufreq_verify, + .target_index = sw64_cpufreq_target, + .get = sw64_cpufreq_get, + .exit = sw64_cpufreq_exit, + .attr = sw64_table_attr, +}; + +static const struct platform_device_id platform_device_ids[] = { + { + .name = "sw64_cpufreq", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "sw64_cpufreq", + }, + .id_table = platform_device_ids, +}; + + +static int __init cpufreq_init(void) +{ + int ret; + + /* Register platform stuff */ + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("SW-64 CPU frequency driver\n"); + + cpufreq_register_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + return cpufreq_register_driver(&sw64_cpufreq_driver); +} + +static void __exit cpufreq_exit(void) +{ + cpufreq_unregister_driver(&sw64_cpufreq_driver); + cpufreq_unregister_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + platform_driver_unregister(&platform_driver); +} + +module_init(cpufreq_init); +module_exit(cpufreq_exit); + +module_param(nowait, uint, 0644); +MODULE_PARM_DESC(nowait, "Disable SW-64 specific wait"); + +MODULE_DESCRIPTION("cpufreq driver for sw64"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 33707a2e55ff0c2dd0e2f16c1487c708038db867..64133d4da3d566b88fdb905e5ecc3c39cbe74a98 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -11,6 +11,7 @@ * You could find a link for the datasheet in Documentation/arm/sunxi.rst */ +#include #include #include #include @@ -280,7 +281,9 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) flow = rctx->flow; err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm)); + 
local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 2f09a37306e28412c81c3f4a7576526c5312af10..7f16b9406a41fc38f3f824fa98e478a6f1b8c538 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -9,6 +9,7 @@ * * You could find the datasheet in Documentation/arm/sunxi.rst */ +#include #include #include #include @@ -413,6 +414,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) theend: kfree(buf); kfree(result); + local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 7c355bc2fb0664172bd3c1f499bafd55a82234d1..f783748462f94781f5553dff988afd2b0f51d877 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -11,6 +11,7 @@ * You could find a link for the datasheet in Documentation/arm/sunxi.rst */ +#include #include #include #include @@ -271,7 +272,9 @@ static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *ar struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); err = sun8i_ss_cipher(breq); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 80e89066dbd1ae60f3a646a8a69481267b1d5074..319fe3279a7162e5bc5811822878fe87a9543129 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -30,6 +30,8 @@ static const struct ss_variant ss_a80_variant = { .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, }, + .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, + }, .op_mode = { SS_OP_ECB, SS_OP_CBC, }, .ss_clks = { diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 64446b86c927f15dbc21e8dbb7cc52929a1f3a23..7b1d00fbbeb0f3aa56874fdfdf0c5734f004b43c 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -9,6 +9,7 @@ * * You could find the datasheet in Documentation/arm/sunxi.rst */ +#include #include #include #include @@ -441,6 +442,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) theend: kfree(pad); kfree(result); + local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index 8b5e07316352c3df6ac52ea27bebd11c0897fe72..652e72d030bb09a51f2d772e6817283b489b2ad8 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -265,7 +265,9 @@ static int meson_handle_cipher_request(struct crypto_engine *engine, struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); err = meson_cipher(breq); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 
0770a83bf1a570c1b0392ab82f76e323ea4636b5..b3eea329f840fbc974aed0b93c2412c11efd9895 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -633,6 +633,20 @@ static int ccp_terminate_all(struct dma_chan *dma_chan) return 0; } +static void ccp_dma_release(struct ccp_device *ccp) +{ + struct ccp_dma_chan *chan; + struct dma_chan *dma_chan; + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) { + chan = ccp->ccp_dma_chan + i; + dma_chan = &chan->dma_chan; + tasklet_kill(&chan->cleanup_tasklet); + list_del_rcu(&dma_chan->device_node); + } +} + int ccp_dmaengine_register(struct ccp_device *ccp) { struct ccp_dma_chan *chan; @@ -737,6 +751,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp) return 0; err_reg: + ccp_dma_release(ccp); kmem_cache_destroy(ccp->dma_desc_cache); err_cache: @@ -753,6 +768,7 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) return; dma_async_device_unregister(dma_dev); + ccp_dma_release(ccp); kmem_cache_destroy(ccp->dma_desc_cache); kmem_cache_destroy(ccp->dma_cmd_cache); diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index a5e041d9d2cf132516aa43db7bb3d13ec04f703a..11e0278c8631d2e0b2ea5f0e406b02074218a658 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -258,6 +258,13 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg, { int ret = 0; + if (!nbytes) { + *mapped_nents = 0; + *lbytes = 0; + *nents = 0; + return 0; + } + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); if (*nents > max_sg_nents) { *nents = 0; diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index dafa6577a8451f2c175f44070a10be4760b34b92..c289e4d5cbdc0517a9fba64ca05c0015400cefae 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -254,8 +254,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm) &ctx_p->user.key_dma_addr); /* Free key buffer in context */ - kfree_sensitive(ctx_p->user.key); dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key); + kfree_sensitive(ctx_p->user.key); } struct tdes_keys { diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d19e5ffb5104b953c90e771401ca5c5a3778fd48..d6f9e2fe863d771d64146b8cc203e857174c394c 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -331,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128); } - for_each_sg(req->src, src, sg_nents(src), i) { + for_each_sg(req->src, src, sg_nents(req->src), i) { src_buf = sg_virt(src); len = sg_dma_len(src); tlen += len; diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index ab621b7dbd203e700b1f252ac3c33a9d7b69318b..9210af8a1f58ccda41fca0f0983750abc5e03aef 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -126,6 +126,14 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev) goto err; if (adf_cfg_section_add(accel_dev, "Accelerator0")) goto err; + + /* Temporarily set the number of crypto instances to zero to avoid + * registering the crypto algorithms. 
+ * This will be removed when the algorithms will support the + * CRYPTO_TFM_REQ_MAY_BACKLOG flag + */ + instances = 0; + for (i = 0; i < instances; i++) { val = i; snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index 99ba8d51d10209de2d99c666bb790c181d800c64..11f30fd48c1414780006ec57b7fca020ce1e891d 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -43,16 +44,19 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) { unsigned int currsize = 0; u32 val; + int ret; /* read random data from hardware */ do { - val = readl_relaxed(rng->base + PRNG_STATUS); - if (!(val & PRNG_STATUS_DATA_AVAIL)) - break; + ret = readl_poll_timeout(rng->base + PRNG_STATUS, val, + val & PRNG_STATUS_DATA_AVAIL, + 200, 10000); + if (ret) + return ret; val = readl_relaxed(rng->base + PRNG_DATA_OUT); if (!val) - break; + return -EINVAL; if ((max - currsize) >= WORD_SZ) { memcpy(data, &val, WORD_SZ); @@ -61,11 +65,10 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) } else { /* copy only remaining bytes */ memcpy(data, &val, max - currsize); - break; } } while (currsize < max); - return currsize; + return 0; } static int qcom_rng_generate(struct crypto_rng *tfm, @@ -87,7 +90,7 @@ static int qcom_rng_generate(struct crypto_rng *tfm, mutex_unlock(&rng->lock); clk_disable_unprepare(rng->clk); - return 0; + return ret; } static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed, diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index 1cece1a7d3f008fb60a2d2ecedfffc5ec8757e36..5bbf0d2722e11cffef500ad42a27cb7d5e65980a 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -506,7 +506,6 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { .exit = rk_ablk_exit_tfm, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, .setkey = rk_tdes_setkey, .encrypt = rk_des3_ede_ecb_encrypt, .decrypt = rk_des3_ede_ecb_decrypt, diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig index c85fab7ef0bdd21754ad1a30ad833778b92832f6..b2c28b87f14b3d1566cbca862441f80bcc3e1ad9 100644 --- a/drivers/crypto/vmx/Kconfig +++ b/drivers/crypto/vmx/Kconfig @@ -2,7 +2,11 @@ config CRYPTO_DEV_VMX_ENCRYPT tristate "Encryption acceleration support on P8 CPU" depends on CRYPTO_DEV_VMX + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_CTR select CRYPTO_GHASH + select CRYPTO_XTS default m help Support for VMX cryptographic acceleration instructions on Power8 CPU. 
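The sun8i-ce, sun8i-ss and amlogic-gxl hunks above all apply the same pattern: the crypto-engine request handler runs in a kernel thread (process context), but the completion callback invoked by crypto_finalize_skcipher_request()/crypto_finalize_hash_request() can acquire locks that are otherwise only taken from softirq context (IPsec traffic is one reported trigger), so bottom halves are disabled around the finalize call. The following is a minimal sketch of a handler following that pattern, not part of the patch series; example_run_hw() is a hypothetical driver-specific helper, not a real kernel API.

#include <linux/bottom_half.h>
#include <crypto/engine.h>
#include <crypto/skcipher.h>

/* Hypothetical driver-specific helper that programs the hardware. */
static int example_run_hw(struct skcipher_request *breq);

static int example_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
	int err;

	err = example_run_hw(breq);

	/*
	 * The completion callback may take locks that are also taken from
	 * softirq context, so disable bottom halves around the finalize call.
	 */
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

The same wrapping appears in both the cipher and hash paths above; only the finalize call differs (crypto_finalize_hash_request() for the ahash handlers).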
diff --git a/drivers/dax/super.c b/drivers/dax/super.c index cadbd0a1a1ef0220296faa93736f53b1b359783c..260a247c60d2da5c56c93144cdaac1340fd2750a 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -723,6 +723,7 @@ static int dax_fs_init(void) static void dax_fs_exit(void) { kern_unmount(dax_mnt); + rcu_barrier(); kmem_cache_destroy(dax_cache); } diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index db732f71e59aded339b3b0199579864436aa5510..cfbf10128aaedbc04ec6be9d6b6ab164c6e5ea79 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -181,6 +181,10 @@ static long udmabuf_create(struct miscdevice *device, if (ubuf->pagecount > pglimit) goto err; } + + if (!ubuf->pagecount) + goto err; + ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages), GFP_KERNEL); if (!ubuf->pages) { diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c index e1a958ae7925477b1d7c2ba310721ac046e094cd..3e83769615d1cb71d335093aa2b8aeae276b5e0e 100644 --- a/drivers/dma/hisi_dma.c +++ b/drivers/dma/hisi_dma.c @@ -30,7 +30,7 @@ #define HISI_DMA_MODE 0x217c #define HISI_DMA_OFFSET 0x100 -#define HISI_DMA_MSI_NUM 30 +#define HISI_DMA_MSI_NUM 32 #define HISI_DMA_CHAN_NUM 30 #define HISI_DMA_Q_DEPTH_VAL 1024 diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index dfbf514188f3702cf9fbaa9fb9b90e5e580117c8..6dca548f4dab1bf71f579b7eb6e4f53bdf445cce 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -3199,7 +3199,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int pl330_remove(struct amba_device *adev) +static void pl330_remove(struct amba_device *adev) { struct pl330_dmac *pl330 = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; @@ -3239,7 +3239,6 @@ static int pl330_remove(struct amba_device *adev) if (pl330->rstc) reset_control_assert(pl330->rstc); - return 0; } static const struct amba_id pl330_ids[] = { diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 7f72b3f4cd1aefd083e1c8d8017d682820091cf0..19ac95c0098f0f7a245c6657ee4af07dd9996f97 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -115,8 +115,10 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) ret = pm_runtime_get(schan->dev); spin_unlock_irq(&schan->chan_lock); - if (ret < 0) + if (ret < 0) { dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); + pm_runtime_put(schan->dev); + } pm_runtime_barrier(schan->dev); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 763223248664530fa2c9d799ef60fd74a8fc6a2a..745b7f9eb335108823cd0103423422d4c37e2a5b 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -979,7 +979,7 @@ static void __exit scmi_driver_exit(void) } module_exit(scmi_driver_exit); -MODULE_ALIAS("platform: arm-scmi"); +MODULE_ALIAS("platform:arm-scmi"); MODULE_AUTHOR("Sudeep Holla "); MODULE_DESCRIPTION("ARM SCMI protocol driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c index e1926483ae2fdc910a1cc785ac390c11c2ebd408..e51838d749e2e415f4fc891b9ab0fc8f36831e4f 100644 --- a/drivers/firmware/efi/apple-properties.c +++ b/drivers/firmware/efi/apple-properties.c @@ -24,7 +24,7 @@ static bool dump_properties __initdata; static int __init dump_properties_enable(char *arg) { dump_properties = true; - return 0; + return 1; } __setup("dump_apple_properties", dump_properties_enable); diff --git 
a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 0ef086e43090bb14f3789466bedf0af049e3d18d..7e771c56c13c6194cbda0ac31a1359dd9d2d8d8f 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -266,7 +266,7 @@ static int efi_pstore_write(struct pstore_record *record) efi_name[i] = name[i]; ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES, - preemptible(), record->size, record->psi->buf); + false, record->size, record->psi->buf); if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE)) if (!schedule_work(&efivar_work)) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 28d35b6c67102a7ff1e336692b3591501f015991..c406de00883aea7044a14fda3338b82fcdb8cc94 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -209,7 +209,7 @@ static int __init efivar_ssdt_setup(char *str) memcpy(efivar_ssdt, str, strlen(str)); else pr_warn("efivar_ssdt: name too long: %s\n", str); - return 0; + return 1; } __setup("efivar_ssdt=", efivar_ssdt_setup); diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c index 380e4e2513994e9d21fa7ecc520c00218d645ce3..9c460843442f5ad56a8865a19088d7a2cb3dbcbd 100644 --- a/drivers/firmware/efi/libstub/riscv-stub.c +++ b/drivers/firmware/efi/libstub/riscv-stub.c @@ -25,7 +25,7 @@ typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long); static u32 hartid; -static u32 get_boot_hartid_from_fdt(void) +static int get_boot_hartid_from_fdt(void) { const void *fdt; int chosen_node, len; @@ -33,23 +33,26 @@ static u32 get_boot_hartid_from_fdt(void) fdt = get_efi_config_table(DEVICE_TREE_GUID); if (!fdt) - return U32_MAX; + return -EINVAL; chosen_node = fdt_path_offset(fdt, "/chosen"); if (chosen_node < 0) - return U32_MAX; + return -EINVAL; prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len); if (!prop || len != sizeof(u32)) - return U32_MAX; + return -EINVAL; - return fdt32_to_cpu(*prop); + hartid = fdt32_to_cpu(*prop); + return 0; } efi_status_t check_platform_features(void) { - hartid = get_boot_hartid_from_fdt(); - if (hartid == U32_MAX) { + int ret; + + ret = get_boot_hartid_from_fdt(); + if (ret) { efi_err("/chosen/boot-hartid missing or invalid!\n"); return EFI_UNSUPPORTED; } diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index abdc8a6a396318a915455dd73e26439b88c1b003..cae590bd08f27c3c769459802d3546d1542c2ff8 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -742,6 +742,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, { const struct efivar_operations *ops; efi_status_t status; + unsigned long varsize; if (!__efivars) return -EINVAL; @@ -764,15 +765,17 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, return efivar_entry_set_nonblocking(name, vendor, attributes, size, data); + varsize = size + ucs2_strsize(name, 1024); if (!block) { if (down_trylock(&efivars_lock)) return -EBUSY; + status = check_var_size_nonblocking(attributes, varsize); } else { if (down_interruptible(&efivars_lock)) return -EINTR; + status = check_var_size(attributes, varsize); } - status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); if (status != EFI_SUCCESS) { up(&efivars_lock); return -ENOSPC; diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig index 931544c9f63d4b8e0ed169e491b51c15df2259b3..983e07dc022ede84376add92c25bc651b0b16bb2 100644 --- 
a/drivers/firmware/google/Kconfig +++ b/drivers/firmware/google/Kconfig @@ -21,7 +21,7 @@ config GOOGLE_SMI config GOOGLE_COREBOOT_TABLE tristate "Coreboot Table Access" - depends on ACPI || OF + depends on HAS_IOMEM && (ACPI || OF) help This option enables the coreboot_table module, which provides other firmware modules access to the coreboot table. The coreboot table diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index e10a99860ca4b489544f14bbd9c9895ab1377517..d417199f8fe9433bc069c125c6da93340b03b9fc 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -749,12 +749,6 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) }; int ret; - desc.args[0] = addr; - desc.args[1] = size; - desc.args[2] = spare; - desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, - QCOM_SCM_VAL); - ret = qcom_scm_call(__scm->dev, &desc, NULL); /* the pg table has been initialized already, ignore the error */ diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 2a7687911c097c964bed2444d45262441f2872e3..53c7e3f8cfde2925fba08d52c3292450ad79db74 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -477,7 +477,7 @@ static int svc_normal_to_secure_thread(void *data) case INTEL_SIP_SMC_RSU_ERROR: pr_err("%s: STATUS_ERROR\n", __func__); cbdata->status = BIT(SVC_STATUS_ERROR); - cbdata->kaddr1 = NULL; + cbdata->kaddr1 = &res.a1; cbdata->kaddr2 = NULL; cbdata->kaddr3 = NULL; pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c index dbad73162c8333bf5161b688d9a53a4eb4ae71b4..87edc77260d20598ac5851c7e6098db674654db7 100644 --- a/drivers/fsi/fsi-master-aspeed.c +++ b/drivers/fsi/fsi-master-aspeed.c @@ -525,7 +525,6 @@ static int tacoma_cabled_fsi_fixup(struct device *dev) static int fsi_master_aspeed_probe(struct platform_device *pdev) { struct fsi_master_aspeed *aspeed; - struct resource *res; int rc, links, reg; __be32 raw; @@ -535,26 +534,28 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) return rc; } - aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL); + aspeed = kzalloc(sizeof(*aspeed), GFP_KERNEL); if (!aspeed) return -ENOMEM; aspeed->dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - aspeed->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(aspeed->base)) - return PTR_ERR(aspeed->base); + aspeed->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(aspeed->base)) { + rc = PTR_ERR(aspeed->base); + goto err_free_aspeed; + } aspeed->clk = devm_clk_get(aspeed->dev, NULL); if (IS_ERR(aspeed->clk)) { dev_err(aspeed->dev, "couldn't get clock\n"); - return PTR_ERR(aspeed->clk); + rc = PTR_ERR(aspeed->clk); + goto err_free_aspeed; } rc = clk_prepare_enable(aspeed->clk); if (rc) { dev_err(aspeed->dev, "couldn't enable clock\n"); - return rc; + goto err_free_aspeed; } rc = setup_cfam_reset(aspeed); @@ -589,7 +590,7 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw); if (rc) { dev_err(&pdev->dev, "failed to read hub version\n"); - return rc; + goto err_release; } reg = be32_to_cpu(raw); @@ -628,6 +629,8 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) err_release: clk_disable_unprepare(aspeed->clk); +err_free_aspeed: + kfree(aspeed); return rc; } diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index 
9500074b1f1b55e00951af9b7b9f2d956f37a7ef..7fbe5f0681b956b582a03f12c5b409e9f7f6de14 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -337,9 +337,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip, return offset + pin; } +#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio) + static void tegra186_irq_ack(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; base = tegra186_gpio_get_base(gpio, data->hwirq); @@ -351,7 +354,8 @@ static void tegra186_irq_ack(struct irq_data *data) static void tegra186_irq_mask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -366,7 +370,8 @@ static void tegra186_irq_mask(struct irq_data *data) static void tegra186_irq_unmask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -381,7 +386,8 @@ static void tegra186_irq_unmask(struct irq_data *data) static int tegra186_irq_set_type(struct irq_data *data, unsigned int type) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c index d885032cf814d89e184f7a10ab51fa09b2efc033..d918d2df4de2cbf536a67483b496f5079640ebfd 100644 --- a/drivers/gpio/gpio-ts4900.c +++ b/drivers/gpio/gpio-ts4900.c @@ -1,7 +1,7 @@ /* * Digital I/O driver for Technologic Systems I2C FPGA Core * - * Copyright (C) 2015 Technologic Systems + * Copyright (C) 2015, 2018 Technologic Systems * Copyright (C) 2016 Savoir-Faire Linux * * This program is free software; you can redistribute it and/or @@ -55,19 +55,33 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip, { struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); - /* - * This will clear the output enable bit, the other bits are - * dontcare when this is cleared + /* Only clear the OE bit here, requires a RMW. Prevents potential issue + * with OE and data getting to the physical pin at different times. */ - return regmap_write(priv->regmap, offset, 0); + return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0); } static int ts4900_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); + unsigned int reg; int ret; + /* If changing from an input to an output, we need to first set the + * proper data bit to what is requested and then set OE bit. 
This + * prevents a glitch that can occur on the IO line + */ + regmap_read(priv->regmap, offset, &reg); + if (!(reg & TS4900_GPIO_OE)) { + if (value) + reg = TS4900_GPIO_OUT; + else + reg &= ~TS4900_GPIO_OUT; + + regmap_write(priv->regmap, offset, reg); + } + if (value) ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE | TS4900_GPIO_OUT); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index af5bb8fedfea78cfea00eb6d22f804f0b8a4a9b2..00526fdd7691f2adba8798b082ca36b67f8f0fbb 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -3215,6 +3215,16 @@ int gpiod_to_irq(const struct gpio_desc *desc) return retirq; } +#ifdef CONFIG_GPIOLIB_IRQCHIP + if (gc->irq.chip) { + /* + * Avoid race condition with other code, which tries to lookup + * an IRQ before the irqchip has been properly registered, + * i.e. while gpiochip is still being brought up. + */ + return -EPROBE_DEFER; + } +#endif return -ENXIO; } EXPORT_SYMBOL_GPL(gpiod_to_irq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b47829ff30af79237cad7cdf2b463d6b583b0e5b..635601d8b131052c50ed1f2bd7d5463c0348be18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -715,11 +715,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Check if all VM PDs/PTs are ready for updates * * Returns: - * True if eviction list is empty. + * True if VM is not evicting. */ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - return list_empty(&vm->evicted); + bool ret; + + amdgpu_vm_eviction_lock(vm); + ret = !vm->evicting; + amdgpu_vm_eviction_unlock(vm); + + return ret && list_empty(&vm->evicted); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 37226cbbbd11a4c3d79dbe41ac9b581eb6f049e0..7212b9900e0abafdf8386235789ec6f25167384d 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1194,8 +1194,11 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; + /* + * MMHUB PG needs to be disabled for Picasso for + * stability reasons.
+ */ adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_MMHUB | AMD_PG_SUPPORT_VCN; } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6c8f141103da47b0b581067c6a2e06363fbc05c0..e828f9414ba2cfd9dc30dd2176534b818ba76e21 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6396,6 +6396,9 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, mode = amdgpu_dm_create_common_mode(encoder, common_modes[i].name, common_modes[i].w, common_modes[i].h); + if (!mode) + continue; + drm_mode_probed_add(connector, mode); amdgpu_dm_connector->num_modes++; } @@ -8612,10 +8615,13 @@ static int dm_update_plane_state(struct dc *dc, static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) { struct drm_connector *connector; - struct drm_connector_state *conn_state; + struct drm_connector_state *conn_state, *old_conn_state; struct amdgpu_dm_connector *aconnector = NULL; int i; - for_each_new_connector_in_state(state, connector, conn_state, i) { + for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { + if (!conn_state->crtc) + conn_state = old_conn_state; + if (conn_state->crtc != crtc) continue; diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 0e0f494fbb5e138b1f739cd664252d2899c2fc12..b037fd57fd366ce2237592ebf2e0b03928f53bf4 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -227,14 +227,6 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { .funcs = &pflip_irq_info_funcs\ } -#define vupdate_int_entry(reg_num)\ - [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ - IRQ_REG_ENTRY(OTG, reg_num,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\ - .funcs = &vblank_irq_info_funcs\ - } - /* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic * of DCE's DC_IRQ_SOURCE_VUPDATEx. 
*/ @@ -348,12 +340,6 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = { dc_underflow_int_entry(6), [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), - vupdate_int_entry(0), - vupdate_int_entry(1), - vupdate_int_entry(2), - vupdate_int_entry(3), - vupdate_int_entry(4), - vupdate_int_entry(5), vupdate_no_lock_int_entry(0), vupdate_no_lock_int_entry(1), vupdate_no_lock_int_entry(2), diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 49109614510b8c8ef38ab94e2f4cac1db0867ac0..5abb68017f6ed6df2165114a36881264be9c4437 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2098,8 +2098,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } } - /* setting should not be allowed from VF */ - if (amdgpu_sriov_vf(adev)) { + /* setting should not be allowed from VF if not in one VF mode */ + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { dev_attr->attr.mode &= ~S_IWUGO; dev_attr->store = NULL; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index e5893218fa4bb5c62fcd7b76a9336b5645527565..ee27970cfff952d38b5d02cd8e368f8851d8ae57 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -115,7 +115,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu, uint32_t *min, uint32_t *max) { - int ret = 0; + int ret = -ENOTSUPP; if (!min && !max) return -EINVAL; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index a9bb734366ae6ffa1172b394b851047170ff7e2c..a0f6ee15c24859a48f39c7120ad36a48ca28eec0 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -169,6 +169,7 @@ #define ADV7511_PACKET_ENABLE_SPARE2 BIT(1) #define ADV7511_PACKET_ENABLE_SPARE1 BIT(0) +#define ADV7535_REG_POWER2_HPD_OVERRIDE BIT(6) #define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0 #define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00 #define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40 diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index a0d392c338da5241744835e0ed3d89ba14440e8b..c6f059be4b8976a38dfb169df9e652da1bbc5c81 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -351,11 +351,17 @@ static void __adv7511_power_on(struct adv7511 *adv7511) * from standby or are enabled. When the HPD goes low the adv7511 is * reset and the outputs are disabled which might cause the monitor to * go to standby again. To avoid this we ignore the HPD pin for the - * first few seconds after enabling the output. + * first few seconds after enabling the output. On the other hand + * adv7535 require to enable HPD Override bit for proper HPD. 
*/ - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HPD_SRC_MASK, - ADV7511_REG_POWER2_HPD_SRC_NONE); + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, + ADV7535_REG_POWER2_HPD_OVERRIDE); + else + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_NONE); } static void adv7511_power_on(struct adv7511 *adv7511) @@ -375,6 +381,10 @@ static void adv7511_power_on(struct adv7511 *adv7511) static void __adv7511_power_off(struct adv7511 *adv7511) { /* TODO: setup additional power down modes */ + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, 0); + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN); @@ -672,9 +682,14 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector) status = connector_status_disconnected; } else { /* Renable HPD sensing */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HPD_SRC_MASK, - ADV7511_REG_POWER2_HPD_SRC_BOTH); + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, + ADV7535_REG_POWER2_HPD_OVERRIDE); + else + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_BOTH); } adv7511->status = status; diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c index b31281f76117c02335393fc9b447a6e166b2570b..0ced08d81d7a262dc3c36af5b1f81ca460be2864 100644 --- a/drivers/gpu/drm/bridge/cdns-dsi.c +++ b/drivers/gpu/drm/bridge/cdns-dsi.c @@ -1286,6 +1286,7 @@ static const struct of_device_id cdns_dsi_of_match[] = { { .compatible = "cdns,dsi" }, { }, }; +MODULE_DEVICE_TABLE(of, cdns_dsi_of_match); static struct platform_driver cdns_dsi_platform_driver = { .probe = cdns_dsi_drm_probe, diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index 6cac2e58cd15fb15e51b7026aca23ac9200c8421..b68d335981588e42d884959a2e5fc4c1d0b1af10 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -1188,6 +1188,7 @@ static int nwl_dsi_probe(struct platform_device *pdev) ret = nwl_dsi_select_input(dsi); if (ret < 0) { + pm_runtime_disable(dev); mipi_dsi_host_unregister(&dsi->dsi_host); return ret; } diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 843265d7f1b123b3dee5c07cc4830314b8168c4e..ec7745c31da07aaccdbde0a90cd220b6dd441e19 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -2120,7 +2120,7 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx) if (ret) { dev_err(ctx->dev, "Failed to register RC device\n"); ctx->error = ret; - rc_free_device(ctx->rc_dev); + rc_free_device(rc_dev); return; } ctx->rc_dev = rc_dev; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 29c0eb4bd7546d3452533607ffafff65c618cebe..b10228b9e3a93bf4144ce6d6d3d818907edf9b92 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2566,8 +2566,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, if (!output_fmts) return NULL; - /* If dw-hdmi is the only bridge, avoid negociating with ourselves */ - if 
(list_is_singular(&bridge->encoder->bridge_chain)) { + /* If dw-hdmi is the first or only bridge, avoid negociating with ourselves */ + if (list_is_singular(&bridge->encoder->bridge_chain) || + list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) { *num_output_fmts = 1; output_fmts[0] = MEDIA_BUS_FMT_FIXED; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index 6b268f9445b36aea903d13fdd8c76822d50c66dc..376fa6eb46f6978afff4fd4d4b63c37c4818a8ad 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -1172,6 +1172,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev, ret = mipi_dsi_host_register(&dsi->dsi_host); if (ret) { dev_err(dev, "Failed to register MIPI host: %d\n", ret); + pm_runtime_disable(dev); dw_mipi_dsi_debugfs_remove(dsi); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 717c4e7271b0422692c485d8c8cb426de0bc006f..5163433ac561b884e2dfd60f1aa4509c9399b5c3 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2155,6 +2155,9 @@ EXPORT_SYMBOL(drm_connector_attach_max_bpc_property); void drm_connector_set_vrr_capable_property( struct drm_connector *connector, bool capable) { + if (!connector->vrr_capable_property) + return; + drm_object_property_set_value(&connector->base, connector->vrr_capable_property, capable); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index add317bd8d55c4c423c239aefa4d66f472235f33..862e173d34315e7d3ccadaa2b7d604ce07d34fc2 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4806,7 +4806,8 @@ bool drm_detect_monitor_audio(struct edid *edid) if (!edid_ext) goto end; - has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0); + has_audio = (edid_ext[0] == CEA_EXT && + (edid_ext[3] & EDID_BASIC_AUDIO) != 0); if (has_audio) { DRM_DEBUG_KMS("Monitor has basic audio support\n"); @@ -4959,16 +4960,8 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector, connector->name, dc_bpc); info->bpc = dc_bpc; - /* - * Deep color support mandates RGB444 support for all video - * modes and forbids YCRCB422 support for all video modes per - * HDMI 1.3 spec. - */ - info->color_formats = DRM_COLOR_FORMAT_RGB444; - /* YCRCB444 is optional according to spec. 
*/ if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) { - info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n", connector->name); } @@ -5132,6 +5125,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return quirks; + info->color_formats |= DRM_COLOR_FORMAT_RGB444; drm_parse_cea_ext(connector, edid); /* @@ -5180,7 +5174,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n", connector->name, info->bpc); - info->color_formats |= DRM_COLOR_FORMAT_RGB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 5754bccff4d15030dd1a81d10a222e0b29c1fc2b..92dd65befbcb833244edfe0b88f3a1082e54ae68 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -423,7 +423,7 @@ vm_access(struct vm_area_struct *area, unsigned long addr, return -EACCES; addr -= area->vm_start; - if (addr >= obj->base.size) + if (range_overflows_t(u64, addr, len, obj->base.size)) return -EINVAL; /* As this is primarily for debugging, let's focus on simplicity */ diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index 6c97192e9ca87ea923d522ca0fcc75c40f7f0a79..a0d5e95234fd0a340405e3f51d39fda4e22271e6 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -110,6 +110,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) /* Comet Lake V PCH is based on KBP, which is SPT compatible */ return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: + case INTEL_PCH_ICP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); return PCH_ICP; @@ -124,7 +125,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) !IS_ROCKETLAKE(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: - case INTEL_PCH_JSP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv)); return PCH_JSP; diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 06d2cd50af0b9d4d8b28ee48a9d1fa23cdfd476f..49325022b3c9628b605723735043556e35296cfc 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -48,11 +48,11 @@ enum intel_pch { #define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680 #define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 +#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 #define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080 #define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380 #define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80 -#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index e51ca7ca0a2a70bfca211ac809b8f4c8d7a05d1e..472aaea75ef84303ec901d4b81df104c25c15e3d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3996,6 +3996,17 @@ static int intel_compute_sagv_mask(struct 
intel_atomic_state *state) return ret; } + if (intel_can_enable_sagv(dev_priv, new_bw_state) != + intel_can_enable_sagv(dev_priv, old_bw_state)) { + ret = intel_atomic_serialize_global_state(&new_bw_state->base); + if (ret) + return ret; + } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; @@ -4010,17 +4021,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) intel_can_enable_sagv(dev_priv, new_bw_state); } - if (intel_can_enable_sagv(dev_priv, new_bw_state) != - intel_can_enable_sagv(dev_priv, old_bw_state)) { - ret = intel_atomic_serialize_global_state(&new_bw_state->base); - if (ret) - return ret; - } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 2eb8df4697dfac6c0bd1e6a2098a62a296e0498c..605ac8825a5911278e4e8c9e002aa1e087d7f728 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -212,14 +212,6 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, if (!imx_pd_format_supported(bus_fmt)) return -EINVAL; - if (bus_flags & - ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH | - DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | - DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) { - dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags); - return -EINVAL; - } - bridge_state->output_bus_cfg.flags = bus_flags; bridge_state->input_bus_cfg.flags = bus_flags; imx_crtc_state->bus_flags = bus_flags; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 2753067c08e68bfe22e483fae825fbfed9af5934..728fea50941243f15c377f4bc40fd6ca99bf3952 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -396,10 +396,8 @@ static void meson_drv_unbind(struct device *dev) drm_irq_uninstall(drm); drm_dev_put(drm); - if (priv->afbcd.ops) { - priv->afbcd.ops->reset(priv); - meson_rdma_free(priv); - } + if (priv->afbcd.ops) + priv->afbcd.ops->exit(priv); } static const struct component_master_ops meson_drv_master_ops = { diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c index ffc6b584dbf85d82b778b37ebc3b0a069063ea08..0cdbe899402f84571e508f09f7c4a978d9d77d31 100644 --- a/drivers/gpu/drm/meson/meson_osd_afbcd.c +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c @@ -79,11 +79,6 @@ static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format) return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0; } -static int meson_gxm_afbcd_init(struct meson_drm *priv) -{ - return 0; -} - static int meson_gxm_afbcd_reset(struct meson_drm *priv) { writel_relaxed(VIU_SW_RESET_OSD1_AFBCD, @@ -93,6 +88,16 @@ static int meson_gxm_afbcd_reset(struct meson_drm *priv) return 0; } +static int meson_gxm_afbcd_init(struct meson_drm *priv) +{ + return 0; +} + +static void meson_gxm_afbcd_exit(struct meson_drm *priv) +{ + meson_gxm_afbcd_reset(priv); +} + static int meson_gxm_afbcd_enable(struct meson_drm *priv) { writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) | @@ -172,6 +177,7 @@ static int meson_gxm_afbcd_setup(struct meson_drm *priv) struct meson_afbcd_ops meson_afbcd_gxm_ops = { .init = 
meson_gxm_afbcd_init, + .exit = meson_gxm_afbcd_exit, .reset = meson_gxm_afbcd_reset, .enable = meson_gxm_afbcd_enable, .disable = meson_gxm_afbcd_disable, @@ -269,6 +275,18 @@ static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format) return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0; } +static int meson_g12a_afbcd_reset(struct meson_drm *priv) +{ + meson_rdma_reset(priv); + + meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB | + VIU_SW_RESET_G12A_OSD1_AFBCD, + VIU_SW_RESET); + meson_rdma_writel_sync(priv, 0, VIU_SW_RESET); + + return 0; +} + static int meson_g12a_afbcd_init(struct meson_drm *priv) { int ret; @@ -286,16 +304,10 @@ static int meson_g12a_afbcd_init(struct meson_drm *priv) return 0; } -static int meson_g12a_afbcd_reset(struct meson_drm *priv) +static void meson_g12a_afbcd_exit(struct meson_drm *priv) { - meson_rdma_reset(priv); - - meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB | - VIU_SW_RESET_G12A_OSD1_AFBCD, - VIU_SW_RESET); - meson_rdma_writel_sync(priv, 0, VIU_SW_RESET); - - return 0; + meson_g12a_afbcd_reset(priv); + meson_rdma_free(priv); } static int meson_g12a_afbcd_enable(struct meson_drm *priv) @@ -380,6 +392,7 @@ static int meson_g12a_afbcd_setup(struct meson_drm *priv) struct meson_afbcd_ops meson_afbcd_g12a_ops = { .init = meson_g12a_afbcd_init, + .exit = meson_g12a_afbcd_exit, .reset = meson_g12a_afbcd_reset, .enable = meson_g12a_afbcd_enable, .disable = meson_g12a_afbcd_disable, diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h index 5e5523304f42f95c305a850bcde238fc57988ca5..e77ddeb6416f3c2998c45131e14dd9f3fb503f6d 100644 --- a/drivers/gpu/drm/meson/meson_osd_afbcd.h +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h @@ -14,6 +14,7 @@ struct meson_afbcd_ops { int (*init)(struct meson_drm *priv); + void (*exit)(struct meson_drm *priv); int (*reset)(struct meson_drm *priv); int (*enable)(struct meson_drm *priv); int (*disable)(struct meson_drm *priv); diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 509968c0d16bc7ad8874b33baf362078c3e29db9..2a13e297e16df5a529e9a52ec6cbe283c4285ea7 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1243,7 +1243,10 @@ static void mgag200_set_format_regs(struct mga_device *mdev, WREG_GFX(3, 0x00); WREG_GFX(4, 0x00); WREG_GFX(5, 0x40); - WREG_GFX(6, 0x05); + /* GCTL6 should be 0x05, but we configure memmapsl to 0xb8000 (text mode), + * so that it doesn't hang when running kexec/kdump on G200_SE rev42. 
+ */ + WREG_GFX(6, 0x0d); WREG_GFX(7, 0x0f); WREG_GFX(8, 0x0f); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index f7f5c258b5537b38946e29150b99c4710abcd28c..a0274fcfe9c9d58e92f2c8118e6ad3bfae004efb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -1113,7 +1113,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) } - if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort && + if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS && dpu_enc->cur_master->hw_mdptop && dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select) dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select( diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 9b2b5044e8e05490003f534a6d34f10204304d22..74a13ccad34c0bb942983385ec46db5b5554241f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -34,6 +34,14 @@ int dpu_rm_destroy(struct dpu_rm *rm) { int i; + for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) { + struct dpu_hw_dspp *hw; + + if (rm->dspp_blks[i]) { + hw = to_dpu_hw_dspp(rm->dspp_blks[i]); + dpu_hw_dspp_destroy(hw); + } + } for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) { struct dpu_hw_pingpong *hw; diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 66f2ea3d42fc2074f430d598a6887a79b6105a7a..6cd6934c8c9f10760a2b89b2393a43ba721d7cf2 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -1336,6 +1336,7 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, struct drm_encoder *encoder) { struct msm_drm_private *priv; + struct dp_display_private *dp_priv; int ret; if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev)) @@ -1344,6 +1345,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, priv = dev->dev_private; dp_display->drm_dev = dev; + dp_priv = container_of(dp_display, struct dp_display_private, dp_display); + ret = dp_display_request_irq(dp_display); if (ret) { DRM_ERROR("request_irq failed, ret=%d\n", ret); @@ -1361,6 +1364,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, return ret; } + dp_priv->panel->connector = dp_display->connector; + priv->connectors[priv->num_connectors++] = dp_display->connector; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c index 667fa016496eeb11e61b774f8db98bb9565829b3..a6ea89a5d51ab90806de88908bb7e7d961412c3b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c @@ -142,11 +142,12 @@ nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver, hsfw->imem_size = desc->code_size; hsfw->imem_tag = desc->start_tag; - hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL); - memcpy(hsfw->imem, data + desc->code_off, desc->code_size); - + hsfw->imem = kmemdup(data + desc->code_off, desc->code_size, GFP_KERNEL); nvkm_firmware_put(fw); - return 0; + if (!hsfw->imem) + return -ENOMEM; + else + return 0; } int diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 7ffd2a04ab23ad4758d0952be357e89d6e32c420..959dcbd8a29c175b3b697e23b784779a365f04ef 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2132,7 +2132,7 @@ static const struct display_timing 
innolux_g070y2_l01_timing = { static const struct panel_desc innolux_g070y2_l01 = { .timings = &innolux_g070y2_l01_timing, .num_timings = 1, - .bpc = 6, + .bpc = 8, .size = { .width = 152, .height = 91, diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 2aae636f1cf5cbe10f0f51724a1062c19c6ab29b..107ad2d764ec0de1fef6911ec691f9d457eb5f1b 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -359,8 +359,11 @@ int panfrost_gpu_init(struct panfrost_device *pfdev) panfrost_gpu_init_features(pfdev); - dma_set_mask_and_coherent(pfdev->dev, + err = dma_set_mask_and_coherent(pfdev->dev, DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features))); + if (err) + return err; + dma_set_max_seg_size(pfdev->dev, UINT_MAX); irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 46b0d1c4a16c65547092e7c60a49d3550f2cce5c..d5e8e3a8bff3ec198972f6cc6261b5322de20034 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -324,7 +324,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev, return ret; } -static int pl111_amba_remove(struct amba_device *amba_dev) +static void pl111_amba_remove(struct amba_device *amba_dev) { struct device *dev = &amba_dev->dev; struct drm_device *drm = amba_get_drvdata(amba_dev); @@ -335,8 +335,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev) drm_panel_bridge_remove(priv->bridge); drm_dev_put(drm); of_reserved_mem_device_release(dev); - - return 0; } /* diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 1f4e3396d097cd9d6c01408754053f41359350e4..a42ea2b76985264eba78b21aa0f39418eefab1de 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -275,6 +275,11 @@ int radeon_uvd_suspend(struct radeon_device *rdev) } } +#if IS_ENABLED(CONFIG_SW64) + /* Finish executing delayed work */ + flush_delayed_work(&rdev->uvd.idle_work); +#endif + return 0; } diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index 7576b523fdbb1409c720ca5874425a71be9cc411..b0178c045267c6f6e584d46a076e5780c8144bb8 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -113,10 +113,10 @@ /* format 13 is semi-planar YUV411 VUVU */ #define SUN8I_MIXER_FBFMT_YUV411 14 /* format 15 doesn't exist */ -/* format 16 is P010 YVU */ -#define SUN8I_MIXER_FBFMT_P010_YUV 17 -/* format 18 is P210 YVU */ -#define SUN8I_MIXER_FBFMT_P210_YUV 19 +#define SUN8I_MIXER_FBFMT_P010_YUV 16 +/* format 17 is P010 YVU */ +#define SUN8I_MIXER_FBFMT_P210_YUV 18 +/* format 19 is P210 YVU */ /* format 20 is packed YVU444 10-bit */ /* format 21 is packed YUV444 10-bit */ diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index f46d377f0c3046309c2bf1d12415f593f6aed467..de1333dc0d8670aa30dc9867d5c13a41691ea4a3 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1538,8 +1538,10 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi) dsi->slave = platform_get_drvdata(gangster); of_node_put(np); - if (!dsi->slave) + if (!dsi->slave) { + put_device(&gangster->dev); return -EPROBE_DEFER; + } dsi->slave->master = dsi; } diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index a2c09dca4eef945e76a2bf68da9f0db52bb39263..8659558b518d6afc3100c19631363c42e6fe1b43 100644 --- 
a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -520,6 +520,7 @@ static int host1x_remove(struct platform_device *pdev) host1x_syncpt_deinit(host); reset_control_assert(host->rst); clk_disable_unprepare(host->clk); + host1x_channel_list_free(&host->channel_list); host1x_iommu_exit(host); return 0; diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c index ce7740ef449babaf0f7bfd1afd2021cb6c7a7aec..51d0875a34800aec4ea11347060e4d1956443aba 100644 --- a/drivers/greybus/svc.c +++ b/drivers/greybus/svc.c @@ -866,8 +866,14 @@ static int gb_svc_hello(struct gb_operation *op) gb_svc_debugfs_init(svc); - return gb_svc_queue_deferred_request(op); + ret = gb_svc_queue_deferred_request(op); + if (ret) + goto err_remove_debugfs; + + return 0; +err_remove_debugfs: + gb_svc_debugfs_exit(svc); err_unregister_device: gb_svc_watchdog_destroy(svc); device_del(&svc->dev); diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 982737827b871278239c2372984e8a0e29ca0cd0..f4e2e6937758952ed499e6adbaf89ed147221945 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -823,7 +823,9 @@ static const char *keys[KEY_MAX + 1] = { [KEY_F22] = "F22", [KEY_F23] = "F23", [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD", [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3", - [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend", + [KEY_PROG4] = "Prog4", + [KEY_ALL_APPLICATIONS] = "AllApplications", + [KEY_SUSPEND] = "Suspend", [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play", [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost", [KEY_PRINT] = "Print", [KEY_HP] = "HP", @@ -930,6 +932,7 @@ static const char *keys[KEY_MAX + 1] = { [KEY_SCREENSAVER] = "ScreenSaver", [KEY_VOICECOMMAND] = "VoiceCommand", [KEY_EMOJI_PICKER] = "EmojiPicker", + [KEY_DICTATE] = "Dictate", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", [KEY_BRIGHTNESS_MAX] = "BrightnessMax", [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index eb53855898c8d225905f1bd3de5499c72ef001ef..a17d1dda95703718087f59aa1ed349f9a1af6c4d 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -956,6 +956,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break; + case 0x0d8: map_key_clear(KEY_DICTATE); break; case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break; case 0x0e0: map_abs_clear(ABS_VOLUME); break; @@ -1047,6 +1048,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break; + case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS); break; + case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break; case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break; case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break; diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index a311b0a33eba7ff45501c396281daba4919a79b8..587259b3db97c5c547b049ea81ba5017252e4771 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1000,6 +1000,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev, workitem.reports_supported |= STD_KEYBOARD; break; case 0x0f: + case 0x11: device_type = "eQUAD Lightspeed 1.2"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); workitem.reports_supported |= STD_KEYBOARD; diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c index 
576518e704ee6426484ee6aa9687992af7c5608a..d57ec17670379cf75b28dfd8bab713332fa30b4a 100644 --- a/drivers/hid/hid-vivaldi.c +++ b/drivers/hid/hid-vivaldi.c @@ -143,7 +143,7 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, static int vivaldi_input_configured(struct hid_device *hdev, struct hid_input *hidinput) { - return sysfs_create_group(&hdev->dev.kobj, &input_attribute_group); + return devm_device_add_group(&hdev->dev, &input_attribute_group); } static const struct hid_device_id vivaldi_table[] = { diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 998aad8a9e60893d5b5b5830245ea311fc6068c4..14811d42a5a918bfdd2ba09c7dedaaf50317917a 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -620,6 +620,17 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, if (report_type == HID_OUTPUT_REPORT) return -EINVAL; + /* + * In case of unnumbered reports the response from the device will + * not have the report ID that the upper layers expect, so we need + * to stash it in the buffer ourselves and adjust the data size. + */ + if (!report_number) { + buf[0] = 0; + buf++; + count--; + } + /* +2 bytes to include the size of the reply in the query buffer */ ask_count = min(count + 2, (size_t)ihid->bufsize); @@ -641,6 +652,9 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, count = min(count, ret_count - 2); memcpy(buf, ihid->rawbuf + 2, count); + if (!report_number) + count++; + return count; } @@ -657,17 +671,19 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf, mutex_lock(&ihid->reset_lock); - if (report_id) { - buf++; - count--; - } - + /* + * Note that both numbered and unnumbered reports passed here + * are supposed to have report ID stored in the 1st byte of the + * buffer, so we strip it off unconditionally before passing payload + * to i2c_hid_set_or_send_report which takes care of encoding + * everything properly. + */ ret = i2c_hid_set_or_send_report(client, report_type == HID_FEATURE_REPORT ?
0x03 : 0x02, - report_id, buf, count, use_data); + report_id, buf + 1, count - 1, use_data); - if (report_id && ret >= 0) - ret++; /* add report_id to the number of transfered bytes */ + if (ret >= 0) + ret++; /* add report_id to the number of transferred bytes */ mutex_unlock(&ihid->reset_lock); diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c index 6cf59fd26ad784356923115c3c28054a344990b0..b6d6d119035ca0405a46c3b94d64f35ea5159be0 100644 --- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c +++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c @@ -656,21 +656,12 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data, */ payload_max_size &= ~(L1_CACHE_BYTES - 1); - dma_buf = kmalloc(payload_max_size, GFP_KERNEL | GFP_DMA32); + dma_buf = dma_alloc_coherent(devc, payload_max_size, &dma_buf_phy, GFP_KERNEL); if (!dma_buf) { client_data->flag_retry = true; return -ENOMEM; } - dma_buf_phy = dma_map_single(devc, dma_buf, payload_max_size, - DMA_TO_DEVICE); - if (dma_mapping_error(devc, dma_buf_phy)) { - dev_err(cl_data_to_dev(client_data), "DMA map failed\n"); - client_data->flag_retry = true; - rv = -ENOMEM; - goto end_err_dma_buf_release; - } - ldr_xfer_dma_frag.fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT; ldr_xfer_dma_frag.fragment.xfer_mode = LOADER_XFER_MODE_DIRECT_DMA; ldr_xfer_dma_frag.ddr_phys_addr = (u64)dma_buf_phy; @@ -690,14 +681,7 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data, ldr_xfer_dma_frag.fragment.size = fragment_size; memcpy(dma_buf, &fw->data[fragment_offset], fragment_size); - dma_sync_single_for_device(devc, dma_buf_phy, - payload_max_size, - DMA_TO_DEVICE); - - /* - * Flush cache here because the dma_sync_single_for_device() - * does not do for x86. - */ + /* Flush cache to be sure the data is in main memory. */ clflush_cache_range(dma_buf, payload_max_size); dev_dbg(cl_data_to_dev(client_data), @@ -720,15 +704,8 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data, fragment_offset += fragment_size; } - dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE); - kfree(dma_buf); - return 0; - end_err_resp_buf_release: - /* Free ISH buffer if not done already, in error case */ - dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE); -end_err_dma_buf_release: - kfree(dma_buf); + dma_free_coherent(devc, payload_max_size, dma_buf, dma_buf_phy); return rv; } diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig index 79e5356a737a2ad6b9fb19a498613eba20aa9761..210e532ac277fe3057ffdd75e6ab89b08b22adb9 100644 --- a/drivers/hv/Kconfig +++ b/drivers/hv/Kconfig @@ -17,6 +17,7 @@ config HYPERV_TIMER config HYPERV_UTILS tristate "Microsoft Hyper-V Utilities driver" depends on HYPERV && CONNECTOR && NLS + depends on PTP_1588_CLOCK_OPTIONAL help Select this option to enable the Hyper-V Utilities. 
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index eb56e09ae15f3999478b0e75f601836c92ad175a..6a716996a6250b30ac458a8a95714b44c9f04d8e 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1558,7 +1558,7 @@ static void balloon_onchannelcallback(void *context) break; default: - pr_warn("Unhandled message: type: %d\n", dm_hdr->type); + pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type); } } diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 0c2b032ee6176c755e319139dd9c612154eec9d8..45a1a5969d01fd1021c7310181fb3a73ca0493b2 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -51,6 +51,16 @@ config SENSORS_AB8500 This driver can also be built as a module. If so, the module will be called abx500-temp. +config SENSORS_PVT + tristate "SW64 PVT monitor" + depends on SW64 + help + If you say yes here you get support for the voltage + sensor inside your CPU. + + This driver can also be built as a module. If so, the module + will be called PVT. + config SENSORS_ABITUGURU tristate "Abit uGuru (rev 1 & 2)" depends on X86 && DMI diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 9db2903b61e5b7a45eb9a197b2677166e281f0b4..c22a5316bd91afde51505898505ed5e5ce99805e 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -193,6 +193,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o obj-$(CONFIG_SENSORS_XGENE) += xgene-hwmon.o +obj-$(CONFIG_SENSORS_PVT) += sw64_pvt.o obj-$(CONFIG_SENSORS_OCC) += occ/ obj-$(CONFIG_PMBUS) += pmbus/ diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index e5a83f74926772ac138e3479945612669c5ec2bd..d649fea8299948a183b582669df670c728877927 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -178,12 +178,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index) tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, &hwmon_thermal_ops); - /* - * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, - * so ignore that error but forward any other error. 
- */ - if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV)) - return PTR_ERR(tzd); + if (IS_ERR(tzd)) { + if (PTR_ERR(tzd) != -ENODEV) + return PTR_ERR(tzd); + dev_info(dev, "temp%d_input not attached to any thermal zone\n", + index + 1); + devm_kfree(dev, tdata); + return 0; + } err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node); if (err) diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 88a5df2633fb254cbbf6474eae59899eb7ec0081..de27837e852717a42e98b902295bf2c8c79b628f 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -319,6 +319,7 @@ enum pmbus_fan_mode { percent = 0, rpm }; /* * STATUS_VOUT, STATUS_INPUT */ +#define PB_VOLTAGE_VIN_OFF BIT(3) #define PB_VOLTAGE_UV_FAULT BIT(4) #define PB_VOLTAGE_UV_WARNING BIT(5) #define PB_VOLTAGE_OV_WARNING BIT(6) diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index b0e2820a2d578f62ca0db7ac565ca5404065c790..117e3ce9c76ad8a775b4b6f75c0285f98cf1133b 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -898,6 +898,11 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b, pmbus_update_sensor_data(client, s2); regval = status & mask; + if (regval) { + ret = pmbus_write_byte_data(client, page, reg, regval); + if (ret) + goto unlock; + } if (s1 && s2) { s64 v1, v2; @@ -1355,7 +1360,7 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = { .reg = PMBUS_VIN_UV_FAULT_LIMIT, .attr = "lcrit", .alarm = "lcrit_alarm", - .sbit = PB_VOLTAGE_UV_FAULT, + .sbit = PB_VOLTAGE_UV_FAULT | PB_VOLTAGE_VIN_OFF, }, { .reg = PMBUS_VIN_OV_WARN_LIMIT, .attr = "max", @@ -2250,10 +2255,14 @@ static int pmbus_regulator_is_enabled(struct regulator_dev *rdev) { struct device *dev = rdev_get_dev(rdev); struct i2c_client *client = to_i2c_client(dev->parent); + struct pmbus_data *data = i2c_get_clientdata(client); u8 page = rdev_get_id(rdev); int ret; + mutex_lock(&data->update_lock); ret = pmbus_read_byte_data(client, page, PMBUS_OPERATION); + mutex_unlock(&data->update_lock); + if (ret < 0) return ret; @@ -2264,11 +2273,17 @@ static int _pmbus_regulator_on_off(struct regulator_dev *rdev, bool enable) { struct device *dev = rdev_get_dev(rdev); struct i2c_client *client = to_i2c_client(dev->parent); + struct pmbus_data *data = i2c_get_clientdata(client); u8 page = rdev_get_id(rdev); + int ret; - return pmbus_update_byte_data(client, page, PMBUS_OPERATION, - PB_OPERATION_CONTROL_ON, - enable ? PB_OPERATION_CONTROL_ON : 0); + mutex_lock(&data->update_lock); + ret = pmbus_update_byte_data(client, page, PMBUS_OPERATION, + PB_OPERATION_CONTROL_ON, + enable ? 
PB_OPERATION_CONTROL_ON : 0); + mutex_unlock(&data->update_lock); + + return ret; } static int pmbus_regulator_enable(struct regulator_dev *rdev) diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c index 6c84780e358e8d12cc21a73deaa4644ae637ec0c..066b12990fbfb6f97ee7f16beb174ab388595013 100644 --- a/drivers/hwmon/sch56xx-common.c +++ b/drivers/hwmon/sch56xx-common.c @@ -424,7 +424,7 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent, if (nowayout) set_bit(WDOG_NO_WAY_OUT, &data->wddev.status); if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE) - set_bit(WDOG_ACTIVE, &data->wddev.status); + set_bit(WDOG_HW_RUNNING, &data->wddev.status); /* Since the watchdog uses a downcounter there is no register to read the BIOS set timeout from (if any was set at all) -> diff --git a/drivers/hwmon/sw64_pvt.c b/drivers/hwmon/sw64_pvt.c new file mode 100644 index 0000000000000000000000000000000000000000..9e292a90af38887bed826284ac64f6b06c1c7e8b --- /dev/null +++ b/drivers/hwmon/sw64_pvt.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PVT device driver. + * + * Part of lm_sensors, Linux kernel modules + * for hardware monitoring in sunway. + */ +#include +#include +#include +#include +#include +#include +#include + +#define PVT_VSYS 0 +#define PVT0_CTRL 0x7c00 +#define PVT02SPBU_DATA_OUT (0x1 << 26) +#define PVT_READ 0xc000 +#define PVT_WADDR 0xc800 +#define PVT_WDATA 0xcc00 + +/* The PVT registers */ +#define PVT_SAFECTRL 0x0 +#define CLK_SEL 0x1 +#define PVT_RUN 0x2 +#define PVT_CONFIG 0x3 +#define PVT_WAIT_TIME 0x4 +#define TS_ALARM_HVALUE_L 0x5 +#define TS_ALARM_HVALUE_H 0x6 +#define TS_ALARM_LVALUE_L 0x7 +#define TS_ALARM_LVALUE_H 0x8 +#define TS_ALARM_TIMES 0x9 +#define TRIMG 0xa +#define TRIMO 0xb +#define VS_ALARM_HVALUE_L 0xc +#define VS_ALARM_HVALUE_H 0xd +#define VS_ALARM_LVALUE_L 0xe +#define VS_ALARM_LVALUE_H 0xf +#define VS_ALARM_TIMES 0x10 +#define PVT_ALARM_CLEAR 0x11 +#define PVT_ALARM_MASK 0x12 +#define PVT_DATA_OUT_L 0x13 +#define PVT_DATA_OUT_H 0x14 +#define PVT_STATE_INFO 0x15 +#define PVT_ALARM_INFO 0x16 +#define COFFICIENT 71 +#define FIXEDVAL 45598 + +#define vol_algorithm(m, n) (((((m >> 16) & 0x3) * 0x100) +\ + ((n >> 16) & 0xff)) * COFFICIENT + FIXEDVAL) + + +struct pvt_hwmon { + struct pvt *pvt; + void __iomem *base; +}; + +static const char * const input_names[] = { + [PVT_VSYS] = "voltage", +}; + +static inline void pvt_write_reg(struct pvt_hwmon *pvtvol, u64 a, + u64 b, unsigned int offset) +{ + writel(a | b, pvtvol->base + offset); +} + +static inline u64 pvt_read_reg(struct pvt_hwmon *pvtvol, unsigned int offset) +{ + u64 value; + + value = readl(pvtvol->base + offset); + return value; +} + +void pvt_configure(struct pvt_hwmon *pvtvol, u64 value, u64 reg) +{ + pvt_write_reg(pvtvol, PVT_WDATA, value, PVT0_CTRL); + pvt_write_reg(pvtvol, PVT_WADDR, reg, PVT0_CTRL); +} + +static inline u64 pvt_read_vol(struct pvt_hwmon *pvtvol, u64 data, + u64 reg, unsigned int offset) +{ + unsigned int value; + + pvt_write_reg(pvtvol, data, reg, offset); + msleep(100); + value = pvt_read_reg(pvtvol, offset); + return value; +} + +static int pvt_get_vol(struct pvt_hwmon *pvtvol) +{ + unsigned long long data_h, data_l; + + pvt_configure(pvtvol, 0x1, PVT_SAFECTRL); + + /* configure PVT mode */ + pvt_configure(pvtvol, 0x3, PVT_CONFIG); + + /* PVT monitor enable */ + pvt_configure(pvtvol, 0x1, PVT_RUN); + + /* get the upper 2 bits of the PVT voltage */ + data_h = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_H, PVT0_CTRL); + 
if ((data_h & PVT02SPBU_DATA_OUT) == 0) { + pr_err("error: the Voltage_h is error\n"); + return false; + } + + /* get the lower 8 bits of the PVT voltage */ + data_l = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_L, PVT0_CTRL); + if ((data_l & PVT02SPBU_DATA_OUT) == 0) { + pr_err("error: the Voltage_l is error\n"); + return false; + } + + return vol_algorithm(data_h, data_l); +} + +static ssize_t pvt_read(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct pvt_hwmon *pvtvol = dev_get_drvdata(dev); + unsigned long long pvt_vol; + + pvt_vol = pvt_get_vol(pvtvol); + return sprintf(buf, "%lld\n", (pvt_vol / 100)); +} + +static ssize_t show_label(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + return sprintf(buf, "%s\n", + input_names[to_sensor_dev_attr(devattr)->index]); +} + +static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, pvt_read, NULL, + PVT_VSYS); +static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, show_label, NULL, + PVT_VSYS); + +static struct attribute *pvt_attrs[] = { + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in0_label.dev_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(pvt); + +static int pvt_vol_plat_probe(struct platform_device *pdev) +{ + struct resource *res; + struct pvt_hwmon *pvtvol; + struct device *hwmon_dev; + unsigned long long value; + struct device *dev = &pdev->dev; + + pvtvol = devm_kzalloc(&pdev->dev, sizeof(*pvtvol), GFP_KERNEL); + if (!pvtvol) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + goto err; + + pvtvol->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pvtvol->base)) + return PTR_ERR(pvtvol->base); + + platform_set_drvdata(pdev, pvtvol); + hwmon_dev = devm_hwmon_device_register_with_groups(dev, "pvt", + pvtvol, pvt_groups); + + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + value = pvt_get_vol(pvtvol); + if (!value) { + dev_info(&pdev->dev, "pvt_vol get failed\n"); + return false; + } + + return 0; + +err: + dev_err(&pdev->dev, "no PVT resource\n"); + return -ENXIO; +} + +#ifdef CONFIG_OF +static const struct of_device_id pvt_vol_of_match[] = { + { .compatible = "sw64,pvt-vol", }, + {}, +}; +MODULE_DEVICE_TABLE(of, pvt_vol_of_match); +#endif + +static struct platform_driver pvt_vol_driver = { + .probe = pvt_vol_plat_probe, + .driver = { + .name = "pvt-sw64", + .of_match_table = of_match_ptr(pvt_vol_of_match), + }, +}; + +static int __init pvt_vol_init_driver(void) +{ + return platform_driver_register(&pvt_vol_driver); +} +subsys_initcall(pvt_vol_init_driver); + +static void __exit pvt_vol_exit_driver(void) +{ + platform_driver_unregister(&pvt_vol_driver); +} +module_exit(pvt_vol_exit_driver); + +MODULE_AUTHOR("Wang Yingying "); +MODULE_DESCRIPTION("pvt controller"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c index a61313f320bda220e54c41e7ef536edffb91474e..8e19e8cdcce5e735f946bf510a5dab2b45ed3e6d 100644 --- a/drivers/hwtracing/coresight/coresight-catu.c +++ b/drivers/hwtracing/coresight/coresight-catu.c @@ -567,12 +567,11 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int catu_remove(struct amba_device *adev) +static void catu_remove(struct amba_device *adev) { struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev); coresight_unregister(drvdata->csdev); - return 0; } static struct amba_id catu_ids[] = { diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c 
b/drivers/hwtracing/coresight/coresight-cpu-debug.c index e1d232411d8d753b992eccdcc70014d5356751fa..2dcf13de751fc2296ad5e0c26f8d8a92aaa2049e 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -627,7 +627,7 @@ static int debug_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int debug_remove(struct amba_device *adev) +static void debug_remove(struct amba_device *adev) { struct device *dev = &adev->dev; struct debug_drvdata *drvdata = amba_get_drvdata(adev); @@ -642,8 +642,6 @@ static int debug_remove(struct amba_device *adev) if (!--debug_count) debug_func_exit(); - - return 0; } static const struct amba_cs_uci_id uci_id_debug[] = { diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c index 7ea93598f0eeafd00db4890961dd5cf28085e30c..0276700c246d59351c4792b09c19b5f793f1a2bc 100644 --- a/drivers/hwtracing/coresight/coresight-cti-core.c +++ b/drivers/hwtracing/coresight/coresight-cti-core.c @@ -836,7 +836,7 @@ static void cti_device_release(struct device *dev) if (drvdata->csdev_release) drvdata->csdev_release(dev); } -static int cti_remove(struct amba_device *adev) +static void cti_remove(struct amba_device *adev) { struct cti_drvdata *drvdata = dev_get_drvdata(&adev->dev); @@ -845,8 +845,6 @@ static int cti_remove(struct amba_device *adev) mutex_unlock(&ect_mutex); coresight_unregister(drvdata->csdev); - - return 0; } static int cti_probe(struct amba_device *adev, const struct amba_id *id) diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 0cf6f0b947b6f8849d3732f9e0fa7a062e245323..51c801c05e5c30cac3e4f1b84bb79e01a5da4eb5 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -803,7 +803,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int etb_remove(struct amba_device *adev) +static void etb_remove(struct amba_device *adev) { struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev); @@ -814,8 +814,6 @@ static int etb_remove(struct amba_device *adev) */ misc_deregister(&drvdata->miscdev); coresight_unregister(drvdata->csdev); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c index 5bf5a5a4ce6d15265831afac116f193ca2ea4026..683a69e88efda89ab5e100d92a048a47c323181c 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c @@ -909,7 +909,7 @@ static void clear_etmdrvdata(void *info) etmdrvdata[cpu] = NULL; } -static int etm_remove(struct amba_device *adev) +static void etm_remove(struct amba_device *adev) { struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev); @@ -932,8 +932,6 @@ static int etm_remove(struct amba_device *adev) cpus_read_unlock(); coresight_unregister(drvdata->csdev); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 02d0b92cf510183ddc458b6040891e4067aa7ea6..d4d9c8bb88cad79d1fb7a15356bd87a0776f1c6c 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -1680,7 +1680,7 @@ static void clear_etmdrvdata(void *info) etmdrvdata[cpu] = NULL; } -static int etm4_remove(struct amba_device *adev) +static void etm4_remove(struct 
amba_device *adev) { struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev); @@ -1703,8 +1703,6 @@ static int etm4_remove(struct amba_device *adev) cpus_read_unlock(); coresight_unregister(drvdata->csdev); - - return 0; } static const struct amba_id etm4_ids[] = { diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index 4682f26139961a2478045acfab3c8e492f6fb0ad..42cc38c89f3ba54e8fc46eb66b1b570e295ebd1d 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -364,8 +364,12 @@ static ssize_t mode_store(struct device *dev, mode = ETM_MODE_QELEM(config->mode); /* start by clearing QE bits */ config->cfg &= ~(BIT(13) | BIT(14)); - /* if supported, Q elements with instruction counts are enabled */ - if ((mode & BIT(0)) && (drvdata->q_support & BIT(0))) + /* + * if supported, Q elements with instruction counts are enabled. + * Always set the low bit for any requested mode. Valid combos are + * 0b00, 0b01 and 0b11. + */ + if (mode && drvdata->q_support) config->cfg |= BIT(13); /* * if supported, Q elements with and without instruction diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index 3fc6c678b51d8ebb1f9d60504d68feb2844189f4..b2fb853776d791721034ee9dff4f3134f0f74bf9 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -370,9 +370,9 @@ static int dynamic_funnel_probe(struct amba_device *adev, return funnel_probe(&adev->dev, &adev->res); } -static int dynamic_funnel_remove(struct amba_device *adev) +static void dynamic_funnel_remove(struct amba_device *adev) { - return funnel_remove(&adev->dev); + funnel_remove(&adev->dev); } static const struct amba_id dynamic_funnel_ids[] = { diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 38008aca2c0f4b618168e8f231829c04d1d19d9f..da2bfeeabc1b48c3c79f4006aa66aba009798235 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -388,9 +388,9 @@ static int dynamic_replicator_probe(struct amba_device *adev, return replicator_probe(&adev->dev, &adev->res); } -static int dynamic_replicator_remove(struct amba_device *adev) +static void dynamic_replicator_remove(struct amba_device *adev) { - return replicator_remove(&adev->dev); + replicator_remove(&adev->dev); } static const struct amba_id dynamic_replicator_ids[] = { diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index 587c1d7f252081d4e24f63a45ac3caa85fe68a7c..0ecca9f93f3a1b9c8d814b76bd62619f2ca583db 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -951,15 +951,13 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int stm_remove(struct amba_device *adev) +static void stm_remove(struct amba_device *adev) { struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev); coresight_unregister(drvdata->csdev); stm_unregister_device(&drvdata->stm); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c index 8169dff5a9f6a789552d61bf0db7f5bcc2c11185..e29b3914fc0ff14fbdeba40c2ad296d8fcfd3df4 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-core.c +++ 
b/drivers/hwtracing/coresight/coresight-tmc-core.c @@ -559,7 +559,7 @@ static void tmc_shutdown(struct amba_device *adev) spin_unlock_irqrestore(&drvdata->spinlock, flags); } -static int tmc_remove(struct amba_device *adev) +static void tmc_remove(struct amba_device *adev) { struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev); @@ -570,8 +570,6 @@ static int tmc_remove(struct amba_device *adev) */ misc_deregister(&drvdata->miscdev); coresight_unregister(drvdata->csdev); - - return 0; } static const struct amba_id tmc_ids[] = { diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 5b35029461a0cb2605c3ff43f9d72f1a07c19dbe..0ca39d905d0b3d70decf54ede4780f40b8fc2e1a 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -173,13 +173,11 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) return PTR_ERR(drvdata->csdev); } -static int tpiu_remove(struct amba_device *adev) +static void tpiu_remove(struct amba_device *adev) { struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev); coresight_unregister(drvdata->csdev); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 036fdcee5eb377487ec55b7d82553bbac4d0c7b4..9535e995ecc92bfab66f73f865c981e1dc224bed 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -488,7 +488,7 @@ config I2C_BRCMSTB config I2C_CADENCE tristate "Cadence I2C Controller" - depends on ARCH_ZYNQ || ARM64 || XTENSA + depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST help Say yes here to select Cadence I2C Host Controller. This controller is e.g. used by Xilinx Zynq. @@ -946,7 +946,7 @@ config I2C_QCOM_GENI config I2C_QUP tristate "Qualcomm QUP based I2C controller" - depends on ARCH_QCOM + depends on ARCH_QCOM || COMPILE_TEST help If you say yes to this option, support will be included for the built-in I2C interface on the Qualcomm SoCs. diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index 37443edbf75464083693878983790c93d3457709..ad3b124a2e3768c9eb132fd2208ce421ed1da631 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -23,6 +23,11 @@ #define BCM2835_I2C_FIFO 0x10 #define BCM2835_I2C_DIV 0x14 #define BCM2835_I2C_DEL 0x18 +/* + * 16-bit field for the number of SCL cycles to wait after rising SCL + * before deciding the slave is not responding. 0 disables the + * timeout detection. + */ #define BCM2835_I2C_CLKT 0x1c #define BCM2835_I2C_C_READ BIT(0) @@ -477,6 +482,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) adap->dev.of_node = pdev->dev.of_node; adap->quirks = of_device_get_match_data(&pdev->dev); + /* + * Disable the hardware clock stretching timeout. SMBUS + * specifies a limit for how long the device can stretch the + * clock, but core I2C doesn't. 
+ */ + bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0); bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0); ret = i2c_add_adapter(adap); diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index ef73a42577cc7b3cedff721df31b6c94f707f31c..07eb819072c4fdf44a2a570b08aded0ae83c02e4 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -465,18 +465,18 @@ static int meson_i2c_probe(struct platform_device *pdev) */ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0); - ret = i2c_add_adapter(&i2c->adap); - if (ret < 0) { - clk_disable_unprepare(i2c->clk); - return ret; - } - /* Disable filtering */ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0); meson_i2c_set_clk_div(i2c, timings.bus_freq_hz); + ret = i2c_add_adapter(&i2c->adap); + if (ret < 0) { + clk_disable_unprepare(i2c->clk); + return ret; + } + return 0; } diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index d4b1b0865f6768b166693b4aa319981aca10ab16..a3363b20f168a458901d1eaeb1232d10169082db 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -1055,7 +1055,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int nmk_i2c_remove(struct amba_device *adev) +static void nmk_i2c_remove(struct amba_device *adev) { struct resource *res = &adev->res; struct nmk_i2c_dev *dev = amba_get_drvdata(adev); @@ -1068,8 +1068,6 @@ static int nmk_i2c_remove(struct amba_device *adev) i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE); clk_disable_unprepare(dev->clk); release_mem_region(res->start, resource_size(res)); - - return 0; } static struct i2c_vendor_data vendor_stn8815 = { diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 2a8568b97c14d5452a532c6aede85b4734611b1d..8dabb6ffb1a4f088adc2b507c2ea89ad411cdbbe 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -756,7 +756,6 @@ static const struct i2c_adapter_quirks xiic_quirks = { static const struct i2c_adapter xiic_adapter = { .owner = THIS_MODULE, - .name = DRIVER_NAME, .class = I2C_CLASS_DEPRECATED, .algo = &xiic_algorithm, .quirks = &xiic_quirks, @@ -793,6 +792,8 @@ static int xiic_i2c_probe(struct platform_device *pdev) i2c_set_adapdata(&i2c->adap, i2c); i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = pdev->dev.of_node; + snprintf(i2c->adap.name, sizeof(i2c->adap.name), + DRIVER_NAME " %s", pdev->name); mutex_init(&i2c->lock); init_waitqueue_head(&i2c->wait); diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 5365199a31f419b9db442ba3fa555e3a3ed044fc..f7a7405d4350a10718b7948d546ca31fc11dabdb 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -261,7 +261,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) err = device_create_file(&pdev->dev, &dev_attr_available_masters); if (err) - goto err_rollback; + goto err_rollback_activation; err = device_create_file(&pdev->dev, &dev_attr_current_master); if (err) @@ -271,8 +271,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) err_rollback_available: device_remove_file(&pdev->dev, &dev_attr_available_masters); -err_rollback: +err_rollback_activation: i2c_demux_deactivate_master(priv); +err_rollback: for (j = 0; j < i; j++) { of_node_put(priv->chan[j].parent_np); of_changeset_destroy(&priv->chan[j].chgset); diff --git a/drivers/iio/accel/bmc150-accel-core.c 
b/drivers/iio/accel/bmc150-accel-core.c index 48435865fdaf34387b563b9ce025e00fddedcc0b..792526462f1c9c4275edda27526cc7eb555e97a0 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -1648,11 +1648,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "Unable to register iio device\n"); - goto err_trigger_unregister; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_trigger_unregister: bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1); err_buffer_cleanup: diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 2eaf85b6e39f4c399ea971451649042fd82c6712..89e0a89d95d6bdadc34dcda832345ae95e4c89d8 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1429,11 +1429,14 @@ static int kxcjk1013_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index a7208704d31c90d15b7e40bff45e43ab7dd783ec..e7e280282774089830e0f922d781f8ba3f31d7d1 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -176,6 +176,7 @@ static const struct mma8452_event_regs trans_ev_regs = { * @enabled_events: event flags enabled and handled by this driver */ struct mma_chip_info { + const char *name; u8 chip_id; const struct iio_chan_spec *channels; int num_channels; @@ -1303,6 +1304,7 @@ enum { static const struct mma_chip_info mma_chip_info_table[] = { [mma8451] = { + .name = "mma8451", .chip_id = MMA8451_DEVICE_ID, .channels = mma8451_channels, .num_channels = ARRAY_SIZE(mma8451_channels), @@ -1327,6 +1329,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { MMA8452_INT_FF_MT, }, [mma8452] = { + .name = "mma8452", .chip_id = MMA8452_DEVICE_ID, .channels = mma8452_channels, .num_channels = ARRAY_SIZE(mma8452_channels), @@ -1343,6 +1346,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { MMA8452_INT_FF_MT, }, [mma8453] = { + .name = "mma8453", .chip_id = MMA8453_DEVICE_ID, .channels = mma8453_channels, .num_channels = ARRAY_SIZE(mma8453_channels), @@ -1359,6 +1363,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { MMA8452_INT_FF_MT, }, [mma8652] = { + .name = "mma8652", .chip_id = MMA8652_DEVICE_ID, .channels = mma8652_channels, .num_channels = ARRAY_SIZE(mma8652_channels), @@ -1368,6 +1373,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { .enabled_events = MMA8452_INT_FF_MT, }, [mma8653] = { + .name = "mma8653", .chip_id = MMA8653_DEVICE_ID, .channels = mma8653_channels, .num_channels = ARRAY_SIZE(mma8653_channels), @@ -1382,6 +1388,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { .enabled_events = MMA8452_INT_FF_MT, }, [fxls8471] = { + .name = "fxls8471", .chip_id = FXLS8471_DEVICE_ID, .channels = mma8451_channels, .num_channels = ARRAY_SIZE(mma8451_channels), @@ -1525,13 +1532,6 @@ static int mma8452_probe(struct i2c_client *client, struct mma8452_data *data; struct iio_dev *indio_dev; int ret; - const struct of_device_id *match; - - match = 
of_match_device(mma8452_dt_ids, &client->dev); - if (!match) { - dev_err(&client->dev, "unknown device model\n"); - return -ENODEV; - } indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) @@ -1540,7 +1540,14 @@ static int mma8452_probe(struct i2c_client *client, data = iio_priv(indio_dev); data->client = client; mutex_init(&data->lock); - data->chip_info = match->data; + + data->chip_info = device_get_match_data(&client->dev); + if (!data->chip_info && id) { + data->chip_info = &mma_chip_info_table[id->driver_data]; + } else { + dev_err(&client->dev, "unknown device model\n"); + return -ENODEV; + } data->vdd_reg = devm_regulator_get(&client->dev, "vdd"); if (IS_ERR(data->vdd_reg)) @@ -1584,11 +1591,11 @@ static int mma8452_probe(struct i2c_client *client, } dev_info(&client->dev, "registering %s accelerometer; ID 0x%x\n", - match->compatible, data->chip_info->chip_id); + data->chip_info->name, data->chip_info->chip_id); i2c_set_clientdata(client, indio_dev); indio_dev->info = &mma8452_info; - indio_dev->name = id->name; + indio_dev->name = data->chip_info->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = data->chip_info->channels; indio_dev->num_channels = data->chip_info->num_channels; @@ -1814,7 +1821,7 @@ MODULE_DEVICE_TABLE(i2c, mma8452_id); static struct i2c_driver mma8452_driver = { .driver = { .name = "mma8452", - .of_match_table = of_match_ptr(mma8452_dt_ids), + .of_match_table = mma8452_dt_ids, .pm = &mma8452_pm_ops, }, .probe = mma8452_probe, diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c index 08a2303cc9df3c08022bb32730faa41c01f49816..26421e8e8263958b1585d0b8d176a56bca0c4951 100644 --- a/drivers/iio/accel/mma9551.c +++ b/drivers/iio/accel/mma9551.c @@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c index c15908faa38167b0d6ff68d53ceca8de0ed2d403..a23a7685d1f93c893da306c14760eeea03966e94 100644 --- a/drivers/iio/accel/mma9553.c +++ b/drivers/iio/accel/mma9553.c @@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } dev_dbg(&indio_dev->dev, "Registered device %s\n", name); return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); return ret; diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 9c2401c5848ece566c7bc8944d2699c7f4cd20fa..bd350099503765cc2213470bbb137057be211924 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -74,7 +74,7 @@ #define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x) #define AD7124_CONFIG_PGA_MSK GENMASK(2, 0) #define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x) -#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6) +#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5) #define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x) /* AD7124_FILTER_X */ diff --git a/drivers/iio/adc/men_z188_adc.c 
b/drivers/iio/adc/men_z188_adc.c index 42ea8bc7e78051c16bc8f1dcd7546393c99153d5..adc5ceaef8c93a373daef26dbaded4d4be1904ca 100644 --- a/drivers/iio/adc/men_z188_adc.c +++ b/drivers/iio/adc/men_z188_adc.c @@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev, struct z188_adc *adc; struct iio_dev *indio_dev; struct resource *mem; + int ret; indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc)); if (!indio_dev) @@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev, adc->mem = mem; mcb_set_drvdata(dev, indio_dev); - return iio_device_register(indio_dev); + ret = iio_device_register(indio_dev); + if (ret) + goto err_unmap; + + return 0; +err_unmap: + iounmap(adc->base); err: mcb_release_mem(mem); return -ENXIO; diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c index c6416ad795ca48ae2a24cd02c8ecd5e8d4fb74e3..256177b15c511de57c9e93b31a9e690d09bb7780 100644 --- a/drivers/iio/adc/twl6030-gpadc.c +++ b/drivers/iio/adc/twl6030-gpadc.c @@ -911,6 +911,8 @@ static int twl6030_gpadc_probe(struct platform_device *pdev) ret = devm_request_threaded_irq(dev, irq, NULL, twl6030_gpadc_irq_handler, IRQF_ONESHOT, "twl6030_gpadc", indio_dev); + if (ret) + return ret; ret = twl6030_gpadc_enable_irq(TWL6030_GPADC_RT_SW1_EOC_MASK); if (ret < 0) { diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c index e42ea2b1707db9eb4628387a1db3a2b6fbbdebc7..3809f98894a515fd100167dc5c865bd7928a8bd5 100644 --- a/drivers/iio/afe/iio-rescale.c +++ b/drivers/iio/afe/iio-rescale.c @@ -38,7 +38,7 @@ static int rescale_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct rescale *rescale = iio_priv(indio_dev); - unsigned long long tmp; + s64 tmp; int ret; switch (mask) { @@ -59,10 +59,10 @@ static int rescale_read_raw(struct iio_dev *indio_dev, *val2 = rescale->denominator; return IIO_VAL_FRACTIONAL; case IIO_VAL_FRACTIONAL_LOG2: - tmp = *val * 1000000000LL; - do_div(tmp, rescale->denominator); + tmp = (s64)*val * 1000000000LL; + tmp = div_s64(tmp, rescale->denominator); tmp *= rescale->numerator; - do_div(tmp, 1000000000LL); + tmp = div_s64(tmp, 1000000000LL); *val = tmp; return ret; default: diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 39fe0b1785920c5150005a7af38a702087cdc564..b6b90eebec0b99b290490ae5b7f46c713f06fac9 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -1170,11 +1170,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c index 61885e99d3fc14db2655e7ac955b1adb4dd7a618..89133315e6aaf07bf509916a857233e04275acc9 100644 --- a/drivers/iio/imu/kmx61.c +++ b/drivers/iio/imu/kmx61.c @@ -1392,7 +1392,7 @@ static int kmx61_probe(struct i2c_client *client, ret = iio_device_register(data->acc_indio_dev); if (ret < 0) { dev_err(&client->dev, "Failed to register acc iio device\n"); - goto err_buffer_cleanup_mag; + goto err_pm_cleanup; } ret = iio_device_register(data->mag_indio_dev); @@ -1405,6 +1405,9 @@ static int kmx61_probe(struct i2c_client *client, err_iio_unregister_acc: iio_device_unregister(data->acc_indio_dev); 
+err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup_mag: if (client->irq > 0) iio_triggered_buffer_cleanup(data->mag_indio_dev); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 558ca3843bb95fe858932ebf363b1ad4c6a915a0..2c528425b03b4c0583c156a12cab7745dbae4d72 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -1558,8 +1558,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor, if (err < 0) return err; + /* + * we need to wait for sensor settling time before + * reading data in order to avoid corrupted samples + */ delay = 1000000000 / sensor->odr; - usleep_range(delay, 2 * delay); + usleep_range(3 * delay, 4 * delay); err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data)); if (err < 0) diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index ede99e0d53714d4d7d9dce825c8551598d3d7f5a..8c3faa7972842db38a55531c18cf626bfb4119fa 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -561,28 +561,50 @@ EXPORT_SYMBOL_GPL(iio_read_channel_average_raw); static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan, int raw, int *processed, unsigned int scale) { - int scale_type, scale_val, scale_val2, offset; + int scale_type, scale_val, scale_val2; + int offset_type, offset_val, offset_val2; s64 raw64 = raw; - int ret; - ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET); - if (ret >= 0) - raw64 += offset; + offset_type = iio_channel_read(chan, &offset_val, &offset_val2, + IIO_CHAN_INFO_OFFSET); + if (offset_type >= 0) { + switch (offset_type) { + case IIO_VAL_INT: + break; + case IIO_VAL_INT_PLUS_MICRO: + case IIO_VAL_INT_PLUS_NANO: + /* + * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO + * implicitly truncate the offset to its integer form. + */ + break; + case IIO_VAL_FRACTIONAL: + offset_val /= offset_val2; + break; + case IIO_VAL_FRACTIONAL_LOG2: + offset_val >>= offset_val2; + break; + default: + return -EINVAL; + } + + raw64 += offset_val; + } scale_type = iio_channel_read(chan, &scale_val, &scale_val2, IIO_CHAN_INFO_SCALE); if (scale_type < 0) { /* - * Just pass raw values as processed if no scaling is - * available. + * If no channel scaling is available apply consumer scale to + * raw value and return.
*/ - *processed = raw; + *processed = raw * scale; return 0; } switch (scale_type) { case IIO_VAL_INT: - *processed = raw64 * scale_val; + *processed = raw64 * scale_val * scale; break; case IIO_VAL_INT_PLUS_MICRO: if (scale_val2 < 0) diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index 8eacfaf584cfd0690fb4020a6ff74cca8bb490eb..620537d0104d4b756fcaa17f8e1c93bb390ca444 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c @@ -941,13 +941,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to register iio device\n"); - goto err_disable_runtime_pm; + goto err_pm_cleanup; } dev_dbg(dev, "Registered device %s\n", name); return 0; -err_disable_runtime_pm: +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index ce492134c1e5c1678ca3dc1d31e94a629427f07a..3c40aa50cd60c17064c4187b1a92fe307c0ebd1c 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2635,7 +2635,7 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout) { struct rdma_id_private *id_priv; - if (id->qp_type != IB_QPT_RC) + if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI) return -EINVAL; id_priv = container_of(id, struct rdma_id_private, id); @@ -3321,22 +3321,30 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, const struct sockaddr *dst_addr) { - if (!src_addr || !src_addr->sa_family) { - src_addr = (struct sockaddr *) &id->route.addr.src_addr; - src_addr->sa_family = dst_addr->sa_family; - if (IS_ENABLED(CONFIG_IPV6) && - dst_addr->sa_family == AF_INET6) { - struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; - struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; - src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; - if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) - id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; - } else if (dst_addr->sa_family == AF_IB) { - ((struct sockaddr_ib *) src_addr)->sib_pkey = - ((struct sockaddr_ib *) dst_addr)->sib_pkey; - } - } - return rdma_bind_addr(id, src_addr); + struct sockaddr_storage zero_sock = {}; + + if (src_addr && src_addr->sa_family) + return rdma_bind_addr(id, src_addr); + + /* + * When the src_addr is not specified, automatically supply an any addr + */ + zero_sock.ss_family = dst_addr->sa_family; + if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { + struct sockaddr_in6 *src_addr6 = + (struct sockaddr_in6 *)&zero_sock; + struct sockaddr_in6 *dst_addr6 = + (struct sockaddr_in6 *)dst_addr; + + src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; + if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) + id->route.addr.dev_addr.bound_dev_if = + dst_addr6->sin6_scope_id; + } else if (dst_addr->sa_family == AF_IB) { + ((struct sockaddr_ib *)&zero_sock)->sib_pkey = + ((struct sockaddr_ib *)dst_addr)->sib_pkey; + } + return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); } /* diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 3d895cc41c3ad93cf2df789035e01304d2039e47..597e889ba83126ff337125782aeac8ea038ac8a4 100644 --- a/drivers/infiniband/core/verbs.c +++ 
b/drivers/infiniband/core/verbs.c @@ -2078,6 +2078,7 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return mr; mr->device = pd->device; + mr->type = IB_MR_TYPE_USER; mr->pd = pd; mr->dm = NULL; atomic_inc(&pd->usecnt); diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 3591923abebb9b172396136f186d3b053711ccbb..5f3edd255ca3cab845b8cc0019b86132c37a0b29 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1439,8 +1439,7 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num, 4096 : hfi1_max_mtu), IB_MTU_4096); props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu : mtu_to_enum(ppd->ibmtu, IB_MTU_4096); - props->phys_mtu = HFI1_CAP_IS_KSET(AIP) ? hfi1_max_mtu : - ib_mtu_enum_to_int(props->max_mtu); + props->phys_mtu = hfi1_max_mtu; return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index 4b693d542aceef1566de7d82766d939890025dc3..8644136075719ca6d54bbab7fa95114b5987526d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -38,45 +38,36 @@ #define CMD_POLL_TOKEN 0xffff #define CMD_MAX_NUM 32 -static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, - u8 op_modifier, u16 op, u16 token, - int event) +static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { - return hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, token, event); + return hr_dev->hw->post_mbox(hr_dev, mbox_msg); } /* this should be called with "poll_sem" */ -static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, - unsigned int timeout) +static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { int ret; - ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - CMD_POLL_TOKEN, 0); + ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg); if (ret) { dev_err_ratelimited(hr_dev->dev, "failed to post mailbox 0x%x in poll mode, ret = %d.\n", - op, ret); + mbox_msg->cmd, ret); return ret; } - return hr_dev->hw->poll_mbox_done(hr_dev, timeout); + return hr_dev->hw->poll_mbox_done(hr_dev); } -static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, unsigned int timeout) +static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { int ret; down(&hr_dev->cmd.poll_sem); - ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, timeout); + ret = __hns_roce_cmd_mbox_poll(hr_dev, mbox_msg); up(&hr_dev->cmd.poll_sem); return ret; @@ -100,10 +91,8 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status, complete(&context->done); } -static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, - unsigned int timeout) +static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { struct hns_roce_cmdq *cmd = &hr_dev->cmd; struct hns_roce_cmd_context *context; @@ -124,20 +113,19 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, reinit_completion(&context->done); - ret = 
hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - context->token, 1); + mbox_msg->token = context->token; + ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg); if (ret) { dev_err_ratelimited(dev, "failed to post mailbox 0x%x in event mode, ret = %d.\n", - op, ret); + mbox_msg->cmd, ret); goto out; } if (!wait_for_completion_timeout(&context->done, - msecs_to_jiffies(timeout))) { + msecs_to_jiffies(HNS_ROCE_CMD_TIMEOUT_MSECS))) { dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n", - context->token, op); + context->token, mbox_msg->cmd); ret = -EBUSY; goto out; } @@ -145,45 +133,50 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, ret = context->result; if (ret) dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n", - context->token, op, ret); + context->token, mbox_msg->cmd, ret); out: context->busy = 0; return ret; } -static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, unsigned int timeout) +static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { int ret; down(&hr_dev->cmd.event_sem); - ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, timeout); + ret = __hns_roce_cmd_mbox_wait(hr_dev, mbox_msg); up(&hr_dev->cmd.event_sem); return ret; } int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, - unsigned long in_modifier, u8 op_modifier, u16 op, - unsigned int timeout) + u8 cmd, unsigned long tag) { + struct hns_roce_mbox_msg mbox_msg = {}; bool is_busy; if (hr_dev->hw->chk_mbox_avail) if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy)) return is_busy ? 
-EBUSY : 0; - if (hr_dev->cmd.use_events) - return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - timeout); - else - return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - timeout); + mbox_msg.in_param = in_param; + mbox_msg.out_param = out_param; + mbox_msg.cmd = cmd; + mbox_msg.tag = tag; + + if (hr_dev->cmd.use_events) { + mbox_msg.event_en = 1; + + return hns_roce_cmd_mbox_wait(hr_dev, &mbox_msg); + } else { + mbox_msg.event_en = 0; + mbox_msg.token = CMD_POLL_TOKEN; + + return hns_roce_cmd_mbox_poll(hr_dev, &mbox_msg); + } } int hns_roce_cmd_init(struct hns_roce_dev *hr_dev) @@ -269,3 +262,15 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev, dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma); kfree(mailbox); } + +int hns_roce_create_hw_ctx(struct hns_roce_dev *dev, + struct hns_roce_cmd_mailbox *mailbox, + u8 cmd, unsigned long idx) +{ + return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cmd, idx); +} + +int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd, unsigned long idx) +{ + return hns_roce_cmd_mbox(dev, 0, 0, cmd, idx); +} diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h index 8025e7f657fa668b6a60f886080d5981aea52d54..052a3d60905aa3063f00596c529d6facf52de74e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.h +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h @@ -140,12 +140,16 @@ enum { }; int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, - unsigned long in_modifier, u8 op_modifier, u16 op, - unsigned int timeout); + u8 cmd, unsigned long tag); struct hns_roce_cmd_mailbox * hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev); void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev, struct hns_roce_cmd_mailbox *mailbox); +int hns_roce_create_hw_ctx(struct hns_roce_dev *dev, + struct hns_roce_cmd_mailbox *mailbox, + u8 cmd, unsigned long idx); +int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd, + unsigned long idx); #endif /* _HNS_ROCE_CMD_H */ diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 65e1e6126d95096f1ebb76cc83708be2c23f3962..5320f4a4c31295895ae0594eed6030ae5aa6bfc1 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -100,12 +100,39 @@ static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn) mutex_unlock(&cq_table->bank_mutex); } +static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq, + u64 *mtts, dma_addr_t dma_handle) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) { + ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n"); + return PTR_ERR(mailbox); + } + + hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle); + + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC, + hr_cq->cqn); + if (ret) + ibdev_err(ibdev, + "failed to send create cmd for CQ(0x%lx), ret = %d.\n", + hr_cq->cqn, ret); + + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) { struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct ib_device *ibdev = &hr_dev->ib_dev; - struct hns_roce_cmd_mailbox *mailbox; - u64 mtts[MTT_MIN_COUNT] = { 0 }; + u64 mtts[MTT_MIN_COUNT] = {}; dma_addr_t dma_handle; int ret; @@ -121,7 
+148,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) if (ret) { ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n", hr_cq->cqn, ret); - goto err_out; + return ret; } ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL)); @@ -130,41 +157,17 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) goto err_put; } - /* Allocate mailbox memory */ - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) { - ret = PTR_ERR(mailbox); - goto err_xa; - } - - hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle); - - /* Send mailbox to hw */ - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0, - HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS); - hns_roce_free_cmd_mailbox(hr_dev, mailbox); - if (ret) { - ibdev_err(ibdev, - "failed to send create cmd for CQ(0x%lx), ret = %d.\n", - hr_cq->cqn, ret); + ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle); + if (ret) goto err_xa; - } - - hr_cq->cons_index = 0; - hr_cq->arm_sn = 1; - - refcount_set(&hr_cq->refcount, 1); - init_completion(&hr_cq->free); return 0; err_xa: xa_erase(&cq_table->array, hr_cq->cqn); - err_put: hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn); -err_out: return ret; } @@ -174,9 +177,8 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) struct device *dev = hr_dev->dev; int ret; - ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1, - HNS_ROCE_CMD_DESTROY_CQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC, + hr_cq->cqn); if (ret) dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret, hr_cq->cqn); @@ -414,6 +416,11 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, goto err_cqc; } + hr_cq->cons_index = 0; + hr_cq->arm_sn = 1; + refcount_set(&hr_cq->refcount, 1); + init_completion(&hr_cq->free); + return 0; err_cqc: diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 8bea6de7f9552d5fb9b271471c27372745e46573..0d160432fa6588ff6b9a4c95d55e409a6be1b4ca 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -106,16 +106,6 @@ enum { SERV_TYPE_XRC = 5, }; -enum hns_roce_qp_state { - HNS_ROCE_QP_STATE_RST, - HNS_ROCE_QP_STATE_INIT, - HNS_ROCE_QP_STATE_RTR, - HNS_ROCE_QP_STATE_RTS, - HNS_ROCE_QP_STATE_SQD, - HNS_ROCE_QP_STATE_ERR, - HNS_ROCE_QP_NUM_STATE, -}; - enum hns_roce_event { HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01, HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02, @@ -139,8 +129,6 @@ enum hns_roce_event { HNS_ROCE_EVENT_TYPE_INVALID_XRCETH = 0x17, }; -#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12 - enum { HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0), HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1), @@ -535,6 +523,11 @@ struct hns_roce_cmd_context { u16 busy; }; +enum hns_roce_cmdq_state { + HNS_ROCE_CMDQ_STATE_NORMAL, + HNS_ROCE_CMDQ_STATE_FATAL_ERR, +}; + struct hns_roce_cmdq { struct dma_pool *pool; struct semaphore poll_sem; @@ -554,6 +547,7 @@ struct hns_roce_cmdq { * close device, switch into poll mode(non event mode) */ u8 use_events; + enum hns_roce_cmdq_state state; }; struct hns_roce_cmd_mailbox { @@ -561,6 +555,15 @@ struct hns_roce_cmd_mailbox { dma_addr_t dma; }; +struct hns_roce_mbox_msg { + u64 in_param; + u64 out_param; + u8 cmd; + u32 tag; + u16 token; + u8 event_en; +}; + struct hns_roce_dev; struct hns_roce_rinl_sge { @@ -647,6 +650,11 @@ struct hns_roce_ceqe { __le32 rsv[15]; }; 
+#define CEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_ceqe, h, l) + +#define CEQE_CQN CEQE_FIELD_LOC(23, 0) +#define CEQE_OWNER CEQE_FIELD_LOC(31, 31) + struct hns_roce_aeqe { __le32 asyn; union { @@ -666,6 +674,13 @@ struct hns_roce_aeqe { __le32 rsv[12]; }; +#define AEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_aeqe, h, l) + +#define AEQE_EVENT_TYPE AEQE_FIELD_LOC(7, 0) +#define AEQE_SUB_TYPE AEQE_FIELD_LOC(15, 8) +#define AEQE_OWNER AEQE_FIELD_LOC(31, 31) +#define AEQE_EVENT_QUEUE_NUM AEQE_FIELD_LOC(55, 32) + struct hns_roce_eq { struct hns_roce_dev *hr_dev; void __iomem *db_reg; @@ -715,7 +730,6 @@ struct hns_roce_caps { u32 num_pi_qps; u32 reserved_qps; int num_qpc_timer; - int num_cqc_timer; u32 num_srqs; u32 max_wqes; u32 max_srq_wrs; @@ -851,11 +865,9 @@ struct hns_roce_hw { int (*hw_profile)(struct hns_roce_dev *hr_dev); int (*hw_init)(struct hns_roce_dev *hr_dev); void (*hw_exit)(struct hns_roce_dev *hr_dev); - int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, - u16 token, int event); - int (*poll_mbox_done)(struct hns_roce_dev *hr_dev, - unsigned int timeout); + int (*post_mbox)(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg); + int (*poll_mbox_done)(struct hns_roce_dev *hr_dev); bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy); int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index, const union ib_gid *gid, const struct ib_gid_attr *attr); @@ -873,10 +885,10 @@ struct hns_roce_hw { struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, dma_addr_t dma_handle); int (*set_hem)(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, int step_idx); + struct hns_roce_hem_table *table, int obj, u32 step_idx); int (*clear_hem)(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, - int step_idx); + u32 step_idx); int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state); @@ -1140,9 +1152,6 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); -int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index); unsigned long key_to_hw_index(u32 key); int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata); @@ -1180,7 +1189,6 @@ void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n); void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n); bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq, struct ib_cq *ib_cq); -enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state); void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq); void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 7cc45a332fc0215a6c6013d88d48abe9894eb239..a5f7b87757568e418a33850f2ab56f22bebdb77f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -488,7 +488,7 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_index *index) { struct ib_device *ibdev = &hr_dev->ib_dev; - int step_idx; + u32 step_idx; int ret = 0; if (index->inited & HEM_INDEX_L0) { @@ -618,7 +618,7 @@ static void 
clear_mhop_hem(struct hns_roce_dev *hr_dev, struct ib_device *ibdev = &hr_dev->ib_dev; u32 hop_num = mhop->hop_num; u32 chunk_ba_num; - int step_idx; + u32 step_idx; index->inited = HEM_INDEX_BUF; chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 587b46ddecfc0187cb47ee4a1b264c2bb431b5b0..b5ed2aee578b07a89f75ebdeb78b14778da1bb27 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -149,8 +149,7 @@ static void set_atomic_seg(const struct ib_send_wr *wr, aseg->cmp_data = 0; } - roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge); } static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, @@ -271,8 +270,7 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr, dseg += sizeof(struct hns_roce_v2_rc_send_wqe); if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) { - roce_set_bit(rc_sq_wqe->byte_20, - V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0); + hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE); for (i = 0; i < wr->num_sge; i++) { memcpy(dseg, ((void *)wr->sg_list[i].addr), @@ -280,17 +278,13 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr, dseg += wr->sg_list[i].length; } } else { - roce_set_bit(rc_sq_wqe->byte_20, - V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1); + hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE); ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len); if (ret) return ret; - roce_set_field(rc_sq_wqe->byte_16, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, - curr_idx - *sge_idx); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx); } *sge_idx = curr_idx; @@ -309,12 +303,10 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, int j = 0; int i; - roce_set_field(rc_sq_wqe->byte_20, - V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, - V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, - (*sge_ind) & (qp->sge.sge_cnt - 1)); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX, + (*sge_ind) & (qp->sge.sge_cnt - 1)); - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE, !!(wr->send_flags & IB_SEND_INLINE)); if (wr->send_flags & IB_SEND_INLINE) return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind); @@ -339,9 +331,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, valid_num_sge - HNS_ROCE_SGE_IN_WQE); } - roce_set_field(rc_sq_wqe->byte_16, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge); return 0; } @@ -412,8 +402,7 @@ static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe, ud_sq_wqe->immtdata = get_immtdata(wr); - roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M, - V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op)); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op)); return 0; } @@ -424,21 +413,15 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe, struct ib_device *ib_dev = ah->ibah.device; struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); - roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M, - V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport); - - roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M, - V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, 
ah->av.hop_limit); - roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M, - V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass); - roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M, - V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel); if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL)) return -EINVAL; - roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M, - V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl); ud_sq_wqe->sgid_index = ah->av.gid_index; @@ -448,10 +431,8 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe, if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) return 0; - roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S, - ah->av.vlan_en); - roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M, - V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id); return 0; } @@ -476,27 +457,19 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp, ud_sq_wqe->msg_len = cpu_to_le32(msg_len); - roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S, + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE, !!(wr->send_flags & IB_SEND_SIGNALED)); - - roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S, + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE, !!(wr->send_flags & IB_SEND_SOLICITED)); - roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M, - V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn); - - roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); - - roce_set_field(ud_sq_wqe->byte_20, - V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, - V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, - curr_idx & (qp->sge.sge_cnt - 1)); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX, + curr_idx & (qp->sge.sge_cnt - 1)); ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ? 
qp->qkey : ud_wr(wr)->remote_qkey); - roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M, - V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn); ret = fill_ud_av(ud_sq_wqe, ah); if (ret) @@ -516,8 +489,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp, dma_wmb(); *sge_idx = curr_idx; - roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S, - owner_bit); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit); return 0; } @@ -553,7 +525,7 @@ static int set_rc_opcode(struct hns_roce_dev *hr_dev, ret = -EOPNOTSUPP; break; case IB_WR_LOCAL_INV: - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1); + hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_SO); fallthrough; case IB_WR_SEND_WITH_INV: rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey); @@ -565,11 +537,11 @@ static int set_rc_opcode(struct hns_roce_dev *hr_dev, if (unlikely(ret)) return ret; - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op)); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op)); return ret; } + static inline int set_rc_wqe(struct hns_roce_qp *qp, const struct ib_send_wr *wr, void *wqe, unsigned int *sge_idx, @@ -590,13 +562,13 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp, if (WARN_ON(ret)) return ret; - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE, (wr->send_flags & IB_SEND_FENCE) ? 1 : 0); - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE, (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0); - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE, (wr->send_flags & IB_SEND_SIGNALED) ? 
1 : 0); if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || @@ -616,8 +588,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp, dma_wmb(); *sge_idx = curr_idx; - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S, - owner_bit); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit); return ret; } @@ -682,14 +653,11 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe; /* All kinds of DirectWQE have the same header field layout */ - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FLAG_S, 1); - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M, - V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl); - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M, - V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, - qp->sl >> HNS_ROCE_SL_SHIFT); - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M, - V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head); + hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H, + qp->sl >> HNS_ROCE_SL_SHIFT); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head); hns_roce_write512(hr_dev, wqe, qp->sq.db_reg); } @@ -1265,6 +1233,16 @@ static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev) return tail == priv->cmq.csq.head; } +static void update_cmdq_status(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + + if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT || + handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) + hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR; +} + static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, struct hns_roce_cmq_desc *desc, int num) { @@ -1296,7 +1274,8 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, } while (++timeout < priv->cmq.tx_timeout); if (hns_roce_cmq_csq_done(hr_dev)) { - for (ret = 0, i = 0; i < num; i++) { + ret = 0; + for (i = 0; i < num; i++) { /* check the result of hardware write back */ desc[i] = csq->desc[tail++]; if (tail == csq->desc_num) @@ -1318,6 +1297,8 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, csq->head, tail); csq->head = tail; + update_cmdq_status(hr_dev); + ret = -EAGAIN; } @@ -1332,6 +1313,9 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, bool busy; int ret; + if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) + return -EIO; + if (!v2_chk_mbox_is_avail(hr_dev, &busy)) return busy ? 
-EBUSY : 0; @@ -1344,17 +1328,17 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, return ret; } -static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj, - dma_addr_t base_addr, u16 op) +static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, + dma_addr_t base_addr, u8 cmd, unsigned long tag) { - struct hns_roce_cmd_mailbox *mbox = hns_roce_alloc_cmd_mailbox(hr_dev); + struct hns_roce_cmd_mailbox *mbox; int ret; + mbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mbox)) return PTR_ERR(mbox); - ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, obj, 0, op, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag); hns_roce_free_cmd_mailbox(hr_dev, mbox); return ret; } @@ -1499,7 +1483,7 @@ static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id) if (ret) continue; - if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) { + if (hr_reg_read(resp, FUNC_CLEAR_RST_FUN_DONE)) { if (vf_id == 0) hr_dev->is_reset = true; return; @@ -1510,7 +1494,7 @@ static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id) hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag); } -static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id) +static int hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id) { enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES; struct hns_roce_cmq_desc desc[2]; @@ -1521,17 +1505,29 @@ static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id) desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false); hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id); - hns_roce_cmq_send(hr_dev, desc, 2); + + return hns_roce_cmq_send(hr_dev, desc, 2); } static void hns_roce_function_clear(struct hns_roce_dev *hr_dev) { + int ret; int i; + if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) + return; + for (i = hr_dev->func_num - 1; i >= 0; i--) { __hns_roce_function_clear(hr_dev, i); - if (i != 0) - hns_roce_free_vf_resource(hr_dev, i); + + if (i == 0) + continue; + + ret = hns_roce_free_vf_resource(hr_dev, i); + if (ret) + ibdev_err(&hr_dev->ib_dev, + "failed to free vf resource, vf_id = %d, ret = %d.\n", + i, ret); } } @@ -1751,17 +1747,16 @@ static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, swt = (struct hns_roce_vf_switch *)desc.data; hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true); swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL); - roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M, - VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id); + hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id); ret = hns_roce_cmq_send(hr_dev, &desc, 1); if (ret) return ret; desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN); desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR); - roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1); - roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0); - roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1); + hr_reg_enable(swt, VF_SWITCH_ALW_LPBK); + hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK); + hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD); return hns_roce_cmq_send(hr_dev, &desc, 1); } @@ -1941,7 +1936,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM; caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM; caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM; - caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM; + caps->cqc_timer_bt_num = 
HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM; caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA; caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA; @@ -2219,7 +2214,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg); caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg); caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer); - caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer); caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges); caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges); caps->num_aeq_vectors = resp_a->num_aeq_vectors; @@ -2246,87 +2240,39 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) ctx_hop_num = resp_b->ctx_hop_num; pbl_hop_num = resp_b->pbl_hop_num; - caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds, - V2_QUERY_PF_CAPS_C_NUM_PDS_M, - V2_QUERY_PF_CAPS_C_NUM_PDS_S); - caps->flags = roce_get_field(resp_c->cap_flags_num_pds, - V2_QUERY_PF_CAPS_C_CAP_FLAGS_M, - V2_QUERY_PF_CAPS_C_CAP_FLAGS_S); + caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS); + + caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS); caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) << HNS_ROCE_CAP_FLAGS_EX_SHIFT; - caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs, - V2_QUERY_PF_CAPS_C_NUM_CQS_M, - V2_QUERY_PF_CAPS_C_NUM_CQS_S); - caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs, - V2_QUERY_PF_CAPS_C_MAX_GID_M, - V2_QUERY_PF_CAPS_C_MAX_GID_S); - - caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth, - V2_QUERY_PF_CAPS_C_CQ_DEPTH_M, - V2_QUERY_PF_CAPS_C_CQ_DEPTH_S); - caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws, - V2_QUERY_PF_CAPS_C_NUM_MRWS_M, - V2_QUERY_PF_CAPS_C_NUM_MRWS_S); - caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps, - V2_QUERY_PF_CAPS_C_NUM_QPS_M, - V2_QUERY_PF_CAPS_C_NUM_QPS_S); - caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps, - V2_QUERY_PF_CAPS_C_MAX_ORD_M, - V2_QUERY_PF_CAPS_C_MAX_ORD_S); + caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS); + caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID); + caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH); + caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS); + caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS); + caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD); caps->max_qp_dest_rdma = caps->max_qp_init_rdma; caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth); - caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_NUM_SRQS_M, - V2_QUERY_PF_CAPS_D_NUM_SRQS_S); - caps->cong_type = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_CONG_TYPE_M, - V2_QUERY_PF_CAPS_D_CONG_TYPE_S); - caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); - caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth, - V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M, - V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S); - caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth, - V2_QUERY_PF_CAPS_D_NUM_CEQS_M, - V2_QUERY_PF_CAPS_D_NUM_CEQS_S); - - caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth, - V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M, - V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S); - caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth, - V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M, - V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S); - caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth, - V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M, - V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S); - caps->reserved_pds = 
roce_get_field(resp_d->num_uars_rsv_pds, - V2_QUERY_PF_CAPS_D_RSV_PDS_M, - V2_QUERY_PF_CAPS_D_RSV_PDS_S); - caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds, - V2_QUERY_PF_CAPS_D_NUM_UARS_M, - V2_QUERY_PF_CAPS_D_NUM_UARS_S); - caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps, - V2_QUERY_PF_CAPS_D_RSV_QPS_M, - V2_QUERY_PF_CAPS_D_RSV_QPS_S); - caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps, - V2_QUERY_PF_CAPS_D_RSV_UARS_M, - V2_QUERY_PF_CAPS_D_RSV_UARS_S); - caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws, - V2_QUERY_PF_CAPS_E_RSV_MRWS_M, - V2_QUERY_PF_CAPS_E_RSV_MRWS_S); - caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws, - V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M, - V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S); - caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs, - V2_QUERY_PF_CAPS_E_RSV_CQS_M, - V2_QUERY_PF_CAPS_E_RSV_CQS_S); - caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs, - V2_QUERY_PF_CAPS_E_RSV_SRQS_M, - V2_QUERY_PF_CAPS_E_RSV_SRQS_S); - caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey, - V2_QUERY_PF_CAPS_E_RSV_LKEYS_M, - V2_QUERY_PF_CAPS_E_RSV_LKEYS_S); + caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS); + caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE); + caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); + caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH); + caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS); + caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH); + caps->default_aeq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST); + caps->default_ceq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST); + caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS); + caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS); + caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS); + caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS); + + caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS); + caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT); + caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS); + caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS); + caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS); caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt); caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period); caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt); @@ -2341,15 +2287,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) caps->cqe_hop_num = pbl_hop_num; caps->srqwqe_hop_num = pbl_hop_num; caps->idx_hop_num = pbl_hop_num; - caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M, - V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S); - caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M, - V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S); - caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M, - V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S); + caps->wqe_sq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_SQWQE_HOP_NUM); + caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM); + caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM); return 0; } @@ -2756,21 +2696,21 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) free_dip_list(hr_dev); } -static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 
op_modifier, - u16 op, u16 token, int event) +static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { struct hns_roce_cmq_desc desc; struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data; hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false); - mb->in_param_l = cpu_to_le32(in_param); - mb->in_param_h = cpu_to_le32(in_param >> 32); - mb->out_param_l = cpu_to_le32(out_param); - mb->out_param_h = cpu_to_le32(out_param >> 32); - mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op); - mb->token_event_en = cpu_to_le32(event << 16 | token); + mb->in_param_l = cpu_to_le32(mbox_msg->in_param); + mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32); + mb->out_param_l = cpu_to_le32(mbox_msg->out_param); + mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32); + mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd); + mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 | + mbox_msg->token); return hns_roce_cmq_send(hr_dev, &desc, 1); } @@ -2788,6 +2728,9 @@ static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout, mb_st = (struct hns_roce_mbox_status *)desc.data; end = msecs_to_jiffies(timeout) + jiffies; while (v2_chk_mbox_is_avail(hr_dev, &busy)) { + if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) + return -EIO; + status = 0; hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true); @@ -2823,9 +2766,8 @@ static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout, return ret; } -static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 op_modifier, - u16 op, u16 token, int event) +static int v2_post_mbox(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { u8 status = 0; int ret; @@ -2841,8 +2783,7 @@ static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, } /* Post new message to mbox */ - ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, token, event); + ret = hns_roce_mbox_post(hr_dev, mbox_msg); if (ret) dev_err_ratelimited(hr_dev->dev, "failed to post mailbox, ret = %d.\n", ret); @@ -2850,12 +2791,13 @@ static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, return ret; } -static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev, unsigned int timeout) +static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev) { u8 status = 0; int ret; - ret = v2_wait_mbox_complete(hr_dev, timeout, &status); + ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS, + &status); if (!ret) { if (status != MB_ST_COMPLETE_SUCC) return -EBUSY; @@ -2892,10 +2834,8 @@ static int config_sgid_table(struct hns_roce_dev *hr_dev, hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false); - roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M, - CFG_SGID_TB_TABLE_IDX_S, gid_index); - roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M, - CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type); + hr_reg_write(sgid_tb, CFG_SGID_TB_TABLE_IDX, gid_index); + hr_reg_write(sgid_tb, CFG_SGID_TB_VF_SGID_TYPE, sgid_type); copy_gid(&sgid_tb->vf_sgid_l, gid); @@ -2930,19 +2870,14 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev, copy_gid(&tb_a->vf_sgid_l, gid); - roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_SGID_TYPE_M, - CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type); - roce_set_bit(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_EN_S, - vlan_id < VLAN_CFI_MASK); - roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_ID_M, - 
CFG_GMV_TB_VF_VLAN_ID_S, vlan_id); + hr_reg_write(tb_a, GMV_TB_A_VF_SGID_TYPE, sgid_type); + hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_EN, vlan_id < VLAN_CFI_MASK); + hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_ID, vlan_id); tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac); - roce_set_field(tb_b->vf_smac_h, CFG_GMV_TB_SMAC_H_M, - CFG_GMV_TB_SMAC_H_S, *(u16 *)&mac[4]); - roce_set_field(tb_b->table_idx_rsv, CFG_GMV_TB_SGID_IDX_M, - CFG_GMV_TB_SGID_IDX_S, gid_index); + hr_reg_write(tb_b, GMV_TB_B_SMAC_H, *(u16 *)&mac[4]); + hr_reg_write(tb_b, GMV_TB_B_SGID_IDX, gid_index); return hns_roce_cmq_send(hr_dev, desc, 2); } @@ -2991,10 +2926,8 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, reg_smac_l = *(u32 *)(&addr[0]); reg_smac_h = *(u16 *)(&addr[4]); - roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M, - CFG_SMAC_TB_IDX_S, phy_port); - roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M, - CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h); + hr_reg_write(smac_tb, CFG_SMAC_TB_IDX, phy_port); + hr_reg_write(smac_tb, CFG_SMAC_TB_VF_SMAC_H, reg_smac_h); smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l); return hns_roce_cmq_send(hr_dev, &desc, 1); @@ -3023,21 +2956,15 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, mpt_entry->pbl_size = cpu_to_le32(mr->npages); mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3); - roce_set_field(mpt_entry->byte_48_mode_ba, - V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S, - upper_32_bits(pbl_ba >> 3)); + hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); - roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M, - V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0])); + hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0])); mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); - roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M, - V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1])); - roce_set_field(mpt_entry->byte_64_buf_pa1, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); + hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1])); + hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, + to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); return 0; } @@ -3046,7 +2973,6 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf, struct hns_roce_mr *mr) { struct hns_roce_v2_mpt_entry *mpt_entry; - int ret; mpt_entry = mb_buf; memset(mpt_entry, 0, sizeof(*mpt_entry)); @@ -3085,9 +3011,7 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD); - ret = set_mtpt_pbl(hr_dev, mpt_entry, mr); - - return ret; + return set_mtpt_pbl(hr_dev, mpt_entry, mr); } static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, @@ -3098,24 +3022,19 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, u32 mr_access_flags = mr->access; int ret = 0; - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, - V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID); - - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, - V2_MPT_BYTE_4_PD_S, mr->pd); + hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID); + hr_reg_write(mpt_entry, MPT_PD, mr->pd); if (flags & IB_MR_REREG_ACCESS) { - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, - V2_MPT_BYTE_8_BIND_EN_S, + hr_reg_write(mpt_entry, MPT_BIND_EN, (mr_access_flags & IB_ACCESS_MW_BIND ? 
1 : 0)); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, - V2_MPT_BYTE_8_ATOMIC_EN_S, + hr_reg_write(mpt_entry, MPT_ATOMIC_EN, mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S, + hr_reg_write(mpt_entry, MPT_RR_EN, mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S, + hr_reg_write(mpt_entry, MPT_RW_EN, mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, + hr_reg_write(mpt_entry, MPT_LW_EN, mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0); } @@ -3146,37 +3065,28 @@ static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev, return -ENOBUFS; } - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, - V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M, - V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1); - roce_set_field(mpt_entry->byte_4_pd_hop_st, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_M, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, - V2_MPT_BYTE_4_PD_S, mr->pd); + hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); + hr_reg_write(mpt_entry, MPT_PD, mr->pd); + + hr_reg_enable(mpt_entry, MPT_RA_EN); + hr_reg_enable(mpt_entry, MPT_R_INV_EN); + hr_reg_enable(mpt_entry, MPT_L_INV_EN); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); + hr_reg_enable(mpt_entry, MPT_FRE); + hr_reg_clear(mpt_entry, MPT_MR_MW); + hr_reg_enable(mpt_entry, MPT_BPD); + hr_reg_clear(mpt_entry, MPT_PA); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1); + hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1); + hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, + to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); + hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, + to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); mpt_entry->pbl_size = cpu_to_le32(mr->npages); mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3)); - roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M, - V2_MPT_BYTE_48_PBL_BA_H_S, - upper_32_bits(pbl_ba >> 3)); - - roce_set_field(mpt_entry->byte_64_buf_pa1, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); + hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); return 0; } @@ -3188,36 +3098,29 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw) mpt_entry = mb_buf; memset(mpt_entry, 0, sizeof(*mpt_entry)); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, - V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, - V2_MPT_BYTE_4_PD_S, mw->pdn); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M, - V2_MPT_BYTE_4_PBL_HOP_NUM_S, - mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 
0 : - mw->pbl_hop_num); - roce_set_field(mpt_entry->byte_4_pd_hop_st, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_M, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, - mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET); - - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1); - - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S, - mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1); + hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); + hr_reg_write(mpt_entry, MPT_PD, mw->pdn); - roce_set_field(mpt_entry->byte_64_buf_pa1, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, - mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + hr_reg_enable(mpt_entry, MPT_R_INV_EN); + hr_reg_enable(mpt_entry, MPT_L_INV_EN); + hr_reg_enable(mpt_entry, MPT_LW_EN); + + hr_reg_enable(mpt_entry, MPT_MR_MW); + hr_reg_enable(mpt_entry, MPT_BPD); + hr_reg_clear(mpt_entry, MPT_PA); + hr_reg_write(mpt_entry, MPT_BQP, + mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1); mpt_entry->lkey = cpu_to_le32(mw->rkey); + hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, + mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : + mw->pbl_hop_num); + hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, + mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET); + hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, + mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + return 0; } @@ -3794,38 +3697,38 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries, } static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type, - int step_idx, u16 *mbox_op) + u32 step_idx, u8 *mbox_cmd) { - u16 op; + u8 cmd; switch (type) { case HEM_TYPE_QPC: - op = HNS_ROCE_CMD_WRITE_QPC_BT0; + cmd = HNS_ROCE_CMD_WRITE_QPC_BT0; break; case HEM_TYPE_MTPT: - op = HNS_ROCE_CMD_WRITE_MPT_BT0; + cmd = HNS_ROCE_CMD_WRITE_MPT_BT0; break; case HEM_TYPE_CQC: - op = HNS_ROCE_CMD_WRITE_CQC_BT0; + cmd = HNS_ROCE_CMD_WRITE_CQC_BT0; break; case HEM_TYPE_SRQC: - op = HNS_ROCE_CMD_WRITE_SRQC_BT0; + cmd = HNS_ROCE_CMD_WRITE_SRQC_BT0; break; case HEM_TYPE_SCCC: - op = HNS_ROCE_CMD_WRITE_SCCC_BT0; + cmd = HNS_ROCE_CMD_WRITE_SCCC_BT0; break; case HEM_TYPE_QPC_TIMER: - op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; + cmd = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; break; case HEM_TYPE_CQC_TIMER: - op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; + cmd = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; break; default: dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type); return -EINVAL; } - *mbox_op = op + step_idx; + *mbox_cmd = cmd + step_idx; return 0; } @@ -3848,10 +3751,10 @@ static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj, } static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, - dma_addr_t base_addr, u32 hem_type, int step_idx) + dma_addr_t base_addr, u32 hem_type, u32 step_idx) { int ret; - u16 op; + u8 cmd; if (unlikely(hem_type == HEM_TYPE_GMV)) return config_gmv_ba_to_hw(hr_dev, obj, base_addr); @@ -3859,16 +3762,16 @@ static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx)) return 0; - ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &op); + ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd); if (ret < 0) return ret; - return config_hem_ba_to_hw(hr_dev, obj, base_addr, op); + return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj); } 
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, - int step_idx) + u32 step_idx) { struct hns_roce_hem_iter iter; struct hns_roce_hem_mhop mhop; @@ -3926,29 +3829,29 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, } static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, - int step_idx) + struct hns_roce_hem_table *table, + int tag, u32 step_idx) { - struct device *dev = hr_dev->dev; struct hns_roce_cmd_mailbox *mailbox; + struct device *dev = hr_dev->dev; + u8 cmd = 0xff; int ret; - u16 op = 0xff; if (!hns_roce_check_whether_mhop(hr_dev, table->type)) return 0; switch (table->type) { case HEM_TYPE_QPC: - op = HNS_ROCE_CMD_DESTROY_QPC_BT0; + cmd = HNS_ROCE_CMD_DESTROY_QPC_BT0; break; case HEM_TYPE_MTPT: - op = HNS_ROCE_CMD_DESTROY_MPT_BT0; + cmd = HNS_ROCE_CMD_DESTROY_MPT_BT0; break; case HEM_TYPE_CQC: - op = HNS_ROCE_CMD_DESTROY_CQC_BT0; + cmd = HNS_ROCE_CMD_DESTROY_CQC_BT0; break; case HEM_TYPE_SRQC: - op = HNS_ROCE_CMD_DESTROY_SRQC_BT0; + cmd = HNS_ROCE_CMD_DESTROY_SRQC_BT0; break; case HEM_TYPE_SCCC: case HEM_TYPE_QPC_TIMER: @@ -3961,15 +3864,13 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, return 0; } - op += step_idx; + cmd += step_idx; mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - /* configure the tag and op */ - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag); hns_roce_free_cmd_mailbox(hr_dev, mailbox); return ret; @@ -3993,9 +3894,8 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev, memcpy(mailbox->buf, context, qpc_size); memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size); - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0, - HNS_ROCE_CMD_MODIFY_QPC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, + HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); @@ -4654,9 +4554,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, if (ret) return ret; - if (gid_attr) - is_udp = (gid_attr->gid_type == - IB_GID_TYPE_ROCE_UDP_ENCAP); + is_udp = (gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP); } /* Only HIP08 needs to set the vlan_en bits in QPC */ @@ -5040,9 +4938,8 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0, - HNS_ROCE_CMD_QUERY_QPC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC, + hr_qp->qpn); if (ret) goto out; @@ -5408,9 +5305,8 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit); hr_reg_clear(srqc_mask, SRQC_LIMIT_WL); - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0, - HNS_ROCE_CMD_MODIFY_SRQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, + HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) { ibdev_err(&hr_dev->ib_dev, @@ -5436,9 +5332,8 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) return PTR_ERR(mailbox); srq_context = mailbox->buf; - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0, - HNS_ROCE_CMD_QUERY_SRQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, + 
HNS_ROCE_CMD_QUERY_SRQC, srq->srqn); if (ret) { ibdev_err(&hr_dev->ib_dev, "failed to process cmd of querying SRQ, ret = %d.\n", @@ -5478,9 +5373,8 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period); hr_reg_clear(cqc_mask, CQC_CQ_PERIOD); - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1, - HNS_ROCE_CMD_MODIFY_CQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, + HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) ibdev_err(&hr_dev->ib_dev, @@ -5603,7 +5497,7 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq) (eq->cons_index & (eq->entries - 1)) * eq->eqe_size); - return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^ + return (hr_reg_read(aeqe, AEQE_OWNER) ^ !!(eq->cons_index & eq->entries)) ? aeqe : NULL; } @@ -5623,15 +5517,9 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, */ dma_rmb(); - event_type = roce_get_field(aeqe->asyn, - HNS_ROCE_V2_AEQE_EVENT_TYPE_M, - HNS_ROCE_V2_AEQE_EVENT_TYPE_S); - sub_type = roce_get_field(aeqe->asyn, - HNS_ROCE_V2_AEQE_SUB_TYPE_M, - HNS_ROCE_V2_AEQE_SUB_TYPE_S); - queue_num = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M, - HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S); + event_type = hr_reg_read(aeqe, AEQE_EVENT_TYPE); + sub_type = hr_reg_read(aeqe, AEQE_SUB_TYPE); + queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM); switch (event_type) { case HNS_ROCE_EVENT_TYPE_PATH_MIG: @@ -5691,8 +5579,8 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq) (eq->cons_index & (eq->entries - 1)) * eq->eqe_size); - return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^ - (!!(eq->cons_index & eq->entries)) ? ceqe : NULL; + return (hr_reg_read(ceqe, CEQE_OWNER) ^ + !!(eq->cons_index & eq->entries)) ? 
ceqe : NULL; } static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, @@ -5708,8 +5596,7 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, */ dma_rmb(); - cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M, - HNS_ROCE_V2_CEQE_COMP_CQN_S); + cqn = hr_reg_read(ceqe, CEQE_CQN); hns_roce_cq_completion(hr_dev, cqn); @@ -5807,15 +5694,14 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn) { struct device *dev = hr_dev->dev; int ret; + u8 cmd; if (eqn < hr_dev->caps.num_comp_vectors) - ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, - 0, HNS_ROCE_CMD_DESTROY_CEQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + cmd = HNS_ROCE_CMD_DESTROY_CEQC; else - ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, - 0, HNS_ROCE_CMD_DESTROY_AEQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + cmd = HNS_ROCE_CMD_DESTROY_AEQC; + + ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M); if (ret) dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn); } @@ -5912,16 +5798,15 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) } static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq, - unsigned int eq_cmd) + struct hns_roce_eq *eq, u8 eq_cmd) { struct hns_roce_cmd_mailbox *mailbox; int ret; /* Allocate mailbox memory */ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR_OR_NULL(mailbox)) - return -ENOMEM; + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); ret = alloc_eq_buf(hr_dev, eq); if (ret) @@ -5931,8 +5816,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, if (ret) goto err_cmd_mbox; - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0, - eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn); if (ret) { dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n"); goto err_cmd_mbox; @@ -6043,14 +5927,14 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; struct device *dev = hr_dev->dev; struct hns_roce_eq *eq; - unsigned int eq_cmd; - int irq_num; - int eq_num; int other_num; int comp_num; int aeq_num; - int i; + int irq_num; + int eq_num; + u8 eq_cmd; int ret; + int i; other_num = hr_dev->caps.num_other_vectors; comp_num = hr_dev->caps.num_comp_vectors; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index e9a73c34389bd846c65e4aaff129eff81c1c910d..a3a2524a5e25e911ba786527a863910a9c9ea1bf 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -41,7 +41,7 @@ #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 #define HNS_ROCE_V2_MAX_SRQ_SGE 64 #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 -#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100 +#define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100 #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64 @@ -291,33 +291,6 @@ struct hns_roce_v2_cq_context { #define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0 #define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0 -#define V2_CQC_BYTE_4_ARM_ST_S 6 -#define V2_CQC_BYTE_4_ARM_ST_M GENMASK(7, 6) - -#define V2_CQC_BYTE_4_CEQN_S 15 -#define V2_CQC_BYTE_4_CEQN_M GENMASK(23, 15) - -#define V2_CQC_BYTE_8_CQN_S 0 -#define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0) - -#define V2_CQC_BYTE_16_CQE_HOP_NUM_S 30 -#define V2_CQC_BYTE_16_CQE_HOP_NUM_M GENMASK(31, 30) - -#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S 0 -#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M 
GENMASK(23, 0) - -#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S 0 -#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M GENMASK(23, 0) - -#define V2_CQC_BYTE_52_CQE_CNT_S 0 -#define V2_CQC_BYTE_52_CQE_CNT_M GENMASK(23, 0) - -#define V2_CQC_BYTE_56_CQ_MAX_CNT_S 0 -#define V2_CQC_BYTE_56_CQ_MAX_CNT_M GENMASK(15, 0) - -#define V2_CQC_BYTE_56_CQ_PERIOD_S 16 -#define V2_CQC_BYTE_56_CQ_PERIOD_M GENMASK(31, 16) - #define CQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_cq_context, h, l) #define CQC_CQ_ST CQC_FIELD_LOC(1, 0) @@ -776,12 +749,15 @@ struct hns_roce_v2_mpt_entry { #define MPT_LKEY MPT_FIELD_LOC(223, 192) #define MPT_VA MPT_FIELD_LOC(287, 224) #define MPT_PBL_SIZE MPT_FIELD_LOC(319, 288) -#define MPT_PBL_BA MPT_FIELD_LOC(380, 320) +#define MPT_PBL_BA_L MPT_FIELD_LOC(351, 320) +#define MPT_PBL_BA_H MPT_FIELD_LOC(380, 352) #define MPT_BLK_MODE MPT_FIELD_LOC(381, 381) #define MPT_RSV0 MPT_FIELD_LOC(383, 382) -#define MPT_PA0 MPT_FIELD_LOC(441, 384) +#define MPT_PA0_L MPT_FIELD_LOC(415, 384) +#define MPT_PA0_H MPT_FIELD_LOC(441, 416) #define MPT_BOUND_VA MPT_FIELD_LOC(447, 442) -#define MPT_PA1 MPT_FIELD_LOC(505, 448) +#define MPT_PA1_L MPT_FIELD_LOC(479, 448) +#define MPT_PA1_H MPT_FIELD_LOC(505, 480) #define MPT_PERSIST_EN MPT_FIELD_LOC(506, 506) #define MPT_RSV2 MPT_FIELD_LOC(507, 507) #define MPT_PBL_BUF_PG_SZ MPT_FIELD_LOC(511, 508) @@ -887,48 +863,24 @@ struct hns_roce_v2_ud_send_wqe { u8 dgid[GID_LEN_V2]; }; -#define V2_UD_SEND_WQE_BYTE_4_OPCODE_S 0 -#define V2_UD_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0) - -#define V2_UD_SEND_WQE_BYTE_4_OWNER_S 7 - -#define V2_UD_SEND_WQE_BYTE_4_CQE_S 8 - -#define V2_UD_SEND_WQE_BYTE_4_SE_S 11 - -#define V2_UD_SEND_WQE_BYTE_16_PD_S 0 -#define V2_UD_SEND_WQE_BYTE_16_PD_M GENMASK(23, 0) - -#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S 24 -#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24) - -#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0 -#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0) - -#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_S 16 -#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_M GENMASK(31, 16) - -#define V2_UD_SEND_WQE_BYTE_32_DQPN_S 0 -#define V2_UD_SEND_WQE_BYTE_32_DQPN_M GENMASK(23, 0) - -#define V2_UD_SEND_WQE_BYTE_36_VLAN_S 0 -#define V2_UD_SEND_WQE_BYTE_36_VLAN_M GENMASK(15, 0) - -#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S 16 -#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M GENMASK(23, 16) - -#define V2_UD_SEND_WQE_BYTE_36_TCLASS_S 24 -#define V2_UD_SEND_WQE_BYTE_36_TCLASS_M GENMASK(31, 24) - -#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S 0 -#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M GENMASK(19, 0) - -#define V2_UD_SEND_WQE_BYTE_40_SL_S 20 -#define V2_UD_SEND_WQE_BYTE_40_SL_M GENMASK(23, 20) - -#define V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30 - -#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31 +#define UD_SEND_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_ud_send_wqe, h, l) + +#define UD_SEND_WQE_OPCODE UD_SEND_WQE_FIELD_LOC(4, 0) +#define UD_SEND_WQE_OWNER UD_SEND_WQE_FIELD_LOC(7, 7) +#define UD_SEND_WQE_CQE UD_SEND_WQE_FIELD_LOC(8, 8) +#define UD_SEND_WQE_SE UD_SEND_WQE_FIELD_LOC(11, 11) +#define UD_SEND_WQE_PD UD_SEND_WQE_FIELD_LOC(119, 96) +#define UD_SEND_WQE_SGE_NUM UD_SEND_WQE_FIELD_LOC(127, 120) +#define UD_SEND_WQE_MSG_START_SGE_IDX UD_SEND_WQE_FIELD_LOC(151, 128) +#define UD_SEND_WQE_UDPSPN UD_SEND_WQE_FIELD_LOC(191, 176) +#define UD_SEND_WQE_DQPN UD_SEND_WQE_FIELD_LOC(247, 224) +#define UD_SEND_WQE_VLAN UD_SEND_WQE_FIELD_LOC(271, 256) +#define UD_SEND_WQE_HOPLIMIT UD_SEND_WQE_FIELD_LOC(279, 272) +#define UD_SEND_WQE_TCLASS UD_SEND_WQE_FIELD_LOC(287, 
280) +#define UD_SEND_WQE_FLOW_LABEL UD_SEND_WQE_FIELD_LOC(307, 288) +#define UD_SEND_WQE_SL UD_SEND_WQE_FIELD_LOC(311, 308) +#define UD_SEND_WQE_VLAN_EN UD_SEND_WQE_FIELD_LOC(318, 318) +#define UD_SEND_WQE_LBI UD_SEND_WQE_FIELD_LOC(319, 319) struct hns_roce_v2_rc_send_wqe { __le32 byte_4; @@ -943,42 +895,23 @@ struct hns_roce_v2_rc_send_wqe { __le64 va; }; -#define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0 -#define V2_RC_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0) - -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S 5 -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M GENMASK(6, 5) - -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S 13 -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M GENMASK(14, 13) - -#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S 15 -#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M GENMASK(30, 15) - -#define V2_RC_SEND_WQE_BYTE_4_OWNER_S 7 - -#define V2_RC_SEND_WQE_BYTE_4_CQE_S 8 - -#define V2_RC_SEND_WQE_BYTE_4_FENCE_S 9 - -#define V2_RC_SEND_WQE_BYTE_4_SO_S 10 - -#define V2_RC_SEND_WQE_BYTE_4_SE_S 11 - -#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12 - -#define V2_RC_SEND_WQE_BYTE_4_FLAG_S 31 - -#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0 -#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0) - -#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S 24 -#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24) - -#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0 -#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0) - -#define V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S 31 +#define RC_SEND_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_rc_send_wqe, h, l) + +#define RC_SEND_WQE_OPCODE RC_SEND_WQE_FIELD_LOC(4, 0) +#define RC_SEND_WQE_DB_SL_L RC_SEND_WQE_FIELD_LOC(6, 5) +#define RC_SEND_WQE_DB_SL_H RC_SEND_WQE_FIELD_LOC(14, 13) +#define RC_SEND_WQE_OWNER RC_SEND_WQE_FIELD_LOC(7, 7) +#define RC_SEND_WQE_CQE RC_SEND_WQE_FIELD_LOC(8, 8) +#define RC_SEND_WQE_FENCE RC_SEND_WQE_FIELD_LOC(9, 9) +#define RC_SEND_WQE_SO RC_SEND_WQE_FIELD_LOC(10, 10) +#define RC_SEND_WQE_SE RC_SEND_WQE_FIELD_LOC(11, 11) +#define RC_SEND_WQE_INLINE RC_SEND_WQE_FIELD_LOC(12, 12) +#define RC_SEND_WQE_WQE_INDEX RC_SEND_WQE_FIELD_LOC(30, 15) +#define RC_SEND_WQE_FLAG RC_SEND_WQE_FIELD_LOC(31, 31) +#define RC_SEND_WQE_XRC_SRQN RC_SEND_WQE_FIELD_LOC(119, 96) +#define RC_SEND_WQE_SGE_NUM RC_SEND_WQE_FIELD_LOC(127, 120) +#define RC_SEND_WQE_MSG_START_SGE_IDX RC_SEND_WQE_FIELD_LOC(151, 128) +#define RC_SEND_WQE_INL_TYPE RC_SEND_WQE_FIELD_LOC(159, 159) struct hns_roce_wqe_frmr_seg { __le32 pbl_size; @@ -1021,7 +954,10 @@ struct hns_roce_func_clear { __le32 rsv[4]; }; -#define FUNC_CLEAR_RST_FUN_DONE_S 0 +#define FUNC_CLEAR_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_func_clear, h, l) + +#define FUNC_CLEAR_RST_FUN_DONE FUNC_CLEAR_FIELD_LOC(32, 32) + /* Each physical function manages up to 248 virtual functions, it takes up to * 100ms for each function to execute clear. If an abnormal reset occurs, it is * executed twice at most, so it takes up to 249 * 2 * 100ms. 
@@ -1100,12 +1036,12 @@ struct hns_roce_vf_switch { __le32 resv3; }; -#define VF_SWITCH_DATA_FUN_ID_VF_ID_S 3 -#define VF_SWITCH_DATA_FUN_ID_VF_ID_M GENMASK(10, 3) +#define VF_SWITCH_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_vf_switch, h, l) -#define VF_SWITCH_DATA_CFG_ALW_LPBK_S 1 -#define VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S 2 -#define VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S 3 +#define VF_SWITCH_VF_ID VF_SWITCH_FIELD_LOC(42, 35) +#define VF_SWITCH_ALW_LPBK VF_SWITCH_FIELD_LOC(65, 65) +#define VF_SWITCH_ALW_LCL_LPBK VF_SWITCH_FIELD_LOC(66, 66) +#define VF_SWITCH_ALW_DST_OVRD VF_SWITCH_FIELD_LOC(67, 67) struct hns_roce_post_mbox { __le32 in_param_l; @@ -1168,11 +1104,10 @@ struct hns_roce_cfg_sgid_tb { __le32 vf_sgid_type_rsv; }; -#define CFG_SGID_TB_TABLE_IDX_S 0 -#define CFG_SGID_TB_TABLE_IDX_M GENMASK(7, 0) +#define SGID_TB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_sgid_tb, h, l) -#define CFG_SGID_TB_VF_SGID_TYPE_S 0 -#define CFG_SGID_TB_VF_SGID_TYPE_M GENMASK(1, 0) +#define CFG_SGID_TB_TABLE_IDX SGID_TB_FIELD_LOC(7, 0) +#define CFG_SGID_TB_VF_SGID_TYPE SGID_TB_FIELD_LOC(161, 160) struct hns_roce_cfg_smac_tb { __le32 tb_idx_rsv; @@ -1180,11 +1115,11 @@ struct hns_roce_cfg_smac_tb { __le32 vf_smac_h_rsv; __le32 rsv[3]; }; -#define CFG_SMAC_TB_IDX_S 0 -#define CFG_SMAC_TB_IDX_M GENMASK(7, 0) -#define CFG_SMAC_TB_VF_SMAC_H_S 0 -#define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0) +#define SMAC_TB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_smac_tb, h, l) + +#define CFG_SMAC_TB_IDX SMAC_TB_FIELD_LOC(7, 0) +#define CFG_SMAC_TB_VF_SMAC_H SMAC_TB_FIELD_LOC(79, 64) struct hns_roce_cfg_gmv_tb_a { __le32 vf_sgid_l; @@ -1195,16 +1130,11 @@ struct hns_roce_cfg_gmv_tb_a { __le32 resv; }; -#define CFG_GMV_TB_SGID_IDX_S 0 -#define CFG_GMV_TB_SGID_IDX_M GENMASK(7, 0) - -#define CFG_GMV_TB_VF_SGID_TYPE_S 0 -#define CFG_GMV_TB_VF_SGID_TYPE_M GENMASK(1, 0) +#define GMV_TB_A_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_gmv_tb_a, h, l) -#define CFG_GMV_TB_VF_VLAN_EN_S 2 - -#define CFG_GMV_TB_VF_VLAN_ID_S 16 -#define CFG_GMV_TB_VF_VLAN_ID_M GENMASK(27, 16) +#define GMV_TB_A_VF_SGID_TYPE GMV_TB_A_FIELD_LOC(129, 128) +#define GMV_TB_A_VF_VLAN_EN GMV_TB_A_FIELD_LOC(130, 130) +#define GMV_TB_A_VF_VLAN_ID GMV_TB_A_FIELD_LOC(155, 144) struct hns_roce_cfg_gmv_tb_b { __le32 vf_smac_l; @@ -1213,8 +1143,10 @@ struct hns_roce_cfg_gmv_tb_b { __le32 resv[3]; }; -#define CFG_GMV_TB_SMAC_H_S 0 -#define CFG_GMV_TB_SMAC_H_M GENMASK(15, 0) +#define GMV_TB_B_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_gmv_tb_b, h, l) + +#define GMV_TB_B_SMAC_H GMV_TB_B_FIELD_LOC(47, 32) +#define GMV_TB_B_SGID_IDX GMV_TB_B_FIELD_LOC(71, 64) #define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5 struct hns_roce_query_pf_caps_a { @@ -1266,29 +1198,17 @@ struct hns_roce_query_pf_caps_c { __le16 rq_depth; }; -#define V2_QUERY_PF_CAPS_C_NUM_PDS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_PDS_M GENMASK(19, 0) +#define PF_CAPS_C_FIELD_LOC(h, l) \ + FIELD_LOC(struct hns_roce_query_pf_caps_c, h, l) -#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_S 20 -#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_M GENMASK(31, 20) - -#define V2_QUERY_PF_CAPS_C_NUM_CQS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_CQS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_C_MAX_GID_S 20 -#define V2_QUERY_PF_CAPS_C_MAX_GID_M GENMASK(28, 20) - -#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_S 0 -#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_M GENMASK(22, 0) - -#define V2_QUERY_PF_CAPS_C_NUM_MRWS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_MRWS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_C_NUM_QPS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_QPS_M GENMASK(19, 0) - -#define 
V2_QUERY_PF_CAPS_C_MAX_ORD_S 20 -#define V2_QUERY_PF_CAPS_C_MAX_ORD_M GENMASK(27, 20) +#define PF_CAPS_C_NUM_PDS PF_CAPS_C_FIELD_LOC(19, 0) +#define PF_CAPS_C_CAP_FLAGS PF_CAPS_C_FIELD_LOC(31, 20) +#define PF_CAPS_C_NUM_CQS PF_CAPS_C_FIELD_LOC(51, 32) +#define PF_CAPS_C_MAX_GID PF_CAPS_C_FIELD_LOC(60, 52) +#define PF_CAPS_C_CQ_DEPTH PF_CAPS_C_FIELD_LOC(86, 64) +#define PF_CAPS_C_NUM_MRWS PF_CAPS_C_FIELD_LOC(115, 96) +#define PF_CAPS_C_NUM_QPS PF_CAPS_C_FIELD_LOC(147, 128) +#define PF_CAPS_C_MAX_ORD PF_CAPS_C_FIELD_LOC(155, 148) struct hns_roce_query_pf_caps_d { __le32 wq_hop_num_max_srqs; @@ -1299,20 +1219,26 @@ struct hns_roce_query_pf_caps_d { __le32 num_uars_rsv_pds; __le32 rsv_uars_rsv_qps; }; -#define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0 -#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20 -#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20) - -#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S 22 -#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M GENMASK(23, 22) - -#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S 24 -#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M GENMASK(25, 24) -#define V2_QUERY_PF_CAPS_D_CONG_TYPE_S 26 -#define V2_QUERY_PF_CAPS_D_CONG_TYPE_M GENMASK(29, 26) +#define PF_CAPS_D_FIELD_LOC(h, l) \ + FIELD_LOC(struct hns_roce_query_pf_caps_d, h, l) + +#define PF_CAPS_D_NUM_SRQS PF_CAPS_D_FIELD_LOC(19, 0) +#define PF_CAPS_D_RQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(21, 20) +#define PF_CAPS_D_EX_SGE_HOP_NUM PF_CAPS_D_FIELD_LOC(23, 22) +#define PF_CAPS_D_SQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(25, 24) +#define PF_CAPS_D_CONG_TYPE PF_CAPS_D_FIELD_LOC(29, 26) +#define PF_CAPS_D_CEQ_DEPTH PF_CAPS_D_FIELD_LOC(85, 64) +#define PF_CAPS_D_NUM_CEQS PF_CAPS_D_FIELD_LOC(95, 86) +#define PF_CAPS_D_AEQ_DEPTH PF_CAPS_D_FIELD_LOC(117, 96) +#define PF_CAPS_D_AEQ_ARM_ST PF_CAPS_D_FIELD_LOC(119, 118) +#define PF_CAPS_D_CEQ_ARM_ST PF_CAPS_D_FIELD_LOC(121, 120) +#define PF_CAPS_D_RSV_PDS PF_CAPS_D_FIELD_LOC(147, 128) +#define PF_CAPS_D_NUM_UARS PF_CAPS_D_FIELD_LOC(155, 148) +#define PF_CAPS_D_RSV_QPS PF_CAPS_D_FIELD_LOC(179, 160) +#define PF_CAPS_D_RSV_UARS PF_CAPS_D_FIELD_LOC(187, 180) + +#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12 struct hns_roce_congestion_algorithm { u8 alg_sel; @@ -1321,33 +1247,6 @@ struct hns_roce_congestion_algorithm { u8 wnd_mode_sel; }; -#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S 0 -#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M GENMASK(21, 0) - -#define V2_QUERY_PF_CAPS_D_NUM_CEQS_S 22 -#define V2_QUERY_PF_CAPS_D_NUM_CEQS_M GENMASK(31, 22) - -#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S 0 -#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M GENMASK(21, 0) - -#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S 22 -#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M GENMASK(23, 22) - -#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S 24 -#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M GENMASK(25, 24) - -#define V2_QUERY_PF_CAPS_D_RSV_PDS_S 0 -#define V2_QUERY_PF_CAPS_D_RSV_PDS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_D_NUM_UARS_S 20 -#define V2_QUERY_PF_CAPS_D_NUM_UARS_M GENMASK(27, 20) - -#define V2_QUERY_PF_CAPS_D_RSV_QPS_S 0 -#define V2_QUERY_PF_CAPS_D_RSV_QPS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_D_RSV_UARS_S 20 -#define V2_QUERY_PF_CAPS_D_RSV_UARS_M GENMASK(27, 20) - struct hns_roce_query_pf_caps_e { __le32 chunk_size_shift_rsv_mrws; __le32 rsv_cqs; @@ -1359,20 +1258,14 @@ struct hns_roce_query_pf_caps_e { __le16 aeq_period; }; -#define V2_QUERY_PF_CAPS_E_RSV_MRWS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_MRWS_M GENMASK(19, 0) +#define PF_CAPS_E_FIELD_LOC(h, l) \ + FIELD_LOC(struct hns_roce_query_pf_caps_e, h, l) 
-#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S 20 -#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M GENMASK(31, 20) - -#define V2_QUERY_PF_CAPS_E_RSV_CQS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_CQS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_E_RSV_SRQS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_SRQS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_M GENMASK(19, 0) +#define PF_CAPS_E_RSV_MRWS PF_CAPS_E_FIELD_LOC(19, 0) +#define PF_CAPS_E_CHUNK_SIZE_SHIFT PF_CAPS_E_FIELD_LOC(31, 20) +#define PF_CAPS_E_RSV_CQS PF_CAPS_E_FIELD_LOC(51, 32) +#define PF_CAPS_E_RSV_SRQS PF_CAPS_E_FIELD_LOC(83, 64) +#define PF_CAPS_E_RSV_LKEYS PF_CAPS_E_FIELD_LOC(115, 96) struct hns_roce_cmq_req { __le32 data[6]; @@ -1457,9 +1350,6 @@ struct hns_roce_dip { #define HNS_ROCE_EQ_INIT_CONS_IDX 0 #define HNS_ROCE_EQ_INIT_NXT_EQE_BA 0 -#define HNS_ROCE_V2_CEQ_CEQE_OWNER_S 31 -#define HNS_ROCE_V2_AEQ_AEQE_OWNER_S 31 - #define HNS_ROCE_V2_COMP_EQE_NUM 0x1000 #define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000 @@ -1516,18 +1406,6 @@ struct hns_roce_eq_context { #define EQC_NEX_EQE_BA_H EQC_FIELD_LOC(339, 320) #define EQC_EQE_SIZE EQC_FIELD_LOC(341, 340) -#define HNS_ROCE_V2_CEQE_COMP_CQN_S 0 -#define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0) - -#define HNS_ROCE_V2_AEQE_EVENT_TYPE_S 0 -#define HNS_ROCE_V2_AEQE_EVENT_TYPE_M GENMASK(7, 0) - -#define HNS_ROCE_V2_AEQE_SUB_TYPE_S 8 -#define HNS_ROCE_V2_AEQE_SUB_TYPE_M GENMASK(15, 8) - -#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0 -#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0) - #define MAX_SERVICE_LEVEL 0x7 struct hns_roce_wqe_atomic_seg { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c index 5a97b5a0b7be34040ad2888ea97f7c4dacdfdd01..f7a75a7cda7491e93f1aa6134facad61623f52a3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c @@ -18,9 +18,8 @@ int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn, return PTR_ERR(mailbox); cq_context = mailbox->buf; - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0, - HNS_ROCE_CMD_QUERY_CQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_CQC, + cqn); if (ret) { dev_err(hr_dev->dev, "QUERY cqc cmd process error\n"); goto err_mailbox; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 8aa0af069042af86e0dafc5e32b1eda9f6876493..11f42cebfa40a6ba7598758f229004665fc3962f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -774,7 +774,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table, HEM_TYPE_CQC_TIMER, hr_dev->caps.cqc_timer_entry_sz, - hr_dev->caps.num_cqc_timer, 1); + hr_dev->caps.cqc_timer_bt_num, 1); if (ret) { dev_err(dev, "Failed to init CQC timer memory, aborting.\n"); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 44e0ee3b5b6c0b0f8fce10ef94fc18f9ee298a41..1e36ac383ea3042fcbb186d5a51aefbc0db91665 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -47,24 +47,6 @@ unsigned long key_to_hw_index(u32 key) return (key << 24) | (key >> 8); } -static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index) -{ - return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0, 
- HNS_ROCE_CMD_CREATE_MPT, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - -int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index) -{ - return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0, - mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) { struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida; @@ -144,7 +126,7 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, int ret; if (mr->enabled) { - ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT, key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1)); if (ret) @@ -166,10 +148,8 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, /* Allocate mailbox memory */ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) { - ret = PTR_ERR(mailbox); - return ret; - } + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); if (mr->type != MR_TYPE_FRMR) ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr); @@ -180,7 +160,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, goto err_page; } - ret = hns_roce_hw_create_mpt(hr_dev, mailbox, + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT, mtpt_idx & (hr_dev->caps.num_mtpts - 1)); if (ret) { dev_err(dev, "failed to create mpt, ret = %d.\n", ret); @@ -302,13 +282,13 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, return PTR_ERR(mailbox); mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1); - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0, - HNS_ROCE_CMD_QUERY_MPT, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT, + mtpt_idx); if (ret) goto free_cmd_mbox; - ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx); + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT, + mtpt_idx); if (ret) ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret); @@ -338,7 +318,8 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, goto free_cmd_mbox; } - ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx); + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT, + mtpt_idx); if (ret) { ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret); goto free_cmd_mbox; @@ -356,12 +337,11 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); struct hns_roce_mr *mr = to_hr_mr(ibmr); - int ret = 0; hns_roce_mr_free(hr_dev, mr); kfree(mr); - return ret; + return 0; } struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, @@ -477,7 +457,7 @@ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev, int ret; if (mw->enabled) { - ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT, key_to_hw_index(mw->rkey) & (hr_dev->caps.num_mtpts - 1)); if (ret) @@ -517,7 +497,7 @@ static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev, goto err_page; } - ret = hns_roce_hw_create_mpt(hr_dev, mailbox, + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT, mtpt_idx & (hr_dev->caps.num_mtpts - 1)); if (ret) { dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 
1099963db1b62c23552e04514ae3cd95623a1ff9..43530a7c8304d1377f40650aff5b7439e5b5c431 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -243,26 +243,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) return 0; } -enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) -{ - switch (state) { - case IB_QPS_RESET: - return HNS_ROCE_QP_STATE_RST; - case IB_QPS_INIT: - return HNS_ROCE_QP_STATE_INIT; - case IB_QPS_RTR: - return HNS_ROCE_QP_STATE_RTR; - case IB_QPS_RTS: - return HNS_ROCE_QP_STATE_RTS; - case IB_QPS_SQD: - return HNS_ROCE_QP_STATE_SQD; - case IB_QPS_ERR: - return HNS_ROCE_QP_STATE_ERR; - default: - return HNS_ROCE_QP_NUM_STATE; - } -} - static void add_qp_to_list(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_cq *send_cq, struct ib_cq *recv_cq) diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index 259444c0a6301a547e76843bb9a488fa14b5fff8..24a154d646304de3b2bd48bd416568da2324542f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -13,61 +13,40 @@ static int hns_roce_fill_cq(struct sk_buff *msg, struct hns_roce_v2_cq_context *context) { if (rdma_nl_put_driver_u32(msg, "state", - roce_get_field(context->byte_4_pg_ceqn, - V2_CQC_BYTE_4_ARM_ST_M, - V2_CQC_BYTE_4_ARM_ST_S))) + hr_reg_read(context, CQC_ARM_ST))) + goto err; if (rdma_nl_put_driver_u32(msg, "ceqn", - roce_get_field(context->byte_4_pg_ceqn, - V2_CQC_BYTE_4_CEQN_M, - V2_CQC_BYTE_4_CEQN_S))) + hr_reg_read(context, CQC_CEQN))) goto err; if (rdma_nl_put_driver_u32(msg, "cqn", - roce_get_field(context->byte_8_cqn, - V2_CQC_BYTE_8_CQN_M, - V2_CQC_BYTE_8_CQN_S))) + hr_reg_read(context, CQC_CQN))) goto err; if (rdma_nl_put_driver_u32(msg, "hopnum", - roce_get_field(context->byte_16_hop_addr, - V2_CQC_BYTE_16_CQE_HOP_NUM_M, - V2_CQC_BYTE_16_CQE_HOP_NUM_S))) + hr_reg_read(context, CQC_CQE_HOP_NUM))) goto err; - if (rdma_nl_put_driver_u32( - msg, "pi", - roce_get_field(context->byte_28_cq_pi, - V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M, - V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S))) + if (rdma_nl_put_driver_u32(msg, "pi", + hr_reg_read(context, CQC_CQ_PRODUCER_IDX))) goto err; - if (rdma_nl_put_driver_u32( - msg, "ci", - roce_get_field(context->byte_32_cq_ci, - V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M, - V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S))) + if (rdma_nl_put_driver_u32(msg, "ci", + hr_reg_read(context, CQC_CQ_CONSUMER_IDX))) goto err; - if (rdma_nl_put_driver_u32( - msg, "coalesce", - roce_get_field(context->byte_56_cqe_period_maxcnt, - V2_CQC_BYTE_56_CQ_MAX_CNT_M, - V2_CQC_BYTE_56_CQ_MAX_CNT_S))) + if (rdma_nl_put_driver_u32(msg, "coalesce", + hr_reg_read(context, CQC_CQ_MAX_CNT))) goto err; - if (rdma_nl_put_driver_u32( - msg, "period", - roce_get_field(context->byte_56_cqe_period_maxcnt, - V2_CQC_BYTE_56_CQ_PERIOD_M, - V2_CQC_BYTE_56_CQ_PERIOD_S))) + if (rdma_nl_put_driver_u32(msg, "period", + hr_reg_read(context, CQC_CQ_PERIOD))) goto err; if (rdma_nl_put_driver_u32(msg, "cnt", - roce_get_field(context->byte_52_cqe_cnt, - V2_CQC_BYTE_52_CQE_CNT_M, - V2_CQC_BYTE_52_CQE_CNT_S))) + hr_reg_read(context, CQC_CQE_CNT))) goto err; return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 21962e5472438534f3f360ee6699e5b756b2bb4f..f3e19c66283f96b55afeaf413e2d481198365427 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -59,58 +59,39 
@@ static void hns_roce_ib_srq_event(struct hns_roce_srq *srq, } } -static int hns_roce_hw_create_srq(struct hns_roce_dev *dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long srq_num) +static int alloc_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) { - return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0, - HNS_ROCE_CMD_CREATE_SRQ, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - -static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long srq_num) -{ - return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num, - mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - -static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) -{ - struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida; - struct ib_device *ibdev = &hr_dev->ib_dev; - struct hns_roce_cmd_mailbox *mailbox; - int ret; int id; id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max, GFP_KERNEL); if (id < 0) { - ibdev_err(ibdev, "failed to alloc srq(%d).\n", id); + ibdev_err(&hr_dev->ib_dev, "failed to alloc srq(%d).\n", id); return -ENOMEM; } - srq->srqn = (unsigned long)id; - ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn); - if (ret) { - ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret); - goto err_out; - } + srq->srqn = id; - ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL)); - if (ret) { - ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret); - goto err_put; - } + return 0; +} + +static void free_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) +{ + ida_free(&hr_dev->srq_table.srq_ida.ida, (int)srq->srqn); +} + +static int hns_roce_create_srqc(struct hns_roce_dev *hr_dev, + struct hns_roce_srq *srq) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_cmd_mailbox *mailbox; + int ret; mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR_OR_NULL(mailbox)) { + if (IS_ERR(mailbox)) { ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n"); - ret = -ENOMEM; - goto err_xa; + return PTR_ERR(mailbox); } ret = hr_dev->hw->write_srqc(srq, mailbox->buf); @@ -119,24 +100,44 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) goto err_mbox; } - ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn); - if (ret) { + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_SRQ, + srq->srqn); + if (ret) ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret); - goto err_mbox; - } +err_mbox: hns_roce_free_cmd_mailbox(hr_dev, mailbox); + return ret; +} + +static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) +{ + struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; + struct ib_device *ibdev = &hr_dev->ib_dev; + int ret; + + ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn); + if (ret) { + ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret); + return ret; + } + + ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL)); + if (ret) { + ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret); + goto err_put; + } + + ret = hns_roce_create_srqc(hr_dev, srq); + if (ret) + goto err_xa; return 0; -err_mbox: - hns_roce_free_cmd_mailbox(hr_dev, mailbox); err_xa: xa_erase(&srq_table->xa, srq->srqn); err_put: hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); -err_out: - ida_free(&srq_ida->ida, id); return ret; } @@ -146,7 +147,8 @@ static void free_srqc(struct 
hns_roce_dev *hr_dev, struct hns_roce_srq *srq) struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; int ret; - ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn); + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_SRQ, + srq->srqn); if (ret) dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n", ret, srq->srqn); @@ -158,7 +160,6 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) wait_for_completion(&srq->free); hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); - ida_free(&srq_table->srq_ida.ida, (int)srq->srqn); } static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, @@ -406,10 +407,14 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, if (ret) return ret; - ret = alloc_srqc(hr_dev, srq); + ret = alloc_srqn(hr_dev, srq); if (ret) goto err_srq_buf; + ret = alloc_srqc(hr_dev, srq); + if (ret) + goto err_srqn; + if (udata) { resp.srqn = srq->srqn; if (ib_copy_to_udata(udata, &resp, @@ -428,6 +433,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, err_srqc: free_srqc(hr_dev, srq); +err_srqn: + free_srqn(hr_dev, srq); err_srq_buf: free_srq_buf(hr_dev, srq); @@ -440,6 +447,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) struct hns_roce_srq *srq = to_hr_srq(ibsrq); free_srqc(hr_dev, srq); + free_srqn(hr_dev, srq); free_srq_buf(hr_dev, srq); return 0; } diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 343e6709d9fc3878e994b2a4962c619b821a78e5..2f053f48f1bebcb0dba263e5a5df20501cc83cd8 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1792,8 +1792,10 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table, key_level2, obj_event, GFP_KERNEL); - if (err) + if (err) { + kfree(obj_event); return err; + } INIT_LIST_HEAD(&obj_event->obj_sub_list); } diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 19346693c1da4a3b6068b6aeb3de2feda3227e89..6cd0cbd4fc9f6169e65f7d3dad69a6e35f860954 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -575,6 +575,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, ent = &cache->ent[entry]; spin_lock_irq(&ent->lock); if (list_empty(&ent->head)) { + queue_adjust_cache_locked(ent); + ent->miss++; spin_unlock_irq(&ent->lock); mr = create_cache_mr(ent); if (IS_ERR(mr)) diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 8937530a42d3d32b589d443e37fe46304a6ff9a7..13634eda833de3d2729fba2b9ada55c721276da9 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -1328,6 +1328,12 @@ static int alloc_permits(struct rtrs_clt *clt) static void free_permits(struct rtrs_clt *clt) { + if (clt->permits_map) { + size_t sz = clt->queue_depth; + + wait_event(clt->permits_wait, + find_first_bit(clt->permits_map, sz) >= sz); + } kfree(clt->permits_map); clt->permits_map = NULL; kfree(clt->permits); @@ -2630,20 +2636,8 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, return ERR_PTR(err); } -static void wait_for_inflight_permits(struct rtrs_clt *clt) -{ - if (clt->permits_map) { - size_t sz = clt->queue_depth; - - wait_event(clt->permits_wait, - find_first_bit(clt->permits_map, sz) >= sz); - } -} - static void free_clt(struct rtrs_clt *clt) { - wait_for_inflight_permits(clt); - free_permits(clt); free_percpu(clt->pcpu_path); /* @@ -2764,6 +2758,7 @@ void rtrs_clt_close(struct 
rtrs_clt *clt) rtrs_clt_destroy_sess_files(sess, NULL); kobject_put(&sess->kobj); } + free_permits(clt); free_clt(clt); } EXPORT_SYMBOL(rtrs_clt_close); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 86d5c4c92b363c4fc6fb9832ca7da626496a7200..b4ccb333a834201e0e67cac4337383293cacd780 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -4045,9 +4045,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data) spin_unlock(&host->target_lock); /* - * Wait for tl_err and target port removal tasks. + * srp_queue_remove_work() queues a call to + * srp_remove_target(). The latter function cancels + * target->tl_err_work so waiting for the remove works to + * finish is sufficient. */ - flush_workqueue(system_long_wq); flush_workqueue(srp_remove_wq); kfree(host); diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 9f60f1559e49926b161b2a5b472e30445f843c59..3f7a5ff17a9a3c23405b19aafbce729d5972fa42 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -556,7 +556,7 @@ config KEYBOARD_PMIC8XXX config KEYBOARD_SAMSUNG tristate "Samsung keypad support" - depends on HAVE_CLK + depends on HAS_IOMEM && HAVE_CLK select INPUT_MATRIXKMAP help Say Y here if you want to use the keypad on your Samsung mobile diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 11a9ee32c98cc89ab58d981162c1c7f0f1fd7219..6f59c8b245f240e19922c05e870ae7b735144f4c 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -153,55 +153,21 @@ static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count, return 0; } -static int elan_enable_power(struct elan_tp_data *data) +static int elan_set_power(struct elan_tp_data *data, bool on) { int repeat = ETP_RETRY_COUNT; int error; - error = regulator_enable(data->vcc); - if (error) { - dev_err(&data->client->dev, - "failed to enable regulator: %d\n", error); - return error; - } - do { - error = data->ops->power_control(data->client, true); + error = data->ops->power_control(data->client, on); if (error >= 0) return 0; msleep(30); } while (--repeat > 0); - dev_err(&data->client->dev, "failed to enable power: %d\n", error); - return error; -} - -static int elan_disable_power(struct elan_tp_data *data) -{ - int repeat = ETP_RETRY_COUNT; - int error; - - do { - error = data->ops->power_control(data->client, false); - if (!error) { - error = regulator_disable(data->vcc); - if (error) { - dev_err(&data->client->dev, - "failed to disable regulator: %d\n", - error); - /* Attempt to power the chip back up */ - data->ops->power_control(data->client, true); - break; - } - - return 0; - } - - msleep(30); - } while (--repeat > 0); - - dev_err(&data->client->dev, "failed to disable power: %d\n", error); + dev_err(&data->client->dev, "failed to set power %s: %d\n", + on ? 
"on" : "off", error); return error; } @@ -1361,9 +1327,19 @@ static int __maybe_unused elan_suspend(struct device *dev) /* Enable wake from IRQ */ data->irq_wake = (enable_irq_wake(client->irq) == 0); } else { - ret = elan_disable_power(data); + ret = elan_set_power(data, false); + if (ret) + goto err; + + ret = regulator_disable(data->vcc); + if (ret) { + dev_err(dev, "error %d disabling regulator\n", ret); + /* Attempt to power the chip back up */ + elan_set_power(data, true); + } } +err: mutex_unlock(&data->sysfs_mutex); return ret; } @@ -1374,12 +1350,18 @@ static int __maybe_unused elan_resume(struct device *dev) struct elan_tp_data *data = i2c_get_clientdata(client); int error; - if (device_may_wakeup(dev) && data->irq_wake) { + if (!device_may_wakeup(dev)) { + error = regulator_enable(data->vcc); + if (error) { + dev_err(dev, "error %d enabling regulator\n", error); + goto err; + } + } else if (data->irq_wake) { disable_irq_wake(client->irq); data->irq_wake = false; } - error = elan_enable_power(data); + error = elan_set_power(data, true); if (error) { dev_err(dev, "power up when resuming failed: %d\n", error); goto err; diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c index ecdeca147ed717c77615f89d9b3bf9126eae8669..4408245b61d2c6707bf136fc0d82a7e11795916a 100644 --- a/drivers/input/serio/ambakmi.c +++ b/drivers/input/serio/ambakmi.c @@ -159,7 +159,7 @@ static int amba_kmi_probe(struct amba_device *dev, return ret; } -static int amba_kmi_remove(struct amba_device *dev) +static void amba_kmi_remove(struct amba_device *dev) { struct amba_kmi_port *kmi = amba_get_drvdata(dev); @@ -168,7 +168,6 @@ static int amba_kmi_remove(struct amba_device *dev) iounmap(kmi->base); kfree(kmi); amba_release_regions(dev); - return 0; } static int __maybe_unused amba_kmi_resume(struct device *dev) diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index e08b0ef078e8198474120972a656b09a0150ee09..8afeefcea67bb1ad498569a2c8b8a3b78f5285e5 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1801,15 +1801,13 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0); input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); - /* Verify that a device really has an endpoint */ - if (intf->cur_altsetting->desc.bNumEndpoints < 1) { + err = usb_find_common_endpoints(intf->cur_altsetting, + NULL, NULL, &endpoint, NULL); + if (err) { dev_err(&intf->dev, - "interface has %d endpoints, but must have minimum 1\n", - intf->cur_altsetting->desc.bNumEndpoints); - err = -EINVAL; + "interface has no int in endpoints, but must have minimum 1\n"); goto fail3; } - endpoint = &intf->cur_altsetting->endpoint[0].desc; /* Go set up our URB, which is called when the tablet receives * input. 
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c index 6df6f07f1ac66a036737e1957a5ed8007cd7f2bd..17b10b81c71319947ff95542cbdc794fb68a1112 100644 --- a/drivers/input/touchscreen/zinitix.c +++ b/drivers/input/touchscreen/zinitix.c @@ -135,7 +135,7 @@ struct point_coord { struct touch_event { __le16 status; - u8 finger_cnt; + u8 finger_mask; u8 time_stamp; struct point_coord point_coord[MAX_SUPPORTED_FINGER_NUM]; }; @@ -311,11 +311,32 @@ static int zinitix_send_power_on_sequence(struct bt541_ts_data *bt541) static void zinitix_report_finger(struct bt541_ts_data *bt541, int slot, const struct point_coord *p) { + u16 x, y; + + if (unlikely(!(p->sub_status & + (SUB_BIT_UP | SUB_BIT_DOWN | SUB_BIT_MOVE)))) { + dev_dbg(&bt541->client->dev, "unknown finger event %#02x\n", + p->sub_status); + return; + } + + x = le16_to_cpu(p->x); + y = le16_to_cpu(p->y); + input_mt_slot(bt541->input_dev, slot); - input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, true); - touchscreen_report_pos(bt541->input_dev, &bt541->prop, - le16_to_cpu(p->x), le16_to_cpu(p->y), true); - input_report_abs(bt541->input_dev, ABS_MT_TOUCH_MAJOR, p->width); + if (input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, + !(p->sub_status & SUB_BIT_UP))) { + touchscreen_report_pos(bt541->input_dev, + &bt541->prop, x, y, true); + input_report_abs(bt541->input_dev, + ABS_MT_TOUCH_MAJOR, p->width); + dev_dbg(&bt541->client->dev, "finger %d %s (%u, %u)\n", + slot, p->sub_status & SUB_BIT_DOWN ? "down" : "move", + x, y); + } else { + dev_dbg(&bt541->client->dev, "finger %d up (%u, %u)\n", + slot, x, y); + } } static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) @@ -323,6 +344,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) struct bt541_ts_data *bt541 = bt541_handler; struct i2c_client *client = bt541->client; struct touch_event touch_event; + unsigned long finger_mask; int error; int i; @@ -335,10 +357,14 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) goto out; } - for (i = 0; i < MAX_SUPPORTED_FINGER_NUM; i++) - if (touch_event.point_coord[i].sub_status & SUB_BIT_EXIST) - zinitix_report_finger(bt541, i, - &touch_event.point_coord[i]); + finger_mask = touch_event.finger_mask; + for_each_set_bit(i, &finger_mask, MAX_SUPPORTED_FINGER_NUM) { + const struct point_coord *p = &touch_event.point_coord[i]; + + /* Only process contacts that are actually reported */ + if (p->sub_status & SUB_BIT_EXIST) + zinitix_report_finger(bt541, i, p); + } input_mt_sync_frame(bt541->input_dev); input_sync(bt541->input_dev); diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index b4adab69856323b3f9158720dac5dc673249974e..0c40d22409f232f46becc79a25891f1cb42df70c 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -17,6 +17,7 @@ extern int amd_iommu_init_passthrough(void); extern irqreturn_t amd_iommu_int_thread(int irq, void *data); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); extern void amd_iommu_apply_erratum_63(u16 devid); +extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu); extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); extern int amd_iommu_init_devices(void); extern void amd_iommu_uninit_devices(void); diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 33446c9d3bac81cc4d7c3c66165be776193376e5..690c5976575c68ea0398ebb8ae900b160d3ef2bb 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ 
b/drivers/iommu/amd/amd_iommu_types.h @@ -109,6 +109,7 @@ #define PASID_MASK 0x0000ffff /* MMIO status bits */ +#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK (1 << 0) #define MMIO_STATUS_EVT_INT_MASK (1 << 1) #define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2) #define MMIO_STATUS_PPR_INT_MASK (1 << 6) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 502e6532dd549477e851f03e9f43766ae27f6c24..6eaefc9e7b3d60f6ef2bb0ab629466cc66c3f3d6 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -656,6 +656,16 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu) return iommu->cmd_buf ? 0 : -ENOMEM; } +/* + * This function restarts event logging in case the IOMMU experienced + * an event log buffer overflow. + */ +void amd_iommu_restart_event_logging(struct amd_iommu *iommu) +{ + iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); + iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); +} + /* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 5f1195791cb18a30ba695bd983f919e5e0cf2e97..200cf5da5e0ad1c9ec3acf34fe06566d5e1f14dd 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -813,7 +813,8 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } #endif /* !CONFIG_IRQ_REMAP */ #define AMD_IOMMU_INT_MASK \ - (MMIO_STATUS_EVT_INT_MASK | \ + (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \ + MMIO_STATUS_EVT_INT_MASK | \ MMIO_STATUS_PPR_INT_MASK | \ MMIO_STATUS_GALOG_INT_MASK) @@ -823,7 +824,7 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); while (status & AMD_IOMMU_INT_MASK) { - /* Enable EVT and PPR and GA interrupts again */ + /* Enable interrupt sources again */ writel(AMD_IOMMU_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); @@ -844,6 +845,11 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) } #endif + if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) { + pr_info_ratelimited("IOMMU event log overflow\n"); + amd_iommu_restart_event_logging(iommu); + } + /* * Hardware bug: ERBT1312 * When re-enabling interrupt (by writing 1 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index e61d16f0ede2fe6b90765db1ebedf3ffb6446121..9116c93945d09f4ed805f41b38725523de7fe8f4 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -3142,9 +3142,8 @@ int iommu_clear_dirty_log(struct iommu_domain *domain, unsigned long bitmap_pgshift) { unsigned long riova, rsize; - unsigned int min_pagesz; + unsigned int min_pagesz, rs, re, start, end; bool flush = false; - int rs, re, start, end; int ret = 0; min_pagesz = 1 << __ffs(domain->pgsize_bitmap); @@ -3160,8 +3159,8 @@ int iommu_clear_dirty_log(struct iommu_domain *domain, end = start + (size >> bitmap_pgshift); bitmap_for_each_set_region(bitmap, rs, re, start, end) { flush = true; - riova = base_iova + (rs << bitmap_pgshift); - rsize = (re - rs) << bitmap_pgshift; + riova = base_iova + ((unsigned long)rs << bitmap_pgshift); + rsize = (unsigned long)(re - rs) << bitmap_pgshift; ret = __iommu_clear_dirty_log(domain, riova, rsize, bitmap, base_iova, bitmap_pgshift); if (ret) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 82504049f8e445e95b4ae5777a1df2122ecf2d4b..1246e8f8bf08601725485c6c2c58dd1107f22e31 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -158,10 +158,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) cached_iova = 
rb_entry(iovad->cached32_node, struct iova, node); if (free == cached_iova || (free->pfn_hi < iovad->dma_32bit_pfn && - free->pfn_lo >= cached_iova->pfn_lo)) { + free->pfn_lo >= cached_iova->pfn_lo)) iovad->cached32_node = rb_next(&free->node); + + if (free->pfn_lo < iovad->dma_32bit_pfn) iovad->max32_alloc_size = iovad->dma_32bit_pfn; - } cached_iova = rb_entry(iovad->cached_node, struct iova, node); if (free->pfn_lo >= cached_iova->pfn_lo) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index d71f10257f15929f6337b9cc13cc147998c59df5..d9068e8f2db4fcc7484ed6fe8d4b56589b6f7159 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1012,7 +1012,9 @@ static int ipmmu_probe(struct platform_device *pdev) bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); mmu->features = of_device_get_match_data(&pdev->dev); memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs); - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; /* Map I/O memory and request IRQ. */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/iommu/sw64/sunway_iommu.c b/drivers/iommu/sw64/sunway_iommu.c index dd3382ee007f0f67181eca5c8cee881c78ca16a2..8b851e0a0c20bff8621c064001bb840aeaf54521 100644 --- a/drivers/iommu/sw64/sunway_iommu.c +++ b/drivers/iommu/sw64/sunway_iommu.c @@ -40,6 +40,9 @@ #define SW64_IOMMU_PGSIZES (((1ULL) << PAGE_SHIFT) | ((1ULL) << PAGE_8M_SHIFT)) +#define IDENTMAP_ALL ((1U) << 0) +#define DMA_MASK64 ((1U) << 1) + /* IOMMU Exceptional Status */ enum exceptype { DTE_LEVEL1 = 0x0, @@ -383,7 +386,7 @@ set_dte_entry(struct sunway_iommu_dev *sdev, struct sunway_iommu_domain *sdomain dte_l2_val = (__pa(sdomain->pt_root) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; if (sdomain->type == IOMMU_DOMAIN_IDENTITY) { dte_l2_val |= 0x1; - sdev->passthrough = true; + sdev->passthrough = IDENTMAP_ALL; } *dte_l2 = dte_l2_val; @@ -1058,18 +1061,24 @@ static void *sunway_alloc_coherent(struct device *dev, if (!(hose->iommu_enable)) return cpu_addr; - sdomain = get_sunway_domain(dev); sdev = dev_iommu_priv_get(dev); - if (sdev->passthrough) - if (pdev->dma_mask > DMA_BIT_MASK(32)) + if (sdev->passthrough & DMA_MASK64) + return cpu_addr; + else if (sdev->passthrough) { + if (min_not_zero(*dev->dma_mask, dev->coherent_dma_mask) + > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; return cpu_addr; + } - dma_dom = to_dma_domain(sdomain); - if (sdomain->type == IOMMU_DOMAIN_IDENTITY) { - sdomain->type = IOMMU_DOMAIN_DMA; - set_dte_entry(sdev, sdomain); + __free_pages(page, get_order(size)); + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->alloc(dev, size, dma_addr, gfp, attrs); } + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + *dma_addr = pci_iommu_map_single(pdev, dma_dom, cpu_addr, size); if (*dma_addr == 0) { free_pages((unsigned long)cpu_addr, get_order(size)); @@ -1164,16 +1173,22 @@ sunway_map_page(struct device *dev, struct page *page, return paddr; sdev = dev_iommu_priv_get(dev); - if (sdev->passthrough) - if (pdev->dma_mask > DMA_BIT_MASK(32)) + if (sdev->passthrough & DMA_MASK64) + return paddr; + else if (sdev->passthrough) { + if (min_not_zero(*dev->dma_mask, dev->coherent_dma_mask) + > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; return paddr; + } + + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_page(dev, page, offset, + size, dir, attrs); + } sdomain = get_sunway_domain(dev); 
dma_dom = to_dma_domain(sdomain); - if (sdomain->type == IOMMU_DOMAIN_IDENTITY) { - sdomain->type = IOMMU_DOMAIN_DMA; - set_dte_entry(sdev, sdomain); - } return pci_iommu_map_single(pdev, dma_dom, (char *)page_address(page) + offset, size); @@ -1243,13 +1258,18 @@ sunway_map_sg(struct device *dev, struct scatterlist *sgl, goto check; sdev = dev_iommu_priv_get(dev); - if (sdev->passthrough) - if (pdev->dma_mask > DMA_BIT_MASK(32)) + if (sdev->passthrough & DMA_MASK64) + goto check; + else if (sdev->passthrough) { + if (min_not_zero(*dev->dma_mask, dev->coherent_dma_mask) + > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; goto check; + } - if (sdomain->type == IOMMU_DOMAIN_IDENTITY) { - sdomain->type = IOMMU_DOMAIN_DMA; - set_dte_entry(sdev, sdomain); + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_sg(dev, sgl, nents, + dir, attrs); } sg_dma_address(sg) = diff --git a/drivers/iommu/sw64/sunway_iommu.h b/drivers/iommu/sw64/sunway_iommu.h index 5ad1dc7c406f1c4472a33fc2b6d3659aa4373cc9..bc9e13466f0697843f33a10227f588b067179109 100644 --- a/drivers/iommu/sw64/sunway_iommu.h +++ b/drivers/iommu/sw64/sunway_iommu.h @@ -30,7 +30,7 @@ struct sunway_iommu_dev { struct llist_node dev_data_list; /* Global device list */ u16 devid; int alias; - bool passthrough; + unsigned int passthrough; struct sunway_iommu *iommu; struct pci_dev *pdev; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 54662b36dd6a7f1d5a04f7685deee57ae51f2544..81271fd8954fadd969264a75911d92d042dcb6f0 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1632,7 +1632,7 @@ static int its_select_cpu(struct irq_data *d, cpu = cpumask_pick_least_loaded(d, tmpmask); } else { - cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask); + cpumask_copy(tmpmask, aff_mask); /* If we cannot cross sockets, limit the search to that node */ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index 21cb31ff2bbf25777d34c6b12ed86c6458724461..e903c44edb64ab474d35097daed4ebcbd44ba913 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c @@ -94,6 +94,7 @@ static int __init nvic_of_init(struct device_node *node, if (!nvic_irq_domain) { pr_warn("Failed to allocate irq domain\n"); + iounmap(nvic_base); return -ENOMEM; } @@ -103,6 +104,7 @@ static int __init nvic_of_init(struct device_node *node, if (ret) { pr_warn("Failed to allocate irq chips\n"); irq_domain_remove(nvic_irq_domain); + iounmap(nvic_base); return ret; } diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index 5dc63c20b67ea93730c27161015c4f7f0672b51f..fc747b7f498302e8b4daf0d2a6b01a28abddcb69 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -74,17 +74,18 @@ static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d, static void pdc_enable_intr(struct irq_data *d, bool on) { int pin_out = d->hwirq; + unsigned long flags; u32 index, mask; u32 enable; index = pin_out / 32; mask = pin_out % 32; - raw_spin_lock(&pdc_lock); + raw_spin_lock_irqsave(&pdc_lock, flags); enable = pdc_reg_read(IRQ_ENABLE_BANK, index); enable = on ? 
ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask); pdc_reg_write(IRQ_ENABLE_BANK, index, enable); - raw_spin_unlock(&pdc_lock); + raw_spin_unlock_irqrestore(&pdc_lock, flags); } static void qcom_pdc_gic_disable(struct irq_data *d) diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index bd087cca1c1d2d0aca56d7479598d0c2f50c228b..af17459c1a5c02d847d801be97f72605c1870e51 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2005,7 +2005,11 @@ setup_hw(struct hfc_pci *hc) } /* Allocate memory for FIFOS */ /* the memory needs to be on a 32k boundary within the first 4G */ - dma_set_mask(&hc->pdev->dev, 0xFFFF8000); + if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) { + printk(KERN_WARNING + "HFC-PCI: No usable DMA configuration!\n"); + return -EIO; + } buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle, GFP_KERNEL); /* We silently assume the address is okay if nonzero */ diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index 40588692cec74ed5861b32c193f238d320e8e24b..c3b2c99b5cd5ceaf12c9fc7dcd929840ec6b3870 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c @@ -17,9 +17,6 @@ #include "dsp.h" #include "dsp_hwec.h" -/* uncomment for debugging */ -/*#define PIPELINE_DEBUG*/ - struct dsp_pipeline_entry { struct mISDN_dsp_element *elem; void *p; @@ -104,10 +101,6 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) } } -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name); -#endif - return 0; err2: @@ -129,10 +122,6 @@ void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem) list_for_each_entry_safe(entry, n, &dsp_elements, list) if (entry->elem == elem) { device_unregister(&entry->dev); -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: %s unregistered\n", - __func__, elem->name); -#endif return; } printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name); @@ -145,10 +134,6 @@ int dsp_pipeline_module_init(void) if (IS_ERR(elements_class)) return PTR_ERR(elements_class); -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: dsp pipeline module initialized\n", __func__); -#endif - dsp_hwec_init(); return 0; @@ -168,10 +153,6 @@ void dsp_pipeline_module_exit(void) __func__, entry->elem->name); kfree(entry); } - -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__); -#endif } int dsp_pipeline_init(struct dsp_pipeline *pipeline) @@ -181,10 +162,6 @@ int dsp_pipeline_init(struct dsp_pipeline *pipeline) INIT_LIST_HEAD(&pipeline->list); -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: dsp pipeline ready\n", __func__); -#endif - return 0; } @@ -210,16 +187,12 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline) return; _dsp_pipeline_destroy(pipeline); - -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: dsp pipeline destroyed\n", __func__); -#endif } int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) { - int incomplete = 0, found = 0; - char *dup, *tok, *name, *args; + int found = 0; + char *dup, *next, *tok, *name, *args; struct dsp_element_entry *entry, *n; struct dsp_pipeline_entry *pipeline_entry; struct mISDN_dsp_element *elem; @@ -230,10 +203,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) if (!list_empty(&pipeline->list)) _dsp_pipeline_destroy(pipeline); - dup = kstrdup(cfg, GFP_ATOMIC); + dup = next = kstrdup(cfg, GFP_ATOMIC); if (!dup) return 0; - while ((tok = strsep(&dup, "|"))) { + 
while ((tok = strsep(&next, "|"))) { if (!strlen(tok)) continue; name = strsep(&tok, "("); @@ -251,7 +224,6 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) printk(KERN_ERR "%s: failed to add " "entry to pipeline: %s (out of " "memory)\n", __func__, elem->name); - incomplete = 1; goto _out; } pipeline_entry->elem = elem; @@ -268,20 +240,12 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) if (pipeline_entry->p) { list_add_tail(&pipeline_entry-> list, &pipeline->list); -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: created " - "instance of %s%s%s\n", - __func__, name, args ? - " with args " : "", args ? - args : ""); -#endif } else { printk(KERN_ERR "%s: failed " "to add entry to pipeline: " "%s (new() returned NULL)\n", __func__, elem->name); kfree(pipeline_entry); - incomplete = 1; } } found = 1; @@ -290,11 +254,9 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) if (found) found = 0; - else { + else printk(KERN_ERR "%s: element not found, skipping: " "%s\n", __func__, name); - incomplete = 1; - } } _out: @@ -303,10 +265,6 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) else pipeline->inuse = 0; -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: dsp pipeline built%s: %s\n", - __func__, incomplete ? " incomplete" : "", cfg); -#endif kfree(dup); return 0; } diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 2543c7b6948b6d9ce701eccdd0e6430ef629a869..c5663398c6b7d7ec49b0314d2f564e23d62dd900 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x))) @@ -66,6 +67,7 @@ struct imx_mu_priv { const struct imx_mu_dcfg *dcfg; struct clk *clk; int irq; + bool suspend; u32 xcr; @@ -277,6 +279,9 @@ static irqreturn_t imx_mu_isr(int irq, void *p) return IRQ_NONE; } + if (priv->suspend) + pm_system_wakeup(); + return IRQ_HANDLED; } @@ -326,6 +331,8 @@ static int imx_mu_startup(struct mbox_chan *chan) break; } + priv->suspend = true; + return 0; } @@ -543,6 +550,8 @@ static int imx_mu_probe(struct platform_device *pdev) clk_disable_unprepare(priv->clk); + priv->suspend = false; + return 0; disable_runtime_pm: diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index e07091d71986a5f1ca30dfdfd38c785356e6647b..4895d8074002220ef7baa5dc1c0e6bd628cdf6db 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -410,6 +410,11 @@ static int tegra_hsp_mailbox_flush(struct mbox_chan *chan, value = tegra_hsp_channel_readl(ch, HSP_SM_SHRD_MBOX); if ((value & HSP_SM_SHRD_MBOX_FULL) == 0) { mbox_chan_txdone(chan, 0); + + /* Wait until channel is empty */ + if (chan->active_req != NULL) + continue; + return 0; } diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 8c371d5eef8eb96becfc9da08a32545552591bae..097577ae3c47177a6ec0706e106aa14e5a0b66e2 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -482,8 +482,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k) unsigned int i; for (i = 0; i < KEY_PTRS(k); i++) - __bch_bucket_free(PTR_CACHE(c, k, i), - PTR_BUCKET(c, k, i)); + __bch_bucket_free(c->cache, PTR_BUCKET(c, k, i)); } int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, @@ -674,7 +673,7 @@ bool bch_alloc_sectors(struct cache_set *c, SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); atomic_long_add(sectors, - &PTR_CACHE(c, &b->key, 
i)->sectors_written); + &c->cache->sectors_written); } if (b->sectors_free < c->cache->sb.block_size) diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index e8bf4f752e8beebcad27756fb533b7550ba53686..0563a40812fa5a35208b74bacf4d62ce0dc14998 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -178,7 +178,6 @@ #define pr_fmt(fmt) "bcache: %s() " fmt, __func__ -#include #include #include #include @@ -190,6 +189,7 @@ #include #include +#include "bcache_ondisk.h" #include "bset.h" #include "util.h" #include "closure.h" @@ -364,7 +364,6 @@ struct cached_dev { /* The rest of this all shows up in sysfs */ unsigned int sequential_cutoff; - unsigned int readahead; unsigned int io_disable:1; unsigned int verify:1; @@ -373,6 +372,7 @@ struct cached_dev { unsigned int partial_stripes_expensive:1; unsigned int writeback_metadata:1; unsigned int writeback_running:1; + unsigned int writeback_consider_fragment:1; unsigned char writeback_percent; unsigned int writeback_delay; @@ -385,6 +385,9 @@ struct cached_dev { unsigned int writeback_rate_update_seconds; unsigned int writeback_rate_i_term_inverse; unsigned int writeback_rate_p_term_inverse; + unsigned int writeback_rate_fp_term_low; + unsigned int writeback_rate_fp_term_mid; + unsigned int writeback_rate_fp_term_high; unsigned int writeback_rate_minimum; enum stop_on_failure stop_when_cache_set_failed; @@ -393,6 +396,13 @@ struct cached_dev { unsigned int error_limit; unsigned int offline_seconds; + /* + * Retry to update writeback_rate if contention happens for + * down_read(dc->writeback_lock) in update_writeback_rate() + */ +#define BCH_WBRATE_UPDATE_MAX_SKIPS 15 + unsigned int rate_update_retry; + char backing_dev_name[BDEVNAME_SIZE]; }; @@ -800,13 +810,6 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s) return s & (c->cache->sb.bucket_size - 1); } -static inline struct cache *PTR_CACHE(struct cache_set *c, - const struct bkey *k, - unsigned int ptr) -{ - return c->cache; -} - static inline size_t PTR_BUCKET_NR(struct cache_set *c, const struct bkey *k, unsigned int ptr) @@ -818,7 +821,7 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c, const struct bkey *k, unsigned int ptr) { - return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr); + return c->cache->buckets + PTR_BUCKET_NR(c, k, ptr); } static inline uint8_t gen_after(uint8_t a, uint8_t b) @@ -837,7 +840,7 @@ static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k, static inline bool ptr_available(struct cache_set *c, const struct bkey *k, unsigned int i) { - return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i); + return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && c->cache; } /* Btree key macros */ diff --git a/include/uapi/linux/bcache.h b/drivers/md/bcache/bcache_ondisk.h similarity index 100% rename from include/uapi/linux/bcache.h rename to drivers/md/bcache/bcache_ondisk.h diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 67a2c47f4201ae03fd9d41febeb507ea623a351d..94d38e8a59b323aa576cc1b0f6ed6ef95ebef49d 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -712,8 +712,10 @@ void bch_bset_build_written_tree(struct btree_keys *b) for (j = inorder_next(0, t->size); j; j = inorder_next(j, t->size)) { - while (bkey_to_cacheline(t, k) < cacheline) - prev = k, k = bkey_next(k); + while (bkey_to_cacheline(t, k) < cacheline) { + prev = k; + k = bkey_next(k); + } t->prev[j] = bkey_u64s(prev); t->tree[j].m = bkey_to_cacheline_offset(t, 
cacheline++, k); @@ -901,8 +903,10 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, status = BTREE_INSERT_STATUS_INSERT; while (m != bset_bkey_last(i) && - bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) - prev = m, m = bkey_next(m); + bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) { + prev = m; + m = bkey_next(m); + } /* prev is in the tree, if we merge we're done */ status = BTREE_INSERT_STATUS_BACK_MERGE; diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index a50dcfda656f5f9426eb66ac6620348aa23e2a0d..d795c84246b0184b60d98d01f798cdf36876d99b 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -2,10 +2,10 @@ #ifndef _BCACHE_BSET_H #define _BCACHE_BSET_H -#include #include #include +#include "bcache_ondisk.h" #include "util.h" /* for time_stats */ /* diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index fe6dce125aba226e5f7c05cad39c78b38829389d..98daa9d200f79a92603e420a5438604b91c5df69 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -426,7 +426,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent) do_btree_node_write(b); atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size, - &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); + &b->c->cache->btree_sectors_written); b->written += set_blocks(i, block_bytes(b->c->cache)); } @@ -1161,7 +1161,7 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k) for (i = 0; i < KEY_PTRS(k); i++) SET_PTR_GEN(k, i, - bch_inc_gen(PTR_CACHE(b->c, &b->key, i), + bch_inc_gen(b->c->cache, PTR_BUCKET(b->c, &b->key, i))); mutex_unlock(&b->c->bucket_lock); @@ -2006,8 +2006,7 @@ int bch_btree_check(struct cache_set *c) int i; struct bkey *k = NULL; struct btree_iter iter; - struct btree_check_state *check_state; - char name[32]; + struct btree_check_state check_state; /* check and mark root node keys */ for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) @@ -2018,61 +2017,59 @@ int bch_btree_check(struct cache_set *c) if (c->root->level == 0) return 0; - check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL); - if (!check_state) - return -ENOMEM; - - check_state->c = c; - check_state->total_threads = bch_btree_chkthread_nr(); - check_state->key_idx = 0; - spin_lock_init(&check_state->idx_lock); - atomic_set(&check_state->started, 0); - atomic_set(&check_state->enough, 0); - init_waitqueue_head(&check_state->wait); + memset(&check_state, 0, sizeof(struct btree_check_state)); + check_state.c = c; + check_state.total_threads = bch_btree_chkthread_nr(); + check_state.key_idx = 0; + spin_lock_init(&check_state.idx_lock); + atomic_set(&check_state.started, 0); + atomic_set(&check_state.enough, 0); + init_waitqueue_head(&check_state.wait); + rw_lock(0, c->root, c->root->level); /* * Run multiple threads to check btree nodes in parallel, - * if check_state->enough is non-zero, it means current + * if check_state.enough is non-zero, it means current * running check threads are enough, unncessary to create * more. 
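
The bch_btree_check() conversion in this hunk moves check_state from a kzalloc()'d allocation onto the caller's stack, which is only safe because the function now waits unconditionally for every checker thread to finish before returning. A minimal userspace model of that pattern, offered purely as an illustration: pthreads stand in for kthread_run(), pthread_join() for the wait_event(), and all names below are invented.

#include <pthread.h>
#include <stddef.h>

#define NR_WORKERS 4

struct check_state {                    /* lives on run_checks()'s stack */
	int results[NR_WORKERS];
};

struct check_info {
	struct check_state *state;
	int idx;
};

static void *worker(void *arg)
{
	struct check_info *info = arg;

	info->state->results[info->idx] = 0;    /* pretend to check a subtree */
	return NULL;
}

static int run_checks(void)
{
	struct check_state st = { { 0 } };      /* no kzalloc()/kfree() needed */
	struct check_info  info[NR_WORKERS];
	pthread_t          tid[NR_WORKERS];
	int i, started = 0, ret = 0;

	for (i = 0; i < NR_WORKERS; i++) {
		info[i].state = &st;
		info[i].idx   = i;
		if (pthread_create(&tid[i], NULL, worker, &info[i]))
			break;                  /* run with what we managed to start */
		started++;                      /* count only threads that exist */
	}

	/*
	 * st is stack memory: returning before every worker has finished
	 * would hand the threads a dangling pointer, hence the
	 * unconditional wait (the patch uses wait_event() for this).
	 */
	for (i = 0; i < started; i++)
		pthread_join(tid[i], NULL);

	for (i = 0; i < started; i++)
		if (st.results[i])
			ret = st.results[i];
	return ret;
}

int main(void)
{
	return run_checks();
}
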
*/ - for (i = 0; i < check_state->total_threads; i++) { - /* fetch latest check_state->enough earlier */ + for (i = 0; i < check_state.total_threads; i++) { + /* fetch latest check_state.enough earlier */ smp_mb__before_atomic(); - if (atomic_read(&check_state->enough)) + if (atomic_read(&check_state.enough)) break; - check_state->infos[i].result = 0; - check_state->infos[i].state = check_state; - snprintf(name, sizeof(name), "bch_btrchk[%u]", i); - atomic_inc(&check_state->started); + check_state.infos[i].result = 0; + check_state.infos[i].state = &check_state; - check_state->infos[i].thread = + check_state.infos[i].thread = kthread_run(bch_btree_check_thread, - &check_state->infos[i], - name); - if (IS_ERR(check_state->infos[i].thread)) { + &check_state.infos[i], + "bch_btrchk[%d]", i); + if (IS_ERR(check_state.infos[i].thread)) { pr_err("fails to run thread bch_btrchk[%d]\n", i); for (--i; i >= 0; i--) - kthread_stop(check_state->infos[i].thread); + kthread_stop(check_state.infos[i].thread); ret = -ENOMEM; goto out; } + atomic_inc(&check_state.started); } - wait_event_interruptible(check_state->wait, - atomic_read(&check_state->started) == 0 || - test_bit(CACHE_SET_IO_DISABLE, &c->flags)); + /* + * Must wait for all threads to stop. + */ + wait_event(check_state.wait, atomic_read(&check_state.started) == 0); - for (i = 0; i < check_state->total_threads; i++) { - if (check_state->infos[i].result) { - ret = check_state->infos[i].result; + for (i = 0; i < check_state.total_threads; i++) { + if (check_state.infos[i].result) { + ret = check_state.infos[i].result; goto out; } } out: - kfree(check_state); + rw_unlock(0, c->root); return ret; } diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 50482107134f12745066dfab31750693c6bba04a..1b5fdbc0d83eba863d4915ee47b3a8c27643694e 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -226,7 +226,7 @@ struct btree_check_info { int result; }; -#define BCH_BTR_CHKTHREAD_MAX 64 +#define BCH_BTR_CHKTHREAD_MAX 12 struct btree_check_state { struct cache_set *c; int total_threads; diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index b00fd08d696b5f187db535ef9f97fb3e9bee6802..45e7d54a40ff72b2abc1a43ce0818adb5ec0e5da 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -50,7 +50,7 @@ void bch_btree_verify(struct btree *b) v->keys.ops = b->keys.ops; bio = bch_bbio_alloc(b->c); - bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev); + bio_set_dev(bio, b->c->cache->bdev); bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; bio->bi_opf = REQ_OP_READ | REQ_META; diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index f4658a1f37b862efc543c514da21958598d3ec11..d626ffcbecb99c040e3a5ee05e4ba67b525a2847 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -50,7 +50,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) for (i = 0; i < KEY_PTRS(k); i++) if (ptr_available(c, k, i)) { - struct cache *ca = PTR_CACHE(c, k, i); + struct cache *ca = c->cache; size_t bucket = PTR_BUCKET_NR(c, k, i); size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); @@ -71,7 +71,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) for (i = 0; i < KEY_PTRS(k); i++) if (ptr_available(c, k, i)) { - struct cache *ca = PTR_CACHE(c, k, i); + struct cache *ca = c->cache; size_t bucket = PTR_BUCKET_NR(c, k, i); size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); diff --git 
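
A recurring theme in the bcache hunks above is dropping the PTR_CACHE(c, k, ptr) indirection: since multi-cache-device support went away, a cache_set owns exactly one struct cache, so every call site can read c->cache directly. A sketch with abbreviated stand-in structures (not the real definitions) of why the helper could collapse to a plain field access:

/* Sketch only: abbreviated stand-ins for the real bcache structures. */
struct cache { int dummy; /* per-device state elided */ };

struct cache_set {
	struct cache *cache;            /* exactly one cache device today */
};

/* Old shape: looked like a per-pointer lookup, but every path returned
 * the same device. */
static inline struct cache *ptr_cache_old(struct cache_set *c,
					  unsigned int ptr)
{
	(void)ptr;                      /* ptr no longer selects anything */
	return c->cache;
}

/* New shape: callers simply write c->cache->..., e.g.
 *     bio_set_dev(bio, c->cache->bdev);
 * instead of
 *     bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
 */
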
a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c index d636b7b2d070c49608aeb910096edfb32bcb4469..634922c5601db7d9f836ea1a49568db1b9876861 100644 --- a/drivers/md/bcache/features.c +++ b/drivers/md/bcache/features.c @@ -6,7 +6,7 @@ * Copyright 2020 Coly Li * */ -#include +#include "bcache_ondisk.h" #include "bcache.h" #include "features.h" @@ -19,7 +19,7 @@ struct feature { static struct feature feature_list[] = { {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE, "large_bucket"}, - {0, 0, 0 }, + {0, 0, NULL }, }; #define compose_feature_string(type) \ diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h index d1c8fd3977fc64461215673c18e67ef9f937d03d..09161b89c63edf993b2a8ac815590fb414b92ef1 100644 --- a/drivers/md/bcache/features.h +++ b/drivers/md/bcache/features.h @@ -2,10 +2,11 @@ #ifndef _BCACHE_FEATURES_H #define _BCACHE_FEATURES_H -#include #include #include +#include "bcache_ondisk.h" + #define BCH_FEATURE_COMPAT 0 #define BCH_FEATURE_RO_COMPAT 1 #define BCH_FEATURE_INCOMPAT 2 diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index dad71a6b78891c149c428d9ebc1758c5b9c357de..e4388fe3ab7ef96ead612d775ce64270ec9ea318 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -36,7 +36,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c) struct bbio *b = container_of(bio, struct bbio, bio); bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); - bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev); + bio_set_dev(bio, c->cache->bdev); b->submit_time_us = local_clock_us(); closure_bio_submit(c, bio, bio->bi_private); @@ -137,7 +137,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, blk_status_t error, const char *m) { struct bbio *b = container_of(bio, struct bbio, bio); - struct cache *ca = PTR_CACHE(c, &b->key, 0); + struct cache *ca = c->cache; int is_read = (bio_data_dir(bio) == READ ? 1 : 0); unsigned int threshold = op_is_write(bio_op(bio)) diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index c6613e817333765aa240053f381a374a7d8f1db8..346a92c43858224dc6ce7ed9216936fe8e915333 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -111,7 +111,7 @@ reread: left = ca->sb.bucket_size - offset; * Check from the oldest jset for last_seq. If * i->j.seq < j->last_seq, it means the oldest jset * in list is expired and useless, remove it from - * this list. Otherwise, j is a condidate jset for + * this list. Otherwise, j is a candidate jset for * further following checks. */ while (!list_empty(list)) { @@ -407,6 +407,11 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list) return ret; } +void bch_journal_space_reserve(struct journal *j) +{ + j->do_reserve = true; +} + /* Journalling */ static void btree_flush_write(struct cache_set *c) @@ -498,7 +503,7 @@ static void btree_flush_write(struct cache_set *c) * - If there are matched nodes recorded in btree_nodes[], * they are clean now (this is why and how the oldest * journal entry can be reclaimed). These selected nodes - * will be ignored and skipped in the folowing for-loop. + * will be ignored and skipped in the following for-loop. 
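
The journal.c changes in the next few hunks add a do_reserve flag and a free_journal_buckets() helper so that, once the cache set is running, one journal bucket is always held back. The arithmetic is a plain ring-distance computation; the standalone model below mirrors it with invented names, so it can be compiled and poked at in userspace.

#include <stdbool.h>
#include <stdio.h>

struct journal_model {
	unsigned int nbuckets;      /* total journal buckets (any value) */
	unsigned int cur_idx;       /* next bucket to write              */
	unsigned int discard_idx;   /* oldest bucket not yet reclaimed   */
	bool         do_reserve;    /* set once the cache set is running */
};

static unsigned int free_buckets(const struct journal_model *j)
{
	unsigned int n;

	/* distance from cur_idx forward to discard_idx on the ring;
	 * works even when nbuckets is not a power of two */
	if (j->cur_idx >= j->discard_idx)
		n = j->nbuckets + j->discard_idx - j->cur_idx;
	else
		n = j->discard_idx - j->cur_idx;

	/* always keep the in-use bucket, plus one spare when reserving */
	if (n > (1u + j->do_reserve))
		return n - (1u + j->do_reserve);
	return 0;
}

int main(void)
{
	struct journal_model j = { .nbuckets = 7, .cur_idx = 5,
				   .discard_idx = 2, .do_reserve = true };

	/* ring distance is 7 + 2 - 5 = 4; minus the current bucket and
	 * the reserved one leaves 2 usable buckets */
	printf("free journal buckets: %u\n", free_buckets(&j));
	return 0;
}
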
*/ if (((btree_current_write(b)->journal - fifo_front_p) & mask) != 0) { @@ -625,12 +630,30 @@ static void do_journal_discard(struct cache *ca) } } +static unsigned int free_journal_buckets(struct cache_set *c) +{ + struct journal *j = &c->journal; + struct cache *ca = c->cache; + struct journal_device *ja = &c->cache->journal; + unsigned int n; + + /* In case njournal_buckets is not power of 2 */ + if (ja->cur_idx >= ja->discard_idx) + n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx; + else + n = ja->discard_idx - ja->cur_idx; + + if (n > (1 + j->do_reserve)) + return n - (1 + j->do_reserve); + + return 0; +} + static void journal_reclaim(struct cache_set *c) { struct bkey *k = &c->journal.key; struct cache *ca = c->cache; uint64_t last_seq; - unsigned int next; struct journal_device *ja = &ca->journal; atomic_t p __maybe_unused; @@ -653,12 +676,10 @@ static void journal_reclaim(struct cache_set *c) if (c->journal.blocks_free) goto out; - next = (ja->cur_idx + 1) % ca->sb.njournal_buckets; - /* No space available on this device */ - if (next == ja->discard_idx) + if (!free_journal_buckets(c)) goto out; - ja->cur_idx = next; + ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets; k->ptr[0] = MAKE_PTR(0, bucket_to_sector(c, ca->sb.d[ja->cur_idx]), ca->sb.nr_this_dev); @@ -768,7 +789,7 @@ static void journal_write_unlocked(struct closure *cl) w->data->csum = csum_set(w->data); for (i = 0; i < KEY_PTRS(k); i++) { - ca = PTR_CACHE(c, k, i); + ca = c->cache; bio = &ca->journal.bio; atomic_long_add(sectors, &ca->meta_sectors_written); diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h index f2ea34d5f431ba860904d074fca567e772e9daa9..cd316b4a1e95f5b2e5ee260599e44d35fe25a805 100644 --- a/drivers/md/bcache/journal.h +++ b/drivers/md/bcache/journal.h @@ -105,6 +105,7 @@ struct journal { spinlock_t lock; spinlock_t flush_write_lock; bool btree_flushing; + bool do_reserve; /* used when waiting because the journal was full */ struct closure_waitlist wait; struct closure io; @@ -182,5 +183,6 @@ int bch_journal_replay(struct cache_set *c, struct list_head *list); void bch_journal_free(struct cache_set *c); int bch_journal_alloc(struct cache_set *c); +void bch_journal_space_reserve(struct journal *j); #endif /* _BCACHE_JOURNAL_H */ diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 2143263831456217af9cd1676dda4a79722c8d7e..c1a1bd7aa9ec47e2a0f6241356240a5ae132de85 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -878,9 +878,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, struct bio *bio, unsigned int sectors) { int ret = MAP_CONTINUE; - unsigned int reada = 0; struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct bio *miss, *cache_bio; + unsigned int size_limit; s->cache_missed = 1; @@ -890,13 +890,10 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, goto out_submit; } - if (!(bio->bi_opf & REQ_RAHEAD) && - !(bio->bi_opf & (REQ_META|REQ_PRIO)) && - s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA) - reada = min_t(sector_t, dc->readahead >> 9, - get_capacity(bio->bi_disk) - bio_end_sector(bio)); - - s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); + /* Limitation for valid replace key size and cache_bio bvecs number */ + size_limit = min_t(unsigned int, BIO_MAX_PAGES * PAGE_SECTORS, + (1 << KEY_SIZE_BITS) - 1); + s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio)); s->iop.replace_key = KEY(s->iop.inode, 
bio->bi_iter.bi_sector + s->insert_bio_sectors, @@ -908,7 +905,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, s->iop.replace = true; - miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split); + miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO, + &s->d->bio_split); /* btree_search_recurse()'s btree iterator is no good anymore */ ret = miss == bio ? MAP_DONE : -EINTR; @@ -930,9 +928,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO)) goto out_put; - if (reada) - bch_mark_cache_readahead(s->iop.c, s->d); - s->cache_miss = miss; s->iop.bio = cache_bio; bio_get(cache_bio); @@ -1109,6 +1104,12 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio) * which would call closure_get(&dc->disk.cl) */ ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO); + if (!ddip) { + bio->bi_status = BLK_STS_RESOURCE; + bio->bi_end_io(bio); + return; + } + ddip->d = d; /* Count on the bcache device */ ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio); diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c index 503aafe188dce4b6b1ca4a9361ec0a3f637967f6..68b02216033d039b76be143ced490f65f9dc6565 100644 --- a/drivers/md/bcache/stats.c +++ b/drivers/md/bcache/stats.c @@ -46,7 +46,6 @@ read_attribute(cache_misses); read_attribute(cache_bypass_hits); read_attribute(cache_bypass_misses); read_attribute(cache_hit_ratio); -read_attribute(cache_readaheads); read_attribute(cache_miss_collisions); read_attribute(bypassed); @@ -64,7 +63,6 @@ SHOW(bch_stats) DIV_SAFE(var(cache_hits) * 100, var(cache_hits) + var(cache_misses))); - var_print(cache_readaheads); var_print(cache_miss_collisions); sysfs_hprint(bypassed, var(sectors_bypassed) << 9); #undef var @@ -80,17 +78,17 @@ static void bch_stats_release(struct kobject *k) { } -static struct attribute *bch_stats_files[] = { +static struct attribute *bch_stats_attrs[] = { &sysfs_cache_hits, &sysfs_cache_misses, &sysfs_cache_bypass_hits, &sysfs_cache_bypass_misses, &sysfs_cache_hit_ratio, - &sysfs_cache_readaheads, &sysfs_cache_miss_collisions, &sysfs_bypassed, NULL }; +ATTRIBUTE_GROUPS(bch_stats); static KTYPE(bch_stats); int bch_cache_accounting_add_kobjs(struct cache_accounting *acc, @@ -113,7 +111,6 @@ void bch_cache_accounting_clear(struct cache_accounting *acc) acc->total.cache_misses = 0; acc->total.cache_bypass_hits = 0; acc->total.cache_bypass_misses = 0; - acc->total.cache_readaheads = 0; acc->total.cache_miss_collisions = 0; acc->total.sectors_bypassed = 0; } @@ -145,7 +142,6 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at) scale_stat(&stats->cache_misses); scale_stat(&stats->cache_bypass_hits); scale_stat(&stats->cache_bypass_misses); - scale_stat(&stats->cache_readaheads); scale_stat(&stats->cache_miss_collisions); scale_stat(&stats->sectors_bypassed); } @@ -168,7 +164,6 @@ static void scale_accounting(struct timer_list *t) move_stat(cache_misses); move_stat(cache_bypass_hits); move_stat(cache_bypass_misses); - move_stat(cache_readaheads); move_stat(cache_miss_collisions); move_stat(sectors_bypassed); @@ -209,14 +204,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, mark_cache_stats(&c->accounting.collector, hit, bypass); } -void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) -{ - struct cached_dev *dc = container_of(d, struct cached_dev, disk); - - 
atomic_inc(&dc->accounting.collector.cache_readaheads); - atomic_inc(&c->accounting.collector.cache_readaheads); -} - void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d) { struct cached_dev *dc = container_of(d, struct cached_dev, disk); diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h index abfaabf7e7fcf8fee33c8370074551bf5fc91768..ca4f435f7216a7231cf9e1dd4ee8cae56ea93841 100644 --- a/drivers/md/bcache/stats.h +++ b/drivers/md/bcache/stats.h @@ -7,7 +7,6 @@ struct cache_stat_collector { atomic_t cache_misses; atomic_t cache_bypass_hits; atomic_t cache_bypass_misses; - atomic_t cache_readaheads; atomic_t cache_miss_collisions; atomic_t sectors_bypassed; }; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 81f1cc5b34999578b06020875cb2cca30f12e3fb..b5601f200c090ca32a5819c2f8fd74e9e3e67798 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1058,6 +1058,7 @@ static int cached_dev_status_update(void *arg) int bch_cached_dev_run(struct cached_dev *dc) { + int ret = 0; struct bcache_device *d = &dc->disk; char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL); char *env[] = { @@ -1070,19 +1071,15 @@ int bch_cached_dev_run(struct cached_dev *dc) if (dc->io_disable) { pr_err("I/O disabled on cached dev %s\n", dc->backing_dev_name); - kfree(env[1]); - kfree(env[2]); - kfree(buf); - return -EIO; + ret = -EIO; + goto out; } if (atomic_xchg(&dc->running, 1)) { - kfree(env[1]); - kfree(env[2]); - kfree(buf); pr_info("cached dev %s is running already\n", dc->backing_dev_name); - return -EBUSY; + ret = -EBUSY; + goto out; } if (!d->c && @@ -1103,15 +1100,13 @@ int bch_cached_dev_run(struct cached_dev *dc) * only class / kset properties are persistent */ kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); - kfree(env[1]); - kfree(env[2]); - kfree(buf); if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) { pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n"); - return -ENOMEM; + ret = -ENOMEM; + goto out; } dc->status_update_thread = kthread_run(cached_dev_status_update, @@ -1120,7 +1115,11 @@ int bch_cached_dev_run(struct cached_dev *dc) pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n"); } - return 0; +out: + kfree(env[1]); + kfree(env[2]); + kfree(buf); + return ret; } /* @@ -1151,9 +1150,7 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) static void cached_dev_detach_finish(struct work_struct *w) { struct cached_dev *dc = container_of(w, struct cached_dev, detach); - struct closure cl; - - closure_init_stack(&cl); + struct cache_set *c = dc->disk.c; BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); BUG_ON(refcount_read(&dc->count)); @@ -1167,17 +1164,11 @@ static void cached_dev_detach_finish(struct work_struct *w) dc->writeback_thread = NULL; } - memset(&dc->sb.set_uuid, 0, 16); - SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); - - bch_write_bdev_super(dc, &cl); - closure_sync(&cl); - mutex_lock(&bch_register_lock); - calc_cached_dev_sectors(dc->disk.c); bcache_device_detach(&dc->disk); list_move(&dc->list, &uncached_devices); + calc_cached_dev_sectors(c); clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); @@ -1956,7 +1947,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) goto err; if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, 
bio), - BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) + BIOSET_NEED_RESCUER)) goto err; c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb); @@ -2150,6 +2141,7 @@ static int run_cache_set(struct cache_set *c) flash_devs_run(c); + bch_journal_space_reserve(&c->journal); set_bit(CACHE_SET_RUNNING, &c->flags); return 0; err: @@ -2536,7 +2528,7 @@ static void register_cache_worker(struct work_struct *work) module_put(THIS_MODULE); } -static void register_device_aync(struct async_reg_args *args) +static void register_device_async(struct async_reg_args *args) { if (SB_IS_BDEV(args->sb)) INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker); @@ -2597,8 +2589,6 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, else err = "device busy"; mutex_unlock(&bch_register_lock); - if (!IS_ERR(bdev)) - bdput(bdev); if (attr == &ksysfs_register_quiet) goto done; } @@ -2630,7 +2620,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, args->sb = sb; args->sb_disk = sb_disk; args->bdev = bdev; - register_device_aync(args); + register_device_async(args); /* No wait and returns to user space */ goto async_done; } @@ -2638,8 +2628,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (SB_IS_BDEV(sb)) { struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); - if (!dc) + if (!dc) { + ret = -ENOMEM; + err = "cannot allocate memory"; goto out_put_sb_page; + } mutex_lock(&bch_register_lock); ret = register_bdev(sb, sb_disk, bdev, dc); @@ -2650,11 +2643,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, } else { struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); - if (!ca) + if (!ca) { + ret = -ENOMEM; + err = "cannot allocate memory"; goto out_put_sb_page; + } /* blkdev_put() will be called in bch_cache_release() */ - if (register_cache(sb, sb_disk, bdev, ca) != 0) + ret = register_cache(sb, sb_disk, bdev, ca); + if (ret) goto out_free_sb; } @@ -2708,8 +2705,8 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k, } list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) { + char *pdev_set_uuid = pdev->dc->sb.set_uuid; list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { - char *pdev_set_uuid = pdev->dc->sb.set_uuid; char *set_uuid = c->set_uuid; if (!memcmp(pdev_set_uuid, set_uuid, 16)) { @@ -2771,7 +2768,7 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) * The reason bch_register_lock is not held to call * bch_cache_set_stop() and bcache_device_stop() is to * avoid potential deadlock during reboot, because cache - * set or bcache device stopping process will acqurie + * set or bcache device stopping process will acquire * bch_register_lock too. 
* * We are safe here because bcache_is_reboot sets to diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 554e3afc9b688be1b3415d7fb1fdf407f5c192b0..8467e37411a7c65a41d0abd6f0f476a0b39a3b46 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -117,10 +117,14 @@ rw_attribute(writeback_running); rw_attribute(writeback_percent); rw_attribute(writeback_delay); rw_attribute(writeback_rate); +rw_attribute(writeback_consider_fragment); rw_attribute(writeback_rate_update_seconds); rw_attribute(writeback_rate_i_term_inverse); rw_attribute(writeback_rate_p_term_inverse); +rw_attribute(writeback_rate_fp_term_low); +rw_attribute(writeback_rate_fp_term_mid); +rw_attribute(writeback_rate_fp_term_high); rw_attribute(writeback_rate_minimum); read_attribute(writeback_rate_debug); @@ -133,7 +137,6 @@ rw_attribute(io_disable); rw_attribute(discard); rw_attribute(running); rw_attribute(label); -rw_attribute(readahead); rw_attribute(errors); rw_attribute(io_error_limit); rw_attribute(io_error_halflife); @@ -195,6 +198,7 @@ SHOW(__bch_cached_dev) var_printf(bypass_torture_test, "%i"); var_printf(writeback_metadata, "%i"); var_printf(writeback_running, "%i"); + var_printf(writeback_consider_fragment, "%i"); var_print(writeback_delay); var_print(writeback_percent); sysfs_hprint(writeback_rate, @@ -205,6 +209,9 @@ SHOW(__bch_cached_dev) var_print(writeback_rate_update_seconds); var_print(writeback_rate_i_term_inverse); var_print(writeback_rate_p_term_inverse); + var_print(writeback_rate_fp_term_low); + var_print(writeback_rate_fp_term_mid); + var_print(writeback_rate_fp_term_high); var_print(writeback_rate_minimum); if (attr == &sysfs_writeback_rate_debug) { @@ -252,7 +259,6 @@ SHOW(__bch_cached_dev) var_printf(partial_stripes_expensive, "%u"); var_hprint(sequential_cutoff); - var_hprint(readahead); sysfs_print(running, atomic_read(&dc->running)); sysfs_print(state, states[BDEV_STATE(&dc->sb)]); @@ -303,6 +309,7 @@ STORE(__cached_dev) sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test); sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata); sysfs_strtoul_bool(writeback_running, dc->writeback_running); + sysfs_strtoul_bool(writeback_consider_fragment, dc->writeback_consider_fragment); sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX); sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, @@ -331,6 +338,16 @@ STORE(__cached_dev) sysfs_strtoul_clamp(writeback_rate_p_term_inverse, dc->writeback_rate_p_term_inverse, 1, UINT_MAX); + sysfs_strtoul_clamp(writeback_rate_fp_term_low, + dc->writeback_rate_fp_term_low, + 1, dc->writeback_rate_fp_term_mid - 1); + sysfs_strtoul_clamp(writeback_rate_fp_term_mid, + dc->writeback_rate_fp_term_mid, + dc->writeback_rate_fp_term_low + 1, + dc->writeback_rate_fp_term_high - 1); + sysfs_strtoul_clamp(writeback_rate_fp_term_high, + dc->writeback_rate_fp_term_high, + dc->writeback_rate_fp_term_mid + 1, UINT_MAX); sysfs_strtoul_clamp(writeback_rate_minimum, dc->writeback_rate_minimum, 1, UINT_MAX); @@ -346,7 +363,6 @@ STORE(__cached_dev) sysfs_strtoul_clamp(sequential_cutoff, dc->sequential_cutoff, 0, UINT_MAX); - d_strtoi_h(readahead); if (attr == &sysfs_clear_stats) bch_cache_accounting_clear(&dc->accounting); @@ -404,7 +420,7 @@ STORE(__cached_dev) if (!env) return -ENOMEM; add_uevent_var(env, "DRIVER=bcache"); - add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid), + add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid); add_uevent_var(env, "CACHED_LABEL=%s", buf); 
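
The sysfs fix just above swaps a stray trailing comma for a semicolon after add_uevent_var(). The comma operator silently fuses the two calls into one expression statement, so the typo compiles and behaves, which is why this class of bug survives review. A contrived, compilable illustration (the helper name is made up):

#include <stdio.h>

static int add_var(const char *k, const char *v)
{
	return printf("%s=%s\n", k, v);
}

int main(void)
{
	/* intended: two separate statements */
	add_var("DRIVER", "bcache");
	add_var("CACHED_LABEL", "disk0");

	/* typo'd: still legal C -- the comma operator evaluates left to
	 * right and discards the first result.  Harmless here, but risky
	 * if the comma lands before an if or a return. */
	add_var("DRIVER", "bcache"),
	add_var("CACHED_LABEL", "disk0");

	return 0;
}
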
kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, @@ -484,7 +500,7 @@ STORE(bch_cached_dev) return size; } -static struct attribute *bch_cached_dev_files[] = { +static struct attribute *bch_cached_dev_attrs[] = { &sysfs_attach, &sysfs_detach, &sysfs_stop, @@ -499,9 +515,13 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_writeback_delay, &sysfs_writeback_percent, &sysfs_writeback_rate, + &sysfs_writeback_consider_fragment, &sysfs_writeback_rate_update_seconds, &sysfs_writeback_rate_i_term_inverse, &sysfs_writeback_rate_p_term_inverse, + &sysfs_writeback_rate_fp_term_low, + &sysfs_writeback_rate_fp_term_mid, + &sysfs_writeback_rate_fp_term_high, &sysfs_writeback_rate_minimum, &sysfs_writeback_rate_debug, &sysfs_io_errors, @@ -515,7 +535,6 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_running, &sysfs_state, &sysfs_label, - &sysfs_readahead, #ifdef CONFIG_BCACHE_DEBUG &sysfs_verify, &sysfs_bypass_torture_test, @@ -524,6 +543,7 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_backing_dev_uuid, NULL }; +ATTRIBUTE_GROUPS(bch_cached_dev); KTYPE(bch_cached_dev); SHOW(bch_flash_dev) @@ -581,7 +601,7 @@ STORE(__bch_flash_dev) } STORE_LOCKED(bch_flash_dev) -static struct attribute *bch_flash_dev_files[] = { +static struct attribute *bch_flash_dev_attrs[] = { &sysfs_unregister, #if 0 &sysfs_data_csum, @@ -590,6 +610,7 @@ static struct attribute *bch_flash_dev_files[] = { &sysfs_size, NULL }; +ATTRIBUTE_GROUPS(bch_flash_dev); KTYPE(bch_flash_dev); struct bset_stats_op { @@ -936,7 +957,7 @@ static void bch_cache_set_internal_release(struct kobject *k) { } -static struct attribute *bch_cache_set_files[] = { +static struct attribute *bch_cache_set_attrs[] = { &sysfs_unregister, &sysfs_stop, &sysfs_synchronous, @@ -961,9 +982,10 @@ static struct attribute *bch_cache_set_files[] = { &sysfs_clear_stats, NULL }; +ATTRIBUTE_GROUPS(bch_cache_set); KTYPE(bch_cache_set); -static struct attribute *bch_cache_set_internal_files[] = { +static struct attribute *bch_cache_set_internal_attrs[] = { &sysfs_active_journal_entries, sysfs_time_stats_attribute_list(btree_gc, sec, ms) @@ -1003,6 +1025,7 @@ static struct attribute *bch_cache_set_internal_files[] = { &sysfs_feature_incompat, NULL }; +ATTRIBUTE_GROUPS(bch_cache_set_internal); KTYPE(bch_cache_set_internal); static int __bch_cache_cmp(const void *l, const void *r) @@ -1071,8 +1094,10 @@ SHOW(__bch_cache) --n; while (cached < p + n && - *cached == BTREE_PRIO) - cached++, n--; + *cached == BTREE_PRIO) { + cached++; + n--; + } for (i = 0; i < n; i++) sum += INITIAL_PRIO - cached[i]; @@ -1161,7 +1186,7 @@ STORE(__bch_cache) } STORE_LOCKED(bch_cache) -static struct attribute *bch_cache_files[] = { +static struct attribute *bch_cache_attrs[] = { &sysfs_bucket_size, &sysfs_block_size, &sysfs_nbuckets, @@ -1175,4 +1200,5 @@ static struct attribute *bch_cache_files[] = { &sysfs_cache_replacement_policy, NULL }; +ATTRIBUTE_GROUPS(bch_cache); KTYPE(bch_cache); diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h index 215df32f567b9143c192cc2d67b63a5831b150e7..a2ff6447b699f595f6b7a1b0e1fe2504972b067c 100644 --- a/drivers/md/bcache/sysfs.h +++ b/drivers/md/bcache/sysfs.h @@ -9,7 +9,7 @@ struct kobj_type type ## _ktype = { \ .show = type ## _show, \ .store = type ## _store \ }), \ - .default_attrs = type ## _files \ + .default_groups = type ## _groups \ } #define SHOW(fn) \ @@ -51,13 +51,27 @@ STORE(fn) \ #define sysfs_printf(file, fmt, ...) 
\ do { \ if (attr == &sysfs_ ## file) \ - return snprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__); \ + return sysfs_emit(buf, fmt "\n", __VA_ARGS__); \ } while (0) #define sysfs_print(file, var) \ do { \ if (attr == &sysfs_ ## file) \ - return snprint(buf, PAGE_SIZE, var); \ + return sysfs_emit(buf, \ + __builtin_types_compatible_p(typeof(var), int) \ + ? "%i\n" : \ + __builtin_types_compatible_p(typeof(var), unsigned int) \ + ? "%u\n" : \ + __builtin_types_compatible_p(typeof(var), long) \ + ? "%li\n" : \ + __builtin_types_compatible_p(typeof(var), unsigned long)\ + ? "%lu\n" : \ + __builtin_types_compatible_p(typeof(var), int64_t) \ + ? "%lli\n" : \ + __builtin_types_compatible_p(typeof(var), uint64_t) \ + ? "%llu\n" : \ + __builtin_types_compatible_p(typeof(var), const char *) \ + ? "%s\n" : "%i\n", var); \ } while (0) #define sysfs_hprint(file, val) \ diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index c029f744319080570704de408d0cbcfff2c788e5..97c524679c8ad87ac5530cc390c17d83966a5dad 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -27,7 +27,7 @@ struct closure; #else /* DEBUG */ -#define EBUG_ON(cond) do { if (cond); } while (0) +#define EBUG_ON(cond) do { if (cond) do {} while (0); } while (0) #define atomic_dec_bug(v) atomic_dec(v) #define atomic_inc_bug(v, i) atomic_inc(v) @@ -342,23 +342,6 @@ static inline int bch_strtoul_h(const char *cp, long *res) _r; \ }) -#define snprint(buf, size, var) \ - snprintf(buf, size, \ - __builtin_types_compatible_p(typeof(var), int) \ - ? "%i\n" : \ - __builtin_types_compatible_p(typeof(var), unsigned int) \ - ? "%u\n" : \ - __builtin_types_compatible_p(typeof(var), long) \ - ? "%li\n" : \ - __builtin_types_compatible_p(typeof(var), unsigned long)\ - ? "%lu\n" : \ - __builtin_types_compatible_p(typeof(var), int64_t) \ - ? "%lli\n" : \ - __builtin_types_compatible_p(typeof(var), uint64_t) \ - ? "%llu\n" : \ - __builtin_types_compatible_p(typeof(var), const char *) \ - ? "%s\n" : "%i\n", var) - ssize_t bch_hprint(char *buf, int64_t v); bool bch_is_zero(const char *p, size_t n); diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 3c74996978dadef2642c3ac716abb4c46abda3c9..3e879e98537343f7db2e2633f511e84dffee048a 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -88,6 +88,44 @@ static void __update_writeback_rate(struct cached_dev *dc) int64_t integral_scaled; uint32_t new_rate; + /* + * We need to consider the number of dirty buckets as well + * when calculating the proportional_scaled, Otherwise we might + * have an unreasonable small writeback rate at a highly fragmented situation + * when very few dirty sectors consumed a lot dirty buckets, the + * worst case is when dirty buckets reached cutoff_writeback_sync and + * dirty data is still not even reached to writeback percent, so the rate + * still will be at the minimum value, which will cause the write + * stuck at a non-writeback mode. 
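
The comment above describes why badly fragmented buckets need an extra push; the code lands in the continuation of this hunk. As a compact standalone model of that piecewise boost: the thresholds (50/57/64) and fp_term defaults (1/10/1000) are the values this patch adds elsewhere, while the PI controller and the comparison against the existing proportional term are deliberately omitted.

#include <stdint.h>
#include <stdio.h>

#define FRAG_LOW   50   /* BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW  */
#define FRAG_MID   57   /* BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID  */
#define FRAG_HIGH  64   /* BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH */

/* in_use: %% of buckets holding data; dirty: dirty sectors;
 * dirty_buckets: buckets containing any dirty data */
static int64_t fragment_boost(unsigned int in_use, int64_t dirty,
			      int64_t dirty_buckets,
			      unsigned int bucket_sectors)
{
	int64_t fragment, fp_term;

	if (in_use <= FRAG_LOW || dirty <= 0 || dirty_buckets <= 0)
		return 0;

	/* sectors the dirty buckets could hold vs. sectors actually dirty */
	fragment = (dirty_buckets * bucket_sectors) / dirty;

	if (in_use <= FRAG_MID)
		fp_term = 1    * (int64_t)(in_use - FRAG_LOW);   /* fp_term_low  */
	else if (in_use <= FRAG_HIGH)
		fp_term = 10   * (int64_t)(in_use - FRAG_MID);   /* fp_term_mid  */
	else
		fp_term = 1000 * (int64_t)(in_use - FRAG_HIGH);  /* fp_term_high */

	/* only kick in when buckets are badly under-filled (fragment > 3) */
	return fragment > 3 ? (dirty / dirty_buckets) * fp_term : 0;
}

int main(void)
{
	/* 60%% of buckets in use, 1024-sector buckets that are on average
	 * only ~1/8 full of dirty data: boost = 128 * 10 * (60 - 57) */
	printf("boost = %lld\n",
	       (long long)fragment_boost(60, 128 * 1000, 1000, 1024));
	return 0;
}
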
+ */ + struct cache_set *c = dc->disk.c; + + int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets; + + if (dc->writeback_consider_fragment && + c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) { + int64_t fragment = + div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty); + int64_t fp_term; + int64_t fps; + + if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) { + fp_term = (int64_t)dc->writeback_rate_fp_term_low * + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW); + } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) { + fp_term = (int64_t)dc->writeback_rate_fp_term_mid * + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID); + } else { + fp_term = (int64_t)dc->writeback_rate_fp_term_high * + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH); + } + fps = div_s64(dirty, dirty_buckets) * fp_term; + if (fragment > 3 && fps > proportional_scaled) { + /* Only overrite the p when fragment > 3 */ + proportional_scaled = fps; + } + } + if ((error < 0 && dc->writeback_rate_integral > 0) || (error > 0 && time_before64(local_clock(), dc->writeback_rate.next + NSEC_PER_MSEC))) { @@ -197,19 +235,27 @@ static void update_writeback_rate(struct work_struct *work) return; } - if (atomic_read(&dc->has_dirty) && dc->writeback_percent) { - /* - * If the whole cache set is idle, set_at_max_writeback_rate() - * will set writeback rate to a max number. Then it is - * unncessary to update writeback rate for an idle cache set - * in maximum writeback rate number(s). - */ - if (!set_at_max_writeback_rate(c, dc)) { - down_read(&dc->writeback_lock); + /* + * If the whole cache set is idle, set_at_max_writeback_rate() + * will set writeback rate to a max number. Then it is + * unncessary to update writeback rate for an idle cache set + * in maximum writeback rate number(s). 
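
The continuation of update_writeback_rate() just below adds a bounded trylock backoff so the periodic worker does not pile up behind writeback_lock. A userspace model of that backoff, with a pthread rwlock standing in for the kernel rwsem and MAX_SKIPS mirroring BCH_WBRATE_UPDATE_MAX_SKIPS; everything else is invented for illustration.

#include <pthread.h>
#include <stdio.h>

#define MAX_SKIPS 15

struct rate_ctl {
	pthread_rwlock_t lock;
	int retry;                  /* consecutive periods we backed off */
};

/* returns 1 if the rate was updated this period, 0 if we skipped */
static int update_rate(struct rate_ctl *rc)
{
	if (pthread_rwlock_tryrdlock(&rc->lock) != 0) {
		if (++rc->retry <= MAX_SKIPS)
			return 0;               /* skip, try again next period */
		pthread_rwlock_rdlock(&rc->lock); /* budget used up: block */
		rc->retry = 0;
	}

	/* ... recompute the writeback rate under the read lock ... */

	pthread_rwlock_unlock(&rc->lock);
	return 1;
}

int main(void)
{
	struct rate_ctl rc = { .retry = 0 };

	pthread_rwlock_init(&rc.lock, NULL);
	printf("updated: %d\n", update_rate(&rc));
	pthread_rwlock_destroy(&rc.lock);
	return 0;
}
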
+ */ + if (atomic_read(&dc->has_dirty) && dc->writeback_percent && + !set_at_max_writeback_rate(c, dc)) { + do { + if (!down_read_trylock((&dc->writeback_lock))) { + dc->rate_update_retry++; + if (dc->rate_update_retry <= + BCH_WBRATE_UPDATE_MAX_SKIPS) + break; + down_read(&dc->writeback_lock); + dc->rate_update_retry = 0; + } __update_writeback_rate(dc); update_gc_after_writeback(c); up_read(&dc->writeback_lock); - } + } while (0); } @@ -378,7 +424,7 @@ static void read_dirty_endio(struct bio *bio) struct dirty_io *io = w->private; /* is_read = 1 */ - bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), + bch_count_io_errors(io->dc->disk.c->cache, bio->bi_status, 1, "reading dirty data from cache"); @@ -472,8 +518,7 @@ static void read_dirty(struct cached_dev *dc) dirty_init(w); bio_set_op_attrs(&io->bio, REQ_OP_READ, 0); io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); - bio_set_dev(&io->bio, - PTR_CACHE(dc->disk.c, &w->key, 0)->bdev); + bio_set_dev(&io->bio, dc->disk.c->cache->bdev); io->bio.bi_end_io = read_dirty_endio; if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL)) @@ -548,10 +593,13 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, sectors_dirty = atomic_add_return(s, d->stripe_sectors_dirty + stripe); - if (sectors_dirty == d->stripe_size) - set_bit(stripe, d->full_dirty_stripes); - else - clear_bit(stripe, d->full_dirty_stripes); + if (sectors_dirty == d->stripe_size) { + if (!test_bit(stripe, d->full_dirty_stripes)) + set_bit(stripe, d->full_dirty_stripes); + } else { + if (test_bit(stripe, d->full_dirty_stripes)) + clear_bit(stripe, d->full_dirty_stripes); + } nr_sectors -= s; stripe_offset = 0; @@ -705,6 +753,15 @@ static int bch_writeback_thread(void *arg) * bch_cached_dev_detach(). */ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) { + struct closure cl; + + closure_init_stack(&cl); + memset(&dc->sb.set_uuid, 0, 16); + SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); + + bch_write_bdev_super(dc, &cl); + closure_sync(&cl); + up_write(&dc->writeback_lock); break; } @@ -756,13 +813,11 @@ static int bch_writeback_thread(void *arg) /* Init */ #define INIT_KEYS_EACH_TIME 500000 -#define INIT_KEYS_SLEEP_MS 100 struct sectors_dirty_init { struct btree_op op; unsigned int inode; size_t count; - struct bkey start; }; static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, @@ -778,11 +833,8 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, KEY_START(k), KEY_SIZE(k)); op->count++; - if (atomic_read(&b->c->search_inflight) && - !(op->count % INIT_KEYS_EACH_TIME)) { - bkey_copy_key(&op->start, k); - return -EAGAIN; - } + if (!(op->count % INIT_KEYS_EACH_TIME)) + cond_resched(); return MAP_CONTINUE; } @@ -797,24 +849,16 @@ static int bch_root_node_dirty_init(struct cache_set *c, bch_btree_op_init(&op.op, -1); op.inode = d->id; op.count = 0; - op.start = KEY(op.inode, 0, 0); - - do { - ret = bcache_btree(map_keys_recurse, - k, - c->root, - &op.op, - &op.start, - sectors_dirty_init_fn, - 0); - if (ret == -EAGAIN) - schedule_timeout_interruptible( - msecs_to_jiffies(INIT_KEYS_SLEEP_MS)); - else if (ret < 0) { - pr_warn("sectors dirty init failed, ret=%d!\n", ret); - break; - } - } while (ret == -EAGAIN); + + ret = bcache_btree(map_keys_recurse, + k, + c->root, + &op.op, + &KEY(op.inode, 0, 0), + sectors_dirty_init_fn, + 0); + if (ret < 0) + pr_warn("sectors dirty init failed, ret=%d!\n", ret); return ret; } @@ -858,7 +902,6 @@ static int bch_dirty_init_thread(void *arg) goto out; } skip_nr--; - cond_resched(); } if (p) 
{ @@ -868,7 +911,6 @@ static int bch_dirty_init_thread(void *arg) p = NULL; prev_idx = cur_idx; - cond_resched(); } out: @@ -899,65 +941,56 @@ void bch_sectors_dirty_init(struct bcache_device *d) struct btree_iter iter; struct sectors_dirty_init op; struct cache_set *c = d->c; - struct bch_dirty_init_state *state; - char name[32]; + struct bch_dirty_init_state state; /* Just count root keys if no leaf node */ + rw_lock(0, c->root, c->root->level); if (c->root->level == 0) { bch_btree_op_init(&op.op, -1); op.inode = d->id; op.count = 0; - op.start = KEY(op.inode, 0, 0); for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) sectors_dirty_init_fn(&op.op, c->root, k); - return; - } - state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL); - if (!state) { - pr_warn("sectors dirty init failed: cannot allocate memory\n"); + rw_unlock(0, c->root); return; } - state->c = c; - state->d = d; - state->total_threads = bch_btre_dirty_init_thread_nr(); - state->key_idx = 0; - spin_lock_init(&state->idx_lock); - atomic_set(&state->started, 0); - atomic_set(&state->enough, 0); - init_waitqueue_head(&state->wait); - - for (i = 0; i < state->total_threads; i++) { - /* Fetch latest state->enough earlier */ + memset(&state, 0, sizeof(struct bch_dirty_init_state)); + state.c = c; + state.d = d; + state.total_threads = bch_btre_dirty_init_thread_nr(); + state.key_idx = 0; + spin_lock_init(&state.idx_lock); + atomic_set(&state.started, 0); + atomic_set(&state.enough, 0); + init_waitqueue_head(&state.wait); + + for (i = 0; i < state.total_threads; i++) { + /* Fetch latest state.enough earlier */ smp_mb__before_atomic(); - if (atomic_read(&state->enough)) + if (atomic_read(&state.enough)) break; - state->infos[i].state = state; - atomic_inc(&state->started); - snprintf(name, sizeof(name), "bch_dirty_init[%d]", i); - - state->infos[i].thread = - kthread_run(bch_dirty_init_thread, - &state->infos[i], - name); - if (IS_ERR(state->infos[i].thread)) { + state.infos[i].state = &state; + state.infos[i].thread = + kthread_run(bch_dirty_init_thread, &state.infos[i], + "bch_dirtcnt[%d]", i); + if (IS_ERR(state.infos[i].thread)) { pr_err("fails to run thread bch_dirty_init[%d]\n", i); for (--i; i >= 0; i--) - kthread_stop(state->infos[i].thread); + kthread_stop(state.infos[i].thread); goto out; } + atomic_inc(&state.started); } - wait_event_interruptible(state->wait, - atomic_read(&state->started) == 0 || - test_bit(CACHE_SET_IO_DISABLE, &c->flags)); - out: - kfree(state); + /* Must wait for all threads to stop. 
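
The bcache_dev_sectors_dirty_add() change earlier in this writeback.c hunk only calls set_bit()/clear_bit() after test_bit() says the value actually needs to change; an unconditional atomic read-modify-write dirties the shared full_dirty_stripes cacheline even when nothing changes. A userspace sketch of the same idea, with C11 atomics standing in for the kernel bitops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool test_bit(atomic_ulong *map, unsigned int nr)
{
	return (atomic_load_explicit(map, memory_order_relaxed) >> nr) & 1;
}

static void mark_stripe(atomic_ulong *map, unsigned int nr, bool full)
{
	if (full) {
		if (!test_bit(map, nr))                  /* read first...     */
			atomic_fetch_or(map, 1UL << nr); /* ...write only if  */
	} else {                                         /* the bit must flip */
		if (test_bit(map, nr))
			atomic_fetch_and(map, ~(1UL << nr));
	}
}

int main(void)
{
	atomic_ulong map = 0;

	mark_stripe(&map, 3, true);     /* sets bit 3              */
	mark_stripe(&map, 3, true);     /* no store issued at all  */
	mark_stripe(&map, 3, false);    /* clears bit 3            */
	printf("map = %#lx\n", (unsigned long)atomic_load(&map));
	return 0;
}
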
*/ + wait_event(state.wait, atomic_read(&state.started) == 0); + rw_unlock(0, c->root); } void bch_cached_dev_writeback_init(struct cached_dev *dc) @@ -968,6 +1001,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_metadata = true; dc->writeback_running = false; + dc->writeback_consider_fragment = true; dc->writeback_percent = 10; dc->writeback_delay = 30; atomic_long_set(&dc->writeback_rate.rate, 1024); @@ -975,8 +1009,14 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT; dc->writeback_rate_p_term_inverse = 40; + dc->writeback_rate_fp_term_low = 1; + dc->writeback_rate_fp_term_mid = 10; + dc->writeback_rate_fp_term_high = 1000; dc->writeback_rate_i_term_inverse = 10000; + /* For dc->writeback_lock contention in update_writeback_rate() */ + dc->rate_update_retry = 0; + WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); } diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 3f1230e22de013a67a8bc332a53d52dacba8990c..31df716951f66b127303da51be522ce943de1064 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -16,7 +16,11 @@ #define BCH_AUTO_GC_DIRTY_THRESHOLD 50 -#define BCH_DIRTY_INIT_THRD_MAX 64 +#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW 50 +#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57 +#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64 + +#define BCH_DIRTY_INIT_THRD_MAX 12 /* * 14 (16384ths) is chosen here as something that each backing device * should be a reasonable fraction of the share, and not to blow up diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 2aa4acd33af3903506824331181179f856502b97..b9677f701b6a11a0011683dc01f0721aa8f07285 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2561,7 +2561,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string static int get_key_size(char **key_string) { - return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1; + return (*key_string[0] == ':') ? 
-EINVAL : (int)(strlen(*key_string) >> 1); } #endif /* CONFIG_KEYS */ diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 4c7da1c4e6cb9e50c2b828e41a0716c3ad12a4c1..f7471a2642dd4162b2c29e4f416166b45eb000f5 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2354,9 +2354,11 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, dm_integrity_io_error(ic, "invalid sector in journal", -EIO); sec &= ~(sector_t)(ic->sectors_per_block - 1); } + if (unlikely(sec >= ic->provided_data_sectors)) { + journal_entry_set_unused(je); + continue; + } } - if (unlikely(sec >= ic->provided_data_sectors)) - continue; get_area_and_offset(ic, sec, &area, &offset); restore_last_bytes(ic, access_journal_data(ic, i, j), je); for (k = j + 1; k < ic->journal_section_entries; k++) { diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c index ab7883cff8b2236064f6d8870d398f8eabc1407f..9f5713b76794d686c17bff3b7d0a3dedc7d10505 100644 --- a/drivers/media/i2c/adv7511-v4l2.c +++ b/drivers/media/i2c/adv7511-v4l2.c @@ -555,7 +555,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_ buffer[3] = 0; buffer[3] = hdmi_infoframe_checksum(buffer, len + 4); - if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) { + if (hdmi_infoframe_unpack(&frame, buffer, len + 4) < 0) { v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc); return; } diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index d1f58795794fd0a2c2278c1692cdfc7ed01e5f81..8cf1704308bf5aaef736debb1a45dad5d930445a 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -2454,7 +2454,7 @@ static int adv76xx_read_infoframe(struct v4l2_subdev *sd, int index, buffer[i + 3] = infoframe_read(sd, adv76xx_cri[index].payload_addr + i); - if (hdmi_infoframe_unpack(frame, buffer, sizeof(buffer)) < 0) { + if (hdmi_infoframe_unpack(frame, buffer, len + 3) < 0) { v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, adv76xx_cri[index].desc); return -ENOENT; diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index f7d2b6cd3008b3e4e9bf0e46de05b581cd779aea..a870117feb44c550b65c02c8b7b4b1d8e5afeb2b 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -2574,7 +2574,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7842_cfg_read_ for (i = 0; i < len; i++) buffer[i + 3] = infoframe_read(sd, cri->payload_addr + i); - if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) { + if (hdmi_infoframe_unpack(&frame, buffer, len + 3) < 0) { v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc); return; } diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index 35a51e9b539da194c766e72b7156225db043f9a2..1f0e4b913a053485493a90bef6b049fbefc46a42 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -3898,7 +3898,7 @@ static int bttv_register_video(struct bttv *btv) /* video */ vdev_init(btv, &btv->video_dev, &bttv_video_template, "video"); - btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | + btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; if (btv->tuner_type != TUNER_ABSENT) btv->video_dev.device_caps |= V4L2_CAP_TUNER; @@ -3919,7 +3919,7 @@ static int bttv_register_video(struct bttv *btv) /* vbi */ vdev_init(btv, &btv->vbi_dev, &bttv_video_template, "vbi"); 
btv->vbi_dev.device_caps = V4L2_CAP_VBI_CAPTURE | V4L2_CAP_READWRITE | - V4L2_CAP_STREAMING | V4L2_CAP_TUNER; + V4L2_CAP_STREAMING; if (btv->tuner_type != TUNER_ABSENT) btv->vbi_dev.device_caps |= V4L2_CAP_TUNER; diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c index a57c991b165b1ab84ddcd4edf79cd68dd62c8ac0..10d2971ef0624ff2b87119567d7a099565245b81 100644 --- a/drivers/media/pci/cx88/cx88-mpeg.c +++ b/drivers/media/pci/cx88/cx88-mpeg.c @@ -162,6 +162,9 @@ int cx8802_start_dma(struct cx8802_dev *dev, cx_write(MO_TS_GPCNTRL, GP_COUNT_CONTROL_RESET); q->count = 0; + /* clear interrupt status register */ + cx_write(MO_TS_INTSTAT, 0x1f1111); + /* enable irqs */ dprintk(1, "setting the interrupt mask\n"); cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_TSINT); diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h index e5efe525ad7bf91dc726c78581d91a5ddce9a75e..00caf60ff98903f4fcf9c0209768225ee883d565 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.h +++ b/drivers/media/pci/ivtv/ivtv-driver.h @@ -332,7 +332,6 @@ struct ivtv_stream { struct ivtv *itv; /* for ease of use */ const char *name; /* name of the stream */ int type; /* stream type */ - u32 caps; /* V4L2 capabilities */ struct v4l2_fh *fh; /* pointer to the streaming filehandle */ spinlock_t qlock; /* locks access to the queues */ diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c index 35dccb31174c1e82d77ba72d6a58b7b9af7b34e8..a9d69b253516b610d9b772b7dd56d67ba79478b2 100644 --- a/drivers/media/pci/ivtv/ivtv-ioctl.c +++ b/drivers/media/pci/ivtv/ivtv-ioctl.c @@ -443,7 +443,7 @@ static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_f struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; struct v4l2_window *winfmt = &fmt->fmt.win; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; if (!itv->osd_video_pbase) return -EINVAL; @@ -554,7 +554,7 @@ static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2 u32 chromakey = fmt->fmt.win.chromakey; u8 global_alpha = fmt->fmt.win.global_alpha; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; if (!itv->osd_video_pbase) return -EINVAL; @@ -1388,7 +1388,7 @@ static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb) 0, }; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -ENOTTY; if (!itv->osd_video_pbase) return -ENOTTY; @@ -1455,7 +1455,7 @@ static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffe struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; struct yuv_playback_info *yi = &itv->yuv_info; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -ENOTTY; if (!itv->osd_video_pbase) return -ENOTTY; @@ -1475,7 +1475,7 @@ static int ivtv_overlay(struct file *file, void *fh, unsigned int on) struct ivtv *itv = id->itv; struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -ENOTTY; if (!itv->osd_video_pbase) return -ENOTTY; diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c index f04ee84bab5fd38690914860ea6c7db7b51ae318..f9de5d1605fe37aea734d0e1f24963ea099cab11 
100644 --- a/drivers/media/pci/ivtv/ivtv-streams.c +++ b/drivers/media/pci/ivtv/ivtv-streams.c @@ -176,7 +176,7 @@ static void ivtv_stream_init(struct ivtv *itv, int type) s->itv = itv; s->type = type; s->name = ivtv_stream_info[type].name; - s->caps = ivtv_stream_info[type].v4l2_caps; + s->vdev.device_caps = ivtv_stream_info[type].v4l2_caps; if (ivtv_stream_info[type].pio) s->dma = PCI_DMA_NONE; @@ -299,12 +299,9 @@ static int ivtv_reg_dev(struct ivtv *itv, int type) if (s_mpg->vdev.v4l2_dev) num = s_mpg->vdev.num + ivtv_stream_info[type].num_offset; } - s->vdev.device_caps = s->caps; - if (itv->osd_video_pbase) { - itv->streams[IVTV_DEC_STREAM_TYPE_YUV].vdev.device_caps |= - V4L2_CAP_VIDEO_OUTPUT_OVERLAY; - itv->streams[IVTV_DEC_STREAM_TYPE_MPG].vdev.device_caps |= - V4L2_CAP_VIDEO_OUTPUT_OVERLAY; + if (itv->osd_video_pbase && (type == IVTV_DEC_STREAM_TYPE_YUV || + type == IVTV_DEC_STREAM_TYPE_MPG)) { + s->vdev.device_caps |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY; itv->v4l2_cap |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY; } video_set_drvdata(&s->vdev, s); diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c index 7a1fb067b0e09b81e6453abd006edcc623e05978..d3cde05a6ebab80027cb5722c9d3eccb836022a6 100644 --- a/drivers/media/pci/saa7134/saa7134-alsa.c +++ b/drivers/media/pci/saa7134/saa7134-alsa.c @@ -1214,16 +1214,14 @@ static int alsa_device_exit(struct saa7134_dev *dev) static int saa7134_alsa_init(void) { - struct saa7134_dev *dev = NULL; - struct list_head *list; + struct saa7134_dev *dev; saa7134_dmasound_init = alsa_device_init; saa7134_dmasound_exit = alsa_device_exit; pr_info("saa7134 ALSA driver for DMA sound loaded\n"); - list_for_each(list,&saa7134_devlist) { - dev = list_entry(list, struct saa7134_dev, devlist); + list_for_each_entry(dev, &saa7134_devlist, devlist) { if (dev->pci->device == PCI_DEVICE_ID_PHILIPS_SAA7130) pr_info("%s/alsa: %s doesn't support digital audio\n", dev->name, saa7134_boards[dev->board].name); @@ -1231,7 +1229,7 @@ static int saa7134_alsa_init(void) alsa_device_init(dev); } - if (dev == NULL) + if (list_empty(&saa7134_devlist)) pr_info("saa7134 ALSA: no saa7134 cards found\n"); return 0; diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c index debc7509c173c41e218bbebbebeeeb9f4fc7ca1e..757a58829a512a21602f7902ee997c631a5e19c5 100644 --- a/drivers/media/platform/aspeed-video.c +++ b/drivers/media/platform/aspeed-video.c @@ -151,7 +151,7 @@ #define VE_SRC_TB_EDGE_DET_BOT GENMASK(28, VE_SRC_TB_EDGE_DET_BOT_SHF) #define VE_MODE_DETECT_STATUS 0x098 -#define VE_MODE_DETECT_H_PIXELS GENMASK(11, 0) +#define VE_MODE_DETECT_H_PERIOD GENMASK(11, 0) #define VE_MODE_DETECT_V_LINES_SHF 16 #define VE_MODE_DETECT_V_LINES GENMASK(27, VE_MODE_DETECT_V_LINES_SHF) #define VE_MODE_DETECT_STATUS_VSYNC BIT(28) @@ -162,6 +162,8 @@ #define VE_SYNC_STATUS_VSYNC_SHF 16 #define VE_SYNC_STATUS_VSYNC GENMASK(27, VE_SYNC_STATUS_VSYNC_SHF) +#define VE_H_TOTAL_PIXELS 0x0A0 + #define VE_INTERRUPT_CTRL 0x304 #define VE_INTERRUPT_STATUS 0x308 #define VE_INTERRUPT_MODE_DETECT_WD BIT(0) @@ -765,6 +767,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) u32 src_lr_edge; u32 src_tb_edge; u32 sync; + u32 htotal; struct v4l2_bt_timings *det = &video->detected_timings; det->width = MIN_WIDTH; @@ -809,6 +812,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) src_tb_edge = aspeed_video_read(video, VE_SRC_TB_EDGE_DET); mds = aspeed_video_read(video, VE_MODE_DETECT_STATUS); sync = 
aspeed_video_read(video, VE_SYNC_STATUS); + htotal = aspeed_video_read(video, VE_H_TOTAL_PIXELS); video->frame_bottom = (src_tb_edge & VE_SRC_TB_EDGE_DET_BOT) >> VE_SRC_TB_EDGE_DET_BOT_SHF; @@ -825,8 +829,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) VE_SRC_LR_EDGE_DET_RT_SHF; video->frame_left = src_lr_edge & VE_SRC_LR_EDGE_DET_LEFT; det->hfrontporch = video->frame_left; - det->hbackporch = (mds & VE_MODE_DETECT_H_PIXELS) - - video->frame_right; + det->hbackporch = htotal - video->frame_right; det->hsync = sync & VE_SYNC_STATUS_HSYNC; if (video->frame_left > video->frame_right) continue; diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 1eed69d29149f34c43fd155ff957c157623deac4..2333079a83c71330412f64e930e257a1f3132cc1 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -408,6 +408,7 @@ static struct vdoa_data *coda_get_vdoa_data(void) if (!vdoa_data) vdoa_data = ERR_PTR(-EPROBE_DEFER); + put_device(&vdoa_pdev->dev); out: of_node_put(vdoa_node); diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c index 5e67994e62ccac50a0205f06d520e3be469174a6..ee610daf90a3c9a93805cd7be44d4996d87e6cdf 100644 --- a/drivers/media/platform/davinci/vpif.c +++ b/drivers/media/platform/davinci/vpif.c @@ -428,6 +428,7 @@ static int vpif_probe(struct platform_device *pdev) static struct resource *res, *res_irq; struct platform_device *pdev_capture, *pdev_display; struct device_node *endpoint = NULL; + int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); vpif_base = devm_ioremap_resource(&pdev->dev, res); @@ -458,8 +459,8 @@ static int vpif_probe(struct platform_device *pdev) res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res_irq) { dev_warn(&pdev->dev, "Missing IRQ resource.\n"); - pm_runtime_put(&pdev->dev); - return -EINVAL; + ret = -EINVAL; + goto err_put_rpm; } pdev_capture = devm_kzalloc(&pdev->dev, sizeof(*pdev_capture), @@ -493,10 +494,17 @@ static int vpif_probe(struct platform_device *pdev) } return 0; + +err_put_rpm: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return ret; } static int vpif_remove(struct platform_device *pdev) { + pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c index cd27f637dbe7c8690baf3b7d0cd9790839118d61..cfc7ebed8fb7ab7a828ff1357b5cc5fa04afbe2a 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c @@ -102,6 +102,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev, vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_handler, dev, rst_id); fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL); + if (!fw) + return ERR_PTR(-ENOMEM); fw->type = VPU; fw->ops = &mtk_vcodec_vpu_msg; fw->pdev = fw_pdev; diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c index c6cd2e6d8e654d8cb5182f1bb7fc3436d7cdecf5..a50701cfbbd7b37c5462347c9e74926091f1fdee 100644 --- a/drivers/media/rc/gpio-ir-tx.c +++ b/drivers/media/rc/gpio-ir-tx.c @@ -48,11 +48,29 @@ static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier) return 0; } +static void delay_until(ktime_t until) +{ + /* + * delta should never exceed 0.5 seconds (IR_MAX_DURATION) and on + * m68k ndelay(s64) does not compile; so use s32 rather than s64. 
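
The delay_until() helper whose body follows gives the bit-banged IR transmitter absolute-deadline pacing: each edge is timed against a fixed target timestamp, so per-edge overhead never accumulates as drift, and long waits are chopped into small busy delays. A userspace model of that scheme, with clock_gettime()/usleep() standing in for ktime_get()/udelay() and all names invented:

#include <stdint.h>
#include <time.h>
#include <unistd.h>

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static void delay_until(int64_t deadline_us)
{
	for (;;) {
		int64_t delta = deadline_us - now_us();

		if (delta <= 0)
			return;
		/* never wait more than 1 ms per step, as the driver does
		 * with its udelay() chunks */
		usleep(delta > 1000 ? 1000 : (useconds_t)delta);
	}
}

/* send alternating mark/space durations (microseconds) */
static void send_pulses(const unsigned int *txbuf, unsigned int count)
{
	int64_t edge = now_us();
	unsigned int i;

	for (i = 0; i < count; i++) {
		/* gpiod_set_value(gpio, !(i % 2)) would toggle the pin here */
		edge += txbuf[i];       /* absolute time of the next edge */
		delay_until(edge);      /* not usleep(txbuf[i]): no drift */
	}
}

int main(void)
{
	unsigned int buf[] = { 9000, 4500, 560, 560 };  /* NEC-like header */

	send_pulses(buf, 4);
	return 0;
}
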
+ */ + s32 delta; + + while (true) { + delta = ktime_us_delta(until, ktime_get()); + if (delta <= 0) + return; + + /* udelay more than 1ms may not work */ + delta = min(delta, 1000); + udelay(delta); + } +} + static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf, uint count) { ktime_t edge; - s32 delta; int i; local_irq_disable(); @@ -63,9 +81,7 @@ static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf, gpiod_set_value(gpio_ir->gpio, !(i % 2)); edge = ktime_add_us(edge, txbuf[i]); - delta = ktime_us_delta(edge, ktime_get()); - if (delta > 0) - udelay(delta); + delay_until(edge); } gpiod_set_value(gpio_ir->gpio, 0); @@ -97,9 +113,7 @@ static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf, if (i % 2) { // space edge = ktime_add_us(edge, txbuf[i]); - delta = ktime_us_delta(edge, ktime_get()); - if (delta > 0) - udelay(delta); + delay_until(edge); } else { // pulse ktime_t last = ktime_add_us(edge, txbuf[i]); diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c index 1aa7989e756ccd967ca376ce0201911f3f5c4708..7f394277478b32ae0c461dfc3949249194775b90 100644 --- a/drivers/media/rc/ir_toy.c +++ b/drivers/media/rc/ir_toy.c @@ -429,7 +429,7 @@ static int irtoy_probe(struct usb_interface *intf, err = usb_submit_urb(irtoy->urb_in, GFP_KERNEL); if (err != 0) { dev_err(irtoy->dev, "fail to submit in urb: %d\n", err); - return err; + goto free_rcdev; } err = irtoy_setup(irtoy); diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.c b/drivers/media/test-drivers/vidtv/vidtv_s302m.c index d79b65854627cc18c60c5172430c40369e687c68..4676083cee3b8aa58f01845c9839d91505d24850 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_s302m.c +++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.c @@ -455,6 +455,9 @@ struct vidtv_encoder e->name = kstrdup(args.name, GFP_KERNEL); e->encoder_buf = vzalloc(VIDTV_S302M_BUF_SZ); + if (!e->encoder_buf) + goto out_kfree_e; + e->encoder_buf_sz = VIDTV_S302M_BUF_SZ; e->encoder_buf_offset = 0; @@ -467,10 +470,8 @@ struct vidtv_encoder e->is_video_encoder = false; ctx = kzalloc(priv_sz, GFP_KERNEL); - if (!ctx) { - kfree(e); - return NULL; - } + if (!ctx) + goto out_kfree_buf; e->ctx = ctx; ctx->last_duration = 0; @@ -498,6 +499,14 @@ struct vidtv_encoder e->next = NULL; return e; + +out_kfree_buf: + kfree(e->encoder_buf); + +out_kfree_e: + kfree(e->name); + kfree(e); + return NULL; } void vidtv_s302m_encoder_destroy(struct vidtv_encoder *e) diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index 87e375562dbb2b1df019c6ea305a222a298b01df..26408a972b443e7d90751aa3ad8beb9362b51be1 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -3881,6 +3881,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, goto err_free; } + kref_init(&dev->ref); + dev->devno = nr; dev->model = id->driver_info; dev->alt = -1; @@ -3981,6 +3983,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, } if (dev->board.has_dual_ts && em28xx_duplicate_dev(dev) == 0) { + kref_init(&dev->dev_next->ref); + dev->dev_next->ts = SECONDARY_TS; dev->dev_next->alt = -1; dev->dev_next->is_audio_only = has_vendor_audio && @@ -4035,12 +4039,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, em28xx_write_reg(dev, 0x0b, 0x82); mdelay(100); } - - kref_init(&dev->dev_next->ref); } - kref_init(&dev->ref); - request_modules(dev); /* @@ -4095,11 +4095,8 @@ static void em28xx_usb_disconnect(struct usb_interface *intf) em28xx_close_extension(dev); - if 
(dev->dev_next) { - em28xx_close_extension(dev->dev_next); + if (dev->dev_next) em28xx_release_resources(dev->dev_next); - } - em28xx_release_resources(dev); if (dev->dev_next) { diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c index b9e45124673b6b72955ceb0003658e183e0ed5ee..2e5913bccb38f696cadd93ea5bd1c7b17862a7b1 100644 --- a/drivers/media/usb/go7007/s2250-board.c +++ b/drivers/media/usb/go7007/s2250-board.c @@ -504,6 +504,7 @@ static int s2250_probe(struct i2c_client *client, u8 *data; struct go7007 *go = i2c_get_adapdata(adapter); struct go7007_usb *usb = go->hpi_context; + int err = -EIO; audio = i2c_new_dummy_device(adapter, TLV320_ADDRESS >> 1); if (IS_ERR(audio)) @@ -532,11 +533,8 @@ static int s2250_probe(struct i2c_client *client, V4L2_CID_HUE, -512, 511, 1, 0); sd->ctrl_handler = &state->hdl; if (state->hdl.error) { - int err = state->hdl.error; - - v4l2_ctrl_handler_free(&state->hdl); - kfree(state); - return err; + err = state->hdl.error; + goto fail; } state->std = V4L2_STD_NTSC; @@ -600,7 +598,7 @@ static int s2250_probe(struct i2c_client *client, i2c_unregister_device(audio); v4l2_ctrl_handler_free(&state->hdl); kfree(state); - return -EIO; + return err; } static int s2250_remove(struct i2c_client *client) diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index 563128d117317f40eef5661aed370616e1ed026d..60e57e0f192725a8dae8500b911c8b2426dd94ae 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c @@ -308,7 +308,6 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev) dev->status = STATUS_STREAMING; - INIT_WORK(&dev->worker, hdpvr_transmit_buffers); schedule_work(&dev->worker); v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, @@ -1165,6 +1164,9 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent, bool ac3 = dev->flags & HDPVR_FLAG_AC3_CAP; int res; + // initialize dev->worker + INIT_WORK(&dev->worker, hdpvr_transmit_buffers); + dev->cur_std = V4L2_STD_525_60; dev->width = 720; dev->height = 480; diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c index 4e1698f7881876010ff05f18b39610d863fccaa6..ce717502ea4c393096646c92b25e380e86d9d4df 100644 --- a/drivers/media/usb/stk1160/stk1160-core.c +++ b/drivers/media/usb/stk1160/stk1160-core.c @@ -403,7 +403,7 @@ static void stk1160_disconnect(struct usb_interface *interface) /* Here is the only place where isoc get released */ stk1160_uninit_isoc(dev); - stk1160_clear_queue(dev); + stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR); video_unregister_device(&dev->vdev); v4l2_device_disconnect(&dev->v4l2_dev); diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c index 6a4eb616d5160ec822acace9122bef200827792d..1aa953469402f50590a3b92590ed83871d88c140 100644 --- a/drivers/media/usb/stk1160/stk1160-v4l.c +++ b/drivers/media/usb/stk1160/stk1160-v4l.c @@ -258,7 +258,7 @@ static int stk1160_start_streaming(struct stk1160 *dev) stk1160_uninit_isoc(dev); out_stop_hw: usb_set_interface(dev->udev, 0, 0); - stk1160_clear_queue(dev); + stk1160_clear_queue(dev, VB2_BUF_STATE_QUEUED); mutex_unlock(&dev->v4l_lock); @@ -306,7 +306,7 @@ static int stk1160_stop_streaming(struct stk1160 *dev) stk1160_stop_hw(dev); - stk1160_clear_queue(dev); + stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR); stk1160_dbg("streaming stopped\n"); @@ -745,7 +745,7 @@ static const struct video_device v4l_template = { 
/********************************************************************/ /* Must be called with both v4l_lock and vb_queue_lock hold */ -void stk1160_clear_queue(struct stk1160 *dev) +void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state) { struct stk1160_buffer *buf; unsigned long flags; @@ -756,7 +756,7 @@ void stk1160_clear_queue(struct stk1160 *dev) buf = list_first_entry(&dev->avail_bufs, struct stk1160_buffer, list); list_del(&buf->list); - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + vb2_buffer_done(&buf->vb.vb2_buf, vb2_state); stk1160_dbg("buffer [%p/%d] aborted\n", buf, buf->vb.vb2_buf.index); } @@ -766,7 +766,7 @@ void stk1160_clear_queue(struct stk1160 *dev) buf = dev->isoc_ctl.buf; dev->isoc_ctl.buf = NULL; - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + vb2_buffer_done(&buf->vb.vb2_buf, vb2_state); stk1160_dbg("buffer [%p/%d] aborted\n", buf, buf->vb.vb2_buf.index); } diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h index a31ea1c80f25569a9312e228601c3ac58665bd36..a70963ce875337f3929e45582f800dc1251d6848 100644 --- a/drivers/media/usb/stk1160/stk1160.h +++ b/drivers/media/usb/stk1160/stk1160.h @@ -166,7 +166,7 @@ struct regval { int stk1160_vb2_setup(struct stk1160 *dev); int stk1160_video_register(struct stk1160 *dev); void stk1160_video_unregister(struct stk1160 *dev); -void stk1160_clear_queue(struct stk1160 *dev); +void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state); /* Provided by stk1160-video.c */ int stk1160_alloc_isoc(struct stk1160 *dev); diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index b221b4e438a1a91f8e8bf31164390189288e1cdb..73190652c267bd61cddafa2ede59a3e98ac1f3ab 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -585,19 +585,14 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, } EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); -int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, - struct v4l2_buffer *buf) +static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq, + struct v4l2_buffer *buf) { - struct vb2_queue *vq; - int ret = 0; - unsigned int i; - - vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - ret = vb2_querybuf(vq, buf); - /* Adjust MMAP memory offsets for the CAPTURE queue */ if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) { if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { + unsigned int i; + for (i = 0; i < buf->length; ++i) buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE; @@ -605,8 +600,23 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, buf->m.offset += DST_QUEUE_OFF_BASE; } } +} - return ret; +int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_querybuf(vq, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); @@ -763,6 +773,9 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, if (ret) return ret; + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + /* * If the capture queue is streaming, but streaming hasn't started * on the device, but was asked to stop, mark the previously queued @@ -784,9 +797,17 @@ int v4l2_m2m_dqbuf(struct file *file, 
struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf) { struct vb2_queue *vq; + int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); + ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); @@ -795,9 +816,17 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, { struct video_device *vdev = video_devdata(file); struct vb2_queue *vq; + int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); + ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index ddb1879f07d3f7a9b657307719ca6bd05c9bdd92..5a059be3516c9f2c1082c05b5ebb7fef54204fe6 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -1403,7 +1403,7 @@ static struct emif_data *__init_or_module get_device_details( temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL); - if (!emif || !pd || !dev_info) { + if (!emif || !temp || !dev_info) { dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__); goto error; } @@ -1495,7 +1495,7 @@ static int __init_or_module emif_probe(struct platform_device *pdev) { struct emif_data *emif; struct resource *res; - int irq; + int irq, ret; if (pdev->dev.of_node) emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev); @@ -1526,7 +1526,9 @@ static int __init_or_module emif_probe(struct platform_device *pdev) emif_onetime_settings(emif); emif_debugfs_init(emif); disable_and_clear_all_interrupts(emif); - setup_interrupts(emif, irq); + ret = setup_interrupts(emif, irq); + if (ret) + goto error; /* One-time actions taken on probing the first device */ if (!emif1) { diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c index 575fadbffa3062ec74af5bb3b9a4300e07281858..9eb8cc7de494a8d6d558e17f3ae519635abdf7bc 100644 --- a/drivers/memory/pl172.c +++ b/drivers/memory/pl172.c @@ -273,14 +273,12 @@ static int pl172_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int pl172_remove(struct amba_device *adev) +static void pl172_remove(struct amba_device *adev) { struct pl172_data *pl172 = amba_get_drvdata(adev); clk_disable_unprepare(pl172->clk); amba_release_regions(adev); - - return 0; } static const struct amba_id pl172_ids[] = { diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c index cc01979780d87eb04fffdaae933d5ecd240bac16..b0b251bb207f3a68d8a8f1969567ac1275f0ce0e 100644 --- a/drivers/memory/pl353-smc.c +++ b/drivers/memory/pl353-smc.c @@ -427,14 +427,12 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) return err; } -static int pl353_smc_remove(struct amba_device *adev) +static void pl353_smc_remove(struct amba_device *adev) { struct pl353_smc_data *pl353_smc = amba_get_drvdata(adev); clk_disable_unprepare(pl353_smc->memclk); clk_disable_unprepare(pl353_smc->aclk); - - return 0; } static const struct amba_id pl353_ids[] = { diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index a6bd2134cea2ae8c8a2720347b8523965e8e8cd4..14e4bbe6a9da3d007bdf44ad65ea3f4b44e343ad 100644 --- a/drivers/mfd/asic3.c +++ 
b/drivers/mfd/asic3.c @@ -914,14 +914,14 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, ret = mfd_add_devices(&pdev->dev, pdev->id, &asic3_cell_ds1wm, 1, mem, asic->irq_base, NULL); if (ret < 0) - goto out; + goto out_unmap; } if (mem_sdio && (irq >= 0)) { ret = mfd_add_devices(&pdev->dev, pdev->id, &asic3_cell_mmc, 1, mem_sdio, irq, NULL); if (ret < 0) - goto out; + goto out_unmap; } ret = 0; @@ -935,8 +935,12 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, ret = mfd_add_devices(&pdev->dev, 0, asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0, NULL); } + return ret; - out: +out_unmap: + if (asic->tmio_cnf) + iounmap(asic->tmio_cnf); +out: return ret; } diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c index 1abe7432aad82a308d6b3d8f4c977e9a91964edd..e281a9202f110f3c5b98ba3cbf304bb12115e56e 100644 --- a/drivers/mfd/mc13xxx-core.c +++ b/drivers/mfd/mc13xxx-core.c @@ -323,8 +323,10 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode, adc1 |= MC13783_ADC1_ATOX; dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__); - mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE, + ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE, mc13xxx_handler_adcdone, __func__, &adcdone_data); + if (ret) + goto out; mc13xxx_reg_write(mc13xxx, MC13XXX_ADC0, adc0); mc13xxx_reg_write(mc13xxx, MC13XXX_ADC1, adc1); diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c index de6d44a158bbae69f22ed2deed3000928f7cb6fa..3f514d77a843f3c5bb13934343945dde101628c0 100644 --- a/drivers/misc/cardreader/alcor_pci.c +++ b/drivers/misc/cardreader/alcor_pci.c @@ -266,7 +266,7 @@ static int alcor_pci_probe(struct pci_dev *pdev, if (!priv) return -ENOMEM; - ret = ida_simple_get(&alcor_pci_idr, 0, 0, GFP_KERNEL); + ret = ida_alloc(&alcor_pci_idr, GFP_KERNEL); if (ret < 0) return ret; priv->id = ret; @@ -280,7 +280,8 @@ static int alcor_pci_probe(struct pci_dev *pdev, ret = pci_request_regions(pdev, DRV_NAME_ALCOR_PCI); if (ret) { dev_err(&pdev->dev, "Cannot request region\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error_free_ida; } if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { @@ -324,6 +325,8 @@ static int alcor_pci_probe(struct pci_dev *pdev, error_release_regions: pci_release_regions(pdev); +error_free_ida: + ida_free(&alcor_pci_idr, priv->id); return ret; } @@ -337,7 +340,7 @@ static void alcor_pci_remove(struct pci_dev *pdev) mfd_remove_devices(&pdev->dev); - ida_simple_remove(&alcor_pci_idr, priv->id); + ida_free(&alcor_pci_idr, priv->id); pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 912ddfa360b1375e79d8b6d37e69bda169485ad5..9716b0728b306aeac90d9f50fb3e25ed28281a53 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -859,6 +859,8 @@ static ssize_t hl_set_power_state(struct file *f, const char __user *buf, pci_set_power_state(hdev->pdev, PCI_D0); pci_restore_state(hdev->pdev); rc = pci_enable_device(hdev->pdev); + if (rc < 0) + return rc; } else if (value == 2) { pci_save_state(hdev->pdev); pci_disable_device(hdev->pdev); diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index f764684367153e004ebed4a154dd981c5862353a..954f7230b3886a48d0313cd82a22fcd7075ac825 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -1061,10 +1061,10 @@ static int kgdbts_option_setup(char *opt) { if (strlen(opt) >= MAX_CONFIG_LEN) { printk(KERN_ERR 
"kgdbts: config string too long\n"); - return -ENOSPC; + return 1; } strcpy(config, opt); - return 0; + return 1; } __setup("kgdbts=", kgdbts_option_setup); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 67bb6a25fd0a020c55c310fcd171b1fc566982f9..d81d75a20b8f211f6fd312b4282d086a10cecc67 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -107,6 +107,7 @@ #define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */ #define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake Point LP */ #define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */ +#define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */ /* * MEI HW Section diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index fee603039e872430736dedb51a934494d103d5b6..ca3067fa6f0e07df9e2d8ce3fc11ab0a53ef550c 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -427,31 +427,26 @@ int mei_irq_read_handler(struct mei_device *dev, list_for_each_entry(cl, &dev->file_list, link) { if (mei_cl_hbm_equal(cl, mei_hdr)) { cl_dbg(dev, cl, "got a message\n"); - break; + ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list); + goto reset_slots; } } /* if no recipient cl was found we assume corrupted header */ - if (&cl->link == &dev->file_list) { - /* A message for not connected fixed address clients - * should be silently discarded - * On power down client may be force cleaned, - * silently discard such messages - */ - if (hdr_is_fixed(mei_hdr) || - dev->dev_state == MEI_DEV_POWER_DOWN) { - mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length); - ret = 0; - goto reset_slots; - } - dev_err(dev->dev, "no destination client found 0x%08X\n", - dev->rd_msg_hdr[0]); - ret = -EBADMSG; - goto end; + /* A message for not connected fixed address clients + * should be silently discarded + * On power down client may be force cleaned, + * silently discard such messages + */ + if (hdr_is_fixed(mei_hdr) || + dev->dev_state == MEI_DEV_POWER_DOWN) { + mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length); + ret = 0; + goto reset_slots; } - - ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list); - + dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]); + ret = -EBADMSG; + goto end; reset_slots: /* reset the number of slots and header */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 3a45aaf002ac8523e3955e23e21603acfffeb280..a738253dbd056171fc8db0e16b60cbb3407450df 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -113,6 +113,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)}, /* required last entry */ {0, } diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 864c8c205ff782908e6f137c82e13fdf3739a3a6..03e2f965a96a89aa6e4d5667221c68a1e8f74db1 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -513,6 +513,16 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) EXPORT_SYMBOL(mmc_alloc_host); +static int mmc_validate_host_caps(struct mmc_host *host) +{ + if (host->caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) { + dev_warn(host->parent, "missing ->enable_sdio_irq() ops\n"); + return -EINVAL; + } + + return 0; +} + /** * mmc_add_host - initialise host hardware * @host: mmc host @@ -525,8 +535,9 @@ int mmc_add_host(struct 
mmc_host *host) { int err; - WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && - !host->ops->enable_sdio_irq); + err = mmc_validate_host_caps(host); + if (err) + return err; err = device_add(&host->class_dev); if (err) diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 90cd179625fc27be597ab4e4bcc21f2b626a9ead..647928ab00a30db117ce9a65d52a765cf364999a 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1375,8 +1375,12 @@ static int davinci_mmcsd_suspend(struct device *dev) static int davinci_mmcsd_resume(struct device *dev) { struct mmc_davinci_host *host = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(host->clk); + if (ret) + return ret; - clk_enable(host->clk); mmc_davinci_reset_ctrl(host, 0); return 0; diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index b274083a6e63540f9bd6dc6cdd2fdf6f4e64d40e..091e0e051d109609f363bb102f806e4f6c28996a 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -173,6 +173,8 @@ struct meson_host { int irq; bool vqmmc_enabled; + bool needs_pre_post_req; + }; #define CMD_CFG_LENGTH_MASK GENMASK(8, 0) @@ -652,6 +654,8 @@ static void meson_mmc_request_done(struct mmc_host *mmc, struct meson_host *host = mmc_priv(mmc); host->cmd = NULL; + if (host->needs_pre_post_req) + meson_mmc_post_req(mmc, mrq, 0); mmc_request_done(host->mmc, mrq); } @@ -869,7 +873,7 @@ static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct meson_host *host = mmc_priv(mmc); - bool needs_pre_post_req = mrq->data && + host->needs_pre_post_req = mrq->data && !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE); /* @@ -885,22 +889,19 @@ static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) } } - if (needs_pre_post_req) { + if (host->needs_pre_post_req) { meson_mmc_get_transfer_mode(mmc, mrq); if (!meson_mmc_desc_chain_mode(mrq->data)) - needs_pre_post_req = false; + host->needs_pre_post_req = false; } - if (needs_pre_post_req) + if (host->needs_pre_post_req) meson_mmc_pre_req(mmc, mrq); /* Stop execution */ writel(0, host->regs + SD_EMMC_START); meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd); - - if (needs_pre_post_req) - meson_mmc_post_req(mmc, mrq, 0); } static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 9bde0def114b5714330632e5d7c08590accfd67b..b5684e5d79e60d3c67f18bf38353df68a4c01acc 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -2203,7 +2203,7 @@ static int mmci_probe(struct amba_device *dev, return ret; } -static int mmci_remove(struct amba_device *dev) +static void mmci_remove(struct amba_device *dev) { struct mmc_host *mmc = amba_get_drvdata(dev); @@ -2231,8 +2231,6 @@ static int mmci_remove(struct amba_device *dev) clk_disable_unprepare(host->clk); mmc_free_host(mmc); } - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c index 8b6f4da5d72011b878732242d0041dc6b6c0b2aa..a4b8b65fe15f50faeaa1225203c52cd50f6a9288 100644 --- a/drivers/mtd/nand/onenand/generic.c +++ b/drivers/mtd/nand/onenand/generic.c @@ -53,7 +53,12 @@ static int generic_onenand_probe(struct platform_device *pdev) } info->onenand.mmcontrol = pdata ? 
pdata->mmcontrol : NULL; - info->onenand.irq = platform_get_irq(pdev, 0); + + err = platform_get_irq(pdev, 0); + if (err < 0) + goto out_iounmap; + + info->onenand.irq = err; info->mtd.dev.parent = &pdev->dev; info->mtd.priv = &info->onenand; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 8aab1017b460037d1ba7d5036a89bad20dce0e99..c048e826746a9d92cd2a9d455bd417f91da62630 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -2057,13 +2057,15 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc, nc->mck = of_clk_get(dev->parent->of_node, 0); if (IS_ERR(nc->mck)) { dev_err(dev, "Failed to retrieve MCK clk\n"); - return PTR_ERR(nc->mck); + ret = PTR_ERR(nc->mck); + goto out_release_dma; } np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0); if (!np) { dev_err(dev, "Missing or invalid atmel,smc property\n"); - return -EINVAL; + ret = -EINVAL; + goto out_release_dma; } nc->smc = syscon_node_to_regmap(np); @@ -2071,10 +2073,16 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc, if (IS_ERR(nc->smc)) { ret = PTR_ERR(nc->smc); dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret); - return ret; + goto out_release_dma; } return 0; + +out_release_dma: + if (nc->dmac) + dma_release_channel(nc->dmac); + + return ret; } static int diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index cb7631145700a56f700fa861d4ed42f88817fc50..92e8ca56f56653761a502b383fe8a8a86150a952 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -646,6 +646,7 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, const struct nand_sdr_timings *sdr) { struct gpmi_nfc_hardware_timing *hw = &this->hw; + struct resources *r = &this->resources; unsigned int dll_threshold_ps = this->devdata->max_chain_delay; unsigned int period_ps, reference_period_ps; unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles; @@ -669,6 +670,8 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; } + hw->clk_rate = clk_round_rate(r->clock[0], hw->clk_rate); + /* SDR core timings are given in picoseconds */ period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 1f0d542d59230c06d5277f5741d66831d23958fb..c41c0ff611b1bf312310c5e66ed0c5b464d6cee6 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -297,16 +297,19 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) * * Return: -EBUSY if the chip has been suspended, 0 otherwise */ -static int nand_get_device(struct nand_chip *chip) +static void nand_get_device(struct nand_chip *chip) { - mutex_lock(&chip->lock); - if (chip->suspended) { + /* Wait until the device is resumed. 
*/ + while (1) { + mutex_lock(&chip->lock); + if (!chip->suspended) { + mutex_lock(&chip->controller->lock); + return; + } mutex_unlock(&chip->lock); - return -EBUSY; - } - mutex_lock(&chip->controller->lock); - return 0; + wait_event(chip->resume_wq, !chip->suspended); + } } /** @@ -531,9 +534,7 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs) nand_erase_nand(chip, &einfo, 0); /* Write bad block marker to OOB */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); ret = nand_markbad_bbm(chip, ofs); nand_release_device(chip); @@ -3534,9 +3535,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from, ops->mode != MTD_OPS_RAW) return -ENOTSUPP; - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); if (!ops->datbuf) ret = nand_do_read_oob(chip, from, ops); @@ -4119,13 +4118,11 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { struct nand_chip *chip = mtd_to_nand(mtd); - int ret; + int ret = 0; ops->retlen = 0; - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); switch (ops->mode) { case MTD_OPS_PLACE_OOB: @@ -4181,9 +4178,7 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, return -EINVAL; /* Grab the lock and see if the device is available */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); /* Shift to get first page */ page = (int)(instr->addr >> chip->page_shift); @@ -4270,7 +4265,7 @@ static void nand_sync(struct mtd_info *mtd) pr_debug("%s: called\n", __func__); /* Grab the lock and see if the device is available */ - WARN_ON(nand_get_device(chip)); + nand_get_device(chip); /* Release it and go back */ nand_release_device(chip); } @@ -4287,9 +4282,7 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) int ret; /* Select the NAND device */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); nand_select_target(chip, chipnr); @@ -4360,6 +4353,8 @@ static void nand_resume(struct mtd_info *mtd) __func__); } mutex_unlock(&chip->lock); + + wake_up_all(&chip->resume_wq); } /** @@ -5068,6 +5063,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, chip->cur_cs = -1; mutex_init(&chip->lock); + init_waitqueue_head(&chip->resume_wq); /* Enforce the right timings for reset/detection */ chip->current_interface_config = nand_get_reset_interface_config(); diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 820b5c1c8e8e7cfb24813628e1d40c7edaed8a93..e2e70efc02fb7dd56b8937766d43a98180c3d3ed 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -968,11 +968,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, spin_lock(&ubi->wl_lock); ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; + wl_entry_destroy(ubi, e1); + wl_entry_destroy(ubi, e2); spin_unlock(&ubi->wl_lock); ubi_free_vid_buf(vidb); - wl_entry_destroy(ubi, e1); - wl_entry_destroy(ubi, e2); out_ro: ubi_ro_mode(ubi); @@ -1244,6 +1244,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, retry: spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; + if (!e) { + /* + * This wl entry has been removed for some errors by other + * process (eg. wear leveling worker), corresponding process + * (except __erase_worker, which cannot concurrent with + * ubi_wl_put_peb) will set ubi ro_mode at the same time, + * just ignore this wl entry. 
+ */ + spin_unlock(&ubi->wl_lock); + up_read(&ubi->fm_protect); + return 0; + } if (e == ubi->move_from) { /* * User is putting the physical eraseblock which was selected to diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index eb7f76753c9c0dbdd2ca07985d9c407b433ab506..9f44e2e458df17e08b2ab9238d5dca39c52110bb 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -136,6 +136,9 @@ static int com20020pci_probe(struct pci_dev *pdev, return -ENOMEM; ci = (struct com20020_pci_card_info *)id->driver_data; + if (!ci) + return -EINVAL; + priv->ci = ci; mm = &ci->misc_map; diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 39b128205f2551b97fb4213fe6127ea4fd3d4b85..53ef48588e59a1510ab5ac4d16a76f26d7728ca1 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -140,14 +140,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) oiph = skb_network_header(skb); skb_reset_network_header(skb); - if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET) + if (!ipv6_mod_enabled() || family == AF_INET) err = IP_ECN_decapsulate(oiph, skb); else err = IP6_ECN_decapsulate(oiph, skb); if (unlikely(err)) { if (log_ecn_error) { - if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET) + if (!ipv6_mod_enabled() || family == AF_INET) net_info_ratelimited("non-ECT from %pI4 " "with TOS=%#x\n", &((struct iphdr *)oiph)->saddr, @@ -213,11 +213,12 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port) int err; memset(&udp_conf, 0, sizeof(udp_conf)); -#if IS_ENABLED(CONFIG_IPV6) - udp_conf.family = AF_INET6; -#else - udp_conf.family = AF_INET; -#endif + + if (ipv6_mod_enabled()) + udp_conf.family = AF_INET6; + else + udp_conf.family = AF_INET; + udp_conf.local_udp_port = port; /* Open UDP socket */ err = udp_sock_create(net, &udp_conf, &sock); @@ -246,12 +247,6 @@ static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port) tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg); - /* As the setup_udp_tunnel_sock does not call udp_encap_enable if the - * socket type is v6 an explicit call to udp_encap_enable is needed. 
- */ - if (sock->sk->sk_family == AF_INET6) - udp_encap_enable(); - rcu_assign_pointer(bareudp->sock, sock); return 0; } @@ -445,7 +440,7 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) } rcu_read_lock(); - if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6) + if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6) err = bareudp6_xmit_skb(skb, dev, bareudp, info); else err = bareudp_xmit_skb(skb, dev, bareudp, info); @@ -475,7 +470,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, use_cache = ip_tunnel_dst_cache_usable(skb, info); - if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) { + if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; __be32 saddr; diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 19a7e4adb9338d945490fb68117221a23ba3529b..19a19a7b7deb87b3a73895f044f4cd0e387650bb 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1491,8 +1491,6 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) M_CAN_FIFO_DATA(i / 4), *(u32 *)(cf->data + i)); - can_put_echo_skb(skb, dev, 0); - if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { cccr = m_can_read(cdev, M_CAN_CCCR); cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); @@ -1509,6 +1507,9 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) m_can_write(cdev, M_CAN_CCCR, cccr); } m_can_write(cdev, M_CAN_TXBTIE, 0x1); + + can_put_echo_skb(skb, dev, 0); + m_can_write(cdev, M_CAN_TXBAR, 0x1); /* End of xmit function for version 3.0.x */ } else { diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index de59dd6aad29918633ccfc4a237fccc26643b1d5..67f0f14e2bf4e1eadcd9409aa80d0a4ac6ece0dd 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1598,15 +1598,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll, RCANFD_NAPI_WEIGHT); + spin_lock_init(&priv->tx_lock); + devm_can_led_init(ndev); + gpriv->ch[priv->channel] = priv; err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed, error %d\n", err); goto fail_candev; } - spin_lock_init(&priv->tx_lock); - devm_can_led_init(ndev); - gpriv->ch[priv->channel] = priv; dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel); return 0; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index abe00a085f6fcd872481441120aa020471c6fe80..189d226588133dcdf2d6442fecc33e2c3bf496c9 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -2578,7 +2578,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, out_kfree_buf_rx: kfree(buf_rx); - return 0; + return err; } #define MCP251XFD_QUIRK_ACTIVE(quirk) \ diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 3f759fae81fe2bdc8cbb21dfcd6e4fe280f1806d..e023c401f4f77387826eff3f3f71a6fb862a89f5 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -190,8 +190,8 @@ struct gs_can { struct gs_usb { struct gs_can *canch[GS_MAX_INTF]; struct usb_anchor rx_submitted; - atomic_t active_channels; struct usb_device *udev; + u8 active_channels; }; /* 'allocate' a tx context. 
@@ -588,7 +588,7 @@ static int gs_can_open(struct net_device *netdev) if (rc) return rc; - if (atomic_add_return(1, &parent->active_channels) == 1) { + if (!parent->active_channels) { for (i = 0; i < GS_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; @@ -689,6 +689,7 @@ static int gs_can_open(struct net_device *netdev) dev->can.state = CAN_STATE_ERROR_ACTIVE; + parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); @@ -704,7 +705,8 @@ static int gs_can_close(struct net_device *netdev) netif_stop_queue(netdev); /* Stop polling */ - if (atomic_dec_and_test(&parent->active_channels)) + parent->active_channels--; + if (!parent->active_channels) usb_kill_anchored_urbs(&parent->rx_submitted); /* Stop sending URBs */ @@ -983,8 +985,6 @@ static int gs_usb_probe(struct usb_interface *intf, init_usb_anchor(&dev->rx_submitted); - atomic_set(&dev->active_channels, 0); - usb_set_intfdata(intf, dev); dev->udev = interface_to_usbdev(intf); diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 756a343e3cf55f7592e260fa703675762e4b22ab..21063335ab599cde2e410e7ca0389f6b3ae1bdb6 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -33,10 +33,6 @@ #define MCBA_USB_RX_BUFF_SIZE 64 #define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg)) -/* MCBA endpoint numbers */ -#define MCBA_USB_EP_IN 1 -#define MCBA_USB_EP_OUT 1 - /* Microchip command id */ #define MBCA_CMD_RECEIVE_MESSAGE 0xE3 #define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5 @@ -84,6 +80,8 @@ struct mcba_priv { atomic_t free_ctx_cnt; void *rxbuf[MCBA_MAX_RX_URBS]; dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS]; + int rx_pipe; + int tx_pipe; }; /* CAN frame */ @@ -272,10 +270,8 @@ static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv, memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE); - usb_fill_bulk_urb(urb, priv->udev, - usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf, - MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback, - ctx); + usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf, MCBA_USB_TX_BUFF_SIZE, + mcba_usb_write_bulk_callback, ctx); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &priv->tx_submitted); @@ -610,7 +606,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb) resubmit_urb: usb_fill_bulk_urb(urb, priv->udev, - usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_OUT), + priv->rx_pipe, urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE, mcba_usb_read_bulk_callback, priv); @@ -655,7 +651,7 @@ static int mcba_usb_start(struct mcba_priv *priv) urb->transfer_dma = buf_dma; usb_fill_bulk_urb(urb, priv->udev, - usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN), + priv->rx_pipe, buf, MCBA_USB_RX_BUFF_SIZE, mcba_usb_read_bulk_callback, priv); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; @@ -809,6 +805,13 @@ static int mcba_usb_probe(struct usb_interface *intf, struct mcba_priv *priv; int err; struct usb_device *usbdev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *in, *out; + + err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL); + if (err) { + dev_err(&intf->dev, "Can't find endpoints\n"); + return err; + } netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS); if (!netdev) { @@ -854,6 +857,9 @@ static int mcba_usb_probe(struct usb_interface *intf, goto cleanup_free_candev; } + priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress); + priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress); + devm_can_led_init(netdev); /* Start USB dev only if we have successfully 
registered CAN device */ diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 7000c6cd1e48bc5d8fbee7bd3b69523cc69f9186..282c53ef76d233535938091043c3d52072c36e9b 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -148,7 +148,7 @@ static void vxcan_setup(struct net_device *dev) dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 0; - dev->flags = (IFF_NOARP|IFF_ECHO); + dev->flags = IFF_NOARP; dev->netdev_ops = &vxcan_netdev_ops; dev->needs_free_netdev = true; diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index d82cee5d92022be1ee39e1ab1a314c2352cf1f3a..cbf44fc7d03aa4f650c8b22e8d332096a13e6220 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -567,14 +567,14 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv, static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv, int port, u32 location) { - struct cfp_rule *rule = NULL; + struct cfp_rule *rule; list_for_each_entry(rule, &priv->cfp.rules_list, next) { if (rule->port == port && rule->fs.location == location) - break; + return rule; } - return rule; + return NULL; } static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port, diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c index 8b00f8e6c02f4f2a2fbc545c026cb2daaa48528c..5639c5c59e255e6f639bad75a722d842f723c51c 100644 --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -86,12 +86,23 @@ static const struct of_device_id ksz8795_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, ksz8795_dt_ids); +static const struct spi_device_id ksz8795_spi_ids[] = { + { "ksz8765" }, + { "ksz8794" }, + { "ksz8795" }, + { "ksz8863" }, + { "ksz8873" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids); + static struct spi_driver ksz8795_spi_driver = { .driver = { .name = "ksz8795-switch", .owner = THIS_MODULE, .of_match_table = of_match_ptr(ksz8795_dt_ids), }, + .id_table = ksz8795_spi_ids, .probe = ksz8795_spi_probe, .remove = ksz8795_spi_remove, .shutdown = ksz8795_spi_shutdown, diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index 1142768969c205b9c4f5a25bfc56bd452c4dd283..9bda83d063e8ec652434b02aca2047d165f216de 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -88,12 +88,24 @@ static const struct of_device_id ksz9477_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); +static const struct spi_device_id ksz9477_spi_ids[] = { + { "ksz9477" }, + { "ksz9897" }, + { "ksz9893" }, + { "ksz9563" }, + { "ksz8563" }, + { "ksz9567" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids); + static struct spi_driver ksz9477_spi_driver = { .driver = { .name = "ksz9477-switch", .owner = THIS_MODULE, .of_match_table = of_match_ptr(ksz9477_dt_ids), }, + .id_table = ksz9477_spi_ids, .probe = ksz9477_spi_probe, .remove = ksz9477_spi_remove, .shutdown = ksz9477_spi_shutdown, diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 1f642fdbf214c80864cdb46a16f3ab3a674dc684..5ee8809bc27112cfd9da1430531abfc6da73f592 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2342,7 +2342,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, phylink_set_port_modes(mask); - if (state->interface != PHY_INTERFACE_MODE_TRGMII || + if (state->interface != PHY_INTERFACE_MODE_TRGMII && !phy_interface_mode_is_8023z(state->interface)) { phylink_set(mask, 10baseT_Half); 
phylink_set(mask, 10baseT_Full); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 1992be77522ac0db4046e74b5e29badbba1718d9..e79a808375fc862fd886a1d57c97a92a6d1bc7ac 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3297,6 +3297,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_set_link = mv88e6xxx_port_set_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c index 4ad8031ab6695707bd275a4e0d66b65dfe5c0d95..065fdbe66c425de384dfaeccef0182f7eaae5f46 100644 --- a/drivers/net/ethernet/8390/mcf8390.c +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -406,12 +406,12 @@ static int mcf8390_init(struct net_device *dev) static int mcf8390_probe(struct platform_device *pdev) { struct net_device *dev; - struct resource *mem, *irq; + struct resource *mem; resource_size_t msize; - int ret; + int ret, irq; - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (irq == NULL) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { dev_err(&pdev->dev, "no IRQ specified?\n"); return -ENXIO; } @@ -434,7 +434,7 @@ static int mcf8390_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); - dev->irq = irq->start; + dev->irq = irq; dev->base_addr = mem->start; ret = mcf8390_init(dev); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 5f1fc6582d74a2b624ac3351b6e0ec41f400bc11..78c7cbc372b0559c1aaf2a95bb9ac84c9dba860a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -696,6 +696,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, buf_pool->rx_skb[skb_index] = NULL; datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1)); + + /* strip off CRC as HW isn't doing this */ + nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0)); + if (!nv) + datalen -= 4; + skb_put(skb, datalen); prefetch(skb->data - NET_IP_ALIGN); skb->protocol = eth_type_trans(skb, ndev); @@ -717,12 +723,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, } } - nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0)); - if (!nv) { - /* strip off CRC as HW isn't doing this */ - datalen -= 4; + if (!nv) goto skip_jumbo; - } slots = page_pool->slots - 1; head = page_pool->head; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index bb3ba614fb17497d62566d41026997db23473a26..2a61229d3f9762cf0b3d389d1a3916ad0555c1da 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -2534,6 +2534,4 @@ void bnx2x_register_phc(struct bnx2x *bp); * Meant for implicit re-load flows. 
*/ int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp); -int bnx2x_init_firmware(struct bnx2x *bp); -void bnx2x_release_firmware(struct bnx2x *bp); #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 41ebbb2c7d3ac3c8d1aa172d9a2a874e6f51b8d7..198e041d841091d5d6bcbe76b0a858f6de715de0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2363,24 +2363,30 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) /* is another pf loaded on this engine? */ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { - /* build my FW version dword */ - u32 my_fw = (bp->fw_major) + (bp->fw_minor << 8) + - (bp->fw_rev << 16) + (bp->fw_eng << 24); + u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng; + u32 loaded_fw; /* read loaded FW from chip */ - u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); + loaded_fw = REG_RD(bp, XSEM_REG_PRAM); - DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n", - loaded_fw, my_fw); + loaded_fw_major = loaded_fw & 0xff; + loaded_fw_minor = (loaded_fw >> 8) & 0xff; + loaded_fw_rev = (loaded_fw >> 16) & 0xff; + loaded_fw_eng = (loaded_fw >> 24) & 0xff; + + DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n", + loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng); /* abort nic load if version mismatch */ - if (my_fw != loaded_fw) { + if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION || + loaded_fw_minor != BCM_5710_FW_MINOR_VERSION || + loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION || + loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) { if (print_err) - BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n", - loaded_fw, my_fw); + BNX2X_ERR("loaded FW incompatible. 
Aborting\n"); else - BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n", - loaded_fw, my_fw); + BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n"); + return -EBUSY; } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 9a86367a26369ba267d8c0af906d4e134e262832..6333471916be10ff9cb601f1a9ce9404e72a7a12 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -100,6 +100,9 @@ MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); +MODULE_FIRMWARE(FW_FILE_NAME_E1_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E2_V15); int bnx2x_num_queues; module_param_named(num_queues, bnx2x_num_queues, int, 0444); @@ -12363,15 +12366,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) bnx2x_read_fwinfo(bp); - if (IS_PF(bp)) { - rc = bnx2x_init_firmware(bp); - - if (rc) { - bnx2x_free_mem_bp(bp); - return rc; - } - } - func = BP_FUNC(bp); /* need to reset chip if undi was active */ @@ -12384,7 +12378,6 @@ static int bnx2x_init_bp(struct bnx2x *bp) rc = bnx2x_prev_unload(bp); if (rc) { - bnx2x_release_firmware(bp); bnx2x_free_mem_bp(bp); return rc; } @@ -13466,7 +13459,7 @@ do { \ (u8 *)bp->arr, len); \ } while (0) -int bnx2x_init_firmware(struct bnx2x *bp) +static int bnx2x_init_firmware(struct bnx2x *bp) { const char *fw_file_name, *fw_file_name_v15; struct bnx2x_fw_file_hdr *fw_hdr; @@ -13566,7 +13559,7 @@ int bnx2x_init_firmware(struct bnx2x *bp) return rc; } -void bnx2x_release_firmware(struct bnx2x *bp) +static void bnx2x_release_firmware(struct bnx2x *bp) { kfree(bp->init_ops_offsets); kfree(bp->init_ops); @@ -14083,7 +14076,6 @@ static int bnx2x_init_one(struct pci_dev *pdev, return 0; init_one_freemem: - bnx2x_release_firmware(bp); bnx2x_free_mem_bp(bp); init_one_exit: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index f12a01ba86cf407ad8bf48595c20f0a22b7d4b2f..66f9d9b3de0a72a85c018437b27d008f14bb0fa2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1934,6 +1934,9 @@ static int bnxt_get_fecparam(struct net_device *dev, case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: fec->active_fec |= ETHTOOL_FEC_LLRS; break; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: + fec->active_fec |= ETHTOOL_FEC_OFF; + break; } return 0; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 8c221666c9726d6e72a0df780cb3c64ff601ea74..d676e59eb82d20701603d480b831534e8ffa3421 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset) if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) __raw_writel(value, offset); else - writel_relaxed(value, offset); + writel(value, offset); } static inline u32 bcmgenet_readl(void __iomem *offset) @@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset) if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) return __raw_readl(offset); else - return readl_relaxed(offset); + return readl(offset); } static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv, @@ -2243,8 +2243,10 @@ static unsigned 
int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, dma_length_status = status->length_status; if (dev->features & NETIF_F_RXCSUM) { rx_csum = (__force __be16)(status->rx_csum & 0xffff); - skb->csum = (__force __wsum)ntohs(rx_csum); - skb->ip_summed = CHECKSUM_COMPLETE; + if (rx_csum) { + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } } /* DMA flags and length are still valid no matter how diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index e84ad587fb2141c72408f413724c432d84717786..2c2a56d5a0a1a87e41eec62dd5b1bbd1abbdec37 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -41,6 +41,13 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) { + wol->supported = 0; + wol->wolopts = 0; + return; + } wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index b0631495f8ef87bb8da190fcf77450e62ebe38b5..78c6d133f54fad5d759b57adfe30ce256feb2cc7 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1448,7 +1448,14 @@ static int macb_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete_done(napi, work_done); - /* Packets received while interrupts were disabled */ + /* RSR bits only seem to propagate to raise interrupts when + * interrupts are enabled at the time, so if bits are already + * set due to packets received while interrupts were disabled, + * they will not cause another interrupt to be generated when + * interrupts are re-enabled. + * Check for this case here. This has been seen to happen + * around 30% of the time under heavy network load. + */ status = macb_readl(bp, RSR); if (status) { if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) @@ -1456,6 +1463,22 @@ static int macb_poll(struct napi_struct *napi, int budget) napi_reschedule(napi); } else { queue_writel(queue, IER, bp->rx_intr_mask); + + /* In rare cases, packets could have been received in + * the window between the check above and re-enabling + * interrupts. Therefore, a double-check is required + * to avoid losing a wakeup. This can potentially race + * with the interrupt handler doing the same actions + * if an interrupt is raised just after enabling them, + * but this should be harmless. 
+ */ + status = macb_readl(bp, RSR); + if (unlikely(status)) { + queue_writel(queue, IDR, bp->rx_intr_mask); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(RCOMP)); + napi_schedule(napi); + } } } diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 7ff31d1026fb27a4edc8af073b36927c675c8fe7..e0d34e64fc6cb72d396a6505c5c4028b2cf81a19 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -3678,6 +3678,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10); adapter->params.pci.vpd_cap_addr = pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD); + if (!adapter->params.pci.vpd_cap_addr) + return -ENODEV; ret = get_vpd_params(adapter, &adapter->params.vpd); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 37f9554618e9b04cc49d9fb8b489540d7ece80f6..6571107bf291cd9d15df78790767ee67c787a099 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -657,7 +657,10 @@ static int enetc_get_ts_info(struct net_device *ndev, #ifdef CONFIG_FSL_ENETC_PTP_CLOCK info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; + SOF_TIMESTAMPING_RAW_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index ff756265d58faf2e2d5af0e05e7a41d927ee8745..9a2c16d69e2c1ae7657811cd6060d4187ee719b7 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1464,6 +1464,7 @@ static int gfar_get_ts_info(struct net_device *dev, ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp"); if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); + of_node_put(ptp_node); if (ptp_dev) ptp = platform_get_drvdata(ptp_dev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index b668df6193be4ede325b6baf9101cdea4f95f160..7d4ae467f3ad413c42f39f6b05db3a4ab0d2475a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */ HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */ HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */ + HCLGE_MBX_GET_RING_VECTOR_MAP, /* (VF -> PF) get ring-to-vector map */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ @@ -92,8 +93,8 @@ struct hclge_ring_chain_param { struct hclge_basic_info { u8 hw_tc_map; u8 rsv; - u16 mbx_api_version; - u32 pf_caps; + __le16 mbx_api_version; + __le32 pf_caps; }; struct hclgevf_mbx_resp_status { @@ -134,11 +135,20 @@ struct hclge_vf_to_pf_msg { }; struct hclge_pf_to_vf_msg { - u16 code; - u16 vf_mbx_msg_code; - u16 vf_mbx_msg_subcode; - u16 resp_status; - u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + __le16 code; + union { + /* used for mbx response */ + struct { + __le16 vf_mbx_msg_code; + __le16 vf_mbx_msg_subcode; + 
__le16 resp_status; + u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + }; + /* used for general mbx */ + struct { + u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE]; + }; + }; }; struct hclge_mbx_vf_to_pf_cmd { @@ -148,7 +158,7 @@ struct hclge_mbx_vf_to_pf_cmd { u8 rsv1[1]; u8 msg_len; u8 rsv2; - u16 match_id; + __le16 match_id; struct hclge_vf_to_pf_msg msg; }; @@ -159,7 +169,7 @@ struct hclge_mbx_pf_to_vf_cmd { u8 rsv[3]; u8 msg_len; u8 rsv1; - u16 match_id; + __le16 match_id; struct hclge_pf_to_vf_msg msg; }; @@ -169,6 +179,49 @@ struct hclge_vf_rst_cmd { u8 rsv[22]; }; +#pragma pack(1) +struct hclge_mbx_link_status { + __le16 link_status; + __le32 speed; + __le16 duplex; + u8 flag; +}; + +struct hclge_mbx_link_mode { + __le16 idx; + __le64 link_mode; +}; + +struct hclge_mbx_port_base_vlan { + __le16 state; + __le16 vlan_proto; + __le16 qos; + __le16 vlan_tag; +}; + +struct hclge_mbx_vf_queue_info { + __le16 num_tqps; + __le16 rss_size; + __le16 rx_buf_len; +}; + +struct hclge_mbx_vf_queue_depth { + __le16 num_tx_desc; + __le16 num_rx_desc; +}; + +struct hclge_mbx_vlan_filter { + u8 is_kill; + __le16 vlan_id; + __le16 proto; +}; + +struct hclge_mbx_mtu_info { + __le32 mtu; +}; + +#pragma pack() + /* used by VF to store the received Async responses from PF */ struct hclgevf_mbx_arq_ring { #define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8 @@ -177,7 +230,7 @@ struct hclgevf_mbx_arq_ring { u32 head; u32 tail; atomic_t count; - u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; + __le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; }; #define hclge_mbx_ring_ptr_move_crq(crq) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 6618ab32ab25d3bb6e18bdaed57fd094bf94a060..1eaea162d00e4477bd0302f71bf31cf14fdbb914 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -95,6 +95,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, + HNAE3_DEV_SUPPORT_CQ_B, }; #define hnae3_dev_fd_supported(hdev) \ @@ -154,6 +155,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \ test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps) +#define hnae3_ae_dev_cq_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; @@ -764,6 +768,7 @@ struct hnae3_tc_info { u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ u16 tqp_count[HNAE3_MAX_TC]; u16 tqp_offset[HNAE3_MAX_TC]; + u8 max_tc; /* Total number of TCs */ u8 num_tc; /* Total number of enabled TCs */ bool mqprio_active; }; @@ -845,6 +850,7 @@ struct hnae3_handle { struct dentry *hnae3_dbgfs; /* protects concurrent contention between debugfs commands */ struct mutex dbgfs_lock; + char **dbgfs_buf; /* Network interface message level enabled bits */ u32 msg_enable; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index c15ca710dabb826c0d3cb2f2d8fdb61812a207d8..c8b151d29f53ba5df2e269937f25a642951db457 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -149,6 +149,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = { {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B}, {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, 
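The new #pragma pack(1) message structures above give the VF/PF mailbox a fixed wire layout with explicit __le16/__le32 fields instead of memcpy() at magic byte offsets. Below is a minimal, self-contained sketch of that idea, using plain byte arrays and hand-rolled little-endian helpers in place of the kernel's __le16 type and cpu_to_le16()/le16_to_cpu(); the queue_info_msg layout is an invented example, not the actual mailbox format.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hand-rolled helpers standing in for cpu_to_le16()/le16_to_cpu(). */
static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/* Wire format of a hypothetical "queue info" message: every field has a
 * fixed width and a fixed byte order, so both ends parse it identically
 * regardless of host endianness. */
struct queue_info_msg {
	uint8_t num_tqps[2];
	uint8_t rss_size[2];
	uint8_t rx_buf_len[2];
};

int main(void)
{
	uint8_t buf[sizeof(struct queue_info_msg)];
	struct queue_info_msg msg;

	/* Sender side: encode host values into the fixed wire layout. */
	put_le16(msg.num_tqps, 16);
	put_le16(msg.rss_size, 8);
	put_le16(msg.rx_buf_len, 2048);
	memcpy(buf, &msg, sizeof(msg));

	/* Receiver side: decode by field name, no magic offsets. */
	memcpy(&msg, buf, sizeof(msg));
	printf("tqps=%u rss=%u buf_len=%u\n",
	       (unsigned)get_le16(msg.num_tqps),
	       (unsigned)get_le16(msg.rss_size),
	       (unsigned)get_le16(msg.rx_buf_len));
	return 0;
}

Named fields plus explicit byte order is what lets the later hunks in this patch replace the HCLGE_TQPS_*_OFFSET constants and per-field memcpy() calls with direct assignments.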
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B}, + {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B}, }; static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { @@ -160,6 +161,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B}, {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B}, {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, + {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B}, }; static void diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h index 876650eddac4242872ee66b76933ceeb7e3bbbe8..7a7d4cf9bf35d5c45e8b479633e3b2f040a78a3e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -338,6 +338,7 @@ enum HCLGE_COMM_CAP_BITS { HCLGE_COMM_CAP_PAUSE_B = 14, HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15, HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17, + HCLGE_COMM_CAP_CQ_B = 18, }; enum HCLGE_COMM_API_CAP_BITS { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h index aa1d7a6ff4ca20030d14b41397f1b1111cc24479..946d166a452db20bde87d504dadbc407296b1475 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h @@ -106,7 +106,7 @@ int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg, void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key, u8 *hfunc); void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg, - u32 *indir, __le16 rss_ind_tbl_size); + u32 *indir, u16 rss_ind_tbl_size); int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, const u8 *key); int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c index 0c60f41fca8a6f25446131feaa47ca04e294c837..f3c9395d8351cb31108973e1867332e00026c6ac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c @@ -75,7 +75,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, ret = hclge_comm_cmd_send(hw, &desc, 1); if (ret) { dev_err(&hw->cmq.csq.pdev->dev, - "failed to get tqp stat, ret = %d, tx = %u.\n", + "failed to get tqp stat, ret = %d, rx = %u.\n", ret, i); return ret; } @@ -89,7 +89,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, ret = hclge_comm_cmd_send(hw, &desc, 1); if (ret) { dev_err(&hw->cmq.csq.pdev->dev, - "failed to get tqp stat, ret = %d, rx = %u.\n", + "failed to get tqp stat, ret = %d, tx = %u.\n", ret, i); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index f726a5b70f9e2dc271bc9f2dabe068d06b0cfda2..93aeb615191d9064aee2be7e1f21c0003fb75322 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -562,12 +562,12 @@ static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf, for (i = 0; i < ring_num; i++) { j = 0; - sprintf(result[j++], "%8u", i); - sprintf(result[j++], "%9u", ring->tx_copybreak); - sprintf(result[j++], "%3u", tx_spare->len); - sprintf(result[j++], 
"%3u", tx_spare->next_to_use); - sprintf(result[j++], "%3u", tx_spare->next_to_clean); - sprintf(result[j++], "%3u", tx_spare->last_to_clean); + sprintf(result[j++], "%u", i); + sprintf(result[j++], "%u", ring->tx_copybreak); + sprintf(result[j++], "%u", tx_spare->len); + sprintf(result[j++], "%u", tx_spare->next_to_use); + sprintf(result[j++], "%u", tx_spare->next_to_clean); + sprintf(result[j++], "%u", tx_spare->last_to_clean); sprintf(result[j++], "%pad", &tx_spare->dma); hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items, @@ -598,35 +598,35 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring, u32 base_add_l, base_add_h; u32 j = 0; - sprintf(result[j++], "%8u", index); + sprintf(result[j++], "%u", index); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BD_NUM_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BD_LEN_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_PKTNUM_RECORD_REG)); - sprintf(result[j++], "%9u", ring->rx_copybreak); + sprintf(result[j++], "%u", ring->rx_copybreak); - sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_EN_REG) ? "on" : "off"); if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) - sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_EN_REG) ? 
"on" : "off"); else - sprintf(result[j++], "%10s", "NA"); + sprintf(result[j++], "%s", "NA"); base_add_h = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BASEADDR_H_REG); @@ -700,36 +700,36 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring, u32 base_add_l, base_add_h; u32 j = 0; - sprintf(result[j++], "%8u", index); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", index); + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_BD_NUM_REG)); - sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_FBDNUM_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_OFFSET_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_PKTNUM_RECORD_REG)); - sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_EN_REG) ? "on" : "off"); if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) - sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_EN_REG) ? 
"on" : "off"); else - sprintf(result[j++], "%10s", "NA"); + sprintf(result[j++], "%s", "NA"); base_add_h = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_BASEADDR_H_REG); @@ -848,15 +848,15 @@ static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv, { unsigned int j = 0; - sprintf(result[j++], "%5d", idx); + sprintf(result[j++], "%d", idx); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info)); - sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len)); - sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size)); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id)); - sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag)); - sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb)); - sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag)); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info)); if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { u32 ol_info = le32_to_cpu(desc->rx.ol_info); @@ -930,19 +930,19 @@ static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv, { unsigned int j = 0; - sprintf(result[j++], "%6d", idx); + sprintf(result[j++], "%d", idx); sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size)); sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.type_cs_vlan_tso_len)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv)); - sprintf(result[j++], "%10u", + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv)); + sprintf(result[j++], "%u", le32_to_cpu(desc->tx.ol_type_vlan_len_msec)); sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs)); sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss_hw_csum)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum)); } static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len) @@ -1227,7 +1227,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, return ret; mutex_lock(&handle->dbgfs_lock); - save_buf = &hns3_dbg_cmd[index].buf; + save_buf = &handle->dbgfs_buf[index]; if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) { @@ -1332,6 +1332,13 @@ int hns3_dbg_init(struct hnae3_handle *handle) int ret; u32 i; + handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev, + ARRAY_SIZE(hns3_dbg_cmd), + sizeof(*handle->dbgfs_buf), + GFP_KERNEL); + if (!handle->dbgfs_buf) + return -ENOMEM; + hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry = debugfs_create_dir(name, hns3_dbgfs_root); handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry; @@ -1380,9 +1387,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle) u32 i; for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) - if 
(hns3_dbg_cmd[i].buf) { - kvfree(hns3_dbg_cmd[i].buf); - hns3_dbg_cmd[i].buf = NULL; + if (handle->dbgfs_buf[i]) { + kvfree(handle->dbgfs_buf[i]); + handle->dbgfs_buf[i] = NULL; } mutex_destroy(&handle->dbgfs_lock); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h index 83aa1450ab9fe383bd31e979a885131329e4bf6a..97578eabb7d8b7a2defc1ac1422bb77f29b1f6bf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -49,7 +49,6 @@ struct hns3_dbg_cmd_info { enum hnae3_dbg_cmd cmd; enum hns3_dbg_dentry_type dentry; u32 buf_len; - char *buf; int (*init)(struct hnae3_handle *handle, unsigned int cmd); }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index e3911116a88fb19a39f995670ade61af70c21213..2174b5756b07be316de3fe7d3ea347d6f209ce6e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -5158,10 +5158,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, priv->tqp_vector[i].rx_group.dim.mode = mode; } - /* only device version above V3(include V3), GL can switch CQ/EQ - * period mode. - */ - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { + if (hnae3_ae_dev_cq_supported(ae_dev)) { u32 new_mode; u64 reg; @@ -5205,6 +5202,13 @@ static void hns3_state_init(struct hnae3_handle *handle) set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); } +static void hns3_state_uninit(struct hnae3_handle *handle) +{ + struct hns3_nic_priv *priv = handle->priv; + + clear_bit(HNS3_NIC_STATE_INITED, &priv->state); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; @@ -5322,7 +5326,9 @@ static int hns3_client_init(struct hnae3_handle *handle) return ret; out_reg_netdev_fail: + hns3_state_uninit(handle); hns3_dbg_uninit(handle); + hns3_client_stop(handle); out_client_start: hns3_free_rx_cpu_rmap(netdev); hns3_nic_uninit_irq(priv); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 69cee085ddeee5c1e07bb61e20f07798b04db696..295733e1bbc4b211c5b2d4dd463df01955ada44e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1121,6 +1121,36 @@ static int hns3_check_ringparam(struct net_device *ndev, return 0; } +static bool +hns3_is_ringparam_changed(struct net_device *ndev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct hns3_ring_param *old_ringparam, + struct hns3_ring_param *new_ringparam) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + u16 queue_num = h->kinfo.num_tqps; + + new_ringparam->tx_desc_num = ALIGN(param->tx_pending, + HNS3_RING_BD_MULTIPLE); + new_ringparam->rx_desc_num = ALIGN(param->rx_pending, + HNS3_RING_BD_MULTIPLE); + old_ringparam->tx_desc_num = priv->ring[0].desc_num; + old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num; + old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size; + new_ringparam->rx_buf_len = kernel_param->rx_buf_len; + + if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num && + old_ringparam->rx_desc_num == new_ringparam->rx_desc_num && + old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) { + netdev_info(ndev, "descriptor number and rx buffer length not changed\n"); + return false; + } + + return 
true; +} + static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len) { struct hns3_nic_priv *priv = netdev_priv(ndev); @@ -1142,57 +1172,47 @@ static int hns3_set_ringparam(struct net_device *ndev, struct kernel_ethtool_ringparam *kernel_param, struct netlink_ext_ack *extack) { + struct hns3_ring_param old_ringparam, new_ringparam; struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_ring *tmp_rings; bool if_running = netif_running(ndev); - u32 old_tx_desc_num, new_tx_desc_num; - u32 old_rx_desc_num, new_rx_desc_num; - u16 queue_num = h->kinfo.num_tqps; - u32 old_rx_buf_len; int ret, i; ret = hns3_check_ringparam(ndev, param, kernel_param); if (ret) return ret; - /* Hardware requires that its descriptors must be multiple of eight */ - new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); - new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); - old_tx_desc_num = priv->ring[0].desc_num; - old_rx_desc_num = priv->ring[queue_num].desc_num; - old_rx_buf_len = priv->ring[queue_num].buf_size; - if (old_tx_desc_num == new_tx_desc_num && - old_rx_desc_num == new_rx_desc_num && - kernel_param->rx_buf_len == old_rx_buf_len) + if (!hns3_is_ringparam_changed(ndev, param, kernel_param, + &old_ringparam, &new_ringparam)) return 0; tmp_rings = hns3_backup_ringparam(priv); if (!tmp_rings) { - netdev_err(ndev, - "backup ring param failed by allocating memory fail\n"); + netdev_err(ndev, "backup ring param failed by allocating memory fail\n"); return -ENOMEM; } netdev_info(ndev, - "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n", - old_tx_desc_num, old_rx_desc_num, - new_tx_desc_num, new_rx_desc_num, - old_rx_buf_len, kernel_param->rx_buf_len); + "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n", + old_ringparam.tx_desc_num, old_ringparam.rx_desc_num, + new_ringparam.tx_desc_num, new_ringparam.rx_desc_num, + old_ringparam.rx_buf_len, new_ringparam.rx_buf_len); if (if_running) ndev->netdev_ops->ndo_stop(ndev); - hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num); - hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len); + hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num, + new_ringparam.rx_desc_num); + hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len); ret = hns3_init_all_ring(priv); if (ret) { netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n", ret); - hns3_change_rx_buf_len(ndev, old_rx_buf_len); - hns3_change_all_ring_bd_num(priv, old_tx_desc_num, - old_rx_desc_num); + hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len); + hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num, + old_ringparam.rx_desc_num); for (i = 0; i < h->kinfo.num_tqps * 2; i++) memcpy(&priv->ring[i], &tmp_rings[i], sizeof(struct hns3_enet_ring)); @@ -1402,11 +1422,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev, return 0; } -static int hns3_check_coalesce_para(struct net_device *netdev, - struct ethtool_coalesce *cmd) +static int +hns3_check_cqe_coalesce_param(struct net_device *netdev, + struct kernel_ethtool_coalesce *kernel_coal) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + + if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) && + !hnae3_ae_dev_cq_supported(ae_dev)) { + netdev_err(netdev, "coalesced cqe mode is not supported\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int 
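hns3_is_ringparam_changed() above rounds the requested descriptor counts to the hardware multiple and lets hns3_set_ringparam() return early when nothing actually differs. A compact userspace sketch of the same early-out pattern follows, with BD_MULTIPLE and align_up() standing in for HNS3_RING_BD_MULTIPLE and the kernel's ALIGN(); the struct and function names are illustrative, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BD_MULTIPLE 8u /* stand-in for HNS3_RING_BD_MULTIPLE */

struct ring_param {
	uint32_t tx_desc_num;
	uint32_t rx_desc_num;
	uint32_t rx_buf_len;
};

/* Round up to the descriptor multiple the hardware requires. */
static uint32_t align_up(uint32_t v, uint32_t a)
{
	return (v + a - 1) / a * a;
}

/* Fill in the new parameters and report whether reconfiguration is needed. */
static bool ringparam_changed(const struct ring_param *cur,
			      uint32_t req_tx, uint32_t req_rx,
			      uint32_t req_buf_len,
			      struct ring_param *next)
{
	next->tx_desc_num = align_up(req_tx, BD_MULTIPLE);
	next->rx_desc_num = align_up(req_rx, BD_MULTIPLE);
	next->rx_buf_len = req_buf_len;

	return next->tx_desc_num != cur->tx_desc_num ||
	       next->rx_desc_num != cur->rx_desc_num ||
	       next->rx_buf_len != cur->rx_buf_len;
}

int main(void)
{
	struct ring_param cur = { 512, 512, 2048 }, next;

	/* 510 rounds up to 512, so only the buffer length differs here. */
	printf("changed=%d\n",
	       ringparam_changed(&cur, 510, 512, 4096, &next));
	return 0;
}

Bundling old and new values into one small struct is also what lets the caller revert cleanly if the reconfiguration later fails.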
+hns3_check_coalesce_para(struct net_device *netdev, + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal) { int ret; + ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal); + if (ret) + return ret; + ret = hns3_check_gl_coalesce_para(netdev, cmd); if (ret) { netdev_err(netdev, @@ -1481,7 +1523,7 @@ static int hns3_set_coalesce(struct net_device *netdev, if (hns3_nic_resetting(netdev)) return -EBUSY; - ret = hns3_check_coalesce_para(netdev, cmd); + ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal); if (ret) return ret; @@ -1842,23 +1884,30 @@ static int hns3_set_tunable(struct net_device *netdev, case ETHTOOL_TX_COPYBREAK_BUF_SIZE: old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size; new_tx_spare_buf_size = *(u32 *)data; + netdev_info(netdev, "request to set tx spare buf size from %u to %u\n", + old_tx_spare_buf_size, new_tx_spare_buf_size); ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size); if (ret || (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) { int ret1; - netdev_warn(netdev, - "change tx spare buf size fail, revert to old value\n"); + netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n"); ret1 = hns3_set_tx_spare_buf_size(netdev, old_tx_spare_buf_size); if (ret1) { - netdev_err(netdev, - "revert to old tx spare buf size fail\n"); + netdev_err(netdev, "revert to old tx spare buf size fail\n"); return ret1; } return ret; } + + if (!priv->ring->tx_spare) + netdev_info(netdev, "the active tx spare buf size is 0, disable tx spare buffer\n"); + else + netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n", + priv->ring->tx_spare->len); + break; default: ret = -EOPNOTSUPP; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h index 822d6fcbc73b8f6f871d43d00779e5d95e2ab34d..da207d1d9aa93d1ce1bc95b58bf4aa9fe49d7f20 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h @@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping { u8 link_ext_substate; }; +struct hns3_ring_param { + u32 tx_desc_num; + u32 rx_desc_num; + u32 rx_buf_len; +}; + #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 42a9e73d8588958cfffe0c85c691b4c839449105..6efd768cc07cffee1cac345276bd65a0966823fb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, * @num: number of extended command structures * * This function handles all the PF RAS errors in the - * hw register/s using command. + * hw registers using command. 
*/ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, struct hclge_desc *desc, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index db8455955a00c2f8925148f59b5c0dd59bd571c6..e2edf6e3cb2078cd06f7cb8a9136e1030292ba50 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1546,9 +1546,8 @@ static void hclge_init_tc_config(struct hclge_dev *hdev) static int hclge_configure(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - const struct cpumask *cpumask = cpu_online_mask; struct hclge_cfg cfg; - int node, ret; + int ret; ret = hclge_get_cfg(hdev, &cfg); if (ret) @@ -1594,13 +1593,6 @@ static int hclge_configure(struct hclge_dev *hdev) hclge_init_tc_config(hdev); hclge_init_kdump_kernel_config(hdev); - /* Set the affinity based on numa node */ - node = dev_to_node(&hdev->pdev->dev); - if (node != NUMA_NO_NODE) - cpumask = cpumask_of_node(node); - - cpumask_copy(&hdev->affinity_mask, cpumask); - return ret; } @@ -3276,7 +3268,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev) static int hclge_update_port_info(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; - int speed = HCLGE_MAC_SPEED_UNKNOWN; + int speed; int ret; /* get the port info from SFP cmd if not copper port */ @@ -3287,10 +3279,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (!hdev->support_sfp_query) return 0; - if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + speed = mac->speed; ret = hclge_get_sfp_info(hdev, mac); - else + } else { + speed = HCLGE_MAC_SPEED_UNKNOWN; ret = hclge_get_sfp_speed(hdev, &speed); + } if (ret == -EOPNOTSUPP) { hdev->support_sfp_query = false; @@ -3302,6 +3297,8 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { if (mac->speed_type == QUERY_ACTIVE_SPEED) { hclge_update_port_capability(hdev, mac); + if (mac->speed != speed) + (void)hclge_tm_port_shaper_cfg(hdev); return 0; } return hclge_cfg_mac_speed_dup(hdev, mac->speed, @@ -3384,6 +3381,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, link_state_old = vport->vf_info.link_state; vport->vf_info.link_state = link_state; + /* return success directly if the VF is unalive, VF will + * query link state itself when it starts work. 
+ */ + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + return 0; + ret = hclge_push_vf_link_status(vport); if (ret) { vport->vf_info.link_state = link_state_old; @@ -3564,17 +3567,6 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev) hdev->num_msi_used += 1; } -static void hclge_misc_affinity_setup(struct hclge_dev *hdev) -{ - irq_set_affinity_hint(hdev->misc_vector.vector_irq, - &hdev->affinity_mask); -} - -static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) -{ - irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); -} - static int hclge_misc_irq_init(struct hclge_dev *hdev) { int ret; @@ -10136,6 +10128,7 @@ static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, if (ret) return ret; + vport->port_base_vlan_cfg.tbl_sta = false; /* remove old VLAN tag */ if (old_info->vlan_tag == 0) ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, @@ -10323,11 +10316,11 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, } if (!ret) { - if (is_kill) - hclge_rm_vport_vlan_table(vport, vlan_id, false); - else + if (!is_kill) hclge_add_vport_vlan_table(vport, vlan_id, writen_to_tbl); + else if (is_kill && vlan_id != 0) + hclge_rm_vport_vlan_table(vport, vlan_id, false); } else if (is_kill) { /* when remove hw vlan filter failed, record the vlan id, * and try to remove it from hw later, to be consistence @@ -10449,6 +10442,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) /* PF's mps must be greater then VF's mps */ for (i = 1; i < hdev->num_alloc_vport; i++) if (max_frm_size < hdev->vport[i].mps) { + dev_err(&hdev->pdev->dev, + "failed to set pf mtu for less than vport %d, mps = %u.\n", + i, hdev->vport[i].mps); mutex_unlock(&hdev->vport_lock); return -EINVAL; } @@ -11454,11 +11450,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); - /* Setup affinity after service timer setup because add_timer_on - * is called in affinity notify. 
- */ - hclge_misc_affinity_setup(hdev); - hclge_clear_all_event_cause(hdev); hclge_clear_resetting_state(hdev); @@ -11876,7 +11867,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_reset_vf_rate(hdev); hclge_clear_vf_vlan(hdev); - hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); hclge_ptp_uninit(hdev); hclge_uninit_rxd_adv_layout(hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 025fd73ea485c309e6e49c599663f02ee9f44d72..d2158116398d367fb677f327b356b0e451e94127 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -780,8 +780,8 @@ struct hclge_vf_vlan_cfg { union { struct { u8 is_kill; - u16 vlan; - u16 proto; + __le16 vlan; + __le16 proto; }; u8 enable; }; @@ -938,8 +938,6 @@ struct hclge_dev { DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE); - /* affinity mask and notify for misc interrupt */ - cpumask_t affinity_mask; struct hclge_ptp *ptp; struct devlink *devlink; struct hclge_comm_rss_cfg rss_cfg; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 6799d16de34b9490f60fe67c94a2619e329e0ca4..e1012f7f9b7349029383cbc13709f34cffd81e37 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -57,17 +57,19 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; resp_pf_to_vf->match_id = vf_to_pf_req->match_id; - resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP; - resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code; - resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode; + resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP); + resp_pf_to_vf->msg.vf_mbx_msg_code = + cpu_to_le16(vf_to_pf_req->msg.code); + resp_pf_to_vf->msg.vf_mbx_msg_subcode = + cpu_to_le16(vf_to_pf_req->msg.subcode); resp = hclge_errno_to_resp(resp_msg->status); if (resp < SHRT_MAX) { - resp_pf_to_vf->msg.resp_status = resp; + resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp); } else { dev_warn(&hdev->pdev->dev, "failed to send response to VF, response status %u is out-of-bound\n", resp); - resp_pf_to_vf->msg.resp_status = EIO; + resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO); } if (resp_msg->len > 0) @@ -94,15 +96,22 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, enum hclge_comm_cmd_status status; struct hclge_desc desc; + if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) { + dev_err(&hdev->pdev->dev, + "msg data length(=%u) exceeds maximum(=%u)\n", + msg_len, HCLGE_MBX_MAX_MSG_SIZE); + return -EMSGSIZE; + } + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); resp_pf_to_vf->dest_vfid = dest_vfid; resp_pf_to_vf->msg_len = msg_len; - resp_pf_to_vf->msg.code = mbx_opcode; + resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode); - memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len); + memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len); trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); @@ -118,8 +127,8 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) { struct hclge_dev *hdev = vport->back; + __le16 msg_data; u16 reset_type; - u8 msg_data[2]; u8 dest_vfid; 
BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX); @@ -133,10 +142,10 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) else reset_type = HNAE3_VF_FUNC_RESET; - memcpy(&msg_data[0], &reset_type, sizeof(u16)); + msg_data = cpu_to_le16(reset_type); /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data), HCLGE_MBX_ASSERTING_RESET, dest_vfid); } @@ -176,7 +185,7 @@ static int hclge_get_ring_chain_from_mbx( ring_num = req->msg.ring_num; if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM) - return -ENOMEM; + return -EINVAL; for (i = 0; i < ring_num; i++) { if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { @@ -242,6 +251,81 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, return ret; } +static int hclge_query_ring_vector_map(struct hclge_vport *vport, + struct hnae3_ring_chain_node *ring_chain, + struct hclge_desc *desc) +{ + struct hclge_ctrl_vector_chain_cmd *req = + (struct hclge_ctrl_vector_chain_cmd *)desc->data; + struct hclge_dev *hdev = vport->back; + u16 tqp_type_and_id; + int status; + + hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true); + + tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, + hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, + ring_chain->tqp_index); + req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id); + req->vfid = vport->vport_id; + + status = hclge_cmd_send(&hdev->hw, desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Get VF ring vector map info fail, status is %d.\n", + status); + + return status; +} + +static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req, + struct hclge_respond_to_vf_msg *resp) +{ +#define HCLGE_LIMIT_RING_NUM 1 +#define HCLGE_RING_TYPE_OFFSET 0 +#define HCLGE_TQP_INDEX_OFFSET 1 +#define HCLGE_INT_GL_INDEX_OFFSET 2 +#define HCLGE_VECTOR_ID_OFFSET 3 +#define HCLGE_RING_VECTOR_MAP_INFO_LEN 4 + struct hnae3_ring_chain_node ring_chain; + struct hclge_desc desc; + struct hclge_ctrl_vector_chain_cmd *data = + (struct hclge_ctrl_vector_chain_cmd *)desc.data; + u16 tqp_type_and_id; + u8 int_gl_index; + int ret; + + req->msg.ring_num = HCLGE_LIMIT_RING_NUM; + + memset(&ring_chain, 0, sizeof(ring_chain)); + ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport); + if (ret) + return ret; + + ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc); + if (ret) { + hclge_free_vector_ring_chain(&ring_chain); + return ret; + } + + tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]); + int_gl_index = hnae3_get_field(tqp_type_and_id, + HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S); + + resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type; + resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index; + resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index; + resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l; + resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN; + + hclge_free_vector_ring_chain(&ring_chain); + + return ret; +} + static void hclge_set_vf_promisc_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *req) { @@ -332,16 +416,14 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, u16 state, struct hclge_vlan_info *vlan_info) { -#define MSG_DATA_SIZE 8 + struct hclge_mbx_port_base_vlan base_vlan; - u8 
msg_data[MSG_DATA_SIZE]; + base_vlan.state = cpu_to_le16(state); + base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto); + base_vlan.qos = cpu_to_le16(vlan_info->qos); + base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag); - memcpy(&msg_data[0], &state, sizeof(u16)); - memcpy(&msg_data[2], &vlan_info->vlan_proto, sizeof(u16)); - memcpy(&msg_data[4], &vlan_info->qos, sizeof(u16)); - memcpy(&msg_data[6], &vlan_info->vlan_tag, sizeof(u16)); - - return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan), HCLGE_MBX_PUSH_VLAN_INFO, vfid); } @@ -355,13 +437,16 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, struct hnae3_handle *handle = &vport->nic; struct hclge_dev *hdev = vport->back; struct hclge_vf_vlan_cfg *msg_cmd; + __be16 proto; + u16 vlan_id; msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; switch (msg_cmd->subcode) { case HCLGE_MBX_VLAN_FILTER: - return hclge_set_vlan_filter(handle, - cpu_to_be16(msg_cmd->proto), - msg_cmd->vlan, msg_cmd->is_kill); + proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto)); + vlan_id = le16_to_cpu(msg_cmd->vlan); + return hclge_set_vlan_filter(handle, proto, vlan_id, + msg_cmd->is_kill); case HCLGE_MBX_VLAN_RX_OFF_CFG: return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable); case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE: @@ -404,15 +489,17 @@ static void hclge_get_basic_info(struct hclge_vport *vport, struct hnae3_ae_dev *ae_dev = vport->back->ae_dev; struct hclge_basic_info *basic_info; unsigned int i; + u32 pf_caps; basic_info = (struct hclge_basic_info *)resp_msg->data; for (i = 0; i < kinfo->tc_info.num_tc; i++) basic_info->hw_tc_map |= BIT(i); + pf_caps = le32_to_cpu(basic_info->pf_caps); if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) - hnae3_set_bit(basic_info->pf_caps, - HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1); + hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1); + basic_info->pf_caps = cpu_to_le32(pf_caps); resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE; } @@ -420,19 +507,15 @@ static void hclge_get_vf_queue_info(struct hclge_vport *vport, struct hclge_respond_to_vf_msg *resp_msg) { #define HCLGE_TQPS_RSS_INFO_LEN 6 -#define HCLGE_TQPS_ALLOC_OFFSET 0 -#define HCLGE_TQPS_RSS_SIZE_OFFSET 2 -#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET 4 + struct hclge_mbx_vf_queue_info *queue_info; struct hclge_dev *hdev = vport->back; /* get the queue related info */ - memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET], - &vport->alloc_tqps, sizeof(u16)); - memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET], - &vport->nic.kinfo.rss_size, sizeof(u16)); - memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET], - &hdev->rx_buf_len, sizeof(u16)); + queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data; + queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps); + queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size); + queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len); resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN; } @@ -447,16 +530,15 @@ static void hclge_get_vf_queue_depth(struct hclge_vport *vport, struct hclge_respond_to_vf_msg *resp_msg) { #define HCLGE_TQPS_DEPTH_INFO_LEN 4 -#define HCLGE_TQPS_NUM_TX_DESC_OFFSET 0 -#define HCLGE_TQPS_NUM_RX_DESC_OFFSET 2 + struct hclge_mbx_vf_queue_depth *queue_depth; struct hclge_dev *hdev = vport->back; /* get the queue depth info */ - memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET], - &hdev->num_tx_desc, sizeof(u16)); - memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET], - &hdev->num_rx_desc, sizeof(u16)); 
+ queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data; + queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc); + queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc); + resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN; } @@ -481,10 +563,9 @@ int hclge_push_vf_link_status(struct hclge_vport *vport) #define HCLGE_VF_LINK_STATE_UP 1U #define HCLGE_VF_LINK_STATE_DOWN 0U + struct hclge_mbx_link_status link_info; struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[9]; - u16 duplex; /* mac.link can only be 0 or 1 */ switch (vport->vf_info.link_state) { @@ -500,14 +581,13 @@ int hclge_push_vf_link_status(struct hclge_vport *vport) break; } - duplex = hdev->hw.mac.duplex; - memcpy(&msg_data[0], &link_status, sizeof(u16)); - memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); - memcpy(&msg_data[6], &duplex, sizeof(u16)); - msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN; + link_info.link_status = cpu_to_le16(link_status); + link_info.speed = cpu_to_le32(hdev->hw.mac.speed); + link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex); + link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN; /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info), HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id); } @@ -515,22 +595,22 @@ static void hclge_get_link_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { #define HCLGE_SUPPORTED 1 + struct hclge_mbx_link_mode link_mode; struct hclge_dev *hdev = vport->back; unsigned long advertising; unsigned long supported; unsigned long send_data; - u8 msg_data[10] = {}; u8 dest_vfid; advertising = hdev->hw.mac.advertising[0]; supported = hdev->hw.mac.supported[0]; dest_vfid = mbx_req->mbx_src_vfid; - msg_data[0] = mbx_req->msg.data[0]; - - send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising; + send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? 
supported : + advertising; + link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]); + link_mode.link_mode = cpu_to_le64(send_data); - memcpy(&msg_data[2], &send_data, sizeof(unsigned long)); - hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode), HCLGE_MBX_LINK_STAT_MODE, dest_vfid); } @@ -544,7 +624,7 @@ static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport, u16 queue_id; int ret; - memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id)); + queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data); resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE; resp_msg->len = sizeof(u8); @@ -580,36 +660,39 @@ static void hclge_vf_keep_alive(struct hclge_vport *vport) static int hclge_set_vf_mtu(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { + struct hclge_mbx_mtu_info *mtu_info; u32 mtu; - memcpy(&mtu, mbx_req->msg.data, sizeof(mtu)); + mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data; + mtu = le32_to_cpu(mtu_info->mtu); return hclge_set_vport_mtu(vport, mtu); } -static void hclge_get_queue_id_in_pf(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - struct hclge_respond_to_vf_msg *resp_msg) +static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) { struct hnae3_handle *handle = &vport->nic; struct hclge_dev *hdev = vport->back; u16 queue_id, qid_in_pf; - memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id)); + queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data); if (queue_id >= handle->kinfo.num_tqps) { dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n", queue_id, mbx_req->mbx_src_vfid); - return; + return -EINVAL; } qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); - memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf)); + *(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf); resp_msg->len = sizeof(qid_in_pf); + return 0; } -static void hclge_get_rss_key(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - struct hclge_respond_to_vf_msg *resp_msg) +static int hclge_get_rss_key(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) { #define HCLGE_RSS_MBX_RESP_LEN 8 struct hclge_dev *hdev = vport->back; @@ -627,13 +710,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport, dev_warn(&hdev->pdev->dev, "failed to get the rss hash key, the index(%u) invalid !\n", index); - return; + return -EINVAL; } memcpy(resp_msg->data, &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], HCLGE_RSS_MBX_RESP_LEN); resp_msg->len = HCLGE_RSS_MBX_RESP_LEN; + return 0; } static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code) @@ -746,6 +830,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret = hclge_map_unmap_ring_to_vf_vector(vport, false, req); break; + case HCLGE_MBX_GET_RING_VECTOR_MAP: + ret = hclge_get_vf_ring_vector_map(vport, req, + &resp_msg); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to get VF ring vector map\n", + ret); + break; case HCLGE_MBX_SET_PROMISC_MODE: hclge_set_vf_promisc_mode(vport, req); break; @@ -809,10 +901,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "VF fail(%d) to set mtu\n", ret); break; case HCLGE_MBX_GET_QID_IN_PF: - hclge_get_queue_id_in_pf(vport, req, &resp_msg); + ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg); break; case HCLGE_MBX_GET_RSS_KEY: - hclge_get_rss_key(vport, req, &resp_msg); + ret = 
hclge_get_rss_key(vport, req, &resp_msg); break; case HCLGE_MBX_GET_LINK_MODE: hclge_get_link_mode(vport, req); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 089f4444b7e3e3484bb30a883de139926968e367..084e190602d6890077ece10fcf41fd2356f81e7f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -282,8 +282,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, - u16 qs_id, u8 pri) +static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri, + bool link_vld) { struct hclge_qs_to_pri_link_cmd *map; struct hclge_desc desc; @@ -294,7 +294,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, map->qs_id = cpu_to_le16(qs_id); map->priority = pri; - map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK; + map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0; return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -420,7 +420,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) { struct hclge_port_shapping_cmd *shap_cfg_cmd; struct hclge_shaper_ir_para ir_para; @@ -642,11 +642,13 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport) * one tc for VF for simplicity. VF's vport_id is non zero. */ if (vport->vport_id) { + kinfo->tc_info.max_tc = 1; kinfo->tc_info.num_tc = 1; vport->qs_offset = HNAE3_MAX_TC + vport->vport_id - HCLGE_VF_VPORT_START_NUM; vport_max_rss_size = hdev->vf_rss_size_max; } else { + kinfo->tc_info.max_tc = hdev->tc_max; kinfo->tc_info.num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); vport->qs_offset = 0; @@ -679,7 +681,9 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->num_tqps = hclge_vport_get_tqp_num(vport); vport->dwrr = 100; /* 100 percent as init */ vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; - hdev->rss_cfg.rss_size = kinfo->rss_size; + + if (vport->vport_id == PF_VPORT_ID) + hdev->rss_cfg.rss_size = kinfo->rss_size; /* when enable mqprio, the tc_info has been updated. */ if (kinfo->tc_info.mqprio_active) @@ -714,14 +718,22 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev) static void hclge_tm_tc_info_init(struct hclge_dev *hdev) { - u8 i; + u8 i, tc_sch_mode; + u32 bw_limit; + + for (i = 0; i < hdev->tc_max; i++) { + if (i < hdev->tm_info.num_tc) { + tc_sch_mode = HCLGE_SCH_MODE_DWRR; + bw_limit = hdev->tm_info.pg_info[0].bw_limit; + } else { + tc_sch_mode = HCLGE_SCH_MODE_SP; + bw_limit = 0; + } - for (i = 0; i < hdev->tm_info.num_tc; i++) { hdev->tm_info.tc_info[i].tc_id = i; - hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; + hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode; hdev->tm_info.tc_info[i].pgid = 0; - hdev->tm_info.tc_info[i].bw_limit = - hdev->tm_info.pg_info[0].bw_limit; + hdev->tm_info.tc_info[i].bw_limit = bw_limit; } for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) @@ -926,10 +938,13 @@ static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev) for (k = 0; k < hdev->num_alloc_vport; k++) { struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; - for (i = 0; i < kinfo->tc_info.num_tc; i++) { + for (i = 0; i < kinfo->tc_info.max_tc; i++) { + u8 pri = i < kinfo->tc_info.num_tc ? 
i : 0; + bool link_vld = i < kinfo->tc_info.num_tc; + ret = hclge_tm_qs_to_pri_map_cfg(hdev, vport[k].qs_offset + i, - i); + pri, link_vld); if (ret) return ret; } @@ -949,7 +964,7 @@ static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev) for (i = 0; i < HNAE3_MAX_TC; i++) { ret = hclge_tm_qs_to_pri_map_cfg(hdev, vport[k].qs_offset + i, - k); + k, true); if (ret) return ret; } @@ -989,33 +1004,39 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) { u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate; struct hclge_shaper_ir_para ir_para; - u32 shaper_para; + u32 shaper_para_c, shaper_para_p; int ret; u32 i; - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { u32 rate = hdev->tm_info.tc_info[i].bw_limit; - ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, - &ir_para, max_tm_rate); - if (ret) - return ret; + if (rate) { + ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, + &ir_para, max_tm_rate); + if (ret) + return ret; + + shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b, + ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + } else { + shaper_para_c = 0; + shaper_para_p = 0; + } - shaper_para = hclge_tm_get_shapping_para(0, 0, 0, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, - shaper_para, rate); + shaper_para_c, rate); if (ret) return ret; - shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, - ir_para.ir_u, - ir_para.ir_s, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, - shaper_para, rate); + shaper_para_p, rate); if (ret) return ret; } @@ -1125,7 +1146,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) int ret; u32 i, k; - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; dwrr = pg_info->tc_dwrr[i]; @@ -1135,9 +1156,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) return ret; for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; + + if (i >= kinfo->tc_info.max_tc) + continue; + + dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0; ret = hclge_tm_qs_weight_cfg( hdev, vport[k].qs_offset + i, - vport[k].dwrr); + dwrr); if (ret) return ret; } @@ -1303,6 +1330,7 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) { struct hclge_vport *vport = hdev->vport; int ret; + u8 mode; u16 i; ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id); @@ -1310,9 +1338,16 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) return ret; for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo; + + if (pri_id >= kinfo->tc_info.max_tc) + continue; + + mode = pri_id < kinfo->tc_info.num_tc ? 
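The TM loops above now iterate over all hardware TCs (max_tc) and explicitly program the slots beyond num_tc as strict-priority with zero weight and bandwidth, rather than leaving them with whatever a previous configuration wrote. A small sketch of that "configure every slot, zero the disabled ones" pattern follows, with invented tc_cfg/MODE_* names rather than the real hclge command structures.

#include <stdint.h>
#include <stdio.h>

enum sched_mode { MODE_SP, MODE_DWRR }; /* strict priority vs. weighted */

struct tc_cfg {
	enum sched_mode mode;
	uint32_t weight;    /* DWRR weight, 0 when the TC is disabled  */
	uint32_t bw_limit;  /* shaper rate, 0 when the TC is disabled  */
};

/* Program every hardware TC slot: enabled TCs get DWRR and a bandwidth
 * limit, the remaining slots are explicitly reset so stale settings from
 * an earlier configuration cannot take effect. */
static void config_tcs(struct tc_cfg *cfg, unsigned int max_tc,
		       unsigned int num_tc, uint32_t bw_limit)
{
	unsigned int i;

	for (i = 0; i < max_tc; i++) {
		if (i < num_tc) {
			cfg[i].mode = MODE_DWRR;
			cfg[i].weight = 100;
			cfg[i].bw_limit = bw_limit;
		} else {
			cfg[i].mode = MODE_SP;
			cfg[i].weight = 0;
			cfg[i].bw_limit = 0;
		}
	}
}

int main(void)
{
	struct tc_cfg cfg[8];
	unsigned int i;

	config_tcs(cfg, 8, 4, 100000);
	for (i = 0; i < 8; i++)
		printf("tc%u: mode=%d weight=%u bw=%u\n", i,
		       cfg[i].mode, cfg[i].weight, cfg[i].bw_limit);
	return 0;
}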
HCLGE_SCH_MODE_DWRR : + HCLGE_SCH_MODE_SP; ret = hclge_tm_qs_schd_mode_cfg(hdev, vport[i].qs_offset + pri_id, - HCLGE_SCH_MODE_DWRR); + mode); if (ret) return ret; } @@ -1353,7 +1388,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) u8 i; if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 619cc30a2dfcc2804312b6d110818c10599287bd..d943943912f76522ec340f99b32180a07a224c12 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -237,6 +237,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate); +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev); int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num); int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num); int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h index 5b0b71bd61200e47d70e74895d30533a95092bc1..8510b88d49820acf3ef81c34ee263d6eaebd2591 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h @@ -62,7 +62,7 @@ TRACE_EVENT(hclge_pf_mbx_send, TP_fast_assign( __entry->vfid = req->dest_vfid; - __entry->code = req->msg.code; + __entry->code = le16_to_cpu(req->msg.code); __assign_str(pciname, pci_name(hdev->pdev)); __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); memcpy(__entry->mbx_data, req, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index c956da60d90a82cc5367511d37ed8343a710a25c..32a7b467d79fcd9016148c4f1bf8a6715bd17116 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -189,8 +189,8 @@ static int hclgevf_get_basic_info(struct hclgevf_dev *hdev) basic_info = (struct hclge_basic_info *)resp_msg; hdev->hw_tc_map = basic_info->hw_tc_map; - hdev->mbx_api_version = basic_info->mbx_api_version; - caps = basic_info->pf_caps; + hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version); + caps = le32_to_cpu(basic_info->pf_caps); if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps)) set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); @@ -223,10 +223,8 @@ static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_RSS_INFO_LEN 6 -#define HCLGEVF_TQPS_ALLOC_OFFSET 0 -#define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 -#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 + struct hclge_mbx_vf_queue_info *queue_info; u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; struct hclge_vf_to_pf_msg send_msg; int status; @@ -241,12 +239,10 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) return status; } - memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], - sizeof(u16)); - memcpy(&hdev->rss_size_max, 
&resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], - sizeof(u16)); - memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], - sizeof(u16)); + queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg; + hdev->num_tqps = le16_to_cpu(queue_info->num_tqps); + hdev->rss_size_max = le16_to_cpu(queue_info->rss_size); + hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len); return 0; } @@ -254,9 +250,8 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 -#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 -#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 + struct hclge_mbx_vf_queue_depth *queue_depth; u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; struct hclge_vf_to_pf_msg send_msg; int ret; @@ -271,10 +266,9 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) return ret; } - memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], - sizeof(u16)); - memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], - sizeof(u16)); + queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg; + hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc); + hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc); return 0; } @@ -288,11 +282,11 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) int ret; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); - memcpy(send_msg.data, &queue_id, sizeof(queue_id)); + *(__le16 *)send_msg.data = cpu_to_le16(queue_id); ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, sizeof(resp_data)); if (!ret) - qid_in_pf = *(u16 *)resp_data; + qid_in_pf = le16_to_cpu(*(__le16 *)resp_data); return qid_in_pf; } @@ -1245,11 +1239,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill) { -#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 -#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 -#define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_mbx_vlan_filter *vlan_filter; struct hclge_vf_to_pf_msg send_msg; int ret; @@ -1271,11 +1262,11 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, HCLGE_MBX_VLAN_FILTER); - send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; - memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, - sizeof(vlan_id)); - memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, - sizeof(proto)); + vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data; + vlan_filter->is_kill = is_kill; + vlan_filter->vlan_id = cpu_to_le16(vlan_id); + vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto)); + /* when remove hw vlan filter failed, record the vlan id, * and try to remove it from hw later, to be consistence * with stack. 
@@ -1347,7 +1338,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle) for (i = 1; i < handle->kinfo.num_tqps; i++) { hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); - memcpy(send_msg.data, &i, sizeof(i)); + *(__le16 *)send_msg.data = cpu_to_le16(i); ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); if (ret) return ret; @@ -1359,10 +1350,13 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle) static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_mbx_mtu_info *mtu_info; struct hclge_vf_to_pf_msg send_msg; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); - memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); + mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data; + mtu_info->mtu = cpu_to_le32(new_mtu); + return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); } @@ -2963,7 +2957,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - /* ensure vf tbl list as empty before init*/ + /* ensure vf tbl list as empty before init */ ret = hclgevf_clear_vport_list(hdev); if (ret) { dev_err(&pdev->dev, @@ -3315,7 +3309,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, for (i = 0; i < reg_um; i++) *reg++ = hclgevf_read_dev(&hdev->hw, ring_reg_addr_list[i] + - 0x200 * j); + HCLGEVF_TQP_REG_SIZE * j); for (i = 0; i < separator_num; i++) *reg++ = SEPARATOR_VALUE; } @@ -3333,7 +3327,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, } void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, - u8 *port_base_vlan_info, u8 data_size) + struct hclge_mbx_port_base_vlan *port_base_vlan) { struct hnae3_handle *nic = &hdev->nic; struct hclge_vf_to_pf_msg send_msg; @@ -3358,7 +3352,7 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, /* send msg to PF and wait update port based vlan info */ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, HCLGE_MBX_PORT_BASE_VLAN_CFG); - memcpy(send_msg.data, port_base_vlan_info, data_size); + memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan)); ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); if (!ret) { if (state == HNAE3_PORT_BASE_VLAN_DISABLE) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 4b00fd44f118821f559083567549ce104fab7f6b..59ca6c794d6dbef559c4a51eb6e521c2841b2d96 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -293,5 +293,5 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev); void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev); void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, - u8 *port_base_vlan_info, u8 data_size); + struct hclge_mbx_port_base_vlan *port_base_vlan); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index d5e0a3f762f7dbe04d729561f8334a3cc88580b6..bbf7b14079de3cf2dc68cdd67a1f288b38903a79 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code) static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) { /* this function should be called with 
mbx_resp.mbx_mutex held - * to prtect the received_response from race condition + * to protect the received_response from race condition */ hdev->mbx_resp.received_resp = false; hdev->mbx_resp.origin_mbx_msg = 0; @@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) /* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox * message to PF. * @hdev: pointer to struct hclgevf_dev - * @resp_msg: pointer to store the original message type and response status - * @len: the resp_msg data array length. + * @code0: the message opcode VF send to PF. + * @code1: the message sub-opcode VF send to PF. + * @resp_data: pointer to store response data from PF to VF. + * @resp_len: the length of resp_data from PF to VF. */ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, u8 *resp_data, u16 resp_len) @@ -122,7 +124,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, if (need_resp) { mutex_lock(&hdev->mbx_resp.mbx_mutex); hclgevf_reset_mbx_resp_status(hdev); - req->match_id = hdev->mbx_resp.match_id; + req->match_id = cpu_to_le16(hdev->mbx_resp.match_id); status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) { dev_err(&hdev->pdev->dev, @@ -160,27 +162,29 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw) static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev, struct hclge_mbx_pf_to_vf_cmd *req) { + u16 vf_mbx_msg_subcode = le16_to_cpu(req->msg.vf_mbx_msg_subcode); + u16 vf_mbx_msg_code = le16_to_cpu(req->msg.vf_mbx_msg_code); struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp; + u16 resp_status = le16_to_cpu(req->msg.resp_status); + u16 match_id = le16_to_cpu(req->match_id); if (resp->received_resp) dev_warn(&hdev->pdev->dev, - "VF mbx resp flag not clear(%u)\n", - req->msg.vf_mbx_msg_code); - - resp->origin_mbx_msg = - (req->msg.vf_mbx_msg_code << 16); - resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode; - resp->resp_status = - hclgevf_resp_to_errno(req->msg.resp_status); + "VF mbx resp flag not clear(%u)\n", + vf_mbx_msg_code); + + resp->origin_mbx_msg = (vf_mbx_msg_code << 16); + resp->origin_mbx_msg |= vf_mbx_msg_subcode; + resp->resp_status = hclgevf_resp_to_errno(resp_status); memcpy(resp->additional_info, req->msg.resp_data, HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8)); - if (req->match_id) { + if (match_id) { /* If match_id is not zero, it means PF support match_id. * if the match_id is right, VF get the right response, or * ignore the response. and driver will clear hdev->mbx_resp * when send next message which need response. 
*/ - if (req->match_id == resp->match_id) + if (match_id == resp->match_id) resp->received_resp = true; } else { resp->received_resp = true; @@ -197,7 +201,7 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev, HCLGE_MBX_MAX_ARQ_MSG_NUM) { dev_warn(&hdev->pdev->dev, "Async Q full, dropping msg(%u)\n", - req->msg.code); + le16_to_cpu(req->msg.code)); return; } @@ -216,6 +220,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) struct hclge_comm_cmq_ring *crq; struct hclge_desc *desc; u16 flag; + u16 code; crq = &hdev->hw.hw.cmq.crq; @@ -230,10 +235,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + code = le16_to_cpu(req->msg.code); if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, "dropped invalid mailbox message, code = %u\n", - req->msg.code); + code); /* dropping/not processing this invalid message */ crq->desc[crq->next_to_use].flag = 0; @@ -249,7 +255,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) * timeout and simultaneously queue the async messages for later * prcessing in context of mailbox task i.e. the slow path. */ - switch (req->msg.code) { + switch (code) { case HCLGE_MBX_PF_VF_RESP: hclgevf_handle_mbx_response(hdev, req); break; @@ -263,7 +269,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) default: dev_err(&hdev->pdev->dev, "VF received unsupported(%u) mbx msg from PF\n", - req->msg.code); + code); break; } crq->desc[crq->next_to_use].flag = 0; @@ -285,14 +291,18 @@ static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev, void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { + struct hclge_mbx_port_base_vlan *vlan_info; + struct hclge_mbx_link_status *link_info; + struct hclge_mbx_link_mode *link_mode; enum hnae3_reset_type reset_type; u16 link_status, state; - u16 *msg_q, *vlan_info; + __le16 *msg_q; + u16 opcode; u8 duplex; u32 speed; u32 tail; u8 flag; - u8 idx; + u16 idx; tail = hdev->arq.tail; @@ -306,13 +316,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) } msg_q = hdev->arq.msg_q[hdev->arq.head]; - - switch (msg_q[0]) { + opcode = le16_to_cpu(msg_q[0]); + switch (opcode) { case HCLGE_MBX_LINK_STAT_CHANGE: - link_status = msg_q[1]; - memcpy(&speed, &msg_q[2], sizeof(speed)); - duplex = (u8)msg_q[4]; - flag = (u8)msg_q[5]; + link_info = (struct hclge_mbx_link_status *)(msg_q + 1); + link_status = le16_to_cpu(link_info->link_status); + speed = le32_to_cpu(link_info->speed); + duplex = (u8)le16_to_cpu(link_info->duplex); + flag = link_info->flag; /* update upper layer with new link link status */ hclgevf_update_speed_duplex(hdev, speed, duplex); @@ -324,13 +335,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) break; case HCLGE_MBX_LINK_STAT_MODE: - idx = (u8)msg_q[1]; + link_mode = (struct hclge_mbx_link_mode *)(msg_q + 1); + idx = le16_to_cpu(link_mode->idx); if (idx) - memcpy(&hdev->hw.mac.supported, &msg_q[2], - sizeof(unsigned long)); + hdev->hw.mac.supported = + le64_to_cpu(link_mode->link_mode); else - memcpy(&hdev->hw.mac.advertising, &msg_q[2], - sizeof(unsigned long)); + hdev->hw.mac.advertising = + le64_to_cpu(link_mode->link_mode); break; case HCLGE_MBX_ASSERTING_RESET: /* PF has asserted reset hence VF should go in pending @@ -338,25 +350,27 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) * has been completely reset. After this stack should * eventually be re-initialized. 
*/ - reset_type = (enum hnae3_reset_type)msg_q[1]; + reset_type = + (enum hnae3_reset_type)le16_to_cpu(msg_q[1]); set_bit(reset_type, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); break; case HCLGE_MBX_PUSH_VLAN_INFO: - state = msg_q[1]; - vlan_info = &msg_q[1]; + vlan_info = + (struct hclge_mbx_port_base_vlan *)(msg_q + 1); + state = le16_to_cpu(vlan_info->state); hclgevf_update_port_base_vlan_info(hdev, state, - (u8 *)vlan_info, 8); + vlan_info); break; case HCLGE_MBX_PUSH_PROMISC_INFO: - hclgevf_parse_promisc_info(hdev, msg_q[1]); + hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1])); break; default: dev_err(&hdev->pdev->dev, "fetched unsupported(%u) message from arq\n", - msg_q[0]); + opcode); break; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h index e4bfb6191fef579e8c758062700750e62844941f..5d4895bb57a17d9a01b29545af427ae760c0d578 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h @@ -29,7 +29,7 @@ TRACE_EVENT(hclge_vf_mbx_get, TP_fast_assign( __entry->vfid = req->dest_vfid; - __entry->code = req->msg.code; + __entry->code = le16_to_cpu(req->msg.code); __assign_str(pciname, pci_name(hdev->pdev)); __assign_str(devname, &hdev->nic.kinfo.netdev->name); memcpy(__entry->mbx_data, req, diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index ef2f5216f0bd8c3d773fea4d4862fe6950efb5e2..85f4d2418d25c062de3c8106a609e14351190c82 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2354,8 +2354,10 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, * flush reset queue and process this reset */ if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { - list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) + list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { list_del(entry); + kfree(list_entry(entry, struct ibmvnic_rwi, list)); + } } rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); @@ -4925,6 +4927,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, adapter->fw_done_rc = -EIO; complete(&adapter->fw_done); } + + /* if we got here during crq-init, retry crq-init */ + if (!completion_done(&adapter->init_done)) { + adapter->init_done_rc = -EAGAIN; + complete(&adapter->init_done); + } + if (!completion_done(&adapter->stats_done)) complete(&adapter->stats_done); if (test_bit(0, &adapter->resetting)) @@ -5387,6 +5396,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) goto ibmvnic_dev_file_err; netif_carrier_off(netdev); + + adapter->state = VNIC_PROBED; + + adapter->wait_for_reset = false; + adapter->last_reset_time = jiffies; + rc = register_netdev(netdev); if (rc) { dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); @@ -5394,10 +5409,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) } dev_info(&dev->dev, "ibmvnic registered\n"); - adapter->state = VNIC_PROBED; - - adapter->wait_for_reset = false; - adapter->last_reset_time = jiffies; return 0; ibmvnic_register_fail: diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index b38b914f9ac6cc0bd55e2d7930d739ea709f4f65..15b1503d5b6ca9cd4aa7f8a7f28a2922aa90cca1 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ 
b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -4134,9 +4134,9 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) return ret_val; if (!(data & valid_csum_mask)) { - e_dbg("NVM Checksum Invalid\n"); + e_dbg("NVM Checksum valid bit not set\n"); - if (hw->mac.type < e1000_pch_cnp) { + if (hw->mac.type < e1000_pch_tgp) { data |= valid_csum_mask; ret_val = e1000_write_nvm(hw, word, 1, &data); if (ret_val) diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 1114a15a9ce3c88783e17e84389d19d8916cb8b5..989d5c7263d7cc1e32a16d5a3aad2499a54bbf19 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -742,10 +742,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id) vsi = pf->vsi[vf->lan_vsi_idx]; dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n", vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs); - dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n", - vf->num_mdd_events, - vf->num_invalid_msgs, - vf->num_valid_msgs); + dev_info(&pf->pdev->dev, " num MDD=%lld\n", + vf->num_mdd_events); } else { dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index f71b7334e295582471e773ac66ceff24e012c794..9181e007e0392e7150d6d01b39fc808cf74a86f7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1864,19 +1864,17 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) /***********************virtual channel routines******************/ /** - * i40e_vc_send_msg_to_vf_ex + * i40e_vc_send_msg_to_vf * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length - * @is_quiet: true for not printing unsuccessful return values, false otherwise * * send msg to VF **/ -static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen, - bool is_quiet) +static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) { struct i40e_pf *pf; struct i40e_hw *hw; @@ -1891,25 +1889,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, hw = &pf->hw; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - /* single place to detect unsuccessful return values */ - if (v_retval && !is_quiet) { - vf->num_invalid_msgs++; - dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", - vf->vf_id, v_opcode, v_retval); - if (vf->num_invalid_msgs > - I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { - dev_err(&pf->pdev->dev, - "Number of invalid messages exceeded for VF %d\n", - vf->vf_id); - dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); - set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); - } - } else { - vf->num_valid_msgs++; - /* reset the invalid counter, if a valid message is received. 
*/ - vf->num_invalid_msgs = 0; - } - aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret) { @@ -1922,23 +1901,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, return 0; } -/** - * i40e_vc_send_msg_to_vf - * @vf: pointer to the VF info - * @v_opcode: virtual channel opcode - * @v_retval: virtual channel return value - * @msg: pointer to the msg buffer - * @msglen: msg length - * - * send msg to VF - **/ -static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) -{ - return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval, - msg, msglen, false); -} - /** * i40e_vc_send_resp_to_vf * @vf: pointer to the VF info @@ -2759,7 +2721,6 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) * i40e_check_vf_permission * @vf: pointer to the VF info * @al: MAC address list from virtchnl - * @is_quiet: set true for printing msg without opcode info, false otherwise * * Check that the given list of MAC addresses is allowed. Will return -EPERM * if any address in the list is not valid. Checks the following conditions: @@ -2774,15 +2735,13 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) * addresses might not be accurate. **/ static inline int i40e_check_vf_permission(struct i40e_vf *vf, - struct virtchnl_ether_addr_list *al, - bool *is_quiet) + struct virtchnl_ether_addr_list *al) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; int mac2add_cnt = 0; int i; - *is_quiet = false; for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; u8 *addr = al->list[i].addr; @@ -2806,7 +2765,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); - *is_quiet = true; return -EPERM; } @@ -2843,7 +2801,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - bool is_quiet = false; i40e_status ret = 0; int i; @@ -2860,7 +2817,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) */ spin_lock_bh(&vsi->mac_filter_hash_lock); - ret = i40e_check_vf_permission(vf, al, &is_quiet); + ret = i40e_check_vf_permission(vf, al); if (ret) { spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; @@ -2898,8 +2855,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) error_param: /* send the response to the VF */ - return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR, - ret, NULL, 0, is_quiet); + return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + ret, NULL, 0); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 03c42fd0fea19326916d302d67a7585456dc0a7c..a554d0a0b09bd56fb9904defd0a390df877cfa89 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -10,8 +10,6 @@ #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 -#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10 - #define I40E_VLAN_PRIORITY_SHIFT 13 #define I40E_VLAN_MASK 0xFFF #define I40E_PRIORITY_MASK 0xE000 @@ -92,9 +90,6 @@ struct i40e_vf { u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u8 num_req_queues; /* num of requested qps */ u64 num_mdd_events; /* num of mdd events 
detected */ - /* num of continuous malformed or invalid msgs detected */ - u64 num_invalid_msgs; - u64 num_valid_msgs; /* num of valid msgs detected */ unsigned long vf_caps; /* vf's adv. capabilities */ unsigned long vf_states; /* vf's runtime states */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 86c79f71c685a5f41a118685bb792f7e943466a0..75e4a698c3db20b71140cb694cba11e031697f0d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -247,21 +247,25 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) { + unsigned int totalsize = xdp->data_end - xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta; - unsigned int datasize = xdp->data_end - xdp->data; struct sk_buff *skb; + net_prefetch(xdp->data_meta); + /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - xdp->data_end - xdp->data_hard_start, + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; - skb_reserve(skb, xdp->data - xdp->data_hard_start); - memcpy(__skb_put(skb, datasize), xdp->data, datasize); - if (metasize) + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } xsk_buff_free(xdp); return skb; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 6766446a33f49b13b6385026585c90827b630ab8..ce1e2fb22e09283302ad55e740b145a4f4ce6fa5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -309,6 +309,7 @@ struct iavf_adapter { struct iavf_hw hw; /* defined in iavf_type.h */ enum iavf_state_t state; + enum iavf_state_t last_state; unsigned long crit_section; struct delayed_work watchdog_task; @@ -378,6 +379,15 @@ struct iavf_device { extern char iavf_driver_name[]; extern struct workqueue_struct *iavf_wq; +static inline void iavf_change_state(struct iavf_adapter *adapter, + enum iavf_state_t state) +{ + if (adapter->state != state) { + adapter->last_state = adapter->state; + adapter->state = state; + } +} + int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index de7794ebc7e73e732c7423e47ac99234d6217d1e..bd1fb3774769b0c301d31f2e87a572cffd7c143f 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -963,7 +963,7 @@ static void iavf_configure(struct iavf_adapter *adapter) **/ static void iavf_up_complete(struct iavf_adapter *adapter) { - adapter->state = __IAVF_RUNNING; + iavf_change_state(adapter, __IAVF_RUNNING); clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_napi_enable_all(adapter); @@ -1698,7 +1698,7 @@ static int iavf_startup(struct iavf_adapter *adapter) iavf_shutdown_adminq(hw); goto err; } - adapter->state = __IAVF_INIT_VERSION_CHECK; + iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); err: return err; } @@ -1722,7 +1722,7 @@ static int iavf_init_version_check(struct iavf_adapter *adapter) if (!iavf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); iavf_shutdown_adminq(hw); - adapter->state = 
__IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); goto err; } @@ -1745,8 +1745,7 @@ static int iavf_init_version_check(struct iavf_adapter *adapter) err); goto err; } - adapter->state = __IAVF_INIT_GET_RESOURCES; - + iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); err: return err; } @@ -1862,7 +1861,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); rtnl_unlock(); @@ -1910,7 +1909,7 @@ static void iavf_watchdog_task(struct work_struct *work) goto restart_watchdog; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) - adapter->state = __IAVF_COMM_FAILED; + iavf_change_state(adapter, __IAVF_COMM_FAILED); switch (adapter->state) { case __IAVF_COMM_FAILED: @@ -1921,7 +1920,7 @@ static void iavf_watchdog_task(struct work_struct *work) /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; queue_delayed_work(iavf_wq, &adapter->init_task, 10); clear_bit(__IAVF_IN_CRITICAL_TASK, @@ -1971,9 +1970,10 @@ static void iavf_watchdog_task(struct work_struct *work) goto restart_watchdog; } - /* check for hw reset */ + /* check for hw reset */ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) { + iavf_change_state(adapter, __IAVF_RESETTING); adapter->flags |= IAVF_FLAG_RESET_PENDING; adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; @@ -2053,7 +2053,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) adapter->netdev->flags &= ~IFF_UP; clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); } @@ -2165,7 +2165,7 @@ static void iavf_reset_task(struct work_struct *work) } iavf_irq_disable(adapter); - adapter->state = __IAVF_RESETTING; + iavf_change_state(adapter, __IAVF_RESETTING); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; /* free the Tx/Rx rings and descriptors, might be better to just @@ -2265,11 +2265,14 @@ static void iavf_reset_task(struct work_struct *work) iavf_configure(adapter); + /* iavf_up_complete() will switch device back + * to __IAVF_RUNNING + */ iavf_up_complete(adapter); iavf_irq_enable(adapter, true); } else { - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); @@ -3277,7 +3280,7 @@ static int iavf_close(struct net_device *netdev) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; iavf_down(adapter); - adapter->state = __IAVF_DOWN_PENDING; + iavf_change_state(adapter, __IAVF_DOWN_PENDING); iavf_free_traffic_irqs(adapter); clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); @@ -3317,8 +3320,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) iavf_notify_client_l2_params(&adapter->vsi); adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } - adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + + if (netif_running(netdev)) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; + queue_work(iavf_wq, &adapter->reset_task); 
+ } return 0; } @@ -3658,7 +3664,7 @@ static void iavf_init_task(struct work_struct *work) "Failed to communicate with PF; waiting before retry\n"); adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); goto out; } @@ -3684,7 +3690,7 @@ static void iavf_shutdown(struct pci_dev *pdev) if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000)) dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__); /* Prevent the watchdog from running. */ - adapter->state = __IAVF_REMOVE; + iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); @@ -3757,7 +3763,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->back = adapter; adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); /* Call save state here because it relies on the adapter struct. */ pci_save_state(pdev); @@ -3925,7 +3931,7 @@ static void iavf_remove(struct pci_dev *pdev) dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__); /* Shut down all the garbage mashers on the detention level */ - adapter->state = __IAVF_REMOVE; + iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; iavf_free_all_tx_resources(adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 8be3151f2c62b5c4d4ffd03dd1d935a4ee6fb15a..ff479bf7214433755a193951c51cad33afceefa2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1460,7 +1460,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); if (adapter->state == __IAVF_DOWN_PENDING) { - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } break; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index b06fbe99d8e9316e2df848dafd1db1f1487f2af2..b6dd8f81d69979edc78c323985bb99a2a35234e5 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -870,11 +870,11 @@ struct ice_aqc_get_phy_caps { * 01b - Report topology capabilities * 10b - Report SW configured */ -#define ICE_AQC_REPORT_MODE_S 1 -#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) -#define ICE_AQC_REPORT_NVM_CAP 0 -#define ICE_AQC_REPORT_TOPO_CAP BIT(1) -#define ICE_AQC_REPORT_SW_CFG BIT(2) +#define ICE_AQC_REPORT_MODE_S 1 +#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) +#define ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA 0 +#define ICE_AQC_REPORT_TOPO_CAP_MEDIA BIT(1) +#define ICE_AQC_REPORT_ACTIVE_CFG BIT(2) __le32 reserved1; __le32 addr_high; __le32 addr_low; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 2b0d0373ab2c64d6261a2f622537d63efaf842b0..ecdc467c4f6f5c048bb855efb2d6f4e159979ebd 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -193,7 +193,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, ice_debug(hw, ICE_DBG_LINK, " 
module_type[2] = 0x%x\n", pcaps->module_type[2]); - if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) { + if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); memcpy(pi->phy.link_info.module_type, &pcaps->module_type, @@ -924,7 +924,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw) /* Initialize port_info struct with PHY capabilities */ status = ice_aq_get_phy_caps(hw->port_info, false, - ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); + ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, + NULL); devm_kfree(ice_hw_to_dev(hw), pcaps); if (status) goto err_unroll_sched; @@ -2682,7 +2683,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); devm_kfree(ice_hw_to_dev(hw), pcaps); @@ -2842,8 +2843,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) return ICE_ERR_NO_MEMORY; /* Get the current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, - NULL); + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, + pcaps, NULL); if (status) { *aq_failures = ICE_SET_FC_AQ_FAIL_GET; goto out; @@ -2989,7 +2990,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); if (status) goto out; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 0c596b67b68996d32076335184c673ebe2bc7719..57fe21c23cb134adddec73884ab9ab4b07ba0834 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1081,7 +1081,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) if (!caps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); if (status) { err = -EAGAIN; @@ -1976,7 +1976,7 @@ ice_get_link_ksettings(struct net_device *netdev, return -ENOMEM; status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_SW_CFG, caps, NULL); + ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); if (status) { err = -EIO; goto done; @@ -2013,7 +2013,7 @@ ice_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_TOPO_CAP, caps, NULL); + ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); if (status) { err = -EIO; goto done; @@ -2187,12 +2187,12 @@ ice_set_link_ksettings(struct net_device *netdev, { struct ice_netdev_priv *np = netdev_priv(netdev); struct ethtool_link_ksettings safe_ks, copy_ks; - struct ice_aqc_get_phy_caps_data *abilities; u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT; - u16 adv_link_speed, curr_link_speed, idx; + struct ice_aqc_get_phy_caps_data *phy_caps; struct ice_aqc_set_phy_cfg_data config; + u16 adv_link_speed, curr_link_speed; struct ice_pf *pf = np->vsi->back; - struct ice_port_info *p; + struct ice_port_info *pi; u8 autoneg_changed = 0; enum ice_status status; u64 phy_type_high = 0; @@ -2200,33 +2200,25 @@ ice_set_link_ksettings(struct 
net_device *netdev, int err = 0; bool linkup; - p = np->vsi->port_info; + pi = np->vsi->port_info; - if (!p) + if (!pi) return -EOPNOTSUPP; - /* Check if this is LAN VSI */ - ice_for_each_vsi(pf, idx) - if (pf->vsi[idx]->type == ICE_VSI_PF) { - if (np->vsi != pf->vsi[idx]) - return -EOPNOTSUPP; - break; - } - - if (p->phy.media_type != ICE_MEDIA_BASET && - p->phy.media_type != ICE_MEDIA_FIBER && - p->phy.media_type != ICE_MEDIA_BACKPLANE && - p->phy.media_type != ICE_MEDIA_DA && - p->phy.link_info.link_info & ICE_AQ_LINK_UP) + if (pi->phy.media_type != ICE_MEDIA_BASET && + pi->phy.media_type != ICE_MEDIA_FIBER && + pi->phy.media_type != ICE_MEDIA_BACKPLANE && + pi->phy.media_type != ICE_MEDIA_DA && + pi->phy.link_info.link_info & ICE_AQ_LINK_UP) return -EOPNOTSUPP; - abilities = kzalloc(sizeof(*abilities), GFP_KERNEL); - if (!abilities) + phy_caps = kzalloc(sizeof(*phy_caps), GFP_KERNEL); + if (!phy_caps) return -ENOMEM; /* Get the PHY capabilities based on media */ - status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP, - abilities, NULL); + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + phy_caps, NULL); if (status) { err = -EAGAIN; goto done; @@ -2288,26 +2280,26 @@ ice_set_link_ksettings(struct net_device *netdev, * configuration is initialized during probe from PHY capabilities * software mode, and updated on set PHY configuration. */ - memcpy(&config, &p->phy.curr_user_phy_cfg, sizeof(config)); + memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config)); config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; /* Check autoneg */ - err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed, + err = ice_setup_autoneg(pi, &safe_ks, &config, autoneg, &autoneg_changed, netdev); if (err) goto done; /* Call to get the current link speed */ - p->phy.get_link_info = true; - status = ice_get_link_status(p, &linkup); + pi->phy.get_link_info = true; + status = ice_get_link_status(pi, &linkup); if (status) { err = -EAGAIN; goto done; } - curr_link_speed = p->phy.link_info.link_speed; + curr_link_speed = pi->phy.curr_user_speed_req; adv_link_speed = ice_ksettings_find_adv_link_speed(ks); /* If speed didn't get set, set it to what it currently is. 
@@ -2326,7 +2318,7 @@ ice_set_link_ksettings(struct net_device *netdev, } /* save the requested speeds */ - p->phy.link_info.req_speeds = adv_link_speed; + pi->phy.link_info.req_speeds = adv_link_speed; /* set link and auto negotiation so changes take effect */ config.caps |= ICE_AQ_PHY_ENA_LINK; @@ -2342,9 +2334,9 @@ ice_set_link_ksettings(struct net_device *netdev, * for set PHY configuration */ config.phy_type_high = cpu_to_le64(phy_type_high) & - abilities->phy_type_high; + phy_caps->phy_type_high; config.phy_type_low = cpu_to_le64(phy_type_low) & - abilities->phy_type_low; + phy_caps->phy_type_low; if (!(config.phy_type_high || config.phy_type_low)) { /* If there is no intersection and lenient mode is enabled, then @@ -2364,7 +2356,7 @@ ice_set_link_ksettings(struct net_device *netdev, } /* If link is up put link down */ - if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) { + if (pi->phy.link_info.link_info & ICE_AQ_LINK_UP) { /* Tell the OS link is going down, the link will go * back up when fw says it is ready asynchronously */ @@ -2374,7 +2366,7 @@ ice_set_link_ksettings(struct net_device *netdev, } /* make the aq call */ - status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL); + status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); if (status) { netdev_info(netdev, "Set phy config failed,\n"); err = -EAGAIN; @@ -2382,9 +2374,9 @@ ice_set_link_ksettings(struct net_device *netdev, } /* Save speed request */ - p->phy.curr_user_speed_req = adv_link_speed; + pi->phy.curr_user_speed_req = adv_link_speed; done: - kfree(abilities); + kfree(phy_caps); clear_bit(__ICE_CFG_BUSY, pf->state); return err; @@ -2958,7 +2950,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (status) goto out; @@ -3025,7 +3017,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (status) { kfree(pcaps); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index fb4656902634c624d3b24855a2a5767d6b9482a0..20c9d55f3adcee9e9b126877f7cd79a9c339e39a 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -726,7 +726,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) } status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_SW_CFG, caps, NULL); + ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); if (status) netdev_info(vsi->netdev, "Get phy capability failed.\n"); @@ -1602,7 +1602,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf) * reset, so print the event prior to reset. 
*/ ice_print_vf_rx_mdd_event(vf); + mutex_lock(&pf->vf[i].cfg_lock); ice_reset_vf(&pf->vf[i], false); + mutex_unlock(&pf->vf[i].cfg_lock); } } } @@ -1643,7 +1645,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) if (!pcaps) return -ENOMEM; - retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (retcode) { dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", @@ -1703,7 +1705,7 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi) if (!pcaps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, NULL); if (status) { @@ -1819,7 +1821,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) if (!pcaps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); if (status) { dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); @@ -1898,7 +1900,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (status) { dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", @@ -1916,7 +1918,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Use PHY topology as baseline for configuration */ memset(pcaps, 0, sizeof(*pcaps)); - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); if (status) { dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n", diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 69ce5d60a8570334f0e9f61261618af363a3822e..5134342ff70fc3d188e91e8ca6ea96276eb2a8df 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -360,20 +360,26 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - /* Avoid wait time by stopping all VFs at the same time */ - ice_for_each_vf(pf, i) - ice_dis_vf_qs(&pf->vf[i]); - tmp = pf->num_alloc_vfs; pf->num_qps_per_vf = 0; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { - if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { + struct ice_vf *vf = &pf->vf[i]; + + mutex_lock(&vf->cfg_lock); + + ice_dis_vf_qs(vf); + + if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { /* disable VF qp mappings and set VF disable state */ - ice_dis_vf_mappings(&pf->vf[i]); - set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); - ice_free_vf_res(&pf->vf[i]); + ice_dis_vf_mappings(vf); + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + ice_free_vf_res(vf); } + + mutex_unlock(&vf->cfg_lock); + + mutex_destroy(&vf->cfg_lock); } if (ice_sriov_free_msix_res(pf)) @@ -1221,9 +1227,13 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_for_each_vf(pf, v) { vf = &pf->vf[v]; + mutex_lock(&vf->cfg_lock); + ice_vf_pre_vsi_rebuild(vf); ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); + + mutex_unlock(&vf->cfg_lock); } ice_flush(hw); @@ -1270,6 +1280,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) u32 reg; int i; + lockdep_assert_held(&vf->cfg_lock); + dev = ice_pf_to_dev(pf); if 
(test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { @@ -1518,6 +1530,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps); vf->spoofchk = true; vf->num_vf_qs = pf->num_qps_per_vf; + + mutex_init(&vf->cfg_lock); } } @@ -1721,9 +1735,12 @@ void ice_process_vflr_event(struct ice_pf *pf) bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; /* read GLGEN_VFLRSTAT register to find out the flr VFs */ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); - if (reg & BIT(bit_idx)) + if (reg & BIT(bit_idx)) { /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ + mutex_lock(&vf->cfg_lock); ice_reset_vf(vf, true); + mutex_unlock(&vf->cfg_lock); + } } } @@ -1800,7 +1817,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) if (!vf) return; + mutex_lock(&vf->cfg_lock); ice_vc_reset_vf(vf); + mutex_unlock(&vf->cfg_lock); } /** @@ -1830,24 +1849,6 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, dev = ice_pf_to_dev(pf); - /* single place to detect unsuccessful return values */ - if (v_retval) { - vf->num_inval_msgs++; - dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, - v_opcode, v_retval); - if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) { - dev_err(dev, "Number of invalid messages exceeded for VF %d\n", - vf->vf_id); - dev_err(dev, "Use PF Control I/F to enable the VF\n"); - set_bit(ICE_VF_STATE_DIS, vf->vf_states); - return -EIO; - } - } else { - vf->num_valid_msgs++; - /* reset the invalid counter, if a valid message is received. */ - vf->num_inval_msgs = 0; - } - aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { @@ -3345,6 +3346,8 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, return 0; } + mutex_lock(&vf->cfg_lock); + vf->port_vlan_info = vlanprio; if (vf->port_vlan_info) @@ -3354,6 +3357,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id); ice_vc_reset_vf(vf); + mutex_unlock(&vf->cfg_lock); return 0; } @@ -3719,6 +3723,15 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) return; } + /* VF is being configured in another context that triggers a VFR, so no + * need to process this message + */ + if (!mutex_trylock(&vf->cfg_lock)) { + dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n", + vf->vf_id); + return; + } + switch (v_opcode) { case VIRTCHNL_OP_VERSION: err = ice_vc_get_ver_msg(vf, msg); @@ -3795,6 +3808,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n", vf_id, v_opcode, err); } + + mutex_unlock(&vf->cfg_lock); } /** @@ -3909,6 +3924,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) return -EINVAL; } + mutex_lock(&vf->cfg_lock); + /* VF is notified of its new MAC via the PF's response to the * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset */ @@ -3926,6 +3943,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } ice_vc_reset_vf(vf); + mutex_unlock(&vf->cfg_lock); return 0; } @@ -3955,11 +3973,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) if (trusted == vf->trusted) return 0; + mutex_lock(&vf->cfg_lock); + vf->trusted = trusted; ice_vc_reset_vf(vf); 
dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", vf_id, trusted ? "" : "un"); + mutex_unlock(&vf->cfg_lock); + return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 0f519fba3770d3d3b4f250b69158c53f1170293b..d2e935c678a147e14037ea451a4b5f0e020c0640 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -13,7 +13,6 @@ #define ICE_MAX_MACADDR_PER_VF 18 /* Malicious Driver Detection */ -#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 #define ICE_MDD_EVENTS_THRESHOLD 30 /* Static VF transaction/status register def */ @@ -68,6 +67,11 @@ struct ice_mdd_vf_events { struct ice_vf { struct ice_pf *pf; + /* Used during virtchnl message handling and NDO ops against the VF + * that will trigger a VFR + */ + struct mutex cfg_lock; + u16 vf_id; /* VF ID in the PF space */ u16 lan_vsi_idx; /* index into PF struct */ /* first vector index of this VF in the PF space */ @@ -92,8 +96,6 @@ struct ice_vf { unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ - u64 num_inval_msgs; /* number of continuous invalid msgs */ - u64 num_valid_msgs; /* number of valid msgs detected */ unsigned long vf_caps; /* VF's adv. capabilities */ u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 8e1799508edc42438a128c38be2f62cf8eeae083..e380b7a3ea63b19468d976ac56a4ff29116b5bbb 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -748,8 +748,6 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) if (ret_val) return ret_val; ret_val = igc_write_phy_reg_mdic(hw, offset, data); - if (ret_val) - return ret_val; hw->phy.ops.release(hw); } else { ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, @@ -781,8 +779,6 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) if (ret_val) return ret_val; ret_val = igc_read_phy_reg_mdic(hw, offset, data); - if (ret_val) - return ret_val; hw->phy.ops.release(hw); } else { ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index d60da7a89092e82f8195b961ba67938af6739722..ca1a428b278e0df4dcceaf3bd048b79054b3faf5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -391,12 +391,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) u32 cmd_type; while (budget-- > 0) { - if (unlikely(!ixgbe_desc_unused(xdp_ring)) || - !netif_carrier_ok(xdp_ring->netdev)) { + if (unlikely(!ixgbe_desc_unused(xdp_ring))) { work_done = false; break; } + if (!netif_carrier_ok(xdp_ring->netdev)) + break; + if (!xsk_tx_peek_desc(pool, &desc)) break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 6af0dd847169177ef5c9ca52e04dc0b38c2f2b65..94426d29025eb4e8a3bf2d6ea3b73d8445768453 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -130,11 +130,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd) static void cmd_free_index(struct mlx5_cmd *cmd, int idx) { - unsigned long flags; - - spin_lock_irqsave(&cmd->alloc_lock, flags); + lockdep_assert_held(&cmd->alloc_lock); set_bit(idx, 
&cmd->bitmask); - spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) @@ -144,17 +141,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) { + struct mlx5_cmd *cmd = ent->cmd; + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); if (!refcount_dec_and_test(&ent->refcnt)) - return; + goto out; if (ent->idx >= 0) { - struct mlx5_cmd *cmd = ent->cmd; - cmd_free_index(cmd, ent->idx); up(ent->page_queue ? &cmd->pages_sem : &cmd->sem); } cmd_free_ent(ent); +out: + spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 584751da20b17a29054ea2faa571007694d9ebff..38b557a6353d6023a2b9059089d301ea931efbd2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1754,7 +1754,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, if (size_read < 0) { netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n", __func__, size_read); - return 0; + return size_read; } i += size_read; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 8b3a7325e24ec3a2dc3ecd85018c3a49403b4a53..59dc746b71888010dbc17227c421645100e2b256 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -980,7 +980,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, } /* True when explicitly set via priv flag, or XDP prog is loaded */ - if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) || + get_cqe_tls_offload(cqe)) goto csum_unnecessary; /* CQE csum doesn't cover padding octets in short ethernet diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index e06b1ba7d23498bd72cb7e895e2365d4fe2c4b14..ccc7dd3e738a48ede918113f769a0bed7b84300b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2037,10 +2037,6 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) return false; - if (mlx5_core_is_ecpf_esw_manager(esw->dev) || - mlx5_ecpf_vport_exists(esw->dev)) - return false; - return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0ff034b0866e2e9da7334fee35531f7f9a1830a3..55772f0cbbf8fef911b7e233605b0bd9564fd286 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2034,6 +2034,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); + } else { + up_write_ref_node(&fte->node, false); } kfree(handle); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index 0f0d250bbc1508f05661267a084faf136541ae8e..c04413f449c509ac7e387b2a556d2ffa86176dfb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -123,6 +123,10 @@ 
static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, return; } + /* Handle multipath entry with lower priority value */ + if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority) + return; + /* Handle add/replace event */ nhs = fib_info_num_path(fi); if (nhs == 1) { @@ -132,12 +136,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev); if (i < 0) - i = MLX5_LAG_NORMAL_AFFINITY; - else - ++i; + return; + i++; mlx5_lag_set_port_affinity(ldev, i); } + + mp->mfi = fi; return; } diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 217e8333de6c66154cbe0d6b0af078b3200083ac..c4c4649b2088e17653f3ab9267c71316e340a3d3 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -54,6 +54,12 @@ static int ocelot_chain_to_block(int chain, bool ingress) */ static int ocelot_chain_to_lookup(int chain) { + /* Backwards compatibility with older, single-chain tc-flower + * offload support in Ocelot + */ + if (chain == 0) + return 0; + return (chain / VCAP_LOOKUP) % 10; } @@ -62,7 +68,15 @@ static int ocelot_chain_to_lookup(int chain) */ static int ocelot_chain_to_pag(int chain) { - int lookup = ocelot_chain_to_lookup(chain); + int lookup; + + /* Backwards compatibility with older, single-chain tc-flower + * offload support in Ocelot + */ + if (chain == 0) + return 0; + + lookup = ocelot_chain_to_lookup(chain); /* calculate PAG value as chain index relative to the first PAG */ return chain - VCAP_IS2_CHAIN(lookup, 0); diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index d3d5b663a4a3c423d426a8d709e999c4017a5678..088ceac07b80556be5b85ea5b1ad6ccb4f05391a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, int port, bool mod) { struct nfp_flower_priv *priv = app->priv; - int ida_idx = NFP_MAX_MAC_INDEX, err; struct nfp_tun_offloaded_mac *entry; + int ida_idx = -1, err; u16 nfp_mac_idx = 0; entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); @@ -997,7 +997,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, err_free_entry: kfree(entry); err_free_ida: - if (ida_idx != NFP_MAX_MAC_INDEX) + if (ida_idx != -1) ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); return err; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 9e098e40fb1c690220ee8ab0c9daa8ba2bbf6877..a9a9bf2e065a5635880392e0c4099eed982bb381 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1468,6 +1468,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct netdata_local *pldat; + int ret; if (device_may_wakeup(&pdev->dev)) disable_irq_wake(ndev->irq); @@ -1477,7 +1478,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev) pldat = netdev_priv(ndev); /* Enable interface clock */ - clk_enable(pldat->clk); + ret = clk_enable(pldat->clk); + if (ret) + return ret; /* Reset and initialize */ __lpc_eth_reset(pldat); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index d355676f6c160d685239d52565cf96075297eae2..e14869a2e24a517d75e5b1e0afba00d4e93d7856 100644 --- 
a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -311,10 +311,10 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) static void ionic_dev_cmd_clean(struct ionic *ionic) { - union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs; + struct ionic_dev *idev = &ionic->idev; - iowrite32(0, ®s->doorbell); - memset_io(®s->cmd, 0, sizeof(regs->cmd)); + iowrite32(0, &idev->dev_cmd_regs->doorbell); + memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd)); } int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index b8dc5c4591ef52e212776aeece736303092c4ff6..d6b79caf9d8e5f48725690ae77d36d10619193b1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3778,11 +3778,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) return found; } -static void qed_iov_get_link(struct qed_hwfn *p_hwfn, - u16 vfid, - struct qed_mcp_link_params *p_params, - struct qed_mcp_link_state *p_link, - struct qed_mcp_link_capabilities *p_caps) +static int qed_iov_get_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *p_params, + struct qed_mcp_link_state *p_link, + struct qed_mcp_link_capabilities *p_caps) { struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, vfid, @@ -3790,7 +3790,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, struct qed_bulletin_content *p_bulletin; if (!p_vf) - return; + return -EINVAL; p_bulletin = p_vf->bulletin.p_virt; @@ -3800,6 +3800,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); if (p_caps) __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); + return 0; } static int @@ -4658,6 +4659,7 @@ static int qed_get_vf_config(struct qed_dev *cdev, struct qed_public_vf_info *vf_info; struct qed_mcp_link_state link; u32 tx_rate; + int ret; /* Sanitize request */ if (IS_VF(cdev)) @@ -4671,7 +4673,9 @@ static int qed_get_vf_config(struct qed_dev *cdev, vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); - qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); + ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); + if (ret) + return ret; /* Fill information about VF */ ivi->vf = vf_id; @@ -4687,6 +4691,7 @@ static int qed_get_vf_config(struct qed_dev *cdev, tx_rate = vf_info->tx_rate; ivi->max_tx_rate = tx_rate ? 
tx_rate : link.speed; ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); + ivi->trusted = vf_info->is_trusted_request; return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 72a38d53d33f68c497c3f18b14c7874c6da713f4..e2a5a6a373cbe613ad06ef5d94196efc8174f5c7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) p_iov->bulletin.size, &p_iov->bulletin.phys, GFP_KERNEL); + if (!p_iov->bulletin.p_virt) + goto free_pf2vf_reply; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", p_iov->bulletin.p_virt, @@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) return rc; +free_pf2vf_reply: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); free_vf2pf_request: dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(union vfpf_tlvs), diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 5d79ee4370bcd5b89e0ca7cbc8b2e7b32c8c4d7b..7519773eaca6ee5caa29f40c6f8aa891464760bd 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -51,7 +51,7 @@ static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb) if (dcb && dcb->ops->get_hw_capability) return dcb->ops->get_hw_capability(dcb); - return 0; + return -EOPNOTSUPP; } static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb) @@ -65,7 +65,7 @@ static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb) if (dcb && dcb->ops->attach) return dcb->ops->attach(dcb); - return 0; + return -EOPNOTSUPP; } static inline int @@ -74,7 +74,7 @@ qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf) if (dcb && dcb->ops->query_hw_capability) return dcb->ops->query_hw_capability(dcb, buf); - return 0; + return -EOPNOTSUPP; } static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb) @@ -89,7 +89,7 @@ qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type) if (dcb && dcb->ops->query_cee_param) return dcb->ops->query_cee_param(dcb, buf, type); - return 0; + return -EOPNOTSUPP; } static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) @@ -97,7 +97,7 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb) if (dcb && dcb->ops->get_cee_cfg) return dcb->ops->get_cee_cfg(dcb); - return 0; + return -EOPNOTSUPP; } static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 971f1e54b6526e36223059f684292bbd95bc539f..b1dd6189638b3ee173366f0c0ab274f11ae5c420 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -2282,18 +2282,18 @@ static int __init sxgbe_cmdline_opt(char *str) char *opt; if (!str || !*str) - return -EINVAL; + return 1; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "eee_timer:", 10)) { if (kstrtoint(opt + 10, 0, &eee_timer)) goto err; } } - return 0; + return 1; err: pr_err("%s: ERROR broken module parameter conversion\n", __func__); - return -EINVAL; + return 1; } __setup("sxgbeeth=", sxgbe_cmdline_opt); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index be6bfd6b7ec7576407f85ebb786eaed238f7d8be..50baf62b2cbc6808bad368e45d6e2efce7573f8c 100644 --- 
a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -163,9 +163,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ spin_lock_bh(&mcdi->iface_lock); ++mcdi->seqno; + seqno = mcdi->seqno & SEQ_MASK; spin_unlock_bh(&mcdi->iface_lock); - seqno = mcdi->seqno & SEQ_MASK; xflags = 0; if (mcdi->mode == MCDI_MODE_EVENTS) xflags |= MCDI_HEADER_XFLAGS_EVREQ; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6d8a839fab22e7348fb8ede26973c2a584208f6c..a46c32257de42ba1e18c693a09999437a51d9c1c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -5428,7 +5428,7 @@ static int __init stmmac_cmdline_opt(char *str) char *opt; if (!str || !*str) - return -EINVAL; + return 1; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "debug:", 6)) { if (kstrtoint(opt + 6, 0, &debug)) @@ -5459,11 +5459,11 @@ static int __init stmmac_cmdline_opt(char *str) goto err; } } - return 0; + return 1; err: pr_err("%s: ERROR broken module parameter conversion", __func__); - return -EINVAL; + return 1; } __setup("stmmaceth=", stmmac_cmdline_opt); diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 54b53dbdb33cdbfa11e8f51b3358dc756de36871..69fc47089e625b4ab9077e4f6433f452f4b53dd2 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -3163,7 +3163,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, if (err) { printk(KERN_ERR "happymeal(PCI): Cannot register net device, " "aborting.\n"); - goto err_out_iounmap; + goto err_out_free_coherent; } pci_set_drvdata(pdev, hp); @@ -3196,6 +3196,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, return 0; +err_out_free_coherent: + dma_free_coherent(hp->dma_dev, PAGE_SIZE, + hp->happy_block, hp->hblock_dvma); + err_out_iounmap: iounmap(hp->gregs); diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 43222a34cba069b9bc10750cbd9a4bcdfc8b6228..f9514518700ebb7f70009b46583f66af968ec417 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts) for (i = 0; i < CPTS_MAX_EVENTS; i++) list_add(&cpts->pool_data[i].list, &cpts->pool); - clk_enable(cpts->refclk); + err = clk_enable(cpts->refclk); + if (err) + return err; cpts_write32(cpts, CPTS_EN, control); cpts_write32(cpts, TS_PEND_EN, int_enable); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 52f184500dd6f9a51a43b9aabf30d78e459e6b0e..f3d1814818c720e24c3ee269f02209b1c0a7c89b 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1433,6 +1433,8 @@ static int temac_probe(struct platform_device *pdev) lp->indirect_lock = devm_kmalloc(&pdev->dev, sizeof(*lp->indirect_lock), GFP_KERNEL); + if (!lp->indirect_lock) + return -ENOMEM; spin_lock_init(lp->indirect_lock); } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index b2b435dbb8159a4b29c8ae9a9d89524226a27c8a..b8e85ef7ce921ca71feadaa2e7a88588f70f6e7c 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -857,46 +857,53 @@ static void axienet_recv(struct net_device *ndev) while ((cur_p->status & 
XAXIDMA_BD_STS_COMPLETE_MASK)) { dma_addr_t phys; - tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; - /* Ensure we see complete descriptor update */ dma_rmb(); - phys = desc_get_phys_addr(lp, cur_p); - dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size, - DMA_FROM_DEVICE); skb = cur_p->skb; cur_p->skb = NULL; - length = cur_p->app4 & 0x0000FFFF; - - skb_put(skb, length); - skb->protocol = eth_type_trans(skb, ndev); - /*skb_checksum_none_assert(skb);*/ - skb->ip_summed = CHECKSUM_NONE; - - /* if we're doing Rx csum offload, set it up */ - if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { - csumstatus = (cur_p->app2 & - XAE_FULL_CSUM_STATUS_MASK) >> 3; - if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) || - (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* skb could be NULL if a previous pass already received the + * packet for this slot in the ring, but failed to refill it + * with a newly allocated buffer. In this case, don't try to + * receive it again. + */ + if (likely(skb)) { + length = cur_p->app4 & 0x0000FFFF; + + phys = desc_get_phys_addr(lp, cur_p); + dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size, + DMA_FROM_DEVICE); + + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, ndev); + /*skb_checksum_none_assert(skb);*/ + skb->ip_summed = CHECKSUM_NONE; + + /* if we're doing Rx csum offload, set it up */ + if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { + csumstatus = (cur_p->app2 & + XAE_FULL_CSUM_STATUS_MASK) >> 3; + if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED || + csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && + skb->protocol == htons(ETH_P_IP) && + skb->len > 64) { + skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); + skb->ip_summed = CHECKSUM_COMPLETE; } - } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && - skb->protocol == htons(ETH_P_IP) && - skb->len > 64) { - skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); - skb->ip_summed = CHECKSUM_COMPLETE; - } - netif_rx(skb); + netif_rx(skb); - size += length; - packets++; + size += length; + packets++; + } new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); if (!new_skb) - return; + break; phys = dma_map_single(ndev->dev.parent, new_skb->data, lp->max_frm_size, @@ -905,7 +912,7 @@ static void axienet_recv(struct net_device *ndev) if (net_ratelimit()) netdev_err(ndev, "RX DMA mapping error\n"); dev_kfree_skb(new_skb); - return; + break; } desc_set_phys_addr(lp, phys, cur_p); @@ -913,6 +920,11 @@ static void axienet_recv(struct net_device *ndev) cur_p->status = 0; cur_p->skb = new_skb; + /* Only update tail_p to mark this slot as usable after it has + * been successfully refilled. 
+ */ + tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; + if (++lp->rx_bd_ci >= lp->rx_bd_num) lp->rx_bd_ci = 0; cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 962831cdde4db0860673e24d51f2305fda0cf333..4bd44fbc6ecfa92030021750b0637555d255e74b 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1187,7 +1187,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) if (rc) { dev_err(dev, "Cannot register network device, aborting\n"); - goto error; + goto put_node; } dev_info(dev, @@ -1195,6 +1195,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) (unsigned int __force)ndev->mem_start, lp->base_addr, ndev->irq); return 0; +put_node: + of_node_put(lp->phy_node); error: free_netdev(ndev); return rc; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 63502a85a97514b3d64598e66dee4947bf6dc0e4..049264a7d9611b5697741b67e8ea5f10dd6c535e 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -31,6 +31,8 @@ #define AX_MTU 236 +/* some arch define END as assembly function ending, just undef it */ +#undef END /* SLIP/KISS protocol characters. */ #define END 0300 /* indicates end of frame */ #define ESC 0333 /* indicates byte stuffing */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c042e22aa7049e98c41cc1a70dd05fbf82eced63..554e1863aab9945562d22aa8afe38f94c8cee33d 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1562,6 +1562,9 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, pcpu_sum = kvmalloc_array(num_possible_cpus(), sizeof(struct netvsc_ethtool_pcpu_stats), GFP_KERNEL); + if (!pcpu_sum) + return; + netvsc_get_pcpu_stats(dev, pcpu_sum); for_each_present_cpu(cpu) { struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu]; diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 644861366d5442c7aaca5c29c7801ae73a70e61e..0cde17bd743f3cb3798f5c282cbd3e7fcae630e9 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -11,6 +11,7 @@ */ #include "bcm-phy-lib.h" +#include #include #include #include @@ -622,6 +623,26 @@ static int brcm_fet_config_init(struct phy_device *phydev) if (err < 0) return err; + /* The datasheet indicates the PHY needs up to 1us to complete a reset, + * build some slack here. + */ + usleep_range(1000, 2000); + + /* The PHY requires 65 MDC clock cycles to complete a write operation + * and turnaround the line properly. + * + * We ignore -EIO here as the MDIO controller (e.g.: mdio-bcm-unimac) + * may flag the lack of turn-around as a read failure. This is + * particularly true with this combination since the MDIO controller + * only used 64 MDC cycles. This is not a critical failure in this + * specific case and it has no functional impact otherwise, so we let + * that one go through. If there is a genuine bus error, the next read + * of MII_BRCM_FET_INTREG will error out. 
+ */ + err = phy_read(phydev, MII_BMCR); + if (err < 0 && err != -EIO) + return err; + reg = phy_read(phydev, MII_BRCM_FET_INTREG); if (reg < 0) return reg; diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 7bf43031cea8c6c533b05e68a8ecaf6fb0029500..3d75b98f3051d9b5a1c49db8f2aa7e791220aa32 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -289,7 +289,7 @@ static int dp83822_config_intr(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_DP83822_MISR1, 0); + err = phy_write(phydev, MII_DP83822_MISR2, 0); if (err < 0) return err; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index cb9d1852a75c8c0f23d68583f3309230bb84c8e8..54786712a99130583bf0546c2f9a500b9bb021b3 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1536,8 +1536,8 @@ static int marvell_suspend(struct phy_device *phydev) int err; /* Suspend the fiber mode first */ - if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, - phydev->supported)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; @@ -1571,8 +1571,8 @@ static int marvell_resume(struct phy_device *phydev) int err; /* Resume the fiber mode first */ - if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, - phydev->supported)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 41a410124437d105b9657b9174d8b56f52606148..e14fa72791b0e47f1a5a1c22e1538b1196611fcd 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -2584,3 +2584,6 @@ MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl); MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver"); MODULE_AUTHOR("Nagaraju Lakkaraju"); MODULE_LICENSE("Dual MIT/GPL"); + +MODULE_FIRMWARE(MSCC_VSC8584_REVB_INT8051_FW); +MODULE_FIRMWARE(MSCC_VSC8574_REVB_INT8051_FW); diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 6aaa0675c28a397c30991375f22256e9209bb2a3..43ddbe61dc58e140015e43317efcd7893fdb1693 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -570,6 +570,11 @@ static const struct usb_device_id products[] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible; * wire-incompatible with true CDC Ethernet implementations. * (And, it seems, needlessly so...) 
@@ -623,6 +628,13 @@ static const struct usb_device_id products[] = { .idProduct = 0x9032, /* SL-6000 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 77ac5a721e7b6e6d8e1b31b3fc11547bb793a2d3..414341c9cf5ae17dc4f0ff14f02f37468d57c78a 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -658,6 +658,11 @@ static const struct usb_device_id mbim_devs[] = { .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, }, + /* Telit FN990 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, + }, + /* default entry */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_zlp, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index eaaa5aee58251e5469dde979bddef20edf5cac85..ab91fa5b0194db57041c8f92ec9ba4ea62632a25 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1702,10 +1702,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) { struct sk_buff *skb; struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; - int len; + unsigned int len; int nframes; int x; - int offset; + unsigned int offset; union { struct usb_cdc_ncm_ndp16 *ndp16; struct usb_cdc_ncm_ndp32 *ndp32; @@ -1777,8 +1777,8 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) break; } - /* sanity checking */ - if (((offset + len) > skb_in->len) || + /* sanity checking - watch out for integer wrap*/ + if ((offset > skb_in->len) || (len > skb_in->len - offset) || (len > ctx->rx_max) || (len < ETH_HLEN)) { netif_dbg(dev, rx_err, dev->net, "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n", diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 465e11dcdf12938d938a966b560a2fbbe20d6bd5..e5b74485114675fae3cc291f1501a1cd06088c81 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -84,9 +84,10 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) { - netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", - index, ret); + if (ret < 0) { + if (ret != -ENODEV) + netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", + index, ret); return ret; } @@ -116,7 +117,7 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) + if (ret < 0 && ret != -ENODEV) netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n", index, ret); @@ -159,6 +160,9 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev, do { ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm); if (ret < 0) { + /* Ignore -ENODEV error during disconnect() */ + if (ret == -ENODEV) + return 0; netdev_warn(dev->net, "Error reading MII_ACCESS\n"); return ret; } @@ -194,7 +198,8 @@ static int __smsc95xx_mdio_read(struct usbnet 
*dev, int phy_id, int idx, addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_); ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_ADDR\n"); goto done; } @@ -206,7 +211,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_DATA\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error reading MII_DATA\n"); goto done; } @@ -214,6 +220,10 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, done: mutex_unlock(&dev->phy_mutex); + + /* Ignore -ENODEV error during disconnect() */ + if (ret == -ENODEV) + return 0; return ret; } @@ -235,7 +245,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id, val = regval; ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_DATA\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_DATA\n"); goto done; } @@ -243,7 +254,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id, addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_); ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_ADDR\n"); goto done; } @@ -1049,6 +1061,14 @@ static const struct net_device_ops smsc95xx_netdev_ops = { .ndo_set_features = smsc95xx_set_features, }; +static void smsc95xx_handle_link_change(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + phy_print_status(net->phydev); + usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); +} + static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata; @@ -1153,6 +1173,17 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->min_mtu = ETH_MIN_MTU; dev->net->max_mtu = ETH_DATA_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + + ret = phy_connect_direct(dev->net, pdata->phydev, + &smsc95xx_handle_link_change, + PHY_INTERFACE_MODE_MII); + if (ret) { + netdev_err(dev->net, "can't attach PHY to %s\n", pdata->mdiobus->id); + goto unregister_mdio; + } + + phy_attached_info(dev->net->phydev); + return 0; unregister_mdio: @@ -1170,47 +1201,25 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata = dev->driver_priv; + phy_disconnect(dev->net->phydev); mdiobus_unregister(pdata->mdiobus); mdiobus_free(pdata->mdiobus); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); } -static void smsc95xx_handle_link_change(struct net_device *net) -{ - struct usbnet *dev = netdev_priv(net); - - phy_print_status(net->phydev); - usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); -} - static int smsc95xx_start_phy(struct usbnet *dev) { - struct smsc95xx_priv *pdata = dev->driver_priv; - struct net_device *net = dev->net; - int ret; - - ret = smsc95xx_reset(dev); - if (ret < 0) - return ret; + phy_start(dev->net->phydev); - ret = phy_connect_direct(net, pdata->phydev, - &smsc95xx_handle_link_change, - PHY_INTERFACE_MODE_MII); - if (ret) { - netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id); - return ret; - } - - phy_attached_info(net->phydev); - phy_start(net->phydev); return 0; } -static int smsc95xx_disconnect_phy(struct usbnet *dev) +static int 
smsc95xx_stop(struct usbnet *dev) { - phy_stop(dev->net->phydev); - phy_disconnect(dev->net->phydev); + if (dev->net->phydev) + phy_stop(dev->net->phydev); + return 0; } @@ -1964,8 +1973,9 @@ static const struct driver_info smsc95xx_info = { .bind = smsc95xx_bind, .unbind = smsc95xx_unbind, .link_reset = smsc95xx_link_reset, - .reset = smsc95xx_start_phy, - .stop = smsc95xx_disconnect_phy, + .reset = smsc95xx_reset, + .check_connect = smsc95xx_start_phy, + .stop = smsc95xx_stop, .rx_fixup = smsc95xx_rx_fixup, .tx_fixup = smsc95xx_tx_fixup, .status = smsc95xx_status, diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 8e717a0b559b3aac42e94c0ea1c776737c79527b..7984f2157d222dbe2971702221dac46b2a370721 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -256,6 +256,11 @@ static const struct usb_device_id products [] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible. */ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO @@ -313,6 +318,13 @@ static const struct usb_device_id products [] = { .idProduct = 0x9032, /* SL-6000 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c index 1de413b19e3424a2ace2edcbcf0d0d49c4be6167..8084e7408c0ae9065f57bc463921cb985fd68c5e 100644 --- a/drivers/net/wireguard/queueing.c +++ b/drivers/net/wireguard/queueing.c @@ -4,6 +4,7 @@ */ #include "queueing.h" +#include struct multicore_worker __percpu * wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) @@ -42,7 +43,7 @@ void wg_packet_queue_free(struct crypt_queue *queue, bool purge) { free_percpu(queue->worker); WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); - ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL); + ptr_ring_cleanup(&queue->ring, purge ? 
__skb_array_destroy_skb : NULL); } #define NEXT(skb) ((skb)->prev) diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index 52b9bc83abcbcca9007795fee738f1ad564e84e3..473221aa2236813d6188a078a7e38307cd0b59eb 100644 --- a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -160,6 +160,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb, rcu_read_unlock_bh(); return ret; #else + kfree_skb(skb); return -EAFNOSUPPORT; #endif } @@ -241,7 +242,7 @@ int wg_socket_endpoint_from_skb(struct endpoint *endpoint, endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr; endpoint->src4.s_addr = ip_hdr(skb)->daddr; endpoint->src_if4 = skb->skb_iif; - } else if (skb->protocol == htons(ETH_P_IPV6)) { + } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) { endpoint->addr6.sin6_family = AF_INET6; endpoint->addr6.sin6_port = udp_hdr(skb)->source; endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr; @@ -284,7 +285,7 @@ void wg_socket_set_peer_endpoint(struct wg_peer *peer, peer->endpoint.addr4 = endpoint->addr4; peer->endpoint.src4 = endpoint->src4; peer->endpoint.src_if4 = endpoint->src_if4; - } else if (endpoint->addr.sa_family == AF_INET6) { + } else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) { peer->endpoint.addr6 = endpoint->addr6; peer->endpoint.src6 = endpoint->src6; } else { diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index daae470ecf5aa3f3495a623435d34a09111fa91d..e5a296039f714a1d4a284402291d00a3318ecf42 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1477,11 +1477,11 @@ static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size) node = of_parse_phandle(dev->of_node, "memory-region", 0); if (node) { ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) { dev_err(dev, "failed to resolve msa fixed region\n"); return ret; } - of_node_put(node); ar->msa.paddr = r.start; ar->msa.mem_size = resource_size(&r); diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 7d65c115669fe2a5f5d813884015b824552f4ad5..20b9aa8ddf7d52e2574daf8d50a197ce6bc1f8b6 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -337,14 +337,15 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif, if (patterns[i].mask[j / 8] & BIT(j % 8)) bitmask[j] = 0xff; old_pattern.mask = bitmask; - new_pattern = old_pattern; if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) { - if (patterns[i].pkt_offset < ETH_HLEN) + if (patterns[i].pkt_offset < ETH_HLEN) { ath10k_wow_convert_8023_to_80211(&new_pattern, &old_pattern); - else + } else { + new_pattern = old_pattern; new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN; + } } if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE)) diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 510e61e97dbcbd1dd2d4c1847d00f0afdbfd6385..994ec48b2f669588dbfe46c69611aad47aaebfbe 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -30,6 +30,7 @@ static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, hdr->endpoint_id = epid; hdr->flags = flags; hdr->payload_len = cpu_to_be16(len); + memset(hdr->control, 0, sizeof(hdr->control)); status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb); @@ -272,6 +273,10 @@ int htc_connect_service(struct htc_target *target, 
conn_msg->dl_pipeid = endpoint->dl_pipeid; conn_msg->ul_pipeid = endpoint->ul_pipeid; + /* To prevent infoleak */ + conn_msg->svc_meta_len = 0; + conn_msg->pad = 0; + ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index dbef9d8fc893b7350bccfd7b5b34fbdbf99ac9f5..b903b856bcf7b5547ed0d2771bcd3aefc3aa9a04 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1916,7 +1916,7 @@ static int carl9170_parse_eeprom(struct ar9170 *ar) WARN_ON(!(tx_streams >= 1 && tx_streams <= IEEE80211_HT_MCS_TX_MAX_STREAMS)); - tx_params = (tx_streams - 1) << + tx_params |= (tx_streams - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params; diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index bee9110b91f38620ef4b8f8c0e43f1c87698f808..20f4f8ea9f894c3ba1f21fce5f6d7505c1b44a06 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -666,14 +666,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg, /* * Some users have reported their EEPROM programmed with - * 0x8000 or 0x0 set, this is not a supported regulatory - * domain but since we have more than one user with it we - * need a solution for them. We default to 0x64, which is - * the default Atheros world regulatory domain. + * 0x8000 set, this is not a supported regulatory domain + * but since we have more than one user with it we need + * a solution for them. We default to 0x64, which is the + * default Atheros world regulatory domain. */ static void ath_regd_sanitize(struct ath_regulatory *reg) { - if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0) + if (reg->current_rd != COUNTRY_ERD_FLAG) return; printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n"); reg->current_rd = 0x64; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 9aaf6f74733336cbe842625821667068813323e9..37e6e49de3366b8a7176d1aa4fd2be9aabc4c549 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1362,6 +1362,9 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, if (iris_node) { if (of_device_is_compatible(iris_node, "qcom,wcn3620")) wcn->rf_id = RF_IRIS_WCN3620; + if (of_device_is_compatible(iris_node, "qcom,wcn3660") || + of_device_is_compatible(iris_node, "qcom,wcn3660b")) + wcn->rf_id = RF_IRIS_WCN3660; if (of_device_is_compatible(iris_node, "qcom,wcn3680")) wcn->rf_id = RF_IRIS_WCN3680; of_node_put(iris_node); diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 5c40d0bdee2451245f0442da5077c5eed34699cc..82be08265c06ca56efa586c0ce847258a3431084 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -96,6 +96,7 @@ enum wcn36xx_ampdu_state { #define RF_UNKNOWN 0x0000 #define RF_IRIS_WCN3620 0x3620 +#define RF_IRIS_WCN3660 0x3660 #define RF_IRIS_WCN3680 0x3680 static inline void buff_to_be(u32 *buf, size_t len) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index d821a4758f8cf0d3df5f7092d7cda8478499d7ed..a2b8d9171af2abe41290eea365041be88076c062 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -207,6 +207,8 @@ 
static int brcmf_init_nvram_parser(struct nvram_parser *nvp, size = BRCMF_FW_MAX_NVRAM_SIZE; else size = data_len; + /* Add space for properties we may add */ + size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1; /* Alloc for extra 0 byte + roundup by 4 + length field */ size += 1 + 3 + sizeof(u32); nvp->nvram = kzalloc(size, GFP_KERNEL); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 1f12dfb33938a9f64b9c2bdebbc729549dd39c40..61febc9bfa14ab344329e0255c1480079e2a691b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -446,47 +447,6 @@ brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset, } -static void -brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset, - void *srcaddr, u32 len) -{ - void __iomem *address = devinfo->tcm + mem_offset; - __le32 *src32; - __le16 *src16; - u8 *src8; - - if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) { - if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) { - src8 = (u8 *)srcaddr; - while (len) { - iowrite8(*src8, address); - address++; - src8++; - len--; - } - } else { - len = len / 2; - src16 = (__le16 *)srcaddr; - while (len) { - iowrite16(le16_to_cpu(*src16), address); - address += 2; - src16++; - len--; - } - } - } else { - len = len / 4; - src32 = (__le32 *)srcaddr; - while (len) { - iowrite32(le32_to_cpu(*src32), address); - address += 4; - src32++; - len--; - } - } -} - - static void brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset, void *dstaddr, u32 len) @@ -1346,6 +1306,18 @@ static void brcmf_pcie_down(struct device *dev) { } +static int brcmf_pcie_preinit(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; + + brcmf_dbg(PCIE, "Enter\n"); + + brcmf_pcie_intr_enable(buspub->devinfo); + brcmf_pcie_hostready(buspub->devinfo); + + return 0; +} static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb) { @@ -1454,6 +1426,7 @@ static int brcmf_pcie_reset(struct device *dev) } static const struct brcmf_bus_ops brcmf_pcie_bus_ops = { + .preinit = brcmf_pcie_preinit, .txdata = brcmf_pcie_tx, .stop = brcmf_pcie_down, .txctl = brcmf_pcie_tx_ctlpkt, @@ -1561,8 +1534,8 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, return err; brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name); - brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase, - (void *)fw->data, fw->size); + memcpy_toio(devinfo->tcm + devinfo->ci->rambase, + (void *)fw->data, fw->size); resetintr = get_unaligned_le32(fw->data); release_firmware(fw); @@ -1576,7 +1549,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name); address = devinfo->ci->rambase + devinfo->ci->ramsize - nvram_len; - brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len); + memcpy_toio(devinfo->tcm + address, nvram, nvram_len); brcmf_fw_nvram_free(nvram); } else { brcmf_dbg(PCIE, "No matching NVRAM file found %s\n", @@ -1775,6 +1748,8 @@ static void brcmf_pcie_setup(struct device *dev, int ret, ret = brcmf_chip_get_raminfo(devinfo->ci); if (ret) { brcmf_err(bus, "Failed to get RAM info\n"); + release_firmware(fw); + brcmf_fw_nvram_free(nvram); goto fail; } @@ -1824,9 +1799,6 @@ static void 
brcmf_pcie_setup(struct device *dev, int ret, init_waitqueue_head(&devinfo->mbdata_resp_wait); - brcmf_pcie_intr_enable(devinfo); - brcmf_pcie_hostready(devinfo); - ret = brcmf_attach(&devinfo->pdev->dev); if (ret) goto fail; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 423d3c396b2d3f1047e4247b94ae7963ac730f22..1e21cdbb7313b788a758ba1144c546f4895f6ae4 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -304,7 +304,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw) priv->is_open = 1; IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; + return ret; } static void iwlagn_mac_stop(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index cbde21e772b17a328e3b170d20364862cbf8ceb4..b862cfbcd6e79ccf1c0e0e1992521c836135b950 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -587,8 +587,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE | - IEEE80211_HE_MAC_CAP0_TWT_REQ, + IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 6348dfa61724a6110923b9d06b0a65a36754b32c..54b28f0932e25709d32f649412ee12405ef5b0a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1495,8 +1495,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) while (!sband && i < NUM_NL80211_BANDS) sband = mvm->hw->wiphy->bands[i++]; - if (WARN_ON_ONCE(!sband)) + if (WARN_ON_ONCE(!sband)) { + ret = -ENODEV; goto error; + } chan = &sband->channels[0]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 922a7ea0cd24e892109d2d4db11e4a0aec5f05c9..d2c6fdb7027320ac7bbeff2a638ad62d735d47f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -350,7 +350,6 @@ static const u8 he_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, - [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index b793d61d15d272664b3e3aa83a2e44b398005223..cc550ba0c9dfefe2513cf2361fc2e97c64c60090 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2264,6 +2264,15 @@ static void hw_scan_work(struct work_struct *work) if (req->ie_len) skb_put_data(probe, req->ie, req->ie_len); + if (!ieee80211_tx_prepare_skb(hwsim->hw, + hwsim->hw_scan_vif, + probe, + hwsim->tmp_chan->band, + NULL)) { + kfree_skb(probe); + continue; + } + local_bh_disable(); mac80211_hwsim_tx_frame(hwsim->hw, probe, hwsim->tmp_chan); @@ -3567,6 +3576,10 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, } txi->flags |= IEEE80211_TX_STAT_ACK; } + + if (hwsim_flags & HWSIM_TX_CTL_NO_ACK) + txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + ieee80211_tx_status_irqsafe(data2->hw, skb); return 0; out: diff --git 
a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index c9226dceb510c19eede46b476edd16d8ab39d7b0..bdff89cc3105e6b51bbb27a04e5061b0062ac214 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -618,6 +618,9 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates); int i; + if (!sta_rates) + return; + spin_lock_bh(&dev->mt76.lock); for (i = 0; i < ARRAY_SIZE(msta->rates); i++) { msta->rates[i].idx = sta_rates->rate[i].idx; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 88cdc2badeae7129d00bf301c7e1ab04abb620d6..defa207f53d6f5dbed3ce42c1e9305941d5182b4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -673,6 +673,9 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates); int i; + if (!sta_rates) + return; + spin_lock_bh(&dev->mt76.lock); for (i = 0; i < ARRAY_SIZE(msta->rates); i++) { msta->rates[i].idx = sta_rates->rate[i].idx; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 9a7f317a098fc805ba661803d8eb456f051b807c..41054ee43dbfad4aa007164ac694bad6178218be 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -1259,8 +1259,11 @@ mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, generic = (struct wtbl_generic *)tlv; if (sta) { + if (vif->type == NL80211_IFTYPE_STATION) + generic->partial_aid = cpu_to_le16(vif->bss_conf.aid); + else + generic->partial_aid = cpu_to_le16(sta->aid); memcpy(generic->peer_addr, sta->addr, ETH_ALEN); - generic->partial_aid = cpu_to_le16(sta->aid); generic->muar_idx = mvif->omac_idx; generic->qos = sta->wme; } else { @@ -1314,12 +1317,15 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA); + basic->aid = cpu_to_le16(sta->aid); break; case NL80211_IFTYPE_STATION: basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP); + basic->aid = cpu_to_le16(vif->bss_conf.aid); break; case NL80211_IFTYPE_ADHOC: basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC); + basic->aid = cpu_to_le16(sta->aid); break; default: WARN_ON(1); @@ -1327,7 +1333,6 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, } memcpy(basic->peer_addr, sta->addr, ETH_ALEN); - basic->aid = cpu_to_le16(sta->aid); basic->qos = sta->wme; } diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index bf3fbd14eda3ccd8926f475f7b6f913ae59a124d..091eea0d958d1be13b632837f077fe75f7699fce 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -382,6 +382,8 @@ static int ray_config(struct pcmcia_device *link) goto failed; local->sram = ioremap(link->resource[2]->start, resource_size(link->resource[2])); + if (!local->sram) + goto failed; /*** Set up 16k window for shared memory (receive buffer) ***************/ link->resource[3]->flags |= @@ -396,6 +398,8 @@ static int ray_config(struct pcmcia_device *link) goto failed; local->rmem = ioremap(link->resource[3]->start, resource_size(link->resource[3])); + if (!local->rmem) + goto failed; /*** Set 
up window for attribute memory ***********************************/ link->resource[4]->flags |= @@ -410,6 +414,8 @@ static int ray_config(struct pcmcia_device *link) goto failed; local->amem = ioremap(link->resource[4]->start, resource_size(link->resource[4])); + if (!local->amem) + goto failed; dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram); dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 94d19158efc188acc0d963b491343f52319f873b..ca261e0fc9c9b4b1ecd082db124b45a9452d7e73 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -256,6 +256,7 @@ static void backend_disconnect(struct backend_info *be) unsigned int queue_index; xen_unregister_watchers(vif); + xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(vif); #endif /* CONFIG_DEBUG_FS */ @@ -675,7 +676,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch, /* Not interested in this watch anymore. */ unregister_hotplug_status_watch(be); - xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); } kfree(str); } @@ -824,15 +824,11 @@ static void connect(struct backend_info *be) xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); - if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, - NULL, hotplug_status_changed, - "%s/%s", dev->nodename, - "hotplug-status"); - if (err) - goto err; + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, + hotplug_status_changed, + "%s/%s", dev->nodename, "hotplug-status"); + if (!err) be->have_hotplug_status_watch = 1; - } netif_tx_wake_all_queues(be->vif->dev); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 0776b567504de367983adaf390e5e8375d89c871..1a69b5246133b1310e06f670d15be400f22b540e 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -842,6 +842,28 @@ static int xennet_close(struct net_device *dev) return 0; } +static void xennet_destroy_queues(struct netfront_info *info) +{ + unsigned int i; + + for (i = 0; i < info->netdev->real_num_tx_queues; i++) { + struct netfront_queue *queue = &info->queues[i]; + + if (netif_running(info->netdev)) + napi_disable(&queue->napi); + netif_napi_del(&queue->napi); + } + + kfree(info->queues); + info->queues = NULL; +} + +static void xennet_uninit(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + xennet_destroy_queues(np); +} + static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) { unsigned long flags; @@ -1619,6 +1641,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp) } static const struct net_device_ops xennet_netdev_ops = { + .ndo_uninit = xennet_uninit, .ndo_open = xennet_open, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, @@ -2117,22 +2140,6 @@ static int write_queue_xenstore_keys(struct netfront_queue *queue, return err; } -static void xennet_destroy_queues(struct netfront_info *info) -{ - unsigned int i; - - for (i = 0; i < info->netdev->real_num_tx_queues; i++) { - struct netfront_queue *queue = &info->queues[i]; - - if (netif_running(info->netdev)) - napi_disable(&queue->napi); - netif_napi_del(&queue->napi); - } - - kfree(info->queues); - info->queues = NULL; -} - static int xennet_create_page_pool(struct netfront_queue *queue) diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c index 
1caebefb25ff1d43abe9adf71e933fe2732ad497..2ae1474faede99aea3e9fea55d80da245f7e164b 100644 --- a/drivers/nfc/port100.c +++ b/drivers/nfc/port100.c @@ -1609,7 +1609,9 @@ static int port100_probe(struct usb_interface *interface, nfc_digital_free_device(dev->nfc_digital_dev); error: + usb_kill_urb(dev->in_urb); usb_free_urb(dev->in_urb); + usb_kill_urb(dev->out_urb); usb_free_urb(dev->out_urb); usb_put_dev(dev->udev); diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c index bc4541cbf8c6e174011851c09627971613a17057..99a5fc1ab0aafb2f0d1f0f969ea8a229cf70a10b 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen4.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c @@ -168,6 +168,18 @@ static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) return NTB_TOPO_NONE; } +static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) +{ + switch (ppd & SPR_PPD_TOPO_MASK) { + case SPR_PPD_TOPO_B2B_USD: + return NTB_TOPO_B2B_USD; + case SPR_PPD_TOPO_B2B_DSD: + return NTB_TOPO_B2B_DSD; + } + + return NTB_TOPO_NONE; +} + int gen4_init_dev(struct intel_ntb_dev *ndev) { struct pci_dev *pdev = ndev->ntb.pdev; @@ -181,7 +193,10 @@ int gen4_init_dev(struct intel_ntb_dev *ndev) ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN; ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET); - ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + if (pdev_is_ICX(pdev)) + ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + else if (pdev_is_SPR(pdev)) + ndev->ntb.topo = spr_ppd_topo(ndev, ppd1); dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1, ntb_topo_string(ndev->ntb.topo)); if (ndev->ntb.topo == NTB_TOPO_NONE) diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h index a868c788de02f3abf37a1ac862814344e1d22f52..ec293953d665f7f7d98ca8134e69387f7fa8a9a3 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen4.h +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h @@ -46,10 +46,14 @@ #define GEN4_PPD_CLEAR_TRN 0x0001 #define GEN4_PPD_LINKTRN 0x0008 #define GEN4_PPD_CONN_MASK 0x0300 +#define SPR_PPD_CONN_MASK 0x0700 #define GEN4_PPD_CONN_B2B 0x0200 #define GEN4_PPD_DEV_MASK 0x1000 #define GEN4_PPD_DEV_DSD 0x1000 #define GEN4_PPD_DEV_USD 0x0000 +#define SPR_PPD_DEV_MASK 0x4000 +#define SPR_PPD_DEV_DSD 0x4000 +#define SPR_PPD_DEV_USD 0x0000 #define GEN4_LINK_CTRL_LINK_DISABLE 0x0010 #define GEN4_SLOTSTS 0xb05a @@ -59,6 +63,10 @@ #define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD) #define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD) +#define SPR_PPD_TOPO_MASK (SPR_PPD_CONN_MASK | SPR_PPD_DEV_MASK) +#define SPR_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_USD) +#define SPR_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_DSD) + #define GEN4_DB_COUNT 32 #define GEN4_DB_LINK 32 #define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK) @@ -97,4 +105,12 @@ static inline int pdev_is_ICX(struct pci_dev *pdev) return 0; } +static inline int pdev_is_SPR(struct pci_dev *pdev) +{ + if (pdev_is_gen4(pdev) && + pdev->revision > PCI_DEVICE_REVISION_ICX_MAX) + return 1; + return 0; +} + #endif diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index e05cc9f8a9fd1bfe95e9965400e4fb72b7605cfe..1d72653b5c8d172b56f55fe16741886c283a9e7a 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -1018,6 +1018,9 @@ static unsigned long default_align(struct nd_region *nd_region) } } + if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX) + align = PAGE_SIZE; + mappings = max_t(u16, 1, nd_region->ndr_mappings); div_u64_rem(align, mappings, &remainder); if (remainder) 
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ed2c044faf111debc313739e3faf3fb4c494e108..dcc047f01a0761fdbae4ae887fedd83bdb87157e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3661,16 +3661,15 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, return NULL; } -static int __nvme_check_ids(struct nvme_subsystem *subsys, - struct nvme_ns_head *new) +static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, + struct nvme_ns_ids *ids) { struct nvme_ns_head *h; lockdep_assert_held(&subsys->lock); list_for_each_entry(h, &subsys->nsheads, entry) { - if (nvme_ns_ids_valid(&new->ids) && - nvme_ns_ids_equal(&new->ids, &h->ids)) + if (nvme_ns_ids_valid(ids) && nvme_ns_ids_equal(ids, &h->ids)) return -EINVAL; } @@ -3704,7 +3703,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, head->ids = *ids; kref_init(&head->ref); - ret = __nvme_check_ids(ctrl->subsys, head); + ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &head->ids); if (ret) { dev_err(ctrl->device, "duplicate IDs for nsid %d\n", nsid); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 305db4a6c5a6cdd5bf04d1bfc58450e59ebc3f1d..559e7fda7cc7ba40e112db0c33175bda3611c3fa 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -30,6 +30,44 @@ static int so_priority; module_param(so_priority, int, 0644); MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority"); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +/* lockdep can detect a circular dependency of the form + * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock + * because dependencies are tracked for both nvme-tcp and user contexts. Using + * a separate class prevents lockdep from conflating nvme-tcp socket use with + * user-space socket API use. 
+ */ +static struct lock_class_key nvme_tcp_sk_key[2]; +static struct lock_class_key nvme_tcp_slock_key[2]; + +static void nvme_tcp_reclassify_socket(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (WARN_ON_ONCE(!sock_allow_reclassification(sk))) + return; + + switch (sk->sk_family) { + case AF_INET: + sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME", + &nvme_tcp_slock_key[0], + "sk_lock-AF_INET-NVME", + &nvme_tcp_sk_key[0]); + break; + case AF_INET6: + sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME", + &nvme_tcp_slock_key[1], + "sk_lock-AF_INET6-NVME", + &nvme_tcp_sk_key[1]); + break; + default: + WARN_ON_ONCE(1); + } +} +#else +static void nvme_tcp_reclassify_socket(struct socket *sock) { } +#endif + enum nvme_tcp_send_state { NVME_TCP_SEND_CMD_PDU = 0, NVME_TCP_SEND_H2C_PDU, @@ -1422,6 +1460,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, goto err_destroy_mutex; } + nvme_tcp_reclassify_socket(queue->sock); + /* Single syn retry */ tcp_sock_set_syncnt(queue->sock->sk, 1); diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index f30144c8c0bd2c6ae5adf93a7837c10e80d08e97..49ff8bf10c7409d191f4750ced9a299ed57d60df 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -851,7 +851,9 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, case PCI_EXP_RTSTA: { u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG); u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG); - *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16); + *value = msglog >> 16; + if (isr0 & PCIE_MSG_PM_PME_MASK) + *value |= PCI_EXP_RTSTA_PME; return PCI_BRIDGE_EMUL_HANDLED; } diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index b651b6f4446917855d92875df4c6f93ce55aed10..e1c2daa50b4987d5668cb65fb15d5e6330022445 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -467,7 +467,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) return 1; } - if ((size > SZ_1K) && (size < SZ_4G) && !(*ib_reg_mask & (1 << 0))) { + if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { *ib_reg_mask |= (1 << 0); return 0; } @@ -481,28 +481,27 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) } static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, - struct resource_entry *entry, - u8 *ib_reg_mask) + struct of_pci_range *range, u8 *ib_reg_mask) { void __iomem *cfg_base = port->cfg_base; struct device *dev = port->dev; void *bar_addr; u32 pim_reg; - u64 cpu_addr = entry->res->start; - u64 pci_addr = cpu_addr - entry->offset; - u64 size = resource_size(entry->res); + u64 cpu_addr = range->cpu_addr; + u64 pci_addr = range->pci_addr; + u64 size = range->size; u64 mask = ~(size - 1) | EN_REG; u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64; u32 bar_low; int region; - region = xgene_pcie_select_ib_reg(ib_reg_mask, size); + region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); if (region < 0) { dev_warn(dev, "invalid pcie dma-range config\n"); return; } - if (entry->res->flags & IORESOURCE_PREFETCH) + if (range->flags & IORESOURCE_PREFETCH) flags |= PCI_BASE_ADDRESS_MEM_PREFETCH; bar_low = pcie_bar_low_val((u32)cpu_addr, flags); @@ -533,13 +532,25 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port) { - struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port); - struct resource_entry *entry; + struct 
device_node *np = port->node; + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = port->dev; u8 ib_reg_mask = 0; - resource_list_for_each_entry(entry, &bridge->dma_ranges) - xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask); + if (of_pci_dma_range_parser_init(&parser, np)) { + dev_err(dev, "missing dma-ranges property\n"); + return -EINVAL; + } + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.cpu_addr + range.size - 1; + dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); + } return 0; } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 66e59742471e87dd252a3b4b1372ccd58b3a0b16..c4ce4c4e9f7ad27e42b859ddaf46aeca91b4251d 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -93,6 +93,8 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout) if (slot_status & PCI_EXP_SLTSTA_CC) { pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); + ctrl->cmd_busy = 0; + smp_mb(); return 1; } msleep(10); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index d998477fbc3e3d86ecd2d0456764461f166e2a5a..3b5d896af2331a1ade17d7de171dc2a516f7c01f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1816,6 +1816,18 @@ static void quirk_alder_ioapic(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); #endif +static void quirk_no_msi(struct pci_dev *dev) +{ + pci_info(dev, "avoiding MSI to work around a hardware defect\n"); + dev->no_msi = 1; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4386, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4387, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4388, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4389, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438a, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438b, quirk_no_msi); + static void quirk_pcie_mch(struct pci_dev *pdev) { pdev->no_msi = 1; diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c index 14e0551cd3190f71aeb85f0318c3d71c455836de..0aa740b73d0db06adc659a4ef519d6f0a564e890 100644 --- a/drivers/phy/phy-core-mipi-dphy.c +++ b/drivers/phy/phy-core-mipi-dphy.c @@ -66,10 +66,10 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock, cfg->hs_trail = max(4 * 8 * ui, 60000 + 4 * 4 * ui); cfg->init = 100; - cfg->lpx = 60000; + cfg->lpx = 50000; cfg->ta_get = 5 * cfg->lpx; cfg->ta_go = 4 * cfg->lpx; - cfg->ta_sure = 2 * cfg->lpx; + cfg->ta_sure = cfg->lpx; cfg->wakeup = 1000; cfg->hs_clk_rate = hs_clk_rate; diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index a02ad10ec6fada549a1f03d2903d6c11da1aa4aa..730581d130649bcf5878b7682b3125e666376f57 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1039,6 +1039,7 @@ int mtk_pctrl_init(struct platform_device *pdev, node = of_parse_phandle(np, "mediatek,pctl-regmap", 0); if (node) { pctl->regmap1 = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(pctl->regmap1)) return PTR_ERR(pctl->regmap1); } else if (regmap) { @@ -1052,6 +1053,7 @@ int mtk_pctrl_init(struct platform_device *pdev, node = of_parse_phandle(np, "mediatek,pctl-regmap", 1); if (node) { pctl->regmap2 = 
syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(pctl->regmap2)) return PTR_ERR(pctl->regmap2); } diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c index 623af4410b07c70d6d011c775cb692d7f8fbd5bd..d0a4ebbe1e7e65f6e04532bc02f8101afbb3fbd1 100644 --- a/drivers/pinctrl/mediatek/pinctrl-paris.c +++ b/drivers/pinctrl/mediatek/pinctrl-paris.c @@ -96,20 +96,16 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev, err = hw->soc->bias_get_combo(hw, desc, &pullup, &ret); if (err) goto out; + if (ret == MTK_PUPD_SET_R1R0_00) + ret = MTK_DISABLE; if (param == PIN_CONFIG_BIAS_DISABLE) { - if (ret == MTK_PUPD_SET_R1R0_00) - ret = MTK_DISABLE; + if (ret != MTK_DISABLE) + err = -EINVAL; } else if (param == PIN_CONFIG_BIAS_PULL_UP) { - /* When desire to get pull-up value, return - * error if current setting is pull-down - */ - if (!pullup) + if (!pullup || ret == MTK_DISABLE) err = -EINVAL; } else if (param == PIN_CONFIG_BIAS_PULL_DOWN) { - /* When desire to get pull-down value, return - * error if current setting is pull-up - */ - if (pullup) + if (pullup || ret == MTK_DISABLE) err = -EINVAL; } } else { @@ -188,8 +184,7 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev, } static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, - enum pin_config_param param, - enum pin_config_param arg) + enum pin_config_param param, u32 arg) { struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev); const struct mtk_pin_desc *desc; @@ -585,6 +580,9 @@ ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw, if (gpio >= hw->soc->npins) return -EINVAL; + if (mtk_is_virt_gpio(hw, gpio)) + return -EINVAL; + desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio]; pinmux = mtk_pctrl_get_pinmux(hw, gpio); if (pinmux >= hw->soc->nfuncs) @@ -719,10 +717,10 @@ static int mtk_pconf_group_get(struct pinctrl_dev *pctldev, unsigned group, unsigned long *config) { struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev); + struct mtk_pinctrl_group *grp = &hw->groups[group]; - *config = hw->groups[group].config; - - return 0; + /* One pin per group only */ + return mtk_pinconf_get(pctldev, grp->pin, config); } static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group, @@ -738,8 +736,6 @@ static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group, pinconf_to_config_argument(configs[i])); if (ret < 0) return ret; - - grp->config = configs[i]; } return 0; diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 657e35a75d84aad961606193d5bb089401127872..6d77feda9090a9ec6af22ef545ef785bd53a5dbf 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -1883,8 +1883,10 @@ static int nmk_pinctrl_probe(struct platform_device *pdev) } prcm_np = of_parse_phandle(np, "prcm", 0); - if (prcm_np) + if (prcm_np) { npct->prcm_base = of_iomap(prcm_np, 0); + of_node_put(prcm_np); + } if (!npct->prcm_base) { if (version == PINCTRL_NMK_STN8815) { dev_info(&pdev->dev, diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c index 6de31b5ee358c62adfd1672c0120d015bee3e129..ce36b6ff7b95e4668b7aa8fb77aca0a32cda402a 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c +++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c @@ -78,7 +78,6 @@ struct npcm7xx_gpio { struct gpio_chip gc; int irqbase; int irq; - void *priv; struct irq_chip irq_chip; u32 pinctrl_id; int (*direction_input)(struct gpio_chip *chip, 
unsigned offset); @@ -226,7 +225,7 @@ static void npcmgpio_irq_handler(struct irq_desc *desc) chained_irq_enter(chip, desc); sts = ioread32(bank->base + NPCM7XX_GP_N_EVST); en = ioread32(bank->base + NPCM7XX_GP_N_EVEN); - dev_dbg(chip->parent_device, "==> got irq sts %.8x %.8x\n", sts, + dev_dbg(bank->gc.parent, "==> got irq sts %.8x %.8x\n", sts, en); sts &= en; @@ -241,33 +240,33 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type) gpiochip_get_data(irq_data_get_irq_chip_data(d)); unsigned int gpio = BIT(d->hwirq); - dev_dbg(d->chip->parent_device, "setirqtype: %u.%u = %u\n", gpio, + dev_dbg(bank->gc.parent, "setirqtype: %u.%u = %u\n", gpio, d->irq, type); switch (type) { case IRQ_TYPE_EDGE_RISING: - dev_dbg(d->chip->parent_device, "edge.rising\n"); + dev_dbg(bank->gc.parent, "edge.rising\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_EDGE_FALLING: - dev_dbg(d->chip->parent_device, "edge.falling\n"); + dev_dbg(bank->gc.parent, "edge.falling\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_EDGE_BOTH: - dev_dbg(d->chip->parent_device, "edge.both\n"); + dev_dbg(bank->gc.parent, "edge.both\n"); npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); break; case IRQ_TYPE_LEVEL_LOW: - dev_dbg(d->chip->parent_device, "level.low\n"); + dev_dbg(bank->gc.parent, "level.low\n"); npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_LEVEL_HIGH: - dev_dbg(d->chip->parent_device, "level.high\n"); + dev_dbg(bank->gc.parent, "level.high\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; default: - dev_dbg(d->chip->parent_device, "invalid irq type\n"); + dev_dbg(bank->gc.parent, "invalid irq type\n"); return -EINVAL; } @@ -289,7 +288,7 @@ static void npcmgpio_irq_ack(struct irq_data *d) gpiochip_get_data(irq_data_get_irq_chip_data(d)); unsigned int gpio = d->hwirq; - dev_dbg(d->chip->parent_device, "irq_ack: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_ack: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVST); } @@ -301,7 +300,7 @@ static void npcmgpio_irq_mask(struct irq_data *d) unsigned int gpio = d->hwirq; /* Clear events */ - dev_dbg(d->chip->parent_device, "irq_mask: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_mask: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC); } @@ -313,7 +312,7 @@ static void npcmgpio_irq_unmask(struct irq_data *d) unsigned int gpio = d->hwirq; /* Enable events */ - dev_dbg(d->chip->parent_device, "irq_unmask: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS); } @@ -323,7 +322,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d) unsigned int gpio = d->hwirq; /* active-high, input, clear interrupt, enable interrupt */ - dev_dbg(d->chip->parent_device, "startup: %u.%u\n", gpio, d->irq); + dev_dbg(gc->parent, "startup: %u.%u\n", gpio, d->irq); npcmgpio_direction_input(gc, gpio); npcmgpio_irq_ack(d); npcmgpio_irq_unmask(d); @@ -905,7 +904,7 @@ static struct npcm7xx_func npcm7xx_funcs[] = { #define DRIVE_STRENGTH_HI_SHIFT 12 #define DRIVE_STRENGTH_MASK 0x0000FF00 -#define DS(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \ +#define DSTR(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \ ((hi) << 
DRIVE_STRENGTH_HI_SHIFT)) #define DSLO(x) (((x) >> DRIVE_STRENGTH_LO_SHIFT) & 0xF) #define DSHI(x) (((x) >> DRIVE_STRENGTH_HI_SHIFT) & 0xF) @@ -925,31 +924,31 @@ struct npcm7xx_pincfg { static const struct npcm7xx_pincfg pincfg[] = { /* PIN FUNCTION 1 FUNCTION 2 FUNCTION 3 FLAGS */ NPCM7XX_PINCFG(0, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(3, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(4, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW), NPCM7XX_PINCFG(5, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW), NPCM7XX_PINCFG(6, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW), NPCM7XX_PINCFG(7, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(8, lkgpo1, FLOCKR1, 4, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(9, lkgpo2, FLOCKR1, 8, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(10, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(11, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(8, lkgpo1, FLOCKR1, 4, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(9, lkgpo2, FLOCKR1, 8, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(10, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(11, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(12, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW), NPCM7XX_PINCFG(13, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW), NPCM7XX_PINCFG(14, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW), NPCM7XX_PINCFG(15, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(16, lkgpo0, FLOCKR1, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(17, pspi2, MFSEL3, 13, smb4den, I2CSEGSEL, 23, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(18, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(19, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(16, lkgpo0, FLOCKR1, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(17, pspi2, MFSEL3, 13, smb4den, I2CSEGSEL, 23, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(18, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(19, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(20, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0), NPCM7XX_PINCFG(21, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0), NPCM7XX_PINCFG(22, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0), NPCM7XX_PINCFG(23, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0), - NPCM7XX_PINCFG(24, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(25, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(24, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(25, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(26, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(27, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0), 
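The DSTR() macro introduced above packs the two per-pin drive-strength options into the flags word of each NPCM7XX_PINCFG() entry, and DSLO()/DSHI() unpack them again. A minimal, standalone sketch of that round trip follows; it assumes DRIVE_STRENGTH_LO_SHIFT is 8, which is not visible in this hunk but follows from the 0x0000FF00 mask and the HI shift of 12:

	#include <stdio.h>

	/* HI shift matches the hunk; the LO shift of 8 is an assumption (see above). */
	#define DRIVE_STRENGTH_LO_SHIFT	8
	#define DRIVE_STRENGTH_HI_SHIFT	12

	/* Same packing/unpacking as the driver's DSTR/DSLO/DSHI macros. */
	#define DSTR(lo, hi)	(((lo) << DRIVE_STRENGTH_LO_SHIFT) | \
				 ((hi) << DRIVE_STRENGTH_HI_SHIFT))
	#define DSLO(x)		(((x) >> DRIVE_STRENGTH_LO_SHIFT) & 0xF)
	#define DSHI(x)		(((x) >> DRIVE_STRENGTH_HI_SHIFT) & 0xF)

	int main(void)
	{
		unsigned int flags = DSTR(8, 12);	/* as used by many entries in this table */

		/* Prints: flags=0xc800 lo=8 hi=12 */
		printf("flags=0x%x lo=%u hi=%u\n", flags, DSLO(flags), DSHI(flags));
		return 0;
	}

Renaming DS() to DSTR() changes only the macro name; every table entry keeps the same packed value, so the resulting pin configuration is unchanged.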
NPCM7XX_PINCFG(28, smb4, MFSEL1, 1, none, NONE, 0, none, NONE, 0, 0), @@ -965,12 +964,12 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(39, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW), NPCM7XX_PINCFG(40, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW), NPCM7XX_PINCFG(41, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(42, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, DS(2, 4) | GPO), + NPCM7XX_PINCFG(42, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, DSTR(2, 4) | GPO), NPCM7XX_PINCFG(43, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0), NPCM7XX_PINCFG(44, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0), NPCM7XX_PINCFG(45, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(46, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)), - NPCM7XX_PINCFG(47, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)), + NPCM7XX_PINCFG(46, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DSTR(2, 8)), + NPCM7XX_PINCFG(47, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DSTR(2, 8)), NPCM7XX_PINCFG(48, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, GPO), NPCM7XX_PINCFG(49, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, 0), NPCM7XX_PINCFG(50, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0), @@ -980,8 +979,8 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(54, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(55, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(56, r1err, MFSEL1, 12, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(57, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)), - NPCM7XX_PINCFG(58, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)), + NPCM7XX_PINCFG(57, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), + NPCM7XX_PINCFG(58, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), NPCM7XX_PINCFG(59, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(60, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(61, uart1, MFSEL1, 10, none, NONE, 0, none, NONE, 0, GPO), @@ -1004,19 +1003,19 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(77, fanin13, MFSEL2, 13, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(78, fanin14, MFSEL2, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(79, fanin15, MFSEL2, 15, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 
DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(87, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(88, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(89, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(90, r2err, MFSEL1, 15, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(91, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)), - NPCM7XX_PINCFG(92, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)), + NPCM7XX_PINCFG(91, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), + NPCM7XX_PINCFG(92, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), NPCM7XX_PINCFG(93, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0), NPCM7XX_PINCFG(94, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0), NPCM7XX_PINCFG(95, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0), @@ -1062,34 +1061,34 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(133, smb10, MFSEL4, 13, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(134, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(135, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(136, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(137, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(138, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(139, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(140, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(136, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(137, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(138, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(139, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(140, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(141, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(142, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(142, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(143, sd1, MFSEL3, 12, sd1pwr, MFSEL4, 5, none, NONE, 0, 0), - NPCM7XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + 
NPCM7XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(153, mmcwp, FLOCKR1, 24, none, NONE, 0, none, NONE, 0, 0), /* Z1/A1 */ - NPCM7XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(155, mmccd, MFSEL3, 25, mmcrst, MFSEL4, 6, none, NONE, 0, 0), /* Z1/A1 */ - NPCM7XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - - NPCM7XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(161, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, DS(8, 12)), - NPCM7XX_PINCFG(162, serirq, NONE, 0, gpio, MFSEL1, 31, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + + NPCM7XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(161, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, DSTR(8, 12)), + NPCM7XX_PINCFG(162, serirq, NONE, 0, gpio, MFSEL1, 31, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(163, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0), NPCM7XX_PINCFG(164, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC), NPCM7XX_PINCFG(165, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC), @@ -1102,25 +1101,25 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(172, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(173, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(174, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(175, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(176, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(177, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(180, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(175, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(176, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(177, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(180, r1, MFSEL3, 9, 
none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(181, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(182, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(183, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(184, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(185, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(186, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(187, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(188, spi3quad, MFSEL4, 20, spi3cs2, MFSEL4, 18, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(189, spi3quad, MFSEL4, 20, spi3cs3, MFSEL4, 19, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(190, gpio, FLOCKR1, 20, nprd_smi, NONE, 0, none, NONE, 0, DS(2, 4)), - NPCM7XX_PINCFG(191, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */ - - NPCM7XX_PINCFG(192, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */ + NPCM7XX_PINCFG(183, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(184, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(185, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(186, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(187, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(188, spi3quad, MFSEL4, 20, spi3cs2, MFSEL4, 18, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(189, spi3quad, MFSEL4, 20, spi3cs3, MFSEL4, 19, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(190, gpio, FLOCKR1, 20, nprd_smi, NONE, 0, none, NONE, 0, DSTR(2, 4)), + NPCM7XX_PINCFG(191, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), /* XX */ + + NPCM7XX_PINCFG(192, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), /* XX */ NPCM7XX_PINCFG(193, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(194, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(195, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0), @@ -1131,11 +1130,11 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(200, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(201, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(202, smb0c, I2CSEGSEL, 1, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(203, faninx, MFSEL3, 3, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(203, faninx, MFSEL3, 3, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(204, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW), NPCM7XX_PINCFG(205, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(206, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(207, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)), + NPCM7XX_PINCFG(206, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(207, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DSTR(4, 8)), NPCM7XX_PINCFG(208, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0), NPCM7XX_PINCFG(209, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0), NPCM7XX_PINCFG(210, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0), @@ -1147,20 +1146,20 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(216, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0), 
NPCM7XX_PINCFG(217, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0), NPCM7XX_PINCFG(218, wdog1, MFSEL3, 19, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(219, wdog2, MFSEL3, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)), + NPCM7XX_PINCFG(219, wdog2, MFSEL3, 20, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), NPCM7XX_PINCFG(220, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(221, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(222, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(223, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(224, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(225, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(226, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(227, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(228, spixcs1, MFSEL4, 28, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(229, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(230, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(225, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(226, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(227, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(228, spixcs1, MFSEL4, 28, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(229, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(230, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(253, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC1 power */ NPCM7XX_PINCFG(254, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC2 power */ NPCM7XX_PINCFG(255, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* DACOSEL */ @@ -1561,7 +1560,7 @@ static int npcm7xx_get_groups_count(struct pinctrl_dev *pctldev) { struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev); - dev_dbg(npcm->dev, "group size: %d\n", ARRAY_SIZE(npcm7xx_groups)); + dev_dbg(npcm->dev, "group size: %zu\n", ARRAY_SIZE(npcm7xx_groups)); return ARRAY_SIZE(npcm7xx_groups); } diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index 1e225d513988857391264d4155a9bf014f5b0c09..42e27dba62e26292554cb9b5c6b7f3414c48b6f3 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -30,10 +30,10 @@ static const struct pin_config_item conf_items[] = { PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL, false), PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL, false), PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL, false), - PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL, false), + PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", "ohms", true), PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, - "input bias pull to pin specific state", NULL, false), - PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false), + "input bias pull to pin specific state", "ohms", true), + PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull 
up", "ohms", true), PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false), PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false), PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false), diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 53a0badc6b035e4afd24e2d285f6e23755b65be4..9df48e0cf4cb4c028ac66b20177396a8068e1f15 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -3774,6 +3774,7 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,grf", 0); if (node) { info->regmap_base = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(info->regmap_base)) return PTR_ERR(info->regmap_base); } else { @@ -3810,6 +3811,7 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,pmu", 0); if (node) { info->regmap_pmu = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(info->regmap_pmu)) return PTR_ERR(info->regmap_pmu); } diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c index 9d168b90cd2810e7ae5eb3813ae0dfe5e58e88b7..258972672eda1077871df9e3db2deb015397e076 100644 --- a/drivers/pinctrl/renesas/core.c +++ b/drivers/pinctrl/renesas/core.c @@ -739,7 +739,7 @@ static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; } #ifdef DEBUG #define SH_PFC_MAX_REGS 300 -#define SH_PFC_MAX_ENUMS 3000 +#define SH_PFC_MAX_ENUMS 5000 static unsigned int sh_pfc_errors __initdata = 0; static unsigned int sh_pfc_warnings __initdata = 0; @@ -851,7 +851,8 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname, sh_pfc_check_reg(drvname, cfg_reg->reg); if (cfg_reg->field_width) { - n = cfg_reg->reg_width / cfg_reg->field_width; + fw = cfg_reg->field_width; + n = (cfg_reg->reg_width / fw) << fw; /* Skip field checks (done at build time) */ goto check_enum_ids; } diff --git a/drivers/pinctrl/renesas/pfc-r8a77470.c b/drivers/pinctrl/renesas/pfc-r8a77470.c index b3b116da1bb0dd3521fa2a619133e776079d6d2e..14005725a726b1c7fa9d46bb27da2bbde68de6a2 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77470.c +++ b/drivers/pinctrl/renesas/pfc-r8a77470.c @@ -2121,7 +2121,7 @@ static const unsigned int vin0_clk_mux[] = { VI0_CLK_MARK, }; /* - VIN1 ------------------------------------------------------------------- */ -static const union vin_data vin1_data_pins = { +static const union vin_data12 vin1_data_pins = { .data12 = { RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4), @@ -2131,7 +2131,7 @@ static const union vin_data vin1_data_pins = { RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16), }, }; -static const union vin_data vin1_data_mux = { +static const union vin_data12 vin1_data_mux = { .data12 = { VI1_DATA0_MARK, VI1_DATA1_MARK, VI1_DATA2_MARK, VI1_DATA3_MARK, diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 7f809a57bee50db67cab65e22fef2654220d7f8a..56fff83a143bda97c4379e82b99b9b888e0ce84f 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -1002,6 +1002,16 @@ samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev) return &(of_data->ctrl[id]); } +static void samsung_banks_of_node_put(struct samsung_pinctrl_drv_data *d) +{ + struct samsung_pin_bank *bank; + unsigned int i; + + bank = d->pin_banks; + for (i = 0; i < d->nr_banks; ++i, ++bank) + of_node_put(bank->of_node); +} + /* retrieve the soc 
specific data */ static const struct samsung_pin_ctrl * samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, @@ -1116,19 +1126,19 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) if (ctrl->retention_data) { drvdata->retention_ctrl = ctrl->retention_data->init(drvdata, ctrl->retention_data); - if (IS_ERR(drvdata->retention_ctrl)) - return PTR_ERR(drvdata->retention_ctrl); + if (IS_ERR(drvdata->retention_ctrl)) { + ret = PTR_ERR(drvdata->retention_ctrl); + goto err_put_banks; + } } ret = samsung_pinctrl_register(pdev, drvdata); if (ret) - return ret; + goto err_put_banks; ret = samsung_gpiolib_register(pdev, drvdata); - if (ret) { - samsung_pinctrl_unregister(pdev, drvdata); - return ret; - } + if (ret) + goto err_unregister; if (ctrl->eint_gpio_init) ctrl->eint_gpio_init(drvdata); @@ -1138,6 +1148,12 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) platform_set_drvdata(pdev, drvdata); return 0; + +err_unregister: + samsung_pinctrl_unregister(pdev, drvdata); +err_put_banks: + samsung_banks_of_node_put(drvdata); + return ret; } /* diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index e42a3a0005a72c562346e484b7b3ee307310ed06..be7f4f95f455daeb819746a5b64d4da1a8f7544b 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -36,6 +36,13 @@ #include "../core.h" #include "pinctrl-sunxi.h" +/* + * These lock classes tell lockdep that GPIO IRQs are in a different + * category than their parents, so it won't report false recursion. + */ +static struct lock_class_key sunxi_pinctrl_irq_lock_class; +static struct lock_class_key sunxi_pinctrl_irq_request_class; + static struct irq_chip sunxi_pinctrl_edge_irq_chip; static struct irq_chip sunxi_pinctrl_level_irq_chip; @@ -1552,6 +1559,8 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev, for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) { int irqno = irq_create_mapping(pctl->domain, i); + irq_set_lockdep_class(irqno, &sunxi_pinctrl_irq_lock_class, + &sunxi_pinctrl_irq_request_class); irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip, handle_edge_irq); irq_set_chip_data(irqno, pctl); diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile index f901d2e43166c339971aed719f8d749026dd4ad1..88cbc434c06b224b07c3b5d1cdc3fac7d2a8b9f9 100644 --- a/drivers/platform/chrome/Makefile +++ b/drivers/platform/chrome/Makefile @@ -2,6 +2,7 @@ # tell define_trace.h where to find the cros ec trace header CFLAGS_cros_ec_trace.o:= -I$(src) +CFLAGS_cros_ec_sensorhub_ring.o:= -I$(src) obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o @@ -20,7 +21,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o -cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o +cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c index 98e37080f760913ecdbeb53f611ce71cfbecbc7f..71948dade0e2aedd0a1f1b4ea09dc954c6fb5b5e 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c +++ 
b/drivers/platform/chrome/cros_ec_sensorhub_ring.c @@ -17,7 +17,8 @@ #include #include -#include "cros_ec_trace.h" +#define CREATE_TRACE_POINTS +#include "cros_ec_sensorhub_trace.h" /* Precision of fixed point for the m values from the filter */ #define M_PRECISION BIT(23) diff --git a/drivers/platform/chrome/cros_ec_sensorhub_trace.h b/drivers/platform/chrome/cros_ec_sensorhub_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..57d9b47859692710b2048007524a01457a2bca48 --- /dev/null +++ b/drivers/platform/chrome/cros_ec_sensorhub_trace.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Trace events for the ChromeOS Sensorhub kernel module + * + * Copyright 2021 Google LLC. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cros_ec + +#if !defined(_CROS_EC_SENSORHUB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _CROS_EC_SENSORHUB_TRACE_H_ + +#include +#include + +#include + +TRACE_EVENT(cros_ec_sensorhub_timestamp, + TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp, + s64 current_timestamp, s64 current_time), + TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp, + current_time), + TP_STRUCT__entry( + __field(u32, ec_sample_timestamp) + __field(u32, ec_fifo_timestamp) + __field(s64, fifo_timestamp) + __field(s64, current_timestamp) + __field(s64, current_time) + __field(s64, delta) + ), + TP_fast_assign( + __entry->ec_sample_timestamp = ec_sample_timestamp; + __entry->ec_fifo_timestamp = ec_fifo_timestamp; + __entry->fifo_timestamp = fifo_timestamp; + __entry->current_timestamp = current_timestamp; + __entry->current_time = current_time; + __entry->delta = current_timestamp - current_time; + ), + TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", + __entry->ec_sample_timestamp, + __entry->ec_fifo_timestamp, + __entry->fifo_timestamp, + __entry->current_timestamp, + __entry->current_time, + __entry->delta + ) +); + +TRACE_EVENT(cros_ec_sensorhub_data, + TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp, + s64 current_timestamp, s64 current_time), + TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time), + TP_STRUCT__entry( + __field(u32, ec_sensor_num) + __field(u32, ec_fifo_timestamp) + __field(s64, fifo_timestamp) + __field(s64, current_timestamp) + __field(s64, current_time) + __field(s64, delta) + ), + TP_fast_assign( + __entry->ec_sensor_num = ec_sensor_num; + __entry->ec_fifo_timestamp = ec_fifo_timestamp; + __entry->fifo_timestamp = fifo_timestamp; + __entry->current_timestamp = current_timestamp; + __entry->current_time = current_time; + __entry->delta = current_timestamp - current_time; + ), + TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", + __entry->ec_sensor_num, + __entry->ec_fifo_timestamp, + __entry->fifo_timestamp, + __entry->current_timestamp, + __entry->current_time, + __entry->delta + ) +); + +TRACE_EVENT(cros_ec_sensorhub_filter, + TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy), + TP_ARGS(state, dx, dy), + TP_STRUCT__entry( + __field(s64, dx) + __field(s64, dy) + __field(s64, median_m) + __field(s64, median_error) + __field(s64, history_len) + __field(s64, x) + __field(s64, y) + ), + TP_fast_assign( + __entry->dx = dx; + __entry->dy = dy; + __entry->median_m = state->median_m; + __entry->median_error = state->median_error; + __entry->history_len = state->history_len; + 
__entry->x = state->x_offset; + __entry->y = state->y_offset; + ), + TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld", + __entry->dx, + __entry->dy, + __entry->median_m, + __entry->median_error, + __entry->history_len, + __entry->x, + __entry->y + ) +); + + +#endif /* _CROS_EC_SENSORHUB_TRACE_H_ */ + +/* this part must be outside header guard */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cros_ec_sensorhub_trace + +#include diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h index 7e7cfc98657a4aea599e93efb75094f6df4c4144..9bb5cd2c98b8b4690f4633883103c21a801ba04d 100644 --- a/drivers/platform/chrome/cros_ec_trace.h +++ b/drivers/platform/chrome/cros_ec_trace.h @@ -15,7 +15,6 @@ #include #include #include -#include #include @@ -71,100 +70,6 @@ TRACE_EVENT(cros_ec_request_done, __entry->retval) ); -TRACE_EVENT(cros_ec_sensorhub_timestamp, - TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp, - s64 current_timestamp, s64 current_time), - TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp, - current_time), - TP_STRUCT__entry( - __field(u32, ec_sample_timestamp) - __field(u32, ec_fifo_timestamp) - __field(s64, fifo_timestamp) - __field(s64, current_timestamp) - __field(s64, current_time) - __field(s64, delta) - ), - TP_fast_assign( - __entry->ec_sample_timestamp = ec_sample_timestamp; - __entry->ec_fifo_timestamp = ec_fifo_timestamp; - __entry->fifo_timestamp = fifo_timestamp; - __entry->current_timestamp = current_timestamp; - __entry->current_time = current_time; - __entry->delta = current_timestamp - current_time; - ), - TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", - __entry->ec_sample_timestamp, - __entry->ec_fifo_timestamp, - __entry->fifo_timestamp, - __entry->current_timestamp, - __entry->current_time, - __entry->delta - ) -); - -TRACE_EVENT(cros_ec_sensorhub_data, - TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp, - s64 current_timestamp, s64 current_time), - TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time), - TP_STRUCT__entry( - __field(u32, ec_sensor_num) - __field(u32, ec_fifo_timestamp) - __field(s64, fifo_timestamp) - __field(s64, current_timestamp) - __field(s64, current_time) - __field(s64, delta) - ), - TP_fast_assign( - __entry->ec_sensor_num = ec_sensor_num; - __entry->ec_fifo_timestamp = ec_fifo_timestamp; - __entry->fifo_timestamp = fifo_timestamp; - __entry->current_timestamp = current_timestamp; - __entry->current_time = current_time; - __entry->delta = current_timestamp - current_time; - ), - TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", - __entry->ec_sensor_num, - __entry->ec_fifo_timestamp, - __entry->fifo_timestamp, - __entry->current_timestamp, - __entry->current_time, - __entry->delta - ) -); - -TRACE_EVENT(cros_ec_sensorhub_filter, - TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy), - TP_ARGS(state, dx, dy), - TP_STRUCT__entry( - __field(s64, dx) - __field(s64, dy) - __field(s64, median_m) - __field(s64, median_error) - __field(s64, history_len) - __field(s64, x) - __field(s64, y) - ), - TP_fast_assign( - __entry->dx = dx; - __entry->dy = dy; - __entry->median_m = state->median_m; - __entry->median_error = state->median_error; - 
__entry->history_len = state->history_len; - __entry->x = state->x_offset; - __entry->y = state->y_offset; - ), - TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld", - __entry->dx, - __entry->dy, - __entry->median_m, - __entry->median_error, - __entry->history_len, - __entry->x, - __entry->y - ) -); - - #endif /* _CROS_EC_TRACE_H_ */ /* this part must be outside header guard */ diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c index 036d54dc52e24d810d0b228603ecfb54981f18c3..cc336457ca808cdbfc6944485b2e805c63639233 100644 --- a/drivers/platform/chrome/cros_ec_typec.c +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -712,7 +712,13 @@ static int cros_typec_probe(struct platform_device *pdev) return -ENOMEM; typec->dev = dev; + typec->ec = dev_get_drvdata(pdev->dev.parent); + if (!typec->ec) { + dev_err(dev, "couldn't find parent EC device\n"); + return -ENODEV; + } + platform_set_drvdata(pdev, typec); ret = cros_typec_get_cmd_version(typec); diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c index a2d846c4a7eef53c05745e7ef9514662876563e9..eac3e6b4ea113064aa47f41bf587ab22a85ac68f 100644 --- a/drivers/platform/x86/huawei-wmi.c +++ b/drivers/platform/x86/huawei-wmi.c @@ -470,10 +470,17 @@ static DEVICE_ATTR_RW(charge_control_thresholds); static int huawei_wmi_battery_add(struct power_supply *battery) { - device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold); - device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold); + int err = 0; - return 0; + err = device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold); + if (err) + return err; + + err = device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold); + if (err) + device_remove_file(&battery->dev, &dev_attr_charge_control_start_threshold); + + return err; } static int huawei_wmi_battery_remove(struct power_supply *battery) diff --git a/drivers/platform/x86/surface3_power.c b/drivers/platform/x86/surface3_power.c index cc4f9cba68563c137c1c71785e8d6c26479ef2c6..01aacf1bee0749d63836c3553337871754c633a4 100644 --- a/drivers/platform/x86/surface3_power.c +++ b/drivers/platform/x86/surface3_power.c @@ -233,14 +233,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix) } bix->last_full_charg_capacity = ret; - /* get serial number */ + /* + * Get serial number, on some devices (with unofficial replacement + * battery?) reading any of the serial number range addresses gets + * nacked in this case just leave the serial number empty. 
+ */ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO, sizeof(buf), buf); - if (ret != sizeof(buf)) { + if (ret == -EREMOTEIO) { + /* no serial number available */ + } else if (ret != sizeof(buf)) { dev_err(&client->dev, "Error reading serial no: %d\n", ret); return ret; + } else { + snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); } - snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); /* get cycle count */ ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT); diff --git a/drivers/power/reset/gemini-poweroff.c b/drivers/power/reset/gemini-poweroff.c index 90e35c07240aee158c3478a4f6a248c15b138552..b7f7a8225f22e133726833e78b39636bec967607 100644 --- a/drivers/power/reset/gemini-poweroff.c +++ b/drivers/power/reset/gemini-poweroff.c @@ -107,8 +107,8 @@ static int gemini_poweroff_probe(struct platform_device *pdev) return PTR_ERR(gpw->base); irq = platform_get_irq(pdev, 0); - if (!irq) - return -EINVAL; + if (irq < 0) + return irq; gpw->dev = dev; diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index f1da757c939f8acdfa192997bfc3fb6325b15eee..a6b4a94c276627009ce4dbf4d25b92b3f200f73c 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -2541,8 +2541,10 @@ static int ab8500_fg_sysfs_init(struct ab8500_fg *di) ret = kobject_init_and_add(&di->fg_kobject, &ab8500_fg_ktype, NULL, "battery"); - if (ret < 0) + if (ret < 0) { + kobject_put(&di->fg_kobject); dev_err(di->dev, "failed to create sysfs entry\n"); + } return ret; } diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index 845af0f44c022c853a2d5dc9f0eb5351211132d3..8c3c378dce0d545076017f54a1c5ce1658fb13a4 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -41,6 +41,7 @@ #define BQ24190_REG_POC_CHG_CONFIG_DISABLE 0x0 #define BQ24190_REG_POC_CHG_CONFIG_CHARGE 0x1 #define BQ24190_REG_POC_CHG_CONFIG_OTG 0x2 +#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT 0x3 #define BQ24190_REG_POC_SYS_MIN_MASK (BIT(3) | BIT(2) | BIT(1)) #define BQ24190_REG_POC_SYS_MIN_SHIFT 1 #define BQ24190_REG_POC_SYS_MIN_MIN 3000 @@ -552,7 +553,11 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev) pm_runtime_mark_last_busy(bdi->dev); pm_runtime_put_autosuspend(bdi->dev); - return ret ? 
ret : val == BQ24190_REG_POC_CHG_CONFIG_OTG; + if (ret) + return ret; + + return (val == BQ24190_REG_POC_CHG_CONFIG_OTG || + val == BQ24190_REG_POC_CHG_CONFIG_OTG_ALT); } static const struct regulator_ops bq24190_vbus_ops = { diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c index e05cee457471bf1ff39db193930f5090309e1901..908cfd45d262403013fc0020d6a5a77218bfe47e 100644 --- a/drivers/power/supply/wm8350_power.c +++ b/drivers/power/supply/wm8350_power.c @@ -408,44 +408,112 @@ static const struct power_supply_desc wm8350_usb_desc = { * Initialisation *********************************************************************/ -static void wm8350_init_charger(struct wm8350 *wm8350) +static int wm8350_init_charger(struct wm8350 *wm8350) { + int ret; + /* register our interest in charger events */ - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, wm8350_charger_handler, 0, "Battery hot", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, + if (ret) + goto err; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, wm8350_charger_handler, 0, "Battery cold", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, + if (ret) + goto free_chg_bat_hot; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, wm8350_charger_handler, 0, "Battery fail", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO, + if (ret) + goto free_chg_bat_cold; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350_charger_handler, 0, "Charger timeout", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END, + if (ret) + goto free_chg_bat_fail; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END, wm8350_charger_handler, 0, "Charge end", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START, + if (ret) + goto free_chg_to; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START, wm8350_charger_handler, 0, "Charge start", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, + if (ret) + goto free_chg_end; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350_charger_handler, 0, "Fast charge ready", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, + if (ret) + goto free_chg_start; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350_charger_handler, 0, "Battery <3.9V", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, + if (ret) + goto free_chg_fast_rdy; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350_charger_handler, 0, "Battery <3.1V", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, + if (ret) + goto free_chg_vbatt_lt_3p9; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350_charger_handler, 0, "Battery <2.85V", wm8350); + if (ret) + goto free_chg_vbatt_lt_3p1; /* and supply change events */ - wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB, wm8350_charger_handler, 0, "USB", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, + if (ret) + goto free_chg_vbatt_lt_2p85; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, wm8350_charger_handler, 0, "Wall", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB, + if (ret) + goto free_ext_usb_fb; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB, wm8350_charger_handler, 0, "Battery", wm8350); + if (ret) + goto free_ext_wall_fb; + + return 0; + +free_ext_wall_fb: + 
wm8350_free_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, wm8350); +free_ext_usb_fb: + wm8350_free_irq(wm8350, WM8350_IRQ_EXT_USB_FB, wm8350); +free_chg_vbatt_lt_2p85: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350); +free_chg_vbatt_lt_3p1: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350); +free_chg_vbatt_lt_3p9: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350); +free_chg_fast_rdy: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350); +free_chg_start: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350); +free_chg_end: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350); +free_chg_to: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350); +free_chg_bat_fail: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, wm8350); +free_chg_bat_cold: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, wm8350); +free_chg_bat_hot: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, wm8350); +err: + return ret; } static void free_charger_irq(struct wm8350 *wm8350) @@ -456,6 +524,7 @@ static void free_charger_irq(struct wm8350 *wm8350) wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350); + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350); diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c index 5ff11145c1a309716227196c909894a22ca90476..9b15b6a79082a776ee1d77f211b612e1162c79dd 100644 --- a/drivers/pwm/pwm-lpc18xx-sct.c +++ b/drivers/pwm/pwm-lpc18xx-sct.c @@ -400,12 +400,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_LIMIT, BIT(lpc18xx_pwm->period_event)); - ret = pwmchip_add(&lpc18xx_pwm->chip); - if (ret < 0) { - dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret); - goto disable_pwmclk; - } - for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) { struct lpc18xx_pwm_data *data; @@ -415,14 +409,12 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) GFP_KERNEL); if (!data) { ret = -ENOMEM; - goto remove_pwmchip; + goto disable_pwmclk; } pwm_set_chip_data(pwm, data); } - platform_set_drvdata(pdev, lpc18xx_pwm); - val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL); val &= ~LPC18XX_PWM_BIDIR; val &= ~LPC18XX_PWM_CTRL_HALT; @@ -430,10 +422,16 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) val |= LPC18XX_PWM_PRE(0); lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val); + ret = pwmchip_add(&lpc18xx_pwm->chip); + if (ret < 0) { + dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret); + goto disable_pwmclk; + } + + platform_set_drvdata(pdev, lpc18xx_pwm); + return 0; -remove_pwmchip: - pwmchip_remove(&lpc18xx_pwm->chip); disable_pwmclk: clk_disable_unprepare(lpc18xx_pwm->pwm_clk); return ret; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index a6d27334a71d20cd49de05389dcced0341b4eca3..c65299f8c01d0a079d348f400d8228b76493a134 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -5868,9 +5868,8 @@ core_initcall(regulator_init); static int regulator_late_cleanup(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); - const struct regulator_ops *ops = rdev->desc->ops; struct regulation_constraints *c = rdev->constraints; - int enabled, ret; + int ret; if (c && c->always_on) return 0; @@ -5883,14 +5882,8 @@ static 
int regulator_late_cleanup(struct device *dev, void *data) if (rdev->use_count) goto unlock; - /* If we can't read the status assume it's always on. */ - if (ops->is_enabled) - enabled = ops->is_enabled(rdev); - else - enabled = 1; - - /* But if reading the status failed, assume that it's off. */ - if (enabled <= 0) + /* If reading the status failed, assume that it's off. */ + if (_regulator_is_enabled(rdev) <= 0) goto unlock; if (have_full_constraints()) { diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 03e146e98abd5eadb82b213824fba5a252f700b8..8d784a2a09d867c04d74486a33ec516d7a767b65 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -1185,8 +1185,10 @@ static int rpm_reg_probe(struct platform_device *pdev) for_each_available_child_of_node(dev->of_node, node) { vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL); - if (!vreg) + if (!vreg) { + of_node_put(node); return -ENOMEM; + } ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data); diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c index ee46bfbf5eee7e25380208372c5ac0a47ae8293f..991b4730d7687a89a6f8696d4e36597365d6e8d9 100644 --- a/drivers/regulator/rpi-panel-attiny-regulator.c +++ b/drivers/regulator/rpi-panel-attiny-regulator.c @@ -37,11 +37,24 @@ static const struct regmap_config attiny_regmap_config = { static int attiny_lcd_power_enable(struct regulator_dev *rdev) { unsigned int data; + int ret, i; regmap_write(rdev->regmap, REG_POWERON, 1); + msleep(80); + /* Wait for nPWRDWN to go low to indicate poweron is done. */ - regmap_read_poll_timeout(rdev->regmap, REG_PORTB, data, - data & BIT(0), 10, 1000000); + for (i = 0; i < 20; i++) { + ret = regmap_read(rdev->regmap, REG_PORTB, &data); + if (!ret) { + if (data & BIT(0)) + break; + } + usleep_range(10000, 12000); + } + usleep_range(10000, 12000); + + if (ret) + pr_err("%s: regmap_read_poll_timeout failed %d\n", __func__, ret); /* Default to the same orientation as the closed source * firmware used for the panel. 
Runtime rotation @@ -57,23 +70,34 @@ static int attiny_lcd_power_disable(struct regulator_dev *rdev) { regmap_write(rdev->regmap, REG_PWM, 0); regmap_write(rdev->regmap, REG_POWERON, 0); - udelay(1); + msleep(30); return 0; } static int attiny_lcd_power_is_enabled(struct regulator_dev *rdev) { unsigned int data; - int ret; + int ret, i; - ret = regmap_read(rdev->regmap, REG_POWERON, &data); + for (i = 0; i < 10; i++) { + ret = regmap_read(rdev->regmap, REG_POWERON, &data); + if (!ret) + break; + usleep_range(10000, 12000); + } if (ret < 0) return ret; if (!(data & BIT(0))) return 0; - ret = regmap_read(rdev->regmap, REG_PORTB, &data); + for (i = 0; i < 10; i++) { + ret = regmap_read(rdev->regmap, REG_PORTB, &data); + if (!ret) + break; + usleep_range(10000, 12000); + } + if (ret < 0) return ret; @@ -103,20 +127,32 @@ static int attiny_update_status(struct backlight_device *bl) { struct regmap *regmap = bl_get_data(bl); int brightness = bl->props.brightness; + int ret, i; if (bl->props.power != FB_BLANK_UNBLANK || bl->props.fb_blank != FB_BLANK_UNBLANK) brightness = 0; - return regmap_write(regmap, REG_PWM, brightness); + for (i = 0; i < 10; i++) { + ret = regmap_write(regmap, REG_PWM, brightness); + if (!ret) + break; + } + + return ret; } static int attiny_get_brightness(struct backlight_device *bl) { struct regmap *regmap = bl_get_data(bl); - int ret, brightness; + int ret, brightness, i; + + for (i = 0; i < 10; i++) { + ret = regmap_read(regmap, REG_PWM, &brightness); + if (!ret) + break; + } - ret = regmap_read(regmap, REG_PWM, &brightness); if (ret) return ret; @@ -166,7 +202,7 @@ static int attiny_i2c_probe(struct i2c_client *i2c, } regmap_write(regmap, REG_POWERON, 0); - mdelay(1); + msleep(30); config.dev = &i2c->dev; config.regmap = regmap; diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c index 9eb599701f9b046b6369c844954f8429894e5511..c39138d39cf07c6ec01b7b802527a68efe221106 100644 --- a/drivers/remoteproc/qcom_q6v5_adsp.c +++ b/drivers/remoteproc/qcom_q6v5_adsp.c @@ -406,6 +406,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp) } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) return ret; diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index ebc3e755bcbcd488229024a2614e6dbfaf6af675..1b3aa84e36e7ad051daeab86cd776d6236268a96 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1594,18 +1594,20 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) * reserved memory regions from device's memory-region property. 
*/ child = of_get_child_by_name(qproc->dev->of_node, "mba"); - if (!child) + if (!child) { node = of_parse_phandle(qproc->dev->of_node, "memory-region", 0); - else + } else { node = of_parse_phandle(child, "memory-region", 0); + of_node_put(child); + } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) { dev_err(qproc->dev, "unable to resolve mba region\n"); return ret; } - of_node_put(node); qproc->mba_phys = r.start; qproc->mba_size = resource_size(&r); @@ -1622,14 +1624,15 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) } else { child = of_get_child_by_name(qproc->dev->of_node, "mpss"); node = of_parse_phandle(child, "memory-region", 0); + of_node_put(child); } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) { dev_err(qproc->dev, "unable to resolve mpss region\n"); return ret; } - of_node_put(node); qproc->mpss_phys = qproc->mpss_reloc = r.start; qproc->mpss_size = resource_size(&r); diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index e2573f79a137d71a00c92916e7c305cc3268cc4d..67286a4505cd1ce401aa57f6e1fbb4cbfb11331b 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -448,6 +448,7 @@ static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss) } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) return ret; diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 7e5845376e9faebbe726393bfd7eb14d0567c50f..e8bb0ee6b35ac788637784c7e74a01004fd1fadf 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -76,7 +76,7 @@ static ssize_t rproc_coredump_write(struct file *filp, int ret, err = 0; char buf[20]; - if (count > sizeof(buf)) + if (count < 1 || count > sizeof(buf)) return -EINVAL; ret = copy_from_user(buf, user_buf, count); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 794a4f036b99834c8fa15bcc834dddcfaa1090ba..146056858135e304fce9d0a173d8d667427b5394 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -807,9 +807,13 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue); struct rtc_time tm; ktime_t now; + int err; + + err = __rtc_read_time(rtc, &tm); + if (err) + return err; timer->enabled = 1; - __rtc_read_time(rtc, &tm); now = rtc_tm_to_ktime(tm); /* Skip over expired timers */ @@ -823,7 +827,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) trace_rtc_timer_enqueue(timer); if (!next || ktime_before(timer->node.expires, next->expires)) { struct rtc_wkalrm alarm; - int err; alarm.time = rtc_ktime_to_tm(timer->node.expires); alarm.enabled = 1; diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c index ebe03eba8f5ffafa19823082cfe198e8ea658cdb..87c93843d62add93f09e048baa309b868564dbe5 100644 --- a/drivers/rtc/rtc-pl030.c +++ b/drivers/rtc/rtc-pl030.c @@ -137,7 +137,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id) return ret; } -static int pl030_remove(struct amba_device *dev) +static void pl030_remove(struct amba_device *dev) { struct pl030_rtc *rtc = amba_get_drvdata(dev); @@ -146,8 +146,6 @@ static int pl030_remove(struct amba_device *dev) free_irq(dev->irq[0], rtc); iounmap(rtc->base); amba_release_regions(dev); - - return 0; } static struct amba_id pl030_ids[] = { diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 
d4b2ab7861266e367ce87a590fb3f4fd8611318d..2f5581ea26fe152b16d374300b12db45d427bfa0 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -280,7 +280,7 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) return 0; } -static int pl031_remove(struct amba_device *adev) +static void pl031_remove(struct amba_device *adev) { struct pl031_local *ldata = dev_get_drvdata(&adev->dev); @@ -289,8 +289,6 @@ static int pl031_remove(struct amba_device *adev) if (adev->irq[0]) free_irq(adev->irq[0], ldata); amba_release_regions(adev); - - return 0; } static int pl031_probe(struct amba_device *adev, const struct amba_id *id) diff --git a/drivers/rtc/rtc-sw64-virt.c b/drivers/rtc/rtc-sw64-virt.c index 549d2e2d8a01d55989917e1836b2252d7eaa0340..23c93d7ddbae7281d04f4f86137018cc297b77f4 100644 --- a/drivers/rtc/rtc-sw64-virt.c +++ b/drivers/rtc/rtc-sw64-virt.c @@ -14,18 +14,40 @@ #include #define RTC_IO_ADDR (0x804910000000ULL) +unsigned long vtime_old, vtime_new; static int sw64_virt_read_time(struct device *dev, struct rtc_time *tm) { unsigned long *ioaddr; + unsigned long vtime_now; + long vtime_offset; ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); - rtc_time64_to_tm(*ioaddr, tm); + if (!vtime_new) { + rtc_time64_to_tm(*ioaddr, tm); + } else { + vtime_now = *ioaddr; + vtime_offset = vtime_new - vtime_old; + vtime_now += vtime_offset; + rtc_time64_to_tm(vtime_now, tm); + } + return 0; +} + +static int sw64_virt_set_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + + ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); + vtime_old = *ioaddr; + + vtime_new = rtc_tm_to_time64(tm); return 0; } static const struct rtc_class_ops rtc_sw64_virt_ops = { .read_time = sw64_virt_read_time, + .set_time = sw64_virt_set_time, }; static int __init rtc_sw64_virt_probe(struct platform_device *pdev) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 4cbe8711b6d4a8b999760a13bb5a37c07976070b..a1c6a67da132a37ff9a1c2d7abf46ec9a85eb049 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -2852,13 +2852,13 @@ EXPORT_SYMBOL_GPL(hisi_sas_remove); #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) #define DEBUGFS_ENABLE_DEFAULT "enabled" bool hisi_sas_debugfs_enable = true; -u32 hisi_sas_debugfs_dump_count = 50; #else #define DEBUGFS_ENABLE_DEFAULT "disabled" bool hisi_sas_debugfs_enable; -u32 hisi_sas_debugfs_dump_count = 1; #endif +u32 hisi_sas_debugfs_dump_count = 1; + EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); MODULE_PARM_DESC(hisi_sas_debugfs_enable, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index b896d60e20acbb715bd81750ecbf2606b75a53dc..3ecc61eb721498a775ea7b7db3017cd3b4109a94 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -527,7 +527,7 @@ MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)"); /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ static int prot_mask; -module_param(prot_mask, int, 0); +module_param(prot_mask, int, 0444); MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 "); static void debugfs_work_handler_v3_hw(struct work_struct *work); diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index dba3f5bec6bedf10dbcd848628273bbcb9d18e8a..f92b889369c3996bfb6e5a0d0da1379c286185f6 100644 --- 
a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -203,7 +203,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) task->total_xfer_len = qc->nbytes; task->num_scatter = qc->n_elem; task->data_dir = qc->dma_dir; - } else if (qc->tf.protocol == ATA_PROT_NODATA) { + } else if (!ata_is_data(qc->tf.protocol)) { task->data_dir = DMA_NONE; } else { for_each_sg(qc->sg, sg, qc->n_elem, si) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 3fbbdf084d67a3f574b2de37c6394d3e3b2c4639..3153f164554aabaa6fbbed8d847b76316cc2b318 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -1832,9 +1832,10 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll) enable_irq(reply_q->os_irq); } } + + if (poll) + _base_process_reply_queue(reply_q); } - if (poll) - _base_process_reply_queue(reply_q); } /** diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index eb646a78a04ea4060d229a25c1cf3d23f9cbb89b..a8219a42c786a7ed99b874bfb5a8d2107c68063f 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1727,6 +1727,7 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, ccb->device = pm8001_ha_dev; ccb->ccb_tag = ccb_tag; ccb->task = task; + ccb->n_elem = 0; circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -1788,6 +1789,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, ccb->device = pm8001_ha_dev; ccb->ccb_tag = ccb_tag; ccb->task = task; + ccb->n_elem = 0; pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; @@ -1804,7 +1806,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, sata_cmd.tag = cpu_to_le32(ccb_tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9)); + sata_cmd.ncqtag_atap_dir_m = cpu_to_le32((0x1 << 7) | (0x5 << 9)); memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, @@ -2365,7 +2367,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) len = sizeof(struct pio_setup_fis); pm8001_dbg(pm8001_ha, IO, "PIO read len = %d\n", len); - } else if (t->ata_task.use_ncq) { + } else if (t->ata_task.use_ncq && + t->data_dir != DMA_NONE) { len = sizeof(struct set_dev_bits_fis); pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n", len); @@ -4234,22 +4237,22 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; - if (task->data_dir == DMA_NONE) { + + if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) { ATAP = 0x04; /* no data*/ pm8001_dbg(pm8001_ha, IO, "no data\n"); } else if (likely(!task->ata_task.device_control_reg_update)) { - if (task->ata_task.dma_xfer) { + if (task->ata_task.use_ncq && + dev->sata_dev.class != ATA_DEV_ATAPI) { + ATAP = 0x07; /* FPDMA */ + pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); + } else if (task->ata_task.dma_xfer) { ATAP = 0x06; /* DMA */ pm8001_dbg(pm8001_ha, IO, "DMA\n"); } else { ATAP = 0x05; /* PIO*/ pm8001_dbg(pm8001_ha, IO, "PIO\n"); } - if (task->ata_task.use_ncq && - dev->sata_dev.class != ATA_DEV_ATAPI) { - ATAP = 0x07; /* FPDMA */ - pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); - } } if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); @@ -4589,7 +4592,7 @@ int 
pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); if (pm8001_ha->chip_id != chip_8001) - sspTMCmd.ds_ads_m = 0x08; + sspTMCmd.ds_ads_m = cpu_to_le32(0x08); circularQ = &pm8001_ha->inbnd_q_tbl[0]; ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, sizeof(sspTMCmd), 0); diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 72d37c934667c8905412df2debf6963cb1d09172..2b3ee963e3753e8ab3beffe696424dbc380f7302 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -1199,9 +1199,11 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) else page_code = THERMAL_PAGE_CODE_8H; - payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) | - (THERMAL_ENABLE << 8) | page_code; - payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8); + payload.cfg_pg[0] = + cpu_to_le32((THERMAL_LOG_ENABLE << 9) | + (THERMAL_ENABLE << 8) | page_code); + payload.cfg_pg[1] = + cpu_to_le32((LTEMPHIL << 24) | (RTEMPHIL << 8)); pm8001_dbg(pm8001_ha, DEV, "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n", @@ -1241,43 +1243,41 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) circularQ = &pm8001_ha->inbnd_q_tbl[0]; payload.tag = cpu_to_le32(tag); - SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE; - SASConfigPage.MST_MSI = 3 << 15; - SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO; - SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) | - (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER; - SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME; - - if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF) - SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF; - - - SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) | - SAS_OPNRJT_RTRY_INTVL; - SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16) - | SAS_COPNRJT_RTRY_TMO; - SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16) - | SAS_COPNRJT_RTRY_THR; - SASConfigPage.MAX_AIP = SAS_MAX_AIP; + SASConfigPage.pageCode = cpu_to_le32(SAS_PROTOCOL_TIMER_CONFIG_PAGE); + SASConfigPage.MST_MSI = cpu_to_le32(3 << 15); + SASConfigPage.STP_SSP_MCT_TMO = + cpu_to_le32((STP_MCT_TMO << 16) | SSP_MCT_TMO); + SASConfigPage.STP_FRM_TMO = + cpu_to_le32((SAS_MAX_OPEN_TIME << 24) | + (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER); + SASConfigPage.STP_IDLE_TMO = cpu_to_le32(STP_IDLE_TIME); + + SASConfigPage.OPNRJT_RTRY_INTVL = + cpu_to_le32((SAS_MFD << 16) | SAS_OPNRJT_RTRY_INTVL); + SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = + cpu_to_le32((SAS_DOPNRJT_RTRY_TMO << 16) | SAS_COPNRJT_RTRY_TMO); + SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = + cpu_to_le32((SAS_DOPNRJT_RTRY_THR << 16) | SAS_COPNRJT_RTRY_THR); + SASConfigPage.MAX_AIP = cpu_to_le32(SAS_MAX_AIP); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n", - SASConfigPage.pageCode); + le32_to_cpu(SASConfigPage.pageCode)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n", - SASConfigPage.MST_MSI); + le32_to_cpu(SASConfigPage.MST_MSI)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n", - SASConfigPage.STP_SSP_MCT_TMO); + le32_to_cpu(SASConfigPage.STP_SSP_MCT_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n", - SASConfigPage.STP_FRM_TMO); + le32_to_cpu(SASConfigPage.STP_FRM_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n", - SASConfigPage.STP_IDLE_TMO); + le32_to_cpu(SASConfigPage.STP_IDLE_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 
0x%08x\n", - SASConfigPage.OPNRJT_RTRY_INTVL); + le32_to_cpu(SASConfigPage.OPNRJT_RTRY_INTVL)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n", - SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO); + le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n", - SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR); + le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n", - SASConfigPage.MAX_AIP); + le32_to_cpu(SASConfigPage.MAX_AIP)); memcpy(&payload.cfg_pg, &SASConfigPage, sizeof(SASProtocolTimerConfig_t)); @@ -1403,12 +1403,13 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) /* Currently only one key is used. New KEK index is 1. * Current KEK index is 1. Store KEK to NVRAM is 1. */ - payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) | - KEK_MGMT_SUBOP_KEYCARDUPDATE); + payload.new_curidx_ksop = + cpu_to_le32(((1 << 24) | (1 << 16) | (1 << 8) | + KEK_MGMT_SUBOP_KEYCARDUPDATE)); pm8001_dbg(pm8001_ha, DEV, "Saving Encryption info to flash. payload 0x%x\n", - payload.new_curidx_ksop); + le32_to_cpu(payload.new_curidx_ksop)); rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, sizeof(payload), 0); @@ -1749,6 +1750,7 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, ccb->device = pm8001_ha_dev; ccb->ccb_tag = ccb_tag; ccb->task = task; + ccb->n_elem = 0; circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -1830,7 +1832,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, sata_cmd.tag = cpu_to_le32(ccb_tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9)); + sata_cmd.ncqtag_atap_dir_m_dad = cpu_to_le32(((0x1 << 7) | (0x5 << 9))); memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, @@ -2464,7 +2466,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) len = sizeof(struct pio_setup_fis); pm8001_dbg(pm8001_ha, IO, "PIO read len = %d\n", len); - } else if (t->ata_task.use_ncq) { + } else if (t->ata_task.use_ncq && + t->data_dir != DMA_NONE) { len = sizeof(struct set_dev_bits_fis); pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n", len); @@ -4316,13 +4319,15 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, struct ssp_ini_io_start_req ssp_cmd; u32 tag = ccb->ccb_tag; int ret; - u64 phys_addr, start_addr, end_addr; + u64 phys_addr, end_addr; u32 end_addr_high, end_addr_low; struct inbound_queue_table *circularQ; u32 q_index, cpu_id; u32 opc = OPC_INB_SSPINIIOSTART; + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + /* data address domain added for spcv; set to 0 by host, * used internally by controller * 0 for SAS 1.1 and SAS 2.0 compatible TLR @@ -4333,7 +4338,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); ssp_cmd.tag = cpu_to_le32(tag); if (task->ssp_task.enable_first_burst) - ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; + ssp_cmd.ssp_iu.efb_prio_attr = 0x80; ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, @@ -4365,21 +4370,24 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.enc_esgl = cpu_to_le32(1<<31); } else if 
(task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.enc_addr_low = cpu_to_le32(lower_32_bits(dma_addr)); ssp_cmd.enc_addr_high = cpu_to_le32(upper_32_bits(dma_addr)); ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); ssp_cmd.enc_esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - end_addr = (start_addr + ssp_cmd.enc_len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); - if (end_addr_high != ssp_cmd.enc_addr_high) { + end_addr = dma_addr + le32_to_cpu(ssp_cmd.enc_len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + + if (end_addr_high != le32_to_cpu(ssp_cmd.enc_addr_high)) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, ssp_cmd.enc_len, + dma_addr, + le32_to_cpu(ssp_cmd.enc_len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); @@ -4388,7 +4396,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(lower_32_bits(phys_addr)); ssp_cmd.enc_addr_high = cpu_to_le32(upper_32_bits(phys_addr)); - ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + ssp_cmd.enc_esgl = cpu_to_le32(1U<<31); } } else if (task->num_scatter == 0) { ssp_cmd.enc_addr_low = 0; @@ -4396,8 +4404,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); ssp_cmd.enc_esgl = 0; } + /* XTS mode. All other fields are 0 */ - ssp_cmd.key_cmode = 0x6 << 4; + ssp_cmd.key_cmode = cpu_to_le32(0x6 << 4); + /* set tweak values. Should be the start lba */ ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) | (task->ssp_task.cmd->cmnd[3] << 16) | @@ -4419,20 +4429,22 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.esgl = cpu_to_le32(1<<31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr)); ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - end_addr = (start_addr + ssp_cmd.len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); - if (end_addr_high != ssp_cmd.addr_high) { + end_addr = dma_addr + le32_to_cpu(ssp_cmd.len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != le32_to_cpu(ssp_cmd.addr_high)) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, ssp_cmd.len, + dma_addr, + le32_to_cpu(ssp_cmd.len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); @@ -4466,7 +4478,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 q_index, cpu_id; struct sata_start_req sata_cmd; u32 hdr_tag, ncg_tag = 0; - u64 phys_addr, start_addr, end_addr; + u64 phys_addr, end_addr; u32 end_addr_high, end_addr_low; u32 ATAP = 0x0; u32 dir; @@ -4478,22 +4490,21 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num); circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; - if (task->data_dir == DMA_NONE) { + if (task->data_dir == DMA_NONE 
&& !task->ata_task.use_ncq) { ATAP = 0x04; /* no data*/ pm8001_dbg(pm8001_ha, IO, "no data\n"); } else if (likely(!task->ata_task.device_control_reg_update)) { - if (task->ata_task.dma_xfer) { + if (task->ata_task.use_ncq && + dev->sata_dev.class != ATA_DEV_ATAPI) { + ATAP = 0x07; /* FPDMA */ + pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); + } else if (task->ata_task.dma_xfer) { ATAP = 0x06; /* DMA */ pm8001_dbg(pm8001_ha, IO, "DMA\n"); } else { ATAP = 0x05; /* PIO*/ pm8001_dbg(pm8001_ha, IO, "PIO\n"); } - if (task->ata_task.use_ncq && - dev->sata_dev.class != ATA_DEV_ATAPI) { - ATAP = 0x07; /* FPDMA */ - pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); - } } if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); @@ -4527,32 +4538,38 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); phys_addr = ccb->ccb_dma_handle; - sata_cmd.enc_addr_low = lower_32_bits(phys_addr); - sata_cmd.enc_addr_high = upper_32_bits(phys_addr); + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); sata_cmd.enc_esgl = cpu_to_le32(1 << 31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); - sata_cmd.enc_addr_low = lower_32_bits(dma_addr); - sata_cmd.enc_addr_high = upper_32_bits(dma_addr); + + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); sata_cmd.enc_esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - end_addr = (start_addr + sata_cmd.enc_len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); - if (end_addr_high != sata_cmd.enc_addr_high) { + end_addr = dma_addr + le32_to_cpu(sata_cmd.enc_len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != le32_to_cpu(sata_cmd.enc_addr_high)) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, sata_cmd.enc_len, + dma_addr, + le32_to_cpu(sata_cmd.enc_len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); phys_addr = ccb->ccb_dma_handle; sata_cmd.enc_addr_low = - lower_32_bits(phys_addr); + cpu_to_le32(lower_32_bits(phys_addr)); sata_cmd.enc_addr_high = - upper_32_bits(phys_addr); + cpu_to_le32(upper_32_bits(phys_addr)); sata_cmd.enc_esgl = cpu_to_le32(1 << 31); } @@ -4563,7 +4580,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.enc_esgl = 0; } /* XTS mode. All other fields are 0 */ - sata_cmd.key_index_mode = 0x6 << 4; + sata_cmd.key_index_mode = cpu_to_le32(0x6 << 4); + /* set tweak values. 
Should be the start lba */ sata_cmd.twk_val0 = cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) | @@ -4589,31 +4607,31 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, phys_addr = ccb->ccb_dma_handle; sata_cmd.addr_low = lower_32_bits(phys_addr); sata_cmd.addr_high = upper_32_bits(phys_addr); - sata_cmd.esgl = cpu_to_le32(1 << 31); + sata_cmd.esgl = cpu_to_le32(1U << 31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.addr_low = lower_32_bits(dma_addr); sata_cmd.addr_high = upper_32_bits(dma_addr); sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - end_addr = (start_addr + sata_cmd.len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); + end_addr = dma_addr + le32_to_cpu(sata_cmd.len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); if (end_addr_high != sata_cmd.addr_high) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, sata_cmd.len, + dma_addr, + le32_to_cpu(sata_cmd.len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); phys_addr = ccb->ccb_dma_handle; - sata_cmd.addr_low = - lower_32_bits(phys_addr); - sata_cmd.addr_high = - upper_32_bits(phys_addr); - sata_cmd.esgl = cpu_to_le32(1 << 31); + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1U << 31); } } else if (task->num_scatter == 0) { sata_cmd.addr_low = 0; @@ -4621,27 +4639,28 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; } + /* scsi cdb */ sata_cmd.atapi_scsi_cdb[0] = cpu_to_le32(((task->ata_task.atapi_packet[0]) | - (task->ata_task.atapi_packet[1] << 8) | - (task->ata_task.atapi_packet[2] << 16) | - (task->ata_task.atapi_packet[3] << 24))); + (task->ata_task.atapi_packet[1] << 8) | + (task->ata_task.atapi_packet[2] << 16) | + (task->ata_task.atapi_packet[3] << 24))); sata_cmd.atapi_scsi_cdb[1] = cpu_to_le32(((task->ata_task.atapi_packet[4]) | - (task->ata_task.atapi_packet[5] << 8) | - (task->ata_task.atapi_packet[6] << 16) | - (task->ata_task.atapi_packet[7] << 24))); + (task->ata_task.atapi_packet[5] << 8) | + (task->ata_task.atapi_packet[6] << 16) | + (task->ata_task.atapi_packet[7] << 24))); sata_cmd.atapi_scsi_cdb[2] = cpu_to_le32(((task->ata_task.atapi_packet[8]) | - (task->ata_task.atapi_packet[9] << 8) | - (task->ata_task.atapi_packet[10] << 16) | - (task->ata_task.atapi_packet[11] << 24))); + (task->ata_task.atapi_packet[9] << 8) | + (task->ata_task.atapi_packet[10] << 16) | + (task->ata_task.atapi_packet[11] << 24))); sata_cmd.atapi_scsi_cdb[3] = cpu_to_le32(((task->ata_task.atapi_packet[12]) | - (task->ata_task.atapi_packet[13] << 8) | - (task->ata_task.atapi_packet[14] << 16) | - (task->ata_task.atapi_packet[15] << 24))); + (task->ata_task.atapi_packet[13] << 8) | + (task->ata_task.atapi_packet[14] << 16) | + (task->ata_task.atapi_packet[15] << 24))); } /* Check for read log for failed drive and return */ diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index e40a37236aa10a5d0c2728d957122f3b858d56da..d0407f44de78da687d67b87922cf77eaab2070e3 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ 
b/drivers/scsi/qla2xxx/qla_attr.c @@ -555,7 +555,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EINVAL; - if (IS_NOCACHE_VPD_TYPE(ha)) + if (!IS_NOCACHE_VPD_TYPE(ha)) goto skip; faddr = ha->flt_region_vpd << 2; @@ -739,7 +739,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x706f, "Issuing MPI reset.\n"); - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + if (IS_QLA83XX(ha)) { uint32_t idc_control; qla83xx_idc_lock(vha, 0); @@ -1050,9 +1050,6 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) continue; if (iter->type == 3 && !(IS_CNA_CAPABLE(ha))) continue; - if (iter->type == 0x27 && - (!IS_QLA27XX(ha) || !IS_QLA28XX(ha))) - continue; sysfs_remove_bin_file(&host->shost_gendev.kobj, iter->attr); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e1fd91a58120240b281842dff166e3f10f3d94b5..8a8e0920d2b41e478c173f67445292faa6cc6528 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2796,7 +2796,11 @@ struct ct_fdmi2_hba_attributes { #define FDMI_PORT_SPEED_8GB 0x10 #define FDMI_PORT_SPEED_16GB 0x20 #define FDMI_PORT_SPEED_32GB 0x40 -#define FDMI_PORT_SPEED_64GB 0x80 +#define FDMI_PORT_SPEED_20GB 0x80 +#define FDMI_PORT_SPEED_40GB 0x100 +#define FDMI_PORT_SPEED_128GB 0x200 +#define FDMI_PORT_SPEED_64GB 0x400 +#define FDMI_PORT_SPEED_256GB 0x800 #define FDMI_PORT_SPEED_UNKNOWN 0x8000 #define FC_CLASS_2 0x04 @@ -5171,4 +5175,8 @@ struct sff_8247_a0 { #include "qla_gbl.h" #include "qla_dbg.h" #include "qla_inline.h" + +#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \ + _fcport->disc_state == DSC_DELETED) + #endif diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index e28c4b7ec55ffb55ce4aa10e43c62719955007f5..73015c69b5e892b1c909659064212fa19a4ab726 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -676,8 +676,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type) return (QLA_SUCCESS); } - return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), - FC4_TYPE_FCP_SCSI); + return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type); } static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, @@ -727,7 +726,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id); ct_req->req.rff_id.fc4_feature = fc4feature; - ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */ + ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */ sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index fdae25ec554d9f1ba2ee6b35ba52dea859f76e5f..9452848ede3f86d29e9f1b5df196f8a484e37e9d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -570,6 +570,14 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; + if (IS_SESSION_DELETED(fcport)) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC is being delete - not sending command.\n", + __func__, fcport->port_name); + fcport->flags &= ~FCF_ASYNC_ACTIVE; + return rval; + } + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; @@ -953,6 +961,9 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, 
set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } break; + case ISP_CFG_NL: + qla24xx_fcport_handle_login(vha, fcport); + break; default: break; } @@ -1313,14 +1324,21 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) struct port_database_24xx *pd; struct qla_hw_data *ha = vha->hw; - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || - fcport->loop_id == FC_NO_LOOP_ID) { + if (IS_SESSION_DELETED(fcport)) { ql_log(ql_log_warn, vha, 0xffff, - "%s: %8phC - not sending command.\n", - __func__, fcport->port_name); + "%s: %8phC is being delete - not sending command.\n", + __func__, fcport->port_name); + fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } + if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC online %d flags %x - not sending command.\n", + __func__, fcport->port_name, vha->flags.online, fcport->flags); + goto done; + } + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -1480,6 +1498,11 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport) u8 login = 0; int rc; + ql_dbg(ql_dbg_disc, vha, 0x307b, + "%s %8phC DS %d LS %d lid %d retries=%d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->loop_id, fcport->login_retry); + if (qla_tgt_mode_enabled(vha)) return; @@ -1537,7 +1560,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen, fcport->login_gen, fcport->loop_id, fcport->scan_state); - if (fcport->scan_state != QLA_FCPORT_FOUND) + if (fcport->scan_state != QLA_FCPORT_FOUND || + fcport->disc_state == DSC_DELETE_PEND) return 0; if ((fcport->loop_id != FC_NO_LOOP_ID) && @@ -1558,7 +1582,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw)) return 0; - if (fcport->flags & FCF_ASYNC_SENT) { + if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return 0; } @@ -2114,12 +2138,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n", __func__, __LINE__, ea->fcport->port_name, ea->data[1]); - ea->fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED); - if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED) - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - else - qla2x00_mark_device_lost(vha, ea->fcport, 1); + qlt_schedule_sess_for_deletion(ea->fcport); break; case MBS_LOOP_ID_USED: /* data[1] = IO PARAM 1 = nport ID */ @@ -3309,6 +3328,14 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) struct rsp_que *rsp = ha->rsp_q_map[0]; struct qla2xxx_fw_dump *fw_dump; + if (ha->fw_dump) { + ql_dbg(ql_dbg_init, vha, 0x00bd, + "Firmware dump already allocated.\n"); + return; + } + + ha->fw_dumped = 0; + ha->fw_dump_cap_flags = 0; dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; req_q_size = rsp_q_size = 0; @@ -3319,7 +3346,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x11000 + 1) * sizeof(uint16_t); } else if (IS_FWI2_CAPABLE(ha)) { - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (IS_QLA83XX(ha)) fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); else if (IS_QLA81XX(ha)) fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); @@ -3331,8 +3358,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = 
(ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t); if (ha->mqenable) { - if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && - !IS_QLA28XX(ha)) + if (!IS_QLA83XX(ha)) mq_size = sizeof(struct qla2xxx_mq_chain); /* * Allocate maximum buffer size for all queues - Q0. @@ -3893,8 +3919,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version); - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || - IS_QLA28XX(ha)) { + if (IS_QLA83XX(ha)) { ha->flags.fac_supported = 0; rval = QLA_SUCCESS; } @@ -5382,6 +5407,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) memcpy(fcport->node_name, new_fcport->node_name, WWN_SIZE); fcport->scan_state = QLA_FCPORT_FOUND; + if (fcport->login_retry == 0) { + fcport->login_retry = vha->hw->login_retry_count; + ql_dbg(ql_dbg_disc, vha, 0x2135, + "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", + fcport->port_name, fcport->loop_id, + fcport->login_retry); + } found++; break; } @@ -5515,6 +5547,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) if (atomic_read(&fcport->state) == FCS_ONLINE) return; + qla2x00_set_fcport_state(fcport, FCS_ONLINE); + rport_ids.node_name = wwn_to_u64(fcport->node_name); rport_ids.port_name = wwn_to_u64(fcport->port_name); rport_ids.port_id = fcport->d_id.b.domain << 16 | @@ -5615,6 +5649,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_reg_remote_port(vha, fcport); break; case MODE_TARGET: + qla2x00_set_fcport_state(fcport, FCS_ONLINE); if (!vha->vha_tgt.qla_tgt->tgt_stop && !vha->vha_tgt.qla_tgt->tgt_stopped) qlt_fc_port_added(vha, fcport); @@ -5629,8 +5664,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) break; } - qla2x00_set_fcport_state(fcport, FCS_ONLINE); - if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) { if (fcport->id_changed) { fcport->id_changed = 0; @@ -9127,7 +9160,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->rsp->req = qpair->req; qpair->rsp->qpair = qpair; /* init qpair to this cpu. Will adjust at run time. 
*/ - qla_cpu_update(qpair, smp_processor_id()); + qla_cpu_update(qpair, raw_smp_processor_id()); if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index c532c74ca1ab9e17ece023d6ee69e4b5edc1a799..e54cc2a761dd46ee110c2149b79d327ed0218d2f 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2910,6 +2910,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); + break; } fallthrough; default: @@ -2919,9 +2920,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) fw_status[0], fw_status[1], fw_status[2]); fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_set_fcport_disc_state(fcport, - DSC_LOGIN_FAILED); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + qlt_schedule_sess_for_deletion(fcport); break; } break; @@ -2933,8 +2932,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) fw_status[0], fw_status[1], fw_status[2]); sp->fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + qlt_schedule_sess_for_deletion(fcport); break; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 5e040b6debc84ef224dbf7e05ffac1dc931eea16..c5c7d60ab25241a0c1ba787e9268d30d6a5c2bcd 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2248,6 +2248,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) iocb->u.tmf.data = QLA_FUNCTION_FAILED; } else if ((le16_to_cpu(sts->scsi_status) & SS_RESPONSE_INFO_LEN_VALID)) { + host_to_fcp_swap(sts->data, sizeof(sts->data)); if (le32_to_cpu(sts->rsp_data_len) < 4) { ql_log(ql_log_warn, fcport->vha, 0x503b, "Async-%s error - hdl=%x not enough response(%d).\n", diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 734745f450211a921beda952e2371011a6f663d5..bbb57edc1f66290b72495b3dcabf7652385aef2f 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -9,6 +9,12 @@ #include #include +#ifdef CONFIG_PPC +#define IS_PPCARCH true +#else +#define IS_PPCARCH false +#endif + static struct mb_cmd_name { uint16_t cmd; const char *str; @@ -698,6 +704,9 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) vha->min_supported_speed = nv->min_supported_speed; } + + if (IS_PPCARCH) + mcp->mb[11] |= BIT_4; } if (ha->flags.exlogins_enabled) @@ -2984,8 +2993,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha) ha->orig_fw_iocb_count = mcp->mb[10]; if (ha->flags.npiv_supported) ha->max_npiv_vports = mcp->mb[11]; - if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || - IS_QLA28XX(ha)) + if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ha->fw_max_fcf_count = mcp->mb[12]; } @@ -5546,7 +5554,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) - mcp->in_mb |= MBX_3; + mcp->in_mb |= MBX_4|MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 5acee3c798d42771e739201f043e9c44ce42dcb6..ba1b1c7549d359f9e025d1ac61f74466ab193e34 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -35,6 +35,11 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) (fcport->nvme_flag & 
NVME_FLAG_REGISTERED)) return 0; + if (atomic_read(&fcport->state) == FCS_ONLINE) + return 0; + + qla2x00_set_fcport_state(fcport, FCS_ONLINE); + fcport->nvme_flag &= ~NVME_FLAG_RESETTING; memset(&req, 0, sizeof(struct nvme_fc_port_info)); @@ -165,6 +170,18 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) qla2xxx_rel_qpair_sp(sp->qpair, sp); } +static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd) +{ + if (sp->flags & SRB_DMA_VALID) { + struct srb_iocb *nvme = &sp->u.iocb_cmd; + struct qla_hw_data *ha = sp->fcport->vha->hw; + + dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma, + fd->rqstlen, DMA_TO_DEVICE); + sp->flags &= ~SRB_DMA_VALID; + } +} + static void qla_nvme_release_ls_cmd_kref(struct kref *kref) { struct srb *sp = container_of(kref, struct srb, cmd_kref); @@ -181,6 +198,8 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref) spin_unlock_irqrestore(&priv->cmd_lock, flags); fd = priv->fd; + + qla_nvme_ls_unmap(sp, fd); fd->done(fd, priv->comp_status); out: qla2x00_rel_sp(sp); @@ -327,6 +346,8 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, fd->rqstlen, DMA_TO_DEVICE); + sp->flags |= SRB_DMA_VALID; + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, @@ -334,6 +355,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, wake_up(&sp->nvme_ls_waitq); sp->priv = NULL; priv->sp = NULL; + qla_nvme_ls_unmap(sp, fd); qla2x00_rel_sp(sp); return rval; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e7f73a167fbd64136e64ad305d4eb0ee399724da..419156121cb59a9ededeef864994c7c9a3b6d05a 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3673,8 +3673,7 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha) if (ha->mqiobase) iounmap(ha->mqiobase); - if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) && - ha->msixbase) + if (ha->msixbase) iounmap(ha->msixbase); } } @@ -5390,6 +5389,11 @@ void qla2x00_relogin(struct scsi_qla_host *vha) memset(&ea, 0, sizeof(ea)); ea.fcport = fcport; qla24xx_handle_relogin_event(vha, &ea); + } else if (vha->hw->current_topology == + ISP_CFG_NL && + IS_QLA2XXX_MIDTYPE(vha->hw)) { + (void)qla24xx_fcport_handle_login(vha, + fcport); } else if (vha->hw->current_topology == ISP_CFG_NL) { fcport->login_retry--; diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 0f92e9a044dcdb4e27c4226b6f96792bde915824..0fa9c529fca11a51d7191a089335c212fec035d3 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -844,7 +844,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_nvram = start; break; case FLT_REG_IMG_PRI_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_img_status_pri = start; break; case FLT_REG_IMG_SEC_27XX: @@ -1356,7 +1356,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr, flash_data_addr(ha, faddr), le32_to_cpu(*dwptr)); if (ret) { ql_dbg(ql_dbg_user, vha, 0x7006, - "Failed slopw write %x (%x)\n", faddr, *dwptr); + "Failed slow write %x (%x)\n", faddr, *dwptr); break; } } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index ebed14bed7835cc46a1512c6afc05198bd67f08a..cf9ae0ab489a0ef2566c0c1d58abcbe897a2c75c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -3256,6 +3256,7 @@ int 
qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); + res = 0; goto out_unmap_unlock; } @@ -7076,8 +7077,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) if (!QLA_TGT_MODE_ENABLED()) return; - if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || - IS_QLA28XX(ha)) { + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; } else { diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index 34810f9bb2ee781b3dbf96aa73326b2b5552a57f..091e94c04f3095ceb57a9c48b4c03960d1793850 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c @@ -28,7 +28,6 @@ struct fsl_soc_die_attr { static struct guts *guts; static struct soc_device_attribute soc_dev_attr; static struct soc_device *soc_dev; -static struct device_node *root; /* SoC die attribute definition for QorIQ platform */ @@ -138,7 +137,7 @@ static u32 fsl_guts_get_svr(void) static int fsl_guts_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; + struct device_node *root, *np = pdev->dev.of_node; struct device *dev = &pdev->dev; struct resource *res; const struct fsl_soc_die_attr *soc_die; @@ -161,8 +160,14 @@ static int fsl_guts_probe(struct platform_device *pdev) root = of_find_node_by_path("/"); if (of_property_read_string(root, "model", &machine)) of_property_read_string_index(root, "compatible", 0, &machine); - if (machine) - soc_dev_attr.machine = machine; + if (machine) { + soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); + if (!soc_dev_attr.machine) { + of_node_put(root); + return -ENOMEM; + } + } + of_node_put(root); svr = fsl_guts_get_svr(); soc_die = fsl_soc_die_match(svr, fsl_soc_die); @@ -197,7 +202,6 @@ static int fsl_guts_probe(struct platform_device *pdev) static int fsl_guts_remove(struct platform_device *dev) { soc_device_unregister(soc_dev); - of_node_put(root); return 0; } diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c index 11ea08e97db75e0e2d06a5504c661a5ace8bcab0..1bb46d955d52576bf0cfc7cf4ffe72e200df2915 100644 --- a/drivers/soc/fsl/qe/qe_io.c +++ b/drivers/soc/fsl/qe/qe_io.c @@ -35,6 +35,8 @@ int par_io_init(struct device_node *np) if (ret) return ret; par_io = ioremap(res.start, resource_size(&res)); + if (!par_io) + return -ENOMEM; if (!of_property_read_u32(np, "num-ports", &num_ports)) num_par_io_ports = num_ports; diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c index f1875dc31ae2cb8991e98a3a28e6b52fcd5405a9..85f82e195ef8b7115536ea852a7fafd94cd1a3d2 100644 --- a/drivers/soc/qcom/ocmem.c +++ b/drivers/soc/qcom/ocmem.c @@ -206,6 +206,7 @@ struct ocmem *of_get_ocmem(struct device *dev) ocmem = platform_get_drvdata(pdev); if (!ocmem) { dev_err(dev, "Cannot get ocmem\n"); + put_device(&pdev->dev); return ERR_PTR(-ENODEV); } return ocmem; diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index 4fe88d4690e2b259de1279de7a3af46e76af533c..941499b117580fb8e6d49874030272a9c68f50f4 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -548,7 +548,7 @@ static int qmp_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT, + ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0, 
"aoss-qmp", qmp); if (ret < 0) { dev_err(&pdev->dev, "failed to request interrupt\n"); diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c index f2168e4259b231485a91ade30f3515e48d8732ba..c6084c0d35302ca2b94943d07d2bdc41dcb1a58f 100644 --- a/drivers/soc/qcom/rpmpd.c +++ b/drivers/soc/qcom/rpmpd.c @@ -387,6 +387,9 @@ static int rpmpd_probe(struct platform_device *pdev) data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains), GFP_KERNEL); + if (!data->domains) + return -ENOMEM; + data->num_domains = num; for (i = 0; i < num; i++) { diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index e9ece45d7a3334e7612cc4f61c616873472e312c..ef3f95fefab582d264e8fbbb5fe1d3ec45147cb9 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -447,9 +447,9 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (!irq) { + if (irq < 0) { dev_err(&pdev->dev, "no irq resource\n"); - return -ENXIO; + return irq; } ret = devm_request_irq(dev, irq, wkup_m3_txev_handler, diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index dad4326a2a714219577d31eda64f22a28592e7aa..824d9f900aca74e41e4b62a04420e7a0c8c951ec 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -521,8 +521,8 @@ static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable) /* Clear wake status */ wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS); - wake_sts |= (SDW_SHIM_WAKEEN_ENABLE << link_id); - intel_writew(shim, SDW_SHIM_WAKESTS_STATUS, wake_sts); + wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id); + intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts); } mutex_unlock(sdw->link_res->shim_lock); } diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index 96b418293bf2a493dca92f016e7dc342120db141..4fb19e6f94b05256400c0d27b621418d10a63684 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -304,25 +304,21 @@ static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf, writel(data, mxic->regs + TXD(nbytes % 4)); + ret = readl_poll_timeout(mxic->regs + INT_STS, sts, + sts & INT_TX_EMPTY, 0, USEC_PER_SEC); + if (ret) + return ret; + + ret = readl_poll_timeout(mxic->regs + INT_STS, sts, + sts & INT_RX_NOT_EMPTY, 0, + USEC_PER_SEC); + if (ret) + return ret; + + data = readl(mxic->regs + RXD); if (rxbuf) { - ret = readl_poll_timeout(mxic->regs + INT_STS, sts, - sts & INT_TX_EMPTY, 0, - USEC_PER_SEC); - if (ret) - return ret; - - ret = readl_poll_timeout(mxic->regs + INT_STS, sts, - sts & INT_RX_NOT_EMPTY, 0, - USEC_PER_SEC); - if (ret) - return ret; - - data = readl(mxic->regs + RXD); data >>= (8 * (4 - nbytes)); memcpy(rxbuf + pos, &data, nbytes); - WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY); - } else { - readl(mxic->regs + RXD); } WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY); diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index e4ee8b084799365eb7620f02d1f8c57379de893c..f7603c209e9d5fd89ce96d1006836e3d1d066813 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -2315,13 +2315,13 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) return status; } -static int +static void pl022_remove(struct amba_device *adev) { struct pl022 *pl022 = amba_get_drvdata(adev); if (!pl022) - return 0; + return; /* * undo pm_runtime_put() in probe. 
I assume that we're not @@ -2336,7 +2336,6 @@ pl022_remove(struct amba_device *adev) clk_disable_unprepare(pl022->clk); amba_release_regions(adev); tasklet_disable(&pl022->pump_transfers); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index aafac128bb5f1816b092eff0a020c0b34ec9760f..4eb979a096c78a49a9f3c272653a2766bdeece29 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -74,14 +74,23 @@ static bool lpss_dma_filter(struct dma_chan *chan, void *param) return true; } +static void lpss_dma_put_device(void *dma_dev) +{ + pci_dev_put(dma_dev); +} + static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) { struct pci_dev *dma_dev; + int ret; c->num_chipselect = 1; c->max_clk_rate = 50000000; dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); + ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev); + if (ret) + return ret; if (c->tx_param) { struct dw_dma_slave *slave = c->tx_param; @@ -105,8 +114,9 @@ static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) { - struct pci_dev *dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0)); struct dw_dma_slave *tx, *rx; + struct pci_dev *dma_dev; + int ret; switch (PCI_FUNC(dev->devfn)) { case 0: @@ -131,6 +141,11 @@ static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) return -ENODEV; } + dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0)); + ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev); + if (ret) + return ret; + tx = c->tx_param; tx->dma_dev = &dma_dev->dev; diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 624273d0e727fa564a507572cb500e0f579c23de..a9f97023d5a00da66064abdfbc2d3abaaf4db0d7 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -567,6 +567,12 @@ static int rockchip_spi_slave_abort(struct spi_controller *ctlr) { struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); + if (atomic_read(&rs->state) & RXDMA) + dmaengine_terminate_sync(ctlr->dma_rx); + if (atomic_read(&rs->state) & TXDMA) + dmaengine_terminate_sync(ctlr->dma_tx); + atomic_set(&rs->state, 0); + spi_enable_chip(rs, false); rs->slave_abort = true; complete(&ctlr->xfer_completion); @@ -636,7 +642,7 @@ static int rockchip_spi_probe(struct platform_device *pdev) struct spi_controller *ctlr; struct resource *mem; struct device_node *np = pdev->dev.of_node; - u32 rsd_nsecs; + u32 rsd_nsecs, num_cs; bool slave_mode; slave_mode = of_property_read_bool(np, "spi-slave"); @@ -744,8 +750,9 @@ static int rockchip_spi_probe(struct platform_device *pdev) * rk spi0 has two native cs, spi1..5 one cs only * if num-cs is missing in the dts, default to 1 */ - if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect)) - ctlr->num_chipselect = 1; + if (of_property_read_u32(np, "num-cs", &num_cs)) + num_cs = 1; + ctlr->num_chipselect = num_cs; ctlr->use_gpio_descriptors = true; } ctlr->dev.of_node = pdev->dev.of_node; diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index a2e5907276e7fd6ce045ece59ceb8b0064d143b3..ed42665b12241b803a2dc7e2e6071b01bbda043d 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -1353,6 +1353,10 @@ static int tegra_spi_probe(struct platform_device *pdev) tspi->phys = r->start; spi_irq = platform_get_irq(pdev, 0); + if (spi_irq < 0) { + ret = spi_irq; + goto exit_free_master; + } tspi->irq = spi_irq; tspi->clk = 
devm_clk_get(&pdev->dev, "spi"); diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 669fc4286231f508743c50ffce21e86c0cb0b6d5..9e2b812b9025f69a668f157decf4a4a36b281848 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1006,14 +1006,8 @@ static int tegra_slink_probe(struct platform_device *pdev) struct resource *r; int ret, spi_irq; const struct tegra_slink_chip_data *cdata = NULL; - const struct of_device_id *match; - match = of_match_device(tegra_slink_of_match, &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "Error: No device match found\n"); - return -ENODEV; - } - cdata = match->data; + cdata = of_device_get_match_data(&pdev->dev); master = spi_alloc_master(&pdev->dev, sizeof(*tspi)); if (!master) { diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index b635835729d66401501b99a31782760ddee54763..13c0b15fe17649490b7075c0d5119e4d012b7413 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem, if (op->dummy.nbytes) { tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + memset(tmpbuf, 0xff, op->dummy.nbytes); reinit_completion(&xqspi->data_completion); xqspi->txbuf = tmpbuf; diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 1dd2af9cc237441effef5a5ca9d55f54e454e24a..3d3ac48243ebd6ae8813c527210ee87d98246afe 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -1165,7 +1165,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) goto clk_dis_all; } - dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + if (ret) + goto clk_dis_all; + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS; ctlr->mem_ops = &zynqmp_qspi_mem_ops; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8c261eac2cee5b64148e9fd3f319f003f5b8ba97..6ea7b286c80c250fc1649b710f0e5056be1617bf 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -881,10 +881,10 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev, int i, ret; if (vmalloced_buf || kmap_buf) { - desc_len = min_t(int, max_seg_size, PAGE_SIZE); + desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE); sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); } else if (virt_addr_valid(buf)) { - desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); + desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len); sgs = DIV_ROUND_UP(len, desc_len); } else { return -EINVAL; diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c index 3a280cc1892ca027062006f5e4b6450123f29543..0a2dbed9ffc74f3399fa0fc6e1de2afb648d3de8 100644 --- a/drivers/staging/fbtft/fb_st7789v.c +++ b/drivers/staging/fbtft/fb_st7789v.c @@ -82,6 +82,8 @@ enum st7789v_command { */ static int init_display(struct fbtft_par *par) { + par->fbtftops.reset(par); + /* turn off sleep mode */ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); mdelay(120); diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index bd5f874334043217014a2c59c5167d82c838be80..de30262c3fae023e2e34d58f21aa6444370fa706 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -76,14 +76,15 @@ static void tx_complete(void *arg) static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type) { - int ret; + int ret, len; + len = skb->len + ETH_HLEN; ret = 
netif_rx_ni(skb); if (ret == NET_RX_DROP) { nic->stats.rx_dropped++; } else { nic->stats.rx_packets++; - nic->stats.rx_bytes += skb->len + ETH_HLEN; + nic->stats.rx_bytes += len; } return 0; diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c index fef0055b89909fc0454870e602cecad237554873..20183b2ea127962f347a9af6a7a4ea1a37eb5926 100644 --- a/drivers/staging/iio/adc/ad7280a.c +++ b/drivers/staging/iio/adc/ad7280a.c @@ -107,9 +107,9 @@ static unsigned int ad7280a_devaddr(unsigned int addr) { return ((addr & 0x1) << 4) | - ((addr & 0x2) << 3) | + ((addr & 0x2) << 2) | (addr & 0x4) | - ((addr & 0x8) >> 3) | + ((addr & 0x8) >> 2) | ((addr & 0x10) >> 4); } diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c index f638d0bd09fe6f4d3c956e17f79c050ac56de31d..b1614cce2dfb05dda05aac6d3d860caaceac1c2e 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_acc.c +++ b/drivers/staging/media/atomisp/pci/atomisp_acc.c @@ -439,6 +439,18 @@ int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd, return 0; } +static void atomisp_acc_unload_some_extensions(struct atomisp_sub_device *asd, + int i, + struct atomisp_acc_fw *acc_fw) +{ + while (--i >= 0) { + if (acc_fw->flags & acc_flag_to_pipe[i].flag) { + atomisp_css_unload_acc_extension(asd, acc_fw->fw, + acc_flag_to_pipe[i].pipe_id); + } + } +} + /* * Appends the loaded acceleration binary extensions to the * current ISP mode. Must be called just before sh_css_start(). @@ -477,16 +489,20 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd) acc_fw->fw, acc_flag_to_pipe[i].pipe_id, acc_fw->type); - if (ret) + if (ret) { + atomisp_acc_unload_some_extensions(asd, i, acc_fw); goto error; + } ext_loaded = true; } } ret = atomisp_css_set_acc_parameters(acc_fw); - if (ret < 0) + if (ret < 0) { + atomisp_acc_unload_some_extensions(asd, i, acc_fw); goto error; + } } if (!ext_loaded) @@ -495,6 +511,7 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd) ret = atomisp_css_update_stream(asd); if (ret) { dev_err(isp->dev, "%s: update stream failed.\n", __func__); + atomisp_acc_unload_extensions(asd); goto error; } @@ -502,13 +519,6 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd) return 0; error: - while (--i >= 0) { - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - atomisp_css_unload_acc_extension(asd, acc_fw->fw, - acc_flag_to_pipe[i].pipe_id); - } - } - list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) { if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT && acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER) diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c index 34480ca164746fe58d1f141ba1dbc7ff4fbcd223..c9ee85037644fdd893ca94f3889d16515ef6967a 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c +++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c @@ -729,6 +729,21 @@ static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs, return 0; } +/* + * Some boards contain a hw-bug where turning eldo2 back on after having turned + * it off causes the CPLM3218 ambient-light-sensor on the image-sensor's I2C bus + * to crash, hanging the bus. Do not turn eldo2 off on these systems. 
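 * The quirk table below is consulted with dmi_check_system() before eldo2 is
 * switched off; note that DMI_MATCH() performs a substring match on the
 * reported DMI field rather than an exact comparison.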
+ */ +static const struct dmi_system_id axp_leave_eldo2_on_ids[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"), + DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"), + }, + }, + { } +}; + static int axp_v1p8_on(struct device *dev, struct gmin_subdev *gs) { int ret; @@ -763,6 +778,9 @@ static int axp_v1p8_off(struct device *dev, struct gmin_subdev *gs) if (ret) return ret; + if (dmi_check_system(axp_leave_eldo2_on_ids)) + return 0; + ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v, ELDO_CTRL_REG, gs->eldo2_ctrl_shift, false); return ret; diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c index 6a5ee46070898b3b15cd44b1cf4ea594a5e3f307..c1cda16f2dc018d10ca9014511dca2c3688ec80b 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c @@ -39,7 +39,7 @@ struct hmm_bo_device bo_device; struct hmm_pool dynamic_pool; struct hmm_pool reserved_pool; -static ia_css_ptr dummy_ptr; +static ia_css_ptr dummy_ptr = mmgr_EXCEPTION; static bool hmm_initialized; struct _hmm_mem_stat hmm_mem_stat; @@ -209,7 +209,7 @@ int hmm_init(void) void hmm_cleanup(void) { - if (!dummy_ptr) + if (dummy_ptr == mmgr_EXCEPTION) return; sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group); @@ -288,7 +288,8 @@ void hmm_free(ia_css_ptr virt) dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt); - WARN_ON(!virt); + if (WARN_ON(virt == mmgr_EXCEPTION)) + return; bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt); diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c index b88dc4ed06db710806a1d647bf5624e582dca395..ed244aee196c3d6d631a7f64905679050de6cf08 100644 --- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c +++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c @@ -23,7 +23,7 @@ static void hantro_h1_set_src_img_ctrl(struct hantro_dev *vpu, reg = H1_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width) | H1_REG_IN_IMG_CTRL_OVRFLR_D4(0) - | H1_REG_IN_IMG_CTRL_OVRFLB_D4(0) + | H1_REG_IN_IMG_CTRL_OVRFLB(0) | H1_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt); vepu_write_relaxed(vpu, reg, H1_REG_IN_IMG_CTRL); } diff --git a/drivers/staging/media/hantro/hantro_h1_regs.h b/drivers/staging/media/hantro/hantro_h1_regs.h index d6e9825bb5c7be3ca3e336c3649c5bfc7b697b78..30e7e7b920b553aeca0c3fc1ca4be04712d8ef42 100644 --- a/drivers/staging/media/hantro/hantro_h1_regs.h +++ b/drivers/staging/media/hantro/hantro_h1_regs.h @@ -47,7 +47,7 @@ #define H1_REG_IN_IMG_CTRL 0x03c #define H1_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12) #define H1_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10) -#define H1_REG_IN_IMG_CTRL_OVRFLB_D4(x) ((x) << 6) +#define H1_REG_IN_IMG_CTRL_OVRFLB(x) ((x) << 6) #define H1_REG_IN_IMG_CTRL_FMT(x) ((x) << 2) #define H1_REG_ENC_CTRL0 0x040 #define H1_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26) diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c index db7022707ff8dd4ce5cfd9f89203c2e46885da3b..86ccc8937afcaf0a89c96cfc92d3a13de3ee2121 100644 --- a/drivers/staging/media/meson/vdec/esparser.c +++ b/drivers/staging/media/meson/vdec/esparser.c @@ -328,7 +328,12 @@ esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf) offset = esparser_get_offset(sess); - amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags); + ret = amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags); + if (ret) { + v4l2_m2m_buf_done(vbuf, 
VB2_BUF_STATE_ERROR); + return ret; + } + dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X flags = %08X\n", vb->timestamp, payload_size, offset, vbuf->flags); diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c index 7f07a9175815f025a918e0ecb33788e516e2735c..db4a854e59a38df94fc72084c38e8fc9174e7783 100644 --- a/drivers/staging/media/meson/vdec/vdec_helpers.c +++ b/drivers/staging/media/meson/vdec/vdec_helpers.c @@ -227,13 +227,16 @@ int amvdec_set_canvases(struct amvdec_session *sess, } EXPORT_SYMBOL_GPL(amvdec_set_canvases); -void amvdec_add_ts(struct amvdec_session *sess, u64 ts, - struct v4l2_timecode tc, u32 offset, u32 vbuf_flags) +int amvdec_add_ts(struct amvdec_session *sess, u64 ts, + struct v4l2_timecode tc, u32 offset, u32 vbuf_flags) { struct amvdec_timestamp *new_ts; unsigned long flags; new_ts = kzalloc(sizeof(*new_ts), GFP_KERNEL); + if (!new_ts) + return -ENOMEM; + new_ts->ts = ts; new_ts->tc = tc; new_ts->offset = offset; @@ -242,6 +245,7 @@ void amvdec_add_ts(struct amvdec_session *sess, u64 ts, spin_lock_irqsave(&sess->ts_spinlock, flags); list_add_tail(&new_ts->list, &sess->timestamps); spin_unlock_irqrestore(&sess->ts_spinlock, flags); + return 0; } EXPORT_SYMBOL_GPL(amvdec_add_ts); diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h index cfaed52ab526577e17d2d50659a9761a498a9235..798e5a8a9b3f19b83f4d56779bbb3e2b358b0d40 100644 --- a/drivers/staging/media/meson/vdec/vdec_helpers.h +++ b/drivers/staging/media/meson/vdec/vdec_helpers.h @@ -55,8 +55,8 @@ void amvdec_dst_buf_done_offset(struct amvdec_session *sess, * @offset: offset in the VIFIFO where the associated packet was written * @flags the vb2_v4l2_buffer flags */ -void amvdec_add_ts(struct amvdec_session *sess, u64 ts, - struct v4l2_timecode tc, u32 offset, u32 flags); +int amvdec_add_ts(struct amvdec_session *sess, u64 ts, + struct v4l2_timecode tc, u32 offset, u32 flags); void amvdec_remove_ts(struct amvdec_session *sess, u64 ts); /** diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c index de7442d4834dcab1237dc7f9558021c6871c1551..d3e26bfe6c90b21c9a222263c483a2c69f35f694 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c @@ -38,7 +38,7 @@ struct cedrus_h264_sram_ref_pic { #define CEDRUS_H264_FRAME_NUM 18 -#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K) +#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (32 * SZ_1K) #define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K) static void cedrus_h264_write_sram(struct cedrus_dev *dev, diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c index 10744fab7ceaa1e11471d9b4ace77794d45e2bd7..368439cf5e1744229dda9b6fb16eb7ee4267d551 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c @@ -23,7 +23,7 @@ * Subsequent BSP implementations seem to double the neighbor info buffer size * for the H6 SoC, which may be related to 10 bit H265 support. 
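 * The H264 neighbor-info buffer in cedrus_h264.c receives the same doubling
 * in the hunk above (16 KiB to 32 KiB).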
*/ -#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (397 * SZ_1K) +#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (794 * SZ_1K) #define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K) #define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160 diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h index e7fe8da7732c7845189fe9b0fb52bde7db288829..3f223e5b1872ba2e687721639a0793ba8a7ae364 100644 --- a/drivers/staging/media/zoran/zoran.h +++ b/drivers/staging/media/zoran/zoran.h @@ -314,6 +314,6 @@ static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev) #endif -int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq); +int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir); void zoran_queue_exit(struct zoran *zr); int zr_set_buf(struct zoran *zr); diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/staging/media/zoran/zoran_card.c index dfc60e2e9dd7ab355cffd7731fa4b1a9e290aa39..fe0cca12119c7510c6041f2fc5800fe1f26a52c9 100644 --- a/drivers/staging/media/zoran/zoran_card.c +++ b/drivers/staging/media/zoran/zoran_card.c @@ -802,6 +802,52 @@ int zoran_check_jpg_settings(struct zoran *zr, return 0; } +static int zoran_init_video_device(struct zoran *zr, struct video_device *video_dev, int dir) +{ + int err; + + /* Now add the template and register the device unit. */ + *video_dev = zoran_template; + video_dev->v4l2_dev = &zr->v4l2_dev; + video_dev->lock = &zr->lock; + video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | dir; + + strscpy(video_dev->name, ZR_DEVNAME(zr), sizeof(video_dev->name)); + /* + * It's not a mem2mem device, but you can both capture and output from one and the same + * device. This should really be split up into two device nodes, but that's a job for + * another day. + */ + video_dev->vfl_dir = VFL_DIR_M2M; + zoran_queue_init(zr, &zr->vq, V4L2_BUF_TYPE_VIDEO_CAPTURE); + + err = video_register_device(video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]); + if (err < 0) + return err; + video_set_drvdata(video_dev, zr); + return 0; +} + +static void zoran_exit_video_devices(struct zoran *zr) +{ + video_unregister_device(zr->video_dev); + kfree(zr->video_dev); +} + +static int zoran_init_video_devices(struct zoran *zr) +{ + int err; + + zr->video_dev = video_device_alloc(); + if (!zr->video_dev) + return -ENOMEM; + + err = zoran_init_video_device(zr, zr->video_dev, V4L2_CAP_VIDEO_CAPTURE); + if (err) + kfree(zr->video_dev); + return err; +} + void zoran_open_init_params(struct zoran *zr) { int i; @@ -873,17 +919,11 @@ static int zr36057_init(struct zoran *zr) zoran_open_init_params(zr); /* allocate memory *before* doing anything to the hardware in case allocation fails */ - zr->video_dev = video_device_alloc(); - if (!zr->video_dev) { - err = -ENOMEM; - goto exit; - } zr->stat_com = dma_alloc_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), &zr->p_sc, GFP_KERNEL); if (!zr->stat_com) { - err = -ENOMEM; - goto exit_video; + return -ENOMEM; } for (j = 0; j < BUZ_NUM_STAT_COM; j++) zr->stat_com[j] = cpu_to_le32(1); /* mark as unavailable to zr36057 */ @@ -896,26 +936,9 @@ static int zr36057_init(struct zoran *zr) goto exit_statcom; } - /* Now add the template and register the device unit. 
*/ - *zr->video_dev = zoran_template; - zr->video_dev->v4l2_dev = &zr->v4l2_dev; - zr->video_dev->lock = &zr->lock; - zr->video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE; - - strscpy(zr->video_dev->name, ZR_DEVNAME(zr), sizeof(zr->video_dev->name)); - /* - * It's not a mem2mem device, but you can both capture and output from one and the same - * device. This should really be split up into two device nodes, but that's a job for - * another day. - */ - zr->video_dev->vfl_dir = VFL_DIR_M2M; - - zoran_queue_init(zr, &zr->vq); - - err = video_register_device(zr->video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]); - if (err < 0) + err = zoran_init_video_devices(zr); + if (err) goto exit_statcomb; - video_set_drvdata(zr->video_dev, zr); zoran_init_hardware(zr); if (!pass_through) { @@ -930,9 +953,6 @@ static int zr36057_init(struct zoran *zr) dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb); exit_statcom: dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), zr->stat_com, zr->p_sc); -exit_video: - kfree(zr->video_dev); -exit: return err; } @@ -964,7 +984,7 @@ static void zoran_remove(struct pci_dev *pdev) dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb); pci_release_regions(pdev); pci_disable_device(zr->pci_dev); - video_unregister_device(zr->video_dev); + zoran_exit_video_devices(zr); exit_free: v4l2_ctrl_handler_free(&zr->hdl); v4l2_device_unregister(&zr->v4l2_dev); @@ -1068,8 +1088,10 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) - return -ENODEV; - vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32)); + return err; + err = vb2_dma_contig_set_max_seg_size(&pdev->dev, U32_MAX); + if (err) + return err; nr = zoran_num++; if (nr >= BUZ_MAX) { diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/staging/media/zoran/zoran_device.c index e569a1341d0103d2b329841ba81b228fa64c443d..913f5a3c5bfce083389678c797ed4c999b461ab4 100644 --- a/drivers/staging/media/zoran/zoran_device.c +++ b/drivers/staging/media/zoran/zoran_device.c @@ -879,7 +879,7 @@ static void zoran_reap_stat_com(struct zoran *zr) if (zr->jpg_settings.tmp_dcm == 1) i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM; else - i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2 + 1; + i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2; stat_com = le32_to_cpu(zr->stat_com[i]); if ((stat_com & 1) == 0) { @@ -891,6 +891,11 @@ static void zoran_reap_stat_com(struct zoran *zr) size = (stat_com & GENMASK(22, 1)) >> 1; buf = zr->inuse[i]; + if (!buf) { + spin_unlock_irqrestore(&zr->queued_bufs_lock, flags); + pci_err(zr->pci_dev, "No buffer at slot %d\n", i); + return; + } buf->vbuf.vb2_buf.timestamp = ktime_get_ns(); if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) { diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c index 808196ea5b81b14e8e6ae46657a95c7c19fbad81..ea04f6c732b212bf61f17e444012244a0f1719cf 100644 --- a/drivers/staging/media/zoran/zoran_driver.c +++ b/drivers/staging/media/zoran/zoran_driver.c @@ -255,8 +255,6 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability strscpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card)); strscpy(cap->driver, "zoran", sizeof(cap->driver)); snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", pci_name(zr->pci_dev)); - cap->device_caps = 
zr->video_dev->device_caps; - cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } @@ -582,6 +580,9 @@ static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std) struct zoran *zr = video_drvdata(file); int res = 0; + if (zr->norm == std) + return 0; + if (zr->running != ZORAN_MAP_MODE_NONE) return -EBUSY; @@ -737,6 +738,7 @@ static int zoran_g_parm(struct file *file, void *priv, struct v4l2_streamparm *p if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; + parm->parm.capture.readbuffers = 9; return 0; } @@ -867,6 +869,10 @@ int zr_set_buf(struct zoran *zr) vbuf = &buf->vbuf; buf->vbuf.field = V4L2_FIELD_INTERLACED; + if (BUZ_MAX_HEIGHT < (zr->v4l_settings.height * 2)) + buf->vbuf.field = V4L2_FIELD_INTERLACED; + else + buf->vbuf.field = V4L2_FIELD_TOP; vb2_set_plane_payload(&buf->vbuf.vb2_buf, 0, zr->buffer_size); vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_DONE); zr->inuse[0] = NULL; @@ -926,6 +932,7 @@ static int zr_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) zr->stat_com[j] = cpu_to_le32(1); zr->inuse[j] = NULL; } + zr->vbseq = 0; if (zr->map_mode != ZORAN_MAP_MODE_RAW) { pci_info(zr->pci_dev, "START JPG\n"); @@ -1006,7 +1013,7 @@ static const struct vb2_ops zr_video_qops = { .wait_finish = vb2_ops_wait_finish, }; -int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq) +int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir) { int err; @@ -1014,8 +1021,9 @@ int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq) INIT_LIST_HEAD(&zr->queued_bufs); vq->dev = &zr->pci_dev->dev; - vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - vq->io_modes = VB2_USERPTR | VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE; + vq->type = dir; + + vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE; vq->drv_priv = zr; vq->buf_struct_size = sizeof(struct zr_buffer); vq->ops = &zr_video_qops; diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts index a7c0d3115d7264f36b25c563a9ccee03c9923e2d..d48ca5a25c2c4f635b176ba74c26a68485b48d95 100644 --- a/drivers/staging/mt7621-dts/gbpc1.dts +++ b/drivers/staging/mt7621-dts/gbpc1.dts @@ -11,7 +11,8 @@ / { memory@0 { device_type = "memory"; - reg = <0x0 0x1c000000>, <0x20000000 0x4000000>; + reg = <0x00000000 0x1c000000>, + <0x20000000 0x04000000>; }; chosen { @@ -37,24 +38,16 @@ reset { gpio-leds { compatible = "gpio-leds"; - system { - label = "gb-pc1:green:system"; + power { + label = "green:power"; gpios = <&gpio 6 GPIO_ACTIVE_LOW>; + linux,default-trigger = "default-on"; }; - status { - label = "gb-pc1:green:status"; + system { + label = "green:system"; gpios = <&gpio 8 GPIO_ACTIVE_LOW>; - }; - - lan1 { - label = "gb-pc1:green:lan1"; - gpios = <&gpio 24 GPIO_ACTIVE_LOW>; - }; - - lan2 { - label = "gb-pc1:green:lan2"; - gpios = <&gpio 25 GPIO_ACTIVE_LOW>; + linux,default-trigger = "disk-activity"; }; }; }; @@ -94,9 +87,8 @@ factory: partition@40000 { partition@50000 { label = "firmware"; - reg = <0x50000 0x1FB0000>; + reg = <0x50000 0x1fb0000>; }; - }; }; @@ -122,9 +114,12 @@ &pcie { }; &pinctrl { - state_default: pinctrl0 { - default_gpio: gpio { - groups = "wdt", "rgmii2", "uart3"; + pinctrl-names = "default"; + pinctrl-0 = <&state_default>; + + state_default: state-default { + gpio-pinmux { + groups = "rgmii2", "uart3", "wdt"; function = "gpio"; }; }; @@ -133,12 +128,13 @@ default_gpio: gpio { &switch0 { ports { port@0 { + status = "okay"; label = "ethblack"; - status = "ok"; }; + port@4 { + status = "okay"; label = "ethblue"; - status = "ok"; 
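/* "okay" is the status value defined by the devicetree specification for an
 * enabled node; Linux also accepts the legacy "ok" spelling, so the hunks
 * above merely switch to the canonical form. */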
}; }; }; diff --git a/drivers/staging/mt7621-dts/gbpc2.dts b/drivers/staging/mt7621-dts/gbpc2.dts index 52760e7351f6c855ed6d9b2fe0521c067461f705..6f6fed071dda01de8bded044ecf49b1698fd2c5f 100644 --- a/drivers/staging/mt7621-dts/gbpc2.dts +++ b/drivers/staging/mt7621-dts/gbpc2.dts @@ -1,21 +1,121 @@ /dts-v1/; -#include "gbpc1.dts" +#include "mt7621.dtsi" + +#include +#include / { compatible = "gnubee,gb-pc2", "mediatek,mt7621-soc"; model = "GB-PC2"; + + memory@0 { + device_type = "memory"; + reg = <0x00000000 0x1c000000>, + <0x20000000 0x04000000>; + }; + + chosen { + bootargs = "console=ttyS0,57600"; + }; + + palmbus: palmbus@1e000000 { + i2c@900 { + status = "okay"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + reset { + label = "reset"; + gpios = <&gpio 18 GPIO_ACTIVE_HIGH>; + linux,code = ; + }; + }; +}; + +&sdhci { + status = "okay"; +}; + +&spi0 { + status = "okay"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <50000000>; + broken-flash-reset; + + partition@0 { + label = "u-boot"; + reg = <0x0 0x30000>; + read-only; + }; + + partition@30000 { + label = "u-boot-env"; + reg = <0x30000 0x10000>; + read-only; + }; + + factory: partition@40000 { + label = "factory"; + reg = <0x40000 0x10000>; + read-only; + }; + + partition@50000 { + label = "firmware"; + reg = <0x50000 0x1fb0000>; + }; + }; }; -&default_gpio { - groups = "wdt", "uart3"; - function = "gpio"; +&pcie { + status = "okay"; }; -&gmac1 { - status = "ok"; +&pinctrl { + pinctrl-names = "default"; + pinctrl-0 = <&state_default>; + + state_default: state-default { + gpio-pinmux { + groups = "wdt"; + function = "gpio"; + }; + }; }; -&phy_external { - status = "ok"; +ðernet { + gmac1: mac@1 { + status = "okay"; + phy-handle = <ðphy7>; + }; + + mdio-bus { + ethphy7: ethernet-phy@7 { + reg = <7>; + phy-mode = "rgmii-rxid"; + }; + }; +}; + +&switch0 { + ports { + port@0 { + status = "okay"; + label = "ethblack"; + }; + + port@4 { + status = "okay"; + label = "ethblue"; + }; + }; }; diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi index 27222f7b246fd970d982be1c483a87aa6838b7ac..91a7fa74829643d898dcb515ce1caeceabf7b411 100644 --- a/drivers/staging/mt7621-dts/mt7621.dtsi +++ b/drivers/staging/mt7621-dts/mt7621.dtsi @@ -56,9 +56,9 @@ mmc_fixed_3v3: fixedregulator@0 { regulator-max-microvolt = <3300000>; enable-active-high; regulator-always-on; - }; + }; - mmc_fixed_1v8_io: fixedregulator@1 { + mmc_fixed_1v8_io: fixedregulator@1 { compatible = "regulator-fixed"; regulator-name = "mmc_io"; regulator-min-microvolt = <1800000>; @@ -412,37 +412,32 @@ ethernet: ethernet@1e100000 { mediatek,ethsys = <ðsys>; + pinctrl-names = "default"; + pinctrl-0 = <&mdio_pins>, <&rgmii1_pins>, <&rgmii2_pins>; gmac0: mac@0 { compatible = "mediatek,eth-mac"; reg = <0>; phy-mode = "rgmii"; + fixed-link { speed = <1000>; full-duplex; pause; }; }; + gmac1: mac@1 { compatible = "mediatek,eth-mac"; reg = <1>; status = "off"; phy-mode = "rgmii-rxid"; - phy-handle = <&phy_external>; }; + mdio-bus { #address-cells = <1>; #size-cells = <0>; - phy_external: ethernet-phy@5 { - status = "off"; - reg = <5>; - phy-mode = "rgmii-rxid"; - - pinctrl-names = "default"; - pinctrl-0 = <&rgmii2_pins>; - }; - switch0: switch0@0 { compatible = "mediatek,mt7621"; #address-cells = <1>; @@ -456,36 +451,43 @@ ports { #address-cells = <1>; #size-cells = <0>; reg = <0>; + port@0 { status = "off"; reg = <0>; label = "lan0"; }; + port@1 { status = "off"; reg = 
<1>; label = "lan1"; }; + port@2 { status = "off"; reg = <2>; label = "lan2"; }; + port@3 { status = "off"; reg = <3>; label = "lan3"; }; + port@4 { status = "off"; reg = <4>; label = "lan4"; }; + port@6 { reg = <6>; label = "cpu"; ethernet = <&gmac0>; phy-mode = "trgmii"; + fixed-link { speed = <1000>; full-duplex; diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index 4df6d04315e39d3f456fee1e6e6c7c8412f1eed1..b912ad2f4b720f64091e2a7e1da32d913baab95c 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c @@ -6679,6 +6679,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) struct sta_info *psta_bmc; struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct sta_priv *pstapriv = &padapter->stapriv; /* for BC/MC Frames */ @@ -6689,7 +6690,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) { msleep(10);/* 10ms, ATIM(HIQ) Windows */ - spin_lock_bh(&psta_bmc->sleep_q.lock); + /* spin_lock_bh(&psta_bmc->sleep_q.lock); */ + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta_bmc->sleep_q); xmitframe_plist = get_next(xmitframe_phead); @@ -6715,7 +6717,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) rtw_hal_xmitframe_enqueue(padapter, pxmitframe); } - spin_unlock_bh(&psta_bmc->sleep_q.lock); + /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); /* check hi queue and bmc_sleepq */ rtw_chk_hi_queue_cmd(padapter); diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c index 0d47e6e121777c72185d423209b6c51624e0de77..6979f8dbccb84920b85fa199110438cd5b09660a 100644 --- a/drivers/staging/rtl8723bs/core/rtw_recv.c +++ b/drivers/staging/rtl8723bs/core/rtw_recv.c @@ -1144,8 +1144,10 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_ if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) { struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - spin_lock_bh(&psta->sleep_q.lock); + /* spin_lock_bh(&psta->sleep_q.lock); */ + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = get_next(xmitframe_phead); @@ -1180,10 +1182,12 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_ update_beacon(padapter, _TIM_IE_, NULL, true); } - spin_unlock_bh(&psta->sleep_q.lock); + /* spin_unlock_bh(&psta->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); } else { - spin_unlock_bh(&psta->sleep_q.lock); + /* spin_unlock_bh(&psta->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); /* DBG_871X("no buffered packets to xmit\n"); */ if (pstapriv->tim_bitmap&BIT(psta->aid)) { diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c index b1784b4e466f3a44b06234468a5a8f97a3eb3ec8..e3f56c6cc882e816a660c4ba914a8a83d3d5b8e2 100644 --- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c +++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c @@ -330,48 +330,46 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) /* list_del_init(&psta->wakeup_list); */ - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); + 
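/*
 * Note on the locking change in this and the following rtl8723bs hunks: the
 * per-queue sleep_q.lock and sta_pending.lock protection is replaced by the
 * single pxmitpriv->lock, and rtw_free_xmitframe_queue() now takes the frame
 * queue's own lock internally (see rtw_xmit.c below), so the callers shown in
 * this series take pxmitpriv->lock instead, roughly:
 *
 *	spin_lock_bh(&pxmitpriv->lock);
 *	rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
 *	spin_unlock_bh(&pxmitpriv->lock);
 */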
rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q); psta->sleepq_len = 0; - spin_unlock_bh(&psta->sleep_q.lock); - - spin_lock_bh(&pxmitpriv->lock); /* vo */ - spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending); list_del_init(&(pstaxmitpriv->vo_q.tx_pending)); phwxmit = pxmitpriv->hwxmits; phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt; pstaxmitpriv->vo_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */ /* vi */ - spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending); list_del_init(&(pstaxmitpriv->vi_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+1; phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt; pstaxmitpriv->vi_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */ /* be */ - spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending); list_del_init(&(pstaxmitpriv->be_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+2; phwxmit->accnt -= pstaxmitpriv->be_q.qcnt; pstaxmitpriv->be_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */ /* bk */ - spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending); list_del_init(&(pstaxmitpriv->bk_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+3; phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt; pstaxmitpriv->bk_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */ spin_unlock_bh(&pxmitpriv->lock); diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c index d78cff7ed6a01a321c85a799c8d9dcc8dbdd1587..6ecaff9728fd4831001d9327c32f6ac917bc13f3 100644 --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c @@ -1871,6 +1871,8 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram struct list_head *plist, *phead; struct xmit_frame *pxmitframe; + spin_lock_bh(&pframequeue->lock); + phead = get_list_head(pframequeue); plist = get_next(phead); @@ -1881,6 +1883,7 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram rtw_free_xmitframe(pxmitpriv, pxmitframe); } + spin_unlock_bh(&pframequeue->lock); } s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe) @@ -1943,7 +1946,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) struct sta_info *psta; struct tx_servq *ptxservq; struct pkt_attrib *pattrib = &pxmitframe->attrib; - struct xmit_priv *xmit_priv = &padapter->xmitpriv; struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; sint res = _SUCCESS; @@ -1972,14 +1974,12 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index)); - spin_lock_bh(&xmit_priv->lock); if (list_empty(&ptxservq->tx_pending)) list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue)); list_add_tail(&pxmitframe->list, 
get_list_head(&ptxservq->sta_pending)); ptxservq->qcnt++; phwxmits[ac_index].accnt++; - spin_unlock_bh(&xmit_priv->lock); exit: @@ -2397,10 +2397,11 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; psta_bmc = rtw_get_bcmc_stainfo(padapter); - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = get_next(xmitframe_phead); @@ -2508,7 +2509,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) _exit: - spin_unlock_bh(&psta->sleep_q.lock); + spin_unlock_bh(&pxmitpriv->lock); if (update_mask) update_beacon(padapter, _TIM_IE_, NULL, true); @@ -2520,8 +2521,9 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = get_next(xmitframe_phead); @@ -2577,7 +2579,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst } } - spin_unlock_bh(&psta->sleep_q.lock); + spin_unlock_bh(&pxmitpriv->lock); } void enqueue_pending_xmitbuf( diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c index ce5bf2861d0c14b7c25fe7711a487da6f5c89bd4..44799c4a9f35b95b13bab83264f330c69d4e15f3 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c @@ -572,7 +572,9 @@ s32 rtl8723bs_hal_xmit( rtw_issue_addbareq_cmd(padapter, pxmitframe); } + spin_lock_bh(&pxmitpriv->lock); err = rtw_xmitframe_enqueue(padapter, pxmitframe); + spin_unlock_bh(&pxmitpriv->lock); if (err != _SUCCESS) { RT_TRACE(_module_hal_xmit_c_, _drv_err_, ("rtl8723bs_hal_xmit: enqueue xmitframe fail\n")); rtw_free_xmitframe(pxmitpriv, pxmitframe); diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index f255a96ae5a48ca6323b0d932348cdb86fe6379f..6ea80add7378fce0de69adecad0b611d38ce0247 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -588,6 +588,7 @@ static int optee_remove(struct platform_device *pdev) /* Unregister OP-TEE specific client devices on TEE bus */ optee_unregister_devices(); + teedev_close_context(optee->ctx); /* * Ask OP-TEE to free all cached shared memory objects to decrease * reference counters and also avoid wild pointers in secure world @@ -633,6 +634,7 @@ static int optee_probe(struct platform_device *pdev) struct optee *optee = NULL; void *memremaped_shm = NULL; struct tee_device *teedev; + struct tee_context *ctx; u32 sec_caps; int rc; @@ -719,6 +721,12 @@ static int optee_probe(struct platform_device *pdev) optee_supp_init(&optee->supp); optee->memremaped_shm = memremaped_shm; optee->pool = pool; + ctx = teedev_open(optee->teedev); + if (IS_ERR(ctx)) { + rc = PTR_ERR(ctx); + goto err; + } + optee->ctx = ctx; /* * Ensure that there are no pre-existing shm objects before enabling diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index f6bb4a763ba94e80f0463cc42957702ef47f1b93..ea09533e30cdef5b214043e801241618084cc38f 100644 --- a/drivers/tee/optee/optee_private.h 
+++ b/drivers/tee/optee/optee_private.h @@ -70,6 +70,7 @@ struct optee_supp { * struct optee - main service struct * @supp_teedev: supplicant device * @teedev: client device + * @ctx: driver internal TEE context * @invoke_fn: function to issue smc or hvc * @call_queue: queue of threads waiting to call @invoke_fn * @wait_queue: queue of threads from secure world waiting for a @@ -87,6 +88,7 @@ struct optee { struct tee_device *supp_teedev; struct tee_device *teedev; optee_invoke_fn *invoke_fn; + struct tee_context *ctx; struct optee_call_queue call_queue; struct optee_wait_queue wait_queue; struct optee_supp supp; diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c index 9dbdd783d6f2d3b15c4bff071bc153f147ebcd58..f1e0332b0f6e8609d024f535fffb19252b8a2777 100644 --- a/drivers/tee/optee/rpc.c +++ b/drivers/tee/optee/rpc.c @@ -284,6 +284,7 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) } static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee *optee, struct optee_msg_arg *arg, struct optee_call_ctx *call_ctx) { @@ -313,7 +314,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = cmd_alloc_suppl(ctx, sz); break; case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc(optee->ctx, sz, + TEE_SHM_MAPPED | TEE_SHM_PRIV); break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; @@ -470,7 +472,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, break; case OPTEE_MSG_RPC_CMD_SHM_ALLOC: free_pages_list(call_ctx); - handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx); + handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx); break; case OPTEE_MSG_RPC_CMD_SHM_FREE: handle_rpc_func_cmd_shm_free(ctx, arg); @@ -501,7 +503,7 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { case OPTEE_SMC_RPC_FUNC_ALLOC: - shm = tee_shm_alloc(ctx, param->a1, + shm = tee_shm_alloc(optee->ctx, param->a1, TEE_SHM_MAPPED | TEE_SHM_PRIV); if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { reg_pair_from_64(¶m->a1, ¶m->a2, pa); diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index f97d95b50773480cdb9610a850c571cfe79376a7..b615aca0023ed317c15cb1a116ca5c15d864b56b 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(driver_lock); static struct class *tee_class; static dev_t tee_devt; -static struct tee_context *teedev_open(struct tee_device *teedev) +struct tee_context *teedev_open(struct tee_device *teedev) { int rc; struct tee_context *ctx; @@ -70,6 +70,7 @@ static struct tee_context *teedev_open(struct tee_device *teedev) return ERR_PTR(rc); } +EXPORT_SYMBOL_GPL(teedev_open); void teedev_ctx_get(struct tee_context *ctx) { @@ -96,13 +97,14 @@ void teedev_ctx_put(struct tee_context *ctx) kref_put(&ctx->refcount, teedev_ctx_release); } -static void teedev_close_context(struct tee_context *ctx) +void teedev_close_context(struct tee_context *ctx) { struct tee_device *teedev = ctx->teedev; teedev_ctx_put(ctx); tee_device_put(teedev); } +EXPORT_SYMBOL_GPL(teedev_close_context); static int tee_open(struct inode *inode, struct file *filp) { diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 0966551cbaaa0aad9259c39ee6a91b745a7af406..72a26867c2092fff21799ca3c89b4bd2633d7775 100644 --- 
a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -53,7 +53,7 @@ struct int3400_thermal_priv { struct art *arts; int trt_count; struct trt *trts; - u8 uuid_bitmap; + u32 uuid_bitmap; int rel_misc_dev_res; int current_uuid_index; char *data_vault; @@ -402,6 +402,10 @@ static void int3400_notify(acpi_handle handle, thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event); thermal_prop[4] = NULL; kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop); + kfree(thermal_prop[0]); + kfree(thermal_prop[1]); + kfree(thermal_prop[2]); + kfree(thermal_prop[3]); } static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, @@ -462,6 +466,11 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv) priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer, obj->package.elements[0].buffer.length, GFP_KERNEL); + if (!priv->data_vault) { + kfree(buffer.pointer); + return; + } + bin_attr_data_vault.private = priv->data_vault; bin_attr_data_vault.size = obj->package.elements[0].buffer.length; kfree(buffer.pointer); diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c index 1234dbe95895112ccba9364512df7679c4cff499..41c8d47805c4e295da650706beba103ccf0d83ef 100644 --- a/drivers/thermal/thermal_netlink.c +++ b/drivers/thermal/thermal_netlink.c @@ -418,11 +418,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) for (i = 0; i < tz->trips; i++) { enum thermal_trip_type type; - int temp, hyst; + int temp, hyst = 0; tz->ops->get_trip_type(tz, i, &type); tz->ops->get_trip_temp(tz, i, &temp); - tz->ops->get_trip_hyst(tz, i, &hyst); + if (tz->ops->get_trip_hyst) + tz->ops->get_trip_hyst(tz, i, &hyst); if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) || nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) || diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c index 2af1e5751bd6302463a397cedd68bdccb23d8313..796fbff623f6e9fbe0c27290922cd0bebd689ca5 100644 --- a/drivers/tty/hvc/hvc_iucv.c +++ b/drivers/tty/hvc/hvc_iucv.c @@ -1470,7 +1470,9 @@ static int __init hvc_iucv_init(void) */ static int __init hvc_iucv_config(char *val) { - return kstrtoul(val, 10, &hvc_iucv_devices); + if (kstrtoul(val, 10, &hvc_iucv_devices)) + pr_warn("hvc_iucv= invalid parameter value '%s'\n", val); + return 1; } diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 3703987c46661396bbf3c276a4ce295e3157ca0b..8344265a1948bf78acc66f836f79e75cd7d33f93 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -858,6 +858,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) struct mxser_port *info = container_of(port, struct mxser_port, port); unsigned long page; unsigned long flags; + int ret; page = __get_free_page(GFP_KERNEL); if (!page) @@ -867,9 +868,9 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) if (!info->ioaddr || !info->type) { set_bit(TTY_IO_ERROR, &tty->flags); - free_page(page); spin_unlock_irqrestore(&info->slock, flags); - return 0; + ret = 0; + goto err_free_xmit; } info->port.xmit_buf = (unsigned char *) page; @@ -895,8 +896,10 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) if (capable(CAP_SYS_ADMIN)) { set_bit(TTY_IO_ERROR, &tty->flags); return 0; - } else - return -ENODEV; + } + + ret = -ENODEV; + goto err_free_xmit; } /* @@ -941,6 +944,10 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) 
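/*
 * The hvc_iucv= change above (and the kgdboc= change later in this series)
 * follows the __setup() convention: returning 1 marks the option as handled,
 * while returning 0 or a negative errno makes the kernel pass the string on
 * to init as an unknown argument.  A minimal sketch of that pattern, with
 * hypothetical names (example_param/example_setup, not part of this series):
 *
 *	static unsigned long example_param;
 *
 *	static int __init example_setup(char *val)
 *	{
 *		if (kstrtoul(val, 10, &example_param))
 *			pr_warn("example= invalid parameter value '%s'\n", val);
 *		return 1;	// always consume the option
 *	}
 *	__setup("example=", example_setup);
 */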
spin_unlock_irqrestore(&info->slock, flags); return 0; +err_free_xmit: + free_page(page); + info->port.xmit_buf = NULL; + return ret; } /* diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index b8f8621537720454b41c60524c2cfba80accf8b3..05562b3cca451a9bf5549a9edb6c3e9530653797 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -434,7 +434,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) modembits |= MDM_RTR; if (dlci->modem_tx & TIOCM_RI) modembits |= MDM_IC; - if (dlci->modem_tx & TIOCM_CD) + if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) modembits |= MDM_DV; return modembits; } @@ -1426,6 +1426,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci) if (dlci->addr != 0) { tty_port_tty_hangup(&dlci->port, false); kfifo_reset(&dlci->fifo); + /* Ensure that gsmtty_open() can return. */ + tty_port_set_initialized(&dlci->port, 0); + wake_up_interruptible(&dlci->port.open_wait); } else dlci->gsm->dead = true; wake_up(&dlci->gsm->event); @@ -1485,7 +1488,7 @@ static void gsm_dlci_t1(struct timer_list *t) dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { - gsm_dlci_close(dlci); + gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; @@ -1719,7 +1722,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci) gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); - tty_hangup(tty); + /* We cannot use tty_hangup() because in tty_kref_put() the tty + * driver assumes that the hangup queue is free and reuses it to + * queue release_one_tty() -> NULL pointer panic in + * process_one_work(). + */ + tty_vhangup(tty); tty_port_tty_set(&dlci->port, NULL); tty_kref_put(tty); @@ -3173,9 +3181,9 @@ static void gsmtty_throttle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx &= ~TIOCM_DTR; + dlci->modem_tx &= ~TIOCM_RTS; dlci->throttled = true; - /* Send an MSC with DTR cleared */ + /* Send an MSC with RTS cleared */ gsmtty_modem_update(dlci, 0); } @@ -3185,9 +3193,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx |= TIOCM_DTR; + dlci->modem_tx |= TIOCM_RTS; dlci->throttled = false; - /* Send an MSC with DTR set */ + /* Send an MSC with RTS set */ gsmtty_modem_update(dlci, 0); } diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index 890fa7ddaa7f36650a1b69a8554e5567fd674f84..b3c3f7e5851aba3a17ccb7f80f18586c5aac4e4c 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -64,10 +64,19 @@ int serial8250_tx_dma(struct uart_8250_port *p) struct uart_8250_dma *dma = p->dma; struct circ_buf *xmit = &p->port.state->xmit; struct dma_async_tx_descriptor *desc; + struct uart_port *up = &p->port; int ret; - if (dma->tx_running) + if (dma->tx_running) { + if (up->x_char) { + dmaengine_pause(dma->txchan); + uart_xchar_out(up, UART_TX); + dmaengine_resume(dma->txchan); + } return 0; + } else if (up->x_char) { + uart_xchar_out(up, UART_TX); + } if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) { /* We have been called from __dma_tx_complete() */ diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c index 4dee8a9e0c9512ae65f39e8b44e3bf10e861db29..dfb730b7ea2ae7b20ab0e42ff49d4ff78cb302b8 100644 --- a/drivers/tty/serial/8250/8250_lpss.c +++ b/drivers/tty/serial/8250/8250_lpss.c @@ -121,8 +121,7 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) { struct dw_dma_slave *param = &lpss->dma_param; struct 
pci_dev *pdev = to_pci_dev(port->dev); - unsigned int dma_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); - struct pci_dev *dma_dev = pci_get_slot(pdev->bus, dma_devfn); + struct pci_dev *dma_dev; switch (pdev->device) { case PCI_DEVICE_ID_INTEL_BYT_UART1: @@ -141,6 +140,8 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) return -EINVAL; } + dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0)); + param->dma_dev = &dma_dev->dev; param->m_master = 0; param->p_master = 1; @@ -156,11 +157,26 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) return 0; } +static void byt_serial_exit(struct lpss8250 *lpss) +{ + struct dw_dma_slave *param = &lpss->dma_param; + + /* Paired with pci_get_slot() in the byt_serial_setup() above */ + put_device(param->dma_dev); +} + static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port) { return 0; } +static void ehl_serial_exit(struct lpss8250 *lpss) +{ + struct uart_8250_port *up = serial8250_get_port(lpss->data.line); + + up->dma = NULL; +} + #ifdef CONFIG_SERIAL_8250_DMA static const struct dw_dma_platform_data qrk_serial_dma_pdata = { .nr_channels = 2, @@ -335,8 +351,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_exit: - if (lpss->board->exit) - lpss->board->exit(lpss); + lpss->board->exit(lpss); pci_free_irq_vectors(pdev); return ret; } @@ -347,8 +362,7 @@ static void lpss8250_remove(struct pci_dev *pdev) serial8250_unregister_port(lpss->data.line); - if (lpss->board->exit) - lpss->board->exit(lpss); + lpss->board->exit(lpss); pci_free_irq_vectors(pdev); } @@ -356,12 +370,14 @@ static const struct lpss8250_board byt_board = { .freq = 100000000, .base_baud = 2764800, .setup = byt_serial_setup, + .exit = byt_serial_exit, }; static const struct lpss8250_board ehl_board = { .freq = 200000000, .base_baud = 12500000, .setup = ehl_serial_setup, + .exit = ehl_serial_exit, }; static const struct lpss8250_board qrk_board = { diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c index efa0515139f8ec052486a414f614519780ff96d3..e6c1791609ddf339427212a857eddcb1f98d8a10 100644 --- a/drivers/tty/serial/8250/8250_mid.c +++ b/drivers/tty/serial/8250/8250_mid.c @@ -73,6 +73,11 @@ static int pnw_setup(struct mid8250 *mid, struct uart_port *p) return 0; } +static void pnw_exit(struct mid8250 *mid) +{ + pci_dev_put(mid->dma_dev); +} + static int tng_handle_irq(struct uart_port *p) { struct mid8250 *mid = p->private_data; @@ -124,6 +129,11 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p) return 0; } +static void tng_exit(struct mid8250 *mid) +{ + pci_dev_put(mid->dma_dev); +} + static int dnv_handle_irq(struct uart_port *p) { struct mid8250 *mid = p->private_data; @@ -330,9 +340,9 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, mid); return 0; + err: - if (mid->board->exit) - mid->board->exit(mid); + mid->board->exit(mid); return ret; } @@ -342,8 +352,7 @@ static void mid8250_remove(struct pci_dev *pdev) serial8250_unregister_port(mid->line); - if (mid->board->exit) - mid->board->exit(mid); + mid->board->exit(mid); } static const struct mid8250_board pnw_board = { @@ -351,6 +360,7 @@ static const struct mid8250_board pnw_board = { .freq = 50000000, .base_baud = 115200, .setup = pnw_setup, + .exit = pnw_exit, }; static const struct mid8250_board tng_board = { @@ -358,6 +368,7 @@ static const struct mid8250_board tng_board = { .freq = 38400000, 
.base_baud = 1843200, .setup = tng_setup, + .exit = tng_exit, }; static const struct mid8250_board dnv_board = { diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 7c07ebb37b1b9c07ff15a6f5f303ac493f7421dc..3055353514e1dd891f3cbe5278513d77311624bb 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1620,6 +1620,18 @@ static inline void start_tx_rs485(struct uart_port *port) struct uart_8250_port *up = up_to_u8250p(port); struct uart_8250_em485 *em485 = up->em485; + /* + * While serial8250_em485_handle_stop_tx() is a noop if + * em485->active_timer != &em485->stop_tx_timer, it might happen that + * the timer is still armed and triggers only after the current bunch of + * chars is send and em485->active_timer == &em485->stop_tx_timer again. + * So cancel the timer. There is still a theoretical race condition if + * the timer is already running and only comes around to check for + * em485->active_timer when &em485->stop_tx_timer is armed again. + */ + if (em485->active_timer == &em485->stop_tx_timer) + hrtimer_try_to_cancel(&em485->stop_tx_timer); + em485->active_timer = NULL; if (em485->tx_stopped) { @@ -1805,9 +1817,7 @@ void serial8250_tx_chars(struct uart_8250_port *up) int count; if (port->x_char) { - serial_out(up, UART_TX, port->x_char); - port->icount.tx++; - port->x_char = 0; + uart_xchar_out(port, UART_TX); return; } if (uart_tx_stopped(port)) { diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c index 75d61e038a77580ecaceebb529b00ba915f98465..e538d6d75155e4840c420f5bf2a2007dcd8e8891 100644 --- a/drivers/tty/serial/amba-pl010.c +++ b/drivers/tty/serial/amba-pl010.c @@ -751,7 +751,7 @@ static int pl010_probe(struct amba_device *dev, const struct amba_id *id) return ret; } -static int pl010_remove(struct amba_device *dev) +static void pl010_remove(struct amba_device *dev) { struct uart_amba_port *uap = amba_get_drvdata(dev); int i; @@ -767,8 +767,6 @@ static int pl010_remove(struct amba_device *dev) if (!busy) uart_unregister_driver(&amba_reg); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 859d0c30dcc2059f74a2263ffe7622093d06b7e4..90b370ee9a34ea8fc7f60d1f6a16d55b6ef35316 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2736,13 +2736,12 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id) return pl011_register_port(uap); } -static int pl011_remove(struct amba_device *dev) +static void pl011_remove(struct amba_device *dev) { struct uart_amba_port *uap = amba_get_drvdata(dev); uart_remove_one_port(&amba_reg, &uap->port); pl011_unregister_port(uap); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index 49d0c7f2b29b8f927915327025a84a555a73a7bd..79b7db8580e05c9240e447b83d42137fc7f3a8a9 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -403,16 +403,16 @@ static int kgdboc_option_setup(char *opt) { if (!opt) { pr_err("config string not provided\n"); - return -EINVAL; + return 1; } if (strlen(opt) >= MAX_CONFIG_LEN) { pr_err("config string too long\n"); - return -ENOSPC; + return 1; } strcpy(config, opt); - return 0; + return 1; } __setup("kgdboc=", kgdboc_option_setup); diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 9adb8362578c5d8c54a5c1a34061e5688eaf727d..04b4ed5d06341f76e96c6f6a7cf7ac2b4f4130ed 100644 --- 
a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) static void sc16is7xx_tx_proc(struct kthread_work *ws) { struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port); + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); if ((port->rs485.flags & SER_RS485_ENABLED) && (port->rs485.delay_rts_before_send > 0)) msleep(port->rs485.delay_rts_before_send); + mutex_lock(&s->efr_lock); sc16is7xx_handle_tx(port); + mutex_unlock(&s->efr_lock); } static void sc16is7xx_reconf_rs485(struct uart_port *port) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index be0d9922e320e04a0c609cbece0e66a8b698b1a0..19f0c5db11e3364c2716c6158f724cf8c2013e05 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -676,6 +676,20 @@ static void uart_flush_buffer(struct tty_struct *tty) tty_port_tty_wakeup(&state->port); } +/* + * This function performs low-level write of high-priority XON/XOFF + * character and accounting for it. + * + * Requires uart_port to implement .serial_out(). + */ +void uart_xchar_out(struct uart_port *uport, int offset) +{ + serial_port_out(uport, offset, uport->x_char); + uport->icount.tx++; + uport->x_char = 0; +} +EXPORT_SYMBOL_GPL(uart_xchar_out); + /* * This function is used to send a high-priority XON/XOFF character to * the device diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 0eadf0547175c5150fc3c730cd4b7638f5ad420a..6afae051ba8d117cc5170c03b556db514d60a4a2 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -420,10 +420,22 @@ static void stm32_usart_transmit_chars(struct uart_port *port) struct stm32_port *stm32_port = to_stm32_port(port); const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; struct circ_buf *xmit = &port->state->xmit; + u32 isr; + int ret; if (port->x_char) { if (stm32_port->tx_dma_busy) stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + + /* Check that TDR is empty before filling FIFO */ + ret = + readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, + isr, + (isr & USART_SR_TXE), + 10, 1000); + if (ret) + dev_warn(port->dev, "1 character may be erased\n"); + writel_relaxed(port->x_char, port->membase + ofs->tdr); port->x_char = 0; port->icount.tx++; diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 58274c507353120062531efac0f1b174219e9fb1..49f59d53b4b26d213b21efa62eee468c79a20e0c 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1889,6 +1889,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, struct usbtmc_ctrlrequest request; u8 *buffer = NULL; int rv; + unsigned int is_in, pipe; unsigned long res; res = copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest)); @@ -1898,12 +1899,14 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, if (request.req.wLength > USBTMC_BUFSIZE) return -EMSGSIZE; + is_in = request.req.bRequestType & USB_DIR_IN; + if (request.req.wLength) { buffer = kmalloc(request.req.wLength, GFP_KERNEL); if (!buffer) return -ENOMEM; - if ((request.req.bRequestType & USB_DIR_IN) == 0) { + if (!is_in) { /* Send control data to device */ res = copy_from_user(buffer, request.data, request.req.wLength); @@ -1914,8 +1917,12 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, } } + if (is_in) + pipe = usb_rcvctrlpipe(data->usb_dev, 0); + else + pipe = usb_sndctrlpipe(data->usb_dev, 0); rv = 
usb_control_msg(data->usb_dev, - usb_rcvctrlpipe(data->usb_dev, 0), + pipe, request.req.bRequest, request.req.bRequestType, request.req.wValue, @@ -1927,7 +1934,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, goto exit; } - if (rv && (request.req.bRequestType & USB_DIR_IN)) { + if (rv && is_in) { /* Read control data from device */ res = copy_to_user(request.data, buffer, rv); if (res) diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 641e4251cb7f10d501e059532092918686eaf108..03d16a08261d88e9f1ad91660cb363ed11155b65 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -1406,6 +1406,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); #define dwc2_is_device_connected(hsotg) (hsotg->connected) +#define dwc2_is_device_enabled(hsotg) (hsotg->enabled) int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg); int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup); int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg); @@ -1434,6 +1435,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) { return 0; } #define dwc2_is_device_connected(hsotg) (0) +#define dwc2_is_device_enabled(hsotg) (0) static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) { return 0; } static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c index aa6eb76f64ddc0ea78b39bf53451005fa7bc87a1..36f2c38416e5ec4c2b942c0a49db3571ee9d04fd 100644 --- a/drivers/usb/dwc2/drd.c +++ b/drivers/usb/dwc2/drd.c @@ -109,8 +109,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) already = dwc2_ovr_avalid(hsotg, true); } else if (role == USB_ROLE_DEVICE) { already = dwc2_ovr_bvalid(hsotg, true); - /* This clear DCTL.SFTDISCON bit */ - dwc2_hsotg_core_connect(hsotg); + if (dwc2_is_device_enabled(hsotg)) { + /* This clear DCTL.SFTDISCON bit */ + dwc2_hsotg_core_connect(hsotg); + } } else { if (dwc2_is_device_mode(hsotg)) { if (!dwc2_ovr_bvalid(hsotg, false)) diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 17117870f6cea1989f9df85b841f5f8b2c671e4d..98df8d52c765c9b4147a3d572907b12d40678bb3 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -81,8 +81,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = { static struct gpiod_lookup_table platform_bytcr_gpios = { .dev_id = "0000:00:16.0", .table = { - GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH), {} }, }; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 9095ce52c28c69a96fb4dbbb3faa96482e8a3664..b68fe48ac57923a66598b39ad0392344a75fbec6 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -3775,9 +3775,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) unsigned long flags; irqreturn_t ret = IRQ_NONE; + local_bh_disable(); spin_lock_irqsave(&dwc->lock, flags); ret = dwc3_process_event_buf(evt); spin_unlock_irqrestore(&dwc->lock, flags); + local_bh_enable(); return ret; } diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 
60d89339a563357369097c2d0e6d21bf70c3af61..eef71a12f6f23cec2d41de35b26f659d0548c11b 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -658,6 +658,7 @@ static int rndis_set_response(struct rndis_params *params, BufLength = le32_to_cpu(buf->InformationBufferLength); BufOffset = le32_to_cpu(buf->InformationBufferOffset); if ((BufLength > RNDIS_MAX_TOTAL_SIZE) || + (BufOffset > RNDIS_MAX_TOTAL_SIZE) || (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE)) return -EINVAL; @@ -951,6 +952,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v) params->resp_avail = resp_avail; params->v = v; INIT_LIST_HEAD(&params->resp_queue); + spin_lock_init(&params->resp_lock); pr_debug("%s: configNr = %d\n", __func__, i); return params; @@ -1044,12 +1046,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf) { rndis_resp_t *r, *n; + spin_lock(&params->resp_lock); list_for_each_entry_safe(r, n, &params->resp_queue, list) { if (r->buf == buf) { list_del(&r->list); kfree(r); } } + spin_unlock(&params->resp_lock); } EXPORT_SYMBOL_GPL(rndis_free_response); @@ -1059,14 +1063,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length) if (!length) return NULL; + spin_lock(&params->resp_lock); list_for_each_entry_safe(r, n, &params->resp_queue, list) { if (!r->send) { r->send = 1; *length = r->length; + spin_unlock(&params->resp_lock); return r->buf; } } + spin_unlock(&params->resp_lock); return NULL; } EXPORT_SYMBOL_GPL(rndis_get_next_response); @@ -1083,7 +1090,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length) r->length = length; r->send = 0; + spin_lock(&params->resp_lock); list_add_tail(&r->list, &params->resp_queue); + spin_unlock(&params->resp_lock); return r; } diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h index f6167f7fea82b59da4b0eb40c0fd7effc06bec5c..6206b8b7490f64b0e11dcc71aa2d99691d5d1ac3 100644 --- a/drivers/usb/gadget/function/rndis.h +++ b/drivers/usb/gadget/function/rndis.h @@ -174,6 +174,7 @@ typedef struct rndis_params { void (*resp_avail)(void *v); void *v; struct list_head resp_queue; + spinlock_t resp_lock; } rndis_params; /* RNDIS Message parser and other useless functions */ diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index da691a69fec10c0bdf9bbab58656e263cbe32a8f..3a3b5a03dda75a297588642cdb5e9889af387922 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1343,7 +1343,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) usb_gadget_udc_stop(udc); udc->driver = NULL; - udc->dev.driver = NULL; udc->gadget->dev.driver = NULL; } @@ -1405,7 +1404,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri driver->function); udc->driver = driver; - udc->dev.driver = &driver->driver; udc->gadget->dev.driver = &driver->driver; usb_gadget_udc_set_speed(udc, driver->max_speed); @@ -1427,7 +1425,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri dev_err(&udc->dev, "failed to start %s: %d\n", udc->driver->function, ret); udc->driver = NULL; - udc->dev.driver = NULL; udc->gadget->dev.driver = NULL; return ret; } diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 71b018e9a5735bbc45ab89c683297f2a163fabe3..460a8a86e3111ffe2d6d5e621c58e80d47759c9f 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -676,7 +676,7 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci) }
pm_runtime_allow(xhci_to_hcd(xhci)->self.controller); xhci->test_mode = 0; - return xhci_reset(xhci); + return xhci_reset(xhci, XHCI_RESET_SHORT_USEC); } void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port, @@ -1002,6 +1002,9 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status, if (link_state == XDEV_U2) *status |= USB_PORT_STAT_L1; if (link_state == XDEV_U0) { + if (bus_state->resume_done[portnum]) + usb_hcd_end_port_resume(&port->rhub->hcd->self, + portnum); bus_state->resume_done[portnum] = 0; clear_bit(portnum, &bus_state->resuming_ports); if (bus_state->suspended_ports & (1 << portnum)) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 93f429436e454de07dc99598b8b3a5591132f252..4aab93d5b6514165a8b9df5861a223f33c3d1118 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2603,7 +2603,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) fail: xhci_halt(xhci); - xhci_reset(xhci); + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); xhci_mem_cleanup(xhci); return -ENOMEM; } diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 7f1e5296d0f68aec1731b88d8069d23ae56720fe..cc21f5f9d0ad4295d3ccd43eeaa891ee630d9b23 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -66,7 +66,7 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) * handshake done). There are two failure modes: "usec" have passed (major * hardware flakeout), or the register reads as all-ones (hardware removed). */ -int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) +int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us) { u32 result; int ret; @@ -74,7 +74,7 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) ret = readl_poll_timeout_atomic(ptr, result, (result & mask) == done || result == U32_MAX, - 1, usec); + 1, timeout_us); if (result == U32_MAX) /* card removed */ return -ENODEV; @@ -163,7 +163,7 @@ int xhci_start(struct xhci_hcd *xhci) * Transactions will be terminated immediately, and operational registers * will be set to their defaults. */ -int xhci_reset(struct xhci_hcd *xhci) +int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) { u32 command; u32 state; @@ -196,8 +196,7 @@ int xhci_reset(struct xhci_hcd *xhci) if (xhci->quirks & XHCI_INTEL_HOST) udelay(1000); - ret = xhci_handshake(&xhci->op_regs->command, - CMD_RESET, 0, 10 * 1000 * 1000); + ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us); if (ret) return ret; @@ -210,8 +209,7 @@ int xhci_reset(struct xhci_hcd *xhci) * xHCI cannot write to any doorbells or operational registers other * than status until the "Controller Not Ready" flag is cleared. 
*/ - ret = xhci_handshake(&xhci->op_regs->status, - STS_CNR, 0, 10 * 1000 * 1000); + ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us); xhci->usb2_rhub.bus_state.port_c_suspend = 0; xhci->usb2_rhub.bus_state.suspended_ports = 0; @@ -732,7 +730,7 @@ static void xhci_stop(struct usb_hcd *hcd) xhci->xhc_state |= XHCI_STATE_HALTED; xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; xhci_halt(xhci); - xhci_reset(xhci); + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); spin_unlock_irq(&xhci->lock); xhci_cleanup_msix(xhci); @@ -785,7 +783,7 @@ void xhci_shutdown(struct usb_hcd *hcd) xhci_halt(xhci); /* Workaround for spurious wakeups at shutdown with HSW */ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) - xhci_reset(xhci); + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); spin_unlock_irq(&xhci->lock); xhci_cleanup_msix(xhci); @@ -1091,6 +1089,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) int retval = 0; bool comp_timer_running = false; bool pending_portevent = false; + bool reinit_xhc = false; if (!hcd->state) return 0; @@ -1107,10 +1106,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); spin_lock_irq(&xhci->lock); - if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) - hibernated = true; - if (!hibernated) { + if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) + reinit_xhc = true; + + if (!reinit_xhc) { /* * Some controllers might lose power during suspend, so wait * for controller not ready bit to clear, just as in xHC init. @@ -1143,12 +1143,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; } - temp = readl(&xhci->op_regs->status); } - /* If restore operation fails, re-initialize the HC during resume */ - if ((temp & STS_SRE) || hibernated) { + temp = readl(&xhci->op_regs->status); + + /* re-initialize the HC on Restore Error, or Host Controller Error */ + if (temp & (STS_SRE | STS_HCE)) { + reinit_xhc = true; + xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); + } + if (reinit_xhc) { if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !(xhci_all_ports_seen_u0(xhci))) { del_timer_sync(&xhci->comp_mode_recovery_timer); @@ -1163,7 +1168,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); xhci_zero_64b_regs(xhci); - retval = xhci_reset(xhci); + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); spin_unlock_irq(&xhci->lock); if (retval) return retval; @@ -1480,9 +1485,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag struct urb_priv *urb_priv; int num_tds; - if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, - true, true, __func__) <= 0) + if (!urb) return -EINVAL; + ret = xhci_check_args(hcd, urb->dev, urb->ep, + true, true, __func__); + if (ret <= 0) + return ret ? ret : -EINVAL; slot_id = urb->dev->slot_id; ep_index = xhci_get_endpoint_index(&urb->ep->desc); @@ -3282,7 +3290,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, return -EINVAL; ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); if (ret <= 0) - return -EINVAL; + return ret ? 
ret : -EINVAL; if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" " descriptor for ep 0x%x does not support streams\n", @@ -5261,10 +5269,10 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) if (XHCI_EXT_PORT_PSIV(xhci->port_caps[j].psi[i]) >= 5) minor_rev = 1; } - if (minor_rev != 1) { - hcd->speed = HCD_USB3; - hcd->self.root_hub->speed = USB_SPEED_SUPER; - } + } + if (minor_rev != 1) { + hcd->speed = HCD_USB3; + hcd->self.root_hub->speed = USB_SPEED_SUPER; } } @@ -5315,7 +5323,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) xhci_dbg(xhci, "Resetting HCD\n"); /* Reset the internal HC memory state and registers. */ - retval = xhci_reset(xhci); + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); if (retval) return retval; xhci_dbg(xhci, "Reset complete\n"); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 679ef073d99dcee21f6e9160f11635d213808658..1c84d73250d0981814eac93445a7e0bf62985212 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -229,6 +229,9 @@ struct xhci_op_regs { #define CMD_ETE (1 << 14) /* bits 15:31 are reserved (and should be preserved on writes). */ +#define XHCI_RESET_LONG_USEC (10 * 1000 * 1000) +#define XHCI_RESET_SHORT_USEC (250 * 1000) + /* IMAN - Interrupt Management Register */ #define IMAN_IE (1 << 1) #define IMAN_IP (1 << 0) @@ -2070,11 +2073,11 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci, /* xHCI host controller glue */ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); -int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec); +int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us); void xhci_quiesce(struct xhci_hcd *xhci); int xhci_halt(struct xhci_hcd *xhci); int xhci_start(struct xhci_hcd *xhci); -int xhci_reset(struct xhci_hcd *xhci); +int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us); int xhci_run(struct usb_hcd *hcd); int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); void xhci_shutdown(struct usb_hcd *hcd); @@ -2457,6 +2460,8 @@ static inline const char *xhci_decode_ctrl_ctx(char *str, unsigned int bit; int ret = 0; + str[0] = '\0'; + if (drop) { ret = sprintf(str, "Drop:"); for_each_set_bit(bit, &drop, 32) @@ -2614,8 +2619,11 @@ static inline const char *xhci_decode_usbsts(char *str, u32 usbsts) { int ret = 0; + ret = sprintf(str, " 0x%08x", usbsts); + if (usbsts == ~(u32)0) - return " 0xffffffff"; + return str; + if (usbsts & STS_HALT) ret += sprintf(str + ret, " HCHalted"); if (usbsts & STS_FATAL) diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index 4007fa25a8ffae389570b32c470189ef716c8f4c..169251ec8353e592b628840b3dd8d155aad2f65b 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -66,6 +66,7 @@ config USB_SERIAL_SIMPLE - Libtransistor USB console - a number of Motorola phones - Motorola Tetra devices + - Nokia mobile phones - Novatel Wireless GPS receivers - Siemens USB/MPI adapter. - ViVOtech ViVOpay USB device. 
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 8716ada0b1387efd59418f3f416fb22816615a9d..a2a38fc76ca53c602b94bba123fd9776d03a7a57 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -81,7 +81,6 @@ #define CH341_QUIRK_SIMULATE_BREAK BIT(1) static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x1a86, 0x5512) }, { USB_DEVICE(0x1a86, 0x5523) }, { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index c39c505b081b1d73490cf72c38f7dacbb086fc8d..b878f4c87fee8e5d1eafc3cf5e46ce785a71e753 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5821E 0x81d7 #define DELL_PRODUCT_5821E_ESIM 0x81e0 +#define DELL_PRODUCT_5829E_ESIM 0x81e4 +#define DELL_PRODUCT_5829E 0x81e6 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E), + .driver_info = RSVD(0) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM), + .driver_info = RSVD(0) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */ + .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */ + .driver_info = NCTRL(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 1bbe18f3f9f11eb21c76da0740d4f17ef8f2b7ec..d736822e95e184485485fd057966a7c114027d64 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -116,6 +116,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) }, { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) }, + { USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 6097ee8fccb25cdbf6feaa28d49f784c913ca492..c5406452b774ef9dc6cdc986f0aa4ce6ead1b866 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -35,6 +35,9 @@ #define ATEN_PRODUCT_UC232B 0x2022 #define ATEN_PRODUCT_ID2 0x2118 +#define IBM_VENDOR_ID 0x04b3 +#define IBM_PRODUCT_ID 0x4016 + #define 
IODATA_VENDOR_ID 0x04bb #define IODATA_PRODUCT_ID 0x0a03 #define IODATA_PRODUCT_ID_RSAQ5 0x0a0e diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index bd23a7cb1be2bceaa8422e88bc3cdb5255dd566c..4c6747889a194664a04f90824e417bedec625b3f 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -91,6 +91,11 @@ DEVICE(moto_modem, MOTO_IDS); { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); +/* Nokia mobile phone driver */ +#define NOKIA_IDS() \ + { USB_DEVICE(0x0421, 0x069a) } /* Nokia 130 (RM-1035) */ +DEVICE(nokia, NOKIA_IDS); + /* Novatel Wireless GPS driver */ #define NOVATEL_IDS() \ { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ @@ -123,6 +128,7 @@ static struct usb_serial_driver * const serial_drivers[] = { &vivopay_device, &moto_modem_device, &motorola_tetra_device, + &nokia_device, &novatel_gps_device, &hp4x_device, &suunto_device, @@ -140,6 +146,7 @@ static const struct usb_device_id id_table[] = { VIVOPAY_IDS(), MOTO_IDS(), MOTOROLA_TETRA_IDS(), + NOKIA_IDS(), NOVATEL_IDS(), HP4X_IDS(), SUUNTO_IDS(), diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 98c1aa594e6c4d58e568366b106746c4dbf81599..c9ce1c25c80cc59b61701c868d0d8ff8206e993c 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c @@ -237,36 +237,33 @@ static struct us_unusual_dev ene_ub6250_unusual_dev_list[] = { #define memstick_logaddr(logadr1, logadr0) ((((u16)(logadr1)) << 8) | (logadr0)) -struct SD_STATUS { - u8 Insert:1; - u8 Ready:1; - u8 MediaChange:1; - u8 IsMMC:1; - u8 HiCapacity:1; - u8 HiSpeed:1; - u8 WtP:1; - u8 Reserved:1; -}; - -struct MS_STATUS { - u8 Insert:1; - u8 Ready:1; - u8 MediaChange:1; - u8 IsMSPro:1; - u8 IsMSPHG:1; - u8 Reserved1:1; - u8 WtP:1; - u8 Reserved2:1; -}; - -struct SM_STATUS { - u8 Insert:1; - u8 Ready:1; - u8 MediaChange:1; - u8 Reserved:3; - u8 WtP:1; - u8 IsMS:1; -}; +/* SD_STATUS bits */ +#define SD_Insert BIT(0) +#define SD_Ready BIT(1) +#define SD_MediaChange BIT(2) +#define SD_IsMMC BIT(3) +#define SD_HiCapacity BIT(4) +#define SD_HiSpeed BIT(5) +#define SD_WtP BIT(6) + /* Bit 7 reserved */ + +/* MS_STATUS bits */ +#define MS_Insert BIT(0) +#define MS_Ready BIT(1) +#define MS_MediaChange BIT(2) +#define MS_IsMSPro BIT(3) +#define MS_IsMSPHG BIT(4) + /* Bit 5 reserved */ +#define MS_WtP BIT(6) + /* Bit 7 reserved */ + +/* SM_STATUS bits */ +#define SM_Insert BIT(0) +#define SM_Ready BIT(1) +#define SM_MediaChange BIT(2) + /* Bits 3-5 reserved */ +#define SM_WtP BIT(6) +#define SM_IsMS BIT(7) struct ms_bootblock_cis { u8 bCistplDEVICE[6]; /* 0 */ @@ -437,9 +434,9 @@ struct ene_ub6250_info { u8 *bbuf; /* for 6250 code */ - struct SD_STATUS SD_Status; - struct MS_STATUS MS_Status; - struct SM_STATUS SM_Status; + u8 SD_Status; + u8 MS_Status; + u8 SM_Status; /* ----- SD Control Data ---------------- */ /*SD_REGISTER SD_Regs; */ @@ -602,7 +599,7 @@ static int sd_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; - if (info->SD_Status.Insert && info->SD_Status.Ready) + if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready)) return USB_STOR_TRANSPORT_GOOD; else { ene_sd_init(us); @@ -622,7 +619,7 @@ static int sd_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb) 0x0b, 0x00, 0x80, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 }; - if (info->SD_Status.WtP) + if (info->SD_Status & SD_WtP) 
usb_stor_set_xfer_buf(mediaWP, 12, srb); else usb_stor_set_xfer_buf(mediaNoWP, 12, srb); @@ -641,9 +638,9 @@ static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb) struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; usb_stor_dbg(us, "sd_scsi_read_capacity\n"); - if (info->SD_Status.HiCapacity) { + if (info->SD_Status & SD_HiCapacity) { bl_len = 0x200; - if (info->SD_Status.IsMMC) + if (info->SD_Status & SD_IsMMC) bl_num = info->HC_C_SIZE-1; else bl_num = (info->HC_C_SIZE + 1) * 1024 - 1; @@ -693,7 +690,7 @@ static int sd_scsi_read(struct us_data *us, struct scsi_cmnd *srb) return USB_STOR_TRANSPORT_ERROR; } - if (info->SD_Status.HiCapacity) + if (info->SD_Status & SD_HiCapacity) bnByte = bn; /* set up the command wrapper */ @@ -733,7 +730,7 @@ static int sd_scsi_write(struct us_data *us, struct scsi_cmnd *srb) return USB_STOR_TRANSPORT_ERROR; } - if (info->SD_Status.HiCapacity) + if (info->SD_Status & SD_HiCapacity) bnByte = bn; /* set up the command wrapper */ @@ -1455,7 +1452,7 @@ static int ms_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb) struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); /* pr_info("MS_SCSI_Test_Unit_Ready\n"); */ - if (info->MS_Status.Insert && info->MS_Status.Ready) { + if ((info->MS_Status & MS_Insert) && (info->MS_Status & MS_Ready)) { return USB_STOR_TRANSPORT_GOOD; } else { ene_ms_init(us); @@ -1475,7 +1472,7 @@ static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb) 0x0b, 0x00, 0x80, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 }; - if (info->MS_Status.WtP) + if (info->MS_Status & MS_WtP) usb_stor_set_xfer_buf(mediaWP, 12, srb); else usb_stor_set_xfer_buf(mediaNoWP, 12, srb); @@ -1494,7 +1491,7 @@ static int ms_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb) usb_stor_dbg(us, "ms_scsi_read_capacity\n"); bl_len = 0x200; - if (info->MS_Status.IsMSPro) + if (info->MS_Status & MS_IsMSPro) bl_num = info->MSP_TotalBlock - 1; else bl_num = info->MS_Lib.NumberOfLogBlock * info->MS_Lib.blockSize * 2 - 1; @@ -1649,7 +1646,7 @@ static int ms_scsi_read(struct us_data *us, struct scsi_cmnd *srb) if (bn > info->bl_num) return USB_STOR_TRANSPORT_ERROR; - if (info->MS_Status.IsMSPro) { + if (info->MS_Status & MS_IsMSPro) { result = ene_load_bincode(us, MSP_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Load MPS RW pattern Fail !!\n"); @@ -1750,7 +1747,7 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb) if (bn > info->bl_num) return USB_STOR_TRANSPORT_ERROR; - if (info->MS_Status.IsMSPro) { + if (info->MS_Status & MS_IsMSPro) { result = ene_load_bincode(us, MSP_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { pr_info("Load MSP RW pattern Fail !!\n"); @@ -1858,12 +1855,12 @@ static int ene_get_card_status(struct us_data *us, u8 *buf) tmpreg = (u16) reg4b; reg4b = *(u32 *)(&buf[0x14]); - if (info->SD_Status.HiCapacity && !info->SD_Status.IsMMC) + if ((info->SD_Status & SD_HiCapacity) && !(info->SD_Status & SD_IsMMC)) info->HC_C_SIZE = (reg4b >> 8) & 0x3fffff; info->SD_C_SIZE = ((tmpreg & 0x03) << 10) | (u16)(reg4b >> 22); info->SD_C_SIZE_MULT = (u8)(reg4b >> 7) & 0x07; - if (info->SD_Status.HiCapacity && info->SD_Status.IsMMC) + if ((info->SD_Status & SD_HiCapacity) && (info->SD_Status & SD_IsMMC)) info->HC_C_SIZE = *(u32 *)(&buf[0x100]); if (info->SD_READ_BL_LEN > SD_BLOCK_LEN) { @@ -2075,6 +2072,7 @@ static int ene_ms_init(struct us_data *us) u16 MSP_BlockSize, MSP_UserAreaBlocks; struct ene_ub6250_info *info = (struct 
ene_ub6250_info *) us->extra; u8 *bbuf = info->bbuf; + unsigned int s; printk(KERN_INFO "transport --- ENE_MSInit\n"); @@ -2099,15 +2097,16 @@ static int ene_ms_init(struct us_data *us) return USB_STOR_TRANSPORT_ERROR; } /* the same part to test ENE */ - info->MS_Status = *(struct MS_STATUS *) bbuf; - - if (info->MS_Status.Insert && info->MS_Status.Ready) { - printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert); - printk(KERN_INFO "Ready = %x\n", info->MS_Status.Ready); - printk(KERN_INFO "IsMSPro = %x\n", info->MS_Status.IsMSPro); - printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG); - printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP); - if (info->MS_Status.IsMSPro) { + info->MS_Status = bbuf[0]; + + s = info->MS_Status; + if ((s & MS_Insert) && (s & MS_Ready)) { + printk(KERN_INFO "Insert = %x\n", !!(s & MS_Insert)); + printk(KERN_INFO "Ready = %x\n", !!(s & MS_Ready)); + printk(KERN_INFO "IsMSPro = %x\n", !!(s & MS_IsMSPro)); + printk(KERN_INFO "IsMSPHG = %x\n", !!(s & MS_IsMSPHG)); + printk(KERN_INFO "WtP= %x\n", !!(s & MS_WtP)); + if (s & MS_IsMSPro) { MSP_BlockSize = (bbuf[6] << 8) | bbuf[7]; MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11]; info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; @@ -2168,17 +2167,17 @@ static int ene_sd_init(struct us_data *us) return USB_STOR_TRANSPORT_ERROR; } - info->SD_Status = *(struct SD_STATUS *) bbuf; - if (info->SD_Status.Insert && info->SD_Status.Ready) { - struct SD_STATUS *s = &info->SD_Status; + info->SD_Status = bbuf[0]; + if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready)) { + unsigned int s = info->SD_Status; ene_get_card_status(us, bbuf); - usb_stor_dbg(us, "Insert = %x\n", s->Insert); - usb_stor_dbg(us, "Ready = %x\n", s->Ready); - usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC); - usb_stor_dbg(us, "HiCapacity = %x\n", s->HiCapacity); - usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed); - usb_stor_dbg(us, "WtP = %x\n", s->WtP); + usb_stor_dbg(us, "Insert = %x\n", !!(s & SD_Insert)); + usb_stor_dbg(us, "Ready = %x\n", !!(s & SD_Ready)); + usb_stor_dbg(us, "IsMMC = %x\n", !!(s & SD_IsMMC)); + usb_stor_dbg(us, "HiCapacity = %x\n", !!(s & SD_HiCapacity)); + usb_stor_dbg(us, "HiSpeed = %x\n", !!(s & SD_HiSpeed)); + usb_stor_dbg(us, "WtP = %x\n", !!(s & SD_WtP)); } else { usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]); return USB_STOR_TRANSPORT_ERROR; @@ -2200,14 +2199,14 @@ static int ene_init(struct us_data *us) misc_reg03 = bbuf[0]; if (misc_reg03 & 0x01) { - if (!info->SD_Status.Ready) { + if (!(info->SD_Status & SD_Ready)) { result = ene_sd_init(us); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; } } if (misc_reg03 & 0x02) { - if (!info->MS_Status.Ready) { + if (!(info->MS_Status & MS_Ready)) { result = ene_ms_init(us); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; @@ -2306,14 +2305,14 @@ static int ene_transport(struct scsi_cmnd *srb, struct us_data *us) /*US_DEBUG(usb_stor_show_command(us, srb)); */ scsi_set_resid(srb, 0); - if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) + if (unlikely(!(info->SD_Status & SD_Ready) || (info->MS_Status & MS_Ready))) result = ene_init(us); if (result == USB_STOR_XFER_GOOD) { result = USB_STOR_TRANSPORT_ERROR; - if (info->SD_Status.Ready) + if (info->SD_Status & SD_Ready) result = sd_scsi_irp(us, srb); - if (info->MS_Status.Ready) + if (info->MS_Status & MS_Ready) result = ms_scsi_irp(us, srb); } return result; @@ -2377,7 +2376,6 @@ static int ene_ub6250_probe(struct usb_interface *intf, static int 
ene_ub6250_resume(struct usb_interface *iface) { - u8 tmp = 0; struct us_data *us = usb_get_intfdata(iface); struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); @@ -2389,17 +2387,16 @@ static int ene_ub6250_resume(struct usb_interface *iface) mutex_unlock(&us->dev_mutex); info->Power_IsResum = true; - /*info->SD_Status.Ready = 0; */ - info->SD_Status = *(struct SD_STATUS *)&tmp; - info->MS_Status = *(struct MS_STATUS *)&tmp; - info->SM_Status = *(struct SM_STATUS *)&tmp; + /* info->SD_Status &= ~SD_Ready; */ + info->SD_Status = 0; + info->MS_Status = 0; + info->SM_Status = 0; return 0; } static int ene_ub6250_reset_resume(struct usb_interface *iface) { - u8 tmp = 0; struct us_data *us = usb_get_intfdata(iface); struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); @@ -2411,10 +2408,10 @@ static int ene_ub6250_reset_resume(struct usb_interface *iface) * the device */ info->Power_IsResum = true; - /*info->SD_Status.Ready = 0; */ - info->SD_Status = *(struct SD_STATUS *)&tmp; - info->MS_Status = *(struct MS_STATUS *)&tmp; - info->SM_Status = *(struct SM_STATUS *)&tmp; + /* info->SD_Status &= ~SD_Ready; */ + info->SD_Status = 0; + info->MS_Status = 0; + info->SM_Status = 0; return 0; } diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 3789698d9d3c6431096f0320b280014b6ca4477b..0c423916d7bfa4e325243ea4b0e0b9ce8854a535 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -365,7 +365,7 @@ static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len) buf = kmalloc(len, GFP_NOIO); if (buf == NULL) - return USB_STOR_TRANSPORT_ERROR; + return -ENOMEM; usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 65d6f8fd81e706a25e12937d5d81bfb78bdd2535..577ff786f11b1295b2715685f82b5e4395da0dc4 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -1482,11 +1482,25 @@ static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev) return ndev->mvdev.mlx_features; } -static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features) +static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features) { + /* Minimum features to expect */ if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM))) return -EOPNOTSUPP; + /* Double check features combination sent down by the driver. + * Fail invalid features due to absence of the depended feature. + * + * Per VIRTIO v1.1 specification, section 5.1.3.1 Feature bit + * requirements: "VIRTIO_NET_F_MQ Requires VIRTIO_NET_F_CTRL_VQ". + * By failing the invalid features sent down by untrusted drivers, + * we're assured the assumption made upon is_index_valid() and + * is_ctrl_vq_idx() will not be compromised. 
+ */ + if ((features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) == + BIT_ULL(VIRTIO_NET_F_MQ)) + return -EINVAL; + return 0; } @@ -1544,7 +1558,7 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features) print_features(mvdev, features, true); - err = verify_min_features(mvdev, features); + err = verify_driver_features(mvdev, features); if (err) return err; diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c index 9636a2afaecd1b4599473debb5ee7dc7908055b0..3626c21501017e95163572fe8ee5cdb8a40bc75b 100644 --- a/drivers/vfio/platform/vfio_amba.c +++ b/drivers/vfio/platform/vfio_amba.c @@ -71,18 +71,13 @@ static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int vfio_amba_remove(struct amba_device *adev) +static void vfio_amba_remove(struct amba_device *adev) { - struct vfio_platform_device *vdev; - - vdev = vfio_platform_remove_common(&adev->dev); - if (vdev) { - kfree(vdev->name); - kfree(vdev); - return 0; - } + struct vfio_platform_device *vdev = + vfio_platform_remove_common(&adev->dev); - return -EINVAL; + kfree(vdev->name); + kfree(vdev); } static const struct amba_id pl330_ids[] = { diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index f556b572c86dd4ce2f617cae1afa3adb79e0b67c..1422cbb37013713e152301907695a4871a1606fd 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -1096,7 +1096,7 @@ static int vfio_iova_dirty_log_clear(u64 __user *bitmap, unsigned long bitmap_size; unsigned long *bitmap_buffer = NULL; bool clear_valid; - int rs, re, start, end, dma_offset; + unsigned int rs, re, start, end, dma_offset; int ret = 0; bitmap_size = DIRTY_BITMAP_BYTES(size >> pgshift); @@ -1128,7 +1128,7 @@ static int vfio_iova_dirty_log_clear(u64 __user *bitmap, end = (end_iova - iova) >> pgshift; bitmap_for_each_set_region(bitmap_buffer, rs, re, start, end) { clear_valid = true; - riova = iova + (rs << pgshift); + riova = iova + ((unsigned long)rs << pgshift); dma_offset = (riova - dma->iova) >> pgshift; bitmap_clear(dma->bitmap, dma_offset, re - rs); } diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 5cd1ee66d23266931202d3229720905b83a2d318..5d2d6ce7ff41340f2d4ec7d07601d2b6f3fbea84 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -573,16 +573,18 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) return ret; } -static int vhost_vsock_stop(struct vhost_vsock *vsock) +static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner) { size_t i; - int ret; + int ret = 0; mutex_lock(&vsock->dev.mutex); - ret = vhost_dev_check_owner(&vsock->dev); - if (ret) - goto err; + if (check_owner) { + ret = vhost_dev_check_owner(&vsock->dev); + if (ret) + goto err; + } for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { struct vhost_virtqueue *vq = &vsock->vqs[i]; @@ -695,9 +697,15 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file) /* Iterating over all connections for all CIDs to find orphans is * inefficient. Room for improvement here. */ - vsock_for_each_connected_socket(vhost_vsock_reset_orphans); + vsock_for_each_connected_socket(&vhost_transport.transport, + vhost_vsock_reset_orphans); - vhost_vsock_stop(vsock); + /* Don't check the owner, because we are in the release path, so we + * need to stop the vsock device in any case. + * vhost_vsock_stop() can not fail in this case, so we don't need to + * check the return code. 
+ */ + vhost_vsock_stop(vsock, false); vhost_vsock_flush(vsock); vhost_dev_stop(&vsock->dev); @@ -801,7 +809,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl, if (start) return vhost_vsock_start(vsock); else - return vhost_vsock_stop(vsock); + return vhost_vsock_stop(vsock, true); case VHOST_GET_FEATURES: features = VHOST_VSOCK_FEATURES; if (copy_to_user(argp, &features, sizeof(features))) diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index b7682de412d83fd0e64152c5e301c57d26e5a19a..33595cc4778e9f18c83e2a482b55665f1609eaf8 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -925,7 +925,7 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id) return ret; } -static int clcdfb_remove(struct amba_device *dev) +static void clcdfb_remove(struct amba_device *dev) { struct clcd_fb *fb = amba_get_drvdata(dev); @@ -942,8 +942,6 @@ static int clcdfb_remove(struct amba_device *dev) kfree(fb); amba_release_regions(dev); - - return 0; } static const struct amba_id clcdfb_id_table[] = { diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c index f253daa05d9d3872777077692c51e0aa2d9fbcdb..a7a1739cff1bd4fd78585b7548961983be20f093 100644 --- a/drivers/video/fbdev/atafb.c +++ b/drivers/video/fbdev/atafb.c @@ -1691,9 +1691,9 @@ static int falcon_setcolreg(unsigned int regno, unsigned int red, ((blue & 0xfc00) >> 8)); if (regno < 16) { shifter_tt.color_reg[regno] = - (((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) | - (((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) | - ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12); + ((((red & 0xe000) >> 13) | ((red & 0x1000) >> 12)) << 8) | + ((((green & 0xe000) >> 13) | ((green & 0x1000) >> 12)) << 4) | + ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12); ((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11)); @@ -1979,9 +1979,9 @@ static int stste_setcolreg(unsigned int regno, unsigned int red, green >>= 12; if (ATARIHW_PRESENT(EXTD_SHIFTER)) shifter_tt.color_reg[regno] = - (((red & 0xe) >> 1) | ((red & 1) << 3) << 8) | - (((green & 0xe) >> 1) | ((green & 1) << 3) << 4) | - ((blue & 0xe) >> 1) | ((blue & 1) << 3); + ((((red & 0xe) >> 1) | ((red & 1) << 3)) << 8) | + ((((green & 0xe) >> 1) | ((green & 1) << 3)) << 4) | + ((blue & 0xe) >> 1) | ((blue & 1) << 3); else shifter_tt.color_reg[regno] = ((red & 0xe) << 7) | diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 355b6120dc4f0dcd77433ac4c8753ffa5561e8af..1fc8de4ecbebf9e5e2974c3c97cba94d536d73c4 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -1062,15 +1062,16 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev) INIT_LIST_HEAD(&info->modelist); - if (pdev->dev.of_node) { - ret = atmel_lcdfb_of_init(sinfo); - if (ret) - goto free_info; - } else { + if (!pdev->dev.of_node) { dev_err(dev, "cannot get default configuration\n"); goto free_info; } + ret = atmel_lcdfb_of_init(sinfo); + if (ret) + goto free_info; + + ret = -ENODEV; if (!sinfo->config) goto free_info; diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c index 15a9ee7cd734d3fa1147a9cacbe65cb06bf5b3e4..b4980bc2985e3c9cf971b2c1bb0516f84c77e5f2 100644 --- a/drivers/video/fbdev/cirrusfb.c +++ b/drivers/video/fbdev/cirrusfb.c @@ -469,7 +469,7 @@ static int cirrusfb_check_mclk(struct fb_info *info, long freq) return 0; } -static int 
cirrusfb_check_pixclock(const struct fb_var_screeninfo *var, +static int cirrusfb_check_pixclock(struct fb_var_screeninfo *var, struct fb_info *info) { long freq; @@ -478,9 +478,7 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var, unsigned maxclockidx = var->bits_per_pixel >> 3; /* convert from ps to kHz */ - freq = PICOS2KHZ(var->pixclock); - - dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq); + freq = PICOS2KHZ(var->pixclock ? : 1); maxclock = cirrusfb_board_info[cinfo->btype].maxclock[maxclockidx]; cinfo->multiplexing = 0; @@ -488,11 +486,13 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var, /* If the frequency is greater than we can support, we might be able * to use multiplexing for the video mode */ if (freq > maxclock) { - dev_err(info->device, - "Frequency greater than maxclock (%ld kHz)\n", - maxclock); - return -EINVAL; + var->pixclock = KHZ2PICOS(maxclock); + + while ((freq = PICOS2KHZ(var->pixclock)) > maxclock) + var->pixclock++; } + dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq); + /* * Additional constraint: 8bpp uses DAC clock doubling to allow maximum * pixel clock diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c index 2df56bd303d25d125c6a7813ed0bf2787abc1ec9..bd59e7b11ed5305d922b42d33d0dd01c98edb09f 100644 --- a/drivers/video/fbdev/controlfb.c +++ b/drivers/video/fbdev/controlfb.c @@ -64,10 +64,12 @@ #undef in_le32 #undef out_le32 #define in_8(addr) 0 -#define out_8(addr, val) +#define out_8(addr, val) (void)(val) #define in_le32(addr) 0 -#define out_le32(addr, val) +#define out_le32(addr, val) (void)(val) +#ifndef pgprot_cached_wthru #define pgprot_cached_wthru(prot) (prot) +#endif #else static void invalid_vram_cache(void __force *addr) { diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c index 55d2bd0ce5c0229cddf48cb0960a59e31d604060..64843464c66135b4c5c083418c8c89ae237f052a 100644 --- a/drivers/video/fbdev/core/fbcvt.c +++ b/drivers/video/fbdev/core/fbcvt.c @@ -214,9 +214,11 @@ static u32 fb_cvt_aspect_ratio(struct fb_cvt_data *cvt) static void fb_cvt_print_name(struct fb_cvt_data *cvt) { u32 pixcount, pixcount_mod; - int cnt = 255, offset = 0, read = 0; - u8 *buf = kzalloc(256, GFP_KERNEL); + int size = 256; + int off = 0; + u8 *buf; + buf = kzalloc(size, GFP_KERNEL); if (!buf) return; @@ -224,43 +226,30 @@ static void fb_cvt_print_name(struct fb_cvt_data *cvt) pixcount_mod = (cvt->xres * (cvt->yres/cvt->interlace)) % 1000000; pixcount_mod /= 1000; - read = snprintf(buf+offset, cnt, "fbcvt: %dx%d@%d: CVT Name - ", - cvt->xres, cvt->yres, cvt->refresh); - offset += read; - cnt -= read; + off += scnprintf(buf + off, size - off, "fbcvt: %dx%d@%d: CVT Name - ", + cvt->xres, cvt->yres, cvt->refresh); - if (cvt->status) - snprintf(buf+offset, cnt, "Not a CVT standard - %d.%03d Mega " - "Pixel Image\n", pixcount, pixcount_mod); - else { - if (pixcount) { - read = snprintf(buf+offset, cnt, "%d", pixcount); - cnt -= read; - offset += read; - } + if (cvt->status) { + off += scnprintf(buf + off, size - off, + "Not a CVT standard - %d.%03d Mega Pixel Image\n", + pixcount, pixcount_mod); + } else { + if (pixcount) + off += scnprintf(buf + off, size - off, "%d", pixcount); - read = snprintf(buf+offset, cnt, ".%03dM", pixcount_mod); - cnt -= read; - offset += read; + off += scnprintf(buf + off, size - off, ".%03dM", pixcount_mod); if (cvt->aspect_ratio == 0) - read = snprintf(buf+offset, cnt, "3"); + off += scnprintf(buf + off, size - off, "3"); else if 
(cvt->aspect_ratio == 3) - read = snprintf(buf+offset, cnt, "4"); + off += scnprintf(buf + off, size - off, "4"); else if (cvt->aspect_ratio == 1 || cvt->aspect_ratio == 4) - read = snprintf(buf+offset, cnt, "9"); + off += scnprintf(buf + off, size - off, "9"); else if (cvt->aspect_ratio == 2) - read = snprintf(buf+offset, cnt, "A"); - else - read = 0; - cnt -= read; - offset += read; - - if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) { - read = snprintf(buf+offset, cnt, "-R"); - cnt -= read; - offset += read; - } + off += scnprintf(buf + off, size - off, "A"); + + if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) + off += scnprintf(buf + off, size - off, "-R"); } printk(KERN_INFO "%s\n", buf); diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index 570439b326552a674e89a59cce9abce202f7856a..daaa99818d3b7532e8d31c8cce299907dcfd9dba 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -1377,7 +1377,7 @@ static struct video_board vbG200 = { .lowlevel = &matrox_G100 }; static struct video_board vbG200eW = { - .maxvram = 0x800000, + .maxvram = 0x100000, .maxdisplayable = 0x800000, .accelID = FB_ACCEL_MATROX_MGAG200, .lowlevel = &matrox_G100 diff --git a/drivers/video/fbdev/nvidia/nv_i2c.c b/drivers/video/fbdev/nvidia/nv_i2c.c index d7994a1732459d831775374d61f135ecaf5e06c5..0b48965a6420c2a2a27928a55a22aee3cc1d57e2 100644 --- a/drivers/video/fbdev/nvidia/nv_i2c.c +++ b/drivers/video/fbdev/nvidia/nv_i2c.c @@ -86,7 +86,7 @@ static int nvidia_setup_i2c_bus(struct nvidia_i2c_chan *chan, const char *name, { int rc; - strcpy(chan->adapter.name, name); + strscpy(chan->adapter.name, name, sizeof(chan->adapter.name)); chan->adapter.owner = THIS_MODULE; chan->adapter.class = i2c_class; chan->adapter.algo_data = &chan->algo; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c index b4a1aefff7661da1650c9ba230c4dcb5dd96dc89..777f6d66c28c3d5ba69113740fad7723e6a53a07 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c @@ -251,6 +251,7 @@ static int dvic_probe_of(struct platform_device *pdev) adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); if (adapter_node) { adapter = of_get_i2c_adapter_by_node(adapter_node); + of_node_put(adapter_node); if (adapter == NULL) { dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); omap_dss_put_device(ddata->in); diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 4b0793abdd84b4509afea88329230c56261e1d80..a2c7c5cb1523460508b439dbfde2e0efa170d158 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -409,7 +409,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%d\n", errors); + return sysfs_emit(buf, "%d\n", errors); } static ssize_t dsicm_hw_revision_show(struct device *dev, @@ -439,7 +439,7 @@ static ssize_t dsicm_hw_revision_show(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3); + return sysfs_emit(buf, "%02x.%02x.%02x\n", id1, id2, id3); } static ssize_t dsicm_store_ulps(struct device *dev, @@ -487,7 +487,7 @@ static ssize_t dsicm_show_ulps(struct device *dev, t = ddata->ulps_enabled; mutex_unlock(&ddata->lock); - return 
snprintf(buf, PAGE_SIZE, "%u\n", t); + return sysfs_emit(buf, "%u\n", t); } static ssize_t dsicm_store_ulps_timeout(struct device *dev, @@ -532,7 +532,7 @@ static ssize_t dsicm_show_ulps_timeout(struct device *dev, t = ddata->ulps_timeout; mutex_unlock(&ddata->lock); - return snprintf(buf, PAGE_SIZE, "%u\n", t); + return sysfs_emit(buf, "%u\n", t); } static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL); diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c index 1293515e4b1692e1917411c5eaf6159581454825..0cbc5b9183f895c4a1d994cc2ca35731b8d364fe 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c @@ -476,7 +476,7 @@ static ssize_t show_cabc_available_modes(struct device *dev, int i; if (!ddata->has_cabc) - return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]); + return sysfs_emit(buf, "%s\n", cabc_modes[0]); for (i = 0, len = 0; len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++) diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c index bb85b21f072487239f87005191f8c3fa117a38ac..9f6ef9e04d9ce41a57b2e1791183390a008599a0 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c @@ -169,7 +169,7 @@ static ssize_t tpo_td043_vmirror_show(struct device *dev, { struct panel_drv_data *ddata = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror); + return sysfs_emit(buf, "%d\n", ddata->vmirror); } static ssize_t tpo_td043_vmirror_store(struct device *dev, @@ -199,7 +199,7 @@ static ssize_t tpo_td043_mode_show(struct device *dev, { struct panel_drv_data *ddata = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode); + return sysfs_emit(buf, "%d\n", ddata->mode); } static ssize_t tpo_td043_mode_store(struct device *dev, diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index 0dbc6bf8268acf5fa0528f28f047372cbd4fd5e8..092a1caa1208e191b016a0ec2097208e5e2d960f 100644 --- a/drivers/video/fbdev/sm712fb.c +++ b/drivers/video/fbdev/sm712fb.c @@ -1047,7 +1047,7 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, if (count + p > total_size) count = total_size - p; - buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL); + buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -1059,25 +1059,14 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, while (count) { c = (count > PAGE_SIZE) ? PAGE_SIZE : count; dst = buffer; - for (i = c >> 2; i--;) { - *dst = fb_readl(src++); - *dst = big_swap(*dst); + for (i = (c + 3) >> 2; i--;) { + u32 val; + + val = fb_readl(src); + *dst = big_swap(val); + src++; dst++; } - if (c & 3) { - u8 *dst8 = (u8 *)dst; - u8 __iomem *src8 = (u8 __iomem *)src; - - for (i = c & 3; i--;) { - if (i & 1) { - *dst8++ = fb_readb(++src8); - } else { - *dst8++ = fb_readb(--src8); - src8 += 2; - } - } - src = (u32 __iomem *)src8; - } if (copy_to_user(buf, buffer, c)) { err = -EFAULT; @@ -1130,7 +1119,7 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf, count = total_size - p; } - buffer = kmalloc((count > PAGE_SIZE) ? 
PAGE_SIZE : count, GFP_KERNEL); + buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -1148,24 +1137,11 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf, break; } - for (i = c >> 2; i--;) { - fb_writel(big_swap(*src), dst++); + for (i = (c + 3) >> 2; i--;) { + fb_writel(big_swap(*src), dst); + dst++; src++; } - if (c & 3) { - u8 *src8 = (u8 *)src; - u8 __iomem *dst8 = (u8 __iomem *)dst; - - for (i = c & 3; i--;) { - if (i & 1) { - fb_writeb(*src8++, ++dst8); - } else { - fb_writeb(*src8++, --dst8); - dst8 += 2; - } - } - dst = (u32 __iomem *)dst8; - } *ppos += c; buf += c; diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index bfac3ee4a64228160adc5d11e85e66e7b7bcf9a8..28768c272b73d3e57ff4212dfe44f9c81d4af56a 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c @@ -1656,6 +1656,7 @@ static int ufx_usb_probe(struct usb_interface *interface, info->par = dev; info->pseudo_palette = dev->pseudo_palette; info->fbops = &ufx_ops; + INIT_LIST_HEAD(&info->modelist); retval = fb_alloc_cmap(&info->cmap, 256, 0); if (retval < 0) { @@ -1666,8 +1667,6 @@ static int ufx_usb_probe(struct usb_interface *interface, INIT_DELAYED_WORK(&dev->free_framebuffer_work, ufx_free_framebuffer_work); - INIT_LIST_HEAD(&info->modelist); - retval = ufx_reg_read(dev, 0x3000, &id_rev); check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval); dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev); diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index b9cdd02c100095d99ef929571115f82c28290f29..90f48b71fd8f790c23a0f7d109a286a8e515b542 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1426,7 +1426,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev, struct device_attribute *a, char *buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->bytes_rendered)); } @@ -1434,7 +1434,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev, struct device_attribute *a, char *buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->bytes_identical)); } @@ -1442,7 +1442,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev, struct device_attribute *a, char *buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->bytes_sent)); } @@ -1450,7 +1450,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev, struct device_attribute *a, char *buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->cpu_kcycles_used)); } diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c index d96ab28f8ce4ae54b384c3a4d8f2b9bfa085187e..4e641a780726e784892110c72b71a379fd94637a 100644 --- a/drivers/video/fbdev/w100fb.c +++ b/drivers/video/fbdev/w100fb.c @@ -770,12 +770,18 @@ static int w100fb_probe(struct platform_device *pdev) fb_dealloc_cmap(&info->cmap); kfree(info->pseudo_palette); } - if (remapped_fbuf != NULL) + if (remapped_fbuf != NULL) { iounmap(remapped_fbuf); - if (remapped_regs 
!= NULL) + remapped_fbuf = NULL; + } + if (remapped_regs != NULL) { iounmap(remapped_regs); - if (remapped_base != NULL) + remapped_regs = NULL; + } + if (remapped_base != NULL) { iounmap(remapped_base); + remapped_base = NULL; + } if (info) framebuffer_release(info); return err; @@ -795,8 +801,11 @@ static int w100fb_remove(struct platform_device *pdev) fb_dealloc_cmap(&info->cmap); iounmap(remapped_base); + remapped_base = NULL; iounmap(remapped_regs); + remapped_regs = NULL; iounmap(remapped_fbuf); + remapped_fbuf = NULL; framebuffer_release(info); diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 5c53098755a3594c2e429054632d2a8080faf3d6..441bc057896f5fd6521791467f2c152385a1a5b0 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -167,14 +167,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status) } EXPORT_SYMBOL_GPL(virtio_add_status); -int virtio_finalize_features(struct virtio_device *dev) +/* Do some validation, then set FEATURES_OK */ +static int virtio_features_ok(struct virtio_device *dev) { - int ret = dev->config->finalize_features(dev); unsigned status; + int ret; might_sleep(); - if (ret) - return ret; ret = arch_has_restricted_virtio_memory_access(); if (ret) { @@ -203,7 +202,6 @@ int virtio_finalize_features(struct virtio_device *dev) } return 0; } -EXPORT_SYMBOL_GPL(virtio_finalize_features); static int virtio_dev_probe(struct device *_d) { @@ -240,17 +238,6 @@ static int virtio_dev_probe(struct device *_d) driver_features_legacy = driver_features; } - /* - * Some devices detect legacy solely via F_VERSION_1. Write - * F_VERSION_1 to force LE config space accesses before FEATURES_OK for - * these when needed. - */ - if (drv->validate && !virtio_legacy_is_little_endian() - && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) { - dev->features = BIT_ULL(VIRTIO_F_VERSION_1); - dev->config->finalize_features(dev); - } - if (device_features & (1ULL << VIRTIO_F_VERSION_1)) dev->features = driver_features & device_features; else @@ -261,13 +248,26 @@ static int virtio_dev_probe(struct device *_d) if (device_features & (1ULL << i)) __virtio_set_bit(dev, i); + err = dev->config->finalize_features(dev); + if (err) + goto err; + if (drv->validate) { + u64 features = dev->features; + err = drv->validate(dev); if (err) goto err; + + /* Did validation change any features? Then write them again. */ + if (features != dev->features) { + err = dev->config->finalize_features(dev); + if (err) + goto err; + } } - err = virtio_finalize_features(dev); + err = virtio_features_ok(dev); if (err) goto err; @@ -438,7 +438,11 @@ int virtio_device_restore(struct virtio_device *dev) /* We have a driver! 
*/ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); - ret = virtio_finalize_features(dev); + ret = dev->config->finalize_features(dev); + if (ret) + goto err; + + ret = virtio_features_ok(dev); if (ret) goto err; diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c index 359302f71f7efef34ccc419842a0079be2e364b4..ae7f9357bb871aa44911609e60529116565c98d5 100644 --- a/drivers/watchdog/rti_wdt.c +++ b/drivers/watchdog/rti_wdt.c @@ -229,6 +229,7 @@ static int rti_wdt_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(dev); if (ret) { pm_runtime_put_noidle(dev); + pm_runtime_disable(&pdev->dev); return dev_err_probe(dev, ret, "runtime pm failed\n"); } diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index 190d26e2e75f9e7a8c5bf2fd35fe4c202d9380f3..2815f78d22bb354afc6d7a2f73547d607c7b0406 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -304,14 +304,12 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int sp805_wdt_remove(struct amba_device *adev) +static void sp805_wdt_remove(struct amba_device *adev) { struct sp805_wdt *wdt = amba_get_drvdata(adev); watchdog_unregister_device(&wdt->wdd); watchdog_set_drvdata(&wdt->wdd, NULL); - - return 0; } static int __maybe_unused sp805_wdt_suspend(struct device *dev) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 04c4aa7a1df2c59716abaa12aefdf29fd7d4630c..ed507d27034b1dc804e6482d7403bd6bcc0025cd 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -170,8 +170,8 @@ static int padzero(unsigned long elf_bss) static int create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, - unsigned long load_addr, unsigned long interp_load_addr, - unsigned long e_entry) + unsigned long interp_load_addr, + unsigned long e_entry, unsigned long phdr_addr) { struct mm_struct *mm = current->mm; unsigned long p = bprm->p; @@ -256,7 +256,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP); NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE); NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC); - NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff); + NEW_AUX_ENT(AT_PHDR, phdr_addr); NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr)); NEW_AUX_ENT(AT_PHNUM, exec->e_phnum); NEW_AUX_ENT(AT_BASE, interp_load_addr); @@ -820,7 +820,7 @@ static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr, static int load_elf_binary(struct linux_binprm *bprm) { struct file *interpreter = NULL; /* to shut gcc up */ - unsigned long load_addr = 0, load_bias = 0; + unsigned long load_addr, load_bias = 0, phdr_addr = 0; int load_addr_set = 0; unsigned long error; struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL; @@ -1153,6 +1153,17 @@ static int load_elf_binary(struct linux_binprm *bprm) reloc_func_desc = load_bias; } } + + /* + * Figure out which segment in the file contains the Program + * Header table, and map to the associated memory address. 
+ */ + if (elf_ppnt->p_offset <= elf_ex->e_phoff && + elf_ex->e_phoff < elf_ppnt->p_offset + elf_ppnt->p_filesz) { + phdr_addr = elf_ex->e_phoff - elf_ppnt->p_offset + + elf_ppnt->p_vaddr; + } + k = elf_ppnt->p_vaddr; if ((elf_ppnt->p_flags & PF_X) && k < start_code) start_code = k; @@ -1188,6 +1199,7 @@ static int load_elf_binary(struct linux_binprm *bprm) } e_entry = elf_ex->e_entry + load_bias; + phdr_addr += load_bias; elf_bss += load_bias; elf_brk += load_bias; start_code += load_bias; @@ -1251,8 +1263,8 @@ static int load_elf_binary(struct linux_binprm *bprm) goto out; #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ - retval = create_elf_tables(bprm, elf_ex, - load_addr, interp_load_addr, e_entry); + retval = create_elf_tables(bprm, elf_ex, interp_load_addr, + e_entry, phdr_addr); if (retval < 0) goto out; diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index e39a12037b40362dec3ffb9e853cf72b7c6c5521..a02e38fb696c17f62d81a9ef0358bf376d411c48 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1197,6 +1197,14 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) if (!fs_info->quota_root) goto out; + /* + * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to + * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs + * to lock that mutex while holding a transaction handle and the rescan + * worker needs to commit a transaction. + */ + mutex_unlock(&fs_info->qgroup_ioctl_lock); + /* * Request qgroup rescan worker to complete and wait for it. This wait * must be done before transaction start for quota disable since it may @@ -1204,7 +1212,6 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) */ clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); btrfs_qgroup_wait_for_completion(fs_info, false); - mutex_unlock(&fs_info->qgroup_ioctl_lock); /* * 1 For the root item diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c index 3a3102bc15a057291154e3a44c1e369fd822bda3..4b3ae0faf548e5df94ff2e38b3d483fd42b2d106 100644 --- a/fs/btrfs/reflink.c +++ b/fs/btrfs/reflink.c @@ -503,8 +503,11 @@ static int btrfs_clone(struct inode *src, struct inode *inode, */ ASSERT(key.offset == 0); ASSERT(datal <= fs_info->sectorsize); - if (key.offset != 0 || datal > fs_info->sectorsize) - return -EUCLEAN; + if (WARN_ON(key.offset != 0) || + WARN_ON(datal > fs_info->sectorsize)) { + ret = -EUCLEAN; + goto out; + } ret = clone_copy_inline_extent(inode, path, &new_key, drop_start, datal, size, diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index d4a3a56726aa8cba77739a45e8f591d420370838..32f1b15b25dcc34b027b3f4883e36b12e4793e4c 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -947,6 +947,7 @@ static int check_dev_item(struct extent_buffer *leaf, struct btrfs_key *key, int slot) { struct btrfs_dev_item *ditem; + const u32 item_size = btrfs_item_size_nr(leaf, slot); if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) { dev_item_err(leaf, slot, @@ -954,6 +955,13 @@ static int check_dev_item(struct extent_buffer *leaf, key->objectid, BTRFS_DEV_ITEMS_OBJECTID); return -EUCLEAN; } + + if (unlikely(item_size != sizeof(*ditem))) { + dev_item_err(leaf, slot, "invalid item size: has %u expect %zu", + item_size, sizeof(*ditem)); + return -EUCLEAN; + } + ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); if (btrfs_device_id(leaf, ditem) != key->offset) { dev_item_err(leaf, slot, @@ -989,6 +997,7 @@ static int check_inode_item(struct extent_buffer *leaf, struct btrfs_inode_item *iitem; u64 super_gen = 
btrfs_super_generation(fs_info->super_copy); u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777); + const u32 item_size = btrfs_item_size_nr(leaf, slot); u32 mode; int ret; @@ -996,6 +1005,12 @@ static int check_inode_item(struct extent_buffer *leaf, if (ret < 0) return ret; + if (unlikely(item_size != sizeof(*iitem))) { + generic_err(leaf, slot, "invalid item size: has %u expect %zu", + item_size, sizeof(*iitem)); + return -EUCLEAN; + } + iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item); /* Here we use super block generation + 1 to handle log tree */ diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 09ef6419e890a6af419c4ada7cc64ac2c16ead48..62784b99a80741bfecc9ae40d7fffc27cd34f099 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1286,6 +1286,15 @@ static int unlink_old_inode_refs(struct btrfs_trans_handle *trans, inode, name, namelen); kfree(name); iput(dir); + /* + * Whenever we need to check if a name exists or not, we + * check the subvolume tree. So after an unlink we must + * run delayed items, so that future checks for a name + * during log replay see that the name does not exists + * anymore. + */ + if (!ret) + ret = btrfs_run_delayed_items(trans); if (ret) goto out; goto again; @@ -1537,6 +1546,15 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, */ if (!ret && inode->i_nlink == 0) inc_nlink(inode); + /* + * Whenever we need to check if a name exists or + * not, we check the subvolume tree. So after an + * unlink we must run delayed items, so that future + * checks for a name during log replay see that the + * name does not exists anymore. + */ + if (!ret) + ret = btrfs_run_delayed_items(trans); } if (ret < 0) goto out; @@ -4297,7 +4315,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans, /* * Log all prealloc extents beyond the inode's i_size to make sure we do not - * lose them after doing a fast fsync and replaying the log. We scan the + * lose them after doing a full/fast fsync and replaying the log. We scan the * subvolume's root instead of iterating the inode's extent map tree because * otherwise we can log incorrect extent items based on extent map conversion. * That can happen due to the fact that extent maps are merged when they @@ -5084,6 +5102,7 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, struct btrfs_log_ctx *ctx, bool *need_log_inode_item) { + const u64 i_size = i_size_read(&inode->vfs_inode); struct btrfs_root *root = inode->root; int ins_start_slot = 0; int ins_nr = 0; @@ -5104,13 +5123,21 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, if (min_key->type > max_key->type) break; - if (min_key->type == BTRFS_INODE_ITEM_KEY) + if (min_key->type == BTRFS_INODE_ITEM_KEY) { *need_log_inode_item = false; - - if ((min_key->type == BTRFS_INODE_REF_KEY || - min_key->type == BTRFS_INODE_EXTREF_KEY) && - inode->generation == trans->transid && - !recursive_logging) { + } else if (min_key->type == BTRFS_EXTENT_DATA_KEY && + min_key->offset >= i_size) { + /* + * Extents at and beyond eof are logged with + * btrfs_log_prealloc_extents(). + * Only regular files have BTRFS_EXTENT_DATA_KEY keys, + * and no keys greater than that, so bail out. 
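Both tree-checker hunks apply the same defensive rule: before casting an on-disk item to a fixed-size structure, check that the recorded item size equals sizeof() of that structure and treat any mismatch as corruption. A generic sketch of the pattern, not btrfs code:

/* Generic sketch: validate a length-prefixed on-disk record before casting. */
#include <stdint.h>
#include <string.h>
#include <errno.h>

struct disk_inode_item {
    uint64_t generation;
    uint64_t size;
    uint32_t mode;
} __attribute__((packed));

/*
 * 'buf'/'len' describe the raw item bytes read from disk. Fill *out only if
 * the size is exactly what the structure expects.
 */
static int read_inode_item(const void *buf, uint32_t len,
                           struct disk_inode_item *out)
{
    if (len != sizeof(*out))
        return -EUCLEAN;   /* corrupted metadata: size mismatch */
    memcpy(out, buf, sizeof(*out));
    return 0;
}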
+ */ + break; + } else if ((min_key->type == BTRFS_INODE_REF_KEY || + min_key->type == BTRFS_INODE_EXTREF_KEY) && + inode->generation == trans->transid && + !recursive_logging) { u64 other_ino = 0; u64 other_parent = 0; @@ -5141,10 +5168,8 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, btrfs_release_path(path); goto next_key; } - } - - /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ - if (min_key->type == BTRFS_XATTR_ITEM_KEY) { + } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) { + /* Skip xattrs, logged later with btrfs_log_all_xattrs() */ if (ins_nr == 0) goto next_slot; ret = copy_items(trans, inode, dst_path, path, @@ -5197,9 +5222,21 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, break; } } - if (ins_nr) + if (ins_nr) { ret = copy_items(trans, inode, dst_path, path, ins_start_slot, ins_nr, inode_only, logged_isize); + if (ret) + return ret; + } + + if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) { + /* + * Release the path because otherwise we might attempt to double + * lock the same leaf with btrfs_log_prealloc_extents() below. + */ + btrfs_release_path(path); + ret = btrfs_log_prealloc_extents(trans, inode, dst_path); + } return ret; } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f0ed29a9a6f11a3e923d4eb61574324cc4482747..aa5a4d759ca236a10df969bed3f0080cb48e582e 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -864,6 +864,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type, out_super: deactivate_locked_super(sb); + return root; out: cifs_cleanup_volume_info(volume_info); return root; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index fdb1d660bd136ed2587746c860926c1ad5dbe321..0e8f484031da981c64df21bfd720b7f109d83a69 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1526,6 +1526,7 @@ smb2_ioctl_query_info(const unsigned int xid, unsigned int size[2]; void *data[2]; int create_options = is_dir ? 
CREATE_NOT_FILE : CREATE_NOT_DIR; + void (*free_req1_func)(struct smb_rqst *r); vars = kzalloc(sizeof(*vars), GFP_ATOMIC); if (vars == NULL) @@ -1535,27 +1536,29 @@ smb2_ioctl_query_info(const unsigned int xid, resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; - if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) - goto e_fault; - + if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) { + rc = -EFAULT; + goto free_vars; + } if (qi.output_buffer_length > 1024) { - kfree(vars); - return -EINVAL; + rc = -EINVAL; + goto free_vars; } if (!ses || !server) { - kfree(vars); - return -EIO; + rc = -EIO; + goto free_vars; } if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; - buffer = memdup_user(arg + sizeof(struct smb_query_info), - qi.output_buffer_length); - if (IS_ERR(buffer)) { - kfree(vars); - return PTR_ERR(buffer); + if (qi.output_buffer_length) { + buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length); + if (IS_ERR(buffer)) { + rc = PTR_ERR(buffer); + goto free_vars; + } } /* Open */ @@ -1593,45 +1596,45 @@ smb2_ioctl_query_info(const unsigned int xid, rc = SMB2_open_init(tcon, server, &rqst[0], &oplock, &oparms, path); if (rc) - goto iqinf_exit; + goto free_output_buffer; smb2_set_next_command(tcon, &rqst[0]); /* Query */ if (qi.flags & PASSTHRU_FSCTL) { /* Can eventually relax perm check since server enforces too */ - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_ADMIN)) { rc = -EPERM; - else { - rqst[1].rq_iov = &vars->io_iov[0]; - rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; - - rc = SMB2_ioctl_init(tcon, server, - &rqst[1], - COMPOUND_FID, COMPOUND_FID, - qi.info_type, true, buffer, - qi.output_buffer_length, - CIFSMaxBufSize - - MAX_SMB2_CREATE_RESPONSE_SIZE - - MAX_SMB2_CLOSE_RESPONSE_SIZE); + goto free_open_req; } + rqst[1].rq_iov = &vars->io_iov[0]; + rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; + + rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, + qi.info_type, true, buffer, qi.output_buffer_length, + CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - + MAX_SMB2_CLOSE_RESPONSE_SIZE); + free_req1_func = SMB2_ioctl_free; } else if (qi.flags == PASSTHRU_SET_INFO) { /* Can eventually relax perm check since server enforces too */ - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_ADMIN)) { rc = -EPERM; - else { - rqst[1].rq_iov = &vars->si_iov[0]; - rqst[1].rq_nvec = 1; - - size[0] = 8; - data[0] = buffer; - - rc = SMB2_set_info_init(tcon, server, - &rqst[1], - COMPOUND_FID, COMPOUND_FID, - current->tgid, - FILE_END_OF_FILE_INFORMATION, - SMB2_O_INFO_FILE, 0, data, size); + goto free_open_req; } + if (qi.output_buffer_length < 8) { + rc = -EINVAL; + goto free_open_req; + } + rqst[1].rq_iov = &vars->si_iov[0]; + rqst[1].rq_nvec = 1; + + /* MS-FSCC 2.4.13 FileEndOfFileInformation */ + size[0] = 8; + data[0] = buffer; + + rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, + current->tgid, FILE_END_OF_FILE_INFORMATION, + SMB2_O_INFO_FILE, 0, data, size); + free_req1_func = SMB2_set_info_free; } else if (qi.flags == PASSTHRU_QUERY_INFO) { rqst[1].rq_iov = &vars->qi_iov[0]; rqst[1].rq_nvec = 1; @@ -1642,6 +1645,7 @@ smb2_ioctl_query_info(const unsigned int xid, qi.info_type, qi.additional_information, qi.input_buffer_length, qi.output_buffer_length, buffer); + free_req1_func = SMB2_query_info_free; } else { /* unknown flags */ cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n", qi.flags); @@ -1649,7 +1653,7 @@ smb2_ioctl_query_info(const unsigned int xid, } if 
(rc) - goto iqinf_exit; + goto free_open_req; smb2_set_next_command(tcon, &rqst[1]); smb2_set_related(&rqst[1]); @@ -1660,14 +1664,14 @@ smb2_ioctl_query_info(const unsigned int xid, rc = SMB2_close_init(tcon, server, &rqst[2], COMPOUND_FID, COMPOUND_FID, false); if (rc) - goto iqinf_exit; + goto free_req_1; smb2_set_related(&rqst[2]); rc = compound_send_recv(xid, ses, server, flags, 3, rqst, resp_buftype, rsp_iov); if (rc) - goto iqinf_exit; + goto out; /* No need to bump num_remote_opens since handle immediately closed */ if (qi.flags & PASSTHRU_FSCTL) { @@ -1677,18 +1681,22 @@ smb2_ioctl_query_info(const unsigned int xid, qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount); if (qi.input_buffer_length > 0 && le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length - > rsp_iov[1].iov_len) - goto e_fault; + > rsp_iov[1].iov_len) { + rc = -EFAULT; + goto out; + } if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, - sizeof(qi.input_buffer_length))) - goto e_fault; + sizeof(qi.input_buffer_length))) { + rc = -EFAULT; + goto out; + } if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info), (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset), qi.input_buffer_length)) - goto e_fault; + rc = -EFAULT; } else { pqi = (struct smb_query_info __user *)arg; qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; @@ -1696,28 +1704,30 @@ smb2_ioctl_query_info(const unsigned int xid, qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength); if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, - sizeof(qi.input_buffer_length))) - goto e_fault; + sizeof(qi.input_buffer_length))) { + rc = -EFAULT; + goto out; + } if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) - goto e_fault; + rc = -EFAULT; } - iqinf_exit: - cifs_small_buf_release(rqst[0].rq_iov[0].iov_base); - cifs_small_buf_release(rqst[1].rq_iov[0].iov_base); - cifs_small_buf_release(rqst[2].rq_iov[0].iov_base); +out: free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); - kfree(vars); + SMB2_close_free(&rqst[2]); +free_req_1: + free_req1_func(&rqst[1]); +free_open_req: + SMB2_open_free(&rqst[0]); +free_output_buffer: kfree(buffer); +free_vars: + kfree(vars); return rc; - -e_fault: - rc = -EFAULT; - goto iqinf_exit; } static ssize_t diff --git a/fs/coredump.c b/fs/coredump.c index 335c98787e668640f075728e33f104117c5f6dbf..42c9c3dde764d7163eb951619d07fbbe3fd5f219 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -969,6 +970,8 @@ static bool always_dump_vma(struct vm_area_struct *vma) return false; } +#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1 + /* * Decide how much of @vma's contents should be included in a core dump. */ @@ -1028,9 +1031,20 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, * dump the first page to aid in determining what was mapped here. */ if (FILTER(ELF_HEADERS) && - vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ) && - (READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0) - return PAGE_SIZE; + vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) { + if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0) + return PAGE_SIZE; + + /* + * ELF libraries aren't always executable. + * We'll want to check whether the mapping starts with the ELF + * magic, but not now - we're holding the mmap lock, + * so copy_from_user() doesn't work here. 
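The smb2_ioctl_query_info() rework replaces one catch-all exit label with a ladder of labels that free exactly what was set up, plus a function pointer remembering which request-specific free routine matches the variant that was initialized. A stripped-down sketch of that shape, with invented init/free helpers:

/* Sketch: unwind-in-reverse-order error handling with a per-variant free hook. */
#include <stdlib.h>
#include <errno.h>

struct req { void *iov; };

static int open_init(struct req *r)   { r->iov = malloc(32); return r->iov ? 0 : -ENOMEM; }
static void open_free(struct req *r)  { free(r->iov); }
static int ioctl_init(struct req *r)  { r->iov = malloc(64); return r->iov ? 0 : -ENOMEM; }
static void ioctl_free(struct req *r) { free(r->iov); }
static int query_init(struct req *r)  { r->iov = malloc(16); return r->iov ? 0 : -ENOMEM; }
static void query_free(struct req *r) { free(r->iov); }

static int do_compound(int passthru_ioctl)
{
    struct req rq[2] = { {0}, {0} };
    void (*free_req1)(struct req *) = NULL;
    int rc;

    rc = open_init(&rq[0]);
    if (rc)
        return rc;

    /* Pick the second request type; remember the matching cleanup. */
    if (passthru_ioctl) {
        rc = ioctl_init(&rq[1]);
        free_req1 = ioctl_free;
    } else {
        rc = query_init(&rq[1]);
        free_req1 = query_free;
    }
    if (rc)
        goto free_open_req;

    rc = 0;                    /* ... send/receive would happen here ... */

    free_req1(&rq[1]);
free_open_req:
    open_free(&rq[0]);
    return rc;
}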
+ * Use a placeholder instead, and fix it up later in + * dump_vma_snapshot(). + */ + return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER; + } #undef FILTER @@ -1105,8 +1119,6 @@ int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, m->end = vma->vm_end; m->flags = vma->vm_flags; m->dump_size = vma_dump_size(vma, cprm->mm_flags); - - vma_data_size += m->dump_size; } mmap_write_unlock(mm); @@ -1116,6 +1128,23 @@ int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, return -EFAULT; } + for (i = 0; i < *vma_count; i++) { + struct core_vma_metadata *m = (*vma_meta) + i; + + if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) { + char elfmag[SELFMAG]; + + if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) || + memcmp(elfmag, ELFMAG, SELFMAG) != 0) { + m->dump_size = 0; + } else { + m->dump_size = PAGE_SIZE; + } + } + + vma_data_size += m->dump_size; + } + *vma_data_size_ptr = vma_data_size; return 0; } diff --git a/fs/direct-io.c b/fs/direct-io.c index c64d4eb38995a7639eec0756111071d8a861830f..9dafbb07dd6a6f387512eb78dcccdef4fa834789 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -426,6 +426,8 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) unsigned long flags; bio->bi_private = dio; + /* don't account direct I/O as memory stall */ + bio_clear_flag(bio, BIO_WORKINGSET); spin_lock_irqsave(&dio->bio_lock, flags); dio->refcount++; diff --git a/fs/eulerfs/dep.c b/fs/eulerfs/dep.c index ec014bbf3700223d2b8a809063a819e04ca1338e..a41471c5f2ec33132e888d45a54dca0afa2f57e4 100644 --- a/fs/eulerfs/dep.c +++ b/fs/eulerfs/dep.c @@ -718,7 +718,7 @@ int dep_init(struct super_block *sb) for_each_possible_cpu(cpu) init_llist_head(per_cpu_ptr(sbi->persistee_list, cpu)); - sbi->persisters = kmalloc(sizeof(struct task_struct *) * + sbi->persisters = kzalloc(sizeof(struct task_struct *) * persisters_per_socket * num_sockets, GFP_KERNEL); if (!sbi->persisters) { diff --git a/fs/exec.c b/fs/exec.c index 2147ae12787c3333c10209cdb63cf8bed730f586..4c2d18061633247d926e80b4176135c77145b23f 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -484,8 +484,14 @@ static int bprm_stack_limits(struct linux_binprm *bprm) * the stack. They aren't stored until much later when we can't * signal to the parent that the child has run out of stack space. * Instead, calculate it here so it's possible to fail gracefully. + * + * In the case of argc = 0, make sure there is space for adding a + * empty string (which will bump argc to 1), to ensure confused + * userspace programs don't start processing from argv[1], thinking + * argc can never be 0, to keep them from walking envp by accident. + * See do_execveat_common(). */ - ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); + ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *); if (limit <= ptr_size) return -E2BIG; limit -= ptr_size; @@ -1866,6 +1872,9 @@ static int do_execveat_common(int fd, struct filename *filename, } retval = count(argv, MAX_ARG_STRINGS); + if (retval == 0) + pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n", + current->comm, bprm->filename); if (retval < 0) goto out_free; bprm->argc = retval; @@ -1892,6 +1901,19 @@ static int do_execveat_common(int fd, struct filename *filename, if (retval < 0) goto out_free; + /* + * When argv is empty, add an empty string ("") as argv[0] to + * ensure confused userspace programs that start processing + * from argv[1] won't end up walking envp. See also + * bprm_stack_limits(). 
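The exec changes guard against userspace invoking execve() with an empty argv: stack space is reserved for, and the kernel injects, an empty argv[0], so argc is never 0 when the new program starts. A tiny userspace demonstration of the call shape being defended against (path and environment are arbitrary examples):

/* Demo: exec a program with argc == 0; patched kernels insert "" as argv[0]. */
#include <unistd.h>
#include <stdio.h>

int main(void)
{
    char *argv[] = { NULL };              /* empty argument vector */
    char *envp[] = { "DEMO=1", NULL };

    execve("/bin/true", argv, envp);      /* target path is just an example */
    perror("execve");                     /* only reached on failure */
    return 1;
}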
+ */ + if (bprm->argc == 0) { + retval = copy_string_kernel("", bprm); + if (retval < 0) + goto out_free; + bprm->argc = 1; + } + retval = bprm_execve(bprm, fd, filename, flags); out_free: free_bprm(bprm); @@ -1920,6 +1942,8 @@ int kernel_execve(const char *kernel_filename, } retval = count_strings_kernel(argv); + if (WARN_ON_ONCE(retval == 0)) + retval = -EINVAL; if (retval < 0) goto out_free; bprm->argc = retval; diff --git a/fs/exfat/file.c b/fs/exfat/file.c index a92478eabfa4e43f2198ccf9b62b8632b697c8dc..c819e8427ea577f1734c783986fbddc19445b721 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -109,8 +109,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) exfat_set_volume_dirty(sb); num_clusters_new = EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi); - num_clusters_phys = - EXFAT_B_TO_CLU_ROUND_UP(EXFAT_I(inode)->i_size_ondisk, sbi); + num_clusters_phys = EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi); exfat_chain_set(&clu, ei->start_clu, num_clusters_phys, ei->flags); @@ -227,12 +226,13 @@ void exfat_truncate(struct inode *inode, loff_t size) { struct super_block *sb = inode->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct exfat_inode_info *ei = EXFAT_I(inode); unsigned int blocksize = i_blocksize(inode); loff_t aligned_size; int err; mutex_lock(&sbi->s_lock); - if (EXFAT_I(inode)->start_clu == 0) { + if (ei->start_clu == 0) { /* * Empty start_clu != ~0 (not allocated) */ @@ -250,8 +250,8 @@ void exfat_truncate(struct inode *inode, loff_t size) else mark_inode_dirty(inode); - inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) & - ~(sbi->cluster_size - 1)) >> inode->i_blkbits; + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> + inode->i_blkbits; write_size: aligned_size = i_size_read(inode); if (aligned_size & (blocksize - 1)) { @@ -259,11 +259,11 @@ void exfat_truncate(struct inode *inode, loff_t size) aligned_size++; } - if (EXFAT_I(inode)->i_size_ondisk > i_size_read(inode)) - EXFAT_I(inode)->i_size_ondisk = aligned_size; + if (ei->i_size_ondisk > i_size_read(inode)) + ei->i_size_ondisk = aligned_size; - if (EXFAT_I(inode)->i_size_aligned > i_size_read(inode)) - EXFAT_I(inode)->i_size_aligned = aligned_size; + if (ei->i_size_aligned > i_size_read(inode)) + ei->i_size_aligned = aligned_size; mutex_unlock(&sbi->s_lock); } diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 8b0288f70e93db1ba12c0493c52e661836435056..2a9f6a80584ee3d63d9325b3f540d5457fa9a217 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -114,10 +114,9 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset, unsigned int local_clu_offset = clu_offset; unsigned int num_to_be_allocated = 0, num_clusters = 0; - if (EXFAT_I(inode)->i_size_ondisk > 0) + if (ei->i_size_ondisk > 0) num_clusters = - EXFAT_B_TO_CLU_ROUND_UP(EXFAT_I(inode)->i_size_ondisk, - sbi); + EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi); if (clu_offset >= num_clusters) num_to_be_allocated = clu_offset - num_clusters + 1; @@ -415,10 +414,10 @@ static int exfat_write_end(struct file *file, struct address_space *mapping, err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); - if (EXFAT_I(inode)->i_size_aligned < i_size_read(inode)) { + if (ei->i_size_aligned < i_size_read(inode)) { exfat_fs_error(inode->i_sb, "invalid size(size(%llu) > aligned(%llu)\n", - i_size_read(inode), EXFAT_I(inode)->i_size_aligned); + i_size_read(inode), ei->i_size_aligned); return -EIO; } @@ -601,8 +600,8 @@ static int exfat_fill_inode(struct inode *inode, struct 
exfat_dir_entry *info) exfat_save_attr(inode, info->attr); - inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) & - ~((loff_t)sbi->cluster_size - 1)) >> inode->i_blkbits; + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> + inode->i_blkbits; inode->i_mtime = info->mtime; inode->i_ctime = info->mtime; ei->i_crtime = info->crtime; diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 2932b23a3b6c36ebfb82e5a5e7621c07fbb2822d..935f6005090091342c109456c50c0e5a27b8bc5b 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -395,9 +395,9 @@ static int exfat_find_empty_entry(struct inode *inode, /* directory inode should be updated in here */ i_size_write(inode, size); - EXFAT_I(inode)->i_size_ondisk += sbi->cluster_size; - EXFAT_I(inode)->i_size_aligned += sbi->cluster_size; - EXFAT_I(inode)->flags = p_dir->flags; + ei->i_size_ondisk += sbi->cluster_size; + ei->i_size_aligned += sbi->cluster_size; + ei->flags = p_dir->flags; inode->i_blocks += 1 << sbi->sect_per_clus_bits; } diff --git a/fs/exfat/super.c b/fs/exfat/super.c index c6d8d2e534865236d16386575627e0a442a8db52..ba70ed1c980490cf593232722fe1e579459e0e7d 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -364,11 +364,11 @@ static int exfat_read_root(struct inode *inode) inode->i_op = &exfat_dir_inode_operations; inode->i_fop = &exfat_dir_operations; - inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) - & ~(sbi->cluster_size - 1)) >> inode->i_blkbits; - EXFAT_I(inode)->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff; - EXFAT_I(inode)->i_size_aligned = i_size_read(inode); - EXFAT_I(inode)->i_size_ondisk = i_size_read(inode); + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> + inode->i_blkbits; + ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff; + ei->i_size_aligned = i_size_read(inode); + ei->i_size_ondisk = i_size_read(inode); exfat_save_attr(inode, ATTR_SUBDIR); inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime = @@ -690,7 +690,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) if (!sb->s_root) { exfat_err(sb, "failed to get the root dentry"); err = -ENOMEM; - goto put_inode; + goto free_table; } return 0; diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 09f1fe67697278d4963ab036ea75fdc0dd234488..b6314d3c6a87d907b714542323efc6a51c4ec07c 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -756,8 +756,12 @@ static loff_t ext2_max_size(int bits) res += 1LL << (bits-2); res += 1LL << (2*(bits-2)); res += 1LL << (3*(bits-2)); + /* Compute how many metadata blocks are needed */ + meta_blocks = 1; + meta_blocks += 1 + ppb; + meta_blocks += 1 + ppb + ppb * ppb; /* Does block tree limit file size? 
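The ext2_max_size() fix also counts the indirect metadata blocks themselves against the block-tree limit: one single-indirect block, a double-indirect block plus its ppb children, and a triple-indirect block plus ppb + ppb^2 children, where ppb is the number of block pointers per block. A worked sketch for 4 KiB blocks (bits = 12, so ppb = 1024), assuming the classic ext2 layout:

/* Sketch: count ext2 indirect metadata blocks for a fully mapped file. */
#include <stdio.h>

int main(void)
{
    int bits = 12;                                   /* 4 KiB blocks */
    unsigned long long ppb = 1ULL << (bits - 2);     /* 4-byte pointers per block */

    unsigned long long data_blocks =
        12 + ppb + ppb * ppb + ppb * ppb * ppb;      /* direct + 1/2/3-indirect data */
    unsigned long long meta_blocks =
        1 + (1 + ppb) + (1 + ppb + ppb * ppb);       /* the indirect blocks themselves */

    printf("data=%llu meta=%llu\n", data_blocks, meta_blocks);
    return 0;
}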
*/ - if (res < upper_limit) + if (res + meta_blocks <= upper_limit) goto check_lfs; res = upper_limit; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index c11a23d73c79e456c3f68641a40fb5388b5c2c97..277f89d5de038ddb29d08167fa4159f86f714da1 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -17,6 +17,7 @@ #ifndef _EXT4_H #define _EXT4_H +#include #include #include #include @@ -235,7 +236,7 @@ typedef struct ext4_io_end { struct bio *bio; /* Linked list of completed * bios covering the extent */ unsigned int flag; /* unwritten or not */ - atomic_t count; /* reference counter */ + refcount_t count; /* reference counter */ struct list_head list_vec; /* list of ext4_io_end_vec */ } ext4_io_end_t; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index bf70efd2451902d2f784917df21e4a918784e8c5..c2c688cb45005616a9c6f852411f6bb2017c5f40 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -34,6 +34,9 @@ static int get_max_inline_xattr_value_size(struct inode *inode, struct ext4_inode *raw_inode; int free, min_offs; + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) + return 0; + min_offs = EXT4_SB(inode->i_sb)->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE - EXT4_I(inode)->i_extra_isize - @@ -1774,19 +1777,20 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) void *inline_pos; unsigned int offset; struct ext4_dir_entry_2 *de; - bool ret = true; + bool ret = false; err = ext4_get_inode_loc(dir, &iloc); if (err) { EXT4_ERROR_INODE_ERR(dir, -err, "error %d getting inode %lu block", err, dir->i_ino); - return true; + return false; } down_read(&EXT4_I(dir)->xattr_sem); if (!ext4_has_inline_data(dir)) { *has_inline_data = 0; + ret = true; goto out; } @@ -1795,7 +1799,6 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) ext4_warning(dir->i_sb, "bad inline directory (dir #%lu) - no `..'", dir->i_ino); - ret = true; goto out; } @@ -1814,16 +1817,15 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) dir->i_ino, le32_to_cpu(de->inode), le16_to_cpu(de->rec_len), de->name_len, inline_size); - ret = true; goto out; } if (le32_to_cpu(de->inode)) { - ret = false; goto out; } offset += ext4_rec_len_from_disk(de->rec_len, inline_size); } + ret = true; out: up_read(&EXT4_I(dir)->xattr_sem); brelse(iloc.bh); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index a057b9f54bda1ecb9ab7cc263644a649eba39df2..e85c238edd8545fbac91d1c49d583bf84a75a240 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2019,6 +2019,15 @@ static int ext4_writepage(struct page *page, else len = PAGE_SIZE; + /* Should never happen but for bugs in other kernel subsystems */ + if (!page_has_buffers(page)) { + ext4_warning_inode(inode, + "page %lu does not have buffers attached", page->index); + ClearPageDirty(page); + unlock_page(page); + return 0; + } + page_bufs = page_buffers(page); /* * We cannot do block allocation or other extent handling in this @@ -2628,6 +2637,22 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) wait_on_page_writeback(page); BUG_ON(PageWriteback(page)); + /* + * Should never happen but for buggy code in + * other subsystems that call + * set_page_dirty() without properly warning + * the file system first. See [1] for more + * information. 
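Switching the ext4_io_end counter from atomic_t to refcount_t means over- and underflows saturate and warn instead of silently wrapping into a use-after-free. A minimal sketch of the same get/put pattern on an invented object, using the in-kernel refcount API:

/* Sketch: refcounted object lifetime with refcount_t (kernel API). */
#include <linux/refcount.h>
#include <linux/slab.h>

struct io_end_like {
    refcount_t count;
    /* ... payload ... */
};

static struct io_end_like *io_end_alloc(gfp_t flags)
{
    struct io_end_like *e = kzalloc(sizeof(*e), flags);

    if (e)
        refcount_set(&e->count, 1);       /* creator holds the first reference */
    return e;
}

static struct io_end_like *io_end_get(struct io_end_like *e)
{
    refcount_inc(&e->count);              /* warns if the count was already 0 */
    return e;
}

static void io_end_put(struct io_end_like *e)
{
    if (refcount_dec_and_test(&e->count)) /* true only for the last reference */
        kfree(e);
}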
+ * + * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz + */ + if (!page_has_buffers(page)) { + ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index); + ClearPageDirty(page); + unlock_page(page); + continue; + } + if (mpd->map.m_len == 0) mpd->first_page = page->index; mpd->next_page = page->index + 1; @@ -4654,8 +4679,7 @@ static inline int ext4_iget_extra_inode(struct inode *inode, __le32 *magic = (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; - if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <= - EXT4_INODE_SIZE(inode->i_sb) && + if (EXT4_INODE_HAS_XATTR_SPACE(inode) && *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { ext4_set_inode_state(inode, EXT4_STATE_XATTR); return ext4_find_inline_data_nolock(inode); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 110c25824a67fd1ead6c89dfdec1e614a2a07671..19108d6bb566c19d17b4977fe0f409b27dec44bb 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3320,69 +3320,95 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_group_t group; ext4_grpblk_t blkoff; - int i, clen, err; + int i, err; int already; + unsigned int clen, clen_changed, thisgrp_len; - clen = EXT4_B2C(sbi, len); + while (len > 0) { + ext4_get_group_no_and_offset(sb, block, &group, &blkoff); - ext4_get_group_no_and_offset(sb, block, &group, &blkoff); - bitmap_bh = ext4_read_block_bitmap(sb, group); - if (IS_ERR(bitmap_bh)) { - err = PTR_ERR(bitmap_bh); - bitmap_bh = NULL; - goto out_err; - } + /* + * Check to see if we are freeing blocks across a group + * boundary. + * In case of flex_bg, this can happen that (block, len) may + * span across more than one group. In that case we need to + * get the corresponding group metadata to work with. + * For this we have goto again loop. 
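ext4_mb_mark_bb() now walks the requested range one block group at a time: each pass covers at most the blocks left in the current group, loads that group's bitmap and descriptor, then advances block and len. The chunking arithmetic in isolation, with an assumed fixed group size:

/* Sketch: process a [block, block+len) range in per-group chunks. */
#include <stdio.h>

#define BLOCKS_PER_GROUP 32768ULL   /* example: 128 MiB groups of 4 KiB blocks */

static void mark_range(unsigned long long block, unsigned long long len)
{
    while (len > 0) {
        unsigned long long group = block / BLOCKS_PER_GROUP;
        unsigned long long off = block % BLOCKS_PER_GROUP;
        unsigned long long thisgrp = BLOCKS_PER_GROUP - off;

        if (thisgrp > len)
            thisgrp = len;

        printf("group %llu: mark %llu blocks at offset %llu\n",
               group, thisgrp, off);

        block += thisgrp;
        len -= thisgrp;
    }
}

int main(void)
{
    mark_range(32760, 20);   /* crosses a group boundary */
    return 0;
}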
+ */ + thisgrp_len = min_t(unsigned int, (unsigned int)len, + EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff)); + clen = EXT4_NUM_B2C(sbi, thisgrp_len); - err = -EIO; - gdp = ext4_get_group_desc(sb, group, &gdp_bh); - if (!gdp) - goto out_err; + bitmap_bh = ext4_read_block_bitmap(sb, group); + if (IS_ERR(bitmap_bh)) { + err = PTR_ERR(bitmap_bh); + bitmap_bh = NULL; + break; + } - ext4_lock_group(sb, group); - already = 0; - for (i = 0; i < clen; i++) - if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == !state) - already++; + err = -EIO; + gdp = ext4_get_group_desc(sb, group, &gdp_bh); + if (!gdp) + break; - if (state) - ext4_set_bits(bitmap_bh->b_data, blkoff, clen); - else - mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen); - if (ext4_has_group_desc_csum(sb) && - (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { - gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); - ext4_free_group_clusters_set(sb, gdp, - ext4_free_clusters_after_init(sb, - group, gdp)); - } - if (state) - clen = ext4_free_group_clusters(sb, gdp) - clen + already; - else - clen = ext4_free_group_clusters(sb, gdp) + clen - already; + ext4_lock_group(sb, group); + already = 0; + for (i = 0; i < clen; i++) + if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == + !state) + already++; + + clen_changed = clen - already; + if (state) + ext4_set_bits(bitmap_bh->b_data, blkoff, clen); + else + mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen); + if (ext4_has_group_desc_csum(sb) && + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); + ext4_free_group_clusters_set(sb, gdp, + ext4_free_clusters_after_init(sb, group, gdp)); + } + if (state) + clen = ext4_free_group_clusters(sb, gdp) - clen_changed; + else + clen = ext4_free_group_clusters(sb, gdp) + clen_changed; - ext4_free_group_clusters_set(sb, gdp, clen); - ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); - ext4_group_desc_csum_set(sb, group, gdp); + ext4_free_group_clusters_set(sb, gdp, clen); + ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); + ext4_group_desc_csum_set(sb, group, gdp); - ext4_unlock_group(sb, group); + ext4_unlock_group(sb, group); - if (sbi->s_log_groups_per_flex) { - ext4_group_t flex_group = ext4_flex_group(sbi, group); + if (sbi->s_log_groups_per_flex) { + ext4_group_t flex_group = ext4_flex_group(sbi, group); + struct flex_groups *fg = sbi_array_rcu_deref(sbi, + s_flex_groups, flex_group); - atomic64_sub(len, - &sbi_array_rcu_deref(sbi, s_flex_groups, - flex_group)->free_clusters); + if (state) + atomic64_sub(clen_changed, &fg->free_clusters); + else + atomic64_add(clen_changed, &fg->free_clusters); + + } + + err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); + if (err) + break; + sync_dirty_buffer(bitmap_bh); + err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); + sync_dirty_buffer(gdp_bh); + if (err) + break; + + block += thisgrp_len; + len -= thisgrp_len; + brelse(bitmap_bh); + BUG_ON(len < 0); } - err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); if (err) - goto out_err; - sync_dirty_buffer(bitmap_bh); - err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); - sync_dirty_buffer(gdp_bh); - -out_err: - brelse(bitmap_bh); + brelse(bitmap_bh); } /* @@ -3494,6 +3520,15 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, size = size >> bsbits; start = start_off >> bsbits; + /* + * For tiny groups (smaller than 8MB) the chosen allocation + * alignment may be larger than group size. 
Make sure the + * alignment does not move allocation to a different group which + * makes mballoc fail assertions later. + */ + start = max(start, rounddown(ac->ac_o_ex.fe_logical, + (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); + /* don't cover already allocated blocks in selected range */ if (ar->pleft && start <= ar->lleft) { size -= ar->lleft + 1 - start; @@ -3566,7 +3601,22 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, } rcu_read_unlock(); - if (start + size <= ac->ac_o_ex.fe_logical && + /* + * In this function "start" and "size" are normalized for better + * alignment and length such that we could preallocate more blocks. + * This normalization is done such that original request of + * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and + * "size" boundaries. + * (Note fe_len can be relaxed since FS block allocation API does not + * provide gurantee on number of contiguous blocks allocation since that + * depends upon free space left, etc). + * In case of inode pa, later we use the allocated blocks + * [pa_start + fe_logical - pa_lstart, fe_len/size] from the preallocated + * range of goal/best blocks [start, size] to put it at the + * ac_o_ex.fe_logical extent of this inode. + * (See ext4_mb_use_inode_pa() for more details) + */ + if (start + size <= ac->ac_o_ex.fe_logical || start > ac->ac_o_ex.fe_logical) { ext4_msg(ac->ac_sb, KERN_ERR, "start %lu, size %lu, fe_logical %lu", diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 49912814f3d8dda93f50b98f664f586a81c8d072..04320715d61f1bbfad7f41c0fc5161f0cb975a86 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -417,7 +417,7 @@ int ext4_ext_migrate(struct inode *inode) struct inode *tmp_inode = NULL; struct migrate_struct lb; unsigned long max_entries; - __u32 goal; + __u32 goal, tmp_csum_seed; uid_t owner[2]; /* @@ -465,6 +465,7 @@ int ext4_ext_migrate(struct inode *inode) * the migration. */ ei = EXT4_I(inode); + tmp_csum_seed = EXT4_I(tmp_inode)->i_csum_seed; EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed; i_size_write(tmp_inode, i_size_read(inode)); /* @@ -575,6 +576,7 @@ int ext4_ext_migrate(struct inode *inode) * the inode is not visible to user space. 
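The corrected sanity check in ext4_mb_normalize_request() verifies that the original logical extent still lies inside the normalized window, i.e. start <= fe_logical < start + size; the old '&&' form could never fire, since both halves cannot hold at once for a non-zero size. A small sketch of the invariant with illustrative numbers:

/* Sketch: the normalization invariant - the original extent must stay inside
 * the normalized window [start, start + size). */
#include <assert.h>

static void check_window(unsigned long start, unsigned long size,
                         unsigned long fe_logical)
{
    /* Out of window if it ends at/before fe_logical OR starts after it. */
    assert(!(start + size <= fe_logical || start > fe_logical));
}

int main(void)
{
    check_window(0, 2048, 1000);       /* fine: 0 <= 1000 < 2048 */
    /* check_window(2048, 2048, 1000) would trip the assertion */
    return 0;
}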
*/ tmp_inode->i_blocks = 0; + EXT4_I(tmp_inode)->i_csum_seed = tmp_csum_seed; /* Reset the extent details */ ext4_ext_tree_init(handle, tmp_inode); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index f7279ff5f975eab5a15d28e932d9d31477e8c67a..0076f0ba3d3cedc3278b4662313636c82f6c7915 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2888,14 +2888,14 @@ bool ext4_empty_dir(struct inode *inode) sb = inode->i_sb; if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) { EXT4_ERROR_INODE(inode, "invalid size"); - return true; + return false; } /* The first directory block must not be a hole, * so treat it as DIRENT_HTREE */ bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE); if (IS_ERR(bh)) - return true; + return false; de = (struct ext4_dir_entry_2 *) bh->b_data; if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, @@ -2903,7 +2903,7 @@ bool ext4_empty_dir(struct inode *inode) le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) { ext4_warning_inode(inode, "directory missing '.'"); brelse(bh); - return true; + return false; } offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); de = ext4_next_entry(de, sb->s_blocksize); @@ -2912,7 +2912,7 @@ bool ext4_empty_dir(struct inode *inode) le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) { ext4_warning_inode(inode, "directory missing '..'"); brelse(bh); - return true; + return false; } offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); while (offset < inode->i_size) { @@ -2926,7 +2926,7 @@ bool ext4_empty_dir(struct inode *inode) continue; } if (IS_ERR(bh)) - return true; + return false; } de = (struct ext4_dir_entry_2 *) (bh->b_data + (offset & (sb->s_blocksize - 1))); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 4569075a7da0c87f1b559bfc6958da1598756dd8..b076fabb72e29be8591c1f540e5a443207e13ed8 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -284,14 +284,14 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags) io_end->inode = inode; INIT_LIST_HEAD(&io_end->list); INIT_LIST_HEAD(&io_end->list_vec); - atomic_set(&io_end->count, 1); + refcount_set(&io_end->count, 1); } return io_end; } void ext4_put_io_end_defer(ext4_io_end_t *io_end) { - if (atomic_dec_and_test(&io_end->count)) { + if (refcount_dec_and_test(&io_end->count)) { if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || list_empty(&io_end->list_vec)) { ext4_release_io_end(io_end); @@ -305,7 +305,7 @@ int ext4_put_io_end(ext4_io_end_t *io_end) { int err = 0; - if (atomic_dec_and_test(&io_end->count)) { + if (refcount_dec_and_test(&io_end->count)) { if (io_end->flag & EXT4_IO_END_UNWRITTEN) { err = ext4_convert_unwritten_io_end_vec(io_end->handle, io_end); @@ -319,7 +319,7 @@ int ext4_put_io_end(ext4_io_end_t *io_end) ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end) { - atomic_inc(&io_end->count); + refcount_inc(&io_end->count); return io_end; } diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index bd0d185654f3357cdc7a5826a6d3afa87c052dff..ebb6affb26c2033be54a4460a6844579323c9d8f 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -74,6 +74,11 @@ int ext4_resize_begin(struct super_block *sb) return -EPERM; } + if (ext4_has_feature_sparse_super2(sb)) { + ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2"); + return -EOPNOTSUPP; + } + if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags)) ret = -EBUSY; @@ -2006,6 +2011,9 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) ext4_warning(sb, "Error opening resize inode"); return 
PTR_ERR(resize_inode); } + } else if (es->s_reserved_gdt_blocks) { + ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero"); + return -EFSCORRUPTED; } if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) { diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 2f93e8b90492e5ebbb884cf485afdf278058764c..b5016eb7b37323e1999c3e72765f257f018dc5f8 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -2170,8 +2170,9 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, struct ext4_inode *raw_inode; int error; - if (EXT4_I(inode)->i_extra_isize == 0) + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) return 0; + raw_inode = ext4_raw_inode(&is->iloc); header = IHDR(inode, raw_inode); is->s.base = is->s.first = IFIRST(header); @@ -2199,8 +2200,9 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, struct ext4_xattr_search *s = &is->s; int error; - if (EXT4_I(inode)->i_extra_isize == 0) + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) return -ENOSPC; + error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); if (error) return error; @@ -2223,8 +2225,9 @@ static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, struct ext4_xattr_search *s = &is->s; int error; - if (EXT4_I(inode)->i_extra_isize == 0) + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) return -ENOSPC; + error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); if (error) return error; diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h index 730b91fa0dd70231c1c3300650146c2e5ea8902b..87e5863bb4931be9b633e95ba043edc8f048020f 100644 --- a/fs/ext4/xattr.h +++ b/fs/ext4/xattr.h @@ -95,6 +95,19 @@ struct ext4_xattr_entry { #define EXT4_ZERO_XATTR_VALUE ((void *)-1) +/* + * If we want to add an xattr to the inode, we should make sure that + * i_extra_isize is not 0 and that the inode size is not less than + * EXT4_GOOD_OLD_INODE_SIZE + extra_isize + pad. 
+ * EXT4_GOOD_OLD_INODE_SIZE extra_isize header entry pad data + * |--------------------------|------------|------|---------|---|-------| + */ +#define EXT4_INODE_HAS_XATTR_SPACE(inode) \ + ((EXT4_I(inode)->i_extra_isize != 0) && \ + (EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize + \ + sizeof(struct ext4_xattr_ibody_header) + EXT4_XATTR_PAD <= \ + EXT4_INODE_SIZE((inode)->i_sb))) + struct ext4_xattr_info { const char *name; const void *value; diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 9bcd77db980dfe2d22499a829d774c36da4f4fd4..77f30320f8628cec3620da426f492f670871be9f 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -851,6 +851,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, struct page *cp_page_1 = NULL, *cp_page_2 = NULL; struct f2fs_checkpoint *cp_block = NULL; unsigned long long cur_version = 0, pre_version = 0; + unsigned int cp_blocks; int err; err = get_checkpoint_version(sbi, cp_addr, &cp_block, @@ -858,15 +859,16 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, if (err) return NULL; - if (le32_to_cpu(cp_block->cp_pack_total_block_count) > - sbi->blocks_per_seg) { + cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count); + + if (cp_blocks > sbi->blocks_per_seg || cp_blocks <= F2FS_CP_PACKS) { f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u", le32_to_cpu(cp_block->cp_pack_total_block_count)); goto invalid_cp; } pre_version = *version; - cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1; + cp_addr += cp_blocks - 1; err = get_checkpoint_version(sbi, cp_addr, &cp_block, &cp_page_2, version); if (err) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index ec542e8c46cc981ed6f59e0442fdf8c7daf36251..1541da5ace85ef47e420850708847a18fc960f71 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -286,10 +286,9 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic) } if (ret != PAGE_SIZE << dic->log_cluster_size) { - printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, " + printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, " "expected:%lu\n", KERN_ERR, - F2FS_I_SB(dic->inode)->sb->s_id, - dic->rlen, + F2FS_I_SB(dic->inode)->sb->s_id, ret, PAGE_SIZE << dic->log_cluster_size); return -EIO; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 1b11a42847c48e92b8a3af471c5793c724ffcc66..b2016fd3a7ca301f714f7792b6307d64b520b350 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -3264,8 +3264,12 @@ static int __f2fs_write_data_pages(struct address_space *mapping, /* to avoid spliting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */ if (wbc->sync_mode == WB_SYNC_ALL) atomic_inc(&sbi->wb_sync_req[DATA]); - else if (atomic_read(&sbi->wb_sync_req[DATA])) + else if (atomic_read(&sbi->wb_sync_req[DATA])) { + /* to avoid potential deadlock */ + if (current->plug) + blk_finish_plug(current->plug); goto skip_write; + } if (__should_serialize_io(inode, wbc)) { mutex_lock(&sbi->writepages); @@ -3457,6 +3461,9 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, *fsdata = NULL; + if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode))) + goto repeat; + ret = f2fs_prepare_compress_overwrite(inode, pagep, index, fsdata); if (ret < 0) { diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 1fbaab1f7aba8666da7f07193ffc4c179b55da57..792f9059d897c3b21cb387c7a229216196cd9fae 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -2035,7 +2035,10 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) inode_lock(inode); - 
f2fs_disable_compressed_file(inode); + if (!f2fs_disable_compressed_file(inode)) { + ret = -EINVAL; + goto out; + } if (f2fs_is_atomic_file(inode)) { if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 6153b29de331bf9b861e868fea8a236f6f002259..827b5a6175ecf19487895c4b29a2a053e2303589 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -998,8 +998,10 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, set_sbi_flag(sbi, SBI_NEED_FSCK); } - if (f2fs_check_nid_range(sbi, dni->ino)) + if (f2fs_check_nid_range(sbi, dni->ino)) { + f2fs_put_page(node_page, 1); return false; + } *nofs = ofs_of_node(node_page); source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index a35fcf43ad5a3346399b05ed624f98c12e6f017d..98483f50e5e9218dd9f35a660cd2fed3f236d406 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -848,6 +848,7 @@ void f2fs_handle_failed_inode(struct inode *inode) err = f2fs_get_node_info(sbi, inode->i_ino, &ni); if (err) { set_sbi_flag(sbi, SBI_NEED_FSCK); + set_inode_flag(inode, FI_FREE_NID); f2fs_warn(sbi, "May loss orphan inode, run fsck to fix."); goto out; } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 7e625806bd4a2f33b383579487fa37f7a7bfef7f..5fa10d0b006837e397c4f60dc8ef5f3540c6b6ec 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2055,8 +2055,12 @@ static int f2fs_write_node_pages(struct address_space *mapping, if (wbc->sync_mode == WB_SYNC_ALL) atomic_inc(&sbi->wb_sync_req[NODE]); - else if (atomic_read(&sbi->wb_sync_req[NODE])) + else if (atomic_read(&sbi->wb_sync_req[NODE])) { + /* to avoid potential deadlock */ + if (current->plug) + blk_finish_plug(current->plug); goto skip_write; + } trace_f2fs_writepages(mapping->host, wbc, NODE); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index d04b449978aa8e4c8146527508bb2d6fcd60096b..49f5cb532738d99c5a1207d4e790442e8a479283 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -4650,6 +4650,13 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi) sanity_check_seg_type(sbi, curseg->seg_type); + if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) { + f2fs_err(sbi, + "Current segment has invalid alloc_type:%d", + curseg->alloc_type); + return -EFSCORRUPTED; + } + if (f2fs_test_bit(blkofs, se->cur_valid_map)) goto out; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index af98abb17c272a1b1d2b1c32547da694916211a9..78ee14f6e939eb620f00e18249cd90ed43829be6 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -2278,7 +2278,7 @@ int f2fs_quota_sync(struct super_block *sb, int type) struct f2fs_sb_info *sbi = F2FS_SB(sb); struct quota_info *dqopt = sb_dqopt(sb); int cnt; - int ret; + int ret = 0; /* * Now when everything is written we can discard the pagecache so @@ -2289,8 +2289,8 @@ int f2fs_quota_sync(struct super_block *sb, int type) if (type != -1 && cnt != type) continue; - if (!sb_has_quota_active(sb, type)) - return 0; + if (!sb_has_quota_active(sb, cnt)) + continue; inode_lock(dqopt->files[cnt]); diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 7ffd4bb398b0cbd0b24fb2222c77d26830051e2f..a7e7d68256e004f2212de2deb9cb984c8a153559 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -386,7 +386,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a, } else if (t == GC_IDLE_AT) { if (!sbi->am.atgc_enabled) return -EINVAL; - sbi->gc_mode = GC_AT; + sbi->gc_mode = GC_IDLE_AT; } else { sbi->gc_mode = GC_NORMAL; } diff --git a/fs/file.c b/fs/file.c index 
0aa251ca02a6e4d1cae56dd1be1acb8af3b2c9d8..cf236025e7984ee53175cea67e041353b8c7018d 100644 --- a/fs/file.c +++ b/fs/file.c @@ -86,6 +86,21 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds); } +/* + * Note how the fdtable bitmap allocations very much have to be a multiple of + * BITS_PER_LONG. This is not only because we walk those things in chunks of + * 'unsigned long' in some places, but simply because that is how the Linux + * kernel bitmaps are defined to work: they are not "bits in an array of bytes", + * they are very much "bits in an array of unsigned long". + * + * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied + * by that "1024/sizeof(ptr)" before, we already know there are sufficient + * clear low bits. Clang seems to realize that, gcc ends up being confused. + * + * On a 128-bit machine, the ALIGN() would actually matter. In the meantime, + * let's consider it documentation (and maybe a test-case for gcc to improve + * its code generation ;) + */ static struct fdtable * alloc_fdtable(unsigned int nr) { struct fdtable *fdt; @@ -101,6 +116,7 @@ static struct fdtable * alloc_fdtable(unsigned int nr) nr /= (1024 / sizeof(struct file *)); nr = roundup_pow_of_two(nr + 1); nr *= (1024 / sizeof(struct file *)); + nr = ALIGN(nr, BITS_PER_LONG); /* * Note that this can drive nr *below* what we had passed if sysctl_nr_open * had been set lower between the check in expand_files() and here. Deal @@ -268,6 +284,19 @@ static unsigned int count_open_files(struct fdtable *fdt) return i; } +/* + * Note that a sane fdtable size always has to be a multiple of + * BITS_PER_LONG, since we have bitmaps that are sized by this. + * + * 'max_fds' will normally already be properly aligned, but it + * turns out that in the close_range() -> __close_range() -> + * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end + * up having a 'max_fds' value that isn't already aligned. + * + * Rather than make close_range() have to worry about this, + * just make that BITS_PER_LONG alignment be part of a sane + * fdtable size. Becuase that's really what it is. 
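Both fs/file.c hunks force the fdtable size to a multiple of BITS_PER_LONG because the open-fd and close-on-exec bitmaps are arrays of unsigned long, not bytes. A short demonstration of the rounding, with a local ALIGN() mirroring the kernel macro's shape:

/* Demo: round an fd count up to a multiple of the bitmap word size. */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
/* Same shape as the kernel's ALIGN(): valid for power-of-two alignments. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((__typeof__(x))(a) - 1))

int main(void)
{
    unsigned long counts[] = { 1, 63, 64, 65, 100 };

    for (unsigned int i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
        printf("%lu -> %lu\n", counts[i],
               (unsigned long)ALIGN(counts[i], BITS_PER_LONG));
    return 0;
}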
+ */ static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds) { unsigned int count; @@ -275,7 +304,7 @@ static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds) count = count_open_files(fdt); if (max_fds < NR_OPEN_DEFAULT) max_fds = NR_OPEN_DEFAULT; - return min(count, max_fds); + return ALIGN(min(count, max_fds), BITS_PER_LONG); } /* diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 5e8eef9990e326428d9c5e44ee19a5233029425f..eb775e93de97c336b14d84b17f59a78c2d91f4c4 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -1389,7 +1389,8 @@ int gfs2_fitrim(struct file *filp, void __user *argp) start = r.start >> bs_shift; end = start + (r.len >> bs_shift); - minlen = max_t(u64, r.minlen, + minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize); + minlen = max_t(u64, minlen, q->limits.discard_granularity) >> bs_shift; if (end <= start || minlen > sdp->sd_max_rg_data) diff --git a/fs/io_uring.c b/fs/io_uring.c index 26a925d90074d24685ba30027aa586c5a979385c..a1c2c04b8ca015e273bf9ed8841bdac08dd19430 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1156,7 +1156,7 @@ static inline void __io_req_init_async(struct io_kiocb *req) */ static inline void io_req_init_async(struct io_kiocb *req) { - struct io_uring_task *tctx = current->io_uring; + struct io_uring_task *tctx = req->task->io_uring; if (req->flags & REQ_F_WORK_INITIALIZED) return; @@ -2582,45 +2582,6 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res, #ifdef CONFIG_BLOCK static bool io_resubmit_prep(struct io_kiocb *req, int error) { - struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; - ssize_t ret = -ECANCELED; - struct iov_iter iter; - int rw; - - if (error) { - ret = error; - goto end_req; - } - - switch (req->opcode) { - case IORING_OP_READV: - case IORING_OP_READ_FIXED: - case IORING_OP_READ: - rw = READ; - break; - case IORING_OP_WRITEV: - case IORING_OP_WRITE_FIXED: - case IORING_OP_WRITE: - rw = WRITE; - break; - default: - printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n", - req->opcode); - goto end_req; - } - - if (!req->async_data) { - ret = io_import_iovec(rw, req, &iovec, &iter, false); - if (ret < 0) - goto end_req; - ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false); - if (!ret) - return true; - kfree(iovec); - } else { - return true; - } -end_req: req_set_fail_links(req); return false; } @@ -3221,13 +3182,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) ret = nr; break; } + ret += nr; if (!iov_iter_is_bvec(iter)) { iov_iter_advance(iter, nr); } else { - req->rw.len -= nr; req->rw.addr += nr; + req->rw.len -= nr; + if (!req->rw.len) + break; } - ret += nr; if (nr != iovec.iov_len) break; } @@ -3422,6 +3385,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; + struct iov_iter iter_cp; struct io_async_rw *rw = req->async_data; ssize_t io_size, ret, ret2; bool no_async; @@ -3432,6 +3396,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iter_cp = *iter; io_size = iov_iter_count(iter); req->result = io_size; ret = 0; @@ -3467,7 +3432,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, if (req->file->f_flags & O_NONBLOCK) goto done; /* some cases will consume bytes even on error returns */ - iov_iter_revert(iter, io_size - 
iov_iter_count(iter)); + *iter = iter_cp; ret = 0; goto copy_iov; } else if (ret < 0) { @@ -3550,6 +3515,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; + struct iov_iter iter_cp; struct io_async_rw *rw = req->async_data; ssize_t ret, ret2, io_size; @@ -3559,6 +3525,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iter_cp = *iter; io_size = iov_iter_count(iter); req->result = io_size; @@ -3620,7 +3587,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, } else { copy_iov: /* some cases will consume bytes even on error returns */ - iov_iter_revert(iter, io_size - iov_iter_count(iter)); + *iter = iter_cp; ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); if (!ret) return -EAGAIN; @@ -4065,6 +4032,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head) } else { list_add_tail(&buf->list, &(*head)->list); } + cond_resched(); } return i ? i : -ENOMEM; @@ -6725,6 +6693,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) err = io_submit_sqe(req, sqe, &link, &state.comp); if (err) goto fail_req; + /* to avoid doing too much in one submit round */ + if (submitted > IORING_MAX_ENTRIES / 2) + cond_resched(); } if (unlikely(submitted != nr)) { @@ -7347,6 +7318,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) fput(fpl->fp[i]); } else { kfree_skb(skb); + free_uid(fpl->user); kfree(fpl); } diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index b121d7d434c67510a58c7d962610abc68f3423cc..98cfa73cb165bc512987bfb9ede08ad447f082c8 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -501,7 +501,6 @@ void jbd2_journal_commit_transaction(journal_t *journal) } spin_unlock(&commit_transaction->t_handle_lock); commit_transaction->t_state = T_SWITCH; - write_unlock(&journal->j_state_lock); J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <= journal->j_max_transaction_buffers); @@ -521,6 +520,8 @@ void jbd2_journal_commit_transaction(journal_t *journal) * has reserved. This is consistent with the existing behaviour * that multiple jbd2_journal_get_write_access() calls to the same * buffer are perfectly permissible. + * We use journal->j_state_lock here to serialize processing of + * t_reserved_list with eviction of buffers from journal_unmap_buffer(). */ while (commit_transaction->t_reserved_list) { jh = commit_transaction->t_reserved_list; @@ -540,6 +541,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) jbd2_journal_refile_buffer(journal, jh); } + write_unlock(&journal->j_state_lock); /* * Now try to drop any written-back buffers from the journal's * checkpoint lists. 
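The io_uring read/write paths above keep a by-value copy of the iov_iter taken right after import and restore it wholesale on -EAGAIN, instead of trying to revert a partially consumed iterator. The same snapshot-and-restore idea on an invented cursor type:

/* Sketch: snapshot-and-restore of a by-value cursor before a retryable op. */
#include <stdio.h>
#include <errno.h>
#include <stddef.h>

struct cursor {
    const char *buf;
    size_t off;
    size_t len;
};

/* Pretend operation: consumes some bytes, then reports it must be retried. */
static int try_consume(struct cursor *c)
{
    size_t take = c->len < 3 ? c->len : 3;

    c->off += take;
    c->len -= take;
    return -EAGAIN;
}

int main(void)
{
    struct cursor cur = { "hello world", 0, 11 };
    struct cursor saved = cur;          /* full by-value snapshot */

    if (try_consume(&cur) == -EAGAIN)
        cur = saved;                    /* restore instead of "reverting" */

    printf("off=%zu len=%zu\n", cur.off, cur.len);   /* prints 0 and 11 */
    return 0;
}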
We do this *before* commit because it potentially @@ -562,13 +564,13 @@ void jbd2_journal_commit_transaction(journal_t *journal) */ jbd2_journal_switch_revoke_table(journal); + write_lock(&journal->j_state_lock); /* * Reserved credits cannot be claimed anymore, free them */ atomic_sub(atomic_read(&journal->j_reserved_credits), &commit_transaction->t_outstanding_credits); - write_lock(&journal->j_state_lock); trace_jbd2_commit_flushing(journal, commit_transaction); stats.run.rs_flushing = jiffies; stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked, diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index aedad59f8a4589af32e0ee3002f33b44ba65e667..e58ae29a223d755bf58f685ae248db2909097c46 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -148,6 +148,7 @@ static const s8 budtab[256] = { * 0 - success * -ENOMEM - insufficient memory * -EIO - i/o error + * -EINVAL - wrong bmap data */ int dbMount(struct inode *ipbmap) { @@ -179,6 +180,12 @@ int dbMount(struct inode *ipbmap) bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree); bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage); bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag); + if (!bmp->db_numag) { + release_metapage(mp); + kfree(bmp); + return -EINVAL; + } + bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel); bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag); bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref); diff --git a/fs/namei.c b/fs/namei.c index 0782401c6514f1b6dde5c1ad9684feea33da715d..4b55e176cbfcf76b80dc05aeb1480f9bbed608b6 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3818,7 +3818,9 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegate return -EPERM; inode_lock(target); - if (is_local_mountpoint(dentry)) + if (IS_SWAPFILE(target)) + error = -EPERM; + else if (is_local_mountpoint(dentry)) error = -EBUSY; else { error = security_inode_unlink(dir, dentry); @@ -4282,6 +4284,10 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, else if (target) inode_lock(target); + error = -EPERM; + if (IS_SWAPFILE(source) || (target && IS_SWAPFILE(target))) + goto out; + error = -EBUSY; if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry)) goto out; diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index b44219ce60b869c5c6556ab6053ee5e47c12b170..a5209643ac36ce4c7d04c8a7be1e6ce4db79bd45 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -353,12 +353,11 @@ __be32 nfs4_callback_devicenotify(void *argp, void *resp, struct cb_process_state *cps) { struct cb_devicenotifyargs *args = argp; + const struct pnfs_layoutdriver_type *ld = NULL; uint32_t i; __be32 res = 0; - struct nfs_client *clp = cps->clp; - struct nfs_server *server = NULL; - if (!clp) { + if (!cps->clp) { res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION); goto out; } @@ -366,23 +365,15 @@ __be32 nfs4_callback_devicenotify(void *argp, void *resp, for (i = 0; i < args->ndevs; i++) { struct cb_devicenotifyitem *dev = &args->devs[i]; - if (!server || - server->pnfs_curr_ld->id != dev->cbd_layout_type) { - rcu_read_lock(); - list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) - if (server->pnfs_curr_ld && - server->pnfs_curr_ld->id == dev->cbd_layout_type) { - rcu_read_unlock(); - goto found; - } - rcu_read_unlock(); - continue; + if (!ld || ld->id != dev->cbd_layout_type) { + pnfs_put_layoutdriver(ld); + ld = pnfs_find_layoutdriver(dev->cbd_layout_type); + if (!ld) + continue; } - - found: - nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id); + nfs4_delete_deviceid(ld, 
cps->clp, &dev->cbd_dev_id); } - + pnfs_put_layoutdriver(ld); out: kfree(args->devs); return res; diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 1725079a05276275c4b9d90d0af3bdc591079bc4..ca8a4aa351dc948303dafa647a50f8893caa0465 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -272,10 +272,6 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp, n = ntohl(*p++); if (n == 0) goto out; - if (n > ULONG_MAX / sizeof(*args->devs)) { - status = htonl(NFS4ERR_BADXDR); - goto out; - } args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL); if (!args->devs) { diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 2ad56ff4752c7f8a85f270539d0bdc43ad7bb393..9f88ca7b2001567447aa47c79d6b91aba12b3246 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1628,16 +1628,6 @@ const struct dentry_operations nfs4_dentry_operations = { }; EXPORT_SYMBOL_GPL(nfs4_dentry_operations); -static fmode_t flags_to_mode(int flags) -{ - fmode_t res = (__force fmode_t)flags & FMODE_EXEC; - if ((flags & O_ACCMODE) != O_WRONLY) - res |= FMODE_READ; - if ((flags & O_ACCMODE) != O_RDONLY) - res |= FMODE_WRITE; - return res; -} - static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp) { return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 4556e75d4591ff981931e50a18055ac2bdd97a24..f96367a2463e37a355d777fdfe4314eeb561191e 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -587,14 +587,12 @@ static const struct vm_operations_struct nfs_file_vm_ops = { .page_mkwrite = nfs_vm_page_mkwrite, }; -static int nfs_need_check_write(struct file *filp, struct inode *inode, - int error) +static int nfs_need_check_write(struct file *filp, struct inode *inode) { struct nfs_open_context *ctx; ctx = nfs_file_open_context(filp); - if (nfs_error_is_fatal_on_server(error) || - nfs_ctx_key_to_expire(ctx, inode)) + if (nfs_ctx_key_to_expire(ctx, inode)) return 1; return 0; } @@ -605,8 +603,6 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(file); unsigned long written = 0; ssize_t result; - errseq_t since; - int error; result = nfs_key_timeout_notify(file, inode); if (result) @@ -631,7 +627,6 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) if (iocb->ki_pos > i_size_read(inode)) nfs_revalidate_mapping(inode, file->f_mapping); - since = filemap_sample_wb_err(file->f_mapping); nfs_start_io_write(inode); result = generic_write_checks(iocb, from); if (result > 0) { @@ -650,8 +645,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) goto out; /* Return error values */ - error = filemap_check_wb_err(file->f_mapping, since); - if (nfs_need_check_write(file, inode, error)) { + if (nfs_need_check_write(file, inode)) { int err = nfs_wb_all(inode); if (err < 0) result = err; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index f27ecc2e490f23ee62aaa15493df9f1025d96614..1adece1cff3ed9cacc449845a7927a1364328743 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1139,7 +1139,6 @@ int nfs_open(struct inode *inode, struct file *filp) nfs_fscache_open_file(inode, filp); return 0; } -EXPORT_SYMBOL_GPL(nfs_open); /* * This function is called whenever some part of NFS notices that diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 98554dd18a7157619c2b5f85c751b4dd88c7311a..4c7666cd20e1757399b6bd8ea8ae07c9cc523a10 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -42,6 +42,16 @@ static inline bool 
nfs_lookup_is_soft_revalidate(const struct dentry *dentry) return true; } +static inline fmode_t flags_to_mode(int flags) +{ + fmode_t res = (__force fmode_t)flags & FMODE_EXEC; + if ((flags & O_ACCMODE) != O_WRONLY) + res |= FMODE_READ; + if ((flags & O_ACCMODE) != O_RDONLY) + res |= FMODE_WRITE; + return res; +} + /* * Note: RFC 1813 doesn't limit the number of auth flavors that * a server can return, so make something up. diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index f6676af37d5dbb26baee5e776f5c7c05554227ab..5e6453e9b30790bd8cb7160c14feecfc5bb71186 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -948,7 +948,7 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, error = decode_filename_inline(xdr, &entry->name, &entry->len); if (unlikely(error)) - return error; + return -EAGAIN; /* * The type (size and byte order) of nfscookie isn't defined in diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index dff6b52d26a856ab698828d32f35f56ab7a7c939..b5a9379b14504cb15172886a6c97acd7e6bdc0d7 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -1964,7 +1964,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, bool plus) { struct user_namespace *userns = rpc_userns(entry->server->client); - struct nfs_entry old = *entry; __be32 *p; int error; u64 new_cookie; @@ -1984,15 +1983,15 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, error = decode_fileid3(xdr, &entry->ino); if (unlikely(error)) - return error; + return -EAGAIN; error = decode_inline_filename3(xdr, &entry->name, &entry->len); if (unlikely(error)) - return error; + return -EAGAIN; error = decode_cookie3(xdr, &new_cookie); if (unlikely(error)) - return error; + return -EAGAIN; entry->d_type = DT_UNKNOWN; @@ -2000,7 +1999,7 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, entry->fattr->valid = 0; error = decode_post_op_attr(xdr, entry->fattr, userns); if (unlikely(error)) - return error; + return -EAGAIN; if (entry->fattr->valid & NFS_ATTR_FATTR_V3) entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); @@ -2015,11 +2014,8 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, return -EAGAIN; if (*p != xdr_zero) { error = decode_nfs_fh3(xdr, entry->fh); - if (unlikely(error)) { - if (error == -E2BIG) - goto out_truncated; - return error; - } + if (unlikely(error)) + return -EAGAIN; } else zero_nfs_fh3(entry->fh); } @@ -2028,11 +2024,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, entry->cookie = new_cookie; return 0; - -out_truncated: - dprintk("NFS: directory entry contains invalid file handle\n"); - *entry = old; - return -EAGAIN; } /* diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 079ec1947c2470dd7bc685e717fcb7473efdab28..f12494e08267f5c2163069b001c0c582dfec66cf 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -32,6 +32,7 @@ nfs4_file_open(struct inode *inode, struct file *filp) struct dentry *parent = NULL; struct inode *dir; unsigned openflags = filp->f_flags; + fmode_t f_mode; struct iattr attr; int err; @@ -50,8 +51,9 @@ nfs4_file_open(struct inode *inode, struct file *filp) if (err) return err; + f_mode = filp->f_mode; if ((openflags & O_ACCMODE) == 3) - return nfs_open(inode, filp); + f_mode |= flags_to_mode(openflags); /* We can't create new files here */ openflags &= ~(O_CREAT|O_EXCL); @@ -59,7 +61,7 @@ nfs4_file_open(struct inode *inode, struct file *filp) parent = dget_parent(dentry); dir = d_inode(parent); - ctx = 
alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp); + ctx = alloc_nfs_open_context(file_dentry(filp), f_mode, filp); err = PTR_ERR(ctx); if (IS_ERR(ctx)) goto out; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d222a980164b79e5869c9585f2aa9a2c9952d5e9..77199d3560429753e4b2c64a3874dbc72fad6902 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -8205,6 +8205,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) case -NFS4ERR_DEADSESSION: nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); + return; } if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && res->dir != NFS4_CDFS4_BOTH) { diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 5370e082aded53187e7779f0832b7a2901e9701e..b3b9eff5d57275a4be84a9e394c67ea581fa90aa 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -92,6 +92,17 @@ find_pnfs_driver(u32 id) return local; } +const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id) +{ + return find_pnfs_driver(id); +} + +void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld) +{ + if (ld) + module_put(ld->owner); +} + void unset_pnfs_layoutdriver(struct nfs_server *nfss) { diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 0212fe32e63aa285f871b80595ef7a339f36114d..11d9ed9addc06ef51d6f00704c0c4080f68b86a2 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -236,6 +236,8 @@ struct pnfs_devicelist { extern int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *); extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *); +extern const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id); +extern void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld); /* nfs4proc.c */ extern size_t max_response_pages(struct nfs_server *server); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index bde4c362841f009cdb492d0b5787c14fbc100409..cc926e69ee9baaebb898d801e4591207e5ec0a6a 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -314,7 +314,10 @@ static void nfs_mapping_set_error(struct page *page, int error) struct address_space *mapping = page_file_mapping(page); SetPageError(page); - mapping_set_error(mapping, error); + filemap_set_wb_err(mapping, error); + if (mapping->host) + errseq_set(&mapping->host->i_sb->s_wb_err, + error == -ENOSPC ? 
-ENOSPC : -EIO); nfs_set_pageerror(mapping); } diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index e5aad1c10ea32dd050a2fedb504f0b33d1fb5c94..acd0898e3866d88d7d44edb7845052207707ea9a 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -641,7 +641,7 @@ nfsd_file_cache_init(void) if (!nfsd_filecache_wq) goto out; - nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE, + nfsd_file_hashtbl = kvcalloc(NFSD_FILE_HASH_SIZE, sizeof(*nfsd_file_hashtbl), GFP_KERNEL); if (!nfsd_file_hashtbl) { pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n"); @@ -708,7 +708,7 @@ nfsd_file_cache_init(void) nfsd_file_slab = NULL; kmem_cache_destroy(nfsd_file_mark_slab); nfsd_file_mark_slab = NULL; - kfree(nfsd_file_hashtbl); + kvfree(nfsd_file_hashtbl); nfsd_file_hashtbl = NULL; destroy_workqueue(nfsd_filecache_wq); nfsd_filecache_wq = NULL; @@ -854,7 +854,7 @@ nfsd_file_cache_shutdown(void) fsnotify_wait_marks_destroyed(); kmem_cache_destroy(nfsd_file_mark_slab); nfsd_file_mark_slab = NULL; - kfree(nfsd_file_hashtbl); + kvfree(nfsd_file_hashtbl); nfsd_file_hashtbl = NULL; destroy_workqueue(nfsd_filecache_wq); nfsd_filecache_wq = NULL; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d01d7929753efec03f8e942d0b61e320e12c85b9..84dd68091f42262ba2eafa95e01296b84666a3c0 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4607,6 +4607,14 @@ nfsd_break_deleg_cb(struct file_lock *fl) return ret; } +/** + * nfsd_breaker_owns_lease - Check if lease conflict was resolved + * @fl: Lock state to check + * + * Return values: + * %true: Lease conflict was resolved + * %false: Lease conflict was not resolved. + */ static bool nfsd_breaker_owns_lease(struct file_lock *fl) { struct nfs4_delegation *dl = fl->fl_owner; @@ -4614,11 +4622,11 @@ static bool nfsd_breaker_owns_lease(struct file_lock *fl) struct nfs4_client *clp; if (!i_am_nfsd()) - return NULL; + return false; rqst = kthread_data(current); /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */ if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4) - return NULL; + return false; clp = *(rqst->rq_lease_breaker); return dl->dl_stid.sc_client == clp; } diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index dedec4771ecc2cd1d569184d19db07b356b7964d..5b09b82a4e5934fc8784fd1ddbc218c0b5554edf 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1540,20 +1540,20 @@ static int __init init_nfsd(void) retval = create_proc_exports_entry(); if (retval) goto out_free_lockd; - retval = register_filesystem(&nfsd_fs_type); - if (retval) - goto out_free_exports; retval = register_pernet_subsys(&nfsd_net_ops); if (retval < 0) - goto out_free_filesystem; + goto out_free_exports; retval = register_cld_notifier(); + if (retval) + goto out_free_subsys; + retval = register_filesystem(&nfsd_fs_type); if (retval) goto out_free_all; return 0; out_free_all: + unregister_cld_notifier(); +out_free_subsys: unregister_pernet_subsys(&nfsd_net_ops); -out_free_filesystem: - unregister_filesystem(&nfsd_fs_type); out_free_exports: remove_proc_entry("fs/nfs/exports", NULL); remove_proc_entry("fs/nfs", NULL); @@ -1570,6 +1570,7 @@ static int __init init_nfsd(void) static void __exit exit_nfsd(void) { + unregister_filesystem(&nfsd_fs_type); unregister_cld_notifier(); unregister_pernet_subsys(&nfsd_net_ops); nfsd_drc_slab_free(); @@ -1579,7 +1580,6 @@ static void __exit exit_nfsd(void) nfsd_lockd_shutdown(); nfsd4_free_slabs(); nfsd4_exit_pnfs(); - unregister_filesystem(&nfsd_fs_type); } MODULE_AUTHOR("Olaf Kirch "); diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 
9c9de2b66e64169b93c21a028d3a78c113dabe6f..bbd01e8397f6e53905f4a8ba1515986a440044e0 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -223,7 +223,7 @@ nfsd_proc_write(struct svc_rqst *rqstp) unsigned long cnt = argp->len; unsigned int nvecs; - dprintk("nfsd: WRITE %s %d bytes at %d\n", + dprintk("nfsd: WRITE %s %u bytes at %d\n", SVCFH_fmt(&argp->fh), argp->len, argp->offset); diff --git a/fs/nfsd/xdr.h b/fs/nfsd/xdr.h index 0ff336b0b25f9a3b498516dab19863631e96f088..b8cc6a4b2e0ec6f272cb9f17d8c66f951d3bad53 100644 --- a/fs/nfsd/xdr.h +++ b/fs/nfsd/xdr.h @@ -33,7 +33,7 @@ struct nfsd_readargs { struct nfsd_writeargs { svc_fh fh; __u32 offset; - int len; + __u32 len; struct kvec first; }; diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index ea18e4a2a691df3a3679dbc56dfbf31596be21b0..cf222c9225d6d5816b7928d26a1c0eecda00f0c4 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -1881,6 +1881,10 @@ int ntfs_read_inode_mount(struct inode *vi) } /* Now allocate memory for the attribute list. */ ni->attr_list_size = (u32)ntfs_attr_size(a); + if (!ni->attr_list_size) { + ntfs_error(sb, "Attr_list_size is zero"); + goto put_err_out; + } ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size); if (!ni->attr_list) { ntfs_error(sb, "Not enough memory to allocate buffer " diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c index 06492f088d6020592bd4fdb8b1a6ecd22b823324..fc36c53b865a7f654faa14872f970f9a2ab2fce7 100644 --- a/fs/ntfs3/fslog.c +++ b/fs/ntfs3/fslog.c @@ -1185,8 +1185,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first, if (!r_page) return -ENOMEM; - memset(info, 0, sizeof(struct restart_info)); - /* Determine which restart area we are looking for. */ if (first) { vbo = 0; @@ -3791,10 +3789,11 @@ int log_replay(struct ntfs_inode *ni, bool *initialized) if (!log) return -ENOMEM; + memset(&rst_info, 0, sizeof(struct restart_info)); + log->ni = ni; log->l_size = l_size; log->one_page_buf = kmalloc(page_size, GFP_NOFS); - if (!log->one_page_buf) { err = -ENOMEM; goto out; @@ -3842,6 +3841,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized) if (rst_info.vbo) goto check_restart_area; + memset(&rst_info2, 0, sizeof(struct restart_info)); err = log_read_rst(log, l_size, false, &rst_info2); /* Determine which restart area to use. 
*/ diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 435f82892432c6b33e7502259f3491c451301a64..477ad05a34ea2edf075fb8db225b60f608713b49 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1110,17 +1110,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) goto read_super_error; } - root = d_make_root(inode); - if (!root) { - status = -ENOMEM; - mlog_errno(status); - goto read_super_error; - } - - sb->s_root = root; - - ocfs2_complete_mount_recovery(osb); - osb->osb_dev_kset = kset_create_and_add(sb->s_id, NULL, &ocfs2_kset->kobj); if (!osb->osb_dev_kset) { @@ -1138,6 +1127,17 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) goto read_super_error; } + root = d_make_root(inode); + if (!root) { + status = -ENOMEM; + mlog_errno(status); + goto read_super_error; + } + + sb->s_root = root; + + ocfs2_complete_mount_recovery(osb); + if (ocfs2_mount_local(osb)) snprintf(nodestr, sizeof(nodestr), "local"); else diff --git a/fs/pipe.c b/fs/pipe.c index f5ae4feb512ea62d6d9423bb3c5fda6812f1921d..2c2bacd315322c7ee788ee58b2bdc5c8c92995bd 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -252,7 +252,8 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) */ was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage); for (;;) { - unsigned int head = pipe->head; + /* Read ->head with a barrier vs post_one_notification() */ + unsigned int head = smp_load_acquire(&pipe->head); unsigned int tail = pipe->tail; unsigned int mask = pipe->ring_size - 1; @@ -832,10 +833,8 @@ void free_pipe_info(struct pipe_inode_info *pipe) int i; #ifdef CONFIG_WATCH_QUEUE - if (pipe->watch_queue) { + if (pipe->watch_queue) watch_queue_clear(pipe->watch_queue); - put_watch_queue(pipe->watch_queue); - } #endif (void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0); @@ -845,6 +844,10 @@ void free_pipe_info(struct pipe_inode_info *pipe) if (buf->ops) pipe_buf_release(pipe, buf); } +#ifdef CONFIG_WATCH_QUEUE + if (pipe->watch_queue) + put_watch_queue(pipe->watch_queue); +#endif if (pipe->tmp_page) __free_page(pipe->tmp_page); kfree(pipe->bufs); diff --git a/fs/proc/base.c b/fs/proc/base.c index 2ba1313aa444948a90e7935c49b2d9107e817532..b9052be86e8d56a7aaa83d847b30bd58ded52a7a 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1988,7 +1988,7 @@ void proc_pid_evict_inode(struct proc_inode *ei) put_pid(pid); } -struct inode *proc_pid_make_inode(struct super_block * sb, +struct inode *proc_pid_make_inode(struct super_block *sb, struct task_struct *task, umode_t mode) { struct inode * inode; @@ -2017,11 +2017,6 @@ struct inode *proc_pid_make_inode(struct super_block * sb, /* Let the pid remember us for quick removal */ ei->pid = pid; - if (S_ISDIR(mode)) { - spin_lock(&pid->lock); - hlist_add_head_rcu(&ei->sibling_inodes, &pid->inodes); - spin_unlock(&pid->lock); - } task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid); security_task_to_inode(task, inode); @@ -2034,6 +2029,27 @@ struct inode *proc_pid_make_inode(struct super_block * sb, return NULL; } +static struct inode *proc_pid_make_base_inode(struct super_block *sb, + struct task_struct *task, umode_t mode) +{ + struct inode *inode; + struct proc_inode *ei; + struct pid *pid; + + inode = proc_pid_make_inode(sb, task, mode); + if (!inode) + return NULL; + + /* Let proc_flush_pid find this directory inode */ + ei = PROC_I(inode); + pid = ei->pid; + spin_lock(&pid->lock); + hlist_add_head_rcu(&ei->sibling_inodes, &pid->inodes); + spin_unlock(&pid->lock); + + return inode; +} + int pid_getattr(const struct path 
*path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { @@ -3459,7 +3475,8 @@ static struct dentry *proc_pid_instantiate(struct dentry * dentry, { struct inode *inode; - inode = proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO); + inode = proc_pid_make_base_inode(dentry->d_sb, task, + S_IFDIR | S_IRUGO | S_IXUGO); if (!inode) return ERR_PTR(-ENOENT); @@ -3765,7 +3782,8 @@ static struct dentry *proc_task_instantiate(struct dentry *dentry, struct task_struct *task, const void *ptr) { struct inode *inode; - inode = proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO); + inode = proc_pid_make_base_inode(dentry->d_sb, task, + S_IFDIR | S_IRUGO | S_IXUGO); if (!inode) return ERR_PTR(-ENOENT); diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c index ad31ec4ad6270138af7528ad2acc1f8192dee5d7..d82dae133243b1471a603d5261774c97f013b752 100644 --- a/fs/proc/bootconfig.c +++ b/fs/proc/bootconfig.c @@ -32,6 +32,8 @@ static int __init copy_xbc_key_value_list(char *dst, size_t size) int ret = 0; key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL); + if (!key) + return -ENOMEM; xbc_for_each_key_value(leaf, val) { ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX); diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index b1ebf7b61732c396f731cb988f5e07addee065b8..ce03c3dbb5c308ea93d2a84491123e279fa31435 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -143,21 +143,22 @@ static void pstore_timer_kick(void) mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms)); } -/* - * Should pstore_dump() wait for a concurrent pstore_dump()? If - * not, the current pstore_dump() will report a failure to dump - * and return. - */ -static bool pstore_cannot_wait(enum kmsg_dump_reason reason) +static bool pstore_cannot_block_path(enum kmsg_dump_reason reason) { - /* In NMI path, pstore shouldn't block regardless of reason. */ + /* + * In case of NMI path, pstore shouldn't be blocked + * regardless of reason. + */ if (in_nmi()) return true; switch (reason) { /* In panic case, other cpus are stopped by smp_send_stop(). */ case KMSG_DUMP_PANIC: - /* Emergency restart shouldn't be blocked. */ + /* + * Emergency restart shouldn't be blocked by spinning on + * pstore_info::buf_lock. + */ case KMSG_DUMP_EMERG: return true; default: @@ -388,21 +389,19 @@ static void pstore_dump(struct kmsg_dumper *dumper, unsigned long total = 0; const char *why; unsigned int part = 1; + unsigned long flags = 0; int ret; why = kmsg_dump_reason_str(reason); - if (down_trylock(&psinfo->buf_lock)) { - /* Failed to acquire lock: give up if we cannot wait. */ - if (pstore_cannot_wait(reason)) { - pr_err("dump skipped in %s path: may corrupt error record\n", - in_nmi() ? "NMI" : why); - return; - } - if (down_interruptible(&psinfo->buf_lock)) { - pr_err("could not grab semaphore?!\n"); + if (pstore_cannot_block_path(reason)) { + if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) { + pr_err("dump skipped in %s path because of concurrent dump\n", + in_nmi() ? 
"NMI" : why); return; } + } else { + spin_lock_irqsave(&psinfo->buf_lock, flags); } oopscount++; @@ -464,8 +463,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, total += record.size; part++; } - - up(&psinfo->buf_lock); + spin_unlock_irqrestore(&psinfo->buf_lock, flags); } static struct kmsg_dumper pstore_dumper = { @@ -591,7 +589,7 @@ int pstore_register(struct pstore_info *psi) psi->write_user = pstore_write_user_compat; psinfo = psi; mutex_init(&psinfo->read_mutex); - sema_init(&psinfo->buf_lock, 1); + spin_lock_init(&psinfo->buf_lock); if (psi->flags & PSTORE_FLAGS_DMESG) allocate_buf_for_compression(); diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index ade05887070dd64a04403c27ade76d8b0198a19d..8b7315c22f0d1208e3b897d61c912561642c8b00 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -262,7 +262,6 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) if (!gid_valid(gid)) return -EINVAL; opts->gid = gid; - set_gid(tracefs_mount->mnt_root, gid); break; case Opt_mode: if (match_octal(&args[0], &option)) @@ -289,7 +288,9 @@ static int tracefs_apply_options(struct super_block *sb) inode->i_mode |= opts->mode; inode->i_uid = opts->uid; - inode->i_gid = opts->gid; + + /* Set all the group ids to the mount option */ + set_gid(sb->s_root, opts->gid); return 0; } diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 8533571421e6822c6d9c52b4a76738fe7cc2828f..9148170a12cbfd310e2cb03b80f24764a78db323 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1205,6 +1205,22 @@ suffix_kstrtoint( return ret; } +static inline void +xfs_fs_warn_deprecated( + struct fs_context *fc, + struct fs_parameter *param, + uint64_t flag, + bool value) +{ + /* Don't print the warning if reconfiguring and current mount point + * already had the flag set + */ + if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && + !!(XFS_M(fc->root->d_sb)->m_flags & flag) == value) + return; + xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); +} + /* * Set mount state from a mount option. 
* @@ -1215,7 +1231,7 @@ xfs_fc_parse_param( struct fs_context *fc, struct fs_parameter *param) { - struct xfs_mount *mp = fc->s_fs_info; + struct xfs_mount *parsing_mp = fc->s_fs_info; struct fs_parse_result result; int size = 0; int opt; @@ -1226,138 +1242,138 @@ xfs_fc_parse_param( switch (opt) { case Opt_logbufs: - mp->m_logbufs = result.uint_32; + parsing_mp->m_logbufs = result.uint_32; return 0; case Opt_logbsize: - if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize)) + if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize)) return -EINVAL; return 0; case Opt_logdev: - kfree(mp->m_logname); - mp->m_logname = kstrdup(param->string, GFP_KERNEL); - if (!mp->m_logname) + kfree(parsing_mp->m_logname); + parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL); + if (!parsing_mp->m_logname) return -ENOMEM; return 0; case Opt_rtdev: - kfree(mp->m_rtname); - mp->m_rtname = kstrdup(param->string, GFP_KERNEL); - if (!mp->m_rtname) + kfree(parsing_mp->m_rtname); + parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL); + if (!parsing_mp->m_rtname) return -ENOMEM; return 0; case Opt_allocsize: if (suffix_kstrtoint(param->string, 10, &size)) return -EINVAL; - mp->m_allocsize_log = ffs(size) - 1; - mp->m_flags |= XFS_MOUNT_ALLOCSIZE; + parsing_mp->m_allocsize_log = ffs(size) - 1; + parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE; return 0; case Opt_grpid: case Opt_bsdgroups: - mp->m_flags |= XFS_MOUNT_GRPID; + parsing_mp->m_flags |= XFS_MOUNT_GRPID; return 0; case Opt_nogrpid: case Opt_sysvgroups: - mp->m_flags &= ~XFS_MOUNT_GRPID; + parsing_mp->m_flags &= ~XFS_MOUNT_GRPID; return 0; case Opt_wsync: - mp->m_flags |= XFS_MOUNT_WSYNC; + parsing_mp->m_flags |= XFS_MOUNT_WSYNC; return 0; case Opt_norecovery: - mp->m_flags |= XFS_MOUNT_NORECOVERY; + parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY; return 0; case Opt_noalign: - mp->m_flags |= XFS_MOUNT_NOALIGN; + parsing_mp->m_flags |= XFS_MOUNT_NOALIGN; return 0; case Opt_swalloc: - mp->m_flags |= XFS_MOUNT_SWALLOC; + parsing_mp->m_flags |= XFS_MOUNT_SWALLOC; return 0; case Opt_sunit: - mp->m_dalign = result.uint_32; + parsing_mp->m_dalign = result.uint_32; return 0; case Opt_swidth: - mp->m_swidth = result.uint_32; + parsing_mp->m_swidth = result.uint_32; return 0; case Opt_inode32: - mp->m_flags |= XFS_MOUNT_SMALL_INUMS; + parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS; return 0; case Opt_inode64: - mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; + parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; return 0; case Opt_nouuid: - mp->m_flags |= XFS_MOUNT_NOUUID; + parsing_mp->m_flags |= XFS_MOUNT_NOUUID; return 0; case Opt_largeio: - mp->m_flags |= XFS_MOUNT_LARGEIO; + parsing_mp->m_flags |= XFS_MOUNT_LARGEIO; return 0; case Opt_nolargeio: - mp->m_flags &= ~XFS_MOUNT_LARGEIO; + parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO; return 0; case Opt_filestreams: - mp->m_flags |= XFS_MOUNT_FILESTREAMS; + parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS; return 0; case Opt_noquota: - mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; - mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; + parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; + parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; return 0; case Opt_quota: case Opt_uquota: case Opt_usrquota: - mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD); + parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD); return 0; case Opt_qnoenforce: case Opt_uqnoenforce: - mp->m_qflags |= XFS_UQUOTA_ACCT; - mp->m_qflags &= ~XFS_UQUOTA_ENFD; + parsing_mp->m_qflags |= XFS_UQUOTA_ACCT; + parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD; return 0; case Opt_pquota: case 
Opt_prjquota: - mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD); + parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD); return 0; case Opt_pqnoenforce: - mp->m_qflags |= XFS_PQUOTA_ACCT; - mp->m_qflags &= ~XFS_PQUOTA_ENFD; + parsing_mp->m_qflags |= XFS_PQUOTA_ACCT; + parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD; return 0; case Opt_gquota: case Opt_grpquota: - mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD); + parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD); return 0; case Opt_gqnoenforce: - mp->m_qflags |= XFS_GQUOTA_ACCT; - mp->m_qflags &= ~XFS_GQUOTA_ENFD; + parsing_mp->m_qflags |= XFS_GQUOTA_ACCT; + parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD; return 0; case Opt_discard: - mp->m_flags |= XFS_MOUNT_DISCARD; + parsing_mp->m_flags |= XFS_MOUNT_DISCARD; return 0; case Opt_nodiscard: - mp->m_flags &= ~XFS_MOUNT_DISCARD; + parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD; return 0; #ifdef CONFIG_FS_DAX case Opt_dax: - xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS); + xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS); return 0; case Opt_dax_enum: - xfs_mount_set_dax_mode(mp, result.uint_32); + xfs_mount_set_dax_mode(parsing_mp, result.uint_32); return 0; #endif /* Following mount options will be removed in September 2025 */ case Opt_ikeep: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags |= XFS_MOUNT_IKEEP; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true); + parsing_mp->m_flags |= XFS_MOUNT_IKEEP; return 0; case Opt_noikeep: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags &= ~XFS_MOUNT_IKEEP; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false); + parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP; return 0; case Opt_attr2: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags |= XFS_MOUNT_ATTR2; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true); + parsing_mp->m_flags |= XFS_MOUNT_ATTR2; return 0; case Opt_noattr2: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags &= ~XFS_MOUNT_ATTR2; - mp->m_flags |= XFS_MOUNT_NOATTR2; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true); + parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2; + parsing_mp->m_flags |= XFS_MOUNT_NOATTR2; return 0; default: - xfs_warn(mp, "unknown mount option [%s].", param->key); + xfs_warn(parsing_mp, "unknown mount option [%s].", param->key); return -EINVAL; } diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h index 709f286e7b25355e60e1aa007ad84ee5f67ac9b5..9656a9a403264cf26a7efe598b4b15ded9540fc7 100644 --- a/include/crypto/sm4.h +++ b/include/crypto/sm4.h @@ -21,6 +21,10 @@ struct sm4_ctx { u32 rkey_dec[SM4_RKEY_WORDS]; }; +extern const u32 crypto_sm4_fk[]; +extern const u32 crypto_sm4_ck[]; +extern const u8 crypto_sm4_sbox[]; + /** * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016 * @ctx: The location where the computed key will be stored. 
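For context on the sm4.h change above: the newly declared tables (crypto_sm4_fk, crypto_sm4_ck, crypto_sm4_sbox) are the standard SM4 constants from GB/T 32907-2016, and exporting them presumably lets architecture-specific SM4 code reuse the generic tables instead of carrying private copies. Below is a minimal, illustrative sketch of the SM4 key-schedule step such code could build from these tables; the helper names (sm4_key_sub, sm4_t_key, sm4_sketch_expandkey) are hypothetical and not kernel symbols, and mk[] is assumed to hold the 128-bit key as four 32-bit words.

/*
 * Illustrative sketch only: combines the exported SM4 tables in the
 * key schedule, rk[i] = K[i] ^ T'(K[i+1] ^ K[i+2] ^ K[i+3] ^ CK[i]).
 */
#include <linux/bitops.h>	/* rol32() */
#include <crypto/sm4.h>		/* crypto_sm4_fk, crypto_sm4_ck, crypto_sm4_sbox */

static u32 sm4_key_sub(u32 x)
{
	/* tau: apply the S-box to each byte of the 32-bit word */
	return ((u32)crypto_sm4_sbox[(x >> 24) & 0xff] << 24) |
	       ((u32)crypto_sm4_sbox[(x >> 16) & 0xff] << 16) |
	       ((u32)crypto_sm4_sbox[(x >> 8) & 0xff] << 8) |
		(u32)crypto_sm4_sbox[x & 0xff];
}

static u32 sm4_t_key(u32 x)
{
	/* L': the linear transform used only in the key schedule */
	u32 t = sm4_key_sub(x);

	return t ^ rol32(t, 13) ^ rol32(t, 23);
}

static void sm4_sketch_expandkey(u32 rk[32], const u32 mk[4])
{
	u32 k[36];
	int i;

	for (i = 0; i < 4; i++)
		k[i] = mk[i] ^ crypto_sm4_fk[i];

	for (i = 0; i < 32; i++) {
		k[i + 4] = k[i] ^ sm4_t_key(k[i + 1] ^ k[i + 2] ^
					    k[i + 3] ^ crypto_sm4_ck[i]);
		rk[i] = k[i + 4];
	}
}

The round keys produced this way feed the usual 32 encryption rounds (which use the other linear transform, with rotations by 2, 10, 18 and 24); the in-kernel sm4_expandkey() documented in the hunk above remains the supported interface for generic users.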
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 0bbfd647f5c6dec3c12c52c621a94cf57975ebc5..6cc93ab5b8096d1def9ebd9256e45c08fa86d161 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -76,7 +76,7 @@ struct amba_device { struct amba_driver { struct device_driver drv; int (*probe)(struct amba_device *, const struct amba_id *); - int (*remove)(struct amba_device *); + void (*remove)(struct amba_device *); void (*shutdown)(struct amba_device *); const struct amba_id *id_table; }; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 23dfe7608e793c076b96bafbdcad7fbe4cfcd343..6a4b2a01a46216ca7971629074c626372189ef11 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -27,6 +27,7 @@ #include #include #include +#include struct module; struct scsi_ioctl_command; diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 55b9a3924cd72ca61b01c484b13619f7c2fb9709..35d8ce603815eb1068f73bfc75e8e23bd3fdebb9 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -487,7 +487,12 @@ struct cgroup { /* used to schedule release agent */ struct work_struct release_agent_work; - /* used to track pressure stalls */ + /* used to track pressure stalls. */ + + /* + * It is accessed only the cgroup core code and so changes made to + * the cgroup structure should not affect third-party kernel modules. + */ struct psi_group psi; /* used to store eBPF programs */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index d6428aaf67e7313be7db903dfead2b907e9d95a1..d63b8f70d1239df47eb79217779d8469c4aad473 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -65,6 +65,9 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev, extern ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, + char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index bf5c5f32c65e46cc3d58208612482c4f42594333..e147ea6794670888fbf949e9cee5dc38e16d8c3e 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -51,6 +51,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) case ARPHRD_VOID: case ARPHRD_NONE: case ARPHRD_RAWIP: + case ARPHRD_PIMREG: return false; default: return true; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fea02cfe65205a87912a03d2e55fcf65a4c1f52e..96f2cd2b46f7eb4b604195f91552a582b97efb82 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1480,6 +1480,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, unsigned long start, unsigned long end); +void kvm_arch_guest_memory_reclaimed(struct kvm *kvm); + #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); #else diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 5f88e6429484bb10829c1143a647a7138245681e..9301f8e9bb9081c95ae050bb7a8faa51c7cf389c 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -229,11 +229,36 @@ struct klp_func_node { struct list_head func_stack; void *old_func; struct arch_klp_data arch_data; + /* + * Used in breakpoint exception handling functions. 
+ * If 'brk_func' is NULL, no breakpoint is inserted into the entry of + * the old function. + * If it is not NULL, the value is the new function that will jump to + * when the breakpoint exception is triggered. + */ + void *brk_func; }; struct klp_func_node *klp_find_func_node(const void *old_func); void klp_add_func_node(struct klp_func_node *func_node); void klp_del_func_node(struct klp_func_node *func_node); +void *klp_get_brk_func(void *addr); + +static inline +int klp_compare_address(unsigned long pc, unsigned long func_addr, + const char *func_name, unsigned long check_size) +{ + if (pc >= func_addr && pc < func_addr + check_size) { + pr_warn("func %s is in use!\n", func_name); + /* Return -EAGAIN for next retry */ + return -EAGAIN; + } + return 0; +} + +void arch_klp_init(void); +int klp_module_delete_safety_check(struct module *mod); + #endif int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 64ab4398ba9050dde814d732bd5215671735461a..ba74e7399dc6600a3dc54ba2724d158b000d9c59 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -201,6 +201,9 @@ extern bool vma_migratable(struct vm_area_struct *vma); extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); extern void mpol_put_task_policy(struct task_struct *); +extern long __do_mbind(unsigned long start, unsigned long len, + unsigned short mode, unsigned short mode_flags, + nodemask_t *nmask, unsigned long flags, struct mm_struct *mm); #else struct mempolicy {}; @@ -301,6 +304,13 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, return -1; /* no node preference */ } +static inline long __do_mbind(unsigned long start, unsigned long len, + unsigned short mode, unsigned short mode_flags, + nodemask_t *nmask, unsigned long flags, struct mm_struct *mm) +{ + return 0; +} + static inline void mpol_put_task_policy(struct task_struct *task) { } diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f5e829e12a76deabe6bff05e9593c15d68f6e293..eba1f1cbc9fbd1b4093f2285d5fa15f4276807f7 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -9307,8 +9307,8 @@ struct mlx5_ifc_bufferx_reg_bits { u8 reserved_at_0[0x6]; u8 lossy[0x1]; u8 epsb[0x1]; - u8 reserved_at_8[0xc]; - u8 size[0xc]; + u8 reserved_at_8[0x8]; + u8 size[0x10]; u8 xoff_threshold[0x10]; u8 xon_threshold[0x10]; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index aac07940de09d7b503d23f74d5adfe6e59873b7b..db2eaff77f41aa10e16c619a6c2de632fa11637a 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1083,6 +1083,7 @@ struct nand_manufacturer { * @lock: Lock protecting the suspended field. Also used to serialize accesses * to the NAND device * @suspended: Set to 1 when the device is suspended, 0 when it's not + * @resume_wq: wait queue to sleep if rawnand is in suspended state. * @cur_cs: Currently selected target. -1 means no target selected, otherwise we * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets(). 
* NAND Controller drivers should not modify this value, but they're @@ -1135,6 +1136,7 @@ struct nand_chip { /* Internals */ struct mutex lock; unsigned int suspended : 1; + wait_queue_head_t resume_wq; int cur_cs; int read_retries; diff --git a/include/linux/psi.h b/include/linux/psi.h index 1b9c0e5955bc0616d5ea5ad9ce7115f3af7d2998..d290f0493c3335cddcc9153d4cdf36499866c0b4 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -22,7 +22,6 @@ void psi_task_change(struct task_struct *task, int clear, int set); void psi_task_switch(struct task_struct *prev, struct task_struct *next, bool sleep); -void psi_memstall_tick(struct task_struct *task, int cpu); void psi_memstall_enter(unsigned long *flags); void psi_memstall_leave(unsigned long *flags); diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index 17d74f62c1818ad93f6568cc6ac6a074aba35f3c..c17aeb774e23a2d77b5a4008c433e47f46e0d951 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -10,6 +10,11 @@ #ifdef CONFIG_PSI /* Tracked task states */ +#ifdef __GENKSYMS__ +/* + * This definition is used to keep kabi unchanged, + * and **should not changed** + */ enum psi_task_count { NR_IOWAIT, NR_MEMSTALL, @@ -23,12 +28,41 @@ enum psi_task_count { NR_ONCPU, NR_PSI_TASK_COUNTS = 4, }; +#else +/* + * All modification to psi_task_count should apply to here. + */ +enum psi_task_count { + NR_IOWAIT, + NR_MEMSTALL, + NR_RUNNING, + /* + * This can't have values other than 0 or 1 and could be + * implemented as a bit flag. But for now we still have room + * in the first cacheline of psi_group_cpu, and this way we + * don't have to special case any state tracking for it. + */ + NR_ONCPU, + /* + * For IO and CPU stalls the presence of running/oncpu tasks + * in the domain means a partial rather than a full stall. + * For memory it's not so simple because of page reclaimers: + * they are running/oncpu while representing a stall. To tell + * whether a domain has productivity left or not, we need to + * distinguish between regular running (i.e. productive) + * threads and memstall ones. + */ + NR_MEMSTALL_RUNNING, + NR_PSI_TASK_COUNTS = 5, +}; +#endif /* Task state bitmasks */ #define TSK_IOWAIT (1 << NR_IOWAIT) #define TSK_MEMSTALL (1 << NR_MEMSTALL) #define TSK_RUNNING (1 << NR_RUNNING) #define TSK_ONCPU (1 << NR_ONCPU) +#define TSK_MEMSTALL_RUNNING (1 << NR_MEMSTALL_RUNNING) /* Resources that workloads could be stalled on */ enum psi_res { @@ -44,6 +78,11 @@ enum psi_res { * SOME: Stalled tasks & working tasks * FULL: Stalled tasks & no working tasks */ +#ifdef __GENKSYMS__ +/* + * This definition is used to keep kabi unchanged, + * and **should not changed** + */ enum psi_states { PSI_IO_SOME, PSI_IO_FULL, @@ -54,6 +93,23 @@ enum psi_states { PSI_NONIDLE, NR_PSI_STATES = 6, }; +#else +/* + * All modification to psi_states should apply to here. 
+ */ +enum psi_states { + PSI_IO_SOME, + PSI_IO_FULL, + PSI_MEM_SOME, + PSI_MEM_FULL, + PSI_CPU_SOME, + PSI_CPU_FULL, + /* Only per-CPU, to weigh the CPU in the global average: */ + PSI_NONIDLE, + NR_PSI_STATES = 7, +}; +#endif + enum psi_aggregators { PSI_AVGS = 0, diff --git a/include/linux/pstore.h b/include/linux/pstore.h index eb93a54cff31fa21d49b77012f14888b51e01bca..e97a8188f0fd8bcac0e0edd8a5570c7aa00edf9d 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include @@ -87,7 +87,7 @@ struct pstore_record { * @owner: module which is responsible for this backend driver * @name: name of the backend driver * - * @buf_lock: semaphore to serialize access to @buf + * @buf_lock: spinlock to serialize access to @buf * @buf: preallocated crash dump buffer * @bufsize: size of @buf available for crash dump bytes (must match * smallest number of bytes available for writing to a @@ -178,7 +178,7 @@ struct pstore_info { struct module *owner; const char *name; - struct semaphore buf_lock; + spinlock_t buf_lock; char *buf; size_t bufsize; diff --git a/include/linux/security.h b/include/linux/security.h index 35355429648e3bd5fbe73f7dbefb955236933769..330029ef7e894b06e8c17399f714d901bb514627 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -121,10 +121,12 @@ enum lockdown_reason { LOCKDOWN_DEBUGFS, LOCKDOWN_XMON_WR, LOCKDOWN_BPF_WRITE_USER, + LOCKDOWN_DBG_WRITE_KERNEL, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_KCORE, LOCKDOWN_KPROBES, LOCKDOWN_BPF_READ, + LOCKDOWN_DBG_READ_KERNEL, LOCKDOWN_PERF, LOCKDOWN_TRACEFS, LOCKDOWN_XMON_RW, diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index ff63c2963359d295dae0a93c19382d985056abb7..35b26743dbb283c6a0d5f9de886d101ea407a3a8 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -463,6 +463,8 @@ extern void uart_handle_cts_change(struct uart_port *uport, extern void uart_insert_char(struct uart_port *port, unsigned int status, unsigned int overrun, unsigned int ch, unsigned int flag); +void uart_xchar_out(struct uart_port *uport, int offset); + #ifdef CONFIG_MAGIC_SYSRQ_SERIAL #define SYSRQ_TIMEOUT (HZ * 5) diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index cf27b080e148216905d65c8be80630e2fb624784..b1af87330f863e1aaa346edab61996504d601948 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -618,7 +618,7 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, static inline struct ti_sci_resource * devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, - u32 dev_id, u32 sub_type); + u32 dev_id, u32 sub_type) { return ERR_PTR(-EINVAL); } diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index b998e4b7369129328ad616831e1f73245ca71fef..6d9d1520612b87e07dedff9875c49f302aa70075 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -603,6 +603,8 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr, if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) return -EBADMSG; + if (len > SIZE_MAX / sizeof(*p)) + return -EBADMSG; p = xdr_inline_decode(xdr, len * sizeof(*p)); if (unlikely(!p)) return -EBADMSG; diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 0c6c1de6f3b7782c833090aed78a28b4efdf7c3f..18a9949bba187e7979a4d409e0485ae3d149f7ca 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -582,4 +582,18 @@ struct 
tee_client_driver { #define to_tee_client_driver(d) \ container_of(d, struct tee_client_driver, driver) +/** + * teedev_open() - Open a struct tee_device + * @teedev: Device to open + * + * @return a pointer to struct tee_context on success or an ERR_PTR on failure. + */ +struct tee_context *teedev_open(struct tee_device *teedev); + +/** + * teedev_close_context() - closes a struct tee_context + * @ctx: The struct tee_context to close + */ +void teedev_close_context(struct tee_context *ctx); + #endif /*__TEE_DRV_H*/ diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 8ecc2e208d6137715fb1455f4b5af1e79f9d9f7e..90c5ad556809721a37fe6512bc7265fb9e251e18 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -135,7 +135,6 @@ void virtio_break_device(struct virtio_device *dev); void virtio_config_changed(struct virtio_device *dev); void virtio_config_disable(struct virtio_device *dev); void virtio_config_enable(struct virtio_device *dev); -int virtio_finalize_features(struct virtio_device *dev); #ifdef CONFIG_PM_SLEEP int virtio_device_freeze(struct virtio_device *dev); int virtio_device_restore(struct virtio_device *dev); diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 8519b3ae5d52ef4d78763109ab7b419721289880..b341dd62aa4da9843f2af1d3132c7132dcc48341 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -62,8 +62,9 @@ struct virtio_shm_region { * Returns the first 64 feature bits (all we currently need). * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device - * This gives the final feature bits for the device: it can change + * This sends the driver feature bits to the device: it can change * the dev->feature bits if it wants. + * Note: despite the name this can be called any number of times. 
* Returns 0 on success or error status * @bus_name: return the bus name associated with the device (optional) * vdev: the virtio_device diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h index c994d1b2cdbaa2abb313170749d0ee396a807a90..3b9a40ae8bdba76dec83554989fa67f98cb59aba 100644 --- a/include/linux/watch_queue.h +++ b/include/linux/watch_queue.h @@ -28,7 +28,8 @@ struct watch_type_filter { struct watch_filter { union { struct rcu_head rcu; - unsigned long type_filter[2]; /* Bitmask of accepted types */ + /* Bitmask of accepted types */ + DECLARE_BITMAP(type_filter, WATCH_TYPE__NR); }; u32 nr_filters; /* Number of filters */ struct watch_type_filter filters[]; diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index b1c7172869939c2d0bfb2f08f11d773e1ed1028b..4d8589244dc75acb2d8efbea4521c2a67d0008c4 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -197,7 +197,8 @@ struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr); struct sock *vsock_find_connected_socket(struct sockaddr_vm *src, struct sockaddr_vm *dst); void vsock_remove_sock(struct vsock_sock *vsk); -void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)); +void vsock_for_each_connected_socket(struct vsock_transport *transport, + void (*fn)(struct sock *sk)); int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk); bool vsock_find_cid(unsigned int cid); diff --git a/include/net/ax25.h b/include/net/ax25.h index 8b7eb46ad72d8804c1ffaa3943bb2816113239d8..aadff553e4b734aa6af206ca65f64fff5119eba9 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -236,6 +236,7 @@ typedef struct ax25_dev { #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) ax25_dama_info dama; #endif + refcount_t refcount; } ax25_dev; typedef struct ax25_cb { @@ -290,6 +291,17 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25) } } +static inline void ax25_dev_hold(ax25_dev *ax25_dev) +{ + refcount_inc(&ax25_dev->refcount); +} + +static inline void ax25_dev_put(ax25_dev *ax25_dev) +{ + if (refcount_dec_and_test(&ax25_dev->refcount)) { + kfree(ax25_dev); + } +} static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev) { skb->dev = dev; diff --git a/include/net/checksum.h b/include/net/checksum.h index 0d05b9e8690b8bdf7c526f9625484d2bb54c8106..8b7d0c31598f5158fdae9a513c3cf8f01ffa9d99 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -22,7 +22,7 @@ #include #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER -static inline +static __always_inline __wsum csum_and_copy_from_user (const void __user *src, void *dst, int len) { @@ -33,7 +33,7 @@ __wsum csum_and_copy_from_user (const void __user *src, void *dst, #endif #ifndef HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user +static __always_inline __wsum csum_and_copy_to_user (const void *src, void __user *dst, int len) { __wsum sum = csum_partial(src, len, ~0U); @@ -45,7 +45,7 @@ static __inline__ __wsum csum_and_copy_to_user #endif #ifndef _HAVE_ARCH_CSUM_AND_COPY -static inline __wsum +static __always_inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) { memcpy(dst, src, len); @@ -54,7 +54,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len) #endif #ifndef HAVE_ARCH_CSUM_ADD -static inline __wsum csum_add(__wsum csum, __wsum addend) +static __always_inline __wsum csum_add(__wsum csum, __wsum addend) { u32 res = (__force u32)csum; res += (__force u32)addend; @@ -62,12 +62,12 @@ static inline __wsum csum_add(__wsum csum, 
__wsum addend) } #endif -static inline __wsum csum_sub(__wsum csum, __wsum addend) +static __always_inline __wsum csum_sub(__wsum csum, __wsum addend) { return csum_add(csum, ~addend); } -static inline __sum16 csum16_add(__sum16 csum, __be16 addend) +static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend) { u16 res = (__force u16)csum; @@ -75,12 +75,12 @@ static inline __sum16 csum16_add(__sum16 csum, __be16 addend) return (__force __sum16)(res + (res < (__force u16)addend)); } -static inline __sum16 csum16_sub(__sum16 csum, __be16 addend) +static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend) { return csum16_add(csum, ~addend); } -static inline __wsum +static __always_inline __wsum csum_block_add(__wsum csum, __wsum csum2, int offset) { u32 sum = (__force u32)csum2; @@ -92,36 +92,37 @@ csum_block_add(__wsum csum, __wsum csum2, int offset) return csum_add(csum, (__force __wsum)sum); } -static inline __wsum +static __always_inline __wsum csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) { return csum_block_add(csum, csum2, offset); } -static inline __wsum +static __always_inline __wsum csum_block_sub(__wsum csum, __wsum csum2, int offset) { return csum_block_add(csum, ~csum2, offset); } -static inline __wsum csum_unfold(__sum16 n) +static __always_inline __wsum csum_unfold(__sum16 n) { return (__force __wsum)n; } -static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) +static __always_inline +__wsum csum_partial_ext(const void *buff, int len, __wsum sum) { return csum_partial(buff, len, sum); } #define CSUM_MANGLED_0 ((__force __sum16)0xffff) -static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) { *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); } -static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) +static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) { __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); @@ -134,11 +135,16 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) * m : old value of a 16bit field * m' : new value of a 16bit field */ -static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) +static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) { *sum = ~csum16_add(csum16_sub(~(*sum), old), new); } +static inline void csum_replace(__wsum *csum, __wsum old, __wsum new) +{ + *csum = csum_add(csum_sub(*csum, old), new); +} + struct sk_buff; void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, __be32 from, __be32 to, bool pseudohdr); @@ -148,16 +154,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, __wsum diff, bool pseudohdr); -static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, - __be16 from, __be16 to, - bool pseudohdr) +static __always_inline +void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, + __be16 from, __be16 to, bool pseudohdr) { inet_proto_csum_replace4(sum, skb, (__force __be32)from, (__force __be32)to, pseudohdr); } -static inline __wsum remcsum_adjust(void *ptr, __wsum csum, - int start, int offset) +static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum, + int start, int offset) { __sum16 *psum = (__sum16 *)(ptr + offset); __wsum delta; @@ -173,7 +179,7 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum, return delta; 
} -static inline void remcsum_unadjust(__sum16 *psum, __wsum delta) +static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta) { *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum)); } diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index ca6a3ea9057ec431b2dd30726c2e6d06be94fe72..d4d611064a76f587217ac5b51b91d5df7814758e 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -419,7 +419,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr) } int __inet_hash_connect(struct inet_timewait_death_row *death_row, - struct sock *sk, u32 port_offset, + struct sock *sk, u64 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **)); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index c0273ae502964b99aebcadff39bd15c950ff8c8a..9392a81a3ae4ca1f8a690a37c606fcbf7a35f3d3 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -390,17 +390,20 @@ static inline void txopt_put(struct ipv6_txoptions *opt) kfree_rcu(opt, rcu); } +#if IS_ENABLED(CONFIG_IPV6) struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label); extern struct static_key_false_deferred ipv6_flowlabel_exclusive; static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label) { - if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key)) + if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) && + READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl)) return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT); return NULL; } +#endif struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, struct ip6_flowlabel *fl, diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index e770bba00066453885c147cfebaf2f3ed6f00124..b1d43894296a6ac35b9e69e90efa13f32ba9143e 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -37,7 +37,7 @@ void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *q void nf_unregister_queue_handler(struct net *net); void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); -void nf_queue_entry_get_refs(struct nf_queue_entry *entry); +bool nf_queue_entry_get_refs(struct nf_queue_entry *entry); void nf_queue_entry_free(struct nf_queue_entry *entry); static inline void init_hashrandom(u32 *jhash_initval) diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index ce5ed87accda51249937179c7efd7c478ea7b5a8..b2a28201f4fdea03abae09651237b644eb12b57d 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -83,6 +83,11 @@ struct netns_ipv6 { unsigned long ip6_rt_last_gc; #ifdef CONFIG_IPV6_MULTIPLE_TABLES unsigned int fib6_rules_require_fldissect; +#endif +#ifndef __GENKSYMS__ + unsigned char flowlabel_has_excl; +#endif +#ifdef CONFIG_IPV6_MULTIPLE_TABLES bool fib6_has_custom_rules; #ifdef CONFIG_IPV6_SUBTREES unsigned int fib6_routes_require_src; diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h index d7d2495f83c27cc6707fed26f8e433dd6d1eb295..dac91aa38c5af389648e84971b0ad17947ef844c 100644 --- a/include/net/secure_seq.h +++ b/include/net/secure_seq.h @@ -4,8 +4,8 @@ #include -u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); -u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, +u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); +u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport); u32 
secure_tcp_seq(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport); diff --git a/include/net/sock.h b/include/net/sock.h index af73dda0285bbe53f2e4f09c015f291ea2e73bad..7d068cf871a3a9138ce76db8e52568b17f57d07e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -429,7 +429,7 @@ struct sock { #ifdef CONFIG_XFRM struct xfrm_policy __rcu *sk_policy[2]; #endif - struct dst_entry *sk_rx_dst; + struct dst_entry __rcu *sk_rx_dst; struct dst_entry __rcu *sk_dst_cache; atomic_t sk_omem_alloc; int sk_sndbuf; diff --git a/include/net/tls.h b/include/net/tls.h index fee4e55bd8d960fb2ac03f555e8ec3fde191d55e..166903b38f5ed1ef556fbed194bdc6cfed374876 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -67,7 +67,7 @@ #define MAX_IV_SIZE 16 #define TLS_MAX_REC_SEQ_SIZE 8 -/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes. +/* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes. * * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3] * @@ -75,6 +75,7 @@ * Hence b0 contains (3 - 1) = 2. */ #define TLS_AES_CCM_IV_B0_BYTE 2 +#define TLS_SM4_CCM_IV_B0_BYTE 2 #define __TLS_INC_STATS(net, field) \ __SNMP_INC_STATS((net)->mib.tls_statistics, field) @@ -526,31 +527,30 @@ static inline void tls_advance_record_sn(struct sock *sk, tls_err_abort(sk, -EBADMSG); if (prot->version != TLS_1_3_VERSION) - tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, + tls_bigint_increment(ctx->iv + prot->salt_size, prot->iv_size); } static inline void tls_fill_prepend(struct tls_context *ctx, char *buf, size_t plaintext_len, - unsigned char record_type, - int version) + unsigned char record_type) { struct tls_prot_info *prot = &ctx->prot_info; size_t pkt_len, iv_size = prot->iv_size; pkt_len = plaintext_len + prot->tag_size; - if (version != TLS_1_3_VERSION) { + if (prot->version != TLS_1_3_VERSION) { pkt_len += iv_size; memcpy(buf + TLS_NONCE_OFFSET, - ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size); + ctx->tx.iv + prot->salt_size, iv_size); } /* we cover nonce explicit here as well, so buf should be of * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE */ - buf[0] = version == TLS_1_3_VERSION ? + buf[0] = prot->version == TLS_1_3_VERSION ? TLS_RECORD_TYPE_DATA : record_type; /* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */ buf[1] = TLS_1_2_VERSION_MINOR; @@ -563,18 +563,17 @@ static inline void tls_fill_prepend(struct tls_context *ctx, static inline void tls_make_aad(char *buf, size_t size, char *record_sequence, - int record_sequence_size, unsigned char record_type, - int version) + struct tls_prot_info *prot) { - if (version != TLS_1_3_VERSION) { - memcpy(buf, record_sequence, record_sequence_size); + if (prot->version != TLS_1_3_VERSION) { + memcpy(buf, record_sequence, prot->rec_seq_size); buf += 8; } else { - size += TLS_CIPHER_AES_GCM_128_TAG_SIZE; + size += prot->tag_size; } - buf[0] = version == TLS_1_3_VERSION ? + buf[0] = prot->version == TLS_1_3_VERSION ? 
TLS_RECORD_TYPE_DATA : record_type; buf[1] = TLS_1_2_VERSION_MAJOR; buf[2] = TLS_1_2_VERSION_MINOR; @@ -582,11 +581,11 @@ static inline void tls_make_aad(char *buf, buf[4] = size & 0xFF; } -static inline void xor_iv_with_seq(int version, char *iv, char *seq) +static inline void xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq) { int i; - if (version == TLS_1_3_VERSION) { + if (prot->version == TLS_1_3_VERSION) { for (i = 0; i < 8; i++) iv[i + 4] ^= seq[i]; } diff --git a/include/net/udp.h b/include/net/udp.h index 435cc009e6eaaa79d376d3d71156ed5d3186534b..4017f257628f34301d4a3571804b6e662453949e 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -467,6 +467,7 @@ void udp_init(void); DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key); void udp_encap_enable(void); +void udp_encap_disable(void); #if IS_ENABLED(CONFIG_IPV6) DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key); void udpv6_encap_enable(void); diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 2ea453dac87624ba1612fe008f08b1c7de9c0b1e..24ece06bad9eff61612d89f618d389ae0e2f03b8 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -177,9 +177,8 @@ static inline void udp_tunnel_encap_enable(struct socket *sock) #if IS_ENABLED(CONFIG_IPV6) if (sock->sk->sk_family == PF_INET6) ipv6_stub->udpv6_encap_enable(); - else #endif - udp_encap_enable(); + udp_encap_enable(); } #define UDP_TUNNEL_NIC_MAX_TABLES 4 diff --git a/include/net/xfrm.h b/include/net/xfrm.h index f2875c629d06f19f2632e5cd0c779407d86e6e22..8bcf40e4e6e3c75a48101b6f7797b65c2109885c 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1552,7 +1552,6 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); int xfrm_init_replay(struct xfrm_state *x); -u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu); u32 xfrm_state_mtu(struct xfrm_state *x, int mtu); int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload); int xfrm_init_state(struct xfrm_state *x); @@ -1670,14 +1669,15 @@ int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, const struct xfrm_migrate *m, int num_bundles, const struct xfrm_kmaddress *k, const struct xfrm_encap_tmpl *encap); -struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net); +struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net, + u32 if_id); struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x, struct xfrm_migrate *m, struct xfrm_encap_tmpl *encap); int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles, struct xfrm_kmaddress *k, struct net *net, - struct xfrm_encap_tmpl *encap); + struct xfrm_encap_tmpl *encap, u32 if_id); #endif int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport); diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 2336bf9243e185cd0e8b84b4afe3add26faf59b6..5ffc2efedd9f83f9d4f2ab88f83e21a5bea6b9e4 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -398,6 +398,8 @@ struct snd_pcm_runtime { wait_queue_head_t tsleep; /* transfer sleep */ struct fasync_struct *fasync; bool stop_operating; /* sync_stop will be called */ + struct mutex buffer_mutex; /* protect for buffer changes */ + atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */ /* -- private section -- */ void *private_data; diff --git a/include/trace/events/ext4.h 
b/include/trace/events/ext4.h index 70ae5497b73a6d3da4ec9543aa34c6cebf073a2e..4973265655a7fb324d20943fef497c046457c252 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -95,6 +95,17 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B); { FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \ { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"}) +TRACE_DEFINE_ENUM(EXT4_FC_REASON_XATTR); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_CROSS_RENAME); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_NOMEM); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_SWAP_BOOT); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX); + #define show_fc_reason(reason) \ __print_symbolic(reason, \ { EXT4_FC_REASON_XATTR, "XATTR"}, \ @@ -2899,41 +2910,50 @@ TRACE_EVENT(ext4_fc_commit_stop, #define FC_REASON_NAME_STAT(reason) \ show_fc_reason(reason), \ - __entry->sbi->s_fc_stats.fc_ineligible_reason_count[reason] + __entry->fc_ineligible_rc[reason] TRACE_EVENT(ext4_fc_stats, - TP_PROTO(struct super_block *sb), - - TP_ARGS(sb), + TP_PROTO(struct super_block *sb), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(struct ext4_sb_info *, sbi) - __field(int, count) - ), + TP_ARGS(sb), - TP_fast_assign( - __entry->dev = sb->s_dev; - __entry->sbi = EXT4_SB(sb); - ), + TP_STRUCT__entry( + __field(dev_t, dev) + __array(unsigned int, fc_ineligible_rc, EXT4_FC_REASON_MAX) + __field(unsigned long, fc_commits) + __field(unsigned long, fc_ineligible_commits) + __field(unsigned long, fc_numblks) + ), - TP_printk("dev %d:%d fc ineligible reasons:\n" - "%s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d; " - "num_commits:%ld, ineligible: %ld, numblks: %ld", - MAJOR(__entry->dev), MINOR(__entry->dev), - FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR), - FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME), - FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE), - FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM), - FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT), - FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE), - FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR), - FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE), - FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA), - __entry->sbi->s_fc_stats.fc_num_commits, - __entry->sbi->s_fc_stats.fc_ineligible_commits, - __entry->sbi->s_fc_stats.fc_numblks) + TP_fast_assign( + int i; + __entry->dev = sb->s_dev; + for (i = 0; i < EXT4_FC_REASON_MAX; i++) { + __entry->fc_ineligible_rc[i] = + EXT4_SB(sb)->s_fc_stats.fc_ineligible_reason_count[i]; + } + __entry->fc_commits = EXT4_SB(sb)->s_fc_stats.fc_num_commits; + __entry->fc_ineligible_commits = + EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits; + __entry->fc_numblks = EXT4_SB(sb)->s_fc_stats.fc_numblks; + ), + + TP_printk("dev %d,%d fc ineligible reasons:\n" + "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u " + "num_commits:%lu, ineligible: %lu, numblks: %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR), + FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME), + FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM), + FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT), + FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR), + FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA), + 
__entry->fc_commits, __entry->fc_ineligible_commits, + __entry->fc_numblks) ); #define DEFINE_TRACE_DENTRY_EVENT(__type) \ diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index e70c90116edaed0f14d3df2c7fa4cb98cec4eb22..4a3ab0ed6e0629d48c61c50b49cbdcb5d4638dea 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -83,12 +83,15 @@ enum rxrpc_call_trace { rxrpc_call_error, rxrpc_call_got, rxrpc_call_got_kernel, + rxrpc_call_got_timer, rxrpc_call_got_userid, rxrpc_call_new_client, rxrpc_call_new_service, rxrpc_call_put, rxrpc_call_put_kernel, rxrpc_call_put_noqueue, + rxrpc_call_put_notimer, + rxrpc_call_put_timer, rxrpc_call_put_userid, rxrpc_call_queued, rxrpc_call_queued_ref, @@ -278,12 +281,15 @@ enum rxrpc_tx_point { EM(rxrpc_call_error, "*E*") \ EM(rxrpc_call_got, "GOT") \ EM(rxrpc_call_got_kernel, "Gke") \ + EM(rxrpc_call_got_timer, "GTM") \ EM(rxrpc_call_got_userid, "Gus") \ EM(rxrpc_call_new_client, "NWc") \ EM(rxrpc_call_new_service, "NWs") \ EM(rxrpc_call_put, "PUT") \ EM(rxrpc_call_put_kernel, "Pke") \ - EM(rxrpc_call_put_noqueue, "PNQ") \ + EM(rxrpc_call_put_noqueue, "PnQ") \ + EM(rxrpc_call_put_notimer, "PnT") \ + EM(rxrpc_call_put_timer, "PTM") \ EM(rxrpc_call_put_userid, "Pus") \ EM(rxrpc_call_queued, "QUE") \ EM(rxrpc_call_queued_ref, "QUR") \ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 028f49662ac34f53e0cb452a330f2ebe0857eaad..eb5ec1fb66b4b365c62ab7a8f8431420b960910f 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -705,6 +705,33 @@ DECLARE_TRACE(sched_update_nr_running_tp, TP_PROTO(struct rq *rq, int change), TP_ARGS(rq, change)); +DECLARE_EVENT_CLASS(psi_memstall_template, + + TP_PROTO(unsigned long function), + + TP_ARGS(function), + + TP_STRUCT__entry( + __field(unsigned long, function) + ), + + TP_fast_assign( + __entry->function = function; + ), + + TP_printk("%ps", (void *)__entry->function) +); + +DEFINE_EVENT(psi_memstall_template, psi_memstall_enter, + TP_PROTO(unsigned long function), + TP_ARGS(function) +); + +DEFINE_EVENT(psi_memstall_template, psi_memstall_leave, + TP_PROTO(unsigned long function), + TP_ARGS(function) +); + #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 75617c529efd681b836061f9d1b2ffb97899e797..6bf4d010222e8919376316e6dabd2e1880bd26d9 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1490,8 +1490,8 @@ union bpf_attr { * Return * The return value depends on the result of the test, and can be: * - * * 0, if current task belongs to the cgroup2. - * * 1, if current task does not belong to the cgroup2. + * * 1, if current task belongs to the cgroup2. + * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) @@ -2163,8 +2163,8 @@ union bpf_attr { * * # sysctl kernel.perf_event_max_stack= * Return - * A non-negative value equal to or less than *size* on success, - * or a negative error in case of failure. + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure. 
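The reworded return-value text above is worth spelling out for callers: bpf_get_stack() reports the number of bytes copied into the buffer, not the number of frames. A hypothetical BPF program fragment, with the attach point, program name and frame limit invented purely for illustration:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

/* Attach point and program name are invented for the example. */
SEC("kprobe/do_nanosleep")
int report_stack_depth(void *ctx)
{
    __u64 ips[32];
    long len = bpf_get_stack(ctx, ips, sizeof(ips), 0);

    if (len < 0)
        return 0;       /* negative error code, nothing was copied */

    /* len is the copied length in bytes (at most sizeof(ips)), so the
     * number of captured frames is len / sizeof(__u64). */
    bpf_printk("captured %ld frames", len / (long)sizeof(__u64));
    return 0;
}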
* * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description @@ -3448,8 +3448,8 @@ union bpf_attr { * * # sysctl kernel.perf_event_max_stack= * Return - * A non-negative value equal to or less than *size* on success, - * or a negative error in case of failure. + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure. * * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags) * Description diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 225ec87d4f2283c7e2aacba55fbf3d405753b570..7989d9483ea75e2bbaaf78c1fd3d3bca741678ff 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -278,7 +278,8 @@ #define KEY_PAUSECD 201 #define KEY_PROG3 202 #define KEY_PROG4 203 -#define KEY_DASHBOARD 204 /* AL Dashboard */ +#define KEY_ALL_APPLICATIONS 204 /* AC Desktop Show All Applications */ +#define KEY_DASHBOARD KEY_ALL_APPLICATIONS #define KEY_SUSPEND 205 #define KEY_CLOSE 206 /* AC Close */ #define KEY_PLAY 207 @@ -612,6 +613,7 @@ #define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */ #define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */ #define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */ +#define KEY_DICTATE 0x24a /* Start or Stop Voice Dictation Session (HUTRR99) */ #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index bcd2869ed472f8ddff64c9c584de44ff4cf90826..acd81e4220081f46e07b78b11efad4565311d27e 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -77,6 +77,20 @@ #define TLS_CIPHER_AES_CCM_128_TAG_SIZE 16 #define TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE 8 +#define TLS_CIPHER_SM4_GCM 55 +#define TLS_CIPHER_SM4_GCM_IV_SIZE 8 +#define TLS_CIPHER_SM4_GCM_KEY_SIZE 16 +#define TLS_CIPHER_SM4_GCM_SALT_SIZE 4 +#define TLS_CIPHER_SM4_GCM_TAG_SIZE 16 +#define TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE 8 + +#define TLS_CIPHER_SM4_CCM 56 +#define TLS_CIPHER_SM4_CCM_IV_SIZE 8 +#define TLS_CIPHER_SM4_CCM_KEY_SIZE 16 +#define TLS_CIPHER_SM4_CCM_SALT_SIZE 4 +#define TLS_CIPHER_SM4_CCM_TAG_SIZE 16 +#define TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE 8 + #define TLS_SET_RECORD_TYPE 1 #define TLS_GET_RECORD_TYPE 2 @@ -109,6 +123,22 @@ struct tls12_crypto_info_aes_ccm_128 { unsigned char rec_seq[TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE]; }; +struct tls12_crypto_info_sm4_gcm { + struct tls_crypto_info info; + unsigned char iv[TLS_CIPHER_SM4_GCM_IV_SIZE]; + unsigned char key[TLS_CIPHER_SM4_GCM_KEY_SIZE]; + unsigned char salt[TLS_CIPHER_SM4_GCM_SALT_SIZE]; + unsigned char rec_seq[TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE]; +}; + +struct tls12_crypto_info_sm4_ccm { + struct tls_crypto_info info; + unsigned char iv[TLS_CIPHER_SM4_CCM_IV_SIZE]; + unsigned char key[TLS_CIPHER_SM4_CCM_KEY_SIZE]; + unsigned char salt[TLS_CIPHER_SM4_CCM_SALT_SIZE]; + unsigned char rec_seq[TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE]; +}; + enum { TLS_INFO_UNSPEC, TLS_INFO_VERSION, diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index ffc6a5391bb7bb8f7543cb36fb16487596fd3255..66073c082a06004f0387cce30bd85b4d2ddbaf65 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -505,6 +505,12 @@ struct xfrm_user_offload { int ifindex; __u8 flags; }; +/* This flag was exposed without any kernel code that 
supporting it. + * Unfortunately, strongswan has the code that uses sets this flag, + * which makes impossible to reuse this bit. + * + * So leave it here to make sure that it won't be reused by mistake. + */ #define XFRM_OFFLOAD_IPV6 1 #define XFRM_OFFLOAD_INBOUND 2 diff --git a/kernel/audit.h b/kernel/audit.h index 3b9c0945225a121e7031cfe29299925bb0b42516..1918019e6aaf7b8c54a248fcc593d22e3ac772c8 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -191,6 +191,10 @@ struct audit_context { struct { char *name; } module; + struct { + struct audit_ntp_data ntp_data; + struct timespec64 tk_injoffset; + } time; }; int fds[2]; struct audit_proctitle proctitle; diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 638f424859edc384aa22b8908b1dc5a784d6bd97..07e2788bbbf124ea6b7a7f6077a4eed2d3669d8b 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1214,6 +1214,53 @@ static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name) from_kuid(&init_user_ns, name->fcap.rootid)); } +static void audit_log_time(struct audit_context *context, struct audit_buffer **ab) +{ + const struct audit_ntp_data *ntp = &context->time.ntp_data; + const struct timespec64 *tk = &context->time.tk_injoffset; + static const char * const ntp_name[] = { + "offset", + "freq", + "status", + "tai", + "tick", + "adjust", + }; + int type; + + if (context->type == AUDIT_TIME_ADJNTPVAL) { + for (type = 0; type < AUDIT_NTP_NVALS; type++) { + if (ntp->vals[type].newval != ntp->vals[type].oldval) { + if (!*ab) { + *ab = audit_log_start(context, + GFP_KERNEL, + AUDIT_TIME_ADJNTPVAL); + if (!*ab) + return; + } + audit_log_format(*ab, "op=%s old=%lli new=%lli", + ntp_name[type], + ntp->vals[type].oldval, + ntp->vals[type].newval); + audit_log_end(*ab); + *ab = NULL; + } + } + } + if (tk->tv_sec != 0 || tk->tv_nsec != 0) { + if (!*ab) { + *ab = audit_log_start(context, GFP_KERNEL, + AUDIT_TIME_INJOFFSET); + if (!*ab) + return; + } + audit_log_format(*ab, "sec=%lli nsec=%li", + (long long)tk->tv_sec, tk->tv_nsec); + audit_log_end(*ab); + *ab = NULL; + } +} + static void show_special(struct audit_context *context, int *call_panic) { struct audit_buffer *ab; @@ -1319,6 +1366,11 @@ static void show_special(struct audit_context *context, int *call_panic) audit_log_format(ab, "(null)"); break; + case AUDIT_TIME_ADJNTPVAL: + case AUDIT_TIME_INJOFFSET: + /* this call deviates from the rest, eating the buffer */ + audit_log_time(context, &ab); + break; } audit_log_end(ab); } @@ -2560,31 +2612,26 @@ void __audit_fanotify(unsigned int response) void __audit_tk_injoffset(struct timespec64 offset) { - audit_log(audit_context(), GFP_KERNEL, AUDIT_TIME_INJOFFSET, - "sec=%lli nsec=%li", - (long long)offset.tv_sec, offset.tv_nsec); -} - -static void audit_log_ntp_val(const struct audit_ntp_data *ad, - const char *op, enum audit_ntp_type type) -{ - const struct audit_ntp_val *val = &ad->vals[type]; - - if (val->newval == val->oldval) - return; + struct audit_context *context = audit_context(); - audit_log(audit_context(), GFP_KERNEL, AUDIT_TIME_ADJNTPVAL, - "op=%s old=%lli new=%lli", op, val->oldval, val->newval); + /* only set type if not already set by NTP */ + if (!context->type) + context->type = AUDIT_TIME_INJOFFSET; + memcpy(&context->time.tk_injoffset, &offset, sizeof(offset)); } void __audit_ntp_log(const struct audit_ntp_data *ad) { - audit_log_ntp_val(ad, "offset", AUDIT_NTP_OFFSET); - audit_log_ntp_val(ad, "freq", AUDIT_NTP_FREQ); - audit_log_ntp_val(ad, "status", AUDIT_NTP_STATUS); - audit_log_ntp_val(ad, "tai", 
AUDIT_NTP_TAI); - audit_log_ntp_val(ad, "tick", AUDIT_NTP_TICK); - audit_log_ntp_val(ad, "adjust", AUDIT_NTP_ADJUST); + struct audit_context *context = audit_context(); + int type; + + for (type = 0; type < AUDIT_NTP_NVALS; type++) + if (ad->vals[type].newval != ad->vals[type].oldval) { + /* unconditionally set type, overwriting TK */ + context->type = AUDIT_TIME_ADJNTPVAL; + memcpy(&context->time.ntp_data, ad, sizeof(*ad)); + break; + } } void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 7e1e5051468f8870e980e9bcbf9399caa94819d2..e1d0c6248aae763501fce3e58025494e8e78877d 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -667,6 +667,60 @@ static struct bpf_prog_list *find_detach_entry(struct list_head *progs, return ERR_PTR(-ENOENT); } +/** + * purge_effective_progs() - After compute_effective_progs fails to alloc new + * cgrp->bpf.inactive table we can recover by + * recomputing the array in place. + * + * @cgrp: The cgroup which descendants to travers + * @prog: A program to detach or NULL + * @link: A link to detach or NULL + * @atype: Type of detach operation + */ +static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog, + struct bpf_cgroup_link *link, + enum cgroup_bpf_attach_type atype) +{ + struct cgroup_subsys_state *css; + struct bpf_prog_array *progs; + struct bpf_prog_list *pl; + struct list_head *head; + struct cgroup *cg; + int pos; + + /* recompute effective prog array in place */ + css_for_each_descendant_pre(css, &cgrp->self) { + struct cgroup *desc = container_of(css, struct cgroup, self); + + if (percpu_ref_is_zero(&desc->bpf.refcnt)) + continue; + + /* find position of link or prog in effective progs array */ + for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) { + if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) + continue; + + head = &cg->bpf.progs[atype]; + list_for_each_entry(pl, head, node) { + if (!prog_list_prog(pl)) + continue; + if (pl->prog == prog && pl->link == link) + goto found; + pos++; + } + } +found: + BUG_ON(!cg); + progs = rcu_dereference_protected( + desc->bpf.effective[atype], + lockdep_is_held(&cgroup_mutex)); + + /* Remove the program from the array */ + WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos), + "Failed to purge a prog from array at index %d", pos); + } +} + /** * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and * propagate the change to descendants @@ -686,7 +740,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, struct bpf_prog_list *pl; struct list_head *progs; u32 flags; - int err; atype = to_cgroup_bpf_attach_type(type); if (atype < 0) @@ -708,9 +761,12 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, pl->prog = NULL; pl->link = NULL; - err = update_effective_progs(cgrp, atype); - if (err) - goto cleanup; + if (update_effective_progs(cgrp, atype)) { + /* if update effective array failed replace the prog with a dummy prog*/ + pl->prog = old_prog; + pl->link = link; + purge_effective_progs(cgrp, old_prog, link, atype); + } /* now can actually delete it from this cgroup list */ list_del(&pl->node); @@ -722,12 +778,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, bpf_prog_put(old_prog); static_branch_dec(&cgroup_bpf_enabled_key[atype]); return 0; - -cleanup: - /* restore back prog or link */ - pl->prog = old_prog; - pl->link = link; - return err; } /* Must be called with cgroup_mutex held to avoid races. 
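The detach path above no longer propagates an allocation failure to user space: when update_effective_progs() cannot allocate the replacement arrays, purge_effective_progs() edits the existing effective arrays in place so the detach still completes. The shape of that fallback, a destructive operation that prefers an in-place edit which cannot fail over a reallocation which can, reduced to a self-contained user-space sketch with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct vec {
    int *items;
    size_t len;
};

/* Removing an entry is destructive and should not fail: shift in place
 * first, then treat the shrinking reallocation as best effort only. */
static void vec_remove(struct vec *v, size_t idx)
{
    memmove(&v->items[idx], &v->items[idx + 1],
            (v->len - idx - 1) * sizeof(*v->items));
    v->len--;

    if (v->len) {
        int *smaller = realloc(v->items, v->len * sizeof(*v->items));

        if (smaller)
            v->items = smaller;
        /* else: keep the oversized buffer; the entry is gone either way */
    }
}

int main(void)
{
    struct vec v = { .len = 4 };
    size_t i;

    v.items = malloc(v.len * sizeof(*v.items));
    if (!v.items)
        return 1;
    for (i = 0; i < v.len; i++)
        v.items[i] = (int)i;

    vec_remove(&v, 1);
    for (i = 0; i < v.len; i++)
        printf("%d\n", v.items[i]);
    free(v.items);
    return 0;
}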
*/ diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index a4c8c584e76b19f4102e9b66cbe6373b4734ca9b..22ef49037579131735974fda3d5eedf22c5de1fc 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -364,7 +364,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, } static struct perf_callchain_entry * -get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) +get_callchain_entry_for_task(struct task_struct *task, u32 max_depth) { #ifdef CONFIG_STACKTRACE struct perf_callchain_entry *entry; @@ -375,9 +375,8 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) if (!entry) return NULL; - entry->nr = init_nr + - stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr), - sysctl_perf_event_max_stack - init_nr, 0); + entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip, + max_depth, 0); /* stack_trace_save_tsk() works on unsigned long array, while * perf_callchain_entry uses u64 array. For 32-bit systems, it is @@ -389,7 +388,7 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) int i; /* copy data from the end to avoid using extra buffer */ - for (i = entry->nr - 1; i >= (int)init_nr; i--) + for (i = entry->nr - 1; i >= 0; i--) to[i] = (u64)(from[i]); } @@ -406,27 +405,19 @@ static long __bpf_get_stackid(struct bpf_map *map, { struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); struct stack_map_bucket *bucket, *new_bucket, *old_bucket; - u32 max_depth = map->value_size / stack_map_data_size(map); - /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ - u32 init_nr = sysctl_perf_event_max_stack - max_depth; u32 skip = flags & BPF_F_SKIP_FIELD_MASK; u32 hash, id, trace_nr, trace_len; bool user = flags & BPF_F_USER_STACK; u64 *ips; bool hash_matches; - /* get_perf_callchain() guarantees that trace->nr >= init_nr - * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth - */ - trace_nr = trace->nr - init_nr; - - if (trace_nr <= skip) + if (trace->nr <= skip) /* skipping more than usable stack trace */ return -EFAULT; - trace_nr -= skip; + trace_nr = trace->nr - skip; trace_len = trace_nr * sizeof(u64); - ips = trace->ip + skip + init_nr; + ips = trace->ip + skip; hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0); id = hash & (smap->n_buckets - 1); bucket = READ_ONCE(smap->buckets[id]); @@ -483,8 +474,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, u64, flags) { u32 max_depth = map->value_size / stack_map_data_size(map); - /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ - u32 init_nr = sysctl_perf_event_max_stack - max_depth; + u32 skip = flags & BPF_F_SKIP_FIELD_MASK; bool user = flags & BPF_F_USER_STACK; struct perf_callchain_entry *trace; bool kernel = !user; @@ -493,8 +483,12 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) return -EINVAL; - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, false, false); + max_depth += skip; + if (max_depth > sysctl_perf_event_max_stack) + max_depth = sysctl_perf_event_max_stack; + + trace = get_perf_callchain(regs, 0, kernel, user, max_depth, + false, false); if (unlikely(!trace)) /* couldn't fetch the stack trace */ @@ -585,7 +579,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, struct perf_callchain_entry *trace_in, void *buf, u32 size, u64 flags) { - u32 init_nr, trace_nr, 
copy_len, elem_size, num_elem; + u32 trace_nr, copy_len, elem_size, num_elem, max_depth; bool user_build_id = flags & BPF_F_USER_BUILD_ID; u32 skip = flags & BPF_F_SKIP_FIELD_MASK; bool user = flags & BPF_F_USER_STACK; @@ -610,30 +604,28 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, goto err_fault; num_elem = size / elem_size; - if (sysctl_perf_event_max_stack < num_elem) - init_nr = 0; - else - init_nr = sysctl_perf_event_max_stack - num_elem; + max_depth = num_elem + skip; + if (sysctl_perf_event_max_stack < max_depth) + max_depth = sysctl_perf_event_max_stack; if (trace_in) trace = trace_in; else if (kernel && task) - trace = get_callchain_entry_for_task(task, init_nr); + trace = get_callchain_entry_for_task(task, max_depth); else - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, + trace = get_perf_callchain(regs, 0, kernel, user, max_depth, false, false); if (unlikely(!trace)) goto err_fault; - trace_nr = trace->nr - init_nr; - if (trace_nr < skip) + if (trace->nr < skip) goto err_fault; - trace_nr -= skip; + trace_nr = trace->nr - skip; trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem; copy_len = trace_nr * elem_size; - ips = trace->ip + skip + init_nr; + + ips = trace->ip + skip; if (user && user_build_id) stack_map_get_build_id_offset(buf, ips, trace_nr, user); else diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 209e6567cdab051df2d3640cac5b83ce577866eb..419dbc3d060ee1fea6835a59291bf36b9c570d7e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1308,6 +1308,7 @@ int generic_map_delete_batch(struct bpf_map *map, maybe_wait_bpf_programs(map); if (err) break; + cond_resched(); } if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) err = -EFAULT; @@ -1365,6 +1366,7 @@ int generic_map_update_batch(struct bpf_map *map, if (err) break; + cond_resched(); } if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) @@ -1462,6 +1464,7 @@ int generic_map_lookup_batch(struct bpf_map *map, swap(prev_key, key); retry = MAP_LOOKUP_RETRIES; cp++; + cond_resched(); } if (err == -EFAULT) diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index ff965ca9ca21857045c9c18e08c57ea8976154fa..be884bc2ae61b6ec870728704cb485f0460d47ac 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -545,6 +545,7 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct cgroup *cgrp; + struct cgroup_file_ctx *ctx; BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); @@ -552,8 +553,9 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, * Release agent gets called with all capabilities, * require capabilities to set release agent. 
*/ - if ((of->file->f_cred->user_ns != &init_user_ns) || - !capable(CAP_SYS_ADMIN)) + ctx = of->priv; + if ((ctx->ns->user_ns != &init_user_ns) || + !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN)) return -EPERM; cgrp = cgroup_kn_lock_live(of->kn, false); diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index be5b6b97adbfe1f12f95de61e510dc902fa52d19..363f781b56cad6b5cc5796c08aebfa7f3b016c55 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include @@ -762,6 +763,29 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, continue; kgdb_connected = 0; } else { + /* + * This is a brutal way to interfere with the debugger + * and prevent gdb being used to poke at kernel memory. + * This could cause trouble if lockdown is applied when + * there is already an active gdb session. For now the + * answer is simply "don't do that". Typically lockdown + * *will* be applied before the debug core gets started + * so only developers using kgdb for fairly advanced + * early kernel debug can be biten by this. Hopefully + * they are sophisticated enough to take care of + * themselves, especially with help from the lockdown + * message printed on the console! + */ + if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) { + if (IS_ENABLED(CONFIG_KGDB_KDB)) { + /* Switch back to kdb if possible... */ + dbg_kdb_mode = 1; + continue; + } else { + /* ... otherwise just bail */ + break; + } + } error = gdb_serial_stub(ks); } diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 930ac1b25ec7c4377e3cfcdc63c7a558826bf60e..4e09fab52faf50beed7f4af7de696d4689c16d40 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -45,6 +45,7 @@ #include #include #include +#include #include "kdb_private.h" #undef MODULE_PARAM_PREFIX @@ -197,10 +198,62 @@ struct task_struct *kdb_curr_task(int cpu) } /* - * Check whether the flags of the current command and the permissions - * of the kdb console has allow a command to be run. + * Update the permissions flags (kdb_cmd_enabled) to match the + * current lockdown state. + * + * Within this function the calls to security_locked_down() are "lazy". We + * avoid calling them if the current value of kdb_cmd_enabled already excludes + * flags that might be subject to lockdown. Additionally we deliberately check + * the lockdown flags independently (even though read lockdown implies write + * lockdown) since that results in both simpler code and clearer messages to + * the user on first-time debugger entry. + * + * The permission masks during a read+write lockdown permits the following + * flags: INSPECT, SIGNAL, REBOOT (and ALWAYS_SAFE). + * + * The INSPECT commands are not blocked during lockdown because they are + * not arbitrary memory reads. INSPECT covers the backtrace family (sometimes + * forcing them to have no arguments) and lsmod. These commands do expose + * some kernel state but do not allow the developer seated at the console to + * choose what state is reported. SIGNAL and REBOOT should not be controversial, + * given these are allowed for root during lockdown already. 
+ */ +static void kdb_check_for_lockdown(void) +{ + const int write_flags = KDB_ENABLE_MEM_WRITE | + KDB_ENABLE_REG_WRITE | + KDB_ENABLE_FLOW_CTRL; + const int read_flags = KDB_ENABLE_MEM_READ | + KDB_ENABLE_REG_READ; + + bool need_to_lockdown_write = false; + bool need_to_lockdown_read = false; + + if (kdb_cmd_enabled & (KDB_ENABLE_ALL | write_flags)) + need_to_lockdown_write = + security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL); + + if (kdb_cmd_enabled & (KDB_ENABLE_ALL | read_flags)) + need_to_lockdown_read = + security_locked_down(LOCKDOWN_DBG_READ_KERNEL); + + /* De-compose KDB_ENABLE_ALL if required */ + if (need_to_lockdown_write || need_to_lockdown_read) + if (kdb_cmd_enabled & KDB_ENABLE_ALL) + kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL; + + if (need_to_lockdown_write) + kdb_cmd_enabled &= ~write_flags; + + if (need_to_lockdown_read) + kdb_cmd_enabled &= ~read_flags; +} + +/* + * Check whether the flags of the current command, the permissions of the kdb + * console and the lockdown state allow a command to be run. */ -static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, +static bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, bool no_args) { /* permissions comes from userspace so needs massaging slightly */ @@ -1194,6 +1247,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, kdb_curr_task(raw_smp_processor_id()); KDB_DEBUG_STATE("kdb_local 1", reason); + + kdb_check_for_lockdown(); + kdb_go_count = 0; if (reason == KDB_REASON_DEBUG) { /* special case below */ diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 6226502ce04991d8ba1642ca19c97f37ad5df35f..13417f0045f028ed04b4348460b415ab31dd94c1 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -350,7 +350,7 @@ int kdb_getarea_size(void *res, unsigned long addr, size_t size) */ int kdb_putarea_size(unsigned long addr, void *res, size_t size) { - int ret = copy_from_kernel_nofault((char *)addr, (char *)res, size); + int ret = copy_to_kernel_nofault((char *)addr, (char *)res, size); if (ret) { if (!KDB_STATE(SUPPRESS)) { kdb_printf("kdb_putarea: Bad address 0x%lx\n", addr); diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 10d07ace46c15d22873ee44de06b703319123987..f8ae5467986511a5c406d192b7113c4624bfc25a 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -928,7 +928,7 @@ static __init int dma_debug_cmdline(char *str) global_disable = true; } - return 0; + return 1; } static __init int dma_debug_entries_cmdline(char *str) @@ -937,7 +937,7 @@ static __init int dma_debug_entries_cmdline(char *str) return -EINVAL; if (!get_option(&str, &nr_prealloc_entries)) nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; - return 0; + return 1; } __setup("dma_debug=", dma_debug_cmdline); diff --git a/kernel/events/core.c b/kernel/events/core.c index a59fe1c5dd9ef6d49f4b24dff4938f5f84942b91..4bd9dd6c3b72cc287ca4ec819e4e5b5ee8f0fabc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10219,8 +10219,11 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, } /* ready to consume more filters */ + kfree(filename); + filename = NULL; state = IF_STATE_ACTION; filter = NULL; + kernel = 0; } } @@ -11897,6 +11900,9 @@ SYSCALL_DEFINE5(perf_event_open, * Do not allow to attach to a group in a different task * or CPU context. If we're moving SW events, we'll fix * this up later, so allow that. + * + * Racy, not holding group_leader->ctx->mutex, see comment with + * perf_event_ctx_lock(). 
*/ if (!move_group && group_leader->ctx != ctx) goto err_context; @@ -11964,6 +11970,7 @@ SYSCALL_DEFINE5(perf_event_open, } else { perf_event_ctx_unlock(group_leader, gctx); move_group = 0; + goto not_move_group; } } @@ -11980,7 +11987,17 @@ SYSCALL_DEFINE5(perf_event_open, } } else { mutex_lock(&ctx->mutex); + + /* + * Now that we hold ctx->lock, (re)validate group_leader->ctx == ctx, + * see the group_leader && !move_group test earlier. + */ + if (group_leader && group_leader->ctx != ctx) { + err = -EINVAL; + goto err_locked; + } } +not_move_group: if (ctx->task == TASK_TOMBSTONE) { err = -ESRCH; diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index d3033e1f9d87e1c82b28474bc18f1f7ed41a1ced..8d3d49c0483eace0c664e223f9dce11a2999ca7c 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -223,11 +223,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, { struct irq_desc *desc = irq_data_to_desc(data); struct irq_chip *chip = irq_data_get_irq_chip(data); + const struct cpumask *prog_mask; int ret; + static DEFINE_RAW_SPINLOCK(tmp_mask_lock); + static struct cpumask tmp_mask; + if (!chip || !chip->irq_set_affinity) return -EINVAL; + raw_spin_lock(&tmp_mask_lock); /* * If this is a managed interrupt and housekeeping is enabled on * it check whether the requested affinity mask intersects with @@ -249,24 +254,34 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, */ if (irqd_affinity_is_managed(data) && housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) { - const struct cpumask *hk_mask, *prog_mask; - - static DEFINE_RAW_SPINLOCK(tmp_mask_lock); - static struct cpumask tmp_mask; + const struct cpumask *hk_mask; hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ); - raw_spin_lock(&tmp_mask_lock); cpumask_and(&tmp_mask, mask, hk_mask); if (!cpumask_intersects(&tmp_mask, cpu_online_mask)) prog_mask = mask; else prog_mask = &tmp_mask; - ret = chip->irq_set_affinity(data, prog_mask, force); - raw_spin_unlock(&tmp_mask_lock); } else { - ret = chip->irq_set_affinity(data, mask, force); + prog_mask = mask; } + + /* + * Make sure we only provide online CPUs to the irqchip, + * unless we are being asked to force the affinity (in which + * case we do as we are told). + */ + cpumask_and(&tmp_mask, prog_mask, cpu_online_mask); + if (!force && !cpumask_empty(&tmp_mask)) + ret = chip->irq_set_affinity(data, &tmp_mask, force); + else if (force) + ret = chip->irq_set_affinity(data, mask, force); + else + ret = -EINVAL; + + raw_spin_unlock(&tmp_mask_lock); + switch (ret) { case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index d217acc9f71b6a615e5add7d1dbaa876eb180371..77722ebdf6f5fd7f8fbfd37574fd685cee1427ea 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -456,6 +456,21 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, irqd_clr_can_reserve(irq_data); if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK) irqd_set_msi_nomask_quirk(irq_data); + + /* + * If the interrupt is managed but no CPU is available to + * service it, shut it down until better times. Note that + * we only do this on the !RESERVE path as x86 (the only + * architecture using this flag) deals with this in a + * different way by using a catch-all vector. 
+ */ + if ((info->flags & MSI_FLAG_ACTIVATE_EARLY) && + irqd_affinity_is_managed(irq_data) && + !cpumask_intersects(irq_data_get_affinity_mask(irq_data), + cpu_online_mask)) { + irqd_set_managed_shutdown(irq_data); + return 0; + } } ret = irq_domain_activate_irq(irq_data, can_reserve); if (ret) diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 87ed93df7a98daa346d474bbf75ba3533cde3594..780a825cee8be202f5de0f8d5a18e1b557413228 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -31,6 +31,7 @@ #include "state.h" #include "transition.h" #elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) +#include #include #endif @@ -57,6 +58,7 @@ static struct kobject *klp_root_kobj; struct patch_data { struct klp_patch *patch; atomic_t cpu_count; + bool rollback; }; #endif @@ -1273,11 +1275,16 @@ int __weak klp_check_calltrace(struct klp_patch *patch, int enable) static LIST_HEAD(klp_func_list); +/* + * The caller must ensure that the klp_mutex lock is held or is in the rcu read + * critical area. + */ struct klp_func_node *klp_find_func_node(const void *old_func) { struct klp_func_node *func_node; - list_for_each_entry(func_node, &klp_func_list, node) { + list_for_each_entry_rcu(func_node, &klp_func_list, node, + lockdep_is_held(&klp_mutex)) { if (func_node->old_func == old_func) return func_node; } @@ -1295,6 +1302,37 @@ void klp_del_func_node(struct klp_func_node *func_node) list_del_rcu(&func_node->node); } +/* + * Called from the breakpoint exception handler function. + */ +void *klp_get_brk_func(void *addr) +{ + struct klp_func_node *func_node; + void *brk_func = NULL; + + if (!addr) + return NULL; + + rcu_read_lock(); + + func_node = klp_find_func_node(addr); + if (!func_node) + goto unlock; + + /* + * Corresponds to smp_wmb() in {add, remove}_breakpoint(). If the + * current breakpoint exception belongs to us, we have observed the + * breakpoint instruction, so brk_func must be observed. + */ + smp_rmb(); + + brk_func = func_node->brk_func; + +unlock: + rcu_read_unlock(); + return brk_func; +} + /* * This function is called from stop_machine() context. 
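klp_get_brk_func() above relies on a publish/consume pairing: the writer stores brk_func and only then makes the breakpoint visible (the smp_wmb() in add_breakpoint() and remove_breakpoint()), while the exception path observes the breakpoint first and only then reads brk_func (the smp_rmb() above). A user-space analogue of that ordering, using C11 release/acquire in place of the kernel barriers and invented names, purely as an illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;            /* plays the role of brk_func        */
static atomic_int published;   /* plays the role of the breakpoint  */

static void *writer(void *arg)
{
    (void)arg;
    payload = 42;                       /* "set brk_func" */
    atomic_store_explicit(&published, 1, memory_order_release);
    return NULL;                        /* "insert the breakpoint" */
}

static void *reader(void *arg)
{
    (void)arg;
    /* Wait until the "breakpoint" is visible ... */
    while (!atomic_load_explicit(&published, memory_order_acquire))
        ;
    /* ... then the payload written before publication must be visible. */
    printf("payload = %d\n", payload);
    return NULL;
}

int main(void)
{
    pthread_t w, r;

    pthread_create(&r, NULL, reader, NULL);
    pthread_create(&w, NULL, writer, NULL);
    pthread_join(w, NULL);
    pthread_join(r, NULL);
    return 0;
}

Built with -pthread, the reader can never observe the flag set without also observing the payload stored before it, which is the guarantee the breakpoint handler needs from brk_func.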
*/ @@ -1365,6 +1403,34 @@ long __weak arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_fu return -ENOSYS; } +void __weak arch_klp_init(void) +{ +} + +int __weak arch_klp_check_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + return 0; +} + +int __weak arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + return -ENOTSUPP; +} + +void __weak arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ +} + +void __weak arch_klp_set_brk_func(struct klp_func_node *func_node, void *new_func) +{ + func_node->brk_func = new_func; +} + +int __weak arch_klp_module_check_calltrace(void *data) +{ + return 0; +} + static struct klp_func_node *func_node_alloc(struct klp_func *func) { long ret; @@ -1403,11 +1469,24 @@ static void func_node_free(struct klp_func *func) func->func_node = NULL; if (list_empty(&func_node->func_stack)) { klp_del_func_node(func_node); + synchronize_rcu(); arch_klp_mem_free(func_node); } } } +static void klp_mem_recycle(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + func_node_free(func); + } + } +} + static int klp_mem_prepare(struct klp_patch *patch) { struct klp_object *obj; @@ -1417,6 +1496,7 @@ static int klp_mem_prepare(struct klp_patch *patch) klp_for_each_func(obj, func) { func->func_node = func_node_alloc(func); if (func->func_node == NULL) { + klp_mem_recycle(patch); pr_err("alloc func_node failed\n"); return -ENOMEM; } @@ -1425,16 +1505,108 @@ static int klp_mem_prepare(struct klp_patch *patch) return 0; } -static void klp_mem_recycle(struct klp_patch *patch) +static void remove_breakpoint(struct klp_func *func, bool restore) +{ + + struct klp_func_node *func_node = klp_find_func_node(func->old_func); + struct arch_klp_data *arch_data = &func_node->arch_data; + + if (!func_node->brk_func) + return; + + if (restore) + arch_klp_remove_breakpoint(arch_data, func->old_func); + + /* Wait for all breakpoint exception handler functions to exit. */ + synchronize_rcu(); + + /* 'brk_func' cannot be set to NULL before the breakpoint is removed. */ + smp_wmb(); + + arch_klp_set_brk_func(func_node, NULL); +} + +static void __klp_breakpoint_post_process(struct klp_patch *patch, bool restore) { struct klp_object *obj; struct klp_func *func; klp_for_each_object(patch, obj) { klp_for_each_func(obj, func) { - func_node_free(func); + remove_breakpoint(func, restore); + } + } +} + +static int add_breakpoint(struct klp_func *func) +{ + struct klp_func_node *func_node = klp_find_func_node(func->old_func); + struct arch_klp_data *arch_data = &func_node->arch_data; + int ret; + + if (WARN_ON_ONCE(func_node->brk_func)) + return -EINVAL; + + ret = arch_klp_check_breakpoint(arch_data, func->old_func); + if (ret) + return ret; + + arch_klp_set_brk_func(func_node, func->new_func); + + /* + * When entering an exception, we must see 'brk_func' or the kernel + * will not be able to handle the breakpoint exception we are about + * to insert. + */ + smp_wmb(); + + ret = arch_klp_add_breakpoint(arch_data, func->old_func); + if (ret) + arch_klp_set_brk_func(func_node, NULL); + + return ret; +} + +static int klp_add_breakpoint(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + int ret; + + /* + * Ensure that the module is not uninstalled before the breakpoint is + * removed. 
After the breakpoint is removed, it can be ensured that the + * new function will not be jumped through the handler function of the + * breakpoint. + */ + if (!try_module_get(patch->mod)) + return -ENODEV; + + arch_klp_code_modify_prepare(); + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + ret = add_breakpoint(func); + if (ret) { + __klp_breakpoint_post_process(patch, true); + arch_klp_code_modify_post_process(); + module_put(patch->mod); + return ret; + } } } + + arch_klp_code_modify_post_process(); + + return 0; +} + +static void klp_breakpoint_post_process(struct klp_patch *patch, bool restore) +{ + arch_klp_code_modify_prepare(); + __klp_breakpoint_post_process(patch, restore); + arch_klp_code_modify_post_process(); + module_put(patch->mod); } static int __klp_disable_patch(struct klp_patch *patch) @@ -1607,7 +1779,7 @@ EXPORT_SYMBOL_GPL(klp_enable_patch); /* * This function is called from stop_machine() context. */ -static int enable_patch(struct klp_patch *patch) +static int enable_patch(struct klp_patch *patch, bool rollback) { struct klp_object *obj; int ret; @@ -1615,19 +1787,21 @@ static int enable_patch(struct klp_patch *patch) pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n"); add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK); - if (!try_module_get(patch->mod)) - return -ENODEV; + if (!patch->enabled) { + if (!try_module_get(patch->mod)) + return -ENODEV; - patch->enabled = true; + patch->enabled = true; - pr_notice("enabling patch '%s'\n", patch->mod->name); + pr_notice("enabling patch '%s'\n", patch->mod->name); + } klp_for_each_object(patch, obj) { if (!klp_is_object_loaded(obj)) continue; - ret = klp_patch_object(obj); - if (ret) { + ret = klp_patch_object(obj, rollback); + if (ret && klp_need_rollback(ret, rollback)) { pr_warn("failed to patch object '%s'\n", klp_is_module(obj) ? obj->name : "vmlinux"); goto disable; @@ -1659,7 +1833,7 @@ int klp_try_enable_patch(void *data) atomic_inc(&pd->cpu_count); return ret; } - ret = enable_patch(patch); + ret = enable_patch(patch, pd->rollback); if (ret) { atomic_inc(&pd->cpu_count); return ret; @@ -1675,12 +1849,89 @@ int klp_try_enable_patch(void *data) return ret; } +/* + * When the stop_machine is used to enable the patch, if the patch fails to be + * enabled because the stack check fails, a certain number of retries are + * allowed. The maximum number of retries is KLP_RETRY_COUNT. + * + * Sleeps for KLP_RETRY_INTERVAL milliseconds before each retry to give tasks + * that fail the stack check a chance to run out of the instruction replacement + * area. 
+ */ +#define KLP_RETRY_COUNT 5 +#define KLP_RETRY_INTERVAL 100 + +static bool klp_use_breakpoint(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + if (func->force != KLP_STACK_OPTIMIZE) + return false; + } + } + + return true; +} + +static int klp_breakpoint_optimize(struct klp_patch *patch) +{ + int ret; + int i; + int cnt = 0; + + ret = klp_add_breakpoint(patch); + if (ret) { + pr_err("failed to add breakpoints, ret=%d\n", ret); + return ret; + } + + for (i = 0; i < KLP_RETRY_COUNT; i++) { + struct patch_data patch_data = { + .patch = patch, + .cpu_count = ATOMIC_INIT(0), + .rollback = false, + }; + + if (i == KLP_RETRY_COUNT - 1) + patch_data.rollback = true; + + cnt++; + + arch_klp_code_modify_prepare(); + ret = stop_machine(klp_try_enable_patch, &patch_data, + cpu_online_mask); + arch_klp_code_modify_post_process(); + if (!ret || ret != -EAGAIN) + break; + + pr_notice("try again in %d ms.\n", KLP_RETRY_INTERVAL); + + msleep(KLP_RETRY_INTERVAL); + } + pr_notice("patching %s, tried %d times, ret=%d.\n", + ret ? "failed" : "success", cnt, ret); + + /* + * If the patch is enabled successfully, the breakpoint instruction + * has been replaced with the jump instruction. However, if the patch + * fails to be enabled, we need to delete the previously inserted + * breakpoint to restore the instruction at the old function entry. + */ + klp_breakpoint_post_process(patch, !!ret); + + return ret; +} + static int __klp_enable_patch(struct klp_patch *patch) { int ret; struct patch_data patch_data = { .patch = patch, .cpu_count = ATOMIC_INIT(0), + .rollback = true, }; if (WARN_ON(patch->enabled)) @@ -1695,16 +1946,29 @@ static int __klp_enable_patch(struct klp_patch *patch) } #endif - arch_klp_code_modify_prepare(); ret = klp_mem_prepare(patch); - if (ret == 0) - ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask); - arch_klp_code_modify_post_process(); - if (ret) { - klp_mem_recycle(patch); + if (ret) return ret; + + arch_klp_code_modify_prepare(); + ret = stop_machine(klp_try_enable_patch, &patch_data, + cpu_online_mask); + arch_klp_code_modify_post_process(); + if (!ret) + goto move_patch_to_tail; + if (ret != -EAGAIN) + goto err_out; + + if (!klp_use_breakpoint(patch)) { + pr_debug("breakpoint exception optimization is not used.\n"); + goto err_out; } + ret = klp_breakpoint_optimize(patch); + if (ret) + goto err_out; + +move_patch_to_tail: #ifndef CONFIG_LIVEPATCH_STACK /* move the enabled patch to the list tail */ list_del(&patch->list); @@ -1712,6 +1976,10 @@ static int __klp_enable_patch(struct klp_patch *patch) #endif return 0; + +err_out: + klp_mem_recycle(patch); + return ret; } /** @@ -1830,6 +2098,37 @@ int klp_unregister_patch(struct klp_patch *patch) } EXPORT_SYMBOL_GPL(klp_unregister_patch); +/** + * klp_module_delete_safety_check() - safety check in livepatch scenario when delete a module + * @mod: Module to be deleted + * + * Module refcnt ensures that there is no rare case between enable_patch and delete_module: + * 1. safety_check -> try_enable_patch -> try_release_module_ref: + * try_enable_patch would increase module refcnt, which cause try_release_module_ref fails. + * 2. safety_check -> try_release_module_ref -> try_enable_patch: + * after release module ref, try_enable_patch would fail because try_module_get fails. 
+ * So the problem that release resources unsafely when enable livepatch after safety_check is + * passed during module deletion does not exist, complex synchronization protection is not + * required. + + * Return: 0 on success, otherwise error + */ +int klp_module_delete_safety_check(struct module *mod) +{ + int ret; + + if (!mod || !is_livepatch_module(mod)) + return 0; + + ret = stop_machine(arch_klp_module_check_calltrace, (void *)mod, NULL); + if (ret) { + pr_debug("failed to check klp module calltrace: %d\n", ret); + return ret; + } + + return 0; +} + #endif /* #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY */ /* * This function unpatches objects from the replaced livepatches. @@ -2034,6 +2333,9 @@ static int __init klp_init(void) if (!klp_root_kobj) goto error_remove; +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + arch_klp_init(); +#endif return 0; error_remove: diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h index 9bcd139eb7d6272ca649dfa4a966a76607db6a4b..911b6452e5be972f85ec3e2e418138d2aa3b6399 100644 --- a/kernel/livepatch/core.h +++ b/kernel/livepatch/core.h @@ -57,4 +57,18 @@ static inline void klp_post_unpatch_callback(struct klp_object *obj) obj->callbacks.post_unpatch_enabled = false; } #endif /* CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */ + +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +/* + * In the enable_patch() process, we do not need to roll back the patch + * immediately if the patch fails to enabled. In this way, the function that has + * been successfully patched does not need to be enabled repeatedly during + * retry. However, if it is the last retry (rollback == true) or not because of + * stack check failure (patch_err != -EAGAIN), rollback is required immediately. + */ +static inline bool klp_need_rollback(int patch_err, bool rollback) +{ + return patch_err != -EAGAIN || rollback; +} +#endif /* CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY */ #endif /* _LIVEPATCH_CORE_H */ diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index 6515b8e9982941436c4e5d078f50ac5c9109bd4f..bea6c5d0af942e541e63a5d1232c624e2a287dfd 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c @@ -269,10 +269,10 @@ static inline int klp_patch_func(struct klp_func *func) { int ret = 0; + if (func->patched) + return 0; if (WARN_ON(!func->old_func)) return -EINVAL; - if (WARN_ON(func->patched)) - return -EINVAL; if (WARN_ON(!func->func_node)) return -EINVAL; @@ -306,6 +306,27 @@ void klp_unpatch_object(struct klp_object *obj) __klp_unpatch_object(obj, false); } +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +int klp_patch_object(struct klp_object *obj, bool rollback) +{ + struct klp_func *func; + int ret; + + if (obj->patched) + return 0; + + klp_for_each_func(obj, func) { + ret = klp_patch_func(func); + if (ret && klp_need_rollback(ret, rollback)) { + klp_unpatch_object(obj); + return ret; + } + } + obj->patched = true; + + return 0; +} +#else int klp_patch_object(struct klp_object *obj) { struct klp_func *func; @@ -325,6 +346,7 @@ int klp_patch_object(struct klp_object *obj) return 0; } +#endif static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only) { diff --git a/kernel/livepatch/patch.h b/kernel/livepatch/patch.h index c9cde47f7e979b789a2ff11a9f3f1971022fc188..9566681660e4e1d49b0bed016ef34ed54f6aff9e 100644 --- a/kernel/livepatch/patch.h +++ b/kernel/livepatch/patch.h @@ -29,7 +29,11 @@ struct klp_ops { struct klp_ops *klp_find_ops(void *old_func); +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +int 
klp_patch_object(struct klp_object *obj, bool rollback); +#else int klp_patch_object(struct klp_object *obj); +#endif void klp_unpatch_object(struct klp_object *obj); void klp_unpatch_objects(struct klp_patch *patch); void klp_unpatch_objects_dynamic(struct klp_patch *patch); diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index af4b35450556ff9a3165307edfa54c69b6e78b32..b6683cefe19a4950af2a2be057401593d38c8472 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -182,11 +182,9 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES); static struct hlist_head lock_keys_hash[KEYHASH_SIZE]; unsigned long nr_lock_classes; unsigned long nr_zapped_classes; -#ifndef CONFIG_DEBUG_LOCKDEP -static -#endif +unsigned long max_lock_class_idx; struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; -static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS); +DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS); static inline struct lock_class *hlock_class(struct held_lock *hlock) { @@ -337,7 +335,7 @@ static inline void lock_release_holdtime(struct held_lock *hlock) * elements. These elements are linked together by the lock_entry member in * struct lock_class. */ -LIST_HEAD(all_lock_classes); +static LIST_HEAD(all_lock_classes); static LIST_HEAD(free_lock_classes); /** @@ -1239,6 +1237,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) struct lockdep_subclass_key *key; struct hlist_head *hash_head; struct lock_class *class; + int idx; DEBUG_LOCKS_WARN_ON(!irqs_disabled()); @@ -1304,6 +1303,9 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) * of classes. */ list_move_tail(&class->lock_entry, &all_lock_classes); + idx = class - lock_classes; + if (idx > max_lock_class_idx) + max_lock_class_idx = idx; if (verbose(class)) { graph_unlock(); @@ -5919,6 +5921,8 @@ static void zap_class(struct pending_free *pf, struct lock_class *class) WRITE_ONCE(class->name, NULL); nr_lock_classes--; __clear_bit(class - lock_classes, lock_classes_in_use); + if (class - lock_classes == max_lock_class_idx) + max_lock_class_idx--; } else { WARN_ONCE(true, "%s() failed for class %s\n", __func__, class->name); @@ -6209,7 +6213,13 @@ void lockdep_reset_lock(struct lockdep_map *lock) lockdep_reset_lock_reg(lock); } -/* Unregister a dynamically allocated key. */ +/* + * Unregister a dynamically allocated key. + * + * Unlike lockdep_register_key(), a search is always done to find a matching + * key irrespective of debug_locks to avoid potential invalid access to freed + * memory in lock_class entry. + */ void lockdep_unregister_key(struct lock_class_key *key) { struct hlist_head *hash_head = keyhashentry(key); @@ -6224,10 +6234,8 @@ void lockdep_unregister_key(struct lock_class_key *key) return; raw_local_irq_save(flags); - if (!graph_lock()) - goto out_irq; + lockdep_lock(); - pf = get_pending_free(); hlist_for_each_entry_rcu(k, hash_head, hash_entry) { if (k == key) { hlist_del_rcu(&k->hash_entry); @@ -6235,11 +6243,13 @@ void lockdep_unregister_key(struct lock_class_key *key) break; } } - WARN_ON_ONCE(!found); - __lockdep_free_key_range(pf, key, 1); - call_rcu_zapped(pf); - graph_unlock(); -out_irq: + WARN_ON_ONCE(!found && debug_locks); + if (found) { + pf = get_pending_free(); + __lockdep_free_key_range(pf, key, 1); + call_rcu_zapped(pf); + } + lockdep_unlock(); raw_local_irq_restore(flags); /* Wait until is_dynamic_key() has finished accessing k->hash_entry. 
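The lockdep_proc.c changes further below stop walking the all_lock_classes list and instead index the static lock_classes[] array directly, skipping slots that are clear in lock_classes_in_use and bounding the walk by max_lock_class_idx, so a seq_file iteration done without the lockdep lock cannot wander onto the free or zapped lists. A stand-alone sketch of that iteration shape, with types and helpers invented for the example:

#include <limits.h>
#include <stdio.h>

#define MAX_SLOTS     64
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

struct slot { const char *name; };

static struct slot slots[MAX_SLOTS];
static unsigned long in_use[MAX_SLOTS / BITS_PER_LONG + 1];
static unsigned long max_idx;

static int slot_in_use(unsigned long nr)
{
    return (in_use[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
}

static void add_slot(unsigned long idx, const char *name)
{
    slots[idx].name = name;
    in_use[idx / BITS_PER_LONG] |= 1UL << (idx % BITS_PER_LONG);
    if (idx > max_idx)
        max_idx = idx;
}

int main(void)
{
    unsigned long idx;

    add_slot(0, "first");
    add_slot(5, "second");
    add_slot(9, "third");

    /* Walk the fixed array up to the high-water mark, skipping free
     * slots; there are no list pointers to chase, so list surgery done
     * elsewhere cannot redirect this walker into freed memory. */
    for (idx = 0; idx <= max_idx; idx++) {
        if (!slot_in_use(idx))
            continue;
        printf("%lu: %s\n", idx, slots[idx].name);
    }
    return 0;
}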
*/ diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index de49f9e1c11ba8be13fffcda9159c9c91db16e85..a19b016353478f844eaa758880939925e62cd82d 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -121,7 +121,6 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ = #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) -extern struct list_head all_lock_classes; extern struct lock_chain lock_chains[]; #define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1) @@ -151,6 +150,10 @@ extern unsigned int nr_large_chain_blocks; extern unsigned int max_lockdep_depth; extern unsigned int max_bfs_queue_depth; +extern unsigned long max_lock_class_idx; + +extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; +extern unsigned long lock_classes_in_use[]; #ifdef CONFIG_PROVE_LOCKING extern unsigned long lockdep_count_forward_deps(struct lock_class *); @@ -205,7 +208,6 @@ struct lockdep_stats { }; DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats); -extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; #define __debug_atomic_inc(ptr) \ this_cpu_inc(lockdep_stats.ptr); diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 02ef87f50df29c2a53dbf7b27320e1001fe09e33..ccb5292d1e1944bae1150d14f4103588aea1b1f5 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -24,14 +24,33 @@ #include "lockdep_internals.h" +/* + * Since iteration of lock_classes is done without holding the lockdep lock, + * it is not safe to iterate all_lock_classes list directly as the iteration + * may branch off to free_lock_classes or the zapped list. Iteration is done + * directly on the lock_classes array by checking the lock_classes_in_use + * bitmap and max_lock_class_idx. + */ +#define iterate_lock_classes(idx, class) \ + for (idx = 0, class = lock_classes; idx <= max_lock_class_idx; \ + idx++, class++) + static void *l_next(struct seq_file *m, void *v, loff_t *pos) { - return seq_list_next(v, &all_lock_classes, pos); + struct lock_class *class = v; + + ++class; + *pos = class - lock_classes; + return (*pos > max_lock_class_idx) ? 
NULL : class; } static void *l_start(struct seq_file *m, loff_t *pos) { - return seq_list_start_head(&all_lock_classes, *pos); + unsigned long idx = *pos; + + if (idx > max_lock_class_idx) + return NULL; + return lock_classes + idx; } static void l_stop(struct seq_file *m, void *v) @@ -57,14 +76,16 @@ static void print_name(struct seq_file *m, struct lock_class *class) static int l_show(struct seq_file *m, void *v) { - struct lock_class *class = list_entry(v, struct lock_class, lock_entry); + struct lock_class *class = v; struct lock_list *entry; char usage[LOCK_USAGE_CHARS]; + int idx = class - lock_classes; - if (v == &all_lock_classes) { + if (v == lock_classes) seq_printf(m, "all lock classes:\n"); + + if (!test_bit(idx, lock_classes_in_use)) return 0; - } seq_printf(m, "%p", class->key); #ifdef CONFIG_DEBUG_LOCKDEP @@ -218,8 +239,11 @@ static int lockdep_stats_show(struct seq_file *m, void *v) #ifdef CONFIG_PROVE_LOCKING struct lock_class *class; + unsigned long idx; - list_for_each_entry(class, &all_lock_classes, lock_entry) { + iterate_lock_classes(idx, class) { + if (!test_bit(idx, lock_classes_in_use)) + continue; if (class->usage_mask == 0) nr_unused++; @@ -252,6 +276,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) sum_forward_deps += lockdep_count_forward_deps(class); } + #ifdef CONFIG_DEBUG_LOCKDEP DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused); #endif @@ -343,6 +368,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v) seq_printf(m, " max bfs queue depth: %11u\n", max_bfs_queue_depth); #endif + seq_printf(m, " max lock class index: %11lu\n", + max_lock_class_idx); lockdep_stats_debug_show(m); seq_printf(m, " debug_locks: %11u\n", debug_locks); @@ -620,12 +647,16 @@ static int lock_stat_open(struct inode *inode, struct file *file) if (!res) { struct lock_stat_data *iter = data->stats; struct seq_file *m = file->private_data; + unsigned long idx; - list_for_each_entry(class, &all_lock_classes, lock_entry) { + iterate_lock_classes(idx, class) { + if (!test_bit(idx, lock_classes_in_use)) + continue; iter->class = class; iter->stats = lock_stats(class); iter++; } + data->iter_end = iter; sort(data->stats, data->iter_end - data->stats, @@ -643,6 +674,7 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct lock_class *class; + unsigned long idx; char c; if (count) { @@ -652,8 +684,11 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf, if (c != '0') return count; - list_for_each_entry(class, &all_lock_classes, lock_entry) + iterate_lock_classes(idx, class) { + if (!test_bit(idx, lock_classes_in_use)) + continue; clear_lock_stats(class); + } } return count; } diff --git a/kernel/module.c b/kernel/module.c index 1acdfba63716f1083a33e1ac502647e538385742..5fdfa29a0738eab5c9db875fd5e9f40b419094c4 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -57,6 +57,9 @@ #include #include #include +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +#include +#endif #include #include "module-internal.h" @@ -1027,6 +1030,12 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user, } } +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + ret = klp_module_delete_safety_check(mod); + if (ret != 0) + goto out; +#endif + /* Stop the machine so refcounts can't move and disable module. 
*/ ret = try_stop_module(mod, flags, &forced); if (ret != 0) diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index bf640fd6142a00f0f1812d29506db5988cca7051..522cb1387462c981222aadbcd9cef005216cd192 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -1323,7 +1323,7 @@ static int __init resumedelay_setup(char *str) int rc = kstrtouint(str, 0, &resume_delay); if (rc) - return rc; + pr_warn("resumedelay: bad option string '%s'\n", str); return 1; } diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c index e1ed58adb69e480b8537bf4a37b2d2e1f23be9b2..be480ae5cb2aa1da01379025ff4f862bbb2aef94 100644 --- a/kernel/power/suspend_test.c +++ b/kernel/power/suspend_test.c @@ -157,22 +157,22 @@ static int __init setup_test_suspend(char *value) value++; suspend_type = strsep(&value, ","); if (!suspend_type) - return 0; + return 1; repeat = strsep(&value, ","); if (repeat) { if (kstrtou32(repeat, 0, &test_repeat_count_max)) - return 0; + return 1; } for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) if (!strcmp(pm_labels[i], suspend_type)) { test_state_label = pm_labels[i]; - return 0; + return 1; } printk(warn_bad_state, suspend_type); - return 0; + return 1; } __setup("test_suspend", setup_test_suspend); diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 43f8f2573eac4a73ed27a4b1a95af745325a9f83..ecd28d4fa20eb5d80c3bc9b46cffa4f33a011f93 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -146,8 +146,10 @@ static int __control_devkmsg(char *str) static int __init control_devkmsg(char *str) { - if (__control_devkmsg(str) < 0) + if (__control_devkmsg(str) < 0) { + pr_warn("printk.devkmsg: bad option string '%s'\n", str); return 1; + } /* * Set sysctl string accordingly: @@ -166,7 +168,7 @@ static int __init control_devkmsg(char *str) */ devkmsg_log |= DEVKMSG_LOG_MASK_LOCK; - return 0; + return 1; } __setup("printk.devkmsg=", control_devkmsg); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index e3210358bcd2f6670616f6cf1f624851610f2674..072033f40e2300764827d4bcd2d7a0e71301bd43 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -371,6 +371,26 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) return !err; } +static int check_ptrace_options(unsigned long data) +{ + if (data & ~(unsigned long)PTRACE_O_MASK) + return -EINVAL; + + if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) { + if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) || + !IS_ENABLED(CONFIG_SECCOMP)) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (seccomp_mode(¤t->seccomp) != SECCOMP_MODE_DISABLED || + current->ptrace & PT_SUSPEND_SECCOMP) + return -EPERM; + } + return 0; +} + static int ptrace_attach(struct task_struct *task, long request, unsigned long addr, unsigned long flags) @@ -382,8 +402,16 @@ static int ptrace_attach(struct task_struct *task, long request, if (seize) { if (addr != 0) goto out; + /* + * This duplicates the check in check_ptrace_options() because + * ptrace_attach() and ptrace_setoptions() have historically + * used different error codes for unknown ptrace options. 
+ */ if (flags & ~(unsigned long)PTRACE_O_MASK) goto out; + retval = check_ptrace_options(flags); + if (retval) + return retval; flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT); } else { flags = PT_PTRACED; @@ -656,22 +684,11 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds static int ptrace_setoptions(struct task_struct *child, unsigned long data) { unsigned flags; + int ret; - if (data & ~(unsigned long)PTRACE_O_MASK) - return -EINVAL; - - if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) { - if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) || - !IS_ENABLED(CONFIG_SECCOMP)) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (seccomp_mode(¤t->seccomp) != SECCOMP_MODE_DISABLED || - current->ptrace & PT_SUSPEND_SECCOMP) - return -EPERM; - } + ret = check_ptrace_options(data); + if (ret) + return ret; /* Avoid intermediate state when all opts are cleared */ flags = child->ptrace; diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 029fb69b549df2352bcafe8dfbf9dd733a34c3e3..87fe7f423b287eb7ad1142b87aeea368b88d8886 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -531,16 +531,17 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } - /* Unboost if we were boosted. */ - if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex) - rt_mutex_futex_unlock(&rnp->boost_mtx); - /* * If this was the last task on the expedited lists, * then we need to report up the rcu_node hierarchy. */ if (!empty_exp && empty_exp_now) rcu_report_exp_rnp(rnp, true); + + /* Unboost if we were boosted. */ + if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex) + rt_mutex_futex_unlock(&rnp->boost_mtx); + } else { local_irq_restore(flags); } diff --git a/kernel/rseq.c b/kernel/rseq.c index 0077713bf2400126caacd3f8900143adab5edea3..1b4547e0d8414d678509015870a8cfe126ec7508 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -120,8 +120,13 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) u32 sig; int ret; +#ifdef CONFIG_64BIT + if (get_user(ptr, &t->rseq->rseq_cs.ptr64)) + return -EFAULT; +#else if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr))) return -EFAULT; +#endif if (!ptr) { memset(rseq_cs, 0, sizeof(*rseq_cs)); return 0; @@ -204,9 +209,13 @@ static int clear_rseq_cs(struct task_struct *t) * * Set rseq_cs to NULL. 
*/ +#ifdef CONFIG_64BIT + return put_user(0UL, &t->rseq->rseq_cs.ptr64); +#else if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64))) return -EFAULT; return 0; +#endif } /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 56be8d1c7f69435de1eabaa6cd3e80a9f6b480a5..e00b39d4e2e267fd4ec558103398e7e5d9d54e9e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -36,6 +36,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp); +EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp); @@ -4048,7 +4049,6 @@ void scheduler_tick(void) update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); curr->sched_class->task_tick(rq, curr, 0); calc_global_load_tick(rq); - psi_task_tick(rq); rq_unlock(rq, &rf); @@ -8215,7 +8215,8 @@ static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); -static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) +static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, + u64 burst) { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; @@ -8245,6 +8246,10 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) if (quota != RUNTIME_INF && quota > max_cfs_runtime) return -EINVAL; + if (quota != RUNTIME_INF && (burst > quota || + burst + quota > max_cfs_runtime)) + return -EINVAL; + /* * Prevent race between setting of cfs_rq->runtime_enabled and * unthrottle_offline_cfs_rqs(). 
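The burst value threaded through tg_set_cfs_bandwidth() above is only accepted when it does not exceed the quota and quota + burst stays under max_cfs_runtime; the kernel/sched/fair.c hunk further down then clamps the carried-over runtime to quota + burst on every period refill. A minimal user-space sketch of that arithmetic, assuming simplified stand-ins for the kernel's struct and limits (MAX_BW_NS here is not the kernel's definition):

/*
 * Illustrative model only, not kernel code. The struct and the cap are
 * simplified stand-ins for cfs_bandwidth and max_cfs_runtime.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL
#define MAX_BW_NS ((1ULL << 20) * NSEC_PER_USEC)  /* assumed cap */

struct bw {
	uint64_t quota;   /* ns per period, UINT64_MAX means unlimited */
	uint64_t burst;   /* extra ns allowed to accumulate */
	uint64_t runtime; /* ns currently available */
};

/* Mirrors the validation added above: burst may not exceed quota, and
 * quota + burst must stay within the global cap. */
static int burst_is_valid(const struct bw *b)
{
	if (b->quota == UINT64_MAX)
		return 1;
	return b->burst <= b->quota && b->burst + b->quota <= MAX_BW_NS;
}

/* Mirrors the per-period refill clamp: unused runtime carries over, but
 * never beyond quota + burst. */
static void refill(struct bw *b)
{
	b->runtime += b->quota;
	if (b->runtime > b->quota + b->burst)
		b->runtime = b->quota + b->burst;
}

int main(void)
{
	struct bw b = { .quota = 100000, .burst = 50000, .runtime = 30000 };

	printf("valid: %d\n", burst_is_valid(&b));
	refill(&b);
	printf("runtime after refill: %llu (cap %llu)\n",
	       (unsigned long long)b.runtime,
	       (unsigned long long)(b.quota + b.burst));
	return 0;
}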
@@ -8266,6 +8271,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) raw_spin_lock_irq(&cfs_b->lock); cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; + cfs_b->burst = burst; __refill_cfs_bandwidth_runtime(cfs_b); @@ -8299,9 +8305,10 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) { - u64 quota, period; + u64 quota, period, burst; period = ktime_to_ns(tg->cfs_bandwidth.period); + burst = tg->cfs_bandwidth.burst; if (cfs_quota_us < 0) quota = RUNTIME_INF; else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) @@ -8309,7 +8316,7 @@ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) else return -EINVAL; - return tg_set_cfs_bandwidth(tg, period, quota); + return tg_set_cfs_bandwidth(tg, period, quota, burst); } static long tg_get_cfs_quota(struct task_group *tg) @@ -8327,15 +8334,16 @@ static long tg_get_cfs_quota(struct task_group *tg) static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { - u64 quota, period; + u64 quota, period, burst; if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) return -EINVAL; period = (u64)cfs_period_us * NSEC_PER_USEC; quota = tg->cfs_bandwidth.quota; + burst = tg->cfs_bandwidth.burst; - return tg_set_cfs_bandwidth(tg, period, quota); + return tg_set_cfs_bandwidth(tg, period, quota, burst); } static long tg_get_cfs_period(struct task_group *tg) @@ -8348,6 +8356,30 @@ static long tg_get_cfs_period(struct task_group *tg) return cfs_period_us; } +static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) +{ + u64 quota, period, burst; + + if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) + return -EINVAL; + + burst = (u64)cfs_burst_us * NSEC_PER_USEC; + period = ktime_to_ns(tg->cfs_bandwidth.period); + quota = tg->cfs_bandwidth.quota; + + return tg_set_cfs_bandwidth(tg, period, quota, burst); +} + +static long tg_get_cfs_burst(struct task_group *tg) +{ + u64 burst_us; + + burst_us = tg->cfs_bandwidth.burst; + do_div(burst_us, NSEC_PER_USEC); + + return burst_us; +} + static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { @@ -8372,6 +8404,18 @@ static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, return tg_set_cfs_period(css_tg(css), cfs_period_us); } +static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return tg_get_cfs_burst(css_tg(css)); +} + +static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, + struct cftype *cftype, u64 cfs_burst_us) +{ + return tg_set_cfs_burst(css_tg(css), cfs_burst_us); +} + struct cfs_schedulable_data { struct task_group *tg; u64 period, quota; @@ -8474,6 +8518,9 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v) seq_printf(sf, "wait_sum %llu\n", ws); } + seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); + seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); + return 0; } #endif /* CONFIG_CFS_BANDWIDTH */ @@ -8587,6 +8634,11 @@ static struct cftype cpu_legacy_files[] = { .read_u64 = cpu_cfs_period_read_u64, .write_u64 = cpu_cfs_period_write_u64, }, + { + .name = "cfs_burst_us", + .read_u64 = cpu_cfs_burst_read_u64, + .write_u64 = cpu_cfs_burst_write_u64, + }, { .name = "stat", .seq_show = cpu_cfs_stat_show, @@ -8635,16 +8687,20 @@ static int cpu_extra_stat_show(struct seq_file *sf, { struct task_group *tg = css_tg(css); struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; - u64 throttled_usec; + u64 throttled_usec, burst_usec; 
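For the cgroup knobs registered in the hunks above (cpu.cfs_burst_us on the legacy hierarchy, cpu.max.burst on the default hierarchy, with nr_bursts/burst_usec surfacing in the stat output), a hypothetical user-space usage sketch follows; the /sys/fs/cgroup mount point and the "demo" group name are assumptions about the running system, not part of the patch:

/* Hypothetical usage sketch: set a burst of 50ms on top of a
 * 100ms quota per 100ms period, then read back the burst counters. */
#include <stdio.h>
#include <string.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(val, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	const char *grp = "/sys/fs/cgroup/demo";	/* assumed path */
	char path[256], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/cpu.max", grp);
	write_str(path, "100000 100000");
	snprintf(path, sizeof(path), "%s/cpu.max.burst", grp);
	write_str(path, "50000");

	/* nr_bursts / burst_usec appear in cpu.stat once bursting occurs */
	snprintf(path, sizeof(path), "%s/cpu.stat", grp);
	f = fopen(path, "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			if (strstr(line, "burst"))
				fputs(line, stdout);
		fclose(f);
	}
	return 0;
}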
throttled_usec = cfs_b->throttled_time; do_div(throttled_usec, NSEC_PER_USEC); + burst_usec = cfs_b->burst_time; + do_div(burst_usec, NSEC_PER_USEC); seq_printf(sf, "nr_periods %d\n" "nr_throttled %d\n" - "throttled_usec %llu\n", + "throttled_usec %llu\n" + "nr_bursts %d\n" + "burst_usec %llu\n", cfs_b->nr_periods, cfs_b->nr_throttled, - throttled_usec); + throttled_usec, cfs_b->nr_burst, burst_usec); } #endif return 0; @@ -8759,12 +8815,13 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, { struct task_group *tg = css_tg(of_css(of)); u64 period = tg_get_cfs_period(tg); + u64 burst = tg_get_cfs_burst(tg); u64 quota; int ret; ret = cpu_period_quota_parse(buf, &period, "a); if (!ret) - ret = tg_set_cfs_bandwidth(tg, period, quota); + ret = tg_set_cfs_bandwidth(tg, period, quota, burst); return ret ?: nbytes; } #endif @@ -8791,6 +8848,12 @@ static struct cftype cpu_files[] = { .seq_show = cpu_max_show, .write = cpu_max_write, }, + { + .name = "max.burst", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = cpu_cfs_burst_read_u64, + .write_u64 = cpu_cfs_burst_write_u64, + }, #endif #ifdef CONFIG_UCLAMP_TASK_GROUP { diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 12fbaf1302ac223ff271d3f1b17c692f3fff2f84..a260ff7800db34cb218fbe98181e03da01f6f1d4 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -908,25 +908,15 @@ void print_numa_stats(struct seq_file *m, int node, unsigned long tsf, static void sched_show_numa(struct task_struct *p, struct seq_file *m) { #ifdef CONFIG_NUMA_BALANCING - struct mempolicy *pol; - if (p->mm) P(mm->numa_scan_seq); - task_lock(p); - pol = p->mempolicy; - if (pol && !(pol->flags & MPOL_F_MORON)) - pol = NULL; - mpol_get(pol); - task_unlock(p); - P(numa_pages_migrated); P(numa_preferred_nid); P(total_numa_faults); SEQ_printf(m, "current_node=%d, numa_group_id=%d\n", task_node(p), task_numa_group_id(p)); show_numa_stats(p, m); - mpol_put(pol); #endif } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 5fe13efce378be302cf7186c17b70559d7de0ca1..50d457979db61fa44f73f64c2a082f30fbeab8ee 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4738,8 +4738,20 @@ static inline u64 sched_cfs_bandwidth_slice(void) */ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) { - if (cfs_b->quota != RUNTIME_INF) - cfs_b->runtime = cfs_b->quota; + s64 runtime; + + if (unlikely(cfs_b->quota == RUNTIME_INF)) + return; + + cfs_b->runtime += cfs_b->quota; + runtime = cfs_b->runtime_snap - cfs_b->runtime; + if (runtime > 0) { + cfs_b->burst_time += runtime; + cfs_b->nr_burst++; + } + + cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst); + cfs_b->runtime_snap = cfs_b->runtime; } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) @@ -5095,6 +5107,9 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u throttled = !list_empty(&cfs_b->throttled_cfs_rq); cfs_b->nr_periods += overrun; + /* Refill extra burst quota even if cfs_b->idle */ + __refill_cfs_bandwidth_runtime(cfs_b); + /* * idle depends on !throttled (for the case of a large deficit), and if * we're going inactive then everything else can be deferred @@ -5102,8 +5117,6 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u if (cfs_b->idle && !throttled) goto out_deactivate; - __refill_cfs_bandwidth_runtime(cfs_b); - if (!throttled) { /* mark as potentially idle for the upcoming period */ cfs_b->idle = 1; @@ -5356,6 +5369,7 @@ static enum hrtimer_restart 
sched_cfs_period_timer(struct hrtimer *timer) if (new < max_cfs_quota_period) { cfs_b->period = ns_to_ktime(new); cfs_b->quota *= 2; + cfs_b->burst *= 2; pr_warn_ratelimited( "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", @@ -5387,6 +5401,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->runtime = 0; cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); + cfs_b->burst = 0; INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); @@ -7266,6 +7281,7 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq) cfs_rq->throttled = 0; + update_rq_clock(rq); list_del_init(&cfs_rq->qos_throttled_list); /* update hierarchical throttle state */ diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 0c9a596692a5e458b728c4882f3a04731c2997bf..25f7d46ad7bd37214496f1a02abeb35f5e3f443e 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -34,10 +34,19 @@ * delayed on that resource such that nobody is advancing and the CPU * goes idle. This leaves both workload and CPU unproductive. * - * (Naturally, the FULL state doesn't exist for the CPU resource.) - * * SOME = nr_delayed_tasks != 0 - * FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0 + * FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0 + * + * What it means for a task to be productive is defined differently + * for each resource. For IO, productive means a running task. For + * memory, productive means a running task that isn't a reclaimer. For + * CPU, productive means an oncpu task. + * + * Naturally, the FULL state doesn't exist for the CPU resource at the + * system level, but exist at the cgroup level. At the cgroup level, + * FULL means all non-idle tasks in the cgroup are delayed on the CPU + * resource which is being used by others outside of the cgroup or + * throttled by the cgroup cpu.max configuration. 
* * The percentage of wallclock time spent in those compound stall * states gives pressure numbers between 0 and 100 for each resource, @@ -78,13 +87,13 @@ * * threads = min(nr_nonidle_tasks, nr_cpus) * SOME = min(nr_delayed_tasks / threads, 1) - * FULL = (threads - min(nr_running_tasks, threads)) / threads + * FULL = (threads - min(nr_productive_tasks, threads)) / threads * * For the 257 number crunchers on 256 CPUs, this yields: * * threads = min(257, 256) * SOME = min(1 / 256, 1) = 0.4% - * FULL = (256 - min(257, 256)) / 256 = 0% + * FULL = (256 - min(256, 256)) / 256 = 0% * * For the 1 out of 4 memory-delayed tasks, this yields: * @@ -109,7 +118,7 @@ * For each runqueue, we track: * * tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0) - * tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu]) + * tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu]) * tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0) * * and then periodically aggregate: @@ -142,6 +151,8 @@ #include #include "sched.h" +#include + static int psi_bug __read_mostly; DEFINE_STATIC_KEY_FALSE(psi_disabled); @@ -221,15 +232,18 @@ static bool test_state(unsigned int *tasks, enum psi_states state) { switch (state) { case PSI_IO_SOME: - return tasks[NR_IOWAIT]; + return unlikely(tasks[NR_IOWAIT]); case PSI_IO_FULL: - return tasks[NR_IOWAIT] && !tasks[NR_RUNNING]; + return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]); case PSI_MEM_SOME: - return tasks[NR_MEMSTALL]; + return unlikely(tasks[NR_MEMSTALL]); case PSI_MEM_FULL: - return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING]; + return unlikely(tasks[NR_MEMSTALL] && + tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]); case PSI_CPU_SOME: - return tasks[NR_RUNNING] > tasks[NR_ONCPU]; + return unlikely(tasks[NR_RUNNING] > tasks[NR_ONCPU]); + case PSI_CPU_FULL: + return unlikely(tasks[NR_RUNNING] && !tasks[NR_ONCPU]); case PSI_NONIDLE: return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || tasks[NR_RUNNING]; @@ -644,13 +658,10 @@ static void poll_timer_fn(struct timer_list *t) wake_up_interruptible(&group->poll_wait); } -static void record_times(struct psi_group_cpu *groupc, int cpu, - bool memstall_tick) +static void record_times(struct psi_group_cpu *groupc, u64 now) { u32 delta; - u64 now; - now = cpu_clock(cpu); delta = now - groupc->state_start; groupc->state_start = now; @@ -664,34 +675,20 @@ static void record_times(struct psi_group_cpu *groupc, int cpu, groupc->times[PSI_MEM_SOME] += delta; if (groupc->state_mask & (1 << PSI_MEM_FULL)) groupc->times[PSI_MEM_FULL] += delta; - else if (memstall_tick) { - u32 sample; - /* - * Since we care about lost potential, a - * memstall is FULL when there are no other - * working tasks, but also when the CPU is - * actively reclaiming and nothing productive - * could run even if it were runnable. - * - * When the timer tick sees a reclaiming CPU, - * regardless of runnable tasks, sample a FULL - * tick (or less if it hasn't been a full tick - * since the last state change). 
- */ - sample = min(delta, (u32)jiffies_to_nsecs(1)); - groupc->times[PSI_MEM_FULL] += sample; - } } - if (groupc->state_mask & (1 << PSI_CPU_SOME)) + if (groupc->state_mask & (1 << PSI_CPU_SOME)) { groupc->times[PSI_CPU_SOME] += delta; + if (groupc->state_mask & (1 << PSI_CPU_FULL)) + groupc->times[PSI_CPU_FULL] += delta; + } if (groupc->state_mask & (1 << PSI_NONIDLE)) groupc->times[PSI_NONIDLE] += delta; } static void psi_group_change(struct psi_group *group, int cpu, - unsigned int clear, unsigned int set, + unsigned int clear, unsigned int set, u64 now, bool wake_clock) { struct psi_group_cpu *groupc; @@ -711,7 +708,7 @@ static void psi_group_change(struct psi_group *group, int cpu, */ write_seqcount_begin(&groupc->seq); - record_times(groupc, cpu, false); + record_times(groupc, now); for (t = 0, m = clear; m; m &= ~(1 << t), t++) { if (!(m & (1 << t))) @@ -719,10 +716,11 @@ static void psi_group_change(struct psi_group *group, int cpu, if (groupc->tasks[t]) { groupc->tasks[t]--; } else if (!psi_bug) { - printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n", + printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u %u] clear=%x set=%x\n", cpu, t, groupc->tasks[0], groupc->tasks[1], groupc->tasks[2], - groupc->tasks[3], clear, set); + groupc->tasks[3], groupc->tasks[4], + clear, set); psi_bug = 1; } } @@ -736,6 +734,18 @@ static void psi_group_change(struct psi_group *group, int cpu, if (test_state(groupc->tasks, s)) state_mask |= (1 << s); } + + /* + * Since we care about lost potential, a memstall is FULL + * when there are no other working tasks, but also when + * the CPU is actively reclaiming and nothing productive + * could run even if it were runnable. So when the current + * task in a cgroup is in_memstall, the corresponding groupc + * on that cpu is in PSI_MEM_FULL state. + */ + if (unlikely(groupc->tasks[NR_ONCPU] && cpu_curr(cpu)->in_memstall)) + state_mask |= (1 << PSI_MEM_FULL); + groupc->state_mask = state_mask; write_seqcount_end(&groupc->seq); @@ -757,9 +767,13 @@ static struct psi_group *iterate_groups(struct task_struct *task, void **iter) cgroup = task->cgroups->dfl_cgrp; else { #ifdef CONFIG_CGROUP_CPUACCT - rcu_read_lock(); - cgroup = task_cgroup(task, cpuacct_cgrp_id); - rcu_read_unlock(); + if (!cgroup_subsys_on_dfl(cpuacct_cgrp_subsys)) { + rcu_read_lock(); + cgroup = task_cgroup(task, cpuacct_cgrp_id); + rcu_read_unlock(); + } else { + cgroup = task->cgroups->dfl_cgrp; + } #else cgroup = NULL; #endif @@ -802,12 +816,14 @@ void psi_task_change(struct task_struct *task, int clear, int set) struct psi_group *group; bool wake_clock = true; void *iter = NULL; + u64 now; if (!task->pid) return; psi_flags_change(task, clear, set); + now = cpu_clock(cpu); /* * Periodic aggregation shuts off if there is a period of no * task changes, so we wake it back up if necessary. 
However, @@ -820,7 +836,7 @@ void psi_task_change(struct task_struct *task, int clear, int set) wake_clock = false; while ((group = iterate_groups(task, &iter))) - psi_group_change(group, cpu, clear, set, wake_clock); + psi_group_change(group, cpu, clear, set, now, wake_clock); } void psi_task_switch(struct task_struct *prev, struct task_struct *next, @@ -829,56 +845,64 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next, struct psi_group *group, *common = NULL; int cpu = task_cpu(prev); void *iter; + u64 now = cpu_clock(cpu); if (next->pid) { + bool identical_state; + psi_flags_change(next, 0, TSK_ONCPU); /* - * When moving state between tasks, the group that - * contains them both does not change: we can stop - * updating the tree once we reach the first common - * ancestor. Iterate @next's ancestors until we - * encounter @prev's state. + * When switching between tasks that have an identical + * runtime state, the cgroup that contains both tasks + * runtime state, the cgroup that contains both tasks + * we reach the first common ancestor. Iterate @next's + * ancestors only until we encounter @prev's ONCPU. */ + identical_state = prev->psi_flags == next->psi_flags; iter = NULL; while ((group = iterate_groups(next, &iter))) { - if (per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) { + if (identical_state && + per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) { common = group; break; } - psi_group_change(group, cpu, 0, TSK_ONCPU, true); + psi_group_change(group, cpu, 0, TSK_ONCPU, now, true); } } - /* - * If this is a voluntary sleep, dequeue will have taken care - * of the outgoing TSK_ONCPU alongside TSK_RUNNING already. We - * only need to deal with it during preemption. - */ - if (sleep) - return; - if (prev->pid) { - psi_flags_change(prev, TSK_ONCPU, 0); + int clear = TSK_ONCPU, set = 0; - iter = NULL; - while ((group = iterate_groups(prev, &iter)) && group != common) - psi_group_change(group, cpu, TSK_ONCPU, 0, true); - } -} + /* + * When we're going to sleep, psi_dequeue() lets us + * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and + * TSK_IOWAIT here, where we can combine it with + * TSK_ONCPU and save walking common ancestors twice. + */ + if (sleep) { + clear |= TSK_RUNNING; + if (prev->in_memstall) + clear |= TSK_MEMSTALL_RUNNING; + if (prev->in_iowait) + set |= TSK_IOWAIT; + } -void psi_memstall_tick(struct task_struct *task, int cpu) -{ - struct psi_group *group; - void *iter = NULL; + psi_flags_change(prev, clear, set); - while ((group = iterate_groups(task, &iter))) { - struct psi_group_cpu *groupc; + iter = NULL; + while ((group = iterate_groups(prev, &iter)) && group != common) + psi_group_change(group, cpu, clear, set, now, true); - groupc = per_cpu_ptr(group->pcpu, cpu); - write_seqcount_begin(&groupc->seq); - record_times(groupc, cpu, true); - write_seqcount_end(&groupc->seq); + /* + * TSK_ONCPU is handled up to the common ancestor. If we're tasked + * with dequeuing too, finish that for the rest of the hierarchy. 
+ */ + if (sleep) { + clear &= ~TSK_ONCPU; + for (; group; group = iterate_groups(prev, &iter)) + psi_group_change(group, cpu, clear, set, now, true); + } } } @@ -900,6 +924,8 @@ void psi_memstall_enter(unsigned long *flags) *flags = current->in_memstall; if (*flags) return; + + trace_psi_memstall_enter(_RET_IP_); /* * in_memstall setting & accounting needs to be atomic wrt * changes to the task's scheduling state, otherwise we can @@ -908,7 +934,7 @@ void psi_memstall_enter(unsigned long *flags) rq = this_rq_lock_irq(&rf); current->in_memstall = 1; - psi_task_change(current, 0, TSK_MEMSTALL); + psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING); rq_unlock_irq(rq, &rf); } @@ -929,6 +955,8 @@ void psi_memstall_leave(unsigned long *flags) if (*flags) return; + + trace_psi_memstall_leave(_RET_IP_); /* * in_memstall clearing & accounting needs to be atomic wrt * changes to the task's scheduling state, otherwise we could @@ -937,7 +965,7 @@ void psi_memstall_leave(unsigned long *flags) rq = this_rq_lock_irq(&rf); current->in_memstall = 0; - psi_task_change(current, TSK_MEMSTALL, 0); + psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0); rq_unlock_irq(rq, &rf); } @@ -980,7 +1008,7 @@ void psi_cgroup_free(struct cgroup *cgroup) */ void cgroup_move_task(struct task_struct *task, struct css_set *to) { - unsigned int task_flags = 0; + unsigned int task_flags; struct rq_flags rf; struct rq *rq; @@ -995,15 +1023,31 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to) rq = task_rq_lock(task, &rf); - if (task_on_rq_queued(task)) { - task_flags = TSK_RUNNING; - if (task_current(rq, task)) - task_flags |= TSK_ONCPU; - } else if (task->in_iowait) - task_flags = TSK_IOWAIT; - - if (task->in_memstall) - task_flags |= TSK_MEMSTALL; + /* + * We may race with schedule() dropping the rq lock between + * deactivating prev and switching to next. Because the psi + * updates from the deactivation are deferred to the switch + * callback to save cgroup tree updates, the task's scheduling + * state here is not coherent with its psi state: + * + * schedule() cgroup_move_task() + * rq_lock() + * deactivate_task() + * p->on_rq = 0 + * psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates + * pick_next_task() + * rq_unlock() + * rq_lock() + * psi_task_change() // old cgroup + * task->cgroups = to + * psi_task_change() // new cgroup + * rq_unlock() + * rq_lock() + * psi_sched_switch() // does deferred updates in new cgroup + * + * Don't rely on the scheduling state. Use psi_flags instead. + */ + task_flags = task->psi_flags; if (task_flags) psi_task_change(task, task_flags, 0); @@ -1034,15 +1078,18 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res) group->avg_next_update = update_averages(group, now); mutex_unlock(&group->avgs_lock); - for (full = 0; full < 2 - (res == PSI_CPU); full++) { - unsigned long avg[3]; - u64 total; + for (full = 0; full < 2; full++) { + unsigned long avg[3] = { 0, }; + u64 total = 0; int w; - for (w = 0; w < 3; w++) - avg[w] = group->avg[res * 2 + full][w]; - total = div_u64(group->total[PSI_AVGS][res * 2 + full], - NSEC_PER_USEC); + /* CPU FULL is undefined at the system level */ + if (!(group == &psi_system && res == PSI_CPU && full)) { + for (w = 0; w < 3; w++) + avg[w] = group->avg[res * 2 + full][w]; + total = div_u64(group->total[PSI_AVGS][res * 2 + full], + NSEC_PER_USEC); + } seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n", full ? 
"full" : "some", diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0d40bb700f3c0181f3c30db1f1a661c6307d6b24..e41a5207a212edf05ab3804ee1c4c20f2481f58c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -385,10 +385,17 @@ struct cfs_bandwidth { int nr_throttled; u64 throttled_time; +#if !defined(__GENKSYMS__) + u64 burst; + u64 runtime_snap; + int nr_burst; + u64 burst_time; +#else KABI_RESERVE(1) KABI_RESERVE(2) KABI_RESERVE(3) KABI_RESERVE(4) +#endif KABI_RESERVE(5) KABI_RESERVE(6) #endif diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index 06cf8202c178a059055abf0f782ca342ae83c05b..b8b4e5b2694e94017e6c4e41451ca8bbb9c0a616 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -89,6 +89,9 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup) if (static_branch_likely(&psi_disabled)) return; + if (p->in_memstall) + set |= TSK_MEMSTALL_RUNNING; + if (!wakeup || p->sched_psi_wake_requeue) { if (p->in_memstall) set |= TSK_MEMSTALL; @@ -104,28 +107,24 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup) static inline void psi_dequeue(struct task_struct *p, bool sleep) { - int clear = TSK_RUNNING, set = 0; + int clear = TSK_RUNNING; if (static_branch_likely(&psi_disabled)) return; - if (!sleep) { - if (p->in_memstall) - clear |= TSK_MEMSTALL; - } else { - /* - * When a task sleeps, schedule() dequeues it before - * switching to the next one. Merge the clearing of - * TSK_RUNNING and TSK_ONCPU to save an unnecessary - * psi_task_change() call in psi_sched_switch(). - */ - clear |= TSK_ONCPU; + /* + * A voluntary sleep is a dequeue followed by a task switch. To + * avoid walking all ancestors twice, psi_task_switch() handles + * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU. + * Do nothing here. 
+ */ + if (sleep) + return; - if (p->in_iowait) - set |= TSK_IOWAIT; - } + if (p->in_memstall) + clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING); - psi_task_change(p, clear, set); + psi_task_change(p, clear, 0); } static inline void psi_ttwu_dequeue(struct task_struct *p) @@ -164,14 +163,6 @@ static inline void psi_sched_switch(struct task_struct *prev, psi_task_switch(prev, next, sleep); } -static inline void psi_task_tick(struct rq *rq) -{ - if (static_branch_likely(&psi_disabled)) - return; - - if (unlikely(rq->curr->in_memstall)) - psi_memstall_tick(rq->curr, cpu_of(rq)); -} #else /* CONFIG_PSI */ static inline void psi_enqueue(struct task_struct *p, bool wakeup) {} static inline void psi_dequeue(struct task_struct *p, bool sleep) {} @@ -179,7 +170,6 @@ static inline void psi_ttwu_dequeue(struct task_struct *p) {} static inline void psi_sched_switch(struct task_struct *prev, struct task_struct *next, bool sleep) {} -static inline void psi_task_tick(struct rq *rq) {} #endif /* CONFIG_PSI */ #ifdef CONFIG_SCHED_INFO diff --git a/kernel/smp.c b/kernel/smp.c index b04ab01eb9e0336f2b687dfc2841c4f3403b5589..7cb03edf1735390f394263c47a3b2e0b5d673492 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -113,9 +113,9 @@ static int __init csdlock_debug(char *str) if (val) static_branch_enable(&csdlock_debug_enabled); - return 0; + return 1; } -early_param("csdlock_debug", csdlock_debug); +__setup("csdlock_debug=", csdlock_debug); static DEFINE_PER_CPU(call_single_data_t *, cur_csd); static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 364659978d8396bfa2db0a05755f941b43fe9ec4..b3a0ee21d31c0b47cb72a7e5e9d436b40806ccc6 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -233,7 +233,7 @@ static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; static int __init set_trace_boot_options(char *str) { strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); - return 0; + return 1; } __setup("trace_options=", set_trace_boot_options); @@ -244,7 +244,7 @@ static int __init set_trace_boot_clock(char *str) { strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); trace_boot_clock = trace_boot_clock_buf; - return 0; + return 1; } __setup("trace_clock=", set_trace_boot_clock); @@ -1484,10 +1484,12 @@ static int __init set_buf_size(char *str) if (!str) return 0; buf_size = memparse(str, &str); - /* nr_entries can not be zero */ - if (buf_size == 0) - return 0; - trace_buf_size = buf_size; + /* + * nr_entries can not be zero and the startup + * tests require some buffer space. Therefore + * ensure we have at least 4096 bytes of buffer. 
+ */ + trace_buf_size = max(4096UL, buf_size); return 1; } __setup("trace_buf_size=", set_buf_size); diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 78a678eeb140935354283529066bc75c4fd4863e..a255ffbe342f3a519a75fcbd1ffe69503db10767 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -5,6 +5,7 @@ * Copyright (C) 2009 Tom Zanussi */ +#include #include #include #include @@ -654,6 +655,52 @@ DEFINE_EQUALITY_PRED(32); DEFINE_EQUALITY_PRED(16); DEFINE_EQUALITY_PRED(8); +/* user space strings temp buffer */ +#define USTRING_BUF_SIZE 1024 + +struct ustring_buffer { + char buffer[USTRING_BUF_SIZE]; +}; + +static __percpu struct ustring_buffer *ustring_per_cpu; + +static __always_inline char *test_string(char *str) +{ + struct ustring_buffer *ubuf; + char *kstr; + + if (!ustring_per_cpu) + return NULL; + + ubuf = this_cpu_ptr(ustring_per_cpu); + kstr = ubuf->buffer; + + /* For safety, do not trust the string pointer */ + if (!strncpy_from_kernel_nofault(kstr, str, USTRING_BUF_SIZE)) + return NULL; + return kstr; +} + +static __always_inline char *test_ustring(char *str) +{ + struct ustring_buffer *ubuf; + char __user *ustr; + char *kstr; + + if (!ustring_per_cpu) + return NULL; + + ubuf = this_cpu_ptr(ustring_per_cpu); + kstr = ubuf->buffer; + + /* user space address? */ + ustr = (char __user *)str; + if (!strncpy_from_user_nofault(kstr, ustr, USTRING_BUF_SIZE)) + return NULL; + + return kstr; +} + /* Filter predicate for fixed sized arrays of characters */ static int filter_pred_string(struct filter_pred *pred, void *event) { @@ -667,19 +714,43 @@ static int filter_pred_string(struct filter_pred *pred, void *event) return match; } -/* Filter predicate for char * pointers */ -static int filter_pred_pchar(struct filter_pred *pred, void *event) +static __always_inline int filter_pchar(struct filter_pred *pred, char *str) { - char **addr = (char **)(event + pred->offset); int cmp, match; - int len = strlen(*addr) + 1; /* including tailing '\0' */ + int len; - cmp = pred->regex.match(*addr, &pred->regex, len); + len = strlen(str) + 1; /* including tailing '\0' */ + cmp = pred->regex.match(str, &pred->regex, len); match = cmp ^ pred->not; return match; } +/* Filter predicate for char * pointers */ +static int filter_pred_pchar(struct filter_pred *pred, void *event) +{ + char **addr = (char **)(event + pred->offset); + char *str; + + str = test_string(*addr); + if (!str) + return 0; + + return filter_pchar(pred, str); +} + +/* Filter predicate for char * pointers in user space*/ +static int filter_pred_pchar_user(struct filter_pred *pred, void *event) +{ + char **addr = (char **)(event + pred->offset); + char *str; + + str = test_ustring(*addr); + if (!str) + return 0; + + return filter_pchar(pred, str); +} /* * Filter predicate for dynamic sized arrays of characters. 
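The bounded per-cpu copies above (strncpy_from_kernel_nofault() for kernel pointers, strncpy_from_user_nofault() for the new user-space case) are what make it safe to run a regex match against a char * event field; the parse_pred() hunk that follows selects filter_pred_pchar_user() when the field name carries a ".ustring" suffix. A hypothetical usage sketch from user space, where the tracefs path, event and field names are placeholders rather than values taken from this patch:

/* Hypothetical sketch: install a filter on a char * field that points
 * at user memory, using the ".ustring" suffix handled below. */
#include <stdio.h>

int main(void)
{
	const char *filter_file =
		"/sys/kernel/tracing/events/sample/sample_event/filter";
	FILE *f = fopen(filter_file, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* "name" stands in for a char * event field holding a user address */
	fputs("name.ustring ~ \"/usr/*\"", f);
	fclose(f);
	return 0;
}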
@@ -1158,6 +1229,7 @@ static int parse_pred(const char *str, void *data, struct filter_pred *pred = NULL; char num_buf[24]; /* Big enough to hold an address */ char *field_name; + bool ustring = false; char q; u64 val; int len; @@ -1192,6 +1264,12 @@ static int parse_pred(const char *str, void *data, return -EINVAL; } + /* See if the field is a user space string */ + if ((len = str_has_prefix(str + i, ".ustring"))) { + ustring = true; + i += len; + } + while (isspace(str[i])) i++; @@ -1320,8 +1398,20 @@ static int parse_pred(const char *str, void *data, } else if (field->filter_type == FILTER_DYN_STRING) pred->fn = filter_pred_strloc; - else - pred->fn = filter_pred_pchar; + else { + + if (!ustring_per_cpu) { + /* Once allocated, keep it around for good */ + ustring_per_cpu = alloc_percpu(struct ustring_buffer); + if (!ustring_per_cpu) + goto err_mem; + } + + if (ustring) + pred->fn = filter_pred_pchar_user; + else + pred->fn = filter_pred_pchar; + } /* go past the last quote */ i++; @@ -1387,6 +1477,9 @@ static int parse_pred(const char *str, void *data, err_free: kfree(pred); return -EINVAL; +err_mem: + kfree(pred); + return -ENOMEM; } enum { diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 41a9bd52e1fdcd52bb31cb9f9941683d893d2f06..eb7200699cf664ef852cfe7257ed45f704d98b17 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1985,9 +1985,9 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, /* * For backward compatibility, if field_name * was "cpu", then we treat this the same as - * common_cpu. + * common_cpu. This also works for "CPU". */ - if (strcmp(field_name, "cpu") == 0) { + if (field && field->filter_type == FILTER_CPU) { *flags |= HIST_FIELD_FL_CPU; } else { hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, @@ -4365,7 +4365,7 @@ static int create_tracing_map_fields(struct hist_trigger_data *hist_data) if (hist_field->flags & HIST_FIELD_FL_STACKTRACE) cmp_fn = tracing_map_cmp_none; - else if (!field) + else if (!field || hist_field->flags & HIST_FIELD_FL_CPU) cmp_fn = tracing_map_cmp_num(hist_field->size, hist_field->is_signed); else if (is_string_field(field)) diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index f725802160c0bc05fab47c29f72234219b6612cc..d0309de2f84fea5bfce5cbe4a8321be92c547057 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -940,6 +940,16 @@ static void traceon_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { + struct trace_event_file *file = data->private_data; + + if (file) { + if (tracer_tracing_is_on(file->tr)) + return; + + tracer_tracing_on(file->tr); + return; + } + if (tracing_is_on()) return; @@ -950,8 +960,15 @@ static void traceon_count_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { - if (tracing_is_on()) - return; + struct trace_event_file *file = data->private_data; + + if (file) { + if (tracer_tracing_is_on(file->tr)) + return; + } else { + if (tracing_is_on()) + return; + } if (!data->count) return; @@ -959,13 +976,26 @@ traceon_count_trigger(struct event_trigger_data *data, void *rec, if (data->count != -1) (data->count)--; - tracing_on(); + if (file) + tracer_tracing_on(file->tr); + else + tracing_on(); } static void traceoff_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { + struct trace_event_file *file = data->private_data; + + if (file) { + if 
(!tracer_tracing_is_on(file->tr)) + return; + + tracer_tracing_off(file->tr); + return; + } + if (!tracing_is_on()) return; @@ -976,8 +1006,15 @@ static void traceoff_count_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { - if (!tracing_is_on()) - return; + struct trace_event_file *file = data->private_data; + + if (file) { + if (!tracer_tracing_is_on(file->tr)) + return; + } else { + if (!tracing_is_on()) + return; + } if (!data->count) return; @@ -985,7 +1022,10 @@ traceoff_count_trigger(struct event_trigger_data *data, void *rec, if (data->count != -1) (data->count)--; - tracing_off(); + if (file) + tracer_tracing_off(file->tr); + else + tracing_off(); } static int diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 4cf9524f5693cd007777eb49809a3571245547a2..a15de1c183775f06a5b4003381463695ac76a805 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -31,7 +31,7 @@ static int __init set_kprobe_boot_events(char *str) strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE); disable_tracing_selftest("running kprobe events"); - return 0; + return 1; } __setup("kprobe_event=", set_kprobe_boot_events); diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index 0ef8f65bd2d71188bbe314b11eda366fe87ff889..249ed32591449d88b59e5eea1cc321514429717c 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -54,6 +54,7 @@ static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe, bit += page->index; set_bit(bit, wqueue->notes_bitmap); + generic_pipe_buf_release(pipe, buf); } // No try_steal function => no stealing @@ -112,7 +113,7 @@ static bool post_one_notification(struct watch_queue *wqueue, buf->offset = offset; buf->len = len; buf->flags = PIPE_BUF_FLAG_WHOLE; - pipe->head = head + 1; + smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */ if (!test_and_clear_bit(note, wqueue->notes_bitmap)) { spin_unlock_irq(&pipe->rd_wait.lock); @@ -243,7 +244,8 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) goto error; } - ret = pipe_resize_ring(pipe, nr_notes); + nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE; + ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes)); if (ret < 0) goto error; @@ -268,11 +270,11 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) wqueue->notes = pages; wqueue->notes_bitmap = bitmap; wqueue->nr_pages = nr_pages; - wqueue->nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE; + wqueue->nr_notes = nr_notes; return 0; error_p: - for (i = 0; i < nr_pages; i++) + while (--i >= 0) __free_page(pages[i]); kfree(pages); error: @@ -320,7 +322,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe, tf[i].info_mask & WATCH_INFO_LENGTH) goto err_filter; /* Ignore any unknown types */ - if (tf[i].type >= sizeof(wfilter->type_filter) * 8) + if (tf[i].type >= WATCH_TYPE__NR) continue; nr_filter++; } @@ -336,7 +338,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe, q = wfilter->filters; for (i = 0; i < filter.nr_filters; i++) { - if (tf[i].type >= sizeof(wfilter->type_filter) * BITS_PER_LONG) + if (tf[i].type >= WATCH_TYPE__NR) continue; q->type = tf[i].type; @@ -371,6 +373,8 @@ static void __put_watch_queue(struct kref *kref) for (i = 0; i < wqueue->nr_pages; i++) __free_page(wqueue->notes[i]); + kfree(wqueue->notes); + bitmap_free(wqueue->notes_bitmap); wfilter = rcu_access_pointer(wqueue->filter); if (wfilter) @@ -395,6 +399,7 @@ static void free_watch(struct rcu_head *rcu) 
put_watch_queue(rcu_access_pointer(watch->queue)); atomic_dec(&watch->cred->user->nr_watches); put_cred(watch->cred); + kfree(watch); } static void __put_watch(struct kref *kref) @@ -566,7 +571,7 @@ void watch_queue_clear(struct watch_queue *wqueue) rcu_read_lock(); spin_lock_bh(&wqueue->lock); - /* Prevent new additions and prevent notifications from happening */ + /* Prevent new notifications from being stored. */ wqueue->defunct = true; while (!hlist_empty(&wqueue->watches)) { diff --git a/lib/Kconfig b/lib/Kconfig index b46a9fd122c81acabd888e8b250e78be4f575ef6..9216e24e516469944e0938b62461d6174a9fdfab 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -45,7 +45,6 @@ config BITREVERSE config HAVE_ARCH_BITREVERSE bool default n - depends on BITREVERSE help This option enables the use of hardware bit-reversal instructions on architectures which support such operations. diff --git a/lib/crc64.c b/lib/crc64.c index 47cfa054827f3df027873dae9e7eb37c13ae167c..9f852a89ee2a1e4858b40d04373395e385a1d66c 100644 --- a/lib/crc64.c +++ b/lib/crc64.c @@ -37,7 +37,7 @@ MODULE_LICENSE("GPL v2"); /** * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64 * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation, - or the previous crc64 value if computing incrementally. + * or the previous crc64 value if computing incrementally. * @p: pointer to buffer over which CRC64 is run * @len: length of buffer @p */ diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 36963e8f4eaa8f1e2ce89519a259e5e5acad32fc..14c032de276e6bf71b745ea25bb820f5c28a5580 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -128,9 +128,3 @@ config CRYPTO_LIB_CHACHA20POLY1305 config CRYPTO_LIB_SHA256 tristate - -config CRYPTO_LIB_SM3 - tristate - -config CRYPTO_LIB_SM4 - tristate diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 8149bc00b627079905323b6a35ab9cb0eb9ceb65..3a435629d9ce9c4806f75068d1891dad411556e6 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -38,12 +38,6 @@ libpoly1305-y += poly1305.o obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o libsha256-y := sha256.o -obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o -libsm3-y := sm3.o - -obj-$(CONFIG_CRYPTO_LIB_SM4) += libsm4.o -libsm4-y := sm4.o - ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y) libblake2s-y += blake2s-selftest.o libchacha20poly1305-y += chacha20poly1305-selftest.o diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c index 0dd434e40487cf9ef032932369280fc3300520ad..d7b3fe4d5f240b8d3d0fcf21663afc62848362d1 100644 --- a/lib/kunit/try-catch.c +++ b/lib/kunit/try-catch.c @@ -52,7 +52,7 @@ static unsigned long kunit_test_timeout(void) * If tests timeout due to exceeding sysctl_hung_task_timeout_secs, * the task will be killed and an oops generated. */ - return 300 * MSEC_PER_SEC; /* 5 min */ + return 300 * msecs_to_jiffies(MSEC_PER_SEC); /* 5 min */ } void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context) @@ -78,6 +78,7 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context) if (time_remaining == 0) { kunit_err(test, "try timed out\n"); try_catch->try_result = -ETIMEDOUT; + kthread_stop(task_struct); } exit_code = try_catch->try_result; diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index a4c7cd74cff589dcedc2efc57fa6238ea5e00006..4fb7700a741bdf289eda14ff0d2a6d27c9b8ea10 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -4,6 +4,8 @@ # from userspace. # +pound := \# + CC = gcc OPTFLAGS = -O2 # Adjust as desired CFLAGS = -I.. 
-I ../../../include -g $(OPTFLAGS) @@ -42,7 +44,7 @@ else ifeq ($(HAS_NEON),yes) OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1 else - HAS_ALTIVEC := $(shell printf '\#include \nvector int a;\n' |\ + HAS_ALTIVEC := $(shell printf '$(pound)include \nvector int a;\n' |\ gcc -c -x c - >/dev/null && rm ./-.o && echo yes) ifeq ($(HAS_ALTIVEC),yes) CFLAGS += -I../../../arch/powerpc/include diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c index a3cf071941ab42e7b9438f8c50c46a876a914008..841a55242abaaede1c37746be8754567b5d3c270 100644 --- a/lib/raid6/test/test.c +++ b/lib/raid6/test/test.c @@ -19,7 +19,6 @@ #define NDISKS 16 /* Including P and Q */ const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); -struct raid6_calls raid6_call; char *dataptrs[NDISKS]; char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); diff --git a/lib/test_kmod.c b/lib/test_kmod.c index eab52770070d63d4266f4054ac2583e896ba9804..c637f6b5053a902650a5babcafd07f685af5ae9e 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c @@ -1155,6 +1155,7 @@ static struct kmod_test_device *register_test_dev_kmod(void) if (ret) { pr_err("could not register misc device: %d\n", ret); free_test_dev_kmod(test_dev); + test_dev = NULL; goto out; } diff --git a/lib/test_lockup.c b/lib/test_lockup.c index f1a020bcc763ed593b583229b39e42f7028e439a..78a630bbd03dfac16a45141953cba13c6a1a7e39 100644 --- a/lib/test_lockup.c +++ b/lib/test_lockup.c @@ -417,9 +417,14 @@ static bool test_kernel_ptr(unsigned long addr, int size) return false; /* should be at least readable kernel address */ - if (access_ok(ptr, 1) || - access_ok(ptr + size - 1, 1) || - get_kernel_nofault(buf, ptr) || + if (!IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) && + (access_ok((void __user *)ptr, 1) || + access_ok((void __user *)ptr + size - 1, 1))) { + pr_err("user space ptr invalid in kernel: %#lx\n", addr); + return true; + } + + if (get_kernel_nofault(buf, ptr) || get_kernel_nofault(buf, ptr + size - 1)) { pr_err("invalid kernel ptr: %#lx\n", addr); return true; diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 8b1c318189ce801a0935133b4882556fede5bfd2..e77d4856442c3f750434e37819e13688d210048e 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1463,6 +1463,25 @@ static noinline void check_create_range_4(struct xarray *xa, XA_BUG_ON(xa, !xa_empty(xa)); } +static noinline void check_create_range_5(struct xarray *xa, + unsigned long index, unsigned int order) +{ + XA_STATE_ORDER(xas, xa, index, order); + unsigned int i; + + xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); + + for (i = 0; i < order + 10; i++) { + do { + xas_lock(&xas); + xas_create_range(&xas); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + } + + xa_destroy(xa); +} + static noinline void check_create_range(struct xarray *xa) { unsigned int order; @@ -1490,6 +1509,9 @@ static noinline void check_create_range(struct xarray *xa) check_create_range_4(xa, (3U << order) + 1, order); check_create_range_4(xa, (3U << order) - 1, order); check_create_range_4(xa, (1U << 24) + 1, order); + + check_create_range_5(xa, 0, order); + check_create_range_5(xa, (1U << order), order); } check_create_range_3(); diff --git a/lib/xarray.c b/lib/xarray.c index ed775dee1074c99e798ab55a56eaa73754783b76..75da19a7a93348ae94e1608ef75261b27024a029 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -722,6 +722,8 @@ void xas_create_range(struct xa_state *xas) for (;;) { struct xa_node *node 
= xas->xa_node; + if (node->shift >= shift) + break; xas->xa_node = xa_parent_locked(xas->xa, node); xas->xa_offset = node->offset - 1; if (node->offset != 0) @@ -1078,6 +1080,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) xa_mk_node(child)); if (xa_is_value(curr)) values--; + xas_update(xas, child); } else { unsigned int canon = offset - xas->xa_sibs; @@ -1092,6 +1095,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) } while (offset-- > xas->xa_offset); node->nr_values += values; + xas_update(xas, node); } EXPORT_SYMBOL_GPL(xas_split); #endif diff --git a/mm/kfence/core.c b/mm/kfence/core.c index fcc79594020c8a731d956348f4ceff25d19b2306..f67418a30282ce2cd82ccdcc2b594f0105268485 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -21,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -38,14 +40,18 @@ #define KFENCE_WARN_ON(cond) \ ({ \ const bool __cond = WARN_ON(cond); \ - if (unlikely(__cond)) \ + if (unlikely(__cond)) { \ WRITE_ONCE(kfence_enabled, false); \ + disabled_by_warn = true; \ + } \ __cond; \ }) /* === Data ================================================================= */ static bool kfence_enabled __read_mostly; +static bool disabled_by_warn __read_mostly; +static bool re_enabling __read_mostly; unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL; EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ @@ -55,20 +61,33 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ #endif #define MODULE_PARAM_PREFIX "kfence." +static int kfence_enable_late(void); static int param_set_sample_interval(const char *val, const struct kernel_param *kp) { - unsigned long num; - int ret = kstrtoul(val, 0, &num); + long num; + int ret = kstrtol(val, 0, &num); if (ret < 0) return ret; - if (!num) /* Using 0 to indicate KFENCE is disabled. */ + if (num < -1) + return -ERANGE; + /* + * For architecture that don't require early allocation, always support + * re-enabling. So only need to set num to 0 if num < 0. + */ + num = max_t(long, 0, num); + + /* Using 0 to indicate KFENCE is disabled. */ + if (!num && READ_ONCE(kfence_enabled)) { + pr_info("disabled\n"); WRITE_ONCE(kfence_enabled, false); - else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING) - return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */ + } - *((unsigned long *)kp->arg) = num; + *((unsigned long *)kp->arg) = (unsigned long)num; + + if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING) + return disabled_by_warn ? 
-EINVAL : kfence_enable_late(); return 0; } @@ -89,11 +108,22 @@ module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_inte #ifdef CONFIG_ARM64 static int __init parse_sample_interval(char *str) { - unsigned long num; + long num; - if (kstrtoul(str, 0, &num) < 0) + if (kstrtol(str, 0, &num) < 0) return 0; - kfence_sample_interval = num; + + if (num < -1) + return 0; + + /* Using -1 to indicate re-enabling is supported */ + if (num == -1) { + re_enabling = true; + pr_err("re-enabling is supported\n"); + } + num = max_t(long, 0, num); + + kfence_sample_interval = (unsigned long)num; return 0; } early_param("kfence.sample_interval", parse_sample_interval); @@ -103,8 +133,12 @@ early_param("kfence.sample_interval", parse_sample_interval); static unsigned long kfence_skip_covered_thresh __read_mostly = 75; module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644); +/* If true, check all canary bytes on panic. */ +static bool kfence_check_on_panic __read_mostly; +module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444); + /* The pool of pages used for guard pages and objects. */ -char *__kfence_pool __ro_after_init; +char *__kfence_pool __read_mostly; EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */ #ifdef CONFIG_KFENCE_DYNAMIC_OBJECTS @@ -628,17 +662,66 @@ static void rcu_guarded_free(struct rcu_head *h) kfence_guarded_free((void *)meta->addr, meta, false); } -static bool __init kfence_init_pool(void) +#ifdef CONFIG_KFENCE_DYNAMIC_OBJECTS +static int __ref kfence_dynamic_init(void) +{ + metadata_size = sizeof(struct kfence_metadata) * KFENCE_NR_OBJECTS; + if (system_state < SYSTEM_RUNNING) + kfence_metadata = memblock_alloc(metadata_size, PAGE_SIZE); + else + kfence_metadata = kzalloc(metadata_size, GFP_KERNEL); + if (!kfence_metadata) + return -ENOMEM; + + covered_size = sizeof(atomic_t) * ALLOC_COVERED_SIZE; + if (system_state < SYSTEM_RUNNING) + alloc_covered = memblock_alloc(covered_size, PAGE_SIZE); + else + alloc_covered = kzalloc(covered_size, GFP_KERNEL); + if (!alloc_covered) { + if (system_state < SYSTEM_RUNNING) + memblock_free(__pa(kfence_metadata), metadata_size); + else + kfree(kfence_metadata); + kfence_metadata = NULL; + + return -ENOMEM; + } + + return 0; +} + +static void __ref kfence_dynamic_destroy(void) +{ + if (system_state < SYSTEM_RUNNING) { + memblock_free(__pa(alloc_covered), covered_size); + memblock_free(__pa(kfence_metadata), metadata_size); + } else { + kfree(alloc_covered); + kfree(kfence_metadata); + } + alloc_covered = NULL; + kfence_metadata = NULL; +} + +#else +static int __init kfence_dynamic_init(void) { return 0; } +static void __init kfence_dynamic_destroy(void) { } +#endif + +/* + * Initialization of the KFENCE pool after its allocation. + * Returns 0 on success; otherwise returns the address up to + * which partial initialization succeeded. + */ +static unsigned long kfence_init_pool(void) { unsigned long addr = (unsigned long)__kfence_pool; struct page *pages; int i; - if (!__kfence_pool) - return false; - if (!arch_kfence_init_pool()) - goto err; + return addr; pages = virt_to_page(addr); @@ -656,9 +739,13 @@ static bool __init kfence_init_pool(void) /* Verify we do not have a compound head page. 
*/ if (WARN_ON(compound_head(&pages[i]) != &pages[i])) - goto err; + return addr; __SetPageSlab(&pages[i]); +#ifdef CONFIG_MEMCG + pages[i].memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg | + MEMCG_DATA_OBJCGS; +#endif } /* @@ -669,7 +756,7 @@ static bool __init kfence_init_pool(void) */ for (i = 0; i < 2; i++) { if (unlikely(!kfence_protect(addr))) - goto err; + return addr; addr += PAGE_SIZE; } @@ -686,7 +773,7 @@ static bool __init kfence_init_pool(void) /* Protect the right redzone. */ if (unlikely(!kfence_protect(addr + PAGE_SIZE))) - goto err; + return addr; addr += 2 * PAGE_SIZE; } @@ -699,9 +786,22 @@ static bool __init kfence_init_pool(void) */ kmemleak_free(__kfence_pool); - return true; + return 0; +} + +static bool __init kfence_init_pool_early(void) +{ + unsigned long addr; + char *p; + + if (!__kfence_pool) + return false; + + addr = kfence_init_pool(); + + if (!addr) + return true; -err: /* * Only release unprotected pages, and do not try to go back and change * page attributes due to risk of failing to do so as well. If changing @@ -709,8 +809,40 @@ static bool __init kfence_init_pool(void) * fails for the first page, and therefore expect addr==__kfence_pool in * most failure cases. */ + for (p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) { + struct page *page = virt_to_page(p); + + if (!page) + continue; +#ifdef CONFIG_MEMCG + page->memcg_data = 0; +#endif + __ClearPageSlab(page); + } memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); __kfence_pool = NULL; + kfence_dynamic_destroy(); + return false; +} + +static bool kfence_init_pool_late(void) +{ + unsigned long addr, free_size; + + addr = kfence_init_pool(); + + if (!addr) + return true; + + /* Same as above. */ + free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool); +#ifdef CONFIG_CONTIG_ALLOC + free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE); +#else + free_pages_exact((void *)addr, free_size); +#endif + __kfence_pool = NULL; + kfence_dynamic_destroy(); return false; } @@ -754,9 +886,14 @@ static void *next_object(struct seq_file *seq, void *v, loff_t *pos) static int show_object(struct seq_file *seq, void *v) { - struct kfence_metadata *meta = &kfence_metadata[(long)v - 1]; + struct kfence_metadata *meta; unsigned long flags; + if (!kfence_metadata_valid()) + return 0; + + meta = &kfence_metadata[(long)v - 1]; + raw_spin_lock_irqsave(&meta->lock, flags); kfence_print_object(seq, meta); raw_spin_unlock_irqrestore(&meta->lock, flags); @@ -791,14 +928,38 @@ static int __init kfence_debugfs_init(void) debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops); /* Variable kfence_metadata may fail to allocate. 
*/ - if (kfence_metadata_valid()) - debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops); + debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops); return 0; } late_initcall(kfence_debugfs_init); +/* === Panic Notifier ====================================================== */ + +static void kfence_check_all_canary(void) +{ + int i; + + for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + struct kfence_metadata *meta = &kfence_metadata[i]; + + if (meta->state == KFENCE_OBJECT_ALLOCATED) + for_each_canary(meta, check_canary_byte); + } +} + +static int kfence_check_canary_callback(struct notifier_block *nb, + unsigned long reason, void *arg) +{ + kfence_check_all_canary(); + return NOTIFY_OK; +} + +static struct notifier_block kfence_check_canary_notifier = { + .notifier_call = kfence_check_canary_callback, +}; + /* === Allocation Gate Timer ================================================ */ #ifdef CONFIG_KFENCE_STATIC_KEYS @@ -853,44 +1014,10 @@ static void toggle_allocation_gate(struct work_struct *work) } static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate); -#ifdef CONFIG_KFENCE_DYNAMIC_OBJECTS -static int __init kfence_dynamic_init(void) -{ - metadata_size = sizeof(struct kfence_metadata) * KFENCE_NR_OBJECTS; - kfence_metadata = memblock_alloc(metadata_size, PAGE_SIZE); - if (!kfence_metadata) { - pr_err("failed to allocate metadata\n"); - return -ENOMEM; - } - - covered_size = sizeof(atomic_t) * ALLOC_COVERED_SIZE; - alloc_covered = memblock_alloc(covered_size, PAGE_SIZE); - if (!alloc_covered) { - memblock_free(__pa(kfence_metadata), metadata_size); - kfence_metadata = NULL; - pr_err("failed to allocate covered\n"); - return -ENOMEM; - } - - return 0; -} - -static void __init kfence_dynamic_destroy(void) -{ - memblock_free(__pa(alloc_covered), covered_size); - alloc_covered = NULL; - memblock_free(__pa(kfence_metadata), metadata_size); - kfence_metadata = NULL; -} -#else -static int __init kfence_dynamic_init(void) { return 0; } -static void __init kfence_dynamic_destroy(void) { } -#endif - /* === Public interface ===================================================== */ void __init kfence_early_alloc_pool(void) { - if (!kfence_sample_interval) + if (!kfence_sample_interval && !re_enabling) return; __kfence_pool = memblock_alloc_raw(KFENCE_POOL_SIZE, PAGE_SIZE); @@ -903,7 +1030,7 @@ void __init kfence_early_alloc_pool(void) void __init kfence_alloc_pool(void) { - if (!kfence_sample_interval) + if (!kfence_sample_interval && !__kfence_pool) return; if (kfence_dynamic_init()) { @@ -922,25 +1049,97 @@ void __init kfence_alloc_pool(void) } } +static void kfence_init_enable(void) +{ + if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS)) + static_branch_enable(&kfence_allocation_key); + + if (kfence_check_on_panic) + atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier); + + WRITE_ONCE(kfence_enabled, true); + queue_delayed_work(system_unbound_wq, &kfence_timer, 0); + pr_info("initialized - using %lu bytes for %lu objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, + (unsigned long)KFENCE_NR_OBJECTS, (void *)__kfence_pool, + (void *)(__kfence_pool + KFENCE_POOL_SIZE)); +} + void __init kfence_init(void) { + stack_hash_seed = (u32)random_get_entropy(); + /* Setting kfence_sample_interval to 0 on boot disables KFENCE. 
*/ - if (!kfence_sample_interval) + if (!kfence_sample_interval && !__kfence_pool) return; - stack_hash_seed = (u32)random_get_entropy(); - if (!kfence_init_pool()) { + if (!kfence_init_pool_early()) { pr_err("%s failed\n", __func__); return; } - if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS)) - static_branch_enable(&kfence_allocation_key); + kfence_init_enable(); + + if (!kfence_sample_interval) + WRITE_ONCE(kfence_enabled, false); +} + +static int kfence_init_late(void) +{ + const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE; + +#ifdef CONFIG_CONTIG_ALLOC + struct page *pages; +#endif + + /* + * For kfence re_enabling on ARM64, kfence_pool should be allocated + * at startup instead of here. So just return -EINVAL here which means + * re_enabling is not supported. + */ + if (IS_ENABLED(CONFIG_ARM64)) + return -EINVAL; + + if (kfence_dynamic_init()) + return -ENOMEM; + +#ifdef CONFIG_CONTIG_ALLOC + pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL); + if (!pages) { + kfence_dynamic_destroy(); + return -ENOMEM; + } + + __kfence_pool = page_to_virt(pages); +#else + if (nr_pages > MAX_ORDER_NR_PAGES) { + pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n"); + return -EINVAL; + } + __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL); + if (!__kfence_pool) { + kfence_dynamic_destroy(); + return -ENOMEM; + } +#endif + + if (!kfence_init_pool_late()) { + pr_err("%s failed\n", __func__); + return -EBUSY; + } + + kfence_init_enable(); + return 0; +} + +static int kfence_enable_late(void) +{ + if (!__kfence_pool) + return kfence_init_late(); + WRITE_ONCE(kfence_enabled, true); queue_delayed_work(system_unbound_wq, &kfence_timer, 0); - pr_info("initialized - using %lu bytes for %lu objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, - (unsigned long)KFENCE_NR_OBJECTS, (void *)__kfence_pool, - (void *)(__kfence_pool + KFENCE_POOL_SIZE)); + pr_info("re-enabled\n"); + return 0; } void kfence_shutdown_cache(struct kmem_cache *s) @@ -1097,6 +1296,9 @@ void __kfence_free(void *addr) { struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); +#ifdef CONFIG_MEMCG + KFENCE_WARN_ON(meta->objcg); +#endif /* * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing * the object, as the object page may be recycled for other-typed diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h index e5f8f857791124ce08a5c5c6c028b9027d260d4c..867e7982adb57c3454f26474ba5001a223912622 100644 --- a/mm/kfence/kfence.h +++ b/mm/kfence/kfence.h @@ -89,6 +89,9 @@ struct kfence_metadata { struct kfence_track free_track; /* For updating alloc_covered on frees. */ u32 alloc_stack_hash; +#ifdef CONFIG_MEMCG + struct obj_cgroup *objcg; +#endif }; #ifdef CONFIG_KFENCE_DYNAMIC_OBJECTS diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c index c9952fc8d596af193b6aab64a28602beea42d715..0acbc736541297e5befceec4b1544f2855caee14 100644 --- a/mm/kfence/kfence_test.c +++ b/mm/kfence/kfence_test.c @@ -621,10 +621,11 @@ static void test_gfpzero(struct kunit *test) break; test_free(buf2); - if (i == KFENCE_NR_OBJECTS) { + if (kthread_should_stop() || (i == KFENCE_NR_OBJECTS)) { kunit_warn(test, "giving up ... 
cannot get same object back\n"); return; } + cond_resched(); } for (i = 0; i < size; i++) diff --git a/mm/kmemleak.c b/mm/kmemleak.c index a910b181d493c377b8ca9e152be88b9d8691041b..90eb82299149d6a9e2dc1872eca9c177caae9e94 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -788,6 +788,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) unsigned long flags; struct kmemleak_object *object; struct kmemleak_scan_area *area = NULL; + unsigned long untagged_ptr; + unsigned long untagged_objp; object = find_and_get_object(ptr, 1); if (!object) { @@ -796,6 +798,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) return; } + untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr); + untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer); + if (scan_area_cache) area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp)); @@ -807,8 +812,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) goto out_unlock; } if (size == SIZE_MAX) { - size = object->pointer + object->size - ptr; - } else if (ptr + size > object->pointer + object->size) { + size = untagged_objp + object->size - untagged_ptr; + } else if (untagged_ptr + size > untagged_objp + object->size) { kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); dump_object_info(object); kmem_cache_free(scan_area_cache, area); diff --git a/mm/madvise.c b/mm/madvise.c index 24abc79f8914e86836381dce3e0d406ad034f127..16b1c2885b63333fe674d9a8a3a44e2753f6d0ed 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -877,7 +877,6 @@ static long madvise_remove(struct vm_area_struct *vma, static int madvise_inject_error(int behavior, unsigned long start, unsigned long end) { - struct zone *zone; unsigned long size; if (!capable(CAP_SYS_ADMIN)) @@ -915,10 +914,6 @@ static int madvise_inject_error(int behavior, return ret; } - /* Ensure that all poisoned pages are removed from per-cpu lists */ - for_each_populated_zone(zone) - drain_all_pages(zone); - return 0; } #endif @@ -1229,8 +1224,7 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, iov_iter_advance(&iter, iovec.iov_len); } - if (ret == 0) - ret = total_len - iov_iter_count(&iter); + ret = (total_len - iov_iter_count(&iter)) ? 
: ret; release_mm: mmput(mm); diff --git a/mm/memblock.c b/mm/memblock.c index 873625fdc504791e0808704add6f0fff5cfa53b2..b93fa16292d04c4a89b1e6b2fd5f4ab78315cee4 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -366,14 +366,20 @@ void __init memblock_discard(void) addr = __pa(memblock.reserved.regions); size = PAGE_ALIGN(sizeof(struct memblock_region) * memblock.reserved.max); - __memblock_free_late(addr, size); + if (memblock_reserved_in_slab) + kfree(memblock.reserved.regions); + else + __memblock_free_late(addr, size); } if (memblock.memory.regions != memblock_memory_init_regions) { addr = __pa(memblock.memory.regions); size = PAGE_ALIGN(sizeof(struct memblock_region) * memblock.memory.max); - __memblock_free_late(addr, size); + if (memblock_memory_in_slab) + kfree(memblock.memory.regions); + else + __memblock_free_late(addr, size); } memblock_memory = NULL; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7061f9283a34bc0e2a696b1a54a1074e972a6bd9..a850d1f3fc5bda98d5f9d6436f3bee992bf4966f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4556,6 +4556,53 @@ static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, spin_unlock(&memcg_oom_lock); } +static const char *const memcg_flag_name[] = { + "NO_REF", + "ONLINE", + "RELEASED", + "VISIBLE", + "DYING" +}; + +static void memcg_flag_stat_get(int mem_flags, int *stat) +{ + int i; + int flags = mem_flags; + + for (i = 0; i < ARRAY_SIZE(memcg_flag_name); i++) { + if (flags & 1) + stat[i] += 1; + flags >>= 1; + } +} + +static int memcg_flag_stat_show(struct seq_file *sf, void *v) +{ + int self_flag[ARRAY_SIZE(memcg_flag_name)]; + int child_flag[ARRAY_SIZE(memcg_flag_name)]; + int iter; + struct cgroup_subsys_state *child; + struct cgroup_subsys_state *css = seq_css(sf); + + memset(self_flag, 0, sizeof(self_flag)); + memset(child_flag, 0, sizeof(child_flag)); + + memcg_flag_stat_get(css->flags, self_flag); + + rcu_read_lock(); + css_for_each_child(child, css) + memcg_flag_stat_get(child->flags, child_flag); + rcu_read_unlock(); + + for (iter = 0; iter < ARRAY_SIZE(memcg_flag_name); iter++) + seq_printf(sf, "%s %d\n", memcg_flag_name[iter], self_flag[iter]); + + for (iter = 0; iter < ARRAY_SIZE(memcg_flag_name); iter++) + seq_printf(sf, "CHILD_%s %d\n", memcg_flag_name[iter], child_flag[iter]); + + return 0; +} + static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) { struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); @@ -5152,6 +5199,49 @@ static int memcg_events_local_show(struct seq_file *m, void *v) return 0; } +static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, + size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + unsigned int nr_retries = MAX_RECLAIM_RETRIES; + unsigned long nr_to_reclaim, nr_reclaimed = 0; + int err; + + buf = strstrip(buf); + err = page_counter_memparse(buf, "", &nr_to_reclaim); + if (err) + return err; + + if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && + mem_cgroup_is_root(memcg)) + return -EINVAL; + + while (nr_reclaimed < nr_to_reclaim) { + unsigned long reclaimed; + + if (signal_pending(current)) + return -EINTR; + + /* This is the final attempt, drain percpu lru caches in the + * hope of introducing more evictable pages for + * try_to_free_mem_cgroup_pages(). 
+ */ + if (!nr_retries) + lru_add_drain_all(); + + reclaimed = try_to_free_mem_cgroup_pages(memcg, + nr_to_reclaim - nr_reclaimed, + GFP_KERNEL, true); + + if (!reclaimed && !nr_retries--) + return -EAGAIN; + + nr_reclaimed += reclaimed; + } + + return nbytes; +} + static struct cftype mem_cgroup_legacy_files[] = { { .name = "usage_in_bytes", @@ -5216,6 +5306,10 @@ static struct cftype mem_cgroup_legacy_files[] = { .write_u64 = mem_cgroup_oom_control_write, .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), }, + { + .name = "flag_stat", + .seq_show = memcg_flag_stat_show, + }, { .name = "pressure_level", }, @@ -5345,6 +5439,10 @@ static struct cftype mem_cgroup_legacy_files[] = { .file_offset = offsetof(struct mem_cgroup, events_local_file), .seq_show = memcg_events_local_show, }, + { + .name = "reclaim", + .write = memory_reclaim, + }, { }, /* terminate */ }; @@ -6769,6 +6867,11 @@ static struct cftype memory_files[] = { .seq_show = memory_oom_group_show, .write = memory_oom_group_write, }, + { + .name = "reclaim", + .flags = CFTYPE_NS_DELEGATABLE, + .write = memory_reclaim, + }, { } /* terminate */ }; @@ -7357,7 +7460,7 @@ static int __init cgroup_memory(char *s) else if (!strcmp(token, "kmem")) cgroup_memory_nokmem = false; } - return 0; + return 1; } __setup("cgroup.memory=", cgroup_memory); diff --git a/mm/memfd.c b/mm/memfd.c index 2647c898990c80491b512944a890d47c90f23aca..fae4142f7d25451e048d43a54bb8debcda4d64c4 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -31,20 +31,28 @@ static void memfd_tag_pins(struct xa_state *xas) { struct page *page; - unsigned int tagged = 0; + int latency = 0; + int cache_count; lru_add_drain(); xas_lock_irq(xas); xas_for_each(xas, page, ULONG_MAX) { - if (xa_is_value(page)) - continue; - page = find_subpage(page, xas->xa_index); - if (page_count(page) - page_mapcount(page) > 1) + cache_count = 1; + if (!xa_is_value(page) && + PageTransHuge(page) && !PageHuge(page)) + cache_count = HPAGE_PMD_NR; + + if (!xa_is_value(page) && + page_count(page) - total_mapcount(page) != cache_count) xas_set_mark(xas, MEMFD_TAG_PINNED); + if (cache_count != 1) + xas_set(xas, page->index + cache_count); - if (++tagged % XA_CHECK_SCHED) + latency += cache_count; + if (latency < XA_CHECK_SCHED) continue; + latency = 0; xas_pause(xas); xas_unlock_irq(xas); @@ -73,7 +81,8 @@ static int memfd_wait_for_pins(struct address_space *mapping) error = 0; for (scan = 0; scan <= LAST_SCAN; scan++) { - unsigned int tagged = 0; + int latency = 0; + int cache_count; if (!xas_marked(&xas, MEMFD_TAG_PINNED)) break; @@ -87,10 +96,14 @@ static int memfd_wait_for_pins(struct address_space *mapping) xas_lock_irq(&xas); xas_for_each_marked(&xas, page, ULONG_MAX, MEMFD_TAG_PINNED) { bool clear = true; - if (xa_is_value(page)) - continue; - page = find_subpage(page, xas.xa_index); - if (page_count(page) - page_mapcount(page) != 1) { + + cache_count = 1; + if (!xa_is_value(page) && + PageTransHuge(page) && !PageHuge(page)) + cache_count = HPAGE_PMD_NR; + + if (!xa_is_value(page) && cache_count != + page_count(page) - total_mapcount(page)) { /* * On the last scan, we clean up all those tags * we inserted; but make a note that we still @@ -103,8 +116,11 @@ static int memfd_wait_for_pins(struct address_space *mapping) } if (clear) xas_clear_mark(&xas, MEMFD_TAG_PINNED); - if (++tagged % XA_CHECK_SCHED) + + latency += cache_count; + if (latency < XA_CHECK_SCHED) continue; + latency = 0; xas_pause(&xas); xas_unlock_irq(&xas); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 
63bacfcca122ce34b37fc06caf426c66631ebaf8..0519f20d2b57d708960c38052548ae68990df54e 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -810,7 +810,7 @@ static int me_swapcache_clean(struct page *p, unsigned long pfn) */ static int me_huge_page(struct page *p, unsigned long pfn) { - int res = 0; + int res; struct page *hpage = compound_head(p); struct address_space *mapping; @@ -821,6 +821,7 @@ static int me_huge_page(struct page *p, unsigned long pfn) if (mapping) { res = truncate_error_page(hpage, pfn, mapping); } else { + res = MF_FAILED; unlock_page(hpage); /* * migration entry prevents later access on error anonymous @@ -829,8 +830,10 @@ static int me_huge_page(struct page *p, unsigned long pfn) */ if (PageAnon(hpage)) put_page(hpage); - dissolve_free_huge_page(p); - res = MF_RECOVERED; + if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) { + page_ref_inc(p); + res = MF_RECOVERED; + } lock_page(hpage); } @@ -947,13 +950,13 @@ static int page_action(struct page_state *ps, struct page *p, } /** - * get_hwpoison_page() - Get refcount for memory error handling: + * __get_hwpoison_page() - Get refcount for memory error handling: * @page: raw error page (hit by memory error) * * Return: return 0 if failed to grab the refcount, otherwise true (some * non-zero value.) */ -static int get_hwpoison_page(struct page *page) +static int __get_hwpoison_page(struct page *page) { struct page *head = compound_head(page); @@ -983,6 +986,26 @@ static int get_hwpoison_page(struct page *page) return 0; } +static int get_hwpoison_page(struct page *p) +{ + int ret; + bool drained = false; + +retry: + ret = __get_hwpoison_page(p); + if (!ret && !is_free_buddy_page(p) && !page_count(p) && !drained) { + /* + * The page might be in a pcplist, so try to drain those + * and see if we are lucky. + */ + drain_all_pages(page_zone(p)); + drained = true; + goto retry; + } + + return ret; +} + /* * Do all that is necessary to remove user space mappings. Unmap * the pages and send SIGBUS to the processes if the data was dirty. @@ -1181,9 +1204,13 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags) return 0; } unlock_page(head); - dissolve_free_huge_page(p); - action_result(pfn, MF_MSG_FREE_HUGE, MF_DELAYED); - return 0; + res = MF_FAILED; + if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) { + page_ref_inc(p); + res = MF_RECOVERED; + } + action_result(pfn, MF_MSG_FREE_HUGE, res); + return res == MF_RECOVERED ? 0 : -EBUSY; } lock_page(head); @@ -1338,6 +1365,7 @@ int memory_failure(unsigned long pfn, int flags) int res = 0; unsigned long page_flags; static DEFINE_MUTEX(mf_mutex); + bool retry = true; if (!sysctl_memory_failure_recovery) panic("Memory failure on page %lx", pfn); @@ -1357,6 +1385,7 @@ int memory_failure(unsigned long pfn, int flags) mutex_lock(&mf_mutex); +try_again: if (PageHuge(p)) { res = memory_failure_hugetlb(pfn, flags); goto unlock_mutex; @@ -1385,7 +1414,21 @@ int memory_failure(unsigned long pfn, int flags) */ if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) { if (is_free_buddy_page(p)) { - action_result(pfn, MF_MSG_BUDDY, MF_DELAYED); + if (take_page_off_buddy(p)) { + page_ref_inc(p); + res = MF_RECOVERED; + } else { + /* We lost the race, try again */ + if (retry) { + ClearPageHWPoison(p); + num_poisoned_pages_dec(); + retry = false; + goto try_again; + } + res = MF_FAILED; + } + action_result(pfn, MF_MSG_BUDDY, res); + res = res == MF_RECOVERED ? 
0 : -EBUSY; } else { action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED); res = -EBUSY; @@ -1411,14 +1454,6 @@ int memory_failure(unsigned long pfn, int flags) * walked by the page reclaim code, however that's not a big loss. */ shake_page(p, 0); - /* shake_page could have turned it free. */ - if (!PageLRU(p) && is_free_buddy_page(p)) { - if (flags & MF_COUNT_INCREASED) - action_result(pfn, MF_MSG_BUDDY, MF_DELAYED); - else - action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED); - goto unlock_mutex; - } lock_page(p); diff --git a/mm/memory.c b/mm/memory.c index 58e3e276d753ade65bab0de22648478c8df25c74..8379f39dd69755cac8b764ff83d16d6236f5b756 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3726,11 +3726,20 @@ static vm_fault_t __do_fault(struct vm_fault *vmf) return ret; if (unlikely(PageHWPoison(vmf->page))) { - if (ret & VM_FAULT_LOCKED) - unlock_page(vmf->page); - put_page(vmf->page); + struct page *page = vmf->page; + vm_fault_t poisonret = VM_FAULT_HWPOISON; + if (ret & VM_FAULT_LOCKED) { + if (page_mapped(page)) + unmap_mapping_pages(page_mapping(page), + page->index, 1, false); + /* Retry if a clean page was removed from the cache. */ + if (invalidate_inode_page(page)) + poisonret = VM_FAULT_NOPAGE; + unlock_page(page); + } + put_page(page); vmf->page = NULL; - return VM_FAULT_HWPOISON; + return poisonret; } if (unlikely(!(ret & VM_FAULT_LOCKED))) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5ce39dbc84e1c7c4c6cf0c9b4298c3cf4101009a..ef7eb6a068277c62dd4284973efe0652934c76da 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -841,7 +841,6 @@ static int vma_replace_policy(struct vm_area_struct *vma, static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) { - struct vm_area_struct *next; struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; @@ -856,8 +855,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, if (start > vma->vm_start) prev = vma; - for (; vma && vma->vm_start < end; prev = vma, vma = next) { - next = vma->vm_next; + for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) { vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); @@ -875,10 +873,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, new_pol, vma->vm_userfaultfd_ctx); if (prev) { vma = prev; - next = vma->vm_next; - if (mpol_equal(vma_policy(vma), new_pol)) - continue; - /* vma_merge() joined vma && vma->next, case 8 */ goto replace; } if (vma->vm_start != vmstart) { @@ -1308,11 +1302,10 @@ static struct page *new_page(struct page *page, unsigned long start) } #endif -static long do_mbind(unsigned long start, unsigned long len, - unsigned short mode, unsigned short mode_flags, - nodemask_t *nmask, unsigned long flags) +long __do_mbind(unsigned long start, unsigned long len, + unsigned short mode, unsigned short mode_flags, + nodemask_t *nmask, unsigned long flags, struct mm_struct *mm) { - struct mm_struct *mm = current->mm; struct mempolicy *new; unsigned long end; int err; @@ -1411,6 +1404,13 @@ static long do_mbind(unsigned long start, unsigned long len, return err; } +static long do_mbind(unsigned long start, unsigned long len, + unsigned short mode, unsigned short mode_flags, + nodemask_t *nmask, unsigned long flags) +{ + return __do_mbind(start, len, mode, mode_flags, nmask, flags, current->mm); +} + /* * User space interface with variable sized bitmaps for nodelists. 
*/ diff --git a/mm/mmap.c b/mm/mmap.c index 5c9b27aa337d82b06d59880ad226737f91fcee3a..5ad32537604a5bf17943856ef0e72d4190d4d8a8 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2873,7 +2873,7 @@ static int __init cmdline_parse_stack_guard_gap(char *p) if (!*endptr) stack_guard_gap = val << PAGE_SHIFT; - return 0; + return 1; } __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 97eb298a0f573b8df707f2c160114c5d52a1b11c..c81ff36f412157ab6e22f495e018b0ec4c101ebd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -7677,10 +7677,17 @@ static void __init find_zone_movable_pfns_for_nodes(void) out2: /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ - for (nid = 0; nid < MAX_NUMNODES; nid++) + for (nid = 0; nid < MAX_NUMNODES; nid++) { + unsigned long start_pfn, end_pfn; + zone_movable_pfn[nid] = roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); + get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); + if (zone_movable_pfn[nid] >= end_pfn) + zone_movable_pfn[nid] = 0; + } + out: /* restore the node_state */ node_states[N_MEMORY] = saved_node_state; diff --git a/mm/share_pool.c b/mm/share_pool.c index 3a37418378f64fbf66924e377a8be3e5cf1b1682..76088952d0a55600556ae38296fbac115a1932c5 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -16,7 +16,6 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ - #define pr_fmt(fmt) "share pool: " fmt #include @@ -2157,6 +2156,7 @@ struct sp_alloc_context { bool need_fallocate; struct timespec64 start; struct timespec64 end; + bool have_mbind; }; static void trace_sp_alloc_begin(struct sp_alloc_context *ac) @@ -2298,6 +2298,7 @@ static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags, ac->sp_flags = sp_flags; ac->state = ALLOC_NORMAL; ac->need_fallocate = false; + ac->have_mbind = false; return 0; } @@ -2391,7 +2392,7 @@ static void sp_alloc_fallback(struct sp_area *spa, struct sp_alloc_context *ac) } static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa, - struct sp_group_node *spg_node, struct sp_alloc_context *ac) + struct sp_alloc_context *ac) { int ret = 0; unsigned long sp_addr = spa->va_start; @@ -2423,25 +2424,20 @@ static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa, if (ret) sp_add_work_compact(); } - if (ret) { - if (spa->spg != spg_none) - sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node); - else - sp_munmap(mm, spa->va_start, spa->real_size); - - if (unlikely(fatal_signal_pending(current))) - pr_warn_ratelimited("allocation failed, current thread is killed\n"); - else - pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n", - ret); - sp_fallocate(spa); /* need this, otherwise memleak */ - sp_alloc_fallback(spa, ac); - } else { - ac->need_fallocate = true; - } return ret; } +static long sp_mbind(struct mm_struct *mm, unsigned long start, unsigned long len, + unsigned long node) +{ + nodemask_t nmask; + + nodes_clear(nmask); + node_set(node, nmask); + return __do_mbind(start, len, MPOL_BIND, MPOL_F_STATIC_NODES, + &nmask, MPOL_MF_STRICT, mm); +} + static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa, struct sp_group_node *spg_node, struct sp_alloc_context *ac) { @@ -2457,7 +2453,34 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa, return ret; } - ret = sp_alloc_populate(mm, spa, spg_node, ac); + if (!ac->have_mbind) { + ret 
= sp_mbind(mm, spa->va_start, spa->real_size, spa->node_id); + if (ret < 0) { + pr_err("cannot bind the memory range to specified node:%d, err:%d\n", + spa->node_id, ret); + goto err; + } + ac->have_mbind = true; + } + + ret = sp_alloc_populate(mm, spa, ac); + if (ret) { +err: + if (spa->spg != spg_none) + sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node); + else + sp_munmap(mm, spa->va_start, spa->real_size); + + if (unlikely(fatal_signal_pending(current))) + pr_warn_ratelimited("allocation failed, current thread is killed\n"); + else + pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n", + ret); + sp_fallocate(spa); /* need this, otherwise memleak */ + sp_alloc_fallback(spa, ac); + } else + ac->need_fallocate = true; + return ret; } @@ -2479,11 +2502,6 @@ static int sp_alloc_mmap_populate(struct sp_area *spa, if (mmap_ret) { if (ac->state != ALLOC_COREDUMP) return mmap_ret; - if (ac->spg == spg_none) { - sp_alloc_unmap(mm, spa, spg_node); - pr_err("dvpp allocation failed due to coredump"); - return mmap_ret; - } ac->state = ALLOC_NORMAL; continue; } @@ -4042,9 +4060,9 @@ void spg_overview_show(struct seq_file *seq) atomic_read(&sp_overall_stat.spa_total_num)); } - down_read(&sp_group_sem); + down_read(&sp_spg_stat_sem); idr_for_each(&sp_spg_stat_idr, idr_spg_stat_cb, seq); - up_read(&sp_group_sem); + up_read(&sp_spg_stat_sem); if (seq != NULL) seq_puts(seq, "\n"); @@ -4083,7 +4101,6 @@ static int idr_proc_stat_cb(int id, void *p, void *data) long sp_res, sp_res_nsize, non_sp_res, non_sp_shm; /* to prevent ABBA deadlock, first hold sp_group_sem */ - down_read(&sp_group_sem); mutex_lock(&spg_stat->lock); hash_for_each(spg_stat->hash, i, spg_proc_stat, gnode) { proc_stat = spg_proc_stat->proc_stat; @@ -4112,7 +4129,6 @@ static int idr_proc_stat_cb(int id, void *p, void *data) seq_putc(seq, '\n'); } mutex_unlock(&spg_stat->lock); - up_read(&sp_group_sem); return 0; } @@ -4130,10 +4146,16 @@ static int proc_stat_show(struct seq_file *seq, void *offset) byte2kb(atomic64_read(&kthread_stat.alloc_size)), byte2kb(atomic64_read(&kthread_stat.k2u_size))); - /* pay attention to potential ABBA deadlock */ + /* + * This ugly code is just for fixing the ABBA deadlock against + * sp_group_add_task. + */ + down_read(&sp_group_sem); down_read(&sp_spg_stat_sem); idr_for_each(&sp_spg_stat_idr, idr_proc_stat_cb, seq); up_read(&sp_spg_stat_sem); + up_read(&sp_group_sem); + return 0; } diff --git a/mm/slub.c b/mm/slub.c index 7a7b0bf82b8eb71c1762331c50beff70110324b7..98452815a066c966b5c41f2511779cdb91f1fd12 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3209,7 +3209,9 @@ static inline void free_nonslab_page(struct page *page, void *object) { unsigned int order = compound_order(page); - VM_BUG_ON_PAGE(!PageCompound(page), page); + if (WARN_ON_ONCE(!PageCompound(page))) + pr_warn_once("object pointer: 0x%p\n", object); + kfree_hook(object); mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order)); __free_pages(page, order); diff --git a/mm/swap_state.c b/mm/swap_state.c index a9e42d48312be0bdbd0e0fa400f1e752bfd4b142..149f4678106179117d8571621ac70d278e30008a 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -502,7 +502,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * __read_swap_cache_async(), which has set SWAP_HAS_CACHE * in swap_map, but not yet added its page to swap cache. 
*/ - cond_resched(); + schedule_timeout_uninterruptible(1); } /* diff --git a/mm/swapfile.c b/mm/swapfile.c index 5af6b0f770de626c8ab644563c01e8f3081c6aee..eaf483c7c83e7691297d12b818bfe20ffa1104e8 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3167,6 +3167,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) struct filename *name; struct file *swap_file = NULL; struct address_space *mapping; + struct dentry *dentry; int prio; int error; union swap_header *swap_header; @@ -3210,6 +3211,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->swap_file = swap_file; mapping = swap_file->f_mapping; + dentry = swap_file->f_path.dentry; inode = mapping->host; error = claim_swapfile(p, inode); @@ -3217,6 +3219,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) goto bad_swap; inode_lock(inode); + if (d_unlinked(dentry) || cant_mount(dentry)) { + error = -ENOENT; + goto bad_swap_unlock_inode; + } if (IS_SWAPFILE(inode)) { error = -EBUSY; goto bad_swap_unlock_inode; diff --git a/mm/usercopy.c b/mm/usercopy.c index ce83e0b137dd3f9292842d995aa5d0e8dd27e581..1f0dcff7bd23cc910b1b2481bfa35803a89ac8e8 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -296,7 +296,10 @@ static bool enable_checks __initdata = true; static int __init parse_hardened_usercopy(char *str) { - return strtobool(str, &enable_checks); + if (strtobool(str, &enable_checks)) + pr_warn("Invalid option string for hardened_usercopy: '%s'\n", + str); + return 1; } __setup("hardened_usercopy=", parse_hardened_usercopy); diff --git a/mm/util.c b/mm/util.c index d31820abadb4b6fe37a2b6397bedc1907622dcfa..67b350f4ffdc5f9e145ed27babea34c4b8b77821 100644 --- a/mm/util.c +++ b/mm/util.c @@ -587,8 +587,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) return ret; /* Don't even allow crazy sizes */ - if (WARN_ON_ONCE(size > INT_MAX)) + if (unlikely(size > INT_MAX)) { + WARN_ON_ONCE(!(flags & __GFP_NOWARN)); return NULL; + } return __vmalloc_node(size, 1, flags, node, __builtin_return_address(0)); diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 556bf1a8ea3f781d0b56dbf388c187459fc049d3..5fff027f25fad2c1021f2c14e4746d79123119f6 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -99,6 +99,10 @@ static void ax25_kill_by_device(struct net_device *dev) lock_sock(sk); ax25_disconnect(s, ENETUNREACH); s->ax25_dev = NULL; + if (sk->sk_socket) { + dev_put(ax25_dev->dev); + ax25_dev_put(ax25_dev); + } release_sock(sk); spin_lock_bh(&ax25_list_lock); sock_put(sk); @@ -365,21 +369,25 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl))) return -EFAULT; - if ((ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr)) == NULL) - return -ENODEV; - if (ax25_ctl.digi_count > AX25_MAX_DIGIS) return -EINVAL; if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL) return -EINVAL; + ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr); + if (!ax25_dev) + return -ENODEV; + digi.ndigi = ax25_ctl.digi_count; for (k = 0; k < digi.ndigi; k++) digi.calls[k] = ax25_ctl.digi_addr[k]; - if ((ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev)) == NULL) + ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev); + if (!ax25) { + ax25_dev_put(ax25_dev); return -ENOTCONN; + } switch (ax25_ctl.cmd) { case AX25_KILL: @@ -446,6 +454,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) } out_put: + ax25_dev_put(ax25_dev); 
ax25_cb_put(ax25); return ret; @@ -971,14 +980,16 @@ static int ax25_release(struct socket *sock) { struct sock *sk = sock->sk; ax25_cb *ax25; + ax25_dev *ax25_dev; if (sk == NULL) return 0; sock_hold(sk); - sock_orphan(sk); lock_sock(sk); + sock_orphan(sk); ax25 = sk_to_ax25(sk); + ax25_dev = ax25->ax25_dev; if (sk->sk_type == SOCK_SEQPACKET) { switch (ax25->state) { @@ -1040,6 +1051,15 @@ static int ax25_release(struct socket *sock) sk->sk_state_change(sk); ax25_destroy_socket(ax25); } + if (ax25_dev) { + del_timer_sync(&ax25->timer); + del_timer_sync(&ax25->t1timer); + del_timer_sync(&ax25->t2timer); + del_timer_sync(&ax25->t3timer); + del_timer_sync(&ax25->idletimer); + dev_put(ax25_dev->dev); + ax25_dev_put(ax25_dev); + } sock->sk = NULL; release_sock(sk); @@ -1116,8 +1136,10 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) } } - if (ax25_dev != NULL) + if (ax25_dev) { ax25_fillin_cb(ax25, ax25_dev); + dev_hold(ax25_dev->dev); + } done: ax25_cb_add(ax25); diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index 4ac2e0847652a9426b5acf2ebc618520fbaf1751..d2e0cc67d91a72470c2f865688d3442c9d12e240 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -37,6 +37,7 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr) for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) if (ax25cmp(addr, (ax25_address *)ax25_dev->dev->dev_addr) == 0) { res = ax25_dev; + ax25_dev_hold(ax25_dev); } spin_unlock_bh(&ax25_dev_lock); @@ -56,6 +57,7 @@ void ax25_dev_device_up(struct net_device *dev) return; } + refcount_set(&ax25_dev->refcount, 1); dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; dev_hold(dev); @@ -84,6 +86,7 @@ void ax25_dev_device_up(struct net_device *dev) ax25_dev->next = ax25_dev_list; ax25_dev_list = ax25_dev; spin_unlock_bh(&ax25_dev_lock); + ax25_dev_hold(ax25_dev); ax25_register_dev_sysctl(ax25_dev); } @@ -113,9 +116,10 @@ void ax25_dev_device_down(struct net_device *dev) if ((s = ax25_dev_list) == ax25_dev) { ax25_dev_list = s->next; spin_unlock_bh(&ax25_dev_lock); + ax25_dev_put(ax25_dev); dev->ax25_ptr = NULL; dev_put(dev); - kfree(ax25_dev); + ax25_dev_put(ax25_dev); return; } @@ -123,9 +127,10 @@ void ax25_dev_device_down(struct net_device *dev) if (s->next == ax25_dev) { s->next = ax25_dev->next; spin_unlock_bh(&ax25_dev_lock); + ax25_dev_put(ax25_dev); dev->ax25_ptr = NULL; dev_put(dev); - kfree(ax25_dev); + ax25_dev_put(ax25_dev); return; } @@ -133,6 +138,7 @@ void ax25_dev_device_down(struct net_device *dev) } spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; + ax25_dev_put(ax25_dev); } int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) @@ -144,20 +150,32 @@ int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) switch (cmd) { case SIOCAX25ADDFWD: - if ((fwd_dev = ax25_addr_ax25dev(&fwd->port_to)) == NULL) + fwd_dev = ax25_addr_ax25dev(&fwd->port_to); + if (!fwd_dev) { + ax25_dev_put(ax25_dev); return -EINVAL; - if (ax25_dev->forward != NULL) + } + if (ax25_dev->forward) { + ax25_dev_put(fwd_dev); + ax25_dev_put(ax25_dev); return -EINVAL; + } ax25_dev->forward = fwd_dev->dev; + ax25_dev_put(fwd_dev); + ax25_dev_put(ax25_dev); break; case SIOCAX25DELFWD: - if (ax25_dev->forward == NULL) + if (!ax25_dev->forward) { + ax25_dev_put(ax25_dev); return -EINVAL; + } ax25_dev->forward = NULL; + ax25_dev_put(ax25_dev); break; default: + ax25_dev_put(ax25_dev); return -EINVAL; } diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index 
b40e0bce67ead7d1dd36f435aa51bb9c53fa0e19..dc2168d2a32a9e4bcea055a8df49424530f988c2 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ -75,11 +75,13 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) ax25_dev *ax25_dev; int i; - if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL) - return -EINVAL; if (route->digi_count > AX25_MAX_DIGIS) return -EINVAL; + ax25_dev = ax25_addr_ax25dev(&route->port_addr); + if (!ax25_dev) + return -EINVAL; + write_lock_bh(&ax25_route_lock); ax25_rt = ax25_route_list; @@ -91,6 +93,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) if (route->digi_count != 0) { if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return -ENOMEM; } ax25_rt->digipeat->lastrepeat = -1; @@ -101,6 +104,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) } } write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return 0; } ax25_rt = ax25_rt->next; @@ -108,6 +112,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return -ENOMEM; } @@ -120,6 +125,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); kfree(ax25_rt); + ax25_dev_put(ax25_dev); return -ENOMEM; } ax25_rt->digipeat->lastrepeat = -1; @@ -132,6 +138,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) ax25_rt->next = ax25_route_list; ax25_route_list = ax25_rt; write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return 0; } @@ -173,6 +180,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route) } } write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return 0; } @@ -215,6 +223,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option) out: write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return err; } diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ee9cead7654502cf5f9b3db1de7870c88babd46d..986f707e7d973a6a545a73951ae1c8163b4574ca 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -164,6 +164,9 @@ static void batadv_backbone_gw_release(struct kref *ref) */ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw) { + if (!backbone_gw) + return; + kref_put(&backbone_gw->refcount, batadv_backbone_gw_release); } @@ -199,6 +202,9 @@ static void batadv_claim_release(struct kref *ref) */ static void batadv_claim_put(struct batadv_bla_claim *claim) { + if (!claim) + return; + kref_put(&claim->refcount, batadv_claim_release); } diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 0e6e53e9b5f35b48cdfff8b70d4f1cd81e044d41..338e4e9c33b8a40414ac90cd872ad56728e682d4 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -128,6 +128,9 @@ static void batadv_dat_entry_release(struct kref *ref) */ static void batadv_dat_entry_put(struct batadv_dat_entry *dat_entry) { + if (!dat_entry) + return; + kref_put(&dat_entry->refcount, batadv_dat_entry_release); } diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 
ef3f85b576c4c7cc6b0a7d2c2c3e987b53218291..62f6f13f89ffda04511a3d17dfe709759a7dedc8 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -60,7 +60,7 @@ * after rcu grace period * @ref: kref pointer of the gw_node */ -static void batadv_gw_node_release(struct kref *ref) +void batadv_gw_node_release(struct kref *ref) { struct batadv_gw_node *gw_node; @@ -70,16 +70,6 @@ static void batadv_gw_node_release(struct kref *ref) kfree_rcu(gw_node, rcu); } -/** - * batadv_gw_node_put() - decrement the gw_node refcounter and possibly release - * it - * @gw_node: gateway node to free - */ -void batadv_gw_node_put(struct batadv_gw_node *gw_node) -{ - kref_put(&gw_node->refcount, batadv_gw_node_release); -} - /** * batadv_gw_get_selected_gw_node() - Get currently selected gateway * @bat_priv: the bat priv with all the soft interface information diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 88b5dba843547f714d4c05677958aa2a66d3c496..c5b1de586fde0b2322724cea2eab21d7306ca1a3 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h @@ -9,6 +9,7 @@ #include "main.h" +#include #include #include #include @@ -28,7 +29,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, void batadv_gw_node_delete(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node); void batadv_gw_node_free(struct batadv_priv *bat_priv); -void batadv_gw_node_put(struct batadv_gw_node *gw_node); +void batadv_gw_node_release(struct kref *ref); struct batadv_gw_node * batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv); int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); @@ -40,4 +41,17 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node); +/** + * batadv_gw_node_put() - decrement the gw_node refcounter and possibly release + * it + * @gw_node: gateway node to free + */ +static inline void batadv_gw_node_put(struct batadv_gw_node *gw_node) +{ + if (!gw_node) + return; + + kref_put(&gw_node->refcount, batadv_gw_node_release); +} + #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 33904595fc56a1e512edd5f217ff602f5a9744fe..fe0898a9b4e82963652da8a32615ea2030e620f9 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -151,22 +151,25 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) struct net *net = dev_net(net_dev); struct net_device *parent_dev; struct net *parent_net; + int iflink; bool ret; /* check if this is a batman-adv mesh interface */ if (batadv_softif_is_valid(net_dev)) return true; - /* no more parents..stop recursion */ - if (dev_get_iflink(net_dev) == 0 || - dev_get_iflink(net_dev) == net_dev->ifindex) + iflink = dev_get_iflink(net_dev); + if (iflink == 0) return false; parent_net = batadv_getlink_net(net_dev, net); + /* iflink to itself, most likely physical device */ + if (net == parent_net && iflink == net_dev->ifindex) + return false; + /* recurse over the parent device */ - parent_dev = __dev_get_by_index((struct net *)parent_net, - dev_get_iflink(net_dev)); + parent_dev = __dev_get_by_index((struct net *)parent_net, iflink); /* if we got a NULL parent_dev there is something broken.. 
*/ if (!parent_dev) { pr_err("Cannot find parent device\n"); @@ -216,14 +219,15 @@ static struct net_device *batadv_get_real_netdevice(struct net_device *netdev) struct net_device *real_netdev = NULL; struct net *real_net; struct net *net; - int ifindex; + int iflink; ASSERT_RTNL(); if (!netdev) return NULL; - if (netdev->ifindex == dev_get_iflink(netdev)) { + iflink = dev_get_iflink(netdev); + if (iflink == 0) { dev_hold(netdev); return netdev; } @@ -233,9 +237,16 @@ static struct net_device *batadv_get_real_netdevice(struct net_device *netdev) goto out; net = dev_net(hard_iface->soft_iface); - ifindex = dev_get_iflink(netdev); real_net = batadv_getlink_net(netdev, net); - real_netdev = dev_get_by_index(real_net, ifindex); + + /* iflink to itself, most likely physical device */ + if (net == real_net && netdev->ifindex == iflink) { + real_netdev = netdev; + dev_hold(real_netdev); + goto out; + } + + real_netdev = dev_get_by_index(real_net, iflink); out: if (hard_iface) diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h index b1855d9d0b062e446b1e637e5d5fa86a2c1ead54..ba5850cfb2774958ce2cd07fc0c6f97ba3721153 100644 --- a/net/batman-adv/hard-interface.h +++ b/net/batman-adv/hard-interface.h @@ -113,6 +113,9 @@ int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing, */ static inline void batadv_hardif_put(struct batadv_hard_iface *hard_iface) { + if (!hard_iface) + return; + kref_put(&hard_iface->refcount, batadv_hardif_release); } diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 35b3e03c07774b1095b290938c49a224a0c9f34e..1481b803956892049c90a568078d21eee7a313d9 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -222,6 +222,9 @@ static void batadv_nc_node_release(struct kref *ref) */ static void batadv_nc_node_put(struct batadv_nc_node *nc_node) { + if (!nc_node) + return; + kref_put(&nc_node->refcount, batadv_nc_node_release); } @@ -246,6 +249,9 @@ static void batadv_nc_path_release(struct kref *ref) */ static void batadv_nc_path_put(struct batadv_nc_path *nc_path) { + if (!nc_path) + return; + kref_put(&nc_path->refcount, batadv_nc_path_release); } diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 805d8969bdfbc7f3012b6539bdefcee50e806aa4..2d38a09459bb53c8094a0895741efbdb9fb02b1b 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -178,7 +178,7 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, * and queue for free after rcu grace period * @ref: kref pointer of the originator-vlan object */ -static void batadv_orig_node_vlan_release(struct kref *ref) +void batadv_orig_node_vlan_release(struct kref *ref) { struct batadv_orig_node_vlan *orig_vlan; @@ -187,16 +187,6 @@ static void batadv_orig_node_vlan_release(struct kref *ref) kfree_rcu(orig_vlan, rcu); } -/** - * batadv_orig_node_vlan_put() - decrement the refcounter and possibly release - * the originator-vlan object - * @orig_vlan: the originator-vlan object to release - */ -void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan) -{ - kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release); -} - /** * batadv_originator_init() - Initialize all originator structures * @bat_priv: the bat priv with all the soft interface information @@ -232,7 +222,7 @@ int batadv_originator_init(struct batadv_priv *bat_priv) * free after rcu grace period * @ref: kref pointer of the neigh_ifinfo */ -static void batadv_neigh_ifinfo_release(struct kref *ref) +void 
batadv_neigh_ifinfo_release(struct kref *ref) { struct batadv_neigh_ifinfo *neigh_ifinfo; @@ -244,22 +234,12 @@ static void batadv_neigh_ifinfo_release(struct kref *ref) kfree_rcu(neigh_ifinfo, rcu); } -/** - * batadv_neigh_ifinfo_put() - decrement the refcounter and possibly release - * the neigh_ifinfo - * @neigh_ifinfo: the neigh_ifinfo object to release - */ -void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo) -{ - kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release); -} - /** * batadv_hardif_neigh_release() - release hardif neigh node from lists and * queue for free after rcu grace period * @ref: kref pointer of the neigh_node */ -static void batadv_hardif_neigh_release(struct kref *ref) +void batadv_hardif_neigh_release(struct kref *ref) { struct batadv_hardif_neigh_node *hardif_neigh; @@ -274,22 +254,12 @@ static void batadv_hardif_neigh_release(struct kref *ref) kfree_rcu(hardif_neigh, rcu); } -/** - * batadv_hardif_neigh_put() - decrement the hardif neighbors refcounter - * and possibly release it - * @hardif_neigh: hardif neigh neighbor to free - */ -void batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh) -{ - kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release); -} - /** * batadv_neigh_node_release() - release neigh_node from lists and queue for * free after rcu grace period * @ref: kref pointer of the neigh_node */ -static void batadv_neigh_node_release(struct kref *ref) +void batadv_neigh_node_release(struct kref *ref) { struct hlist_node *node_tmp; struct batadv_neigh_node *neigh_node; @@ -309,16 +279,6 @@ static void batadv_neigh_node_release(struct kref *ref) kfree_rcu(neigh_node, rcu); } -/** - * batadv_neigh_node_put() - decrement the neighbors refcounter and possibly - * release it - * @neigh_node: neigh neighbor to free - */ -void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node) -{ - kref_put(&neigh_node->refcount, batadv_neigh_node_release); -} - /** * batadv_orig_router_get() - router to the originator depending on iface * @orig_node: the orig node for the router @@ -851,7 +811,7 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb) * free after rcu grace period * @ref: kref pointer of the orig_ifinfo */ -static void batadv_orig_ifinfo_release(struct kref *ref) +void batadv_orig_ifinfo_release(struct kref *ref) { struct batadv_orig_ifinfo *orig_ifinfo; struct batadv_neigh_node *router; @@ -869,16 +829,6 @@ static void batadv_orig_ifinfo_release(struct kref *ref) kfree_rcu(orig_ifinfo, rcu); } -/** - * batadv_orig_ifinfo_put() - decrement the refcounter and possibly release - * the orig_ifinfo - * @orig_ifinfo: the orig_ifinfo object to release - */ -void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo) -{ - kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release); -} - /** * batadv_orig_node_free_rcu() - free the orig_node * @rcu: rcu pointer of the orig_node @@ -902,7 +852,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu) * free after rcu grace period * @ref: kref pointer of the orig_node */ -static void batadv_orig_node_release(struct kref *ref) +void batadv_orig_node_release(struct kref *ref) { struct hlist_node *node_tmp; struct batadv_neigh_node *neigh_node; @@ -948,16 +898,6 @@ static void batadv_orig_node_release(struct kref *ref) call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu); } -/** - * batadv_orig_node_put() - decrement the orig node refcounter and possibly - * release it - * @orig_node: the orig node to free - */ -void 
batadv_orig_node_put(struct batadv_orig_node *orig_node) -{ - kref_put(&orig_node->refcount, batadv_orig_node_release); -} - /** * batadv_originator_free() - Free all originator structures * @bat_priv: the bat priv with all the soft interface information diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 7bc01c138b3ab8bc473c91423d7a777817ce82f4..3b824a79743a252c47f2a5480ac7a633cff33555 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -21,19 +22,18 @@ bool batadv_compare_orig(const struct hlist_node *node, const void *data2); int batadv_originator_init(struct batadv_priv *bat_priv); void batadv_originator_free(struct batadv_priv *bat_priv); void batadv_purge_orig_ref(struct batadv_priv *bat_priv); -void batadv_orig_node_put(struct batadv_orig_node *orig_node); +void batadv_orig_node_release(struct kref *ref); struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, const u8 *addr); struct batadv_hardif_neigh_node * batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface, const u8 *neigh_addr); -void -batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh); +void batadv_hardif_neigh_release(struct kref *ref); struct batadv_neigh_node * batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node, struct batadv_hard_iface *hard_iface, const u8 *neigh_addr); -void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node); +void batadv_neigh_node_release(struct kref *ref); struct batadv_neigh_node * batadv_orig_router_get(struct batadv_orig_node *orig_node, const struct batadv_hard_iface *if_outgoing); @@ -43,7 +43,7 @@ batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh, struct batadv_neigh_ifinfo * batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh, struct batadv_hard_iface *if_outgoing); -void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo); +void batadv_neigh_ifinfo_release(struct kref *ref); int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb); int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset); @@ -54,7 +54,7 @@ batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node, struct batadv_orig_ifinfo * batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_outgoing); -void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo); +void batadv_orig_ifinfo_release(struct kref *ref); int batadv_orig_seq_print_text(struct seq_file *seq, void *offset); int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb); @@ -65,7 +65,7 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, struct batadv_orig_node_vlan * batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node, unsigned short vid); -void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan); +void batadv_orig_node_vlan_release(struct kref *ref); /** * batadv_choose_orig() - Return the index of the orig entry in the hash table @@ -86,4 +86,86 @@ static inline u32 batadv_choose_orig(const void *data, u32 size) struct batadv_orig_node * batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data); +/** + * batadv_orig_node_vlan_put() - decrement the refcounter and possibly release + * the originator-vlan object + * @orig_vlan: the originator-vlan object to release + */ +static inline void +batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan) +{ + if (!orig_vlan) + return; + + 
kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release); +} + +/** + * batadv_neigh_ifinfo_put() - decrement the refcounter and possibly release + * the neigh_ifinfo + * @neigh_ifinfo: the neigh_ifinfo object to release + */ +static inline void +batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo) +{ + if (!neigh_ifinfo) + return; + + kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release); +} + +/** + * batadv_hardif_neigh_put() - decrement the hardif neighbors refcounter + * and possibly release it + * @hardif_neigh: hardif neigh neighbor to free + */ +static inline void +batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh) +{ + if (!hardif_neigh) + return; + + kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release); +} + +/** + * batadv_neigh_node_put() - decrement the neighbors refcounter and possibly + * release it + * @neigh_node: neigh neighbor to free + */ +static inline void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node) +{ + if (!neigh_node) + return; + + kref_put(&neigh_node->refcount, batadv_neigh_node_release); +} + +/** + * batadv_orig_ifinfo_put() - decrement the refcounter and possibly release + * the orig_ifinfo + * @orig_ifinfo: the orig_ifinfo object to release + */ +static inline void +batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo) +{ + if (!orig_ifinfo) + return; + + kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release); +} + +/** + * batadv_orig_node_put() - decrement the orig node refcounter and possibly + * release it + * @orig_node: the orig node to free + */ +static inline void batadv_orig_node_put(struct batadv_orig_node *orig_node) +{ + if (!orig_node) + return; + + kref_put(&orig_node->refcount, batadv_orig_node_release); +} + #endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */ diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 7496047b318a412e51d33849bd22f4d59357b5c2..8f7c778255fba74f51e9f84ad094c5beb492b0f9 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -512,7 +512,7 @@ void batadv_interface_rx(struct net_device *soft_iface, * after rcu grace period * @ref: kref pointer of the vlan object */ -static void batadv_softif_vlan_release(struct kref *ref) +void batadv_softif_vlan_release(struct kref *ref) { struct batadv_softif_vlan *vlan; @@ -525,19 +525,6 @@ static void batadv_softif_vlan_release(struct kref *ref) kfree_rcu(vlan, rcu); } -/** - * batadv_softif_vlan_put() - decrease the vlan object refcounter and - * possibly release it - * @vlan: the vlan object to release - */ -void batadv_softif_vlan_put(struct batadv_softif_vlan *vlan) -{ - if (!vlan) - return; - - kref_put(&vlan->refcount, batadv_softif_vlan_release); -} - /** * batadv_softif_vlan_get() - get the vlan object for a specific vid * @bat_priv: the bat priv with all the soft interface information diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index 534e08d6ad919e3e244732d6646b4b48e11d5e6b..53aba17b90688b7efc478dde5a1b2d92ecf0b003 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h @@ -9,6 +9,7 @@ #include "main.h" +#include #include #include #include @@ -24,8 +25,21 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface); bool batadv_softif_is_valid(const struct net_device *net_dev); extern struct rtnl_link_ops batadv_link_ops; int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid); -void batadv_softif_vlan_put(struct batadv_softif_vlan *softif_vlan); 
+void batadv_softif_vlan_release(struct kref *ref); struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv, unsigned short vid); +/** + * batadv_softif_vlan_put() - decrease the vlan object refcounter and + * possibly release it + * @vlan: the vlan object to release + */ +static inline void batadv_softif_vlan_put(struct batadv_softif_vlan *vlan) +{ + if (!vlan) + return; + + kref_put(&vlan->refcount, batadv_softif_vlan_release); +} + #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */ diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index db7e3774825b5f5871b23c0e4fa5ce7afb2accd6..00d62a6c5e0efb619e18789271d86c69c395bfb4 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -357,6 +357,9 @@ static void batadv_tp_vars_release(struct kref *ref) */ static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars) { + if (!tp_vars) + return; + kref_put(&tp_vars->refcount, batadv_tp_vars_release); } diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index de946ea8f13c8c96d5153c62b68e480c2bbcade6..5f990a2061072b8f7e024efacf9460250e079e5e 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -248,6 +248,9 @@ static void batadv_tt_local_entry_release(struct kref *ref) static void batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry) { + if (!tt_local_entry) + return; + kref_put(&tt_local_entry->common.refcount, batadv_tt_local_entry_release); } @@ -271,7 +274,7 @@ static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu) * queue for free after rcu grace period * @ref: kref pointer of the nc_node */ -static void batadv_tt_global_entry_release(struct kref *ref) +void batadv_tt_global_entry_release(struct kref *ref) { struct batadv_tt_global_entry *tt_global_entry; @@ -283,17 +286,6 @@ static void batadv_tt_global_entry_release(struct kref *ref) call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu); } -/** - * batadv_tt_global_entry_put() - decrement the tt_global_entry refcounter and - * possibly release it - * @tt_global_entry: tt_global_entry to be free'd - */ -void batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry) -{ - kref_put(&tt_global_entry->common.refcount, - batadv_tt_global_entry_release); -} - /** * batadv_tt_global_hash_count() - count the number of orig entries * @bat_priv: the bat priv with all the soft interface information @@ -453,6 +445,9 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref) static void batadv_tt_orig_list_entry_put(struct batadv_tt_orig_list_entry *orig_entry) { + if (!orig_entry) + return; + kref_put(&orig_entry->refcount, batadv_tt_orig_list_entry_release); } @@ -2818,6 +2813,9 @@ static void batadv_tt_req_node_release(struct kref *ref) */ static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node) { + if (!tt_req_node) + return; + kref_put(&tt_req_node->refcount, batadv_tt_req_node_release); } diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index b24d35b9226a1f28be08e374264fd15130d53cdc..63cc8fd3ff66aea16fcecf9f2adbff03dc4ef63d 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -9,6 +9,7 @@ #include "main.h" +#include #include #include #include @@ -31,7 +32,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, struct batadv_tt_global_entry * batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid); -void 
batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry); +void batadv_tt_global_entry_release(struct kref *ref); int batadv_tt_global_hash_count(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid); struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv, @@ -58,4 +59,19 @@ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv, int batadv_tt_cache_init(void); void batadv_tt_cache_destroy(void); +/** + * batadv_tt_global_entry_put() - decrement the tt_global_entry refcounter and + * possibly release it + * @tt_global_entry: tt_global_entry to be free'd + */ +static inline void +batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry) +{ + if (!tt_global_entry) + return; + + kref_put(&tt_global_entry->common.refcount, + batadv_tt_global_entry_release); +} + #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index 6a23a566cde17b1ea80bec83a30e975496e8ea2c..99fc48efde5431c8231ebd7ac661f559cf09e94d 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c @@ -50,6 +50,9 @@ static void batadv_tvlv_handler_release(struct kref *ref) */ static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler) { + if (!tvlv_handler) + return; + kref_put(&tvlv_handler->refcount, batadv_tvlv_handler_release); } @@ -106,6 +109,9 @@ static void batadv_tvlv_container_release(struct kref *ref) */ static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv) { + if (!tvlv) + return; + kref_put(&tvlv->refcount, batadv_tvlv_container_release); } diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 1c5a0a60292d2ae96c54553f34cf5c2261e24292..ecd2ffcf2ba284e499dc7ab525d5fa3930c965ca 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -508,7 +508,9 @@ static void le_conn_timeout(struct work_struct *work) if (conn->role == HCI_ROLE_SLAVE) { /* Disable LE Advertising */ le_disable_advertising(hdev); + hci_dev_lock(hdev); hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT); + hci_dev_unlock(hdev); return; } diff --git a/net/can/isotp.c b/net/can/isotp.c index d0581dc6a65fd5565969f084e95fe126e17f947b..63e6e8923200bba51e8732ea5a0e412e43cfade7 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -1003,26 +1003,29 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, { struct sock *sk = sock->sk; struct sk_buff *skb; - int err = 0; - int noblock; + struct isotp_sock *so = isotp_sk(sk); + int noblock = flags & MSG_DONTWAIT; + int ret = 0; - noblock = flags & MSG_DONTWAIT; - flags &= ~MSG_DONTWAIT; + if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK)) + return -EINVAL; - skb = skb_recv_datagram(sk, flags, noblock, &err); + if (!so->bound) + return -EADDRNOTAVAIL; + + flags &= ~MSG_DONTWAIT; + skb = skb_recv_datagram(sk, flags, noblock, &ret); if (!skb) - return err; + return ret; if (size < skb->len) msg->msg_flags |= MSG_TRUNC; else size = skb->len; - err = memcpy_to_msg(msg, skb->data, size); - if (err < 0) { - skb_free_datagram(sk, skb); - return err; - } + ret = memcpy_to_msg(msg, skb->data, size); + if (ret < 0) + goto out_err; sock_recv_timestamp(msg, sk, skb); @@ -1032,9 +1035,13 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, memcpy(msg->msg_name, skb->cb, msg->msg_namelen); } + /* set length of return value */ + ret = (flags & MSG_TRUNC) ? 
skb->len : size; + +out_err: skb_free_datagram(sk, skb); - return size; + return ret; } static int isotp_release(struct socket *sock) @@ -1102,6 +1109,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) struct net *net = sock_net(sk); int ifindex; struct net_device *dev; + canid_t tx_id, rx_id; int err = 0; int notify_enetdown = 0; int do_rx_reg = 1; @@ -1109,8 +1117,18 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) if (len < ISOTP_MIN_NAMELEN) return -EINVAL; - if (addr->can_addr.tp.tx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) - return -EADDRNOTAVAIL; + /* sanitize tx/rx CAN identifiers */ + tx_id = addr->can_addr.tp.tx_id; + if (tx_id & CAN_EFF_FLAG) + tx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK); + else + tx_id &= CAN_SFF_MASK; + + rx_id = addr->can_addr.tp.rx_id; + if (rx_id & CAN_EFF_FLAG) + rx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK); + else + rx_id &= CAN_SFF_MASK; if (!addr->can_ifindex) return -ENODEV; @@ -1122,21 +1140,13 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) do_rx_reg = 0; /* do not validate rx address for functional addressing */ - if (do_rx_reg) { - if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) { - err = -EADDRNOTAVAIL; - goto out; - } - - if (addr->can_addr.tp.rx_id & (CAN_ERR_FLAG | CAN_RTR_FLAG)) { - err = -EADDRNOTAVAIL; - goto out; - } + if (do_rx_reg && rx_id == tx_id) { + err = -EADDRNOTAVAIL; + goto out; } if (so->bound && addr->can_ifindex == so->ifindex && - addr->can_addr.tp.rx_id == so->rxid && - addr->can_addr.tp.tx_id == so->txid) + rx_id == so->rxid && tx_id == so->txid) goto out; dev = dev_get_by_index(net, addr->can_ifindex); @@ -1160,8 +1170,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) ifindex = dev->ifindex; if (do_rx_reg) - can_rx_register(net, dev, addr->can_addr.tp.rx_id, - SINGLE_MASK(addr->can_addr.tp.rx_id), + can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id), isotp_rcv, sk, "isotp", sk); dev_put(dev); @@ -1181,8 +1190,8 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) /* switch to new settings */ so->ifindex = ifindex; - so->rxid = addr->can_addr.tp.rx_id; - so->txid = addr->can_addr.tp.tx_id; + so->rxid = rx_id; + so->txid = tx_id; so->bound = 1; out: diff --git a/net/core/dev.c b/net/core/dev.c index f20f0d5e5280e85e189af6319070af664629fc08..12089c484b304b25be98bebb8922c79c6098671d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1648,6 +1648,7 @@ void dev_close_many(struct list_head *head, bool unlink) call_netdevice_notifiers(NETDEV_DOWN, dev); if (unlink) list_del_init(&dev->close_list); + cond_resched(); } } EXPORT_SYMBOL(dev_close_many); @@ -4621,9 +4622,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, struct netdev_rx_queue *rxqueue; void *orig_data, *orig_data_end; u32 metalen, act = XDP_DROP; + bool orig_bcast, orig_host; __be16 orig_eth_type; struct ethhdr *eth; - bool orig_bcast; int hlen, off; u32 mac_len; @@ -4670,6 +4671,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, orig_data_end = xdp->data_end; orig_data = xdp->data; eth = (struct ethhdr *)xdp->data; + orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); orig_eth_type = eth->h_proto; @@ -4700,8 +4702,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, /* check if XDP changed eth hdr such SKB needs update */ eth = (struct ethhdr *)xdp->data; if ((orig_eth_type != eth->h_proto) || + (orig_host != 
ether_addr_equal_64bits(eth->h_dest, + skb->dev->dev_addr)) || (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { __skb_push(skb, ETH_HLEN); + skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, skb->dev); } @@ -9587,6 +9592,7 @@ static void rollback_registered_many(struct list_head *head) /* Remove XPS queueing entries */ netif_reset_xps_queues_gt(dev, 0); #endif + cond_resched(); } synchronize_net(); diff --git a/net/core/filter.c b/net/core/filter.c index fa473a58d1be5eef3ea18909ae2b3e43e5a8ba59..4e06f733191450517246c233b135c515c7365d7b 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2730,6 +2730,9 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, if (unlikely(flags)) return -EINVAL; + if (unlikely(len == 0)) + return 0; + /* First find the starting scatterlist element */ i = msg->sg.start; do { diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 99303897b7bb7683a24c7128831f273d7fb682f1..989b3f7ee85f40341a9549d424f7a4b262acbacc 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -213,7 +213,7 @@ static ssize_t speed_show(struct device *dev, if (!rtnl_trylock()) return restart_syscall(); - if (netif_running(netdev)) { + if (netif_running(netdev) && netif_device_present(netdev)) { struct ethtool_link_ksettings cmd; if (!__ethtool_get_link_ksettings(netdev, &cmd)) diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index b5bc680d475536de6da68a9a8815691cf81176a6..444cce0184c3722b4585920145bf459e29dfedfe 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -94,7 +94,7 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, } EXPORT_SYMBOL(secure_tcpv6_seq); -u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, +u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport) { const struct { @@ -142,7 +142,7 @@ u32 secure_tcp_seq(__be32 saddr, __be32 daddr, } EXPORT_SYMBOL_GPL(secure_tcp_seq); -u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) +u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) { net_secret_init(); return siphash_3u32((__force u32)saddr, (__force u32)daddr, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 08d7b78a61cd19d856ecf8b2b3b6f89c8072696b..379c426f8d656390ca28e2e84f4d49b4aa09e83a 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2156,7 +2156,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta) /* Free pulled out fragments. */ while ((list = skb_shinfo(skb)->frag_list) != insp) { skb_shinfo(skb)->frag_list = list->next; - kfree_skb(list); + consume_skb(list); } /* And insert new clone at head. */ if (clone) { @@ -3736,6 +3736,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, list_skb = list_skb->next; err = 0; + delta_truesize += nskb->truesize; if (skb_shared(nskb)) { tmp = skb_clone(nskb, GFP_ATOMIC); if (tmp) { @@ -3760,7 +3761,6 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, tail = nskb; delta_len += nskb->len; - delta_truesize += nskb->truesize; skb_push(nskb, -skb_network_offset(nskb) + offset); @@ -5237,11 +5237,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, if (skb_cloned(to)) return false; - /* The page pool signature of struct page will eventually figure out - * which pages can be recycled or not but for now let's prohibit slab - * allocated and page_pool allocated SKBs from being coalesced. + /* In general, avoid mixing slab allocated and page_pool allocated + * pages within the same SKB. 
However when @to is not pp_recycle and + * @from is cloned, we can transition frag pages from page_pool to + * reference counted. + * + * On the other hand, don't allow coalescing two pp_recycle SKBs if + * @from is cloned, in case the SKB is using page_pool fragment + * references (PP_FLAG_PAGE_FRAG). Since we only take full page + * references for cloned SKBs at the moment that would result in + * inconsistent reference counts. */ - if (to->pp_recycle != from->pp_recycle) + if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from))) return false; if (len <= skb_tailroom(to)) { @@ -6097,7 +6104,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb, /* Free pulled out fragments. */ while ((list = shinfo->frag_list) != insp) { shinfo->frag_list = list->next; - kfree_skb(list); + consume_skb(list); } /* And insert new clone at head. */ if (clone) { diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 02d6d254d94aa4cd587c7c89e07790fd67164f5a..9dec3d35af791ca20298887139ae4d6c45dbd997 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -1010,7 +1010,7 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb, struct sk_psock *psock; struct bpf_prog *prog; int ret = __SK_DROP; - int len = skb->len; + int len = orig_len; /* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */ skb = skb_clone(skb, GFP_ATOMIC); diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index a352ce4f878a374e078f95b38b11631295f4bc35..2535d3dfb92c8a8fc3a53bdc3d8bff37eb875dc1 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -2063,10 +2063,54 @@ u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev) } EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask); +static void dcbnl_flush_dev(struct net_device *dev) +{ + struct dcb_app_type *itr, *tmp; + + spin_lock_bh(&dcb_lock); + + list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) { + if (itr->ifindex == dev->ifindex) { + list_del(&itr->list); + kfree(itr); + } + } + + spin_unlock_bh(&dcb_lock); +} + +static int dcbnl_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + switch (event) { + case NETDEV_UNREGISTER: + if (!dev->dcbnl_ops) + return NOTIFY_DONE; + + dcbnl_flush_dev(dev); + + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +static struct notifier_block dcbnl_nb __read_mostly = { + .notifier_call = dcbnl_netdevice_event, +}; + static int __init dcbnl_init(void) { + int err; + INIT_LIST_HEAD(&dcb_app_list); + err = register_netdevice_notifier(&dcbnl_nb); + if (err) + return err; + rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0); rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0); diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 71c8ef7d40870b3321daba2620b43ee43c134186..f543fca6dfcbfd718eca7fcc430b6691afc13a6f 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -766,6 +766,7 @@ static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn) struct net_device *master; master = of_find_net_device_by_node(ethernet); + of_node_put(ethernet); if (!master) return -EPROBE_DEFER; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 67a081b5fb83c38355fec49a2f6f9658a8dd0fd4..911ad595dbb944f31a5708f86568a3e840f88965 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -158,7 +158,7 @@ void inet_sock_destruct(struct sock *sk) kfree(rcu_dereference_protected(inet->inet_opt, 1)); dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); - dst_release(sk->sk_rx_dst); + 
dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1)); sk_refcnt_debug_dec(sk); } EXPORT_SYMBOL(inet_sock_destruct); @@ -1375,8 +1375,11 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, } ops = rcu_dereference(inet_offloads[proto]); - if (likely(ops && ops->callbacks.gso_segment)) + if (likely(ops && ops->callbacks.gso_segment)) { segs = ops->callbacks.gso_segment(skb, features); + if (!segs) + skb->network_header = skb_mac_header(skb) + nhoff - skb->head; + } if (IS_ERR_OR_NULL(segs)) goto out; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index e2b91fca93ba9c816128b90896eb662c4f9102dd..9aae82145bc16d957f1ca6b98e3bbe410de3b56c 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -678,7 +678,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); u32 padto; - padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached)); + padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached)); if (skb->len < padto) esp.tfclen = padto - skb->len; } diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 5aa7344dbec7fd80e8bab542e675d26ea423ad16..3450c9ba2728c7a1f5ac021fa88db37ff0cd977b 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -160,6 +160,9 @@ static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x, skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; } + if (proto == IPPROTO_IPV6) + skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4; + __skb_pull(skb, skb_transport_offset(skb)); ops = rcu_dereference(inet_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index e093847c334da59f789509aba9184a58e20951f8..cf178d8eea81a30a84115f69753993557befff98 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -504,7 +504,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; } -static u32 inet_sk_port_offset(const struct sock *sk) +static u64 inet_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); @@ -711,8 +711,20 @@ void inet_unhash(struct sock *sk) } EXPORT_SYMBOL_GPL(inet_unhash); +/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm + * Note that we use 32bit integers (vs RFC 'short integers') + * because 2^16 is not a multiple of num_ephemeral and this + * property might be used by clever attacker. + * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though + * attacks were since demonstrated, thus we use 65536 instead to really + * give more isolation and privacy, at the expense of 256kB of kernel + * memory. 
+ */ +#define INET_TABLE_PERTURB_SHIFT 16 +static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT]; + int __inet_hash_connect(struct inet_timewait_death_row *death_row, - struct sock *sk, u32 port_offset, + struct sock *sk, u64 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **)) { @@ -724,8 +736,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct inet_bind_bucket *tb; u32 remaining, offset; int ret, i, low, high; - static u32 hint; int l3mdev; + u32 index; if (port) { head = &hinfo->bhash[inet_bhashfn(net, port, @@ -752,7 +764,11 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, if (likely(remaining > 1)) remaining &= ~1U; - offset = (hint + port_offset) % remaining; + net_get_random_once(table_perturb, sizeof(table_perturb)); + index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT); + + offset = READ_ONCE(table_perturb[index]) + port_offset; + offset %= remaining; /* In first pass we try ports of @low parity. * inet_csk_get_port() does the opposite choice. */ @@ -806,7 +822,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; ok: - hint += i + 2; + WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2); /* Head lock still held and bh's disabled */ inet_bind_hash(sk, tb, port); @@ -829,7 +845,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { - u32 port_offset = 0; + u64 port_offset = 0; if (!inet_sk(sk)->inet_num) port_offset = inet_sk_port_offset(sk); diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 323cb231cb580b7046bc4a265f68c92dbc0e4e14..e60ca03543a536b2b9a32522d87b93f94ea5b02b 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -187,7 +187,6 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) (int)ident, &ipv6_hdr(skb)->daddr, dif); #endif } else { - pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol)); return NULL; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ce787c38679384866687d3522f406df71d502983..c72d0de8bf71432d6bce6b73ec73db7e00060ff2 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -529,6 +529,15 @@ void __ip_select_ident(struct net *net, struct iphdr *iph, int segs) } EXPORT_SYMBOL(__ip_select_ident); +static void ip_rt_fix_tos(struct flowi4 *fl4) +{ + __u8 tos = RT_FL_TOS(fl4); + + fl4->flowi4_tos = tos & IPTOS_RT_MASK; + fl4->flowi4_scope = tos & RTO_ONLINK ? 
+ RT_SCOPE_LINK : RT_SCOPE_UNIVERSE; +} + static void __build_flow_key(const struct net *net, struct flowi4 *fl4, const struct sock *sk, const struct iphdr *iph, @@ -853,6 +862,7 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf rt = (struct rtable *) dst; __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0); + ip_rt_fix_tos(&fl4); __ip_do_redirect(rt, skb, &fl4, true); } @@ -1077,6 +1087,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct flowi4 fl4; ip_rt_build_flow_key(&fl4, sk, skb); + ip_rt_fix_tos(&fl4); /* Don't make lookup fail for bridged encapsulations */ if (skb && netif_is_any_bridge_port(skb->dev)) @@ -1151,6 +1162,8 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) goto out; new = true; + } else { + ip_rt_fix_tos(&fl4); } __ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu); @@ -2524,7 +2537,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res, struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, const struct sk_buff *skb) { - __u8 tos = RT_FL_TOS(fl4); struct fib_result res = { .type = RTN_UNSPEC, .fi = NULL, @@ -2534,9 +2546,7 @@ struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, struct rtable *rth; fl4->flowi4_iif = LOOPBACK_IFINDEX; - fl4->flowi4_tos = tos & IPTOS_RT_MASK; - fl4->flowi4_scope = ((tos & RTO_ONLINK) ? - RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); + ip_rt_fix_tos(fl4); rcu_read_lock(); rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index fcd792816756784b558d28512feaceaa5ae3ae88..4d5280780a8e1eaacf214328dc1c50af3563fca4 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1656,11 +1656,13 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, if (!copied) copied = used; break; - } else if (used <= len) { - seq += used; - copied += used; - offset += used; } + if (WARN_ON_ONCE(used > len)) + used = len; + seq += used; + copied += used; + offset += used; + /* If recv_actor drops the lock (e.g. TCP splice * receive) the skb pointer might be invalid when * getting here: tcp_collapse might have deleted it @@ -2814,8 +2816,7 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); - dst_release(sk->sk_rx_dst); - sk->sk_rx_dst = NULL; + dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); tcp_saved_syn_free(tp); tp->compressed_ack = 0; tp->segs_in = 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 11f3f8e23a8a9f449fef00091e29a0b91c3b6f6e..6f4ac0f10f57fb58db992ac39afa8e83b365f5cb 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5744,7 +5744,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) trace_tcp_probe(sk, skb); tcp_mstamp_refresh(tp); - if (unlikely(!sk->sk_rx_dst)) + if (unlikely(!rcu_access_pointer(sk->sk_rx_dst))) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* * Header prediction. 
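The tcp_input.c hunk above and the tcp_ipv4.c, tcp_ipv6.c and udp.c hunks that follow apply one recurring conversion: sk->sk_rx_dst becomes an RCU-annotated pointer that is published with rcu_assign_pointer(), read with rcu_dereference()/rcu_access_pointer(), and cleared by atomically swapping in NULL before dropping the old reference. Below is a minimal userspace sketch of that publish/read/clear pattern, assuming C11 atomics in place of the kernel's RCU and dst refcounting primitives; the helper names and the deliberately simplified, single-threaded refcounting are illustrative only.

    /* Hypothetical analogue of the sk_rx_dst handling in the hunks above. */
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dst { int refcnt; };                    /* stand-in for struct dst_entry */

    static _Atomic(struct dst *) rx_dst;           /* plays the role of sk->sk_rx_dst */

    static void dst_put(struct dst *d)
    {
            if (d && --d->refcnt == 0)             /* not atomic: single-threaded demo only */
                    free(d);
    }

    static void cache_publish(struct dst *d)       /* cf. rcu_assign_pointer() after dst_hold_safe() */
    {
            d->refcnt++;                           /* the cache keeps its own reference */
            atomic_store_explicit(&rx_dst, d, memory_order_release);
    }

    static struct dst *cache_peek(void)            /* cf. rcu_dereference()/rcu_access_pointer() */
    {
            return atomic_load_explicit(&rx_dst, memory_order_acquire);
    }

    static void cache_clear(void)                  /* cf. dst_release(xchg(&sk->sk_rx_dst, NULL)) */
    {
            dst_put(atomic_exchange_explicit(&rx_dst, NULL, memory_order_acq_rel));
    }

    int main(void)
    {
            struct dst *d = calloc(1, sizeof(*d));

            d->refcnt = 1;                         /* caller's reference */
            cache_publish(d);
            printf("cached %p\n", (void *)cache_peek());
            cache_clear();                         /* releases the cache's reference exactly once */
            dst_put(d);                            /* releases the caller's reference */
            return 0;
    }

The exchange in cache_clear() serves the same purpose as in tcp_disconnect() above: whichever path clears the cache obtains the old pointer atomically, so the old entry is released exactly once. In the kernel, RCU additionally defers the actual free so concurrent readers under rcu_read_lock() remain safe; the sketch omits that part.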
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ebfeeeadd47ce6552ec12e23baafa00476fb0c6f..078f3a5d65b3e1b7193150183d4cce51d8857e05 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1670,15 +1670,18 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) struct sock *rsk; if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ - struct dst_entry *dst = sk->sk_rx_dst; + struct dst_entry *dst; + + dst = rcu_dereference_protected(sk->sk_rx_dst, + lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || !dst->ops->check(dst, 0)) { + RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); - sk->sk_rx_dst = NULL; } } tcp_rcv_established(sk, skb); @@ -1753,7 +1756,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { - struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); + struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); @@ -2160,7 +2163,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) struct dst_entry *dst = skb_dst(skb); if (dst && dst_hold_safe(dst)) { - sk->sk_rx_dst = dst; + rcu_assign_pointer(sk->sk_rx_dst, dst); inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; } } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index da8bcdd90715298caa0e315e80abb51041640ac2..68b066d2fe0aa0d76554a4c708e82ce0cc7d66b7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3785,6 +3785,7 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) */ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) { + struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_request *fo = tp->fastopen_req; int space, err = 0; @@ -3799,8 +3800,10 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) * private TCP options. The cost is reduced data space in SYN :( */ tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); + /* Sync mss_cache after updating the mss_clamp */ + tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); - space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - + space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) - MAX_TCP_OPTION_SPACE; space = min_t(size_t, space, fo->size); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 8cca8bda061231f333fa7e43d4bf7a15656e62b7..763adffb84d68f52234cfcb2086b18783e4a25b5 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -598,6 +598,12 @@ void udp_encap_enable(void) } EXPORT_SYMBOL(udp_encap_enable); +void udp_encap_disable(void) +{ + static_branch_dec(&udp_encap_needed_key); +} +EXPORT_SYMBOL(udp_encap_disable); + /* Handler for tunnels with arbitrary destination ports: no socket lookup, go * through error handlers in encapsulations looking for a match. 
*/ @@ -2186,7 +2192,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old; if (dst_hold_safe(dst)) { - old = xchg(&sk->sk_rx_dst, dst); + old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst); dst_release(old); return old != dst; } @@ -2376,7 +2382,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, struct dst_entry *dst = skb_dst(skb); int ret; - if (unlikely(sk->sk_rx_dst != dst)) + if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) udp_sk_rx_dst_set(sk, dst); ret = udp_unicast_rcv_skb(sk, skb, uh); @@ -2535,7 +2541,7 @@ int udp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = READ_ONCE(sk->sk_rx_dst); + dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c index b91003538d87a03855df9bd35b751017d37fe031..bc3a043a5d5c7635b7acabe76bb19f7ad59d8db9 100644 --- a/net/ipv4/udp_tunnel_nic.c +++ b/net/ipv4/udp_tunnel_nic.c @@ -846,7 +846,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn) list_for_each_entry(node, &info->shared->devices, list) if (node->dev == dev) break; - if (node->dev != dev) + if (list_entry_is_head(node, &info->shared->devices, list)) return; list_del(&node->list); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d9785ea78593e02709d47e4a52a6c5741f700f8f..1064edea88419227cef01ccf254679bb21b0fd8a 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3716,6 +3716,7 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister) struct inet6_dev *idev; struct inet6_ifaddr *ifa, *tmp; bool keep_addr = false; + bool was_ready; int state, i; ASSERT_RTNL(); @@ -3781,7 +3782,10 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister) addrconf_del_rs_timer(idev); - /* Step 2: clear flags for stateless addrconf */ + /* Step 2: clear flags for stateless addrconf, repeated down + * detection + */ + was_ready = idev->if_flags & IF_READY; if (!unregister) idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); @@ -3855,7 +3859,7 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister) if (unregister) { ipv6_ac_destroy_dev(idev); ipv6_mc_destroy_dev(idev); - } else { + } else if (was_ready) { ipv6_mc_down(idev); } @@ -4979,6 +4983,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) goto error; + spin_lock_bh(&ifa->lock); if (!((ifa->flags&IFA_F_PERMANENT) && (ifa->prefered_lft == INFINITY_LIFE_TIME))) { preferred = ifa->prefered_lft; @@ -5000,6 +5005,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, preferred = INFINITY_LIFE_TIME; valid = INFINITY_LIFE_TIME; } + spin_unlock_bh(&ifa->lock); if (!ipv6_addr_any(&ifa->peer_addr)) { if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 || diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index e106dc3ec039c7020ddc2fe36b088023e1cf7c18..20c7bef6829e1fc89188545c9f4d7f33c7937a0b 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -713,7 +713,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); u32 padto; - padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached)); + padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached)); if (skb->len < padto) esp.tfclen = padto - skb->len; } @@ -813,8 +813,7 @@ int esp6_input_done2(struct sk_buff *skb, int err) struct tcphdr *th; offset = 
ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); - - if (offset < 0) { + if (offset == -1) { err = -EINVAL; goto out; } diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 4af56affaafd436fbd35ade87ffd2b7c8e6d4d91..1c3f02d05d2bfdaad4e915a1dca29c9d71b310bf 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -198,6 +198,9 @@ static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x, ipv6_skip_exthdr(skb, 0, &proto, &frag); } + if (proto == IPPROTO_IPIP) + skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6; + __skb_pull(skb, skb_transport_offset(skb)); ops = rcu_dereference(inet6_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 67c9114835c84864a3353c6f1c16853ea62a21c5..c9e7ecc7afd3e24efa93f51cd8630f506537b103 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -308,7 +308,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; } -static u32 inet6_sk_port_offset(const struct sock *sk) +static u64 inet6_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); @@ -320,7 +320,7 @@ static u32 inet6_sk_port_offset(const struct sock *sk) int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { - u32 port_offset = 0; + u64 port_offset = 0; if (!inet_sk(sk)->inet_num) port_offset = inet6_sk_port_offset(sk); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index aa673a6a7e4328d03e89527ed3daaccd69c39084..ceb85c67ce3952b7142eeec29bff46a7eaf5217b 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -450,8 +450,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, err = -EINVAL; goto done; } - if (fl_shared_exclusive(fl) || fl->opt) + if (fl_shared_exclusive(fl) || fl->opt) { + WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1); static_branch_deferred_inc(&ipv6_flowlabel_exclusive); + } return fl; done: diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index a80f90bf3ae7dc1aec904fd93b3d8e8c87a926e4..15c8eef1ef443854e207bfaa238cb7aa321fedb9 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -113,6 +113,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, if (likely(ops && ops->callbacks.gso_segment)) { skb_reset_transport_header(skb); segs = ops->callbacks.gso_segment(skb, features); + if (!segs) + skb->network_header = skb_mac_header(skb) + nhoff - skb->head; } if (IS_ERR_OR_NULL(segs)) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 54cabf1c2ae1500ff9c03a7c2c230e07dcf409c7..2aa39ce7093dfd32870f9986c762ef9a7a680791 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1432,8 +1432,6 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, if (np->frag_size) mtu = np->frag_size; } - if (mtu < IPV6_MIN_MTU) - return -EINVAL; cork->base.fragsize = mtu; cork->base.gso_size = ipc6->gso_size; cork->base.tx_flags = 0; @@ -1495,8 +1493,6 @@ static int __ip6_append_data(struct sock *sk, fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + (opt ? opt->opt_nflen : 0); - maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - - sizeof(struct frag_hdr); headersize = sizeof(struct ipv6hdr) + (opt ? 
opt->opt_flen + opt->opt_nflen : 0) + @@ -1504,6 +1500,13 @@ static int __ip6_append_data(struct sock *sk, sizeof(struct frag_hdr) : 0) + rt->rt6i_nfheader_len; + if (mtu <= fragheaderlen || + ((mtu - fragheaderlen) & ~7) + fragheaderlen <= sizeof(struct frag_hdr)) + goto emsgsize; + + maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - + sizeof(struct frag_hdr); + /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit * the first fragment */ diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index df33145b876c69eb0c16d2be0ec6441c71a30e8e..b87b04526e651bac97a18bb26087928b7d1f9619 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -107,7 +107,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) if (dst && dst_hold_safe(dst)) { const struct rt6_info *rt = (const struct rt6_info *)dst; - sk->sk_rx_dst = dst; + rcu_assign_pointer(sk->sk_rx_dst, dst); inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); } @@ -1482,15 +1482,18 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ - struct dst_entry *dst = sk->sk_rx_dst; + struct dst_entry *dst; + + dst = rcu_dereference_protected(sk->sk_rx_dst, + lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || dst->ops->check(dst, np->rx_dst_cookie) == NULL) { + RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); - sk->sk_rx_dst = NULL; } } @@ -1842,7 +1845,7 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { - struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); + struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 503706397839ce460951e523bd6a69e28c6dd153..e378ebee4a4dad5053282ec66d23e74f35373dbb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -940,7 +940,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, struct dst_entry *dst = skb_dst(skb); int ret; - if (unlikely(sk->sk_rx_dst != dst)) + if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) udp6_sk_rx_dst_set(sk, dst); if (!uh->check && !udp_sk(sk)->no_check6_rx) { @@ -1054,7 +1054,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = READ_ONCE(sk->sk_rx_dst); + dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); @@ -1609,8 +1609,10 @@ void udpv6_destroy_sock(struct sock *sk) if (encap_destroy) encap_destroy(sk); } - if (up->encap_enabled) + if (up->encap_enabled) { static_branch_dec(&udpv6_encap_needed_key); + udp_encap_disable(); + } } inet6_destroy_sock(sk); diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 6abb45a671994128d618e82f192ec5d943e29185..ee349c2438782315b129ace5511b320af6e7b828 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -52,6 +52,19 @@ static int __xfrm6_output_finish(struct net *net, struct sock *sk, struct sk_buf return xfrm_output(sk, skb); } +static int xfrm6_noneed_fragment(struct sk_buff *skb) +{ + struct frag_hdr *fh; + u8 prevhdr = ipv6_hdr(skb)->nexthdr; + + if (prevhdr != NEXTHDR_FRAGMENT) + return 0; + fh = (struct frag_hdr *)(skb->data + sizeof(struct ipv6hdr)); + if 
(fh->nexthdr == NEXTHDR_ESP || fh->nexthdr == NEXTHDR_AUTH) + return 1; + return 0; +} + static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); @@ -80,6 +93,9 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) xfrm6_local_rxpmtu(skb, mtu); kfree_skb(skb); return -EMSGSIZE; + } else if (toobig && xfrm6_noneed_fragment(skb)) { + skb->ignore_df = 1; + goto skip_frag; } else if (!skb->ignore_df && toobig && skb->sk) { xfrm_local_error(skb, mtu); kfree_skb(skb); diff --git a/net/key/af_key.c b/net/key/af_key.c index b95684b8903e457f8fd43370fda4c326587adfd4..bd9b5c573b5a4dd328d44af315a9efd5ab51bcc2 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2627,7 +2627,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb, } return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i, - kma ? &k : NULL, net, NULL); + kma ? &k : NULL, net, NULL, 0); out: return err; diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 190f300d8923c8392df58aba7e8e66eccc1e9038..4b4ab1961068fcbcc170d5eb960eb03a344a9058 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -9,7 +9,7 @@ * Copyright 2007, Michael Wu * Copyright 2007-2010, Intel Corporation * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2021 Intel Corporation + * Copyright (C) 2018 - 2022 Intel Corporation */ #include @@ -626,6 +626,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, return -EINVAL; } + if (test_sta_flag(sta, WLAN_STA_MFP) && + !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { + ht_dbg(sdata, + "MFP STA not authorized - deny BA session request %pM tid %d\n", + sta->sta.addr, tid); + return -EINVAL; + } + /* * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a * member of an IBSS, and has no other existing Block Ack agreement diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d46ed4cbe771757b48b15090d33a17a8d4b83096..8010967a68741447924990329b686f7e853a6ec7 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2076,14 +2076,12 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, const struct mesh_setup *setup) { u8 *new_ie; - const u8 *old_ie; struct ieee80211_sub_if_data *sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh); int i; /* allocate information elements */ new_ie = NULL; - old_ie = ifmsh->ie; if (setup->ie_len) { new_ie = kmemdup(setup->ie, setup->ie_len, @@ -2093,7 +2091,6 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, } ifmsh->ie_len = setup->ie_len; ifmsh->ie = new_ie; - kfree(old_ie); /* now copy the rest of the setup parameters */ ifmsh->mesh_id_len = setup->mesh_id_len; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 7f2be08b72a56a80be12d822a75c66420af38591..fe8f586886b414e22d4cebce25d13d20c69b3d6d 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -374,7 +374,7 @@ struct ieee80211_mgd_auth_data { u8 key[WLAN_KEY_LEN_WEP104]; u8 key_len, key_idx; - bool done; + bool done, waiting; bool peer_confirmed; bool timeout_started; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 778bf262418b546dad1f797a3a8ee77f539db319..0dba353d3f8fe16c786f7cc383778e6a0b8a60f7 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -37,6 +37,7 @@ #define IEEE80211_AUTH_TIMEOUT_SAE (HZ * 2) #define IEEE80211_AUTH_MAX_TRIES 3 #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) +#define IEEE80211_AUTH_WAIT_SAE_RETRY (HZ * 2) #define 
IEEE80211_ASSOC_TIMEOUT (HZ / 5) #define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2) #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) @@ -2999,8 +3000,15 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, (status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED || (auth_transaction == 1 && (status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT || - status_code == WLAN_STATUS_SAE_PK)))) + status_code == WLAN_STATUS_SAE_PK)))) { + /* waiting for userspace now */ + ifmgd->auth_data->waiting = true; + ifmgd->auth_data->timeout = + jiffies + IEEE80211_AUTH_WAIT_SAE_RETRY; + ifmgd->auth_data->timeout_started = true; + run_again(sdata, ifmgd->auth_data->timeout); return; + } sdata_info(sdata, "%pM denied authentication (status %d)\n", mgmt->sa, status_code); @@ -4526,10 +4534,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) if (ifmgd->auth_data && ifmgd->auth_data->timeout_started && time_after(jiffies, ifmgd->auth_data->timeout)) { - if (ifmgd->auth_data->done) { + if (ifmgd->auth_data->done || ifmgd->auth_data->waiting) { /* - * ok ... we waited for assoc but userspace didn't, - * so let's just kill the auth data + * ok ... we waited for assoc or continuation but + * userspace didn't do it, so kill the auth data */ ieee80211_destroy_auth_data(sdata, false); } else if (ieee80211_auth(sdata)) { diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index d27c444a19ed1d2335a249e1966af4d13f30a8ca..1e7614abd947de8fab8b3bfc1c11cabd8bbd10e3 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2910,13 +2910,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) ether_addr_equal(sdata->vif.addr, hdr->addr3)) return RX_CONTINUE; - ac = ieee80211_select_queue_80211(sdata, skb, hdr); + ac = ieee802_1d_to_ac[skb->priority]; q = sdata->vif.hw_queue[ac]; if (ieee80211_queue_stopped(&local->hw, q)) { IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); return RX_DROP_MONITOR; } - skb_set_queue_mapping(skb, q); + skb_set_queue_mapping(skb, ac); if (!--mesh_hdr->ttl) { if (!is_multicast_ether_addr(hdr->addr1)) diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 63d032191e6269acff9e48b51b6c0e09a411ceb1..60332fdb6dd4429fb1e22a645476b3954d9f04e3 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -406,14 +406,15 @@ static int __nf_register_net_hook(struct net *net, int pf, p = nf_entry_dereference(*pp); new_hooks = nf_hook_entries_grow(p, reg); - if (!IS_ERR(new_hooks)) + if (!IS_ERR(new_hooks)) { + hooks_validate(new_hooks); rcu_assign_pointer(*pp, new_hooks); + } mutex_unlock(&nf_hook_mutex); if (IS_ERR(new_hooks)) return PTR_ERR(new_hooks); - hooks_validate(new_hooks); #ifdef CONFIG_NETFILTER_INGRESS if (nf_ingress_hook(reg, pf)) net_inc_ingress_queue(); diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index c8fb2187ad4b2df200273acdbdf2163c831de09b..3f785bdfa942d7149bf65573ca17c3fe3ce2164a 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -354,8 +354,8 @@ static void tcp_options(const struct sk_buff *skb, length, buff); BUG_ON(ptr == NULL); - state->td_scale = - state->flags = 0; + state->td_scale = 0; + state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL; while (length > 0) { int opcode=*ptr++; @@ -840,6 +840,16 @@ static bool nf_conntrack_tcp_established(const struct nf_conn *ct) test_bit(IPS_ASSURED_BIT, &ct->status); } +static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state) +{ + state->td_end = 0; + state->td_maxend = 0; + state->td_maxwin = 0; + 
state->td_maxack = 0; + state->td_scale = 0; + state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL; +} + /* Returns verdict for packet, or -1 for invalid. */ int nf_conntrack_tcp_packet(struct nf_conn *ct, struct sk_buff *skb, @@ -946,8 +956,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK; ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags = ct->proto.tcp.last_flags; - memset(&ct->proto.tcp.seen[dir], 0, - sizeof(struct ip_ct_tcp_state)); + nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]); break; } ct->proto.tcp.last_index = index; diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index bbd1209694b89bb1164c39ce581f54b24f61e8d4..bb8607ff94bc7a46751724d530efc58d325cdde0 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -46,6 +46,15 @@ void nf_unregister_queue_handler(struct net *net) } EXPORT_SYMBOL(nf_unregister_queue_handler); +static void nf_queue_sock_put(struct sock *sk) +{ +#ifdef CONFIG_INET + sock_gen_put(sk); +#else + sock_put(sk); +#endif +} + static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) { struct nf_hook_state *state = &entry->state; @@ -56,7 +65,7 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) if (state->out) dev_put(state->out); if (state->sk) - sock_put(state->sk); + nf_queue_sock_put(state->sk); #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (entry->physin) @@ -91,16 +100,17 @@ static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry) } /* Bump dev refs so they don't vanish while packet is out */ -void nf_queue_entry_get_refs(struct nf_queue_entry *entry) +bool nf_queue_entry_get_refs(struct nf_queue_entry *entry) { struct nf_hook_state *state = &entry->state; + if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt)) + return false; + if (state->in) dev_hold(state->in); if (state->out) dev_hold(state->out); - if (state->sk) - sock_hold(state->sk); #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (entry->physin) @@ -108,6 +118,7 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry) if (entry->physout) dev_hold(entry->physout); #endif + return true; } EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs); @@ -178,6 +189,18 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, break; } + if (skb_sk_is_prefetched(skb)) { + struct sock *sk = skb->sk; + + if (!sk_is_refcounted(sk)) { + if (!refcount_inc_not_zero(&sk->sk_refcnt)) + return -ENOTCONN; + + /* drop refcount on skb_orphan */ + skb->destructor = sock_edemux; + } + } + entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC); if (!entry) return -ENOMEM; @@ -196,7 +219,10 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, __nf_queue_entry_init_physdevs(entry); - nf_queue_entry_get_refs(entry); + if (!nf_queue_entry_get_refs(entry)) { + kfree(entry); + return -ENOTCONN; + } switch (entry->state.pf) { case AF_INET: diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index b781ba97c474e3216e85bf95c4505358297ec1d3..ea162e36e0e4b9358cd2741e6d995efd083ad40e 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2679,27 +2679,31 @@ static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx, err = nf_tables_expr_parse(ctx, nla, &info); if (err < 0) - goto err1; + goto err_expr_parse; + + err = -EOPNOTSUPP; + if (!(info.ops->type->flags & NFT_EXPR_STATEFUL)) + goto err_expr_stateful; err = -ENOMEM; expr = kzalloc(info.ops->size, GFP_KERNEL); if (expr == NULL) - goto err2; + goto 
err_expr_stateful; err = nf_tables_newexpr(ctx, &info, expr); if (err < 0) - goto err3; + goto err_expr_new; return expr; -err3: +err_expr_new: kfree(expr); -err2: +err_expr_stateful: owner = info.ops->type->owner; if (info.ops->type->release_ops) info.ops->type->release_ops(info.ops); module_put(owner); -err1: +err_expr_parse: return ERR_PTR(err); } @@ -4047,6 +4051,9 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr, u32 len; int err; + if (desc->field_count >= ARRAY_SIZE(desc->field_len)) + return -E2BIG; + err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr, nft_concat_policy, NULL); if (err < 0) @@ -4056,9 +4063,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr, return -EINVAL; len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN])); - - if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT) - return -E2BIG; + if (!len || len > U8_MAX) + return -EINVAL; desc->field_len[desc->field_count++] = len; @@ -4069,7 +4075,8 @@ static int nft_set_desc_concat(struct nft_set_desc *desc, const struct nlattr *nla) { struct nlattr *attr; - int rem, err; + u32 num_regs = 0; + int rem, err, i; nla_for_each_nested(attr, nla, rem) { if (nla_type(attr) != NFTA_LIST_ELEM) @@ -4080,6 +4087,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc, return err; } + for (i = 0; i < desc->field_count; i++) + num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32)); + + if (num_regs > NFT_REG32_COUNT) + return -E2BIG; + return 0; } @@ -5055,9 +5068,6 @@ struct nft_expr *nft_set_elem_expr_alloc(const struct nft_ctx *ctx, return expr; err = -EOPNOTSUPP; - if (!(expr->ops->type->flags & NFT_EXPR_STATEFUL)) - goto err_set_elem_expr; - if (expr->ops->type->flags & NFT_EXPR_GC) { if (set->flags & NFT_SET_TIMEOUT) goto err_set_elem_expr; @@ -5924,12 +5934,15 @@ static int nf_tables_updobj(const struct nft_ctx *ctx, { struct nft_object *newobj; struct nft_trans *trans; - int err; + int err = -ENOMEM; + + if (!try_module_get(type->owner)) + return -ENOENT; trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ, sizeof(struct nft_trans_obj)); if (!trans) - return -ENOMEM; + goto err_trans; newobj = nft_obj_init(ctx, type, attr); if (IS_ERR(newobj)) { @@ -5946,6 +5959,8 @@ static int nf_tables_updobj(const struct nft_ctx *ctx, err_free_trans: kfree(trans); +err_trans: + module_put(type->owner); return err; } @@ -7555,7 +7570,7 @@ static void nft_obj_commit_update(struct nft_trans *trans) if (obj->ops->update) obj->ops->update(obj, newobj); - kfree(newobj); + nft_obj_destroy(&trans->ctx, newobj); } static void nft_commit_release(struct nft_trans *trans) @@ -8202,7 +8217,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) break; case NFT_MSG_NEWOBJ: if (nft_trans_obj_update(trans)) { - kfree(nft_trans_obj_newobj(trans)); + nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans)); nft_trans_destroy(trans); } else { trans->ctx.table->use--; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index b0358f30947ea80b6aa715bd515d652538ee785e..1640da5c50776189b8cfc354eb5e6d09ccc50363 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -712,9 +712,15 @@ static struct nf_queue_entry * nf_queue_entry_dup(struct nf_queue_entry *e) { struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC); - if (entry) - nf_queue_entry_get_refs(entry); - return entry; + + if (!entry) + return NULL; + + if (nf_queue_entry_get_refs(entry)) + return entry; + + kfree(entry); + return NULL; } #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 
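The nft_set_desc_concat() change above bounds the size of a concatenated set key in two steps: each field length must be non-zero and at most 255 bytes, and the per-field lengths, rounded up to whole 32-bit registers, must fit into the register file in total. A standalone sketch of that accounting follows; the function name, the plain-array interface and the 16-register limit (assumed here to match NFT_REG32_COUNT) are illustrative only.

    /* Simplified model of the register-usage check added to nft_set_desc_concat(). */
    #include <stdint.h>
    #include <stdio.h>

    #define REG32_COUNT 16                        /* assumed value of NFT_REG32_COUNT */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Returns 0 when the field lengths (in bytes) fit, -1 otherwise. */
    static int concat_fields_fit(const uint32_t *len, unsigned int count)
    {
            unsigned int i, num_regs = 0;

            for (i = 0; i < count; i++) {
                    if (!len[i] || len[i] > UINT8_MAX)     /* reject empty and oversized fields */
                            return -1;
                    num_regs += DIV_ROUND_UP(len[i], sizeof(uint32_t));
            }
            return num_regs <= REG32_COUNT ? 0 : -1;
    }

    int main(void)
    {
            uint32_t ok[]  = { 4, 16, 2 };        /* 1 + 4 + 1 = 6 registers: fits */
            uint32_t bad[] = { 255, 255 };        /* 64 + 64 registers: rejected */

            printf("ok:  %d\n", concat_fields_fit(ok, 3));
            printf("bad: %d\n", concat_fields_fit(bad, 2));
            return 0;
    }

The earlier per-field test (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT) only limited each field in isolation, which is why the hunk moves the -E2BIG decision to the summed register count and keeps -EINVAL for invalid individual lengths.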
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index e55af5c078ac096985c842484a8a81e42bffadda..f37916156ca523556c0266ad50acc2e9036ba0a1 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -149,6 +149,8 @@ static const struct rhashtable_params netlink_rhashtable_params; static inline u32 netlink_group_mask(u32 group) { + if (group > 32) + return 0; return group ? 1 << (group - 1) : 0; } diff --git a/net/nfc/core.c b/net/nfc/core.c index 6800470dd6df7a0eb216fa3dc078294946366e30..3b2983813ff13a4d911a388390e7b080c5b8f421 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -38,7 +38,7 @@ int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -94,7 +94,7 @@ int nfc_dev_up(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -142,7 +142,7 @@ int nfc_dev_down(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -206,7 +206,7 @@ int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -245,7 +245,7 @@ int nfc_stop_poll(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -290,7 +290,7 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -334,7 +334,7 @@ int nfc_dep_link_down(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -400,7 +400,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -446,7 +446,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -493,7 +493,7 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; kfree_skb(skb); goto error; @@ -550,7 +550,7 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -599,7 +599,7 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -1126,6 +1126,7 @@ int nfc_register_device(struct nfc_dev *dev) dev->rfkill = NULL; } } + dev->shutting_down = false; device_unlock(&dev->dev); rc = nfc_genl_device_added(dev); @@ -1158,12 +1159,10 @@ void nfc_unregister_device(struct nfc_dev *dev) rfkill_unregister(dev->rfkill); rfkill_destroy(dev->rfkill); } + dev->shutting_down = true; device_unlock(&dev->dev); if (dev->ops->check_presence) { - device_lock(&dev->dev); - dev->shutting_down = true; - device_unlock(&dev->dev); del_timer_sync(&dev->check_pres_timer); 
cancel_work_sync(&dev->check_pres_work); } diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 72d1e29e28b290b4cb8904acda08abd05ae055f6..f5c816a18b9c1dd8dbd7abe7e21ac1c143e47fb7 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1244,7 +1244,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, struct sk_buff *msg; void *hdr; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; @@ -1260,7 +1260,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, genlmsg_end(msg, hdr); - genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); return 0; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index fc487f9812fc554003b8d6c8c3b36a7ea3d760df..525c1540f10e6484344135d1707ea7100c4fc91d 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -422,12 +422,43 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, memcpy(addr, new_addr, sizeof(__be32[4])); } -static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) +static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask) { + u8 old_ipv6_tclass = ipv6_get_dsfield(nh); + + ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask); + + if (skb->ip_summed == CHECKSUM_COMPLETE) + csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12), + (__force __wsum)(ipv6_tclass << 12)); + + ipv6_change_dsfield(nh, ~mask, ipv6_tclass); +} + +static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask) +{ + u32 ofl; + + ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2]; + fl = OVS_MASKED(ofl, fl, mask); + /* Bits 21-24 are always unmasked, so this retains their values. 
*/ - OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16)); - OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8)); - OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask); + nh->flow_lbl[0] = (u8)(fl >> 16); + nh->flow_lbl[1] = (u8)(fl >> 8); + nh->flow_lbl[2] = (u8)fl; + + if (skb->ip_summed == CHECKSUM_COMPLETE) + csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl)); +} + +static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask) +{ + new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask); + + if (skb->ip_summed == CHECKSUM_COMPLETE) + csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8), + (__force __wsum)(new_ttl << 8)); + nh->hop_limit = new_ttl; } static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl, @@ -545,18 +576,17 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, } } if (mask->ipv6_tclass) { - ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass); + set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass); flow_key->ip.tos = ipv6_get_dsfield(nh); } if (mask->ipv6_label) { - set_ipv6_fl(nh, ntohl(key->ipv6_label), + set_ipv6_fl(skb, nh, ntohl(key->ipv6_label), ntohl(mask->ipv6_label)); flow_key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); } if (mask->ipv6_hlimit) { - OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit, - mask->ipv6_hlimit); + set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit); flow_key->ip.ttl = nh->hop_limit; } return 0; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index a11b558813c10783977a52cbb00d901182072a21..7ff98d39ec942ccf00f1bfdb01e9aad54b7a35f0 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -730,6 +730,57 @@ static bool skb_nfct_cached(struct net *net, } #if IS_ENABLED(CONFIG_NF_NAT) +static void ovs_nat_update_key(struct sw_flow_key *key, + const struct sk_buff *skb, + enum nf_nat_manip_type maniptype) +{ + if (maniptype == NF_NAT_MANIP_SRC) { + __be16 src; + + key->ct_state |= OVS_CS_F_SRC_NAT; + if (key->eth.type == htons(ETH_P_IP)) + key->ipv4.addr.src = ip_hdr(skb)->saddr; + else if (key->eth.type == htons(ETH_P_IPV6)) + memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr, + sizeof(key->ipv6.addr.src)); + else + return; + + if (key->ip.proto == IPPROTO_UDP) + src = udp_hdr(skb)->source; + else if (key->ip.proto == IPPROTO_TCP) + src = tcp_hdr(skb)->source; + else if (key->ip.proto == IPPROTO_SCTP) + src = sctp_hdr(skb)->source; + else + return; + + key->tp.src = src; + } else { + __be16 dst; + + key->ct_state |= OVS_CS_F_DST_NAT; + if (key->eth.type == htons(ETH_P_IP)) + key->ipv4.addr.dst = ip_hdr(skb)->daddr; + else if (key->eth.type == htons(ETH_P_IPV6)) + memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr, + sizeof(key->ipv6.addr.dst)); + else + return; + + if (key->ip.proto == IPPROTO_UDP) + dst = udp_hdr(skb)->dest; + else if (key->ip.proto == IPPROTO_TCP) + dst = tcp_hdr(skb)->dest; + else if (key->ip.proto == IPPROTO_SCTP) + dst = sctp_hdr(skb)->dest; + else + return; + + key->tp.dst = dst; + } +} + /* Modelled after nf_nat_ipv[46]_fn(). * range is only used for new, uninitialized NAT state. * Returns either NF_ACCEPT or NF_DROP. 
@@ -737,7 +788,7 @@ static bool skb_nfct_cached(struct net *net, static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, const struct nf_nat_range2 *range, - enum nf_nat_manip_type maniptype) + enum nf_nat_manip_type maniptype, struct sw_flow_key *key) { int hooknum, nh_off, err = NF_ACCEPT; @@ -810,58 +861,11 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, skb_push(skb, nh_off); skb_postpush_rcsum(skb, skb->data, nh_off); - return err; -} - -static void ovs_nat_update_key(struct sw_flow_key *key, - const struct sk_buff *skb, - enum nf_nat_manip_type maniptype) -{ - if (maniptype == NF_NAT_MANIP_SRC) { - __be16 src; - - key->ct_state |= OVS_CS_F_SRC_NAT; - if (key->eth.type == htons(ETH_P_IP)) - key->ipv4.addr.src = ip_hdr(skb)->saddr; - else if (key->eth.type == htons(ETH_P_IPV6)) - memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr, - sizeof(key->ipv6.addr.src)); - else - return; - - if (key->ip.proto == IPPROTO_UDP) - src = udp_hdr(skb)->source; - else if (key->ip.proto == IPPROTO_TCP) - src = tcp_hdr(skb)->source; - else if (key->ip.proto == IPPROTO_SCTP) - src = sctp_hdr(skb)->source; - else - return; - - key->tp.src = src; - } else { - __be16 dst; - - key->ct_state |= OVS_CS_F_DST_NAT; - if (key->eth.type == htons(ETH_P_IP)) - key->ipv4.addr.dst = ip_hdr(skb)->daddr; - else if (key->eth.type == htons(ETH_P_IPV6)) - memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr, - sizeof(key->ipv6.addr.dst)); - else - return; - - if (key->ip.proto == IPPROTO_UDP) - dst = udp_hdr(skb)->dest; - else if (key->ip.proto == IPPROTO_TCP) - dst = tcp_hdr(skb)->dest; - else if (key->ip.proto == IPPROTO_SCTP) - dst = sctp_hdr(skb)->dest; - else - return; + /* Update the flow key if NAT successful. */ + if (err == NF_ACCEPT) + ovs_nat_update_key(key, skb, maniptype); - key->tp.dst = dst; - } + return err; } /* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */ @@ -903,7 +907,7 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, } else { return NF_ACCEPT; /* Connection is not NATed. */ } - err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype); + err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype, key); if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) { if (ct->status & IPS_SRC_NAT) { @@ -913,17 +917,13 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, maniptype = NF_NAT_MANIP_SRC; err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, - maniptype); + maniptype, key); } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL, - NF_NAT_MANIP_SRC); + NF_NAT_MANIP_SRC, key); } } - /* Mark NAT done if successful and update the flow key. 
*/ - if (err == NF_ACCEPT) - ovs_nat_update_key(key, skb, maniptype); - return err; } #else /* !CONFIG_NF_NAT */ diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 4c5c2331e7648fb84c26bc791a8de914e0336e05..8c4bdfa627ca908ebd6ed3aa2621a203adb2c1f8 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2201,8 +2201,8 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey, icmpv6_key->icmpv6_type = ntohs(output->tp.src); icmpv6_key->icmpv6_code = ntohs(output->tp.dst); - if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || - icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { + if (swkey->tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) || + swkey->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { struct ovs_key_nd *nd_key; nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key)); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index a31334b92be7e975330420f34298731efd8e9872..d0c95d7dd292d89f0de3b36816ff927e0ffdacfb 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2278,8 +2278,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, copy_skb = skb_get(skb); skb_head = skb->data; } - if (copy_skb) + if (copy_skb) { + memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0, + sizeof(PACKET_SKB_CB(copy_skb)->sa.ll)); skb_set_owner_r(copy_skb, sk); + } } snaplen = po->rx_ring.frame_size - macoff; if ((int)snaplen < 0) { @@ -3434,6 +3437,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { + const size_t max_len = min(sizeof(skb->cb), + sizeof(struct sockaddr_storage)); int copy_len; /* If the address length field is there to be filled @@ -3456,6 +3461,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, msg->msg_namelen = sizeof(struct sockaddr_ll); } } + if (WARN_ON_ONCE(copy_len > max_len)) { + copy_len = max_len; + msg->msg_namelen = copy_len; + } memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len); } diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index dce48162f6c274116f25ff6fa04b54dd7835f372..3bad9f5f9102395eac52d393f497602bab6fac69 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -760,14 +760,12 @@ void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool, enum rxrpc_propose_ack_trace); void rxrpc_process_call(struct work_struct *); -static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call, - unsigned long expire_at, - unsigned long now, - enum rxrpc_timer_trace why) -{ - trace_rxrpc_timer(call, why, now); - timer_reduce(&call->timer, expire_at); -} +void rxrpc_reduce_call_timer(struct rxrpc_call *call, + unsigned long expire_at, + unsigned long now, + enum rxrpc_timer_trace why); + +void rxrpc_delete_call_timer(struct rxrpc_call *call); /* * call_object.c @@ -791,6 +789,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *); bool __rxrpc_queue_call(struct rxrpc_call *); bool rxrpc_queue_call(struct rxrpc_call *); void rxrpc_see_call(struct rxrpc_call *); +bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op); void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace); void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace); void rxrpc_cleanup_call(struct rxrpc_call *); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index df864e6922679140d5b7c8c3aadd65a92906e9e3..22e05de5d1ca96fccb667a0a287e0042fc5be6e9 100644 --- a/net/rxrpc/call_event.c +++ 
b/net/rxrpc/call_event.c @@ -310,7 +310,7 @@ void rxrpc_process_call(struct work_struct *work) } if (call->state == RXRPC_CALL_COMPLETE) { - del_timer_sync(&call->timer); + rxrpc_delete_call_timer(call); goto out_put; } diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 4eb91d958a48d3f89c17bcf1d43eadc57a748d70..043508fd8d8a5d3a57d8a805093769b0be1f3b06 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -53,10 +53,30 @@ static void rxrpc_call_timer_expired(struct timer_list *t) if (call->state < RXRPC_CALL_COMPLETE) { trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies); - rxrpc_queue_call(call); + __rxrpc_queue_call(call); + } else { + rxrpc_put_call(call, rxrpc_call_put); + } +} + +void rxrpc_reduce_call_timer(struct rxrpc_call *call, + unsigned long expire_at, + unsigned long now, + enum rxrpc_timer_trace why) +{ + if (rxrpc_try_get_call(call, rxrpc_call_got_timer)) { + trace_rxrpc_timer(call, why, now); + if (timer_reduce(&call->timer, expire_at)) + rxrpc_put_call(call, rxrpc_call_put_notimer); } } +void rxrpc_delete_call_timer(struct rxrpc_call *call) +{ + if (del_timer_sync(&call->timer)) + rxrpc_put_call(call, rxrpc_call_put_timer); +} + static struct lock_class_key rxrpc_call_user_mutex_lock_class_key; /* @@ -463,6 +483,17 @@ void rxrpc_see_call(struct rxrpc_call *call) } } +bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) +{ + const void *here = __builtin_return_address(0); + int n = atomic_fetch_add_unless(&call->usage, 1, 0); + + if (n == 0) + return false; + trace_rxrpc_call(call->debug_id, op, n, here, NULL); + return true; +} + /* * Note the addition of a ref on a call. */ @@ -510,8 +541,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) spin_unlock_bh(&call->lock); rxrpc_put_call_slot(call); - - del_timer_sync(&call->timer); + rxrpc_delete_call_timer(call); /* Make sure we don't get any more notifications */ write_lock_bh(&rx->recvmsg_lock); @@ -618,6 +648,8 @@ static void rxrpc_destroy_call(struct work_struct *work) struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor); struct rxrpc_net *rxnet = call->rxnet; + rxrpc_delete_call_timer(call); + rxrpc_put_connection(call->conn); rxrpc_put_peer(call->peer); kfree(call->rxtx_buffer); @@ -652,8 +684,6 @@ void rxrpc_cleanup_call(struct rxrpc_call *call) memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); - del_timer_sync(&call->timer); - ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 812c3c70a53a04029ccf11b78d0c59d877fb2180..825b3e9b55f7e23900e55af586b45e826e1f5fbb 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -514,11 +514,6 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p, struct nf_conn *ct; u8 dir; - /* Previously seen or loopback */ - ct = nf_ct_get(skb, &ctinfo); - if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED) - return false; - switch (family) { case NFPROTO_IPV4: if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph)) diff --git a/net/sctp/diag.c b/net/sctp/diag.c index babadd6720a2be1f4d7c5c0840c677f588a01023..68ff82ff49a3dc0373b1992618f606cf694a93d0 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@ -61,10 +61,6 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r, r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX; r->idiag_retrans = asoc->rtx_data_chunks; r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies); - } else { - 
r->idiag_timer = 0; - r->idiag_retrans = 0; - r->idiag_expires = 0; } } @@ -144,13 +140,14 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc, r = nlmsg_data(nlh); BUG_ON(!sk_fullsock(sk)); + r->idiag_timer = 0; + r->idiag_retrans = 0; + r->idiag_expires = 0; if (asoc) { inet_diag_msg_sctpasoc_fill(r, sk, asoc); } else { inet_diag_msg_common_fill(r, sk); r->idiag_state = sk->sk_state; - r->idiag_timer = 0; - r->idiag_retrans = 0; } if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin)) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 99b902e410c49d16f560dd6bf61c957db9ad8ef7..4f16d406ad8ea9e1716ee26ca826c13c2cf876df 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -180,7 +180,7 @@ static int smc_release(struct socket *sock) { struct sock *sk = sock->sk; struct smc_sock *smc; - int rc = 0; + int old_state, rc = 0; if (!sk) goto out; @@ -188,8 +188,10 @@ static int smc_release(struct socket *sock) sock_hold(sk); /* sock_put below */ smc = smc_sk(sk); + old_state = sk->sk_state; + /* cleanup for a dangling non-blocking connect */ - if (smc->connect_nonblock && sk->sk_state == SMC_INIT) + if (smc->connect_nonblock && old_state == SMC_INIT) tcp_abort(smc->clcsock->sk, ECONNABORTED); if (cancel_work_sync(&smc->connect_work)) @@ -203,6 +205,10 @@ static int smc_release(struct socket *sock) else lock_sock(sk); + if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE && + !smc->use_fallback) + smc_close_active_abort(smc); + rc = __smc_release(smc); /* detach socket */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 4eb9ef9c28003e718ef5572da6af23137fb2a50d..d69aac6c1fcea555bdaecb434655f1093da8265e 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -662,8 +662,8 @@ void smc_conn_free(struct smc_connection *conn) cancel_work_sync(&conn->abort_work); } if (!list_empty(&lgr->list)) { - smc_lgr_unregister_conn(conn); smc_buf_unuse(conn, lgr); /* allow buffer reuse */ + smc_lgr_unregister_conn(conn); } if (!lgr->conns_num) @@ -1316,7 +1316,8 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini) (ini->smcd_version == SMC_V2 || lgr->vlan_id == ini->vlan_id) && (role == SMC_CLNT || ini->is_smcd || - lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) { + (lgr->conns_num < SMC_RMBS_PER_LGR_MAX && + !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) { /* link group found */ ini->first_contact_local = 0; conn->lgr = lgr; diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index f3c18b991d35c42442b332fbe280e976a8fd6517..9007c7e3bae4e89470ea7fd72061a5f777910f60 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -112,7 +112,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) pnettable = &sn->pnettable; /* remove table entry */ - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (!pnet_name || @@ -130,7 +130,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) rc = 0; } } - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); /* if this is not the initial namespace, stop here */ if (net != &init_net) @@ -191,7 +191,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev) sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev && !strncmp(pnetelem->eth_name, ndev->name, 
IFNAMSIZ)) { @@ -205,7 +205,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev) break; } } - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } @@ -223,7 +223,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev) sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) { dev_put(pnetelem->ndev); @@ -236,7 +236,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev) break; } } - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } @@ -371,7 +371,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, rc = -EEXIST; new_netdev = true; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_ETH && !strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) { @@ -381,9 +381,9 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, } if (new_netdev) { list_add_tail(&new_pe->list, &pnettable->pnetlist); - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); } else { - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); kfree(new_pe); goto out_put; } @@ -445,7 +445,7 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name, new_pe->ib_port = ib_port; new_ibdev = true; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { @@ -455,9 +455,9 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name, } if (new_ibdev) { list_add_tail(&new_pe->list, &pnettable->pnetlist); - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); } else { - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); kfree(new_pe); } return (new_ibdev) ? 
0 : -EEXIST; @@ -602,7 +602,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid, pnettable = &sn->pnettable; /* dump pnettable entries */ - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid)) continue; @@ -617,7 +617,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid, break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return idx; } @@ -859,7 +859,7 @@ int smc_pnet_net_init(struct net *net) struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev; INIT_LIST_HEAD(&pnettable->pnetlist); - rwlock_init(&pnettable->lock); + mutex_init(&pnettable->lock); INIT_LIST_HEAD(&pnetids_ndev->list); rwlock_init(&pnetids_ndev->lock); @@ -939,7 +939,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) { /* get pnetid of netdev device */ @@ -948,7 +948,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } @@ -1129,7 +1129,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port) sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) && @@ -1139,7 +1139,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port) break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } @@ -1158,7 +1158,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev) sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { @@ -1167,7 +1167,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev) break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h index 14039272f7e4263a3d86f29d0c4f048731af387f..80a88eea494918663f0263b9577c0ca49dcf1542 100644 --- a/net/smc/smc_pnet.h +++ b/net/smc/smc_pnet.h @@ -29,7 +29,7 @@ struct smc_link_group; * @pnetlist: List of PNETIDs */ struct smc_pnettable { - rwlock_t lock; + struct mutex lock; struct list_head pnetlist; }; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 5c8c5b38faf11303444b8047f5a6e65048fb8dca..3ea27bb3f512dbdd4a6483af675334bf4cb024ab 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -2054,7 +2054,14 @@ static void xprt_destroy(struct rpc_xprt *xprt) */ wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); + /* + * xprt_schedule_autodisconnect() can run after XPRT_LOCKED + * is cleared. We use ->transport_lock to ensure the mod_timer() + * can only run *before* del_timer_sync(), never after.
+ */ + spin_lock(&xprt->transport_lock); del_timer_sync(&xprt->timer); + spin_unlock(&xprt->transport_lock); /* * Destroy sockets etc from the system workqueue so they can diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 12e535b43d887bac125cd925db0c3cbf376f8cce..6911f1cab2063fb5dd3842301c04b91d66c4ba1c 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -342,16 +342,18 @@ static int tipc_enable_bearer(struct net *net, const char *name, goto rejected; } - test_and_set_bit_lock(0, &b->up); - rcu_assign_pointer(tn->bearer_list[bearer_id], b); - if (skb) - tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); - + /* Create monitoring data before accepting activate messages */ if (tipc_mon_create(net, bearer_id)) { bearer_disable(net, b); + kfree_skb(skb); return -ENOMEM; } + test_and_set_bit_lock(0, &b->up); + rcu_assign_pointer(tn->bearer_list[bearer_id], b); + if (skb) + tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); + pr_info("Enabled bearer <%s>, priority %u\n", name, prio); return res; diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index d8a2f424786fcc4a2162b888e0f43597d4e05a6f..6f91b9a306dc39e8ce91977c5485948b2ff7cde7 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -2280,7 +2280,7 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr) struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; struct tipc_aead_key *skey = NULL; u16 key_gen = msg_key_gen(hdr); - u16 size = msg_data_sz(hdr); + u32 size = msg_data_sz(hdr); u8 *data = msg_data(hdr); unsigned int keylen; diff --git a/net/tipc/link.c b/net/tipc/link.c index fb835a3822f49d5d509b5b73f552d61328884197..7a353ff6284486de8d752bc3a476272b3e83668b 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -2245,6 +2245,11 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, break; case STATE_MSG: + /* Validate Gap ACK blocks, drop if invalid */ + glen = tipc_get_gap_ack_blks(&ga, l, hdr, true); + if (glen > dlen) + break; + l->rcv_nxt_state = msg_seqno(hdr) + 1; /* Update own tolerance if peer indicates a non-zero value */ @@ -2270,10 +2275,6 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, break; } - /* Receive Gap ACK blocks from peer if any */ - glen = tipc_get_gap_ack_blks(&ga, l, hdr, true); - if(glen > dlen) - break; tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr, &l->mon_state, l->bearer_id); diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index f6a6acef42235e170dfd1806e657022ce75bb093..54c5328f492d2e7bc3870a624cc0236e159168eb 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -931,7 +931,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg, list_for_each_entry(p, &sr->all_publ, all_publ) if (p->key == *last_key) break; - if (p->key != *last_key) + if (list_entry_is_head(p, &sr->all_publ, all_publ)) return -EPIPE; } else { p = list_first_entry(&sr->all_publ, diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ce957ee5383c4c8907645a3c5012935fd6c9640d..42283dc6c5b7c724cb40120050e6e9f90fd53730 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2846,7 +2846,8 @@ static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list) /* Try again later if dest link is congested */ if (tsk->cong_link_cnt) { - sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100)); + sk_reset_timer(sk, &sk->sk_timer, + jiffies + msecs_to_jiffies(100)); return; } /* Prepare SYN for retransmit */ @@ -3743,7 +3744,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb, if 
(p->key == *last_publ) break; } - if (p->key != *last_publ) { + if (list_entry_is_head(p, &tsk->publications, binding_sock)) { /* We never set seq or call nl_dump_check_consistent() * this means that setting prev_seq here will cause the * consistence check to fail in the netlink callback diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 5a8f1c65ce80ae2446329b77bf7cf6487339cd40..8cbd95630c442de1df2b67b995f7e9eb0d8681f0 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -328,7 +328,7 @@ static int tls_device_record_close(struct sock *sk, /* fill prepend */ tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]), record->len - prot->overhead_size, - record_type, prot->version); + record_type); return ret; } @@ -483,11 +483,13 @@ static int tls_push_data(struct sock *sk, copy = min_t(size_t, size, (pfrag->size - pfrag->offset)); copy = min_t(size_t, copy, (max_open_record_len - record->len)); - rc = tls_device_copy_data(page_address(pfrag->page) + - pfrag->offset, copy, msg_iter); - if (rc) - goto handle_error; - tls_append_frag(record, pfrag, copy); + if (copy) { + rc = tls_device_copy_data(page_address(pfrag->page) + + pfrag->offset, copy, msg_iter); + if (rc) + goto handle_error; + tls_append_frag(record, pfrag, copy); + } size -= copy; if (!size) { @@ -1010,7 +1012,7 @@ static void tls_device_attach(struct tls_context *ctx, struct sock *sk, int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) { - u16 nonce_size, tag_size, iv_size, rec_seq_size; + u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size; struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_prot_info *prot = &tls_ctx->prot_info; struct tls_record_info *start_marker_record; @@ -1051,6 +1053,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv; rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE; + salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE; rec_seq = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq; break; @@ -1071,6 +1074,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) prot->tag_size = tag_size; prot->overhead_size = prot->prepend_size + prot->tag_size; prot->iv_size = iv_size; + prot->salt_size = salt_size; ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL); if (!ctx->tx.iv) { diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 0d40016bf69e034bfa0b7cd3d5a52641b7d7eef0..24226254c8207e6906b46335fcc5687d77485ce3 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -49,7 +49,8 @@ static int tls_enc_record(struct aead_request *aead_req, struct crypto_aead *aead, char *aad, char *iv, __be64 rcd_sn, struct scatter_walk *in, - struct scatter_walk *out, int *in_len) + struct scatter_walk *out, int *in_len, + struct tls_prot_info *prot) { unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE]; struct scatterlist sg_in[3]; @@ -73,8 +74,7 @@ static int tls_enc_record(struct aead_request *aead_req, len -= TLS_CIPHER_AES_GCM_128_IV_SIZE; tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE, - (char *)&rcd_sn, sizeof(rcd_sn), buf[0], - TLS_1_2_VERSION); + (char *)&rcd_sn, buf[0], prot); memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE, TLS_CIPHER_AES_GCM_128_IV_SIZE); @@ -140,7 +140,7 @@ static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead, static int tls_enc_records(struct 
aead_request *aead_req, struct crypto_aead *aead, struct scatterlist *sg_in, struct scatterlist *sg_out, char *aad, char *iv, - u64 rcd_sn, int len) + u64 rcd_sn, int len, struct tls_prot_info *prot) { struct scatter_walk out, in; int rc; @@ -150,7 +150,7 @@ static int tls_enc_records(struct aead_request *aead_req, do { rc = tls_enc_record(aead_req, aead, aad, iv, - cpu_to_be64(rcd_sn), &in, &out, &len); + cpu_to_be64(rcd_sn), &in, &out, &len, prot); rcd_sn++; } while (rc == 0 && len); @@ -348,7 +348,8 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, payload_len, sync_size, dummy_buf); if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv, - rcd_sn, sync_size + payload_len) < 0) + rcd_sn, sync_size + payload_len, + &tls_ctx->prot_info) < 0) goto free_nskb; complete_skb(nskb, skb, tcp_payload_offset); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 58d22d6b86ae6f04d4903032fda3b837c3a2157c..29c7503e4b281bb652adf634cbbed6565e278a55 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -423,6 +423,46 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, rc = -EFAULT; break; } + case TLS_CIPHER_SM4_GCM: { + struct tls12_crypto_info_sm4_gcm *sm4_gcm_info = + container_of(crypto_info, + struct tls12_crypto_info_sm4_gcm, info); + + if (len != sizeof(*sm4_gcm_info)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(sm4_gcm_info->iv, + cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE, + TLS_CIPHER_SM4_GCM_IV_SIZE); + memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq, + TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info))) + rc = -EFAULT; + break; + } + case TLS_CIPHER_SM4_CCM: { + struct tls12_crypto_info_sm4_ccm *sm4_ccm_info = + container_of(crypto_info, + struct tls12_crypto_info_sm4_ccm, info); + + if (len != sizeof(*sm4_ccm_info)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(sm4_ccm_info->iv, + cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE, + TLS_CIPHER_SM4_CCM_IV_SIZE); + memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq, + TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info))) + rc = -EFAULT; + break; + } default: rc = -EINVAL; } @@ -523,6 +563,12 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, case TLS_CIPHER_AES_CCM_128: optsize = sizeof(struct tls12_crypto_info_aes_ccm_128); break; + case TLS_CIPHER_SM4_GCM: + optsize = sizeof(struct tls12_crypto_info_sm4_gcm); + break; + case TLS_CIPHER_SM4_CCM: + optsize = sizeof(struct tls12_crypto_info_sm4_ccm); + break; default: rc = -EINVAL; goto err_crypto_info; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 8cd011ea9fbb851769bf1db4196a99ffe9e11585..3ee8aa7ec04dc16d0d9bd83921d62db665b749ab 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -507,15 +507,21 @@ static int tls_do_encryption(struct sock *sk, int rc, iv_offset = 0; /* For CCM based ciphers, first byte of IV is a constant */ - if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) { + switch (prot->cipher_type) { + case TLS_CIPHER_AES_CCM_128: rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE; iv_offset = 1; + break; + case TLS_CIPHER_SM4_CCM: + rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE; + iv_offset = 1; + break; } memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv, prot->iv_size + prot->salt_size); - xor_iv_with_seq(prot->version, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq); + xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq); sge->offset += 
prot->prepend_size; sge->length -= prot->prepend_size; @@ -758,14 +764,13 @@ static int tls_push_record(struct sock *sk, int flags, sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]); tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size, - tls_ctx->tx.rec_seq, prot->rec_seq_size, - record_type, prot->version); + tls_ctx->tx.rec_seq, record_type, prot); tls_fill_prepend(tls_ctx, page_address(sg_page(&msg_en->sg.data[i])) + msg_en->sg.data[i].offset, msg_pl->sg.size + prot->tail_size, - record_type, prot->version); + record_type); tls_ctx->pending_open_record_frags = false; @@ -1467,10 +1472,16 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, aad = (u8 *)(sgout + n_sgout); iv = aad + prot->aad_size; - /* For CCM based ciphers, first byte of nonce+iv is always '2' */ - if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) { - iv[0] = 2; + /* For CCM based ciphers, first byte of nonce+iv is a constant */ + switch (prot->cipher_type) { + case TLS_CIPHER_AES_CCM_128: + iv[0] = TLS_AES_CCM_IV_B0_BYTE; iv_offset = 1; + break; + case TLS_CIPHER_SM4_CCM: + iv[0] = TLS_SM4_CCM_IV_B0_BYTE; + iv_offset = 1; + break; } /* Prepare IV */ @@ -1483,17 +1494,16 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, } if (prot->version == TLS_1_3_VERSION) memcpy(iv + iv_offset, tls_ctx->rx.iv, - crypto_aead_ivsize(ctx->aead_recv)); + prot->iv_size + prot->salt_size); else memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size); - xor_iv_with_seq(prot->version, iv + iv_offset, tls_ctx->rx.rec_seq); + xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq); /* Prepare AAD */ tls_make_aad(aad, rxm->full_len - prot->overhead_size + prot->tail_size, - tls_ctx->rx.rec_seq, prot->rec_seq_size, - ctx->control, prot->version); + tls_ctx->rx.rec_seq, ctx->control, prot); /* Prepare sgin */ sg_init_table(sgin, n_sgin); @@ -1994,6 +2004,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct sock *sk = sock->sk; struct sk_buff *skb; ssize_t copied = 0; + bool from_queue; int err = 0; long timeo; int chunk; @@ -2003,14 +2014,20 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK); - skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err); - if (!skb) - goto splice_read_end; + from_queue = !skb_queue_empty(&ctx->rx_list); + if (from_queue) { + skb = __skb_dequeue(&ctx->rx_list); + } else { + skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, + &err); + if (!skb) + goto splice_read_end; - err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false); - if (err < 0) { - tls_err_abort(sk, -EBADMSG); - goto splice_read_end; + err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false); + if (err < 0) { + tls_err_abort(sk, -EBADMSG); + goto splice_read_end; + } } /* splice does not support reading control messages */ @@ -2026,8 +2043,17 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, if (copied < 0) goto splice_read_end; - if (likely(!(flags & MSG_PEEK))) - tls_sw_advance_skb(sk, skb, copied); + if (!from_queue) { + ctx->recv_pkt = NULL; + __strp_unpause(&ctx->strp); + } + if (chunk < rxm->full_len) { + __skb_queue_head(&ctx->rx_list, skb); + rxm->offset += len; + rxm->full_len -= len; + } else { + consume_skb(skb); + } splice_read_end: release_sock(sk); @@ -2415,6 +2441,40 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) cipher_name = "ccm(aes)"; break; } + case TLS_CIPHER_SM4_GCM: { + struct tls12_crypto_info_sm4_gcm *sm4_gcm_info; + + 
sm4_gcm_info = (void *)crypto_info; + nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE; + tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE; + iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE; + iv = sm4_gcm_info->iv; + rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE; + rec_seq = sm4_gcm_info->rec_seq; + keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE; + key = sm4_gcm_info->key; + salt = sm4_gcm_info->salt; + salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE; + cipher_name = "gcm(sm4)"; + break; + } + case TLS_CIPHER_SM4_CCM: { + struct tls12_crypto_info_sm4_ccm *sm4_ccm_info; + + sm4_ccm_info = (void *)crypto_info; + nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE; + tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE; + iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE; + iv = sm4_ccm_info->iv; + rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE; + rec_seq = sm4_ccm_info->rec_seq; + keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE; + key = sm4_ccm_info->key; + salt = sm4_ccm_info->salt; + salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE; + cipher_name = "ccm(sm4)"; + break; + } default: rc = -EINVAL; goto free_priv; diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 005aa701f4d522023c713149e360914a8e44e375..c59806253a65abc62dfe763b9c123db77f5484bb 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -333,7 +333,8 @@ void vsock_remove_sock(struct vsock_sock *vsk) } EXPORT_SYMBOL_GPL(vsock_remove_sock); -void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) +void vsock_for_each_connected_socket(struct vsock_transport *transport, + void (*fn)(struct sock *sk)) { int i; @@ -342,8 +343,12 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { struct vsock_sock *vsk; list_for_each_entry(vsk, &vsock_connected_table[i], - connected_table) + connected_table) { + if (vsk->transport != transport) + continue; + fn(sk_vsock(vsk)); + } } spin_unlock_bh(&vsock_table_lock); diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 3a056f8affd1d20265cb256bb2b6eeddf8e09310..e131121533ad93d718a826f4a634aa12ce2aa476 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -24,6 +24,7 @@ static struct workqueue_struct *virtio_vsock_workqueue; static struct virtio_vsock __rcu *the_virtio_vsock; static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */ +static struct virtio_transport virtio_transport; /* forward declaration */ struct virtio_vsock { struct virtio_device *vdev; @@ -383,7 +384,8 @@ static void virtio_vsock_event_handle(struct virtio_vsock *vsock, switch (le32_to_cpu(event->id)) { case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET: virtio_vsock_update_guest_cid(vsock); - vsock_for_each_connected_socket(virtio_vsock_reset_sock); + vsock_for_each_connected_socket(&virtio_transport.transport, + virtio_vsock_reset_sock); break; } } @@ -635,7 +637,8 @@ static void virtio_vsock_remove(struct virtio_device *vdev) synchronize_rcu(); /* Reset all connected sockets when the device disappear */ - vsock_for_each_connected_socket(virtio_vsock_reset_sock); + vsock_for_each_connected_socket(&virtio_transport.transport, + virtio_vsock_reset_sock); /* Stop all work handlers to make sure no one is accessing the device, * so we can safely call vdev->config->reset(). 
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 1c9ecb18b8e644db60264354918a0ad59f12cf05..a9ca95a0fcdda6f1b39b2787efd3f2cea5b14762 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -75,6 +75,8 @@ static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID; static int PROTOCOL_OVERRIDE = -1; +static struct vsock_transport vmci_transport; /* forward declaration */ + /* Helper function to convert from a VMCI error code to a VSock error code. */ static s32 vmci_transport_error_to_vsock_error(s32 vmci_error) @@ -882,7 +884,8 @@ static void vmci_transport_qp_resumed_cb(u32 sub_id, const struct vmci_event_data *e_data, void *client_data) { - vsock_for_each_connected_socket(vmci_transport_handle_detach); + vsock_for_each_connected_socket(&vmci_transport, + vmci_transport_handle_detach); } static void vmci_transport_recv_pkt_work(struct work_struct *work) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8fb0478888fb29de7fe602727df86ddf79e06e61..0df8b9a19952cddc5e50f5864941967964ffe1b3 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -12930,6 +12930,9 @@ static int handle_nan_filter(struct nlattr *attr_filter, i = 0; nla_for_each_nested(attr, attr_filter, rem) { filter[i].filter = nla_memdup(attr, GFP_KERNEL); + if (!filter[i].filter) + goto err; + filter[i].len = nla_len(attr); i++; } @@ -12942,6 +12945,15 @@ static int handle_nan_filter(struct nlattr *attr_filter, } return 0; + +err: + i = 0; + nla_for_each_nested(attr, attr_filter, rem) { + kfree(filter[i].filter); + i++; + } + kfree(filter); + return -ENOMEM; } static int nl80211_nan_add_func(struct sk_buff *skb, @@ -17115,7 +17127,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev, wdev->chandef = *chandef; wdev->preset_chandef = *chandef; - if (wdev->iftype == NL80211_IFTYPE_STATION && + if ((wdev->iftype == NL80211_IFTYPE_STATION || + wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) && !WARN_ON(!wdev->current_bss)) cfg80211_update_assoc_bss_entry(wdev, chandef->chan); diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index e843b0d9e2a61c16551be51f69bc441ccad4f921..c255aac6b816b4911b435ae76cd3377b88f2d9dd 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -223,6 +223,9 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, if (x->encap || x->tfcpad) return -EINVAL; + if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND)) + return -EINVAL; + dev = dev_get_by_index(net, xuo->ifindex); if (!dev) { if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) { @@ -261,7 +264,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, xso->dev = dev; xso->real_dev = dev; xso->num_exthdrs = 1; - xso->flags = xuo->flags; + /* Don't forward bit that is not implemented */ + xso->flags = xuo->flags & ~XFRM_OFFLOAD_IPV6; err = dev->xfrmdev_ops->xdo_dev_state_add(x); if (err) { diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index e1fae61a5bb90d8dd16f286529f716db18810c74..da518b4ca84c6cad59903be1fcc0cd6c4dab1846 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -303,7 +303,10 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; - icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + if (skb->len > 1280) + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + else + goto xmit; } else { if (!(ip_hdr(skb)->frag_off & htons(IP_DF))) goto xmit; @@ -679,12 +682,12 @@ static int xfrmi_changelink(struct net_device 
*dev, struct nlattr *tb[], struct net *net = xi->net; struct xfrm_if_parms p = {}; + xfrmi_netlink_parms(data, &p); if (!p.if_id) { NL_SET_ERR_MSG(extack, "if_id must be non zero"); return -EINVAL; } - xfrmi_netlink_parms(data, &p); xi = xfrmi_locate(net, &p); if (!xi) { xi = netdev_priv(dev); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index c4a195cb36817ccbadd630dd2f798bd9bcc500fd..3d0ffd9270041d2840e96670575cbb407dcf7a46 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -4287,7 +4287,7 @@ static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, } static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel, - u8 dir, u8 type, struct net *net) + u8 dir, u8 type, struct net *net, u32 if_id) { struct xfrm_policy *pol, *ret = NULL; struct hlist_head *chain; @@ -4296,7 +4296,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector * spin_lock_bh(&net->xfrm.xfrm_policy_lock); chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir); hlist_for_each_entry(pol, chain, bydst) { - if (xfrm_migrate_selector_match(sel, &pol->selector) && + if ((if_id == 0 || pol->if_id == if_id) && + xfrm_migrate_selector_match(sel, &pol->selector) && pol->type == type) { ret = pol; priority = ret->priority; @@ -4308,7 +4309,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector * if ((pol->priority >= priority) && ret) break; - if (xfrm_migrate_selector_match(sel, &pol->selector) && + if ((if_id == 0 || pol->if_id == if_id) && + xfrm_migrate_selector_match(sel, &pol->selector) && pol->type == type) { ret = pol; break; @@ -4424,7 +4426,7 @@ static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate) int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_migrate, struct xfrm_kmaddress *k, struct net *net, - struct xfrm_encap_tmpl *encap) + struct xfrm_encap_tmpl *encap, u32 if_id) { int i, err, nx_cur = 0, nx_new = 0; struct xfrm_policy *pol = NULL; @@ -4443,14 +4445,14 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, } /* Stage 1 - find policy */ - if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { + if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) { err = -ENOENT; goto out; } /* Stage 2 - find and update state(s) */ for (i = 0, mp = m; i < num_migrate; i++, mp++) { - if ((x = xfrm_migrate_state_find(mp, net))) { + if ((x = xfrm_migrate_state_find(mp, net, if_id))) { x_cur[nx_cur] = x; nx_cur++; xc = xfrm_state_migrate(x, mp, encap); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index c158e70e8ae1020eb63dd09b632dd38a4037138a..3d75a4f103601cb23d270d633975662d7f0b29fa 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1542,9 +1542,6 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, memcpy(&x->mark, &orig->mark, sizeof(x->mark)); memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark)); - if (xfrm_init_state(x) < 0) - goto error; - x->props.flags = orig->props.flags; x->props.extra_flags = orig->props.extra_flags; @@ -1566,7 +1563,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, return NULL; } -struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net) +struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net, + u32 if_id) { unsigned int h; struct xfrm_state *x = NULL; @@ -1582,6 +1580,8 @@ struct xfrm_state 
*xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n continue; if (m->reqid && x->props.reqid != m->reqid) continue; + if (if_id != 0 && x->if_id != if_id) + continue; if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr, m->old_family) || !xfrm_addr_equal(&x->props.saddr, &m->old_saddr, @@ -1597,6 +1597,8 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n if (x->props.mode != m->mode || x->id.proto != m->proto) continue; + if (if_id != 0 && x->if_id != if_id) + continue; if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr, m->old_family) || !xfrm_addr_equal(&x->props.saddr, &m->old_saddr, @@ -1623,6 +1625,11 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x, if (!xc) return NULL; + xc->props.family = m->new_family; + + if (xfrm_init_state(xc) < 0) + goto error; + memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr)); memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr)); @@ -2516,7 +2523,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x) } EXPORT_SYMBOL(xfrm_state_delete_tunnel); -u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu) +u32 xfrm_state_mtu(struct xfrm_state *x, int mtu) { const struct xfrm_type *type = READ_ONCE(x->type); struct crypto_aead *aead; @@ -2547,17 +2554,7 @@ u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu) return ((mtu - x->props.header_len - crypto_aead_authsize(aead) - net_adj) & ~(blksize - 1)) + net_adj - 2; } -EXPORT_SYMBOL_GPL(__xfrm_state_mtu); - -u32 xfrm_state_mtu(struct xfrm_state *x, int mtu) -{ - mtu = __xfrm_state_mtu(x, mtu); - - if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU) - return IPV6_MIN_MTU; - - return mtu; -} +EXPORT_SYMBOL_GPL(xfrm_state_mtu); int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload) { diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index ddf1b3a5f7c1f986685061ca037cf77c4c46248e..4a307106f98f49716b81205f8cd415be289ed9d7 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -621,13 +621,8 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, xfrm_smark_init(attrs, &x->props.smark); - if (attrs[XFRMA_IF_ID]) { + if (attrs[XFRMA_IF_ID]) x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); - if (!x->if_id) { - err = -EINVAL; - goto error; - } - } err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]); if (err) @@ -1358,13 +1353,8 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, mark = xfrm_mark_get(attrs, &m); - if (attrs[XFRMA_IF_ID]) { + if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); - if (!if_id) { - err = -EINVAL; - goto out_noput; - } - } if (p->info.seq) { x = xfrm_find_acq_byseq(net, mark, p->info.seq); @@ -1677,13 +1667,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us xfrm_mark_get(attrs, &xp->mark); - if (attrs[XFRMA_IF_ID]) { + if (attrs[XFRMA_IF_ID]) xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); - if (!xp->if_id) { - err = -EINVAL; - goto error; - } - } return xp; error: @@ -2438,6 +2423,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, int n = 0; struct net *net = sock_net(skb->sk); struct xfrm_encap_tmpl *encap = NULL; + u32 if_id = 0; if (attrs[XFRMA_MIGRATE] == NULL) return -EINVAL; @@ -2462,7 +2448,10 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, return 0; } - err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap); + if (attrs[XFRMA_IF_ID]) + if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + + err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, 
encap, if_id); kfree(encap); diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index 2e4508a6cb3a7c4dec89a924c7188a2ab332ec2d..cf5b0a8952254bf86adbc5496fbfbe45976a3f4c 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -1520,14 +1520,15 @@ int main(int argc, char **argv) setlocale(LC_ALL, ""); + prev_time = get_nsecs(); + start_time = prev_time; + if (!opt_quiet) { ret = pthread_create(&pt, NULL, poller, NULL); if (ret) exit_with_error(ret); } - prev_time = get_nsecs(); - start_time = prev_time; if (opt_bench == BENCH_RXDROP) rx_drop_all(); diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile index 4852bf44e913e25b4e69c4b066c068d2254a13d4..f1d201782346fa84f616b7b15824fc2273e98479 100644 --- a/scripts/dtc/Makefile +++ b/scripts/dtc/Makefile @@ -22,7 +22,7 @@ dtc-objs += yamltree.o # To include <yaml.h> installed in a non-default path HOSTCFLAGS_yamltree.o := $(shell pkg-config --cflags yaml-0.1) # To link libyaml installed in a non-default path -HOSTLDLIBS_dtc := $(shell pkg-config yaml-0.1 --libs) +HOSTLDLIBS_dtc := $(shell pkg-config --libs yaml-0.1) endif # Generated files need one more search path to include headers in source tree diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c index 48e141e07956261e74fdb5b2f56983c285614e36..dacd697ffd3830b5aa32eb93535f7341ace59859 100644 --- a/scripts/gcc-plugins/stackleak_plugin.c +++ b/scripts/gcc-plugins/stackleak_plugin.c @@ -431,6 +431,23 @@ static unsigned int stackleak_cleanup_execute(void) return 0; } +/* + * STRING_CST may or may not be NUL terminated: + * https://gcc.gnu.org/onlinedocs/gccint/Constant-expressions.html + */ +static inline bool string_equal(tree node, const char *string, int length) +{ + if (TREE_STRING_LENGTH(node) < length) + return false; + if (TREE_STRING_LENGTH(node) > length + 1) + return false; + if (TREE_STRING_LENGTH(node) == length + 1 && + TREE_STRING_POINTER(node)[length] != '\0') + return false; + return !memcmp(TREE_STRING_POINTER(node), string, length); +} +#define STRING_EQUAL(node, str) string_equal(node, str, strlen(str)) + static bool stackleak_gate(void) { tree section; @@ -440,13 +457,13 @@ static bool stackleak_gate(void) if (section && TREE_VALUE(section)) { section = TREE_VALUE(TREE_VALUE(section)); - if (!strncmp(TREE_STRING_POINTER(section), ".init.text", 10)) + if (STRING_EQUAL(section, ".init.text")) return false; - if (!strncmp(TREE_STRING_POINTER(section), ".devinit.text", 13)) + if (STRING_EQUAL(section, ".devinit.text")) return false; - if (!strncmp(TREE_STRING_POINTER(section), ".cpuinit.text", 13)) + if (STRING_EQUAL(section, ".cpuinit.text")) return false; - if (!strncmp(TREE_STRING_POINTER(section), ".meminit.text", 13)) + if (STRING_EQUAL(section, ".meminit.text")) return false; } diff --git a/scripts/sign-file.c b/scripts/sign-file.c index fbd34b8e8f578aba003533348c907899779fb62c..acc9e5f2eb04a3e9445beda94ecaf313361fe5ab 100644 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -206,6 +206,28 @@ static X509 *read_x509(const char *x509_name) return x509; } +#if defined(EVP_PKEY_SM2) +static int pkey_is_sm2(EVP_PKEY *pkey) +{ + EC_KEY *eckey = NULL; + + const EC_GROUP *group = NULL; + + if (pkey == NULL || EVP_PKEY_id(pkey) != EVP_PKEY_EC) + return 0; + + eckey = EVP_PKEY_get0_EC_KEY(pkey); + if (eckey == NULL) + return 0; + + group = EC_KEY_get0_group(eckey); + if (group == NULL) + return 0; + + return EC_GROUP_get_curve_name(group) == NID_sm2; +} +#endif + int main(int argc, char **argv) { struct
module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; @@ -220,6 +242,10 @@ int main(int argc, char **argv) unsigned int use_signed_attrs; const EVP_MD *digest_algo; EVP_PKEY *private_key; +#if defined(EVP_PKEY_SM2) + EVP_PKEY *public_key; +#endif + #ifndef USE_PKCS7 CMS_ContentInfo *cms = NULL; unsigned int use_keyid = 0; @@ -303,6 +329,16 @@ int main(int argc, char **argv) digest_algo = EVP_get_digestbyname(hash_algo); ERR(!digest_algo, "EVP_get_digestbyname"); +#if defined(EVP_PKEY_SM2) + if (pkey_is_sm2(private_key)) + EVP_PKEY_set_alias_type(private_key, EVP_PKEY_SM2); + + public_key = X509_get0_pubkey(x509); + ERR(!public_key, "X509_get0_pubkey"); + if (pkey_is_sm2(public_key)) + EVP_PKEY_set_alias_type(public_key, EVP_PKEY_SM2); +#endif + #ifndef USE_PKCS7 /* Load the signature message from the digest buffer. */ cms = CMS_sign(NULL, NULL, NULL, NULL, diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index f3dd2641d29af1b3bb96bb22dc12109fa785f16d..cddfc0e43a808b918c7768bf23b19e2f914da72c 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -70,7 +70,7 @@ static int __init evm_set_param(char *str) else pr_err("invalid \"%s\" mode", str); - return 0; + return 1; } __setup("evm=", evm_set_param); diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c index 931d8dfb4a7f42172a934236d046d19de4130c86..63e5c646f76207959d8b3485a4733829be0c26dd 100644 --- a/security/keys/keyctl_pkey.c +++ b/security/keys/keyctl_pkey.c @@ -135,15 +135,23 @@ static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_par switch (op) { case KEYCTL_PKEY_ENCRYPT: + if (uparams.in_len > info.max_dec_size || + uparams.out_len > info.max_enc_size) + return -EINVAL; + break; case KEYCTL_PKEY_DECRYPT: if (uparams.in_len > info.max_enc_size || uparams.out_len > info.max_dec_size) return -EINVAL; break; case KEYCTL_PKEY_SIGN: + if (uparams.in_len > info.max_data_size || + uparams.out_len > info.max_sig_size) + return -EINVAL; + break; case KEYCTL_PKEY_VERIFY: - if (uparams.in_len > info.max_sig_size || - uparams.out_len > info.max_data_size) + if (uparams.in_len > info.max_data_size || + uparams.in2_len > info.max_sig_size) return -EINVAL; break; default: @@ -151,7 +159,7 @@ static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_par } params->in_len = uparams.in_len; - params->out_len = uparams.out_len; + params->out_len = uparams.out_len; /* Note: same as in2_len */ return 0; } diff --git a/security/security.c b/security/security.c index 4fb58543eeb9ba1e38795a7941cace0098b62b02..926e035f9978cb018771f6efc6601645a4f3b568 100644 --- a/security/security.c +++ b/security/security.c @@ -59,10 +59,12 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = { [LOCKDOWN_DEBUGFS] = "debugfs access", [LOCKDOWN_XMON_WR] = "xmon write access", [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM", + [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM", [LOCKDOWN_INTEGRITY_MAX] = "integrity", [LOCKDOWN_KCORE] = "/proc/kcore access", [LOCKDOWN_KPROBES] = "use of kprobes", [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM", + [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM", [LOCKDOWN_PERF] = "unsafe use of perf", [LOCKDOWN_TRACEFS] = "use of tracefs", [LOCKDOWN_XMON_RW] = "xmon read and write access", @@ -860,9 +862,22 @@ int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc) return call_int_hook(fs_context_dup, 0, fc, src_fc); } 
-int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) +int security_fs_context_parse_param(struct fs_context *fc, + struct fs_parameter *param) { - return call_int_hook(fs_context_parse_param, -ENOPARAM, fc, param); + struct security_hook_list *hp; + int trc; + int rc = -ENOPARAM; + + hlist_for_each_entry(hp, &security_hook_heads.fs_context_parse_param, + list) { + trc = hp->hook.fs_context_parse_param(fc, param); + if (trc == 0) + rc = 0; + else if (trc != -ENOPARAM) + return trc; + } + return rc; } int security_sb_alloc(struct super_block *sb) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 86159b32921ccadc2c49b88452ba9047cd8da8a0..8c901ae05dd845b7df58379459a697a32eb065b8 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2820,10 +2820,9 @@ static int selinux_fs_context_parse_param(struct fs_context *fc, return opt; rc = selinux_add_opt(opt, param->string, &fc->security); - if (!rc) { + if (!rc) param->string = NULL; - rc = 1; - } + return rc; } @@ -3648,6 +3647,12 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd, CAP_OPT_NONE, true); break; + case FIOCLEX: + case FIONCLEX: + if (!selinux_policycap_ioctl_skip_cloexec()) + error = ioctl_has_perm(cred, file, FILE__IOCTL, (u16) cmd); + break; + /* default case assumes that the command will go * to the file's ioctl() function. */ diff --git a/security/selinux/include/policycap.h b/security/selinux/include/policycap.h index 2ec038efbb03cc53d4e2c5780641f213dadd9894..a9e572ca4fd96d00ea26c917c25f6c91d552475f 100644 --- a/security/selinux/include/policycap.h +++ b/security/selinux/include/policycap.h @@ -11,6 +11,7 @@ enum { POLICYDB_CAPABILITY_CGROUPSECLABEL, POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION, POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS, + POLICYDB_CAPABILITY_IOCTL_SKIP_CLOEXEC, __POLICYDB_CAPABILITY_MAX }; #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1) diff --git a/security/selinux/include/policycap_names.h b/security/selinux/include/policycap_names.h index b89289f092c938ede5c28b293cfb3f51e7d3979c..ebd64afe1defd13dfb20fd9f9fd05c54f61f218f 100644 --- a/security/selinux/include/policycap_names.h +++ b/security/selinux/include/policycap_names.h @@ -12,7 +12,8 @@ const char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX] = { "always_check_network", "cgroup_seclabel", "nnp_nosuid_transition", - "genfs_seclabel_symlinks" + "genfs_seclabel_symlinks", + "ioctl_skip_cloexec" }; #endif /* _SELINUX_POLICYCAP_NAMES_H_ */ diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 63ca6e79daeb996723177b2cf842146fecd49594..1521460a97d4ee9359fa65ae1215e3994deec189 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h @@ -219,6 +219,13 @@ static inline bool selinux_policycap_genfs_seclabel_symlinks(void) return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]); } +static inline bool selinux_policycap_ioctl_skip_cloexec(void) +{ + struct selinux_state *state = &selinux_state; + + return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_IOCTL_SKIP_CLOEXEC]); +} + struct selinux_policy_convert_data; struct selinux_load_state { diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 2b745ae8cb9814a5862534c93f9da482072d1136..d893c2280f595e084cab2089593cd1410eb8b167 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -2124,6 +2124,8 @@ static int sel_fill_super(struct super_block *sb, struct fs_context 
*fc) } ret = sel_make_avc_files(dentry); + if (ret) + goto err; dentry = sel_make_dir(sb->s_root, "ss", &fsi->last_ino); if (IS_ERR(dentry)) { diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 7314196185d15f4f357cc2553d860a69af6fe80e..00e95f8bd7c73ff05a2105e902b547540754f8cb 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -346,7 +346,7 @@ int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x, int rc; struct xfrm_sec_ctx *ctx; char *ctx_str = NULL; - int str_len; + u32 str_len; if (!polsec) return 0; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 5c90b9fa4d405c69e9876e6a5b35e8d0cd9c477c..b36b8668f1f4a3f9e80a142f153892f500f87dd4 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2506,7 +2506,7 @@ static int smk_ipv6_check(struct smack_known *subject, #ifdef CONFIG_AUDIT smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); ad.a.u.net->family = PF_INET6; - ad.a.u.net->dport = ntohs(address->sin6_port); + ad.a.u.net->dport = address->sin6_port; if (act == SMK_RECEIVING) ad.a.u.net->v6info.saddr = address->sin6_addr; else diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c index 3445ae6fd4794eaf430384ec63622004bc93f8fb..363b65be87ab768ed4993df1e5a361892958b3ab 100644 --- a/security/tomoyo/load_policy.c +++ b/security/tomoyo/load_policy.c @@ -24,7 +24,7 @@ static const char *tomoyo_loader; static int __init tomoyo_loader_setup(char *str) { tomoyo_loader = str; - return 0; + return 1; } __setup("TOMOYO_loader=", tomoyo_loader_setup); @@ -64,7 +64,7 @@ static const char *tomoyo_trigger; static int __init tomoyo_trigger_setup(char *str) { tomoyo_trigger = str; - return 0; + return 1; } __setup("TOMOYO_trigger=", tomoyo_trigger_setup); diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c index a0996c47e58fee5b72da40163162ab895770758a..b326a5f5f0d53096a7109fa837c9fac32ed68373 100644 --- a/sound/arm/aaci.c +++ b/sound/arm/aaci.c @@ -1055,7 +1055,7 @@ static int aaci_probe(struct amba_device *dev, return ret; } -static int aaci_remove(struct amba_device *dev) +static void aaci_remove(struct amba_device *dev) { struct snd_card *card = amba_get_drvdata(dev); @@ -1066,8 +1066,6 @@ static int aaci_remove(struct amba_device *dev) snd_card_free(card); amba_release_regions(dev); } - - return 0; } static struct amba_id aaci_ids[] = { diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index d79febeebf0c591725703112330e020426ab7669..f88de74da1eb36d889886b9637264719b21bb182 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -774,6 +774,11 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, if (oss_period_size < 16) return -EINVAL; + + /* don't allocate too large period; 1MB period must be enough */ + if (oss_period_size > 1024 * 1024) + return -ENOMEM; + runtime->oss.period_bytes = oss_period_size; runtime->oss.period_frames = 1; runtime->oss.periods = oss_periods; @@ -1042,10 +1047,9 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) goto failure; } #endif - oss_period_size *= oss_frame_size; - - oss_buffer_size = oss_period_size * runtime->oss.periods; - if (oss_buffer_size < 0) { + oss_period_size = array_size(oss_period_size, oss_frame_size); + oss_buffer_size = array_size(oss_period_size, runtime->oss.periods); + if (oss_buffer_size <= 0) { err = -EINVAL; goto failure; } diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c index 
d5ca161d588c504fa79257a300a958313b98c74d..1e2d1b35c1946cdc768d4b453833c5b4e0b90778 100644 --- a/sound/core/oss/pcm_plugin.c +++ b/sound/core/oss/pcm_plugin.c @@ -61,7 +61,10 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t } if ((width = snd_pcm_format_physical_width(format->format)) < 0) return width; - size = frames * format->channels * width; + size = array3_size(frames, format->channels, width); + /* check for too large period size once again */ + if (size > 1024 * 1024) + return -ENOMEM; if (snd_BUG_ON(size % 8)) return -ENXIO; size /= 8; diff --git a/sound/core/pcm.c b/sound/core/pcm.c index a8ae5928decdad11d29699114ea8dea3757a37c2..59d222446d77738b25af374aa15dab527767a2e2 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c @@ -969,6 +969,8 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, init_waitqueue_head(&runtime->tsleep); runtime->status->state = SNDRV_PCM_STATE_OPEN; + mutex_init(&runtime->buffer_mutex); + atomic_set(&runtime->buffer_accessing, 0); substream->runtime = runtime; substream->private_data = pcm->private_data; @@ -1002,6 +1004,7 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream) } else { substream->runtime = NULL; } + mutex_destroy(&runtime->buffer_mutex); kfree(runtime); put_pid(substream->pid); substream->pid = NULL; diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 5e04c4b9e02399cd1cd697dbadd1a23ceb8a7da6..289f52af15b96b16a94b3572fb73eee720fafc8b 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -2221,10 +2221,15 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, err = -EINVAL; goto _end_unlock; } + if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) { + err = -EBUSY; + goto _end_unlock; + } snd_pcm_stream_unlock_irq(substream); err = writer(substream, appl_ofs, data, offset, frames, transfer); snd_pcm_stream_lock_irq(substream); + atomic_dec(&runtime->buffer_accessing); if (err < 0) goto _end_unlock; err = pcm_accessible_state(runtime); diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c index 4f03ba8ed0ae597ca01547c811f3e6058e8efb87..a9a0d74f31656c976c1102aa3978179b7b6e6c66 100644 --- a/sound/core/pcm_memory.c +++ b/sound/core/pcm_memory.c @@ -164,19 +164,20 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry, size_t size; struct snd_dma_buffer new_dmab; + mutex_lock(&substream->pcm->open_mutex); if (substream->runtime) { buffer->error = -EBUSY; - return; + goto unlock; } if (!snd_info_get_line(buffer, line, sizeof(line))) { snd_info_get_str(str, line, sizeof(str)); size = simple_strtoul(str, NULL, 10) * 1024; if ((size != 0 && size < 8192) || size > substream->dma_max) { buffer->error = -EINVAL; - return; + goto unlock; } if (substream->dma_buffer.bytes == size) - return; + goto unlock; memset(&new_dmab, 0, sizeof(new_dmab)); new_dmab.dev = substream->dma_buffer.dev; if (size > 0) { @@ -185,7 +186,7 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry, substream->dma_buffer.dev.dev, size, &new_dmab) < 0) { buffer->error = -ENOMEM; - return; + goto unlock; } substream->buffer_bytes_max = size; } else { @@ -197,6 +198,8 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry, } else { buffer->error = -EINVAL; } + unlock: + mutex_unlock(&substream->pcm->open_mutex); } static inline void preallocate_info_init(struct snd_pcm_substream *substream) diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 
c5ef5182fcf1907676bca812d544a3624ffb1fa0..6cc7c2a9fe732de3c32392428d05e95fff7f9b51 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -667,6 +667,30 @@ static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm, return 0; } +/* acquire buffer_mutex; if it's in r/w operation, return -EBUSY, otherwise + * block the further r/w operations + */ +static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime) +{ + if (!atomic_dec_unless_positive(&runtime->buffer_accessing)) + return -EBUSY; + mutex_lock(&runtime->buffer_mutex); + return 0; /* keep buffer_mutex, unlocked by below */ +} + +/* release buffer_mutex and clear r/w access flag */ +static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime) +{ + mutex_unlock(&runtime->buffer_mutex); + atomic_inc(&runtime->buffer_accessing); +} + +#if IS_ENABLED(CONFIG_SND_PCM_OSS) +#define is_oss_stream(substream) ((substream)->oss.oss) +#else +#define is_oss_stream(substream) false +#endif + static int snd_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { @@ -678,22 +702,25 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; + err = snd_pcm_buffer_access_lock(runtime); + if (err < 0) + return err; snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_OPEN: case SNDRV_PCM_STATE_SETUP: case SNDRV_PCM_STATE_PREPARED: + if (!is_oss_stream(substream) && + atomic_read(&substream->mmap_count)) + err = -EBADFD; break; default: - snd_pcm_stream_unlock_irq(substream); - return -EBADFD; + err = -EBADFD; + break; } snd_pcm_stream_unlock_irq(substream); -#if IS_ENABLED(CONFIG_SND_PCM_OSS) - if (!substream->oss.oss) -#endif - if (atomic_read(&substream->mmap_count)) - return -EBADFD; + if (err) + goto unlock; snd_pcm_sync_stop(substream, true); @@ -780,16 +807,21 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, if ((usecs = period_to_usecs(runtime)) >= 0) cpu_latency_qos_add_request(&substream->latency_pm_qos_req, usecs); - return 0; + err = 0; _error: - /* hardware might be unusable from this time, - so we force application to retry to set - the correct hardware parameter settings */ - snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); - if (substream->ops->hw_free != NULL) - substream->ops->hw_free(substream); - if (substream->managed_buffer_alloc) - snd_pcm_lib_free_pages(substream); + if (err) { + /* hardware might be unusable from this time, + * so we force application to retry to set + * the correct hardware parameter settings + */ + snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); + if (substream->ops->hw_free != NULL) + substream->ops->hw_free(substream); + if (substream->managed_buffer_alloc) + snd_pcm_lib_free_pages(substream); + } + unlock: + snd_pcm_buffer_access_unlock(runtime); return err; } @@ -829,26 +861,33 @@ static int do_hw_free(struct snd_pcm_substream *substream) static int snd_pcm_hw_free(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; - int result; + int result = 0; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; + result = snd_pcm_buffer_access_lock(runtime); + if (result < 0) + return result; snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_SETUP: case SNDRV_PCM_STATE_PREPARED: + if (atomic_read(&substream->mmap_count)) + result = -EBADFD; break; default: - snd_pcm_stream_unlock_irq(substream); - return 
-EBADFD; + result = -EBADFD; + break; } snd_pcm_stream_unlock_irq(substream); - if (atomic_read(&substream->mmap_count)) - return -EBADFD; + if (result) + goto unlock; result = do_hw_free(substream); snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); cpu_latency_qos_remove_request(&substream->latency_pm_qos_req); + unlock: + snd_pcm_buffer_access_unlock(runtime); return result; } @@ -1154,15 +1193,17 @@ struct action_ops { static int snd_pcm_action_group(const struct action_ops *ops, struct snd_pcm_substream *substream, snd_pcm_state_t state, - bool do_lock) + bool stream_lock) { struct snd_pcm_substream *s = NULL; struct snd_pcm_substream *s1; int res = 0, depth = 1; snd_pcm_group_for_each_entry(s, substream) { - if (do_lock && s != substream) { - if (s->pcm->nonatomic) + if (s != substream) { + if (!stream_lock) + mutex_lock_nested(&s->runtime->buffer_mutex, depth); + else if (s->pcm->nonatomic) mutex_lock_nested(&s->self_group.mutex, depth); else spin_lock_nested(&s->self_group.lock, depth); @@ -1190,18 +1231,18 @@ static int snd_pcm_action_group(const struct action_ops *ops, ops->post_action(s, state); } _unlock: - if (do_lock) { - /* unlock streams */ - snd_pcm_group_for_each_entry(s1, substream) { - if (s1 != substream) { - if (s1->pcm->nonatomic) - mutex_unlock(&s1->self_group.mutex); - else - spin_unlock(&s1->self_group.lock); - } - if (s1 == s) /* end */ - break; + /* unlock streams */ + snd_pcm_group_for_each_entry(s1, substream) { + if (s1 != substream) { + if (!stream_lock) + mutex_unlock(&s1->runtime->buffer_mutex); + else if (s1->pcm->nonatomic) + mutex_unlock(&s1->self_group.mutex); + else + spin_unlock(&s1->self_group.lock); } + if (s1 == s) /* end */ + break; } return res; } @@ -1331,10 +1372,15 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops, /* Guarantee the group members won't change during non-atomic action */ down_read(&snd_pcm_link_rwsem); + res = snd_pcm_buffer_access_lock(substream->runtime); + if (res < 0) + goto unlock; if (snd_pcm_stream_linked(substream)) res = snd_pcm_action_group(ops, substream, state, false); else res = snd_pcm_action_single(ops, substream, state); + snd_pcm_buffer_access_unlock(substream->runtime); + unlock: up_read(&snd_pcm_link_rwsem); return res; } @@ -1829,11 +1875,13 @@ static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL); if (err < 0) return err; + snd_pcm_stream_lock_irq(substream); runtime->hw_ptr_base = 0; runtime->hw_ptr_interrupt = runtime->status->hw_ptr - runtime->status->hw_ptr % runtime->period_size; runtime->silence_start = runtime->status->hw_ptr; runtime->silence_filled = 0; + snd_pcm_stream_unlock_irq(substream); return 0; } @@ -1841,10 +1889,12 @@ static void snd_pcm_post_reset(struct snd_pcm_substream *substream, snd_pcm_state_t state) { struct snd_pcm_runtime *runtime = substream->runtime; + snd_pcm_stream_lock_irq(substream); runtime->control->appl_ptr = runtime->status->hw_ptr; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && runtime->silence_size > 0) snd_pcm_playback_silence(substream, ULONG_MAX); + snd_pcm_stream_unlock_irq(substream); } static const struct action_ops snd_pcm_action_reset = { diff --git a/sound/firewire/fcp.c b/sound/firewire/fcp.c index bbfbebf4affbc20964e52c5553381facbd7f4118..df44dd5dc4b229785e3dac955105e4da27dbcccf 100644 --- a/sound/firewire/fcp.c +++ b/sound/firewire/fcp.c @@ -240,9 +240,7 @@ int fcp_avc_transaction(struct fw_unit *unit, t.response_match_bytes = response_match_bytes; 
t.state = STATE_PENDING; init_waitqueue_head(&t.wait); - - if (*(const u8 *)command == 0x00 || *(const u8 *)command == 0x03) - t.deferrable = true; + t.deferrable = (*(const u8 *)command == 0x00 || *(const u8 *)command == 0x03); spin_lock_irq(&transactions_lock); list_add_tail(&t.list, &transactions); diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c index fa3c39cff5f854a88b602feb832fa174624f3a84..9ee3a312c6793248e7752bf8b3691b4b43fd5dce 100644 --- a/sound/isa/cs423x/cs4236.c +++ b/sound/isa/cs423x/cs4236.c @@ -544,7 +544,7 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev, static int dev; int err; struct snd_card *card; - struct pnp_dev *cdev; + struct pnp_dev *cdev, *iter; char cid[PNP_ID_LEN]; if (pnp_device_is_isapnp(pdev)) @@ -560,9 +560,11 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev, strcpy(cid, pdev->id[0].id); cid[5] = '1'; cdev = NULL; - list_for_each_entry(cdev, &(pdev->protocol->devices), protocol_list) { - if (!strcmp(cdev->id[0].id, cid)) + list_for_each_entry(iter, &(pdev->protocol->devices), protocol_list) { + if (!strcmp(iter->id[0].id, cid)) { + cdev = iter; break; + } } err = snd_cs423x_card_new(&pdev->dev, dev, &card); if (err < 0) diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index 012a7ee849e8aad424e82752c73250b5d0a0d845..963731cf0d8c880f6c5ba5c9235dad01ca040ac5 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c @@ -938,8 +938,8 @@ static int snd_ac97_ad18xx_pcm_get_volume(struct snd_kcontrol *kcontrol, struct int codec = kcontrol->private_value & 3; mutex_lock(&ac97->page_mutex); - ucontrol->value.integer.value[0] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 0) & 31); - ucontrol->value.integer.value[1] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 8) & 31); + ucontrol->value.integer.value[0] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 8) & 31); + ucontrol->value.integer.value[1] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 0) & 31); mutex_unlock(&ac97->page_mutex); return 0; } diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c index 7363d61eaec23fc6754d5d39dbf8717dfc9d1d89..120dd8b33ac81ca825dc11748e3202f12b47adbb 100644 --- a/sound/pci/cmipci.c +++ b/sound/pci/cmipci.c @@ -302,7 +302,6 @@ MODULE_PARM_DESC(joystick_port, "Joystick port address."); #define CM_MICGAINZ 0x01 /* mic boost */ #define CM_MICGAINZ_SHIFT 0 -#define CM_REG_MIXER3 0x24 #define CM_REG_AUX_VOL 0x26 #define CM_VAUXL_MASK 0xf0 #define CM_VAUXR_MASK 0x0f @@ -3291,7 +3290,7 @@ static void snd_cmipci_remove(struct pci_dev *pci) */ static const unsigned char saved_regs[] = { CM_REG_FUNCTRL1, CM_REG_CHFORMAT, CM_REG_LEGACY_CTRL, CM_REG_MISC_CTRL, - CM_REG_MIXER0, CM_REG_MIXER1, CM_REG_MIXER2, CM_REG_MIXER3, CM_REG_PLL, + CM_REG_MIXER0, CM_REG_MIXER1, CM_REG_MIXER2, CM_REG_AUX_VOL, CM_REG_PLL, CM_REG_CH0_FRAME1, CM_REG_CH0_FRAME2, CM_REG_CH1_FRAME1, CM_REG_CH1_FRAME2, CM_REG_EXT_MISC, CM_REG_INT_STATUS, CM_REG_INT_HLDCLR, CM_REG_FUNCTRL0, diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index fbfdfcefeb0b623a8446681315e0c74d4e2e1b0e..de710e6dd56b8754075ec2afbd8df8226ce6cfee 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1608,6 +1608,7 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin, struct hda_codec *codec = per_pin->codec; struct hdmi_spec *spec = codec->spec; struct hdmi_eld *eld = &spec->temp_eld; + struct device *dev = hda_codec_dev(codec); hda_nid_t pin_nid = per_pin->pin_nid; int dev_id = per_pin->dev_id; /* @@ -1621,8 +1622,13 
@@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin, int present; int ret; +#ifdef CONFIG_PM + if (dev->power.runtime_status == RPM_SUSPENDING) + return; +#endif + ret = snd_hda_power_up_pm(codec); - if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) + if (ret < 0 && pm_runtime_suspended(dev)) goto out; present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index ed0cfcb05ef0d4ee0243a1ed4d47ea17a494313f..b886326ce9b96a2a6cef3ac691dc985c5f1830c3 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3615,8 +3615,8 @@ static void alc256_shutup(struct hda_codec *codec) /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly * when booting with headset plugged. So skip setting it for the codec alc257 */ - if (spec->codec_variant != ALC269_TYPE_ALC257 && - spec->codec_variant != ALC269_TYPE_ALC256) + if (codec->core.vendor_id != 0x10ec0236 && + codec->core.vendor_id != 0x10ec0257) alc_update_coef_idx(codec, 0x46, 0, 3 << 12); if (!spec->no_shutup_pins) @@ -6762,6 +6762,7 @@ enum { ALC236_FIXUP_HP_MUTE_LED, ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, + ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS, ALC269VC_FIXUP_ACER_HEADSET_MIC, @@ -8083,6 +8084,14 @@ static const struct hda_fixup alc269_fixups[] = { { } }, }, + [ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x20, AC_VERB_SET_COEF_INDEX, 0x08}, + { 0x20, AC_VERB_SET_PROC_COEF, 0x2fcf}, + { } + }, + }, [ALC295_FIXUP_ASUS_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -8801,6 +8810,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), @@ -8834,6 +8844,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), @@ -8884,6 +8895,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[5|7][0-9]RZ[Q]", ALC269_FIXUP_DMIC), SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + 
SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME), SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), @@ -9174,6 +9187,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"}, {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"}, {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"}, + {.id = ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc256-samsung-headphone"}, {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"}, {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"}, {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"}, @@ -10839,6 +10853,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), + SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50), SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50), diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c index 6a63e8797a0b6b5ecd58076dd1cd751dd81a1afd..97533412ce11e8457d7513e8b804ffbaf9dd69e7 100644 --- a/sound/soc/atmel/atmel_ssc_dai.c +++ b/sound/soc/atmel/atmel_ssc_dai.c @@ -280,7 +280,10 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream, /* Enable PMC peripheral clock for this SSC */ pr_debug("atmel_ssc_dai: Starting clock\n"); - clk_enable(ssc_p->ssc->clk); + ret = clk_enable(ssc_p->ssc->clk); + if (ret) + return ret; + ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk); /* Reset the SSC unless initialized to keep it in a clean state */ diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c index ed1f69b570244f7078cc2c31a987a21c6153b901..8a55d59a6c2aa97bc7680287479d0a1c9dd2859d 100644 --- a/sound/soc/atmel/sam9g20_wm8731.c +++ b/sound/soc/atmel/sam9g20_wm8731.c @@ -214,6 +214,7 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev) cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0); if (!cpu_np) { dev_err(&pdev->dev, "dai and pcm info missing\n"); + of_node_put(codec_np); return -EINVAL; } at91sam9g20ek_dai.cpus->of_node = cpu_np; diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c index 9fbc3c1113cc5ecb2bb6bb7b3ad6e73ddb2ec04c..529604a06c53207303cf76e968e7f6a31a2d132e 100644 --- a/sound/soc/atmel/sam9x5_wm8731.c +++ b/sound/soc/atmel/sam9x5_wm8731.c @@ -142,7 +142,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev) if (!cpu_np) { dev_err(&pdev->dev, "atmel,ssc-controller node missing\n"); ret = -EINVAL; - goto out; + goto out_put_codec_np; } dai->cpus->of_node = cpu_np; dai->platforms->of_node = cpu_np; @@ -153,13 +153,10 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev) if (ret != 0) { dev_err(&pdev->dev, "Failed to set SSC %d for audio: %d\n", ret, priv->ssc_id); 
- goto out; + goto out_put_cpu_np; } - of_node_put(codec_np); - of_node_put(cpu_np); - - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) { dev_err(&pdev->dev, "Platform device allocation failed\n"); goto out_put_audio; @@ -167,10 +164,14 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "%s ok\n", __func__); - return ret; + goto out_put_cpu_np; out_put_audio: atmel_ssc_put_audio(priv->ssc_id); +out_put_cpu_np: + of_node_put(cpu_np); +out_put_codec_np: + of_node_put(codec_np); out: return ret; } @@ -180,7 +181,6 @@ static int sam9x5_wm8731_driver_remove(struct platform_device *pdev) struct snd_soc_card *card = platform_get_drvdata(pdev); struct sam9x5_drvdata *priv = card->drvdata; - snd_soc_unregister_card(card); atmel_ssc_put_audio(priv->ssc_id); return 0; diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 34c6dd04b85a3c9e2619fa1d27a210bf2e484a8f..52c89a6f54e9a6b53e6ea41d88fd5cf8700e220d 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -659,6 +659,7 @@ config SND_SOC_CS4349 config SND_SOC_CS47L15 tristate + depends on MFD_CS47L15 config SND_SOC_CS47L24 tristate @@ -666,15 +667,19 @@ config SND_SOC_CS47L24 config SND_SOC_CS47L35 tristate + depends on MFD_CS47L35 config SND_SOC_CS47L85 tristate + depends on MFD_CS47L85 config SND_SOC_CS47L90 tristate + depends on MFD_CS47L90 config SND_SOC_CS47L92 tristate + depends on MFD_CS47L92 # Cirrus Logic Quad-Channel ADC config SND_SOC_CS53L30 diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c index d76be44f46b406df325c9cda1d6384a99642dbce..36b9e4fab099b971c9f6d4d131f0f86ec40a40b4 100644 --- a/sound/soc/codecs/cs4265.c +++ b/sound/soc/codecs/cs4265.c @@ -150,7 +150,6 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = { SOC_SINGLE("E to F Buffer Disable Switch", CS4265_SPDIF_CTL1, 6, 1, 0), SOC_ENUM("C Data Access", cam_mode_enum), - SOC_SINGLE("SPDIF Switch", CS4265_SPDIF_CTL2, 5, 1, 1), SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2, 3, 1, 0), SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum), @@ -186,7 +185,7 @@ static const struct snd_soc_dapm_widget cs4265_dapm_widgets[] = { SND_SOC_DAPM_SWITCH("Loopback", SND_SOC_NOPM, 0, 0, &loopback_ctl), - SND_SOC_DAPM_SWITCH("SPDIF", SND_SOC_NOPM, 0, 0, + SND_SOC_DAPM_SWITCH("SPDIF", CS4265_SPDIF_CTL2, 5, 1, &spdif_switch), SND_SOC_DAPM_SWITCH("DAC", CS4265_PWRCTL, 1, 1, &dac_switch), diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index 3ddd822240e3aa05fc8381f1aebf11be933fb723..971b8360b5b1be05a73afa6c412332c6092b1c42 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -1221,8 +1221,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) } irq = platform_get_irq_byname(pdev, "mbhc_switch_int"); - if (irq < 0) - return irq; + if (irq < 0) { + ret = irq; + goto err_disable_clk; + } ret = devm_request_threaded_irq(dev, irq, NULL, pm8916_mbhc_switch_irq_handler, @@ -1234,8 +1236,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) if (priv->mbhc_btn_enabled) { irq = platform_get_irq_byname(pdev, "mbhc_but_press_det"); - if (irq < 0) - return irq; + if (irq < 0) { + ret = irq; + goto err_disable_clk; + } ret = devm_request_threaded_irq(dev, irq, NULL, mbhc_btn_press_irq_handler, @@ -1246,8 +1250,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) dev_err(dev, "cannot request mbhc 
button press irq\n"); irq = platform_get_irq_byname(pdev, "mbhc_but_rel_det"); - if (irq < 0) - return irq; + if (irq < 0) { + ret = irq; + goto err_disable_clk; + } ret = devm_request_threaded_irq(dev, irq, NULL, mbhc_btn_release_irq_handler, @@ -1264,6 +1270,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) return devm_snd_soc_register_component(dev, &pm8916_wcd_analog, pm8916_wcd_analog_dai, ARRAY_SIZE(pm8916_wcd_analog_dai)); + +err_disable_clk: + clk_disable_unprepare(priv->mclk); + return ret; } static int pm8916_wcd_analog_spmi_remove(struct platform_device *pdev) diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c index fcc10c8bc625951c9ffb12699cf05f3fb6f4b50b..9ad7fc0baf072678b40063b96fa8450738b06d70 100644 --- a/sound/soc/codecs/msm8916-wcd-digital.c +++ b/sound/soc/codecs/msm8916-wcd-digital.c @@ -1201,7 +1201,7 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev) ret = clk_prepare_enable(priv->mclk); if (ret < 0) { dev_err(dev, "failed to enable mclk %d\n", ret); - return ret; + goto err_clk; } dev_set_drvdata(dev, priv); @@ -1209,6 +1209,9 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev) return devm_snd_soc_register_component(dev, &msm8916_wcd_digital, msm8916_wcd_digital_dai, ARRAY_SIZE(msm8916_wcd_digital_dai)); +err_clk: + clk_disable_unprepare(priv->ahbclk); + return ret; } static int msm8916_wcd_digital_remove(struct platform_device *pdev) diff --git a/sound/soc/codecs/mt6358.c b/sound/soc/codecs/mt6358.c index 1f39d5998cf67518807f04fd30ddd7306af7132c..456d9b24d0249e9bad4ced42de6d82ab0677dd4c 100644 --- a/sound/soc/codecs/mt6358.c +++ b/sound/soc/codecs/mt6358.c @@ -107,6 +107,7 @@ int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt, priv->mtkaif_protocol = mtkaif_protocol; return 0; } +EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_protocol); static void playback_gpio_set(struct mt6358_priv *priv) { @@ -273,6 +274,7 @@ int mt6358_mtkaif_calibration_enable(struct snd_soc_component *cmpnt) 1 << RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT); return 0; } +EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_enable); int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt) { @@ -296,6 +298,7 @@ int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt) capture_gpio_reset(priv); return 0; } +EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_disable); int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt, int phase_1, int phase_2) @@ -310,6 +313,7 @@ int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt, phase_2 << RG_AUD_PAD_TOP_PHASE_MODE2_SFT); return 0; } +EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_calibration_phase); /* dl pga gain */ enum { diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c index db8a41aaa3859c15a5735852126c4383575fee92..4423e61bf1abf611380a2ffbff14427a0f8f6e06 100644 --- a/sound/soc/codecs/rt5663.c +++ b/sound/soc/codecs/rt5663.c @@ -3478,6 +3478,8 @@ static int rt5663_parse_dp(struct rt5663_priv *rt5663, struct device *dev) table_size = sizeof(struct impedance_mapping_table) * rt5663->pdata.impedance_sensing_num; rt5663->imp_table = devm_kzalloc(dev, table_size, GFP_KERNEL); + if (!rt5663->imp_table) + return -ENOMEM; ret = device_property_read_u32_array(dev, "realtek,impedance_sensing_table", (u32 *)rt5663->imp_table, table_size); diff --git a/sound/soc/codecs/rt5668.c b/sound/soc/codecs/rt5668.c index bc69adc9c8b707bb8c3e292e35cd7a9ec474e093..e625df57c69e5a99d2195d259a4ed939e455bd1b 100644 --- 
a/sound/soc/codecs/rt5668.c +++ b/sound/soc/codecs/rt5668.c @@ -1022,11 +1022,13 @@ static void rt5668_jack_detect_handler(struct work_struct *work) container_of(work, struct rt5668_priv, jack_detect_work.work); int val, btn_type; - while (!rt5668->component) - usleep_range(10000, 15000); - - while (!rt5668->component->card->instantiated) - usleep_range(10000, 15000); + if (!rt5668->component || !rt5668->component->card || + !rt5668->component->card->instantiated) { + /* card not yet ready, try later */ + mod_delayed_work(system_power_efficient_wq, + &rt5668->jack_detect_work, msecs_to_jiffies(15)); + return; + } mutex_lock(&rt5668->calibrate_mutex); diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index aaef76cc151fa7bf3e8bf09c2ebc99efba6771ba..113ed00ddf1e5839574ed53c251a148ca0fad55c 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -1081,11 +1081,13 @@ void rt5682_jack_detect_handler(struct work_struct *work) container_of(work, struct rt5682_priv, jack_detect_work.work); int val, btn_type; - while (!rt5682->component) - usleep_range(10000, 15000); - - while (!rt5682->component->card->instantiated) - usleep_range(10000, 15000); + if (!rt5682->component || !rt5682->component->card || + !rt5682->component->card->instantiated) { + /* card not yet ready, try later */ + mod_delayed_work(system_power_efficient_wq, + &rt5682->jack_detect_work, msecs_to_jiffies(15)); + return; + } mutex_lock(&rt5682->calibrate_mutex); diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c index 01df3f4e045a9e18bcd5a7a396348d2fcd776c6b..8540ac230d0eda3115736cc608789c4412d1ea7f 100644 --- a/sound/soc/codecs/wcd934x.c +++ b/sound/soc/codecs/wcd934x.c @@ -2522,13 +2522,16 @@ static int wcd934x_rx_hph_mode_put(struct snd_kcontrol *kc, mode_val = ucontrol->value.enumerated.item[0]; + if (mode_val == wcd->hph_mode) + return 0; + if (mode_val == 0) { dev_err(wcd->dev, "Invalid HPH Mode, default to ClSH HiFi\n"); mode_val = CLS_H_LOHIFI; } wcd->hph_mode = mode_val; - return 0; + return 1; } static int slim_rx_mux_get(struct snd_kcontrol *kc, @@ -5044,6 +5047,7 @@ static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd) } wcd->sidev = of_slim_get_device(wcd->sdev->ctrl, ifc_dev_np); + of_node_put(ifc_dev_np); if (!wcd->sidev) { dev_err(dev, "Unable to get SLIM Interface device\n"); return -EINVAL; diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c index a6aa212fa0c89faa2865755cf68da635226f27a6..ec5d997725b9c10f3c875972aef7870af53547b2 100644 --- a/sound/soc/codecs/wm8350.c +++ b/sound/soc/codecs/wm8350.c @@ -1536,18 +1536,38 @@ static int wm8350_component_probe(struct snd_soc_component *component) wm8350_clear_bits(wm8350, WM8350_JACK_DETECT, WM8350_JDL_ENA | WM8350_JDR_ENA); - wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, wm8350_hpl_jack_handler, 0, "Left jack detect", priv); - wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R, + if (ret != 0) + goto err; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R, wm8350_hpr_jack_handler, 0, "Right jack detect", priv); - wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICSCD, + if (ret != 0) + goto free_jck_det_l; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICSCD, wm8350_mic_handler, 0, "Microphone short", priv); - wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICD, + if (ret != 0) + goto free_jck_det_r; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICD, wm8350_mic_handler, 0, "Microphone 
detect", priv); + if (ret != 0) + goto free_micscd; return 0; + +free_micscd: + wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_MICSCD, priv); +free_jck_det_r: + wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R, priv); +free_jck_det_l: + wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, priv); +err: + return ret; } static void wm8350_component_remove(struct snd_soc_component *component) diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c index fd4160289faca7b44b8f6feb13d60bf8b4f4c491..36da0f01571a111f899fc12c60bab3cde08efde6 100644 --- a/sound/soc/dwc/dwc-i2s.c +++ b/sound/soc/dwc/dwc-i2s.c @@ -403,9 +403,13 @@ static int dw_i2s_runtime_suspend(struct device *dev) static int dw_i2s_runtime_resume(struct device *dev) { struct dw_i2s_dev *dw_dev = dev_get_drvdata(dev); + int ret; - if (dw_dev->capability & DW_I2S_MASTER) - clk_enable(dw_dev->clk); + if (dw_dev->capability & DW_I2S_MASTER) { + ret = clk_enable(dw_dev->clk); + if (ret) + return ret; + } return 0; } @@ -422,10 +426,13 @@ static int dw_i2s_resume(struct snd_soc_component *component) { struct dw_i2s_dev *dev = snd_soc_component_get_drvdata(component); struct snd_soc_dai *dai; - int stream; + int stream, ret; - if (dev->capability & DW_I2S_MASTER) - clk_enable(dev->clk); + if (dev->capability & DW_I2S_MASTER) { + ret = clk_enable(dev->clk); + if (ret) + return ret; + } for_each_component_dais(component, dai) { for_each_pcm_streams(stream) diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c index 15bcb0f38ec9e89f5a6c9f161b8d00572ff4e390..d01e8d516df1f8da072031f5cdf77fc2fff76f50 100644 --- a/sound/soc/fsl/fsl_spdif.c +++ b/sound/soc/fsl/fsl_spdif.c @@ -544,6 +544,8 @@ static void fsl_spdif_shutdown(struct snd_pcm_substream *substream, mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK | SCR_TXSEL_MASK | SCR_USRC_SEL_MASK | SCR_TXFIFO_FSEL_MASK; + /* Disable TX clock */ + regmap_update_bits(regmap, REG_SPDIF_STC, STC_TXCLK_ALL_EN_MASK, 0); } else { scr = SCR_RXFIFO_OFF | SCR_RXFIFO_CTL_ZERO; mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK| diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c index fad1eb6253d53e66aa7646337fc0de34c5745f78..9e602c3456196c5a86ddfb4abf084b577da4ee91 100644 --- a/sound/soc/fsl/imx-es8328.c +++ b/sound/soc/fsl/imx-es8328.c @@ -87,6 +87,7 @@ static int imx_es8328_probe(struct platform_device *pdev) if (int_port > MUX_PORT_MAX || int_port == 0) { dev_err(dev, "mux-int-port: hardware only has %d mux ports\n", MUX_PORT_MAX); + ret = -EINVAL; goto fail; } diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c index 6cada4c1e283befe3c52848bda0340f082696a88..d0d79f47bfdd50401099c9ce2a0ab9aba3817eb4 100644 --- a/sound/soc/generic/simple-card-utils.c +++ b/sound/soc/generic/simple-card-utils.c @@ -255,7 +255,7 @@ int asoc_simple_hw_params(struct snd_pcm_substream *substream, struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->num); unsigned int mclk, mclk_fs = 0; - int ret = 0; + int ret; if (dai_props->mclk_fs) mclk_fs = dai_props->mclk_fs; diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c index 07f8cf9980e31afe73d3b695e7dd2f8f3f1a3de2..f2eda81985e27fd151a5ce7430c9f2a2ea516396 100644 --- a/sound/soc/mxs/mxs-saif.c +++ b/sound/soc/mxs/mxs-saif.c @@ -455,7 +455,10 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream, * basic clock which should be fast enough for the internal * logic. 
*/ - clk_enable(saif->clk); + ret = clk_enable(saif->clk); + if (ret) + return ret; + ret = clk_set_rate(saif->clk, 24000000); clk_disable(saif->clk); if (ret) diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c index a6407f4388de7de3fde2ebb8cb9ec032f6c09eaa..fb721bc499496b18b68057b467fa9ae14082503f 100644 --- a/sound/soc/mxs/mxs-sgtl5000.c +++ b/sound/soc/mxs/mxs-sgtl5000.c @@ -118,6 +118,9 @@ static int mxs_sgtl5000_probe(struct platform_device *pdev) codec_np = of_parse_phandle(np, "audio-codec", 0); if (!saif_np[0] || !saif_np[1] || !codec_np) { dev_err(&pdev->dev, "phandle missing or invalid\n"); + of_node_put(codec_np); + of_node_put(saif_np[0]); + of_node_put(saif_np[1]); return -EINVAL; } diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c index fa84ec695b5251903e661cc05567e5796a1c27a9..785baf98f9da25387a568cb0f1efd2b70c3cd3b5 100644 --- a/sound/soc/rockchip/rockchip_i2s.c +++ b/sound/soc/rockchip/rockchip_i2s.c @@ -624,20 +624,23 @@ static int rockchip_i2s_probe(struct platform_device *pdev) i2s->mclk = devm_clk_get(&pdev->dev, "i2s_clk"); if (IS_ERR(i2s->mclk)) { dev_err(&pdev->dev, "Can't retrieve i2s master clock\n"); - return PTR_ERR(i2s->mclk); + ret = PTR_ERR(i2s->mclk); + goto err_clk; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(regs)) - return PTR_ERR(regs); + regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(regs)) { + ret = PTR_ERR(regs); + goto err_clk; + } i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs, &rockchip_i2s_regmap_config); if (IS_ERR(i2s->regmap)) { dev_err(&pdev->dev, "Failed to initialise managed register map\n"); - return PTR_ERR(i2s->regmap); + ret = PTR_ERR(i2s->regmap); + goto err_clk; } i2s->playback_dma_data.addr = res->start + I2S_TXDR; @@ -696,7 +699,8 @@ static int rockchip_i2s_probe(struct platform_device *pdev) i2s_runtime_suspend(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); - +err_clk: + clk_disable_unprepare(i2s->hclk); return ret; } diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c index 3c574792231bc5c376d201838f497bd3cb063744..0fa72907d5bf14658be2e23ec08fcec358a2b483 100644 --- a/sound/soc/sh/fsi.c +++ b/sound/soc/sh/fsi.c @@ -816,14 +816,27 @@ static int fsi_clk_enable(struct device *dev, return ret; } - clk_enable(clock->xck); - clk_enable(clock->ick); - clk_enable(clock->div); + ret = clk_enable(clock->xck); + if (ret) + goto err; + ret = clk_enable(clock->ick); + if (ret) + goto disable_xck; + ret = clk_enable(clock->div); + if (ret) + goto disable_ick; clock->count++; } return ret; + +disable_ick: + clk_disable(clock->ick); +disable_xck: + clk_disable(clock->xck); +err: + return ret; } static int fsi_clk_disable(struct device *dev, diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index 3a6a60215e815cf1535828759d86a6667a416712..d0f3ff8edd904e2b9765c939c219786412dd9163 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c @@ -766,6 +766,11 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) return -EINVAL; } + if (!codec_dai) { + dev_err(rtd->card->dev, "Missing codec\n"); + return -EINVAL; + } + /* check client and interface hw capabilities */ if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_PLAYBACK) && snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_PLAYBACK)) diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 
13329659686468e35d81b36284cce8ce79f01e17..a6d6d10cd471bd3b7e92a941634d239e194651ff 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3020,7 +3020,7 @@ int snd_soc_get_dai_name(struct of_phandle_args *args, for_each_component(pos) { component_of_node = soc_component_to_node(pos); - if (component_of_node != args->np) + if (component_of_node != args->np || !pos->num_dai) continue; ret = snd_soc_component_of_xlate_dai_name(pos, args, dai_name); diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index 9ef80a48707eb6583ff51ec278c1a5b035970ba1..0d100b4e43f7ef6489cbc4a12ff7de91bde2f7dd 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -83,10 +83,10 @@ static int dmaengine_pcm_hw_params(struct snd_soc_component *component, memset(&slave_config, 0, sizeof(slave_config)); - if (!pcm->config) - prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config; - else + if (pcm->config && pcm->config->prepare_slave_config) prepare_slave_config = pcm->config->prepare_slave_config; + else + prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config; if (prepare_slave_config) { ret = prepare_slave_config(substream, params, &slave_config); diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index caa8d45ebb209231e724ff35adc783a261b7be1e..2bc9fa6a34b8faf645ef36dcce95d1a38778671c 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -317,7 +317,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, mask = BIT(sign_bit + 1) - 1; val = ucontrol->value.integer.value[0]; - if (mc->platform_max && val > mc->platform_max) + if (mc->platform_max && ((int)val + min) > mc->platform_max) return -EINVAL; if (val > max - min) return -EINVAL; @@ -330,7 +330,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, val = val << shift; if (snd_soc_volsw_is_stereo(mc)) { val2 = ucontrol->value.integer.value[1]; - if (mc->platform_max && val2 > mc->platform_max) + if (mc->platform_max && ((int)val2 + min) > mc->platform_max) return -EINVAL; if (val2 > max - min) return -EINVAL; diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 4d24ac255d2532a64def67a2b04a9011a05ef087..23a5f9a52da0fddf8ea0b95d3910811893deee70 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -578,7 +578,8 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr, if (le32_to_cpu(hdr->ops.info) == SND_SOC_TPLG_CTL_BYTES && k->iface & SNDRV_CTL_ELEM_IFACE_MIXER - && k->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE + && (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ + || k->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) && k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { struct soc_bytes_ext *sbe; struct snd_soc_tplg_bytes_control *be; diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c index cb822d9537678d0e9e9972bd47375054cfdefaa0..6943c05273ae7b7068cc33c7682b3d0860598ec2 100644 --- a/sound/soc/sof/imx/imx8m.c +++ b/sound/soc/sof/imx/imx8m.c @@ -191,6 +191,7 @@ static int imx8m_probe(struct snd_sof_dev *sdev) } ret = of_address_to_resource(res_node, 0, &res); + of_node_put(res_node); if (ret) { dev_err(&pdev->dev, "failed to get reserved region address\n"); goto exit_pdev_unregister; diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c index 2707a16c6a4d3db863fe6d322f705f468fdde48d..347636a80b48765109274604b6d9e775775a7162 100644 --- a/sound/soc/sof/intel/hda-loader.c +++ b/sound/soc/sof/intel/hda-loader.c @@ -47,7 +47,7 @@ static struct hdac_ext_stream 
*cl_stream_prepare(struct snd_sof_dev *sdev, unsig ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab); if (ret < 0) { dev_err(sdev->dev, "error: memory alloc failed: %x\n", ret); - goto error; + goto out_put; } hstream->period_bytes = 0;/* initialize period_bytes */ @@ -58,22 +58,23 @@ static struct hdac_ext_stream *cl_stream_prepare(struct snd_sof_dev *sdev, unsig ret = hda_dsp_iccmax_stream_hw_params(sdev, dsp_stream, dmab, NULL); if (ret < 0) { dev_err(sdev->dev, "error: iccmax stream prepare failed: %x\n", ret); - goto error; + goto out_free; } } else { ret = hda_dsp_stream_hw_params(sdev, dsp_stream, dmab, NULL); if (ret < 0) { dev_err(sdev->dev, "error: hdac prepare failed: %x\n", ret); - goto error; + goto out_free; } hda_dsp_stream_spib_config(sdev, dsp_stream, HDA_DSP_SPIB_ENABLE, size); } return dsp_stream; -error: - hda_dsp_stream_put(sdev, direction, hstream->stream_tag); +out_free: snd_dma_free_pages(dmab); +out_put: + hda_dsp_stream_put(sdev, direction, hstream->stream_tag); return ERR_PTR(ret); } diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c index 2ed92c990b97c1c0d663b9a4c9b023507db580f2..dd9013c4766491af0980e74020c4dcfb4d4d17a2 100644 --- a/sound/soc/sti/uniperif_player.c +++ b/sound/soc/sti/uniperif_player.c @@ -91,7 +91,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id) SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(player); /* Stop the player */ - snd_pcm_stop_xrun(player->substream); + snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN); } ret = IRQ_HANDLED; @@ -105,7 +105,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id) SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player); /* Stop the player */ - snd_pcm_stop_xrun(player->substream); + snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN); ret = IRQ_HANDLED; } @@ -138,7 +138,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id) dev_err(player->dev, "Underflow recovery failed\n"); /* Stop the player */ - snd_pcm_stop_xrun(player->substream); + snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN); ret = IRQ_HANDLED; } diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c index 136059331211d2c37da46dff52e332534f95bb39..065c5f0d1f5f00c65715ccde98360816ddbf67a9 100644 --- a/sound/soc/sti/uniperif_reader.c +++ b/sound/soc/sti/uniperif_reader.c @@ -65,7 +65,7 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id) if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) { dev_err(reader->dev, "FIFO error detected\n"); - snd_pcm_stop_xrun(reader->substream); + snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN); ret = IRQ_HANDLED; } diff --git a/sound/soc/ti/davinci-i2s.c b/sound/soc/ti/davinci-i2s.c index dd34504c09ba8178ab5fff4882a3735df46181f6..4895bcee1f5578c24bf5da64505317f74b536d3f 100644 --- a/sound/soc/ti/davinci-i2s.c +++ b/sound/soc/ti/davinci-i2s.c @@ -708,7 +708,9 @@ static int davinci_i2s_probe(struct platform_device *pdev) dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) return -ENODEV; - clk_enable(dev->clk); + ret = clk_enable(dev->clk); + if (ret) + goto err_put_clk; dev->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, dev); @@ -730,6 +732,7 @@ static int davinci_i2s_probe(struct platform_device *pdev) snd_soc_unregister_component(&pdev->dev); err_release_clk: clk_disable(dev->clk); +err_put_clk: clk_put(dev->clk); return ret; } diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c index 
ce19a6058b27964dfcd45336770b2e7cdd417cec..5c4158069a5a893ebf7cfec6d7a6249c353c68df 100644 --- a/sound/soc/xilinx/xlnx_formatter_pcm.c +++ b/sound/soc/xilinx/xlnx_formatter_pcm.c @@ -84,6 +84,7 @@ struct xlnx_pcm_drv_data { struct snd_pcm_substream *play_stream; struct snd_pcm_substream *capture_stream; struct clk *axi_clk; + unsigned int sysclk; }; /* @@ -314,6 +315,15 @@ static irqreturn_t xlnx_s2mm_irq_handler(int irq, void *arg) return IRQ_NONE; } +static int xlnx_formatter_set_sysclk(struct snd_soc_component *component, + int clk_id, int source, unsigned int freq, int dir) +{ + struct xlnx_pcm_drv_data *adata = dev_get_drvdata(component->dev); + + adata->sysclk = freq; + return 0; +} + static int xlnx_formatter_pcm_open(struct snd_soc_component *component, struct snd_pcm_substream *substream) { @@ -450,11 +460,25 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component, u64 size; struct snd_pcm_runtime *runtime = substream->runtime; struct xlnx_pcm_stream_param *stream_data = runtime->private_data; + struct xlnx_pcm_drv_data *adata = dev_get_drvdata(component->dev); active_ch = params_channels(params); if (active_ch > stream_data->ch_limit) return -EINVAL; + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && + adata->sysclk) { + unsigned int mclk_fs = adata->sysclk / params_rate(params); + + if (adata->sysclk % params_rate(params) != 0) { + dev_warn(component->dev, "sysclk %u not divisible by rate %u\n", + adata->sysclk, params_rate(params)); + return -EINVAL; + } + + writel(mclk_fs, stream_data->mmio + XLNX_AUD_FS_MULTIPLIER); + } + if (substream->stream == SNDRV_PCM_STREAM_CAPTURE && stream_data->xfer_mode == AES_TO_PCM) { val = readl(stream_data->mmio + XLNX_AUD_STS); @@ -552,6 +576,7 @@ static int xlnx_formatter_pcm_new(struct snd_soc_component *component, static const struct snd_soc_component_driver xlnx_asoc_component = { .name = DRV_NAME, + .set_sysclk = xlnx_formatter_set_sysclk, .open = xlnx_formatter_pcm_open, .close = xlnx_formatter_pcm_close, .hw_params = xlnx_formatter_pcm_hw_params, diff --git a/sound/spi/at73c213.c b/sound/spi/at73c213.c index 76c0e37a838cf430f1fdec54b07368c11a05c010..8a2da6b1012eb97a004f0ac20d244bbc8f8cc390 100644 --- a/sound/spi/at73c213.c +++ b/sound/spi/at73c213.c @@ -218,7 +218,9 @@ static int snd_at73c213_pcm_open(struct snd_pcm_substream *substream) runtime->hw = snd_at73c213_playback_hw; chip->substream = substream; - clk_enable(chip->ssc->clk); + err = clk_enable(chip->ssc->clk); + if (err) + return err; return 0; } @@ -776,7 +778,9 @@ static int snd_at73c213_chip_init(struct snd_at73c213 *chip) goto out; /* Enable DAC master clock. */ - clk_enable(chip->board->dac_clk); + retval = clk_enable(chip->board->dac_clk); + if (retval) + goto out; /* Initialize at73c213 on SPI bus. */ retval = snd_at73c213_write_reg(chip, DAC_RST, 0x04); @@ -889,7 +893,9 @@ static int snd_at73c213_dev_init(struct snd_card *card, chip->card = card; chip->irq = -1; - clk_enable(chip->ssc->clk); + retval = clk_enable(chip->ssc->clk); + if (retval) + return retval; retval = request_irq(irq, snd_at73c213_interrupt, 0, "at73c213", chip); if (retval) { @@ -1008,7 +1014,9 @@ static int snd_at73c213_remove(struct spi_device *spi) int retval; /* Stop playback. 
*/ - clk_enable(chip->ssc->clk); + retval = clk_enable(chip->ssc->clk); + if (retval) + goto out; ssc_writel(chip->ssc->regs, CR, SSC_BIT(CR_TXDIS)); clk_disable(chip->ssc->clk); @@ -1088,9 +1096,16 @@ static int snd_at73c213_resume(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); struct snd_at73c213 *chip = card->private_data; + int retval; - clk_enable(chip->board->dac_clk); - clk_enable(chip->ssc->clk); + retval = clk_enable(chip->board->dac_clk); + if (retval) + return retval; + retval = clk_enable(chip->ssc->clk); + if (retval) { + clk_disable(chip->board->dac_clk); + return retval; + } ssc_writel(chip->ssc->regs, CR, SSC_BIT(CR_TXEN)); return 0; diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index 8f6823df944fffe5af6e91b83a6fb799d0445a2f..81ace832d7e42da21935ef4e2c4c06dbcf17eeb7 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -542,6 +542,16 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = { .id = USB_ID(0x25c4, 0x0003), .map = scms_usb3318_map, }, + { + /* Corsair Virtuoso SE Latest (wired mode) */ + .id = USB_ID(0x1b1c, 0x0a3f), + .map = corsair_virtuoso_map, + }, + { + /* Corsair Virtuoso SE Latest (wireless mode) */ + .id = USB_ID(0x1b1c, 0x0a40), + .map = corsair_virtuoso_map, + }, { .id = USB_ID(0x30be, 0x0101), /* Schiit Hel */ .ignore_ctl_error = 1, diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 86fdd669f3fd76694234ddfd2c9b36dde2acc744..99f2203bf51f140c3ddfd13ab9277a48edea1341 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -3135,9 +3135,10 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, if (unitid == 7 && cval->control == UAC_FU_VOLUME) snd_dragonfly_quirk_db_scale(mixer, cval, kctl); break; - /* lowest playback value is muted on C-Media devices */ - case USB_ID(0x0d8c, 0x000c): - case USB_ID(0x0d8c, 0x0014): + /* lowest playback value is muted on some devices */ + case USB_ID(0x0d8c, 0x000c): /* C-Media */ + case USB_ID(0x0d8c, 0x0014): /* C-Media */ + case USB_ID(0x19f7, 0x0003): /* RODE NT-USB */ if (strstr(kctl->id.name, "Playback")) cval->min_mute = 1; break; diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c index 9f9fcd2749f224d8ec09e8fae677991f60cc0c9f..dbaa43ffbbd2dca3001a9aec772a6e4ade9a369e 100644 --- a/sound/x86/intel_hdmi_audio.c +++ b/sound/x86/intel_hdmi_audio.c @@ -1276,7 +1276,7 @@ static int had_pcm_mmap(struct snd_pcm_substream *substream, { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, - substream->dma_buffer.addr >> PAGE_SHIFT, + substream->runtime->dma_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); } diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index b58730cc12e836ababc811f52b228d1181564074..a7b5c5efcf3b0b5fe5c1b186c4795748662b891c 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -417,5 +417,6 @@ #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ +#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 
c36a083c8ec0acf3d3629edbf6c095dbdc5242ec..7b9259868243a95eadd4bc6d5cfec58ebea3a900 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -114,6 +114,30 @@ * Not susceptible to * TSX Async Abort (TAA) vulnerabilities. */ +#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* + * Not susceptible to SBDR and SSDP + * variants of Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FBSDP_NO BIT(14) /* + * Not susceptible to FBSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_PSDP_NO BIT(15) /* + * Not susceptible to PSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FB_CLEAR BIT(17) /* + * VERW clears CPU fill buffer + * even on MDS_NO CPUs. + */ +#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* + * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] + * bit available to control VERW + * behavior. + */ #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* @@ -131,6 +155,7 @@ /* SRBDS support */ #define MSR_IA32_MCU_OPT_CTRL 0x00000123 #define RNGDS_MITG_DIS BIT(0) +#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 75617c529efd681b836061f9d1b2ffb97899e797..a44cb51558254c7bc1dcd9f4c55b17f88c177e40 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1490,8 +1490,8 @@ union bpf_attr { * Return * The return value depends on the result of the test, and can be: * - * * 0, if current task belongs to the cgroup2. - * * 1, if current task does not belong to the cgroup2. + * * 1, if current task belongs to the cgroup2. + * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 0911aea4cdbe5c68406b5bc9c15ec58c37058c94..bd22853be4a6b6c4ad710d5f870467235ab01571 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -1416,6 +1416,11 @@ static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id, if (s->name_resolved) return *cached_name ? 
*cached_name : orig_name; + if (btf_is_fwd(t) || (btf_is_enum(t) && btf_vlen(t) == 0)) { + s->name_resolved = 1; + return orig_name; + } + dup_cnt = btf_dump_name_dups(d, name_map, orig_name); if (dup_cnt > 1) { const size_t max_len = 256; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 2dfe07a872fcbd9483d51f46921e65968c31eee4..c9f5eef6d3d80502f6ca23e4c6902a448f0c8925 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -10924,6 +10924,9 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) { + if (!s) + return; + if (s->progs) bpf_object__detach_skeleton(s); if (s->obj) diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 3028f932e10c072a60fd517086aa4fe1dabfe259..c4390ef98b1929e410275fbbd6d02072df57caf5 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -895,12 +895,23 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, int xsk_umem__delete(struct xsk_umem *umem) { + struct xdp_mmap_offsets off; + int err; + if (!umem) return 0; if (umem->refcount) return -EBUSY; + err = xsk_get_mmap_offsets(umem->fd, &off); + if (!err && umem->fill_save && umem->comp_save) { + munmap(umem->fill_save->ring - off.fr.desc, + off.fr.desc + umem->config.fill_size * sizeof(__u64)); + munmap(umem->comp_save->ring - off.cr.desc, + off.cr.desc + umem->config.comp_size * sizeof(__u64)); + } + close(umem->fd); free(umem); diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c index bcb494dc816a0b47aa1884ba82d7a85d2e176b19..48754083791d8d35594a942809b189a727997689 100644 --- a/tools/perf/util/data.c +++ b/tools/perf/util/data.c @@ -44,10 +44,6 @@ int perf_data__create_dir(struct perf_data *data, int nr) if (!files) return -ENOMEM; - data->dir.version = PERF_DIR_VERSION; - data->dir.files = files; - data->dir.nr = nr; - for (i = 0; i < nr; i++) { struct perf_data_file *file = &files[i]; @@ -62,6 +58,9 @@ int perf_data__create_dir(struct perf_data *data, int nr) file->fd = ret; } + data->dir.version = PERF_DIR_VERSION; + data->dir.files = files; + data->dir.nr = nr; return 0; out_err: diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 4d569ad7db02dd9a3b823503f2e8434b226eb911..3609da7cce0abb781889f39887dbf4b2a73915e0 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -231,7 +231,7 @@ void symbols__fixup_end(struct rb_root_cached *symbols) prev = curr; curr = rb_entry(nd, struct symbol, rb_node); - if (prev->end == prev->start && prev->end != curr->start) + if (prev->end == prev->start || prev->end != curr->start) arch__symbols__fixup_end(prev, curr); } diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c index 81b57b9aaaeae2e68d81db22410594dc02fc6060..7967348b11af69efbf6de34515903983b79d0a1e 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c @@ -113,7 +113,7 @@ static void tpcpy(struct bpf_tcp_sock *dst, #define RET_LOG() ({ \ linum = __LINE__; \ - bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_NOEXIST); \ + bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_ANY); \ return CG_OK; \ }) diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h index 1858435de7aaf91a3f2ef12e1c8a74e9ee934c4e..5cb90ca29218643a08fe5edd832f0675dfaee38e 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +++ 
b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h @@ -235,7 +235,7 @@ SEC("sk_msg1") int bpf_prog4(struct sk_msg_md *msg) { int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5; - int *start, *end, *start_push, *end_push, *start_pop, *pop; + int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0; bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); if (bytes) @@ -249,8 +249,11 @@ int bpf_prog4(struct sk_msg_md *msg) bpf_msg_pull_data(msg, *start, *end, 0); start_push = bpf_map_lookup_elem(&sock_bytes, &two); end_push = bpf_map_lookup_elem(&sock_bytes, &three); - if (start_push && end_push) - bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (start_push && end_push) { + err = bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (err) + return SK_DROP; + } start_pop = bpf_map_lookup_elem(&sock_bytes, &four); pop = bpf_map_lookup_elem(&sock_bytes, &five); if (start_pop && pop) @@ -263,6 +266,7 @@ int bpf_prog6(struct sk_msg_md *msg) { int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0; int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f; + int err = 0; __u64 flags = 0; bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); @@ -279,8 +283,11 @@ int bpf_prog6(struct sk_msg_md *msg) start_push = bpf_map_lookup_elem(&sock_bytes, &two); end_push = bpf_map_lookup_elem(&sock_bytes, &three); - if (start_push && end_push) - bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (start_push && end_push) { + err = bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (err) + return SK_DROP; + } start_pop = bpf_map_lookup_elem(&sock_bytes, &four); pop = bpf_map_lookup_elem(&sock_bytes, &five); @@ -338,7 +345,7 @@ SEC("sk_msg5") int bpf_prog10(struct sk_msg_md *msg) { int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop; - int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5; + int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0; bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); if (bytes) @@ -352,8 +359,11 @@ int bpf_prog10(struct sk_msg_md *msg) bpf_msg_pull_data(msg, *start, *end, 0); start_push = bpf_map_lookup_elem(&sock_bytes, &two); end_push = bpf_map_lookup_elem(&sock_bytes, &three); - if (start_push && end_push) - bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (start_push && end_push) { + err = bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (err) + return SK_PASS; + } start_pop = bpf_map_lookup_elem(&sock_bytes, &four); pop = bpf_map_lookup_elem(&sock_bytes, &five); if (start_pop && pop) diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh index ec4e15948e40631cdb063a5d700310796e3386de..5252b91f48a18c7d632f47cf7140a2282bbf38bb 100755 --- a/tools/testing/selftests/bpf/test_lirc_mode2.sh +++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh @@ -3,6 +3,7 @@ # Kselftest framework requirement - SKIP code is 4. 
ksft_skip=4 +ret=$ksft_skip msg="skip all tests:" if [ $UID != 0 ]; then @@ -25,7 +26,7 @@ do fi done -if [ -n $LIRCDEV ]; +if [ -n "$LIRCDEV" ]; then TYPE=lirc_mode2 ./test_lirc_mode2_user $LIRCDEV $INPUTDEV @@ -36,3 +37,5 @@ then echo -e ${GREEN}"PASS: $TYPE"${NC} fi fi + +exit $ret diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh index b497bb85b667f78f0bfa393e5ffaca0766b345f0..6c69c42b1d607075646d1db87804dafd65055a04 100755 --- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh +++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh @@ -120,6 +120,14 @@ setup() ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0 ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0 + # disable IPv6 DAD because it sometimes takes too long and fails tests + ip netns exec ${NS1} sysctl -wq net.ipv6.conf.all.accept_dad=0 + ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.accept_dad=0 + ip netns exec ${NS3} sysctl -wq net.ipv6.conf.all.accept_dad=0 + ip netns exec ${NS1} sysctl -wq net.ipv6.conf.default.accept_dad=0 + ip netns exec ${NS2} sysctl -wq net.ipv6.conf.default.accept_dad=0 + ip netns exec ${NS3} sysctl -wq net.ipv6.conf.default.accept_dad=0 + ip link add veth1 type veth peer name veth2 ip link add veth3 type veth peer name veth4 ip link add veth5 type veth peer name veth6 @@ -289,7 +297,7 @@ test_ping() ip netns exec ${NS1} ping -c 1 -W 1 -I veth1 ${IPv4_DST} 2>&1 > /dev/null RET=$? elif [ "${PROTO}" == "IPv6" ] ; then - ip netns exec ${NS1} ping6 -c 1 -W 6 -I veth1 ${IPv6_DST} 2>&1 > /dev/null + ip netns exec ${NS1} ping6 -c 1 -W 1 -I veth1 ${IPv6_DST} 2>&1 > /dev/null RET=$? else echo " test_ping: unknown PROTO: ${PROTO}" diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index 05853b0b883181fa4dc784166f2828fb17cbbedf..391ab5675290aa8c651fe7109aaadd2829d6227d 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -17,6 +17,7 @@ #include "cgroup_util.h" #include "../clone3/clone3_selftests.h" +/* Returns read len on success, or -errno on failure. */ static ssize_t read_text(const char *path, char *buf, size_t max_len) { ssize_t len; @@ -24,35 +25,29 @@ static ssize_t read_text(const char *path, char *buf, size_t max_len) fd = open(path, O_RDONLY); if (fd < 0) - return fd; + return -errno; len = read(fd, buf, max_len - 1); - if (len < 0) - goto out; - buf[len] = 0; -out: + if (len >= 0) + buf[len] = 0; + close(fd); - return len; + return len < 0 ? -errno : len; } +/* Returns written len on success, or -errno on failure. */ static ssize_t write_text(const char *path, char *buf, ssize_t len) { int fd; fd = open(path, O_WRONLY | O_APPEND); if (fd < 0) - return fd; + return -errno; len = write(fd, buf, len); - if (len < 0) { - close(fd); - return len; - } - close(fd); - - return len; + return len < 0 ? -errno : len; } char *cg_name(const char *root, const char *name) @@ -85,16 +80,16 @@ char *cg_control(const char *cgroup, const char *control) return ret; } +/* Returns 0 on success, or -errno on failure. */ int cg_read(const char *cgroup, const char *control, char *buf, size_t len) { char path[PATH_MAX]; + ssize_t ret; snprintf(path, sizeof(path), "%s/%s", cgroup, control); - if (read_text(path, buf, len) >= 0) - return 0; - - return -1; + ret = read_text(path, buf, len); + return ret >= 0 ? 
0 : ret; } int cg_read_strcmp(const char *cgroup, const char *control, @@ -175,17 +170,15 @@ long cg_read_lc(const char *cgroup, const char *control) return cnt; } +/* Returns 0 on success, or -errno on failure. */ int cg_write(const char *cgroup, const char *control, char *buf) { char path[PATH_MAX]; - ssize_t len = strlen(buf); + ssize_t len = strlen(buf), ret; snprintf(path, sizeof(path), "%s/%s", cgroup, control); - - if (write_text(path, buf, len) == len) - return 0; - - return -1; + ret = write_text(path, buf, len); + return ret == len ? 0 : ret; } int cg_find_unified_root(char *root, size_t len) @@ -539,7 +532,8 @@ ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t else snprintf(path, sizeof(path), "/proc/%d/%s", pid, item); - return read_text(path, buf, size); + size = read_text(path, buf, size); + return size < 0 ? -1 : size; } int proc_read_strstr(int pid, bool thread, const char *item, const char *needle) diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c index c19a97dd02d4968fd25d1ede46d507fd23a19108..94e16e383bcf879ea73bb257cbfa67466a8b5e52 100644 --- a/tools/testing/selftests/cgroup/test_memcontrol.c +++ b/tools/testing/selftests/cgroup/test_memcontrol.c @@ -210,13 +210,17 @@ static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg) static int alloc_anon_noexit(const char *cgroup, void *arg) { int ppid = getppid(); + size_t size = (unsigned long)arg; + char *buf, *ptr; - if (alloc_anon(cgroup, arg)) - return -1; + buf = malloc(size); + for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE) + *ptr = 0; while (getppid() == ppid) sleep(1); + free(buf); return 0; } @@ -679,6 +683,111 @@ static int test_memcg_max(const char *root) return ret; } +/* + * This test checks that memory.reclaim reclaims the given + * amount of memory (from both anon and file, if possible). + */ +static int test_memcg_reclaim(const char *root) +{ + int ret = KSFT_FAIL, fd, retries; + char *memcg; + long current, expected_usage, to_reclaim; + char buf[64]; + + memcg = cg_name(root, "memcg_test"); + if (!memcg) + goto cleanup; + + if (cg_create(memcg)) + goto cleanup; + + current = cg_read_long(memcg, "memory.current"); + if (current != 0) + goto cleanup; + + fd = get_temp_fd(); + if (fd < 0) + goto cleanup; + + cg_run_nowait(memcg, alloc_pagecache_50M_noexit, (void *)(long)fd); + + /* + * If swap is enabled, try to reclaim from both anon and file, else try + * to reclaim from file only. + */ + if (is_swap_enabled()) { + cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(50)); + expected_usage = MB(100); + } else + expected_usage = MB(50); + + /* + * Wait until current usage reaches the expected usage (or we run out of + * retries). + */ + retries = 5; + while (!values_close(cg_read_long(memcg, "memory.current"), + expected_usage, 10)) { + if (retries--) { + sleep(1); + continue; + } else { + fprintf(stderr, + "failed to allocate %ld for memcg reclaim test\n", + expected_usage); + goto cleanup; + } + } + + /* + * Reclaim until current reaches 30M, this makes sure we hit both anon + * and file if swap is enabled. + */ + retries = 5; + while (true) { + int err; + + current = cg_read_long(memcg, "memory.current"); + to_reclaim = current - MB(30); + + /* + * We only keep looping if we get EAGAIN, which means we could + * not reclaim the full amount. 
+ */ + if (to_reclaim <= 0) + goto cleanup; + + + snprintf(buf, sizeof(buf), "%ld", to_reclaim); + err = cg_write(memcg, "memory.reclaim", buf); + if (!err) { + /* + * If writing succeeds, then the written amount should have been + * fully reclaimed (and maybe more). + */ + current = cg_read_long(memcg, "memory.current"); + if (!values_close(current, MB(30), 3) && current > MB(30)) + goto cleanup; + break; + } + + /* The kernel could not reclaim the full amount, try again. */ + if (err == -EAGAIN && retries--) + continue; + + /* We got an unexpected error or ran out of retries. */ + goto cleanup; + } + + ret = KSFT_PASS; +cleanup: + cg_destroy(memcg); + free(memcg); + close(fd); + + return ret; +} + static int alloc_anon_50M_check_swap(const char *cgroup, void *arg) { long mem_max = (long)arg; @@ -1181,6 +1290,7 @@ struct memcg_test { T(test_memcg_low), T(test_memcg_high), T(test_memcg_max), + T(test_memcg_reclaim), T(test_memcg_oom_events), T(test_memcg_swap_max), T(test_memcg_sock), diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh index 3e3e06ea5703cd93e73f619f4c39ffe16115eb80..86e787895f78b19300677529884f01e6eeb65cab 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh @@ -60,7 +60,8 @@ __tc_police_test() tc_police_rules_create $count $should_fail - offload_count=$(tc filter show dev $swp1 ingress | grep in_hw | wc -l) + offload_count=$(tc -j filter show dev $swp1 ingress | + jq "[.[] | select(.options.in_hw == true)] | length") ((offload_count == count)) check_err_fail $should_fail $? "tc police offload count" } diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c index 334a7eea200428b07d8964555783f9959d028aa0..fba322d1c67a1714d74e84d545087cc0aec955c6 100644 --- a/tools/testing/selftests/memfd/memfd_test.c +++ b/tools/testing/selftests/memfd/memfd_test.c @@ -455,6 +455,7 @@ static void mfd_fail_write(int fd) printf("mmap()+mprotect() didn't fail as expected\n"); abort(); } + munmap(p, mfd_def_size); } /* verify PUNCH_HOLE fails */ diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 3367fb5f2feff5bd161d3512362705e20d6c0395..3253fdc780d62909b659229b37b237a2893fa4d6 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -799,7 +799,6 @@ setup_ovs_bridge() { setup() { [ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip - cleanup for arg do eval setup_${arg} || { echo " ${arg} not supported"; return 1; } done @@ -810,7 +809,7 @@ trace() { for arg do [ "${ns_cmd}" = "" ] && ns_cmd="${arg}" && continue - ${ns_cmd} tcpdump -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null & + ${ns_cmd} tcpdump --immediate-mode -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null & tcpdump_pids="${tcpdump_pids} $!" ns_cmd= done @@ -1636,6 +1635,10 @@ run_test() { unset IFS + # Since cleanup() relies on variables modified by this subshell, it + # has to run in this context. 
+ trap cleanup EXIT + if [ "$VERBOSE" = "1" ]; then printf "\n##########################################################################\n\n" fi diff --git a/tools/testing/selftests/net/test_vxlan_under_vrf.sh b/tools/testing/selftests/net/test_vxlan_under_vrf.sh index 09f9ed92cbe4c8b6e1837698aa273b2f7848145b..a44b9aca7427234346b58a2477324e3bcddfb684 100755 --- a/tools/testing/selftests/net/test_vxlan_under_vrf.sh +++ b/tools/testing/selftests/net/test_vxlan_under_vrf.sh @@ -118,11 +118,11 @@ echo "[ OK ]" # Move the underlay to a non-default VRF ip -netns hv-1 link set veth0 vrf vrf-underlay -ip -netns hv-1 link set veth0 down -ip -netns hv-1 link set veth0 up +ip -netns hv-1 link set vxlan0 down +ip -netns hv-1 link set vxlan0 up ip -netns hv-2 link set veth0 vrf vrf-underlay -ip -netns hv-2 link set veth0 down -ip -netns hv-2 link set veth0 up +ip -netns hv-2 link set vxlan0 down +ip -netns hv-2 link set vxlan0 up echo -n "Check VM connectivity through VXLAN (underlay in a VRF) " ip netns exec vm-1 ping -c 1 -W 1 10.0.0.2 &> /dev/null || (echo "[FAIL]"; false) diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index b599f1fa99b55aba5d359163f35a9fc32713e5ee..3d39a735443121ced08ae5fe327326556909831e 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -25,26 +25,80 @@ #define TLS_PAYLOAD_MAX_LEN 16384 #define SOL_TLS 282 -FIXTURE(tls_basic) -{ - int fd, cfd; - bool notls; +struct tls_crypto_info_keys { + union { + struct tls12_crypto_info_aes_gcm_128 aes128; + struct tls12_crypto_info_sm4_gcm sm4gcm; + struct tls12_crypto_info_sm4_ccm sm4ccm; + struct tls12_crypto_info_aes_ccm_128 aesccm128; + struct tls12_crypto_info_aes_gcm_256 aesgcm256; + }; + size_t len; }; -FIXTURE_SETUP(tls_basic) +static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type, + struct tls_crypto_info_keys *tls12) +{ + memset(tls12, 0, sizeof(*tls12)); + + switch (cipher_type) { + case TLS_CIPHER_AES_GCM_128: + tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_128); + tls12->aes128.info.version = tls_version; + tls12->aes128.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_SM4_GCM: + tls12->len = sizeof(struct tls12_crypto_info_sm4_gcm); + tls12->sm4gcm.info.version = tls_version; + tls12->sm4gcm.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_SM4_CCM: + tls12->len = sizeof(struct tls12_crypto_info_sm4_ccm); + tls12->sm4ccm.info.version = tls_version; + tls12->sm4ccm.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_AES_CCM_128: + tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128); + tls12->aesccm128.info.version = tls_version; + tls12->aesccm128.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_AES_GCM_256: + tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256); + tls12->aesgcm256.info.version = tls_version; + tls12->aesgcm256.info.cipher_type = cipher_type; + break; + default: + break; + } +} + +static void memrnd(void *s, size_t n) +{ + int *dword = s; + char *byte; + + for (; n >= 4; n -= 4) + *dword++ = rand(); + byte = (void *)dword; + while (n--) + *byte++ = rand(); +} + +static void ulp_sock_pair(struct __test_metadata *_metadata, + int *fd, int *cfd, bool *notls) { struct sockaddr_in addr; socklen_t len; int sfd, ret; - self->notls = false; + *notls = false; len = sizeof(addr); addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl(INADDR_ANY); addr.sin_port = 0; - self->fd = socket(AF_INET, SOCK_STREAM, 0); + *fd = socket(AF_INET, SOCK_STREAM, 0); sfd = socket(AF_INET, 
SOCK_STREAM, 0); ret = bind(sfd, &addr, sizeof(addr)); @@ -55,26 +109,96 @@ FIXTURE_SETUP(tls_basic) ret = getsockname(sfd, &addr, &len); ASSERT_EQ(ret, 0); - ret = connect(self->fd, &addr, sizeof(addr)); + ret = connect(*fd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); - self->cfd = accept(sfd, &addr, &len); - ASSERT_GE(self->cfd, 0); + *cfd = accept(sfd, &addr, &len); + ASSERT_GE(*cfd, 0); close(sfd); - ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + ret = setsockopt(*fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); if (ret != 0) { ASSERT_EQ(errno, ENOENT); - self->notls = true; + *notls = true; printf("Failure setting TCP_ULP, testing without tls\n"); return; } - ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + ret = setsockopt(*cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); ASSERT_EQ(ret, 0); } +/* Produce a basic cmsg */ +static int tls_send_cmsg(int fd, unsigned char record_type, + void *data, size_t len, int flags) +{ + char cbuf[CMSG_SPACE(sizeof(char))]; + int cmsg_len = sizeof(char); + struct cmsghdr *cmsg; + struct msghdr msg; + struct iovec vec; + + vec.iov_base = data; + vec.iov_len = len; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_TLS; + /* test sending non-record types. */ + cmsg->cmsg_type = TLS_SET_RECORD_TYPE; + cmsg->cmsg_len = CMSG_LEN(cmsg_len); + *CMSG_DATA(cmsg) = record_type; + msg.msg_controllen = cmsg->cmsg_len; + + return sendmsg(fd, &msg, flags); +} + +static int tls_recv_cmsg(struct __test_metadata *_metadata, + int fd, unsigned char record_type, + void *data, size_t len, int flags) +{ + char cbuf[CMSG_SPACE(sizeof(char))]; + struct cmsghdr *cmsg; + unsigned char ctype; + struct msghdr msg; + struct iovec vec; + int n; + + vec.iov_base = data; + vec.iov_len = len; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + + n = recvmsg(fd, &msg, flags); + + cmsg = CMSG_FIRSTHDR(&msg); + EXPECT_NE(cmsg, NULL); + EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); + EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); + ctype = *((unsigned char *)CMSG_DATA(cmsg)); + EXPECT_EQ(ctype, record_type); + + return n; +} + +FIXTURE(tls_basic) +{ + int fd, cfd; + bool notls; +}; + +FIXTURE_SETUP(tls_basic) +{ + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); +} + FIXTURE_TEARDOWN(tls_basic) { close(self->fd); @@ -103,77 +227,76 @@ FIXTURE(tls) FIXTURE_VARIANT(tls) { - unsigned int tls_version; + uint16_t tls_version; + uint16_t cipher_type; }; -FIXTURE_VARIANT_ADD(tls, 12) +FIXTURE_VARIANT_ADD(tls, 12_aes_gcm) { .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_128, }; -FIXTURE_VARIANT_ADD(tls, 13) +FIXTURE_VARIANT_ADD(tls, 13_aes_gcm) { .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_128, }; -FIXTURE_SETUP(tls) +FIXTURE_VARIANT_ADD(tls, 13_sm4_gcm) { - struct tls12_crypto_info_aes_gcm_128 tls12; - struct sockaddr_in addr; - socklen_t len; - int sfd, ret; - - self->notls = false; - len = sizeof(addr); - - memset(&tls12, 0, sizeof(tls12)); - tls12.info.version = variant->tls_version; - tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128; + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_SM4_GCM, +}; - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_ANY); - addr.sin_port = 0; +FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm) +{ 
+ .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_SM4_CCM, +}; - self->fd = socket(AF_INET, SOCK_STREAM, 0); - sfd = socket(AF_INET, SOCK_STREAM, 0); +FIXTURE_VARIANT_ADD(tls, 12_aes_ccm) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; - ret = bind(sfd, &addr, sizeof(addr)); - ASSERT_EQ(ret, 0); - ret = listen(sfd, 10); - ASSERT_EQ(ret, 0); +FIXTURE_VARIANT_ADD(tls, 13_aes_ccm) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; - ret = getsockname(sfd, &addr, &len); - ASSERT_EQ(ret, 0); +FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; - ret = connect(self->fd, &addr, sizeof(addr)); - ASSERT_EQ(ret, 0); +FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; - ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); - if (ret != 0) { - self->notls = true; - printf("Failure setting TCP_ULP, testing without tls\n"); - } +FIXTURE_SETUP(tls) +{ + struct tls_crypto_info_keys tls12; + int ret; - if (!self->notls) { - ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, - sizeof(tls12)); - ASSERT_EQ(ret, 0); - } + tls_crypto_info_init(variant->tls_version, variant->cipher_type, + &tls12); - self->cfd = accept(sfd, &addr, &len); - ASSERT_GE(self->cfd, 0); + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); - if (!self->notls) { - ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", - sizeof("tls")); - ASSERT_EQ(ret, 0); + if (self->notls) + return; - ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, - sizeof(tls12)); - ASSERT_EQ(ret, 0); - } + ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); - close(sfd); + ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); } FIXTURE_TEARDOWN(tls) @@ -277,6 +400,8 @@ TEST_F(tls, recv_max) char recv_mem[TLS_PAYLOAD_MAX_LEN]; char buf[TLS_PAYLOAD_MAX_LEN]; + memrnd(buf, sizeof(buf)); + EXPECT_GE(send(self->fd, buf, send_len, 0), 0); EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1); EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0); @@ -387,8 +512,9 @@ TEST_F(tls, sendmsg_large) EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len); } - while (recvs++ < sends) + while (recvs++ < sends) { EXPECT_NE(recv(self->fd, mem, send_len, 0), -1); + } free(mem); } @@ -531,6 +657,101 @@ TEST_F(tls, splice_to_pipe) EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); } +TEST_F(tls, splice_cmsg_to_pipe) +{ + char *test_str = "test_read"; + char record_type = 100; + int send_len = 10; + char buf[10]; + int p[2]; + + if (self->notls) + SKIP(return, "no TLS support"); + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1); + EXPECT_EQ(errno, EINVAL); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(errno, EIO); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, splice_dec_cmsg_to_pipe) +{ + char *test_str = "test_read"; + char record_type = 100; + int send_len = 10; + char buf[10]; + int p[2]; + + if (self->notls) + SKIP(return, "no TLS support"); + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(errno, EIO); + 
EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1); + EXPECT_EQ(errno, EINVAL); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, recv_and_splice) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int half = send_len / 2; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len); + /* Recv hald of the record, splice the other half */ + EXPECT_EQ(recv(self->cfd, mem_recv, half, MSG_WAITALL), half); + EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, half, SPLICE_F_NONBLOCK), + half); + EXPECT_EQ(read(p[0], &mem_recv[half], half), half); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, peek_and_splice) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int chunk = TLS_PAYLOAD_MAX_LEN / 4; + int n, i, p[2]; + + memrnd(mem_send, sizeof(mem_send)); + + ASSERT_GE(pipe(p), 0); + for (i = 0; i < 4; i++) + EXPECT_EQ(send(self->fd, &mem_send[chunk * i], chunk, 0), + chunk); + + EXPECT_EQ(recv(self->cfd, mem_recv, chunk * 5 / 2, + MSG_WAITALL | MSG_PEEK), + chunk * 5 / 2); + EXPECT_EQ(memcmp(mem_send, mem_recv, chunk * 5 / 2), 0); + + n = 0; + while (n < send_len) { + i = splice(self->cfd, NULL, p[1], NULL, send_len - n, 0); + EXPECT_GT(i, 0); + n += i; + } + EXPECT_EQ(n, send_len); + EXPECT_EQ(read(p[0], mem_recv, send_len), send_len); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + TEST_F(tls, recvmsg_single) { char const *test_str = "test_recvmsg_single"; @@ -557,6 +778,8 @@ TEST_F(tls, recvmsg_single_max) struct iovec vec; struct msghdr hdr; + memrnd(send_mem, sizeof(send_mem)); + EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len); vec.iov_base = (char *)recv_mem; vec.iov_len = TLS_PAYLOAD_MAX_LEN; @@ -579,6 +802,8 @@ TEST_F(tls, recvmsg_multiple) struct msghdr hdr; int i; + memrnd(buf, sizeof(buf)); + EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len); for (i = 0; i < msg_iovlen; i++) { iov_base[i] = (char *)malloc(iov_len); @@ -603,6 +828,8 @@ TEST_F(tls, single_send_multiple_recv) char send_mem[TLS_PAYLOAD_MAX_LEN * 2]; char recv_mem[TLS_PAYLOAD_MAX_LEN * 2]; + memrnd(send_mem, sizeof(send_mem)); + EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0); memset(recv_mem, 0, total_len); @@ -803,18 +1030,17 @@ TEST_F(tls, bidir) int ret; if (!self->notls) { - struct tls12_crypto_info_aes_gcm_128 tls12; + struct tls_crypto_info_keys tls12; - memset(&tls12, 0, sizeof(tls12)); - tls12.info.version = variant->tls_version; - tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128; + tls_crypto_info_init(variant->tls_version, variant->cipher_type, + &tls12); ret = setsockopt(self->fd, SOL_TLS, TLS_RX, &tls12, - sizeof(tls12)); + tls12.len); ASSERT_EQ(ret, 0); ret = setsockopt(self->cfd, SOL_TLS, TLS_TX, &tls12, - sizeof(tls12)); + tls12.len); ASSERT_EQ(ret, 0); } @@ -1109,60 +1335,30 @@ TEST_F(tls, mutliproc_sendpage_writers) TEST_F(tls, control_msg) { - if (self->notls) - return; - - char cbuf[CMSG_SPACE(sizeof(char))]; - char const *test_str = "test_read"; - int cmsg_len = sizeof(char); + char *test_str = "test_read"; char record_type = 100; - struct cmsghdr *cmsg; - struct msghdr msg; int send_len = 10; - struct iovec vec; char buf[10]; - vec.iov_base = (char *)test_str; - vec.iov_len = 10; - memset(&msg, 0, sizeof(struct msghdr)); - msg.msg_iov = &vec; - 
msg.msg_iovlen = 1; - msg.msg_control = cbuf; - msg.msg_controllen = sizeof(cbuf); - cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_level = SOL_TLS; - /* test sending non-record types. */ - cmsg->cmsg_type = TLS_SET_RECORD_TYPE; - cmsg->cmsg_len = CMSG_LEN(cmsg_len); - *CMSG_DATA(cmsg) = record_type; - msg.msg_controllen = cmsg->cmsg_len; + if (self->notls) + SKIP(return, "no TLS support"); - EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len); + EXPECT_EQ(tls_send_cmsg(self->fd, record_type, test_str, send_len, 0), + send_len); /* Should fail because we didn't provide a control message */ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); - vec.iov_base = buf; - EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL | MSG_PEEK), send_len); - - cmsg = CMSG_FIRSTHDR(&msg); - EXPECT_NE(cmsg, NULL); - EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); - EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); - record_type = *((unsigned char *)CMSG_DATA(cmsg)); - EXPECT_EQ(record_type, 100); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL | MSG_PEEK), + send_len); EXPECT_EQ(memcmp(buf, test_str, send_len), 0); /* Recv the message again without MSG_PEEK */ - record_type = 0; memset(buf, 0, sizeof(buf)); - EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len); - cmsg = CMSG_FIRSTHDR(&msg); - EXPECT_NE(cmsg, NULL); - EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); - EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); - record_type = *((unsigned char *)CMSG_DATA(cmsg)); - EXPECT_EQ(record_type, 100); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); EXPECT_EQ(memcmp(buf, test_str, send_len), 0); } @@ -1217,6 +1413,160 @@ TEST_F(tls, shutdown_reuse) EXPECT_EQ(errno, EISCONN); } +FIXTURE(tls_err) +{ + int fd, cfd; + int fd2, cfd2; + bool notls; +}; + +FIXTURE_VARIANT(tls_err) +{ + uint16_t tls_version; +}; + +FIXTURE_VARIANT_ADD(tls_err, 12_aes_gcm) +{ + .tls_version = TLS_1_2_VERSION, +}; + +FIXTURE_VARIANT_ADD(tls_err, 13_aes_gcm) +{ + .tls_version = TLS_1_3_VERSION, +}; + +FIXTURE_SETUP(tls_err) +{ + struct tls_crypto_info_keys tls12; + int ret; + + tls_crypto_info_init(variant->tls_version, TLS_CIPHER_AES_GCM_128, + &tls12); + + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); + ulp_sock_pair(_metadata, &self->fd2, &self->cfd2, &self->notls); + if (self->notls) + return; + + ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); + + ret = setsockopt(self->cfd2, SOL_TLS, TLS_RX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); +} + +FIXTURE_TEARDOWN(tls_err) +{ + close(self->fd); + close(self->cfd); + close(self->fd2); + close(self->cfd2); +} + +TEST_F(tls_err, bad_rec) +{ + char buf[64]; + + if (self->notls) + SKIP(return, "no TLS support"); + + memset(buf, 0x55, sizeof(buf)); + EXPECT_EQ(send(self->fd2, buf, sizeof(buf), 0), sizeof(buf)); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EMSGSIZE); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), MSG_DONTWAIT), -1); + EXPECT_EQ(errno, EAGAIN); +} + +TEST_F(tls_err, bad_auth) +{ + char buf[128]; + int n; + + if (self->notls) + SKIP(return, "no TLS support"); + + memrnd(buf, sizeof(buf) / 2); + EXPECT_EQ(send(self->fd, buf, sizeof(buf) / 2, 0), sizeof(buf) / 2); + n = recv(self->cfd, buf, sizeof(buf), 0); + EXPECT_GT(n, sizeof(buf) / 2); + + buf[n - 1]++; + + EXPECT_EQ(send(self->fd2, buf, n, 0), n); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), 
-1); + EXPECT_EQ(errno, EBADMSG); +} + +TEST_F(tls_err, bad_in_large_read) +{ + char txt[3][64]; + char cip[3][128]; + char buf[3 * 128]; + int i, n; + + if (self->notls) + SKIP(return, "no TLS support"); + + /* Put 3 records in the sockets */ + for (i = 0; i < 3; i++) { + memrnd(txt[i], sizeof(txt[i])); + EXPECT_EQ(send(self->fd, txt[i], sizeof(txt[i]), 0), + sizeof(txt[i])); + n = recv(self->cfd, cip[i], sizeof(cip[i]), 0); + EXPECT_GT(n, sizeof(txt[i])); + /* Break the third message */ + if (i == 2) + cip[2][n - 1]++; + EXPECT_EQ(send(self->fd2, cip[i], n, 0), n); + } + + /* We should be able to receive the first two messages */ + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt[0]) * 2); + EXPECT_EQ(memcmp(buf, txt[0], sizeof(txt[0])), 0); + EXPECT_EQ(memcmp(buf + sizeof(txt[0]), txt[1], sizeof(txt[1])), 0); + /* Third message is bad */ + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); +} + +TEST_F(tls_err, bad_cmsg) +{ + char *test_str = "test_read"; + int send_len = 10; + char cip[128]; + char buf[128]; + char txt[64]; + int n; + + if (self->notls) + SKIP(return, "no TLS support"); + + /* Queue up one data record */ + memrnd(txt, sizeof(txt)); + EXPECT_EQ(send(self->fd, txt, sizeof(txt), 0), sizeof(txt)); + n = recv(self->cfd, cip, sizeof(cip), 0); + EXPECT_GT(n, sizeof(txt)); + EXPECT_EQ(send(self->fd2, cip, n, 0), n); + + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + n = recv(self->cfd, cip, sizeof(cip), 0); + cip[n - 1]++; /* Break it */ + EXPECT_GT(n, send_len); + EXPECT_EQ(send(self->fd2, cip, n, 0), n); + + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt)); + EXPECT_EQ(memcmp(buf, txt, sizeof(txt)), 0); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); +} + TEST(non_established) { struct tls12_crypto_info_aes_gcm_256 tls12; struct sockaddr_in addr; @@ -1271,64 +1621,82 @@ TEST(non_established) { TEST(keysizes) { struct tls12_crypto_info_aes_gcm_256 tls12; - struct sockaddr_in addr; - int sfd, ret, fd, cfd; - socklen_t len; + int ret, fd, cfd; bool notls; - notls = false; - len = sizeof(addr); - memset(&tls12, 0, sizeof(tls12)); tls12.info.version = TLS_1_2_VERSION; tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256; - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_ANY); - addr.sin_port = 0; + ulp_sock_pair(_metadata, &fd, &cfd, &notls); - fd = socket(AF_INET, SOCK_STREAM, 0); - sfd = socket(AF_INET, SOCK_STREAM, 0); + if (!notls) { + ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, + sizeof(tls12)); + EXPECT_EQ(ret, 0); + + ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, + sizeof(tls12)); + EXPECT_EQ(ret, 0); + } + + close(fd); + close(cfd); +} + +TEST(tls_v6ops) { + struct tls_crypto_info_keys tls12; + struct sockaddr_in6 addr, addr2; + int sfd, ret, fd; + socklen_t len, len2; + + tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_128, &tls12); + + addr.sin6_family = AF_INET6; + addr.sin6_addr = in6addr_any; + addr.sin6_port = 0; + + fd = socket(AF_INET6, SOCK_STREAM, 0); + sfd = socket(AF_INET6, SOCK_STREAM, 0); ret = bind(sfd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); ret = listen(sfd, 10); ASSERT_EQ(ret, 0); + len = sizeof(addr); ret = getsockname(sfd, &addr, &len); ASSERT_EQ(ret, 0); ret = connect(fd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); + len = sizeof(addr); + ret =
getsockname(fd, &addr, &len); + ASSERT_EQ(ret, 0); + ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); - if (ret != 0) { - notls = true; - printf("Failure setting TCP_ULP, testing without tls\n"); + if (ret) { + ASSERT_EQ(errno, ENOENT); + SKIP(return, "no TLS support"); } + ASSERT_EQ(ret, 0); - if (!notls) { - ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, - sizeof(tls12)); - EXPECT_EQ(ret, 0); - } + ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); - cfd = accept(sfd, &addr, &len); - ASSERT_GE(cfd, 0); + ret = setsockopt(fd, SOL_TLS, TLS_RX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); - if (!notls) { - ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", - sizeof("tls")); - EXPECT_EQ(ret, 0); + len2 = sizeof(addr2); + ret = getsockname(fd, &addr2, &len2); + ASSERT_EQ(ret, 0); - ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, - sizeof(tls12)); - EXPECT_EQ(ret, 0); - } + EXPECT_EQ(len2, len); + EXPECT_EQ(memcmp(&addr, &addr2, len), 0); - close(sfd); close(fd); - close(cfd); + close(sfd); } TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile index 0ebfe8b0e147fa9bd4d3bd895774393cf0d32c28..585f7a0c10cbea67d418366c94b3415cf9e00bfe 100644 --- a/tools/testing/selftests/seccomp/Makefile +++ b/tools/testing/selftests/seccomp/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -CFLAGS += -Wl,-no-as-needed -Wall +CFLAGS += -Wl,-no-as-needed -Wall -isystem ../../../../usr/include/ LDFLAGS += -lpthread TEST_GEN_PROGS := seccomp_bpf seccomp_benchmark diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index b2ed35f244f9207b53320436630c337bf02eb418..b150cc837177a1227dc036f9c2fd1fa552a2d4cc 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -41,9 +41,9 @@ TEST_GEN_FILES += userfaultfd TEST_GEN_FILES += khugepaged ifeq ($(MACHINE),x86_64) -CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_32bit_program.c -m32) -CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_64bit_program.c) -CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_program.c -no-pie) +CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_program.c -m32) +CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_64bit_program.c) +CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_program.c -no-pie) TARGETS := protection_keys BINARIES_32 := $(TARGETS:%=%_32) diff --git a/tools/testing/selftests/vm/map_fixed_noreplace.c b/tools/testing/selftests/vm/map_fixed_noreplace.c index d91bde511268667ab183b98bdd1fa99258173df9..eed44322d1a635e6405bd6bc79eacf434a76697d 100644 --- a/tools/testing/selftests/vm/map_fixed_noreplace.c +++ b/tools/testing/selftests/vm/map_fixed_noreplace.c @@ -17,9 +17,6 @@ #define MAP_FIXED_NOREPLACE 0x100000 #endif -#define BASE_ADDRESS (256ul * 1024 * 1024) - - static void dump_maps(void) { char cmd[32]; @@ -28,18 +25,46 @@ static void dump_maps(void) system(cmd); } +static unsigned long find_base_addr(unsigned long size) +{ + void *addr; + unsigned long flags; + + flags = MAP_PRIVATE | MAP_ANONYMOUS; + addr = mmap(NULL, size, PROT_NONE, flags, -1, 0); + if (addr == MAP_FAILED) { + printf("Error: couldn't map the space we need for the test\n"); + return 0; + } + + if (munmap(addr, size) != 0) { + printf("Error: couldn't map the space we need for the test\n"); + return 0; + } + return (unsigned long)addr; +} + int 
main(void) { + unsigned long base_addr; unsigned long flags, addr, size, page_size; char *p; page_size = sysconf(_SC_PAGE_SIZE); + //let's find a base addr that is free before we start the tests + size = 5 * page_size; + base_addr = find_base_addr(size); + if (!base_addr) { + printf("Error: couldn't map the space we need for the test\n"); + return 1; + } + flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE; // Check we can map all the areas we need below errno = 0; - addr = BASE_ADDRESS; + addr = base_addr; size = 5 * page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); @@ -60,7 +85,7 @@ int main(void) printf("unmap() successful\n"); errno = 0; - addr = BASE_ADDRESS + page_size; + addr = base_addr + page_size; size = 3 * page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -80,7 +105,7 @@ int main(void) * +4 | free | new */ errno = 0; - addr = BASE_ADDRESS; + addr = base_addr; size = 5 * page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -101,7 +126,7 @@ int main(void) * +4 | free | */ errno = 0; - addr = BASE_ADDRESS + (2 * page_size); + addr = base_addr + (2 * page_size); size = page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -121,7 +146,7 @@ int main(void) * +4 | free | new */ errno = 0; - addr = BASE_ADDRESS + (3 * page_size); + addr = base_addr + (3 * page_size); size = 2 * page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -141,7 +166,7 @@ int main(void) * +4 | free | */ errno = 0; - addr = BASE_ADDRESS; + addr = base_addr; size = 2 * page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -161,7 +186,7 @@ int main(void) * +4 | free | */ errno = 0; - addr = BASE_ADDRESS; + addr = base_addr; size = page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -181,7 +206,7 @@ int main(void) * +4 | free | new */ errno = 0; - addr = BASE_ADDRESS + (4 * page_size); + addr = base_addr + (4 * page_size); size = page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -192,7 +217,7 @@ int main(void) return 1; } - addr = BASE_ADDRESS; + addr = base_addr; size = 5 * page_size; if (munmap((void *)addr, size) != 0) { dump_maps(); diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index d418ca5f903997a97acc62bbfdbd7463418843c1..034245ea397f6ce032067a5e8e1f7a0e6f9f9fae 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 458ca0209dcf4342358330831aae3c1c1dbbab84..9c4c75f06396ceb1e21cdd1b2bdaf944f0a0ccb5 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile @@ -6,9 +6,9 @@ include ../lib.mk .PHONY: all all_32 all_64 warn_32bit_failure clean UNAME_M := $(shell uname -m) -CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) -CAN_BUILD_X86_64 := $(shell 
./check_cc.sh $(CC) trivial_64bit_program.c) -CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh $(CC) trivial_program.c -no-pie) +CAN_BUILD_I386 := $(shell ./check_cc.sh "$(CC)" trivial_32bit_program.c -m32) +CAN_BUILD_X86_64 := $(shell ./check_cc.sh "$(CC)" trivial_64bit_program.c) +CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh "$(CC)" trivial_program.c -no-pie) TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \ check_initial_reg_state sigreturn iopl ioperm \ diff --git a/tools/testing/selftests/x86/check_cc.sh b/tools/testing/selftests/x86/check_cc.sh index 3e2089c8cf54967fdd375ec151a15827e104fc53..8c669c0d662ee275f27132d2fcab174660ab4c81 100755 --- a/tools/testing/selftests/x86/check_cc.sh +++ b/tools/testing/selftests/x86/check_cc.sh @@ -7,7 +7,7 @@ CC="$1" TESTPROG="$2" shift 2 -if "$CC" -o /dev/null "$TESTPROG" -O0 "$@" 2>/dev/null; then +if [ -n "$CC" ] && $CC -o /dev/null "$TESTPROG" -O0 "$@" 2>/dev/null; then echo 1 else echo 0 diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c index cb3f29c09aff3522b6225f1710d71e27097de9a2..23f142af544ad796146361cc81ce3315d303f42e 100644 --- a/tools/virtio/virtio_test.c +++ b/tools/virtio/virtio_test.c @@ -130,6 +130,7 @@ static void vdev_info_init(struct vdev_info* dev, unsigned long long features) memset(dev, 0, sizeof *dev); dev->vdev.features = features; INIT_LIST_HEAD(&dev->vdev.vqs); + spin_lock_init(&dev->vdev.vqs_list_lock); dev->buf_size = 1024; dev->buf = malloc(dev->buf_size); assert(dev->buf); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 29bdd17fc135af23649836a1fa051eff96d95767..a5c107bbf022fd066a8842b7e31895ed8fd9d84a 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -114,6 +114,8 @@ EXPORT_SYMBOL_GPL(kvm_debugfs_dir); static int kvm_debugfs_num_entries; static const struct file_operations stat_fops_per_vm; +static struct file_operations kvm_chardev_ops; + static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, unsigned long arg); #ifdef CONFIG_KVM_COMPAT @@ -162,6 +164,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, { } +__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) +{ +} + bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) { /* @@ -338,6 +344,12 @@ void kvm_flush_remote_tlbs(struct kvm *kvm) EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); #endif +static void kvm_flush_shadow_all(struct kvm *kvm) +{ + kvm_arch_flush_shadow_all(kvm); + kvm_arch_guest_memory_reclaimed(kvm); +} + void kvm_reload_remote_mmus(struct kvm *kvm) { kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); @@ -492,6 +504,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); + kvm_arch_guest_memory_reclaimed(kvm); srcu_read_unlock(&kvm->srcu, idx); return 0; @@ -595,7 +608,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, int idx; idx = srcu_read_lock(&kvm->srcu); - kvm_arch_flush_shadow_all(kvm); + kvm_flush_shadow_all(kvm); srcu_read_unlock(&kvm->srcu, idx); } @@ -823,6 +836,16 @@ static struct kvm *kvm_create_vm(unsigned long type) preempt_notifier_inc(); + /* + * When the fd passed to this ioctl() is opened it pins the module, + * but try_module_get() also prevents getting a reference if the module + * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait"). 
+	 */
+	if (!try_module_get(kvm_chardev_ops.owner)) {
+		r = -ENODEV;
+		goto out_err;
+	}
+
 	return kvm;
 
 out_err:
@@ -892,7 +915,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 #else
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
@@ -904,6 +927,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	preempt_notifier_dec();
 	hardware_disable_all();
 	mmdrop(mm);
+	module_put(kvm_chardev_ops.owner);
 }
 
 void kvm_get_kvm(struct kvm *kvm)
@@ -1233,6 +1257,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 		 * - kvm_is_visible_gfn (mmu_check_root)
 		 */
 		kvm_arch_flush_shadow_memslot(kvm, slot);
+		kvm_arch_guest_memory_reclaimed(kvm);
 	}
 
 	r = kvm_arch_prepare_memory_region(kvm, new, mem, change);