diff --git a/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32 b/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32 deleted file mode 100644 index 73498ff666bd74a92269bf5e1006d34571869233..0000000000000000000000000000000000000000 --- a/Documentation/ABI/testing/sysfs-bus-iio-lptimer-stm32 +++ /dev/null @@ -1,62 +0,0 @@ -What: /sys/bus/iio/devices/iio:deviceX/in_count0_preset -KernelVersion: 4.13 -Contact: fabrice.gasnier@st.com -Description: - Reading returns the current preset value. Writing sets the - preset value. Encoder counts continuously from 0 to preset - value, depending on direction (up/down). - -What: /sys/bus/iio/devices/iio:deviceX/in_count_quadrature_mode_available -KernelVersion: 4.13 -Contact: fabrice.gasnier@st.com -Description: - Reading returns the list possible quadrature modes. - -What: /sys/bus/iio/devices/iio:deviceX/in_count0_quadrature_mode -KernelVersion: 4.13 -Contact: fabrice.gasnier@st.com -Description: - Configure the device counter quadrature modes: - - - non-quadrature: - Encoder IN1 input servers as the count input (up - direction). - - - quadrature: - Encoder IN1 and IN2 inputs are mixed to get direction - and count. - -What: /sys/bus/iio/devices/iio:deviceX/in_count_polarity_available -KernelVersion: 4.13 -Contact: fabrice.gasnier@st.com -Description: - Reading returns the list possible active edges. - -What: /sys/bus/iio/devices/iio:deviceX/in_count0_polarity -KernelVersion: 4.13 -Contact: fabrice.gasnier@st.com -Description: - Configure the device encoder/counter active edge: - - - rising-edge - - falling-edge - - both-edges - - In non-quadrature mode, device counts up on active edge. - - In quadrature mode, encoder counting scenarios are as follows: - - +---------+----------+--------------------+--------------------+ - | Active | Level on | IN1 signal | IN2 signal | - | edge | opposite +----------+---------+----------+---------+ - | | signal | Rising | Falling | Rising | Falling | - +---------+----------+----------+---------+----------+---------+ - | Rising | High -> | Down | - | Up | - | - | edge | Low -> | Up | - | Down | - | - +---------+----------+----------+---------+----------+---------+ - | Falling | High -> | - | Up | - | Down | - | edge | Low -> | - | Down | - | Up | - +---------+----------+----------+---------+----------+---------+ - | Both | High -> | Down | Up | Up | Down | - | edges | Low -> | Up | Down | Down | Up | - +---------+----------+----------+---------+----------+---------+ diff --git a/Documentation/ABI/testing/sysfs-class-intel_pmt b/Documentation/ABI/testing/sysfs-class-intel_pmt new file mode 100644 index 0000000000000000000000000000000000000000..ed4c886a21b1ee1640d21f11e5347fa06a5b95b6 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-intel_pmt @@ -0,0 +1,119 @@ +What: /sys/class/intel_pmt/ +Date: October 2020 +KernelVersion: 5.10 +Contact: David Box +Description: + The intel_pmt/ class directory contains information for + devices that expose hardware telemetry using Intel Platform + Monitoring Technology (PMT) + +What: /sys/class/intel_pmt/telem +Date: October 2020 +KernelVersion: 5.10 +Contact: David Box +Description: + The telem directory contains files describing an instance of + a PMT telemetry device that exposes hardware telemetry. Each + telem directory has an associated telem file. This file + may be opened and mapped or read to access the telemetry space + of the device. 
The register layout of the telemetry space is + determined from an XML file that matches the PCI device id and + GUID for the device. + +What: /sys/class/intel_pmt/telem/telem +Date: October 2020 +KernelVersion: 5.10 +Contact: David Box +Description: + (RO) The telemetry data for this telemetry device. This file + may be mapped or read to obtain the data. + +What: /sys/class/intel_pmt/telem/guid +Date: October 2020 +KernelVersion: 5.10 +Contact: David Box +Description: + (RO) The GUID for this telemetry device. The GUID identifies + the version of the XML file for the parent device that is to + be used to get the register layout. + +What: /sys/class/intel_pmt/telem/size +Date: October 2020 +KernelVersion: 5.10 +Contact: David Box +Description: + (RO) The size of the telemetry region in bytes, which corresponds to + the mapping size for the telem file. + +What: /sys/class/intel_pmt/telem/offset +Date: October 2020 +KernelVersion: 5.10 +Contact: David Box +Description: + (RO) The offset of the telemetry region in bytes, which corresponds to + the mapping for the telem file. + +What: /sys/class/intel_pmt/crashlog +Date: October 2020 +KernelVersion: 5.10 +Contact: Alexander Duyck +Description: + The crashlog directory contains files for configuring an + instance of a PMT crashlog device that can perform crash data + recording. Each crashlog device has an associated crashlog + file. This file can be opened and mapped or read to access the + resulting crashlog buffer. The register layout for the buffer + can be determined from an XML file matching the GUID of the + parent device. + +What: /sys/class/intel_pmt/crashlog/crashlog +Date: October 2020 +KernelVersion: 5.10 +Contact: David Box +Description: + (RO) The crashlog buffer for this crashlog device. This file + may be mapped or read to obtain the data. + +What: /sys/class/intel_pmt/crashlog/guid +Date: October 2020 +KernelVersion: 5.10 +Contact: Alexander Duyck +Description: + (RO) The GUID for this crashlog device. The GUID identifies the + version of the XML file for the parent device that should be + used to determine the register layout. + +What: /sys/class/intel_pmt/crashlog/size +Date: October 2020 +KernelVersion: 5.10 +Contact: Alexander Duyck +Description: + (RO) The length of the result buffer in bytes, which corresponds + to the size of the crashlog buffer. + +What: /sys/class/intel_pmt/crashlog/offset +Date: October 2020 +KernelVersion: 5.10 +Contact: Alexander Duyck +Description: + (RO) The offset of the buffer in bytes, which corresponds + to the mapping for the crashlog device. + +What: /sys/class/intel_pmt/crashlog/enable +Date: October 2020 +KernelVersion: 5.10 +Contact: Alexander Duyck +Description: + (RW) Boolean value controlling whether the crashlog functionality + is enabled for the crashlog device. + +What: /sys/class/intel_pmt/crashlog/trigger +Date: October 2020 +KernelVersion: 5.10 +Contact: Alexander Duyck +Description: + (RW) Boolean value controlling the triggering of the crashlog + device node. When read, it reports whether the crashlog has + been triggered. When written to, it can be used either to clear + the current trigger by writing false, or to trigger a new + event if the trigger is not currently set.
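As an illustrative sketch only (not part of the ABI description above; the instance name "telem0" is hypothetical, and device names, GUIDs and sizes vary by platform), the telem attributes can be exercised from a shell with standard tools::

    # cd /sys/class/intel_pmt/telem0
    # cat guid size offset     # identify the XML layout and the mapping geometry
    # dd if=telem bs=$(cat size) count=1 status=none | hexdump -C | head

Decoding the dumped bytes requires the register layout from the XML file matching the reported GUID, as described above.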
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 1a04ca8162ad882483e402256059446fea8d6943..44c6e57303988cf527e9e8f9619071b2d8571f46 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -510,6 +510,7 @@ What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/srbds /sys/devices/system/cpu/vulnerabilities/tsx_async_abort /sys/devices/system/cpu/vulnerabilities/itlb_multihit + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data Date: January 2018 Contact: Linux kernel mailing list Description: Information about CPU vulnerabilities diff --git a/Documentation/accounting/psi.rst b/Documentation/accounting/psi.rst index f2b3439edcc2cc772c7f92cd3510060df6b5e8b8..5e40b3f437f90c2d202198aa6bfac5707fa7ac63 100644 --- a/Documentation/accounting/psi.rst +++ b/Documentation/accounting/psi.rst @@ -37,11 +37,7 @@ Pressure interface Pressure information for each resource is exported through the respective file in /proc/pressure/ -- cpu, memory, and io. -The format for CPU is as such:: - - some avg10=0.00 avg60=0.00 avg300=0.00 total=0 - -and for memory and IO:: +The format is as such:: some avg10=0.00 avg60=0.00 avg300=0.00 total=0 full avg10=0.00 avg60=0.00 avg300=0.00 total=0 @@ -58,6 +54,9 @@ situation from a state where some tasks are stalled but the CPU is still doing productive work. As such, time spent in this subset of the stall state is tracked separately and exported in the "full" averages. +CPU full is undefined at the system level, but has been reported +since 5.13, so it is set to zero for backward compatibility. + The ratios (in %) are tracked as recent trends over ten, sixty, and three hundred second windows, which gives insight into short term events as well as medium and long term trends. The total absolute stall time @@ -92,7 +91,8 @@ Triggers can be set on more than one psi metric and more than one trigger for the same psi metric can be specified. However for each trigger a separate file descriptor is required to be able to poll it separately from others, therefore for each trigger a separate open() syscall should be made even -when opening the same psi interface file. +when opening the same psi interface file. Write operations to a file descriptor +with an already existing psi trigger will fail with EBUSY. Monitors activate only when system enters stall state for the monitored psi metric and deactivates upon exit from the stall state. While system is diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 608d7c279396b5832a07dea3a1d5ce3c75c24142..5d9b7e552fb0e2112eebc55e94f71e0c75a09bb9 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -997,6 +997,8 @@ All time durations are in microseconds. - nr_periods - nr_throttled - throttled_usec + - nr_bursts + - burst_usec cpu.weight A read-write single value file which exists on non-root @@ -1028,6 +1030,12 @@ All time durations are in microseconds. $PERIOD duration. "max" for $MAX indicates no limit. If only one number is written, $MAX is updated. + cpu.max.burst + A read-write single value file which exists on non-root + cgroups. The default is "0". + + The burst in the range [0, $MAX]. + cpu.pressure A read-only nested-key file which exists on non-root cgroups. @@ -1181,6 +1189,27 @@ PAGE_SIZE multiple when read back. 
high limit is used and monitored properly, this limit's utility is limited to providing the final safety net. + memory.reclaim + A write-only nested-keyed file which exists for all cgroups. + + This is a simple interface to trigger memory reclaim in the + target cgroup. + + This file accepts a single key, the number of bytes to reclaim. + No nested keys are currently supported. + + Example:: + + echo "1G" > memory.reclaim + + The interface can be later extended with nested keys to + configure the reclaim behavior. For example, specify the + type of memory to reclaim from (anon, file, ..). + + Please note that the kernel can over- or under-reclaim from + the target cgroup. If fewer bytes are reclaimed than the + specified amount, -EAGAIN is returned. + memory.oom.group A read-write single value file which exists on non-root cgroups. The default value is "0". diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index ca4dbdd9016d5a873b11381dbe75bdff5ee13038..2adec1e6520a68e4d6dd9c52bd86f48158e30957 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -15,3 +15,4 @@ are configurable at compile, boot or run time. tsx_async_abort multihit.rst special-register-buffer-data-sampling.rst + processor_mmio_stale_data.rst diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst new file mode 100644 index 0000000000000000000000000000000000000000..9393c50b5afc9c9fe8b9ac90ed9fe4774e1d1550 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst @@ -0,0 +1,246 @@ +========================================= +Processor MMIO Stale Data Vulnerabilities +========================================= + +Processor MMIO Stale Data Vulnerabilities are a class of memory-mapped I/O +(MMIO) vulnerabilities that can expose data. The sequences of operations for +exposing data range from simple to very complex. Because most of the +vulnerabilities require the attacker to have access to MMIO, many environments +are not affected. System environments using virtualization where MMIO access is +provided to untrusted guests may need mitigation. These vulnerabilities are +not transient execution attacks. However, these vulnerabilities may propagate +stale data into core fill buffers where the data can subsequently be inferred +by an unmitigated transient execution attack. Mitigation for these +vulnerabilities includes a combination of microcode update and software +changes, depending on the platform and usage model. Some of these mitigations +are similar to those used to mitigate Microarchitectural Data Sampling (MDS) or +those used to mitigate Special Register Buffer Data Sampling (SRBDS). + +Data Propagators +================ +Propagators are operations that result in stale data being copied or moved from +one microarchitectural buffer or register to another. Processor MMIO Stale Data +Vulnerabilities are operations that may result in stale data being directly +read into an architectural, software-visible state or sampled from a buffer or +register. + +Fill Buffer Stale Data Propagator (FBSDP) +----------------------------------------- +Stale data may propagate from fill buffers (FB) into the non-coherent portion +of the uncore on some non-coherent writes. Fill buffer propagation by itself +does not make stale data architecturally visible.
Stale data must be propagated +to a location where it is subject to reading or sampling. + +Sideband Stale Data Propagator (SSDP) +------------------------------------- +The sideband stale data propagator (SSDP) is limited to the client (including +Intel Xeon server E3) uncore implementation. The sideband response buffer is +shared by all client cores. For non-coherent reads that go to sideband +destinations, the uncore logic returns 64 bytes of data to the core, including +both requested data and unrequested stale data, from a transaction buffer and +the sideband response buffer. As a result, stale data from the sideband +response and transaction buffers may now reside in a core fill buffer. + +Primary Stale Data Propagator (PSDP) +------------------------------------ +The primary stale data propagator (PSDP) is limited to the client (including +Intel Xeon server E3) uncore implementation. Similar to the sideband response +buffer, the primary response buffer is shared by all client cores. For some +processors, MMIO primary reads will return 64 bytes of data to the core fill +buffer including both requested data and unrequested stale data. This is +similar to the sideband stale data propagator. + +Vulnerabilities +=============== +Device Register Partial Write (DRPW) (CVE-2022-21166) +----------------------------------------------------- +Some endpoint MMIO registers incorrectly handle writes that are smaller than +the register size. Instead of aborting the write or only copying the correct +subset of bytes (for example, 2 bytes for a 2-byte write), more bytes than +specified by the write transaction may be written to the register. On +processors affected by FBSDP, this may expose stale data from the fill buffers +of the core that created the write transaction. + +Shared Buffers Data Sampling (SBDS) (CVE-2022-21125) +---------------------------------------------------- +After propagators may have moved data around the uncore and copied stale data +into client core fill buffers, processors affected by MFBDS can leak data from +the fill buffer. It is limited to the client (including Intel Xeon server E3) +uncore implementation. + +Shared Buffers Data Read (SBDR) (CVE-2022-21123) +------------------------------------------------ +It is similar to Shared Buffer Data Sampling (SBDS) except that the data is +directly read into the architectural software-visible state. It is limited to +the client (including Intel Xeon server E3) uncore implementation. + +Affected Processors +=================== +Not all the CPUs are affected by all the variants. For instance, most +processors for the server market (excluding Intel Xeon E3 processors) are +impacted by only Device Register Partial Write (DRPW). + +Below is the list of affected Intel processors [#f1]_: + + =================== ============ ========= + Common name Family_Model Steppings + =================== ============ ========= + HASWELL_X 06_3FH 2,4 + SKYLAKE_L 06_4EH 3 + BROADWELL_X 06_4FH All + SKYLAKE_X 06_55H 3,4,6,7,11 + BROADWELL_D 06_56H 3,4,5 + SKYLAKE 06_5EH 3 + ICELAKE_X 06_6AH 4,5,6 + ICELAKE_D 06_6CH 1 + ICELAKE_L 06_7EH 5 + ATOM_TREMONT_D 06_86H All + LAKEFIELD 06_8AH 1 + KABYLAKE_L 06_8EH 9 to 12 + ATOM_TREMONT 06_96H 1 + ATOM_TREMONT_L 06_9CH 0 + KABYLAKE 06_9EH 9 to 13 + COMETLAKE 06_A5H 2,3,5 + COMETLAKE_L 06_A6H 0,1 + ROCKETLAKE 06_A7H 1 + =================== ============ ========= + +If a CPU is in the affected processor list, but not affected by a variant, it +is indicated by new bits in MSR IA32_ARCH_CAPABILITIES. 
As described in a later +section, mitigation largely remains the same for all the variants, i.e. to +clear the CPU fill buffers via VERW instruction. + +New bits in MSRs +================ +Newer processors and microcode update on existing affected processors added new +bits to IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate +specific variants of Processor MMIO Stale Data vulnerabilities and mitigation +capability. + +MSR IA32_ARCH_CAPABILITIES +-------------------------- +Bit 13 - SBDR_SSDP_NO - When set, processor is not affected by either the + Shared Buffers Data Read (SBDR) vulnerability or the sideband stale + data propagator (SSDP). +Bit 14 - FBSDP_NO - When set, processor is not affected by the Fill Buffer + Stale Data Propagator (FBSDP). +Bit 15 - PSDP_NO - When set, processor is not affected by Primary Stale Data + Propagator (PSDP). +Bit 17 - FB_CLEAR - When set, VERW instruction will overwrite CPU fill buffer + values as part of MD_CLEAR operations. Processors that do not + enumerate MDS_NO (meaning they are affected by MDS) but that do + enumerate support for both L1D_FLUSH and MD_CLEAR implicitly enumerate + FB_CLEAR as part of their MD_CLEAR support. +Bit 18 - FB_CLEAR_CTRL - Processor supports read and write to MSR + IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]. On such processors, the FB_CLEAR_DIS + bit can be set to cause the VERW instruction to not perform the + FB_CLEAR action. Not all processors that support FB_CLEAR will support + FB_CLEAR_CTRL. + +MSR IA32_MCU_OPT_CTRL +--------------------- +Bit 3 - FB_CLEAR_DIS - When set, VERW instruction does not perform the FB_CLEAR +action. This may be useful to reduce the performance impact of FB_CLEAR in +cases where system software deems it warranted (for example, when performance +is more critical, or the untrusted software has no MMIO access). Note that +FB_CLEAR_DIS has no impact on enumeration (for example, it does not change +FB_CLEAR or MD_CLEAR enumeration) and it may not be supported on all processors +that enumerate FB_CLEAR. + +Mitigation +========== +Like MDS, all variants of Processor MMIO Stale Data vulnerabilities have the +same mitigation strategy to force the CPU to clear the affected buffers before +an attacker can extract the secrets. + +This is achieved by using the otherwise unused and obsolete VERW instruction in +combination with a microcode update. The microcode clears the affected CPU +buffers when the VERW instruction is executed. + +Kernel reuses the MDS function to invoke the buffer clearing: + + mds_clear_cpu_buffers() + +On MDS affected CPUs, the kernel already invokes CPU buffer clear on +kernel/userspace, hypervisor/guest and C-state (idle) transitions. No +additional mitigation is needed on such CPUs. + +For CPUs not affected by MDS or TAA, mitigation is needed only for the attacker +with MMIO capability. Therefore, VERW is not required for kernel/userspace. For +virtualization case, VERW is only needed at VMENTER for a guest with MMIO +capability. + +Mitigation points +----------------- +Return to user space +^^^^^^^^^^^^^^^^^^^^ +Same mitigation as MDS when affected by MDS/TAA, otherwise no mitigation +needed. + +C-State transition +^^^^^^^^^^^^^^^^^^ +Control register writes by CPU during C-state transition can propagate data +from fill buffer to uncore buffers. Execute VERW before C-state transition to +clear CPU fill buffers. 
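Relatedly, as a hedged illustration of the FB_CLEAR_DIS control described in the "MSR IA32_MCU_OPT_CTRL" section above (a sketch, not a recommendation: it assumes the msr-tools utilities, that IA32_MCU_OPT_CTRL is MSR 0x123 with FB_CLEAR_DIS at bit 3, and that no other bits in that MSR are currently in use)::

    # modprobe msr             # expose /dev/cpu/*/msr for rdmsr/wrmsr
    # rdmsr -p 0 0x123         # read IA32_MCU_OPT_CTRL on CPU 0
    # wrmsr -p 0 0x123 0x8     # set bit 3 (FB_CLEAR_DIS); VERW then skips FB_CLEAR

A real tool would read-modify-write the register and apply the change on every CPU rather than blindly overwriting it on one.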
+ +Guest entry point +^^^^^^^^^^^^^^^^^ +Same mitigation as MDS when processor is also affected by MDS/TAA, otherwise +execute VERW at VMENTER only for MMIO capable guests. On CPUs not affected by +MDS/TAA, a guest without MMIO access cannot extract secrets using Processor MMIO +Stale Data vulnerabilities, so there is no need to execute VERW for such guests. + +Mitigation control on the kernel command line +--------------------------------------------- +The kernel command line allows controlling the Processor MMIO Stale Data +mitigations at boot time with the option "mmio_stale_data=". The valid +arguments for this option are: + + ========== ================================================================= + full If the CPU is vulnerable, enable mitigation; CPU buffer clearing + on exit to userspace and when entering a VM. Idle transitions are + protected as well. It does not automatically disable SMT. + full,nosmt Same as full, with SMT disabled on vulnerable CPUs. This is the + complete mitigation. + off Disables mitigation completely. + ========== ================================================================= + +If the CPU is affected and mmio_stale_data=off is not supplied on the kernel +command line, then the kernel selects the appropriate mitigation. + +Mitigation status information +----------------------------- +The Linux kernel provides a sysfs interface to enumerate the current +vulnerability status of the system: whether the system is vulnerable, and +which mitigations are active. The relevant sysfs file is: + + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + +The possible values in this file are: + + .. list-table:: + + * - 'Not affected' + - The processor is not vulnerable + * - 'Vulnerable' + - The processor is vulnerable, but no mitigation enabled + * - 'Vulnerable: Clear CPU buffers attempted, no microcode' + - The processor is vulnerable, but microcode is not updated. The + mitigation is enabled on a best effort basis. + * - 'Mitigation: Clear CPU buffers' + - The processor is vulnerable and the CPU buffer clearing mitigation is + enabled. + +If the processor is vulnerable then the following information is appended to +the above information: + + ======================== =========================================== + 'SMT vulnerable' SMT is enabled + 'SMT disabled' SMT is disabled + 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown + ======================== =========================================== + +References +---------- +.. [#f1] Affected Processors + https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst index e05e581af5cfe617f38112907aadea8d03f2a9d6..6bd97cd50d6256fbb024278e5f233372b43adce6 100644 --- a/Documentation/admin-guide/hw-vuln/spectre.rst +++ b/Documentation/admin-guide/hw-vuln/spectre.rst @@ -60,8 +60,8 @@ privileged data touched during the speculative execution. Spectre variant 1 attacks take advantage of speculative execution of conditional branches, while Spectre variant 2 attacks use speculative execution of indirect branches to leak privileged memory. -See :ref:`[1] ` :ref:`[5] ` :ref:`[7] ` -:ref:`[10] ` :ref:`[11] `. +See :ref:`[1] ` :ref:`[5] ` :ref:`[6] ` +:ref:`[7] ` :ref:`[10] ` :ref:`[11] `.
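As a quick illustration (an addition for clarity, not text from the original document; the exact strings vary with CPU, microcode and kernel configuration), the kernel's current assessment of these and the other hardware vulnerabilities can be read from sysfs::

    $ grep . /sys/devices/system/cpu/vulnerabilities/*

The meaning of the individual Spectre status strings is described in the sysfs section later in this document.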
Spectre variant 1 (Bounds Check Bypass) --------------------------------------- @@ -131,6 +131,19 @@ steer its indirect branch speculations to gadget code, and measure the speculative execution's side effects left in level 1 cache to infer the victim's data. +Yet another variant 2 attack vector is for the attacker to poison the +Branch History Buffer (BHB) to speculatively steer an indirect branch +to a specific Branch Target Buffer (BTB) entry, even if the entry isn't +associated with the source address of the indirect branch. Specifically, +the BHB might be shared across privilege levels even in the presence of +Enhanced IBRS. + +Currently the only known real-world BHB attack vector is via +unprivileged eBPF. Therefore, it's highly recommended to not enable +unprivileged eBPF, especially when eIBRS is used (without retpolines). +For a full mitigation against BHB attacks, it's recommended to use +retpolines (or eIBRS combined with retpolines). + Attack scenarios ---------------- @@ -364,13 +377,15 @@ The possible values in this file are: - Kernel status: - ==================================== ================================= - 'Not affected' The processor is not vulnerable - 'Vulnerable' Vulnerable, no mitigation - 'Mitigation: Full generic retpoline' Software-focused mitigation - 'Mitigation: Full AMD retpoline' AMD-specific software mitigation - 'Mitigation: Enhanced IBRS' Hardware-focused mitigation - ==================================== ================================= + ======================================== ================================= + 'Not affected' The processor is not vulnerable + 'Mitigation: None' Vulnerable, no mitigation + 'Mitigation: Retpolines' Use Retpoline thunks + 'Mitigation: LFENCE' Use LFENCE instructions + 'Mitigation: Enhanced IBRS' Hardware-focused mitigation + 'Mitigation: Enhanced IBRS + Retpolines' Hardware-focused + Retpolines + 'Mitigation: Enhanced IBRS + LFENCE' Hardware-focused + LFENCE + ======================================== ================================= - Firmware status: Show if Indirect Branch Restricted Speculation (IBRS) is used to protect against Spectre variant 2 attacks when calling firmware (x86 only). @@ -468,7 +483,7 @@ Spectre variant 2 before invoking any firmware code to prevent Spectre variant 2 exploits using the firmware. - Using kernel address space randomization (CONFIG_RANDOMIZE_SLAB=y + Using kernel address space randomization (CONFIG_RANDOMIZE_BASE=y and CONFIG_SLAB_FREELIST_RANDOM=y in the kernel configuration) makes attacks on the kernel generally more difficult. @@ -584,12 +599,13 @@ kernel command line. Specific mitigations can also be selected manually: - retpoline - replace indirect branches - retpoline,generic - google's original retpoline - retpoline,amd - AMD-specific minimal thunk + retpoline auto pick between generic,lfence + retpoline,generic Retpolines + retpoline,lfence LFENCE; indirect branch + retpoline,amd alias for retpoline,lfence + eibrs enhanced IBRS + eibrs,retpoline enhanced IBRS + Retpolines + eibrs,lfence enhanced IBRS + LFENCE Not specifying this option is equivalent to spectre_v2=auto. @@ -730,7 +746,7 @@ AMD white papers: .. _spec_ref6: -[6] `Software techniques for managing speculation on AMD processors `_. +[6] `Software techniques for managing speculation on AMD processors `_. 
ARM white papers: diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index b5524464f1cbc99322b5bfdf66b34c29e1e4daa5..2b04cf8fbab4989c9cedbfa0585436e684fb4c78 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -800,6 +800,12 @@ cs89x0_media= [HW,NET] Format: { rj45 | aui | bnc } + csdlock_debug= [KNL] Enable debug add-ons of cross-CPU function call + handling. When switched on, additional debug data is + printed to the console in case a hanging CPU is + detected, and that CPU is pinged again in order to try + to resolve the hang situation. + dasd= [HW,NET] See header of drivers/s390/block/dasd_devmap.c. @@ -1558,6 +1564,13 @@ hugepages using the cma allocator. If enabled, the boot-time allocation of gigantic hugepages is skipped. + hugepage_prohibit_sz= + [HW] Do not allocate HugeTLB pages at boot when the + remaining normal pages would be less than this size. + This setting makes sure a system can start even when + part of physical memory is broken; admin users can + adjust it to suit their environment. + hugepages= [HW] Number of HugeTLB pages to allocate at boot. If this follows hugepagesz (below), it specifies the number of pages of hugepagesz to be allocated. @@ -1600,6 +1613,9 @@ off: Disable the feature Equivalent to: nohugevmalloc + hugetlb_hwpoison_full + [HW] Enable memory error handling for 1GB hugepages. + hung_task_panic= [KNL] Should the hung task detector generate panics. Format: 0 | 1 @@ -2807,6 +2823,8 @@ memmap=exactmap [KNL,X86,ARM64] Enable setting of an exact E820 and ARM64 memory map, as specified by the user. + For ARM64, this setting is limited to DT boot mode, as + exact mapping must be done after initializing memblock. Such memmap=exactmap lines can be constructed based on BIOS output or other requirements. See the memmap=nn@ss option description. @@ -2830,7 +2848,8 @@ [KNL,ACPI] Mark specific memory as reserved. Region of memory to be reserved is from ss to ss+nn. For ARM64, reserved memory must be in the range of - existed memory. + existing memory and must not overlap an in-use memory + region, otherwise the request will be ignored. Example: Exclude memory from 0x18690000-0x1869ffff memmap=64K$0x18690000 or @@ -2969,6 +2988,7 @@ kvm.nx_huge_pages=off [X86] no_entry_flush [PPC] no_uaccess_flush [PPC] + mmio_stale_data=off [X86] Exceptions: This does not have any effect on @@ -2990,6 +3010,7 @@ Equivalent to: l1tf=flush,nosmt [X86] mds=full,nosmt [X86] tsx_async_abort=full,nosmt [X86] + mmio_stale_data=full,nosmt [X86] mminit_loglevel= [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this @@ -2999,6 +3020,40 @@ log everything. Information is printed at KERN_DEBUG so loglevel=8 may also need to be specified. + mmio_stale_data= + [X86,INTEL] Control mitigation for the Processor + MMIO Stale Data vulnerabilities. + + Processor MMIO Stale Data is a class of + vulnerabilities that may expose data after an MMIO + operation. Exposed data could originate or end in + the same CPU buffers as affected by MDS and TAA. + Therefore, similar to MDS and TAA, the mitigation + is to clear the affected CPU buffers. + + This parameter controls the mitigation. The + options are: + + full - Enable mitigation on vulnerable CPUs + + full,nosmt - Enable mitigation and disable SMT on + vulnerable CPUs.
+ + off - Unconditionally disable mitigation + + On MDS or TAA affected machines, + mmio_stale_data=off can be prevented by an active + MDS or TAA mitigation as these vulnerabilities are + mitigated with the same mechanism, so in order to + disable this mitigation you need to specify + mds=off and tsx_async_abort=off too. + + Not specifying this option is equivalent to + mmio_stale_data=full. + + For details see: + Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst + module.sig_enforce [KNL] When CONFIG_MODULE_SIG is set, this means that modules without (valid) signatures will fail to load. @@ -5110,8 +5165,12 @@ Specific mitigations can also be selected manually: retpoline - replace indirect branches - retpoline,generic - google's original retpoline - retpoline,amd - AMD-specific minimal thunk + retpoline,generic - Retpolines + retpoline,lfence - LFENCE; indirect branch + retpoline,amd - alias for retpoline,lfence + eibrs - enhanced IBRS + eibrs,retpoline - enhanced IBRS + Retpolines + eibrs,lfence - enhanced IBRS + LFENCE Not specifying this option is equivalent to spectre_v2=auto. diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index 7d5e8a67c775f16dcc63bacdd50dbad80248d111..773747c1b329b74c0dd17b1eb67248e7dfd34de6 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -787,6 +787,7 @@ bit 1 print system memory info bit 2 print timer info bit 3 print locks info if ``CONFIG_LOCKDEP`` is on bit 4 print ftrace buffer +bit 5 print all printk messages in buffer ===== ============================================ So for example to print tasks and memory info on panic, user can:: @@ -1526,3 +1527,20 @@ is 10 seconds. The softlockup threshold is (``2 * watchdog_thresh``). Setting this tunable to zero will disable lockup detection altogether. + +uce_kernel_recovery (ARM64 only) +================================ + +This value can be used to control whether the kernel panics when UCE RAS +errors occur in a specific scenario. Each bit controls one scenario: 1 means +avoid a kernel panic when encountering a UCE RAS error in that scenario, and +0 means kernel panic.
+ +Current usage of each bit: + +============ ============== +bit0 reserved +bit1 reserved +bit2 copy_from_user +bit3 ~ bit31 reserved +============ ============== diff --git a/Documentation/arm64/cpu-feature-registers.rst b/Documentation/arm64/cpu-feature-registers.rst index 9f9b8fd060892bad7fb370f6124135ff8c2b58f9..749ae970c31955a6ddd54807d4f7a0700cf85295 100644 --- a/Documentation/arm64/cpu-feature-registers.rst +++ b/Documentation/arm64/cpu-feature-registers.rst @@ -275,6 +275,23 @@ infrastructure: | SVEVer | [3-0] | y | +------------------------------+---------+---------+ + 8) ID_AA64MMFR1_EL1 - Memory model feature register 1 + + +------------------------------+---------+---------+ + | Name | bits | visible | + +------------------------------+---------+---------+ + | AFP | [47-44] | y | + +------------------------------+---------+---------+ + + 9) ID_AA64ISAR2_EL1 - Instruction set attribute register 2 + + +------------------------------+---------+---------+ + | Name | bits | visible | + +------------------------------+---------+---------+ + | RPRES | [7-4] | y | + +------------------------------+---------+---------+ + + Appendix I: Example ------------------- diff --git a/Documentation/arm64/elf_hwcaps.rst b/Documentation/arm64/elf_hwcaps.rst index 95e66bd7dd17efdf855ab4268eab7146c4349f38..e88d245d426da330240d12edae7941d905b8ffe8 100644 --- a/Documentation/arm64/elf_hwcaps.rst +++ b/Documentation/arm64/elf_hwcaps.rst @@ -249,6 +249,14 @@ HWCAP2_ECV Functionality implied by ID_AA64MMFR0_EL1.ECV == 0b0001. +HWCAP2_AFP + + Functionality implied by ID_AA64MMFR1_EL1.AFP == 0b0001. + +HWCAP2_RPRES + + Functionality implied by ID_AA64ISAR2_EL1.RPRES == 0b0001. + 4. Unused AT_HWCAP bits ----------------------- diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst index 2df7b067ab93f532c4d34b4fdf7142b58f965502..0e15f9b05c9d6e191a7696076f8351d50f4bbebe 100644 --- a/Documentation/bpf/bpf_design_QA.rst +++ b/Documentation/bpf/bpf_design_QA.rst @@ -208,6 +208,12 @@ data structures and compile with kernel internal headers. Both of these kernel internals are subject to change and can break with newer kernels such that the program needs to be adapted accordingly. +Q: Are tracepoints part of the stable ABI? +------------------------------------------ +A: NO. Tracepoints are tied to internal implementation details hence they are +subject to change and can break with newer kernels. BPF programs need to change +accordingly when this happens. + Q: How much stack space a BPF program uses? ------------------------------------------- A: Currently all program types are limited to 512 bytes of stack diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst index 5d194615aed01111ef6ed9abd6713b73bd4a0348..2e26d2998722ef64bffd18920e6c57bb7fb5ee95 100644 --- a/Documentation/dev-tools/kfence.rst +++ b/Documentation/dev-tools/kfence.rst @@ -61,6 +61,17 @@ The total memory dedicated to the KFENCE memory pool can be computed as:: Using the default config, and assuming a page size of 4 KiB, results in dedicating 2 MiB to the KFENCE memory pool. +KFENCE allows re-enabling after system startup, but if CONFIG_CONTIG_ALLOC is +not set and KFENCE_NUM_OBJECTS exceeds MAX_ORDER, allocating the KFENCE pool +after system startup is not supported. + +For arm64, re-enabling KFENCE conflicts with mapping the pages of the KFENCE +pool itself at page granularity. For flexibility, the sample_interval parameter +controls whether arm64 supports enabling KFENCE after system startup.
+Once sample_interval is set to -1 via the boot parameter, kfence_pool will be +allocated from early memory whether or not KFENCE is enabled. Otherwise, +re-enabling is not supported on arm64. + Note: On architectures that support huge pages, KFENCE will ensure that the pool is using pages of size ``PAGE_SIZE``. This will result in additional page tables being allocated. diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt index e77635c5422c6a03995bc45e02049d6b44e4b190..fa8b31660cadd1e2cfd0c7cc8c19bb971be3808e 100644 --- a/Documentation/devicetree/bindings/arm/omap/omap.txt +++ b/Documentation/devicetree/bindings/arm/omap/omap.txt @@ -119,6 +119,9 @@ Boards (incomplete list of examples): - OMAP3 BeagleBoard : Low cost community board compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3" +- OMAP3 BeagleBoard A to B4 : Early BeagleBoard revisions A to B4 with a timer quirk + compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3" + - OMAP3 Tobi with Overo : Commercial expansion board with daughter board compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3" diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml index 0da42ab8fd3a52b2f9fef7b1ff47919a273c3003..8a67bb889f18a851cc0c3d6e471bc25c9bc13f39 100644 --- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml +++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml @@ -10,6 +10,9 @@ title: Amlogic specific extensions to the Synopsys Designware HDMI Controller maintainers: - Neil Armstrong +allOf: + - $ref: /schemas/sound/name-prefix.yaml# + description: | The Amlogic Meson Synopsys Designware Integration is composed of - A Synopsys DesignWare HDMI Controller IP @@ -99,6 +102,8 @@ properties: "#sound-dai-cells": const: 0 + sound-name-prefix: true + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml index a8d202c9d004c67f7009e17d8928cbb415e45355..b8cb1b4dae1ff02d836441380bd1be7ae98675e0 100644 --- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml +++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml @@ -78,6 +78,10 @@ properties: interrupts: maxItems: 1 + amlogic,canvas: + description: should point to a canvas provider node + $ref: /schemas/types.yaml#/definitions/phandle + power-domains: maxItems: 1 description: phandle to the associated power domain @@ -106,6 +110,7 @@ required: - port@1 - "#address-cells" - "#size-cells" + - amlogic,canvas additionalProperties: false @@ -118,6 +123,7 @@ examples: interrupts = <3>; #address-cells = <1>; #size-cells = <0>; + amlogic,canvas = <&canvas>; /* CVBS VDAC output port */ port@0 { diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml index b29050fd7470a3811530572ef5d4d25922ffee56..6fe2a3d8ee6b86dd3d83b63512834c70fee18fce 100644 --- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml +++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml @@ -44,7 +44,7 @@ patternProperties: properties: reg: description: - Contains the native Ready/Busy IDs. + Contains the chip-select IDs.
nand-ecc-mode: description: @@ -174,6 +174,6 @@ examples: nand-ecc-mode = "soft"; nand-ecc-algo = "bch"; - /* controller specific properties */ + /* NAND chip specific properties */ }; }; diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt index 0968b40aef1e8147a63087f8f360107285fb900d..e3501bfa22e904fe52b7944a37ab541eaf531fd2 100644 --- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt +++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt @@ -31,7 +31,7 @@ tcan4x5x: tcan4x5x@0 { #address-cells = <1>; #size-cells = <1>; spi-max-frequency = <10000000>; - bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>; + bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>; interrupt-parent = <&gpio1>; interrupts = <14 IRQ_TYPE_LEVEL_LOW>; device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; diff --git a/Documentation/devicetree/bindings/spi/spi-mxic.txt b/Documentation/devicetree/bindings/spi/spi-mxic.txt index 529f2dab2648a78b5a82d7b7ffd95bb07e8c51e1..7bcbb229b78bb3c6d03652d63ad5da6ace0304f9 100644 --- a/Documentation/devicetree/bindings/spi/spi-mxic.txt +++ b/Documentation/devicetree/bindings/spi/spi-mxic.txt @@ -8,11 +8,13 @@ Required properties: - reg: should contain 2 entries, one for the registers and one for the direct mapping area - reg-names: should contain "regs" and "dirmap" -- interrupts: interrupt line connected to the SPI controller - clock-names: should contain "ps_clk", "send_clk" and "send_dly_clk" - clocks: should contain 3 entries for the "ps_clk", "send_clk" and "send_dly_clk" clocks +Optional properties: +- interrupts: interrupt line connected to the SPI controller + Example: spi@43c30000 { diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml index 164f71598c5956fa16b21284ba1e2c3d3523dca8..1b3954aa71c157069b9ec34a02c9d008a56d552b 100644 --- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml +++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml @@ -199,12 +199,11 @@ patternProperties: contribution: $ref: /schemas/types.yaml#/definitions/uint32 - minimum: 0 - maximum: 100 description: - The percentage contribution of the cooling devices at the - specific trip temperature referenced in this map - to this thermal zone + The cooling contribution to the thermal zone of the referred + cooling device at the referred trip point. The contribution is + a ratio of the sum of all cooling contributions within a + thermal zone. required: - trip diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml index 76cb9586ee00cab46858e1004089ab505c8c9a0c..93cd77a6e92c0132a51d353c3d5a75bdb032159a 100644 --- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml +++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml @@ -39,8 +39,8 @@ properties: samsung,syscon-phandle: $ref: /schemas/types.yaml#/definitions/phandle description: - Phandle to the PMU system controller node (in case of Exynos5250 - and Exynos5420). + Phandle to the PMU system controller node (in case of Exynos5250, + Exynos5420 and Exynos7). 
required: - compatible @@ -58,6 +58,7 @@ allOf: enum: - samsung,exynos5250-wdt - samsung,exynos5420-wdt + - samsung,exynos7-wdt then: required: - samsung,syscon-phandle diff --git a/Documentation/driver-api/dmaengine/dmatest.rst b/Documentation/driver-api/dmaengine/dmatest.rst index ee268d445d38b64b173098630a1db287f7bcd1b0..d2e1d8b58e7dc13b7285ece750917952a180a559 100644 --- a/Documentation/driver-api/dmaengine/dmatest.rst +++ b/Documentation/driver-api/dmaengine/dmatest.rst @@ -143,13 +143,14 @@ Part 5 - Handling channel allocation Allocating Channels ------------------- -Channels are required to be configured prior to starting the test run. -Attempting to run the test without configuring the channels will fail. +Channels do not need to be configured prior to starting a test run. Attempting +to run the test without configuring the channels will result in testing any +channels that are available. Example:: % echo 1 > /sys/module/dmatest/parameters/run - dmatest: Could not start test, no channels configured + dmatest: No channels configured, continue with any Channels are registered using the "channel" parameter. Channels can be requested by their name, once requested, the channel is registered and a pending thread is added to the test list. diff --git a/Documentation/driver-api/firewire.rst b/Documentation/driver-api/firewire.rst index 94a2d7f01d99924e0d338b57e09387ecb230c41a..d3cfa73cbb2b47e624961f3e15aadb6528dfec37 100644 --- a/Documentation/driver-api/firewire.rst +++ b/Documentation/driver-api/firewire.rst @@ -19,7 +19,7 @@ of kernel interfaces is available via exported symbols in `firewire-core` module Firewire char device data structures ==================================== -.. include:: /ABI/stable/firewire-cdev +.. include:: ../ABI/stable/firewire-cdev :literal: .. kernel-doc:: include/uapi/linux/firewire-cdev.h @@ -28,7 +28,7 @@ Firewire char device data structures Firewire device probing and sysfs interfaces ============================================ -.. include:: /ABI/stable/sysfs-bus-firewire +.. include:: ../ABI/stable/sysfs-bus-firewire :literal: .. kernel-doc:: drivers/firewire/core-device.c diff --git a/Documentation/firmware-guide/acpi/apei/einj.rst b/Documentation/firmware-guide/acpi/apei/einj.rst index e588bccf5158370fb03dfe4db06b20ee93114108..2fa989c0a12d9e8138711179a5426b04da1e056e 100644 --- a/Documentation/firmware-guide/acpi/apei/einj.rst +++ b/Documentation/firmware-guide/acpi/apei/einj.rst @@ -181,5 +181,24 @@ You should see something like this in dmesg:: [22715.834759] EDAC sbridge MC3: PROCESSOR 0:306e7 TIME 1422553404 SOCKET 0 APIC 0 [22716.616173] EDAC MC3: 1 CE memory read error on CPU_SrcID#0_Channel#0_DIMM#0 (channel:0 slot:0 page:0x12345 offset:0x0 grain:32 syndrome:0x0 - area:DRAM err_code:0001:0090 socket:0 channel_mask:1 rank:0) +Special notes for injection into SGX enclaves: + +There may be a separate BIOS setup option to enable SGX injection. + +The injection process consists of setting some special memory controller +trigger that will inject the error on the next write to the target +address. But the h/w prevents any software outside of an SGX enclave +from accessing enclave pages (even BIOS SMM mode). 
+ +The following sequence can be used: + 1) Determine physical address of enclave page + 2) Use "notrigger=1" mode to inject (this will setup + the injection address, but will not actually inject) + 3) Enter the enclave + 4) Store data to the virtual address matching physical address from step 1 + 5) Execute CLFLUSH for that virtual address + 6) Spin delay for 250ms + 7) Read from the virtual address. This will trigger the error + For more information about EINJ, please refer to ACPI specification version 4.0, section 17.5 and ACPI 5.0, section 18.6. diff --git a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst index 9b17dc77d18c5c0c916007cf8cc2f160ba56969c..da0e46496fc4db82086fc05709f9bd04220cd268 100644 --- a/Documentation/firmware-guide/acpi/dsd/data-node-references.rst +++ b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst @@ -5,7 +5,7 @@ Referencing hierarchical data nodes =================================== -:Copyright: |copy| 2018 Intel Corporation +:Copyright: |copy| 2018, 2021 Intel Corporation :Author: Sakari Ailus ACPI in general allows referring to device objects in the tree only. @@ -52,12 +52,14 @@ the ANOD object which is also the final target node of the reference. Name (NOD0, Package() { ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), Package () { + Package () { "reg", 0 }, Package () { "random-property", 3 }, } }) Name (NOD1, Package() { ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"), Package () { + Package () { "reg", 1 }, Package () { "anothernode", "ANOD" }, } }) @@ -74,7 +76,11 @@ the ANOD object which is also the final target node of the reference. Name (_DSD, Package () { ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"), Package () { - Package () { "reference", ^DEV0, "node@1", "anothernode" }, + Package () { + "reference", Package () { + ^DEV0, "node@1", "anothernode" + } + }, } }) } diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 7272a4bd74dd05a89ba7668e69980c906596665b..28841609aa4f8d589d3952d7381d3ceaefa1f7fa 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -273,24 +273,6 @@ Contact: Daniel Vetter, Noralf Tronnes Level: Advanced -Garbage collect fbdev scrolling acceleration --------------------------------------------- - -Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode = -SCROLL_REDRAW. There's a ton of code this will allow us to remove: -- lots of code in fbcon.c -- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called - directly instead of the function table (with a switch on p->rotate) -- fb_copyarea is unused after this, and can be deleted from all drivers - -Note that not all acceleration code can be deleted, since clearing and cursor -support is still accelerated, which might be good candidates for further -deletion projects. 
- -Contact: Daniel Vetter - -Level: Intermediate - idr_init_base() --------------- diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst index 003c865e9c212342ecac53c42eddd6848077de4f..fbcb48bc2a9030caa28ebc04d8c24f305bb505bd 100644 --- a/Documentation/process/stable-kernel-rules.rst +++ b/Documentation/process/stable-kernel-rules.rst @@ -168,7 +168,16 @@ Trees - The finalized and tagged releases of all stable kernels can be found in separate branches per version at: - https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git + https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git + + - The release candidate of all stable kernel versions can be found at: + + https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable-rc.git/ + + .. warning:: + The -stable-rc tree is a snapshot in time of the stable-queue tree and + will change frequently, hence will be rebased often. It should only be + used for testing purposes (e.g. to be consumed by CI systems). Review committee diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst index 9801d6b284b1ecfdd51b7e1ab5ab5f9bc6da3b40..5723d8c69e3542e1b44054613e2e8e7f9c9abc03 100644 --- a/Documentation/scheduler/sched-bwc.rst +++ b/Documentation/scheduler/sched-bwc.rst @@ -21,33 +21,84 @@ cfs_quota units at each period boundary. As threads consume this bandwidth it is transferred to cpu-local "silos" on a demand basis. The amount transferred within each of these updates is tunable and described as the "slice". +Burst feature +------------- +This feature borrows time now against our future underrun, at the cost of +increased interference against the other system users. All nicely bounded. + +Traditional (UP-EDF) bandwidth control is something like: + + (U = \Sum u_i) <= 1 + +This guarantees both that every deadline is met and that the system is +stable. After all, if U were > 1, then for every second of walltime, +we'd have to run more than a second of program time, and obviously miss +our deadline, but the next deadline will be further out still, there is +never time to catch up, unbounded fail. + +The burst feature observes that a workload doesn't always execute the full +quota; this enables one to describe u_i as a statistical distribution. + +For example, have u_i = {x,e}_i, where x is the p(95) and x+e p(100) +(the traditional WCET). This effectively allows u to be smaller, +increasing the efficiency (we can pack more tasks in the system), but at +the cost of missing deadlines when all the odds line up. However, it +does maintain stability, since every overrun must be paired with an +underrun as long as our x is above the average. + +That is, suppose we have 2 tasks, both specify a p(95) value, then we +have a p(95)*p(95) = 90.25% chance both tasks are within their quota and +everything is good. At the same time we have a p(5)p(5) = 0.25% chance +both tasks will exceed their quota at the same time (guaranteed deadline +fail). Somewhere in between there's a threshold where one exceeds and +the other doesn't underrun enough to compensate; this depends on the +specific CDFs. + +At the same time, we can say that the worst case deadline miss, will be +\Sum e_i; that is, there is a bounded tardiness (under the assumption +that x+e is indeed WCET). + +The interference when using burst is gauged by the possibility of +missing the deadline and the average WCET.
Test results showed that when +there are many cgroups or the CPU is under-utilized, the interference is +limited. More details are shown in: +https://lore.kernel.org/lkml/5371BD36-55AE-4F71-B9D7-B86DC32E3D2B@linux.alibaba.com/ + Management ---------- -Quota and period are managed within the cpu subsystem via cgroupfs. +Quota, period and burst are managed within the cpu subsystem via cgroupfs. -cpu.cfs_quota_us: the total available run-time within a period (in microseconds) +cpu.cfs_quota_us: run-time replenished within a period (in microseconds) cpu.cfs_period_us: the length of a period (in microseconds) cpu.stat: exports throttling statistics [explained further below] +cpu.cfs_burst_us: the maximum accumulated run-time (in microseconds) The default values are:: cpu.cfs_period_us=100ms - cpu.cfs_quota=-1 + cpu.cfs_quota_us=-1 + cpu.cfs_burst_us=0 A value of -1 for cpu.cfs_quota_us indicates that the group does not have any bandwidth restriction in place, such a group is described as an unconstrained bandwidth group. This represents the traditional work-conserving behavior for CFS. -Writing any (valid) positive value(s) will enact the specified bandwidth limit. -The minimum quota allowed for the quota or period is 1ms. There is also an -upper bound on the period length of 1s. Additional restrictions exist when -bandwidth limits are used in a hierarchical fashion, these are explained in -more detail below. +Writing any (valid) positive value(s) no smaller than cpu.cfs_burst_us will +enact the specified bandwidth limit. The minimum quota allowed for the quota or +period is 1ms. There is also an upper bound on the period length of 1s. +Additional restrictions exist when bandwidth limits are used in a hierarchical +fashion, these are explained in more detail below. Writing any negative value to cpu.cfs_quota_us will remove the bandwidth limit and return the group to an unconstrained state once more. +A value of 0 for cpu.cfs_burst_us indicates that the group cannot accumulate +any unused bandwidth. This leaves the traditional bandwidth control behavior +for CFS unchanged. Writing any (valid) positive value(s) no larger than +cpu.cfs_quota_us into cpu.cfs_burst_us will enact the cap on unused bandwidth +accumulation. + Any updates to a group's bandwidth specification will result in it becoming unthrottled if it is in a constrained state. @@ -67,7 +118,7 @@ for more fine-grained consumption. Statistics ---------- -A group's bandwidth statistics are exported via 3 fields in cpu.stat. +A group's bandwidth statistics are exported via 5 fields in cpu.stat. cpu.stat: @@ -75,6 +126,9 @@ cpu.stat: - nr_throttled: Number of times the group has been throttled/limited. - throttled_time: The total time duration (in nanoseconds) for which entities of the group have been throttled. +- nr_bursts: Number of periods in which a burst occurs. +- burst_time: Cumulative wall-time (in nanoseconds) that any CPUs have used + above quota in respective periods This interface is read-only. @@ -172,3 +226,15 @@ Examples By using a small period here we are ensuring a consistent latency response at the expense of burst capacity. + +4. Limit a group to 40% of 1 CPU, and allow accumulating an additional 20% of + 1 CPU when there has been accumulation. + + With a 50ms period, a 20ms quota will be equivalent to 40% of 1 CPU. + And a 10ms burst will be equivalent to 20% of 1 CPU.
+ + # echo 20000 > cpu.cfs_quota_us /* quota = 20ms */ + # echo 50000 > cpu.cfs_period_us /* period = 50ms */ + # echo 10000 > cpu.cfs_burst_us /* burst = 10ms */ + + Larger buffer setting (no larger than quota) allows greater burst capacity. diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst index d25335993e55309d8c7799178231a251ec890e42..9b52f50a68542b932879f054bdd6b436ee7658a1 100644 --- a/Documentation/sound/hd-audio/models.rst +++ b/Documentation/sound/hd-audio/models.rst @@ -261,6 +261,10 @@ alc-sense-combo huawei-mbx-stereo Enable initialization verbs for Huawei MBX stereo speakers; might be risky, try this at your own risk +alc298-samsung-headphone + Samsung laptops with ALC298 +alc256-samsung-headphone + Samsung laptops with ALC256 ALC66x/67x/892 ============== diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst index 2a5aa48eff6c78dd59ebbf23930f8ce135dd9e87..9df29a935757afe25590f36cc2746eeabf8646bb 100644 --- a/Documentation/trace/events.rst +++ b/Documentation/trace/events.rst @@ -198,6 +198,15 @@ The glob (~) accepts a wild card character (\*,?) and character classes prev_comm ~ "*sh*" prev_comm ~ "ba*sh" +If the field is a pointer that points into user space (for example +"filename" from sys_enter_openat), then you have to append ".ustring" to the +field name:: + + filename.ustring ~ "password" + +This is needed because the kernel has to know how to retrieve the memory that +the pointer points to from user space. + 5.2 Setting filters ------------------- @@ -230,6 +239,16 @@ Currently the caret ('^') for an error always appears at the beginning of the filter string; the error message should still be useful though even without more accurate position info. +5.2.1 Filter limitations +------------------------ + +If a filter is placed on a string pointer ``(char *)`` that does not point +to a string on the ring buffer, but instead points to kernel or user space +memory, then, for safety reasons, at most 1024 bytes of the content is +copied onto a temporary buffer to do the compare. If the copy of the memory +faults (the pointer points to memory that should not be accessed), then the +string compare will be treated as not matching. + 5.3 Clearing filters -------------------- diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 56deaceab6254a07cd6fcc86080c85b3b8b73447..d542bd99a9a416c02d2af232026223dffbfa2294 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6197,6 +6197,29 @@ KVM_EXIT_X86_RDMSR and KVM_EXIT_X86_WRMSR exit notifications which user space can then handle to implement model specific MSR handling and/or user notifications to inform a user that an MSR was not handled. +7.25 KVM_CAP_SGX_ATTRIBUTE +-------------------------- + +:Architectures: x86 +:Target: VM +:Parameters: args[0] is a file handle of an SGX attribute file in securityfs +:Returns: 0 on success, -EINVAL if the file handle is invalid or if a requested + attribute is not supported by KVM. + +KVM_CAP_SGX_ATTRIBUTE enables a userspace VMM to grant a VM access to one or +more privileged enclave attributes. args[0] must hold a file handle to a valid +SGX attribute file corresponding to an attribute that is supported/restricted +by KVM (currently only PROVISIONKEY). + +The SGX subsystem restricts access to a subset of enclave attributes to provide +additional security for an uncompromised kernel, e.g.
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 56deaceab6254a07cd6fcc86080c85b3b8b73447..d542bd99a9a416c02d2af232026223dffbfa2294 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -6197,6 +6197,29 @@ KVM_EXIT_X86_RDMSR and KVM_EXIT_X86_WRMSR exit notifications which user space
 can then handle to implement model specific MSR handling and/or user
 notifications to inform a user that an MSR was not handled.

+7.25 KVM_CAP_SGX_ATTRIBUTE
+--------------------------
+
+:Architectures: x86
+:Target: VM
+:Parameters: args[0] is a file handle of an SGX attribute file in securityfs
+:Returns: 0 on success, -EINVAL if the file handle is invalid or if a requested
+          attribute is not supported by KVM.
+
+KVM_CAP_SGX_ATTRIBUTE enables a userspace VMM to grant a VM access to one or
+more privileged enclave attributes. args[0] must hold a file handle to a valid
+SGX attribute file corresponding to an attribute that is supported/restricted
+by KVM (currently only PROVISIONKEY).
+
+The SGX subsystem restricts access to a subset of enclave attributes to provide
+additional security for an uncompromised kernel, e.g. use of the PROVISIONKEY
+is restricted to deter malware from using the PROVISIONKEY to obtain a stable
+system fingerprint. To prevent userspace from circumventing such restrictions
+by running an enclave in a VM, KVM prevents access to privileged attributes by
+default.
+
+See Documentation/x86/sgx.rst for more details.
+
 8. Other capabilities.
 ======================
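In practice a VMM enables this capability once per VM, passing the opened
attribute file in args[0]. A minimal sketch, assuming the PROVISIONKEY
attribute file is the securityfs node /sys/kernel/security/sgx/provision (the
exact path is an assumption here)::

  /* Grant a VM the SGX PROVISIONKEY attribute via KVM_ENABLE_CAP. */
  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int enable_sgx_provision(int vm_fd)
  {
          struct kvm_enable_cap cap;
          /* Assumed securityfs location of the PROVISIONKEY attribute file. */
          int attr_fd = open("/sys/kernel/security/sgx/provision", O_RDONLY);

          if (attr_fd < 0)
                  return -1;

          memset(&cap, 0, sizeof(cap));
          cap.cap = KVM_CAP_SGX_ATTRIBUTE;
          cap.args[0] = attr_fd;  /* file handle of the attribute file */

          return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
  }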
diff --git a/Documentation/vm/memcg_memfs_info.rst b/Documentation/vm/memcg_memfs_info.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aff432d125e52f7e4baaa1af490a4c9fe57eddd0
--- /dev/null
+++ b/Documentation/vm/memcg_memfs_info.rst
@@ -0,0 +1,40 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+================
+Memcg Memfs Info
+================
+
+Overview
+========
+
+Support for printing the rootfs files and tmpfs files that have pages charged
+in a given memory cgroup. The file information can be printed through the
+interface "memory.memfs_files_info" or printed when an OOM is triggered.
+
+User control
+============
+
+1. /sys/kernel/mm/memcg_memfs_info/enable
+-----------------------------------------
+
+Boolean type. The default value is 0; set it to 1 to enable the feature.
+
+2. /sys/kernel/mm/memcg_memfs_info/max_print_files_in_oom
+---------------------------------------------------------
+
+Unsigned long type. The default value is 500, indicating the maximum number
+of files that can be printed to the console when an OOM is triggered.
+
+3. /sys/kernel/mm/memcg_memfs_info/size_threshold
+-------------------------------------------------
+
+Unsigned long type. The default value is 0, indicating the minimum size of
+files that will be printed.
+
+4. /sys/fs/cgroup/memory/<memcg>/memory.memfs_files_info
+--------------------------------------------------------
+
+Outputs the files that use memory in this memory cgroup.
+
+---
+Liu Shixin, Jan 2022
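A minimal sketch of driving these interfaces from a program; the sysfs knob and
the memory cgroup path are taken from the description above, with "mygroup" as
a hypothetical cgroup name::

  /* Enable memcg_memfs_info and dump one memcg's charged files. */
  #include <stdio.h>

  int main(void)
  {
          char line[256];
          FILE *f = fopen("/sys/kernel/mm/memcg_memfs_info/enable", "w");

          if (!f) {
                  perror("enable");
                  return 1;
          }
          fputs("1", f);
          fclose(f);

          /* Hypothetical memcg; adjust to a real memory cgroup directory. */
          f = fopen("/sys/fs/cgroup/memory/mygroup/memory.memfs_files_info",
                    "r");
          if (!f) {
                  perror("memfs_files_info");
                  return 1;
          }
          while (fgets(line, sizeof(line), f))
                  fputs(line, stdout);
          fclose(f);
          return 0;
  }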
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
index 647a570d4931b74202592c4e9a00ca1cb6d0acbe..6832df92f084be6965ba1b3946b6d5c5c619ad0e 100644
--- a/Documentation/x86/index.rst
+++ b/Documentation/x86/index.rst
@@ -21,6 +21,7 @@ x86-specific Documentation
    tlb
    mtrr
    pat
+   intel-hfi
    intel-iommu
    intel_txt
    amd-memory-encryption
diff --git a/Documentation/x86/intel-hfi.rst b/Documentation/x86/intel-hfi.rst
new file mode 100644
index 0000000000000000000000000000000000000000..49dea58ea4fb2fc7accc2c00e56a84f2512a67e1
--- /dev/null
+++ b/Documentation/x86/intel-hfi.rst
@@ -0,0 +1,72 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================================================
+Hardware-Feedback Interface for scheduling on Intel Hardware
+============================================================
+
+Overview
+--------
+
+Intel has described the Hardware Feedback Interface (HFI) in the Intel 64 and
+IA-32 Architectures Software Developer's Manual (Intel SDM) Volume 3 Section
+14.6 [1]_.
+
+The HFI gives the operating system performance and energy efficiency
+capability data for each CPU in the system. Linux can use the information from
+the HFI to influence task placement decisions.
+
+The Hardware Feedback Interface
+-------------------------------
+
+The Hardware Feedback Interface provides the operating system with information
+about the performance and energy efficiency of each CPU in the system. Each
+capability is given as a unit-less quantity in the range [0-255]. Higher values
+indicate higher capability. Energy efficiency and performance are reported in
+separate capabilities. Even though on some systems these two metrics may be
+related, they are specified as independent capabilities in the Intel SDM.
+
+These capabilities may change at runtime as a result of changes in the
+operating conditions of the system or the action of external factors. The rate
+at which these capabilities are updated is specific to each processor model. On
+some models, capabilities are set at boot time and never change. On others,
+capabilities may change every tens of milliseconds. For instance, a remote
+mechanism may be used to lower the Thermal Design Power. Such a change can be
+reflected in the HFI. Likewise, if the system needs to be throttled due to
+excessive heat, the HFI may reflect reduced performance on specific CPUs.
+
+The kernel or a userspace policy daemon can use these capabilities to modify
+task placement decisions. For instance, if either the performance or energy
+capabilities of a given logical processor become zero, it is an indication that
+the hardware recommends to the operating system to not schedule any tasks on
+that processor for performance or energy efficiency reasons, respectively.
+
+Implementation details for Linux
+--------------------------------
+
+The infrastructure to handle thermal event interrupts has two parts. In the
+Local Vector Table of a CPU's local APIC, there is an entry for the
+Thermal Monitor Register. This register controls how interrupts are delivered
+to a CPU when the thermal monitor generates an interrupt. Further details
+can be found in the Intel SDM Vol. 3 Section 10.5 [1]_.
+
+The thermal monitor may generate interrupts per CPU or per package. The HFI
+generates package-level interrupts. This monitor is configured and initialized
+via a set of machine-specific registers. Specifically, the HFI interrupt and
+status are controlled via designated bits in the IA32_PACKAGE_THERM_INTERRUPT
+and IA32_PACKAGE_THERM_STATUS registers, respectively. There exists one HFI
+table per package. Further details can be found in the Intel SDM Vol. 3
+Section 14.9 [1]_.
+
+The hardware issues an HFI interrupt after updating the HFI table, indicating
+that it is ready for the operating system to consume. CPUs receive such
+interrupts via the thermal entry in the Local APIC's Local Vector Table.
+
+When servicing such an interrupt, the HFI driver parses the updated table and
+relays the update to userspace using the thermal notification framework. Given
+that there may be many HFI updates every second, the updates relayed to
+userspace are throttled at a rate of CONFIG_HZ jiffies.
+
+References
+----------
+
+.. [1] https://www.intel.com/sdm
diff --git a/Documentation/x86/sgx.rst b/Documentation/x86/sgx.rst
index eaee1368b4fd8b838afba5d69d626fad1ec9e39d..a608f667fb9532bdf100e87745fd9737dcbbf1dd 100644
--- a/Documentation/x86/sgx.rst
+++ b/Documentation/x86/sgx.rst
@@ -209,3 +209,79 @@ An application may be loaded into a container enclave which is specially
 configured with a library OS and run-time which permits the application to run.
 The enclave run-time and library OS work together to execute the application
 when a thread enters the enclave.
+
+Impact of Potential Kernel SGX Bugs
+===================================
+
+EPC leaks
+---------
+
+When EPC page leaks happen, a WARNING like this is shown in dmesg:
+
+"EREMOVE returned ... and an EPC page was leaked. SGX may become unusable..."
+
+This is effectively a kernel use-after-free of an EPC page, and due
+to the way SGX works, the bug is detected at freeing. Rather than
+adding the page back to the pool of available EPC pages, the kernel
+intentionally leaks the page to avoid additional errors in the future.
+
+When this happens, the kernel will likely soon leak more EPC pages, and
+SGX will likely become unusable because the memory available to SGX is
+limited. However, while this may be fatal to SGX, the rest of the kernel
+is unlikely to be impacted and should continue to work.
+
+As a result, when this happens, the user should stop running any new
+SGX workloads (or just any new workloads) and migrate all valuable
+workloads. Although a machine reboot can recover all EPC memory, the bug
+should be reported to the Linux developers.
+
+
+Virtual EPC
+===========
+
+The implementation also has a virtual EPC driver to support SGX enclaves
+in guests. Unlike the SGX driver, an EPC page allocated by the virtual
+EPC driver doesn't have a specific enclave associated with it. This is
+because KVM doesn't track how a guest uses EPC pages.
+
+As a result, the SGX core page reclaimer doesn't support reclaiming EPC
+pages allocated to KVM guests through the virtual EPC driver. If the
+user wants to deploy SGX applications both on the host and in guests
+on the same machine, the user should reserve enough EPC (by subtracting
+the total virtual EPC size of all SGX VMs from the physical EPC size) for
+host SGX applications so they can run with acceptable performance.
+
+Architectural behavior is to restore all EPC pages to an uninitialized
+state also after a guest reboot. Because this state can be reached only
+through the privileged ``ENCLS[EREMOVE]`` instruction, ``/dev/sgx_vepc``
+provides the ``SGX_IOC_VEPC_REMOVE_ALL`` ioctl to execute the instruction
+on all pages in the virtual EPC.
+
+``EREMOVE`` can fail for three reasons. Userspace must pay attention
+to expected failures and handle them as follows:
+
+1. Page removal will always fail when any thread is running in the
+   enclave to which the page belongs. In this case the ioctl will
+   return ``EBUSY`` independent of whether it has successfully removed
+   some pages; userspace can avoid these failures by preventing execution
+   of any vcpu which maps the virtual EPC.
+
+2. Page removal will cause a general protection fault if two calls to
+   ``EREMOVE`` happen concurrently for pages that refer to the same
+   "SECS" metadata pages. This can happen if there are concurrent
+   invocations of ``SGX_IOC_VEPC_REMOVE_ALL``, or if a ``/dev/sgx_vepc``
+   file descriptor in the guest is closed at the same time as
+   ``SGX_IOC_VEPC_REMOVE_ALL``; it will also be reported as ``EBUSY``.
+   This can be avoided in userspace by serializing calls to the ioctl()
+   and to close(), but in general it should not be a problem.
+
+3. Finally, page removal will fail for SECS metadata pages which still
+   have child pages. Child pages can be removed by executing
+   ``SGX_IOC_VEPC_REMOVE_ALL`` on all ``/dev/sgx_vepc`` file descriptors
+   mapped into the guest. This means that the ioctl() must be called
+   twice: an initial set of calls to remove child pages and a subsequent
+   set of calls to remove SECS pages. The second set of calls is only
+   required for those mappings that returned a nonzero value from the
+   first call. It indicates a bug in the kernel or the userspace client
+   if any of the second round of ``SGX_IOC_VEPC_REMOVE_ALL`` calls has
+   a return code other than 0.
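The two-pass sequence described in point 3 can be sketched as follows;
vepc_fds, left and nr_vepc are placeholders supplied by the VMM, and all vcpus
are assumed to be stopped before the first call::

  /* Remove all vEPC pages: child pages first, then remaining SECS pages. */
  #include <errno.h>
  #include <sys/ioctl.h>
  #include <asm/sgx.h>    /* SGX_IOC_VEPC_REMOVE_ALL */

  static int vepc_remove_all(int *vepc_fds, long *left, int nr_vepc)
  {
          int i;

          /* First pass: removes child pages; SECS pages with children remain. */
          for (i = 0; i < nr_vepc; i++) {
                  left[i] = ioctl(vepc_fds[i], SGX_IOC_VEPC_REMOVE_ALL);
                  if (left[i] < 0)
                          return -errno;  /* e.g. EBUSY: a vcpu still runs */
          }

          /* Second pass: only for mappings whose first call returned nonzero. */
          for (i = 0; i < nr_vepc; i++) {
                  if (left[i] && ioctl(vepc_fds[i], SGX_IOC_VEPC_REMOVE_ALL))
                          return -1;      /* unexpected: kernel or VMM bug */
          }

          return 0;
  }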
diff --git a/MAINTAINERS b/MAINTAINERS index 23a23bd94c0033d95460f96faf799dc6ff9209d4..466b1c599848abe2282e9780fb34059c86f89414 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9038,6 +9038,12 @@ F: drivers/mfd/intel_soc_pmic* F: include/linux/mfd/intel_msic.h F: include/linux/mfd/intel_soc_pmic* +INTEL PMT DRIVER +M: "David E. Box" +S: Maintained +F: drivers/mfd/intel_pmt.c +F: drivers/platform/x86/intel_pmt_* + INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT M: Stanislav Yakovlev L: linux-wireless@vger.kernel.org diff --git a/Makefile b/Makefile index fbd2136f7bf8fa4ab398dcd11642a7fb1609d57b..3ebf74787e93fccfbecd2bb0217720bf97fb15c7 100644 --- a/Makefile +++ b/Makefile @@ -1073,7 +1073,7 @@ export mod_sign_cmd HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf) has_libelf = $(call try-run,\ - echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) + echo "int main() {}" | $(HOSTCC) $(KBUILD_HOSTLDFLAGS) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) ifdef CONFIG_STACK_VALIDATION ifeq ($(has_libelf),1) diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index 37f724ad5e3992297b4513bc7498bd4e981cdac7..a85e9c625ab50b693337ad1ebbf1f5b36372b68f 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -43,7 +43,7 @@ SYSCALL_DEFINE0(arc_gettls) return task_thread_info(current)->thr_ptr; } -SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new) +SYSCALL_DEFINE3(arc_usr_cmpxchg, int __user *, uaddr, int, expected, int, new) { struct pt_regs *regs = current_pt_regs(); u32 uval; diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 8986a91a6f31b82c2ef81f7b7a9b4c425425a9a6..dd1cf70353986045b55accb19ee4cff56e8b3d91 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -400,12 +400,12 @@ choice Say Y here if you want kernel low-level debugging support on i.MX25. - config DEBUG_IMX21_IMX27_UART - bool "i.MX21 and i.MX27 Debug UART" - depends on SOC_IMX21 || SOC_IMX27 + config DEBUG_IMX27_UART + bool "i.MX27 Debug UART" + depends on SOC_IMX27 help Say Y here if you want kernel low-level debugging support - on i.MX21 or i.MX27. + on i.MX27. config DEBUG_IMX28_UART bool "i.MX28 Debug UART" @@ -1523,7 +1523,7 @@ config DEBUG_IMX_UART_PORT int "i.MX Debug UART Port Selection" depends on DEBUG_IMX1_UART || \ DEBUG_IMX25_UART || \ - DEBUG_IMX21_IMX27_UART || \ + DEBUG_IMX27_UART || \ DEBUG_IMX31_UART || \ DEBUG_IMX35_UART || \ DEBUG_IMX50_UART || \ @@ -1591,12 +1591,12 @@ config DEBUG_LL_INCLUDE default "debug/icedcc.S" if DEBUG_ICEDCC default "debug/imx.S" if DEBUG_IMX1_UART || \ DEBUG_IMX25_UART || \ - DEBUG_IMX21_IMX27_UART || \ + DEBUG_IMX27_UART || \ DEBUG_IMX31_UART || \ DEBUG_IMX35_UART || \ DEBUG_IMX50_UART || \ DEBUG_IMX51_UART || \ - DEBUG_IMX53_UART ||\ + DEBUG_IMX53_UART || \ DEBUG_IMX6Q_UART || \ DEBUG_IMX6SL_UART || \ DEBUG_IMX6SX_UART || \ diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S index c0e7a745103e22b8618618e971ba3ab41b95e5af..230030c1308538f2cef2e9f7118d6e691ab8bd59 100644 --- a/arch/arm/boot/compressed/efi-header.S +++ b/arch/arm/boot/compressed/efi-header.S @@ -9,16 +9,22 @@ #include .macro __nop -#ifdef CONFIG_EFI_STUB - @ This is almost but not quite a NOP, since it does clobber the - @ condition flags. But it is the best we can do for EFI, since - @ PE/COFF expects the magic string "MZ" at offset 0, while the - @ ARM/Linux boot protocol expects an executable instruction - @ there. 
- .inst MZ_MAGIC | (0x1310 << 16) @ tstne r0, #0x4d000 -#else AR_CLASS( mov r0, r0 ) M_CLASS( nop.w ) + .endm + + .macro __initial_nops +#ifdef CONFIG_EFI_STUB + @ This is a two-instruction NOP, which happens to bear the + @ PE/COFF signature "MZ" in the first two bytes, so the kernel + @ is accepted as an EFI binary. Booting via the UEFI stub + @ will not execute those instructions, but the ARM/Linux + @ boot protocol does, so we need some NOPs here. + .inst MZ_MAGIC | (0xe225 << 16) @ eor r5, r5, 0x4d000 + eor r5, r5, 0x4d000 @ undo previous insn +#else + __nop + __nop #endif .endm diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 1ba21b868ea34396459fd4fb0461d45f7127803f..25cee93b91ddc0e9874d9f096a0d56a4b4aa85a4 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -209,7 +209,8 @@ start: * were patching the initial instructions of the kernel, i.e * had started to exploit this "patch area". */ - .rept 7 + __initial_nops + .rept 5 __nop .endr #ifndef CONFIG_THUMB2_KERNEL diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index ce66ffd5a1bbc4e7ab6aa4b3e4b20cd1bef3c8ef..7e8151681597c18ad90e3b10c64f74b3f623aa5b 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -731,6 +731,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \ logicpd-som-lv-37xx-devkit.dtb \ omap3430-sdp.dtb \ omap3-beagle.dtb \ + omap3-beagle-ab4.dtb \ omap3-beagle-xm.dtb \ omap3-beagle-xm-ab.dtb \ omap3-cm-t3517.dtb \ diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 9b1a24cc5e91f2f52c32eeb248831f60d13040e0..df3c8d1d8f641230f9b17b8dcde7f58336918eb1 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -168,7 +168,7 @@ }; uart0: serial@12000 { - compatible = "marvell,armada-38x-uart"; + compatible = "marvell,armada-38x-uart", "ns16550a"; reg = <0x12000 0x100>; reg-shift = <2>; interrupts = ; @@ -178,7 +178,7 @@ }; uart1: serial@12100 { - compatible = "marvell,armada-38x-uart"; + compatible = "marvell,armada-38x-uart", "ns16550a"; reg = <0x12100 0x100>; reg-shift = <2>; interrupts = ; diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi index 910eacc8ad3bd25fb1eb0f84b6c9916bc66d3838..a362714ae9fc0307b083dc837c416e59c520ecc9 100644 --- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi +++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi @@ -118,7 +118,7 @@ }; pinctrl_fwqspid_default: fwqspid_default { - function = "FWQSPID"; + function = "FWSPID"; groups = "FWQSPID"; }; diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi index 55ec83bde5a61f4ccb085923b7684df37eba8abc..b50229c3102fabe58d02ab4f027de282883726cd 100644 --- a/arch/arm/boot/dts/bcm2711.dtsi +++ b/arch/arm/boot/dts/bcm2711.dtsi @@ -290,6 +290,7 @@ hvs: hvs@7e400000 { compatible = "brcm,bcm2711-hvs"; + reg = <0x7e400000 0x8000>; interrupts = ; }; @@ -432,12 +433,26 @@ #size-cells = <0>; enable-method = "brcm,bcm2836-smp"; // for ARM 32-bit + /* Source for d/i-cache-line-size and d/i-cache-sets + * https://developer.arm.com/documentation/100095/0003 + * /Level-1-Memory-System/About-the-L1-memory-system?lang=en + * Source for d/i-cache-size + * https://www.raspberrypi.com/documentation/computers + * /processors.html#bcm2711 + */ cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a72"; reg = <0>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000d8>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 
32KiB(size)/64(line-size)=512ways/2-way set + i-cache-size = <0xc000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set + next-level-cache = <&l2>; }; cpu1: cpu@1 { @@ -446,6 +461,13 @@ reg = <1>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000e0>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + i-cache-size = <0xc000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set + next-level-cache = <&l2>; }; cpu2: cpu@2 { @@ -454,6 +476,13 @@ reg = <2>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000e8>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + i-cache-size = <0xc000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set + next-level-cache = <&l2>; }; cpu3: cpu@3 { @@ -462,6 +491,28 @@ reg = <3>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000f0>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + i-cache-size = <0xc000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 48KiB(size)/64(line-size)=768ways/3-way set + next-level-cache = <&l2>; + }; + + /* Source for d/i-cache-line-size and d/i-cache-sets + * https://developer.arm.com/documentation/100095/0003 + * /Level-2-Memory-System/About-the-L2-memory-system?lang=en + * Source for d/i-cache-size + * https://www.raspberrypi.com/documentation/computers + * /processors.html#bcm2711 + */ + l2: l2-cache0 { + compatible = "cache"; + cache-size = <0x100000>; + cache-line-size = <64>; + cache-sets = <1024>; // 1MiB(size)/64(line-size)=16384ways/16-way set + cache-level = <2>; }; }; diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi index 0199ec98cd61690ad964c1bbe782fcf64583b236..5dbdebc4625946a266d2c3f1719256086d5a2a59 100644 --- a/arch/arm/boot/dts/bcm2837.dtsi +++ b/arch/arm/boot/dts/bcm2837.dtsi @@ -40,12 +40,26 @@ #size-cells = <0>; enable-method = "brcm,bcm2836-smp"; // for ARM 32-bit + /* Source for d/i-cache-line-size and d/i-cache-sets + * https://developer.arm.com/documentation/ddi0500/e/level-1-memory-system + * /about-the-l1-memory-system?lang=en + * + * Source for d/i-cache-size + * https://magpi.raspberrypi.com/articles/raspberry-pi-3-specs-benchmarks + */ cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a53"; reg = <0>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000d8>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + next-level-cache = <&l2>; }; cpu1: cpu@1 { @@ -54,6 +68,13 @@ reg = <1>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000e0>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + next-level-cache = <&l2>; }; cpu2: cpu@2 { @@ -62,6 +83,13 @@ reg = <2>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000e8>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; // 
32KiB(size)/64(line-size)=512ways/4-way set + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + next-level-cache = <&l2>; }; cpu3: cpu@3 { @@ -70,6 +98,27 @@ reg = <3>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x000000f0>; + d-cache-size = <0x8000>; + d-cache-line-size = <64>; + d-cache-sets = <128>; // 32KiB(size)/64(line-size)=512ways/4-way set + i-cache-size = <0x8000>; + i-cache-line-size = <64>; + i-cache-sets = <256>; // 32KiB(size)/64(line-size)=512ways/2-way set + next-level-cache = <&l2>; + }; + + /* Source for cache-line-size + cache-sets + * https://developer.arm.com/documentation/ddi0500 + * /e/level-2-memory-system/about-the-l2-memory-system?lang=en + * Source for cache-size + * https://datasheets.raspberrypi.com/cm/cm1-and-cm3-datasheet.pdf + */ + l2: l2-cache0 { + compatible = "cache"; + cache-size = <0x80000>; + cache-line-size = <64>; + cache-sets = <512>; // 512KiB(size)/64(line-size)=8192ways/16-way set + cache-level = <2>; }; }; }; diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index 30b72f4318501181c543dd028971d5eccf40c567..f8c0eee7a62b99728989a68d867e3c0d0282528a 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -3448,8 +3448,7 @@ ti,timer-pwm; }; }; - - target-module@2c000 { /* 0x4882c000, ap 17 02.0 */ + timer15_target: target-module@2c000 { /* 0x4882c000, ap 17 02.0 */ compatible = "ti,sysc-omap4-timer", "ti,sysc"; reg = <0x2c000 0x4>, <0x2c010 0x4>; @@ -3477,7 +3476,7 @@ }; }; - target-module@2e000 { /* 0x4882e000, ap 19 14.0 */ + timer16_target: target-module@2e000 { /* 0x4882e000, ap 19 14.0 */ compatible = "ti,sysc-omap4-timer", "ti,sysc"; reg = <0x2e000 0x4>, <0x2e010 0x4>; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 7ecf8f86ac747ce2c4af9cd1d17d82937509a19a..9989321366560fd2f03d6837c8d63192a1dbf4c3 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1093,20 +1093,20 @@ }; /* Local timers, see ARM architected timer wrap erratum i940 */ -&timer3_target { +&timer15_target { ti,no-reset-on-init; ti,no-idle; timer@0 { - assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>; + assigned-clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>; assigned-clock-parents = <&timer_sys_clk_div>; }; }; -&timer4_target { +&timer16_target { ti,no-reset-on-init; ti,no-idle; timer@0 { - assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>; + assigned-clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>; assigned-clock-parents = <&timer_sys_clk_div>; }; }; diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts index 7777bf51a6e642c64775b175bfcc5426dd045235..ecc9d4dc707e45c5cc58e18b18b597f89f402d0e 100644 --- a/arch/arm/boot/dts/exynos4210-i9100.dts +++ b/arch/arm/boot/dts/exynos4210-i9100.dts @@ -765,7 +765,7 @@ compatible = "brcm,bcm4330-bt"; shutdown-gpios = <&gpl0 4 GPIO_ACTIVE_HIGH>; - reset-gpios = <&gpl1 0 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpl1 0 GPIO_ACTIVE_LOW>; device-wakeup-gpios = <&gpx3 1 GPIO_ACTIVE_HIGH>; host-wakeup-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>; }; diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi index d31a68672bfacb3a2f6575c26790db05b5498d6c..d7d756614edd1f27e125bd7b791a8ae196eba99b 100644 --- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi +++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi @@ -260,7 +260,7 @@ }; uart3_data: uart3-data { - 
samsung,pins = "gpa1-4", "gpa1-4"; + samsung,pins = "gpa1-4", "gpa1-5"; samsung,pin-function = ; samsung,pin-pud = ; samsung,pin-drv = ; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index d0e48c10aec2bfc0624baf8417b3a2d7b242a551..572198b6834e6b2aecee01a121fce38a1d97761a 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -118,6 +118,9 @@ status = "okay"; ddc = <&i2c_2>; hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>; + vdd-supply = <&ldo8_reg>; + vdd_osc-supply = <&ldo10_reg>; + vdd_pll-supply = <&ldo8_reg>; }; &i2c_0 { diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts index 4e49d8095b2927d8d31849916d1bee1637824429..741294bd564e7f455baacbc31fba2bc324ef12e5 100644 --- a/arch/arm/boot/dts/exynos5420-smdk5420.dts +++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts @@ -124,6 +124,9 @@ hpd-gpios = <&gpx3 7 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&hdmi_hpd_irq>; + vdd-supply = <&ldo6_reg>; + vdd_osc-supply = <&ldo7_reg>; + vdd_pll-supply = <&ldo6_reg>; }; &hsi2c_4 { diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts index 13112a8a5dd88dbd7274e860ad3c17b4d295761b..6544c730340fa4cefd916c22d37342c3080084a9 100644 --- a/arch/arm/boot/dts/gemini-nas4220b.dts +++ b/arch/arm/boot/dts/gemini-nas4220b.dts @@ -84,7 +84,7 @@ partitions { compatible = "redboot-fis"; /* Eraseblock at 0xfe0000 */ - fis-index-block = <0x1fc>; + fis-index-block = <0x7f>; }; }; diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts index 8cbaf1c8117456a11125de5f1f9986e1c9078e7d..3b609d987d88300f68f1402057dfc9b47b8bb146 100644 --- a/arch/arm/boot/dts/imx23-evk.dts +++ b/arch/arm/boot/dts/imx23-evk.dts @@ -79,7 +79,6 @@ MX23_PAD_LCD_RESET__GPIO_1_18 MX23_PAD_PWM3__GPIO_1_29 MX23_PAD_PWM4__GPIO_1_30 - MX23_PAD_SSP1_DETECT__SSP1_DETECT >; fsl,drive-strength = ; fsl,voltage = ; diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts index 4f88e96d81ddbdd42078409d0a94e43f3e194566..d5c68d1ea707c6610ff1ef2b1c3bb9883ad5e803 100644 --- a/arch/arm/boot/dts/imx53-m53menlo.dts +++ b/arch/arm/boot/dts/imx53-m53menlo.dts @@ -53,6 +53,31 @@ }; }; + lvds-decoder { + compatible = "ti,ds90cf364a", "lvds-decoder"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + lvds_decoder_in: endpoint { + remote-endpoint = <&lvds0_out>; + }; + }; + + port@1 { + reg = <1>; + + lvds_decoder_out: endpoint { + remote-endpoint = <&panel_in>; + }; + }; + }; + }; + panel { compatible = "edt,etm0700g0dh6"; pinctrl-0 = <&pinctrl_display_gpio>; @@ -61,7 +86,7 @@ port { panel_in: endpoint { - remote-endpoint = <&lvds0_out>; + remote-endpoint = <&lvds_decoder_out>; }; }; }; @@ -450,7 +475,7 @@ reg = <2>; lvds0_out: endpoint { - remote-endpoint = <&panel_in>; + remote-endpoint = <&lvds_decoder_in>; }; }; }; diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi index d07d8f83456d241a288be60ce729579b9dded12f..ccfa8e320be62e26eea26a970eca940e440e2752 100644 --- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi +++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi @@ -5,6 +5,8 @@ * Author: Fabio Estevam */ +#include + / { aliases { backlight = &backlight; @@ -226,6 +228,7 @@ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059 MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059 MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059 + MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0 >; }; @@ -304,7 +307,7 @@ &usdhc3 { 
pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - non-removable; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi index 62b771c1d5a9a71058dfb478a6f6f14b4641dbaa..f1c60b0cb143edad66f9376f34b0a856bfc86187 100644 --- a/arch/arm/boot/dts/imx7-colibri.dtsi +++ b/arch/arm/boot/dts/imx7-colibri.dtsi @@ -40,7 +40,7 @@ dailink_master: simple-audio-card,codec { sound-dai = <&codec>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; }; }; }; @@ -293,7 +293,7 @@ compatible = "fsl,sgtl5000"; #sound-dai-cells = <0>; reg = <0x0a>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_sai1_mclk>; VDDA-supply = <®_module_3v3_avdd>; diff --git a/arch/arm/boot/dts/imx7-mba7.dtsi b/arch/arm/boot/dts/imx7-mba7.dtsi index 50abf18ad30b20dc530c3b558d19956e98443c2f..887497e3bb4b8ff0e608835bc01bfe86ec07a78f 100644 --- a/arch/arm/boot/dts/imx7-mba7.dtsi +++ b/arch/arm/boot/dts/imx7-mba7.dtsi @@ -250,7 +250,7 @@ tlv320aic32x4: audio-codec@18 { compatible = "ti,tlv320aic32x4"; reg = <0x18>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; clock-names = "mclk"; ldoin-supply = <®_audio_3v3>; iov-supply = <®_audio_3v3>; diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts index e0751e6ba3c0f7e0d4709a5c1106eaeb7ab4f33b..a31de900139d6d10f0c849756bbf95e83ec5157d 100644 --- a/arch/arm/boot/dts/imx7d-nitrogen7.dts +++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts @@ -288,7 +288,7 @@ codec: wm8960@1a { compatible = "wlf,wm8960"; reg = <0x1a>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; clock-names = "mclk"; wlf,shared-lrclk; }; diff --git a/arch/arm/boot/dts/imx7d-pico-hobbit.dts b/arch/arm/boot/dts/imx7d-pico-hobbit.dts index 7b2198a9372c621e0276bd509c3b373cf02c4072..d917dc4f2f22759bc546c18248bbff7fcc3d726f 100644 --- a/arch/arm/boot/dts/imx7d-pico-hobbit.dts +++ b/arch/arm/boot/dts/imx7d-pico-hobbit.dts @@ -31,7 +31,7 @@ dailink_master: simple-audio-card,codec { sound-dai = <&sgtl5000>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; }; }; }; @@ -41,7 +41,7 @@ #sound-dai-cells = <0>; reg = <0x0a>; compatible = "fsl,sgtl5000"; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; VDDA-supply = <®_2p5v>; VDDIO-supply = <®_vref_1v8>; }; diff --git a/arch/arm/boot/dts/imx7d-pico-pi.dts b/arch/arm/boot/dts/imx7d-pico-pi.dts index 70bea95c06d83f84737d13478d1d7b37dca2272d..f263e391e24cbb6c44d569cf284dde74020f58ac 100644 --- a/arch/arm/boot/dts/imx7d-pico-pi.dts +++ b/arch/arm/boot/dts/imx7d-pico-pi.dts @@ -31,7 +31,7 @@ dailink_master: simple-audio-card,codec { sound-dai = <&sgtl5000>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; }; }; }; @@ -41,7 +41,7 @@ #sound-dai-cells = <0>; reg = <0x0a>; compatible = "fsl,sgtl5000"; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; VDDA-supply = <®_2p5v>; VDDIO-supply = <®_vref_1v8>; }; diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index ac0751bc1177e1544c3c00b9a9073a7e89252d71..6823b9f1a2a32a960b1940d878f716af6bf857e8 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -378,14 +378,14 @@ codec: wm8960@1a 
{ compatible = "wlf,wm8960"; reg = <0x1a>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; clock-names = "mclk"; wlf,shared-lrclk; wlf,hp-cfg = <2 2 3>; wlf,gpio-cfg = <1 3>; assigned-clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_SRC>, <&clks IMX7D_PLL_AUDIO_POST_DIV>, - <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; assigned-clock-parents = <&clks IMX7D_PLL_AUDIO_POST_DIV>; assigned-clock-rates = <0>, <884736000>, <12288000>; }; diff --git a/arch/arm/boot/dts/imx7s-warp.dts b/arch/arm/boot/dts/imx7s-warp.dts index d6b4888fa686bcc8b33699c4078429da53fb762f..e035dd5bf4f62ec6f6a687fb97e25c3650fe8e0d 100644 --- a/arch/arm/boot/dts/imx7s-warp.dts +++ b/arch/arm/boot/dts/imx7s-warp.dts @@ -75,7 +75,7 @@ dailink_master: simple-audio-card,codec { sound-dai = <&codec>; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; }; }; }; @@ -232,7 +232,7 @@ #sound-dai-cells = <0>; reg = <0x0a>; compatible = "fsl,sgtl5000"; - clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>; + clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_DIV>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_sai1_mclk>; VDDA-supply = <&vgen4_reg>; diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi index b7ea37ad4e55c45f5615766f3fa30bf7d9eab4a4..bcec98b96411437c831d9e91ddaf2f63d657ea9e 100644 --- a/arch/arm/boot/dts/imx7ulp.dtsi +++ b/arch/arm/boot/dts/imx7ulp.dtsi @@ -259,7 +259,7 @@ interrupts = ; clocks = <&pcc2 IMX7ULP_CLK_WDG1>; assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>; - assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>; + assigned-clock-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>; timeout-sec = <40>; }; diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index 7649dd1e0b9ee5cd6aa67d0a8ef48a3f80e08a84..c928ae312e19c0c329efb8d68a4ca1f4a5561d21 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -42,14 +42,14 @@ }; uart_A: serial@84c0 { - compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart"; reg = <0x84c0 0x18>; interrupts = ; status = "disabled"; }; uart_B: serial@84dc { - compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart"; reg = <0x84dc 0x18>; interrupts = ; status = "disabled"; @@ -87,7 +87,7 @@ }; uart_C: serial@8700 { - compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart"; reg = <0x8700 0x18>; interrupts = ; status = "disabled"; @@ -203,7 +203,7 @@ }; uart_AO: serial@4c0 { - compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart"; reg = <0x4c0 0x18>; interrupts = ; status = "disabled"; diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi index 740a6c816266cc6a20469f592f547474cc351a2c..08533116a39ce0a80264cafe325cdac5d3abc15a 100644 --- a/arch/arm/boot/dts/meson8.dtsi +++ b/arch/arm/boot/dts/meson8.dtsi @@ -598,27 +598,27 @@ }; &uart_AO { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8-uart", "amlogic,meson-ao-uart"; + clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_A { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>; - clock-names = "baud", "xtal", "pclk"; 
+ compatible = "amlogic,meson8-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_B { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_C { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &usb0 { diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi index 2401cdf5f7511be3bd16e4b77db46e3e88f7152f..f6eb7c803174e7c74c52244d3a5913113d248f41 100644 --- a/arch/arm/boot/dts/meson8b.dtsi +++ b/arch/arm/boot/dts/meson8b.dtsi @@ -586,27 +586,27 @@ }; &uart_AO { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart", "amlogic,meson-ao-uart"; + clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_A { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_B { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_C { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &usb0 { diff --git a/arch/arm/boot/dts/omap3-beagle-ab4.dts b/arch/arm/boot/dts/omap3-beagle-ab4.dts new file mode 100644 index 0000000000000000000000000000000000000000..990ff2d8468684469dbe5704ebe6a75ef32bd66c --- /dev/null +++ b/arch/arm/boot/dts/omap3-beagle-ab4.dts @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0-only +/dts-v1/; + +#include "omap3-beagle.dts" + +/ { + model = "TI OMAP3 BeagleBoard A to B4"; + compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3"; +}; + +/* + * Workaround for capacitor C70 issue, see "Boards revision A and < B5" + * section at https://elinux.org/BeagleBoard_Community + */ + +/* Unusable as clocksource because of unreliable oscillator */ +&counter32k { + status = "disabled"; +}; + +/* Unusable as clockevent because of unreliable oscillator, allow to idle */ +&timer1_target { + /delete-property/ti,no-reset-on-init; + /delete-property/ti,no-idle; + timer@0 { + /delete-property/ti,timer-alwon; + }; +}; + +/* Preferred always-on timer for clocksource */ +&timer12_target { + ti,no-reset-on-init; + ti,no-idle; + timer@0 { + /* Always clocked by secure_32k_fck */ + }; +}; + +/* Preferred timer for clockevent */ +&timer2_target { 
+	ti,no-reset-on-init;
+	ti,no-idle;
+	timer@0 {
+		assigned-clocks = <&gpt2_fck>;
+		assigned-clock-parents = <&sys_ck>;
+	};
+};
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index f9f34b8458e91c871de4a2e2d1093a306ba99cfd..0548b391334fdbf992beb9a3ae41c3edb7b741b4 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -304,39 +304,6 @@
 	phys = <0 &hsusb2_phy>;
 };

-/* Unusable as clocksource because of unreliable oscillator */
-&counter32k {
-	status = "disabled";
-};
-
-/* Unusable as clockevent because if unreliable oscillator, allow to idle */
-&timer1_target {
-	/delete-property/ti,no-reset-on-init;
-	/delete-property/ti,no-idle;
-	timer@0 {
-		/delete-property/ti,timer-alwon;
-	};
-};
-
-/* Preferred always-on timer for clocksource */
-&timer12_target {
-	ti,no-reset-on-init;
-	ti,no-idle;
-	timer@0 {
-		/* Always clocked by secure_32k_fck */
-	};
-};
-
-/* Preferred timer for clockevent */
-&timer2_target {
-	ti,no-reset-on-init;
-	ti,no-idle;
-	timer@0 {
-		assigned-clocks = <&gpt2_fck>;
-		assigned-clock-parents = <&sys_ck>;
-	};
-};
-
 &twl_gpio {
 	ti,use-leds;
 	/* pullups: BIT(1) */
diff --git a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
index 2c19d6e255bdc9c4c15cefcb4e12ecd85c2de28d..6883ccb45600b38a752353ad8118eb57695150c1 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
+++ b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
@@ -158,6 +158,24 @@
 	status = "disabled";
 };

+/* Unusable as clockevent because of unreliable oscillator, allow to idle */
+&timer1_target {
+	/delete-property/ti,no-reset-on-init;
+	/delete-property/ti,no-idle;
+	timer@0 {
+		/delete-property/ti,timer-alwon;
+	};
+};
+
+/* Preferred timer for clockevent */
+&timer12_target {
+	ti,no-reset-on-init;
+	ti,no-idle;
+	timer@0 {
+		/* Always clocked by secure_32k_fck */
+	};
+};
+
 &twl_gpio {
 	ti,use-leds;
 	/*
diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts
index c2995a280729d27900eeda63af011ce94d8b995a..162d0726b00801c50f4b029ef011f6d24cced686 100644
--- a/arch/arm/boot/dts/omap3-devkit8000.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000.dts
@@ -14,36 +14,3 @@
 		display2 = &tv0;
 	};
 };
-
-/* Unusable as clocksource because of unreliable oscillator */
-&counter32k {
-	status = "disabled";
-};
-
-/* Unusable as clockevent because if unreliable oscillator, allow to idle */
-&timer1_target {
-	/delete-property/ti,no-reset-on-init;
-	/delete-property/ti,no-idle;
-	timer@0 {
-		/delete-property/ti,timer-alwon;
-	};
-};
-
-/* Preferred always-on timer for clocksource */
-&timer12_target {
-	ti,no-reset-on-init;
-	ti,no-idle;
-	timer@0 {
-		/* Always clocked by secure_32k_fck */
-	};
-};
-
-/* Preferred timer for clockevent */
-&timer2_target {
-	ti,no-reset-on-init;
-	ti,no-idle;
-	timer@0 {
-		assigned-clocks = <&gpt2_fck>;
-		assigned-clock-parents = <&sys_ck>;
-	};
-};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 32335d4ce478b77b4224f05f266c322641a76277..d40c3d2c4914e400cf1365397632dc3d59ebf0f8 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -8,6 +8,7 @@
 #include "omap34xx.dtsi"
 #include
+#include

 /*
  * Default secure signed bootloader (Nokia X-Loader) does not enable L3 firewall
@@ -630,63 +631,92 @@
 	};

 	lp5523: lp5523@32 {
+		#address-cells = <1>;
+		#size-cells = <0>;
 		compatible = "national,lp5523";
 		reg = <0x32>;
 		clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
-		enable-gpio =
<&gpio2 9 GPIO_ACTIVE_HIGH>; /* 41 */ + enable-gpios = <&gpio2 9 GPIO_ACTIVE_HIGH>; /* 41 */ - chan0 { + led@0 { + reg = <0>; chan-name = "lp5523:kb1"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; - chan1 { + led@1 { + reg = <1>; chan-name = "lp5523:kb2"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; - chan2 { + led@2 { + reg = <2>; chan-name = "lp5523:kb3"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; - chan3 { + led@3 { + reg = <3>; chan-name = "lp5523:kb4"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; - chan4 { + led@4 { + reg = <4>; chan-name = "lp5523:b"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_STATUS; }; - chan5 { + led@5 { + reg = <5>; chan-name = "lp5523:g"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_STATUS; }; - chan6 { + led@6 { + reg = <6>; chan-name = "lp5523:r"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_STATUS; }; - chan7 { + led@7 { + reg = <7>; chan-name = "lp5523:kb5"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; - chan8 { + led@8 { + reg = <8>; chan-name = "lp5523:kb6"; led-cur = /bits/ 8 <50>; max-cur = /bits/ 8 <100>; + color = ; + function = LED_FUNCTION_KBD_BACKLIGHT; }; }; diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi index 74d8e2c8e4b343f55742b49ed8797793b67d2d20..3defd47fd8fabb026f1972afa78680c1f787815d 100644 --- a/arch/arm/boot/dts/qcom-ipq4019.dtsi +++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi @@ -142,7 +142,8 @@ clocks { sleep_clk: sleep_clk { compatible = "fixed-clock"; - clock-frequency = <32768>; + clock-frequency = <32000>; + clock-output-names = "gcc_sleep_clk_src"; #clock-cells = <0>; }; diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi index 172ea3c70eac200e85f89917ba3c1533ee9ea036..c197927e7435f5434a2a3e97ee15c0b0b51cbbd1 100644 --- a/arch/arm/boot/dts/qcom-msm8960.dtsi +++ b/arch/arm/boot/dts/qcom-msm8960.dtsi @@ -146,7 +146,9 @@ reg = <0x108000 0x1000>; qcom,ipc = <&l2cc 0x8 2>; - interrupts = <0 19 0>, <0 21 0>, <0 22 0>; + interrupts = , + , + ; interrupt-names = "ack", "err", "wakeup"; regulators { @@ -192,7 +194,7 @@ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm"; reg = <0x16440000 0x1000>, <0x16400000 0x1000>; - interrupts = <0 154 0x0>; + interrupts = ; clocks = <&gcc GSBI5_UART_CLK>, <&gcc GSBI5_H_CLK>; clock-names = "core", "iface"; status = "disabled"; @@ -318,7 +320,7 @@ #address-cells = <1>; #size-cells = <0>; reg = <0x16080000 0x1000>; - interrupts = <0 147 0>; + interrupts = ; spi-max-frequency = <24000000>; cs-gpios = <&msmgpio 8 0>; diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi index 7de8b006ca13aad537e06aa5fa41a57b1f14906a..2f17bf35d7a65f5a68bcd0bc3287cc46ca090053 100644 --- a/arch/arm/boot/dts/rk322x.dtsi +++ b/arch/arm/boot/dts/rk322x.dtsi @@ -640,8 +640,8 @@ interrupts = ; assigned-clocks = <&cru SCLK_HDMI_PHY>; assigned-clock-parents = <&hdmi_phy>; - clocks = <&cru SCLK_HDMI_HDCP>, <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_CEC>; - clock-names = "isfr", "iahb", "cec"; + clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>, <&cru SCLK_HDMI_CEC>; + clock-names = "iahb", "isfr", "cec"; 
pinctrl-names = "default"; pinctrl-0 = <&hdmii2c_xfer &hdmi_hpd &hdmi_cec>; resets = <&cru SRST_HDMI_P>; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index 0d89ad274268baec38f05ef8d624b3f9863233fe..9051fb4a267d4f9daac2db4d034945605172bbb4 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -990,7 +990,7 @@ status = "disabled"; }; - crypto: cypto-controller@ff8a0000 { + crypto: crypto@ff8a0000 { compatible = "rockchip,rk3288-crypto"; reg = <0x0 0xff8a0000 0x0 0x4000>; interrupts = ; diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 2c4952427296efc6ba96428096a6cf71b59e455b..12f57278ba4a53b9ff948f36917f5b672f5669c6 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -413,7 +413,7 @@ pmecc: ecc-engine@f8014070 { compatible = "atmel,sama5d2-pmecc"; reg = <0xf8014070 0x490>, - <0xf8014500 0x100>; + <0xf8014500 0x200>; }; }; diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi index 1a8f5e8b10e3a2fd472634d852c66b8131eaae2e..66cd473ecb61796b9f919e0b50c1df5ee1fc7df8 100644 --- a/arch/arm/boot/dts/spear1340.dtsi +++ b/arch/arm/boot/dts/spear1340.dtsi @@ -136,9 +136,9 @@ reg = <0xb4100000 0x1000>; interrupts = <0 105 0x4>; status = "disabled"; - dmas = <&dwdma0 12 0 1>, - <&dwdma0 13 1 0>; - dma-names = "tx", "rx"; + dmas = <&dwdma0 13 0 1>, + <&dwdma0 12 1 0>; + dma-names = "rx", "tx"; }; thermal@e07008c4 { diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index c87b881b2c8bb244fc43cdc37c97c6bd0f7657eb..9135533676879e8abb0d24aeb9a6c14b4701261d 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi @@ -284,9 +284,9 @@ #size-cells = <0>; interrupts = <0 31 0x4>; status = "disabled"; - dmas = <&dwdma0 4 0 0>, - <&dwdma0 5 0 0>; - dma-names = "tx", "rx"; + dmas = <&dwdma0 5 0 0>, + <&dwdma0 4 0 0>; + dma-names = "rx", "tx"; }; rtc@e0580000 { diff --git a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts index 08bddbf0336da8833de225d3a5907c3d81050b7d..446d93c1c78241e388904ee6004c24e2f094a63c 100644 --- a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts +++ b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts @@ -154,10 +154,6 @@ cap-sd-highspeed; cap-mmc-highspeed; /* All direction control is used */ - st,sig-dir-cmd; - st,sig-dir-dat0; - st,sig-dir-dat2; - st,sig-dir-dat31; st,sig-pin-fbclk; full-pwr-cycle; vmmc-supply = <&ab8500_ldo_aux3_reg>; diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts index 075ac57d0bf4af7ed5c5fea8233725963fd00a08..6435e099c632627708b6b5ad648643752db3a9e7 100644 --- a/arch/arm/boot/dts/stm32f429-disco.dts +++ b/arch/arm/boot/dts/stm32f429-disco.dts @@ -192,7 +192,7 @@ display: display@1{ /* Connect panel-ilitek-9341 to ltdc */ - compatible = "st,sf-tc240t-9370-t"; + compatible = "st,sf-tc240t-9370-t", "ilitek,ili9341"; reg = <1>; spi-3wire; spi-max-frequency = <10000000>; diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi index 89abd4cc7e23a381953d70de63e4b40539cae4ea..b21ecb820b133055caf17d8c7be3f584d42365d2 100644 --- a/arch/arm/boot/dts/sun8i-v3s.dtsi +++ b/arch/arm/boot/dts/sun8i-v3s.dtsi @@ -524,6 +524,17 @@ #size-cells = <0>; }; + gic: interrupt-controller@1c81000 { + compatible = "arm,gic-400"; + reg = <0x01c81000 0x1000>, + <0x01c82000 0x2000>, + <0x01c84000 0x2000>, + <0x01c86000 0x2000>; + interrupt-controller; + #interrupt-cells = <3>; + interrupts 
= ; + }; + csi1: camera@1cb4000 { compatible = "allwinner,sun8i-v3s-csi"; reg = <0x01cb4000 0x3000>; @@ -535,16 +546,5 @@ resets = <&ccu RST_BUS_CSI>; status = "disabled"; }; - - gic: interrupt-controller@1c81000 { - compatible = "arm,gic-400"; - reg = <0x01c81000 0x1000>, - <0x01c82000 0x2000>, - <0x01c84000 0x2000>, - <0x01c86000 0x2000>; - interrupt-controller; - #interrupt-cells = <3>; - interrupts = ; - }; }; }; diff --git a/arch/arm/boot/dts/tegra124-nyan-big.dts b/arch/arm/boot/dts/tegra124-nyan-big.dts index 1d2aac2cb6d038b50db7e48fbcdc2432e7564d13..fdc1d64dfff9dccbd9d3cc69a90e8c803de56dd1 100644 --- a/arch/arm/boot/dts/tegra124-nyan-big.dts +++ b/arch/arm/boot/dts/tegra124-nyan-big.dts @@ -13,12 +13,15 @@ "google,nyan-big-rev1", "google,nyan-big-rev0", "google,nyan-big", "google,nyan", "nvidia,tegra124"; - panel: panel { - compatible = "auo,b133xtn01"; - - power-supply = <&vdd_3v3_panel>; - backlight = <&backlight>; - ddc-i2c-bus = <&dpaux>; + host1x@50000000 { + dpaux@545c0000 { + aux-bus { + panel: panel { + compatible = "auo,b133xtn01"; + backlight = <&backlight>; + }; + }; + }; }; mmc@700b0400 { /* SD Card on this bus */ diff --git a/arch/arm/boot/dts/tegra124-nyan-blaze.dts b/arch/arm/boot/dts/tegra124-nyan-blaze.dts index 677babde6460ed1eb39a1e5d2db5fc42c896e1f4..abdf4456826f8f7100519e742fadb01b110e04db 100644 --- a/arch/arm/boot/dts/tegra124-nyan-blaze.dts +++ b/arch/arm/boot/dts/tegra124-nyan-blaze.dts @@ -15,12 +15,15 @@ "google,nyan-blaze-rev0", "google,nyan-blaze", "google,nyan", "nvidia,tegra124"; - panel: panel { - compatible = "samsung,ltn140at29-301"; - - power-supply = <&vdd_3v3_panel>; - backlight = <&backlight>; - ddc-i2c-bus = <&dpaux>; + host1x@50000000 { + dpaux@545c0000 { + aux-bus { + panel: panel { + compatible = "samsung,ltn140at29-301"; + backlight = <&backlight>; + }; + }; + }; }; sound { diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts index e6b54ac1ebd1a4252c2386a2370795a0c9326876..84e2d24065e9ae98a635ac29d40aeb3a0a543986 100644 --- a/arch/arm/boot/dts/tegra124-venice2.dts +++ b/arch/arm/boot/dts/tegra124-venice2.dts @@ -48,6 +48,13 @@ dpaux@545c0000 { vdd-supply = <&vdd_3v3_panel>; status = "okay"; + + aux-bus { + panel: panel { + compatible = "lg,lp129qe"; + backlight = <&backlight>; + }; + }; }; }; @@ -1079,13 +1086,6 @@ }; }; - panel: panel { - compatible = "lg,lp129qe"; - power-supply = <&vdd_3v3_panel>; - backlight = <&backlight>; - ddc-i2c-bus = <&dpaux>; - }; - vdd_mux: regulator@0 { compatible = "regulator-fixed"; regulator-name = "+VDD_MUX"; diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi index dd4d506683de7dcf07c45b173e57077ce79acbf0..7f14f0d005c3e2055103943c4f7ac93997a9ba14 100644 --- a/arch/arm/boot/dts/tegra20-tamonten.dtsi +++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi @@ -183,8 +183,8 @@ }; conf_ata { nvidia,pins = "ata", "atb", "atc", "atd", "ate", - "cdev1", "cdev2", "dap1", "dtb", "gma", - "gmb", "gmc", "gmd", "gme", "gpu7", + "cdev1", "cdev2", "dap1", "dtb", "dtf", + "gma", "gmb", "gmc", "gmd", "gme", "gpu7", "gpv", "i2cp", "irrx", "irtx", "pta", "rm", "slxa", "slxk", "spia", "spib", "uac"; @@ -203,7 +203,7 @@ }; conf_crtp { nvidia,pins = "crtp", "dap2", "dap3", "dap4", - "dtc", "dte", "dtf", "gpu", "sdio1", + "dtc", "dte", "gpu", "sdio1", "slxc", "slxd", "spdi", "spdo", "spig", "uda"; nvidia,pull = ; diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig index 
e00be9faa23bfa7e1a28a8b8f8d0bd8ba0c60a2b..4393e689f2354cb9a79da77e11c729ac6905873e 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -187,6 +187,7 @@ CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_PLATFORM_SUPPORT=y
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_VIDEO_ASPEED=m
 CONFIG_VIDEO_ATMEL_ISI=m
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index c9bf2df85cb904d735bac933817041af206b8479..c46c05548080aaaa0a6366bb0c25e4ee40ae5e70 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -83,6 +83,8 @@ config CRYPTO_AES_ARM_BS
 	depends on KERNEL_MODE_NEON
 	select CRYPTO_SKCIPHER
 	select CRYPTO_LIB_AES
+	select CRYPTO_AES
+	select CRYPTO_CBC
 	select CRYPTO_SIMD
 	help
 	  Use a faster and more secure NEON based implementation of AES in CBC,
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 20993615087acd16834672d7d12075b5c1d4441e..00c9aafa3a52aec8a8389eb73e1e65c863c2fa05 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -108,6 +108,16 @@
 	.endm
 #endif

+#if __LINUX_ARM_ARCH__ < 7
+	.macro	dsb, args
+	mcr	p15, 0, r0, c7, c10, 4
+	.endm
+
+	.macro	isb, args
+	mcr	p15, 0, r0, c7, c5, 4
+	.endm
+#endif
+
 	.macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
 	.if \save
diff --git a/arch/arm/include/asm/livepatch.h b/arch/arm/include/asm/livepatch.h
index 4f1cf4c72097efa40ce4df696e013ec9025a99aa..47d8b01618c74743324f061f2cf9034570a8b670 100644
--- a/arch/arm/include/asm/livepatch.h
+++ b/arch/arm/include/asm/livepatch.h
@@ -23,6 +23,8 @@
 #include

+#define KLP_ARM_BREAKPOINT_INSTRUCTION	0xe7f001f9
+
 struct klp_patch;
 struct klp_func;

@@ -41,17 +43,24 @@ int klp_check_calltrace(struct klp_patch *patch, int enable);

 #ifdef CONFIG_ARM_MODULE_PLTS
 #define LJMP_INSN_SIZE	3
-#endif
+#else
+#define LJMP_INSN_SIZE	1
+#endif /* CONFIG_ARM_MODULE_PLTS */

 struct arch_klp_data {
-#ifdef CONFIG_ARM_MODULE_PLTS
 	u32 old_insns[LJMP_INSN_SIZE];
-#else
-	u32 old_insn;
-#endif
+
+	/*
+	 * Saved opcode at the entry of the old func (which may be replaced
+	 * with a breakpoint).
+ */ + u32 saved_opcode; }; +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +int arch_klp_module_check_calltrace(void *data); #endif diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 734b8fe36896e830b4b366481f02d75abaf6f55a..c7c5dc6f3777dc52997065c7db0c646f5815dd72 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -3,20 +3,10 @@ #define _ASM_ARM_MODULE_H #include - -struct unwind_table; +#include #ifdef CONFIG_ARM_UNWIND -enum { - ARM_SEC_INIT, - ARM_SEC_DEVINIT, - ARM_SEC_CORE, - ARM_SEC_EXIT, - ARM_SEC_DEVEXIT, - ARM_SEC_HOT, - ARM_SEC_UNLIKELY, - ARM_SEC_MAX, -}; +#define ELF_SECTION_UNWIND 0x70000001 #endif #define PLT_ENT_STRIDE L1_CACHE_BYTES @@ -37,7 +27,8 @@ struct mod_plt_sec { struct mod_arch_specific { #ifdef CONFIG_ARM_UNWIND - struct unwind_table *unwind[ARM_SEC_MAX]; + struct list_head unwind_list; + struct unwind_table *init_table; #endif #ifdef CONFIG_ARM_MODULE_PLTS struct mod_plt_sec core; diff --git a/arch/arm/include/asm/spectre.h b/arch/arm/include/asm/spectre.h new file mode 100644 index 0000000000000000000000000000000000000000..85f9e538fb325730613f78419f52826a68b8d76c --- /dev/null +++ b/arch/arm/include/asm/spectre.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_SPECTRE_H +#define __ASM_SPECTRE_H + +enum { + SPECTRE_UNAFFECTED, + SPECTRE_MITIGATED, + SPECTRE_VULNERABLE, +}; + +enum { + __SPECTRE_V2_METHOD_BPIALL, + __SPECTRE_V2_METHOD_ICIALLU, + __SPECTRE_V2_METHOD_SMC, + __SPECTRE_V2_METHOD_HVC, + __SPECTRE_V2_METHOD_LOOP8, +}; + +enum { + SPECTRE_V2_METHOD_BPIALL = BIT(__SPECTRE_V2_METHOD_BPIALL), + SPECTRE_V2_METHOD_ICIALLU = BIT(__SPECTRE_V2_METHOD_ICIALLU), + SPECTRE_V2_METHOD_SMC = BIT(__SPECTRE_V2_METHOD_SMC), + SPECTRE_V2_METHOD_HVC = BIT(__SPECTRE_V2_METHOD_HVC), + SPECTRE_V2_METHOD_LOOP8 = BIT(__SPECTRE_V2_METHOD_LOOP8), +}; + +#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES +void spectre_v2_update_state(unsigned int state, unsigned int methods); +#else +static inline void spectre_v2_update_state(unsigned int state, + unsigned int methods) +{} +#endif + +int spectre_bhb_update_vectors(unsigned int method); + +#endif diff --git a/arch/arm/include/asm/unwind.h b/arch/arm/include/asm/unwind.h index 0f8a3439902d0613ac2b4ef98a3561d950ec6a43..b51f85417f58e185edd1ae6b2dd01caaac03b82c 100644 --- a/arch/arm/include/asm/unwind.h +++ b/arch/arm/include/asm/unwind.h @@ -24,6 +24,7 @@ struct unwind_idx { struct unwind_table { struct list_head list; + struct list_head mod_list; const struct unwind_idx *start; const struct unwind_idx *origin; const struct unwind_idx *stop; diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h index be04f5b5056f76a01c5319ed4a054a321a311fb5..0b2b1c577eab84b79a31e1234135789d6d3a2b3c 100644 --- a/arch/arm/include/asm/vmlinux.lds.h +++ b/arch/arm/include/asm/vmlinux.lds.h @@ -26,6 +26,19 @@ #define ARM_MMU_DISCARD(x) x #endif +/* + * ld.lld does not support NOCROSSREFS: + * https://github.com/ClangBuiltLinux/linux/issues/1609 + */ +#ifdef CONFIG_LD_IS_LLD +#define NOCROSSREFS +#endif + +/* Set start/end symbol names to the LMA for the section */ +#define ARM_LMA(sym, section) \ + sym##_start = LOADADDR(section); \ + sym##_end = LOADADDR(section) + SIZEOF(section) + #define PROC_INFO \ . 
= ALIGN(4); \ __proc_info_begin = .; \ @@ -115,19 +128,31 @@ * only thing that matters is their relative offsets */ #define ARM_VECTORS \ - __vectors_start = .; \ - .vectors 0xffff0000 : AT(__vectors_start) { \ - *(.vectors) \ + __vectors_lma = .; \ + OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) { \ + .vectors { \ + *(.vectors) \ + } \ + .vectors.bhb.loop8 { \ + *(.vectors.bhb.loop8) \ + } \ + .vectors.bhb.bpiall { \ + *(.vectors.bhb.bpiall) \ + } \ } \ - . = __vectors_start + SIZEOF(.vectors); \ - __vectors_end = .; \ + ARM_LMA(__vectors, .vectors); \ + ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8); \ + ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall); \ + . = __vectors_lma + SIZEOF(.vectors) + \ + SIZEOF(.vectors.bhb.loop8) + \ + SIZEOF(.vectors.bhb.bpiall); \ \ - __stubs_start = .; \ - .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) { \ + __stubs_lma = .; \ + .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) { \ *(.stubs) \ } \ - . = __stubs_start + SIZEOF(.stubs); \ - __stubs_end = .; + ARM_LMA(__stubs, .stubs); \ + . = __stubs_lma + SIZEOF(.stubs); \ #define ARM_TCM \ __itcm_start = ALIGN(4); \ diff --git a/arch/arm/include/debug/imx-uart.h b/arch/arm/include/debug/imx-uart.h index c8eb83d4b8964b1a5e0d1a01ec8b486154bab456..3edbb3c5b42bfcfc81bf60db3d7baa55ae74af6e 100644 --- a/arch/arm/include/debug/imx-uart.h +++ b/arch/arm/include/debug/imx-uart.h @@ -11,13 +11,6 @@ #define IMX1_UART_BASE_ADDR(n) IMX1_UART##n##_BASE_ADDR #define IMX1_UART_BASE(n) IMX1_UART_BASE_ADDR(n) -#define IMX21_UART1_BASE_ADDR 0x1000a000 -#define IMX21_UART2_BASE_ADDR 0x1000b000 -#define IMX21_UART3_BASE_ADDR 0x1000c000 -#define IMX21_UART4_BASE_ADDR 0x1000d000 -#define IMX21_UART_BASE_ADDR(n) IMX21_UART##n##_BASE_ADDR -#define IMX21_UART_BASE(n) IMX21_UART_BASE_ADDR(n) - #define IMX25_UART1_BASE_ADDR 0x43f90000 #define IMX25_UART2_BASE_ADDR 0x43f94000 #define IMX25_UART3_BASE_ADDR 0x5000c000 @@ -26,6 +19,13 @@ #define IMX25_UART_BASE_ADDR(n) IMX25_UART##n##_BASE_ADDR #define IMX25_UART_BASE(n) IMX25_UART_BASE_ADDR(n) +#define IMX27_UART1_BASE_ADDR 0x1000a000 +#define IMX27_UART2_BASE_ADDR 0x1000b000 +#define IMX27_UART3_BASE_ADDR 0x1000c000 +#define IMX27_UART4_BASE_ADDR 0x1000d000 +#define IMX27_UART_BASE_ADDR(n) IMX27_UART##n##_BASE_ADDR +#define IMX27_UART_BASE(n) IMX27_UART_BASE_ADDR(n) + #define IMX31_UART1_BASE_ADDR 0x43f90000 #define IMX31_UART2_BASE_ADDR 0x43f94000 #define IMX31_UART3_BASE_ADDR 0x5000c000 @@ -112,10 +112,10 @@ #ifdef CONFIG_DEBUG_IMX1_UART #define UART_PADDR IMX_DEBUG_UART_BASE(IMX1) -#elif defined(CONFIG_DEBUG_IMX21_IMX27_UART) -#define UART_PADDR IMX_DEBUG_UART_BASE(IMX21) #elif defined(CONFIG_DEBUG_IMX25_UART) #define UART_PADDR IMX_DEBUG_UART_BASE(IMX25) +#elif defined(CONFIG_DEBUG_IMX27_UART) +#define UART_PADDR IMX_DEBUG_UART_BASE(IMX27) #elif defined(CONFIG_DEBUG_IMX31_UART) #define UART_PADDR IMX_DEBUG_UART_BASE(IMX31) #elif defined(CONFIG_DEBUG_IMX35_UART) diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 20900568c5685cd235a767f37fe48d4d9ba5cfa8..b381ad96adef86ee4a88f4c9e69ad57d2d57c012 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -111,4 +111,6 @@ endif obj-$(CONFIG_HAVE_ARM_SMCCC) += smccc-call.o +obj-$(CONFIG_GENERIC_CPU_VULNERABILITIES) += spectre.o + extra-y := $(head-y) vmlinux.lds diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 4c43295e198e00f995922a41699d7808f345dda5..d74678d9598bbb10301cb4ec8e1c96a889142583 100644 --- a/arch/arm/kernel/entry-armv.S +++ 
b/arch/arm/kernel/entry-armv.S @@ -1005,12 +1005,11 @@ __kuser_helper_end: sub lr, lr, #\correction .endif - @ - @ Save r0, lr_ (parent PC) and spsr_ - @ (parent CPSR) - @ + @ Save r0, lr_ (parent PC) stmia sp, {r0, lr} @ save r0, lr - mrs lr, spsr + + @ Save spsr_ (parent CPSR) +2: mrs lr, spsr str lr, [sp, #8] @ save spsr @ @@ -1031,6 +1030,44 @@ __kuser_helper_end: movs pc, lr @ branch to handler in SVC mode ENDPROC(.Lvector_\name) +#ifdef CONFIG_HARDEN_BRANCH_HISTORY + .subsection 1 + .align 5 +vector_bhb_loop8_\name: + .if \correction + sub lr, lr, #\correction + .endif + + @ Save r0, lr_ (parent PC) + stmia sp, {r0, lr} + + @ bhb workaround + mov r0, #8 +3: b . + 4 + subs r0, r0, #1 + bne 3b + dsb + isb + b 2b +ENDPROC(vector_bhb_loop8_\name) + +vector_bhb_bpiall_\name: + .if \correction + sub lr, lr, #\correction + .endif + + @ Save r0, lr_ (parent PC) + stmia sp, {r0, lr} + + @ bhb workaround + mcr p15, 0, r0, c7, c5, 6 @ BPIALL + @ isb not needed due to "movs pc, lr" in the vector stub + @ which gives a "context synchronisation". + b 2b +ENDPROC(vector_bhb_bpiall_\name) + .previous +#endif + .align 2 @ handler addresses follow this label 1: @@ -1043,6 +1080,10 @@ ENDPROC(.Lvector_\name) #endif @ This must be the first word .word vector_swi +#ifdef CONFIG_HARDEN_BRANCH_HISTORY + .word vector_bhb_loop8_swi + .word vector_bhb_bpiall_swi +#endif .Lvector_rst: ARM( swi SYS_ERROR0 ) @@ -1157,8 +1198,10 @@ ENDPROC(.Lvector_\name) * FIQ "NMI" handler *----------------------------------------------------------------------------- * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86 - * systems. + * systems. This must be the last vector stub, so lets place it in its own + * subsection. */ + .subsection 2 vector_stub fiq, FIQ_MODE, 4 .long __fiq_usr @ 0 (USR_26 / USR_32) @@ -1189,6 +1232,30 @@ ENDPROC(.Lvector_\name) W(b) .Lvector_irq W(b) .Lvector_fiq +#ifdef CONFIG_HARDEN_BRANCH_HISTORY + .section .vectors.bhb.loop8, "ax", %progbits +.L__vectors_bhb_loop8_start: + W(b) .Lvector_rst + W(b) vector_bhb_loop8_und + W(ldr) pc, .L__vectors_bhb_loop8_start + 0x1004 + W(b) vector_bhb_loop8_pabt + W(b) vector_bhb_loop8_dabt + W(b) .Lvector_addrexcptn + W(b) vector_bhb_loop8_irq + W(b) vector_bhb_loop8_fiq + + .section .vectors.bhb.bpiall, "ax", %progbits +.L__vectors_bhb_bpiall_start: + W(b) .Lvector_rst + W(b) vector_bhb_bpiall_und + W(ldr) pc, .L__vectors_bhb_bpiall_start + 0x1008 + W(b) vector_bhb_bpiall_pabt + W(b) vector_bhb_bpiall_dabt + W(b) .Lvector_addrexcptn + W(b) vector_bhb_bpiall_irq + W(b) vector_bhb_bpiall_fiq +#endif + .data .align 2 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index fee279e28a72e05bdb67ebc4e7a37a3b35ca3824..7a2e63dfb4d9a13b914568931f88e0f5c78f5654 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -165,6 +165,29 @@ ENDPROC(ret_from_fork) *----------------------------------------------------------------------------- */ + .align 5 +#ifdef CONFIG_HARDEN_BRANCH_HISTORY +ENTRY(vector_bhb_loop8_swi) + sub sp, sp, #PT_REGS_SIZE + stmia sp, {r0 - r12} + mov r8, #8 +1: b 2f +2: subs r8, r8, #1 + bne 1b + dsb + isb + b 3f +ENDPROC(vector_bhb_loop8_swi) + + .align 5 +ENTRY(vector_bhb_bpiall_swi) + sub sp, sp, #PT_REGS_SIZE + stmia sp, {r0 - r12} + mcr p15, 0, r8, c7, c5, 6 @ BPIALL + isb + b 3f +ENDPROC(vector_bhb_bpiall_swi) +#endif .align 5 ENTRY(vector_swi) #ifdef CONFIG_CPU_V7M @@ -172,6 +195,7 @@ ENTRY(vector_swi) #else sub sp, sp, #PT_REGS_SIZE stmia sp, {r0 - r12} @ Calling r0 - r12 +3: ARM( 
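
The loop8 stubs above overwrite the branch history by taking eight forward branches before a dsb/isb pair. A hedged C rendering of just that sequence, assuming an ARMv7 target and GCC/Clang extended asm (this is a sketch of the idea, not the kernel's entry path, which must also preserve r0 and the banked registers):

/* ARMv7 + GNU assembler assumed; compiles only for an ARM target. */
static inline void bhb_loop8_sketch(void)
{
	unsigned long tmp;

	asm volatile(
	"	mov	%0, #8\n"
	"1:	b	.+4\n"		/* taken branch fills the history */
	"	subs	%0, %0, #1\n"
	"	bne	1b\n"
	"	dsb\n"
	"	isb\n"
	: "=&r" (tmp) : : "memory", "cc");
}
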
add r8, sp, #S_PC ) ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr THUMB( mov r8, sp ) diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S index a74289ebc803699955155b4f31bd387c8f23b9bd..5f1b1ce10473aa80c8fae70d130d45723c2bfdf2 100644 --- a/arch/arm/kernel/entry-ftrace.S +++ b/arch/arm/kernel/entry-ftrace.S @@ -22,10 +22,7 @@ * mcount can be thought of as a function called in the middle of a subroutine * call. As such, it needs to be transparent for both the caller and the * callee: the original lr needs to be restored when leaving mcount, and no - * registers should be clobbered. (In the __gnu_mcount_nc implementation, we - * clobber the ip register. This is OK because the ARM calling convention - * allows it to be clobbered in subroutines and doesn't use it to hold - * parameters.) + * registers should be clobbered. * * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}" * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c). @@ -70,26 +67,25 @@ .macro __ftrace_regs_caller - sub sp, sp, #8 @ space for PC and CPSR OLD_R0, + str lr, [sp, #-8]! @ store LR as PC and make space for CPSR/OLD_R0, @ OLD_R0 will overwrite previous LR - add ip, sp, #12 @ move in IP the value of SP as it was - @ before the push {lr} of the mcount mechanism + ldr lr, [sp, #8] @ get previous LR - str lr, [sp, #0] @ store LR instead of PC + str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR - ldr lr, [sp, #8] @ get previous LR + str lr, [sp, #-4]! @ store previous LR as LR - str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR + add lr, sp, #16 @ move in LR the value of SP as it was + @ before the push {lr} of the mcount mechanism - stmdb sp!, {ip, lr} - stmdb sp!, {r0-r11, lr} + push {r0-r11, ip, lr} @ stack content at this point: @ 0 4 48 52 56 60 64 68 72 - @ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 | + @ R0 | R1 | ... 
| IP | SP + 4 | previous LR | LR | PSR | OLD_R0 | - mov r3, sp @ struct pt_regs* + mov r3, sp @ struct pt_regs* ldr r2, =function_trace_op ldr r2, [r2] @ pointer to the current @@ -112,11 +108,9 @@ ftrace_graph_regs_call: #endif @ pop saved regs - ldmia sp!, {r0-r12} @ restore r0 through r12 - ldr ip, [sp, #8] @ restore PC - ldr lr, [sp, #4] @ restore LR - ldr sp, [sp, #0] @ restore SP - mov pc, ip @ return + pop {r0-r11, ip, lr} @ restore r0 through r12 + ldr lr, [sp], #4 @ restore LR + ldr pc, [sp], #12 .endm #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -132,11 +126,9 @@ ftrace_graph_regs_call: bl prepare_ftrace_return @ pop registers saved in ftrace_regs_caller - ldmia sp!, {r0-r12} @ restore r0 through r12 - ldr ip, [sp, #8] @ restore PC - ldr lr, [sp, #4] @ restore LR - ldr sp, [sp, #0] @ restore SP - mov pc, ip @ return + pop {r0-r11, ip, lr} @ restore r0 through r12 + ldr lr, [sp], #4 @ restore LR + ldr pc, [sp], #12 .endm #endif @@ -202,16 +194,17 @@ ftrace_graph_call\suffix: .endm .macro mcount_exit - ldmia sp!, {r0-r3, ip, lr} - ret ip + ldmia sp!, {r0-r3} + ldr lr, [sp, #4] + ldr pc, [sp], #8 .endm ENTRY(__gnu_mcount_nc) UNWIND(.fnstart) #ifdef CONFIG_DYNAMIC_FTRACE - mov ip, lr - ldmia sp!, {lr} - ret ip + push {lr} + ldr lr, [sp, #4] + ldr pc, [sp], #8 #else __mcount #endif diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index 7bd30c0a4280d9a6e029c05c49e8fb2d9b373b00..22f937e6f3ffb12a7e854179b73ea2a77c0eb06b 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -154,22 +154,38 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr) return 0; } -static struct undef_hook kgdb_brkpt_hook = { +static struct undef_hook kgdb_brkpt_arm_hook = { .instr_mask = 0xffffffff, .instr_val = KGDB_BREAKINST, - .cpsr_mask = MODE_MASK, + .cpsr_mask = PSR_T_BIT | MODE_MASK, .cpsr_val = SVC_MODE, .fn = kgdb_brk_fn }; -static struct undef_hook kgdb_compiled_brkpt_hook = { +static struct undef_hook kgdb_brkpt_thumb_hook = { + .instr_mask = 0xffff, + .instr_val = KGDB_BREAKINST & 0xffff, + .cpsr_mask = PSR_T_BIT | MODE_MASK, + .cpsr_val = PSR_T_BIT | SVC_MODE, + .fn = kgdb_brk_fn +}; + +static struct undef_hook kgdb_compiled_brkpt_arm_hook = { .instr_mask = 0xffffffff, .instr_val = KGDB_COMPILED_BREAK, - .cpsr_mask = MODE_MASK, + .cpsr_mask = PSR_T_BIT | MODE_MASK, .cpsr_val = SVC_MODE, .fn = kgdb_compiled_brk_fn }; +static struct undef_hook kgdb_compiled_brkpt_thumb_hook = { + .instr_mask = 0xffff, + .instr_val = KGDB_COMPILED_BREAK & 0xffff, + .cpsr_mask = PSR_T_BIT | MODE_MASK, + .cpsr_val = PSR_T_BIT | SVC_MODE, + .fn = kgdb_compiled_brk_fn +}; + static int __kgdb_notify(struct die_args *args, unsigned long cmd) { struct pt_regs *regs = args->regs; @@ -210,8 +226,10 @@ int kgdb_arch_init(void) if (ret != 0) return ret; - register_undef_hook(&kgdb_brkpt_hook); - register_undef_hook(&kgdb_compiled_brkpt_hook); + register_undef_hook(&kgdb_brkpt_arm_hook); + register_undef_hook(&kgdb_brkpt_thumb_hook); + register_undef_hook(&kgdb_compiled_brkpt_arm_hook); + register_undef_hook(&kgdb_compiled_brkpt_thumb_hook); return 0; } @@ -224,8 +242,10 @@ int kgdb_arch_init(void) */ void kgdb_arch_exit(void) { - unregister_undef_hook(&kgdb_brkpt_hook); - unregister_undef_hook(&kgdb_compiled_brkpt_hook); + unregister_undef_hook(&kgdb_brkpt_arm_hook); + unregister_undef_hook(&kgdb_brkpt_thumb_hook); + unregister_undef_hook(&kgdb_compiled_brkpt_arm_hook); + unregister_undef_hook(&kgdb_compiled_brkpt_thumb_hook); unregister_die_notifier(&kgdb_notifier); } diff --git 
a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 4b07e73ad37bf22073fe986e70e907ac076dad21..713ce67fa6e3e287cd8372998831f76a78061514 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include #include #include @@ -37,15 +39,9 @@ #define ARM_INSN_SIZE 4 #endif -#ifdef CONFIG_ARM_MODULE_PLTS #define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * ARM_INSN_SIZE) #define CHECK_JUMP_RANGE LJMP_INSN_SIZE -#else -#define MAX_SIZE_TO_CHECK ARM_INSN_SIZE -#define CHECK_JUMP_RANGE 1 -#endif - #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY /* * The instruction set on arm is A32. @@ -79,6 +75,7 @@ struct klp_func_list { struct walk_stackframe_args { int enable; struct klp_func_list *check_funcs; + struct module *mod; int ret; }; @@ -92,16 +89,6 @@ static inline unsigned long klp_size_to_check(unsigned long func_size, return size; } -static inline int klp_compare_address(unsigned long pc, unsigned long func_addr, - const char *func_name, unsigned long check_size) -{ - if (pc >= func_addr && pc < func_addr + check_size) { - pr_err("func %s is in use!\n", func_name); - return -EBUSY; - } - return 0; -} - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; @@ -153,7 +140,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { if (enable) { - if (func->force == KLP_ENFORCEMENT) + if (func->patched || func->force == KLP_ENFORCEMENT) continue; /* * When enable, checking the currently @@ -278,27 +265,18 @@ static void free_list(struct klp_func_list **funcs) } } -int klp_check_calltrace(struct klp_patch *patch, int enable) +static int do_check_calltrace(struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) { struct task_struct *g, *t; struct stackframe frame; - int ret = 0; - struct klp_func_list *check_funcs = NULL; - struct walk_stackframe_args args = { - .ret = 0 - }; - - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) - goto out; - args.check_funcs = check_funcs; for_each_process_thread(g, t) { if (t == current) { frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_stack_pointer; frame.lr = (unsigned long)__builtin_return_address(0); - frame.pc = (unsigned long)klp_check_calltrace; + frame.pc = (unsigned long)do_check_calltrace; } else if (strncmp(t->comm, "migration/", 10) == 0) { /* * current on other CPU @@ -316,21 +294,104 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) frame.lr = 0; /* recovered from the stack */ frame.pc = thread_saved_pc(t); } - if (check_funcs != NULL) { - walk_stackframe(&frame, klp_check_jump_func, &args); - if (args.ret) { - ret = args.ret; - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - goto out; - } + walk_stackframe(&frame, fn, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; } } + return 0; +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + struct klp_func_list *check_funcs = NULL; + struct walk_stackframe_args args = { + .enable = enable, + .ret = 0 + }; + + ret = klp_check_activeness_func(patch, enable, &check_funcs); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; + } + if (!check_funcs) + goto out; + + args.check_funcs = check_funcs; + ret = do_check_calltrace(&args, 
klp_check_jump_func); out: free_list(&check_funcs); return ret; } + +static int check_module_calltrace(struct stackframe *frame, void *data) +{ + struct walk_stackframe_args *args = data; + + if (within_module_core(frame->pc, args->mod)) { + pr_err("module %s is in use!\n", args->mod->name); + return (args->ret = -EBUSY); + } + return 0; +} + +int arch_klp_module_check_calltrace(void *data) +{ + struct walk_stackframe_args args = { + .mod = (struct module *)data, + .ret = 0 + }; + + return do_check_calltrace(&args, check_module_calltrace); +} + +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + u32 *addr = (u32 *)old_func; + + arch_data->saved_opcode = le32_to_cpu(*addr); + patch_text(old_func, KLP_ARM_BREAKPOINT_INSTRUCTION); + return 0; +} + +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + patch_text(old_func, arch_data->saved_opcode); +} + +static int klp_trap_handler(struct pt_regs *regs, unsigned int instr) +{ + void *brk_func = NULL; + unsigned long addr = regs->ARM_pc; + + brk_func = klp_get_brk_func((void *)addr); + if (!brk_func) { + pr_warn("Unrecoverable livepatch detected.\n"); + BUG(); + } + + regs->ARM_pc = (unsigned long)brk_func; + return 0; +} + +static struct undef_hook klp_arm_break_hook = { + .instr_mask = 0x0fffffff, + .instr_val = (KLP_ARM_BREAKPOINT_INSTRUCTION & 0x0fffffff), + .cpsr_mask = MODE_MASK, + .cpsr_val = SVC_MODE, + .fn = klp_trap_handler, +}; + +void arch_klp_init(void) +{ + register_undef_hook(&klp_arm_break_hook); +} + #endif static inline bool offset_in_range(unsigned long pc, unsigned long addr, @@ -356,7 +417,6 @@ long arm_insn_read(void *addr, u32 *insnp) long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) { long ret; -#ifdef CONFIG_ARM_MODULE_PLTS int i; for (i = 0; i < LJMP_INSN_SIZE; i++) { @@ -364,28 +424,17 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) if (ret) break; } -#else - ret = arm_insn_read(old_func, &arch_data->old_insn); -#endif return ret; } -int arch_klp_patch_func(struct klp_func *func) +static int do_patch(unsigned long pc, unsigned long new_addr) { - struct klp_func_node *func_node; - unsigned long pc, new_addr; - u32 insn; -#ifdef CONFIG_ARM_MODULE_PLTS - int i; u32 insns[LJMP_INSN_SIZE]; -#endif - func_node = func->func_node; - list_add_rcu(&func->stack_node, &func_node->func_stack); - pc = (unsigned long)func->old_func; - new_addr = (unsigned long)func->new_func; -#ifdef CONFIG_ARM_MODULE_PLTS if (!offset_in_range(pc, new_addr, SZ_32M)) { +#ifdef CONFIG_ARM_MODULE_PLTS + int i; + /* * [0] LDR PC, [PC+8] * [4] nop @@ -397,71 +446,54 @@ int arch_klp_patch_func(struct klp_func *func) for (i = 0; i < LJMP_INSN_SIZE; i++) __patch_text(((u32 *)pc) + i, insns[i]); - - } else { - insn = arm_gen_branch(pc, new_addr); - __patch_text((void *)pc, insn); - } #else - insn = arm_gen_branch(pc, new_addr); - __patch_text((void *)pc, insn); + /* + * When offset from 'new_addr' to 'pc' is out of SZ_32M range but + * CONFIG_ARM_MODULE_PLTS not enabled, we should stop patching. 
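
The refactor above funnels both checks through one walker: do_check_calltrace() iterates every thread's stack and invokes the supplied callback, and klp_check_jump_func and check_module_calltrace both report failure through args->ret. A standalone sketch of that walker/callback split, with the kernel's walk_stackframe() and struct stackframe modeled trivially:

#include <stdio.h>

struct frame { unsigned long pc; };

/* modeled after walk_stackframe(): call fn per frame until it returns nonzero */
static void walk(struct frame *frames, int n,
		 int (*fn)(struct frame *, void *), void *data)
{
	for (int i = 0; i < n; i++)
		if (fn(&frames[i], data))
			return;
}

struct args { unsigned long lo, hi; int ret; };

static int in_range(struct frame *f, void *data)
{
	struct args *a = data;

	if (f->pc >= a->lo && f->pc < a->hi)
		return (a->ret = -16);	/* -EBUSY, as in the patch */
	return 0;
}

int main(void)
{
	struct frame stack[] = { {0x1000}, {0x2040}, {0x3000} };
	struct args a = { 0x2000, 0x2100, 0 };

	walk(stack, 3, in_range, &a);
	printf("ret = %d\n", a.ret);	/* -16: one PC hit the range */
	return 0;
}
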
+ */ + pr_err("new address out of range\n"); + return -EFAULT; #endif - + } else { + insns[0] = arm_gen_branch(pc, new_addr); + __patch_text((void *)pc, insns[0]); + } return 0; } +int arch_klp_patch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + int ret; + + func_node = func->func_node; + list_add_rcu(&func->stack_node, &func_node->func_stack); + ret = do_patch((unsigned long)func->old_func, (unsigned long)func->new_func); + if (ret) + list_del_rcu(&func->stack_node); + return ret; +} + void arch_klp_unpatch_func(struct klp_func *func) { struct klp_func_node *func_node; struct klp_func *next_func; - unsigned long pc, new_addr; - u32 insn; -#ifdef CONFIG_ARM_MODULE_PLTS - int i; - u32 insns[LJMP_INSN_SIZE]; -#endif + unsigned long pc; func_node = func->func_node; pc = (unsigned long)func_node->old_func; - if (list_is_singular(&func_node->func_stack)) { -#ifdef CONFIG_ARM_MODULE_PLTS + list_del_rcu(&func->stack_node); + if (list_empty(&func_node->func_stack)) { + int i; + for (i = 0; i < LJMP_INSN_SIZE; i++) { - insns[i] = func_node->arch_data.old_insns[i]; - __patch_text(((u32 *)pc) + i, insns[i]); + __patch_text(((u32 *)pc) + i, func_node->arch_data.old_insns[i]); } -#else - insn = func_node->arch_data.old_insn; - __patch_text((void *)pc, insn); -#endif - list_del_rcu(&func->stack_node); } else { - list_del_rcu(&func->stack_node); next_func = list_first_or_null_rcu(&func_node->func_stack, struct klp_func, stack_node); - new_addr = (unsigned long)next_func->new_func; -#ifdef CONFIG_ARM_MODULE_PLTS - if (!offset_in_range(pc, new_addr, SZ_32M)) { - /* - * [0] LDR PC, [PC+8] - * [4] nop - * [8] new_addr_to_jump - */ - insns[0] = __opcode_to_mem_arm(0xe59ff000); - insns[1] = __opcode_to_mem_arm(0xe320f000); - insns[2] = new_addr; - - for (i = 0; i < LJMP_INSN_SIZE; i++) - __patch_text(((u32 *)pc) + i, insns[i]); - - } else { - insn = arm_gen_branch(pc, new_addr); - __patch_text((void *)pc, insn); - } -#else - insn = arm_gen_branch(pc, new_addr); - __patch_text((void *)pc, insn); -#endif + do_patch(pc, (unsigned long)next_func->new_func); } } diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 1cd09cf38c69bb18cc95ba0acb5de525b0d7c23d..bfe2bc380d38338d23c52342646039280b641647 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -369,46 +369,40 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, #ifdef CONFIG_ARM_UNWIND const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum; - struct mod_unwind_map maps[ARM_SEC_MAX]; - int i; + struct list_head *unwind_list = &mod->arch.unwind_list; - memset(maps, 0, sizeof(maps)); + INIT_LIST_HEAD(unwind_list); + mod->arch.init_table = NULL; for (s = sechdrs; s < sechdrs_end; s++) { const char *secname = secstrs + s->sh_name; + const char *txtname; + const Elf_Shdr *txt_sec; - if (!(s->sh_flags & SHF_ALLOC)) + if (!(s->sh_flags & SHF_ALLOC) || + s->sh_type != ELF_SECTION_UNWIND) continue; - if (strcmp(".ARM.exidx.init.text", secname) == 0) - maps[ARM_SEC_INIT].unw_sec = s; - else if (strcmp(".ARM.exidx", secname) == 0) - maps[ARM_SEC_CORE].unw_sec = s; - else if (strcmp(".ARM.exidx.exit.text", secname) == 0) - maps[ARM_SEC_EXIT].unw_sec = s; - else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0) - maps[ARM_SEC_UNLIKELY].unw_sec = s; - else if (strcmp(".ARM.exidx.text.hot", secname) == 0) - maps[ARM_SEC_HOT].unw_sec = s; - else if (strcmp(".init.text", secname) == 0) - maps[ARM_SEC_INIT].txt_sec = s; - else if 
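
do_patch() above centralizes the choice between a single relative branch and the three-word long jump ([0] LDR PC, [PC+8]; [4] nop; [8] target). A sketch of that encoding decision; the bounds of offset_in_range() are an assumption here (a symmetric ±32M window, matching the ARM B instruction's reach), since the helper's body is elided in this hunk:

#include <stdint.h>
#include <stdio.h>

#define SZ_32M	0x02000000ul

static int offset_in_range(unsigned long pc, unsigned long addr,
			   unsigned long range)
{
	long offset = (long)(addr - pc);

	return offset >= -(long)range && offset < (long)range;
}

int main(void)
{
	unsigned long pc = 0xc0100000ul, new_addr = 0x80000000ul;
	uint32_t insns[3];

	if (!offset_in_range(pc, new_addr, SZ_32M)) {
		insns[0] = 0xe59ff000u;	/* ldr pc, [pc]: PC reads as .+8,
					 * so this loads insns[2] */
		insns[1] = 0xe320f000u;	/* nop */
		insns[2] = (uint32_t)new_addr;
		printf("long jump: %08x %08x %08x\n",
		       insns[0], insns[1], insns[2]);
	} else {
		printf("single branch reachable\n");
	}
	return 0;
}
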
(strcmp(".text", secname) == 0) - maps[ARM_SEC_CORE].txt_sec = s; - else if (strcmp(".exit.text", secname) == 0) - maps[ARM_SEC_EXIT].txt_sec = s; - else if (strcmp(".text.unlikely", secname) == 0) - maps[ARM_SEC_UNLIKELY].txt_sec = s; - else if (strcmp(".text.hot", secname) == 0) - maps[ARM_SEC_HOT].txt_sec = s; - } + if (!strcmp(".ARM.exidx", secname)) + txtname = ".text"; + else + txtname = secname + strlen(".ARM.exidx"); + txt_sec = find_mod_section(hdr, sechdrs, txtname); + + if (txt_sec) { + struct unwind_table *table = + unwind_table_add(s->sh_addr, + s->sh_size, + txt_sec->sh_addr, + txt_sec->sh_size); - for (i = 0; i < ARM_SEC_MAX; i++) - if (maps[i].unw_sec && maps[i].txt_sec) - mod->arch.unwind[i] = - unwind_table_add(maps[i].unw_sec->sh_addr, - maps[i].unw_sec->sh_size, - maps[i].txt_sec->sh_addr, - maps[i].txt_sec->sh_size); + list_add(&table->mod_list, unwind_list); + + /* save init table for module_arch_freeing_init */ + if (strcmp(".ARM.exidx.init.text", secname) == 0) + mod->arch.init_table = table; + } + } #endif #ifdef CONFIG_ARM_PATCH_PHYS_VIRT s = find_mod_section(hdr, sechdrs, ".pv_table"); @@ -429,19 +423,27 @@ void module_arch_cleanup(struct module *mod) { #ifdef CONFIG_ARM_UNWIND - int i; + struct unwind_table *tmp; + struct unwind_table *n; - for (i = 0; i < ARM_SEC_MAX; i++) { - unwind_table_del(mod->arch.unwind[i]); - mod->arch.unwind[i] = NULL; + list_for_each_entry_safe(tmp, n, + &mod->arch.unwind_list, mod_list) { + list_del(&tmp->mod_list); + unwind_table_del(tmp); } + mod->arch.init_table = NULL; #endif } void __weak module_arch_freeing_init(struct module *mod) { #ifdef CONFIG_ARM_UNWIND - unwind_table_del(mod->arch.unwind[ARM_SEC_INIT]); - mod->arch.unwind[ARM_SEC_INIT] = NULL; + struct unwind_table *init = mod->arch.init_table; + + if (init) { + mod->arch.init_table = NULL; + list_del(&init->mod_list); + unwind_table_del(init); + } #endif } diff --git a/arch/arm/kernel/paravirt.c b/arch/arm/kernel/paravirt.c index 4cfed91fe256e5c94f6d6386f6673918ee593398..3c34f456e400056af38d1858fd183a7321971277 100644 --- a/arch/arm/kernel/paravirt.c +++ b/arch/arm/kernel/paravirt.c @@ -15,4 +15,4 @@ struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; struct paravirt_patch_template pv_ops; -EXPORT_SYMBOL_GPL(pv_ops); +EXPORT_SYMBOL(pv_ops); diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c index 3b69a76d341e784075a1f8ef053f0d308177feee..1626dfc6f6ce61c6aa7b94973190a86e6887f74a 100644 --- a/arch/arm/kernel/perf_callchain.c +++ b/arch/arm/kernel/perf_callchain.c @@ -62,9 +62,10 @@ user_backtrace(struct frame_tail __user *tail, void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct frame_tail __user *tail; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } @@ -98,9 +99,10 @@ callchain_trace(struct stackframe *fr, void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct stackframe fr; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } @@ -111,18 +113,21 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re unsigned long 
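
module_finalize() above no longer hardcodes the section pairs: it derives the text section from the unwind section name, pairing ".ARM.exidx" with ".text" and ".ARM.exidx<suffix>" with "<suffix>". The same string logic as a self-contained sketch:

#include <stdio.h>
#include <string.h>

static const char *exidx_to_text(const char *secname)
{
	if (!strcmp(".ARM.exidx", secname))
		return ".text";
	return secname + strlen(".ARM.exidx");	/* keep the suffix */
}

int main(void)
{
	printf("%s\n", exidx_to_text(".ARM.exidx"));		/* .text */
	printf("%s\n", exidx_to_text(".ARM.exidx.init.text"));	/* .init.text */
	printf("%s\n", exidx_to_text(".ARM.exidx.text.unlikely"));
	return 0;
}
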
perf_instruction_pointer(struct pt_regs *regs) { - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) - return perf_guest_cbs->get_guest_ip(); + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + + if (guest_cbs && guest_cbs->is_in_guest()) + return guest_cbs->get_guest_ip(); return instruction_pointer(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); int misc = 0; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { - if (perf_guest_cbs->is_user_mode()) + if (guest_cbs && guest_cbs->is_in_guest()) { + if (guest_cbs->is_user_mode()) misc |= PERF_RECORD_MISC_GUEST_USER; else misc |= PERF_RECORD_MISC_GUEST_KERNEL; diff --git a/arch/arm/kernel/spectre.c b/arch/arm/kernel/spectre.c new file mode 100644 index 0000000000000000000000000000000000000000..0dcefc36fb7a08af113ef923af1fe9c826da662d --- /dev/null +++ b/arch/arm/kernel/spectre.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include + +#include + +static bool _unprivileged_ebpf_enabled(void) +{ +#ifdef CONFIG_BPF_SYSCALL + return !sysctl_unprivileged_bpf_disabled; +#else + return false; +#endif +} + +ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "Mitigation: __user pointer sanitization\n"); +} + +static unsigned int spectre_v2_state; +static unsigned int spectre_v2_methods; + +void spectre_v2_update_state(unsigned int state, unsigned int method) +{ + if (state > spectre_v2_state) + spectre_v2_state = state; + spectre_v2_methods |= method; +} + +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, + char *buf) +{ + const char *method; + + if (spectre_v2_state == SPECTRE_UNAFFECTED) + return sprintf(buf, "%s\n", "Not affected"); + + if (spectre_v2_state != SPECTRE_MITIGATED) + return sprintf(buf, "%s\n", "Vulnerable"); + + if (_unprivileged_ebpf_enabled()) + return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n"); + + switch (spectre_v2_methods) { + case SPECTRE_V2_METHOD_BPIALL: + method = "Branch predictor hardening"; + break; + + case SPECTRE_V2_METHOD_ICIALLU: + method = "I-cache invalidation"; + break; + + case SPECTRE_V2_METHOD_SMC: + case SPECTRE_V2_METHOD_HVC: + method = "Firmware call"; + break; + + case SPECTRE_V2_METHOD_LOOP8: + method = "History overwrite"; + break; + + default: + method = "Multiple mitigations"; + break; + } + + return sprintf(buf, "Mitigation: %s\n", method); +} diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index db798eac743159ce288640970cdc09616868a82a..82477499982591a09eb26c3890b0277570f825f5 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -53,17 +53,17 @@ int notrace unwind_frame(struct stackframe *frame) return -EINVAL; frame->sp = frame->fp; - frame->fp = *(unsigned long *)(fp); - frame->pc = *(unsigned long *)(fp + 4); + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4)); #else /* check current frame pointer is within bounds */ if (fp < low + 12 || fp > high - 4) return -EINVAL; /* restore the registers from the stack frame */ - frame->fp = *(unsigned long *)(fp - 12); - frame->sp = *(unsigned long *)(fp - 8); - frame->pc = *(unsigned long *)(fp - 4); + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12)); + frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8)); + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4)); #endif 
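
In spectre.c above, the reported state only ratchets upward (toward SPECTRE_VULNERABLE) while the mitigation methods accumulate as a bitmask across CPUs; once more than one method bit is set, the sysfs switch falls through to "Multiple mitigations". A standalone model of that accumulation:

#include <stdio.h>

enum { SPECTRE_UNAFFECTED, SPECTRE_MITIGATED, SPECTRE_VULNERABLE };

#define BIT(n)		(1u << (n))
#define METHOD_BPIALL	BIT(0)
#define METHOD_ICIALLU	BIT(1)

static unsigned int state, methods;

static void update_state(unsigned int s, unsigned int m)
{
	if (s > state)		/* state only ratchets upward */
		state = s;
	methods |= m;		/* methods accumulate across CPUs */
}

int main(void)
{
	update_state(SPECTRE_MITIGATED, METHOD_BPIALL);	 /* e.g. Cortex-A9 */
	update_state(SPECTRE_MITIGATED, METHOD_ICIALLU); /* e.g. Cortex-A15 */

	if (state != SPECTRE_MITIGATED)
		puts("Vulnerable");
	else if (methods == METHOD_BPIALL)
		puts("Mitigation: Branch predictor hardening");
	else
		puts("Mitigation: Multiple mitigations");
	return 0;
}
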
return 0; diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index f8023061075348ce79bb40e47186ba0da30efa01..2d803839aa0676dc2a4593d7870ba6c63446acc8 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c @@ -193,7 +193,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr) destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data); /* Check access in reasonable access range for both SWP and SWPB */ - if (!access_ok((address & ~3), 4)) { + if (!access_ok((void __user *)(address & ~3), 4)) { pr_debug("SWP{B} emulation: access to %p not allowed!\n", (void *)address); res = -EFAULT; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 17d5a785df28b10ee5714246d3ed342192c25e75..a531afad87fdb984fc8444e9a9e971c7100dff73 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -588,7 +589,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags) if (end < start || flags) return -EINVAL; - if (!access_ok(start, end - start)) + if (!access_ok((void __user *)start, end - start)) return -EFAULT; return __do_cache_op(start, end); @@ -806,10 +807,59 @@ static inline void __init kuser_init(void *vectors) } #endif +#ifndef CONFIG_CPU_V7M +static void copy_from_lma(void *vma, void *lma_start, void *lma_end) +{ + memcpy(vma, lma_start, lma_end - lma_start); +} + +static void flush_vectors(void *vma, size_t offset, size_t size) +{ + unsigned long start = (unsigned long)vma + offset; + unsigned long end = start + size; + + flush_icache_range(start, end); +} + +#ifdef CONFIG_HARDEN_BRANCH_HISTORY +int spectre_bhb_update_vectors(unsigned int method) +{ + extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[]; + extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[]; + void *vec_start, *vec_end; + + if (system_state > SYSTEM_SCHEDULING) { + pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n", + smp_processor_id()); + return SPECTRE_VULNERABLE; + } + + switch (method) { + case SPECTRE_V2_METHOD_LOOP8: + vec_start = __vectors_bhb_loop8_start; + vec_end = __vectors_bhb_loop8_end; + break; + + case SPECTRE_V2_METHOD_BPIALL: + vec_start = __vectors_bhb_bpiall_start; + vec_end = __vectors_bhb_bpiall_end; + break; + + default: + pr_err("CPU%u: unknown Spectre BHB state %d\n", + smp_processor_id(), method); + return SPECTRE_VULNERABLE; + } + + copy_from_lma(vectors_page, vec_start, vec_end); + flush_vectors(vectors_page, 0, vec_end - vec_start); + + return SPECTRE_MITIGATED; +} +#endif + void __init early_trap_init(void *vectors_base) { -#ifndef CONFIG_CPU_V7M - unsigned long vectors = (unsigned long)vectors_base; extern char __stubs_start[], __stubs_end[]; extern char __vectors_start[], __vectors_end[]; unsigned i; @@ -830,17 +880,20 @@ void __init early_trap_init(void *vectors_base) * into the vector page, mapped at 0xffff0000, and ensure these * are visible to the instruction stream. 
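
The traps.c changes above exploit the new linker layout: all vector sets share the VMA at 0xffff0000 but have distinct LMAs, so selecting a hardened set is a memcpy from its load address into the vectors page followed by an icache flush. A sketch of the selection step, with static buffers standing in for the linker-provided LMA bounds and the vectors page (the kernel additionally calls flush_icache_range() afterwards):

#include <stdio.h>
#include <string.h>

static char vectors_bhb_loop8[32]  = "loop8 vectors";
static char vectors_bhb_bpiall[32] = "bpiall vectors";
static char vectors_page[32];

static void copy_from_lma(void *vma, void *lma_start, void *lma_end)
{
	memcpy(vma, lma_start, (char *)lma_end - (char *)lma_start);
}

int main(void)
{
	int use_loop8 = 1;	/* method picked per CPU part in proc-v7-bugs.c */

	if (use_loop8)
		copy_from_lma(vectors_page, vectors_bhb_loop8,
			      vectors_bhb_loop8 + sizeof(vectors_bhb_loop8));
	else
		copy_from_lma(vectors_page, vectors_bhb_bpiall,
			      vectors_bhb_bpiall + sizeof(vectors_bhb_bpiall));
	printf("%s\n", vectors_page);
	return 0;
}
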
*/ - memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); - memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start); + copy_from_lma(vectors_base, __vectors_start, __vectors_end); + copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end); kuser_init(vectors_base); - flush_icache_range(vectors, vectors + PAGE_SIZE * 2); + flush_vectors(vectors_base, 0, PAGE_SIZE * 2); +} #else /* ifndef CONFIG_CPU_V7M */ +void __init early_trap_init(void *vectors_base) +{ /* * on V7-M there is no need to copy the vector table to a dedicated * memory area. The address is configurable and so a table in the kernel * image can be used. */ -#endif } +#endif diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 428012687a802a9f2cbeeb9e24f950e5bd5f960f..7f7f6bae21c2d7dd0ffb8f17e58b9fb5ea25cb47 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void) int ret; u32 val; struct davinci_soc_info *soc_info = &davinci_soc_info; - u8 rmii_en = soc_info->emac_pdata->rmii_en; + u8 rmii_en; if (!machine_is_davinci_da850_evm()) return 0; + rmii_en = soc_info->emac_pdata->rmii_en; + cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG); val = __raw_readl(cfg_chip3_base); diff --git a/arch/arm/mach-iop32x/include/mach/entry-macro.S b/arch/arm/mach-iop32x/include/mach/entry-macro.S index 8e6766d4621eb7c6bf53afbd575f2eb5ec6f056f..341e5d9a6616d3287a87dc45ff8fb711566093c5 100644 --- a/arch/arm/mach-iop32x/include/mach/entry-macro.S +++ b/arch/arm/mach-iop32x/include/mach/entry-macro.S @@ -20,7 +20,7 @@ mrc p6, 0, \irqstat, c8, c0, 0 @ Read IINTSRC cmp \irqstat, #0 clzne \irqnr, \irqstat - rsbne \irqnr, \irqnr, #31 + rsbne \irqnr, \irqnr, #32 .endm .macro arch_ret_to_user, tmp1, tmp2 diff --git a/arch/arm/mach-iop32x/include/mach/irqs.h b/arch/arm/mach-iop32x/include/mach/irqs.h index c4e78df428e860e5b2b13b4bccd96eab8ea44ef2..e09ae5f48aec5c558cbf2582480b6f41ad58ff18 100644 --- a/arch/arm/mach-iop32x/include/mach/irqs.h +++ b/arch/arm/mach-iop32x/include/mach/irqs.h @@ -9,6 +9,6 @@ #ifndef __IRQS_H #define __IRQS_H -#define NR_IRQS 32 +#define NR_IRQS 33 #endif diff --git a/arch/arm/mach-iop32x/irq.c b/arch/arm/mach-iop32x/irq.c index 2d48bf1398c10d26c41776b9dfa9d1d6bc505f29..d1e8824cbd824a4620a045328e4b4025318b096f 100644 --- a/arch/arm/mach-iop32x/irq.c +++ b/arch/arm/mach-iop32x/irq.c @@ -32,14 +32,14 @@ static void intstr_write(u32 val) static void iop32x_irq_mask(struct irq_data *d) { - iop32x_mask &= ~(1 << d->irq); + iop32x_mask &= ~(1 << (d->irq - 1)); intctl_write(iop32x_mask); } static void iop32x_irq_unmask(struct irq_data *d) { - iop32x_mask |= 1 << d->irq; + iop32x_mask |= 1 << (d->irq - 1); intctl_write(iop32x_mask); } @@ -65,7 +65,7 @@ void __init iop32x_init_irq(void) machine_is_em7210()) *IOP3XX_PCIIRSR = 0x0f; - for (i = 0; i < NR_IRQS; i++) { + for (i = 1; i < NR_IRQS; i++) { irq_set_chip_and_handler(i, &ext_chip, handle_level_irq); irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE); } diff --git a/arch/arm/mach-iop32x/irqs.h b/arch/arm/mach-iop32x/irqs.h index 69858e4e905d13d37beb484f80459bf3c76cfe6a..e1dfc8b4e7d7e3e503a5f57ea49448b4f91d0189 100644 --- a/arch/arm/mach-iop32x/irqs.h +++ b/arch/arm/mach-iop32x/irqs.h @@ -7,36 +7,40 @@ #ifndef __IOP32X_IRQS_H #define __IOP32X_IRQS_H +/* Interrupts in Linux start at 1, hardware starts at 0 */ + +#define IOP_IRQ(x) ((x) + 1) + /* * IOP80321 chipset 
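
The iop32x rework above shifts every hardware interrupt source up by one, since Linux reserves IRQ 0 as "no irq": hardware bit n becomes Linux irq n+1 via IOP_IRQ(), the mask/unmask helpers subtract the 1 back off, and the entry macro computes 32 - clz(status) so the highest pending source decodes straight to its Linux number. A small model of the mapping:

#include <stdio.h>

#define IOP_IRQ(x)	((x) + 1)	/* hw bit -> Linux irq */

static unsigned int iop32x_mask;

static void irq_unmask(int irq)
{
	iop32x_mask |= 1u << (irq - 1);		/* Linux irq -> hw bit */
}

static void irq_mask(int irq)
{
	iop32x_mask &= ~(1u << (irq - 1));
}

int main(void)
{
	int timer0 = IOP_IRQ(9);	/* IRQ_IOP32X_TIMER0 */

	irq_unmask(timer0);
	printf("after unmask(TIMER0): %08x\n", iop32x_mask);	/* bit 9 set */
	irq_mask(timer0);
	printf("after mask(TIMER0):   %08x\n", iop32x_mask);
	return 0;
}
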
interrupts */ -#define IRQ_IOP32X_DMA0_EOT 0 -#define IRQ_IOP32X_DMA0_EOC 1 -#define IRQ_IOP32X_DMA1_EOT 2 -#define IRQ_IOP32X_DMA1_EOC 3 -#define IRQ_IOP32X_AA_EOT 6 -#define IRQ_IOP32X_AA_EOC 7 -#define IRQ_IOP32X_CORE_PMON 8 -#define IRQ_IOP32X_TIMER0 9 -#define IRQ_IOP32X_TIMER1 10 -#define IRQ_IOP32X_I2C_0 11 -#define IRQ_IOP32X_I2C_1 12 -#define IRQ_IOP32X_MESSAGING 13 -#define IRQ_IOP32X_ATU_BIST 14 -#define IRQ_IOP32X_PERFMON 15 -#define IRQ_IOP32X_CORE_PMU 16 -#define IRQ_IOP32X_BIU_ERR 17 -#define IRQ_IOP32X_ATU_ERR 18 -#define IRQ_IOP32X_MCU_ERR 19 -#define IRQ_IOP32X_DMA0_ERR 20 -#define IRQ_IOP32X_DMA1_ERR 21 -#define IRQ_IOP32X_AA_ERR 23 -#define IRQ_IOP32X_MSG_ERR 24 -#define IRQ_IOP32X_SSP 25 -#define IRQ_IOP32X_XINT0 27 -#define IRQ_IOP32X_XINT1 28 -#define IRQ_IOP32X_XINT2 29 -#define IRQ_IOP32X_XINT3 30 -#define IRQ_IOP32X_HPI 31 +#define IRQ_IOP32X_DMA0_EOT IOP_IRQ(0) +#define IRQ_IOP32X_DMA0_EOC IOP_IRQ(1) +#define IRQ_IOP32X_DMA1_EOT IOP_IRQ(2) +#define IRQ_IOP32X_DMA1_EOC IOP_IRQ(3) +#define IRQ_IOP32X_AA_EOT IOP_IRQ(6) +#define IRQ_IOP32X_AA_EOC IOP_IRQ(7) +#define IRQ_IOP32X_CORE_PMON IOP_IRQ(8) +#define IRQ_IOP32X_TIMER0 IOP_IRQ(9) +#define IRQ_IOP32X_TIMER1 IOP_IRQ(10) +#define IRQ_IOP32X_I2C_0 IOP_IRQ(11) +#define IRQ_IOP32X_I2C_1 IOP_IRQ(12) +#define IRQ_IOP32X_MESSAGING IOP_IRQ(13) +#define IRQ_IOP32X_ATU_BIST IOP_IRQ(14) +#define IRQ_IOP32X_PERFMON IOP_IRQ(15) +#define IRQ_IOP32X_CORE_PMU IOP_IRQ(16) +#define IRQ_IOP32X_BIU_ERR IOP_IRQ(17) +#define IRQ_IOP32X_ATU_ERR IOP_IRQ(18) +#define IRQ_IOP32X_MCU_ERR IOP_IRQ(19) +#define IRQ_IOP32X_DMA0_ERR IOP_IRQ(20) +#define IRQ_IOP32X_DMA1_ERR IOP_IRQ(21) +#define IRQ_IOP32X_AA_ERR IOP_IRQ(23) +#define IRQ_IOP32X_MSG_ERR IOP_IRQ(24) +#define IRQ_IOP32X_SSP IOP_IRQ(25) +#define IRQ_IOP32X_XINT0 IOP_IRQ(27) +#define IRQ_IOP32X_XINT1 IOP_IRQ(28) +#define IRQ_IOP32X_XINT2 IOP_IRQ(29) +#define IRQ_IOP32X_XINT3 IOP_IRQ(30) +#define IRQ_IOP32X_HPI IOP_IRQ(31) #endif diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c index 6794e2db1ad5f5ae1f0ea7026b73bf5fb7894b38..ecc46c31004f660961a450027a6d1bfd8e86a053 100644 --- a/arch/arm/mach-mmp/sram.c +++ b/arch/arm/mach-mmp/sram.c @@ -72,6 +72,8 @@ static int sram_probe(struct platform_device *pdev) if (!info) return -ENOMEM; + platform_set_drvdata(pdev, info); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no memory resource defined\n"); @@ -107,8 +109,6 @@ static int sram_probe(struct platform_device *pdev) list_add(&info->node, &sram_bank_list); mutex_unlock(&sram_lock); - platform_set_drvdata(pdev, info); - dev_info(&pdev->dev, "initialized\n"); return 0; @@ -127,17 +127,19 @@ static int sram_remove(struct platform_device *pdev) struct sram_bank_info *info; info = platform_get_drvdata(pdev); - if (info == NULL) - return -ENODEV; - mutex_lock(&sram_lock); - list_del(&info->node); - mutex_unlock(&sram_lock); + if (info->sram_size) { + mutex_lock(&sram_lock); + list_del(&info->node); + mutex_unlock(&sram_lock); + + gen_pool_destroy(info->gpool); + iounmap(info->sram_virt); + kfree(info->pool_name); + } - gen_pool_destroy(info->gpool); - iounmap(info->sram_virt); - kfree(info->pool_name); kfree(info); + return 0; } diff --git a/arch/arm/mach-mstar/Kconfig b/arch/arm/mach-mstar/Kconfig index 576d1ab293c8734c587d2dd2d918c21358fbf874..30560fdf87ed224c29d106f930bc12571745098e 100644 --- a/arch/arm/mach-mstar/Kconfig +++ b/arch/arm/mach-mstar/Kconfig @@ -3,6 +3,7 @@ menuconfig ARCH_MSTARV7 depends on ARCH_MULTI_V7 select 
ARM_GIC select ARM_HEAVY_MB + select HAVE_ARM_ARCH_TIMER select MST_IRQ help Support for newer MStar/Sigmastar SoC families that are diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 2000fca6bd4e6ec585930729381e552e8f13c251..6098666e928d0678e8fa2cba26d51e039515ea2e 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -263,9 +263,9 @@ static int __init omapdss_init_of(void) } r = of_platform_populate(node, NULL, NULL, &pdev->dev); + put_device(&pdev->dev); if (r) { pr_err("Unable to populate DSS submodule devices\n"); - put_device(&pdev->dev); return r; } diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 9443f129859b23653cc2caf2abec4b02e43c0840..1fd67abca055b26dd489fc736769ffdb946dcf50 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -749,8 +749,10 @@ static int __init _init_clkctrl_providers(void) for_each_matching_node(np, ti_clkctrl_match_table) { ret = _setup_clkctrl_provider(np); - if (ret) + if (ret) { + of_node_put(np); break; + } } return ret; diff --git a/arch/arm/mach-s3c/mach-jive.c b/arch/arm/mach-s3c/mach-jive.c index 2a29c3eca559eb6bf1c2448d9a5cfd12d522e216..ae6a1c9ebf78cb01ee4faefa4298f0ed51dd6b78 100644 --- a/arch/arm/mach-s3c/mach-jive.c +++ b/arch/arm/mach-s3c/mach-jive.c @@ -236,11 +236,11 @@ static int __init jive_mtdset(char *options) unsigned long set; if (options == NULL || options[0] == '\0') - return 0; + return 1; if (kstrtoul(options, 10, &set)) { printk(KERN_ERR "failed to parse mtdset=%s\n", options); - return 0; + return 1; } switch (set) { @@ -255,7 +255,7 @@ static int __init jive_mtdset(char *options) "using default.", set); } - return 0; + return 1; } /* parse the mtdset= option given to the kernel command line */ diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c index ee949255ced3f01ad4256b2b7bea3ba669006309..09ef73b99dd86a851a98bc7c75c433821c5b5bf1 100644 --- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c +++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c @@ -154,8 +154,10 @@ static int __init rcar_gen2_regulator_quirk(void) return -ENODEV; for_each_matching_node_and_match(np, rcar_gen2_quirk_match, &id) { - if (!of_device_is_available(np)) + if (!of_device_is_available(np)) { + of_node_put(np); break; + } ret = of_property_read_u32(np, "reg", &addr); if (ret) /* Skip invalid entry and continue */ @@ -164,6 +166,7 @@ static int __init rcar_gen2_regulator_quirk(void) quirk = kzalloc(sizeof(*quirk), GFP_KERNEL); if (!quirk) { ret = -ENOMEM; + of_node_put(np); goto err_mem; } diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig index c3bb68d57cea2e5143a61073744050c23be44233..b62ae4dafa2eb676a11e5b6ac9c01a4833dc2966 100644 --- a/arch/arm/mach-socfpga/Kconfig +++ b/arch/arm/mach-socfpga/Kconfig @@ -2,6 +2,7 @@ menuconfig ARCH_SOCFPGA bool "Altera SOCFPGA family" depends on ARCH_MULTI_V7 + select ARCH_HAS_RESET_CONTROLLER select ARCH_SUPPORTS_BIG_ENDIAN select ARM_AMBA select ARM_GIC @@ -18,6 +19,7 @@ menuconfig ARCH_SOCFPGA select PL310_ERRATA_727915 select PL310_ERRATA_753970 if PL310 select PL310_ERRATA_769419 + select RESET_CONTROLLER if ARCH_SOCFPGA config SOCFPGA_SUSPEND diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 423a97dd2f57c2fb940cb72840727d2370f0ae9b..c6bf34a33849c77a5c1fa4af189d87ac1b1c7156 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -833,6 +833,7 @@ config CPU_BPREDICT_DISABLE 
config CPU_SPECTRE bool + select GENERIC_CPU_VULNERABILITIES config HARDEN_BRANCH_PREDICTOR bool "Harden the branch predictor against aliasing attacks" if EXPERT @@ -853,6 +854,16 @@ config HARDEN_BRANCH_PREDICTOR If unsure, say Y. +config HARDEN_BRANCH_HISTORY + bool "Harden Spectre style attacks against branch history" if EXPERT + depends on CPU_SPECTRE + default y + help + Speculation attacks against some high-performance processors can + make use of branch history to influence future speculation. When + taking an exception, a sequence of branches overwrites the branch + history, or branch history is invalidated. + config TLS_REG_EMUL bool select NEED_KUSER_HELPERS diff --git a/arch/arm/mm/kasan_init.c b/arch/arm/mm/kasan_init.c index 9c348042a7244cf3b4bc6314b0c0b6117d743c21..4b1619584b23c17ae7a446e53d2e8e615659cc7b 100644 --- a/arch/arm/mm/kasan_init.c +++ b/arch/arm/mm/kasan_init.c @@ -226,7 +226,7 @@ void __init kasan_init(void) BUILD_BUG_ON(pgd_index(KASAN_SHADOW_START) != pgd_index(KASAN_SHADOW_END)); memcpy(tmp_pmd_table, - pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)), + (void*)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_START)), sizeof(tmp_pmd_table)); set_pgd(&tmp_pgd_table[pgd_index(KASAN_SHADOW_START)], __pgd(__pa(tmp_pmd_table) | PMD_TYPE_TABLE | L_PGD_SWAPPER)); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index c9e32610f05ee79b5750bc6fd3407441aa424db9..aa4490ccb8f43a2ebf7be4ae79c5538a2326e2ce 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -213,12 +213,14 @@ early_param("ecc", early_ecc); static int __init early_cachepolicy(char *p) { pr_warn("cachepolicy kernel parameter not supported without cp15\n"); + return 0; } early_param("cachepolicy", early_cachepolicy); static int __init noalign_setup(char *__unused) { pr_warn("noalign kernel parameter not supported without cp15\n"); + return 1; } __setup("noalign", noalign_setup); diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c index d7750cddc334d27576f5cbabe46586600cc1209e..6ecefc6962e5291b94141966d6a46e53f613b149 100644 --- a/arch/arm/mm/proc-v7-bugs.c +++ b/arch/arm/mm/proc-v7-bugs.c @@ -6,6 +6,7 @@ #include #include #include +#include #include /* @@ -21,6 +22,32 @@ static int __init nospectre_v2_setup(char *str) } early_param("nospectre_v2", nospectre_v2_setup); +#ifdef CONFIG_ARM_PSCI +static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_1, &res); + + switch ((int)res.a0) { + case SMCCC_RET_SUCCESS: + return SPECTRE_MITIGATED; + + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: + return SPECTRE_UNAFFECTED; + + default: + return SPECTRE_VULNERABLE; + } +} +#else +static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void) +{ + return SPECTRE_VULNERABLE; +} +#endif + #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn); @@ -49,19 +76,67 @@ static void __maybe_unused call_hvc_arch_workaround_1(void) arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); } -static void cpu_v7_spectre_init(void) +static unsigned int spectre_v2_install_workaround(unsigned int method) { const char *spectre_v2_method = NULL; int cpu = smp_processor_id(); + if (per_cpu(harden_branch_predictor_fn, cpu)) + return SPECTRE_MITIGATED; + + switch (method) { + case SPECTRE_V2_METHOD_BPIALL: + per_cpu(harden_branch_predictor_fn, cpu) = + harden_branch_predictor_bpiall; + spectre_v2_method = "BPIALL"; + break; + + case 
SPECTRE_V2_METHOD_ICIALLU: + per_cpu(harden_branch_predictor_fn, cpu) = + harden_branch_predictor_iciallu; + spectre_v2_method = "ICIALLU"; + break; + + case SPECTRE_V2_METHOD_HVC: + per_cpu(harden_branch_predictor_fn, cpu) = + call_hvc_arch_workaround_1; + cpu_do_switch_mm = cpu_v7_hvc_switch_mm; + spectre_v2_method = "hypervisor"; + break; + + case SPECTRE_V2_METHOD_SMC: + per_cpu(harden_branch_predictor_fn, cpu) = + call_smc_arch_workaround_1; + cpu_do_switch_mm = cpu_v7_smc_switch_mm; + spectre_v2_method = "firmware"; + break; + } + + if (spectre_v2_method) + pr_info("CPU%u: Spectre v2: using %s workaround\n", + smp_processor_id(), spectre_v2_method); + + return SPECTRE_MITIGATED; +} +#else +static unsigned int spectre_v2_install_workaround(unsigned int method) +{ + pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n", + smp_processor_id()); + + return SPECTRE_VULNERABLE; +} +#endif + +static void cpu_v7_spectre_v2_init(void) +{ + unsigned int state, method = 0; + if (nospectre_v2) { pr_info_once("Spectre v2: hardening is disabled\n"); return; } - if (per_cpu(harden_branch_predictor_fn, cpu)) - return; - switch (read_cpuid_part()) { case ARM_CPU_PART_CORTEX_A8: case ARM_CPU_PART_CORTEX_A9: @@ -69,69 +144,133 @@ static void cpu_v7_spectre_init(void) case ARM_CPU_PART_CORTEX_A17: case ARM_CPU_PART_CORTEX_A73: case ARM_CPU_PART_CORTEX_A75: - per_cpu(harden_branch_predictor_fn, cpu) = - harden_branch_predictor_bpiall; - spectre_v2_method = "BPIALL"; + state = SPECTRE_MITIGATED; + method = SPECTRE_V2_METHOD_BPIALL; break; case ARM_CPU_PART_CORTEX_A15: case ARM_CPU_PART_BRAHMA_B15: - per_cpu(harden_branch_predictor_fn, cpu) = - harden_branch_predictor_iciallu; - spectre_v2_method = "ICIALLU"; + state = SPECTRE_MITIGATED; + method = SPECTRE_V2_METHOD_ICIALLU; break; -#ifdef CONFIG_ARM_PSCI case ARM_CPU_PART_BRAHMA_B53: /* Requires no workaround */ + state = SPECTRE_UNAFFECTED; break; + default: /* Other ARM CPUs require no workaround */ - if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) + if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) { + state = SPECTRE_UNAFFECTED; break; + } + fallthrough; - /* Cortex A57/A72 require firmware workaround */ - case ARM_CPU_PART_CORTEX_A57: - case ARM_CPU_PART_CORTEX_A72: { - struct arm_smccc_res res; - arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, - ARM_SMCCC_ARCH_WORKAROUND_1, &res); - if ((int)res.a0 != 0) - return; + /* Cortex A57/A72 require firmware workaround */ + case ARM_CPU_PART_CORTEX_A57: + case ARM_CPU_PART_CORTEX_A72: + state = spectre_v2_get_cpu_fw_mitigation_state(); + if (state != SPECTRE_MITIGATED) + break; switch (arm_smccc_1_1_get_conduit()) { case SMCCC_CONDUIT_HVC: - per_cpu(harden_branch_predictor_fn, cpu) = - call_hvc_arch_workaround_1; - cpu_do_switch_mm = cpu_v7_hvc_switch_mm; - spectre_v2_method = "hypervisor"; + method = SPECTRE_V2_METHOD_HVC; break; case SMCCC_CONDUIT_SMC: - per_cpu(harden_branch_predictor_fn, cpu) = - call_smc_arch_workaround_1; - cpu_do_switch_mm = cpu_v7_smc_switch_mm; - spectre_v2_method = "firmware"; + method = SPECTRE_V2_METHOD_SMC; break; default: + state = SPECTRE_VULNERABLE; break; } } -#endif + + if (state == SPECTRE_MITIGATED) + state = spectre_v2_install_workaround(method); + + spectre_v2_update_state(state, method); +} + +#ifdef CONFIG_HARDEN_BRANCH_HISTORY +static int spectre_bhb_method; + +static const char *spectre_bhb_method_name(int method) +{ + switch (method) { + case SPECTRE_V2_METHOD_LOOP8: + return "loop"; + + case SPECTRE_V2_METHOD_BPIALL: + return "BPIALL"; + + 
default: + return "unknown"; } +} - if (spectre_v2_method) - pr_info("CPU%u: Spectre v2: using %s workaround\n", - smp_processor_id(), spectre_v2_method); +static int spectre_bhb_install_workaround(int method) +{ + if (spectre_bhb_method != method) { + if (spectre_bhb_method) { + pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n", + smp_processor_id()); + + return SPECTRE_VULNERABLE; + } + + if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE) + return SPECTRE_VULNERABLE; + + spectre_bhb_method = method; + } + + pr_info("CPU%u: Spectre BHB: using %s workaround\n", + smp_processor_id(), spectre_bhb_method_name(method)); + + return SPECTRE_MITIGATED; } #else -static void cpu_v7_spectre_init(void) +static int spectre_bhb_install_workaround(int method) { + return SPECTRE_VULNERABLE; } #endif +static void cpu_v7_spectre_bhb_init(void) +{ + unsigned int state, method = 0; + + switch (read_cpuid_part()) { + case ARM_CPU_PART_CORTEX_A15: + case ARM_CPU_PART_BRAHMA_B15: + case ARM_CPU_PART_CORTEX_A57: + case ARM_CPU_PART_CORTEX_A72: + state = SPECTRE_MITIGATED; + method = SPECTRE_V2_METHOD_LOOP8; + break; + + case ARM_CPU_PART_CORTEX_A73: + case ARM_CPU_PART_CORTEX_A75: + state = SPECTRE_MITIGATED; + method = SPECTRE_V2_METHOD_BPIALL; + break; + + default: + state = SPECTRE_UNAFFECTED; + break; + } + + if (state == SPECTRE_MITIGATED) + state = spectre_bhb_install_workaround(method); + + spectre_v2_update_state(state, method); +} + static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned, u32 mask, const char *msg) { @@ -160,16 +299,17 @@ static bool check_spectre_auxcr(bool *warned, u32 bit) void cpu_v7_ca8_ibe(void) { if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6))) - cpu_v7_spectre_init(); + cpu_v7_spectre_v2_init(); } void cpu_v7_ca15_ibe(void) { if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0))) - cpu_v7_spectre_init(); + cpu_v7_spectre_v2_init(); } void cpu_v7_bugs_init(void) { - cpu_v7_spectre_init(); + cpu_v7_spectre_v2_init(); + cpu_v7_spectre_bhb_init(); } diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index acb464547a54f45d90a05437517ce14e5448f21a..4a1991a103ea0c336b6212a7565da52c0cadf8ad 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c @@ -62,11 +62,12 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) unsigned long __pfn_to_mfn(unsigned long pfn) { - struct rb_node *n = phys_to_mach.rb_node; + struct rb_node *n; struct xen_p2m_entry *entry; unsigned long irqflags; read_lock_irqsave(&p2m_lock, irqflags); + n = phys_to_mach.rb_node; while (n) { entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); if (entry->pfn <= pfn && @@ -153,10 +154,11 @@ bool __set_phys_to_machine_multi(unsigned long pfn, int rc; unsigned long irqflags; struct xen_p2m_entry *p2m_entry; - struct rb_node *n = phys_to_mach.rb_node; + struct rb_node *n; if (mfn == INVALID_P2M_ENTRY) { write_lock_irqsave(&p2m_lock, irqflags); + n = phys_to_mach.rb_node; while (n) { p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); if (p2m_entry->pfn <= pfn && diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c0f6a275f798986171318c68be466d12bc2925ca..e253fdba1249cc196ecb2a1b0413fced0f86762a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -138,6 +138,7 @@ config ARM64 select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48) + select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN select 
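
The xen p2m fix above defers reading the rb-tree root until the lock is held: a root pointer sampled before rwlock acquisition can be stale by the time the walk starts, since writers may rebalance or replace the root in between. The same pattern in a self-contained sketch, with a pthreads rwlock standing in for p2m_lock:

#include <pthread.h>
#include <stdio.h>

struct node { unsigned long key; struct node *left, *right; };

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static struct node *root;	/* may be replaced by writers */

static struct node *lookup(unsigned long key)
{
	struct node *n;

	pthread_rwlock_rdlock(&lock);
	n = root;		/* read the root only after locking */
	while (n && n->key != key)
		n = key < n->key ? n->left : n->right;
	pthread_rwlock_unlock(&lock);
	return n;
}

int main(void)
{
	static struct node leaf = { 7, NULL, NULL };

	root = &leaf;
	printf("found: %lu\n", lookup(7)->key);
	return 0;
}
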
HAVE_ARCH_KFENCE select HAVE_ARCH_KGDB @@ -1155,6 +1156,15 @@ config ARCH_WANT_HUGE_PMD_SHARE config ARCH_HAS_CACHE_LINE_SIZE def_bool y +config ARCH_LLC_128_LINE_SIZE + bool "Force 128 bytes alignment for fitting LLC cacheline" + depends on ARM64 + default y + help + As specific machine's LLC cacheline size may be up to + 128 bytes, gaining performance improvement from fitting + 128 Bytes LLC cache aligned. + config ARCH_HAS_FILTER_PGPROT def_bool y @@ -1310,6 +1320,15 @@ config UNMAP_KERNEL_AT_EL0 If unsure, say Y. +config MITIGATE_SPECTRE_BRANCH_HISTORY + bool "Mitigate Spectre style attacks against branch history" if EXPERT + default y + help + Speculation attacks against some high-performance processors can + make use of branch history to influence future speculation. + When taking an exception from user-space, a sequence of branches + or a firmware call overwrites the branch history. + config RODATA_FULL_DEFAULT_ENABLED bool "Apply r/o permissions of VM areas also to their linear aliases" default y @@ -1625,6 +1644,15 @@ config ARM64_CNP at runtime, and does not affect PEs that do not implement this feature. +config ARM64_UCE_KERNEL_RECOVERY + bool "arm64 uce kernel recovery for special scenario" + depends on ACPI_APEI_SEA + help + With ARM v8.2 RAS Extension, SEA are usually triggered when memory + error are consumed. In some cases, if the error address is in a + user page there is a chance to recover. we can isolate this page + and killing process instead of die. + endmenu menu "ARMv8.3 architectural features" diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi index 959b299344e54c7346b596fd990e66a68d69faf0..075153a4d49fc597a616467f3decf8135125c7db 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi @@ -52,7 +52,7 @@ secure-monitor = <&sm>; }; - gpu_opp_table: gpu-opp-table { + gpu_opp_table: opp-table-gpu { compatible = "operating-points-v2"; opp-124999998 { @@ -101,6 +101,12 @@ no-map; }; + /* 32 MiB reserved for ARM Trusted Firmware (BL32) */ + secmon_reserved_bl32: secmon@5300000 { + reg = <0x0 0x05300000 0x0 0x2000000>; + no-map; + }; + linux,cma { compatible = "shared-dma-pool"; reusable; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts index 4d5b3e514b514be0034aa35a9c57dd5df8c7fca9..71f91e31c181887b6aa6abbd69365f8917d8439c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts +++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts @@ -157,14 +157,6 @@ regulator-always-on; }; - reserved-memory { - /* TEE Reserved Memory */ - bl32_reserved: bl32@5000000 { - reg = <0x0 0x05300000 0x0 0x2000000>; - no-map; - }; - }; - sdio_pwrseq: sdio-pwrseq { compatible = "mmc-pwrseq-simple"; reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi index 59b5f390887577d01afe9bda087e4dc6178bfa26..87e8e64ad5caee06df5c2a6a50338c74d8ae48ff 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi @@ -15,7 +15,7 @@ ethernet0 = ðmac; }; - dioo2133: audio-amplifier-0 { + dio2133: audio-amplifier-0 { compatible = "simple-audio-amplifier"; enable-gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>; VCC-supply = <&vcc_5v>; @@ -215,7 +215,7 @@ audio-widgets = "Line", "Lineout"; audio-aux-devs = <&tdmout_b>, <&tdmout_c>, <&tdmin_a>, 
<&tdmin_b>, <&tdmin_c>, <&tdmin_lb>, - <&dioo2133>; + <&dio2133>; audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1", "TDMOUT_B IN 1", "FRDDR_B OUT 1", "TDMOUT_B IN 2", "FRDDR_C OUT 1", @@ -543,7 +543,7 @@ pinctrl-0 = <&nor_pins>; pinctrl-names = "default"; - mx25u64: spi-flash@0 { + mx25u64: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "mxicy,mx25u6435f", "jedec,spi-nor"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 0edd137151f89eb6ba63695d28abdfaf18cf6434..47cbb0a1eb183ca0c7993f9f69337a47f7306a81 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -43,6 +43,12 @@ no-map; }; + /* 32 MiB reserved for ARM Trusted Firmware (BL32) */ + secmon_reserved_bl32: secmon@5300000 { + reg = <0x0 0x05300000 0x0 0x2000000>; + no-map; + }; + linux,cma { compatible = "shared-dma-pool"; reusable; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi index a350fee1264d7b1dfe2cf83d3a8a41bd16efebc3..a4d34398da358c054749e6599bb445dee01ab3e9 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi @@ -6,6 +6,7 @@ */ #include "meson-gxbb.dtsi" +#include / { aliases { @@ -64,6 +65,7 @@ regulator-name = "VDDIO_AO18"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; }; vcc_3v3: regulator-vcc_3v3 { @@ -161,6 +163,7 @@ status = "okay"; pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>; pinctrl-names = "default"; + hdmi-supply = <&vddio_ao18>; }; &hdmi_tx_tmds_port { diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts index 5ab139a34c0186dd522b063000ef7f90e315a9c5..c21178e9c6064fc49a67219878ccfbac8004ee7e 100644 --- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts +++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts @@ -203,14 +203,6 @@ regulator-always-on; }; - reserved-memory { - /* TEE Reserved Memory */ - bl32_reserved: bl32@5000000 { - reg = <0x0 0x05300000 0x0 0x2000000>; - no-map; - }; - }; - sdio_pwrseq: sdio-pwrseq { compatible = "mmc-pwrseq-simple"; reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>; diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts index ec19fbf928a142db6d9e853fee7e98f1e31d66b2..12a4b1c03390c0e2be02d6b2f2f921a1f6dc3d05 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2-svk.dts @@ -111,8 +111,8 @@ compatible = "silabs,si3226x"; reg = <0>; spi-max-frequency = <5000000>; - spi-cpha = <1>; - spi-cpol = <1>; + spi-cpha; + spi-cpol; pl022,hierarchy = <0>; pl022,interface = <0>; pl022,slave-tx-disable = <0>; @@ -135,8 +135,8 @@ at25,byte-len = <0x8000>; at25,addr-mode = <2>; at25,page-size = <64>; - spi-cpha = <1>; - spi-cpol = <1>; + spi-cpha; + spi-cpol; pl022,hierarchy = <0>; pl022,interface = <0>; pl022,slave-tx-disable = <0>; diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index 2cfeaf3b0a87685cc270b22f8e6bf862721e529f..8c218689fef70e745061ea92047179cfb53613a3 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -687,7 +687,7 @@ }; }; - sata: ahci@663f2000 { + sata: sata@663f2000 { compatible = "brcm,iproc-ahci", "generic-ahci"; reg = <0x663f2000 0x1000>; dma-coherent; diff --git 
a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts index 13cdc958ba3ea2182a60cfad0e29be68bb0fb567..71858c9376c25d87330e236b089c85a4c294e408 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts @@ -261,11 +261,6 @@ vcc-supply = <&sb_3v3>; }; - rtc@51 { - compatible = "nxp,pcf2129"; - reg = <0x51>; - }; - eeprom@56 { compatible = "atmel,24c512"; reg = <0x56>; @@ -307,6 +302,15 @@ }; +&i2c1 { + status = "okay"; + + rtc@51 { + compatible = "nxp,pcf2129"; + reg = <0x51>; + }; +}; + &enetc_port1 { phy-handle = <&qds_phy1>; phy-connection-type = "rgmii-id"; diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index c86cf786f4061f0cc4df9b3660f79cb4b2851b4c..8d0d41973ff54288b91d0f8841129787e0acbc6c 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -524,7 +524,7 @@ assigned-clock-rates = <0>, <0>, <0>, <594000000>; status = "disabled"; - port@0 { + port { lcdif_mipi_dsi: endpoint { remote-endpoint = <&mipi_dsi_lcdif_in>; }; diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi index 07c099b4ed5b565f3832c24d4240eef8a9414bf0..1e0c9415bfcd044507d9ced5e0ff4eeea2fea6f0 100644 --- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi +++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi @@ -476,7 +476,7 @@ }; usb0: usb@ffb00000 { - compatible = "snps,dwc2"; + compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2"; reg = <0xffb00000 0x40000>; interrupts = <0 93 4>; phys = <&usbphy0>; @@ -489,7 +489,7 @@ }; usb1: usb@ffb40000 { - compatible = "snps,dwc2"; + compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2"; reg = <0xffb40000 0x40000>; interrupts = <0 94 4>; phys = <&usbphy0>; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts index 2e437f20da39b72d638e9da6d3a7eff9e6540dc3..00e5dbf4b82363094e30a387f082ae6b739c440f 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts @@ -18,6 +18,7 @@ aliases { spi0 = &spi0; + ethernet0 = &eth0; ethernet1 = &eth1; mmc0 = &sdhci0; mmc1 = &sdhci1; @@ -137,7 +138,9 @@ /* * U-Boot port for Turris Mox has a bug which always expects that "ranges" DT property * contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and - * 2 size cells and also expects that the second range starts at 16 MB offset. If these + * 2 size cells and also expects that the second range starts at 16 MB offset. It also + * expects that the first range uses the same address for the PCI (child) and CPU (parent) + * cells (i.e. no remapping) and that this address is the lowest of all specified ranges. If these * conditions are not met then U-Boot crashes while loading the kernel DTB file. PCIe address * space is 128 MB long, so the best split between MEM and IO is to use a fixed 16 MB window * for IO and the rest 112 MB (64+32+16) for MEM, even though the maximal IO size is just 64 kB.
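For readers unfamiliar with PCI "ranges" triplets, here is a minimal user-space C sketch, not part of the patch, that decodes two such windows; the cell values are taken from the armada-37xx hunk further below, and the space-code decoding follows the standard PCI bus binding (bits 25:24 of the first child cell).

#include <stdint.h>
#include <stdio.h>

/* One "ranges" entry with 3 child address cells (flags + 64-bit PCI
 * address), 2 parent address cells (CPU address) and 2 size cells,
 * the exact layout the U-Boot port described above insists on. */
struct pcie_range {
	uint32_t flags;		/* space code in bits 25:24: 1=IO, 2=MEM32 */
	uint64_t pci_addr;
	uint64_t cpu_addr;
	uint64_t size;
};

int main(void)
{
	/* Example values from the armada-37xx fix below: a 127 MiB MEM
	 * window and a 64 KiB IO window remapped from PCI address 0. */
	const struct pcie_range r[] = {
		{ 0x82000000, 0xe8000000, 0xe8000000, 0x07f00000 },
		{ 0x81000000, 0x00000000, 0xefff0000, 0x00010000 },
	};

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int space = (r[i].flags >> 24) & 0x3;

		printf("range %u: %s pci=0x%llx cpu=0x%llx size=0x%llx (%s)\n",
		       i, space == 1 ? "IO " : "MEM",
		       (unsigned long long)r[i].pci_addr,
		       (unsigned long long)r[i].cpu_addr,
		       (unsigned long long)r[i].size,
		       r[i].pci_addr == r[i].cpu_addr ? "no remapping" : "remapped");
	}
	return 0;
}

Running it shows the MEM window as identity-mapped (same PCI and CPU address, satisfying the U-Boot expectation) and the IO window as remapped to PCI address 0.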
@@ -146,6 +149,9 @@ * https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7 * https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf * https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33 + * Bug related to requirement of same child and parent addresses for first range is fixed + * in U-Boot version 2022.04 by following commit: + * https://source.denx.de/u-boot/u-boot/-/commit/1fd54253bca7d43d046bba4853fe5fafd034bc17 */ #address-cells = <3>; #size-cells = <2>; diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index 2a2015a153627c7e05b976597204b4e694a2fd77..0f4bcd15d8580a2ad0d42f0ca1d05c0a83cbf393 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -495,7 +495,7 @@ * (totaling 127 MiB) for MEM. */ ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x07f00000 /* Port 0 MEM */ - 0x81000000 0 0xefff0000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */ + 0x81000000 0 0x00000000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */ interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pcie_intc 0>, <0 0 0 2 &pcie_intc 1>, diff --git a/arch/arm64/boot/dts/marvell/cn9130.dtsi b/arch/arm64/boot/dts/marvell/cn9130.dtsi index a2b7e5ec979d325d935a42bdda4caa13dc565f09..327b04134134ff1b0dd19ce3cdff2b42f96beddb 100644 --- a/arch/arm64/boot/dts/marvell/cn9130.dtsi +++ b/arch/arm64/boot/dts/marvell/cn9130.dtsi @@ -11,6 +11,13 @@ model = "Marvell Armada CN9130 SoC"; compatible = "marvell,cn9130", "marvell,armada-ap807-quad", "marvell,armada-ap807"; + + aliases { + gpio1 = &cp0_gpio1; + gpio2 = &cp0_gpio2; + spi1 = &cp0_spi0; + spi2 = &cp0_spi1; + }; }; /* @@ -35,3 +42,11 @@ #undef CP11X_PCIE0_BASE #undef CP11X_PCIE1_BASE #undef CP11X_PCIE2_BASE + +&cp0_gpio1 { + status = "okay"; +}; + +&cp0_gpio2 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi index 0c46ab7bbbf376eecadb9f25b1b74abdd7ed2097..eec6418ecdb1a3cb6fecaec1cae7e4a88dbd9d54 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi @@ -985,7 +985,7 @@ ccplex@e000000 { compatible = "nvidia,tegra186-ccplex-cluster"; - reg = <0x0 0x0e000000 0x0 0x3fffff>; + reg = <0x0 0x0e000000 0x0 0x400000>; nvidia,bpmp = <&bpmp>; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index 9b5007e5f790fd802eefc6e15c7643cc423552a6..05cf606b85c9fd8907e851a718cc6e0e635cc78d 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -782,13 +782,12 @@ reg = <0x3510000 0x10000>; interrupts = ; clocks = <&bpmp TEGRA194_CLK_HDA>, - <&bpmp TEGRA194_CLK_HDA2CODEC_2X>, - <&bpmp TEGRA194_CLK_HDA2HDMICODEC>; - clock-names = "hda", "hda2codec_2x", "hda2hdmi"; + <&bpmp TEGRA194_CLK_HDA2HDMICODEC>, + <&bpmp TEGRA194_CLK_HDA2CODEC_2X>; + clock-names = "hda", "hda2hdmi", "hda2codec_2x"; resets = <&bpmp TEGRA194_RESET_HDA>, - <&bpmp TEGRA194_RESET_HDA2CODEC_2X>, <&bpmp TEGRA194_RESET_HDA2HDMICODEC>; - reset-names = "hda", "hda2codec_2x", "hda2hdmi"; + reset-names = "hda", "hda2hdmi"; power-domains = <&bpmp TEGRA194_POWER_DOMAIN_DISP>; interconnects = <&mc TEGRA194_MEMORY_CLIENT_HDAR &emc>, <&mc TEGRA194_MEMORY_CLIENT_HDAW &emc>; diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi index 
9cb8f7a052df947782f7c9236df00bcc8e48c105..2a1f03cdb52c77bb1bb9d9aa9850dde1c27b0227 100644 --- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi @@ -221,7 +221,7 @@ interrupts = ; gpio-controller; #gpio-cells = <2>; - gpio-ranges = <&tlmm 0 80>; + gpio-ranges = <&tlmm 0 0 80>; interrupt-controller; #interrupt-cells = <2>; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index b1ffc056eea0bbef80fbe4a6843708debae9ed10..291276a38d7cd77df5609b89c6134af5ff9a3beb 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -18,8 +18,8 @@ #size-cells = <2>; aliases { - sdhc1 = &sdhc_1; /* SDC1 eMMC slot */ - sdhc2 = &sdhc_2; /* SDC2 SD card slot */ + mmc0 = &sdhc_1; /* SDC1 eMMC slot */ + mmc1 = &sdhc_2; /* SDC2 SD card slot */ }; chosen { }; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index eef17434d12ae624e42fb020807a22590bd1be31..ef5d03a1506930262c2d6308b255f56a10e2e834 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -645,9 +645,6 @@ nvmem-cells = <&gpu_speed_bin>; nvmem-cell-names = "speed_bin"; - qcom,gpu-quirk-two-pass-use-wfi; - qcom,gpu-quirk-fault-detect-mask; - operating-points-v2 = <&gpu_opp_table>; gpu_opp_table: opp-table { diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index ea6e3a11e641b0f4df11600f6ea56ba5a9672995..9beb3c34fcdb5f8ed4eaf38ad92e4d680560f14b 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -3406,10 +3406,10 @@ #clock-cells = <0>; clock-frequency = <9600000>; clock-output-names = "mclk"; - qcom,micbias1-millivolt = <1800>; - qcom,micbias2-millivolt = <1800>; - qcom,micbias3-millivolt = <1800>; - qcom,micbias4-millivolt = <1800>; + qcom,micbias1-microvolt = <1800000>; + qcom,micbias2-microvolt = <1800000>; + qcom,micbias3-microvolt = <1800000>; + qcom,micbias4-microvolt = <1800000>; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts index ad6561843ba28489effeb34e75aecabbc4f91d9e..e080c317b5e3dd52090daaae94f2e9ae866ec6ca 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts @@ -365,6 +365,10 @@ dai@1 { reg = <1>; }; + + dai@2 { + reg = <2>; + }; }; &sound { @@ -377,6 +381,7 @@ "SpkrLeft IN", "SPK1 OUT", "SpkrRight IN", "SPK2 OUT", "MM_DL1", "MultiMedia1 Playback", + "MM_DL3", "MultiMedia3 Playback", "MultiMedia2 Capture", "MM_UL2"; mm1-dai-link { @@ -393,6 +398,13 @@ }; }; + mm3-dai-link { + link-name = "MultiMedia3"; + cpu { + sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>; + }; + }; + slim-dai-link { link-name = "SLIM Playback"; cpu { @@ -422,6 +434,21 @@ sound-dai = <&wcd9340 1>; }; }; + + slim-wcd-dai-link { + link-name = "SLIM WCD Playback"; + cpu { + sound-dai = <&q6afedai SLIMBUS_1_RX>; + }; + + platform { + sound-dai = <&q6routing>; + }; + + codec { + sound-dai = <&wcd9340 2>; + }; + }; }; &tlmm { diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index 1aec54590a11aba8170d7293680f374521a837b7..a8a47378ba689b09af24dc2b2e6722c769e63715 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -1114,9 +1114,9 @@ qcom,tcs-offset = <0xd00>; qcom,drv-id = <2>; qcom,tcs-config = , - , - , - ; + , + , + ; rpmhcc: 
clock-controller { compatible = "qcom,sm8150-rpmh-clk"; diff --git a/arch/arm64/boot/dts/renesas/cat875.dtsi b/arch/arm64/boot/dts/renesas/cat875.dtsi index 801ea54b027c43d96a78ea29c3e7589a0ff15419..20f8adc635e72aa0ea0e20ed0a009120b9660003 100644 --- a/arch/arm64/boot/dts/renesas/cat875.dtsi +++ b/arch/arm64/boot/dts/renesas/cat875.dtsi @@ -18,6 +18,7 @@ pinctrl-names = "default"; renesas,no-ether-link; phy-handle = <&phy0>; + phy-mode = "rgmii-id"; status = "okay"; phy0: ethernet-phy@0 { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts index 6db18808b9c54be1f9c91398b7aa71545c8c446b..dc45ec372ada46bd4b97ba24456b4fe3dac93679 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts @@ -665,8 +665,8 @@ sd-uhs-sdr104; /* Power supply */ - vqmmc-supply = &vcc1v8_s3; /* IO line */ - vmmc-supply = &vcc_sdio; /* card's power */ + vqmmc-supply = <&vcc1v8_s3>; /* IO line */ + vmmc-supply = <&vcc_sdio>; /* card's power */ #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi index 765b24a2bcbf06d217a9400cca4e7df51c732088..fb0a13cad6c93d8d8dd1033e1efe5d6d0491bdc6 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi @@ -281,7 +281,7 @@ sound: sound { compatible = "rockchip,rk3399-gru-sound"; - rockchip,cpu = <&i2s0 &i2s2>; + rockchip,cpu = <&i2s0 &spdif>; }; }; @@ -432,10 +432,6 @@ ap_i2c_audio: &i2c8 { status = "okay"; }; -&i2s2 { - status = "okay"; -}; - &io_domains { status = "okay"; @@ -532,6 +528,17 @@ ap_i2c_audio: &i2c8 { vqmmc-supply = <&ppvar_sd_card_io>; }; +&spdif { + status = "okay"; + + /* + * SPDIF is routed internally to DP; we either don't use these pins, or + * mux them to something else. + */ + /delete-property/ pinctrl-0; + /delete-property/ pinctrl-names; +}; + &spi1 { status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 4660416c8f382aa97b485023e8682fb8725b00a3..544110aaffc569b183a61820a7fcd895f4a29a46 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -472,6 +472,12 @@ }; &sdhci { + /* + * Signal integrity isn't great at 200MHz but 100MHz has proven stable + * enough. 
+ */ + max-frequency = <100000000>; + bus-width = <8>; mmc-hs400-1_8v; mmc-hs400-enhanced-strobe; diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index 4b6065dbba55eacb162b730895db83abdf8cbab0..52ba4d07e77123bb5fbcd257e32ed81ee10e0dbf 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -1770,10 +1770,10 @@ interrupts = ; clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_SFR>, - <&cru PLL_VPLL>, + <&cru SCLK_HDMI_CEC>, <&cru PCLK_VIO_GRF>, - <&cru SCLK_HDMI_CEC>; - clock-names = "iahb", "isfr", "vpll", "grf", "cec"; + <&cru PLL_VPLL>; + clock-names = "iahb", "isfr", "cec", "grf", "vpll"; power-domains = <&power RK3399_PD_HDCP>; reg-io-width = <4>; rockchip,grf = <&grf>; diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index b9662205be9bf95b8c4efb160f457a169d926b21..d04189771c773d08c1ec7f6d4591af77f56fd6a0 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -35,7 +35,10 @@ #interrupt-cells = <3>; interrupt-controller; reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ - <0x00 0x01880000 0x00 0x90000>; /* GICR */ + <0x00 0x01880000 0x00 0x90000>, /* GICR */ + <0x00 0x6f000000 0x00 0x2000>, /* GICC */ + <0x00 0x6f010000 0x00 0x1000>, /* GICH */ + <0x00 0x6f020000 0x00 0x2000>; /* GICV */ /* * vcpumntirq: * virtual CPU interface maintenance interrupt diff --git a/arch/arm64/boot/dts/ti/k3-am65.dtsi b/arch/arm64/boot/dts/ti/k3-am65.dtsi index d84c0bc05023373e7cbebdd41c2c21655be79bb0..c6a3fecc7518ef4f90afd09d9ff41a7af848a7cb 100644 --- a/arch/arm64/boot/dts/ti/k3-am65.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65.dtsi @@ -84,6 +84,7 @@ <0x00 0x46000000 0x00 0x46000000 0x00 0x00200000>, <0x00 0x47000000 0x00 0x47000000 0x00 0x00068400>, <0x00 0x50000000 0x00 0x50000000 0x00 0x8000000>, + <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A53 PERIPHBASE */ <0x00 0x70000000 0x00 0x70000000 0x00 0x200000>, <0x05 0x00000000 0x05 0x00000000 0x01 0x0000000>, <0x07 0x00000000 0x07 0x00000000 0x01 0x0000000>; diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi index 5832ad830ed149c06c371bc3f39734e2a76e5905..bef47f96376d997b827a4234f2c2379f791bfe29 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi @@ -25,7 +25,7 @@ #size-cells = <1>; ranges = <0x00 0x00 0x00100000 0x1c000>; - serdes_ln_ctrl: serdes-ln-ctrl@4080 { + serdes_ln_ctrl: mux-controller@4080 { compatible = "mmio-mux"; #mux-control-cells = <1>; mux-reg-masks = <0x4080 0x3>, <0x4084 0x3>, /* SERDES0 lane0/1 select */ @@ -47,7 +47,10 @@ #interrupt-cells = <3>; interrupt-controller; reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ - <0x00 0x01900000 0x00 0x100000>; /* GICR */ + <0x00 0x01900000 0x00 0x100000>, /* GICR */ + <0x00 0x6f000000 0x00 0x2000>, /* GICC */ + <0x00 0x6f010000 0x00 0x1000>, /* GICH */ + <0x00 0x6f020000 0x00 0x2000>; /* GICV */ /* vcpumntirq: virtual CPU interface maintenance interrupt */ interrupts = ; diff --git a/arch/arm64/boot/dts/ti/k3-j7200.dtsi b/arch/arm64/boot/dts/ti/k3-j7200.dtsi index 66169bcf7c9a408e827dd21ffd51fe1617bd77d7..59f5113e657dd62c18ee13bc26709f47bbb23f4d 100644 --- a/arch/arm64/boot/dts/ti/k3-j7200.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j7200.dtsi @@ -60,7 +60,7 @@ i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; - d-cache-sets = <128>; + d-cache-sets = <256>; next-level-cache = <&L2_0>; }; @@ 
-74,7 +74,7 @@ i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; - d-cache-sets = <128>; + d-cache-sets = <256>; next-level-cache = <&L2_0>; }; }; @@ -84,7 +84,7 @@ cache-level = <2>; cache-size = <0x100000>; cache-line-size = <64>; - cache-sets = <2048>; + cache-sets = <1024>; next-level-cache = <&msmc_l3>; }; @@ -127,6 +127,7 @@ <0x00 0x00a40000 0x00 0x00a40000 0x00 0x00000800>, /* timesync router */ <0x00 0x01000000 0x00 0x01000000 0x00 0x0d000000>, /* Most peripherals */ <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>, /* MAIN NAVSS */ + <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */ <0x00 0x70000000 0x00 0x70000000 0x00 0x00800000>, /* MSMC RAM */ <0x00 0x18000000 0x00 0x18000000 0x00 0x08000000>, /* PCIe1 DAT0 */ <0x41 0x00000000 0x41 0x00000000 0x01 0x00000000>, /* PCIe1 DAT1 */ diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi index 85526f72b4616c888fc953bc21d4b7f25dc0c48d..0350ddfe2c72384313539d98a23ab50c33dbe4e1 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi @@ -108,7 +108,10 @@ #interrupt-cells = <3>; interrupt-controller; reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ - <0x00 0x01900000 0x00 0x100000>; /* GICR */ + <0x00 0x01900000 0x00 0x100000>, /* GICR */ + <0x00 0x6f000000 0x00 0x2000>, /* GICC */ + <0x00 0x6f010000 0x00 0x1000>, /* GICH */ + <0x00 0x6f020000 0x00 0x2000>; /* GICV */ /* vcpumntirq: virtual CPU interface maintenance interrupt */ interrupts = ; diff --git a/arch/arm64/boot/dts/ti/k3-j721e.dtsi b/arch/arm64/boot/dts/ti/k3-j721e.dtsi index cc483f7344af3132aaa0059eb3e5b6c38ea19a5a..ba4fe3f9831586921472504bc00cb3148747c168 100644 --- a/arch/arm64/boot/dts/ti/k3-j721e.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721e.dtsi @@ -61,7 +61,7 @@ i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; - d-cache-sets = <128>; + d-cache-sets = <256>; next-level-cache = <&L2_0>; }; @@ -75,7 +75,7 @@ i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; - d-cache-sets = <128>; + d-cache-sets = <256>; next-level-cache = <&L2_0>; }; }; @@ -85,7 +85,7 @@ cache-level = <2>; cache-size = <0x100000>; cache-line-size = <64>; - cache-sets = <2048>; + cache-sets = <1024>; next-level-cache = <&msmc_l3>; }; @@ -136,6 +136,7 @@ <0x00 0x0e000000 0x00 0x0e000000 0x00 0x01800000>, /* PCIe Core*/ <0x00 0x10000000 0x00 0x10000000 0x00 0x10000000>, /* PCIe DAT */ <0x00 0x64800000 0x00 0x64800000 0x00 0x00800000>, /* C71 */ + <0x00 0x6f000000 0x00 0x6f000000 0x00 0x00310000>, /* A72 PERIPHBASE */ <0x44 0x00000000 0x44 0x00000000 0x00 0x08000000>, /* PCIe2 DAT */ <0x44 0x10000000 0x44 0x10000000 0x00 0x08000000>, /* PCIe3 DAT */ <0x4d 0x80800000 0x4d 0x80800000 0x00 0x00800000>, /* C66_0 */ diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 5e7d86cf5dfa4886dd24db6e03b60a8314214ad6..d025bafcce433cfeee2c7b374915a41de900969d 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -837,7 +837,7 @@ CONFIG_DMADEVICES=y CONFIG_DMA_BCM2835=y CONFIG_DMA_SUN6I=m CONFIG_FSL_EDMA=y -CONFIG_IMX_SDMA=y +CONFIG_IMX_SDMA=m CONFIG_K3_DMA=y CONFIG_MV_XOR=y CONFIG_MV_XOR_V2=y diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 8d150178b5b8b8b558a6f0c2beb3ffe22aa720bd..d246fd508ef65104a038ce72758f562989965a44 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -133,10 +133,12 @@ 
CONFIG_PAGE_COUNTER=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_MEMCG_KMEM=y +CONFIG_MEMCG_MEMFS_INFO=y CONFIG_BLK_CGROUP=y CONFIG_CGROUP_WRITEBACK=y CONFIG_CGROUP_SCHED=y CONFIG_FAIR_GROUP_SCHED=y +CONFIG_QOS_SCHED_SMT_EXPELLER=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y @@ -434,12 +436,11 @@ CONFIG_ARM64_CPU_PARK=y # CONFIG_XEN is not set CONFIG_FORCE_MAX_ZONEORDER=11 CONFIG_UNMAP_KERNEL_AT_EL0=y -CONFIG_RODATA_FULL_DEFAULT_ENABLED=y +# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set CONFIG_ARM64_PMEM_RESERVE=y CONFIG_ARM64_PMEM_LEGACY=m # CONFIG_ARM64_SW_TTBR0_PAN is not set CONFIG_ARM64_TAGGED_ADDR_ABI=y -CONFIG_ARM64_ILP32=y CONFIG_AARCH32_EL0=y # CONFIG_KUSER_HELPERS is not set CONFIG_ARMV8_DEPRECATED=y @@ -465,6 +466,7 @@ CONFIG_ARM64_UAO=y CONFIG_ARM64_PMEM=y CONFIG_ARM64_RAS_EXTN=y CONFIG_ARM64_CNP=y +CONFIG_ARM64_UCE_KERNEL_RECOVERY=y # end of ARMv8.2 architectural features # @@ -707,7 +709,12 @@ CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y CONFIG_ACPI_NFIT=m # CONFIG_NFIT_SECURITY_DEBUG is not set CONFIG_ACPI_NUMA=y -# CONFIG_ACPI_HMAT is not set +CONFIG_ACPI_HMAT=y +CONFIG_EFI_SOFT_RESERVE=y +# CONFIG_ZONE_DEVICE is not set +CONFIG_HMEM_REPORTING=y +CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_HMEM_DEVICES=y CONFIG_HAVE_ACPI_APEI=y CONFIG_ACPI_APEI=y CONFIG_ACPI_APEI_GHES=y @@ -1044,7 +1051,7 @@ CONFIG_COHERENT_DEVICE=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG_SPARSE=y CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y -# CONFIG_MEMORY_HOTREMOVE is not set +CONFIG_MEMORY_HOTREMOVE=y CONFIG_SPLIT_PTLOCK_CPUS=4 CONFIG_MEMORY_BALLOON=y CONFIG_BALLOON_COMPACTION=y @@ -2403,7 +2410,6 @@ CONFIG_SCSI_QLA_FC=m CONFIG_SCSI_QLA_ISCSI=m CONFIG_QEDI=m CONFIG_QEDF=m -CONFIG_SPFC=m CONFIG_SCSI_HUAWEI_FC=m CONFIG_SCSI_FC_HIFC=m CONFIG_SCSI_LPFC=m @@ -2816,8 +2822,6 @@ CONFIG_NET_VENDOR_QUALCOMM=y # CONFIG_QCA7000_SPI is not set CONFIG_QCOM_EMAC=m # CONFIG_RMNET is not set -CONFIG_NET_VENDOR_RAMAXEL=y -CONFIG_SPNIC=m # CONFIG_NET_VENDOR_RDC is not set CONFIG_NET_VENDOR_REALTEK=y CONFIG_8139CP=m diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 55f19450091b20de917ec929b695a6f18e02d178..124da08a10988b797095dd4040b4999bfc622d34 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -51,7 +51,19 @@ config CRYPTO_SM4_ARM64_CE tristate "SM4 symmetric cipher (ARMv8.2 Crypto Extensions)" depends on KERNEL_MODE_NEON select CRYPTO_ALGAPI - select CRYPTO_LIB_SM4 + select CRYPTO_SM4 + +config CRYPTO_SM4_ARM64_CE_BLK + tristate "SM4 in ECB/CBC/CFB/CTR modes using ARMv8 Crypto Extensions" + depends on KERNEL_MODE_NEON + select CRYPTO_SKCIPHER + select CRYPTO_SM4 + +config CRYPTO_SM4_ARM64_NEON_BLK + tristate "SM4 in ECB/CBC/CFB/CTR modes using NEON instructions" + depends on KERNEL_MODE_NEON + select CRYPTO_SKCIPHER + select CRYPTO_SM4 config CRYPTO_GHASH_ARM64_CE tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions" diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index a5d4b672b6e1079a23d46bbf8ab3fcfe0761d313..5b2bb7e92bad91c507a580151a2b8e047a627193 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -20,9 +20,15 @@ sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o -obj-$(CONFIG_CRYPTO_SM4_ARM64_CE) += sm4-ce.o +obj-$(CONFIG_CRYPTO_SM4_ARM64_CE) += sm4-ce-cipher.o +sm4-ce-cipher-y := sm4-ce-cipher-glue.o sm4-ce-cipher-core.o + +obj-$(CONFIG_CRYPTO_SM4_ARM64_CE_BLK) += sm4-ce.o sm4-ce-y := sm4-ce-glue.o sm4-ce-core.o 
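The SM4 Kconfig options above (and the Makefile rules around this point) register skciphers such as "ecb(sm4)" and "cbc(sm4)" with the crypto API. As a hedged sketch, not part of the patch, kernel code could drive one of them through the synchronous skcipher API roughly as follows; the demo key and buffer handling are assumptions for illustration only.

#include <crypto/skcipher.h>
#include <crypto/sm4.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int __init sm4_demo_init(void)
{
	static const u8 key[SM4_KEY_SIZE] = {
		0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
		0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
	};
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	u8 *buf;
	int err;

	/* Resolves to the highest-priority provider, e.g. ecb-sm4-ce
	 * (priority 400) on CPUs with the SM4 Crypto Extension. */
	tfm = crypto_alloc_sync_skcipher("ecb(sm4)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(SM4_BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_tfm;
	}

	err = crypto_sync_skcipher_setkey(tfm, key, sizeof(key));
	if (!err) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		sg_init_one(&sg, buf, SM4_BLOCK_SIZE);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, SM4_BLOCK_SIZE, NULL);
		err = crypto_skcipher_encrypt(req); /* one block, in place */
		skcipher_request_zero(req);
	}

	kfree(buf);
out_tfm:
	crypto_free_sync_skcipher(tfm);
	return err;
}

static void __exit sm4_demo_exit(void)
{
}

module_init(sm4_demo_init);
module_exit(sm4_demo_exit);
MODULE_LICENSE("GPL");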
+obj-$(CONFIG_CRYPTO_SM4_ARM64_NEON_BLK) += sm4-neon.o +sm4-neon-y := sm4-neon-glue.o sm4-neon-core.o + obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o diff --git a/arch/arm64/crypto/crct10dif-neon_glue.c b/arch/arm64/crypto/crct10dif-neon_glue.c index af731b3ec30efa716de8ec3353e5dc15880c34b5..6fad09de212d4260a2cd999927452bfc96f94d08 100644 --- a/arch/arm64/crypto/crct10dif-neon_glue.c +++ b/arch/arm64/crypto/crct10dif-neon_glue.c @@ -55,10 +55,10 @@ static int chksum_final(struct shash_desc *desc, u8 *out) return 0; } -static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, +static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out) { - *(__u16 *)out = crc_t10dif_neon(*crcp, data, len); + *(__u16 *)out = crc_t10dif_neon(crc, data, len); return 0; } @@ -67,15 +67,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - return __chksum_finup(&ctx->crc, data, len, out); + return __chksum_finup(ctx->crc, data, len, out); } static int chksum_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { - struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - - return __chksum_finup(&ctx->crc, data, length, out); + return __chksum_finup(0, data, length, out); } static struct shash_alg alg = { diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c index d71faca322f2a8618d8173c8dcd345eacd86a811..ee98954ae8ca682651a00b6de2241bd61210813a 100644 --- a/arch/arm64/crypto/sm3-ce-glue.c +++ b/arch/arm64/crypto/sm3-ce-glue.c @@ -26,8 +26,10 @@ asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src, static int sm3_ce_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - if (!crypto_simd_usable()) - return crypto_sm3_update(desc, data, len); + if (!crypto_simd_usable()) { + sm3_update(shash_desc_ctx(desc), data, len); + return 0; + } kernel_neon_begin(); sm3_base_do_update(desc, data, len, sm3_ce_transform); @@ -38,8 +40,10 @@ static int sm3_ce_update(struct shash_desc *desc, const u8 *data, static int sm3_ce_final(struct shash_desc *desc, u8 *out) { - if (!crypto_simd_usable()) - return crypto_sm3_finup(desc, NULL, 0, out); + if (!crypto_simd_usable()) { + sm3_final(shash_desc_ctx(desc), out); + return 0; + } kernel_neon_begin(); sm3_base_do_finalize(desc, sm3_ce_transform); @@ -51,14 +55,22 @@ static int sm3_ce_final(struct shash_desc *desc, u8 *out) static int sm3_ce_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - if (!crypto_simd_usable()) - return crypto_sm3_finup(desc, data, len, out); + if (!crypto_simd_usable()) { + struct sm3_state *sctx = shash_desc_ctx(desc); + + if (len) + sm3_update(sctx, data, len); + sm3_final(sctx, out); + return 0; + } kernel_neon_begin(); - sm3_base_do_update(desc, data, len, sm3_ce_transform); + if (len) + sm3_base_do_update(desc, data, len, sm3_ce_transform); + sm3_base_do_finalize(desc, sm3_ce_transform); kernel_neon_end(); - return sm3_ce_final(desc, out); + return sm3_base_finish(desc, out); } static struct shash_alg sm3_alg = { diff --git a/arch/arm64/crypto/sm4-ce-cipher-core.S b/arch/arm64/crypto/sm4-ce-cipher-core.S new file mode 100644 index 0000000000000000000000000000000000000000..4ac6cfbc57970e2449067144aaa4e27c6d4373d4 --- /dev/null +++ b/arch/arm64/crypto/sm4-ce-cipher-core.S @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + + .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8 + .set 
.Lv\b\().4s, \b + .endr + + .macro sm4e, rd, rn + .inst 0xcec08400 | .L\rd | (.L\rn << 5) + .endm + + /* + * void sm4_ce_do_crypt(const u32 *rk, u32 *out, const u32 *in); + */ + .text +SYM_FUNC_START(sm4_ce_do_crypt) + ld1 {v8.4s}, [x2] + ld1 {v0.4s-v3.4s}, [x0], #64 +CPU_LE( rev32 v8.16b, v8.16b ) + ld1 {v4.4s-v7.4s}, [x0] + sm4e v8.4s, v0.4s + sm4e v8.4s, v1.4s + sm4e v8.4s, v2.4s + sm4e v8.4s, v3.4s + sm4e v8.4s, v4.4s + sm4e v8.4s, v5.4s + sm4e v8.4s, v6.4s + sm4e v8.4s, v7.4s + rev64 v8.4s, v8.4s + ext v8.16b, v8.16b, v8.16b, #8 +CPU_LE( rev32 v8.16b, v8.16b ) + st1 {v8.4s}, [x1] + ret +SYM_FUNC_END(sm4_ce_do_crypt) diff --git a/arch/arm64/crypto/sm4-ce-cipher-glue.c b/arch/arm64/crypto/sm4-ce-cipher-glue.c new file mode 100644 index 0000000000000000000000000000000000000000..76a34ef4abbbf198d1fe62e898c37da4c4d3e365 --- /dev/null +++ b/arch/arm64/crypto/sm4-ce-cipher-glue.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_ALIAS_CRYPTO("sm4"); +MODULE_ALIAS_CRYPTO("sm4-ce"); +MODULE_DESCRIPTION("SM4 symmetric cipher using ARMv8 Crypto Extensions"); +MODULE_AUTHOR("Ard Biesheuvel "); +MODULE_LICENSE("GPL v2"); + +asmlinkage void sm4_ce_do_crypt(const u32 *rk, void *out, const void *in); + +static int sm4_ce_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + + return sm4_expandkey(ctx, key, key_len); +} + +static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!crypto_simd_usable()) { + sm4_crypt_block(ctx->rkey_enc, out, in); + } else { + kernel_neon_begin(); + sm4_ce_do_crypt(ctx->rkey_enc, out, in); + kernel_neon_end(); + } +} + +static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + + if (!crypto_simd_usable()) { + sm4_crypt_block(ctx->rkey_dec, out, in); + } else { + kernel_neon_begin(); + sm4_ce_do_crypt(ctx->rkey_dec, out, in); + kernel_neon_end(); + } +} + +static struct crypto_alg sm4_ce_alg = { + .cra_name = "sm4", + .cra_driver_name = "sm4-ce", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + .cra_u.cipher = { + .cia_min_keysize = SM4_KEY_SIZE, + .cia_max_keysize = SM4_KEY_SIZE, + .cia_setkey = sm4_ce_setkey, + .cia_encrypt = sm4_ce_encrypt, + .cia_decrypt = sm4_ce_decrypt + } +}; + +static int __init sm4_ce_mod_init(void) +{ + return crypto_register_alg(&sm4_ce_alg); +} + +static void __exit sm4_ce_mod_fini(void) +{ + crypto_unregister_alg(&sm4_ce_alg); +} + +module_cpu_feature_match(SM4, sm4_ce_mod_init); +module_exit(sm4_ce_mod_fini); diff --git a/arch/arm64/crypto/sm4-ce-core.S b/arch/arm64/crypto/sm4-ce-core.S index 4ac6cfbc57970e2449067144aaa4e27c6d4373d4..934e0f093279968362d7ddc2e4c1e7774c34077e 100644 --- a/arch/arm64/crypto/sm4-ce-core.S +++ b/arch/arm64/crypto/sm4-ce-core.S @@ -1,36 +1,660 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4 Cipher Algorithm for ARMv8 with Crypto Extensions + * as specified in + * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html + * + * Copyright (C) 2022, Alibaba Group. 
+ * Copyright (C) 2022 Tianjia Zhang + */ #include #include - .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8 - .set .Lv\b\().4s, \b - .endr - - .macro sm4e, rd, rn - .inst 0xcec08400 | .L\rd | (.L\rn << 5) - .endm - - /* - * void sm4_ce_do_crypt(const u32 *rk, u32 *out, const u32 *in); - */ - .text -SYM_FUNC_START(sm4_ce_do_crypt) - ld1 {v8.4s}, [x2] - ld1 {v0.4s-v3.4s}, [x0], #64 -CPU_LE( rev32 v8.16b, v8.16b ) - ld1 {v4.4s-v7.4s}, [x0] - sm4e v8.4s, v0.4s - sm4e v8.4s, v1.4s - sm4e v8.4s, v2.4s - sm4e v8.4s, v3.4s - sm4e v8.4s, v4.4s - sm4e v8.4s, v5.4s - sm4e v8.4s, v6.4s - sm4e v8.4s, v7.4s - rev64 v8.4s, v8.4s - ext v8.16b, v8.16b, v8.16b, #8 -CPU_LE( rev32 v8.16b, v8.16b ) - st1 {v8.4s}, [x1] - ret -SYM_FUNC_END(sm4_ce_do_crypt) +.arch armv8-a+crypto + +.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 16, 20, 24, 25, 26, 27, 28, 29, 30, 31 + .set .Lv\b\().4s, \b +.endr + +.macro sm4e, vd, vn + .inst 0xcec08400 | (.L\vn << 5) | .L\vd +.endm + +.macro sm4ekey, vd, vn, vm + .inst 0xce60c800 | (.L\vm << 16) | (.L\vn << 5) | .L\vd +.endm + +/* Register macros */ + +#define RTMP0 v16 +#define RTMP1 v17 +#define RTMP2 v18 +#define RTMP3 v19 + +#define RIV v20 + +/* Helper macros. */ + +#define PREPARE \ + ld1 {v24.16b-v27.16b}, [x0], #64; \ + ld1 {v28.16b-v31.16b}, [x0]; + +#define SM4_CRYPT_BLK(b0) \ + rev32 b0.16b, b0.16b; \ + sm4e b0.4s, v24.4s; \ + sm4e b0.4s, v25.4s; \ + sm4e b0.4s, v26.4s; \ + sm4e b0.4s, v27.4s; \ + sm4e b0.4s, v28.4s; \ + sm4e b0.4s, v29.4s; \ + sm4e b0.4s, v30.4s; \ + sm4e b0.4s, v31.4s; \ + rev64 b0.4s, b0.4s; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + rev32 b0.16b, b0.16b; + +#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + sm4e b0.4s, v24.4s; \ + sm4e b1.4s, v24.4s; \ + sm4e b2.4s, v24.4s; \ + sm4e b3.4s, v24.4s; \ + sm4e b0.4s, v25.4s; \ + sm4e b1.4s, v25.4s; \ + sm4e b2.4s, v25.4s; \ + sm4e b3.4s, v25.4s; \ + sm4e b0.4s, v26.4s; \ + sm4e b1.4s, v26.4s; \ + sm4e b2.4s, v26.4s; \ + sm4e b3.4s, v26.4s; \ + sm4e b0.4s, v27.4s; \ + sm4e b1.4s, v27.4s; \ + sm4e b2.4s, v27.4s; \ + sm4e b3.4s, v27.4s; \ + sm4e b0.4s, v28.4s; \ + sm4e b1.4s, v28.4s; \ + sm4e b2.4s, v28.4s; \ + sm4e b3.4s, v28.4s; \ + sm4e b0.4s, v29.4s; \ + sm4e b1.4s, v29.4s; \ + sm4e b2.4s, v29.4s; \ + sm4e b3.4s, v29.4s; \ + sm4e b0.4s, v30.4s; \ + sm4e b1.4s, v30.4s; \ + sm4e b2.4s, v30.4s; \ + sm4e b3.4s, v30.4s; \ + sm4e b0.4s, v31.4s; \ + sm4e b1.4s, v31.4s; \ + sm4e b2.4s, v31.4s; \ + sm4e b3.4s, v31.4s; \ + rev64 b0.4s, b0.4s; \ + rev64 b1.4s, b1.4s; \ + rev64 b2.4s, b2.4s; \ + rev64 b3.4s, b3.4s; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + ext b1.16b, b1.16b, b1.16b, #8; \ + ext b2.16b, b2.16b, b2.16b, #8; \ + ext b3.16b, b3.16b, b3.16b, #8; \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; + +#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + rev32 b4.16b, b4.16b; \ + rev32 b5.16b, b5.16b; \ + rev32 b6.16b, b6.16b; \ + rev32 b7.16b, b7.16b; \ + sm4e b0.4s, v24.4s; \ + sm4e b1.4s, v24.4s; \ + sm4e b2.4s, v24.4s; \ + sm4e b3.4s, v24.4s; \ + sm4e b4.4s, v24.4s; \ + sm4e b5.4s, v24.4s; \ + sm4e b6.4s, v24.4s; \ + sm4e b7.4s, v24.4s; \ + sm4e b0.4s, v25.4s; \ + sm4e b1.4s, v25.4s; \ + sm4e b2.4s, v25.4s; \ + sm4e b3.4s, v25.4s; \ + sm4e b4.4s, v25.4s; \ + sm4e b5.4s, v25.4s; \ + sm4e b6.4s, v25.4s; \ + sm4e b7.4s, v25.4s; \ + sm4e b0.4s, v26.4s; \ + sm4e b1.4s, v26.4s; \ + 
sm4e b2.4s, v26.4s; \ + sm4e b3.4s, v26.4s; \ + sm4e b4.4s, v26.4s; \ + sm4e b5.4s, v26.4s; \ + sm4e b6.4s, v26.4s; \ + sm4e b7.4s, v26.4s; \ + sm4e b0.4s, v27.4s; \ + sm4e b1.4s, v27.4s; \ + sm4e b2.4s, v27.4s; \ + sm4e b3.4s, v27.4s; \ + sm4e b4.4s, v27.4s; \ + sm4e b5.4s, v27.4s; \ + sm4e b6.4s, v27.4s; \ + sm4e b7.4s, v27.4s; \ + sm4e b0.4s, v28.4s; \ + sm4e b1.4s, v28.4s; \ + sm4e b2.4s, v28.4s; \ + sm4e b3.4s, v28.4s; \ + sm4e b4.4s, v28.4s; \ + sm4e b5.4s, v28.4s; \ + sm4e b6.4s, v28.4s; \ + sm4e b7.4s, v28.4s; \ + sm4e b0.4s, v29.4s; \ + sm4e b1.4s, v29.4s; \ + sm4e b2.4s, v29.4s; \ + sm4e b3.4s, v29.4s; \ + sm4e b4.4s, v29.4s; \ + sm4e b5.4s, v29.4s; \ + sm4e b6.4s, v29.4s; \ + sm4e b7.4s, v29.4s; \ + sm4e b0.4s, v30.4s; \ + sm4e b1.4s, v30.4s; \ + sm4e b2.4s, v30.4s; \ + sm4e b3.4s, v30.4s; \ + sm4e b4.4s, v30.4s; \ + sm4e b5.4s, v30.4s; \ + sm4e b6.4s, v30.4s; \ + sm4e b7.4s, v30.4s; \ + sm4e b0.4s, v31.4s; \ + sm4e b1.4s, v31.4s; \ + sm4e b2.4s, v31.4s; \ + sm4e b3.4s, v31.4s; \ + sm4e b4.4s, v31.4s; \ + sm4e b5.4s, v31.4s; \ + sm4e b6.4s, v31.4s; \ + sm4e b7.4s, v31.4s; \ + rev64 b0.4s, b0.4s; \ + rev64 b1.4s, b1.4s; \ + rev64 b2.4s, b2.4s; \ + rev64 b3.4s, b3.4s; \ + rev64 b4.4s, b4.4s; \ + rev64 b5.4s, b5.4s; \ + rev64 b6.4s, b6.4s; \ + rev64 b7.4s, b7.4s; \ + ext b0.16b, b0.16b, b0.16b, #8; \ + ext b1.16b, b1.16b, b1.16b, #8; \ + ext b2.16b, b2.16b, b2.16b, #8; \ + ext b3.16b, b3.16b, b3.16b, #8; \ + ext b4.16b, b4.16b, b4.16b, #8; \ + ext b5.16b, b5.16b, b5.16b, #8; \ + ext b6.16b, b6.16b, b6.16b, #8; \ + ext b7.16b, b7.16b, b7.16b, #8; \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + rev32 b4.16b, b4.16b; \ + rev32 b5.16b, b5.16b; \ + rev32 b6.16b, b6.16b; \ + rev32 b7.16b, b7.16b; + + +.align 3 +SYM_FUNC_START(sm4_ce_expand_key) + /* input: + * x0: 128-bit key + * x1: rkey_enc + * x2: rkey_dec + * x3: fk array + * x4: ck array + */ + ld1 {v0.16b}, [x0]; + rev32 v0.16b, v0.16b; + ld1 {v1.16b}, [x3]; + /* load ck */ + ld1 {v24.16b-v27.16b}, [x4], #64; + ld1 {v28.16b-v31.16b}, [x4]; + + /* input ^ fk */ + eor v0.16b, v0.16b, v1.16b; + + sm4ekey v0.4s, v0.4s, v24.4s; + sm4ekey v1.4s, v0.4s, v25.4s; + sm4ekey v2.4s, v1.4s, v26.4s; + sm4ekey v3.4s, v2.4s, v27.4s; + sm4ekey v4.4s, v3.4s, v28.4s; + sm4ekey v5.4s, v4.4s, v29.4s; + sm4ekey v6.4s, v5.4s, v30.4s; + sm4ekey v7.4s, v6.4s, v31.4s; + + st1 {v0.16b-v3.16b}, [x1], #64; + st1 {v4.16b-v7.16b}, [x1]; + rev64 v7.4s, v7.4s; + rev64 v6.4s, v6.4s; + rev64 v5.4s, v5.4s; + rev64 v4.4s, v4.4s; + rev64 v3.4s, v3.4s; + rev64 v2.4s, v2.4s; + rev64 v1.4s, v1.4s; + rev64 v0.4s, v0.4s; + ext v7.16b, v7.16b, v7.16b, #8; + ext v6.16b, v6.16b, v6.16b, #8; + ext v5.16b, v5.16b, v5.16b, #8; + ext v4.16b, v4.16b, v4.16b, #8; + ext v3.16b, v3.16b, v3.16b, #8; + ext v2.16b, v2.16b, v2.16b, #8; + ext v1.16b, v1.16b, v1.16b, #8; + ext v0.16b, v0.16b, v0.16b, #8; + st1 {v7.16b}, [x2], #16; + st1 {v6.16b}, [x2], #16; + st1 {v5.16b}, [x2], #16; + st1 {v4.16b}, [x2], #16; + st1 {v3.16b}, [x2], #16; + st1 {v2.16b}, [x2], #16; + st1 {v1.16b}, [x2], #16; + st1 {v0.16b}, [x2]; + + ret; +SYM_FUNC_END(sm4_ce_expand_key) + +.align 3 +SYM_FUNC_START(sm4_ce_crypt_block) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + */ + PREPARE; + + ld1 {v0.16b}, [x2]; + SM4_CRYPT_BLK(v0); + st1 {v0.16b}, [x1]; + + ret; +SYM_FUNC_END(sm4_ce_crypt_block) + +.align 3 +SYM_FUNC_START(sm4_ce_crypt) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * w3: nblocks + */ + PREPARE; + 
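The loop that follows consumes eight blocks per iteration, then falls through to a four-block and finally a single-block tail. The same dispatch, sketched in portable C as a reading aid; sm4_crypt_blk() is a hypothetical stand-in for the SM4_CRYPT_BLK* macros and only copies data here.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void sm4_crypt_blk(const uint32_t *rkey, uint8_t *dst,
			  const uint8_t *src, unsigned int nblk)
{
	(void)rkey;
	memcpy(dst, src, (size_t)nblk * 16); /* placeholder for the 32 rounds */
}

/* Mirrors the control flow of sm4_ce_crypt below. */
static void sm4_crypt_dispatch(const uint32_t *rkey, uint8_t *dst,
			       const uint8_t *src, int nblocks)
{
	while (nblocks >= 8) {		/* .Lcrypt_loop_blk */
		sm4_crypt_blk(rkey, dst, src, 8);
		src += 8 * 16; dst += 8 * 16; nblocks -= 8;
	}
	if (nblocks >= 4) {		/* .Lcrypt_tail8, 4-block path */
		sm4_crypt_blk(rkey, dst, src, 4);
		src += 4 * 16; dst += 4 * 16; nblocks -= 4;
	}
	while (nblocks-- > 0) {		/* .Lcrypt_tail4, single blocks */
		sm4_crypt_blk(rkey, dst, src, 1);
		src += 16; dst += 16;
	}
}

int main(void)
{
	uint32_t rkey[32] = { 0 };
	uint8_t in[13 * 16] = { 0 }, out[13 * 16];

	sm4_crypt_dispatch(rkey, out, in, 13); /* 8 + 4 + 1 */
	printf("dispatched 13 blocks as 8 + 4 + 1\n");
	return 0;
}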
+.Lcrypt_loop_blk: + sub w3, w3, #8; + tbnz w3, #31, .Lcrypt_tail8; + + ld1 {v0.16b-v3.16b}, [x2], #64; + ld1 {v4.16b-v7.16b}, [x2], #64; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + st1 {v0.16b-v3.16b}, [x1], #64; + st1 {v4.16b-v7.16b}, [x1], #64; + + cbz w3, .Lcrypt_end; + b .Lcrypt_loop_blk; + +.Lcrypt_tail8: + add w3, w3, #8; + cmp w3, #4; + blt .Lcrypt_tail4; + + sub w3, w3, #4; + + ld1 {v0.16b-v3.16b}, [x2], #64; + SM4_CRYPT_BLK4(v0, v1, v2, v3); + st1 {v0.16b-v3.16b}, [x1], #64; + + cbz w3, .Lcrypt_end; + +.Lcrypt_tail4: + sub w3, w3, #1; + + ld1 {v0.16b}, [x2], #16; + SM4_CRYPT_BLK(v0); + st1 {v0.16b}, [x1], #16; + + cbnz w3, .Lcrypt_tail4; + +.Lcrypt_end: + ret; +SYM_FUNC_END(sm4_ce_crypt) + +.align 3 +SYM_FUNC_START(sm4_ce_cbc_enc) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks + */ + PREPARE; + + ld1 {RIV.16b}, [x3]; + +.Lcbc_enc_loop: + sub w4, w4, #1; + + ld1 {RTMP0.16b}, [x2], #16; + eor RIV.16b, RIV.16b, RTMP0.16b; + + SM4_CRYPT_BLK(RIV); + + st1 {RIV.16b}, [x1], #16; + + cbnz w4, .Lcbc_enc_loop; + + /* store new IV */ + st1 {RIV.16b}, [x3]; + + ret; +SYM_FUNC_END(sm4_ce_cbc_enc) + +.align 3 +SYM_FUNC_START(sm4_ce_cbc_dec) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks + */ + PREPARE; + + ld1 {RIV.16b}, [x3]; + +.Lcbc_loop_blk: + sub w4, w4, #8; + tbnz w4, #31, .Lcbc_tail8; + + ld1 {v0.16b-v3.16b}, [x2], #64; + ld1 {v4.16b-v7.16b}, [x2]; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + sub x2, x2, #64; + eor v0.16b, v0.16b, RIV.16b; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v1.16b, v1.16b, RTMP0.16b; + eor v2.16b, v2.16b, RTMP1.16b; + eor v3.16b, v3.16b, RTMP2.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + eor v4.16b, v4.16b, RTMP3.16b; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v5.16b, v5.16b, RTMP0.16b; + eor v6.16b, v6.16b, RTMP1.16b; + eor v7.16b, v7.16b, RTMP2.16b; + + mov RIV.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + cbz w4, .Lcbc_end; + b .Lcbc_loop_blk; + +.Lcbc_tail8: + add w4, w4, #8; + cmp w4, #4; + blt .Lcbc_tail4; + + sub w4, w4, #4; + + ld1 {v0.16b-v3.16b}, [x2]; + + SM4_CRYPT_BLK4(v0, v1, v2, v3); + + eor v0.16b, v0.16b, RIV.16b; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v1.16b, v1.16b, RTMP0.16b; + eor v2.16b, v2.16b, RTMP1.16b; + eor v3.16b, v3.16b, RTMP2.16b; + + mov RIV.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + cbz w4, .Lcbc_end; + +.Lcbc_tail4: + sub w4, w4, #1; + + ld1 {v0.16b}, [x2]; + + SM4_CRYPT_BLK(v0); + + eor v0.16b, v0.16b, RIV.16b; + ld1 {RIV.16b}, [x2], #16; + st1 {v0.16b}, [x1], #16; + + cbnz w4, .Lcbc_tail4; + +.Lcbc_end: + /* store new IV */ + st1 {RIV.16b}, [x3]; + + ret; +SYM_FUNC_END(sm4_ce_cbc_dec) + +.align 3 +SYM_FUNC_START(sm4_ce_cfb_enc) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks + */ + PREPARE; + + ld1 {RIV.16b}, [x3]; + +.Lcfb_enc_loop: + sub w4, w4, #1; + + SM4_CRYPT_BLK(RIV); + + ld1 {RTMP0.16b}, [x2], #16; + eor RIV.16b, RIV.16b, RTMP0.16b; + st1 {RIV.16b}, [x1], #16; + + cbnz w4, .Lcfb_enc_loop; + + /* store new IV */ + st1 {RIV.16b}, [x3]; + + ret; +SYM_FUNC_END(sm4_ce_cfb_enc) + +.align 3 +SYM_FUNC_START(sm4_ce_cfb_dec) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks + */ + PREPARE; + + ld1 {v0.16b}, [x3]; + +.Lcfb_loop_blk: + sub w4, w4, #8; + tbnz w4, #31, .Lcfb_tail8; + + ld1 {v1.16b, v2.16b, v3.16b}, [x2], 
#48; + ld1 {v4.16b-v7.16b}, [x2]; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + sub x2, x2, #48; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v4.16b, v4.16b, RTMP0.16b; + eor v5.16b, v5.16b, RTMP1.16b; + eor v6.16b, v6.16b, RTMP2.16b; + eor v7.16b, v7.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + mov v0.16b, RTMP3.16b; + + cbz w4, .Lcfb_end; + b .Lcfb_loop_blk; + +.Lcfb_tail8: + add w4, w4, #8; + cmp w4, #4; + blt .Lcfb_tail4; + + sub w4, w4, #4; + + ld1 {v1.16b, v2.16b, v3.16b}, [x2]; + + SM4_CRYPT_BLK4(v0, v1, v2, v3); + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + mov v0.16b, RTMP3.16b; + + cbz w4, .Lcfb_end; + +.Lcfb_tail4: + sub w4, w4, #1; + + SM4_CRYPT_BLK(v0); + + ld1 {RTMP0.16b}, [x2], #16; + eor v0.16b, v0.16b, RTMP0.16b; + st1 {v0.16b}, [x1], #16; + + mov v0.16b, RTMP0.16b; + + cbnz w4, .Lcfb_tail4; + +.Lcfb_end: + /* store new IV */ + st1 {v0.16b}, [x3]; + + ret; +SYM_FUNC_END(sm4_ce_cfb_dec) + +.align 3 +SYM_FUNC_START(sm4_ce_ctr_enc) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: ctr (big endian, 128 bit) + * w4: nblocks + */ + PREPARE; + + ldp x7, x8, [x3]; + rev x7, x7; + rev x8, x8; + +.Lctr_loop_blk: + sub w4, w4, #8; + tbnz w4, #31, .Lctr_tail8; + +#define inc_le128(vctr) \ + mov vctr.d[1], x8; \ + mov vctr.d[0], x7; \ + adds x8, x8, #1; \ + adc x7, x7, xzr; \ + rev64 vctr.16b, vctr.16b; + + /* construct CTRs */ + inc_le128(v0); /* +0 */ + inc_le128(v1); /* +1 */ + inc_le128(v2); /* +2 */ + inc_le128(v3); /* +3 */ + inc_le128(v4); /* +4 */ + inc_le128(v5); /* +5 */ + inc_le128(v6); /* +6 */ + inc_le128(v7); /* +7 */ + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v4.16b, v4.16b, RTMP0.16b; + eor v5.16b, v5.16b, RTMP1.16b; + eor v6.16b, v6.16b, RTMP2.16b; + eor v7.16b, v7.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + cbz w4, .Lctr_end; + b .Lctr_loop_blk; + +.Lctr_tail8: + add w4, w4, #8; + cmp w4, #4; + blt .Lctr_tail4; + + sub w4, w4, #4; + + /* construct CTRs */ + inc_le128(v0); /* +0 */ + inc_le128(v1); /* +1 */ + inc_le128(v2); /* +2 */ + inc_le128(v3); /* +3 */ + + SM4_CRYPT_BLK4(v0, v1, v2, v3); + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + cbz w4, .Lctr_end; + +.Lctr_tail4: + sub w4, w4, #1; + + /* construct CTRs */ + inc_le128(v0); + + SM4_CRYPT_BLK(v0); + + ld1 {RTMP0.16b}, [x2], #16; + eor v0.16b, v0.16b, RTMP0.16b; + st1 {v0.16b}, [x1], #16; + + cbnz w4, .Lctr_tail4; + +.Lctr_end: + /* store new CTR */ + rev x7, x7; + rev x8, x8; + stp x7, x8, [x3]; + + ret; +SYM_FUNC_END(sm4_ce_ctr_enc) diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c index 9c93cfc4841bc3527eef0371fa0f579cb49a1842..496d55c0d01a4617ca3c014382ad3e9ea246f866 100644 --- a/arch/arm64/crypto/sm4-ce-glue.c +++ 
b/arch/arm64/crypto/sm4-ce-glue.c @@ -1,82 +1,372 @@ -// SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4 Cipher Algorithm, using ARMv8 Crypto Extensions + * as specified in + * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html + * + * Copyright (C) 2022, Alibaba Group. + * Copyright (C) 2022 Tianjia Zhang + */ +#include +#include +#include +#include #include #include -#include #include -#include -#include -#include -#include +#include +#include -MODULE_ALIAS_CRYPTO("sm4"); -MODULE_ALIAS_CRYPTO("sm4-ce"); -MODULE_DESCRIPTION("SM4 symmetric cipher using ARMv8 Crypto Extensions"); -MODULE_AUTHOR("Ard Biesheuvel "); -MODULE_LICENSE("GPL v2"); +#define BYTES2BLKS(nbytes) ((nbytes) >> 4) + +asmlinkage void sm4_ce_expand_key(const u8 *key, u32 *rkey_enc, u32 *rkey_dec, + const u32 *fk, const u32 *ck); +asmlinkage void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src); +asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src, + unsigned int nblks); +asmlinkage void sm4_ce_cbc_enc(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_ce_cbc_dec(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_ce_cfb_enc(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_ce_cfb_dec(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); + +static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (key_len != SM4_KEY_SIZE) + return -EINVAL; + + sm4_ce_expand_key(key, ctx->rkey_enc, ctx->rkey_dec, + crypto_sm4_fk, crypto_sm4_ck); + return 0; +} + +static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey) +{ + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_crypt(rkey, dst, src, nblks); + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int sm4_ecb_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_ecb_do_crypt(req, ctx->rkey_enc); +} + +static int sm4_ecb_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_ecb_do_crypt(req, ctx->rkey_dec); +} + +static int sm4_cbc_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_cbc_enc(ctx->rkey_enc, dst, src, walk.iv, nblks); + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); -asmlinkage void sm4_ce_do_crypt(const u32 *rk, void *out, const void 
*in); + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} -static int sm4_ce_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int key_len) +static int sm4_cbc_decrypt(struct skcipher_request *req) { - struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_cbc_dec(ctx->rkey_dec, dst, src, walk.iv, nblks); + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); + } - return sm4_expandkey(ctx, key, key_len); + return err; } -static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static int sm4_cfb_encrypt(struct skcipher_request *req) { - const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; - if (!crypto_simd_usable()) { - sm4_crypt_block(ctx->rkey_enc, out, in); - } else { kernel_neon_begin(); - sm4_ce_do_crypt(ctx->rkey_enc, out, in); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_cfb_enc(ctx->rkey_enc, dst, src, walk.iv, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + + sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); } + + return err; } -static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static int sm4_cfb_decrypt(struct skcipher_request *req) { - const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; - if (!crypto_simd_usable()) { - sm4_crypt_block(ctx->rkey_dec, out, in); - } else { kernel_neon_begin(); - sm4_ce_do_crypt(ctx->rkey_dec, out, in); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_cfb_dec(ctx->rkey_enc, dst, src, walk.iv, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + + sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); } + + return err; } -static struct crypto_alg sm4_ce_alg = { - .cra_name = "sm4", - .cra_driver_name = "sm4-ce", - .cra_priority = 200, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, - .cra_blocksize = SM4_BLOCK_SIZE, - .cra_ctxsize = 
sizeof(struct sm4_ctx), - .cra_module = THIS_MODULE, - .cra_u.cipher = { - .cia_min_keysize = SM4_KEY_SIZE, - .cia_max_keysize = SM4_KEY_SIZE, - .cia_setkey = sm4_ce_setkey, - .cia_encrypt = sm4_ce_encrypt, - .cia_decrypt = sm4_ce_decrypt +static int sm4_ctr_crypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_ce_ctr_enc(ctx->rkey_enc, dst, src, walk.iv, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + + sm4_ce_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static struct skcipher_alg sm4_algs[] = { + { + .base = { + .cra_name = "ecb(sm4)", + .cra_driver_name = "ecb-sm4-ce", + .cra_priority = 400, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_ecb_encrypt, + .decrypt = sm4_ecb_decrypt, + }, { + .base = { + .cra_name = "cbc(sm4)", + .cra_driver_name = "cbc-sm4-ce", + .cra_priority = 400, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_cbc_encrypt, + .decrypt = sm4_cbc_decrypt, + }, { + .base = { + .cra_name = "cfb(sm4)", + .cra_driver_name = "cfb-sm4-ce", + .cra_priority = 400, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_cfb_encrypt, + .decrypt = sm4_cfb_decrypt, + }, { + .base = { + .cra_name = "ctr(sm4)", + .cra_driver_name = "ctr-sm4-ce", + .cra_priority = 400, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_ctr_crypt, + .decrypt = sm4_ctr_crypt, } }; -static int __init sm4_ce_mod_init(void) +static int __init sm4_init(void) { - return crypto_register_alg(&sm4_ce_alg); + return crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); } -static void __exit sm4_ce_mod_fini(void) +static void __exit sm4_exit(void) { - crypto_unregister_alg(&sm4_ce_alg); + crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); } -module_cpu_feature_match(SM4, sm4_ce_mod_init); -module_exit(sm4_ce_mod_fini); +module_cpu_feature_match(SM4, sm4_init); +module_exit(sm4_exit); + +MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR using ARMv8 Crypto Extensions"); +MODULE_ALIAS_CRYPTO("sm4-ce"); +MODULE_ALIAS_CRYPTO("sm4"); +MODULE_ALIAS_CRYPTO("ecb(sm4)"); 
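Because CTR mode turns the block cipher into a stream cipher, sm4_ctr_crypt() above finishes a partial final block by encrypting the counter once and XORing only the remaining bytes. A self-contained C model of that tail step; block_encrypt() is a placeholder, not the real cipher, and the helpers mimic crypto_inc()/crypto_xor_cpy().

#include <stdint.h>
#include <stdio.h>

#define BLK 16

static void block_encrypt(uint8_t out[BLK], const uint8_t in[BLK])
{
	for (int i = 0; i < BLK; i++)
		out[i] = in[i] ^ 0xff; /* placeholder, not SM4 */
}

static void ctr_inc(uint8_t ctr[BLK])	/* big-endian, like crypto_inc() */
{
	for (int i = BLK - 1; i >= 0 && ++ctr[i] == 0; i--)
		;
}

/* The tail path of sm4_ctr_crypt(): nbytes < BLK remain. */
static void ctr_tail(uint8_t *dst, const uint8_t *src, unsigned int nbytes,
		     uint8_t iv[BLK])
{
	uint8_t keystream[BLK];

	block_encrypt(keystream, iv);		/* sm4_ce_crypt_block() */
	ctr_inc(iv);				/* crypto_inc() */
	for (unsigned int i = 0; i < nbytes; i++)
		dst[i] = src[i] ^ keystream[i];	/* crypto_xor_cpy() */
}

int main(void)
{
	uint8_t iv[BLK] = { 0 }, src[5] = "tail", dst[5];

	ctr_tail(dst, src, sizeof(src), iv);
	printf("counter low byte after tail: %u\n", iv[BLK - 1]); /* 1 */
	return 0;
}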
+MODULE_ALIAS_CRYPTO("cbc(sm4)"); +MODULE_ALIAS_CRYPTO("cfb(sm4)"); +MODULE_ALIAS_CRYPTO("ctr(sm4)"); +MODULE_AUTHOR("Tianjia Zhang "); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/crypto/sm4-neon-core.S b/arch/arm64/crypto/sm4-neon-core.S new file mode 100644 index 0000000000000000000000000000000000000000..3d5256b354d27fb2f5464fc810f595ab17b1a115 --- /dev/null +++ b/arch/arm64/crypto/sm4-neon-core.S @@ -0,0 +1,487 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4 Cipher Algorithm for ARMv8 NEON + * as specified in + * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html + * + * Copyright (C) 2022, Alibaba Group. + * Copyright (C) 2022 Tianjia Zhang + */ + +#include +#include + +/* Register macros */ + +#define RTMP0 v8 +#define RTMP1 v9 +#define RTMP2 v10 +#define RTMP3 v11 + +#define RX0 v12 +#define RX1 v13 +#define RKEY v14 +#define RIV v15 + +/* Helper macros. */ + +#define PREPARE \ + adr_l x5, crypto_sm4_sbox; \ + ld1 {v16.16b-v19.16b}, [x5], #64; \ + ld1 {v20.16b-v23.16b}, [x5], #64; \ + ld1 {v24.16b-v27.16b}, [x5], #64; \ + ld1 {v28.16b-v31.16b}, [x5]; + +#define transpose_4x4(s0, s1, s2, s3) \ + zip1 RTMP0.4s, s0.4s, s1.4s; \ + zip1 RTMP1.4s, s2.4s, s3.4s; \ + zip2 RTMP2.4s, s0.4s, s1.4s; \ + zip2 RTMP3.4s, s2.4s, s3.4s; \ + zip1 s0.2d, RTMP0.2d, RTMP1.2d; \ + zip2 s1.2d, RTMP0.2d, RTMP1.2d; \ + zip1 s2.2d, RTMP2.2d, RTMP3.2d; \ + zip2 s3.2d, RTMP2.2d, RTMP3.2d; + +#define rotate_clockwise_90(s0, s1, s2, s3) \ + zip1 RTMP0.4s, s1.4s, s0.4s; \ + zip2 RTMP1.4s, s1.4s, s0.4s; \ + zip1 RTMP2.4s, s3.4s, s2.4s; \ + zip2 RTMP3.4s, s3.4s, s2.4s; \ + zip1 s0.2d, RTMP2.2d, RTMP0.2d; \ + zip2 s1.2d, RTMP2.2d, RTMP0.2d; \ + zip1 s2.2d, RTMP3.2d, RTMP1.2d; \ + zip2 s3.2d, RTMP3.2d, RTMP1.2d; + +#define ROUND4(round, s0, s1, s2, s3) \ + dup RX0.4s, RKEY.s[round]; \ + /* rk ^ s1 ^ s2 ^ s3 */ \ + eor RTMP1.16b, s2.16b, s3.16b; \ + eor RX0.16b, RX0.16b, s1.16b; \ + eor RX0.16b, RX0.16b, RTMP1.16b; \ + \ + /* sbox, non-linear part */ \ + movi RTMP3.16b, #64; /* sizeof(sbox) / 4 */ \ + tbl RTMP0.16b, {v16.16b-v19.16b}, RX0.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v20.16b-v23.16b}, RX0.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v24.16b-v27.16b}, RX0.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v28.16b-v31.16b}, RX0.16b; \ + \ + /* linear part */ \ + shl RTMP1.4s, RTMP0.4s, #8; \ + shl RTMP2.4s, RTMP0.4s, #16; \ + shl RTMP3.4s, RTMP0.4s, #24; \ + sri RTMP1.4s, RTMP0.4s, #(32-8); \ + sri RTMP2.4s, RTMP0.4s, #(32-16); \ + sri RTMP3.4s, RTMP0.4s, #(32-24); \ + /* RTMP1 = x ^ rol32(x, 8) ^ rol32(x, 16) */ \ + eor RTMP1.16b, RTMP1.16b, RTMP0.16b; \ + eor RTMP1.16b, RTMP1.16b, RTMP2.16b; \ + /* RTMP3 = x ^ rol32(x, 24) ^ rol32(RTMP1, 2) */ \ + eor RTMP3.16b, RTMP3.16b, RTMP0.16b; \ + shl RTMP2.4s, RTMP1.4s, 2; \ + sri RTMP2.4s, RTMP1.4s, #(32-2); \ + eor RTMP3.16b, RTMP3.16b, RTMP2.16b; \ + /* s0 ^= RTMP3 */ \ + eor s0.16b, s0.16b, RTMP3.16b; + +#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + \ + transpose_4x4(b0, b1, b2, b3); \ + \ + mov x6, 8; \ +4: \ + ld1 {RKEY.4s}, [x0], #16; \ + subs x6, x6, #1; \ + \ + ROUND4(0, b0, b1, b2, b3); \ + ROUND4(1, b1, b2, b3, b0); \ + ROUND4(2, b2, b3, b0, b1); \ + ROUND4(3, b3, b0, b1, b2); \ + \ + bne 4b; \ + \ + rotate_clockwise_90(b0, b1, b2, b3); \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + \ + /* repoint to rkey */ \ + sub x0, 
x0, #128; + +#define ROUND8(round, s0, s1, s2, s3, t0, t1, t2, t3) \ + /* rk ^ s1 ^ s2 ^ s3 */ \ + dup RX0.4s, RKEY.s[round]; \ + eor RTMP0.16b, s2.16b, s3.16b; \ + mov RX1.16b, RX0.16b; \ + eor RTMP1.16b, t2.16b, t3.16b; \ + eor RX0.16b, RX0.16b, s1.16b; \ + eor RX1.16b, RX1.16b, t1.16b; \ + eor RX0.16b, RX0.16b, RTMP0.16b; \ + eor RX1.16b, RX1.16b, RTMP1.16b; \ + \ + /* sbox, non-linear part */ \ + movi RTMP3.16b, #64; /* sizeof(sbox) / 4 */ \ + tbl RTMP0.16b, {v16.16b-v19.16b}, RX0.16b; \ + tbl RTMP1.16b, {v16.16b-v19.16b}, RX1.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + sub RX1.16b, RX1.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v20.16b-v23.16b}, RX0.16b; \ + tbx RTMP1.16b, {v20.16b-v23.16b}, RX1.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + sub RX1.16b, RX1.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v24.16b-v27.16b}, RX0.16b; \ + tbx RTMP1.16b, {v24.16b-v27.16b}, RX1.16b; \ + sub RX0.16b, RX0.16b, RTMP3.16b; \ + sub RX1.16b, RX1.16b, RTMP3.16b; \ + tbx RTMP0.16b, {v28.16b-v31.16b}, RX0.16b; \ + tbx RTMP1.16b, {v28.16b-v31.16b}, RX1.16b; \ + \ + /* linear part */ \ + shl RX0.4s, RTMP0.4s, #8; \ + shl RX1.4s, RTMP1.4s, #8; \ + shl RTMP2.4s, RTMP0.4s, #16; \ + shl RTMP3.4s, RTMP1.4s, #16; \ + sri RX0.4s, RTMP0.4s, #(32 - 8); \ + sri RX1.4s, RTMP1.4s, #(32 - 8); \ + sri RTMP2.4s, RTMP0.4s, #(32 - 16); \ + sri RTMP3.4s, RTMP1.4s, #(32 - 16); \ + /* RX = x ^ rol32(x, 8) ^ rol32(x, 16) */ \ + eor RX0.16b, RX0.16b, RTMP0.16b; \ + eor RX1.16b, RX1.16b, RTMP1.16b; \ + eor RX0.16b, RX0.16b, RTMP2.16b; \ + eor RX1.16b, RX1.16b, RTMP3.16b; \ + /* RTMP0/1 ^= x ^ rol32(x, 24) ^ rol32(RX, 2) */ \ + shl RTMP2.4s, RTMP0.4s, #24; \ + shl RTMP3.4s, RTMP1.4s, #24; \ + sri RTMP2.4s, RTMP0.4s, #(32 - 24); \ + sri RTMP3.4s, RTMP1.4s, #(32 - 24); \ + eor RTMP0.16b, RTMP0.16b, RTMP2.16b; \ + eor RTMP1.16b, RTMP1.16b, RTMP3.16b; \ + shl RTMP2.4s, RX0.4s, #2; \ + shl RTMP3.4s, RX1.4s, #2; \ + sri RTMP2.4s, RX0.4s, #(32 - 2); \ + sri RTMP3.4s, RX1.4s, #(32 - 2); \ + eor RTMP0.16b, RTMP0.16b, RTMP2.16b; \ + eor RTMP1.16b, RTMP1.16b, RTMP3.16b; \ + /* s0/t0 ^= RTMP0/1 */ \ + eor s0.16b, s0.16b, RTMP0.16b; \ + eor t0.16b, t0.16b, RTMP1.16b; + +#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + rev32 b4.16b, b4.16b; \ + rev32 b5.16b, b5.16b; \ + rev32 b6.16b, b6.16b; \ + rev32 b7.16b, b7.16b; \ + \ + transpose_4x4(b0, b1, b2, b3); \ + transpose_4x4(b4, b5, b6, b7); \ + \ + mov x6, 8; \ +8: \ + ld1 {RKEY.4s}, [x0], #16; \ + subs x6, x6, #1; \ + \ + ROUND8(0, b0, b1, b2, b3, b4, b5, b6, b7); \ + ROUND8(1, b1, b2, b3, b0, b5, b6, b7, b4); \ + ROUND8(2, b2, b3, b0, b1, b6, b7, b4, b5); \ + ROUND8(3, b3, b0, b1, b2, b7, b4, b5, b6); \ + \ + bne 8b; \ + \ + rotate_clockwise_90(b0, b1, b2, b3); \ + rotate_clockwise_90(b4, b5, b6, b7); \ + rev32 b0.16b, b0.16b; \ + rev32 b1.16b, b1.16b; \ + rev32 b2.16b, b2.16b; \ + rev32 b3.16b, b3.16b; \ + rev32 b4.16b, b4.16b; \ + rev32 b5.16b, b5.16b; \ + rev32 b6.16b, b6.16b; \ + rev32 b7.16b, b7.16b; \ + \ + /* repoint to rkey */ \ + sub x0, x0, #128; + + +.align 3 +SYM_FUNC_START_LOCAL(__sm4_neon_crypt_blk1_4) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * w3: num blocks (1..4) + */ + PREPARE; + + ld1 {v0.16b}, [x2], #16; + mov v1.16b, v0.16b; + mov v2.16b, v0.16b; + mov v3.16b, v0.16b; + cmp w3, #2; + blt .Lblk4_load_input_done; + ld1 {v1.16b}, [x2], #16; + beq .Lblk4_load_input_done; + ld1 {v2.16b}, [x2], #16; + cmp w3, #3; + beq .Lblk4_load_input_done; + ld1 
{v3.16b}, [x2]; + +.Lblk4_load_input_done: + SM4_CRYPT_BLK4(v0, v1, v2, v3); + + st1 {v0.16b}, [x1], #16; + cmp w3, #2; + blt .Lblk4_store_output_done; + st1 {v1.16b}, [x1], #16; + beq .Lblk4_store_output_done; + st1 {v2.16b}, [x1], #16; + cmp w3, #3; + beq .Lblk4_store_output_done; + st1 {v3.16b}, [x1]; + +.Lblk4_store_output_done: + ret; +SYM_FUNC_END(__sm4_neon_crypt_blk1_4) + +.align 3 +SYM_FUNC_START(sm4_neon_crypt_blk1_8) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * w3: num blocks (1..8) + */ + cmp w3, #5; + blt __sm4_neon_crypt_blk1_4; + + PREPARE; + + ld1 {v0.16b-v3.16b}, [x2], #64; + ld1 {v4.16b}, [x2], #16; + mov v5.16b, v4.16b; + mov v6.16b, v4.16b; + mov v7.16b, v4.16b; + beq .Lblk8_load_input_done; + ld1 {v5.16b}, [x2], #16; + cmp w3, #7; + blt .Lblk8_load_input_done; + ld1 {v6.16b}, [x2], #16; + beq .Lblk8_load_input_done; + ld1 {v7.16b}, [x2]; + +.Lblk8_load_input_done: + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + cmp w3, #6; + st1 {v0.16b-v3.16b}, [x1], #64; + st1 {v4.16b}, [x1], #16; + blt .Lblk8_store_output_done; + st1 {v5.16b}, [x1], #16; + beq .Lblk8_store_output_done; + st1 {v6.16b}, [x1], #16; + cmp w3, #7; + beq .Lblk8_store_output_done; + st1 {v7.16b}, [x1]; + +.Lblk8_store_output_done: + ret; +SYM_FUNC_END(sm4_neon_crypt_blk1_8) + +.align 3 +SYM_FUNC_START(sm4_neon_crypt_blk8) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * w3: nblocks (multiples of 8) + */ + PREPARE; + +.Lcrypt_loop_blk: + subs w3, w3, #8; + bmi .Lcrypt_end; + + ld1 {v0.16b-v3.16b}, [x2], #64; + ld1 {v4.16b-v7.16b}, [x2], #64; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + st1 {v0.16b-v3.16b}, [x1], #64; + st1 {v4.16b-v7.16b}, [x1], #64; + + b .Lcrypt_loop_blk; + +.Lcrypt_end: + ret; +SYM_FUNC_END(sm4_neon_crypt_blk8) + +.align 3 +SYM_FUNC_START(sm4_neon_cbc_dec_blk8) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks (multiples of 8) + */ + PREPARE; + + ld1 {RIV.16b}, [x3]; + +.Lcbc_loop_blk: + subs w4, w4, #8; + bmi .Lcbc_end; + + ld1 {v0.16b-v3.16b}, [x2], #64; + ld1 {v4.16b-v7.16b}, [x2]; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + sub x2, x2, #64; + eor v0.16b, v0.16b, RIV.16b; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v1.16b, v1.16b, RTMP0.16b; + eor v2.16b, v2.16b, RTMP1.16b; + eor v3.16b, v3.16b, RTMP2.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + eor v4.16b, v4.16b, RTMP3.16b; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v5.16b, v5.16b, RTMP0.16b; + eor v6.16b, v6.16b, RTMP1.16b; + eor v7.16b, v7.16b, RTMP2.16b; + + mov RIV.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + b .Lcbc_loop_blk; + +.Lcbc_end: + /* store new IV */ + st1 {RIV.16b}, [x3]; + + ret; +SYM_FUNC_END(sm4_neon_cbc_dec_blk8) + +.align 3 +SYM_FUNC_START(sm4_neon_cfb_dec_blk8) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: iv (big endian, 128 bit) + * w4: nblocks (multiples of 8) + */ + PREPARE; + + ld1 {v0.16b}, [x3]; + +.Lcfb_loop_blk: + subs w4, w4, #8; + bmi .Lcfb_end; + + ld1 {v1.16b, v2.16b, v3.16b}, [x2], #48; + ld1 {v4.16b-v7.16b}, [x2]; + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + sub x2, x2, #48; + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v4.16b, v4.16b, RTMP0.16b; + eor v5.16b, v5.16b, RTMP1.16b; + eor v6.16b, v6.16b, 
RTMP2.16b; + eor v7.16b, v7.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + mov v0.16b, RTMP3.16b; + + b .Lcfb_loop_blk; + +.Lcfb_end: + /* store new IV */ + st1 {v0.16b}, [x3]; + + ret; +SYM_FUNC_END(sm4_neon_cfb_dec_blk8) + +.align 3 +SYM_FUNC_START(sm4_neon_ctr_enc_blk8) + /* input: + * x0: round key array, CTX + * x1: dst + * x2: src + * x3: ctr (big endian, 128 bit) + * w4: nblocks (multiples of 8) + */ + PREPARE; + + ldp x7, x8, [x3]; + rev x7, x7; + rev x8, x8; + +.Lctr_loop_blk: + subs w4, w4, #8; + bmi .Lctr_end; + +#define inc_le128(vctr) \ + mov vctr.d[1], x8; \ + mov vctr.d[0], x7; \ + adds x8, x8, #1; \ + adc x7, x7, xzr; \ + rev64 vctr.16b, vctr.16b; + + /* construct CTRs */ + inc_le128(v0); /* +0 */ + inc_le128(v1); /* +1 */ + inc_le128(v2); /* +2 */ + inc_le128(v3); /* +3 */ + inc_le128(v4); /* +4 */ + inc_le128(v5); /* +5 */ + inc_le128(v6); /* +6 */ + inc_le128(v7); /* +7 */ + + SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7); + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v0.16b, v0.16b, RTMP0.16b; + eor v1.16b, v1.16b, RTMP1.16b; + eor v2.16b, v2.16b, RTMP2.16b; + eor v3.16b, v3.16b, RTMP3.16b; + st1 {v0.16b-v3.16b}, [x1], #64; + + ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64; + eor v4.16b, v4.16b, RTMP0.16b; + eor v5.16b, v5.16b, RTMP1.16b; + eor v6.16b, v6.16b, RTMP2.16b; + eor v7.16b, v7.16b, RTMP3.16b; + st1 {v4.16b-v7.16b}, [x1], #64; + + b .Lctr_loop_blk; + +.Lctr_end: + /* store new CTR */ + rev x7, x7; + rev x8, x8; + stp x7, x8, [x3]; + + ret; +SYM_FUNC_END(sm4_neon_ctr_enc_blk8) diff --git a/arch/arm64/crypto/sm4-neon-glue.c b/arch/arm64/crypto/sm4-neon-glue.c new file mode 100644 index 0000000000000000000000000000000000000000..03a6a6866a3112f0dc10dc2fc119f7d53c867a1d --- /dev/null +++ b/arch/arm64/crypto/sm4-neon-glue.c @@ -0,0 +1,442 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM4 Cipher Algorithm, using ARMv8 NEON + * as specified in + * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html + * + * Copyright (C) 2022, Alibaba Group. 
+ * Copyright (C) 2022 Tianjia Zhang + */ + +#include <linux/module.h> +#include <linux/crypto.h> +#include <linux/kernel.h> +#include <linux/cpufeature.h> +#include <asm/neon.h> +#include <asm/simd.h> +#include <crypto/internal/simd.h> +#include <crypto/internal/skcipher.h> +#include <crypto/sm4.h> + +#define BYTES2BLKS(nbytes) ((nbytes) >> 4) +#define BYTES2BLK8(nbytes) (((nbytes) >> 4) & ~(8 - 1))
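 + +/* + * Annotation (not part of the original patch): BYTES2BLKS() is the number of + * whole 16-byte SM4 blocks in the current walk chunk, and BYTES2BLK8() rounds + * that count down to a multiple of eight so the 8-block NEON fast path can be + * taken first; sm4_neon_crypt_blk1_8() then consumes the remaining 1..8 full + * blocks. + */ + +asmlinkage void sm4_neon_crypt_blk1_8(const u32 *rkey, u8 *dst, const u8 *src, + unsigned int nblks); +asmlinkage void sm4_neon_crypt_blk8(const u32 *rkey, u8 *dst, const u8 *src, + unsigned int nblks); +asmlinkage void sm4_neon_cbc_dec_blk8(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_neon_cfb_dec_blk8(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); +asmlinkage void sm4_neon_ctr_enc_blk8(const u32 *rkey, u8 *dst, const u8 *src, + u8 *iv, unsigned int nblks); + +static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_expandkey(ctx, key, key_len); +} + +static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey) +{ + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLK8(nbytes); + if (nblks) { + sm4_neon_crypt_blk8(rkey, dst, src, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + sm4_neon_crypt_blk1_8(rkey, dst, src, nblks); + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int sm4_ecb_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_ecb_do_crypt(req, ctx->rkey_enc); +} + +static int sm4_ecb_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + return sm4_ecb_do_crypt(req, ctx->rkey_dec); +} + +static int sm4_cbc_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *iv = walk.iv; + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) { + crypto_xor_cpy(dst, src, iv, SM4_BLOCK_SIZE); + sm4_crypt_block(ctx->rkey_enc, dst, dst); + iv = dst; + src += SM4_BLOCK_SIZE; + dst += SM4_BLOCK_SIZE; + nbytes -= SM4_BLOCK_SIZE; + } + if (iv != walk.iv) + memcpy(walk.iv, iv, SM4_BLOCK_SIZE); + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int sm4_cbc_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLK8(nbytes); + if (nblks) { + sm4_neon_cbc_dec_blk8(ctx->rkey_dec, dst, src, + walk.iv, nblks); + dst += nblks *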
SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + u8 keystream[SM4_BLOCK_SIZE * 8]; + u8 iv[SM4_BLOCK_SIZE]; + int i; + + sm4_neon_crypt_blk1_8(ctx->rkey_dec, keystream, + src, nblks); + + src += ((int)nblks - 2) * SM4_BLOCK_SIZE; + dst += (nblks - 1) * SM4_BLOCK_SIZE; + memcpy(iv, src + SM4_BLOCK_SIZE, SM4_BLOCK_SIZE); + + for (i = nblks - 1; i > 0; i--) { + crypto_xor_cpy(dst, src, + &keystream[i * SM4_BLOCK_SIZE], + SM4_BLOCK_SIZE); + src -= SM4_BLOCK_SIZE; + dst -= SM4_BLOCK_SIZE; + } + crypto_xor_cpy(dst, walk.iv, + keystream, SM4_BLOCK_SIZE); + memcpy(walk.iv, iv, SM4_BLOCK_SIZE); + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int sm4_cfb_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + const u8 *iv = walk.iv; + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) { + sm4_crypt_block(ctx->rkey_enc, keystream, iv); + crypto_xor_cpy(dst, src, keystream, SM4_BLOCK_SIZE); + iv = dst; + src += SM4_BLOCK_SIZE; + dst += SM4_BLOCK_SIZE; + nbytes -= SM4_BLOCK_SIZE; + } + if (iv != walk.iv) + memcpy(walk.iv, iv, SM4_BLOCK_SIZE); + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int sm4_cfb_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLK8(nbytes); + if (nblks) { + sm4_neon_cfb_dec_blk8(ctx->rkey_enc, dst, src, + walk.iv, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + u8 keystream[SM4_BLOCK_SIZE * 8]; + + memcpy(keystream, walk.iv, SM4_BLOCK_SIZE); + if (nblks > 1) + memcpy(&keystream[SM4_BLOCK_SIZE], src, + (nblks - 1) * SM4_BLOCK_SIZE); + memcpy(walk.iv, src + (nblks - 1) * SM4_BLOCK_SIZE, + SM4_BLOCK_SIZE); + + sm4_neon_crypt_blk1_8(ctx->rkey_enc, keystream, + keystream, nblks); + + crypto_xor_cpy(dst, src, keystream, + nblks * SM4_BLOCK_SIZE); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + + sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static int sm4_ctr_crypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; 
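 + /* + * Annotation (not part of the original patch): as in the modes above, + * each chunk of the walk is handled in three steps: an 8-block NEON + * fast path, a 1..8 block NEON pass that encrypts a locally built + * keystream of incremented counter values, and one scalar + * sm4_crypt_block() call for a partial tail block. + */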
+ int err; + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) > 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + unsigned int nblks; + + kernel_neon_begin(); + + nblks = BYTES2BLK8(nbytes); + if (nblks) { + sm4_neon_ctr_enc_blk8(ctx->rkey_enc, dst, src, + walk.iv, nblks); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + nblks = BYTES2BLKS(nbytes); + if (nblks) { + u8 keystream[SM4_BLOCK_SIZE * 8]; + int i; + + for (i = 0; i < nblks; i++) { + memcpy(&keystream[i * SM4_BLOCK_SIZE], + walk.iv, SM4_BLOCK_SIZE); + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + } + sm4_neon_crypt_blk1_8(ctx->rkey_enc, keystream, + keystream, nblks); + + crypto_xor_cpy(dst, src, keystream, + nblks * SM4_BLOCK_SIZE); + dst += nblks * SM4_BLOCK_SIZE; + src += nblks * SM4_BLOCK_SIZE; + nbytes -= nblks * SM4_BLOCK_SIZE; + } + + kernel_neon_end(); + + /* tail */ + if (walk.nbytes == walk.total && nbytes > 0) { + u8 keystream[SM4_BLOCK_SIZE]; + + sm4_crypt_block(ctx->rkey_enc, keystream, walk.iv); + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + crypto_xor_cpy(dst, src, keystream, nbytes); + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +static struct skcipher_alg sm4_algs[] = { + { + .base = { + .cra_name = "ecb(sm4)", + .cra_driver_name = "ecb-sm4-neon", + .cra_priority = 200, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_ecb_encrypt, + .decrypt = sm4_ecb_decrypt, + }, { + .base = { + .cra_name = "cbc(sm4)", + .cra_driver_name = "cbc-sm4-neon", + .cra_priority = 200, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_cbc_encrypt, + .decrypt = sm4_cbc_decrypt, + }, { + .base = { + .cra_name = "cfb(sm4)", + .cra_driver_name = "cfb-sm4-neon", + .cra_priority = 200, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_cfb_encrypt, + .decrypt = sm4_cfb_decrypt, + }, { + .base = { + .cra_name = "ctr(sm4)", + .cra_driver_name = "ctr-sm4-neon", + .cra_priority = 200, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .setkey = sm4_setkey, + .encrypt = sm4_ctr_crypt, + .decrypt = sm4_ctr_crypt, + } +}; + +static int __init sm4_init(void) +{ + return crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); +} + +static void __exit sm4_exit(void) +{ + crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); +} + +module_init(sm4_init); +module_exit(sm4_exit); + +MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR using ARMv8 NEON"); +MODULE_ALIAS_CRYPTO("sm4-neon"); +MODULE_ALIAS_CRYPTO("sm4"); +MODULE_ALIAS_CRYPTO("ecb(sm4)"); +MODULE_ALIAS_CRYPTO("cbc(sm4)"); +MODULE_ALIAS_CRYPTO("cfb(sm4)"); +MODULE_ALIAS_CRYPTO("ctr(sm4)"); +MODULE_AUTHOR("Tianjia Zhang "); +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h 
index 12aced900ada4ef79609fb8062285e093c8a69a3..80714a8589a03e8ccb4fef277a57031a0df6a3b8 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -191,9 +191,8 @@ static inline void gic_arch_disable_irqs(void) static inline void gic_arch_restore_irqs(unsigned long flags) { - if (gic_supports_nmi()) - asm volatile ("msr daif, %0" : : "r" (flags >> 32) - : "memory"); + if (gic_supports_nmi() && !(flags & GIC_PRIO_PSR_I_SET)) + gic_arch_enable_irqs(); } #endif /* __ASSEMBLY__ */ #endif /* __ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index c7b9b859a8607908574b0bf6fbfed441d7999359..ef5e60d6d57709e7be6b1208efdfe700ae180670 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -106,6 +106,13 @@ hint #20 .endm +/* + * Clear Branch History instruction + */ + .macro clearbhb + hint #22 + .endm + /* * Speculation barrier */ @@ -763,4 +770,30 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */ + .macro __mitigate_spectre_bhb_loop tmp +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY +alternative_cb spectre_bhb_patch_loop_iter + mov \tmp, #32 // Patched to correct the immediate +alternative_cb_end +.Lspectre_bhb_loop\@: + b . + 4 + subs \tmp, \tmp, #1 + b.ne .Lspectre_bhb_loop\@ + sb +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + .endm + + /* Saves/restores x0-x3 to the stack */ + .macro __mitigate_spectre_bhb_fw +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + stp x0, x1, [sp, #-16]! + stp x2, x3, [sp, #-16]! + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 +alternative_cb smccc_patch_fw_mitigation_conduit + nop // Patched to SMC/HVC #0 +alternative_cb_end + ldp x2, x3, [sp], #16 + ldp x0, x1, [sp], #16 +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + .endm
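 + +/* + * Annotation (not part of the original patch): both macros above are + * runtime-patched. spectre_bhb_patch_loop_iter() rewrites the loop immediate + * with the per-CPU trip count (the k8/k24/k32 KVM template names suggest 8, + * 24 or 32 iterations), and smccc_patch_fw_mitigation_conduit turns the nop + * into the SMC or HVC that invokes ARM_SMCCC_ARCH_WORKAROUND_3 in firmware. + */ + #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h index ec7720dbe2c801bc44344d7893cfc510fed7d0d7..1ac8bc293ea2cb85a703ef76529bd879fdd48baa 100644 --- a/arch/arm64/include/asm/brk-imm.h +++ b/arch/arm64/include/asm/brk-imm.h @@ -21,6 +21,7 @@ #define KPROBES_BRK_IMM 0x004 #define UPROBES_BRK_IMM 0x005 #define KPROBES_BRK_SS_IMM 0x006 +#define KLP_BRK_IMM 0x007 #define FAULT_BRK_IMM 0x100 #define KGDB_DYN_DBG_BRK_IMM 0x400 #define KGDB_COMPILED_DBG_BRK_IMM 0x401 diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index cf8e78585865a2f369129a81a4791b0c48e9db63..f7e1d1bb81727f349f1fac2899fc12cc43f1d3b1 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -31,6 +31,12 @@ #define L1_CACHE_SHIFT (6) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE +#ifndef ____cacheline_aligned_128 +#define ____cacheline_aligned_128 __attribute__((__aligned__(128))) +#endif +#endif + #define CLIDR_LOUU_SHIFT 27 #define CLIDR_LOC_SHIFT 24 diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 7faae6ff3ab4d5c27896305d609494cdb13d54e6..24ed6643da2660d7f2ae0381e7690942680771d4 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -25,6 +25,7 @@ struct cpuinfo_arm64 { u64 reg_id_aa64dfr1; u64 reg_id_aa64isar0; u64 reg_id_aa64isar1; + u64 reg_id_aa64isar2; u64 reg_id_aa64mmfr0; u64 reg_id_aa64mmfr1; u64 reg_id_aa64mmfr2; diff --git a/arch/arm64/include/asm/cpu_park.h b/arch/arm64/include/asm/cpu_park.h new file mode 100644 index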
0000000000000000000000000000000000000000..0aa4ebf6f8303a1ecbcd23974d342476b3200c79 --- /dev/null +++ b/arch/arm64/include/asm/cpu_park.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_CPU_PARK_H +#define __ASM_CPU_PARK_H + +#ifdef CONFIG_ARM64_CPU_PARK + +/* CPU park state flag: "park" */ +#define PARK_MAGIC 0x7061726b + +#ifndef __ASSEMBLY__ +extern void enter_cpu_park(unsigned long text, unsigned long exit); +extern void do_cpu_park(unsigned long exit); +extern void reserve_park_mem(void); +extern int write_park_exit(unsigned int cpu); +extern int uninstall_cpu_park(unsigned int cpu); +extern void cpu_park_stop(void); +extern int kexec_smp_send_park(void); +#endif /* ifndef __ASSEMBLY__ */ + +#else +static inline void reserve_park_mem(void) {} +static inline int write_park_exit(unsigned int cpu) { return -EINVAL; } +static inline int uninstall_cpu_park(unsigned int cpu) { return -EINVAL; } +static inline void cpu_park_stop(void) {} +static inline int kexec_smp_send_park(void) { return -EINVAL; } +#endif + +#endif /* ifndef __ASM_CPU_PARK_H */ diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 836d716f2bf2635a211f531ced6ac10b3126a3c0..3ffa6108c96dbfeb86146270d9ddea7c7b7afc02 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -71,6 +71,7 @@ #define ARM64_WORKAROUND_HISILICON_1980005 63 #define ARM64_HAS_ECV 64 #define ARM64_HAS_EPAN 65 +#define ARM64_SPECTRE_BHB 66 #define ARM64_NCAPS 80 diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index a55e39cac4f47813844e16e62c4614da5f7bde45..44c856df88484836a71fa943a6b32e34a248cd61 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -608,6 +608,34 @@ static inline bool cpu_supports_mixed_endian_el0(void) return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1)); } +static inline bool supports_csv2p3(int scope) +{ + u64 pfr0; + u8 csv2_val; + + if (scope == SCOPE_LOCAL_CPU) + pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1); + else + pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + csv2_val = cpuid_feature_extract_unsigned_field(pfr0, + ID_AA64PFR0_CSV2_SHIFT); + return csv2_val == 3; +} + +static inline bool supports_clearbhb(int scope) +{ + u64 isar2; + + if (scope == SCOPE_LOCAL_CPU) + isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1); + else + isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); + + return cpuid_feature_extract_unsigned_field(isar2, + ID_AA64ISAR2_CLEARBHB_SHIFT); +} + static inline bool system_supports_32bit_el0(void) { return cpus_have_const_cap(ARM64_HAS_32BIT_EL0); diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 896c2635c411dcffe195cc2f6b6d309f46b121cf..662708c56397d07acd42054d25f67b85b0986858 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -60,6 +60,7 @@ #define ARM_CPU_IMP_FUJITSU 0x46 #define ARM_CPU_IMP_HISI 0x48 #define ARM_CPU_IMP_PHYTIUM 0x70 +#define ARM_CPU_IMP_APPLE 0x61 #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -73,6 +74,15 @@ #define ARM_CPU_PART_CORTEX_A76 0xD0B #define ARM_CPU_PART_NEOVERSE_N1 0xD0C #define ARM_CPU_PART_CORTEX_A77 0xD0D +#define ARM_CPU_PART_NEOVERSE_V1 0xD40 +#define ARM_CPU_PART_CORTEX_A78 0xD41 +#define ARM_CPU_PART_CORTEX_A78AE 0xD42 +#define ARM_CPU_PART_CORTEX_X1 0xD44 +#define ARM_CPU_PART_CORTEX_A78C 0xD4B +#define ARM_CPU_PART_CORTEX_A510 0xD46 +#define ARM_CPU_PART_CORTEX_A710 0xD47 
+#define ARM_CPU_PART_CORTEX_X2 0xD48 +#define ARM_CPU_PART_NEOVERSE_N2 0xD49 #define APM_CPU_PART_POTENZA 0x000 @@ -107,6 +117,9 @@ #define PHYTIUM_CPU_PART_2004 0X663 #define PHYTIUM_CPU_PART_2500 0X663 +#define APPLE_CPU_PART_M1_ICESTORM 0x022 +#define APPLE_CPU_PART_M1_FIRESTORM 0x023 + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) @@ -117,6 +130,15 @@ #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77) +#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1) +#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78) +#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE) +#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) +#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) +#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) +#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) +#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) +#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) @@ -141,6 +163,8 @@ #define MIDR_FT_2000PLUS MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000PLUS) #define MIDR_FT_2004 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2004) #define MIDR_FT_2500 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2500) +#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) +#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 657c921fd784a7c12a0e45d814c6bc089f9ca0ec..bc015465ecd29f1b239e064d5f8ffac6865ec66b 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -56,6 +56,8 @@ #define BRK64_OPCODE_KPROBES_SS (AARCH64_BREAK_MON | (KPROBES_BRK_SS_IMM << 5)) /* uprobes BRK opcodes with ESR encoding */ #define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5)) +/* klp BRK opcodes with ESR encoding */ +#define BRK64_OPCODE_KLP (AARCH64_BREAK_MON | (KLP_BRK_IMM << 5)) /* AArch32 */ #define DBG_ESR_EVT_BKPT 0x4 diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 0756191f44f644b9a3153dd584cda74ecc75dd4e..731cf01d9296025be3e3936402ea0c9da5fc7034 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -19,6 +19,19 @@ #define __exception_irq_entry __kprobes #endif +#ifdef CONFIG_ARM64_UCE_KERNEL_RECOVERY +bool arm64_process_kernel_sea(unsigned long addr, unsigned int esr, + struct pt_regs *regs, int sig, + int code, void __user 
*siaddr); +#else +static inline bool arm64_process_kernel_sea(unsigned long addr, unsigned int esr, + struct pt_regs *regs, int sig, + int code, void __user *siaddr) +{ + return false; +} +#endif + static inline u32 disr_to_esr(u64 disr) { unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT; diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index 4335800201c97ad1d22bc61ec2a3b12f498f2377..daff882883f92c956775b3a8cf15f560f2de2d56 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -62,9 +62,11 @@ enum fixed_addresses { #endif /* CONFIG_ACPI_APEI_GHES */ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + FIX_ENTRY_TRAMP_TEXT3, + FIX_ENTRY_TRAMP_TEXT2, + FIX_ENTRY_TRAMP_TEXT1, FIX_ENTRY_TRAMP_DATA, - FIX_ENTRY_TRAMP_TEXT, -#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT)) +#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1)) #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ __end_of_permanent_fixed_addresses, diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index be9cb527b309e86a31608850898c85ac35411626..b2cb230ef21eb0909bb32d0c716da99341f2c147 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -106,6 +106,8 @@ #define KERNEL_HWCAP_BTI __khwcap2_feature(BTI) #define KERNEL_HWCAP_MTE __khwcap2_feature(MTE) #define KERNEL_HWCAP_ECV __khwcap2_feature(ECV) +#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP) +#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 4ebb9c054cccd7fa9c70a47d3065dad6de0842ca..c0973345e6e1d582ff3261f3e2d61a6e6da4706a 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -64,6 +64,7 @@ enum aarch64_insn_hint_cr_op { AARCH64_INSN_HINT_PSB = 0x11 << 5, AARCH64_INSN_HINT_TSB = 0x12 << 5, AARCH64_INSN_HINT_CSDB = 0x14 << 5, + AARCH64_INSN_HINT_CLEARBHB = 0x16 << 5, AARCH64_INSN_HINT_BTI = 0x20 << 5, AARCH64_INSN_HINT_BTIC = 0x22 << 5, diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h index 20bee23b6503787e2ad10de5e5dcbc8bc6cccbd6..d797dbab3aad45f59842bc2fbbeda69605d21f59 100644 --- a/arch/arm64/include/asm/kexec.h +++ b/arch/arm64/include/asm/kexec.h @@ -32,11 +32,6 @@ #define CRASH_ADDR_HIGH_MAX MEMBLOCK_ALLOC_ACCESSIBLE -#ifdef CONFIG_ARM64_CPU_PARK -/* CPU park state flag: "park" */ -#define PARK_MAGIC 0x7061726b -#endif - #ifndef __ASSEMBLY__ /** @@ -102,8 +97,11 @@ static inline void crash_prepare_suspend(void) {} static inline void crash_post_resume(void) {} #endif +extern bool crash_low_mem_page_map; + #ifdef CONFIG_KEXEC_CORE extern void __init reserve_crashkernel(void); +extern void __init reserve_crashkernel_high(void); #endif void machine_kexec_mask_interrupts(void); diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index 322e95bc228db1cc98f437d361475eb00da40615..64d7cbfe067baf84ebf9b8b403ee03587338bda1 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -10,73 +10,11 @@ #include #include -#include - -static inline int split_pud_page(pud_t *pud, unsigned long addr) -{ - int i; - pmd_t *pmd = pmd_alloc_one(&init_mm, addr); - unsigned long pfn = PFN_DOWN(__pa(addr)); - - if (!pmd) - return -ENOMEM; - - for (i = 0; i < PTRS_PER_PMD; i++) - set_pmd(pmd + i, pmd_mkhuge(pfn_pmd(pfn + i * PTRS_PER_PTE, PAGE_KERNEL))); - - smp_wmb(); /* See comment in __pte_alloc */ - pud_populate(&init_mm, pud, 
pmd); - - flush_tlb_kernel_range(addr, addr + PUD_SIZE); - return 0; -} - -static inline int split_pmd_page(pmd_t *pmd, unsigned long addr) -{ - int i; - pte_t *pte = pte_alloc_one_kernel(&init_mm); - unsigned long pfn = PFN_DOWN(__pa(addr)); - - if (!pte) - return -ENOMEM; - - for (i = 0; i < PTRS_PER_PTE; i++) - set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL)); - - smp_wmb(); /* See comment in __pte_alloc */ - pmd_populate_kernel(&init_mm, pmd, pte); - - flush_tlb_kernel_range(addr, addr + PMD_SIZE); - return 0; -} static inline bool arch_kfence_init_pool(void) { - unsigned long addr; - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; + memset(__kfence_pool, 0, KFENCE_POOL_SIZE); - for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr); - addr += PAGE_SIZE) { - pgd = pgd_offset(&init_mm, addr); - if (pgd_leaf(*pgd)) - return false; - p4d = p4d_offset(pgd, addr); - if (p4d_leaf(*p4d)) - return false; - pud = pud_offset(p4d, addr); - if (pud_leaf(*pud)) { - if (split_pud_page(pud, addr & PUD_MASK)) - return false; - } - pmd = pmd_offset(pud, addr); - if (pmd_leaf(*pmd)) { - if (split_pmd_page(pmd, addr & PMD_MASK)) - return false; - } - } return true; } diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 044bb9e2cd74f72347743f04e8d91b486b401dfb..ada24a20a5671db51522e6fcdbdd9e7c3b3edf59 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -35,6 +35,9 @@ #define KVM_VECTOR_PREAMBLE (2 * AARCH64_INSN_SIZE) #define __SMCCC_WORKAROUND_1_SMC_SZ 36 +#define __SMCCC_WORKAROUND_3_SMC_SZ 36 +#define __SPECTRE_BHB_LOOP_SZ 44 +#define __SPECTRE_BHB_CLEARBHB_SZ 12 #define KVM_HOST_SMCCC_ID(id) \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ @@ -199,6 +202,11 @@ extern void __vgic_v3_init_lrs(void); extern u32 __kvm_get_mdcr_el2(void); extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ]; +extern char __smccc_workaround_3_smc[__SMCCC_WORKAROUND_3_SMC_SZ]; +extern char __spectre_bhb_loop_k8[__SPECTRE_BHB_LOOP_SZ]; +extern char __spectre_bhb_loop_k24[__SPECTRE_BHB_LOOP_SZ]; +extern char __spectre_bhb_loop_k32[__SPECTRE_BHB_LOOP_SZ]; +extern char __spectre_bhb_clearbhb[__SPECTRE_BHB_LOOP_SZ]; /* * Obtain the PC-relative address of a kernel symbol diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index da041664602b4383cb12ee1ec9ebb78c66a00350..1dd256648428151f365257b5469fa917ab4722e3 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -237,7 +237,8 @@ static inline void *kvm_get_hyp_vector(void) void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)); int slot = -1; - if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) { + if ((cpus_have_const_cap(ARM64_SPECTRE_V2) || + cpus_have_const_cap(ARM64_SPECTRE_BHB)) && data->template_start) { vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs)); slot = data->hyp_vectors_slot; } diff --git a/arch/arm64/include/asm/livepatch.h b/arch/arm64/include/asm/livepatch.h index a9bc7ce4cc6ed2270c34e566e6b7a42e45c929fd..bcb6c4081978f749a74e40b7c985bc1caa9c2848 100644 --- a/arch/arm64/include/asm/livepatch.h +++ b/arch/arm64/include/asm/livepatch.h @@ -48,20 +48,28 @@ int klp_check_calltrace(struct klp_patch *patch, int enable); #error Live patching support is disabled; check CONFIG_LIVEPATCH #endif - #if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) +#ifdef CONFIG_ARM64_MODULE_PLTS #define LJMP_INSN_SIZE 4 +#else +#define LJMP_INSN_SIZE 1 +#endif /* CONFIG_ARM64_MODULE_PLTS */ struct arch_klp_data { 
-#ifdef CONFIG_ARM64_MODULE_PLTS u32 old_insns[LJMP_INSN_SIZE]; -#else - u32 old_insn; -#endif + + /* + * Saved opcode at the entry of the old func (which may be replaced + * with a breakpoint). + */ + u32 saved_opcode; }; +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +int arch_klp_module_check_calltrace(void *data);
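 + +/* + * Annotation (not part of the original patch): the intended flow appears to + * be that arch_klp_add_breakpoint() plants a BRK #KLP_BRK_IMM (see + * BRK64_OPCODE_KLP above) at the old function entry, with saved_opcode + * keeping the displaced instruction so arch_klp_remove_breakpoint() can + * restore it. + */ + #endif diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index c7315862e2435945e4ed71949b1c7ec4379fae00..bc151b7dc042cfd2f78c5d52d0de956df3844d5d 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -67,6 +67,12 @@ typedef void (*bp_hardening_cb_t)(void); struct bp_hardening_data { int hyp_vectors_slot; bp_hardening_cb_t fn; + + /* + * template_start is only used by the BHB mitigation to identify the + * hyp_vectors_slot sequence. + */ + const char *template_start; }; DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h index 810045628c66e7588da28fb9208b6882e760b41d..0522337d600a7c4957874372c74849a8bc2ce84f 100644 --- a/arch/arm64/include/asm/module.lds.h +++ b/arch/arm64/include/asm/module.lds.h @@ -1,7 +1,7 @@ #ifdef CONFIG_ARM64_MODULE_PLTS SECTIONS { - .plt 0 (NOLOAD) : { BYTE(0) } - .init.plt 0 (NOLOAD) : { BYTE(0) } - .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) } + .plt 0 : { BYTE(0) } + .init.plt 0 : { BYTE(0) } + .text.ftrace_trampoline 0 : { BYTE(0) } } #endif diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 8e6f1af816c966a6d24bde62ac73f65cf6d7006a..617cdd40c61b0e931cee6b01849a8fc35a0fb791 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -49,7 +49,7 @@ #define TASK_SIZE_64 (UL(1) << vabits_actual) #ifdef CONFIG_COMPAT -#define TASK_SIZE_MAX (is_compat_task() ? \ +#define TASK_SIZE_MAX (is_ilp32_compat_task() ? \ UL(0x100000000) : (UL(1) << VA_BITS)) #if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS) /* diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h index 3994169985efc62b2c84556934d31bd5c8c1cadd..6a45c26da46e351c690c64e2dfb9b76cab2a7505 100644 --- a/arch/arm64/include/asm/sections.h +++ b/arch/arm64/include/asm/sections.h @@ -19,4 +19,9 @@ extern char __irqentry_text_start[], __irqentry_text_end[]; extern char __mmuoff_data_start[], __mmuoff_data_end[]; extern char __entry_tramp_text_start[], __entry_tramp_text_end[]; +static inline size_t entry_tramp_text_size(void) +{ + return __entry_tramp_text_end - __entry_tramp_text_start; +} + #endif /* __ASM_SECTIONS_H */ diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 8c5d2d650b8aa68f1fa0cba1563e9163aa086836..f4b19b8f323ab47d091c27a951f3e5395267871e 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -145,21 +145,7 @@ bool cpus_are_stuck_in_kernel(void); extern void crash_smp_send_stop(void); extern bool smp_crash_stop_failed(void); -#ifdef CONFIG_ARM64_CPU_PARK -#define PARK_SECTION_SIZE 1024 -struct cpu_park_info { - /* Physical address of reserved park memory. */ - unsigned long start; - /* park reserve mem len should be PARK_SECTION_SIZE * NR_CPUS */ - unsigned long len; - /* Virtual address of reserved park memory.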
*/ - unsigned long start_v; -}; -extern struct cpu_park_info park_info; -extern void enter_cpu_park(unsigned long text, unsigned long exit); -extern void do_cpu_park(unsigned long exit); -extern int kexec_smp_send_park(void); -#endif +extern void smp_cross_send_stop(cpumask_t *cpumask); #endif /* ifndef __ASSEMBLY__ */ diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h index fcdfbce302bdfb9fec12cef198139b4814e9642d..4b3a5f050f71f946f8a8143733b99b1ee2ef75db 100644 --- a/arch/arm64/include/asm/spectre.h +++ b/arch/arm64/include/asm/spectre.h @@ -29,4 +29,8 @@ bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope); void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused); void spectre_v4_enable_task_mitigation(struct task_struct *tsk); +enum mitigation_state arm64_get_spectre_bhb_state(void); +bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); +u8 spectre_bhb_loop_affected(int scope); +void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); #endif /* __ASM_SPECTRE_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index fad9ee4f4eba7de3fc0b0ca8541342ddf6f897a9..098247c4c6685aa12d37f978dd41a92f4aada6f4 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -179,6 +179,7 @@ #define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) #define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) +#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2) #define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) @@ -705,6 +706,21 @@ #define ID_AA64ISAR1_GPI_NI 0x0 #define ID_AA64ISAR1_GPI_IMP_DEF 0x1 +/* id_aa64isar2 */ +#define ID_AA64ISAR2_CLEARBHB_SHIFT 28 +#define ID_AA64ISAR2_RPRES_SHIFT 4 +#define ID_AA64ISAR2_WFXT_SHIFT 0 + +#define ID_AA64ISAR2_RPRES_8BIT 0x0 +#define ID_AA64ISAR2_RPRES_12BIT 0x1 +/* + * Value 0x1 has been removed from the architecture, and is + * reserved, but has not yet been removed from the ARM ARM + * as of ARM DDI 0487G.b. + */ +#define ID_AA64ISAR2_WFXT_NI 0x0 +#define ID_AA64ISAR2_WFXT_SUPPORTED 0x2 + /* id_aa64pfr0 */ #define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_CSV2_SHIFT 56 @@ -805,6 +821,8 @@ #endif /* id_aa64mmfr1 */ +#define ID_AA64MMFR1_ECBHB_SHIFT 60 +#define ID_AA64MMFR1_AFP_SHIFT 44 #define ID_AA64MMFR1_ETS_SHIFT 36 #define ID_AA64MMFR1_TWED_SHIFT 32 #define ID_AA64MMFR1_XNX_SHIFT 28 diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h new file mode 100644 index 0000000000000000000000000000000000000000..bc9a2145f4194e5d8bff099f36a9c1e9a568df91 --- /dev/null +++ b/arch/arm64/include/asm/vectors.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 ARM Ltd. + */ +#ifndef __ASM_VECTORS_H +#define __ASM_VECTORS_H + +#include +#include + +#include + +extern char vectors[]; +extern char tramp_vectors[]; +extern char __bp_harden_el1_vectors[]; + +/* + * Note: the order of this enum corresponds to two arrays in entry.S: + * tramp_vecs and __bp_harden_el1_vectors. By default the canonical + * 'full fat' vectors are used directly. + */ +enum arm64_bp_harden_el1_vectors { +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + /* + * Perform the BHB loop mitigation, before branching to the canonical + * vectors. + */ + EL1_VECTOR_BHB_LOOP, + + /* + * Make the SMC call for firmware mitigation, before branching to the + * canonical vectors. 
+ */ + EL1_VECTOR_BHB_FW, + + /* + * Use the ClearBHB instruction, before branching to the canonical + * vectors. + */ + EL1_VECTOR_BHB_CLEAR_INSN, +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + + /* + * Remap the kernel before branching to the canonical vectors. + */ + EL1_VECTOR_KPTI, +}; + +#ifndef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY +#define EL1_VECTOR_BHB_LOOP -1 +#define EL1_VECTOR_BHB_FW -1 +#define EL1_VECTOR_BHB_CLEAR_INSN -1 +#endif /* !CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + +/* The vectors to use on return from EL0. e.g. to remap the kernel */ +DECLARE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector); + +#ifndef CONFIG_UNMAP_KERNEL_AT_EL0 +#define TRAMP_VALIAS 0ul +#endif + +static inline const char * +arm64_get_bp_hardening_vector(enum arm64_bp_harden_el1_vectors slot) +{ + if (arm64_kernel_unmapped_at_el0()) + return (char *)(TRAMP_VALIAS + SZ_2K * slot); + + WARN_ON_ONCE(slot == EL1_VECTOR_KPTI); + + return __bp_harden_el1_vectors + SZ_2K * slot; +} + +#endif /* __ASM_VECTORS_H */ diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 7b23b16f21ce39ff25595920e3d561f292c623e5..f03731847d9dfdbed849baceafb61f91745bd1cc 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -76,5 +76,7 @@ #define HWCAP2_BTI (1 << 17) #define HWCAP2_MTE (1 << 18) #define HWCAP2_ECV (1 << 19) +#define HWCAP2_AFP (1 << 20) +#define HWCAP2_RPRES (1 << 21) #endif /* _UAPI__ASM_HWCAP_H */ diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 1c17c3a24411d6b0dfd33424d1b493d4750d0221..531ff62e82e959636b32e3d51da1e4d6cfd2c275 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -273,6 +273,11 @@ struct kvm_vcpu_events { #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4) +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 KVM_REG_ARM_FW_REG(3) +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL 0 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL 1 +#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED 2 + /* SVE registers */ #define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT) diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 169d90f11cf55accf7c770ea8f5ea27eb9515f35..4cf75b247461ec1487696f97bc8004a7c9ad5de9 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -56,7 +56,7 @@ obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \ cpu-reset.o -obj-$(CONFIG_ARM64_CPU_PARK) += cpu-park.o +obj-$(CONFIG_ARM64_CPU_PARK) += cpu-park.o arm64_cpu_park.o obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index 73039949b5ce2f6227f11d0d1591967c38bda8f6..5f8e4c2df53ccc9717d147485be606504abc8407 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -41,7 +41,7 @@ bool alternative_is_applied(u16 cpufeature) /* * Check if the target PC is within an alternative block. 
-static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) +static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) { unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt); return !(pc >= replptr && pc <= (replptr + alt->alt_len)); @@ -49,7 +49,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc) #define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1)) -static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr) +static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr) { u32 insn; @@ -94,7 +94,7 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp return insn; } -static void patch_alternative(struct alt_instr *alt, +static noinstr void patch_alternative(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst) { __le32 *replptr; diff --git a/arch/arm64/kernel/arm64_cpu_park.c b/arch/arm64/kernel/arm64_cpu_park.c new file mode 100644 index 0000000000000000000000000000000000000000..c54ffa26a2c793d78136b6bf30c3d2b37123a350 --- /dev/null +++ b/arch/arm64/kernel/arm64_cpu_park.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "arm64 cpu-park: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define PARK_SECTION_SIZE 1024 + +struct cpu_park_section { + unsigned long exit; /* exit address of the park loop */ + unsigned long magic; /* magic representing the park state */ + char text[0]; /* text section of park */ +};
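 + +/* + * Annotation (not part of the original patch): each possible CPU owns one + * PARK_SECTION_SIZE slice of the reserved region. do_cpu_park() (in + * cpu-park.S) is expected to set ->magic to PARK_MAGIC once it reaches the + * park loop copied into ->text, and to spin until ->exit is filled with the + * next kernel's secondary entry point by write_park_exit(). + */ + +struct cpu_park_info { + /* Physical address of reserved park memory. */ + unsigned long start; + /* park reserve mem len should be PARK_SECTION_SIZE * NR_CPUS */ + unsigned long len; + /* Virtual address of reserved park memory.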
*/ + unsigned long start_v; +}; + +static struct cpu_park_info park_info = { + .start = 0, + .len = PARK_SECTION_SIZE * NR_CPUS, + .start_v = 0, +}; + +static int __init parse_park_mem(char *p) +{ + if (!p) + return 0; + + park_info.start = PAGE_ALIGN(memparse(p, NULL)); + if (park_info.start == 0) + pr_info("cpu park mem params[%s]", p); + + return 0; +} +early_param("cpuparkmem", parse_park_mem); + +void __init reserve_park_mem(void) +{ + if (park_info.start == 0 || park_info.len == 0) + return; + + park_info.start = PAGE_ALIGN(park_info.start); + park_info.len = PAGE_ALIGN(park_info.len); + + if (!memblock_is_region_memory(park_info.start, park_info.len)) { + pr_warn("region is not memory!"); + goto out; + } + + if (memblock_is_region_reserved(park_info.start, park_info.len)) { + pr_warn("region overlaps reserved memory!"); + goto out; + } + + memblock_remove(park_info.start, park_info.len); + pr_info("mem reserved: 0x%016lx - 0x%016lx (%ld MB)", + park_info.start, park_info.start + park_info.len, + park_info.len >> 20); + + return; +out: + park_info.start = 0; + park_info.len = 0; + return; +} + +static int mmap_cpu_park_mem(void) +{ + if (!park_info.start) + return -ENOMEM; + + if (park_info.start_v) + return 0; + + park_info.start_v = (unsigned long)__ioremap(park_info.start, + park_info.len, + PAGE_KERNEL_EXEC); + if (!park_info.start_v) { + pr_warn("map park memory failed."); + return -ENOMEM; + } + + return 0; +} + +static inline unsigned long cpu_park_section_v(unsigned int cpu) +{ + return park_info.start_v + PARK_SECTION_SIZE * (cpu - 1); +} + +static inline unsigned long cpu_park_section_p(unsigned int cpu) +{ + return park_info.start + PARK_SECTION_SIZE * (cpu - 1); +} + +/* + * Write the secondary_entry to exit section of park state. + * Then the secondary cpu will jump straight into the kernel + * by the secondary_entry. + */ +int write_park_exit(unsigned int cpu) +{ + struct cpu_park_section *park_section; + unsigned long *park_exit; + unsigned long *park_text; + + if (mmap_cpu_park_mem() != 0) + return -EPERM; + + park_section = (struct cpu_park_section *)cpu_park_section_v(cpu); + park_exit = &park_section->exit; + park_text = (unsigned long *)park_section->text; + pr_debug("park_text 0x%lx : 0x%lx, do_cpu_park text 0x%lx : 0x%lx", + (unsigned long)park_text, *park_text, + (unsigned long)do_cpu_park, + *(unsigned long *)do_cpu_park); + + /* + * Test first 8 bytes to determine + * whether needs to write cpu park exit. + */ + if (*park_text == *(unsigned long *)do_cpu_park) { + writeq_relaxed(__pa_symbol(secondary_entry), park_exit); + __flush_dcache_area((__force void *)park_exit, + sizeof(unsigned long)); + flush_icache_range((unsigned long)park_exit, + (unsigned long)(park_exit + 1)); + sev(); + dsb(sy); + isb(); + + pr_debug("Write cpu %u secondary entry 0x%lx to 0x%lx.", + cpu, *park_exit, (unsigned long)park_exit); + pr_info("Boot cpu %u from PARK state.", cpu); + return 0; + } + + return -EPERM; +} + +/* Install cpu park sections for the specific cpu. 
*/ +static void install_cpu_park(unsigned int cpu) +{ + struct cpu_park_section *park_section; + unsigned long *park_exit; + unsigned long *park_magic; + unsigned long park_text_len; + + park_section = (struct cpu_park_section *)cpu_park_section_v(cpu); + pr_debug("Install cpu park on cpu %u park exit 0x%lx park text 0x%lx", + cpu, (unsigned long)park_section, + (unsigned long)(park_section->text)); + + park_exit = &park_section->exit; + park_magic = &park_section->magic; + park_text_len = PARK_SECTION_SIZE - sizeof(struct cpu_park_section); + + *park_exit = 0UL; + *park_magic = 0UL; + memcpy((void *)park_section->text, do_cpu_park, park_text_len); + __flush_dcache_area((void *)park_section, PARK_SECTION_SIZE); +} + +int uninstall_cpu_park(unsigned int cpu) +{ + unsigned long park_section; + + if (mmap_cpu_park_mem() != 0) + return -EPERM; + + park_section = cpu_park_section_v(cpu); + memset((void *)park_section, 0, PARK_SECTION_SIZE); + __flush_dcache_area((void *)park_section, PARK_SECTION_SIZE); + + return 0; +} + +static int cpu_wait_park(unsigned int cpu) +{ + long timeout; + struct cpu_park_section *park_section; + + volatile unsigned long *park_magic; + + park_section = (struct cpu_park_section *)cpu_park_section_v(cpu); + park_magic = &park_section->magic; + + timeout = USEC_PER_SEC; + while (*park_magic != PARK_MAGIC && timeout--) + udelay(1); + + if (timeout > 0) + pr_debug("cpu %u park done.", cpu); + else + pr_err("cpu %u park failed.", cpu); + + return *park_magic == PARK_MAGIC; +} + +static void cpu_park(unsigned int cpu) +{ + unsigned long park_section_p; + unsigned long park_exit_phy; + unsigned long do_park; + typeof(enter_cpu_park) *park; + + park_section_p = cpu_park_section_p(cpu); + park_exit_phy = park_section_p; + pr_debug("Go to park cpu %u exit address 0x%lx", cpu, park_exit_phy); + + do_park = park_section_p + sizeof(struct cpu_park_section); + park = (void *)__pa_symbol(enter_cpu_park); + + cpu_install_idmap(); + park(do_park, park_exit_phy); + unreachable(); +} + +void cpu_park_stop(void) +{ + int cpu = smp_processor_id(); + const struct cpu_operations *ops = NULL; + /* + * Go to cpu park state. + * Otherwise go to cpu die. 
+	 */
+	if (kexec_in_progress && park_info.start_v) {
+		machine_kexec_mask_interrupts();
+		cpu_park(cpu);
+
+		ops = get_cpu_ops(cpu);
+		if (ops && ops->cpu_die)
+			ops->cpu_die(cpu);
+	}
+}
+
+int kexec_smp_send_park(void)
+{
+	unsigned long cpu;
+
+	if (WARN_ON(!kexec_in_progress)) {
+		pr_crit("%s called while kexec is not in progress.", __func__);
+		return -EPERM;
+	}
+
+	if (mmap_cpu_park_mem() != 0) {
+		pr_info("no cpuparkmem, falling back to the normal path.");
+		return -EPERM;
+	}
+
+	local_irq_disable();
+
+	if (num_online_cpus() > 1) {
+		cpumask_t mask;
+
+		cpumask_copy(&mask, cpu_online_mask);
+		cpumask_clear_cpu(smp_processor_id(), &mask);
+
+		for_each_cpu(cpu, &mask)
+			install_cpu_park(cpu);
+		smp_cross_send_stop(&mask);
+
+		/* Wait for other CPUs to park */
+		for_each_cpu(cpu, &mask)
+			cpu_wait_park(cpu);
+		pr_info("smp park other cpus done\n");
+	}
+
+	sdei_mask_local_cpu();
+
+	return 0;
+}
diff --git a/arch/arm64/kernel/cpu-park.S b/arch/arm64/kernel/cpu-park.S
index 07290dabe10cddfcc64765f15d5d36f8c2c245fe..8bcfcf0dc0f5432aabb92db36e5639e31fa8651d 100644
--- a/arch/arm64/kernel/cpu-park.S
+++ b/arch/arm64/kernel/cpu-park.S
@@ -11,7 +11,7 @@
 #include
 #include
-#include
+#include
 #include
 #include
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index f78ce1e6dfa4fa82df6fecfb45a3f4145a01020b..56848478671ea022f556cd6797f10065640b4577 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -553,6 +553,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.matches = has_spectre_v4,
 		.cpu_enable = spectre_v4_enable_mitigation,
 	},
+	{
+		.desc = "Spectre-BHB",
+		.capability = ARM64_SPECTRE_BHB,
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		.matches = is_spectre_bhb_affected,
+		.cpu_enable = spectre_bhb_enable_mitigation,
+	},
 #ifdef CONFIG_ARM64_ERRATUM_1418040
 	{
 		.desc = "ARM erratum 1418040",
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 809dcac24e1822a84eae5db58085e690e68ea5a4..f5ce1e3a532fcc78a3af99a9d685b6b8f9c5bc1a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -65,11 +65,13 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+
 #include
 #include
 #include
@@ -79,6 +81,7 @@
 #include
 #include
 #include
+#include
 #include
 
 /* Kernel representation of AT_HWCAP and AT_HWCAP2 */
@@ -104,6 +107,8 @@ DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
 bool arm64_use_ng_mappings = false;
 EXPORT_SYMBOL(arm64_use_ng_mappings);
 
+DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
+
 /*
  * Flag to indicate if we have computed the system wide
  * capabilities based on the boot time active CPUs.
This @@ -201,6 +206,12 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_id_aa64isar2[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0), + ARM64_FTR_END, +}; + static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0), @@ -301,6 +312,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { }; static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_AFP_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0), ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0), @@ -592,6 +604,7 @@ static const struct __ftr_reg_entry { /* Op1 = 0, CRn = 0, CRm = 6 */ ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0), ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1), + ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2), /* Op1 = 0, CRn = 0, CRm = 7 */ ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0), @@ -826,6 +839,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1); init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0); init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1); + init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2); init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0); init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1); init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2); @@ -1054,6 +1068,8 @@ void update_cpu_features(int cpu, info->reg_id_aa64isar0, boot->reg_id_aa64isar0); taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu, info->reg_id_aa64isar1, boot->reg_id_aa64isar1); + taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu, + info->reg_id_aa64isar2, boot->reg_id_aa64isar2); /* * Differing PARange support is fine as long as all peripherals and @@ -1166,6 +1182,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id) read_sysreg_case(SYS_ID_AA64MMFR2_EL1); read_sysreg_case(SYS_ID_AA64ISAR0_EL1); read_sysreg_case(SYS_ID_AA64ISAR1_EL1); + read_sysreg_case(SYS_ID_AA64ISAR2_EL1); read_sysreg_case(SYS_CNTFRQ_EL0); read_sysreg_case(SYS_CTR_EL0); @@ -1439,6 +1456,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused) int cpu = smp_processor_id(); + if (__this_cpu_read(this_cpu_vector) == vectors) { + const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI); + + __this_cpu_write(this_cpu_vector, v); + } + /* * We don't need to rewrite the page-tables if either we've done * it already or we have KASLR enabled and therefore have not @@ -1792,6 +1815,9 @@ static bool has_mor_nontemporal(const struct arm64_cpu_capabilities *entry) static bool can_clearpage_use_stnp(const struct arm64_cpu_capabilities *entry, int scope) { + if (read_sysreg(dczid_el0) & BIT(DCZID_DZP_SHIFT)) + return true; + return use_clearpage_stnp && has_mor_nontemporal(entry); } @@ -2355,6 +2381,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = { HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, FTR_UNSIGNED, 
ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE), #endif /* CONFIG_ARM64_MTE */ HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV), + HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP), + HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES), {}, }; diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index b512b5503f6e676d7bdb370b854e0bf5177bb84c..d4ff9ae673fa472c6769347507db4d3b9eea02a0 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -54,6 +54,9 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu) struct acpi_lpi_state *lpi; struct acpi_processor *pr = per_cpu(processors, cpu); + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + /* * If the PSCI cpu_suspend function hook has not been initialized * idle states must not be enabled, so bail out @@ -61,9 +64,6 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu) if (!psci_ops.cpu_suspend) return -EOPNOTSUPP; - if (unlikely(!pr || !pr->flags.has_lpi)) - return -EINVAL; - count = pr->power.count - 1; if (count <= 0) return -ENODEV; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 50bbce672a3c158eeb912a0186473a477f920993..97dab8f4634f71ac4b884d102fa7bb20bdc4e7e2 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -95,6 +95,8 @@ static const char *const hwcap_str[] = { [KERNEL_HWCAP_BTI] = "bti", [KERNEL_HWCAP_MTE] = "mte", [KERNEL_HWCAP_ECV] = "ecv", + [KERNEL_HWCAP_AFP] = "afp", + [KERNEL_HWCAP_RPRES] = "rpres", }; #ifdef CONFIG_AARCH32_EL0 @@ -365,6 +367,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1); info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); + info->reg_id_aa64isar2 = read_cpuid(ID_AA64ISAR2_EL1); info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 21bcf1e79e8f36520af632e19fa517539c35fb23..64145bfab48f4f7ac44658be0a475b5e04312f2d 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -62,18 +62,21 @@ .macro kernel_ventry, el, label, regsize = 64 .align 7 -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +.Lventry_start\@: .if \el == 0 -alternative_if ARM64_UNMAP_KERNEL_AT_EL0 + /* + * This must be the first instruction of the EL0 vector entries. It is + * skipped by the trampoline vectors, to trigger the cleanup. + */ + b .Lskip_tramp_vectors_cleanup\@ .if \regsize == 64 mrs x30, tpidrro_el0 msr tpidrro_el0, xzr .else mov x30, xzr .endif -alternative_else_nop_endif +.Lskip_tramp_vectors_cleanup\@: .endif -#endif sub sp, sp, #S_FRAME_SIZE #ifdef CONFIG_VMAP_STACK @@ -120,11 +123,15 @@ alternative_else_nop_endif mrs x0, tpidrro_el0 #endif b el\()\el\()_\label +.org .Lventry_start\@ + 128 // Did we overflow the ventry slot? 
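The new ".org" check in kernel_ventry catches a subtle failure mode: every entry in an arm64 exception vector table is exactly 0x80 bytes, so a vector body that assembles to more than 32 instructions would silently spill into the next slot without it. The arithmetic, written as compile-time checks (the constants come from the architecture, not from this patch):

#include <assert.h>

#define VBAR_SLOTS	16	/* 4 exception types x 4 source states  */
#define SLOT_BYTES	0x80	/* per-entry space, hence ".align 7"    */
#define A64_INSN_BYTES	4	/* all A64 instructions are fixed-width */

static_assert(SLOT_BYTES / A64_INSN_BYTES == 32, "32 insns per vector slot");
static_assert(VBAR_SLOTS * SLOT_BYTES == 2048, "whole table needs .align 11");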
.endm - .macro tramp_alias, dst, sym + .macro tramp_alias, dst, sym, tmp mov_q \dst, TRAMP_VALIAS - add \dst, \dst, #(\sym - .entry.tramp.text) + adr_l \tmp, \sym + add \dst, \dst, \tmp + adr_l \tmp, .entry.tramp.text + sub \dst, \dst, \tmp .endm /* @@ -141,7 +148,7 @@ alternative_cb_end tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2 mov w1, #\state -alternative_cb spectre_v4_patch_fw_mitigation_conduit +alternative_cb smccc_patch_fw_mitigation_conduit nop // Patched to SMC/HVC #0 alternative_cb_end .L__asm_ssbd_skip\@: @@ -249,6 +256,7 @@ alternative_else_nop_endif str w21, [sp, #S_SYSCALLNO] .endif +#ifdef CONFIG_ARM64_PSEUDO_NMI /* Save pmr */ alternative_if ARM64_HAS_IRQ_PRIO_MASKING mrs_s x20, SYS_ICC_PMR_EL1 @@ -256,6 +264,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET msr_s SYS_ICC_PMR_EL1, x20 alternative_else_nop_endif +#endif /* Re-enable tag checking (TCO set on exception entry) */ #ifdef CONFIG_ARM64_MTE @@ -279,6 +288,7 @@ alternative_else_nop_endif disable_daif .endif +#ifdef CONFIG_ARM64_PSEUDO_NMI /* Restore pmr */ alternative_if ARM64_HAS_IRQ_PRIO_MASKING ldr x20, [sp, #S_PMR_SAVE] @@ -288,6 +298,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING dsb sy // Ensure priority change is seen by redistributor .L__skip_pmr_sync\@: alternative_else_nop_endif +#endif ldp x21, x22, [sp, #S_PC] // load ELR, SPSR @@ -339,21 +350,26 @@ alternative_else_nop_endif ldp x24, x25, [sp, #16 * 12] ldp x26, x27, [sp, #16 * 13] ldp x28, x29, [sp, #16 * 14] - ldr lr, [sp, #S_LR] - add sp, sp, #S_FRAME_SIZE // restore sp .if \el == 0 -alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 +alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 + ldr lr, [sp, #S_LR] + add sp, sp, #S_FRAME_SIZE // restore sp + eret +alternative_else_nop_endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 bne 4f - msr far_el1, x30 - tramp_alias x30, tramp_exit_native + msr far_el1, x29 + tramp_alias x30, tramp_exit_native, x29 br x30 4: - tramp_alias x30, tramp_exit_compat + tramp_alias x30, tramp_exit_compat, x29 br x30 #endif .else + ldr lr, [sp, #S_LR] + add sp, sp, #S_FRAME_SIZE // restore sp + /* Ensure any device/NC reads complete */ alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412 @@ -495,6 +511,7 @@ alternative_endif #ifdef CONFIG_PREEMPTION ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count +#ifdef CONFIG_ARM64_PSEUDO_NMI alternative_if ARM64_HAS_IRQ_PRIO_MASKING /* * DA_F were cleared at start of handling. If anything is set in DAIF, @@ -503,6 +520,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING mrs x0, daif orr x24, x24, x0 alternative_else_nop_endif +#endif cbnz x24, 1f // preempt count != 0 || NMI return path bl arm64_preempt_schedule_irq // irq en/disable is done inside 1: @@ -752,12 +770,6 @@ SYM_CODE_END(ret_to_user) .popsection // .entry.text -#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -/* - * Exception vectors trampoline. 
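The reworked tramp_alias above no longer folds the offset into a single assemble-time add: once the trampoline text can span several pages, the displacement may not fit an add immediate, so the macro computes the fixmap alias at run time from the symbol's offset inside .entry.tramp.text. In C terms the computation is just the following (a hypothetical helper mirroring the macro's operands, not kernel code):

/* Alias of a trampoline symbol inside the TRAMP_VALIAS fixmap mapping. */
static inline unsigned long tramp_alias_addr(unsigned long tramp_valias,
					     unsigned long sym,
					     unsigned long tramp_text_start)
{
	return tramp_valias + (sym - tramp_text_start);
}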
- */ - .pushsection ".entry.tramp.text", "ax" - // Move from tramp_pg_dir to swapper_pg_dir .macro tramp_map_kernel, tmp mrs \tmp, ttbr1_el1 @@ -791,12 +803,47 @@ alternative_else_nop_endif */ .endm - .macro tramp_ventry, regsize = 64 + .macro tramp_data_page dst + adr_l \dst, .entry.tramp.text + sub \dst, \dst, PAGE_SIZE + .endm + + .macro tramp_data_read_var dst, var +#ifdef CONFIG_RANDOMIZE_BASE + tramp_data_page \dst + add \dst, \dst, #:lo12:__entry_tramp_data_\var + ldr \dst, [\dst] +#else + ldr \dst, =\var +#endif + .endm + +#define BHB_MITIGATION_NONE 0 +#define BHB_MITIGATION_LOOP 1 +#define BHB_MITIGATION_FW 2 +#define BHB_MITIGATION_INSN 3 + + .macro tramp_ventry, vector_start, regsize, kpti, bhb .align 7 1: .if \regsize == 64 msr tpidrro_el0, x30 // Restored in kernel_ventry .endif + + .if \bhb == BHB_MITIGATION_LOOP + /* + * This sequence must appear before the first indirect branch. i.e. the + * ret out of tramp_ventry. It appears here because x30 is free. + */ + __mitigate_spectre_bhb_loop x30 + .endif // \bhb == BHB_MITIGATION_LOOP + + .if \bhb == BHB_MITIGATION_INSN + clearbhb + isb + .endif // \bhb == BHB_MITIGATION_INSN + + .if \kpti == 1 /* * Defend against branch aliasing attacks by pushing a dummy * entry onto the return stack and using a RET instruction to @@ -806,46 +853,75 @@ alternative_else_nop_endif b . 2: tramp_map_kernel x30 -#ifdef CONFIG_RANDOMIZE_BASE - adr x30, tramp_vectors + PAGE_SIZE alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 - ldr x30, [x30] -#else - ldr x30, =vectors -#endif + tramp_data_read_var x30, vectors alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM - prfm plil1strm, [x30, #(1b - tramp_vectors)] + prfm plil1strm, [x30, #(1b - \vector_start)] alternative_else_nop_endif + msr vbar_el1, x30 - add x30, x30, #(1b - tramp_vectors) isb + .else + ldr x30, =vectors + .endif // \kpti == 1 + + .if \bhb == BHB_MITIGATION_FW + /* + * The firmware sequence must appear before the first indirect branch. + * i.e. the ret out of tramp_ventry. But it also needs the stack to be + * mapped to save/restore the registers the SMC clobbers. + */ + __mitigate_spectre_bhb_fw + .endif // \bhb == BHB_MITIGATION_FW + + add x30, x30, #(1b - \vector_start + 4) ret +.org 1b + 128 // Did we overflow the ventry slot? .endm .macro tramp_exit, regsize = 64 - adr x30, tramp_vectors + tramp_data_read_var x30, this_cpu_vector + this_cpu_offset x29 + ldr x30, [x30, x29] + msr vbar_el1, x30 - tramp_unmap_kernel x30 + ldr lr, [sp, #S_LR] + tramp_unmap_kernel x29 .if \regsize == 64 - mrs x30, far_el1 + mrs x29, far_el1 .endif + add sp, sp, #S_FRAME_SIZE // restore sp eret sb .endm - .align 11 -SYM_CODE_START_NOALIGN(tramp_vectors) + .macro generate_tramp_vector, kpti, bhb +.Lvector_start\@: .space 0x400 - tramp_ventry - tramp_ventry - tramp_ventry - tramp_ventry + .rept 4 + tramp_ventry .Lvector_start\@, 64, \kpti, \bhb + .endr + .rept 4 + tramp_ventry .Lvector_start\@, 32, \kpti, \bhb + .endr + .endm - tramp_ventry 32 - tramp_ventry 32 - tramp_ventry 32 - tramp_ventry 32 +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +/* + * Exception vectors trampoline. + * The order must match __bp_harden_el1_vectors and the + * arm64_bp_harden_el1_vectors enum. 
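Positional coupling is what this comment warns about: slot n of tramp_vectors must mean the same mitigation as slot n of __bp_harden_el1_vectors, because both are indexed by the same enum. The enum itself lives in a header outside this hunk; judging from the this_cpu_set_vectors() callers later in the patch, it presumably looks roughly like this (names grounded in the patch, ordering assumed):

/* Assumed shape of the shared index; the real definition is not in this diff. */
enum arm64_bp_harden_el1_vectors {
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	EL1_VECTOR_BHB_LOOP,		/* __mitigate_spectre_bhb_loop      */
	EL1_VECTOR_BHB_FW,		/* firmware ARCH_WORKAROUND_3 call  */
	EL1_VECTOR_BHB_CLEAR_INSN,	/* clearbhb instruction plus isb    */
#endif
	EL1_VECTOR_KPTI,		/* plain kpti trampoline, no bhb    */
};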
+ */ + .pushsection ".entry.tramp.text", "ax" + .align 11 +SYM_CODE_START_NOALIGN(tramp_vectors) +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_LOOP + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_FW + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_INSN +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ + generate_tramp_vector kpti=1, bhb=BHB_MITIGATION_NONE SYM_CODE_END(tramp_vectors) SYM_CODE_START(tramp_exit_native) @@ -862,12 +938,56 @@ SYM_CODE_END(tramp_exit_compat) .pushsection ".rodata", "a" .align PAGE_SHIFT SYM_DATA_START(__entry_tramp_data_start) +__entry_tramp_data_vectors: .quad vectors +#ifdef CONFIG_ARM_SDE_INTERFACE +__entry_tramp_data___sdei_asm_handler: + .quad __sdei_asm_handler +#endif /* CONFIG_ARM_SDE_INTERFACE */ +__entry_tramp_data_this_cpu_vector: + .quad this_cpu_vector SYM_DATA_END(__entry_tramp_data_start) .popsection // .rodata #endif /* CONFIG_RANDOMIZE_BASE */ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ +/* + * Exception vectors for spectre mitigations on entry from EL1 when + * kpti is not in use. + */ + .macro generate_el1_vector, bhb +.Lvector_start\@: + kernel_ventry 1, sync_invalid // Synchronous EL1t + kernel_ventry 1, irq_invalid // IRQ EL1t + kernel_ventry 1, fiq_invalid // FIQ EL1t + kernel_ventry 1, error_invalid // Error EL1t + + kernel_ventry 1, sync // Synchronous EL1h + kernel_ventry 1, irq // IRQ EL1h + kernel_ventry 1, fiq_invalid // FIQ EL1h + kernel_ventry 1, error // Error EL1h + + .rept 4 + tramp_ventry .Lvector_start\@, 64, 0, \bhb + .endr + .rept 4 + tramp_ventry .Lvector_start\@, 32, 0, \bhb + .endr + .endm + +/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */ + .pushsection ".entry.text", "ax" + .align 11 +SYM_CODE_START(__bp_harden_el1_vectors) +#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY + generate_el1_vector bhb=BHB_MITIGATION_LOOP + generate_el1_vector bhb=BHB_MITIGATION_FW + generate_el1_vector bhb=BHB_MITIGATION_INSN +#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ +SYM_CODE_END(__bp_harden_el1_vectors) + .popsection + + /* * Register switch for AArch64. The callee-saved registers need to be saved * and restored. 
On entry: @@ -956,13 +1076,7 @@ SYM_CODE_START(__sdei_asm_entry_trampoline) */ 1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)] -#ifdef CONFIG_RANDOMIZE_BASE - adr x4, tramp_vectors + PAGE_SIZE - add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler - ldr x4, [x4] -#else - ldr x4, =__sdei_asm_handler -#endif + tramp_data_read_var x4, __sdei_asm_handler br x4 SYM_CODE_END(__sdei_asm_entry_trampoline) NOKPROBE(__sdei_asm_entry_trampoline) @@ -985,13 +1099,6 @@ SYM_CODE_END(__sdei_asm_exit_trampoline) NOKPROBE(__sdei_asm_exit_trampoline) .ltorg .popsection // .entry.tramp.text -#ifdef CONFIG_RANDOMIZE_BASE -.pushsection ".rodata", "a" -SYM_DATA_START(__sdei_asm_trampoline_next_handler) - .quad __sdei_asm_handler -SYM_DATA_END(__sdei_asm_trampoline_next_handler) -.popsection // .rodata -#endif /* CONFIG_RANDOMIZE_BASE */ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ /* @@ -1099,7 +1206,7 @@ alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 alternative_else_nop_endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 - tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline + tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3 br x5 #endif SYM_CODE_END(__sdei_asm_handler) diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 6c0de2f60ea96a19fc15aa098604b833cae0a2a6..7d4fdf9745428a6f8aaad4149571f99d4128e949 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -216,8 +216,8 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg) int i, ret = 0; struct aarch64_insn_patch *pp = arg; - /* The first CPU becomes master */ - if (atomic_inc_return(&pp->cpu_count) == 1) { + /* The last CPU becomes master */ + if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) { for (i = 0; ret == 0 && i < pp->insn_cnt; i++) ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i], pp->new_insns[i]); diff --git a/arch/arm64/kernel/ipi_nmi.c b/arch/arm64/kernel/ipi_nmi.c index 3b105852fc176c757ca001ba1208b143f2b091fa..2cf28e511b23b4d045c80864d4adf2d0bfb41d84 100644 --- a/arch/arm64/kernel/ipi_nmi.c +++ b/arch/arm64/kernel/ipi_nmi.c @@ -33,12 +33,24 @@ void arm64_send_nmi(cpumask_t *mask) __ipi_send_mask(ipi_nmi_desc, mask); } +static void ipi_cpu_backtrace(void *info) +{ + printk_safe_enter(); + nmi_cpu_backtrace(get_irq_regs()); + printk_safe_exit(); +} + +static void arm64_send_ipi(cpumask_t *mask) +{ + smp_call_function_many(mask, ipi_cpu_backtrace, NULL, false); +} + bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) { if (!ipi_nmi_desc) - return false; - - nmi_trigger_cpumask_backtrace(mask, exclude_self, arm64_send_nmi); + nmi_trigger_cpumask_backtrace(mask, exclude_self, arm64_send_ipi); + else + nmi_trigger_cpumask_backtrace(mask, exclude_self, arm64_send_nmi); return true; } diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index b181e0544b79909b8d514e69514c57af8a8cb258..9181d2856be339463e26d6abf10a7b36e61cbc23 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -151,14 +151,15 @@ u64 __init kaslr_early_init(u64 dt_phys) /* use the top 16 bits to randomize the linear region */ memstart_offset_seed = seed >> 48; - if (IS_ENABLED(CONFIG_KASAN)) + if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) && (IS_ENABLED(CONFIG_KASAN))) /* - * KASAN does not expect the module region to intersect the - * vmalloc region, since shadow memory is allocated for each - * module at load time, whereas the vmalloc region is shadowed - * by KASAN zero pages. 
So keep modules out of the vmalloc - * region if KASAN is enabled, and put the kernel well within - * 4 GB of the module region. + * KASAN without KASAN_VMALLOC does not expect the module region + * to intersect the vmalloc region, since shadow memory is + * allocated for each module at load time, whereas the vmalloc + * region is shadowed by KASAN zero pages. So keep modules + * out of the vmalloc region if KASAN is enabled without + * KASAN_VMALLOC, and put the kernel well within 4 GB of the + * module region. */ return offset % SZ_2G; diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 2c292008440cd1ff557af2c1d5db5764ba28ff51..cda56066d85962238ee520657691d1f9ec147ced 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -30,11 +30,11 @@ #include #include #include +#include #include #include #include -#ifdef CONFIG_ARM64_MODULE_PLTS #define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32)) #define CHECK_JUMP_RANGE LJMP_INSN_SIZE @@ -46,11 +46,6 @@ static inline bool offset_in_range(unsigned long pc, unsigned long addr, return (offset >= -range && offset < range); } -#else -#define MAX_SIZE_TO_CHECK sizeof(u32) -#define CHECK_JUMP_RANGE 1 -#endif - #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY /* * The instruction set on arm64 is A64. @@ -73,6 +68,7 @@ struct klp_func_list { struct walk_stackframe_args { int enable; struct klp_func_list *check_funcs; + struct module *mod; int ret; }; @@ -86,16 +82,6 @@ static inline unsigned long klp_size_to_check(unsigned long func_size, return size; } -static inline int klp_compare_address(unsigned long pc, unsigned long func_addr, - const char *func_name, unsigned long check_size) -{ - if (pc >= func_addr && pc < func_addr + check_size) { - pr_err("func %s is in use!\n", func_name); - return -EBUSY; - } - return 0; -} - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; @@ -147,7 +133,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { if (enable) { - if (func->force == KLP_ENFORCEMENT) + if (func->patched || func->force == KLP_ENFORCEMENT) continue; /* * When enable, checking the currently @@ -270,21 +256,11 @@ static void free_list(struct klp_func_list **funcs) } } -int klp_check_calltrace(struct klp_patch *patch, int enable) +static int do_check_calltrace(struct walk_stackframe_args *args, + bool (*fn)(void *, unsigned long)) { struct task_struct *g, *t; struct stackframe frame; - int ret = 0; - struct klp_func_list *check_funcs = NULL; - struct walk_stackframe_args args = { - .enable = enable, - .ret = 0 - }; - - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) - goto out; - args.check_funcs = check_funcs; for_each_process_thread(g, t) { /* @@ -297,7 +273,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) if (t == current) { /* current on this CPU */ frame.fp = (unsigned long)__builtin_frame_address(0); - frame.pc = (unsigned long)klp_check_calltrace; + frame.pc = (unsigned long)do_check_calltrace; } else if (strncmp(t->comm, "migration/", 10) == 0) { /* * current on other CPU @@ -306,35 +282,114 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) * task_comm here, because we can't get the * cpu_curr(task_cpu(t))). This assumes that no * other thread will pretend to be a stopper via - * task_comm.  + * task_comm. 
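The refactor in this hunk reduces klp_check_calltrace() and the new arch_klp_module_check_calltrace() to a single walker plus a caller-supplied predicate: walk every thread's saved frames and veto the transition if any PC is unsafe. A self-contained toy of the check itself, in the spirit of klp_compare_address(); the types, addresses, and the -EBUSY constant are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct task { const char *comm; unsigned long pcs[3]; };

/* Veto if a saved pc lands inside the [start, end) range being patched. */
static bool pc_outside_range(void *data, unsigned long pc, int *ret)
{
	unsigned long *range = data;

	if (pc >= range[0] && pc < range[1]) {
		*ret = -16;	/* stand-in for -EBUSY */
		return false;
	}
	return true;
}

int main(void)
{
	struct task tasks[] = {
		{ "idle",   { 0x1000, 0x2000, 0x3000 } },
		{ "worker", { 0x1000, 0x4008, 0x3000 } },	/* in range */
	};
	unsigned long patched[] = { 0x4000, 0x4040 };
	int ret = 0;

	for (int t = 0; t < 2 && !ret; t++)
		for (int i = 0; i < 3; i++)
			if (!pc_outside_range(patched, tasks[t].pcs[i], &ret)) {
				printf("task %s blocks patching\n", tasks[t].comm);
				break;
			}
	return ret ? 1 : 0;
}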
*/ continue; } else { frame.fp = thread_saved_fp(t); frame.pc = thread_saved_pc(t); } - if (check_funcs != NULL) { - start_backtrace(&frame, frame.fp, frame.pc); - walk_stackframe(t, &frame, klp_check_jump_func, &args); - if (args.ret) { - ret = args.ret; - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - goto out; - } + start_backtrace(&frame, frame.fp, frame.pc); + walk_stackframe(t, &frame, fn, args); + if (args->ret) { + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; } } + return 0; +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + struct klp_func_list *check_funcs = NULL; + struct walk_stackframe_args args = { + .enable = enable, + .ret = 0 + }; + + ret = klp_check_activeness_func(patch, enable, &check_funcs); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; + } + if (!check_funcs) + goto out; + args.check_funcs = check_funcs; + ret = do_check_calltrace(&args, klp_check_jump_func); out: free_list(&check_funcs); return ret; } + +static bool check_module_calltrace(void *data, unsigned long pc) +{ + struct walk_stackframe_args *args = data; + + if (within_module_core(pc, args->mod)) { + pr_err("module %s is in use!\n", args->mod->name); + args->ret = -EBUSY; + return false; + } + return true; +} + +int arch_klp_module_check_calltrace(void *data) +{ + struct walk_stackframe_args args = { + .mod = (struct module *)data, + .ret = 0 + }; + + return do_check_calltrace(&args, check_module_calltrace); +} + +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + u32 insn = BRK64_OPCODE_KLP; + u32 *addr = (u32 *)old_func; + + arch_data->saved_opcode = le32_to_cpu(*addr); + aarch64_insn_patch_text(&old_func, &insn, 1); + return 0; +} + +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + aarch64_insn_patch_text(&old_func, &arch_data->saved_opcode, 1); +} + +static int klp_breakpoint_handler(struct pt_regs *regs, unsigned int esr) +{ + void *brk_func = NULL; + unsigned long addr = instruction_pointer(regs); + + brk_func = klp_get_brk_func((void *)addr); + if (!brk_func) { + pr_warn("Unrecoverable livepatch detected.\n"); + BUG(); + } + + instruction_pointer_set(regs, (unsigned long)brk_func); + return 0; +} + +static struct break_hook klp_break_hook = { + .imm = KLP_BRK_IMM, + .fn = klp_breakpoint_handler, +}; + +void arch_klp_init(void) +{ + register_kernel_break_hook(&klp_break_hook); +} #endif long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) { long ret; -#ifdef CONFIG_ARM64_MODULE_PLTS int i; for (i = 0; i < LJMP_INSN_SIZE; i++) { @@ -343,116 +398,89 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) if (ret) break; } -#else - ret = aarch64_insn_read(old_func, &arch_data->old_insn); -#endif return ret; } -int arch_klp_patch_func(struct klp_func *func) +static int do_patch(unsigned long pc, unsigned long new_addr) { - struct klp_func_node *func_node; - unsigned long pc, new_addr; - u32 insn; -#ifdef CONFIG_ARM64_MODULE_PLTS - int i; u32 insns[LJMP_INSN_SIZE]; -#endif + int ret; - func_node = func->func_node; - list_add_rcu(&func->stack_node, &func_node->func_stack); - pc = (unsigned long)func->old_func; - new_addr = (unsigned long)func->new_func; -#ifdef CONFIG_ARM64_MODULE_PLTS if (offset_in_range(pc, new_addr, SZ_128M)) { - insn = aarch64_insn_gen_branch_imm(pc, new_addr, - AARCH64_INSN_BRANCH_NOLINK); - if 
(aarch64_insn_patch_text_nosync((void *)pc, insn)) - goto ERR_OUT; + insns[0] = aarch64_insn_gen_branch_imm(pc, new_addr, + AARCH64_INSN_BRANCH_NOLINK); + ret = aarch64_insn_patch_text_nosync((void *)pc, insns[0]); + if (ret) { + pr_err("patch instruction small range failed, ret=%d\n", ret); + return -EPERM; + } } else { - insns[0] = cpu_to_le32(0x92800010 | (((~new_addr) & 0xffff)) << 5); - insns[1] = cpu_to_le32(0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5); - insns[2] = cpu_to_le32(0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5); - insns[3] = cpu_to_le32(0xd61f0200); +#ifdef CONFIG_ARM64_MODULE_PLTS + int i; + + insns[0] = 0x92800010 | (((~new_addr) & 0xffff)) << 5; + insns[1] = 0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5; + insns[2] = 0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5; + insns[3] = 0xd61f0200; for (i = 0; i < LJMP_INSN_SIZE; i++) { - if (aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i])) - goto ERR_OUT; + ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i]); + if (ret) { + pr_err("patch instruction(%d) large range failed, ret=%d\n", + i, ret); + return -EPERM; + } } - } #else - insn = aarch64_insn_gen_branch_imm(pc, new_addr, - AARCH64_INSN_BRANCH_NOLINK); - - if (aarch64_insn_patch_text_nosync((void *)pc, insn)) - goto ERR_OUT; + /* + * When offset from 'new_addr' to 'pc' is out of SZ_128M range but + * CONFIG_ARM64_MODULE_PLTS not enabled, we should stop patching. + */ + pr_err("new address out of range\n"); + return -EFAULT; #endif + } return 0; +} -ERR_OUT: - list_del_rcu(&func->stack_node); +int arch_klp_patch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + int ret; - return -EPERM; + func_node = func->func_node; + list_add_rcu(&func->stack_node, &func_node->func_stack); + ret = do_patch((unsigned long)func->old_func, (unsigned long)func->new_func); + if (ret) + list_del_rcu(&func->stack_node); + return ret; } void arch_klp_unpatch_func(struct klp_func *func) { struct klp_func_node *func_node; struct klp_func *next_func; - unsigned long pc, new_addr; - u32 insn; -#ifdef CONFIG_ARM64_MODULE_PLTS + unsigned long pc; int i; - u32 insns[LJMP_INSN_SIZE]; -#endif + int ret; func_node = func->func_node; pc = (unsigned long)func_node->old_func; - if (list_is_singular(&func_node->func_stack)) { -#ifdef CONFIG_ARM64_MODULE_PLTS - for (i = 0; i < LJMP_INSN_SIZE; i++) - insns[i] = func_node->arch_data.old_insns[i]; -#else - insn = func_node->arch_data.old_insn; -#endif - list_del_rcu(&func->stack_node); - -#ifdef CONFIG_ARM64_MODULE_PLTS + list_del_rcu(&func->stack_node); + if (list_empty(&func_node->func_stack)) { for (i = 0; i < LJMP_INSN_SIZE; i++) { - aarch64_insn_patch_text_nosync(((u32 *)pc) + i, - insns[i]); + ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, + func_node->arch_data.old_insns[i]); + if (ret) { + pr_err("restore instruction(%d) failed, ret=%d\n", i, ret); + return; + } } -#else - aarch64_insn_patch_text_nosync((void *)pc, insn); -#endif } else { - list_del_rcu(&func->stack_node); next_func = list_first_or_null_rcu(&func_node->func_stack, struct klp_func, stack_node); if (WARN_ON(!next_func)) return; - - new_addr = (unsigned long)next_func->new_func; -#ifdef CONFIG_ARM64_MODULE_PLTS - if (offset_in_range(pc, new_addr, SZ_128M)) { - insn = aarch64_insn_gen_branch_imm(pc, new_addr, - AARCH64_INSN_BRANCH_NOLINK); - - aarch64_insn_patch_text_nosync((void *)pc, insn); - } else { - insns[0] = cpu_to_le32(0x92800010 | (((~new_addr) & 0xffff)) << 5); - insns[1] = cpu_to_le32(0xf2a00010 | (((new_addr >> 16) & 
0xffff)) << 5); - insns[2] = cpu_to_le32(0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5); - insns[3] = cpu_to_le32(0xd61f0200); - for (i = 0; i < LJMP_INSN_SIZE; i++) - aarch64_insn_patch_text_nosync(((u32 *)pc) + i, - insns[i]); - } -#else - insn = aarch64_insn_gen_branch_imm(pc, new_addr, - AARCH64_INSN_BRANCH_NOLINK); - - aarch64_insn_patch_text_nosync((void *)pc, insn); -#endif + do_patch(pc, (unsigned long)next_func->new_func); } } diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 63634b4d72c158f3e3ea487b3ae36d8a75f0a398..59c648d518488869b6cc4c6b227933aa3419bedd 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -149,6 +149,7 @@ int load_other_segments(struct kimage *image, initrd_len, cmdline, 0); if (!dtb) { pr_err("Preparing for new dtb failed\n"); + ret = -EINVAL; goto out_err; } diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 031be3c6a9d5dcba01135555a1b8d5bea2ed98f8..acd557c83b6f333b05ae01d44e4ff813ed7ff9a3 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -39,13 +39,14 @@ void *module_alloc(unsigned long size) NUMA_NO_NODE, __builtin_return_address(0)); if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) && - !IS_ENABLED(CONFIG_KASAN)) + (IS_ENABLED(CONFIG_KASAN_VMALLOC) || !IS_ENABLED(CONFIG_KASAN))) /* - * KASAN can only deal with module allocations being served - * from the reserved module region, since the remainder of - * the vmalloc region is already backed by zero shadow pages, - * and punching holes into it is non-trivial. Since the module - * region is not randomized when KASAN is enabled, it is even + * KASAN without KASAN_VMALLOC can only deal with module + * allocations being served from the reserved module region, + * since the remainder of the vmalloc region is already + * backed by zero shadow pages, and punching holes into it + * is non-trivial. Since the module region is not randomized + * when KASAN is enabled without KASAN_VMALLOC, it is even * less likely that the module region gets exhausted, so we * can simply omit this fallback in that case. 
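The large-range branch emitted by do_patch() earlier in this livepatch hunk is four hand-encoded instructions using x16 as scratch: MOVN seeds x16 with the inverted low 16 bits (leaving bits 63:48 all ones, which is why this only reaches kernel-space targets), two MOVKs patch in bits 31:16 and 47:32, and BR x16 jumps. A standalone sketch that reproduces those encodings:

#include <stdint.h>
#include <stdio.h>

/* Rebuild the four-instruction long jump from do_patch(); Rd/Rn = x16. */
static void build_long_jump(uint64_t target, uint32_t insn[4])
{
	insn[0] = 0x92800010u | (uint32_t)((~target & 0xffff) << 5);	    /* movn x16, #(~t)[15:0]        */
	insn[1] = 0xf2a00010u | (uint32_t)(((target >> 16) & 0xffff) << 5); /* movk x16, #t[31:16], lsl #16 */
	insn[2] = 0xf2c00010u | (uint32_t)(((target >> 32) & 0xffff) << 5); /* movk x16, #t[47:32], lsl #32 */
	insn[3] = 0xd61f0200u;						    /* br   x16                     */
}

int main(void)
{
	uint32_t insn[4];

	/* Works because kernel VAs already have bits 63:48 set, as MOVN leaves them. */
	build_long_jump(0xffff800010123456UL, insn);
	for (int i = 0; i < 4; i++)
		printf("0x%08x\n", insn[i]);
	return 0;
}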
*/ diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c index 847b3c8b12182e8e160a94fb10cea9e72bafa012..ecbd2bf2e6fb3b595775e9f058edc07f34e5e4f4 100644 --- a/arch/arm64/kernel/paravirt.c +++ b/arch/arm64/kernel/paravirt.c @@ -38,7 +38,7 @@ struct paravirt_patch_template pv_ops = { #endif .lock.vcpu_is_preempted = __native_vcpu_is_preempted, }; -EXPORT_SYMBOL_GPL(pv_ops); +EXPORT_SYMBOL(pv_ops); struct pv_time_stolen_time_region { struct pvclock_vcpu_stolen_time *kaddr; diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c index 0fc67c8a2b95fa268c0bd69708e4ec28ebf2efb4..47eb9b208e9bb275f6c9e65f17438eaa4e2b437b 100644 --- a/arch/arm64/kernel/perf_callchain.c +++ b/arch/arm64/kernel/perf_callchain.c @@ -102,7 +102,9 @@ compat_user_backtrace(struct a32_frame_tail __user *tail, void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } @@ -147,9 +149,10 @@ static bool callchain_trace(void *data, unsigned long pc) void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct stackframe frame; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } @@ -160,18 +163,21 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, unsigned long perf_instruction_pointer(struct pt_regs *regs) { - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) - return perf_guest_cbs->get_guest_ip(); + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + + if (guest_cbs && guest_cbs->is_in_guest()) + return guest_cbs->get_guest_ip(); return instruction_pointer(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); int misc = 0; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { - if (perf_guest_cbs->is_user_mode()) + if (guest_cbs && guest_cbs->is_in_guest()) { + if (guest_cbs->is_user_mode()) misc |= PERF_RECORD_MISC_GUEST_USER; else misc |= PERF_RECORD_MISC_GUEST_KERNEL; diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index da5ed561e96240909da4c7b84a562545e4701a0b..7317c79db9e8ba2f2302894a34ba4b25887474a9 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -57,6 +57,7 @@ #include #include #include +#include #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include @@ -151,10 +152,8 @@ void arch_cpu_idle_dead(void) */ void machine_shutdown(void) { -#ifdef CONFIG_ARM64_CPU_PARK if (kexec_smp_send_park() == 0) return; -#endif smp_shutdown_nonboot_cpus(reboot_cpu); } @@ -507,34 +506,26 @@ static void entry_task_switch(struct task_struct *next) /* * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT. - * Assuming the virtual counter is enabled at the beginning of times: - * - * - disable access when switching from a 64bit task to a 32bit task - * - enable access when switching from a 32bit task to a 64bit task + * Ensure access is disabled when switching to a 32bit task, ensure + * access is enabled when switching to a 64bit task. 
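The rewrite below leans on sysreg_clear_set(reg, clear, set), a read-modify-write helper that, assuming the upstream definition, only writes the register back when the value actually changes; the two calls therefore toggle just the ARCH_TIMER_USR_VCT_ACCESS_EN bit without redundant msr traffic. Its semantics modeled on a plain variable:

#include <stdint.h>

static uint64_t cntkctl_el1;	/* stand-in for the system register */

static void sysreg_clear_set_model(uint64_t clear, uint64_t set)
{
	uint64_t val = cntkctl_el1;		/* mrs                         */
	uint64_t new = (val & ~clear) | set;

	if (new != val)
		cntkctl_el1 = new;		/* msr, skipped when unchanged */
}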
*/ -static void erratum_1418040_thread_switch(struct task_struct *prev, - struct task_struct *next) +static void erratum_1418040_thread_switch(struct task_struct *next) { - bool prev32, next32; - u64 val; - - if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040)) - return; - - prev32 = is_a32_compat_thread(task_thread_info(prev)); - next32 = is_a32_compat_thread(task_thread_info(next)); - - if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) + if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) || + !this_cpu_has_cap(ARM64_WORKAROUND_1418040)) return; - val = read_sysreg(cntkctl_el1); - - if (!next32) - val |= ARCH_TIMER_USR_VCT_ACCESS_EN; + if (is_a32_compat_thread(task_thread_info(next))) + sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0); else - val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN; + sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN); +} - write_sysreg(val, cntkctl_el1); +static void erratum_1418040_new_exec(void) +{ + preempt_disable(); + erratum_1418040_thread_switch(current); + preempt_enable(); } /* @@ -551,7 +542,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev, contextidr_thread_switch(next); entry_task_switch(next); ssbs_thread_switch(next); - erratum_1418040_thread_switch(prev, next); + erratum_1418040_thread_switch(next); /* * Complete any pending TLB or cache maintenance on this CPU in case @@ -619,6 +610,7 @@ void arch_setup_new_exec(void) current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0; ptrauth_thread_init_user(current); + erratum_1418040_new_exec(); if (task_spec_ssb_noexec(current)) { arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS, diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 09ebcd306f1b90fe04419806d0e26def9ac50ef2..e807f77737e053507691a2bdd8a438f9a1e1efc0 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -18,6 +18,7 @@ */ #include +#include #include #include #include @@ -27,6 +28,8 @@ #include #include #include +#include +#include /* * We try to ensure that the mitigation state can never change as the result of @@ -95,14 +98,51 @@ static bool spectre_v2_mitigations_off(void) return ret; } +static const char *get_bhb_affected_string(enum mitigation_state bhb_state) +{ + switch (bhb_state) { + case SPECTRE_UNAFFECTED: + return ""; + default: + case SPECTRE_VULNERABLE: + return ", but not BHB"; + case SPECTRE_MITIGATED: + return ", BHB"; + } +} + +static bool _unprivileged_ebpf_enabled(void) +{ +#ifdef CONFIG_BPF_SYSCALL + return !sysctl_unprivileged_bpf_disabled; +#else + return false; +#endif +} + ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) { + enum mitigation_state bhb_state = arm64_get_spectre_bhb_state(); + const char *bhb_str = get_bhb_affected_string(bhb_state); + const char *v2_str = "Branch predictor hardening"; + switch (spectre_v2_state) { case SPECTRE_UNAFFECTED: - return sprintf(buf, "Not affected\n"); + if (bhb_state == SPECTRE_UNAFFECTED) + return sprintf(buf, "Not affected\n"); + + /* + * Platforms affected by Spectre-BHB can't report + * "Not affected" for Spectre-v2. 
+ */ + v2_str = "CSV2"; + fallthrough; case SPECTRE_MITIGATED: - return sprintf(buf, "Mitigation: Branch predictor hardening\n"); + if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled()) + return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n"); + + return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str); case SPECTRE_VULNERABLE: fallthrough; default: @@ -196,9 +236,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, __flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K); } +static DEFINE_RAW_SPINLOCK(bp_lock); static void install_bp_hardening_cb(bp_hardening_cb_t fn) { - static DEFINE_RAW_SPINLOCK(bp_lock); int cpu, slot = -1; const char *hyp_vecs_start = __smccc_workaround_1_smc; const char *hyp_vecs_end = __smccc_workaround_1_smc + @@ -229,6 +269,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn) __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); __this_cpu_write(bp_hardening_data.fn, fn); + __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start); raw_spin_unlock(&bp_lock); } #else @@ -572,9 +613,9 @@ void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt, * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction * to call into firmware to adjust the mitigation state. */ -void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt, - __le32 *origptr, - __le32 *updptr, int nr_inst) +void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt, + __le32 *origptr, + __le32 *updptr, int nr_inst) { u32 insn; @@ -788,3 +829,306 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) return -ENODEV; } } + +/* + * Spectre BHB. + * + * A CPU is either: + * - Mitigated by a branchy loop a CPU specific number of times, and listed + * in our "loop mitigated list". + * - Mitigated in software by the firmware Spectre v2 call. + * - Has the ClearBHB instruction to perform the mitigation. + * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no + * software mitigation in the vectors is needed. + * - Has CSV2.3, so is unaffected. + */ +static enum mitigation_state spectre_bhb_state; + +enum mitigation_state arm64_get_spectre_bhb_state(void) +{ + return spectre_bhb_state; +} + +/* + * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any + * SCOPE_SYSTEM call will give the right answer. 
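The ordering constraint in this comment is easy to violate from new callers, so it is worth spelling out: each SCOPE_LOCAL_CPU call folds that cpu's loop count into a static maximum, and only after every cpu type has been probed does a SCOPE_SYSTEM query return the final k. A compact, runnable model of the pattern (the k values imitate a mixed big.LITTLE system):

#include <stdio.h>

enum scope { SCOPE_LOCAL_CPU, SCOPE_SYSTEM };

static unsigned char max_bhb_k;

static unsigned char loop_affected(enum scope s, unsigned char this_cpu_k)
{
	if (s == SCOPE_LOCAL_CPU) {
		if (this_cpu_k > max_bhb_k)	/* accumulate, like max()  */
			max_bhb_k = this_cpu_k;
		return this_cpu_k;
	}
	return max_bhb_k;			/* only valid once all ran */
}

int main(void)
{
	unsigned char ks[] = { 8, 32, 24 };	/* per-cpu-type loop counts */

	for (int cpu = 0; cpu < 3; cpu++)
		loop_affected(SCOPE_LOCAL_CPU, ks[cpu]);
	printf("system needs k = %u\n", (unsigned)loop_affected(SCOPE_SYSTEM, 0));
	return 0;
}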
+ */ +u8 spectre_bhb_loop_affected(int scope) +{ + u8 k = 0; + static u8 max_bhb_k; + + if (scope == SCOPE_LOCAL_CPU) { + static const struct midr_range spectre_bhb_k32_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), + {}, + }; + static const struct midr_range spectre_bhb_k24_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), + {}, + }; + static const struct midr_range spectre_bhb_k8_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + {}, + }; + + if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list)) + k = 32; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) + k = 24; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) + k = 8; + + max_bhb_k = max(max_bhb_k, k); + } else { + k = max_bhb_k; + } + + return k; +} + +static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void) +{ + int ret; + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, + ARM_SMCCC_ARCH_WORKAROUND_3, &res); + + ret = res.a0; + switch (ret) { + case SMCCC_RET_SUCCESS: + return SPECTRE_MITIGATED; + case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED: + return SPECTRE_UNAFFECTED; + default: + fallthrough; + case SMCCC_RET_NOT_SUPPORTED: + return SPECTRE_VULNERABLE; + } +} + +static bool is_spectre_bhb_fw_affected(int scope) +{ + static bool system_affected; + enum mitigation_state fw_state; + bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE; + static const struct midr_range spectre_bhb_firmware_mitigated_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), + {}, + }; + bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(), + spectre_bhb_firmware_mitigated_list); + + if (scope != SCOPE_LOCAL_CPU) + return system_affected; + + fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); + if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) { + system_affected = true; + return true; + } + + return false; +} + +static bool supports_ecbhb(int scope) +{ + u64 mmfr1; + + if (scope == SCOPE_LOCAL_CPU) + mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1); + else + mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + + return cpuid_feature_extract_unsigned_field(mmfr1, + ID_AA64MMFR1_ECBHB_SHIFT); +} + +bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, + int scope) +{ + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); + + if (supports_csv2p3(scope)) + return false; + + if (supports_clearbhb(scope)) + return true; + + if (spectre_bhb_loop_affected(scope)) + return true; + + if (is_spectre_bhb_fw_affected(scope)) + return true; + + return false; +} + +static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) +{ + const char *v = arm64_get_bp_hardening_vector(slot); + + if (slot < 0) + return; + + __this_cpu_write(this_cpu_vector, v); + + /* + * When KPTI is in use, the vectors are switched when exiting to + * user-space. 
+ */ + if (arm64_kernel_unmapped_at_el0()) + return; + + write_sysreg(v, vbar_el1); + isb(); +} + +#ifdef CONFIG_KVM +static int kvm_bhb_get_vecs_size(const char *start) +{ + if (start == __smccc_workaround_3_smc) + return __SMCCC_WORKAROUND_3_SMC_SZ; + else if (start == __spectre_bhb_loop_k8 || + start == __spectre_bhb_loop_k24 || + start == __spectre_bhb_loop_k32) + return __SPECTRE_BHB_LOOP_SZ; + else if (start == __spectre_bhb_clearbhb) + return __SPECTRE_BHB_CLEARBHB_SZ; + + return 0; +} + +static void kvm_setup_bhb_slot(const char *hyp_vecs_start) +{ + int cpu, slot = -1, size; + const char *hyp_vecs_end; + + if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available()) + return; + + size = kvm_bhb_get_vecs_size(hyp_vecs_start); + if (WARN_ON_ONCE(!hyp_vecs_start || !size)) + return; + hyp_vecs_end = hyp_vecs_start + size; + + raw_spin_lock(&bp_lock); + for_each_possible_cpu(cpu) { + if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) { + slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu); + break; + } + } + + if (slot == -1) { + slot = atomic_inc_return(&arm64_el2_vector_last_slot); + BUG_ON(slot >= BP_HARDEN_EL2_SLOTS); + __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end); + } + + __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot); + __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start); + raw_spin_unlock(&bp_lock); +} +#else +#define __smccc_workaround_3_smc NULL +#define __spectre_bhb_loop_k8 NULL +#define __spectre_bhb_loop_k24 NULL +#define __spectre_bhb_loop_k32 NULL +#define __spectre_bhb_clearbhb NULL + +static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { } +#endif /* CONFIG_KVM */ + +void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) +{ + enum mitigation_state fw_state, state = SPECTRE_VULNERABLE; + + if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU)) + return; + + if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) { + /* No point mitigating Spectre-BHB alone. 
*/ + } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) { + pr_info_once("spectre-bhb mitigation disabled by compile time option\n"); + } else if (cpu_mitigations_off()) { + pr_info_once("spectre-bhb mitigation disabled by command line option\n"); + } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) { + state = SPECTRE_MITIGATED; + } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) { + kvm_setup_bhb_slot(__spectre_bhb_clearbhb); + this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN); + + state = SPECTRE_MITIGATED; + } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) { + switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) { + case 8: + kvm_setup_bhb_slot(__spectre_bhb_loop_k8); + break; + case 24: + kvm_setup_bhb_slot(__spectre_bhb_loop_k24); + break; + case 32: + kvm_setup_bhb_slot(__spectre_bhb_loop_k32); + break; + default: + WARN_ON_ONCE(1); + } + this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP); + + state = SPECTRE_MITIGATED; + } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) { + fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); + if (fw_state == SPECTRE_MITIGATED) { + kvm_setup_bhb_slot(__smccc_workaround_3_smc); + this_cpu_set_vectors(EL1_VECTOR_BHB_FW); + + state = SPECTRE_MITIGATED; + } + } + + update_mitigation_state(&spectre_bhb_state, state); +} + +/* Patched to correct the immediate */ +void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt, + __le32 *origptr, __le32 *updptr, int nr_inst) +{ + u8 rd; + u32 insn; + u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM); + + BUG_ON(nr_inst != 1); /* MOV -> MOV */ + + if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) + return; + + insn = le32_to_cpu(*origptr); + rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn); + insn = aarch64_insn_gen_movewide(rd, loop_count, 0, + AARCH64_INSN_VARIANT_64BIT, + AARCH64_INSN_MOVEWIDE_ZERO); + *updptr++ = cpu_to_le32(insn); +} + +#ifdef CONFIG_BPF_SYSCALL +#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n" +void unpriv_ebpf_notify(int new_state) +{ + if (spectre_v2_state == SPECTRE_VULNERABLE || + spectre_bhb_state != SPECTRE_MITIGATED) + return; + + if (!new_state) + pr_err("WARNING: %s", EBPF_WARN); +} +#endif diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 3c834d7c299aea509108ee30fe01453177339b4c..ddca8d27fca6f58d56265ff7dfe36a0f1a9e5a55 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -52,7 +53,7 @@ #include #include -#include "../mm/pmem_reserve.h" +#include "../mm/internal.h" static int num_standard_resources; static struct resource *standard_resources; @@ -273,16 +274,9 @@ static void __init request_standard_resources(void) request_memmap_resources(res); #ifdef CONFIG_KEXEC_CORE - /* - * Userspace will find "Crash kernel" or "Crash kernel (low)" - * region in /proc/iomem. - * In order to distinct from the high region and make no effect - * to the use of existing kexec-tools, rename the low region as - * "Crash kernel (low)". - */ + /* Userspace will find "Crash kernel" region in /proc/iomem. 
*/ if (crashk_low_res.end && crashk_low_res.start >= res->start && crashk_low_res.end <= res->end) { - crashk_low_res.name = "Crash kernel (low)"; request_resource(res, &crashk_low_res); } if (crashk_res.end && crashk_res.start >= res->start && @@ -290,13 +284,7 @@ static void __init request_standard_resources(void) request_resource(res, &crashk_res); #endif -#ifdef CONFIG_QUICK_KEXEC - if (quick_kexec_res.end && - quick_kexec_res.start >= res->start && - quick_kexec_res.end <= res->end) - request_resource(res, &quick_kexec_res); -#endif - + request_quick_kexec_res(res); request_pin_mem_res(res); } @@ -387,6 +375,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) arm64_memblock_init(); + kfence_early_alloc_pool(); + efi_fake_memmap(); efi_find_mirror(); diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index d288bb4a138b70a3ec6ffb3dbd54822866347ed9..e5e2f1e888a29239f7a6b729a4d8846d98735e86 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -447,10 +447,12 @@ int setup_sigframe_layout(struct rt_sigframe_user_layout *user, bool add_all) { int err; - err = sigframe_alloc(user, &user->fpsimd_offset, - sizeof(struct fpsimd_context)); - if (err) - return err; + if (system_supports_fpsimd()) { + err = sigframe_alloc(user, &user->fpsimd_offset, + sizeof(struct fpsimd_context)); + if (err) + return err; + } /* fault information, if valid */ if (add_all || current->thread.fault_code) { diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index fc099cda70a3fddf0faac7ad4cd5ff26e5d059d7..dd4c76ed8ca65aa3a1906bcb5f5174b86276b3cb 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -55,6 +55,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -98,167 +99,6 @@ static inline int op_cpu_kill(unsigned int cpu) } #endif -#ifdef CONFIG_ARM64_CPU_PARK -struct cpu_park_section { - unsigned long exit; /* exit address of park look */ - unsigned long magic; /* maigc represent park state */ - char text[0]; /* text section of park */ -}; - -static int mmap_cpu_park_mem(void) -{ - if (!park_info.start) - return -ENOMEM; - - if (park_info.start_v) - return 0; - - park_info.start_v = (unsigned long)__ioremap(park_info.start, - park_info.len, - PAGE_KERNEL_EXEC); - if (!park_info.start_v) { - pr_warn("map park memory failed."); - return -ENOMEM; - } - - return 0; -} - -static inline unsigned long cpu_park_section_v(unsigned int cpu) -{ - return park_info.start_v + PARK_SECTION_SIZE * (cpu - 1); -} - -static inline unsigned long cpu_park_section_p(unsigned int cpu) -{ - return park_info.start + PARK_SECTION_SIZE * (cpu - 1); -} - -/* - * Write the secondary_entry to exit section of park state. - * Then the secondary cpu will jump straight into the kernel - * by the secondary_entry. - */ -static int write_park_exit(unsigned int cpu) -{ - struct cpu_park_section *park_section; - unsigned long *park_exit; - unsigned long *park_text; - - if (mmap_cpu_park_mem() != 0) - return -EPERM; - - park_section = (struct cpu_park_section *)cpu_park_section_v(cpu); - park_exit = &park_section->exit; - park_text = (unsigned long *)park_section->text; - pr_debug("park_text 0x%lx : 0x%lx, do_cpu_park text 0x%lx : 0x%lx", - (unsigned long)park_text, *park_text, - (unsigned long)do_cpu_park, - *(unsigned long *)do_cpu_park); - - /* - * Test first 8 bytes to determine - * whether needs to write cpu park exit. 
- */ - if (*park_text == *(unsigned long *)do_cpu_park) { - writeq_relaxed(__pa_symbol(secondary_entry), park_exit); - __flush_dcache_area((__force void *)park_exit, - sizeof(unsigned long)); - flush_icache_range((unsigned long)park_exit, - (unsigned long)(park_exit + 1)); - sev(); - dsb(sy); - isb(); - - pr_debug("Write cpu %u secondary entry 0x%lx to 0x%lx.", - cpu, *park_exit, (unsigned long)park_exit); - pr_info("Boot cpu %u from PARK state.", cpu); - return 0; - } - - return -EPERM; -} - -/* Install cpu park sections for the specific cpu. */ -static int install_cpu_park(unsigned int cpu) -{ - struct cpu_park_section *park_section; - unsigned long *park_exit; - unsigned long *park_magic; - unsigned long park_text_len; - - park_section = (struct cpu_park_section *)cpu_park_section_v(cpu); - pr_debug("Install cpu park on cpu %u park exit 0x%lx park text 0x%lx", - cpu, (unsigned long)park_section, - (unsigned long)(park_section->text)); - - park_exit = &park_section->exit; - park_magic = &park_section->magic; - park_text_len = PARK_SECTION_SIZE - sizeof(struct cpu_park_section); - - *park_exit = 0UL; - *park_magic = 0UL; - memcpy((void *)park_section->text, do_cpu_park, park_text_len); - __flush_dcache_area((void *)park_section, PARK_SECTION_SIZE); - - return 0; -} - -static int uninstall_cpu_park(unsigned int cpu) -{ - unsigned long park_section; - - if (mmap_cpu_park_mem() != 0) - return -EPERM; - - park_section = cpu_park_section_v(cpu); - memset((void *)park_section, 0, PARK_SECTION_SIZE); - __flush_dcache_area((void *)park_section, PARK_SECTION_SIZE); - - return 0; -} - -static int cpu_wait_park(unsigned int cpu) -{ - long timeout; - struct cpu_park_section *park_section; - - volatile unsigned long *park_magic; - - park_section = (struct cpu_park_section *)cpu_park_section_v(cpu); - park_magic = &park_section->magic; - - timeout = USEC_PER_SEC; - while (*park_magic != PARK_MAGIC && timeout--) - udelay(1); - - if (timeout > 0) - pr_debug("cpu %u park done.", cpu); - else - pr_err("cpu %u park failed.", cpu); - - return *park_magic == PARK_MAGIC; -} - -static void cpu_park(unsigned int cpu) -{ - unsigned long park_section_p; - unsigned long park_exit_phy; - unsigned long do_park; - typeof(enter_cpu_park) *park; - - park_section_p = cpu_park_section_p(cpu); - park_exit_phy = park_section_p; - pr_debug("Go to park cpu %u exit address 0x%lx", cpu, park_exit_phy); - - do_park = park_section_p + sizeof(struct cpu_park_section); - park = (void *)__pa_symbol(enter_cpu_park); - - cpu_install_idmap(); - park(do_park, park_exit_phy); - unreachable(); -} -#endif /* * Boot a secondary CPU, and assign it the specified idle task. @@ -268,10 +108,8 @@ static int boot_secondary(unsigned int cpu, struct task_struct *idle) { const struct cpu_operations *ops = get_cpu_ops(cpu); -#ifdef CONFIG_ARM64_CPU_PARK if (write_park_exit(cpu) == 0) return 0; -#endif if (ops->cpu_boot) return ops->cpu_boot(cpu); @@ -307,9 +145,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) */ wait_for_completion_timeout(&cpu_running, msecs_to_jiffies(5000)); -#ifdef CONFIG_ARM64_CPU_PARK uninstall_cpu_park(cpu); -#endif + if (cpu_online(cpu)) return 0; @@ -1057,31 +894,12 @@ void arch_irq_work_raise(void) static void local_cpu_stop(void) { -#ifdef CONFIG_ARM64_CPU_PARK - int cpu; - const struct cpu_operations *ops = NULL; -#endif - set_cpu_online(smp_processor_id(), false); local_daif_mask(); sdei_mask_local_cpu(); -#ifdef CONFIG_ARM64_CPU_PARK - /* - * Go to cpu park state. - * Otherwise go to cpu die. 
- */ - cpu = smp_processor_id(); - if (kexec_in_progress && park_info.start_v) { - machine_kexec_mask_interrupts(); - cpu_park(cpu); - - ops = get_cpu_ops(cpu); - if (ops && ops->cpu_die) - ops->cpu_die(cpu); - } -#endif + cpu_park_stop(); cpu_park_loop(); } @@ -1295,44 +1113,10 @@ void smp_send_stop(void) sdei_mask_local_cpu(); } -#ifdef CONFIG_ARM64_CPU_PARK -int kexec_smp_send_park(void) +void smp_cross_send_stop(cpumask_t *mask) { - unsigned long cpu; - - if (WARN_ON(!kexec_in_progress)) { - pr_crit("%s called not in kexec progress.", __func__); - return -EPERM; - } - - if (mmap_cpu_park_mem() != 0) { - pr_info("no cpuparkmem, goto normal way."); - return -EPERM; - } - - local_irq_disable(); - - if (num_online_cpus() > 1) { - cpumask_t mask; - - cpumask_copy(&mask, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), &mask); - - for_each_cpu(cpu, &mask) - install_cpu_park(cpu); - smp_cross_call(&mask, IPI_CPU_STOP); - - /* Wait for other CPUs to park */ - for_each_cpu(cpu, &mask) - cpu_wait_park(cpu); - pr_info("smp park other cpus done\n"); - } - - sdei_mask_local_cpu(); - - return 0; + smp_cross_call(mask, IPI_CPU_STOP); } -#endif #ifdef CONFIG_KEXEC_CORE void crash_smp_send_stop(void) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 30c102978942748b995d51729ca3a4e0b82dddba..71f4b5f24d15fafecae3f7f039b8677b3a37df33 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -299,7 +299,7 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1)) <= SZ_4K, "Hibernate exit text too big or misaligned") #endif #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 -ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE, +ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) <= 3*PAGE_SIZE, "Entry trampoline text too big") #endif /* diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3ae13ef0c980ae9ebe506d36c8b3f333f396fb92..d7745ff2eb59f4d52f3b448bb492644d5c4e9852 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1416,7 +1416,8 @@ static int kvm_map_vectors(void) * !SV2 + HEL2 -> allocate one vector slot and use exec mapping * SV2 + HEL2 -> use hardened vectors and use exec mapping */ - if (cpus_have_const_cap(ARM64_SPECTRE_V2)) { + if (cpus_have_const_cap(ARM64_SPECTRE_V2) || + cpus_have_const_cap(ARM64_SPECTRE_BHB)) { __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs); __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base); } diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index bcbead3746c6697730a9e46d6975a608d60dac69..bc06243cf422544ad6e27dffecb3682dcdde5e14 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -61,6 +61,10 @@ el1_sync: // Guest trapped into EL2 /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \ ARM_SMCCC_ARCH_WORKAROUND_2) + cbz w1, wa_epilogue + + eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \ + ARM_SMCCC_ARCH_WORKAROUND_3) cbnz w1, el1_trap wa_epilogue: diff --git a/arch/arm64/kvm/hyp/smccc_wa.S b/arch/arm64/kvm/hyp/smccc_wa.S index b0441dbdf68bd012f84d9c4f25eac4824dddb417..533b0aa73256a07e92628014c3f723fe2730a541 100644 --- a/arch/arm64/kvm/hyp/smccc_wa.S +++ b/arch/arm64/kvm/hyp/smccc_wa.S @@ -30,3 +30,78 @@ SYM_DATA_START(__smccc_workaround_1_smc) 1: .org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ .org 1b SYM_DATA_END(__smccc_workaround_1_smc) + + .global __smccc_workaround_3_smc +SYM_DATA_START(__smccc_workaround_3_smc) + esb + sub sp, 
sp, #(8 * 4) + stp x2, x3, [sp, #(8 * 0)] + stp x0, x1, [sp, #(8 * 2)] + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_3 + smc #0 + ldp x2, x3, [sp, #(8 * 0)] + ldp x0, x1, [sp, #(8 * 2)] + add sp, sp, #(8 * 4) +1: .org __smccc_workaround_3_smc + __SMCCC_WORKAROUND_3_SMC_SZ + .org 1b +SYM_DATA_END(__smccc_workaround_3_smc) + + .global __spectre_bhb_loop_k8 +SYM_DATA_START(__spectre_bhb_loop_k8) + esb + sub sp, sp, #(8 * 2) + stp x0, x1, [sp, #(8 * 0)] + mov x0, #8 +2: b . + 4 + subs x0, x0, #1 + b.ne 2b + dsb nsh + isb + ldp x0, x1, [sp, #(8 * 0)] + add sp, sp, #(8 * 2) +1: .org __spectre_bhb_loop_k8 + __SPECTRE_BHB_LOOP_SZ + .org 1b +SYM_DATA_END(__spectre_bhb_loop_k8) + + .global __spectre_bhb_loop_k24 +SYM_DATA_START(__spectre_bhb_loop_k24) + esb + sub sp, sp, #(8 * 2) + stp x0, x1, [sp, #(8 * 0)] + mov x0, #24 +2: b . + 4 + subs x0, x0, #1 + b.ne 2b + dsb nsh + isb + ldp x0, x1, [sp, #(8 * 0)] + add sp, sp, #(8 * 2) +1: .org __spectre_bhb_loop_k24 + __SPECTRE_BHB_LOOP_SZ + .org 1b +SYM_DATA_END(__spectre_bhb_loop_k24) + + .global __spectre_bhb_loop_k32 +SYM_DATA_START(__spectre_bhb_loop_k32) + esb + sub sp, sp, #(8 * 2) + stp x0, x1, [sp, #(8 * 0)] + mov x0, #32 +2: b . + 4 + subs x0, x0, #1 + b.ne 2b + dsb nsh + isb + ldp x0, x1, [sp, #(8 * 0)] + add sp, sp, #(8 * 2) +1: .org __spectre_bhb_loop_k32 + __SPECTRE_BHB_LOOP_SZ + .org 1b +SYM_DATA_END(__spectre_bhb_loop_k32) + + .global __spectre_bhb_clearbhb +SYM_DATA_START(__spectre_bhb_clearbhb) + esb + clearbhb + isb +1: .org __spectre_bhb_clearbhb + __SPECTRE_BHB_CLEARBHB_SZ + .org 1b +SYM_DATA_END(__spectre_bhb_clearbhb) diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 62546e20b25115ecac7a3c00f932cf8e8a60ad17..532e687f693660aa77cfc7b0d27aad3e26c0e244 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -25,6 +26,7 @@ #include #include #include +#include const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n"; @@ -70,7 +72,7 @@ NOKPROBE_SYMBOL(__activate_traps); static void __deactivate_traps(struct kvm_vcpu *vcpu) { - extern char vectors[]; /* kernel exception vectors */ + const char *host_vectors = vectors; ___deactivate_traps(vcpu); @@ -84,7 +86,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); - write_sysreg(vectors, vbar_el1); + + if (!arm64_kernel_unmapped_at_el0()) + host_vectors = __this_cpu_read(this_cpu_vector); + write_sysreg(host_vectors, vbar_el1); } NOKPROBE_SYMBOL(__deactivate_traps); diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 10d91047d38a5028f882fea6ffff193e6446bb6d..3ed08077fe15737a5fe1178e4fcf6cfb47fc2e56 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -58,6 +58,18 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) break; } break; + case ARM_SMCCC_ARCH_WORKAROUND_3: + switch (arm64_get_spectre_bhb_state()) { + case SPECTRE_VULNERABLE: + break; + case SPECTRE_MITIGATED: + val = SMCCC_RET_SUCCESS; + break; + case SPECTRE_UNAFFECTED: + val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED; + break; + } + break; case ARM_SMCCC_HV_PV_TIME_FEATURES: val = SMCCC_RET_SUCCESS; break; diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index db4056ecccfda9319233e7be71c27d8d9db16c84..20ba5136ac3ddab57ff5858a2825efa41925de54 100644 --- 
a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -397,7 +397,7 @@ int kvm_psci_call(struct kvm_vcpu *vcpu) int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu) { - return 3; /* PSCI version and two workaround registers */ + return 4; /* PSCI version and three workaround registers */ } int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) @@ -411,6 +411,9 @@ int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++)) return -EFAULT; + if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3, uindices++)) + return -EFAULT; + return 0; } @@ -450,6 +453,17 @@ static int get_kernel_wa_level(u64 regid) case SPECTRE_VULNERABLE: return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL; } + break; + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3: + switch (arm64_get_spectre_bhb_state()) { + case SPECTRE_VULNERABLE: + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL; + case SPECTRE_MITIGATED: + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL; + case SPECTRE_UNAFFECTED: + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED; + } + return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL; } return -EINVAL; @@ -466,6 +480,7 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) break; case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2: + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3: val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK; break; default: @@ -511,6 +526,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) } case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1: + case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3: if (val & ~KVM_REG_FEATURE_LEVEL_MASK) return -EINVAL; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 2b862fe21712f293a0376e0f9518cf5e009af278..6c1bc564b91fbe1cd8d989a71a8d948cdcf81021 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1548,7 +1548,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* CRm=6 */ ID_SANITISED(ID_AA64ISAR0_EL1), ID_SANITISED(ID_AA64ISAR1_EL1), - ID_UNALLOCATED(6,2), + ID_SANITISED(ID_AA64ISAR2_EL1), ID_UNALLOCATED(6,3), ID_UNALLOCATED(6,4), ID_UNALLOCATED(6,5), diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index b2d73fc0d1ef48091ee428f915cad6054b3c3c21..9e1459534ce54e074bbc580362fac31c297044f6 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -248,6 +248,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, IRQCHIP_STATE_PENDING, &val); WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); + } else if (vgic_irq_is_mapped_level(irq)) { + val = vgic_get_phys_line_level(irq); } else { val = irq_is_pending(irq); } diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index 15b666200f0b4be8a70f10952f77ce6d1e1eff40..afb077b1cda6bdea2e2e972eac0989884a84bd25 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -106,7 +106,6 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, if (intid >= VGIC_MIN_LPI) return vgic_get_lpi(kvm, intid); - WARN(1, "Looking up struct vgic_irq for reserved INTID"); return NULL; } diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 2cf999e41d30e7ab29cc8a78547bf50a0d90881f..100de4e2d9ee2cd45ec44c0933751e214a616636 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -60,6 +60,17 @@ SYM_FUNC_START(__arch_copy_from_user) 
#include "copy_template.S" mov x0, #0 // Nothing to copy ret + +/* + * With CONFIG_ARM64_UCE_KERNEL_RECOVERY, if a RAS error is triggered in + * copy_from_user(), the error is processed in do_sea() and + * copy_from_user_sea_fallback is assigned to regs->pc, so execution finally + * returns here to continue processing. + */ + .global copy_from_user_sea_fallback +copy_from_user_sea_fallback: + sub x0, end, dst // bytes not copied + ret SYM_FUNC_END(__arch_copy_from_user) EXPORT_SYMBOL(__arch_copy_from_user) diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index 42e107d6da4f78b7cfe01077af60d148f4e38af7..68a32305cff96405f637b2d339ad929958714cc5 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -11,7 +11,10 @@ obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o obj-$(CONFIG_ARM64_MTE) += mteswap.o KASAN_SANITIZE_physaddr.o += n +obj-$(CONFIG_ARM64_UCE_KERNEL_RECOVERY) += uce_kernel_recovery.o + obj-$(CONFIG_KASAN) += kasan_init.o KASAN_SANITIZE_kasan_init.o := n obj-$(CONFIG_ARM64_PMEM_RESERVE) += pmem_reserve.o +obj-$(CONFIG_QUICK_KEXEC) += quick_kexec.o diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 3fc5aceb72eb866b98107a9f75b7f2b522e95e4d..7da2f8118b35819b610918fa8f9990a5866d85ed 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -653,6 +653,10 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) siaddr = NULL; else siaddr = (void __user *)addr; + + if (arm64_process_kernel_sea(addr, esr, regs, inf->sig, inf->code, siaddr)) + return 0; + arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); return 0; diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 5ab9dd7d55d94977d8eff11e0072e8c0e9a5d270..5023c7e1f7540f153d32f8d107be79939f45df67 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -44,8 +44,9 @@ #include #include #include +#include -#include "pmem_reserve.h" +#include "internal.h" /* * We need to be able to catch inadvertent references to memstart_addr @@ -62,13 +63,43 @@ EXPORT_SYMBOL(memstart_addr); * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4). * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory, * otherwise it is empty. + * + * Memory reservation for the crash kernel is either done early or deferred, + * depending on the DMA memory zone configs (ZONE_DMA) -- + * + * In the absence of ZONE_DMA configs, arm64_dma_phys_limit is initialized + * here instead of in max_zone_phys(). This allows early reservation of + * crash kernel memory, which has a dependency on arm64_dma_phys_limit. + * Reserving memory early for the crash kernel allows linear creation of block + * mappings (greater than page-granularity) for all the memory bank ranges. + * In this scheme a comparatively quicker boot is observed. + * + * If ZONE_DMA configs are defined, crash kernel memory reservation + * is delayed until DMA zone memory range size initialization is performed in + * zone_sizes_init(). The deferral is necessary to steer clear of the DMA zone + * memory range and avoid overlapping allocations. Crash kernel memory + * boundaries are then not known when mapping all the bank memory ranges, so + * the crash kernel range cannot be excluded from block mappings and + * page-granularity mappings are created for the entire memory range. + * Hence a slightly slower boot is observed. + * + * Note: Page-granularity mappings are necessary for the crash kernel memory + * range so that its size can be shrunk via the + * /sys/kernel/kexec_crash_size interface.
*/ -phys_addr_t arm64_dma_phys_limit __ro_after_init; +#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32) +phys_addr_t __ro_after_init arm64_dma_phys_limit; +#else +phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1; +#endif #ifndef CONFIG_KEXEC_CORE static void __init reserve_crashkernel(void) { } + +static void __init reserve_crashkernel_high(void) +{ +} #endif /* @@ -131,45 +162,6 @@ static void __init reserve_elfcorehdr(void) } #endif /* CONFIG_CRASH_DUMP */ -#ifdef CONFIG_QUICK_KEXEC -static int __init parse_quick_kexec(char *p) -{ - if (!p) - return 0; - - quick_kexec_res.end = PAGE_ALIGN(memparse(p, NULL)); - - return 0; -} -early_param("quickkexec", parse_quick_kexec); - -static void __init reserve_quick_kexec(void) -{ - unsigned long long mem_start, mem_len; - - mem_len = quick_kexec_res.end; - if (mem_len == 0) - return; - - /* Current arm64 boot protocol requires 2MB alignment */ - mem_start = memblock_find_in_range(0, arm64_dma_phys_limit, - mem_len, SZ_2M); - if (mem_start == 0) { - pr_warn("cannot allocate quick kexec mem (size:0x%llx)\n", - mem_len); - quick_kexec_res.end = 0; - return; - } - - memblock_reserve(mem_start, mem_len); - pr_info("quick kexec mem reserved: 0x%016llx - 0x%016llx (%lld MB)\n", - mem_start, mem_start + mem_len, mem_len >> 20); - - quick_kexec_res.start = mem_start; - quick_kexec_res.end = mem_start + mem_len - 1; -} -#endif - /* * Return the maximum physical address for a zone accessible by the given bits * limit. If DRAM starts above 32-bit, expand the zone to the maximum @@ -207,8 +199,6 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) if (!arm64_dma_phys_limit) arm64_dma_phys_limit = dma32_phys_limit; #endif - if (!arm64_dma_phys_limit) - arm64_dma_phys_limit = PHYS_MASK + 1; max_zone_pfns[ZONE_NORMAL] = max; free_area_init(max_zone_pfns); @@ -307,56 +297,55 @@ static void __init fdt_enforce_memory_region(void) memblock_add(usable_rgns[1].base, usable_rgns[1].size); } -#ifdef CONFIG_ARM64_CPU_PARK -struct cpu_park_info park_info = { - .start = 0, - .len = PARK_SECTION_SIZE * NR_CPUS, - .start_v = 0, -}; +#define MAX_RES_REGIONS 32 -static int __init parse_park_mem(char *p) -{ - if (!p) - return 0; +static struct memblock_region mbk_memmap_regions[MAX_RES_REGIONS] __initdata_memblock; +static int mbk_memmap_cnt __initdata; - park_info.start = PAGE_ALIGN(memparse(p, NULL)); - if (park_info.start == 0) - pr_info("cpu park mem params[%s]", p); +static void __init setup_mbk_memmap_regions(phys_addr_t base, phys_addr_t size) +{ + if (mbk_memmap_cnt >= MAX_RES_REGIONS) { + pr_err("Too many memmap specified, exceed %d\n", MAX_RES_REGIONS); + return; + } - return 0; + mbk_memmap_regions[mbk_memmap_cnt].base = base; + mbk_memmap_regions[mbk_memmap_cnt].size = size; + mbk_memmap_cnt++; } -early_param("cpuparkmem", parse_park_mem); -static int __init reserve_park_mem(void) +static void __init reserve_memmap_regions(void) { - if (park_info.start == 0 || park_info.len == 0) - return 0; + phys_addr_t base, size; + int i; - park_info.start = PAGE_ALIGN(park_info.start); - park_info.len = PAGE_ALIGN(park_info.len); + for (i = 0; i < mbk_memmap_cnt; i++) { + base = mbk_memmap_regions[i].base; + size = mbk_memmap_regions[i].size; - if (!memblock_is_region_memory(park_info.start, park_info.len)) { - pr_warn("cannot reserve park mem: region is not memory!"); - goto out; - } + if (!memblock_is_region_memory(base, size)) { + pr_warn("memmap reserve: 0x%08llx - 0x%08llx is not a memory region - ignore\n", + base, 
base + size); + continue; + } - if (memblock_is_region_reserved(park_info.start, park_info.len)) { - pr_warn("cannot reserve park mem: region overlaps reserved memory!"); - goto out; - } + if (memblock_is_region_reserved(base, size)) { + pr_warn("memmap reserve: 0x%08llx - 0x%08llx overlaps in-use memory region - ignore\n", + base, base + size); + continue; + } - memblock_remove(park_info.start, park_info.len); - pr_info("cpu park mem reserved: 0x%016lx - 0x%016lx (%ld MB)", - park_info.start, park_info.start + park_info.len, - park_info.len >> 20); + if (memblock_reserve(base, size)) { + pr_warn("memmap reserve: 0x%08llx - 0x%08llx failed\n", + base, base + size); + continue; + } - return 0; -out: - park_info.start = 0; - park_info.len = 0; - return -EINVAL; + pr_info("memmap reserved: 0x%08llx - 0x%08llx (%lld MB)", + base, base + size, size >> 20); + memblock_mark_memmap(base, size); + } } -#endif static int need_remove_real_memblock __initdata; @@ -394,8 +383,7 @@ static int __init parse_memmap_one(char *p) memblock_add(start_at, mem_size); } else if (*p == '$') { start_at = memparse(p + 1, &p); - memblock_reserve(start_at, mem_size); - memblock_mark_memmap(start_at, mem_size); + setup_mbk_memmap_regions(start_at, mem_size); } else if (*p == '!') { start_at = memparse(p + 1, &p); setup_reserve_pmem(start_at, mem_size); @@ -530,8 +518,13 @@ void __init arm64_memblock_init(void) early_init_fdt_scan_reserved_mem(); + reserve_crashkernel_high(); + reserve_elfcorehdr(); + if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) + reserve_crashkernel(); + high_memory = __va(memblock_end_of_DRAM() - 1) + 1; } @@ -581,19 +574,18 @@ void __init bootmem_init(void) * So reserve park memory firstly is better, but it may cause * crashkernel or quickkexec reserving failed. */ -#ifdef CONFIG_ARM64_CPU_PARK reserve_park_mem(); -#endif /* * request_standard_resources() depends on crashkernel's memory being * reserved, so do it here. 
*/ - reserve_crashkernel(); + if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)) + reserve_crashkernel(); -#ifdef CONFIG_QUICK_KEXEC reserve_quick_kexec(); -#endif + + reserve_memmap_regions(); reserve_pmem(); diff --git a/arch/arm64/mm/pmem_reserve.h b/arch/arm64/mm/internal.h similarity index 53% rename from arch/arm64/mm/pmem_reserve.h rename to arch/arm64/mm/internal.h index d143198c9696e6b22d10ad65af9c0cf87a5411ae..9b8e20d87172b66a4d3947f3f4b7617f36f342fa 100644 --- a/arch/arm64/mm/pmem_reserve.h +++ b/arch/arm64/mm/internal.h @@ -1,5 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ARM64_MM_INTERNAL_H +#define __ARM64_MM_INTERNAL_H + #include #ifdef CONFIG_ARM64_PMEM_RESERVE @@ -11,3 +14,12 @@ static inline void __init setup_reserve_pmem(u64 start, u64 size) {} static inline void __init reserve_pmem(void) {} static inline void __init request_pmem_res_resource(void) {} #endif +#ifdef CONFIG_QUICK_KEXEC +void __init reserve_quick_kexec(void); +void __init request_quick_kexec_res(struct resource *res); +#else +static inline void __init reserve_quick_kexec(void) {} +static inline void __init request_quick_kexec_res(struct resource *res) {} +#endif + +#endif /* ifndef _ARM64_MM_INTERNAL_H */ diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index b24e43d20667ecd0ae7cc768503c40b9294c1564..02051d4074c41c555329823b62bda34e2b8fa9b1 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -212,15 +212,18 @@ void __init kasan_init(void) { u64 kimg_shadow_start, kimg_shadow_end; u64 mod_shadow_start, mod_shadow_end; + u64 vmalloc_shadow_end; phys_addr_t pa_start, pa_end; u64 i; - kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK; - kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end)); + kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK; + kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END)); mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR); mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END); + vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END); + /* * We are going to perform proper setup of shadow memory. * At first we should unmap early shadow (clear_pgds() call below). 
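The kasan_init() hunks here extend shadow coverage to the vmalloc area when CONFIG_KASAN_VMALLOC is enabled. The underlying arithmetic is the usual generic-KASAN linear transform: one shadow byte tracks KASAN_SHADOW_SCALE (8) bytes of address space, so covering [VMALLOC_START, VMALLOC_END) just means populating its image under that map (because VMALLOC_START == MODULES_END, as the BUILD_BUG_ON in the next hunk asserts, a single early-shadow population past vmalloc_shadow_end then suffices). A minimal standalone sketch of the transform; the shift is the generic value, and the offset is an illustrative placeholder, not arm64's real KASAN_SHADOW_OFFSET:

#include <stdint.h>
#include <stdio.h>

#define SHADOW_SCALE_SHIFT 3	/* 8 bytes of memory per shadow byte */
#define SHADOW_OFFSET 0xdffffc0000000000ULL	/* placeholder value */

static uint64_t mem_to_shadow(uint64_t addr)
{
	/* generic KASAN: shadow = (addr >> scale) + offset */
	return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	/* made-up span standing in for [VMALLOC_START, VMALLOC_END) */
	uint64_t start = 0xffff800010000000ULL, end = 0xffff800020000000ULL;

	printf("shadow: 0x%jx - 0x%jx (%ju shadow bytes for %ju bytes)\n",
	       (uintmax_t)mem_to_shadow(start), (uintmax_t)mem_to_shadow(end),
	       (uintmax_t)(mem_to_shadow(end) - mem_to_shadow(start)),
	       (uintmax_t)(end - start));
	return 0;
}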
@@ -235,16 +238,22 @@ void __init kasan_init(void) clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); kasan_map_populate(kimg_shadow_start, kimg_shadow_end, - early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); + early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START)))); kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END), (void *)mod_shadow_start); - kasan_populate_early_shadow((void *)kimg_shadow_end, - (void *)KASAN_SHADOW_END); - if (kimg_shadow_start > mod_shadow_end) - kasan_populate_early_shadow((void *)mod_shadow_end, - (void *)kimg_shadow_start); + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { + BUILD_BUG_ON(VMALLOC_START != MODULES_END); + kasan_populate_early_shadow((void *)vmalloc_shadow_end, + (void *)KASAN_SHADOW_END); + } else { + kasan_populate_early_shadow((void *)kimg_shadow_end, + (void *)KASAN_SHADOW_END); + if (kimg_shadow_start > mod_shadow_end) + kasan_populate_early_shadow((void *)mod_shadow_end, + (void *)kimg_shadow_start); + } for_each_mem_range(i, &pa_start, &pa_end) { void *start = (void *)__phys_to_virt(pa_start); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index a318b702805a5e1dcb5458b6d940772def637479..e767653540407334bc782a5331578933297ca8ad 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -61,6 +62,7 @@ static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused; static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused; static DEFINE_SPINLOCK(swapper_pgdir_lock); +static DEFINE_MUTEX(fixmap_lock); void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) { @@ -314,6 +316,12 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, } BUG_ON(p4d_bad(p4d)); + /* + * No need for locking during early boot. And it doesn't work as + * expected with KASLR enabled. + */ + if (system_state != SYSTEM_BOOTING) + mutex_lock(&fixmap_lock); pudp = pud_set_fixmap_offset(p4dp, addr); do { pud_t old_pud = READ_ONCE(*pudp); @@ -344,6 +352,8 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, } while (pudp++, addr = next, addr != end); pud_clear_fixmap(); + if (system_state != SYSTEM_BOOTING) + mutex_unlock(&fixmap_lock); } static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, @@ -469,32 +479,27 @@ void __init mark_linear_text_alias_ro(void) PAGE_KERNEL_RO); } -static bool crash_mem_map __initdata; - -static int __init enable_crash_mem_map(char *arg) -{ - /* - * Proper parameter parsing is done by reserve_crashkernel(). We only - * need to know if the linear map has to avoid block mappings so that - * the crashkernel reservations can be unmapped later. - */ - crash_mem_map = true; - - return 0; -} -early_param("crashkernel", enable_crash_mem_map); - static void __init map_mem(pgd_t *pgdp) { phys_addr_t kernel_start = __pa_symbol(_text); phys_addr_t kernel_end = __pa_symbol(__init_begin); phys_addr_t start, end; - int flags = 0; + int flags = 0, eflags = 0; u64 i; - if (rodata_full || crash_mem_map || debug_pagealloc_enabled()) + if (rodata_full || debug_pagealloc_enabled()) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; +#ifdef CONFIG_KFENCE + /* + * KFENCE requires linear map to be mapped at page granularity, so + * temporarily skip mapping for __kfence_pool in the following + * for-loop + */ + if (__kfence_pool) + memblock_mark_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE); +#endif + /* * Take care not to create a writable alias for the * read-only text and rodata sections of the kernel image. 
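The map_mem() hunk above marks __kfence_pool NOMAP so the block-mapping loop skips it; the following hunk then remaps the pool with NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS. KFENCE needs this because it flips protections on single pages to create guard pages, which is only possible when the linear map covers the pool at page granularity. A hedged userspace analogy of that requirement using mmap/mprotect (the four-page pool and its layout are invented for illustration):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	/* a tiny four-page "pool", standing in for __kfence_pool */
	unsigned char *pool = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (pool == MAP_FAILED)
		return 1;

	/*
	 * Turn only the second page into a guard page; this per-page toggle
	 * is exactly what a single huge block mapping could not express.
	 */
	if (mprotect(pool + page, page, PROT_NONE))
		perror("mprotect");

	pool[0] = 0xaa;	/* first page is still writable */
	printf("page 0 writable, page 1 is now a guard page\n");
	munmap(pool, 4 * page);
	return 0;
}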
@@ -503,17 +508,44 @@ static void __init map_mem(pgd_t *pgdp) */ memblock_mark_nomap(kernel_start, kernel_end - kernel_start); +#ifdef CONFIG_KEXEC_CORE + if (crash_low_mem_page_map) + eflags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + + if (crashk_res.end) + memblock_mark_nomap(crashk_res.start, + resource_size(&crashk_res)); +#endif + /* map all the memory banks */ for_each_mem_range(i, &start, &end) { if (start >= end) break; + +#ifdef CONFIG_KEXEC_CORE + if (eflags && (end >= SZ_4G)) { + /* + * The memory block crosses the 4G boundary. + * Forcibly use page-level mappings for memory under 4G. + */ + if (start < SZ_4G) { + __map_memblock(pgdp, start, SZ_4G - 1, + pgprot_tagged(PAGE_KERNEL), flags | eflags); + start = SZ_4G; + } + + /* Page-level mappings are not mandatory for memory above 4G */ + eflags = 0; + } +#endif + /* * The linear map must allow allocation tags reading/writing * if MTE is present. Otherwise, it has the same attributes as * PAGE_KERNEL. */ __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL), - flags); + flags | eflags); } /* @@ -529,6 +561,35 @@ static void __init map_mem(pgd_t *pgdp) __map_memblock(pgdp, kernel_start, kernel_end, PAGE_KERNEL, NO_CONT_MAPPINGS); memblock_clear_nomap(kernel_start, kernel_end - kernel_start); + +#ifdef CONFIG_KFENCE + /* + * Map the __kfence_pool at page granularity now. + */ + if (__kfence_pool) { + __map_memblock(pgdp, __pa(__kfence_pool), + __pa(__kfence_pool + KFENCE_POOL_SIZE), + pgprot_tagged(PAGE_KERNEL), + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); + memblock_clear_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE); + } +#endif + + /* + * Use page-level mappings here so that we can shrink the region + * at page granularity and put unused memory back to the buddy + * system through the /sys/kernel/kexec_crash_size interface. + */ +#ifdef CONFIG_KEXEC_CORE + if (crashk_res.end) { + __map_memblock(pgdp, crashk_res.start, + crashk_res.end + 1, + PAGE_KERNEL, + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); + memblock_clear_nomap(crashk_res.start, + resource_size(&crashk_res)); + } +#endif } void mark_rodata_ro(void) @@ -592,6 +653,8 @@ early_param("rodata", parse_rodata); #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 static int __init map_entry_trampoline(void) { + int i; + pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); @@ -600,11 +663,15 @@ static int __init map_entry_trampoline(void) /* Map only the text into the trampoline page table */ memset(tramp_pg_dir, 0, PGD_SIZE); - __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE, - prot, __pgd_pgtable_alloc, 0); + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, + entry_tramp_text_size(), prot, + __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS); /* Map both the text and data into the kernel page table */ - __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot); + for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++) + __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, + pa_start + i * PAGE_SIZE, prot); + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { extern char __entry_tramp_data_start[]; @@ -1453,10 +1520,6 @@ int arch_add_memory(int nid, u64 start, u64 size, } - /* - * KFENCE requires linear map to be mapped at page granularity, so that - * it is possible to protect/unprotect single pages in the KFENCE pool.
- */ if (rodata_full || debug_pagealloc_enabled()) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; diff --git a/arch/arm64/mm/quick_kexec.c b/arch/arm64/mm/quick_kexec.c new file mode 100644 index 0000000000000000000000000000000000000000..fb68346f45a93e9e65f3cbff4e516c3fdf89ba1b --- /dev/null +++ b/arch/arm64/mm/quick_kexec.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "quick_kexec: " fmt + +#include +#include +#include +#include + +static int __init parse_quick_kexec(char *p) +{ + if (!p) + return 0; + + quick_kexec_res.end = PAGE_ALIGN(memparse(p, NULL)); + + return 0; +} +early_param("quickkexec", parse_quick_kexec); + +void __init reserve_quick_kexec(void) +{ + unsigned long long mem_start, mem_len; + + mem_len = quick_kexec_res.end; + if (mem_len == 0) + return; + + /* Current arm64 boot protocol requires 2MB alignment */ + mem_start = memblock_find_in_range(0, arm64_dma_phys_limit, + mem_len, SZ_2M); + if (mem_start == 0) { + pr_warn("cannot allocate quick kexec mem (size:0x%llx)\n", + mem_len); + quick_kexec_res.end = 0; + return; + } + + memblock_reserve(mem_start, mem_len); + pr_info("quick kexec mem reserved: 0x%016llx - 0x%016llx (%lld MB)\n", + mem_start, mem_start + mem_len, mem_len >> 20); + + quick_kexec_res.start = mem_start; + quick_kexec_res.end = mem_start + mem_len - 1; +} + +void __init request_quick_kexec_res(struct resource *res) +{ + if (quick_kexec_res.end && + quick_kexec_res.start >= res->start && + quick_kexec_res.end <= res->end) + request_resource(res, &quick_kexec_res); +} diff --git a/arch/arm64/mm/uce_kernel_recovery.c b/arch/arm64/mm/uce_kernel_recovery.c new file mode 100644 index 0000000000000000000000000000000000000000..c654dc6c4dfde270b906b15c59c8ca33323c020f --- /dev/null +++ b/arch/arm64/mm/uce_kernel_recovery.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define pr_fmt(fmt) "ARM64 UCE: " fmt + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +struct uce_kernel_recovery_info { + int (*fn)(void); + const char *name; + unsigned long addr; + unsigned long size; +}; + +int copy_from_user_sea_fallback(void); + +static int kernel_access_sea_recovery; +static int kernel_uce_recovery_sysctl_max = 7; + +#define UCE_KER_REC_NUM ARRAY_SIZE(reco_info) +static struct uce_kernel_recovery_info reco_info[] = { + {NULL, NULL, 0, 0}, /* reserved */ + {NULL, NULL, 0, 0}, /* reserved */ + {copy_from_user_sea_fallback, "__arch_copy_from_user", (unsigned long)__arch_copy_from_user, 0}, +}; + +static struct ctl_table uce_kernel_recovery_ctl_table[] = { + { + .procname = "uce_kernel_recovery", + .data = &kernel_access_sea_recovery, + .maxlen = sizeof(kernel_access_sea_recovery), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &kernel_uce_recovery_sysctl_max, + }, + { } +}; + +static int __init kernel_access_sea_recovery_init(void) +{ + unsigned long addr, size, offset; + unsigned int i; + + for (i = 0; i < UCE_KER_REC_NUM; i++) { + addr = reco_info[i].addr; + + if (!addr) + continue; + + if (!kallsyms_lookup_size_offset(addr, &size, &offset)) { + pr_info("symbol %s address lookup failed.\n", + reco_info[i].name); + size = 0; + } + + reco_info[i].size = size; + } + + if (!register_sysctl("kernel", uce_kernel_recovery_ctl_table)) + pr_err("failed to register sysctl table.\n"); + + return 0; +} +fs_initcall(kernel_access_sea_recovery_init); + +static int __init enable_kernel_access_sea_recovery(char *str) +{ + int max = (1 << UCE_KER_REC_NUM) - 1; + int val; + + if (kstrtoint(str, 0, &val)) + return -EINVAL; + + if (val < 0 || val > max) { + pr_info("invalid uce_kernel_recovery value %d\n", val); + return -EINVAL; + } + + kernel_access_sea_recovery = val; + + return 1; +} +__setup("uce_kernel_recovery=", enable_kernel_access_sea_recovery); + +/* + * What is kernel recovery? + * If a synchronous external abort (SEA) is triggered while the kernel + * accesses a process's private data, it can be handled by killing the + * process and isolating the failing pages instead of dying (panic). + */ +static int is_in_kernel_recovery(unsigned int esr, struct pt_regs *regs) +{ + /* + * target insn: ldp-pre, ldp-post, ldp-offset, + * ldr-64bit-pre/post, ldr-32bit-pre/post, ldrb-pre/post, ldrh-pre/post + */ + u32 target_insn[] = {0xa8c, 0xa9c, 0xa94, 0xf84, 0x784, 0x384, 0xb84}; + void *pc = (void *)instruction_pointer(regs); + struct uce_kernel_recovery_info *info; + bool insn_match = false; + u32 insn; + int i; + + pr_emerg("%s-%d, kernel recovery: 0x%x, esr: 0x%08x -- %s, %pS\n", + current->comm, current->pid, kernel_access_sea_recovery, esr, + esr_get_class_string(esr), pc); + + if (aarch64_insn_read((void *)pc, &insn)) { + pr_emerg("insn read failed.\n"); + return -EFAULT; + } + + /* + * We only process a specific ESR: + * EC : 0b100101 Data Abort taken without a change in Exception level. + * DFSC : 0b010000 Synchronous External abort, not on translation table + * walk or hardware update of translation table. + * eg: 0x96000610 + */ + if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR || + (esr & ESR_ELx_FSC) != ESR_ELx_FSC_EXTABT) { + pr_emerg("esr does not match.\n"); + return -EINVAL; + } + + insn = (insn >> 20) & 0xffc; + for (i = 0; i < ARRAY_SIZE(target_insn); i++) { + if (insn == target_insn[i]) { + insn_match = true; + break; + } + } + + if (!insn_match) { + pr_emerg("insn 0x%x does not match.\n", insn); + return -EINVAL; + } + + for (i = 0; i < UCE_KER_REC_NUM; i++) { + if (!((kernel_access_sea_recovery >> i) & 0x1)) + continue; + + info = &reco_info[i]; + if (info->fn && regs->pc >= info->addr && + regs->pc < (info->addr + info->size)) { + pr_emerg("matched %s successfully.\n", info->name); + return i; + } + } + + pr_emerg("scene does not match, kernel recovery %d.\n", + kernel_access_sea_recovery); + return -EINVAL; +} + +bool arm64_process_kernel_sea(unsigned long addr, unsigned int esr, + struct pt_regs *regs, int sig, + int code, void __user *siaddr) +{ + int idx; + + if (user_mode(regs) || apei_claim_sea(regs) < 0) + return false; + + if (!current->mm || !kernel_access_sea_recovery) { + pr_emerg("kernel recovery %d, %s-%d is %s-thread.\n", + kernel_access_sea_recovery, + current->comm, current->pid, + (current->mm) ? "user" : "kernel"); + + return false; + } + + idx = is_in_kernel_recovery(esr, regs); + if (idx < 0 || idx >= UCE_KER_REC_NUM) { + pr_emerg("Uncorrected hardware memory error (scene does not match or scene switch is off) in kernel-access\n"); + return false; + } + + current->thread.fault_address = 0; + current->thread.fault_code = esr; + regs->pc = (unsigned long)reco_info[idx].fn; + + arm64_force_sig_fault(sig, code, siaddr, + "Uncorrected hardware memory error with kernel recovery in kernel-access\n"); + + return true; +} diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 064577ff9ff591983209225ac7d89e584de5394d..9c6cab71ba98b4c859f918ea10cd18f29ab84f7b 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1040,15 +1040,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) goto out_off; } - /* 1. Initial fake pass to compute ctx->idx. */ - - /* Fake pass to fill in ctx->offset. */ - if (build_body(&ctx, extra_pass)) { + /* + * 1. Initial fake pass to compute ctx->idx and ctx->offset. + * + * BPF line info needs ctx->offset[i] to be the offset of + * instruction[i] in the jited image, so build the prologue first. + */ + if (build_prologue(&ctx, was_classic)) { prog = orig_prog; goto out_off; } - if (build_prologue(&ctx, was_classic)) { + if (build_body(&ctx, extra_pass)) { prog = orig_prog; goto out_off; } @@ -1121,6 +1124,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) prog->jited_len = prog_size; if (!prog->is_func || extra_pass) { + int i; + + /* offset[prog->len] is the size of the program */ + for (i = 0; i <= prog->len; i++) + ctx.offset[i] *= AARCH64_INSN_SIZE; bpf_prog_fill_jited_linfo(prog, ctx.offset + 1); out_off: kfree(ctx.offset); diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c index ab55e98ee8f62dc3f98f4d6529d7c05775670878..75e1f9df5f60449c6ec65c3e97ed3bd5ca2b07e2 100644 --- a/arch/csky/kernel/perf_callchain.c +++ b/arch/csky/kernel/perf_callchain.c @@ -49,7 +49,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, { struct stackframe buftail; unsigned long lr = 0; - unsigned long *user_frame_tail = (unsigned long *)fp; + unsigned long __user *user_frame_tail = (unsigned long __user *)fp; /* Check accessibility of one struct frame_tail beyond */ if (!access_ok(user_frame_tail, sizeof(buftail))) @@ -86,10 +86,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); unsigned long fp = 0; /* C-SKY does not support virtualization. */ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) + if (guest_cbs && guest_cbs->is_in_guest()) return; fp = regs->regs[4]; @@ -110,10 +111,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct stackframe fr; /* C-SKY does not support virtualization.
*/ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { pr_warn("C-SKY does not support perf in guest mode!"); return; } diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c index 0ca49b5e3dd378145dda5f3fbbf52b5587ed0275..243228b0aa075e4d9862905afeb88824f924f265 100644 --- a/arch/csky/kernel/signal.c +++ b/arch/csky/kernel/signal.c @@ -136,7 +136,7 @@ static inline void __user *get_sigframe(struct ksignal *ksig, static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { - struct rt_sigframe *frame; + struct rt_sigframe __user *frame; int err = 0; struct csky_vdso *vdso = current->mm->context.vdso; diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c index 59f7dfe50a4d011b437c69c4db5ed440cbe11545..a055616942a1eee821fb7207f9558856fbac8896 100644 --- a/arch/m68k/coldfire/device.c +++ b/arch/m68k/coldfire/device.c @@ -480,7 +480,7 @@ static struct platform_device mcf_i2c5 = { #endif /* MCFI2C_BASE5 */ #endif /* IS_ENABLED(CONFIG_I2C_IMX) */ -#if IS_ENABLED(CONFIG_MCF_EDMA) +#ifdef MCFEDMA_BASE static const struct dma_slave_map mcf_edma_map[] = { { "dreq0", "rx-tx", MCF_EDMA_FILTER_PARAM(0) }, @@ -552,7 +552,7 @@ static struct platform_device mcf_edma = { .platform_data = &mcf_edma_data, } }; -#endif /* IS_ENABLED(CONFIG_MCF_EDMA) */ +#endif /* MCFEDMA_BASE */ #ifdef MCFSDHC_BASE static struct mcf_esdhc_platform_data mcf_esdhc_data = { @@ -610,7 +610,7 @@ static struct platform_device *mcf_devices[] __initdata = { &mcf_i2c5, #endif #endif -#if IS_ENABLED(CONFIG_MCF_EDMA) +#ifdef MCFEDMA_BASE &mcf_edma, #endif #ifdef MCFSDHC_BASE diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 304b04ffea2faf4104cab64cfba5036ad010abf3..7c5d92e2915ca585ec076644770b22986a477aee 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -167,27 +167,27 @@ extern long __user_bad(void); #define __get_user(x, ptr) \ ({ \ - unsigned long __gu_val = 0; \ long __gu_err; \ switch (sizeof(*(ptr))) { \ case 1: \ - __get_user_asm("lbu", (ptr), __gu_val, __gu_err); \ + __get_user_asm("lbu", (ptr), x, __gu_err); \ break; \ case 2: \ - __get_user_asm("lhu", (ptr), __gu_val, __gu_err); \ + __get_user_asm("lhu", (ptr), x, __gu_err); \ break; \ case 4: \ - __get_user_asm("lw", (ptr), __gu_val, __gu_err); \ + __get_user_asm("lw", (ptr), x, __gu_err); \ break; \ - case 8: \ - __gu_err = __copy_from_user(&__gu_val, ptr, 8); \ - if (__gu_err) \ - __gu_err = -EFAULT; \ + case 8: { \ + __u64 __x = 0; \ + __gu_err = raw_copy_from_user(&__x, ptr, 8) ? 
\ + -EFAULT : 0; \ + (x) = (typeof(x))(typeof((x) - (x)))__x; \ break; \ + } \ default: \ /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\ } \ - x = (__force __typeof__(*(ptr))) __gu_val; \ __gu_err; \ }) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index bd4bb55aeab050e2090ce0247092e1f7bba71924..896a29df1a6d63a59adc57d570813ff9a7b70067 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1982,6 +1982,10 @@ config SYS_HAS_CPU_MIPS64_R1 config SYS_HAS_CPU_MIPS64_R2 bool +config SYS_HAS_CPU_MIPS64_R5 + bool + select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT + config SYS_HAS_CPU_MIPS64_R6 bool select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT @@ -2143,7 +2147,7 @@ config CPU_SUPPORTS_ADDRWINCFG bool config CPU_SUPPORTS_HUGEPAGES bool - depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA)) + depends on !(32BIT && (PHYS_ADDR_T_64BIT || EVA)) config MIPS_PGD_C0_CONTEXT bool default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c index aba6e2d6a736c26a0f2e9c23d28bc95a4ee0446a..dcfa0ea912fe1633a10d0acbb87fe1cb85612ee4 100644 --- a/arch/mips/bcm63xx/clk.c +++ b/arch/mips/bcm63xx/clk.c @@ -387,6 +387,12 @@ struct clk *clk_get_parent(struct clk *clk) } EXPORT_SYMBOL(clk_get_parent); +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + return 0; +} +EXPORT_SYMBOL(clk_set_parent); + unsigned long clk_get_rate(struct clk *clk) { if (!clk) diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi index dfb5a7e1bb21d66f0916433db8d652130f6824dd..830e5dd3550e2cf95038bf61d16445f2cc32785c 100644 --- a/arch/mips/boot/dts/ingenic/jz4780.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi @@ -429,7 +429,7 @@ #address-cells = <1>; #size-cells = <1>; - eth0_addr: eth-mac-addr@0x22 { + eth0_addr: eth-mac-addr@22 { reg = <0x22 0x6>; }; }; diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index d56e9b9d2e434d72ec1c8ed398dd70dac8e7b6e0..a994022e32c9f0ee0ed4f91612d5fbd26071e218 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -328,6 +328,7 @@ static int __init octeon_ehci_device_init(void) pd->dev.platform_data = &octeon_ehci_pdata; octeon_ehci_hw_start(&pd->dev); + put_device(&pd->dev); return ret; } @@ -391,6 +392,7 @@ static int __init octeon_ohci_device_init(void) pd->dev.platform_data = &octeon_ohci_pdata; octeon_ohci_hw_start(&pd->dev); + put_device(&pd->dev); return ret; } diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c index 950e6c6e862973b421393f9afcc02f9fdb1f3f0f..fa87e5aa1811d8876713e048d4ccc2fd8e89fd57 100644 --- a/arch/mips/cavium-octeon/octeon-usb.c +++ b/arch/mips/cavium-octeon/octeon-usb.c @@ -544,6 +544,7 @@ static int __init dwc3_octeon_device_init(void) devm_iounmap(&pdev->dev, base); devm_release_mem_region(&pdev->dev, res->start, resource_size(res)); + put_device(&pdev->dev); } } while (node != NULL); diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S index ea5b5a83f1e11b82fea00297ef50d966950a2d37..011d1d678840aa513166e0cfbf209bb975dc6a90 100644 --- a/arch/mips/dec/int-handler.S +++ b/arch/mips/dec/int-handler.S @@ -131,7 +131,7 @@ */ mfc0 t0,CP0_CAUSE # get pending interrupts mfc0 t1,CP0_STATUS -#ifdef CONFIG_32BIT +#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) lw t2,cpu_fpu_mask #endif andi t0,ST0_IM # CAUSE.CE may be non-zero! 
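The microblaze and nios2 __get_user fixes above share one idiom for the 64-bit case: (x) = (typeof(x))(typeof((x) - (x)))__x. Subtracting x from itself yields an integer type even when x has pointer type (ptrdiff_t), so the 64-bit value is narrowed through an integer before the final conversion; the apparent purpose is to avoid "cast to pointer from integer of different size" warnings on 32-bit builds while still truncating correctly for small integer types. A standalone sketch of the idiom (ASSIGN_NARROWED is a made-up name for illustration, not a kernel macro):

#include <stdint.h>
#include <stdio.h>

/* narrow a 64-bit value to typeof(x) via the integer type of (x) - (x) */
#define ASSIGN_NARROWED(x, val64) \
	((x) = (__typeof__(x))(__typeof__((x) - (x)))(val64))

int main(void)
{
	uint64_t raw = 0x1122334455667788ULL;
	unsigned short s;
	char *p;

	ASSIGN_NARROWED(s, raw);	/* integer case: plain truncation */
	ASSIGN_NARROWED(p, raw);	/* pointer case: goes via ptrdiff_t */
	printf("s=0x%x p=%p\n", s, (void *)p);
	return 0;
}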
@@ -139,7 +139,7 @@ beqz t0,spurious -#ifdef CONFIG_32BIT +#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) and t2,t0 bnez t2,fpu # handle FPU immediately #endif @@ -280,7 +280,7 @@ handle_it: j dec_irq_dispatch nop -#ifdef CONFIG_32BIT +#if defined(CONFIG_32BIT) && defined(CONFIG_MIPS_FP_SUPPORT) fpu: lw t0,fpu_kstat_irq nop diff --git a/arch/mips/dec/prom/Makefile b/arch/mips/dec/prom/Makefile index d95016016b42bef365d7b8bb6348888df9e386f2..2bad87551203b2714529f7bbd38a832bf2b30a1e 100644 --- a/arch/mips/dec/prom/Makefile +++ b/arch/mips/dec/prom/Makefile @@ -6,4 +6,4 @@ lib-y += init.o memory.o cmdline.o identify.o console.o -lib-$(CONFIG_32BIT) += locore.o +lib-$(CONFIG_CPU_R3000) += locore.o diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c index eaad0ed4b523bbc0ee70c58c994f91a9c59d0818..99b9b29750db3cd116467991a823777e8bada15c 100644 --- a/arch/mips/dec/setup.c +++ b/arch/mips/dec/setup.c @@ -746,7 +746,8 @@ void __init arch_init_irq(void) dec_interrupt[DEC_IRQ_HALT] = -1; /* Register board interrupts: FPU and cascade. */ - if (dec_interrupt[DEC_IRQ_FPU] >= 0 && cpu_has_fpu) { + if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT) && + dec_interrupt[DEC_IRQ_FPU] >= 0 && cpu_has_fpu) { struct irq_desc *desc_fpu; int irq_fpu; diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h index 62c7dfb90e06c35afa168fa999e408f3b240004d..1e1247add1cf802b9abe3a5216b3a785f83e5988 100644 --- a/arch/mips/include/asm/dec/prom.h +++ b/arch/mips/include/asm/dec/prom.h @@ -43,16 +43,11 @@ */ #define REX_PROM_MAGIC 0x30464354 -#ifdef CONFIG_64BIT - -#define prom_is_rex(magic) 1 /* KN04 and KN05 are REX PROMs. */ - -#else /* !CONFIG_64BIT */ - -#define prom_is_rex(magic) ((magic) == REX_PROM_MAGIC) - -#endif /* !CONFIG_64BIT */ - +/* KN04 and KN05 are REX PROMs, so only do the check for R3k systems. 
*/ +static inline bool prom_is_rex(u32 magic) +{ + return !IS_ENABLED(CONFIG_CPU_R3000) || magic == REX_PROM_MAGIC; +} /* * 3MIN/MAXINE PROM entry points for DS5000/1xx's, DS5000/xx's and diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h index 87a5bfbf8cfe9b925c351f42de612be1b8dcaf82..28572ddfb004a2b384d4ac33ebb865bd4c4e413e 100644 --- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h @@ -36,7 +36,7 @@ nop /* Loongson-3A R2/R3 */ andi t0, (PRID_IMP_MASK | PRID_REV_MASK) - slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) + slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) bnez t0, 2f nop 1: @@ -71,7 +71,7 @@ nop /* Loongson-3A R2/R3 */ andi t0, (PRID_IMP_MASK | PRID_REV_MASK) - slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) + slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) bnez t0, 2f nop 1: diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h index c114a7ba0badd28ddf9dfa41d600774e03d943ec..e77e8b7c00838a1fc1ff30176ac6a7d5d01386dd 100644 --- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h +++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h @@ -317,7 +317,7 @@ enum cvmx_chip_types_enum { /* Functions to return string based on type */ #define ENUM_BRD_TYPE_CASE(x) \ - case x: return(#x + 16); /* Skip CVMX_BOARD_TYPE_ */ + case x: return (&#x[16]); /* Skip CVMX_BOARD_TYPE_ */ static inline const char *cvmx_board_type_to_string(enum cvmx_board_types_enum type) { @@ -408,7 +408,7 @@ static inline const char *cvmx_board_type_to_string(enum } #define ENUM_CHIP_TYPE_CASE(x) \ - case x: return(#x + 15); /* Skip CVMX_CHIP_TYPE */ + case x: return (&#x[15]); /* Skip CVMX_CHIP_TYPE */ static inline const char *cvmx_chip_type_to_string(enum cvmx_chip_types_enum type) { diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 139b4050259fab5b6971f3c0d153cb66b288e887..71153c369f2948ec25ea296bd410468bc2960d6f 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -15,6 +15,7 @@ #define __HAVE_ARCH_PMD_ALLOC_ONE #define __HAVE_ARCH_PUD_ALLOC_ONE +#define __HAVE_ARCH_PGD_FREE #include static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, @@ -49,6 +50,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) extern void pgd_init(unsigned long page); extern pgd_t *pgd_alloc(struct mm_struct *mm); +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + free_pages((unsigned long)pgd, PGD_ORDER); +} + #define __pte_free_tlb(tlb,pte,address) \ do { \ pgtable_pte_page_dtor(pte); \ diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h index bb36a400203df50d8a76a74420213bad6b61b45c..8c56b862fd9c2b003fe00fbad9cb88ae88ea6d16 100644 --- a/arch/mips/include/asm/setup.h +++ b/arch/mips/include/asm/setup.h @@ -16,7 +16,7 @@ static inline void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift, unsigned int timeout) {} #endif -extern void set_handler(unsigned long offset, void *addr, unsigned long len); +void set_handler(unsigned long offset, const void *addr, unsigned long len); extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); typedef void (*vi_handler_t)(void); diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 
535eb49e5904a9380ac7e3dbeda77b5644a251b5..b258dc96841a380d38d6a113a60758427e800cc1 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -351,6 +351,9 @@ asmlinkage void start_secondary(void) cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; + set_cpu_sibling_map(cpu); + set_cpu_core_map(cpu); + cpumask_set_cpu(cpu, &cpu_coherent_mask); notify_cpu_starting(cpu); @@ -362,9 +365,6 @@ asmlinkage void start_secondary(void) /* The CPU is running and counters synchronised, now mark it online */ set_cpu_online(cpu, true); - set_cpu_sibling_map(cpu); - set_cpu_core_map(cpu); - calculate_cpu_foreign_map(); /* diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e0352958e2f720be5b9bd407e208f249716f7cad..b1fe4518bd221b546253103f64bb044c55f95f0e 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -2097,19 +2097,19 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) * If no shadow set is selected then use the default handler * that does normal register saving and standard interrupt exit */ - extern char except_vec_vi, except_vec_vi_lui; - extern char except_vec_vi_ori, except_vec_vi_end; - extern char rollback_except_vec_vi; - char *vec_start = using_rollback_handler() ? - &rollback_except_vec_vi : &except_vec_vi; + extern const u8 except_vec_vi[], except_vec_vi_lui[]; + extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; + extern const u8 rollback_except_vec_vi[]; + const u8 *vec_start = using_rollback_handler() ? + rollback_except_vec_vi : except_vec_vi; #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) - const int lui_offset = &except_vec_vi_lui - vec_start + 2; - const int ori_offset = &except_vec_vi_ori - vec_start + 2; + const int lui_offset = except_vec_vi_lui - vec_start + 2; + const int ori_offset = except_vec_vi_ori - vec_start + 2; #else - const int lui_offset = &except_vec_vi_lui - vec_start; - const int ori_offset = &except_vec_vi_ori - vec_start; + const int lui_offset = except_vec_vi_lui - vec_start; + const int ori_offset = except_vec_vi_ori - vec_start; #endif - const int handler_len = &except_vec_vi_end - vec_start; + const int handler_len = except_vec_vi_end - vec_start; if (handler_len > VECTORSPACING) { /* @@ -2317,7 +2317,7 @@ void per_cpu_trap_init(bool is_boot_cpu) } /* Install CPU exception handler */ -void set_handler(unsigned long offset, void *addr, unsigned long size) +void set_handler(unsigned long offset, const void *addr, unsigned long size) { #ifdef CONFIG_CPU_MICROMIPS memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c index 4916cccf378fdd1d446f6ed23420828133e73563..7a623684d9b5ed415e167af74e0b9af22ea5d74c 100644 --- a/arch/mips/lantiq/clk.c +++ b/arch/mips/lantiq/clk.c @@ -164,6 +164,12 @@ struct clk *clk_get_parent(struct clk *clk) } EXPORT_SYMBOL(clk_get_parent); +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + return 0; +} +EXPORT_SYMBOL(clk_set_parent); + static inline u32 get_counter_resolution(void) { u32 res; diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c index bdf53807d7c2b543cb0faa50f3f41aa565905ac6..bea857c9da8b7aa605b08615951a5a414fdb0bdd 100644 --- a/arch/mips/ralink/ill_acc.c +++ b/arch/mips/ralink/ill_acc.c @@ -61,6 +61,7 @@ static int __init ill_acc_of_setup(void) pdev = of_find_device_by_node(np); if (!pdev) { pr_err("%pOFn: failed to lookup pdev\n", np); + of_node_put(np); return -EINVAL; } diff --git 
a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index dd34f1b32b7976ca316f50235448788d20b49160..0e3c8d761a451b7d67239107b72fa86b32b60d9b 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -310,11 +310,9 @@ static int __init plat_setup_devices(void) static int __init setup_kmac(char *s) { printk(KERN_INFO "korina mac = %s\n", s); - if (!mac_pton(s, korina_dev0_data.mac)) { + if (!mac_pton(s, korina_dev0_data.mac)) printk(KERN_ERR "Invalid mac\n"); - return -EINVAL; - } - return 0; + return 1; } __setup("kmac=", setup_kmac); diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 010ba5f1d7dd6b45445e51faef8a1b0809d2f3a6..54500e81efe59a040531fc863b2658b5cb7981f4 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -70,9 +70,7 @@ static inline void set_fs(mm_segment_t fs) * versions are void (ie, don't return a value as such). */ -#define get_user __get_user \ - -#define __get_user(x, ptr) \ +#define get_user(x, ptr) \ ({ \ long __gu_err = 0; \ __get_user_check((x), (ptr), __gu_err); \ @@ -85,6 +83,14 @@ static inline void set_fs(mm_segment_t fs) (void)0; \ }) +#define __get_user(x, ptr) \ +({ \ + long __gu_err = 0; \ + const __typeof__(*(ptr)) __user *__p = (ptr); \ + __get_user_err((x), __p, (__gu_err)); \ + __gu_err; \ +}) + #define __get_user_check(x, ptr, err) \ ({ \ const __typeof__(*(ptr)) __user *__p = (ptr); \ @@ -165,12 +171,18 @@ do { \ : "r"(addr), "i"(-EFAULT) \ : "cc") -#define put_user __put_user \ +#define put_user(x, ptr) \ +({ \ + long __pu_err = 0; \ + __put_user_check((x), (ptr), __pu_err); \ + __pu_err; \ +}) #define __put_user(x, ptr) \ ({ \ long __pu_err = 0; \ - __put_user_err((x), (ptr), __pu_err); \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + __put_user_err((x), __p, __pu_err); \ __pu_err; \ }) diff --git a/arch/nds32/kernel/perf_event_cpu.c b/arch/nds32/kernel/perf_event_cpu.c index 0ce6f9f307e6ad34188ced46a9f8f5c71fc1c564..f3879196078133d6645f667d0b900cfc41deb1f5 100644 --- a/arch/nds32/kernel/perf_event_cpu.c +++ b/arch/nds32/kernel/perf_event_cpu.c @@ -1363,6 +1363,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); unsigned long fp = 0; unsigned long gp = 0; unsigned long lp = 0; @@ -1371,7 +1372,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, leaf_fp = 0; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } @@ -1479,9 +1480,10 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct stackframe fr; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } @@ -1493,20 +1495,23 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, unsigned long perf_instruction_pointer(struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + /* However, NDS32 does not support virtualization */ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) - return perf_guest_cbs->get_guest_ip(); + if (guest_cbs && guest_cbs->is_in_guest()) + return guest_cbs->get_guest_ip(); return instruction_pointer(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) { + struct perf_guest_info_callbacks 
*guest_cbs = perf_get_guest_cbs(); int misc = 0; /* However, NDS32 does not support virtualization */ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { - if (perf_guest_cbs->is_user_mode()) + if (guest_cbs && guest_cbs->is_in_guest()) { + if (guest_cbs->is_user_mode()) misc |= PERF_RECORD_MISC_GUEST_USER; else misc |= PERF_RECORD_MISC_GUEST_KERNEL; diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index a741abbed6fbf5ae8738730e4abeebe6c1627cb0..8a386e6c07df19905d7dd7413d75a11f7818be58 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -89,6 +89,7 @@ extern __must_check long strnlen_user(const char __user *s, long n); /* Optimized macros */ #define __get_user_asm(val, insn, addr, err) \ { \ + unsigned long __gu_val; \ __asm__ __volatile__( \ " movi %0, %3\n" \ "1: " insn " %1, 0(%2)\n" \ @@ -97,14 +98,20 @@ extern __must_check long strnlen_user(const char __user *s, long n); " .section __ex_table,\"a\"\n" \ " .word 1b, 2b\n" \ " .previous" \ - : "=&r" (err), "=r" (val) \ + : "=&r" (err), "=r" (__gu_val) \ : "r" (addr), "i" (-EFAULT)); \ + val = (__force __typeof__(*(addr)))__gu_val; \ } -#define __get_user_unknown(val, size, ptr, err) do { \ +extern void __get_user_unknown(void); + +#define __get_user_8(val, ptr, err) do { \ + u64 __val = 0; \ err = 0; \ - if (__copy_from_user(&(val), ptr, size)) { \ + if (raw_copy_from_user(&(__val), ptr, sizeof(val))) { \ err = -EFAULT; \ + } else { \ + val = (typeof(val))(typeof((val) - (val)))__val; \ } \ } while (0) @@ -120,8 +127,11 @@ do { \ case 4: \ __get_user_asm(val, "ldw", ptr, err); \ break; \ + case 8: \ + __get_user_8(val, ptr, err); \ + break; \ default: \ - __get_user_unknown(val, size, ptr, err); \ + __get_user_unknown(); \ break; \ } \ } while (0) @@ -130,9 +140,7 @@ do { \ ({ \ long __gu_err = -EFAULT; \ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ - unsigned long __gu_val = 0; \ - __get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\ - (x) = (__force __typeof__(x))__gu_val; \ + __get_user_common(x, sizeof(*(ptr)), __gu_ptr, __gu_err); \ __gu_err; \ }) @@ -140,11 +148,9 @@ do { \ ({ \ long __gu_err = -EFAULT; \ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ - unsigned long __gu_val = 0; \ if (access_ok( __gu_ptr, sizeof(*__gu_ptr))) \ - __get_user_common(__gu_val, sizeof(*__gu_ptr), \ + __get_user_common(x, sizeof(*__gu_ptr), \ __gu_ptr, __gu_err); \ - (x) = (__force __typeof__(x))__gu_val; \ __gu_err; \ }) diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c index cf2dca2ac7c37d8f49af4a0a725a03509cd37629..e45491d1d3e4425c6c6d288959f64b46773f1360 100644 --- a/arch/nios2/kernel/signal.c +++ b/arch/nios2/kernel/signal.c @@ -36,10 +36,10 @@ struct rt_sigframe { static inline int rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw, - struct ucontext *uc, int *pr2) + struct ucontext __user *uc, int *pr2) { int temp; - unsigned long *gregs = uc->uc_mcontext.gregs; + unsigned long __user *gregs = uc->uc_mcontext.gregs; int err; /* Always make any pending restarted system calls return -EINTR */ @@ -102,10 +102,11 @@ asmlinkage int do_rt_sigreturn(struct switch_stack *sw) { struct pt_regs *regs = (struct pt_regs *)(sw + 1); /* Verify, can we follow the stack back */ - struct rt_sigframe *frame = (struct rt_sigframe *) regs->sp; + struct rt_sigframe __user *frame; sigset_t set; int rval; + frame = (struct rt_sigframe __user *) regs->sp; if (!access_ok(frame, sizeof(*frame))) goto badframe; @@ -124,10 +125,10 @@ 
asmlinkage int do_rt_sigreturn(struct switch_stack *sw) return 0; } -static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) +static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs) { struct switch_stack *sw = (struct switch_stack *)regs - 1; - unsigned long *gregs = uc->uc_mcontext.gregs; + unsigned long __user *gregs = uc->uc_mcontext.gregs; int err = 0; err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version); @@ -162,8 +163,9 @@ static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs) return err; } -static inline void *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, - size_t frame_size) +static inline void __user *get_sigframe(struct ksignal *ksig, + struct pt_regs *regs, + size_t frame_size) { unsigned long usp; @@ -174,13 +176,13 @@ static inline void *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, usp = sigsp(usp, ksig); /* Verify, is it 32 or 64 bit aligned */ - return (void *)((usp - frame_size) & -8UL); + return (void __user *)((usp - frame_size) & -8UL); } static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { - struct rt_sigframe *frame; + struct rt_sigframe __user *frame; int err = 0; frame = get_sigframe(ksig, regs, sizeof(*frame)); diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h index 3a7eeae6f56a8259714670dd00154ee8da8657d4..aa1c7e98722e3a6bfb39903d59162bfb1c611da9 100644 --- a/arch/openrisc/include/asm/syscalls.h +++ b/arch/openrisc/include/asm/syscalls.h @@ -22,9 +22,11 @@ asmlinkage long sys_or1k_atomic(unsigned long type, unsigned long *v1, asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, int tls); +asmlinkage long __sys_clone3(struct clone_args __user *uargs, size_t size); asmlinkage long __sys_fork(void); #define sys_clone __sys_clone +#define sys_clone3 __sys_clone3 #define sys_fork __sys_fork #endif /* __ASM_OPENRISC_SYSCALLS_H */ diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S index 98e4f97db51596050fc7d02416ebf655286f9366..b42d32d79b2e614d9ea62fa0117f2fee49846c75 100644 --- a/arch/openrisc/kernel/entry.S +++ b/arch/openrisc/kernel/entry.S @@ -1170,6 +1170,11 @@ ENTRY(__sys_clone) l.j _fork_save_extra_regs_and_call l.nop +ENTRY(__sys_clone3) + l.movhi r29,hi(sys_clone3) + l.j _fork_save_extra_regs_and_call + l.ori r29,r29,lo(sys_clone3) + ENTRY(__sys_fork) l.movhi r29,hi(sys_fork) l.ori r29,r29,lo(sys_fork) diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h index a303ae9a77f41fe8ceef5490ab2be5b394fc199d..16ee41e77174f14e016dac77ab6e512c49b02816 100644 --- a/arch/parisc/include/asm/special_insns.h +++ b/arch/parisc/include/asm/special_insns.h @@ -2,28 +2,32 @@ #ifndef __PARISC_SPECIAL_INSNS_H #define __PARISC_SPECIAL_INSNS_H -#define lpa(va) ({ \ - unsigned long pa; \ - __asm__ __volatile__( \ - "copy %%r0,%0\n\t" \ - "lpa %%r0(%1),%0" \ - : "=r" (pa) \ - : "r" (va) \ - : "memory" \ - ); \ - pa; \ +#define lpa(va) ({ \ + unsigned long pa; \ + __asm__ __volatile__( \ + "copy %%r0,%0\n" \ + "8:\tlpa %%r0(%1),%0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \ + : "=&r" (pa) \ + : "r" (va) \ + : "memory" \ + ); \ + pa; \ }) -#define lpa_user(va) ({ \ - unsigned long pa; \ - __asm__ __volatile__( \ - "copy %%r0,%0\n\t" \ - "lpa %%r0(%%sr3,%1),%0" \ - : "=r" (pa) \ - : "r" (va) \ - : "memory" \ - ); \ - pa; \ +#define lpa_user(va) ({ \ + unsigned long pa; 
\ + __asm__ __volatile__( \ + "copy %%r0,%0\n" \ + "8:\tlpa %%r0(%%sr3,%1),%0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \ + : "=&r" (pa) \ + : "r" (va) \ + : "memory" \ + ); \ + pa; \ }) #define mfctl(reg) ({ \ diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h index 8ecc1f0c0483d5a4a60dd44c08b6e49b9d3b776e..d0e090a2c000da4a94ee55fa84e2956ae96c3821 100644 --- a/arch/parisc/include/asm/traps.h +++ b/arch/parisc/include/asm/traps.h @@ -17,6 +17,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err); const char *trap_name(unsigned long code); void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long address); +int handle_nadtlb_fault(struct pt_regs *regs); #endif #endif diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c index 80a0ab372802db148a1ccb0867bc0eb8ae3b7b69..e59574f65e641a09cbedb2e0ca7fa5e6045f3650 100644 --- a/arch/parisc/kernel/patch.c +++ b/arch/parisc/kernel/patch.c @@ -40,10 +40,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags, *need_unmap = 1; set_fixmap(fixmap, page_to_phys(page)); - if (flags) - raw_spin_lock_irqsave(&patch_lock, *flags); - else - __acquire(&patch_lock); + raw_spin_lock_irqsave(&patch_lock, *flags); return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); } @@ -52,10 +49,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags) { clear_fixmap(fixmap); - if (flags) - raw_spin_unlock_irqrestore(&patch_lock, *flags); - else - __release(&patch_lock); + raw_spin_unlock_irqrestore(&patch_lock, *flags); } void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) @@ -67,8 +61,9 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) int mapped; /* Make sure we don't have any aliases in cache */ - flush_kernel_vmap_range(addr, len); - flush_icache_range(start, end); + flush_kernel_dcache_range_asm(start, end); + flush_kernel_icache_range_asm(start, end); + flush_tlb_kernel_range(start, end); p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped); @@ -81,8 +76,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) * We're crossing a page boundary, so * need to remap */ - flush_kernel_vmap_range((void *)fixmap, - (p-fixmap) * sizeof(*p)); + flush_kernel_dcache_range_asm((unsigned long)fixmap, + (unsigned long)p); + flush_tlb_kernel_range((unsigned long)fixmap, + (unsigned long)p); if (mapped) patch_unmap(FIX_TEXT_POKE0, &flags); p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, @@ -90,10 +87,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) } } - flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p)); + flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p); + flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p); if (mapped) patch_unmap(FIX_TEXT_POKE0, &flags); - flush_icache_range(start, end); } void __kprobes __patch_text(void *addr, u32 insn) diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 43f56335759a44a4c9347e49c1e0b99cdff0d31d..bce47e0fb692cb77cfc803d95b41939d2d8799c0 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -661,6 +661,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs) by hand. 
Technically we need to emulate: fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */ + if (code == 17 && handle_nadtlb_fault(regs)) + return; fault_address = regs->ior; fault_space = regs->isr; break; @@ -784,7 +786,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) * unless pagefault_disable() was called before. */ - if (fault_space == 0 && !faulthandler_disabled()) + if (faulthandler_disabled() || fault_space == 0) { /* Clean up and return if in exception table. */ if (fixup_exception(regs)) diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 237d20dd5622de53fb207ec5888f972c7237a9e3..286cec4d86d7b0165a7b81ffcfdc48512a568acf 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -340,7 +340,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop) : "r" (val), "r" (regs->ior), "r" (regs->isr) : "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER ); - return 0; + return ret; } static int emulate_std(struct pt_regs *regs, int frreg, int flop) { @@ -397,7 +397,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) __asm__ __volatile__ ( " mtsp %4, %%sr1\n" " zdep %2, 29, 2, %%r19\n" -" dep %%r0, 31, 2, %2\n" +" dep %%r0, 31, 2, %3\n" " mtsar %%r19\n" " zvdepi -2, 32, %%r19\n" "1: ldw 0(%%sr1,%3),%%r20\n" @@ -409,7 +409,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) " andcm %%r21, %%r19, %%r21\n" " or %1, %%r20, %1\n" " or %2, %%r21, %2\n" -"3: stw %1,0(%%sr1,%1)\n" +"3: stw %1,0(%%sr1,%3)\n" "4: stw %%r1,4(%%sr1,%3)\n" "5: stw %2,8(%%sr1,%3)\n" " copy %%r0, %0\n" @@ -596,7 +596,6 @@ void handle_unaligned(struct pt_regs *regs) ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */ break; } -#ifdef CONFIG_PA20 switch (regs->iir & OPCODE2_MASK) { case OPCODE_FLDD_L: @@ -607,22 +606,23 @@ void handle_unaligned(struct pt_regs *regs) flop=1; ret = emulate_std(regs, R2(regs->iir),1); break; +#ifdef CONFIG_PA20 case OPCODE_LDD_L: ret = emulate_ldd(regs, R2(regs->iir),0); break; case OPCODE_STD_L: ret = emulate_std(regs, R2(regs->iir),0); break; - } #endif + } switch (regs->iir & OPCODE3_MASK) { case OPCODE_FLDW_L: flop=1; - ret = emulate_ldw(regs, R2(regs->iir),0); + ret = emulate_ldw(regs, R2(regs->iir), 1); break; case OPCODE_LDW_M: - ret = emulate_ldw(regs, R2(regs->iir),1); + ret = emulate_ldw(regs, R2(regs->iir), 0); break; case OPCODE_FSTW_L: diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c index f03adb1999e77ed05f1282be1b24aa5c601403ce..e362d6a147311264462d2c2789d26ea39822da3a 100644 --- a/arch/parisc/lib/iomap.c +++ b/arch/parisc/lib/iomap.c @@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr) return *((u64 *)addr); } +u64 ioread64_lo_hi(const void __iomem *addr) +{ + u32 low, high; + + low = ioread32(addr); + high = ioread32(addr + sizeof(u32)); + + return low + ((u64)high << 32); +} + u64 ioread64_hi_lo(const void __iomem *addr) { u32 low, high; @@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr) } } +void iowrite64_lo_hi(u64 val, void __iomem *addr) +{ + iowrite32(val, addr); + iowrite32(val >> 32, addr + sizeof(u32)); +} + void iowrite64_hi_lo(u64 val, void __iomem *addr) { iowrite32(val >> 32, addr + sizeof(u32)); @@ -527,6 +543,7 @@ EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); EXPORT_SYMBOL(ioread64); EXPORT_SYMBOL(ioread64be); +EXPORT_SYMBOL(ioread64_lo_hi); EXPORT_SYMBOL(ioread64_hi_lo); EXPORT_SYMBOL(iowrite8); EXPORT_SYMBOL(iowrite16); @@ -535,6 +552,7 @@ EXPORT_SYMBOL(iowrite32); 
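The ioread64_lo_hi()/iowrite64_lo_hi() accessors added above split a 64-bit MMIO access into two 32-bit accesses in a defined low-then-high order. A usage sketch; the device, base pointer and COUNTER_OFF offset are hypothetical, only the accessors come from this patch:

	/* Devices often latch the high word of a 64-bit register when the
	 * low word is touched; such registers must be accessed low-first,
	 * which is exactly the order the helpers above guarantee. */
	static u64 read_counter(void __iomem *base)
	{
		return ioread64_lo_hi(base + COUNTER_OFF);	/* low word, then high */
	}

	static void write_counter(void __iomem *base, u64 val)
	{
		iowrite64_lo_hi(val, base + COUNTER_OFF);	/* low word, then high */
	}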
EXPORT_SYMBOL(iowrite32be); EXPORT_SYMBOL(iowrite64); EXPORT_SYMBOL(iowrite64be); +EXPORT_SYMBOL(iowrite64_lo_hi); EXPORT_SYMBOL(iowrite64_hi_lo); EXPORT_SYMBOL(ioread8_rep); EXPORT_SYMBOL(ioread16_rep); diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 716960f5d92ea460a4b1084954e7bb69ac6cc476..5faa3cff47387ac289fe5792e260c4b1bde0bd2b 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -424,3 +424,92 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, goto no_context; pagefault_out_of_memory(); } + +/* Handle non-access data TLB miss faults. + * + * For probe instructions, accesses to userspace are considered allowed + * if they lie in a valid VMA and the access type matches. We are not + * allowed to handle MM faults here so there may be situations where an + * actual access would fail even though a probe was successful. + */ +int +handle_nadtlb_fault(struct pt_regs *regs) +{ + unsigned long insn = regs->iir; + int breg, treg, xreg, val = 0; + struct vm_area_struct *vma, *prev_vma; + struct task_struct *tsk; + struct mm_struct *mm; + unsigned long address; + unsigned long acc_type; + + switch (insn & 0x380) { + case 0x280: + /* FDC instruction */ + fallthrough; + case 0x380: + /* PDC and FIC instructions */ + if (printk_ratelimit()) { + pr_warn("BUG: nullifying cache flush/purge instruction\n"); + show_regs(regs); + } + if (insn & 0x20) { + /* Base modification */ + breg = (insn >> 21) & 0x1f; + xreg = (insn >> 16) & 0x1f; + if (breg && xreg) + regs->gr[breg] += regs->gr[xreg]; + } + regs->gr[0] |= PSW_N; + return 1; + + case 0x180: + /* PROBE instruction */ + treg = insn & 0x1f; + if (regs->isr) { + tsk = current; + mm = tsk->mm; + if (mm) { + /* Search for VMA */ + address = regs->ior; + mmap_read_lock(mm); + vma = find_vma_prev(mm, address, &prev_vma); + mmap_read_unlock(mm); + + /* + * Check if access to the VMA is okay. + * We don't allow for stack expansion. + */ + acc_type = (insn & 0x40) ? VM_WRITE : VM_READ; + if (vma + && address >= vma->vm_start + && (vma->vm_flags & acc_type) == acc_type) + val = 1; + } + } + if (treg) + regs->gr[treg] = val; + regs->gr[0] |= PSW_N; + return 1; + + case 0x300: + /* LPA instruction */ + if (insn & 0x20) { + /* Base modification */ + breg = (insn >> 21) & 0x1f; + xreg = (insn >> 16) & 0x1f; + if (breg && xreg) + regs->gr[breg] += regs->gr[xreg]; + } + treg = insn & 0x1f; + if (treg) + regs->gr[treg] = 0; + regs->gr[0] |= PSW_N; + return 1; + + default: + break; + } + + return 0; +} diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 8f10cc6ee0fcea554aa47356047ef911ce0b4709..319afa00cdf7bfa4686c2bcb5f72750c746d5cf3 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -346,9 +346,9 @@ static void __init setup_bootmem(void) static bool kernel_set_to_readonly; -static void __init map_pages(unsigned long start_vaddr, - unsigned long start_paddr, unsigned long size, - pgprot_t pgprot, int force) +static void __ref map_pages(unsigned long start_vaddr, + unsigned long start_paddr, unsigned long size, + pgprot_t pgprot, int force) { pmd_t *pmd; pte_t *pg_table; @@ -458,7 +458,7 @@ void __init set_kernel_text_rw(int enable_read_write) flush_tlb_all(); } -void __ref free_initmem(void) +void free_initmem(void) { unsigned long init_begin = (unsigned long)__init_begin; unsigned long init_end = (unsigned long)__init_end; @@ -472,7 +472,6 @@ void __ref free_initmem(void) /* The init text pages are marked R-X. 
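The switch in handle_nadtlb_fault() above decodes raw instruction fields with open-coded shifts. Written out as illustrative helpers (hypothetical names; same masks and shifts as the code, counting bit 0 at the LSB):

	/* Base register field: bits 25..21 of the instruction word. */
	static inline int nadtlb_breg(unsigned long insn)
	{
		return (insn >> 21) & 0x1f;
	}

	/* Index register field: bits 20..16. */
	static inline int nadtlb_xreg(unsigned long insn)
	{
		return (insn >> 16) & 0x1f;
	}

	/* Target register field: bits 4..0. */
	static inline int nadtlb_treg(unsigned long insn)
	{
		return insn & 0x1f;
	}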
We have to * flush the icache and mark them RW- * - * This is tricky, because map_pages is in the init section. * Do a dummy remap of the data section first (the data * section is already PAGE_KERNEL) to pull in the TLB entries * for map_kernel */ diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 35bf7634e095ec34071c5b2d0bbe2b4e6de73d87..a7c353ea01665c5cacc4a7947c5579888c407daa 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -172,7 +172,7 @@ else CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5)) CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4) endif -else +else ifdef CONFIG_PPC_BOOK3E_64 CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64 endif diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi index c90702b04a530f798886e71be187cf36574d94b1..48e5cd61599c6ba0409f106fda093bf19f40b5a9 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi @@ -79,6 +79,7 @@ fman0: fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfc000 0x1000>; + fsl,erratum-a009885; }; xmdio0: mdio@fd000 { @@ -86,6 +87,7 @@ fman0: fman@400000 { #size-cells = <0>; compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio"; reg = <0xfd000 0x1000>; + fsl,erratum-a009885; }; }; diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts new file mode 100644 index 0000000000000000000000000000000000000000..73f8c998c64dfefa6859cd5bcda8ddafcdc51f38 --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * T1040RDB-REV-A Device Tree Source + * + * Copyright 2014 - 2015 Freescale Semiconductor Inc. 
+ * + */ + +#include "t1040rdb.dts" + +/ { + model = "fsl,T1040RDB-REV-A"; + compatible = "fsl,T1040RDB-REV-A"; +}; + +&seville_port0 { + label = "ETH5"; +}; + +&seville_port2 { + label = "ETH7"; +}; + +&seville_port4 { + label = "ETH9"; +}; + +&seville_port6 { + label = "ETH11"; +}; diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb.dts b/arch/powerpc/boot/dts/fsl/t1040rdb.dts index af0c8a6f561385ce9fcf9f24a4effccf9df21fe2..b6733e7e65805e47fc52d345040dd190ca0ea2e7 100644 --- a/arch/powerpc/boot/dts/fsl/t1040rdb.dts +++ b/arch/powerpc/boot/dts/fsl/t1040rdb.dts @@ -119,7 +119,7 @@ managed = "in-band-status"; phy-handle = <&phy_qsgmii_0>; phy-mode = "qsgmii"; - label = "ETH5"; + label = "ETH3"; status = "okay"; }; @@ -135,7 +135,7 @@ managed = "in-band-status"; phy-handle = <&phy_qsgmii_2>; phy-mode = "qsgmii"; - label = "ETH7"; + label = "ETH5"; status = "okay"; }; @@ -151,7 +151,7 @@ managed = "in-band-status"; phy-handle = <&phy_qsgmii_4>; phy-mode = "qsgmii"; - label = "ETH9"; + label = "ETH7"; status = "okay"; }; @@ -167,7 +167,7 @@ managed = "in-band-status"; phy-handle = <&phy_qsgmii_6>; phy-mode = "qsgmii"; - label = "ETH11"; + label = "ETH9"; status = "okay"; }; diff --git a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi index 099a598c74c00dda112ace336ab970d6c4e709a9..bfe1ed5be337492ee6146dc1b36ac4f746fb812a 100644 --- a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi +++ b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi @@ -139,12 +139,12 @@ fman@400000 { ethernet@e6000 { phy-handle = <&phy_rgmii_0>; - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; }; ethernet@e8000 { phy-handle = <&phy_rgmii_1>; - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; }; mdio0: mdio@fc000 { diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h index a8982d52f6b1d6c850600d343c632961e2a8502c..cbde06d0fb380c02ccd1a903c3213abd77efbfd9 100644 --- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h @@ -102,6 +102,8 @@ extern s32 patch__hash_page_B, patch__hash_page_C; extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2; extern s32 patch__flush_hash_B; +int __init find_free_bat(void); +unsigned int bat_block_size(unsigned long base, unsigned long top); #endif /* !__ASSEMBLY__ */ /* We happily ignore the smaller BATs on 601, we don't actually use diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 523d3e6e2400917f95bbcda21cec6b6b8b10d453..94c5c66231a8c394b00ab24127e9041ee95c9ef6 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -142,6 +142,7 @@ static inline bool pte_user(pte_t pte) #ifndef __ASSEMBLY__ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); +void unmap_kernel_page(unsigned long va); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index d9af0d5a60d5fa2b85ea7bb51336c94a970e97ef..2b4af824bdc5505fea9c653570a10712a8c66b44 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1061,6 +1061,8 @@ static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t p return hash__map_kernel_page(ea, pa, prot); } +void unmap_kernel_page(unsigned long va); + static inline int __meminit vmemmap_create_mapping(unsigned long start, unsigned long 
page_size, unsigned long phys) diff --git a/arch/powerpc/include/asm/cpu_setup_power.h b/arch/powerpc/include/asm/cpu_setup_power.h new file mode 100644 index 0000000000000000000000000000000000000000..24be9131f8032d80b002f2bf0b055e54981717bf --- /dev/null +++ b/arch/powerpc/include/asm/cpu_setup_power.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2020 IBM Corporation + */ +void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power7(void); +void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power8(void); +void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power9(void); +void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec); +void __restore_cpu_power10(void); diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h index 591b2f4deed533df0832db93bec2441fd738ec9b..897cc68758d44aca52afb362b0e018ceb7d1ea82 100644 --- a/arch/powerpc/include/asm/fixmap.h +++ b/arch/powerpc/include/asm/fixmap.h @@ -111,8 +111,10 @@ static inline void __set_fixmap(enum fixed_addresses idx, BUILD_BUG_ON(idx >= __end_of_fixed_addresses); else if (WARN_ON(idx >= __end_of_fixed_addresses)) return; - - map_kernel_page(__fix_to_virt(idx), phys, flags); + if (pgprot_val(flags)) + map_kernel_page(__fix_to_virt(idx), phys, flags); + else + unmap_kernel_page(__fix_to_virt(idx)); } #define __early_set_fixmap __set_fixmap diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 3e8e19f5746c7bf18b515402801b90a13b958a39..00c8cda1c9c3179e41ae91691fced33f58b9ac70 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -382,6 +382,8 @@ #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 #define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5 #define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6 +#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY (1ull << 56) // IBM bit 7 +#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8 /* Flag values used in H_REGISTER_PROC_TBL hcall */ #define PROC_TABLE_OP_MASK 0x18 diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 0363734ff56e0f9fc0e2c6ddb013cea24d7cbbd6..0f2acbb9667400226d98d18509c36f7e57c37b69 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -38,6 +38,8 @@ #define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE) #endif +#endif /* CONFIG_PPC64 */ + /* * flags for paca->irq_soft_mask */ @@ -46,8 +48,6 @@ #define IRQS_PMI_DISABLED 2 #define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED) -#endif /* CONFIG_PPC64 */ - #ifndef __ASSEMBLY__ extern void replay_system_reset(void); @@ -175,6 +175,42 @@ static inline bool arch_irqs_disabled(void) return arch_irqs_disabled_flags(arch_local_save_flags()); } +static inline void set_pmi_irq_pending(void) +{ + /* + * Invoked from PMU callback functions to set PMI bit in the paca. + * This has to be called with irq's disabled (via hard_irq_disable()). + */ + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) + WARN_ON_ONCE(mfmsr() & MSR_EE); + + get_paca()->irq_happened |= PACA_IRQ_PMI; +} + +static inline void clear_pmi_irq_pending(void) +{ + /* + * Invoked from PMU callback functions to clear the pending PMI bit + * in the paca. 
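A hedged sketch of how a powerpc PMU driver might use the three helpers defined above; the function and call site are illustrative, but the hard-disabled-interrupts requirement comes from the helpers' own comments:

	static void example_pmu_quiesce(void)
	{
		hard_irq_disable();	/* helpers must run with MSR_EE clear */

		/*
		 * If a PMI was latched in the paca while soft-masked and the
		 * overflow is handled here anyway, drop the pending bit so
		 * the interrupt is not replayed a second time.
		 */
		if (pmi_irq_pending())
			clear_pmi_irq_pending();

		/* ... freeze counters, read PMCs ... */
	}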
+ */ + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) + WARN_ON_ONCE(mfmsr() & MSR_EE); + + get_paca()->irq_happened &= ~PACA_IRQ_PMI; +} + +static inline bool pmi_irq_pending(void) +{ + /* + * Invoked from PMU callback functions to check if there is a pending + * PMI bit in the paca. + */ + if (get_paca()->irq_happened & PACA_IRQ_PMI) + return true; + + return false; +} + #ifdef CONFIG_PPC_BOOK3S /* * To support disabling and enabling of irq with PMI, set of @@ -296,6 +332,10 @@ extern void irq_set_pending_from_srr1(unsigned long srr1); extern void force_external_irq_replay(void); +static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val) +{ + regs->softe = val; +} #else /* CONFIG_PPC64 */ static inline unsigned long arch_local_save_flags(void) @@ -364,6 +404,13 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs) static inline void may_hard_irq_enable(void) { } +static inline void clear_pmi_irq_pending(void) { } +static inline void set_pmi_irq_pending(void) { } +static inline bool pmi_irq_pending(void) { return false; } + +static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val) +{ +} #endif /* CONFIG_PPC64 */ #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 58635960403c058b6c0d29f5e7b77c015c004303..0182b291248ace8e4cb2bbbe7ce3422524de322c 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -344,25 +344,37 @@ static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr) */ static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr) { - __asm__ __volatile__("stbcix %0,0,%1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + stbcix %0,0,%1; \ + .machine pop;" : : "r" (val), "r" (paddr) : "memory"); } static inline void __raw_rm_writew(u16 val, volatile void __iomem *paddr) { - __asm__ __volatile__("sthcix %0,0,%1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + sthcix %0,0,%1; \ + .machine pop;" : : "r" (val), "r" (paddr) : "memory"); } static inline void __raw_rm_writel(u32 val, volatile void __iomem *paddr) { - __asm__ __volatile__("stwcix %0,0,%1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + stwcix %0,0,%1; \ + .machine pop;" : : "r" (val), "r" (paddr) : "memory"); } static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr) { - __asm__ __volatile__("stdcix %0,0,%1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + stdcix %0,0,%1; \ + .machine pop;" : : "r" (val), "r" (paddr) : "memory"); } @@ -374,7 +386,10 @@ static inline void __raw_rm_writeq_be(u64 val, volatile void __iomem *paddr) static inline u8 __raw_rm_readb(volatile void __iomem *paddr) { u8 ret; - __asm__ __volatile__("lbzcix %0,0, %1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + lbzcix %0,0, %1; \ + .machine pop;" : "=r" (ret) : "r" (paddr) : "memory"); return ret; } @@ -382,7 +397,10 @@ static inline u8 __raw_rm_readb(volatile void __iomem *paddr) static inline u16 __raw_rm_readw(volatile void __iomem *paddr) { u16 ret; - __asm__ __volatile__("lhzcix %0,0, %1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + lhzcix %0,0, %1; \ + .machine pop;" : "=r" (ret) : "r" (paddr) : "memory"); return ret; } @@ -390,7 +408,10 @@ static inline u16 __raw_rm_readw(volatile void __iomem *paddr) static inline u32 __raw_rm_readl(volatile void __iomem *paddr) { u32 ret; - __asm__ __volatile__("lwzcix %0,0, %1" + __asm__ 
__volatile__(".machine push; \ + .machine power6; \ + lwzcix %0,0, %1; \ + .machine pop;" : "=r" (ret) : "r" (paddr) : "memory"); return ret; } @@ -398,7 +419,10 @@ static inline u32 __raw_rm_readl(volatile void __iomem *paddr) static inline u64 __raw_rm_readq(volatile void __iomem *paddr) { u64 ret; - __asm__ __volatile__("ldcix %0,0, %1" + __asm__ __volatile__(".machine push; \ + .machine power6; \ + ldcix %0,0, %1; \ + .machine pop;" : "=r" (ret) : "r" (paddr) : "memory"); return ret; } diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h index fea12c6b915cf190cc5e78dfad0bc103546f0035..39dcfc3c28ce9ca594fdabaa880f01ec1b579f26 100644 --- a/arch/powerpc/include/asm/livepatch.h +++ b/arch/powerpc/include/asm/livepatch.h @@ -75,6 +75,11 @@ extern void livepatch_branch_stub_end(void); #ifdef PPC64_ELF_ABI_v1 extern void livepatch_branch_trampoline(void); extern void livepatch_branch_trampoline_end(void); +extern void livepatch_brk_trampoline(void); +void livepatch_create_btramp(struct ppc64_klp_btramp_entry *entry, unsigned long addr); +#else +static inline void livepatch_create_btramp(struct ppc64_klp_btramp_entry *entry, + unsigned long addr) {} #endif /* PPC64_ELF_ABI_v1 */ int livepatch_create_branch(unsigned long pc, @@ -89,6 +94,12 @@ struct arch_klp_data { #else unsigned long trampoline; #endif /* PPC64_ELF_ABI_v1 */ + + /* + * Saved opcode at the entry of the old func (which maybe replaced + * with breakpoint). + */ + u32 saved_opcode; }; #elif defined(CONFIG_PPC32) @@ -97,11 +108,32 @@ struct arch_klp_data { #define LJMP_INSN_SIZE 4 struct arch_klp_data { u32 old_insns[LJMP_INSN_SIZE]; + + /* + * Saved opcode at the entry of the old func (which maybe replaced + * with breakpoint). + */ + u32 saved_opcode; }; #endif /* CONFIG_PPC64 */ +struct stackframe { + unsigned long sp; + unsigned long pc; + unsigned long nip; +}; + +#ifdef PPC64_ELF_ABI_v1 +struct klp_func_node; +void arch_klp_set_brk_func(struct klp_func_node *func_node, void *new_func); +#endif +int klp_brk_handler(struct pt_regs *regs); +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +int arch_klp_module_check_calltrace(void *data); +int klp_unwind_frame(struct task_struct *tsk, struct stackframe *frame); #endif /* CONFIG_LIVEPATCH_FTRACE */ diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 96522f7f0618a0eb9096858f2b63ff2b98bc3046..e53cc07e6b9ec9d433f4969d9100d277e7ac0c3a 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -65,6 +65,7 @@ extern int icache_44x_need_flush; #ifndef __ASSEMBLY__ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); +void unmap_kernel_page(unsigned long va); #endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h index 4158c90c572a1c0e39f4303c84570f98d57d14dd..a4d475c0fc2c023ee810f1cb69f48ff4693b77d0 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -315,6 +315,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma, #define __swp_entry_to_pte(x) __pte((x).val) int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot); +void unmap_kernel_page(unsigned long va); extern int 
__meminit vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys); diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 300d4c105a3a5a88275f0fb5858679f9db899510..f2c5c26869f1a43888681bdee0633aab3cbf3784 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -132,9 +132,10 @@ static inline bool pfn_valid(unsigned long pfn) #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) -#define virt_addr_valid(vaddr) ({ \ - unsigned long _addr = (unsigned long)vaddr; \ - (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \ +#define virt_addr_valid(vaddr) ({ \ + unsigned long _addr = (unsigned long)vaddr; \ + _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \ + pfn_valid(virt_to_pfn(_addr)); \ }) /* diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index a6e3700c4566adbe194f76643f2464bfdc16d2b1..f0c0816f572707a2f0467c6b78a21b869c17e539 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -449,6 +449,7 @@ #define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) +#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b)) #define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i)) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index f4b98903064f5ea9f22953d50e85f418ba6ea8d0..6afb14b6bbc26ca80646015a51f72095cfb73bb6 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -865,6 +865,7 @@ #define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in userspace */ #define MMCR0_EBE 0x00100000UL /* Event based branch enable */ #define MMCR0_PMCC 0x000c0000UL /* PMC control */ +#define MMCR0_PMCCEXT ASM_CONST(0x00000200) /* PMCCEXT control */ #define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */ #define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/ #define MMCR0_PMCjCE ASM_CONST(0x00004000) /* PMCj count enable*/ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index f53bfefb4a5773e15d2b36d4b9ee0bb3046e0831..6b808bcdecd52d4fb143ef77a319fa42e612ed2b 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -229,8 +229,11 @@ extern long __get_user_bad(void); */ #define __get_user_atomic_128_aligned(kaddr, uaddr, err) \ __asm__ __volatile__( \ + ".machine push\n" \ + ".machine altivec\n" \ "1: lvx 0,0,%1 # get user\n" \ " stvx 0,0,%2 # put kernel\n" \ + ".machine pop\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %0,%3\n" \ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index cbe9dbfaf005f049ad0b04bea8a0aea062d2af37..32c617ba6901a992677fce86cc586c45d5c2929c 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -11,6 +11,7 @@ CFLAGS_prom_init.o += -fPIC CFLAGS_btext.o += -fPIC endif +CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_prom_init.o += 
$(DISABLE_LATENT_ENTROPY_PLUGIN) CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) @@ -94,7 +95,7 @@ obj-$(CONFIG_44x) += cpu_setup_44x.o obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o obj-$(CONFIG_PPC_DOORBELL) += dbell.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o -obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch_$(BITS).o +obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch.o livepatch_$(BITS).o extra-$(CONFIG_PPC64) := head_64.o extra-$(CONFIG_PPC_BOOK3S_32) := head_book3s_32.o diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 803c2a45b22ac2092137e1b2a715d9f51a23b434..1cffb5e7c38d6c0f1315dcb9c729842b351a68d9 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -241,8 +241,10 @@ int __init btext_find_display(int allow_nonstdout) rc = btext_initialize(np); printk("result: %d\n", rc); } - if (rc == 0) + if (rc == 0) { + of_node_put(np); break; + } } return rc; } diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S deleted file mode 100644 index 704e8b9501eee84ade894358b57986268c2ac17f..0000000000000000000000000000000000000000 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ /dev/null @@ -1,252 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * This file contains low level CPU setup functions. - * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) - */ - -#include -#include -#include -#include -#include -#include -#include - -/* Entry: r3 = crap, r4 = ptr to cputable entry - * - * Note that we can be called twice for pseudo-PVRs - */ -_GLOBAL(__setup_cpu_power7) - mflr r11 - bl __init_hvmode_206 - mtlr r11 - beqlr - li r0,0 - mtspr SPRN_LPID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - li r4,(LPCR_LPES1 >> LPCR_LPES_SH) - bl __init_LPCR_ISA206 - mtlr r11 - blr - -_GLOBAL(__restore_cpu_power7) - mflr r11 - mfmsr r3 - rldicl. r0,r3,4,63 - beqlr - li r0,0 - mtspr SPRN_LPID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - li r4,(LPCR_LPES1 >> LPCR_LPES_SH) - bl __init_LPCR_ISA206 - mtlr r11 - blr - -_GLOBAL(__setup_cpu_power8) - mflr r11 - bl __init_FSCR - bl __init_PMU - bl __init_PMU_ISA207 - bl __init_hvmode_206 - mtlr r11 - beqlr - li r0,0 - mtspr SPRN_LPID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - ori r3, r3, LPCR_PECEDH - li r4,0 /* LPES = 0 */ - bl __init_LPCR_ISA206 - bl __init_HFSCR - bl __init_PMU_HV - bl __init_PMU_HV_ISA207 - mtlr r11 - blr - -_GLOBAL(__restore_cpu_power8) - mflr r11 - bl __init_FSCR - bl __init_PMU - bl __init_PMU_ISA207 - mfmsr r3 - rldicl. 
r0,r3,4,63 - mtlr r11 - beqlr - li r0,0 - mtspr SPRN_LPID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - ori r3, r3, LPCR_PECEDH - li r4,0 /* LPES = 0 */ - bl __init_LPCR_ISA206 - bl __init_HFSCR - bl __init_PMU_HV - bl __init_PMU_HV_ISA207 - mtlr r11 - blr - -_GLOBAL(__setup_cpu_power10) - mflr r11 - bl __init_FSCR_power10 - bl __init_PMU - bl __init_PMU_ISA31 - b 1f - -_GLOBAL(__setup_cpu_power9) - mflr r11 - bl __init_FSCR_power9 - bl __init_PMU -1: bl __init_hvmode_206 - mtlr r11 - beqlr - li r0,0 - mtspr SPRN_PSSCR,r0 - mtspr SPRN_LPID,r0 - mtspr SPRN_PID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) - or r3, r3, r4 - LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) - andc r3, r3, r4 - li r4,0 /* LPES = 0 */ - bl __init_LPCR_ISA300 - bl __init_HFSCR - bl __init_PMU_HV - mtlr r11 - blr - -_GLOBAL(__restore_cpu_power10) - mflr r11 - bl __init_FSCR_power10 - bl __init_PMU - bl __init_PMU_ISA31 - b 1f - -_GLOBAL(__restore_cpu_power9) - mflr r11 - bl __init_FSCR_power9 - bl __init_PMU -1: mfmsr r3 - rldicl. r0,r3,4,63 - mtlr r11 - beqlr - li r0,0 - mtspr SPRN_PSSCR,r0 - mtspr SPRN_LPID,r0 - mtspr SPRN_PID,r0 - LOAD_REG_IMMEDIATE(r0, PCR_MASK) - mtspr SPRN_PCR,r0 - mfspr r3,SPRN_LPCR - LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC) - or r3, r3, r4 - LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR) - andc r3, r3, r4 - li r4,0 /* LPES = 0 */ - bl __init_LPCR_ISA300 - bl __init_HFSCR - bl __init_PMU_HV - mtlr r11 - blr - -__init_hvmode_206: - /* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */ - mfmsr r3 - rldicl. r0,r3,4,63 - bnelr - ld r5,CPU_SPEC_FEATURES(r4) - LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST) - andc r5,r5,r6 - std r5,CPU_SPEC_FEATURES(r4) - blr - -__init_LPCR_ISA206: - /* Setup a sane LPCR: - * Called with initial LPCR in R3 and desired LPES 2-bit value in R4 - * - * LPES = 0b01 (HSRR0/1 used for 0x500) - * PECE = 0b111 - * DPFD = 4 - * HDICE = 0 - * VC = 0b100 (VPM0=1, VPM1=0, ISL=0) - * VRMASD = 0b10000 (L=1, LP=00) - * - * Other bits untouched for now - */ - li r5,0x10 - rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5 - - /* POWER9 has no VRMASD */ -__init_LPCR_ISA300: - rldimi r3,r4, LPCR_LPES_SH, 64-LPCR_LPES_SH-2 - ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2) - li r5,4 - rldimi r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3 - clrrdi r3,r3,1 /* clear HDICE */ - li r5,4 - rldimi r3,r5, LPCR_VC_SH, 0 - mtspr SPRN_LPCR,r3 - isync - blr - -__init_FSCR_power10: - mfspr r3, SPRN_FSCR - ori r3, r3, FSCR_PREFIX - mtspr SPRN_FSCR, r3 - // fall through - -__init_FSCR_power9: - mfspr r3, SPRN_FSCR - ori r3, r3, FSCR_SCV - mtspr SPRN_FSCR, r3 - // fall through - -__init_FSCR: - mfspr r3,SPRN_FSCR - ori r3,r3,FSCR_TAR|FSCR_EBB - mtspr SPRN_FSCR,r3 - blr - -__init_HFSCR: - mfspr r3,SPRN_HFSCR - ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\ - HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP - mtspr SPRN_HFSCR,r3 - blr - -__init_PMU_HV: - li r5,0 - mtspr SPRN_MMCRC,r5 - blr - -__init_PMU_HV_ISA207: - li r5,0 - mtspr SPRN_MMCRH,r5 - blr - -__init_PMU: - li r5,0 - mtspr SPRN_MMCRA,r5 - mtspr SPRN_MMCR0,r5 - mtspr SPRN_MMCR1,r5 - mtspr SPRN_MMCR2,r5 - blr - -__init_PMU_ISA207: - li r5,0 - mtspr SPRN_MMCRS,r5 - blr - -__init_PMU_ISA31: - li r5,0 - mtspr SPRN_MMCR3,r5 - LOAD_REG_IMMEDIATE(r5, MMCRA_BHRB_DISABLE) - mtspr SPRN_MMCRA,r5 - blr diff --git a/arch/powerpc/kernel/cpu_setup_power.c 
b/arch/powerpc/kernel/cpu_setup_power.c new file mode 100644 index 0000000000000000000000000000000000000000..3cca88ee96d711813cb64430b838d95ec63bb98b --- /dev/null +++ b/arch/powerpc/kernel/cpu_setup_power.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2020, Jordan Niethe, IBM Corporation. + * + * This file contains low level CPU setup functions. + * Originally written in assembly by Benjamin Herrenschmidt & various other + * authors. + */ + +#include +#include +#include +#include +#include + +/* Disable CPU_FTR_HVMODE and return false if MSR:HV is not set */ +static bool init_hvmode_206(struct cpu_spec *t) +{ + u64 msr; + + msr = mfmsr(); + if (msr & MSR_HV) + return true; + + t->cpu_features &= ~(CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST); + return false; +} + +static void init_LPCR_ISA300(u64 lpcr, u64 lpes) +{ + /* POWER9 has no VRMASD */ + lpcr |= (lpes << LPCR_LPES_SH) & LPCR_LPES; + lpcr |= LPCR_PECE0|LPCR_PECE1|LPCR_PECE2; + lpcr |= (4ull << LPCR_DPFD_SH) & LPCR_DPFD; + lpcr &= ~LPCR_HDICE; /* clear HDICE */ + lpcr |= (4ull << LPCR_VC_SH); + mtspr(SPRN_LPCR, lpcr); + isync(); +} + +/* + * Setup a sane LPCR: + * Called with initial LPCR and desired LPES 2-bit value + * + * LPES = 0b01 (HSRR0/1 used for 0x500) + * PECE = 0b111 + * DPFD = 4 + * HDICE = 0 + * VC = 0b100 (VPM0=1, VPM1=0, ISL=0) + * VRMASD = 0b10000 (L=1, LP=00) + * + * Other bits untouched for now + */ +static void init_LPCR_ISA206(u64 lpcr, u64 lpes) +{ + lpcr |= (0x10ull << LPCR_VRMASD_SH) & LPCR_VRMASD; + init_LPCR_ISA300(lpcr, lpes); +} + +static void init_FSCR(void) +{ + u64 fscr; + + fscr = mfspr(SPRN_FSCR); + fscr |= FSCR_TAR|FSCR_EBB; + mtspr(SPRN_FSCR, fscr); +} + +static void init_FSCR_power9(void) +{ + u64 fscr; + + fscr = mfspr(SPRN_FSCR); + fscr |= FSCR_SCV; + mtspr(SPRN_FSCR, fscr); + init_FSCR(); +} + +static void init_FSCR_power10(void) +{ + u64 fscr; + + fscr = mfspr(SPRN_FSCR); + fscr |= FSCR_PREFIX; + mtspr(SPRN_FSCR, fscr); + init_FSCR_power9(); +} + +static void init_HFSCR(void) +{ + u64 hfscr; + + hfscr = mfspr(SPRN_HFSCR); + hfscr |= HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|HFSCR_DSCR|\ + HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP; + mtspr(SPRN_HFSCR, hfscr); +} + +static void init_PMU_HV(void) +{ + mtspr(SPRN_MMCRC, 0); +} + +static void init_PMU_HV_ISA207(void) +{ + mtspr(SPRN_MMCRH, 0); +} + +static void init_PMU(void) +{ + mtspr(SPRN_MMCRA, 0); + mtspr(SPRN_MMCR0, 0); + mtspr(SPRN_MMCR1, 0); + mtspr(SPRN_MMCR2, 0); +} + +static void init_PMU_ISA207(void) +{ + mtspr(SPRN_MMCRS, 0); +} + +static void init_PMU_ISA31(void) +{ + mtspr(SPRN_MMCR3, 0); + mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE); + mtspr(SPRN_MMCR0, MMCR0_PMCCEXT); +} + +/* + * Note that we can be called twice of pseudo-PVRs. + * The parameter offset is not used. 
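Each rldimi field insert from the deleted assembly becomes a shift-and-mask expression in the C versions of init_LPCR_ISA206()/init_LPCR_ISA300() below. The general shape, as a hedged illustration (the helper name is hypothetical):

	/* What "rldimi rT,rS,sh,mb" did for an LPCR field: replace the
	 * bits under mask with val shifted into place. */
	static inline u64 lpcr_set_field(u64 lpcr, u64 val, u64 mask, int sh)
	{
		return (lpcr & ~mask) | ((val << sh) & mask);
	}

Note that the C code uses plain |= for the field inserts, which matches rldimi only when the field bits start out clear; HDICE, which may be set on entry, is therefore cleared explicitly with lpcr &= ~LPCR_HDICE.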
+ */ + +void __setup_cpu_power7(unsigned long offset, struct cpu_spec *t) +{ + if (!init_hvmode_206(t)) + return; + + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH); +} + +void __restore_cpu_power7(void) +{ + u64 msr; + + msr = mfmsr(); + if (!(msr & MSR_HV)) + return; + + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH); +} + +void __setup_cpu_power8(unsigned long offset, struct cpu_spec *t) +{ + init_FSCR(); + init_PMU(); + init_PMU_ISA207(); + + if (!init_hvmode_206(t)) + return; + + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */ + init_HFSCR(); + init_PMU_HV(); + init_PMU_HV_ISA207(); +} + +void __restore_cpu_power8(void) +{ + u64 msr; + + init_FSCR(); + init_PMU(); + init_PMU_ISA207(); + + msr = mfmsr(); + if (!(msr & MSR_HV)) + return; + + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */ + init_HFSCR(); + init_PMU_HV(); + init_PMU_HV_ISA207(); +} + +void __setup_cpu_power9(unsigned long offset, struct cpu_spec *t) +{ + init_FSCR_power9(); + init_PMU(); + + if (!init_hvmode_206(t)) + return; + + mtspr(SPRN_PSSCR, 0); + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\ + LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0); + init_HFSCR(); + init_PMU_HV(); +} + +void __restore_cpu_power9(void) +{ + u64 msr; + + init_FSCR_power9(); + init_PMU(); + + msr = mfmsr(); + if (!(msr & MSR_HV)) + return; + + mtspr(SPRN_PSSCR, 0); + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\ + LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0); + init_HFSCR(); + init_PMU_HV(); +} + +void __setup_cpu_power10(unsigned long offset, struct cpu_spec *t) +{ + init_FSCR_power10(); + init_PMU(); + init_PMU_ISA31(); + + if (!init_hvmode_206(t)) + return; + + mtspr(SPRN_PSSCR, 0); + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\ + LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0); + init_HFSCR(); + init_PMU_HV(); +} + +void __restore_cpu_power10(void) +{ + u64 msr; + + init_FSCR_power10(); + init_PMU(); + init_PMU_ISA31(); + + msr = mfmsr(); + if (!(msr & MSR_HV)) + return; + + mtspr(SPRN_PSSCR, 0); + mtspr(SPRN_LPID, 0); + mtspr(SPRN_PID, 0); + mtspr(SPRN_PCR, PCR_MASK); + init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\ + LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0); + init_HFSCR(); + init_PMU_HV(); +} diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 29de58d4dfb767e01409629796e955b7c7a80841..8fdb40ee86d11930e4bd25220d16672ffa1f21ce 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -60,19 +60,15 @@ extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec); #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PPC64 +#include extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); extern void __restore_cpu_pa6t(void); 
extern void __restore_cpu_ppc970(void); -extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_power7(void); -extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_power8(void); -extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_power9(void); -extern void __setup_cpu_power10(unsigned long offset, struct cpu_spec* spec); -extern void __restore_cpu_power10(void); +extern long __machine_check_early_realmode_p7(struct pt_regs *regs); +extern long __machine_check_early_realmode_p8(struct pt_regs *regs); +extern long __machine_check_early_realmode_p9(struct pt_regs *regs); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 1098863e17ee8d90cdd9b1363ed16b40a2748e95..9d079659b24d344698f33451935ad68b2cef75be 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -454,6 +454,7 @@ static void init_pmu_power10(void) mtspr(SPRN_MMCR3, 0); mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE); + mtspr(SPRN_MMCR0, MMCR0_PMCCEXT); } static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 71ff3a4f10a6bd8c57dc0a1428cf13bfdf1bd1dc..ad3281b092be41a3b4cfb069bc332165cb66d518 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -1068,5 +1068,41 @@ _GLOBAL(livepatch_branch_trampoline) blr _GLOBAL(livepatch_branch_trampoline_end) nop + +/* + * This function is the trampoline of livepatch brk handler. + * + * brk -> traps + * - klp_brk_handler + * - set R11 to new_func address + * - set NIP to livepatch_brk_trampoline address + * see arch/powerpc/kernel/livepatch.c + */ +_GLOBAL(livepatch_brk_trampoline) + mflr r0 + std r0, 16(r1) + std r2, 24(r1) + stdu r1, -STACK_FRAME_OVERHEAD(r1) + + /* Call NEW_FUNC */ + ld r12, 0(r11) /* load new func address to R12 */ +#ifdef PPC64_ELF_ABI_v1 + ld r2, 8(r11) /* set up new R2 */ +#endif + mtctr r12 /* load R12(new func address) to CTR */ + bctrl /* call new func */ + + /* + * Now we are returning from the patched function to the original + * caller A. We are free to use r11, r12 and we can use r2 until we + * restore it. + */ + addi r1, r1, STACK_FRAME_OVERHEAD + ld r2, 24(r1) + ld r0, 16(r1) + mtlr r0 + + /* Return to original caller of live patched function */ + blr #endif #endif /* CONFIG_LIVEPATCH_WO_FTRACE */ diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index eddf362caedce8091bcc417bde5a99547d836019..c3bb800dc4352398dcad2186fb6c6fdb624fd1e3 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1641,6 +1641,14 @@ int __init setup_fadump(void) else if (fw_dump.reserve_dump_area_size) fw_dump.ops->fadump_init_mem_struct(&fw_dump); + /* + * In case of panic, fadump is triggered via ppc_panic_event() + * panic notifier. Setting crash_kexec_post_notifiers to 'true' + * lets panic() function take crash friendly path before panic + * notifiers are invoked. 
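For context, a condensed view of the ordering in kernel/panic.c that this flag selects (heavily abridged, not the full function):

	void panic(const char *fmt, ...)
	{
		/* ... */
		if (!_crash_kexec_post_notifiers)
			__crash_kexec(NULL);	/* default: kexec before notifiers */

		atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
		/* ... */
		if (_crash_kexec_post_notifiers)
			__crash_kexec(NULL);	/* notifiers ran first */
		/* ... */
	}

With the flag set, the panic notifier chain, which on pseries includes the ppc_panic_event() that triggers fadump, runs before any crash kexec attempt.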
+ */ + crash_kexec_post_notifiers = true; + return 1; } subsys_initcall(setup_fadump); diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index a1ae00689e0f4898d17a6782a9e82afc7756edd7..aeb9bc9958749e8a67a4887bf09b3f6fae51172d 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -626,7 +627,7 @@ start_here: b . /* prevent prefetch past rfi */ /* Set up the initial MMU state so we can do the first level of - * kernel initialization. This maps the first 16 MBytes of memory 1:1 + * kernel initialization. This maps the first 32 MBytes of memory 1:1 * virtual to physical and more importantly sets the cache mode. */ initial_mmu: @@ -663,6 +664,12 @@ initial_mmu: tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ + li r0,62 /* TLB slot 62 */ + addis r4,r4,SZ_16M@h + addis r3,r3,SZ_16M@h + tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ + tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ + isync /* Establish the exception vector base diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 617eba82531cb9916c5d34c7a5f087849a724601..d89cf802d9aa771788b550cf9359571b77c5a4a8 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -669,7 +669,7 @@ static void __init kvm_use_magic_page(void) on_each_cpu(kvm_map_magic_page, &features, 1); /* Quick self-test to see if the mapping works */ - if (!fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) { + if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) { kvm_patching_worked = false; return; } diff --git a/arch/powerpc/kernel/livepatch.c b/arch/powerpc/kernel/livepatch.c new file mode 100644 index 0000000000000000000000000000000000000000..d568e8c8b16bd6cb777adf12482fb527b71aa5d2 --- /dev/null +++ b/arch/powerpc/kernel/livepatch.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * livepatch.c - powerpc-specific Kernel Live Patching Core + * + * Copyright (C) 2022 Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
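This new file implements breakpoint-assisted livepatching. A hedged sketch of the intended call order during a patch transition; the sequencing is an assumption about the generic livepatch core, which is not part of this hunk:

	/* 1. Save the old function's first instruction, then arm a trap. */
	arch_klp_add_breakpoint(&func_node->arch_data, old_func);

	/*
	 * 2. Calls into old_func now trap into klp_brk_handler(), which
	 *    redirects NIP to the replacement (via livepatch_brk_trampoline
	 *    on ELF ABI v1), so a half-patched body is never executed.
	 */

	/* 3. Install the permanent branch, then disarm the trap. */
	arch_klp_patch_func(func);
	arch_klp_remove_breakpoint(&func_node->arch_data, old_func);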
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)old_func); + + arch_data->saved_opcode = ppc_inst_val(insn); + patch_instruction((struct ppc_inst *)old_func, ppc_inst(BREAKPOINT_INSTRUCTION)); + return 0; +} + +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + patch_instruction((struct ppc_inst *)old_func, ppc_inst(arch_data->saved_opcode)); +} + +int klp_brk_handler(struct pt_regs *regs) +{ + void *brk_func = NULL; + unsigned long addr = regs->nip; + + if (user_mode(regs)) + return 0; + + brk_func = klp_get_brk_func((void *)addr); + if (!brk_func) + return 0; + +#ifdef PPC64_ELF_ABI_v1 + /* + * Only static trampoline can be used here to prevent + * resource release caused by rollback. + */ + regs->gpr[PT_R11] = (unsigned long)brk_func; + regs->nip = ppc_function_entry((void *)livepatch_brk_trampoline); +#else + regs->nip = (unsigned long)brk_func; +#endif + + return 1; +} + +int klp_unwind_frame(struct task_struct *tsk, struct stackframe *frame) +{ + unsigned long *stack; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + int ftrace_idx = 0; +#endif + + if (!validate_sp(frame->sp, tsk, STACK_FRAME_OVERHEAD)) + return -1; + + if (frame->nip != 0) + frame->nip = 0; + + stack = (unsigned long *)frame->sp; + + /* + * When switching to the exception stack, + * we save the NIP in pt_regs + * + * See if this is an exception frame. + * We look for the "regshere" marker in the current frame. + */ + if (validate_sp(frame->sp, tsk, STACK_INT_FRAME_SIZE) + && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { + struct pt_regs *regs = (struct pt_regs *) + (frame->sp + STACK_FRAME_OVERHEAD); + frame->nip = regs->nip; + pr_debug("--- interrupt: task = %d/%s, trap %lx at NIP=x%lx/%pS, LR=0x%lx/%pS\n", + tsk->pid, tsk->comm, regs->trap, + regs->nip, (void *)regs->nip, + regs->link, (void *)regs->link); + } + + frame->sp = stack[0]; + frame->pc = stack[STACK_FRAME_LR_SAVE]; +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* + * IMHO these tests do not belong in + * arch-dependent code, they are generic. 
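Usage sketch for klp_unwind_frame(), mirroring the walk done by klp_walk_stackframe()/do_check_calltrace() later in this series; the initial sp/pc values are assumptions standing in for a task's saved state:

	struct stackframe frame = {
		.sp  = saved_sp,	/* hypothetical: task's saved stack pointer */
		.pc  = saved_lr,	/* hypothetical: saved link register */
		.nip = 0,
	};

	while (klp_unwind_frame(tsk, &frame) == 0)
		/* frame.nip is non-zero only when an exception frame was
		 * crossed on the way up. */
		pr_debug("pc=%pS nip=%pS\n", (void *)frame.pc, (void *)frame.nip);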
+ */ + frame->pc = ftrace_graph_ret_addr(tsk, &ftrace_idx, frame->pc, stack); +#endif + + return 0; +} diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index 99acabd730e0d83067c315806b289a4c0a055917..8f53386e7cf8bb29edcdd20636529f2e0f18de44 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -62,14 +62,10 @@ struct klp_func_list { int force; }; -struct stackframe { - unsigned long sp; - unsigned long pc; -}; - struct walk_stackframe_args { int enable; struct klp_func_list *check_funcs; + struct module *mod; int ret; }; @@ -83,16 +79,6 @@ static inline unsigned long klp_size_to_check(unsigned long func_size, return size; } -static inline int klp_compare_address(unsigned long pc, unsigned long func_addr, - const char *func_name, unsigned long check_size) -{ - if (pc >= func_addr && pc < func_addr + check_size) { - pr_err("func %s is in use!\n", func_name); - return -EBUSY; - } - return 0; -} - static bool check_jump_insn(unsigned long func_addr) { unsigned long i; @@ -144,7 +130,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { if (enable) { - if (func->force == KLP_ENFORCEMENT) + if (func->patched || func->force == KLP_ENFORCEMENT) continue; /* * When enable, checking the currently @@ -235,20 +221,6 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, return 0; } -static int unwind_frame(struct task_struct *tsk, struct stackframe *frame) -{ - - unsigned long *stack; - - if (!validate_sp(frame->sp, tsk, STACK_FRAME_OVERHEAD)) - return -1; - - stack = (unsigned long *)frame->sp; - frame->sp = stack[0]; - frame->pc = stack[STACK_FRAME_LR_SAVE]; - return 0; -} - void notrace klp_walk_stackframe(struct stackframe *frame, int (*fn)(struct stackframe *, void *), struct task_struct *tsk, void *data) @@ -258,7 +230,7 @@ void notrace klp_walk_stackframe(struct stackframe *frame, if (fn(frame, data)) break; - ret = unwind_frame(tsk, frame); + ret = klp_unwind_frame(tsk, frame); if (ret < 0) break; } @@ -282,9 +254,14 @@ static int klp_check_jump_func(struct stackframe *frame, void *data) struct walk_stackframe_args *args = data; struct klp_func_list *check_funcs = args->check_funcs; - if (!check_func_list(check_funcs, &args->ret, frame->pc)) { + /* check the PC first */ + if (!check_func_list(check_funcs, &args->ret, frame->pc)) return args->ret; - } + + /* check NIP when the exception stack switching */ + if (frame->nip && !check_func_list(check_funcs, &args->ret, frame->nip)) + return args->ret; + return 0; } @@ -299,21 +276,12 @@ static void free_list(struct klp_func_list **funcs) } } -int klp_check_calltrace(struct klp_patch *patch, int enable) +static int do_check_calltrace(struct walk_stackframe_args *args, + int (*fn)(struct stackframe *, void *)) { struct task_struct *g, *t; struct stackframe frame; unsigned long *stack; - int ret = 0; - struct klp_func_list *check_funcs = NULL; - struct walk_stackframe_args args = { - .ret = 0 - }; - - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) - goto out; - args.check_funcs = check_funcs; for_each_process_thread(g, t) { if (t == current) { @@ -352,21 +320,70 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) frame.sp = (unsigned long)stack; frame.pc = stack[STACK_FRAME_LR_SAVE]; - if (check_funcs != NULL) { - klp_walk_stackframe(&frame, klp_check_jump_func, t, &args); - if (args.ret) { - ret = 
@@ -299,21 +276,12 @@ static void free_list(struct klp_func_list **funcs)
 	}
 }
 
-int klp_check_calltrace(struct klp_patch *patch, int enable)
+static int do_check_calltrace(struct walk_stackframe_args *args,
+		int (*fn)(struct stackframe *, void *))
 {
 	struct task_struct *g, *t;
 	struct stackframe frame;
 	unsigned long *stack;
-	int ret = 0;
-	struct klp_func_list *check_funcs = NULL;
-	struct walk_stackframe_args args = {
-		.ret = 0
-	};
-
-	ret = klp_check_activeness_func(patch, enable, &check_funcs);
-	if (ret)
-		goto out;
-	args.check_funcs = check_funcs;
 
 	for_each_process_thread(g, t) {
 		if (t == current) {
@@ -352,21 +320,70 @@ int klp_check_calltrace(struct klp_patch *patch, int enable)
 		frame.sp = (unsigned long)stack;
 		frame.pc = stack[STACK_FRAME_LR_SAVE];
-		if (check_funcs != NULL) {
-			klp_walk_stackframe(&frame, klp_check_jump_func, t, &args);
-			if (args.ret) {
-				ret = args.ret;
-				pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
-				show_stack(t, NULL, KERN_INFO);
-				goto out;
-			}
+		frame.nip = 0;
+		klp_walk_stackframe(&frame, fn, t, args);
+		if (args->ret) {
+			pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
+			show_stack(t, NULL, KERN_INFO);
+			return args->ret;
 		}
 	}
+	return 0;
+}
+
+int klp_check_calltrace(struct klp_patch *patch, int enable)
+{
+	int ret = 0;
+	struct klp_func_list *check_funcs = NULL;
+	struct walk_stackframe_args args = {
+		.ret = 0
+	};
+
+	ret = klp_check_activeness_func(patch, enable, &check_funcs);
+	if (ret) {
+		pr_err("collect active functions failed, ret=%d\n", ret);
+		goto out;
+	}
+	if (!check_funcs)
+		goto out;
+
+	args.check_funcs = check_funcs;
+	ret = do_check_calltrace(&args, klp_check_jump_func);
 out:
 	free_list(&check_funcs);
 	return ret;
 }
+
+static int check_module_calltrace(struct stackframe *frame, void *data)
+{
+	struct walk_stackframe_args *args = data;
+
+	/* check the PC first */
+	if (within_module_core(frame->pc, args->mod))
+		goto err_out;
+
+	/* also check the NIP saved when switching to the exception stack */
+	if (frame->nip && within_module_core(frame->nip, args->mod))
+		goto err_out;
+
+	return 0;
+
+err_out:
+	pr_err("module %s is in use!\n", args->mod->name);
+	return (args->ret = -EBUSY);
+}
+
+int arch_klp_module_check_calltrace(void *data)
+{
+	struct walk_stackframe_args args = {
+		.mod = (struct module *)data,
+		.ret = 0
+	};
+
+	return do_check_calltrace(&args, check_module_calltrace);
+}
+
 #endif
 
 #ifdef CONFIG_LIVEPATCH_WO_FTRACE
@@ -392,24 +409,21 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func)
 	return ret;
 }
 
-int arch_klp_patch_func(struct klp_func *func)
+static int do_patch(unsigned long pc, unsigned long new_addr)
 {
-	struct klp_func_node *func_node;
-	unsigned long pc, new_addr;
-	long ret;
+	int ret;
 	int i;
 	u32 insns[LJMP_INSN_SIZE];
 
-	func_node = func->func_node;
-	list_add_rcu(&func->stack_node, &func_node->func_stack);
-	pc = (unsigned long)func->old_func;
-	new_addr = (unsigned long)func->new_func;
 	if (offset_in_range(pc, new_addr, SZ_32M)) {
 		struct ppc_inst instr;
 
 		create_branch(&instr, (struct ppc_inst *)pc, new_addr, 0);
-		if (patch_instruction((struct ppc_inst *)pc, instr))
-			goto ERR_OUT;
+		ret = patch_instruction((struct ppc_inst *)pc, instr);
+		if (ret) {
+			pr_err("patch instruction small range failed, ret=%d\n", ret);
+			return -EPERM;
+		}
 	} else {
 		/*
 		 * lis r12,sym@ha
 		 * addi r12,r12,sym@l
 		 * mtctr r12
 		 * bctr
 		 */
 		for (i = 0; i < LJMP_INSN_SIZE; i++) {
 			ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i),
 						ppc_inst(insns[i]));
-			if (ret)
-				goto ERR_OUT;
+			if (ret) {
+				pr_err("patch instruction(%d) large range failed, ret=%d\n",
+				       i, ret);
+				return -EPERM;
+			}
 		}
 	}
+	return 0;
+}
 
-	return 0;
-
-ERR_OUT:
-	list_del_rcu(&func->stack_node);
+int arch_klp_patch_func(struct klp_func *func)
+{
+	struct klp_func_node *func_node;
+	int ret;
 
-	return -EPERM;
+	func_node = func->func_node;
+	list_add_rcu(&func->stack_node, &func_node->func_stack);
+	ret = do_patch((unsigned long)func->old_func, (unsigned long)func->new_func);
+	if (ret)
+		list_del_rcu(&func->stack_node);
+	return ret;
 }
 
 void arch_klp_unpatch_func(struct klp_func *func)
 {
 	struct klp_func_node *func_node;
 	struct klp_func *next_func;
-	unsigned long pc, new_addr;
-	u32 insns[LJMP_INSN_SIZE];
+	unsigned long pc;
 	int i;
+	int ret;
 
 	func_node = func->func_node;
 	pc = (unsigned long)func_node->old_func;
-	if (list_is_singular(&func_node->func_stack)) {
-		for (i = 0; i < LJMP_INSN_SIZE; i++)
-			insns[i] = func_node->arch_data.old_insns[i];
-
-		list_del_rcu(&func->stack_node);
-
-		for (i = 0; i < LJMP_INSN_SIZE; i++)
-			patch_instruction((struct ppc_inst *)(((u32 *)pc) + i),
-					  ppc_inst(insns[i]));
+	list_del_rcu(&func->stack_node);
+	if (list_empty(&func_node->func_stack)) {
+		for (i = 0; i < LJMP_INSN_SIZE; i++) {
+			ret = patch_instruction((struct ppc_inst *)(((u32 *)pc) + i),
+						ppc_inst(func_node->arch_data.old_insns[i]));
+			if (ret) {
+				pr_err("restore instruction(%d) failed, ret=%d\n", i, ret);
+				return;
+			}
+		}
 	} else {
-		list_del_rcu(&func->stack_node);
 		next_func = list_first_or_null_rcu(&func_node->func_stack,
 					struct klp_func, stack_node);
-
-		new_addr = (unsigned long)next_func->new_func;
-		if (offset_in_range(pc, new_addr, SZ_32M)) {
-			struct ppc_inst instr;
-
-			create_branch(&instr, (struct ppc_inst *)pc, new_addr, 0);
-			patch_instruction((struct ppc_inst *)pc, instr);
-		} else {
-			/*
-			 * lis r12,sym@ha
-			 * addi r12,r12,sym@l
-			 * mtctr r12
-			 * bctr
-			 */
-			insns[0] = 0x3d800000 + ((new_addr + 0x8000) >> 16);
-			insns[1] = 0x398c0000 + (new_addr & 0xffff);
-			insns[2] = 0x7d8903a6;
-			insns[3] = 0x4e800420;
-
-			for (i = 0; i < LJMP_INSN_SIZE; i++)
-				patch_instruction((struct ppc_inst *)(((u32 *)pc) + i),
-						  ppc_inst(insns[i]));
-		}
+		do_patch(pc, (unsigned long)next_func->new_func);
 	}
 }
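
Both the 32-bit and 64-bit variants now share the same shape: arch_klp_patch_func() pushes the new function onto the per-function func_stack and branches to it, and arch_klp_unpatch_func() pops it, restoring the saved instructions when the stack empties or re-branching to the next-newest patch otherwise. A minimal user-space sketch of that stack discipline, with illustrative names and addresses rather than the kernel's klp_func_node API:

#include <stdio.h>
#include <stdlib.h>

/*
 * Rough model of the func_stack discipline: the newest patch wins, and
 * removing it falls back to the next patch or the original code.
 */
struct patch {
	unsigned long new_addr;
	struct patch *next;		/* stack: head is the active patch */
};

static const unsigned long orig_entry = 0xc0001000UL;
static unsigned long active = 0xc0001000UL;
static struct patch *stack;

static void patch_push(unsigned long new_addr)
{
	struct patch *p = malloc(sizeof(*p));

	p->new_addr = new_addr;
	p->next = stack;
	stack = p;
	active = new_addr;		/* models do_patch(pc, new_addr) */
}

static void patch_pop(void)
{
	struct patch *p = stack;

	if (!p)
		return;
	stack = p->next;
	free(p);
	/* restore the original insns, or re-branch to the next patch */
	active = stack ? stack->new_addr : orig_entry;
}

int main(void)
{
	patch_push(0xd0000000UL);
	patch_push(0xd0002000UL);
	patch_pop();
	printf("active: 0x%lx\n", active);	/* falls back to the first patch */
	return 0;
}
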
diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c
index b319675afd4c9141bab90e69eeac199e28d800a6..cbb5e02cccff69b52e2ffa37118a1f321cae73ab 100644
--- a/arch/powerpc/kernel/livepatch_64.c
+++ b/arch/powerpc/kernel/livepatch_64.c
@@ -67,15 +67,10 @@ struct klp_func_list {
 	int force;
 };
 
-struct stackframe {
-	unsigned long sp;
-	unsigned long pc;
-	unsigned long nip;
-};
-
 struct walk_stackframe_args {
 	int enable;
 	struct klp_func_list *check_funcs;
+	struct module *mod;
 	int ret;
 };
 
@@ -89,16 +84,6 @@ static inline unsigned long klp_size_to_check(unsigned long func_size,
 	return size;
 }
 
-static inline int klp_compare_address(unsigned long pc, unsigned long func_addr,
-		const char *func_name, unsigned long check_size)
-{
-	if (pc >= func_addr && pc < func_addr + check_size) {
-		pr_err("func %s is in use!\n", func_name);
-		return -EBUSY;
-	}
-	return 0;
-}
-
 static bool check_jump_insn(unsigned long func_addr)
 {
 	unsigned long i;
@@ -153,7 +138,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 
 			/* Check func address in stack */
 			if (enable) {
-				if (func->force == KLP_ENFORCEMENT)
+				if (func->patched || func->force == KLP_ENFORCEMENT)
 					continue;
 				/*
 				 * When enable, checking the currently
@@ -255,50 +240,6 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
 	return 0;
 }
 
-static int unwind_frame(struct task_struct *tsk, struct stackframe *frame)
-{
-
-	unsigned long *stack;
-
-	if (!validate_sp(frame->sp, tsk, STACK_FRAME_OVERHEAD))
-		return -1;
-
-	if (frame->nip != 0)
-		frame->nip = 0;
-
-	stack = (unsigned long *)frame->sp;
-
-	/*
-	 * When switching to the exception stack,
-	 * we save the NIP in pt_regs
-	 *
-	 * See if this is an exception frame.
-	 * We look for the "regshere" marker in the current frame.
-	 */
-	if (validate_sp(frame->sp, tsk, STACK_INT_FRAME_SIZE)
-	    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
-		struct pt_regs *regs = (struct pt_regs *)
-			(frame->sp + STACK_FRAME_OVERHEAD);
-		frame->nip = regs->nip;
-		pr_debug("--- interrupt: task = %d/%s, trap %lx at NIP=x%lx/%pS, LR=0x%lx/%pS\n",
-			 tsk->pid, tsk->comm, regs->trap,
-			 regs->nip, (void *)regs->nip,
-			 regs->link, (void *)regs->link);
-	}
-
-	frame->sp = stack[0];
-	frame->pc = stack[STACK_FRAME_LR_SAVE];
-#ifdef CONFIG_FUNCTION_GRAPH_TRACE
-	/*
-	 * IMHO these tests do not belong in
-	 * arch-dependent code, they are generic.
-	 */
-	frame->pc = ftrace_graph_ret_addr(tsk, &ftrace_idx, frame->ip, stack);
-#endif
-
-	return 0;
-}
-
 static void notrace klp_walk_stackframe(struct stackframe *frame,
 		int (*fn)(struct stackframe *, void *),
 		struct task_struct *tsk, void *data)
@@ -308,7 +249,7 @@ static void notrace klp_walk_stackframe(struct stackframe *frame,
 		if (fn(frame, data))
 			break;
 
-		ret = unwind_frame(tsk, frame);
+		ret = klp_unwind_frame(tsk, frame);
 		if (ret < 0)
 			break;
 	}
@@ -332,9 +273,14 @@ static int klp_check_jump_func(struct stackframe *frame, void *data)
 	struct walk_stackframe_args *args = data;
 	struct klp_func_list *check_funcs = args->check_funcs;
 
-	if (!check_func_list(check_funcs, &args->ret, frame->pc)) {
+	/* check the PC first */
+	if (!check_func_list(check_funcs, &args->ret, frame->pc))
 		return args->ret;
-	}
+
+	/* also check the NIP saved when switching to the exception stack */
+	if (frame->nip && !check_func_list(check_funcs, &args->ret, frame->nip))
+		return args->ret;
+
 	return 0;
 }
 
@@ -349,20 +295,12 @@ static void free_list(struct klp_func_list **funcs)
 	}
 }
 
-int klp_check_calltrace(struct klp_patch *patch, int enable)
+static int do_check_calltrace(struct walk_stackframe_args *args,
+		int (*fn)(struct stackframe *, void *))
 {
 	struct task_struct *g, *t;
 	struct stackframe frame;
 	unsigned long *stack;
-	int ret = 0;
-	struct klp_func_list *check_funcs = NULL;
-	struct walk_stackframe_args args;
-
-	ret = klp_check_activeness_func(patch, enable, &check_funcs);
-	if (ret)
-		goto out;
-	args.check_funcs = check_funcs;
-	args.ret = 0;
 
 	for_each_process_thread(g, t) {
 		if (t == current) {
@@ -404,23 +342,71 @@ int klp_check_calltrace(struct klp_patch *patch, int enable)
 		frame.sp = (unsigned long)stack;
 		frame.pc = stack[STACK_FRAME_LR_SAVE];
 		frame.nip = 0;
-		if (check_funcs != NULL) {
-			klp_walk_stackframe(&frame, klp_check_jump_func, t, &args);
-			if (args.ret) {
-				ret = args.ret;
-				pr_debug("%s FAILED when %s\n", __func__,
-					 enable ? "enabling" : "disabling");
-				pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
-				show_stack(t, NULL, KERN_INFO);
-				goto out;
-			}
+		klp_walk_stackframe(&frame, fn, t, args);
+		if (args->ret) {
+			pr_debug("%s FAILED when %s\n", __func__,
+				 args->enable ? "enabling" : "disabling");
"enabling" : "disabling"); + pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); + show_stack(t, NULL, KERN_INFO); + return args->ret; } } + return 0; +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + struct klp_func_list *check_funcs = NULL; + struct walk_stackframe_args args; + + ret = klp_check_activeness_func(patch, enable, &check_funcs); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; + } + if (!check_funcs) + goto out; + + args.check_funcs = check_funcs; + args.ret = 0; + args.enable = enable; + ret = do_check_calltrace(&args, klp_check_jump_func); out: free_list(&check_funcs); return ret; } + +static int check_module_calltrace(struct stackframe *frame, void *data) +{ + struct walk_stackframe_args *args = data; + + /* check the PC first */ + if (within_module_core(frame->pc, args->mod)) + goto err_out; + + /* check NIP when the exception stack switching */ + if (frame->nip && within_module_core(frame->nip, args->mod)) + goto err_out; + + return 0; + +err_out: + pr_err("module %s is in use!\n", args->mod->name); + return (args->ret = -EBUSY); +} + +int arch_klp_module_check_calltrace(void *data) +{ + struct walk_stackframe_args args = { + .mod = (struct module *)data, + .ret = 0 + }; + + return do_check_calltrace(&args, check_module_calltrace); +} + #endif #ifdef CONFIG_LIVEPATCH_WO_FTRACE @@ -439,78 +425,70 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) return ret; } -int arch_klp_patch_func(struct klp_func *func) +static int do_patch(unsigned long pc, unsigned long new_addr, + struct arch_klp_data *arch_data, struct module *old_mod) { - struct klp_func_node *func_node; - unsigned long pc, new_addr; - long ret; - - func_node = func->func_node; - list_add_rcu(&func->stack_node, &func_node->func_stack); - - pc = (unsigned long)func->old_func; - new_addr = (unsigned long)func->new_func; - ret = livepatch_create_branch(pc, (unsigned long)&func_node->arch_data.trampoline, - new_addr, func->old_mod); - if (ret) - goto ERR_OUT; - flush_icache_range((unsigned long)pc, - (unsigned long)pc + LJMP_INSN_SIZE * PPC64_INSN_SIZE); + int ret; + ret = livepatch_create_branch(pc, (unsigned long)&arch_data->trampoline, + new_addr, old_mod); + if (ret) { + pr_err("create branch failed, ret=%d\n", ret); + return -EPERM; + } + flush_icache_range(pc, pc + LJMP_INSN_SIZE * PPC64_INSN_SIZE); pr_debug("[%s %d] old = 0x%lx/0x%lx/%pS, new = 0x%lx/0x%lx/%pS\n", __func__, __LINE__, pc, ppc_function_entry((void *)pc), (void *)pc, new_addr, ppc_function_entry((void *)new_addr), (void *)ppc_function_entry((void *)new_addr)); - return 0; +} -ERR_OUT: - list_del_rcu(&func->stack_node); +int arch_klp_patch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + int ret; - return -EPERM; + func_node = func->func_node; + list_add_rcu(&func->stack_node, &func_node->func_stack); + ret = do_patch((unsigned long)func->old_func, + (unsigned long)func->new_func, + &func_node->arch_data, func->old_mod); + if (ret) + list_del_rcu(&func->stack_node); + return ret; } void arch_klp_unpatch_func(struct klp_func *func) { struct klp_func_node *func_node; struct klp_func *next_func; - unsigned long pc, new_addr; - u32 insns[LJMP_INSN_SIZE]; + unsigned long pc; int i; + int ret; func_node = func->func_node; pc = (unsigned long)func_node->old_func; - if (list_is_singular(&func_node->func_stack)) { - for (i = 0; i < LJMP_INSN_SIZE; i++) - insns[i] = func_node->arch_data.old_insns[i]; - - list_del_rcu(&func->stack_node); - 
- for (i = 0; i < LJMP_INSN_SIZE; i++) - patch_instruction((struct ppc_inst *)((u32 *)pc + i), - ppc_inst(insns[i])); + list_del_rcu(&func->stack_node); + if (list_empty(&func_node->func_stack)) { + for (i = 0; i < LJMP_INSN_SIZE; i++) { + ret = patch_instruction((struct ppc_inst *)((u32 *)pc + i), + ppc_inst(func_node->arch_data.old_insns[i])); + if (ret) { + pr_err("restore instruction(%d) failed, ret=%d\n", i, ret); + break; + } + } pr_debug("[%s %d] restore insns at 0x%lx\n", __func__, __LINE__, pc); + flush_icache_range(pc, pc + LJMP_INSN_SIZE * PPC64_INSN_SIZE); } else { - list_del_rcu(&func->stack_node); next_func = list_first_or_null_rcu(&func_node->func_stack, struct klp_func, stack_node); - new_addr = (unsigned long)next_func->new_func; - - livepatch_create_branch(pc, (unsigned long)&func_node->arch_data.trampoline, - new_addr, func->old_mod); - - pr_debug("[%s %d] old = 0x%lx/0x%lx/%pS, new = 0x%lx/0x%lx/%pS\n", - __func__, __LINE__, - pc, ppc_function_entry((void *)pc), (void *)pc, - new_addr, ppc_function_entry((void *)new_addr), - (void *)ppc_function_entry((void *)new_addr)); - + do_patch(pc, (unsigned long)next_func->new_func, + &func_node->arch_data, func->old_mod); } - - flush_icache_range((unsigned long)pc, - (unsigned long)pc + LJMP_INSN_SIZE * PPC64_INSN_SIZE); } /* return 0 if the func can be patched */ diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 7a143ab7d433d3700858699b9413713acc6b2966..ef093691f6063c3b9f4c192d57d6f63de20600ec 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -835,16 +835,15 @@ static int livepatch_create_bstub(struct ppc64_klp_bstub_entry *entry, return 0; } - if (entry->magic != BRANCH_STUB_MAGIC) { - stub_start = ppc_function_entry((void *)livepatch_branch_stub); - stub_end = ppc_function_entry((void *)livepatch_branch_stub_end); - stub_size = stub_end - stub_start; - memcpy(entry->jump, (u32 *)stub_start, stub_size); - - entry->jump[0] |= PPC_HA(reladdr); - entry->jump[1] |= PPC_LO(reladdr); - entry->magic = BRANCH_STUB_MAGIC; - } + + stub_start = ppc_function_entry((void *)livepatch_branch_stub); + stub_end = ppc_function_entry((void *)livepatch_branch_stub_end); + stub_size = stub_end - stub_start; + memcpy(entry->jump, (u32 *)stub_start, stub_size); + + entry->jump[0] |= PPC_HA(reladdr); + entry->jump[1] |= PPC_LO(reladdr); + entry->magic = BRANCH_STUB_MAGIC; entry->trampoline = addr; pr_debug("Create livepatch branch stub 0x%px with reladdr 0x%lx r2 0x%lx to trampoline 0x%lx\n", @@ -854,9 +853,8 @@ static int livepatch_create_bstub(struct ppc64_klp_bstub_entry *entry, } #ifdef PPC64_ELF_ABI_v1 -static void livepatch_create_btramp(struct ppc64_klp_btramp_entry *entry, - unsigned long addr, - struct module *me) +void livepatch_create_btramp(struct ppc64_klp_btramp_entry *entry, + unsigned long addr) { unsigned long reladdr, tramp_start, tramp_end, tramp_size; @@ -894,7 +892,7 @@ int livepatch_create_branch(unsigned long pc, { #ifdef PPC64_ELF_ABI_v1 /* Create trampoline to addr(new func) */ - livepatch_create_btramp((struct ppc64_klp_btramp_entry *)trampoline, addr, me); + livepatch_create_btramp((struct ppc64_klp_btramp_entry *)trampoline, addr); #else trampoline = addr; #endif diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 7e337c570ea6b3ea4b0f7a1b4f372835cb5e8ae5..9e71c0739f08dd32a70a56a5a7fb76a09364f665 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2956,7 +2956,7 @@ static void __init 
fixup_device_tree_efika_add_phy(void) /* Check if the phy-handle property exists - bail if it does */ rv = prom_getprop(node, "phy-handle", prop, sizeof(prop)); - if (!rv) + if (rv <= 0) return; /* diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c old mode 100644 new mode 100755 index f6e51be47c6e4d7150d7b148dac6c9fe786edc88..81125c8220089252b073db9a17d07fd88eef242e --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -74,10 +74,13 @@ long arch_ptrace(struct task_struct *child, long request, unsigned int fpidx = index - PT_FPR0; flush_fp_to_thread(child); - if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&tmp, &child->thread.TS_FPR(fpidx), - sizeof(long)); - else + if (fpidx < (PT_FPSCR - PT_FPR0)) { + if (IS_ENABLED(CONFIG_PPC32)) + // On 32-bit the index we are passed refers to 32-bit words + tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx]; + else + memcpy(&tmp, &child->thread.TS_FPR(fpidx), sizeof(long)); + } else tmp = child->thread.fp_state.fpscr; } ret = put_user(tmp, datalp); @@ -107,10 +110,13 @@ long arch_ptrace(struct task_struct *child, long request, unsigned int fpidx = index - PT_FPR0; flush_fp_to_thread(child); - if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&child->thread.TS_FPR(fpidx), &data, - sizeof(long)); - else + if (fpidx < (PT_FPSCR - PT_FPR0)) { + if (IS_ENABLED(CONFIG_PPC32)) + // On 32-bit the index we are passed refers to 32-bit words + ((u32 *)child->thread.fp_state.fpr)[fpidx] = data; + else + memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); + } else child->thread.fp_state.fpscr = data; ret = 0; } @@ -478,4 +484,7 @@ void __init pt_regs_check(void) * real registers. */ BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long)); + + // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible + BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX)); } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index cccb32cf0e08c768d116ff4cbfbcf08d46fe561a..cf421eb7f90d47941b221ef42da885415fe42bda 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -1296,6 +1296,12 @@ int __init early_init_dt_scan_rtas(unsigned long node, entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL); sizep = of_get_flat_dt_prop(node, "rtas-size", NULL); +#ifdef CONFIG_PPC64 + /* need this feature to decide the crashkernel offset */ + if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL)) + powerpc_firmware_features |= FW_FEATURE_LPAR; +#endif + if (basep && entryp && sizep) { rtas.base = *basep; rtas.entry = *entryp; diff --git a/arch/powerpc/kernel/secvar-sysfs.c b/arch/powerpc/kernel/secvar-sysfs.c index a0a78aba2083e073a537398d9c9d39b899edad87..1ee4640a26413a606e0d505046bfb5e254152994 100644 --- a/arch/powerpc/kernel/secvar-sysfs.c +++ b/arch/powerpc/kernel/secvar-sysfs.c @@ -26,15 +26,18 @@ static ssize_t format_show(struct kobject *kobj, struct kobj_attribute *attr, const char *format; node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend"); - if (!of_device_is_available(node)) - return -ENODEV; + if (!of_device_is_available(node)) { + rc = -ENODEV; + goto out; + } rc = of_property_read_string(node, "format", &format); if (rc) - return rc; + goto out; rc = sprintf(buf, "%s\n", format); +out: of_node_put(node); return rc; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index ab2d3446e62844422aa9c73fee2632de785defb4..b12e4437bf0a247f3269919969a432a88da37e6e 100644 --- a/arch/powerpc/kernel/smp.c +++ 
b/arch/powerpc/kernel/smp.c @@ -60,6 +60,7 @@ #include #include #include +#include #ifdef DEBUG #include @@ -594,6 +595,45 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) } #endif +#ifdef CONFIG_NMI_IPI +static void crash_stop_this_cpu(struct pt_regs *regs) +#else +static void crash_stop_this_cpu(void *dummy) +#endif +{ + /* + * Just busy wait here and avoid marking CPU as offline to ensure + * register data is captured appropriately. + */ + while (1) + cpu_relax(); +} + +void crash_smp_send_stop(void) +{ + static bool stopped = false; + + /* + * In case of fadump, register data for all CPUs is captured by f/w + * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before + * this rtas call to avoid tricky post processing of those CPUs' + * backtraces. + */ + if (should_fadump_crash()) + return; + + if (stopped) + return; + + stopped = true; + +#ifdef CONFIG_NMI_IPI + smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000); +#else + smp_call_function(crash_stop_this_cpu, NULL, 0); +#endif /* CONFIG_NMI_IPI */ +} + #ifdef CONFIG_NMI_IPI static void nmi_stop_this_cpu(struct pt_regs *regs) { @@ -1491,10 +1531,12 @@ void start_secondary(void *unused) BUG(); } +#ifdef CONFIG_PROFILING int setup_profiling_timer(unsigned int multiplier) { return 0; } +#endif static void fixup_topology(void) { diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 77dffea3d5373d69b886577edd9f9d88b978ab2c..d2f6b2e30b6ae0c2db3b57d30a9071574136ef09 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -67,6 +67,9 @@ #include #include #include +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +#include +#endif #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE) int (*__debugger)(struct pt_regs *regs) __read_mostly; @@ -1491,6 +1494,11 @@ void program_check_exception(struct pt_regs *regs) if (kprobe_handler(regs)) goto bail; +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + if (klp_brk_handler(regs)) + goto bail; +#endif + /* trap exception */ if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) @@ -1922,11 +1930,40 @@ void vsx_unavailable_tm(struct pt_regs *regs) } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ -void performance_monitor_exception(struct pt_regs *regs) +static void performance_monitor_exception_nmi(struct pt_regs *regs) +{ + nmi_enter(); + + __this_cpu_inc(irq_stat.pmu_irqs); + + perf_irq(regs); + + nmi_exit(); +} + +static void performance_monitor_exception_async(struct pt_regs *regs) { + irq_enter(); + __this_cpu_inc(irq_stat.pmu_irqs); perf_irq(regs); + + irq_exit(); +} + +void performance_monitor_exception(struct pt_regs *regs) +{ + /* + * On 64-bit, if perf interrupts hit in a local_irq_disable + * (soft-masked) region, we consider them as NMIs. This is required to + * prevent hash faults on user addresses when reading callchains (and + * looks better from an irq tracing perspective). 
+	 */
+	if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
+		performance_monitor_exception_nmi(regs);
+	else
+		performance_monitor_exception_async(regs);
 }
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index af3c15a1d41eb159670be90218c12b9966afa7f1..75b2a6c4db5a5001313ddb2e0764a42b7656ee9c 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -132,6 +132,10 @@ static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
 {
 	cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
 	cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
+	/*
+	 * See wd_smp_clear_cpu_pending()
+	 */
+	smp_mb();
 	if (cpumask_empty(&wd_smp_cpus_pending)) {
 		wd_smp_last_reset_tb = tb;
 		cpumask_andnot(&wd_smp_cpus_pending,
@@ -217,13 +221,44 @@ static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
 
 			cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
 			wd_smp_unlock(&flags);
+		} else {
+			/*
+			 * The last CPU to clear pending should have reset the
+			 * watchdog so we generally should not find it empty
+			 * here if our CPU was clear. However it could happen
+			 * due to a rare race with another CPU taking the
+			 * last CPU out of the mask concurrently.
+			 *
+			 * We can't add a warning for it. But just in case
+			 * there is a problem with the watchdog that is causing
+			 * the mask to not be reset, try to kick it along here.
+			 */
+			if (unlikely(cpumask_empty(&wd_smp_cpus_pending)))
+				goto none_pending;
 		}
 		return;
 	}
+
 	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+
+	/*
+	 * Order the store that clears this CPU's pending bit against the
+	 * loads that check whether the whole pending mask is empty. This
+	 * orders with the same barrier on another CPU. It prevents two CPUs
+	 * clearing the last 2 pending bits, but neither seeing the other's
+	 * store when checking if the mask is empty, and missing an empty
+	 * mask, which ends with a false positive.
+	 */
+	smp_mb();
 	if (cpumask_empty(&wd_smp_cpus_pending)) {
 		unsigned long flags;
 
+none_pending:
+		/*
+		 * Double check under lock because more than one CPU could see
+		 * a clear mask with the lockless check after clearing their
+		 * pending bits.
+		 */
 		wd_smp_lock(&flags);
 		if (cpumask_empty(&wd_smp_cpus_pending)) {
 			wd_smp_last_reset_tb = tb;
@@ -314,8 +349,12 @@ void arch_touch_nmi_watchdog(void)
 {
 	unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
 	int cpu = smp_processor_id();
-	u64 tb = get_tb();
+	u64 tb;
 
+	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
+		return;
+
+	tb = get_tb();
 	if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
 		per_cpu(wd_timer_tb, cpu) = tb;
 		wd_smp_clear_cpu_pending(cpu, tb);
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 56da5eb2b923abe76df7e42dd1ea163653725915..80c79cb5010c5ac28b248723e8ef26f8b5c75b30 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -147,11 +147,18 @@ void __init reserve_crashkernel(void)
 	if (!crashk_res.start) {
#ifdef CONFIG_PPC64
 		/*
-		 * On 64bit we split the RMO in half but cap it at half of
-		 * a small SLB (128MB) since the crash kernel needs to place
-		 * itself and some stacks to be in the first segment.
+		 * On the LPAR platform, place the crash kernel at half of
+		 * the RMA size (512MB or more) so that it has enough room
+		 * for itself and its stacks in the first segment, while the
+		 * normal kernel still has enough room in the first segment
+		 * to allocate essential system resources. Keep the crash
+		 * kernel at the 128MB offset on other platforms.
 		 */
-		crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
+		if (firmware_has_feature(FW_FEATURE_LPAR))
+			crashk_res.start = ppc64_rma_size / 2;
+		else
+			crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
 #else
 		crashk_res.start = KDUMP_KERNELBASE;
 #endif
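
A quick worked example of the placement rule above, assuming a 512MB RMA (the real ppc64_rma_size comes from firmware at boot, so these numbers are only illustrative):

#include <stdio.h>

/*
 * Worked example of the crashkernel placement rule; the values are
 * illustrative, not discovered from firmware.
 */
int main(void)
{
	const unsigned long long rma = 512ULL << 20;	/* assume 512MB RMA */
	const unsigned long long cap = 0x8000000ULL;	/* 128MB */
	unsigned long long lpar = rma / 2;
	unsigned long long other = cap < rma / 2 ? cap : rma / 2;

	printf("LPAR:  crashk_res.start = %lluMB\n", lpar >> 20);	/* 256MB */
	printf("other: crashk_res.start = %lluMB\n", other >> 20);	/* 128MB */
	return 0;
}
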
diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c
index 0492ca6003f35a5c3c75ba1ec87fe39847e3e97a..ee292bd2edf23b4030c2f8a806d10b2cfc544778 100644
--- a/arch/powerpc/kexec/elf_64.c
+++ b/arch/powerpc/kexec/elf_64.c
@@ -45,7 +45,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
 
 	ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info);
 	if (ret)
-		goto out;
+		return ERR_PTR(ret);
 
 	if (image->type == KEXEC_TYPE_CRASH) {
 		/* min & max buffer values for kdump case */
@@ -114,7 +114,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
 	ret = setup_new_fdt_ppc64(image, fdt, initrd_load_addr,
 				  initrd_len, cmdline);
 	if (ret)
-		goto out;
+		goto out_free_fdt;
 
 	fdt_pack(fdt);
 
@@ -125,7 +125,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
 	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	ret = kexec_add_buffer(&kbuf);
 	if (ret)
-		goto out;
+		goto out_free_fdt;
 
 	/* FDT will be freed in arch_kimage_file_post_load_cleanup */
 	image->arch.fdt = fdt;
@@ -140,18 +140,14 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
 	if (ret)
 		pr_err("Error setting up the purgatory.\n");
 
+	goto out;
+
+out_free_fdt:
+	kvfree(fdt);
 out:
 	kfree(modified_cmdline);
 	kexec_free_elf_info(&elf_info);
 
-	/*
-	 * Once FDT buffer has been successfully passed to kexec_add_buffer(),
-	 * the FDT buffer address is saved in image->arch.fdt. In that case,
-	 * the memory cannot be freed here in case of any other error.
-	 */
-	if (ret && !image->arch.fdt)
-		kvfree(fdt);
-
 	return ret ?
ERR_PTR(ret) : NULL; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 175967a195c4459075cdb7cc3c70841a64a7a4cc..38b7a3491aac080a26aef0120c09fc32db7176af 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4557,8 +4557,12 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, unsigned long npages = mem->memory_size >> PAGE_SHIFT; if (change == KVM_MR_CREATE) { - slot->arch.rmap = vzalloc(array_size(npages, - sizeof(*slot->arch.rmap))); + unsigned long size = array_size(npages, sizeof(*slot->arch.rmap)); + + if ((size >> PAGE_SHIFT) > totalram_pages()) + return -ENOMEM; + + slot->arch.rmap = vzalloc(size); if (!slot->arch.rmap) return -ENOMEM; } @@ -5748,8 +5752,11 @@ static int kvmppc_book3s_init_hv(void) if (r) return r; - if (kvmppc_radix_possible()) + if (kvmppc_radix_possible()) { r = kvmppc_radix_init(); + if (r) + return r; + } /* * POWER9 chips before version 2.02 can't have some threads in diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index a5f1ae892ba68ddd1d6dca6728d10339fe3c950c..d0b6c8c16c48a5ed7e475ce11af48af2acf124b4 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -510,7 +510,7 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu) if (eaddr & (0xFFFUL << 52)) return H_PARAMETER; - buf = kzalloc(n, GFP_KERNEL); + buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN); if (!buf) return H_NO_MEM; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 543db9157f3b1578ed4c3da17a4819f278c79876..ef8077a739b8813c6fcb4be11b616a560c06615f 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1500,7 +1500,7 @@ int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu, { enum emulation_result emulated = EMULATE_DONE; - if (vcpu->arch.mmio_vsx_copy_nums > 2) + if (vcpu->arch.mmio_vmx_copy_nums > 2) return EMULATE_FAIL; while (vcpu->arch.mmio_vmx_copy_nums) { @@ -1597,7 +1597,7 @@ int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu, unsigned int index = rs & KVM_MMIO_REG_MASK; enum emulation_result emulated = EMULATE_DONE; - if (vcpu->arch.mmio_vsx_copy_nums > 2) + if (vcpu->arch.mmio_vmx_copy_nums > 2) return EMULATE_FAIL; vcpu->arch.io_gpr = rs; diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 58991233381ed6d0d1f730b3c6836e53e006d9a7..0697a0e014ae86651bf9090c96c87d91be660e52 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -19,6 +19,9 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING endif +CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) +CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) + obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o test_code-patching.o ifndef CONFIG_KASAN diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index a2e067f68dee867762fb4657d4e8aaf5ec68e62c..2d19655328f12c7d7b9c3141b7251b71874c3444 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -108,9 +108,9 @@ static nokprobe_inline long address_ok(struct pt_regs *regs, { if (!user_mode(regs)) return 1; - if (__access_ok(ea, nb)) + if (access_ok((void __user *)ea, nb)) return 1; - if (__access_ok(ea, 1)) + if (access_ok((void __user *)ea, 1)) /* Access overlaps the end of the user region */ regs->dar = TASK_SIZE_MAX - 1; else @@ -949,7 +949,10 @@ NOKPROBE_SYMBOL(emulate_dcbz); #define __put_user_asmx(x, addr, err, op, cr) \ __asm__ __volatile__( \ + 
".machine push\n" \ + ".machine power8\n" \ "1: " op " %2,0,%3\n" \ + ".machine pop\n" \ " mfcr %1\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ @@ -962,7 +965,10 @@ NOKPROBE_SYMBOL(emulate_dcbz); #define __get_user_asmx(x, addr, err, op) \ __asm__ __volatile__( \ + ".machine push\n" \ + ".machine power8\n" \ "1: "op" %1,0,%2\n" \ + ".machine pop\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %0,%3\n" \ @@ -3062,12 +3068,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) case BARRIER_EIEIO: eieio(); break; +#ifdef CONFIG_PPC64 case BARRIER_LWSYNC: asm volatile("lwsync" : : : "memory"); break; case BARRIER_PTESYNC: asm volatile("ptesync" : : : "memory"); break; +#endif } break; @@ -3185,7 +3193,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) __put_user_asmx(op->val, ea, err, "stbcx.", cr); break; case 2: - __put_user_asmx(op->val, ea, err, "stbcx.", cr); + __put_user_asmx(op->val, ea, err, "sthcx.", cr); break; #endif case 4: diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index f23e4295214d62cd331591320caf1fd87e87294e..39af73bea44c652c6ef4091ef4cd0567b2c4bb04 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -72,7 +72,7 @@ unsigned long p_block_mapped(phys_addr_t pa) return 0; } -static int find_free_bat(void) +int __init find_free_bat(void) { int b; int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; @@ -96,7 +96,7 @@ static int find_free_bat(void) * - block size has to be a power of two. This is calculated by finding the * highest bit set to 1. */ -static unsigned int block_size(unsigned long base, unsigned long top) +unsigned int bat_block_size(unsigned long base, unsigned long top) { unsigned int max_size = SZ_256M; unsigned int base_shift = (ffs(base) - 1) & 31; @@ -141,7 +141,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to int idx; while ((idx = find_free_bat()) != -1 && base != top) { - unsigned int size = block_size(base, top); + unsigned int size = bat_block_size(base, top); if (size < 128 << 10) break; @@ -199,18 +199,17 @@ void mmu_mark_initmem_nx(void) int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 
8 : 4; int i; unsigned long base = (unsigned long)_stext - PAGE_OFFSET; - unsigned long top = (unsigned long)_etext - PAGE_OFFSET; + unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K); unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; unsigned long size; - for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) { - size = block_size(base, top); + for (i = 0; i < nb - 1 && base < top;) { + size = bat_block_size(base, top); setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); base += size; } if (base < top) { - size = block_size(base, top); - size = max(size, 128UL << 10); + size = bat_block_size(base, top); if ((top - base) > size) { size <<= 1; if (strict_kernel_rwx_enabled() && base + size > border) diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 9c4fddf40ec55700fc75f378024f5c1b7a3adbce..10fd67f79e0b801676239ce6b04bb1c964d9cc61 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -1136,7 +1136,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) int pud_clear_huge(pud_t *pud) { - if (pud_huge(*pud)) { + if (pud_is_leaf(*pud)) { pud_clear(pud); return 1; } @@ -1183,7 +1183,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) int pmd_clear_huge(pmd_t *pmd) { - if (pmd_huge(*pmd)) { + if (pmd_is_leaf(*pmd)) { pmd_clear(pmd); return 1; } diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c index 202bd260a009567b242b7b0177d4f47bfc53f364..450a67ef0bbe1eb4fc4f897e897f1ff24727f44c 100644 --- a/arch/powerpc/mm/kasan/book3s_32.c +++ b/arch/powerpc/mm/kasan/book3s_32.c @@ -10,47 +10,51 @@ int __init kasan_init_region(void *start, size_t size) { unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start); unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size); - unsigned long k_cur = k_start; - int k_size = k_end - k_start; - int k_size_base = 1 << (ffs(k_size) - 1); + unsigned long k_nobat = k_start; + unsigned long k_cur; + phys_addr_t phys; int ret; - void *block; - block = memblock_alloc(k_size, k_size_base); - - if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) { - int k_size_more = 1 << (ffs(k_size - k_size_base) - 1); - - setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL); - if (k_size_more >= SZ_128K) - setbat(-1, k_start + k_size_base, __pa(block) + k_size_base, - k_size_more, PAGE_KERNEL); - if (v_block_mapped(k_start)) - k_cur = k_start + k_size_base; - if (v_block_mapped(k_start + k_size_base)) - k_cur = k_start + k_size_base + k_size_more; - - update_bats(); + while (k_nobat < k_end) { + unsigned int k_size = bat_block_size(k_nobat, k_end); + int idx = find_free_bat(); + + if (idx == -1) + break; + if (k_size < SZ_128K) + break; + phys = memblock_phys_alloc_range(k_size, k_size, 0, + MEMBLOCK_ALLOC_ANYWHERE); + if (!phys) + break; + + setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL); + k_nobat += k_size; } + if (k_nobat != k_start) + update_bats(); - if (!block) - block = memblock_alloc(k_size, PAGE_SIZE); - if (!block) - return -ENOMEM; + if (k_nobat < k_end) { + phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0, + MEMBLOCK_ALLOC_ANYWHERE); + if (!phys) + return -ENOMEM; + } ret = kasan_init_shadow_page_tables(k_start, k_end); if (ret) return ret; - kasan_update_early_region(k_start, k_cur, __pte(0)); + kasan_update_early_region(k_start, k_nobat, __pte(0)); - for (; k_cur < k_end; k_cur += 
PAGE_SIZE) { + for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_off_k(k_cur); - void *va = block + k_cur - k_start; - pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL); + pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL); __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0); } flush_tlb_kernel_range(k_start, k_end); + memset(kasan_mem_to_shadow(start), 0, k_end - k_start); + return 0; } diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c index cf8770b1a692ec4a2349fc0645ca1375252df7bb..f3e4d069e0ba7b822cee880ecf4e016addf4b2b0 100644 --- a/arch/powerpc/mm/kasan/kasan_init_32.c +++ b/arch/powerpc/mm/kasan/kasan_init_32.c @@ -83,13 +83,12 @@ void __init kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte) { unsigned long k_cur; - phys_addr_t pa = __pa(kasan_early_shadow_page); for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) { pmd_t *pmd = pmd_off_k(k_cur); pte_t *ptep = pte_offset_kernel(pmd, k_cur); - if ((pte_val(*ptep) & PTE_RPN_MASK) != pa) + if (pte_page(*ptep) != virt_to_page(lm_alias(kasan_early_shadow_page))) continue; __set_pte_at(&init_mm, k_cur, ptep, pte, 0); diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 23bfe00811aea0427a0154b30d3c2f3297c3c6b3..eb882413dd17ecec70514bc83e3f09bc241d16c4 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -739,7 +739,9 @@ static int __init parse_numa_properties(void) of_node_put(cpu); } - node_set_online(nid); + /* node_set_online() is an UB if 'nid' is negative */ + if (likely(nid >= 0)) + node_set_online(nid); } get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells); diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 15555c95cebc70ea708e41753148f73142af0367..faaf33e204de1064af5843c03058b05428e40271 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -194,6 +194,15 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, __set_pte_at(mm, addr, ptep, pte, 0); } +void unmap_kernel_page(unsigned long va) +{ + pmd_t *pmdp = pmd_off_k(va); + pte_t *ptep = pte_offset_kernel(pmdp, va); + + pte_clear(&init_mm, va, ptep); + flush_tlb_kernel_range(va, va + PAGE_SIZE); +} + /* * This is called when relaxing access to a PTE. 
It's also called in the page * fault path when we don't hit any of the major fault cases, ie, a minor diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 78c8cf01db5f902bc2727f65ac6aaa0c92f8d9f6..175aabf101e8787efdf6c23e8ab918e9b30cc765 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c @@ -102,7 +102,8 @@ EXPORT_SYMBOL(__pte_frag_size_shift); struct page *p4d_page(p4d_t p4d) { if (p4d_is_leaf(p4d)) { - VM_WARN_ON(!p4d_huge(p4d)); + if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) + VM_WARN_ON(!p4d_huge(p4d)); return pte_page(p4d_pte(p4d)); } return virt_to_page(p4d_pgtable(p4d)); @@ -112,7 +113,8 @@ struct page *p4d_page(p4d_t p4d) struct page *pud_page(pud_t pud) { if (pud_is_leaf(pud)) { - VM_WARN_ON(!pud_huge(pud)); + if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) + VM_WARN_ON(!pud_huge(pud)); return pte_page(pud_pte(pud)); } return virt_to_page(pud_pgtable(pud)); @@ -125,7 +127,13 @@ struct page *pud_page(pud_t pud) struct page *pmd_page(pmd_t pmd) { if (pmd_is_leaf(pmd)) { - VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd))); + /* + * vmalloc_to_page may be called on any vmap address (not only + * vmalloc), and it uses pmd_page() etc., when huge vmap is + * enabled so these checks can't be used. + */ + if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP)) + VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd))); return pte_page(pmd_pte(pmd)); } return virt_to_page(pmd_page_vaddr(pmd)); diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 8936090acb57960a13e19c3048b59aae5e32092d..0d47514e8870db6919a615a72a3b15c82ef52c75 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -651,17 +651,21 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1])); break; case 64: - /* - * Way easier and faster(?) 
to store the value - * into stack and then use ldbrx - * - * ctx->seen will be reliable in pass2, but - * the instructions generated will remain the - * same across all passes - */ + /* Store the value to stack and then use byte-reverse loads */ PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx)); EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx))); - EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1])); + if (cpu_has_feature(CPU_FTR_ARCH_206)) { + EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1])); + } else { + EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1])); + if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) + EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32)); + EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4)); + EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1])); + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32)); + EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2])); + } break; } break; diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 91452313489f1ae7f5e990aedae09a5b24788f43..e49aa8fc6a491a972829788d91583e4fbbbc0eb5 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -95,6 +95,7 @@ static unsigned int freeze_events_kernel = MMCR0_FCS; #define SPRN_SIER3 0 #define MMCRA_SAMPLE_ENABLE 0 #define MMCRA_BHRB_DISABLE 0 +#define MMCR0_PMCCEXT 0 static inline unsigned long perf_ip_adjust(struct pt_regs *regs) { @@ -109,10 +110,6 @@ static inline void perf_read_regs(struct pt_regs *regs) { regs->result = 0; } -static inline int perf_intr_is_nmi(struct pt_regs *regs) -{ - return 0; -} static inline int siar_valid(struct pt_regs *regs) { @@ -331,15 +328,6 @@ static inline void perf_read_regs(struct pt_regs *regs) regs->result = use_siar; } -/* - * If interrupts were soft-disabled when a PMU interrupt occurs, treat - * it as an NMI. - */ -static inline int perf_intr_is_nmi(struct pt_regs *regs) -{ - return (regs->softe & IRQS_DISABLED); -} - /* * On processors like P7+ that have the SIAR-Valid bit, marked instructions * must be sampled only if the SIAR-valid bit is set. @@ -817,6 +805,19 @@ static void write_pmc(int idx, unsigned long val) } } +static int any_pmc_overflown(struct cpu_hw_events *cpuhw) +{ + int i, idx; + + for (i = 0; i < cpuhw->n_events; i++) { + idx = cpuhw->event[i]->hw.idx; + if ((idx) && ((int)read_pmc(idx) < 0)) + return idx; + } + + return 0; +} + /* Called from sysrq_handle_showregs() */ void perf_event_print_debug(void) { @@ -1240,11 +1241,16 @@ static void power_pmu_disable(struct pmu *pmu) /* * Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56 + * Also clear PMXE to disable PMI's getting triggered in some + * corner cases during PMU disable. */ val = mmcr0 = mfspr(SPRN_MMCR0); val |= MMCR0_FC; val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO | - MMCR0_FC56); + MMCR0_PMXE | MMCR0_FC56); + /* Set mmcr0 PMCCEXT for p10 */ + if (ppmu->flags & PPMU_ARCH_31) + val |= MMCR0_PMCCEXT; /* * The barrier is to make sure the mtspr has been @@ -1255,6 +1261,34 @@ static void power_pmu_disable(struct pmu *pmu) mb(); isync(); + /* + * Some corner cases could clear the PMU counter overflow + * while a masked PMI is pending. One such case is when + * a PMI happens during interrupt replay and perf counter + * values are cleared by PMU callbacks before replay. + * + * If any PMC corresponding to the active PMU events are + * overflown, disable the interrupt by clearing the paca + * bit for PMI since we are disabling the PMU now. 
+	 * Otherwise provide a warning if there is PMI pending, but
+	 * no counter is found overflown.
+	 */
+	if (any_pmc_overflown(cpuhw)) {
+		/*
+		 * Since power_pmu_disable runs under local_irq_save, it
+		 * could happen that code hits a PMC overflow without PMI
+		 * pending in paca. Hence only clear PMI pending if it was
+		 * set.
+		 *
+		 * If a PMI is pending, then MSR[EE] must be disabled (because
+		 * the masked PMI handler disables EE). So it is safe to
+		 * call clear_pmi_irq_pending().
+		 */
+		if (pmi_irq_pending())
+			clear_pmi_irq_pending();
+	} else
+		WARN_ON(pmi_irq_pending());
+
 	val = mmcra = cpuhw->mmcr.mmcra;
 
 	/*
@@ -1346,6 +1380,15 @@ static void power_pmu_enable(struct pmu *pmu)
 	 * (possibly updated for removal of events).
 	 */
 	if (!cpuhw->n_added) {
+		/*
+		 * If there is any active event with an overflown PMC
+		 * value, set back PACA_IRQ_PMI which would have been
+		 * cleared in power_pmu_disable().
+		 */
+		hard_irq_disable();
+		if (any_pmc_overflown(cpuhw))
+			set_pmi_irq_pending();
+
 		mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
 		if (ppmu->flags & PPMU_ARCH_31)
@@ -2250,7 +2293,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 	struct perf_event *event;
 	unsigned long val[8];
 	int found, active;
-	int nmi;
 
 	if (cpuhw->n_limited)
 		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
@@ -2258,18 +2300,6 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 
 	perf_read_regs(regs);
 
-	/*
-	 * If perf interrupts hit in a local_irq_disable (soft-masked) region,
-	 * we consider them as NMIs. This is required to prevent hash faults on
-	 * user addresses when reading callchains. See the NMI test in
-	 * do_hash_page.
-	 */
-	nmi = perf_intr_is_nmi(regs);
-	if (nmi)
-		nmi_enter();
-	else
-		irq_enter();
-
 	/* Read all the PMCs since we'll need them a bunch of times */
 	for (i = 0; i < ppmu->n_counter; ++i)
 		val[i] = read_pmc(i + 1);
@@ -2296,6 +2326,14 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 				break;
 			}
 		}
+
+		/*
+		 * Clear PACA_IRQ_PMI in case it was set by
+		 * set_pmi_irq_pending() when the PMU was enabled
+		 * after accounting for interrupts.
+		 */
+		clear_pmi_irq_pending();
+
 		if (!active)
 			/* reset non active counters that have overflowed */
 			write_pmc(i + 1, 0);
@@ -2315,8 +2353,15 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 			}
 		}
 	}
-	if (!found && !nmi && printk_ratelimit())
-		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
+
+	/*
+	 * During system-wide profiling, or while a specific CPU is monitored
+	 * for an event, some corner cases can cause a PMC to overflow in the
+	 * idle path. This triggers a PMI after waking up from idle; since
+	 * counter values are _not_ saved/restored in the idle path, that can
+	 * lead to the "Can't find PMC" message below.
+	 */
+	if (unlikely(!found) && !arch_irq_disabled_regs(regs))
+		printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");
 
 	/*
 	 * Reset MMCR0 to its normal value. This will set PMXE and
 	 * we get back out of this interrupt.
*/ write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0); - - if (nmi) - nmi_exit(); - else - irq_exit(); } static void perf_event_interrupt(struct pt_regs *regs) diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c index e0e7e276bfd25aeecf709ee49399ec87692bcb16..ee721f420a7bac495c3363b74bf78a8c6e0b044e 100644 --- a/arch/powerpc/perf/core-fsl-emb.c +++ b/arch/powerpc/perf/core-fsl-emb.c @@ -31,19 +31,6 @@ static atomic_t num_events; /* Used to avoid races in calling reserve/release_pmc_hardware */ static DEFINE_MUTEX(pmc_reserve_mutex); -/* - * If interrupts were soft-disabled when a PMU interrupt occurs, treat - * it as an NMI. - */ -static inline int perf_intr_is_nmi(struct pt_regs *regs) -{ -#ifdef __powerpc64__ - return (regs->softe & IRQS_DISABLED); -#else - return 0; -#endif -} - static void perf_event_interrupt(struct pt_regs *regs); /* @@ -659,13 +646,6 @@ static void perf_event_interrupt(struct pt_regs *regs) struct perf_event *event; unsigned long val; int found = 0; - int nmi; - - nmi = perf_intr_is_nmi(regs); - if (nmi) - nmi_enter(); - else - irq_enter(); for (i = 0; i < ppmu->n_counter; ++i) { event = cpuhw->event[i]; @@ -690,11 +670,6 @@ static void perf_event_interrupt(struct pt_regs *regs) mtmsr(mfmsr() | MSR_PMM); mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE); isync(); - - if (nmi) - nmi_exit(); - else - irq_exit(); } void hw_perf_event_setup(int cpu) diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 7b25548ec42b0db3ae872adce803de2a75ef9e03..e8074d7f2401b7eef06c27dd1124bc092b0274eb 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -1457,7 +1457,11 @@ static int trace_imc_event_init(struct perf_event *event) event->hw.idx = -1; - event->pmu->task_ctx_nr = perf_hw_context; + /* + * There can only be a single PMU for perf_hw_context events which is assigned to + * core PMU. Hence use "perf_sw_context" for trace_imc. + */ + event->pmu->task_ctx_nr = perf_sw_context; event->destroy = reset_global_refc; return 0; } diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 5e8eedda45d39fdd48da8ee9ce3b5970348e1f77..58448f0e472132e9fa24163e10b0162120ae83b3 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -561,6 +561,14 @@ int isa207_compute_mmcr(u64 event[], int n_ev, if (!(pmc_inuse & 0x60)) mmcr->mmcr0 |= MMCR0_FC56; + /* + * Set mmcr0 (PMCCEXT) for p10 which + * will restrict access to group B registers + * when MMCR0 PMCC=0b00. 
+ */ + if (cpu_has_feature(CPU_FTR_ARCH_31)) + mmcr->mmcr0 |= MMCR0_PMCCEXT; + mmcr->mmcr1 = mmcr1; mmcr->mmcra = mmcra; mmcr->mmcr2 = mmcr2; diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c index f2ba837249d694ab47a843ecff366191b64ea4cb..04a6abf14c2958ca5f3f98a3bba203152be5104c 100644 --- a/arch/powerpc/platforms/8xx/pic.c +++ b/arch/powerpc/platforms/8xx/pic.c @@ -153,6 +153,7 @@ int __init mpc8xx_pic_init(void) if (mpc8xx_pic_host == NULL) { printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); ret = -ENOMEM; + goto out; } ret = 0; diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 2124831cf57c081e7625be24fc25ea9c0e0ba286..d04079b34d7c2881e9229807111629f28a03fd18 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -976,6 +976,7 @@ static int __init cell_iommu_fixed_mapping_init(void) if (hbase < dbase || (hend > (dbase + dsize))) { pr_debug("iommu: hash window doesn't fit in" "real DMA window\n"); + of_node_put(np); return -1; } } diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c index 9068edef71f7832c375be25cfb0754e92553b146..59999902e4a6aff631b1ba7efab56efb6080ef23 100644 --- a/arch/powerpc/platforms/cell/pervasive.c +++ b/arch/powerpc/platforms/cell/pervasive.c @@ -77,6 +77,7 @@ static int cbe_system_reset_exception(struct pt_regs *regs) switch (regs->msr & SRR1_WAKEMASK) { case SRR1_WAKEDEC: set_dec(1); + break; case SRR1_WAKEEE: /* * Handle these when interrupts get re-enabled and we take diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index a1b7f79a8a152eae5e9ae783f9c3d69654719012..de10c13de15c6655e4ca39e433874c789d5704fa 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c @@ -215,6 +215,7 @@ void hlwd_pic_probe(void) irq_set_chained_handler(cascade_virq, hlwd_pic_irq_cascade); hlwd_irq_host = host; + of_node_put(np); break; } } diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index f77a59b5c2e1a9dcc7a810fc3e112ef03e7b8b32..df89d916236d965595702eef3a5f7a813deae485 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -582,6 +582,7 @@ static void __init kw_i2c_add(struct pmac_i2c_host_kw *host, bus->close = kw_i2c_close; bus->xfer = kw_i2c_xfer; mutex_init(&bus->mutex); + lockdep_register_key(&bus->lock_key); lockdep_set_class(&bus->mutex, &bus->lock_key); if (controller == busnode) bus->flags = pmac_i2c_multibus; @@ -810,6 +811,7 @@ static void __init pmu_i2c_probe(void) bus->hostdata = bus + 1; bus->xfer = pmu_i2c_xfer; mutex_init(&bus->mutex); + lockdep_register_key(&bus->lock_key); lockdep_set_class(&bus->mutex, &bus->lock_key); bus->flags = pmac_i2c_multibus; list_add(&bus->link, &pmac_i2c_busses); @@ -933,6 +935,7 @@ static void __init smu_i2c_probe(void) bus->hostdata = bus + 1; bus->xfer = smu_i2c_xfer; mutex_init(&bus->mutex); + lockdep_register_key(&bus->lock_key); lockdep_set_class(&bus->mutex, &bus->lock_key); bus->flags = 0; list_add(&bus->link, &pmac_i2c_busses); diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index 608569082ba0bb12e555c0b4b2ad4c392bcdd6ba..123a0e799b7bdbbd3dc507e3a5016f5728eab47a 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c @@ -396,6 +396,7 @@ void __init 
opal_lpc_init(void)
 		if (!of_get_property(np, "primary", NULL))
 			continue;
 		opal_lpc_chip_id = of_get_ibm_chip_id(np);
+		of_node_put(np);
 		break;
 	}
 	if (opal_lpc_chip_id < 0)
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 72c25295c1c2b4e9bd89824dbf99d5646a1d5624..69c344c8884f36b120075ecf2c9123481299756a 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -43,7 +43,11 @@ static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val)
 	unsigned long parity;
 
 	/* Calculate the parity of the value */
-	asm ("popcntd %0,%1" : "=r" (parity) : "r" (val));
+	asm (".machine push;   \
+	      .machine power7; \
+	      popcntd %0,%1;   \
+	      .machine pop;"
+	     : "=r" (parity) : "r" (val));
 
 	/* xor our value with the previous mask */
 	val ^= rng->mask;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 5f0d446a2325e746046c0db642ee685ff7b0b920..47dfada140e19d2334ab09505bb6b34a26ad3624 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -538,6 +538,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
 	if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
 		security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
 
+	if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY)
+		security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+
+	if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS)
+		security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+
 	if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
 		security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
 }
diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c
index 8963eaffb1b7b54234cecaedea9f8fd42677470e..39186ad6b3c3a96f7291ed77782b0f028350656a 100644
--- a/arch/powerpc/sysdev/fsl_gtm.c
+++ b/arch/powerpc/sysdev/fsl_gtm.c
@@ -86,7 +86,7 @@ static LIST_HEAD(gtms);
  */
 struct gtm_timer *gtm_get_timer16(void)
 {
-	struct gtm *gtm = NULL;
+	struct gtm *gtm;
 	int i;
 
 	list_for_each_entry(gtm, &gtms, list_node) {
@@ -103,7 +103,7 @@ struct gtm_timer *gtm_get_timer16(void)
 		spin_unlock_irq(&gtm->lock);
 	}
 
-	if (gtm)
+	if (!list_empty(&gtms))
 		return ERR_PTR(-EBUSY);
 	return ERR_PTR(-ENODEV);
 }
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index 1e3674d7ea7bce396f791b0558b52597d8f938a5..b57eeaff7bb33395289788d0b88832b0c46ca248 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -658,6 +658,9 @@ static int xive_spapr_debug_show(struct seq_file *m, void *private)
 	struct xive_irq_bitmap *xibm;
 	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 
+	if (!buf)
+		return -ENOMEM;
+
 	list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
 		memset(buf, 0, PAGE_SIZE);
 		bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index dc04f47714b4174c4ba7194a568fa7288a37abd3..e8462dd7f7eefe2deac517f5e55b5fac684fb0d7 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -50,6 +50,12 @@ riscv-march-$(CONFIG_ARCH_RV32I)	:= rv32ima
 riscv-march-$(CONFIG_ARCH_RV64I)	:= rv64ima
 riscv-march-$(CONFIG_FPU)		:= $(riscv-march-y)fd
 riscv-march-$(CONFIG_RISCV_ISA_C)	:= $(riscv-march-y)c
+
+# Newer binutils versions default to ISA spec version 20191213 which moves some
+# instructions from the I extension to the Zicsr and Zifencei extensions.
+toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei) +riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei + KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y)) KBUILD_AFLAGS += -march=$(riscv-march-y) diff --git a/arch/riscv/include/asm/module.lds.h b/arch/riscv/include/asm/module.lds.h index 4254ff2ff04943f7df7053d6d8263edaba7a3f36..1075beae1ac64521e4c98eadf9acc785deb93d3e 100644 --- a/arch/riscv/include/asm/module.lds.h +++ b/arch/riscv/include/asm/module.lds.h @@ -2,8 +2,8 @@ /* Copyright (C) 2017 Andes Technology Corporation */ #ifdef CONFIG_MODULE_SECTIONS SECTIONS { - .plt (NOLOAD) : { BYTE(0) } - .got (NOLOAD) : { BYTE(0) } - .got.plt (NOLOAD) : { BYTE(0) } + .plt : { BYTE(0) } + .got : { BYTE(0) } + .got.plt : { BYTE(0) } } #endif diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index a390711129de6471524141120419213e228a47e2..d79ae9d98999f6b9dce68aaf00412fe08635e31b 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -11,11 +11,17 @@ #include #include +#ifdef CONFIG_KASAN +#define KASAN_STACK_ORDER 1 +#else +#define KASAN_STACK_ORDER 0 +#endif + /* thread information allocation */ #ifdef CONFIG_64BIT -#define THREAD_SIZE_ORDER (2) +#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER) #else -#define THREAD_SIZE_ORDER (1) +#define THREAD_SIZE_ORDER (1 + KASAN_STACK_ORDER) #endif #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 62de075fc60c0a617b7230e7063a6f44e0917824..bc49d5f2302b605ca72e28d43b71f35259ec8996 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -44,6 +44,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o +obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o + obj-$(CONFIG_RISCV_BASE_PMU) += perf_event.o obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 3a4f24a5b32b24fceaf5e8b2e88c2557852ba9cf..3b3e25fa26d4bf9346bad77a6bd192fb70b452e1 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -98,7 +98,7 @@ _save_context: .option pop #ifdef CONFIG_TRACE_IRQFLAGS - call trace_hardirqs_off + call __trace_hardirqs_off #endif #ifdef CONFIG_CONTEXT_TRACKING @@ -131,7 +131,7 @@ skip_context_tracking: andi t0, s1, SR_PIE beqz t0, 1f #ifdef CONFIG_TRACE_IRQFLAGS - call trace_hardirqs_on + call __trace_hardirqs_on #endif csrs CSR_STATUS, SR_IE @@ -222,7 +222,7 @@ ret_from_exception: REG_L s0, PT_STATUS(sp) csrc CSR_STATUS, SR_IE #ifdef CONFIG_TRACE_IRQFLAGS - call trace_hardirqs_off + call __trace_hardirqs_off #endif #ifdef CONFIG_RISCV_M_MODE /* the MPP value is too large to be used as an immediate arg for addi */ @@ -258,10 +258,10 @@ restore_all: REG_L s1, PT_STATUS(sp) andi t0, s1, SR_PIE beqz t0, 1f - call trace_hardirqs_on + call __trace_hardirqs_on j 2f 1: - call trace_hardirqs_off + call __trace_hardirqs_off 2: #endif REG_L a0, PT_STATUS(sp) diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 104fba889cf767cb159840954de99f985f7f0929..c3310a68ac463d092ea8362c9df8fbc07de80ae1 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -13,6 +13,19 @@ #include #include +/* + * The auipc+jalr instruction pair can reach any PC-relative offset + * in the range [-2^31 - 
2^11, 2^31 - 2^11) + */ +static bool riscv_insn_valid_32bit_offset(ptrdiff_t val) +{ +#ifdef CONFIG_32BIT + return true; +#else + return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11)); +#endif +} + static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) { if (v != (u32)v) { @@ -95,7 +108,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, ptrdiff_t offset = (void *)v - (void *)location; s32 hi20; - if (offset != (s32)offset) { + if (!riscv_insn_valid_32bit_offset(offset)) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", me->name, (long long)v, location); @@ -197,10 +210,9 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; - s32 fill_v = offset; u32 hi20, lo12; - if (offset != fill_v) { + if (!riscv_insn_valid_32bit_offset(offset)) { /* Only emit the plt entry if offset over 32-bit range */ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { offset = module_emit_plt_entry(me, v); @@ -224,10 +236,9 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location, Elf_Addr v) { ptrdiff_t offset = (void *)v - (void *)location; - s32 fill_v = offset; u32 hi20, lo12; - if (offset != fill_v) { + if (!riscv_insn_valid_32bit_offset(offset)) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", me->name, (long long)v, location); diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c index cf190197a22f6556b2b05fe686589b3442e7a090..fb02811df71434ce30f2db7dbcf80a49f1bf9c64 100644 --- a/arch/riscv/kernel/perf_callchain.c +++ b/arch/riscv/kernel/perf_callchain.c @@ -19,8 +19,8 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, { struct stackframe buftail; unsigned long ra = 0; - unsigned long *user_frame_tail = - (unsigned long *)(fp - sizeof(struct stackframe)); + unsigned long __user *user_frame_tail = + (unsigned long __user *)(fp - sizeof(struct stackframe)); /* Check accessibility of one struct frame_tail beyond */ if (!access_ok(user_frame_tail, sizeof(buftail))) @@ -60,10 +60,11 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry, void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); unsigned long fp = 0; /* RISC-V does not support perf in guest mode. */ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) + if (guest_cbs && guest_cbs->is_in_guest()) return; fp = regs->s0; @@ -76,7 +77,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, bool fill_callchain(unsigned long pc, void *entry) { - return perf_callchain_store(entry, pc); + return perf_callchain_store(entry, pc) == 0; } void notrace walk_stackframe(struct task_struct *task, @@ -84,8 +85,10 @@ void notrace walk_stackframe(struct task_struct *task, void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + /* RISC-V does not support perf in guest mode. 
*/ - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { pr_warn("RISC-V does not support perf in guest mode!"); return; } } diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..095ac976d7da1092cbbec8d1a2c11282236d7c36 --- /dev/null +++ b/arch/riscv/kernel/trace_irq.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Changbin Du + */ + +#include <linux/irqflags.h> +#include <linux/kprobes.h> +#include "trace_irq.h" + +/* + * trace_hardirqs_on/off require the caller to set up the frame pointer + * properly. Otherwise, CALLER_ADDR1 might trigger a paging exception in + * the kernel. Here we add one extra level so they can be safely called + * by low-level entry code in which $fp is used for other purposes. + */ + +void __trace_hardirqs_on(void) +{ + trace_hardirqs_on(); +} +NOKPROBE_SYMBOL(__trace_hardirqs_on); + +void __trace_hardirqs_off(void) +{ + trace_hardirqs_off(); +} +NOKPROBE_SYMBOL(__trace_hardirqs_off); diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..99fe67377e5ed6795b1a45bf6b2a0d8af6fef41d --- /dev/null +++ b/arch/riscv/kernel/trace_irq.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022 Changbin Du + */ +#ifndef __TRACE_IRQ_H +#define __TRACE_IRQ_H + +void __trace_hardirqs_on(void); +void __trace_hardirqs_off(void); + +#endif /* __TRACE_IRQ_H */ diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index 7ebaef10ea1b69e1557c9d08fec4d288ac31e842..ac7a25298a04af665ff0552d99a95e1671013ddd 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -24,6 +24,9 @@ obj-$(CONFIG_KASAN) += kasan_init.o ifdef CONFIG_KASAN KASAN_SANITIZE_kasan_init.o := n KASAN_SANITIZE_init.o := n +ifdef CONFIG_DEBUG_VIRTUAL +KASAN_SANITIZE_physaddr.o := n +endif endif obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 883c3be43ea983020b6e4c82e418089215f1a2e6..2db442701ee28f528602f9d7aca5d7174938322f 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -21,8 +21,7 @@ asmlinkage void __init kasan_early_init(void) for (i = 0; i < PTRS_PER_PTE; ++i) set_pte(kasan_early_shadow_pte + i, - mk_pte(virt_to_page(kasan_early_shadow_page), - PAGE_KERNEL)); + pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL)); for (i = 0; i < PTRS_PER_PMD; ++i) set_pmd(kasan_early_shadow_pmd + i, diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index e1fcc03159ef25c42e90fa55e072f9914602ee5c..a927adccb4ba7f81b08cb6f24e7ec3c48a6aa60d 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -20,6 +20,7 @@ static char local_guest[] = " "; static char all_guests[] = "* "; +static char *all_groups = all_guests; static char *guest_query; struct diag2fc_data { @@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr) memcpy(parm_list.userid, query, NAME_LEN); ASCEBC(parm_list.userid, NAME_LEN); - parm_list.addr = (unsigned long) addr ; + memcpy(parm_list.aci_grp, all_groups, NAME_LEN); + ASCEBC(parm_list.aci_grp, NAME_LEN); + parm_list.addr = (unsigned long)addr; parm_list.size = size; parm_list.fmt = 0x02; - memset(parm_list.aci_grp, 0x40, NAME_LEN); rc = -1; diag_stat_inc(DIAG_STAT_X2FC); diff --git a/arch/s390/include/asm/extable.h b/arch/s390/include/asm/extable.h index
3beb294fd553148486014d1a63057807066a20ac..ce0db8172aad1a876f8bf9444c49d6b76cab47e3 100644 --- a/arch/s390/include/asm/extable.h +++ b/arch/s390/include/asm/extable.h @@ -69,8 +69,13 @@ static inline void swap_ex_entry_fixup(struct exception_table_entry *a, { a->fixup = b->fixup + delta; b->fixup = tmp.fixup - delta; - a->handler = b->handler + delta; - b->handler = tmp.handler - delta; + a->handler = b->handler; + if (a->handler) + a->handler += delta; + b->handler = tmp.handler; + if (b->handler) + b->handler -= delta; } +#define swap_ex_entry_fixup swap_ex_entry_fixup #endif diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 4055f1c4981472b049f9029e49f53bbddc3cdb6e..b81bc96216b972274a9ec386998a74310b6e3ec8 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -30,7 +30,7 @@ #define DEBUGP(fmt , ...) #endif -#define PLT_ENTRY_SIZE 20 +#define PLT_ENTRY_SIZE 22 void *module_alloc(unsigned long size) { @@ -330,27 +330,26 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ if (info->plt_initialized == 0) { - unsigned int insn[5]; - unsigned int *ip = me->core_layout.base + - me->arch.plt_offset + - info->plt_offset; - - insn[0] = 0x0d10e310; /* basr 1,0 */ - insn[1] = 0x100a0004; /* lg 1,10(1) */ + unsigned char insn[PLT_ENTRY_SIZE]; + char *plt_base; + char *ip; + + plt_base = me->core_layout.base + me->arch.plt_offset; + ip = plt_base + info->plt_offset; + *(int *)insn = 0x0d10e310; /* basr 1,0 */ + *(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) { - unsigned int *ij; - ij = me->core_layout.base + - me->arch.plt_offset + - me->arch.plt_size - PLT_ENTRY_SIZE; - insn[2] = 0xa7f40000 + /* j __jump_r1 */ - (unsigned int)(u16) - (((unsigned long) ij - 8 - - (unsigned long) ip) / 2); + char *jump_r1; + + jump_r1 = plt_base + me->arch.plt_size - + PLT_ENTRY_SIZE; + /* brcl 0xf,__jump_r1 */ + *(short *)&insn[8] = 0xc0f4; + *(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2; } else { - insn[2] = 0x07f10000; /* br %r1 */ + *(int *)&insn[8] = 0x07f10000; /* br %r1 */ } - insn[3] = (unsigned int) (val >> 32); - insn[4] = (unsigned int) val; + *(long *)&insn[14] = val; write(ip, insn, sizeof(insn)); info->plt_initialized = 1; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index e6c4f29fc6956ff7421d9bffbcff634321b098d1..b51ab19eb9721479bb05718895e1a159fa763fef 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2115,6 +2115,13 @@ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu) return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); } +int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + return test_bit(IRQ_PEND_RESTART, &li->pending_irqs); +} + void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 00f03f363c9b0b848a83174b5074df5c52acf47c..d8e9239c24ffc62c91a0f280fe5bddd89029a664 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -4588,10 +4588,15 @@ int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) } } - /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ + /* + * Set the VCPU to STOPPED and THEN clear the interrupt flag, + * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders + * 
have been fully processed. This will ensure that the VCPU + * is kept BUSY if another VCPU is inquiring with SIGP SENSE. + */ + kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); kvm_s390_clear_stop_irq(vcpu); - kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED); __disable_ibs_on_vcpu(vcpu); for (i = 0; i < online_vcpus; i++) { @@ -4649,6 +4654,8 @@ static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu, return -EINVAL; if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) return -E2BIG; + if (!kvm_s390_pv_cpu_is_protected(vcpu)) + return -EINVAL; switch (mop->op) { case KVM_S390_MEMOP_SIDA_READ: diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 2d134833bca6964c0464fdcfe8f748fa636c1858..a3e9b71d426f9313ac3764c034c0e8aa85293480 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -418,6 +418,7 @@ void kvm_s390_destroy_adapters(struct kvm *kvm); int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu); extern struct kvm_device_ops kvm_flic_ops; int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); +int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu); void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *buf, int len); diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 683036c1c92a8f9428622c8ba5b897b86cd3e6ae..3dc921e853b6e26e8cc35a9d39f29cc655216e7f 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -288,6 +288,34 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, if (!dst_vcpu) return SIGP_CC_NOT_OPERATIONAL; + /* + * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders + * are processed asynchronously. Until the affected VCPU finishes + * its work and calls back into KVM to clear the (RESTART or STOP) + * interrupt, we need to return any new non-reset orders "busy". + * + * This is important because a single VCPU could issue: + * 1) SIGP STOP $DESTINATION + * 2) SIGP SENSE $DESTINATION + * + * If the SIGP SENSE would not be rejected as "busy", it could + * return an incorrect answer as to whether the VCPU is STOPPED + * or OPERATING. + */ + if (order_code != SIGP_INITIAL_CPU_RESET && + order_code != SIGP_CPU_RESET) { + /* + * Lockless check. Both SIGP STOP and SIGP (RE)START + * properly synchronize everything while processing + * their orders, while the guest cannot observe a + * difference when issuing other orders from two + * different VCPUs. 
+ */ + if (kvm_s390_is_stop_irq_pending(dst_vcpu) || + kvm_s390_is_restart_irq_pending(dst_vcpu)) + return SIGP_CC_BUSY; + } + switch (order_code) { case SIGP_SENSE: vcpu->stat.instruction_sigp_sense++; diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 11d2c8395e2ae92fdc2ac8c94283da9cada12cfc..6d99b1be0082f933c7749de801634585e782507d 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -253,13 +253,15 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) /* Free 2K page table fragment of a 4K page */ bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); spin_lock_bh(&mm->context.lock); - mask = atomic_xor_bits(&page->_refcount, 1U << (bit + 24)); + mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24)); mask >>= 24; if (mask & 3) list_add(&page->lru, &mm->context.pgtable_list); else list_del(&page->lru); spin_unlock_bh(&mm->context.lock); + mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24)); + mask >>= 24; if (mask != 0) return; } else { diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 741d0701003af072bf2be08ac6ff8529da81dd6a..1da36dd34990b59b1d26ff18c01bbb046924802d 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -65,7 +65,7 @@ struct rt_signal_frame { */ static inline bool invalid_frame_pointer(void __user *fp, int fplen) { - if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen)) + if ((((unsigned long) fp) & 15) || !access_ok(fp, fplen)) return true; return false; diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index bef7ab3816742bfc1f968e4424c5316e4204eb09..cf2f6f00708c64669537b78371b9af7450a2a67b 100644 --- a/arch/sw_64/Kconfig +++ b/arch/sw_64/Kconfig @@ -3,23 +3,17 @@ config SW64 bool default y select AUDIT_ARCH - select VIRT_IO - select HAVE_AOUT select HAVE_IDE select HAVE_OPROFILE -# select HAVE_SYSCALL_WRAPPERS - select HAVE_IRQ_WORK select HAVE_PCSPKR_PLATFORM select HAVE_PERF_EVENTS - select HAVE_GENERIC_HARDIRQS + select HAVE_FAST_GUP select GENERIC_CLOCKEVENTS select GENERIC_IRQ_PROBE select GENERIC_IRQ_LEGACY - select GENERIC_IDLE_LOOP select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION select ARCH_HAVE_NMI_SAFE_CMPXCHG - select ARCH_SUPPORTS_MSI select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_NO_PREEMPT select ARCH_USE_CMPXCHG_LOCKREF @@ -27,18 +21,16 @@ config SW64 select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA select ARCH_SUPPORTS_NUMA_BALANCING - select ARCH_WANTS_PROT_NUMA_PROT_NONE select HAVE_ARCH_TRANSPARENT_HUGEPAGE - select HAVE_GENERIC_RCU_GUP select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_SECCOMP_FILTER - select GENERIC_SIGALTSTACK select OLD_SIGACTION select OLD_SIGSUSPEND select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select HAVE_ARCH_KGDB select ARCH_HAS_PHYS_TO_DMA + select SWIOTLB select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select NO_BOOTMEM @@ -46,36 +38,37 @@ config SW64 select ARCH_USE_QUEUED_SPINLOCKS select COMMON_CLK select HANDLE_DOMAIN_IRQ - select ARCH_INLINE_READ_LOCK if !PREEMPT - select ARCH_INLINE_READ_LOCK_BH if !PREEMPT - select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT - select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_READ_UNLOCK if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT - select ARCH_INLINE_WRITE_LOCK if !PREEMPT - select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT - select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT 
- select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT - select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT - select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_LOCK if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT + select ARCH_INLINE_READ_LOCK + select ARCH_INLINE_READ_LOCK_BH + select ARCH_INLINE_READ_LOCK_IRQ + select ARCH_INLINE_READ_LOCK_IRQSAVE + select ARCH_INLINE_READ_UNLOCK + select ARCH_INLINE_READ_UNLOCK_BH + select ARCH_INLINE_READ_UNLOCK_IRQ + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE + select ARCH_INLINE_WRITE_LOCK + select ARCH_INLINE_WRITE_LOCK_BH + select ARCH_INLINE_WRITE_LOCK_IRQ + select ARCH_INLINE_WRITE_LOCK_IRQSAVE + select ARCH_INLINE_WRITE_UNLOCK + select ARCH_INLINE_WRITE_UNLOCK_BH + select ARCH_INLINE_WRITE_UNLOCK_IRQ + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + select ARCH_INLINE_SPIN_TRYLOCK + select ARCH_INLINE_SPIN_TRYLOCK_BH + select ARCH_INLINE_SPIN_LOCK + select ARCH_INLINE_SPIN_LOCK_BH + select ARCH_INLINE_SPIN_LOCK_IRQ + select ARCH_INLINE_SPIN_LOCK_IRQSAVE + select ARCH_INLINE_SPIN_UNLOCK + select ARCH_INLINE_SPIN_UNLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK_IRQ + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_HAS_SG_CHAIN select IRQ_FORCED_THREADING select GENERIC_IRQ_MIGRATION if SMP + select HAVE_ARCH_TRACEHOOK select HAVE_FUNCTION_TRACER select HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD @@ -95,12 +88,15 @@ config SW64 select HAVE_PERF_USER_STACK_DUMP select HAVE_PERF_REGS select ARCH_SUPPORTS_ACPI - select ACPI if ARCH_SUPPORTS_ACPI - select ACPI_REDUCED_HARDWARE_ONLY if ACPI + select ACPI + select ACPI_REDUCED_HARDWARE_ONLY select GENERIC_TIME_VSYSCALL select SET_FS select PCI_MSI_ARCH_FALLBACKS select DMA_OPS if PCI + select HAVE_REGS_AND_STACK_ACCESS_API + select ARCH_HAS_PTE_SPECIAL + select HARDIRQS_SW_RESEND config LOCKDEP_SUPPORT def_bool y @@ -238,6 +234,7 @@ config PLATFORM_XUELANG depends on SW64_CHIP3 select SPARSE_IRQ select SYS_HAS_EARLY_PRINTK + select SW64_INTC_V2 help Sunway chip3 board chipset @@ -249,22 +246,252 @@ config LOCK_MEMB bool "Insert mem barrier before lock instruction" default y +menu "CPU Frequency scaling" + +config CPU_FREQ + bool "CPU Frequency scaling" + select SRCU + help + CPU Frequency scaling allows you to change the clock speed of + CPUs on the fly. This is a nice method to save power, because + the lower the CPU clock speed, the less power the CPU consumes. + + Note that this driver doesn't automatically change the CPU + clock speed, you need to either enable a dynamic cpufreq governor + (see below) after boot, or use a userspace tool. + + For details, take a look at . + + If in doubt, say N. + +if CPU_FREQ + +config SW64_CPUFREQ + bool "sw64 CPU Frequency interface for Chip3 Asic" + depends on SW64_CHIP3 + default y + help + Turns on the interface for SW64_CPU Frequency. + +config SW64_CPUAUTOPLUG + bool "sw64 CPU Autoplug interface" + depends on SW64_CPUFREQ + default y + help + Turns on the interface for SW64_CPU CPUAUTOPLUG. 
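
The CPU_FREQ help text above notes that the clock speed can be driven either by a dynamic in-kernel governor or by a userspace tool. A minimal sketch of the userspace route, assuming the standard cpufreq sysfs layout; cpu0 is used purely for illustration, and the "ondemand" governor must have been built in via the options below:

#include <stdio.h>

int main(void)
{
	/* Standard cpufreq sysfs attribute for the active governor. */
	const char *path =
		"/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");	/* needs root and CPU_FREQ=y */
		return 1;
	}
	fputs("ondemand\n", f);		/* request the ondemand governor */
	return fclose(f) ? 1 : 0;
}
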
+ +config CPU_FREQ_GOV_ATTR_SET + bool + +config CPU_FREQ_GOV_COMMON + select CPU_FREQ_GOV_ATTR_SET + select IRQ_WORK + bool + +config CPU_FREQ_BOOST_SW + bool + depends on THERMAL + +config CPU_FREQ_STAT + bool "CPU frequency transition statistics" + help + Export CPU frequency statistics information through sysfs. + + If in doubt, say N. + choice - prompt "DMA Mapping Type" - depends on SW64 && PCI + prompt "Default CPUFreq governor" + default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ + default CPU_FREQ_DEFAULT_GOV_PERFORMANCE + help + This option sets which CPUFreq governor shall be loaded at + startup. If in doubt, select 'performance'. -config DIRECT_DMA - bool "Direct DMA Mapping" - depends on SW64 && PCI +config CPU_FREQ_DEFAULT_GOV_PERFORMANCE + bool "performance" + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'performance' as default. This sets + the frequency statically to the highest frequency supported by + the CPU. -config SWIOTLB - bool "Software IO TLB" - depends on SW64 && PCI +config CPU_FREQ_DEFAULT_GOV_POWERSAVE + bool "powersave" + select CPU_FREQ_GOV_POWERSAVE help - Software IO TLB + Use the CPUFreq governor 'powersave' as default. This sets + the frequency statically to the lowest frequency supported by + the CPU. + +config CPU_FREQ_DEFAULT_GOV_USERSPACE + bool "userspace" + select CPU_FREQ_GOV_USERSPACE + help + Use the CPUFreq governor 'userspace' as default. This allows + you to set the CPU frequency manually or when a userspace + program shall be able to set the CPU dynamically without having + to enable the userspace governor manually. + +config CPU_FREQ_DEFAULT_GOV_ONDEMAND + bool "ondemand" + select CPU_FREQ_GOV_ONDEMAND + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'ondemand' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the ondemand + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE + bool "conservative" + select CPU_FREQ_GOV_CONSERVATIVE + select CPU_FREQ_GOV_PERFORMANCE + help + Use the CPUFreq governor 'conservative' as default. This allows + you to get a full dynamic frequency capable system by simply + loading your cpufreq low-level hardware driver. + Be aware that not all cpufreq drivers support the conservative + governor. If unsure have a look at the help section of the + driver. Fallback governor will be the performance governor. + +config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL + bool "schedutil" + depends on SMP + select CPU_FREQ_GOV_SCHEDUTIL + select CPU_FREQ_GOV_PERFORMANCE + help + Use the 'schedutil' CPUFreq governor by default. If unsure, + have a look at the help section of that governor. The fallback + governor will be 'performance'. endchoice +config CPU_FREQ_GOV_PERFORMANCE + tristate "'performance' governor" + help + This cpufreq governor sets the frequency statically to the + highest available CPU frequency. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_performance. + + If in doubt, say Y. + +config CPU_FREQ_GOV_POWERSAVE + tristate "'powersave' governor" + help + This cpufreq governor sets the frequency statically to the + lowest available CPU frequency. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_powersave. 
+ + If in doubt, say Y. + +config CPU_FREQ_GOV_USERSPACE + tristate "'userspace' governor for userspace frequency scaling" + help + Enable this cpufreq governor when you either want to set the + CPU frequency manually or when a userspace program shall + be able to set the CPU dynamically, like on LART + . + + To compile this driver as a module, choose M here: the + module will be called cpufreq_userspace. + + For details, take a look at . + + If in doubt, say Y. + +config CPU_FREQ_GOV_ONDEMAND + tristate "'ondemand' cpufreq policy governor" + select CPU_FREQ_GOV_COMMON + help + 'ondemand' - This driver adds a dynamic cpufreq policy governor. + The governor does a periodic polling and + changes frequency based on the CPU utilization. + The support for this governor depends on CPU capability to + do fast frequency switching (i.e, very low latency frequency + transitions). + + To compile this driver as a module, choose M here: the + module will be called cpufreq_ondemand. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_CONSERVATIVE + tristate "'conservative' cpufreq governor" + depends on CPU_FREQ + select CPU_FREQ_GOV_COMMON + help + 'conservative' - this driver is rather similar to the 'ondemand' + governor both in its source code and its purpose, the difference is + its optimisation for better suitability in a battery powered + environment. The frequency is gracefully increased and decreased + rather than jumping to 100% when speed is required. + + If you have a desktop machine then you should really be considering + the 'ondemand' governor instead, however if you are using a laptop, + PDA or even an AMD64 based computer (due to the unacceptable + step-by-step latency issues between the minimum and maximum frequency + transitions in the CPU) you will probably want to use this governor. + + To compile this driver as a module, choose M here: the + module will be called cpufreq_conservative. + + For details, take a look at linux/Documentation/cpu-freq. + + If in doubt, say N. + +config CPU_FREQ_GOV_SCHEDUTIL + bool "'schedutil' cpufreq policy governor" + depends on CPU_FREQ && SMP + select CPU_FREQ_GOV_ATTR_SET + select IRQ_WORK + help + This governor makes decisions based on the utilization data provided + by the scheduler. It sets the CPU frequency to be proportional to + the utilization/capacity ratio coming from the scheduler. If the + utilization is frequency-invariant, the new frequency is also + proportional to the maximum available frequency. If that is not the + case, it is proportional to the current frequency of the CPU. The + frequency tipping point is at utilization/capacity equal to 80% in + both cases. + + If in doubt, say N. + +comment "CPU frequency scaling drivers" + +config CPUFREQ_DT + tristate "Generic DT based cpufreq driver" + depends on HAVE_CLK && OF + # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y: + depends on !CPU_THERMAL || THERMAL + select CPUFREQ_DT_PLATDEV + select PM_OPP + help + This adds a generic DT based cpufreq driver for frequency management. + It supports both uniprocessor (UP) and symmetric multiprocessor (SMP) + systems. + + If in doubt, say N. + +config CPUFREQ_DT_PLATDEV + bool + help + This adds a generic DT based cpufreq platdev driver for frequency + management. This creates a 'cpufreq-dt' platform device, on the + supported platforms. + + If in doubt, say N. 
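
The 80% tipping point quoted in the CPU_FREQ_GOV_SCHEDUTIL help above follows from the frequency selection rule schedutil applies (the real implementation lives in kernel/sched/cpufreq_schedutil.c); a sketch of that rule under this assumption, with a helper name of our own:

/* next_freq = 1.25 * max_freq * util / max: the result reaches
 * max_freq exactly when util/max == 80%, matching the help text.
 */
static unsigned long schedutil_next_freq(unsigned long max_freq,
					 unsigned long util,
					 unsigned long max)
{
	/* max_freq + max_freq/4 is 1.25 * max_freq in integer math */
	return (max_freq + (max_freq >> 2)) * util / max;
}
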
+ +endif +endmenu + # clear all implied options (don't want default values for those): # Most of these machines have ISA slots; not exactly sure which don't, # and this doesn't activate hordes of code, so do it always. @@ -294,8 +521,7 @@ config PCI VESA. If you have PCI, say Y, otherwise N. config PCI_DOMAINS - bool - default y + def_bool PCI config PCI_SYSCALL def_bool PCI @@ -425,18 +651,6 @@ config ARCH_SPARSEMEM_ENABLE depends on SMP select SPARSEMEM_VMEMMAP_ENABLE -config ARCH_DISCONTIGMEM_ENABLE - bool "Discontiguous Memory Support" - depends on SMP - help - Say Y to support efficient handling of discontiguous physical memory, - for architectures which are either NUMA (Non-Uniform Memory Access) - or have huge holes in the physical address space for other reasons. - See for more. - -source "kernel/Kconfig.preempt" - - config NUMA bool "NUMA Support" depends on SMP && !FLATMEM @@ -523,15 +737,10 @@ endmenu menu "Boot options" -config SW64_IRQ_CHIP - bool - config USE_OF bool "Flattened Device Tree support" - select GENERIC_IRQ_CHIP - select IRQ_DOMAIN - select SW64_IRQ_CHIP select OF + select IRQ_DOMAIN help Include support for flattened device tree machine descriptions. @@ -675,12 +884,4 @@ source "drivers/idle/Kconfig" endmenu -# DUMMY_CONSOLE may be defined in drivers/video/console/Kconfig -# but we also need it if VGA_HOSE is set -config DUMMY_CONSOLE - bool - depends on VGA_HOSE - default y - - source "arch/sw_64/kvm/Kconfig" diff --git a/arch/sw_64/Makefile b/arch/sw_64/Makefile index 341fe6a0d9c8c262590b201fa520b41a015aaacf..7d86e80362f69b24cbce973da7da1179ded57f03 100644 --- a/arch/sw_64/Makefile +++ b/arch/sw_64/Makefile @@ -31,6 +31,7 @@ cflags-y += $(call cc-option, -fno-jump-tables) cflags-y += $(cpuflags-y) KBUILD_CFLAGS += $(cflags-y) +KBUILD_DEFCONFIG = defconfig head-y := arch/sw_64/kernel/head.o diff --git a/arch/sw_64/boot/dts/chip3.dts b/arch/sw_64/boot/dts/chip3.dts index ce61dfe6e7bd7a3ddfd435c09f1508b2d6699efa..082506393ac98ffb1219d124029cafe8608ce4dd 100644 --- a/arch/sw_64/boot/dts/chip3.dts +++ b/arch/sw_64/boot/dts/chip3.dts @@ -32,12 +32,20 @@ }; - intc: interrupt-controller{ + intc: interrupt-controller { compatible = "sw64,sw6_irq_controller"; interrupt-controller; #interrupt-cells = <1>; }; + lpc_intc: interrupt-controller@0x8037 { + compatible = "sw64,lpc_intc"; + reg = <0x8037 0x40000000 0x0 0x8000>; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&intc>; + interrupts = <2>; + }; uart: serial0@8033 { #address-cells = <2>; @@ -103,8 +111,21 @@ compatible = "nxp,pcf8523"; reg = <0x68>; }; + + lm75: at30tse752a@48 { + compatible = "microchip,tcn75"; + reg = <0x48>; + }; }; + pvt: pvt@0x8030 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw64,pvt-vol"; + reg = <0x8030 0x0 0x0 0x7c00>; + status = "okay"; + }; + spi: spi@0x8032 { #address-cells = <2>; #size-cells = <2>; @@ -131,7 +152,7 @@ #size-cells = <1>; partition@0 { - label = "test"; + label = "spares0"; reg = <0 0x400000>; }; }; @@ -153,7 +174,7 @@ #size-cells = <1>; partition@0 { - label = "test"; + label = "spares1"; reg = <0 0x400000>; }; }; @@ -163,14 +184,38 @@ lpc: lpc@0x8037 { #address-cells = <2>; #size-cells = <2>; - compatible = "sw,sw6b_lpc"; + compatible = "sunway,chip3_lpc"; reg = <0x8037 0x40000000 0x0 0x8000>; - interrupt-parent=<&intc>; - interrupts = <2>; status = "okay"; }; + ipmi-kcs@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + device_type = "ipmi"; + compatible = "ipmi-kcs"; + reg = <0x8037 0x10000ca2 0x0 0x10>; + reg-size = <1>; + 
reg-spacing = <1>; + reg-shift = <0>; + status = "disabled"; + }; + + ipmi-bt@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + device_type = "ipmi"; + compatible = "ipmi-bt"; + reg = <0x8037 0x100000e4 0x0 0x10>; + interrupt-parent=<&lpc_intc>; + interrupts = <10>; + reg-size = <1>; + reg-spacing = <1>; + reg-shift = <0>; + status = "disabled"; + }; + gpio: gpio@8036 { #address-cells = <2>; #size-cells = <2>; diff --git a/arch/sw_64/boot/dts/chip_vt.dts b/arch/sw_64/boot/dts/chip_vt.dts index f0bcf1db1d089bc827e259f69983e278a43b83bd..abad29dee97e16c4dd062a524908bab604a3351c 100644 --- a/arch/sw_64/boot/dts/chip_vt.dts +++ b/arch/sw_64/boot/dts/chip_vt.dts @@ -34,5 +34,17 @@ clock-frequency = <24000000>; status = "okay"; }; + misc: misc0@8036 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw6,sunway-ged"; + reg = <0x8036 0x0 0x0 0x20>; + interrupt-parent=<&intc>; + interrupts = <13>; + reg-shift = <0>; + reg-io-width = <8>; + clock-frequency = <24000000>; + status = "okay"; + }; }; }; diff --git a/arch/sw_64/chip/chip3/Makefile b/arch/sw_64/chip/chip3/Makefile index 2b7b5790003f133b0716390ee941ffa342cdd283..ba0ab3f67f98faef6232654ee5f5e90f37262e70 100644 --- a/arch/sw_64/chip/chip3/Makefile +++ b/arch/sw_64/chip/chip3/Makefile @@ -4,5 +4,4 @@ obj-y := chip.o i2c-lib.o obj-$(CONFIG_PCI) += pci-quirks.o obj-$(CONFIG_PCI_MSI) += msi.o vt_msi.o -obj-$(CONFIG_SW64_IRQ_CHIP) += irq_chip.o obj-$(CONFIG_CPUFREQ_DEBUGFS) += cpufreq_debugfs.o diff --git a/arch/sw_64/chip/chip3/chip.c b/arch/sw_64/chip/chip3/chip.c index adb4d325fc9176a9ea58907a520b2a46b867b311..84ca7ffcb2ef528d114c39b50fde947561dedc07 100644 --- a/arch/sw_64/chip/chip3/chip.c +++ b/arch/sw_64/chip/chip3/chip.c @@ -1,16 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include #include -#include -#include + #include #include #include -#include #include #include -#include #include "../../../../drivers/pci/pci.h" static u64 read_longtime(struct clocksource *cs) @@ -58,14 +54,9 @@ static struct clocksource clocksource_longtime = { static u64 read_vtime(struct clocksource *cs) { u64 result; - unsigned long node; - unsigned long vtime_addr = PAGE_OFFSET | IO_BASE | LONG_TIME; - - if (is_in_guest()) - result = rdio64(vtime_addr); - else - result = sw64_io_read(node, LONG_TIME); + unsigned long vtime_addr = IO_BASE | LONG_TIME; + result = rdio64(vtime_addr); return result; } @@ -99,6 +90,25 @@ void setup_chip_clocksource(void) #endif } +void set_devint_wken(int node) +{ + unsigned long val; + + /* enable INTD wakeup */ + val = 0x80; + sw64_io_write(node, DEVINT_WKEN, val); + sw64_io_write(node, DEVINTWK_INTEN, val); +} + +void set_pcieport_service_irq(int node, int index) +{ + if (IS_ENABLED(CONFIG_PCIE_PME)) + write_piu_ior0(node, index, PMEINTCONFIG, PME_ENABLE_INTD_CORE0); + + if (IS_ENABLED(CONFIG_PCIEAER)) + write_piu_ior0(node, index, AERERRINTCONFIG, AER_ENABLE_INTD_CORE0); +} + static int chip3_get_cpu_nums(void) { unsigned long trkmode; @@ -116,7 +126,7 @@ static int chip3_get_cpu_nums(void) static unsigned long chip3_get_vt_node_mem(int nodeid) { - return *(unsigned long *)MMSIZE; + return *(unsigned long *)MMSIZE & MMSIZE_MASK; } static unsigned long chip3_get_node_mem(int nodeid) @@ -133,6 +143,19 @@ static unsigned long chip3_get_node_mem(int nodeid) return node_mem; } +static void chip3_setup_vt_core_start(struct cpumask *cpumask) +{ + int i; + unsigned long coreonline; + + coreonline = sw64_io_read(0, CORE_ONLINE); + + for (i = 0; i < 64 ; i++) { + if (coreonline & (1UL << i)) + cpumask_set_cpu(i, 
cpumask); + } +} + static void chip3_setup_core_start(struct cpumask *cpumask) { int i, j, cpus; @@ -155,18 +178,20 @@ int chip_pcie_configure(struct pci_controller *hose) struct pci_bus *bus, *top; struct list_head *next; unsigned int max_read_size, smallest_max_payload; - int max_payloadsize, iov_bus = 0; + int max_payloadsize; unsigned long rc_index, node; unsigned long piuconfig0, value; unsigned int pcie_caps_offset; unsigned int rc_conf_value; u16 devctl, new_values; bool rc_ari_disabled = false, found = false; + unsigned char bus_max_num; node = hose->node; rc_index = hose->index; smallest_max_payload = read_rc_conf(node, rc_index, RC_EXP_DEVCAP); smallest_max_payload &= PCI_EXP_DEVCAP_PAYLOAD; + bus_max_num = hose->busn_space->start; top = hose->bus; bus = top; @@ -177,6 +202,7 @@ int chip_pcie_configure(struct pci_controller *hose) /* end of this bus, go up or finish */ if (bus == top) break; + next = bus->self->bus_list.next; bus = bus->self->bus; continue; @@ -201,10 +227,8 @@ int chip_pcie_configure(struct pci_controller *hose) } } -#ifdef CONFIG_PCI_IOV - if (dev->is_physfn) - iov_bus += dev->sriov->max_VF_buses - dev->bus->number; -#endif + if (bus->busn_res.end > bus_max_num) + bus_max_num = bus->busn_res.end; /* Query device PCIe capability register */ pcie_caps_offset = dev->pcie_cap; @@ -283,7 +307,7 @@ int chip_pcie_configure(struct pci_controller *hose) pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, devctl); } - return iov_bus; + return bus_max_num; } static int chip3_check_pci_vt_linkup(unsigned long node, unsigned long index) @@ -408,7 +432,10 @@ static int chip3_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { struct pci_controller *hose = dev->sysdata; - return hose->int_irq; + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return hose->service_irq; + else + return hose->int_irq; } extern struct pci_controller *hose_head, **hose_tail; @@ -427,6 +454,23 @@ static void sw6_handle_intx(unsigned int offset) value = value | (1UL << 62); write_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7), value); } + + if (IS_ENABLED(CONFIG_PCIE_PME)) { + value = read_piu_ior0(hose->node, hose->index, PMEINTCONFIG); + if (value >> 63) { + handle_irq(hose->service_irq); + write_piu_ior0(hose->node, hose->index, PMEINTCONFIG, value); + } + } + + if (IS_ENABLED(CONFIG_PCIEAER)) { + value = read_piu_ior0(hose->node, hose->index, AERERRINTCONFIG); + if (value >> 63) { + handle_irq(hose->service_irq); + write_piu_ior0(hose->node, hose->index, AERERRINTCONFIG, value); + } + } + if (hose->iommu_enable) { value = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); if (value >> 63) @@ -450,76 +494,6 @@ static void chip3_device_interrupt(unsigned long irq_info) } } -static void set_devint_wken(int node, int val) -{ - sw64_io_write(node, DEVINT_WKEN, val); - sw64_io_write(node, DEVINTWK_INTEN, 0x0); -} - -static void clear_rc_status(int node, int rc) -{ - unsigned int val, status; - - val = 0x10000; - do { - write_rc_conf(node, rc, RC_STATUS, val); - mb(); - status = read_rc_conf(node, rc, RC_STATUS); - } while (status >> 16); -} - -static void chip3_suspend(int wake) -{ - unsigned long val; - unsigned int val_32; - unsigned long rc_start; - int node, rc, index, cpus; - - cpus = chip3_get_cpu_nums(); - for (node = 0; node < cpus; node++) { - rc = -1; - rc_start = sw64_io_read(node, IO_START); - index = ffs(rc_start); - while (index) { - rc += index; - if (wake) { - val_32 = read_rc_conf(node, rc, RC_CONTROL); - val_32 &= ~0x8; - write_rc_conf(node, rc, 
RC_CONTROL, val_32); - - set_devint_wken(node, 0x0); - val = 0x8000000000000000UL; - write_piu_ior0(node, rc, PMEINTCONFIG, val); - write_piu_ior0(node, rc, PMEMSICONFIG, val); - - clear_rc_status(node, rc); - } else { - val_32 = read_rc_conf(node, rc, RC_CONTROL); - val_32 |= 0x8; - write_rc_conf(node, rc, RC_CONTROL, val_32); - - clear_rc_status(node, rc); - set_devint_wken(node, 0x1f0); -#ifdef CONFIG_PCI_MSI //USE MSI - val_32 = read_rc_conf(node, rc, RC_COMMAND); - val_32 |= 0x400; - write_rc_conf(node, rc, RC_COMMAND, val_32); - val_32 = read_rc_conf(node, rc, RC_MSI_CONTROL); - val_32 |= 0x10000; - write_rc_conf(node, rc, RC_MSI_CONTROL, val_32); - val = 0x4000000000000000UL; - write_piu_ior0(node, rc, PMEMSICONFIG, val); -#else //USE INT - val = 0x4000000000000400UL; - write_piu_ior0(node, rc, PMEINTCONFIG, val); -#endif - } - rc_start = rc_start >> index; - index = ffs(rc_start); - } - } -} - static void chip3_hose_init(struct pci_controller *hose) { unsigned long pci_io_base; @@ -531,13 +505,10 @@ static void chip3_hose_init(struct pci_controller *hose) hose->dense_mem_base = pci_io_base; hose->dense_io_base = pci_io_base | PCI_LEGACY_IO; - hose->ep_config_space_base = PAGE_OFFSET | pci_io_base | PCI_EP_CFG; - hose->rc_config_space_base = PAGE_OFFSET | pci_io_base | PCI_RC_CFG; + hose->ep_config_space_base = __va(pci_io_base | PCI_EP_CFG); + hose->rc_config_space_base = __va(pci_io_base | PCI_RC_CFG); - if (is_in_host()) - hose->mem_space->start = pci_io_base + PCI_32BIT_MEMIO; - else - hose->mem_space->start = pci_io_base + PCI_32BIT_VT_MEMIO; + hose->mem_space->start = pci_io_base + PCI_32BIT_MEMIO; hose->mem_space->end = hose->mem_space->start + PCI_32BIT_MEMIO_SIZE - 1; hose->mem_space->name = "pci memory space"; hose->mem_space->flags = IORESOURCE_MEM; @@ -574,6 +545,7 @@ static void chip3_hose_init(struct pci_controller *hose) static void chip3_init_ops_fixup(void) { if (is_guest_or_emul()) { + sw64_chip_init->early_init.setup_core_start = chip3_setup_vt_core_start; sw64_chip_init->early_init.get_node_mem = chip3_get_vt_node_mem; sw64_chip_init->pci_init.check_pci_linkup = chip3_check_pci_vt_linkup; } @@ -603,7 +575,6 @@ static struct sw64_chip_init_ops chip3_chip_init_ops = { static struct sw64_chip_ops chip3_chip_ops = { .get_cpu_num = chip3_get_cpu_nums, - .suspend = chip3_suspend, .fixup = chip3_ops_fixup, }; @@ -731,6 +702,11 @@ void handle_chip_irq(unsigned long type, unsigned long vector, handle_irq(type); set_irq_regs(old_regs); return; + case INT_VT_HOTPLUG: + old_regs = set_irq_regs(regs); + handle_irq(type); + set_irq_regs(old_regs); + return; case INT_PC0: perf_irq(PERFMON_PC0, regs); return; @@ -782,14 +758,16 @@ static void chip3_pci_fixup_root_complex(struct pci_dev *dev) } dev->class &= 0xff; - dev->class |= PCI_CLASS_BRIDGE_HOST << 8; + dev->class |= PCI_CLASS_BRIDGE_PCI << 8; for (i = 0; i < PCI_NUM_RESOURCES; i++) { dev->resource[i].start = 0; dev->resource[i].end = 0; - dev->resource[i].flags = 0; + dev->resource[i].flags = IORESOURCE_PCI_FIXED; } } atomic_inc(&dev->enable_cnt); + + dev->no_msi = 1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JN, PCI_DEVICE_ID_CHIP3, chip3_pci_fixup_root_complex); diff --git a/arch/sw_64/chip/chip3/cpufreq_debugfs.c b/arch/sw_64/chip/chip3/cpufreq_debugfs.c index 3b152f84454fc7ba277830ae612ed3c58a42346e..13696360ef0294dd02cea16aa70cc31d2f6cb043 100644 --- a/arch/sw_64/chip/chip3/cpufreq_debugfs.c +++ b/arch/sw_64/chip/chip3/cpufreq_debugfs.c @@ -1,15 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include -#include 
-#include -#include #include #include -#include + #include -#include #include #define CLK_PRT 0x1UL diff --git a/arch/sw_64/chip/chip3/i2c-lib.c b/arch/sw_64/chip/chip3/i2c-lib.c index 581f2b3d81a1f847fb2bcc919cec7063a3519d46..e70f0f0c9a56af9e507db4637e75f83d2b36b8ad 100644 --- a/arch/sw_64/chip/chip3/i2c-lib.c +++ b/arch/sw_64/chip/chip3/i2c-lib.c @@ -14,14 +14,12 @@ * of the interrupt mode. */ -#include -#include #include #include #include #include -#include -#include + +#include #define CPLD_BUSNR 2 @@ -98,7 +96,7 @@ enum i2c_bus_operation { I2C_BUS_WRITE, }; -static uint64_t m_i2c_base_address; +static void __iomem *m_i2c_base_address; /* * This function get I2Cx controller base address @@ -106,33 +104,28 @@ static uint64_t m_i2c_base_address; * @param i2c_controller_index Bus Number of I2C controller. * @return I2C BAR. */ -uint64_t get_i2c_bar_addr(uint8_t i2c_controller_index) +void __iomem *get_i2c_bar_addr(uint8_t i2c_controller_index) { - uint64_t base_addr = 0; - - if (i2c_controller_index == 0) - base_addr = PAGE_OFFSET | IO_BASE | IIC0_BASE; - else if (i2c_controller_index == 1) - base_addr = PAGE_OFFSET | IO_BASE | IIC1_BASE; - else if (i2c_controller_index == 2) - base_addr = PAGE_OFFSET | IO_BASE | IIC2_BASE; - - return base_addr; + switch (i2c_controller_index) { + case 0: + return __va(IO_BASE | IIC0_BASE); + case 1: + return __va(IO_BASE | IIC1_BASE); + case 2: + return __va(IO_BASE | IIC2_BASE); + default: + return NULL; + } } -void write_cpu_i2c_controller(uint64_t offset, uint32_t data) +static inline void write_cpu_i2c_controller(uint64_t offset, uint32_t data) { - mb(); - *(volatile uint32_t *)(m_i2c_base_address + offset) = data; + writel(data, m_i2c_base_address + offset); } -uint32_t read_cpu_i2c_controller(uint64_t offset) +static inline uint32_t read_cpu_i2c_controller(uint64_t offset) { - uint32_t data; - - data = *(volatile uint32_t *)(m_i2c_base_address + offset); - mb(); - return data; + return readl(m_i2c_base_address + offset); } static int poll_for_status_set0(uint16_t status_bit) @@ -243,7 +236,7 @@ static int i2c_read(uint8_t reg_offset, uint8_t *buffer, uint32_t length) write_cpu_i2c_controller(DW_IC_DATA_CMD, DW_IC_CMD); if (poll_for_status_set0(DW_IC_STATUS_RFNE) == 0) - buffer[i] = *(uint8_t *) (m_i2c_base_address + DW_IC_DATA_CMD); + buffer[i] = readb(m_i2c_base_address + DW_IC_DATA_CMD); else pr_err("Read timeout line %d.\n", __LINE__); } diff --git a/arch/sw_64/chip/chip3/msi.c b/arch/sw_64/chip/chip3/msi.c index 0c6d415e082e4588a81cdbc7a5267b1e74948b8b..43688c96ccabeda7f5152da9f82d0c87f3d47abf 100644 --- a/arch/sw_64/chip/chip3/msi.c +++ b/arch/sw_64/chip/chip3/msi.c @@ -1,15 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include -#include -#include #include #include #include + #include -#include -#include -#include static struct irq_domain *msi_default_domain; static DEFINE_RAW_SPINLOCK(vector_lock); diff --git a/arch/sw_64/chip/chip3/pci-quirks.c b/arch/sw_64/chip/chip3/pci-quirks.c index e70c211df68f0fae28891ae2cff0b49a73687f6a..22887d269fe38c6d2328b2f72618b087ce63dad7 100644 --- a/arch/sw_64/chip/chip3/pci-quirks.c +++ b/arch/sw_64/chip/chip3/pci-quirks.c @@ -1,9 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include + #include -#include static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait_usec, int delay_usec) @@ -232,9 +231,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, enable_sw_dca); void __init reserve_mem_for_pci(void) { int ret; - unsigned long base; - - base = is_in_host() ? 
PCI_32BIT_MEMIO : PCI_32BIT_VT_MEMIO; + unsigned long base = PCI_32BIT_MEMIO; ret = add_memmap_region(base, PCI_32BIT_MEMIO_SIZE, memmap_pci); if (ret) { diff --git a/arch/sw_64/chip/chip3/vt_msi.c b/arch/sw_64/chip/chip3/vt_msi.c index 31f49d3c3511af86ab6794eeedb8cb4a18702f33..428757642342e01491332a682e01668563ed5557 100644 --- a/arch/sw_64/chip/chip3/vt_msi.c +++ b/arch/sw_64/chip/chip3/vt_msi.c @@ -1,14 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include -#include #include #include -#include -#include -#include -#include #define QEMU_MSIX_MSG_ADDR (0x8000fee00000UL) diff --git a/arch/sw_64/configs/openeuler_defconfig b/arch/sw_64/configs/openeuler_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..0e77721dae36685d5aa67b3482581fc50fe5ddc6 --- /dev/null +++ b/arch/sw_64/configs/openeuler_defconfig @@ -0,0 +1,4312 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/sw_64 5.10.0 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="sw_64sw6b-sunway-linux-gnu-gcc (GCC) 7.1.0" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=70100 +CONFIG_LD_VERSION=226010000 +CONFIG_CLANG_VERSION=0 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO=y +CONFIG_IRQ_WORK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +CONFIG_LOCALVERSION="-xuelang" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +# CONFIG_AUDIT is not set +CONFIG_HAVE_ARCH_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_LEGACY=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_MSI_IRQ_DOMAIN=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_HZ_PERIODIC=y +# CONFIG_NO_HZ_IDLE is not set +# CONFIG_NO_HZ is not set +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_PREEMPT_NONE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_PSI is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y +CONFIG_TREE_SRCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=13 + +# +# Scheduler features +# + +# +# Intelligent aware scheduler +# +# CONFIG_IAS_SMART_IDLE is not set +# CONFIG_IAS_SMART_LOAD_TRACKING is not set +# end of Intelligent aware scheduler +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_CC_HAS_INT128=y +# CONFIG_NUMA_BALANCING is not set +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y 
+CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_CFS_BANDWIDTH is not set +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_CGROUP_PIDS=y +# CONFIG_CGROUP_RDMA is not set +CONFIG_CGROUP_FREEZER=y +# CONFIG_CGROUP_HUGETLB is not set +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +# CONFIG_CGROUP_PERF is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +# CONFIG_CGROUP_FILES is not set +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_IPC_NS=y +# CONFIG_USER_NS is not set +CONFIG_PID_NS=y +CONFIG_NET_NS=y +# CONFIG_SCHED_STEAL is not set +# CONFIG_CHECKPOINT_RESTORE is not set +CONFIG_SCHED_AUTOGROUP=y +# CONFIG_SYSFS_DEPRECATED is not set +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +CONFIG_INITRAMFS_FILE_METADATA="" +# CONFIG_BOOT_CONFIG is not set +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +CONFIG_BPF=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +# CONFIG_BPF_SYSCALL is not set +# CONFIG_USERFAULTFD is not set +CONFIG_KCMP=y +# CONFIG_EMBEDDED is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# end of Kernel Performance Events And Counters + +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_MEMCG_SYSFS_ON is not set +# CONFIG_COMPAT_BRK is not set +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLAB_MERGE_DEFAULT=y +# CONFIG_SLAB_FREELIST_RANDOM is not set +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_PROFILING is not set +# end of General setup + +CONFIG_SW64=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_SYS_SUPPORTS_HUGETLBFS=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_ZONE_DMA32=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_GENERIC_ISA_DMA=y +CONFIG_NONCACHE_PAGE=y +CONFIG_AUDIT_ARCH=y +CONFIG_SYS_HAS_EARLY_PRINTK=y + +# +# System setup +# + +# +# Machine Configuration +# +CONFIG_SUBARCH_C3B=y +CONFIG_SW64_CHIP3=y +# CONFIG_SW64_FPGA is not set +# CONFIG_SW64_SIM is not set +CONFIG_SW64_ASIC=y +# CONFIG_SW64_CHIP3_ASIC_DEBUG is not set +CONFIG_CPUFREQ_DEBUGFS=y +CONFIG_PLATFORM_XUELANG=y +# end of Machine Configuration + +# CONFIG_LOCK_MEMB is not set +# CONFIG_DIRECT_DMA is not set +CONFIG_SWIOTLB=y +CONFIG_ISA=y +CONFIG_ISA_DMA_API=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_SYSCALL=y +CONFIG_IOMMU_HELPER=y +CONFIG_PHYSICAL_START=0x900000 +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +CONFIG_SECCOMP=y +CONFIG_GENERIC_HWEIGHT=y +# CONFIG_LOCK_FIXUP is not set +CONFIG_SMP=y 
+CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_NR_CPUS=64 +CONFIG_HOTPLUG_CPU=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +CONFIG_NUMA=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_NODES_SHIFT=7 +# CONFIG_RELOCATABLE is not set +CONFIG_HZ=100 +# CONFIG_PCIEPORTBUS is not set +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +# CONFIG_PCIE_PTM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_IRQ_DOMAIN=y +CONFIG_PCI_MSI_ARCH_FALLBACKS=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCIE_BUS_TUNE_OFF is not set +CONFIG_PCIE_BUS_DEFAULT=y +# CONFIG_PCIE_BUS_SAFE is not set +# CONFIG_PCIE_BUS_PERFORMANCE is not set +# CONFIG_PCIE_BUS_PEER2PEER is not set +# CONFIG_HOTPLUG_PCI is not set + +# +# PCI controller drivers +# +# CONFIG_PCI_FTPCI100 is not set +# CONFIG_PCI_HOST_GENERIC is not set +# CONFIG_PCIE_XILINX is not set + +# +# DesignWare PCI Core Support +# +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCI_MESON is not set +# end of DesignWare PCI Core Support + +# +# Mobiveil PCIe Core Support +# +# end of Mobiveil PCIe Core Support + +# +# Cadence PCIe controllers support +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence PCIe controllers support +# end of PCI controller drivers + +# +# PCI Endpoint +# +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_PCCARD is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ELFCORE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_SCRIPT=y +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Library optimization options +# +CONFIG_DEEP_CLEAR_PAGE=y +CONFIG_DEEP_COPY_PAGE=y +CONFIG_DEEP_COPY_USER=y +CONFIG_DEEP_MEMCPY=y +CONFIG_DEEP_MEMSET=y +# end of Library optimization options +# end of System setup + +# +# Boot options +# +CONFIG_SW64_IRQ_CHIP=y +CONFIG_USE_OF=y +# CONFIG_SW64_BUILTIN_DTB is not set +CONFIG_EFI=y +CONFIG_DMI=y +# CONFIG_CMDLINE_BOOL is not set +CONFIG_FORCE_MAX_ZONEORDER=16 +# end of Boot options + +# +# Firmware Drivers +# +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=m +# CONFIG_ISCSI_IBFT is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +# end of EFI (Extensible Firmware Interface) Support + +# CONFIG_EFI_CUSTOM_SSDT_OVERLAYS is not set + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# +# Power management options +# +# CONFIG_SUSPEND is not set +# CONFIG_HIBERNATION is not set +# CONFIG_PM is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +# CONFIG_ACPI_DEBUGGER is not set +# CONFIG_ACPI_SPCR_TABLE is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y 
+CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_CUSTOM_DSDT_FILE="" +# CONFIG_ACPI_DEBUG is not set +# CONFIG_ACPI_PCI_SLOT is not set +# CONFIG_ACPI_CONTAINER is not set +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y + +# +# CPU Idle +# +# CONFIG_CPU_IDLE is not set +# end of CPU Idle +# end of Power management options + +CONFIG_DUMMY_CONSOLE=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_KVM_VFIO=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_KVM_SW64_HOST=y +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y + +# +# General architecture-dependent options +# +CONFIG_CRASH_CORE=y +CONFIG_SET_FS=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +# CONFIG_JUMP_LABEL is not set +CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_ISA_BUS_API=y +CONFIG_OLD_SIGSUSPEND=y +CONFIG_OLD_SIGACTION=y +# CONFIG_COMPAT_32BIT_TIME is not set +CONFIG_ARCH_NO_PREEMPT=y +CONFIG_ARCH_HAS_PHYS_TO_DMA=y +# CONFIG_LOCK_EVENT_COUNTS is not set + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# end of GCOV-based kernel profiling +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +# CONFIG_MODULE_SIG is not set +# CONFIG_MODULE_COMPRESS is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLK_SCSI_REQUEST=y +CONFIG_BLK_DEV_BSG=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=y +# CONFIG_BLK_DEV_ZONED is not set +# CONFIG_BLK_DEV_THROTTLING is not set +# CONFIG_BLK_CMDLINE_PARSER is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +CONFIG_BLK_DEBUG_FS=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +# CONFIG_LDM_DEBUG is not set +CONFIG_SGI_PARTITION=y 
+CONFIG_ULTRIX_PARTITION=y
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+# CONFIG_CMDLINE_PARTITION is not set
+# end of Partition Types
+
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLK_MQ_VIRTIO=y
+CONFIG_BLK_MQ_RDMA=y
+
+#
+# IO Schedulers
+#
+CONFIG_MQ_IOSCHED_DEADLINE=y
+CONFIG_MQ_IOSCHED_KYBER=y
+# CONFIG_IOSCHED_BFQ is not set
+# end of IO Schedulers
+
+CONFIG_PREEMPT_NOTIFIERS=y
+CONFIG_ASN1=m
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_LOCK=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_READ_LOCK=y
+CONFIG_ARCH_INLINE_READ_LOCK_BH=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_READ_UNLOCK=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_WRITE_LOCK=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_INLINE_SPIN_TRYLOCK=y
+CONFIG_INLINE_SPIN_TRYLOCK_BH=y
+CONFIG_INLINE_SPIN_LOCK=y
+CONFIG_INLINE_SPIN_LOCK_BH=y
+CONFIG_INLINE_SPIN_LOCK_IRQ=y
+CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y
+CONFIG_INLINE_SPIN_UNLOCK_BH=y
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y
+CONFIG_INLINE_READ_LOCK=y
+CONFIG_INLINE_READ_LOCK_BH=y
+CONFIG_INLINE_READ_LOCK_IRQ=y
+CONFIG_INLINE_READ_LOCK_IRQSAVE=y
+CONFIG_INLINE_READ_UNLOCK=y
+CONFIG_INLINE_READ_UNLOCK_BH=y
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y
+CONFIG_INLINE_WRITE_LOCK=y
+CONFIG_INLINE_WRITE_LOCK_BH=y
+CONFIG_INLINE_WRITE_LOCK_IRQ=y
+CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y
+CONFIG_INLINE_WRITE_UNLOCK=y
+CONFIG_INLINE_WRITE_UNLOCK_BH=y
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_FREEZER=y
+
+#
+# Executable file formats
+#
+# end of Executable file formats
+
+#
+# Memory Management options
+#
+CONFIG_SPARSEMEM=y
+CONFIG_NEED_MULTIPLE_NODES=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_MEMORY_ISOLATION=y
+# CONFIG_MEMORY_HOTPLUG is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_COMPACTION=y
+# CONFIG_PAGE_REPORTING is not set
+CONFIG_MIGRATION=y
+CONFIG_CONTIG_ALLOC=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_MMU_NOTIFIER=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
+# CONFIG_CLEANCACHE is not set
+# CONFIG_FRONTSWAP is not set
+# CONFIG_SHRINK_PAGECACHE is not set
+CONFIG_CMA=y
+# CONFIG_CMA_DEBUG is not set
+# CONFIG_CMA_DEBUGFS is not set
+CONFIG_CMA_AREAS=7
+# CONFIG_ZPOOL is not set
+# CONFIG_ZBUD is not set
+# CONFIG_ZSMALLOC is not set
+# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
+# CONFIG_IDLE_PAGE_TRACKING is not set
+CONFIG_HMM_MIRROR=y
+# CONFIG_PERCPU_STATS is not set
+# CONFIG_GUP_BENCHMARK is not set
+# CONFIG_READ_ONLY_THP_FOR_FS is not set
+
+#
+# Data Access Monitoring
+#
+# CONFIG_DAMON is not set
+# end of Data Access Monitoring
+# end of Memory Management options
+
+CONFIG_NET=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_EGRESS=y
+CONFIG_SKB_EXTENSIONS=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_SCM=y
+CONFIG_UNIX_DIAG=y
+CONFIG_TLS=m
+CONFIG_TLS_DEVICE=y
+# CONFIG_TLS_TOE is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_OFFLOAD=y
+CONFIG_XFRM_ALGO=m
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_INTERFACE=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_AH=m
+CONFIG_XFRM_ESP=m
+CONFIG_XFRM_IPCOMP=m
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+# CONFIG_SMC is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_IP_MULTIPLE_TABLES=y
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_ROUTE_CLASSID=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IP_TUNNEL=m
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE_COMMON=y
+CONFIG_IP_MROUTE=y
+# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
+# CONFIG_IP_PIMSM_V1 is not set
+# CONFIG_IP_PIMSM_V2 is not set
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+# CONFIG_NET_FOU is not set
+# CONFIG_NET_FOU_IP_TUNNELS is not set
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+# CONFIG_INET_ESPINTCP is not set
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+CONFIG_INET_UDP_DIAG=m
+# CONFIG_INET_RAW_DIAG is not set
+# CONFIG_INET_DIAG_DESTROY is not set
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
+# CONFIG_TCP_CONG_HSTCP is not set
+# CONFIG_TCP_CONG_HYBLA is not set
+# CONFIG_TCP_CONG_VEGAS is not set
+# CONFIG_TCP_CONG_NV is not set
+# CONFIG_TCP_CONG_SCALABLE is not set
+# CONFIG_TCP_CONG_LP is not set
+# CONFIG_TCP_CONG_VENO is not set
+# CONFIG_TCP_CONG_YEAH is not set
+# CONFIG_TCP_CONG_ILLINOIS is not set
+# CONFIG_TCP_CONG_DCTCP is not set
+# CONFIG_TCP_CONG_CDG is not set
+# CONFIG_TCP_CONG_BBR is not set
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+# CONFIG_TCP_COMP is not set
+CONFIG_IPV6=m
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+# CONFIG_INET6_ESPINTCP is not set
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_IPV6_ILA=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_VTI=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_IPV6_SEG6_HMAC=y
+# CONFIG_IPV6_RPL_LWTUNNEL is not set
+# CONFIG_NETLABEL is not set
+# CONFIG_MPTCP is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NET_PTP_CLASSIFY=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=m
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_COMMON=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +# CONFIG_NF_TABLES_INET is not set +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +# CONFIG_NFT_FLOW_OFFLOAD is not set +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +# CONFIG_NFT_XFRM is not set +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=m + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m 
+CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +# CONFIG_IP_VS_IPV6 is not set +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +# CONFIG_IP_VS_PROTO_TCP is not set +# CONFIG_IP_VS_PROTO_UDP is not set +# CONFIG_IP_VS_PROTO_ESP is not set +# CONFIG_IP_VS_PROTO_AH is not set +# CONFIG_IP_VS_PROTO_SCTP is not set + +# +# IPVS scheduler +# +# CONFIG_IP_VS_RR is not set +# CONFIG_IP_VS_WRR is not set +# CONFIG_IP_VS_LC is not set +# CONFIG_IP_VS_WLC is not set +# CONFIG_IP_VS_FO is not set +# CONFIG_IP_VS_OVF is not set +# CONFIG_IP_VS_LBLC is not set +# CONFIG_IP_VS_LBLCR is not set +# CONFIG_IP_VS_DH is not set +# CONFIG_IP_VS_SH is not set +# CONFIG_IP_VS_MH is not set +# CONFIG_IP_VS_SED is not set +# CONFIG_IP_VS_NQ is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +# CONFIG_IP_VS_NFCT is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m 
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+# end of IP: Netfilter Configuration
+
+#
+# IPv6: Netfilter Configuration
+#
+# CONFIG_NF_SOCKET_IPV6 is not set
+# CONFIG_NF_TPROXY_IPV6 is not set
+# CONFIG_NF_TABLES_IPV6 is not set
+# CONFIG_NF_FLOW_TABLE_IPV6 is not set
+# CONFIG_NF_DUP_IPV6 is not set
+# CONFIG_NF_REJECT_IPV6 is not set
+# CONFIG_NF_LOG_IPV6 is not set
+# CONFIG_IP6_NF_IPTABLES is not set
+# end of IPv6: Netfilter Configuration
+
+CONFIG_NF_DEFRAG_IPV6=m
+CONFIG_NF_TABLES_BRIDGE=m
+# CONFIG_NFT_BRIDGE_META is not set
+CONFIG_NF_LOG_BRIDGE=m
+# CONFIG_NF_CONNTRACK_BRIDGE is not set
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+# CONFIG_BRIDGE_EBT_IP6 is not set
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+# CONFIG_BPFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+CONFIG_STP=m
+CONFIG_GARP=m
+CONFIG_MRP=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_BRIDGE_VLAN_FILTERING is not set
+# CONFIG_BRIDGE_MRP is not set
+CONFIG_HAVE_NET_DSA=y
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_6LOWPAN is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_CBS=m
+CONFIG_NET_SCH_ETF=m
+# CONFIG_NET_SCH_TAPRIO is not set
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_SKBPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_CAKE=m
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_HHF=m
+CONFIG_NET_SCH_PIE=m
+# CONFIG_NET_SCH_FQ_PIE is not set
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+# CONFIG_NET_SCH_ETS is not set
+CONFIG_NET_SCH_DEFAULT=y
+# CONFIG_DEFAULT_FQ is not set
+# CONFIG_DEFAULT_CODEL is not set
+# CONFIG_DEFAULT_FQ_CODEL is not set
+# CONFIG_DEFAULT_SFQ is not set
+CONFIG_DEFAULT_PFIFO_FAST=y
+CONFIG_DEFAULT_NET_SCH="pfifo_fast"
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=m
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+# CONFIG_NET_EMATCH_IPSET is not set
+# CONFIG_NET_EMATCH_IPT is not set
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_SAMPLE=m
+# CONFIG_NET_ACT_IPT is not set
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+# CONFIG_NET_ACT_MPLS is not set
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_BPF=m
+# CONFIG_NET_ACT_CONNMARK is not set
+# CONFIG_NET_ACT_CTINFO is not set
+CONFIG_NET_ACT_SKBMOD=m
+CONFIG_NET_ACT_IFE=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
+# CONFIG_NET_ACT_CT is not set
+# CONFIG_NET_ACT_GATE is not set
+CONFIG_NET_IFE_SKBMARK=m
+CONFIG_NET_IFE_SKBPRIO=m
+CONFIG_NET_IFE_SKBTCINDEX=m
+# CONFIG_NET_TC_SKB_EXT is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+# CONFIG_DNS_RESOLVER is not set
+# CONFIG_BATMAN_ADV is not set
+CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VSOCKETS_DIAG=m
+CONFIG_VSOCKETS_LOOPBACK=m
+# CONFIG_VIRTIO_VSOCKETS is not set
+CONFIG_VIRTIO_VSOCKETS_COMMON=m
+CONFIG_NETLINK_DIAG=m
+CONFIG_MPLS=y
+CONFIG_NET_MPLS_GSO=m
+# CONFIG_MPLS_ROUTING is not set
+CONFIG_NET_NSH=m
+# CONFIG_HSR is not set
+# CONFIG_NET_SWITCHDEV is not set
+# CONFIG_NET_L3_MASTER_DEV is not set
+# CONFIG_QRTR is not set
+# CONFIG_NET_NCSI is not set
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_NET_RX_BUSY_POLL=y
+CONFIG_BQL=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_FLOW_LIMIT=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# end of Network testing
+# end of Networking options
+
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_AF_KCM is not set
+CONFIG_STREAM_PARSER=y
+CONFIG_FIB_RULES=y
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+CONFIG_PSAMPLE=m
+CONFIG_NET_IFE=m
+CONFIG_LWTUNNEL=y
+CONFIG_LWTUNNEL_BPF=y
+CONFIG_DST_CACHE=y
+CONFIG_GRO_CELLS=y
+CONFIG_SOCK_VALIDATE_XMIT=y
+CONFIG_NET_SOCK_MSG=y
+CONFIG_NET_DEVLINK=y
+CONFIG_PAGE_POOL=y
+CONFIG_FAILOVER=y
+CONFIG_ETHTOOL_NETLINK=y
+CONFIG_HAVE_EBPF_JIT=y
+
+#
+# Device Drivers
+#
+
+#
+# PCI controller drivers
+#
+
+#
+# DesignWare PCI Core Support
+#
+# end of DesignWare PCI Core Support
+
+#
+# Mobiveil PCIe Core Support
+#
+# end of Mobiveil PCIe Core Support
+
+#
+# Cadence PCIe controllers support
+#
+# end of Cadence PCIe controllers support
+# end of PCI controller drivers
+
+#
+# PCI Endpoint
+#
+# end of PCI Endpoint
+
+#
+# PCI switch controller drivers
+#
+# end of PCI switch controller drivers
+
+# CONFIG_RAPIDIO is not set
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER=y
+CONFIG_UEVENT_HELPER_PATH=""
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+
+#
+# Firmware loader
+#
+CONFIG_FW_LOADER=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_FW_LOADER_USER_HELPER is not set
+# CONFIG_FW_LOADER_COMPRESS is not set
+# end of Firmware loader
+
+CONFIG_ALLOW_DEV_COREDUMP=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
+# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_DMA_FENCE_TRACE is not set
+# end of Generic Driver Options
+
+#
+# Bus devices
+#
+# CONFIG_MOXTET is not set
+# CONFIG_MHI_BUS is not set
+# end of Bus devices
+
+CONFIG_CONNECTOR=m +# CONFIG_GNSS is not set +CONFIG_MTD=y +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_OF_PARTS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_GEN_PROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_GEOMETRY is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_OTP is not set +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_CFI_UTIL=y +CONFIG_MTD_RAM=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +# CONFIG_MTD_PHYSMAP_COMPAT is not set +CONFIG_MTD_PHYSMAP_OF=y +# CONFIG_MTD_PCI is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +CONFIG_MTD_PLATRAM=y +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +# CONFIG_MTD_UBI is not set +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_NET=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +# CONFIG_ISAPNP is not set +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_NULL_BLK is not set +CONFIG_CDROM=y +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +# CONFIG_BLK_DEV_SKD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=5000000 +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=y +# CONFIG_BLK_DEV_RBD is not set +# CONFIG_BLK_DEV_RSXX is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=y 
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=y
+# CONFIG_NVME_TCP is not set
+CONFIG_NVME_TARGET=y
+# CONFIG_NVME_TARGET_PASSTHRU is not set
+CONFIG_NVME_TARGET_LOOP=y
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=y
+CONFIG_NVME_TARGET_FCLOOP=y
+# CONFIG_NVME_TARGET_TCP is not set
+# end of NVME Support
+
+#
+# Misc devices
+#
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+# CONFIG_PCI_ENDPOINT_TEST is not set
+# CONFIG_XILINX_SDFEC is not set
+# CONFIG_PVPANIC is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_EEPROM_IDT_89HPESX is not set
+# CONFIG_EEPROM_EE1004 is not set
+# end of EEPROM support
+
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# end of Texas Instruments shared transport line discipline
+
+# CONFIG_SENSORS_LIS3_I2C is not set
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_GENWQE is not set
+# CONFIG_ECHO is not set
+# CONFIG_MISC_ALCOR_PCI is not set
+# CONFIG_MISC_RTSX_PCI is not set
+# CONFIG_MISC_RTSX_USB is not set
+# CONFIG_HABANA_AI is not set
+# CONFIG_UACCE is not set
+# end of Misc devices
+
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+CONFIG_RAID_ATTRS=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=y
+CONFIG_SCSI_SAS_LIBSAS=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SRP_ATTRS=y
+# end of SCSI Transports
+
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=y
+CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+# CONFIG_BE2ISCSI is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AHA152X is not set
+# CONFIG_SCSI_AHA1542 is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_MVUMI is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=m
+CONFIG_MEGARAID_MAILBOX=m
+CONFIG_MEGARAID_LEGACY=m
+CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_MPT3SAS=m
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
+# CONFIG_SCSI_MPT2SAS is not set
+# CONFIG_SCSI_SMARTPQI is not set
+# CONFIG_SCSI_UFSHCD is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_MYRB is not set
+# CONFIG_SCSI_MYRS is not set
+# CONFIG_LIBFC is not set
+# CONFIG_SCSI_SNIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_FDOMAIN_PCI is not set
+# CONFIG_SCSI_FDOMAIN_ISA is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_FAS is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_WD719X is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_VIRTIO is not set
+# CONFIG_SCSI_CHELSIO_FCOE is not set
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_ALUA=y
+# end of SCSI device support
+
+CONFIG_ATA=y
+CONFIG_SATA_HOST=y
+CONFIG_PATA_TIMINGS=y
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_ATA_FORCE=y
+CONFIG_ATA_ACPI=y
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_MOBILE_LPM_POLICY=0
+# CONFIG_SATA_AHCI_PLATFORM is not set
+# CONFIG_AHCI_CEVA is not set
+# CONFIG_AHCI_QORIQ is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_ATA_SFF is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BCACHE=m
+CONFIG_BCACHE_DEBUG=y
+CONFIG_BCACHE_CLOSURES_DEBUG=y
+# CONFIG_BCACHE_ASYNC_REGISTRATION is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_DEBUG=y
+CONFIG_DM_BUFIO=m
+CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y
+CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y
+CONFIG_DM_BIO_PRISON=m
+CONFIG_DM_PERSISTENT_DATA=m
+CONFIG_DM_UNSTRIPED=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_CACHE_SMQ=m
+CONFIG_DM_WRITECACHE=m
+# CONFIG_DM_EBS is not set
+CONFIG_DM_ERA=m
+# CONFIG_DM_CLONE is not set
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+# CONFIG_DM_MULTIPATH_HST is not set
+CONFIG_DM_DELAY=m
+# CONFIG_DM_DUST is not set
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+# CONFIG_ISCSI_TARGET_CXGB4 is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# end of IEEE 1394 (FireWire) support
+
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+# CONFIG_DUMMY is not set
+# CONFIG_WIREGUARD is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_NET_FC=y
+# CONFIG_IFB is not set
+# CONFIG_NET_TEAM is not set
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+# CONFIG_IPVLAN is not set
+# CONFIG_VXLAN is not set
+# CONFIG_GENEVE is not set
+# CONFIG_BAREUDP is not set
+# CONFIG_GTP is not set
+# CONFIG_MACSEC is not set
+# CONFIG_NETCONSOLE is not set
+CONFIG_TUN=y
+CONFIG_TAP=m
+# CONFIG_TUN_VNET_CROSS_LE is not set
+# CONFIG_VETH is not set
+CONFIG_VIRTIO_NET=y
+# CONFIG_NLMON is not set
+# CONFIG_VSOCKMON is not set
+# CONFIG_ARCNET is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# end of Distributed Switch Architecture drivers
+
+CONFIG_ETHERNET=y
+CONFIG_MDIO=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_NET_VENDOR_ADAPTEC=y
+# CONFIG_ADAPTEC_STARFIRE is not set
+CONFIG_NET_VENDOR_AGERE=y
+# CONFIG_ET131X is not set
+CONFIG_NET_VENDOR_ALACRITECH=y
+# CONFIG_SLICOSS is not set
+CONFIG_NET_VENDOR_ALTEON=y
+# CONFIG_ACENIC is not set
+# CONFIG_ALTERA_TSE is not set
+CONFIG_NET_VENDOR_AMAZON=y
+# CONFIG_ENA_ETHERNET is not set
+# CONFIG_NET_VENDOR_AMD is not set
+CONFIG_NET_VENDOR_AQUANTIA=y
+# CONFIG_NET_VENDOR_ARC is not set
+CONFIG_NET_VENDOR_ATHEROS=y
+# CONFIG_ATL2 is not set
+# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_ALX is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BCMGENET is not set
+CONFIG_BNX2=m
+CONFIG_CNIC=m
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2X is not set
+# CONFIG_SYSTEMPORT is not set
+# CONFIG_BNXT is not set
+CONFIG_NET_VENDOR_BROCADE=y
+# CONFIG_BNA is not set
+CONFIG_NET_VENDOR_CADENCE=y
+# CONFIG_MACB is not set
+CONFIG_NET_VENDOR_CAVIUM=y
+# CONFIG_THUNDER_NIC_PF is not set
+# CONFIG_THUNDER_NIC_VF is not set
+# CONFIG_THUNDER_NIC_BGX is not set
+# CONFIG_THUNDER_NIC_RGX is not set
+CONFIG_CAVIUM_PTP=y
+# CONFIG_LIQUIDIO is not set
+# CONFIG_LIQUIDIO_VF is not set
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3=m
+CONFIG_CHELSIO_T4=m
+# CONFIG_CHELSIO_T4VF is not set
+CONFIG_CHELSIO_LIB=m
+CONFIG_CHELSIO_INLINE_CRYPTO=y
+# CONFIG_CHELSIO_IPSEC_INLINE is not set
+# CONFIG_CHELSIO_TLS_DEVICE is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+CONFIG_NET_VENDOR_CISCO=y
+# CONFIG_ENIC is not set
+CONFIG_NET_VENDOR_CORTINA=y
+# CONFIG_GEMINI_ETHERNET is not set
+# CONFIG_DNET is not set
+CONFIG_NET_VENDOR_DEC=y
+# CONFIG_NET_TULIP is not set
+CONFIG_NET_VENDOR_DLINK=y
+# CONFIG_DL2K is not set
+# CONFIG_SUNDANCE is not set
+CONFIG_NET_VENDOR_EMULEX=y
+# CONFIG_BE2NET is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+CONFIG_NET_VENDOR_GOOGLE=y
+# CONFIG_GVE is not set
+CONFIG_NET_VENDOR_HUAWEI=y
+# CONFIG_BMA is not set
+CONFIG_NET_VENDOR_I825XX=y
+CONFIG_NET_VENDOR_INTEL=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGB_HWMON=y
+CONFIG_IGBVF=m
+CONFIG_IXGB=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_HWMON=y
+CONFIG_IXGBE_IPSEC=y
+CONFIG_IXGBEVF=m
+CONFIG_IXGBEVF_IPSEC=y
+CONFIG_I40E=y
+CONFIG_IAVF=y
+CONFIG_I40EVF=y
+# CONFIG_ICE is not set
+# CONFIG_FM10K is not set
+# CONFIG_IGC is not set
+CONFIG_NET_VENDOR_NETSWIFT=y
+# CONFIG_JME is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+CONFIG_MLX4_EN=y
+CONFIG_MLX4_CORE=y
+CONFIG_MLX4_DEBUG=y
+CONFIG_MLX4_CORE_GEN2=y
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_ACCEL=y
+CONFIG_MLX5_FPGA=y
+CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_EN_ARFS=y
+CONFIG_MLX5_EN_RXNFC=y
+CONFIG_MLX5_MPFS=y
+# CONFIG_MLX5_CORE_IPOIB is not set
+# CONFIG_MLX5_FPGA_IPSEC is not set
+# CONFIG_MLX5_IPSEC is not set
+# CONFIG_MLX5_FPGA_TLS is not set
+# CONFIG_MLX5_TLS is not set
+CONFIG_MLXSW_CORE=y
+CONFIG_MLXSW_CORE_HWMON=y
+CONFIG_MLXSW_PCI=y
+CONFIG_MLXSW_I2C=y
+CONFIG_MLXSW_MINIMAL=y
+CONFIG_MLXFW=y
+# CONFIG_NET_VENDOR_MICREL is not set
+CONFIG_NET_VENDOR_MICROCHIP=y
+# CONFIG_ENC28J60 is not set
+# CONFIG_ENCX24J600 is not set
+# CONFIG_LAN743X is not set
+CONFIG_NET_VENDOR_MICROSEMI=y
+CONFIG_NET_VENDOR_MYRI=y
+# CONFIG_MYRI10GE is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_NET_VENDOR_NETERION=y
+# CONFIG_S2IO is not set
+# CONFIG_VXGE is not set
+CONFIG_NET_VENDOR_NETRONOME=y
+# CONFIG_NFP is not set
+CONFIG_NET_VENDOR_NI=y
+# CONFIG_NI_XGE_MANAGEMENT_ENET is not set
+CONFIG_NET_VENDOR_NVIDIA=y
+# CONFIG_FORCEDETH is not set
+CONFIG_NET_VENDOR_OKI=y
+# CONFIG_ETHOC is not set
+CONFIG_NET_VENDOR_PACKET_ENGINES=y
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_NET_VENDOR_PENSANDO=y
+# CONFIG_IONIC is not set
+CONFIG_NET_VENDOR_QLOGIC=y
+# CONFIG_QLA3XXX is not set
+# CONFIG_QLCNIC is not set
+# CONFIG_NETXEN_NIC is not set
+# CONFIG_QED is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+CONFIG_NET_VENDOR_RAMAXEL=y
+CONFIG_NET_VENDOR_RDC=y
+# CONFIG_R6040 is not set
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_R8169 is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_NET_VENDOR_SOLARFLARE=y
+# CONFIG_SFC is not set
+# CONFIG_SFC_FALCON is not set
+CONFIG_NET_VENDOR_SILAN=y
+# CONFIG_SC92031 is not set
+CONFIG_NET_VENDOR_SIS=y
+# CONFIG_SIS900 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_NET_VENDOR_SOCIONEXT=y
+# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_NET_VENDOR_SUN=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NIU is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+CONFIG_NET_VENDOR_TEHUTI=y
+# CONFIG_TEHUTI is not set
+CONFIG_NET_VENDOR_TI=y
+# CONFIG_TI_CPSW_PHY_SEL is not set
+# CONFIG_TLAN is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NET_VENDOR_XILINX=y
+# CONFIG_XILINX_AXI_EMAC is not set
+# CONFIG_XILINX_LL_TEMAC is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLIB=m
+CONFIG_SWPHY=y
+CONFIG_FIXED_PHY=m
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AMD_PHY is not set
+# CONFIG_ADIN_PHY is not set
+# CONFIG_AQUANTIA_PHY is not set
+# CONFIG_AX88796B_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_BCM54140_PHY is not set
+# CONFIG_BCM7XXX_PHY is not set
+# CONFIG_BCM84881_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_CORTINA_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_INTEL_XWAY_PHY is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_MARVELL_10G_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_MICROCHIP_PHY is not set
+# CONFIG_MICROCHIP_T1_PHY is not set
+# CONFIG_MICROSEMI_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_NXP_TJA11XX_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_RENESAS_PHY is not set
+# CONFIG_ROCKCHIP_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_TERANETICS_PHY is not set
+# CONFIG_DP83822_PHY is not set
+# CONFIG_DP83TC811_PHY is not set
+# CONFIG_DP83848_PHY is not set
+# CONFIG_DP83867_PHY is not set
+# CONFIG_DP83869_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_XILINX_GMII2RGMII is not set +# CONFIG_MICREL_KS8995MA is not set +CONFIG_MDIO_DEVICE=m +CONFIG_MDIO_BUS=m +CONFIG_OF_MDIO=m +CONFIG_MDIO_DEVRES=m +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MDIO_HISI_FEMAC is not set +# CONFIG_MDIO_MVUSB is not set +# CONFIG_MDIO_MSCC_MIIM is not set +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_THUNDER is not set + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +# CONFIG_PCS_XPCS is not set +# end of PCS device drivers + +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +CONFIG_USB_NET_DRIVERS=y +# CONFIG_USB_CATC is not set +# CONFIG_USB_KAWETH is not set +# CONFIG_USB_PEGASUS is not set +# CONFIG_USB_RTL8150 is not set +# CONFIG_USB_RTL8152 is not set +# CONFIG_USB_LAN78XX is not set +# CONFIG_USB_USBNET is not set +# CONFIG_USB_IPHETH is not set +# CONFIG_WLAN is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +# CONFIG_NETDEVSIM is not set +CONFIG_NET_FAILOVER=y +# CONFIG_ISDN is not set +# CONFIG_NVM is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_SPARSEKMAP is not set +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +# CONFIG_RMI4_CORE is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +# CONFIG_SERIO_I8042 is not set +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_SERIO_ALTERA_PS2 is not set +# CONFIG_SERIO_PS2MULT is not set +# CONFIG_SERIO_ARC_PS2 is not set +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 
+CONFIG_LDISC_AUTOLOAD=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_16550A_VARIANTS=y
+# CONFIG_SERIAL_8250_FINTEK is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+# CONFIG_SERIAL_8250_DW is not set
+CONFIG_SERIAL_8250_SUNWAY=y
+# CONFIG_SERIAL_8250_RT288X is not set
+CONFIG_SERIAL_OF_PLATFORM=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_SIFIVE is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_SERIAL_FSL_LINFLEXUART is not set
+# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
+# CONFIG_SERIAL_SPRD is not set
+# end of Serial drivers
+
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_NULL_TTY is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_HVC_DRIVER=y
+# CONFIG_SERIAL_DEV_BUS is not set
+# CONFIG_TTY_PRINTK is not set
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_DTLK is not set
+# CONFIG_APPLICOM is not set
+CONFIG_DEVMEM=y
+# CONFIG_DEVKMEM is not set
+# CONFIG_RAW_DRIVER is not set
+CONFIG_DEVPORT=y
+# CONFIG_TCG_TPM is not set
+# CONFIG_XILLYBUS is not set
+# end of Character devices
+
+# CONFIG_RANDOM_TRUST_BOOTLOADER is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_ACPI_I2C_OPREGION=y
+CONFIG_I2C_BOARDINFO=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+
+#
+# Multiplexer I2C Chip support
+#
+# CONFIG_I2C_MUX_GPMUX is not set
+# CONFIG_I2C_MUX_LTC4306 is not set
+# CONFIG_I2C_MUX_PCA9541 is not set
+# CONFIG_I2C_MUX_REG is not set
+# CONFIG_I2C_MUX_MLXCPLD is not set
+# end of Multiplexer I2C Chip support
+
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_AMD_MP2 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_NVIDIA_GPU is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# ACPI drivers
+#
+# CONFIG_I2C_SCMI is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
+CONFIG_I2C_SUNWAY_SW6=y
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EMEV2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_RK3X is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_ISA is not set
+# end of I2C Hardware Bus support
+
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_SLAVE is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# end of I2C support
+
+# CONFIG_I3C is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_AXI_SPI_ENGINE is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_CADENCE is not set
+# CONFIG_SPI_DESIGNWARE is not set
+# CONFIG_SPI_NXP_FLEXSPI is not set
+# CONFIG_SPI_FSL_SPI is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_ROCKCHIP is not set
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_SIFIVE is not set
+# CONFIG_SPI_MXIC is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_ZYNQMP_GQSPI is not set
+# CONFIG_SPI_AMD is not set
+CONFIG_SPI_CHIP3=y
+
+#
+# SPI Multiplexer support
+#
+# CONFIG_SPI_MUX is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_LOOPBACK_TEST is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_SPI_SLAVE is not set
+CONFIG_SPI_DYNAMIC=y
+# CONFIG_SPMI is not set
+# CONFIG_HSI is not set
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+# CONFIG_NTP_PPS is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+# CONFIG_PPS_CLIENT_LDISC is not set
+# CONFIG_PPS_CLIENT_GPIO is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set
+# CONFIG_PTP_1588_CLOCK_IDTCM is not set
+# end of PTP clock support
+
+# CONFIG_PINCTRL is not set
+# CONFIG_GPIOLIB is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_RESET is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_POWER_SUPPLY_HWMON=y
+# CONFIG_PDA_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_CHARGER_ADP5061 is not set
+# CONFIG_BATTERY_CW2015 is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_CHARGER_SBS is not set
+# CONFIG_BATTERY_BQ27XXX is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_DETECTOR_MAX14656 is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_SMB347 is not set
+# CONFIG_BATTERY_GAUGE_LTC2941 is not set
+# CONFIG_BATTERY_RT5033 is not set
+# CONFIG_CHARGER_BD99954 is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM1177 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7310 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_AS370 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_AXI_FAN_CONTROL is not set
+# CONFIG_SENSORS_ASPEED is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_CORSAIR_CPRO is not set
+# CONFIG_SENSORS_DRIVETEMP is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_G762 is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_POWR1220 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LTC2945 is not set
+# CONFIG_SENSORS_LTC2947_I2C is not set
+# CONFIG_SENSORS_LTC2947_SPI is not set
+# CONFIG_SENSORS_LTC2990 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4222 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4260 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX31722 is not set
+# CONFIG_SENSORS_MAX31730 is not set
+# CONFIG_SENSORS_MAX6621 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MAX31790 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_TC654 is not set
+# CONFIG_SENSORS_MR75203 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_NTC_THERMISTOR is not set
+# CONFIG_SENSORS_NCT6683 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_NCT7802 is not set
+# CONFIG_SENSORS_NPCM7XX is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SHT3x is not set
+# CONFIG_SENSORS_SHTC1 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_STTS751 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_ADC128D818 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_INA3221 is not set
+# CONFIG_SENSORS_TC74 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP103 is not set
+# CONFIG_SENSORS_TMP108 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_TMP513 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83773G is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+
+#
+# ACPI drivers
+#
+# CONFIG_SENSORS_ACPI_POWER is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+CONFIG_SSB=y
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_PCIHOST_POSSIBLE=y
+CONFIG_SSB_PCIHOST=y
+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
+# CONFIG_SSB_DRIVER_PCICORE is not set
+CONFIG_BCMA_POSSIBLE=y
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_ACT8945A is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_MFD_AS3722 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_ATMEL_FLEXCOM is not set
+# CONFIG_MFD_ATMEL_HLCDC is not set
+# CONFIG_MFD_BCM590XX is not set
+# CONFIG_MFD_BD9571MWV is not set
+# CONFIG_MFD_AXP20X_I2C is not set
+# CONFIG_MFD_MADERA is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_DA9062 is not set
+# CONFIG_MFD_DA9063 is not set
+# CONFIG_MFD_DA9150 is not set
+# CONFIG_MFD_DLN2 is not set
+# CONFIG_MFD_GATEWORKS_GSC is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_MFD_MP2629 is not set
+# CONFIG_MFD_HI6421_PMIC is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_LPC_CHIP3 is not set
+# CONFIG_SUNWAY_SUPERIO_AST2400 is not set
+# CONFIG_MFD_IQS62X is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_KEMPLD is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77620 is not set
+# CONFIG_MFD_MAX77650 is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX77843 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_MT6360 is not set
+# CONFIG_MFD_MT6397 is not set
+# CONFIG_MFD_MENF21BMC is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_CPCAP is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RT5033 is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_RK808 is not set
+# CONFIG_MFD_RN5T618 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SKY81452 is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP3943 is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_TI_LMU is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65086 is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TI_LP873X is not set
+# CONFIG_MFD_TI_LP87565 is not set
+# CONFIG_MFD_TPS65218 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS80031 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TQMX86 is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_LOCHNAGAR is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_ROHM_BD718XX is not set
+# CONFIG_MFD_ROHM_BD70528 is not set
+# CONFIG_MFD_ROHM_BD71828 is not set
+# CONFIG_MFD_STPMIC1 is not set
+# CONFIG_MFD_STMFX is not set
+# CONFIG_MFD_INTEL_M10_BMC is not set
+# end of Multifunction device drivers
+
+# CONFIG_REGULATOR is not set
+# CONFIG_RC_CORE is not set
+# CONFIG_MEDIA_CEC_SUPPORT is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+CONFIG_DRM=y
+# CONFIG_DRM_DP_AUX_CHARDEV is not set
+# CONFIG_DRM_DEBUG_MM is not set
+# CONFIG_DRM_DEBUG_SELFTEST is not set
+CONFIG_DRM_KMS_HELPER=y
+CONFIG_DRM_KMS_FB_HELPER=y
+# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set
+CONFIG_DRM_FBDEV_EMULATION=y
+CONFIG_DRM_FBDEV_OVERALLOC=100
+# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
+# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
+# CONFIG_DRM_DP_CEC is not set
+CONFIG_DRM_TTM=y
+CONFIG_DRM_TTM_DMA_PAGE_POOL=y
+CONFIG_DRM_VRAM_HELPER=y
+CONFIG_DRM_TTM_HELPER=y
+CONFIG_DRM_GEM_SHMEM_HELPER=y
+
+#
+# I2C encoder or helper chips
+#
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
+# CONFIG_DRM_I2C_NXP_TDA998X is not set
+# CONFIG_DRM_I2C_NXP_TDA9950 is not set
+# end of I2C encoder or helper chips
+
+#
+# ARM devices
+#
+# CONFIG_DRM_KOMEDA is not set
+# end of ARM devices
+
+CONFIG_DRM_RADEON=y
+# CONFIG_DRM_RADEON_USERPTR is not set
+# CONFIG_DRM_AMDGPU is not set
+# CONFIG_DRM_NOUVEAU is not set
+# CONFIG_DRM_VGEM is not set
+# CONFIG_DRM_VKMS is not set
+# CONFIG_DRM_UDL is not set
+CONFIG_DRM_AST=y
+# CONFIG_DRM_MGAG200 is not set
+# CONFIG_DRM_RCAR_DW_HDMI is not set
+# CONFIG_DRM_RCAR_LVDS is not set
+# CONFIG_DRM_QXL is not set
+# CONFIG_DRM_BOCHS is not set
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_DRM_PANEL=y
+
+#
+# Display Panels
+#
+# CONFIG_DRM_PANEL_LVDS is not set
+# CONFIG_DRM_PANEL_SIMPLE is not set
+# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set
+# CONFIG_DRM_PANEL_LG_LG4573 is not set
+# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set
+# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
+# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set
+# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set
+# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set
+# end of Display Panels
+
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PANEL_BRIDGE=y
+
+#
+# Display Interface Bridges
+#
+# CONFIG_DRM_CDNS_DSI is not set
+# CONFIG_DRM_CHRONTEL_CH7033 is not set
+# CONFIG_DRM_DISPLAY_CONNECTOR is not set
+# CONFIG_DRM_LONTIUM_LT9611 is not set
+# CONFIG_DRM_LVDS_CODEC is not set
+# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set
+# CONFIG_DRM_NWL_MIPI_DSI is not set
+# CONFIG_DRM_NXP_PTN3460 is not set
+# CONFIG_DRM_PARADE_PS8622 is not set
+# CONFIG_DRM_PARADE_PS8640 is
not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +# end of Display Interface Bridges + +# CONFIG_DRM_ETNAVIV is not set +# CONFIG_DRM_ARCPGU is not set +# CONFIG_DRM_MXSFB is not set +# CONFIG_DRM_CIRRUS_QEMU is not set +# CONFIG_DRM_GM12U320 is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB_CMDLINE=y +CONFIG_FB_NOTIFY=y +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_MODE_HELPERS=y +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +# CONFIG_FB_EFI is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SM712 is not set +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3639 is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not 
set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +CONFIG_LOGO_LINUX_MONO=y +CONFIG_LOGO_LINUX_VGA16=y +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_SOUND is not set + +# +# HID support +# +CONFIG_HID=y +# CONFIG_HID_BATTERY_STRENGTH is not set +# CONFIG_HIDRAW is not set +# CONFIG_UHID is not set +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +# CONFIG_HID_A4TECH is not set +# CONFIG_HID_ACCUTOUCH is not set +# CONFIG_HID_ACRUX is not set +# CONFIG_HID_APPLE is not set +# CONFIG_HID_APPLEIR is not set +# CONFIG_HID_AUREAL is not set +# CONFIG_HID_BELKIN is not set +# CONFIG_HID_BETOP_FF is not set +# CONFIG_HID_CHERRY is not set +# CONFIG_HID_CHICONY is not set +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +# CONFIG_HID_CYPRESS is not set +# CONFIG_HID_DRAGONRISE is not set +# CONFIG_HID_EMS_FF is not set +# CONFIG_HID_ELECOM is not set +# CONFIG_HID_ELO is not set +# CONFIG_HID_EZKEY is not set +# CONFIG_HID_GEMBIRD is not set +# CONFIG_HID_GFRM is not set +# CONFIG_HID_GLORIOUS is not set +# CONFIG_HID_HOLTEK is not set +# CONFIG_HID_VIVALDI is not set +# CONFIG_HID_KEYTOUCH is not set +# CONFIG_HID_KYE is not set +# CONFIG_HID_UCLOGIC is not set +# CONFIG_HID_WALTOP is not set +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_GYRATION is not set +# CONFIG_HID_ICADE is not set +# CONFIG_HID_ITE is not set +# CONFIG_HID_JABRA is not set +# CONFIG_HID_TWINHAN is not set +# CONFIG_HID_KENSINGTON is not set +# CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LENOVO is not set +# CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_REDRAGON is not set +# CONFIG_HID_MICROSOFT is not set +# CONFIG_HID_MONTEREY is not set +# CONFIG_HID_MULTITOUCH is not set +# CONFIG_HID_NTI is not set +# CONFIG_HID_NTRIG is not set +# CONFIG_HID_ORTEK is not set +# CONFIG_HID_PANTHERLORD is not set +# CONFIG_HID_PENMOUNT is not set +# CONFIG_HID_PETALYNX is not set +# CONFIG_HID_PICOLCD is not set +# CONFIG_HID_PLANTRONICS is not set +# CONFIG_HID_PRIMAX is not set +# CONFIG_HID_RETRODE is not set +# CONFIG_HID_ROCCAT is not set +# CONFIG_HID_SAITEK is not set +# CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SPEEDLINK is not set +# CONFIG_HID_STEAM is not set +# CONFIG_HID_STEELSERIES is not set +# CONFIG_HID_SUNPLUS is not set +# CONFIG_HID_RMI is not set +# CONFIG_HID_GREENASIA is not set +# CONFIG_HID_SMARTJOYPLUS is not set +# CONFIG_HID_TIVO is not set +# CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_WACOM is not set +# CONFIG_HID_XINMO is not set +# CONFIG_HID_ZEROPLUS is not set +# CONFIG_HID_ZYDACRON is not set +# CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set +# end of Special HID drivers + +# +# USB HID support +# +CONFIG_USB_HID=y +# CONFIG_HID_PID is not set +# CONFIG_USB_HIDDEV is not set +# end of USB HID support + +# +# I2C HID support +# +# CONFIG_I2C_HID is not set +# end of I2C HID support +# end of HID support + 
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +# CONFIG_USB_ULPI_BUS is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set +CONFIG_USB_AUTOSUSPEND_DELAY=2 +# CONFIG_USB_MON is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +# CONFIG_USB_XHCI_PLATFORM is not set +# CONFIG_USB_EHCI_HCD is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_FOTG210_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +# CONFIG_USB_OHCI_HCD is not set +# CONFIG_USB_UHCI_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_SSB is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +# CONFIG_USB_ACM is not set +# CONFIG_USB_PRINTER is not set +# CONFIG_USB_WDM is not set +# CONFIG_USB_TMC is not set + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=y +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_REALTEK is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ALAUDA is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set +# CONFIG_USB_STORAGE_KARMA is not set +# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set +# CONFIG_USB_STORAGE_ENE_UB6250 is not set +# CONFIG_USB_UAS is not set + +# +# USB Imaging devices +# +# CONFIG_USB_MDC800 is not set +# CONFIG_USB_MICROTEK is not set +# CONFIG_USBIP_CORE is not set +# CONFIG_USB_CDNS3 is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +# CONFIG_USB_SERIAL is not set + +# +# USB Miscellaneous drivers +# +# CONFIG_USB_EMI62 is not set +# CONFIG_USB_EMI26 is not set +# CONFIG_USB_ADUTUX is not set +# CONFIG_USB_SEVSEG is not set +# CONFIG_USB_LEGOTOWER is not set +# CONFIG_USB_LCD is not set +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +# CONFIG_USB_IDMOUSE is not set +# CONFIG_USB_FTDI_ELAN is not set +# CONFIG_USB_APPLEDISPLAY is not set +# CONFIG_APPLE_MFI_FASTCHARGE is not set +# CONFIG_USB_LD is not set +# CONFIG_USB_TRANCEVIBRATOR is not set +# CONFIG_USB_IOWARRIOR is not set +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +# CONFIG_USB_ISIGHTFW is not set +# CONFIG_USB_YUREX is not set +# CONFIG_USB_EZUSB_FX2 is not set +# CONFIG_USB_HUB_USB251XB is not set +# CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +# CONFIG_TYPEC is not set +# CONFIG_USB_ROLE_SWITCH is not set +# CONFIG_MMC is not set +# 
CONFIG_MEMSTICK is not set +# CONFIG_NEW_LEDS is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +# CONFIG_INFINIBAND_CXGB4 is not set +# CONFIG_INFINIBAND_EFA is not set +# CONFIG_INFINIBAND_I40IW is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_OCRDMA is not set +# CONFIG_RDMA_RXE is not set +# CONFIG_RDMA_SIW is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +# CONFIG_INFINIBAND_SRP is not set +# CONFIG_INFINIBAND_SRPT is not set +# CONFIG_INFINIBAND_ISER is not set +# CONFIG_INFINIBAND_ISERT is not set +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +# CONFIG_RTC_NVMEM is not set + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +# CONFIG_RTC_DRV_DS1307 is not set +# CONFIG_RTC_DRV_DS1374 is not set +# CONFIG_RTC_DRV_DS1672 is not set +# CONFIG_RTC_DRV_HYM8563 is not set +# CONFIG_RTC_DRV_MAX6900 is not set +# CONFIG_RTC_DRV_RS5C372 is not set +# CONFIG_RTC_DRV_ISL1208 is not set +# CONFIG_RTC_DRV_ISL12022 is not set +# CONFIG_RTC_DRV_ISL12026 is not set +# CONFIG_RTC_DRV_X1205 is not set +CONFIG_RTC_DRV_PCF8523=y +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +# CONFIG_RTC_DRV_PCF8563 is not set +# CONFIG_RTC_DRV_PCF8583 is not set +# CONFIG_RTC_DRV_M41T80 is not set +# CONFIG_RTC_DRV_BQ32K is not set +# CONFIG_RTC_DRV_S35390A is not set +# CONFIG_RTC_DRV_FM3130 is not set +# CONFIG_RTC_DRV_RX8010 is not set +# CONFIG_RTC_DRV_RX8581 is not set +# CONFIG_RTC_DRV_RX8025 is not set +# CONFIG_RTC_DRV_EM3027 is not set +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RX6110 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +# CONFIG_RTC_DRV_DS3232 is not set +# CONFIG_RTC_DRV_PCF2127 is not set +# CONFIG_RTC_DRV_RV3029C2 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_SW64_VIRT=y +# CONFIG_RTC_DRV_DS1286 is not set +# CONFIG_RTC_DRV_DS1511 is not set +# CONFIG_RTC_DRV_DS1553 is not set +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +# CONFIG_RTC_DRV_DS1742 is not set 
+# CONFIG_RTC_DRV_DS2404 is not set +# CONFIG_RTC_DRV_EFI is not set +# CONFIG_RTC_DRV_STK17TA8 is not set +# CONFIG_RTC_DRV_M48T86 is not set +# CONFIG_RTC_DRV_M48T35 is not set +# CONFIG_RTC_DRV_M48T59 is not set +# CONFIG_RTC_DRV_MSM6242 is not set +# CONFIG_RTC_DRV_BQ4802 is not set +# CONFIG_RTC_DRV_RP5C01 is not set +# CONFIG_RTC_DRV_V3020 is not set +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_DMADEVICES is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# end of DMABUF options + +# CONFIG_AUXDISPLAY is not set +CONFIG_UIO=y +# CONFIG_UIO_CIF is not set +# CONFIG_UIO_PDRV_GENIRQ is not set +# CONFIG_UIO_DMEM_GENIRQ is not set +# CONFIG_UIO_AEC is not set +# CONFIG_UIO_SERCOS3 is not set +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +# CONFIG_VFIO is not set +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_PCI_LEGACY is not set +# CONFIG_VIRTIO_BALLOON is not set +# CONFIG_VIRTIO_INPUT is not set +CONFIG_VIRTIO_MMIO=y +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set +CONFIG_VIRTIO_DMA_SHARED_BUFFER=y +# CONFIG_VDPA is not set + +# +# Microsoft Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +CONFIG_STAGING=y +# CONFIG_COMEDI is not set +# CONFIG_RTS5208 is not set +CONFIG_FB_SM750=y +# CONFIG_STAGING_MEDIA is not set + +# +# Android +# +# end of Android + +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_GS_FPGABOOT is not set +# CONFIG_UNISYSSPAR is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_PI433 is not set + +# +# Gasket devices +# +# end of Gasket devices + +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_KPC2000 is not set +# CONFIG_QLGE is not set +# CONFIG_GOLDFISH is not set +CONFIG_HAVE_CLK=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# CONFIG_MICROCHIP_PIT64B is not set +# end of Clock Source drivers + +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_OF_IOMMU=y +CONFIG_SUNWAY_IOMMU=y + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# 
Aspeed SoC drivers +# +# end of Aspeed SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# CONFIG_XILINX_VCU is not set +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +# CONFIG_VME_BUS is not set +# CONFIG_PWM is not set + +# +# IRQ chip support +# +CONFIG_SW64_INTC=y +CONFIG_IRQCHIP=y +# CONFIG_AL_FIC is not set +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_BCM_KONA_USB2_PHY is not set +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_SALVO is not set +# CONFIG_PHY_FSL_IMX8MQ_USB is not set +# CONFIG_PHY_MIXEL_MIPI_DPHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +# CONFIG_RAS is not set +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set +# CONFIG_DAX is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# end of Device Drivers + +# +# File systems +# +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +# CONFIG_XFS_QUOTA is not set +# CONFIG_XFS_POSIX_ACL is not set +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=y +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_FS_DAX is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +CONFIG_MANDATORY_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set +CONFIG_QUOTA=y +# CONFIG_QUOTA_NETLINK_INTERFACE is not set +CONFIG_PRINT_QUOTA_WARNING=y +# CONFIG_QUOTA_DEBUG is not set +# CONFIG_QFMT_V1 is not set +# CONFIG_QFMT_V2 is not set +CONFIG_QUOTACTL=y +CONFIG_AUTOFS4_FS=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=y +# CONFIG_CUSE is not set +# CONFIG_VIRTIO_FS is not set +# CONFIG_OVERLAY_FS is not set + +# +# Caches +# +CONFIG_FSCACHE=y +# CONFIG_FSCACHE_STATS is not set +# CONFIG_FSCACHE_HISTOGRAM is not set +# CONFIG_FSCACHE_DEBUG is 
not set +# CONFIG_FSCACHE_OBJECT_LIST is not set +# CONFIG_CACHEFILES is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +CONFIG_FAT_DEFAULT_UTF8=y +# CONFIG_EXFAT_FS is not set +CONFIG_NTFS_FS=y +# CONFIG_NTFS_DEBUG is not set +CONFIG_NTFS_RW=y +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_CHILDREN is not set +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_MEMFD_CREATE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=m +# end of Pseudo filesystems + +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V2=y +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=y +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=y +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +CONFIG_NFSD_V2_ACL=y +CONFIG_NFSD_V3=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_SUNRPC_SWAP=y +# CONFIG_SUNRPC_DEBUG is not set +CONFIG_SUNRPC_XPRT_RDMA=m +# CONFIG_CEPH_FS is not set +# CONFIG_CIFS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set +# CONFIG_UNICODE is not 
set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +# CONFIG_PERSISTENT_KEYRINGS is not set +# CONFIG_ENCRYPTED_KEYS is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +# CONFIG_SECURITYFS is not set +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +# CONFIG_SECURITY_NETWORK_XFRM is not set +CONFIG_SECURITY_PATH=y +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +# CONFIG_HARDENED_USERCOPY is not set +# CONFIG_STATIC_USERMODEHELPER is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +# CONFIG_SECURITY_YAMA is not set +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +CONFIG_INTEGRITY=y +# CONFIG_INTEGRITY_SIGNATURE is not set +# CONFIG_IMA is not set +# CONFIG_EVM is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +# end of Memory initialization +# end of Kernel hardening options + +# CONFIG_SECURITY_BOOT_INIT is not set +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_USER is not set +CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y +CONFIG_CRYPTO_GF128MUL=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +# CONFIG_CRYPTO_PCRYPT is not set +# CONFIG_CRYPTO_CRYPTD is not set +CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_TEST is not set + +# +# Public-key cryptography +# +# CONFIG_CRYPTO_RSA is not set +# CONFIG_CRYPTO_DH is not set +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +# CONFIG_CRYPTO_SM2 is not set +# CONFIG_CRYPTO_CURVE25519 is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +CONFIG_CRYPTO_GCM=y +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CFB is not set +CONFIG_CRYPTO_CTR=y +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_OFB is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set +# CONFIG_CRYPTO_KEYWRAP is not set +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ESSIV=m + +# +# Hash modes +# +# CONFIG_CRYPTO_CMAC is not set +CONFIG_CRYPTO_HMAC=y +# CONFIG_CRYPTO_XCBC is not set +# CONFIG_CRYPTO_VMAC is not set + +# +# Digest +# +CONFIG_CRYPTO_CRC32C=y +# CONFIG_CRYPTO_CRC32 is not set +# CONFIG_CRYPTO_XXHASH is not set +# CONFIG_CRYPTO_BLAKE2B is not set +# CONFIG_CRYPTO_BLAKE2S is not set +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_GHASH=y +# CONFIG_CRYPTO_POLY1305 is not set +# CONFIG_CRYPTO_MD4 
is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_SHA3 is not set +# CONFIG_CRYPTO_SM3 is not set +# CONFIG_CRYPTO_STREEBOG is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_SM4 is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +# CONFIG_CRYPTO_LZ4 is not set +# CONFIG_CRYPTO_LZ4HC is not set +# CONFIG_CRYPTO_ZSTD is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_USER_API_HASH is not set +# CONFIG_CRYPTO_USER_API_SKCIPHER is not set +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_AES=y +# CONFIG_CRYPTO_LIB_BLAKE2S is not set +# CONFIG_CRYPTO_LIB_CHACHA is not set +# CONFIG_CRYPTO_LIB_CURVE25519 is not set +CONFIG_CRYPTO_LIB_DES=y +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 +# CONFIG_CRYPTO_LIB_POLY1305 is not set +# CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set +CONFIG_CRYPTO_LIB_SHA256=y +# CONFIG_CRYPTO_HW is not set +# CONFIG_ASYMMETRIC_KEY_TYPE is not set + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set +# CONFIG_PGP_PRELOAD_PUBLIC_KEYS is not set +# end of Certificates for signature checking + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +# CONFIG_CORDIC is not set +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +# CONFIG_CRC_CCITT is not set +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +# CONFIG_CRC7 is not set +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=m 
+CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +# CONFIG_DMA_API_DEBUG is not set +CONFIG_SGL_ALLOC=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_IRQ_POLL=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_SBITMAP=y +# CONFIG_STRING_SELFTEST is not set +# end of Library routines + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +# CONFIG_PRINTK_TIME is not set +# CONFIG_PRINTK_CALLER is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +# CONFIG_DYNAMIC_DEBUG is not set +# CONFIG_DYNAMIC_DEBUG_CORE is not set +CONFIG_SYMBOLIC_ERRNAME=y +# end of printk and dmesg options + +# +# Compile-time checks and compiler options +# +# CONFIG_DEBUG_INFO is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=2048 +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +# CONFIG_OPTIMIZE_INLINING is not set +# CONFIG_DEBUG_SECTION_MISMATCH is not set +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_32B is not set +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +# CONFIG_MAGIC_SYSRQ is not set +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_UBSAN is not set +# end of Generic Kernel Debugging Instruments + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_SLUB_STATS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +# end of Memory Debugging + +# CONFIG_DEBUG_SHIRQ is not set + +# +# Debug Oops, Lockups and Hangs +# +# CONFIG_PANIC_ON_OOPS is not set +CONFIG_PANIC_ON_OOPS_VALUE=0 +CONFIG_PANIC_TIMEOUT=0 +# CONFIG_SOFTLOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +# CONFIG_SCHEDSTATS is not set +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_SAMPLES is not set + +# +# sw_64 Debugging +# +CONFIG_EARLY_PRINTK=y +# CONFIG_UNA_PRINT is not set +CONFIG_MATHEMU=y +CONFIG_STACKTRACE_SUPPORT=y +# CONFIG_SW64_RRU is not set +# CONFIG_SW64_RRK is not set +# end of sw_64 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_LKDTM is not set +# CONFIG_TEST_LIST_SORT is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_SORT is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +# CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_ASYNC_RAID6_TEST is not set +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_TEST_STRING_HELPERS is not set +# CONFIG_TEST_STRSCPY is not set +# CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_OVERFLOW is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_HASH is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +# CONFIG_TEST_BPF is not set +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_STACKINIT is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_MEMTEST is not set +# end of Kernel Testing and Coverage +# end of 
Kernel hacking + +CONFIG_KABI_SIZE_ALIGN_CHECKS=y +CONFIG_KABI_RESERVE=y diff --git a/arch/sw_64/defconfig b/arch/sw_64/defconfig deleted file mode 100644 index d641ca0c108aa4c590f393b64d8bbadf961a7b35..0000000000000000000000000000000000000000 --- a/arch/sw_64/defconfig +++ /dev/null @@ -1,73 +0,0 @@ -CONFIG_EXPERIMENTAL=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_VERBOSE_MCHECK=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_NET_KEY=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -# CONFIG_IPV6 is not set -CONFIG_NETFILTER=y -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_FILTER=m -CONFIG_VLAN_8021Q=m -CONFIG_PNP=y -CONFIG_ISAPNP=y -CONFIG_BLK_DEV_FD=y -CONFIG_BLK_DEV_LOOP=m -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_IDE_GENERIC=y -CONFIG_BLK_DEV_GENERIC=y -CONFIG_BLK_DEV_ALI15X3=y -CONFIG_BLK_DEV_CMD64X=y -CONFIG_BLK_DEV_CY82C693=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_SCSI_AIC7XXX=m -CONFIG_AIC7XXX_CMDS_PER_DEVICE=253 -# CONFIG_AIC7XXX_DEBUG_ENABLE is not set -CONFIG_NETDEVICES=y -CONFIG_DUMMY=m -CONFIG_NET_ETHERNET=y -CONFIG_NET_VENDOR_3COM=y -CONFIG_VORTEX=y -CONFIG_NET_TULIP=y -CONFIG_DE2104X=m -CONFIG_TULIP=y -CONFIG_TULIP_MMIO=y -CONFIG_NET_PCI=y -CONFIG_YELLOWFIN=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_RTC=y -CONFIG_EXT2_FS=y -CONFIG_REISERFS_FS=m -CONFIG_ISO9660_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_NFS_FS=m -CONFIG_NFS_V3=y -CONFIG_NFSD=m -CONFIG_NFSD_V3=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_INFO=y -CONFIG_SW64_LEGACY_START_ADDRESS=y -CONFIG_MATHEMU=y -CONFIG_CRYPTO_HMAC=y diff --git a/arch/sw_64/include/asm/Kbuild b/arch/sw_64/include/asm/Kbuild index ab266af1a06d6a7f0a535a57b4c617bba78e11be..d08f0b08918efb2a28d0caaa2eb420c9864e6bff 100644 --- a/arch/sw_64/include/asm/Kbuild +++ b/arch/sw_64/include/asm/Kbuild @@ -1,22 +1,17 @@ # SPDX-License-Identifier: GPL-2.0 -header-y += compiler.h -header-y += console.h -header-y += fpu.h -header-y += gentrap.h -header-y += hmcall.h -header-y += reg.h -header-y += regdef.h -header-y += sysinfo.h -header-y += page.h -header-y += elf.h -generated-y += syscall_table.h +generic-y += clkdev.h generic-y += export.h generic-y += kvm_types.h -generic-y += rwsem.h - +generic-y += local64.h +generic-y += mcs_spinlock.h +generic-y += param.h generic-y += qrwlock.h generic-y += qspinlock.h -generic-y += mcs_spinlock.h -generic-y += clkdev.h -generic-y += scatterlist.h +generic-y += rwsem.h +generic-y += seccomp.h +generic-y += segment.h +generic-y += types.h +generic-y += user.h + +generated-y += syscall_table.h diff --git a/arch/sw_64/include/asm/a.out-core.h b/arch/sw_64/include/asm/a.out-core.h deleted file mode 100644 index 39dc16142955da86a69b4ac2ffa37a707957d668..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/a.out-core.h +++ /dev/null @@ -1,80 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 
- * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_SW64_OUT_CORE_H -#define _ASM_SW64_OUT_CORE_H - -#ifdef __KERNEL__ - -#include - -/* - * Fill in the user structure for an ECOFF core dump. - */ -static inline void aout_dump_thread(struct pt_regs *pt, struct user *dump) -{ - /* switch stack follows right below pt_regs: */ - struct switch_stack *sw = ((struct switch_stack *) pt) - 1; - - dump->magic = CMAGIC; - dump->start_code = current->mm->start_code; - dump->start_data = current->mm->start_data; - dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); - dump->u_tsize = ((current->mm->end_code - dump->start_code) - >> PAGE_SHIFT); - dump->u_dsize = ((current->mm->brk + PAGE_SIZE - 1 - dump->start_data) - >> PAGE_SHIFT); - dump->u_ssize = (current->mm->start_stack - dump->start_stack - + PAGE_SIZE - 1) >> PAGE_SHIFT; - - /* - * We store the registers in an order/format that makes life easier - * for gdb. - */ - dump->regs[EF_V0] = pt->r0; - dump->regs[EF_T0] = pt->r1; - dump->regs[EF_T1] = pt->r2; - dump->regs[EF_T2] = pt->r3; - dump->regs[EF_T3] = pt->r4; - dump->regs[EF_T4] = pt->r5; - dump->regs[EF_T5] = pt->r6; - dump->regs[EF_T6] = pt->r7; - dump->regs[EF_T7] = pt->r8; - dump->regs[EF_S0] = sw->r9; - dump->regs[EF_S1] = sw->r10; - dump->regs[EF_S2] = sw->r11; - dump->regs[EF_S3] = sw->r12; - dump->regs[EF_S4] = sw->r13; - dump->regs[EF_S5] = sw->r14; - dump->regs[EF_S6] = sw->r15; - dump->regs[EF_A3] = pt->r19; - dump->regs[EF_A4] = pt->r20; - dump->regs[EF_A5] = pt->r21; - dump->regs[EF_T8] = pt->r22; - dump->regs[EF_T9] = pt->r23; - dump->regs[EF_T10] = pt->r24; - dump->regs[EF_T11] = pt->r25; - dump->regs[EF_RA] = pt->r26; - dump->regs[EF_T12] = pt->r27; - dump->regs[EF_AT] = pt->r28; - dump->regs[EF_SP] = rdusp(); - dump->regs[EF_PS] = pt->ps; - dump->regs[EF_PC] = pt->pc; - dump->regs[EF_GP] = pt->gp; - dump->regs[EF_A0] = pt->r16; - dump->regs[EF_A1] = pt->r17; - dump->regs[EF_A2] = pt->r18; - memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_SW64_OUT_CORE_H */ diff --git a/arch/sw_64/include/asm/a.out.h b/arch/sw_64/include/asm/a.out.h deleted file mode 100644 index 4f2004a7fa8e0d71e75673ee1621210677d58d64..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/a.out.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_A_OUT_H -#define _ASM_SW64_A_OUT_H - -#include - -/* Assume that start addresses below 4G belong to a TASO application. - * Unfortunately, there is no proper bit in the exec header to check. - * Worse, we have to notice the start address before swapping to use - * /sbin/loader, which of course is _not_ a TASO application. - */ -#define SET_AOUT_PERSONALITY(BFPM, EX) \ - set_personality(((BFPM->taso || EX.ah.entry < 0x100000000L \ - ? 
ADDR_LIMIT_32BIT : 0) | PER_OSF4)) - -#endif /* _ASM_SW64_A_OUT_H */ diff --git a/arch/sw_64/include/asm/agp.h b/arch/sw_64/include/asm/agp.h deleted file mode 100644 index e9d16888910ee56c4413dabc1b6b4c60cc9b9df4..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/agp.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_AGP_H -#define _ASM_SW64_AGP_H 1 - -#include - -/* dummy for now */ - -#define map_page_into_agp(page) -#define unmap_page_from_agp(page) -#define flush_agp_cache() mb() - -/* GATT allocation. Returns/accepts GATT kernel virtual address. */ -#define alloc_gatt_pages(order) \ - ((char *)__get_free_pages(GFP_KERNEL, (order))) -#define free_gatt_pages(table, order) \ - free_pages((unsigned long)(table), (order)) - -#endif diff --git a/arch/sw_64/include/asm/asm-prototypes.h b/arch/sw_64/include/asm/asm-prototypes.h index 21f4f494d74d93e8fa80e0cb4cc26bc41a711db2..15bad8ef6883027ff7502a8e362e7b936dbc8d4b 100644 --- a/arch/sw_64/include/asm/asm-prototypes.h +++ b/arch/sw_64/include/asm/asm-prototypes.h @@ -4,7 +4,6 @@ #include #include -#include #include #include #include diff --git a/arch/sw_64/include/asm/cache.h b/arch/sw_64/include/asm/cache.h index a59a74110884288b0e1364f8e1065581173893ad..1dca2e2e04a4360c28b2a68fdccd6eac011001b9 100644 --- a/arch/sw_64/include/asm/cache.h +++ b/arch/sw_64/include/asm/cache.h @@ -5,9 +5,7 @@ #ifndef _ASM_SW64_CACHE_H #define _ASM_SW64_CACHE_H -#define L1_CACHE_BYTES 128 #define L1_CACHE_SHIFT 7 - -#define SMP_CACHE_BYTES L1_CACHE_BYTES +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #endif diff --git a/arch/sw_64/include/asm/checksum.h b/arch/sw_64/include/asm/checksum.h index 0bb933350dc67b3a62d5e4adef7cff3ed7213ced..284c1678f51ea084f71ee6a3747ce49854c512eb 100644 --- a/arch/sw_64/include/asm/checksum.h +++ b/arch/sw_64/include/asm/checksum.h @@ -4,9 +4,33 @@ #include +#define extll(x, y, z) \ + ({__asm__ __volatile__("extll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define exthl(x, y, z) \ + ({__asm__ __volatile__("exthl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskll(x, y, z) \ + ({__asm__ __volatile__("maskll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskhl(x, y, z) \ + ({__asm__ __volatile__("maskhl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define insll(x, y, z) \ + ({__asm__ __volatile__("insll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define inshl(x, y, z) \ + ({__asm__ __volatile__("inshl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + /* - * This is a version of ip_compute_csum() optimized for IP headers, - * which always checksum on 4 octet boundaries. + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. 
*/ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); @@ -55,7 +79,7 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len); extern __sum16 ip_compute_csum(const void *buff, int len); /* - * Fold a partial checksum without adding pseudo headers + * Fold a partial checksum without adding pseudo headers */ static inline __sum16 csum_fold(__wsum csum) @@ -71,4 +95,32 @@ static inline __sum16 csum_fold(__wsum csum) extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len, __u8 proto, __wsum sum); + +static inline unsigned short from64to16(unsigned long x) +{ + /* + * Using extract instructions is a bit more efficient + * than the original shift/bitmask version. + */ + + union { + unsigned long ul; + unsigned int ui[2]; + unsigned short us[4]; + } in_v, tmp_v, out_v; + + in_v.ul = x; + tmp_v.ul = (unsigned long)in_v.ui[0] + (unsigned long)in_v.ui[1]; + + /* + * Since the bits of tmp_v.us[3] are going to always be zero, + * we don't have to bother to add that in. + */ + out_v.ul = (unsigned long)tmp_v.us[0] + (unsigned long)tmp_v.us[1] + + (unsigned long)tmp_v.us[2]; + + /* Similarly, out_v.us[2] is always zero for the final add. */ + return out_v.us[0] + out_v.us[1]; +} + #endif diff --git a/arch/sw_64/include/asm/chip3_io.h b/arch/sw_64/include/asm/chip3_io.h index 1028842f7a817bce1be5b4ab67870384aac72826..14d02c080607403805e357207ec8fa1de9eb6376 100644 --- a/arch/sw_64/include/asm/chip3_io.h +++ b/arch/sw_64/include/asm/chip3_io.h @@ -19,7 +19,6 @@ #define PCI_LEGACY_IO (0x1UL << 32) #define PCI_LEGACY_IO_SIZE (0x100000000UL) #define PCI_MEM_UNPRE 0x0UL -#define PCI_32BIT_VT_MEMIO (0xc0000000UL) #define PCI_32BIT_MEMIO (0xe0000000UL) #define PCI_32BIT_MEMIO_SIZE (0x20000000UL) #define PCI_64BIT_MEMIO (0x1UL << 39) @@ -70,6 +69,9 @@ #define DLI_PHY_CTL (0x10UL << 24) #define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) +#define PME_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) +#define AER_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) + /*-----------------------addr-----------------------*/ /* CAB0 REG */ enum { diff --git a/arch/sw_64/include/asm/clock.h b/arch/sw_64/include/asm/clock.h new file mode 100644 index 0000000000000000000000000000000000000000..06ad4bcd6ad3f2599a15a8acfc18dd08eb3685e8 --- /dev/null +++ b/arch/sw_64/include/asm/clock.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_CLOCK_H +#define _ASM_SW64_CLOCK_H + +#include +#include +#include +#include +#include + +struct clk; + +extern struct cpufreq_frequency_table sw64_clockmod_table[]; + +extern char curruent_policy[CPUFREQ_NAME_LEN]; + +struct clk_ops { + void (*init)(struct clk *clk); + void (*enable)(struct clk *clk); + void (*disable)(struct clk *clk); + void (*recalc)(struct clk *clk); + int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id); + long (*round_rate)(struct clk *clk, unsigned long rate); +}; + +struct clk { + struct list_head node; + const char *name; + int id; + struct module *owner; + + struct clk *parent; + const struct clk_ops *ops; + + struct kref kref; + + unsigned long rate; + unsigned long flags; +}; + +#define CLK_ALWAYS_ENABLED (1 << 0) +#define CLK_RATE_PROPAGATES (1 << 1) + +int clk_init(void); + +int sw64_set_rate(int index, unsigned long rate); + +struct clk *sw64_clk_get(struct device *dev, const char *id); + +unsigned long sw64_clk_get_rate(struct clk *clk); + +void sw64_update_clockevents(unsigned long cpu, u32 freq); + +void sw64_store_policy(struct cpufreq_policy
*policy); +#endif /* _ASM_SW64_CLOCK_H */ diff --git a/arch/sw_64/include/asm/compiler.h b/arch/sw_64/include/asm/compiler.h deleted file mode 100644 index 9a80aa6a0ba88d8082fe60ef6aa12d67fcf51fe2..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/compiler.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_COMPILER_H -#define _ASM_SW64_COMPILER_H - -#include - -#endif /* _ASM_SW64_COMPILER_H */ diff --git a/arch/sw_64/include/asm/console.h b/arch/sw_64/include/asm/console.h deleted file mode 100644 index 0c01cb740bce139bac7a31f1220e52273b244cf2..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/console.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_CONSOLE_H -#define _ASM_SW64_CONSOLE_H - -#include -#ifndef __ASSEMBLY__ -struct crb_struct; -extern int callback_init_done; -extern void callback_init(void); -#endif /* __ASSEMBLY__ */ -#endif /* _ASM_SW64_CONSOLE_H */ diff --git a/arch/sw_64/include/asm/cputime.h b/arch/sw_64/include/asm/cputime.h index bada5a01d887b7aa58bf9c1349197bc5ba206934..cdd46b05e22840bbbe033ca200951269afa0b98f 100644 --- a/arch/sw_64/include/asm/cputime.h +++ b/arch/sw_64/include/asm/cputime.h @@ -2,6 +2,8 @@ #ifndef _ASM_SW64_CPUTIME_H #define _ASM_SW64_CPUTIME_H -#include +typedef u64 __nocast cputime64_t; + +#define jiffies64_to_cputime64(__jif) ((__force cputime64_t)(__jif)) #endif /* _ASM_SW64_CPUTIME_H */ diff --git a/arch/sw_64/include/asm/div64.h b/arch/sw_64/include/asm/div64.h deleted file mode 100644 index 306581407ba50d51632f1cd99b7a3d0006ab0e0b..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/div64.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_DIV64_H -#define _ASM_SW64_DIV64_H - -#include - -#endif diff --git a/arch/sw_64/include/asm/early_ioremap.h b/arch/sw_64/include/asm/early_ioremap.h index 6f6fc6218cb38562da1469cc677687ac1eddf634..930c6bf36ad3c69a0be9dbe175649fc17d780d84 100644 --- a/arch/sw_64/include/asm/early_ioremap.h +++ b/arch/sw_64/include/asm/early_ioremap.h @@ -14,7 +14,7 @@ early_ioremap(unsigned long phys_addr, unsigned long size) y = (unsigned long) phys_to_virt(__pa(phys_addr)); } else { y = phys_addr; - y += PAGE_OFFSET; + y |= PAGE_OFFSET; } return (void __iomem *) y; diff --git a/arch/sw_64/include/asm/elf.h b/arch/sw_64/include/asm/elf.h index 150629b0b615b57194f64745443cebbdd3b09ca1..8c858cff5573955714ad9785ca7e9f983e046aaf 100644 --- a/arch/sw_64/include/asm/elf.h +++ b/arch/sw_64/include/asm/elf.h @@ -3,7 +3,6 @@ #define _ASM_SW64_ELF_H #ifdef __KERNEL__ #include -#include #endif /* Special values for the st_other field in the symbol table. */ @@ -56,23 +55,18 @@ #define EF_SW64_32BIT 1 /* All addresses are below 2GB */ /* - * ELF register definitions.. - */ - -/* - * The legacy version of makes gregset_t 46 entries long. - * I have no idea why that is so. For now, we just leave it at 33 - * (32 general regs + processor status word). + * ELF register definitions. + * + * For now, we just leave it at 33 (32 general regs + processor status word). */ #define ELF_NGREG 33 -#define ELF_NFPREG 32 - typedef unsigned long elf_greg_t; typedef elf_greg_t elf_gregset_t[ELF_NGREG]; -typedef double elf_fpreg_t; -typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; +/* Same with user_fpsimd_state */ +#include +typedef struct user_fpsimd_state elf_fpregset_t; /* * This is used to ensure we don't load something for the wrong architecture. 
@@ -122,30 +116,16 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #ifdef __KERNEL__ struct pt_regs; -struct thread_info; struct task_struct; -extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, - struct thread_info *ti); -#define ELF_CORE_COPY_REGS(DEST, REGS) \ - dump_elf_thread(DEST, REGS, current_thread_info()); - -/* Similar, but for a thread other than current. */ - -extern int dump_elf_task(elf_greg_t *dest, struct task_struct *task); -#define ELF_CORE_COPY_TASK_REGS(TASK, DEST) dump_elf_task(*(DEST), TASK) - -/* Similar, but for the FP registers. */ - -extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task); -#define ELF_CORE_COPY_FPREGS(TASK, DEST) dump_elf_task_fp(*(DEST), TASK) +extern void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *pt); +#define ELF_CORE_COPY_REGS(DEST, REGS) sw64_elf_core_copy_regs(DEST, REGS); /* * This yields a mask that user programs can use to figure out what - * instruction set this CPU supports. This is trivial on SW-64, - * but not so on other machines. + * instruction set this CPU supports. */ -#define ELF_HWCAP (~amask(-1)) +#define ELF_HWCAP 0 /* * This yields a string that ld.so will use to load implementation diff --git a/arch/sw_64/include/asm/emergency-restart.h b/arch/sw_64/include/asm/emergency-restart.h deleted file mode 100644 index fabb33ebf0ebc1d50c73644eb327948539039894..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/emergency-restart.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_EMERGENCY_RESTART_H -#define _ASM_SW64_EMERGENCY_RESTART_H - -#include - -#endif /* _ASM_SW64_EMERGENCY_RESTART_H */ diff --git a/arch/sw_64/include/asm/exec.h b/arch/sw_64/include/asm/exec.h deleted file mode 100644 index 4a9cb71c5c4799ce10284d6c87b0727a8926d873..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/exec.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_EXEC_H -#define _ASM_SW64_EXEC_H - -#define arch_align_stack(x) (x) - -#endif /* _ASM_SW64_EXEC_H */ diff --git a/arch/sw_64/include/asm/extable.h b/arch/sw_64/include/asm/extable.h index 12b50b68a0d2ab381a036557ef5984cc4db23e11..ae753772a45a9b12e8c4476b9e78be7921371822 100644 --- a/arch/sw_64/include/asm/extable.h +++ b/arch/sw_64/include/asm/extable.h @@ -52,4 +52,8 @@ struct exception_table_entry { (b)->fixup.unit = (tmp).fixup.unit; \ } while (0) +/* Macro for exception fixup code to access integer registers. */ +extern short regoffsets[]; +#define map_regs(r) (*(unsigned long *)((char *)regs + regoffsets[r])) + #endif diff --git a/arch/sw_64/include/asm/floppy.h b/arch/sw_64/include/asm/floppy.h deleted file mode 100644 index f4646d99d80cb70a234f713583691f61a4a1ed6e..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/floppy.h +++ /dev/null @@ -1,116 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Architecture specific parts of the Floppy driver - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
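
Note on the map_regs() macro added to extable.h above: it resolves an architectural register number to the matching slot in struct pt_regs through the regoffsets[] byte-offset table. A sketch of how such a table can be built and consumed (illustrative only; the real table lives in the sw_64 kernel sources, and its entries need not follow struct declaration order):

	#include <linux/stddef.h>
	#include <asm/ptrace.h>

	/* Hypothetical construction with offsetof(), one entry per register. */
	static const short regoffsets_example[] = {
		offsetof(struct pt_regs, r0),
		offsetof(struct pt_regs, r1),
		offsetof(struct pt_regs, r2),
		/* ... remaining architectural registers ... */
	};

	/* Fixup-style accessor: write 'val' into register 'r' of 'regs'. */
	static void set_reg(struct pt_regs *regs, int r, unsigned long val)
	{
		*(unsigned long *)((char *)regs + regoffsets_example[r]) = val;
	}

This is the same pointer arithmetic map_regs() performs, just spelled out as a function.
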
- * - * Copyright (C) 1995 - */ -#ifndef _ASM_SW64_FLOPPY_H -#define _ASM_SW64_FLOPPY_H - -#define fd_inb(port) inb_p(port) -#define fd_outb(value, port) outb_p(value, port) - -#define fd_enable_dma() enable_dma(FLOPPY_DMA) -#define fd_disable_dma() disable_dma(FLOPPY_DMA) -#define fd_request_dma() request_dma(FLOPPY_DMA, "floppy") -#define fd_free_dma() free_dma(FLOPPY_DMA) -#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA) -#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA, mode) -#define fd_set_dma_addr(addr) set_dma_addr(FLOPPY_DMA, virt_to_bus(addr)) -#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA, count) -#define fd_enable_irq() enable_irq(FLOPPY_IRQ) -#define fd_disable_irq() disable_irq(FLOPPY_IRQ) -#define fd_cacheflush(addr, size) /* nothing */ -#define fd_request_irq() \ - request_irq(FLOPPY_IRQ, floppy_interrupt, 0, "floppy", NULL) -#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) - -#ifdef CONFIG_PCI - -#include - -#define fd_dma_setup(addr, size, mode, io) \ - sw64_fd_dma_setup(addr, size, mode, io) - -static inline int -sw64_fd_dma_setup(char *addr, unsigned long size, int mode, int io) -{ - static unsigned long prev_size; - static dma_addr_t bus_addr; - static char *prev_addr; - static int prev_dir; - int dir; - - dir = (mode != DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE; - - if (bus_addr - && (addr != prev_addr || size != prev_size || dir != prev_dir)) { - /* different from last time -- unmap prev */ - bus_addr = 0; - } - - if (!bus_addr) /* need to map it */ - bus_addr = virt_to_bus(addr); - - /* remember this one as prev */ - prev_addr = addr; - prev_size = size; - prev_dir = dir; - - fd_clear_dma_ff(); - fd_cacheflush(addr, size); - fd_set_dma_mode(mode); - set_dma_addr(FLOPPY_DMA, bus_addr); - fd_set_dma_count(size); - virtual_dma_port = io; - fd_enable_dma(); - - return 0; -} - -#endif /* CONFIG_PCI */ - -inline void virtual_dma_init(void) -{ - /* Nothing to do on an sw64 */ -} - -static int FDC1 = 0x3f0; -static int FDC2 = -1; - -/* - * Again, the CMOS information doesn't work on the sw64.. - */ -#define FLOPPY0_TYPE 6 -#define FLOPPY1_TYPE 0 - -#define N_FDC 2 -#define N_DRIVE 8 - -/* - * Most sw64s have no problems with floppy DMA crossing 64k borders, - * except for certain ones, like XL and RUFFIAN. - * - * However, the test is simple and fast, and this *is* floppy, after all, - * so we do it for all platforms, just to make sure. - * - * This is advantageous in other circumstances as well, as in moving - * about the PCI DMA windows and forcing the floppy to start doing - * scatter-gather when it never had before, and there *is* a problem - * on that platform... 
;-} - */ - -static inline unsigned long CROSS_64KB(void *a, unsigned long s) -{ - unsigned long p = (unsigned long)a; - - return ((p + s - 1) ^ p) & ~0xffffUL; -} - -#define EXTRA_FLOPPY_PARAMS - -#endif /* __ASM_SW64_FLOPPY_H */ diff --git a/arch/sw_64/include/asm/hcall.h b/arch/sw_64/include/asm/hcall.h index 8117752b657e9c8f1daa306480aa048da389ddf9..b5438b477c87113db265f0fd7dd51b3a61f53bf4 100644 --- a/arch/sw_64/include/asm/hcall.h +++ b/arch/sw_64/include/asm/hcall.h @@ -18,6 +18,7 @@ enum HCALL_TYPE { HCALL_SWNET = 20, /* guest request swnet service */ HCALL_SWNET_IRQ = 21, /* guest request swnet intr */ HCALL_FATAL_ERROR = 22, /* guest fatal error, issued by hmcode */ + HCALL_MEMHOTPLUG = 23, /* guest memory hotplug event */ NR_HCALL }; diff --git a/arch/sw_64/include/asm/hw_init.h b/arch/sw_64/include/asm/hw_init.h index 9a56590ef653e4d7107f0f581da5e25d1187f455..f60a58570a9219c5d796d0383843b6c62d1eb93b 100644 --- a/arch/sw_64/include/asm/hw_init.h +++ b/arch/sw_64/include/asm/hw_init.h @@ -2,6 +2,7 @@ #ifndef _ASM_SW64_HW_INIT_H #define _ASM_SW64_HW_INIT_H #include +#include #define MMSIZE __va(0x2040) @@ -96,26 +97,17 @@ static inline bool icache_is_vivt_no_ictag(void) return (cpu_desc.arch_var == 0x3 && cpu_desc.arch_rev == 0x1); } -enum RUNMODE { - HOST_MODE = 0, - GUEST_MODE = 1, - EMUL_MODE = 2, -}; - -static inline bool is_in_host(void) -{ - return !cpu_desc.run_mode; -} +#define EMUL_FLAG (0x1UL << 63) +#define MMSIZE_MASK (EMUL_FLAG - 1) -static inline bool is_in_guest(void) -{ - return cpu_desc.run_mode == GUEST_MODE; -} +DECLARE_STATIC_KEY_TRUE(run_mode_host_key); +DECLARE_STATIC_KEY_FALSE(run_mode_guest_key); +DECLARE_STATIC_KEY_FALSE(run_mode_emul_key); -static inline bool is_guest_or_emul(void) -{ - return !!cpu_desc.run_mode; -} +#define is_in_host() static_branch_likely(&run_mode_host_key) +#define is_in_guest() static_branch_unlikely(&run_mode_guest_key) +#define is_in_emul() static_branch_unlikely(&run_mode_emul_key) +#define is_guest_or_emul() !static_branch_likely(&run_mode_host_key) #define CPU_SW3231 0x31 #define CPU_SW831 0x32 @@ -176,5 +168,6 @@ static inline bool is_guest_or_emul(void) #define CACHE_INDEX_BITS_MASK (0x3fUL << CACHE_INDEX_BITS_SHIFT) #define CACHE_INDEX_BITS(val) \ (((val) & CACHE_INDEX_BITS_MASK) >> CACHE_INDEX_BITS_SHIFT) +#define current_cpu_data cpu_data[smp_processor_id()] #endif /* HW_INIT_H */ diff --git a/arch/sw_64/include/asm/io.h b/arch/sw_64/include/asm/io.h index 6796c64f94ae71791ebd0c9276ae9940716dd29a..fc11cb82f5b5742dd5ee7f3ad2acd49a2f694571 100644 --- a/arch/sw_64/include/asm/io.h +++ b/arch/sw_64/include/asm/io.h @@ -64,15 +64,6 @@ static inline void * __deprecated bus_to_virt(unsigned long address) } #define isa_bus_to_virt bus_to_virt -/* - * There are different chipsets to interface the sw64 CPUs to the world. - */ - -#define IO_CONCAT(a, b) _IO_CONCAT(a, b) -#define _IO_CONCAT(a, b) a ## _ ## b - -#include - /* * Generic IO read/write. These perform native-endian accesses. */ @@ -184,14 +175,6 @@ extern void outb(u8 b, unsigned long port); extern void outw(u16 b, unsigned long port); extern void outl(u32 b, unsigned long port); -/* - * Mapping from port numbers to __iomem space is pretty easy. 
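
Note on the hw_init.h hunk above: the run-mode predicates become static keys, so is_in_host() compiles down to a patched unconditional branch instead of loading and comparing cpu_desc.run_mode on every call. A sketch of the corresponding one-time setup (hypothetical init code; the mode numbering matches the removed enum, HOST=0, GUEST=1, EMUL=2):

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_TRUE(run_mode_host_key);
	DEFINE_STATIC_KEY_FALSE(run_mode_guest_key);
	DEFINE_STATIC_KEY_FALSE(run_mode_emul_key);

	/* Hypothetical: pick the mode once, early in boot. */
	static void __init setup_run_mode(unsigned long mode)
	{
		if (mode == 1) {		/* guest */
			static_branch_disable(&run_mode_host_key);
			static_branch_enable(&run_mode_guest_key);
		} else if (mode == 2) {		/* emulator */
			static_branch_disable(&run_mode_host_key);
			static_branch_enable(&run_mode_emul_key);
		}
		/* mode 0 (host) keeps the compile-time defaults. */
	}

Branch patching makes toggling expensive but the steady-state check essentially free, which fits a value that is decided once at boot and read on hot paths.
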
- */ -static inline void __iomem *ioportmap(unsigned long addr) -{ - return sw64_platform->ioportmap(addr); -} - static inline void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot) { @@ -211,22 +194,6 @@ static inline void __iounmap(volatile void __iomem *addr) #define iounmap __iounmap -static inline int __is_ioaddr(unsigned long addr) -{ - return addr >= (PAGE_OFFSET | IO_BASE); -} - -#define __is_ioaddr(a) __is_ioaddr((unsigned long)(a)) - -static inline int __is_mmio(const volatile void __iomem *xaddr) -{ - unsigned long addr = (unsigned long)xaddr; - - return (addr & 0x100000000UL) == 0; -} - - - #define ioread16be(p) be16_to_cpu(ioread16(p)) #define ioread32be(p) be32_to_cpu(ioread32(p)) #define iowrite16be(v, p) iowrite16(cpu_to_be16(v), (p)) diff --git a/arch/sw_64/include/asm/irq_impl.h b/arch/sw_64/include/asm/irq_impl.h index 7132670771426e5ab040e68c2719d4aaed49f751..b568efef699487405884b82789c61f69b216371c 100644 --- a/arch/sw_64/include/asm/irq_impl.h +++ b/arch/sw_64/include/asm/irq_impl.h @@ -11,6 +11,8 @@ #include #include +#include + #define SW64_PCIE0_INT_BASE 17 #define SW64_PCIE0_MSI_BASE 21 @@ -30,6 +32,7 @@ enum sw64_irq_type { INT_RTC = 9, INT_FAULT = 10, INT_VT_SERIAL = 12, + INT_VT_HOTPLUG = 13, INT_DEV = 17, INT_NMI = 18, INT_LEGACY = 31, diff --git a/arch/sw_64/include/asm/irq_regs.h b/arch/sw_64/include/asm/irq_regs.h deleted file mode 100644 index bba48f36a40fbf9af5878b48d4b854b255afa9d0..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/irq_regs.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_IRQ_REGS_H -#define _ASM_SW64_IRQ_REGS_H - -#include - -#endif diff --git a/arch/sw_64/include/asm/irqflags.h b/arch/sw_64/include/asm/irqflags.h index 6101b6ad2e99d6777b616bcf01e58763260d34a2..b4440f25a51d622402198e1239bcac10908ee5a5 100644 --- a/arch/sw_64/include/asm/irqflags.h +++ b/arch/sw_64/include/asm/irqflags.h @@ -5,14 +5,6 @@ #include #define IPL_MIN 0 -#define IPL_SW0 1 -#define IPL_SW1 2 -#define IPL_DEV0 3 -#define IPL_DEV1 4 -#define IPL_TIMER 5 -#define IPL_PERF 6 -#define IPL_POWERFAIL 6 -#define IPL_MCHECK 7 #define IPL_MAX 7 #define getipl() (rdps() & 7) diff --git a/arch/sw_64/include/asm/kmap_types.h b/arch/sw_64/include/asm/kmap_types.h deleted file mode 100644 index 8e86b08dee9470031fa3a251c8806920a9465705..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/kmap_types.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_KMAP_TYPES_H -#define _ASM_SW64_KMAP_TYPES_H - -/* Dummy header just to define km_type. 
*/ - -#ifdef CONFIG_DEBUG_HIGHMEM -#define __WITH_KM_FENCE -#endif - -#include - -#undef __WITH_KM_FENCE - -#endif diff --git a/arch/sw_64/include/asm/kvm_asm.h b/arch/sw_64/include/asm/kvm_asm.h index 4b851682188c8af841b2c1e34379246f3d5d0b8b..7e2c92ed45749d0defdb2771d98a6e431bfbb0f3 100644 --- a/arch/sw_64/include/asm/kvm_asm.h +++ b/arch/sw_64/include/asm/kvm_asm.h @@ -11,4 +11,7 @@ #define SW64_KVM_EXIT_RESTART 17 #define SW64_KVM_EXIT_FATAL_ERROR 22 +#ifdef CONFIG_KVM_MEMHOTPLUG +#define SW64_KVM_EXIT_MEMHOTPLUG 23 +#endif #endif /* _ASM_SW64_KVM_ASM_H */ diff --git a/arch/sw_64/include/asm/kvm_host.h b/arch/sw_64/include/asm/kvm_host.h index 913a2e9789c11c4764a934e68a55469c35af5417..6d292c0863478e6d2bef2d293280fe9ec4aa3cbe 100644 --- a/arch/sw_64/include/asm/kvm_host.h +++ b/arch/sw_64/include/asm/kvm_host.h @@ -29,7 +29,7 @@ #include #define KVM_MAX_VCPUS 64 -#define KVM_USER_MEM_SLOTS 512 +#define KVM_USER_MEM_SLOTS 64 #define KVM_HALT_POLL_NS_DEFAULT 0 #define KVM_IRQCHIP_NUM_PINS 256 @@ -42,11 +42,16 @@ #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) struct kvm_arch_memory_slot { - + unsigned long host_phys_addr; + bool valid; }; struct kvm_arch { - struct swvm_mem mem; + unsigned long host_phys_addr; + unsigned long size; + + /* segment table */ + unsigned long *seg_pgd; }; @@ -99,6 +104,9 @@ struct kvm_vcpu_stat { u64 halt_poll_invalid; }; +#ifdef CONFIG_KVM_MEMHOTPLUG +void vcpu_mem_hotplug(struct kvm_vcpu *vcpu, unsigned long start_addr); +#endif int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, int exception_index, struct hcall_args *hargs); void vcpu_send_ipi(struct kvm_vcpu *vcpu, int target_vcpuid); diff --git a/arch/sw_64/include/asm/local.h b/arch/sw_64/include/asm/local.h deleted file mode 100644 index 9144600f641d47d67bdc1dc0304eaacd16d3f0f6..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/local.h +++ /dev/null @@ -1,125 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_LOCAL_H -#define _ASM_SW64_LOCAL_H - -#include -#include - -typedef struct { - atomic_long_t a; -} local_t; - -#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } -#define local_read(l) atomic_long_read(&(l)->a) -#define local_set(l, i) atomic_long_set(&(l)->a, (i)) -#define local_inc(l) atomic_long_inc(&(l)->a) -#define local_dec(l) atomic_long_dec(&(l)->a) -#define local_add(i, l) atomic_long_add((i), (&(l)->a)) -#define local_sub(i, l) atomic_long_sub((i), (&(l)->a)) - -static inline long local_add_return(long i, local_t *l) -{ - long temp1, temp2, result, addr; - - __asm__ __volatile__( -#ifdef CONFIG_LOCK_MEMB - " memb\n" -#endif - " ldi %4, %2\n" - "1: lldl %0, 0(%4)\n" - " ldi %1, 1\n" - " wr_f %1\n" - " addl %0, %5, %3\n" - " addl %0, %5, %0\n" -#ifdef CONFIG_LOCK_FIXUP - " memb\n" -#endif - " lstl %0, 0(%4)\n" - " rd_f %0\n" - " beq %0, 2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (temp1), "=&r" (temp2), "=m" (l->a.counter), - "=&r" (result), "=&r" (addr) - : "Ir" (i), "m" (l->a.counter) : "memory"); - return result; -} - -static inline long local_sub_return(long i, local_t *l) -{ - long temp1, temp2, result, addr; - - __asm__ __volatile__( -#ifdef CONFIG_LOCK_MEMB - " memb\n" -#endif - " ldi %4, %2\n" - "1: lldl %0, 0(%4)\n" - " ldi %1, 1\n" - " wr_f %1\n" - " subl %0, %5, %3\n" - " subl %0, %5, %0\n" -#ifdef CONFIG_LOCK_FIXUP - " memb\n" -#endif - " lstl %0, 0(%4)\n" - " rd_f %0\n" - " beq %0, 2f\n" - ".subsection 2\n" - "2: br 1b\n" - ".previous" - : "=&r" (temp1), "=&r" (temp2), "=m" (l->a.counter), - "=&r" 
(result), "=&r" (addr) - : "Ir" (i), "m" (l->a.counter) : "memory"); - return result; -} - -#define local_cmpxchg(l, o, n) \ - (cmpxchg_local(&((l)->a.counter), (o), (n))) -#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n))) - -/** - * local_add_unless - add unless the number is a given value - * @l: pointer of type local_t - * @a: the amount to add to l... - * @u: ...unless l is equal to u. - * - * Atomically adds @a to @l, so long as it was not @u. - * Returns non-zero if @l was not @u, and zero otherwise. - */ -#define local_add_unless(l, a, u) \ -({ \ - long c, old; \ - c = local_read(l); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = local_cmpxchg((l), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) -#define local_inc_not_zero(l) local_add_unless((l), 1, 0) - -#define local_add_negative(a, l) (local_add_return((a), (l)) < 0) - -#define local_dec_return(l) local_sub_return(1, (l)) - -#define local_inc_return(l) local_add_return(1, (l)) - -#define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0) - -#define local_inc_and_test(l) (local_add_return(1, (l)) == 0) - -#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0) - -/* Verify if faster than atomic ops */ -#define __local_inc(l) ((l)->a.counter++) -#define __local_dec(l) ((l)->a.counter++) -#define __local_add(i, l) ((l)->a.counter += (i)) -#define __local_sub(i, l) ((l)->a.counter -= (i)) - -#endif /* _ASM_SW64_LOCAL_H */ diff --git a/arch/sw_64/include/asm/local64.h b/arch/sw_64/include/asm/local64.h deleted file mode 100644 index 4278133cd8fa1c2d5451e68e7b21ac790dd6061a..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/local64.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_LOCAL64_H -#define _ASM_SW64_LOCAL64_H - -#include - -#endif diff --git a/arch/sw_64/include/asm/memory.h b/arch/sw_64/include/asm/memory.h index d3191165c7b5df0aabbe2a81479d508b38736668..b2b7492ae477d81e92bc806e093ace0c4ade9c2f 100644 --- a/arch/sw_64/include/asm/memory.h +++ b/arch/sw_64/include/asm/memory.h @@ -6,6 +6,7 @@ #include #endif +#define MIN_MEMORY_BLOCK_SIZE_VM_MEMHP (1UL << 30) #define NODE0_START (_TEXT_START - __START_KERNEL_map) #define MAX_PHYSMEM_BITS 48 diff --git a/arch/sw_64/include/asm/mmu_context.h b/arch/sw_64/include/asm/mmu_context.h index 6b2ab3224ec9a2b2028fb99722a5bbc3ac2e6605..d6cd01d5571211908bf5424f7f01665c4542d1a0 100644 --- a/arch/sw_64/include/asm/mmu_context.h +++ b/arch/sw_64/include/asm/mmu_context.h @@ -48,7 +48,6 @@ __reload_thread(struct pcb_struct *pcb) */ #ifdef CONFIG_SUBARCH_C3B -#define MAX_ASN 1023 #define WIDTH_HARDWARE_ASN 10 #endif @@ -89,7 +88,7 @@ __get_new_mm_context(struct mm_struct *mm, long cpu) unsigned long asn = cpu_last_asn(cpu); unsigned long next = asn + 1; - if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) { + if ((asn & HARDWARE_ASN_MASK) >= HARDWARE_ASN_MASK) { tbiap(); next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION; } @@ -132,7 +131,7 @@ switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, * Always update the PCB PTBR. If next is kernel thread, it must * update PTBR. If next is user process, it's ok to update PTBR. 
*/ - task_thread_info(next)->pcb.ptbr = (__pa(next_mm->pgd)) >> PAGE_SHIFT; + task_thread_info(next)->pcb.ptbr = virt_to_pfn(next_mm->pgd); load_asn_ptbr(task_thread_info(next)->pcb.asn, task_thread_info(next)->pcb.ptbr); } @@ -171,8 +170,7 @@ static inline int init_new_context(struct task_struct *tsk, for_each_possible_cpu(i) mm->context.asid[i] = 0; if (tsk != current) - task_thread_info(tsk)->pcb.ptbr - = (__pa(mm->pgd)) >> PAGE_SHIFT; + task_thread_info(tsk)->pcb.ptbr = virt_to_pfn(mm->pgd); return 0; } @@ -184,8 +182,7 @@ static inline void destroy_context(struct mm_struct *mm) static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { - task_thread_info(tsk)->pcb.ptbr - = (__pa(mm->pgd)) >> PAGE_SHIFT; + task_thread_info(tsk)->pcb.ptbr = virt_to_pfn(mm->pgd); } static inline int arch_dup_mmap(struct mm_struct *oldmm, diff --git a/arch/sw_64/include/asm/mmzone.h b/arch/sw_64/include/asm/mmzone.h index 924e33f6d3267ab1394615d8667df36242752259..3849d26e389046c34e7260ba49b63f14faa2ce35 100644 --- a/arch/sw_64/include/asm/mmzone.h +++ b/arch/sw_64/include/asm/mmzone.h @@ -14,34 +14,4 @@ extern pg_data_t *node_data[]; #define NODE_DATA(nid) (node_data[(nid)]) #endif -#ifdef CONFIG_DISCONTIGMEM -extern int pa_to_nid(unsigned long pa); -extern int pfn_valid(unsigned long pfn); - -#define mk_pte(page, pgprot) \ -({ \ - pte_t pte; \ - unsigned long pfn; \ - \ - pfn = page_to_pfn(page) << _PTE_FLAGS_BITS; \ - pte_val(pte) = pfn | pgprot_val(pgprot); \ - \ - pte; \ -}) - -#define pte_page(x) \ -({ \ - unsigned long kvirt; \ - struct page *__xx; \ - \ - kvirt = (unsigned long)__va(pte_val(x) >> (_PTE_FLAGS_BITS-PAGE_SHIFT));\ - __xx = virt_to_page(kvirt); \ - \ - __xx; \ -}) - -#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT) -#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT)) -#endif /* CONFIG_DISCONTIGMEM */ - #endif /* _ASM_SW64_MMZONE_H */ diff --git a/arch/sw_64/include/asm/module.h b/arch/sw_64/include/asm/module.h index 55e6e333585fc1edf4d0c1bec8545cfd5ace9cf9..d1663aab4097ab2cca1d2bb3ae4496245c8d0ea7 100644 --- a/arch/sw_64/include/asm/module.h +++ b/arch/sw_64/include/asm/module.h @@ -2,18 +2,12 @@ #ifndef _ASM_SW64_MODULE_H #define _ASM_SW64_MODULE_H +#include + struct mod_arch_specific { unsigned int gotsecindex; }; -#define Elf_Sym Elf64_Sym -#define Elf_Shdr Elf64_Shdr -#define Elf_Ehdr Elf64_Ehdr -#define Elf_Phdr Elf64_Phdr -#define Elf_Dyn Elf64_Dyn -#define Elf_Rel Elf64_Rel -#define Elf_Rela Elf64_Rela - #define ARCH_SHF_SMALL SHF_SW64_GPREL #ifdef MODULE diff --git a/arch/sw_64/include/asm/numa.h b/arch/sw_64/include/asm/numa.h index 47071007e8ff6ae62b8bb328684baa5a8b92380a..4ea8b8de248af170f1cc9107e20d078f17a29f37 100644 --- a/arch/sw_64/include/asm/numa.h +++ b/arch/sw_64/include/asm/numa.h @@ -4,6 +4,7 @@ #define _ASM_SW64_NUMA_H #include +#include #ifdef CONFIG_NUMA extern nodemask_t numa_nodes_parsed __initdata; diff --git a/arch/sw_64/include/asm/page.h b/arch/sw_64/include/asm/page.h index 6e17d5e437c55bae7e09d7554fb35026ae6c9f55..dc6a89c37231232058ea7c60359108f162f2952c 100644 --- a/arch/sw_64/include/asm/page.h +++ b/arch/sw_64/include/asm/page.h @@ -46,10 +46,13 @@ extern unsigned long __phys_addr(unsigned long); #endif #define __pa(x) __phys_addr((unsigned long)(x)) -#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) +#define __va(x) ((void *)((unsigned long) (x) | PAGE_OFFSET)) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 
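
Note on the mmu_context.h hunk above: with MAX_ASN gone, rollover is detected directly against HARDWARE_ASN_MASK. The low WIDTH_HARDWARE_ASN bits are the hardware ASN and the upper bits act as a generation counter, so the full TLB flush (tbiap()) only happens when the hardware ASN space wraps. A compact model of the allocation logic (illustrative; HARDWARE_ASN_MASK is assumed to be the usual all-ones mask of the field width):

	#define WIDTH_HARDWARE_ASN	10
	#define HARDWARE_ASN_MASK	((1UL << WIDTH_HARDWARE_ASN) - 1)	/* assumed */
	#define ASN_FIRST_VERSION	(1UL << WIDTH_HARDWARE_ASN)

	static unsigned long alloc_asn(unsigned long last_asn)
	{
		unsigned long next = last_asn + 1;

		if ((last_asn & HARDWARE_ASN_MASK) >= HARDWARE_ASN_MASK) {
			/* Hardware ASNs exhausted: flush the TLB, bump the version. */
			/* tbiap(); */
			next = (last_asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
		}
		return next;
	}
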
+#define virt_to_pfn(vaddr) (PHYS_PFN(__pa(vaddr))) +#define pfn_to_virt(pfn) (__va(PFN_PHYS(pfn))) + #ifdef CONFIG_FLATMEM #define pfn_valid(pfn) ((pfn) < max_mapnr) #endif /* CONFIG_FLATMEM */ diff --git a/arch/sw_64/include/asm/param.h b/arch/sw_64/include/asm/param.h deleted file mode 100644 index 49c5d03a337061b1a0e98040a4bd98c8e6efddef..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/param.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_PARAM_H -#define _ASM_SW64_PARAM_H - -#include - -#undef HZ -#define HZ CONFIG_HZ -#define USER_HZ 100 -#define CLOCKS_PER_SEC USER_HZ /* frequency at which times() counts */ -#endif /* _ASM_SW64_PARAM_H */ diff --git a/arch/sw_64/include/asm/parport.h b/arch/sw_64/include/asm/parport.h deleted file mode 100644 index 82b9a219b797ccf87796418b6d7241783b99e10a..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/parport.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * parport.h: platform-specific PC-style parport initialisation - * - * Copyright (C) 1999, 2000 Tim Waugh - * - * This file should only be included by drivers/parport/parport_pc.c. - */ - -#ifndef _ASM_SW64_PARPORT_H -#define _ASM_SW64_PARPORT_H - -static int parport_pc_find_isa_ports(int autoirq, int autodma); -static int parport_pc_find_nonpci_ports(int autoirq, int autodma) -{ - return parport_pc_find_isa_ports(autoirq, autodma); -} - -#endif /* !(_ASM_SW64_PARPORT_H) */ diff --git a/arch/sw_64/include/asm/pci.h b/arch/sw_64/include/asm/pci.h index 7e0c03da1d17a7113f9d288f3df081d279ba88c0..a90f80152470911af9b6c3d2080fb0c3956982ea 100644 --- a/arch/sw_64/include/asm/pci.h +++ b/arch/sw_64/include/asm/pci.h @@ -15,7 +15,6 @@ struct pci_dev; struct pci_bus; struct resource; -struct pci_iommu_arena; struct sunway_iommu; struct page; @@ -35,20 +34,18 @@ struct pci_controller { unsigned long dense_io_base; /* This one's for the kernel only. It's in KSEG somewhere. */ - unsigned long ep_config_space_base; - unsigned long rc_config_space_base; + void __iomem *ep_config_space_base; + void __iomem *rc_config_space_base; unsigned long index; unsigned long node; DECLARE_BITMAP(piu_msiconfig, 256); int int_irq; + int service_irq; /* For compatibility with current (as of July 2003) pciutils - and XFree86. Eventually will be removed. */ + * and XFree86. Eventually will be removed. + */ unsigned int need_domain_info; - - struct pci_iommu_arena *sg_pci; - struct pci_iommu_arena *sg_isa; - bool iommu_enable; struct sunway_iommu *pci_iommu; int first_busno; @@ -66,27 +63,23 @@ struct pci_controller { #define PCIBIOS_MIN_IO 0 #define PCIBIOS_MIN_MEM 0 -extern void pcibios_set_master(struct pci_dev *dev); +/* generic pci stuff */ +#include + extern void __init sw64_init_pci(void); extern void __init sw64_device_interrupt(unsigned long vector); extern void __init sw64_init_irq(void); extern void __init sw64_init_arch(void); -extern unsigned char sw64_swizzle(struct pci_dev *dev, u8 *pinp); extern struct pci_ops sw64_pci_ops; extern int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); extern struct pci_controller *hose_head; -/* TODO: integrate with include/asm-generic/pci.h ? */ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) -{ - return channel ? 
15 : 14; -} - #ifdef CONFIG_SUNWAY_IOMMU extern struct syscore_ops iommu_cpu_syscore_ops; #endif -#define pci_domain_nr(bus) 0 +#ifdef CONFIG_PCI_DOMAINS +static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } static inline int pci_proc_domain(struct pci_bus *bus) { @@ -94,6 +87,7 @@ static inline int pci_proc_domain(struct pci_bus *bus) return hose->need_domain_info; } +#endif #ifdef CONFIG_NUMA static inline int __pcibus_to_node(const struct pci_bus *bus) diff --git a/arch/sw_64/include/asm/pgalloc.h b/arch/sw_64/include/asm/pgalloc.h index 3cfdcbef7ef8c0bfb5ab47c905588f1f962e5aa7..9572b4709ff45303a02527490e1554f04d678118 100644 --- a/arch/sw_64/include/asm/pgalloc.h +++ b/arch/sw_64/include/asm/pgalloc.h @@ -15,7 +15,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) { - pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET)); + pmd_set(pmd, (pte_t *)__va(page_to_pa(pte))); } #define pmd_pgtable(pmd) pmd_page(pmd) diff --git a/arch/sw_64/include/asm/pgtable.h b/arch/sw_64/include/asm/pgtable.h index 32fde38a2be0b196c33bdc33111bdb049e0a17a5..76c782baf242bd6bdf40c682b5b95b693f4e94a4 100644 --- a/arch/sw_64/include/asm/pgtable.h +++ b/arch/sw_64/include/asm/pgtable.h @@ -26,10 +26,18 @@ struct vm_area_struct; * hook is made available. */ #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) -#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pteval) +{ + set_pte(ptep, pteval); +} #define set_pmd(pmdptr, pmdval) ((*(pmdptr)) = (pmdval)) -#define set_pmd_at(mm, addr, pmdp, pmdval) set_pmd(pmdp, pmdval) +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval) +{ + set_pmd(pmdp, pmdval); +} /* PGDIR_SHIFT determines what a forth-level page table entry can map */ #define PGDIR_SHIFT (PAGE_SHIFT + 3 * (PAGE_SHIFT - 3)) @@ -81,6 +89,7 @@ struct vm_area_struct; #define _PAGE_PHU 0x0020 /* used for 256M page size bit */ #define _PAGE_PSE 0x0040 /* used for 8M page size bit */ #define _PAGE_PROTNONE 0x0080 /* used for numa page balancing */ +#define _PAGE_SPECIAL 0x0100 #define _PAGE_KRE 0x0400 /* xxx - see below on the "accessed" bit */ #define _PAGE_URE 0x0800 /* xxx */ #define _PAGE_KWE 0x4000 /* used to do the dirty bit in software */ @@ -110,12 +119,11 @@ struct vm_area_struct; #define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE) -#define _PFN_MASK 0xFFFFFFFFF0000000UL -#define _PFN_BITS 36 -#define _PTE_FLAGS_BITS (64 - _PFN_BITS) +#define _PFN_SHIFT 28 +#define _PFN_MASK ((-1UL) << _PFN_SHIFT) #define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS) -#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS) +#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS | _PAGE_SPECIAL) #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PHU) /* @@ -172,53 +180,19 @@ struct vm_area_struct; extern struct page *empty_zero_page; #define ZERO_PAGE(vaddr) (empty_zero_page) -/* number of bits that fit into a memory pointer */ -#define BITS_PER_PTR (8 * sizeof(unsigned long)) - -/* to align the pointer to a pointer address */ -#define PTR_MASK (~(sizeof(void *) - 1)) - -/* sizeof(void*)==1<> (PAGE_SHIFT - SIZEOF_PTR_LOG2) & PTR_MASK & ~PAGE_MASK) - -#define PHYS_TWIDDLE(pfn) (pfn) - -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. 
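
Note on the page.h and early_ioremap.h hunks above: __va() now forms the virtual address with a bitwise OR of PAGE_OFFSET rather than an add. OR and ADD agree exactly when the physical address shares no bits with PAGE_OFFSET, which holds here since physical addresses fit in MAX_PHYSMEM_BITS (48) and PAGE_OFFSET occupies only high bits; the OR form makes it explicit that the mapping merely tags the address. A quick user-space check of the identity (PAGE_OFFSET value assumed for the sketch):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_OFFSET	0xfff0000000000000ULL	/* assumed value */
	#define PHYS_MASK	((1ULL << 48) - 1)	/* MAX_PHYSMEM_BITS = 48 */

	int main(void)
	{
		uint64_t pa = 0x123456789abcULL & PHYS_MASK;
		uint64_t va = pa | PAGE_OFFSET;		/* __va() */

		/* OR and ADD agree because pa and PAGE_OFFSET share no bits. */
		assert(va == pa + PAGE_OFFSET);
		assert((va & ~PAGE_OFFSET) == pa);	/* __pa() round-trip */
		return 0;
	}
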
- */ -#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT) - -#define pmd_pfn(pmd) (pmd_val(pmd) >> _PTE_FLAGS_BITS) -#define pte_pfn(pte) (pte_val(pte) >> _PTE_FLAGS_BITS) -#ifndef CONFIG_DISCONTIGMEM -#define pte_page(pte) pfn_to_page(pte_pfn(pte)) -#define mk_pte(page, pgprot) \ -({ \ - pte_t pte; \ - \ - pte_val(pte) = (page_to_pfn(page) << _PTE_FLAGS_BITS) | pgprot_val(pgprot); \ - pte; \ -}) -#endif - -static inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot) +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) { pte_t pte; - pte_val(pte) = (PHYS_TWIDDLE(physpfn) << _PTE_FLAGS_BITS) | pgprot_val(pgprot); + pte_val(pte) = (pfn << _PFN_SHIFT) | pgprot_val(prot); return pte; } -static inline pmd_t pfn_pmd(unsigned long physpfn, pgprot_t pgprot) +static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot) { pmd_t pmd; - pmd_val(pmd) = (PHYS_TWIDDLE(physpfn) << _PTE_FLAGS_BITS) | pgprot_val(pgprot); + pmd_val(pmd) = (pfn << _PFN_SHIFT) | pgprot_val(prot); return pmd; } @@ -236,37 +210,48 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) { - pmd_val(*pmdp) = _PAGE_TABLE | (__pa(ptep) << (_PTE_FLAGS_BITS - PAGE_SHIFT)); + pmd_val(*pmdp) = _PAGE_TABLE | (virt_to_pfn(ptep) << _PFN_SHIFT); } static inline void pud_set(pud_t *pudp, pmd_t *pmdp) { - pud_val(*pudp) = _PAGE_TABLE | (__pa(pmdp) << (_PTE_FLAGS_BITS - PAGE_SHIFT)); + pud_val(*pudp) = _PAGE_TABLE | (virt_to_pfn(pmdp) << _PFN_SHIFT); } static inline void p4d_set(p4d_t *p4dp, pud_t *pudp) { - p4d_val(*p4dp) = _PAGE_TABLE | (__pa(pudp) << (_PTE_FLAGS_BITS - PAGE_SHIFT)); + p4d_val(*p4dp) = _PAGE_TABLE | (virt_to_pfn(pudp) << _PFN_SHIFT); } -static inline unsigned long -pmd_page_vaddr(pmd_t pmd) +static inline unsigned long pmd_page_vaddr(pmd_t pmd) { - return ((pmd_val(pmd) & _PFN_MASK) >> (_PTE_FLAGS_BITS-PAGE_SHIFT)) + PAGE_OFFSET; + return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PFN_SHIFT); } -#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> _PTE_FLAGS_BITS)) -#define pud_page(pud) (pfn_to_page(pud_val(pud) >> _PTE_FLAGS_BITS)) -#define p4d_page(p4d) (pfn_to_page(p4d_val(p4d) >> _PTE_FLAGS_BITS)) +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
+ */ +#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT) + +#define pmd_pfn(pmd) (pmd_val(pmd) >> _PFN_SHIFT) +#define pte_pfn(pte) (pte_val(pte) >> _PFN_SHIFT) + +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) +#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) + +#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> _PFN_SHIFT)) +#define pud_page(pud) (pfn_to_page(pud_val(pud) >> _PFN_SHIFT)) +#define p4d_page(p4d) (pfn_to_page(p4d_val(p4d) >> _PFN_SHIFT)) static inline pud_t *p4d_pgtable(p4d_t p4d) { - return (pud_t *)(PAGE_OFFSET + ((p4d_val(p4d) & _PFN_MASK) >> (_PTE_FLAGS_BITS-PAGE_SHIFT))); + return (pud_t *)pfn_to_virt(p4d_val(p4d) >> _PFN_SHIFT); } static inline pmd_t *pud_pgtable(pud_t pud) { - return (pmd_t *)(PAGE_OFFSET + ((pud_val(pud) & _PFN_MASK) >> (_PTE_FLAGS_BITS-PAGE_SHIFT))); + return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PFN_SHIFT); } static inline int pte_none(pte_t pte) @@ -448,6 +433,11 @@ static inline int pte_young(pte_t pte) return pte_val(pte) & _PAGE_ACCESSED; } +static inline int pte_special(pte_t pte) +{ + return pte_val(pte) & _PAGE_SPECIAL; +} + static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) |= _PAGE_FOW; @@ -491,6 +481,12 @@ static inline pte_t pte_mkhuge(pte_t pte) return pte; } +static inline pte_t pte_mkspecial(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} + #ifdef CONFIG_NUMA_BALANCING /* * See the comment in include/asm-generic/pgtable.h @@ -546,7 +542,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, set_bit(_PAGE_BIT_FOW, (unsigned long *)pmdp); } -#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot)) +#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), (prot)) #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS extern int pmdp_set_access_flags(struct vm_area_struct *vma, @@ -566,15 +562,6 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma, extern void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp); -#define PAGE_DIR_OFFSET(tsk, address) pgd_offset((tsk), (address)) - -/* to find an entry in a kernel page-table-directory */ -#define pgd_offset_k(address) pgd_offset(&init_mm, (address)) - -/* to find an entry in a page-table-directory. 
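
Note on the pgtable.h rewrite above: a PTE is now simply (pfn << _PFN_SHIFT) | prot with _PFN_SHIFT = 28, and _PFN_MASK = (-1UL) << 28 covers every PFN bit, replacing the old _PTE_FLAGS_BITS arithmetic. The encode/decode round trip in isolation (self-contained sketch with arbitrary example values):

	#include <assert.h>

	#define _PFN_SHIFT	28
	#define _PFN_MASK	((-1UL) << _PFN_SHIFT)

	int main(void)
	{
		unsigned long pfn = 0x12345UL;
		unsigned long prot = 0x111UL;	/* arbitrary low-bit flags */
		unsigned long pte = (pfn << _PFN_SHIFT) | prot;	/* pfn_pte() */

		assert((pte >> _PFN_SHIFT) == pfn);	/* pte_pfn() */
		assert((pte & ~_PFN_MASK) == prot);	/* flag bits preserved */
		return 0;
	}

The same shift serves pmd_set()/pud_set()/p4d_set(), which store virt_to_pfn() of the next-level table with _PAGE_TABLE as the protection bits.
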
*/ -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) - extern pgd_t swapper_pg_dir[1024]; /* @@ -609,14 +596,7 @@ extern pgd_t swapper_pg_dir[1024]; #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#if defined(CONFIG_FLATMEM) #define kern_addr_valid(addr) (1) -#elif defined(CONFIG_DISCONTIGMEM) -/* XXX: FIXME -- wli */ -#define kern_addr_valid(kaddr) (0) -#elif defined(CONFIG_SPARSEMEM) -#define kern_addr_valid(addr) (1) -#endif #define pte_ERROR(e) \ pr_err("%s: %d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) diff --git a/arch/sw_64/include/asm/preempt.h b/arch/sw_64/include/asm/preempt.h deleted file mode 100644 index dc6643a437667d4f603dcfe2ceb627faac7838a1..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/preempt.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_PREEMPT_H -#define _ASM_SW64_PREEMPT_H - -#include - -#endif /* _ASM_SW64_PREEMPT_H */ diff --git a/arch/sw_64/include/asm/processor.h b/arch/sw_64/include/asm/processor.h index 645c33a596fff58136f2e42f115080c1c6c5ff7f..886f28635dd45343a2f3b19e8b2909996eb90944 100644 --- a/arch/sw_64/include/asm/processor.h +++ b/arch/sw_64/include/asm/processor.h @@ -9,6 +9,10 @@ #define _ASM_SW64_PROCESSOR_H #include /* for ADDR_LIMIT_32BIT */ +#include + +#define task_pt_regs(task) \ + ((struct pt_regs *) (task_stack_page(task) + 2 * PAGE_SIZE) - 1) /* * Returns current instruction pointer ("program counter"). @@ -37,47 +41,12 @@ #define TASK_UNMAPPED_BASE \ ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : UNMAPPED_BASE) -typedef struct { - unsigned long seg; -} mm_segment_t; - -struct context_fpregs { - unsigned long f0[4]; - unsigned long f1[4]; - unsigned long f2[4]; - unsigned long f3[4]; - unsigned long f4[4]; - unsigned long f5[4]; - unsigned long f6[4]; - unsigned long f7[4]; - unsigned long f8[4]; - unsigned long f9[4]; - unsigned long f10[4]; - unsigned long f11[4]; - unsigned long f12[4]; - unsigned long f13[4]; - unsigned long f14[4]; - unsigned long f15[4]; - unsigned long f16[4]; - unsigned long f17[4]; - unsigned long f18[4]; - unsigned long f19[4]; - unsigned long f20[4]; - unsigned long f21[4]; - unsigned long f22[4]; - unsigned long f23[4]; - unsigned long f24[4]; - unsigned long f25[4]; - unsigned long f26[4]; - unsigned long f27[4]; - unsigned long f28[4]; - unsigned long f29[4]; - unsigned long f30[4]; -} __aligned(32); /* 256 bits aligned for simd */ - struct thread_struct { - struct context_fpregs ctx_fp; - unsigned long fpcr; + struct user_fpsimd_state fpstate; + /* Callee-saved registers */ + unsigned long ra; + unsigned long sp; + unsigned long s[7]; /* s0 ~ s6 */ }; #define INIT_THREAD { } diff --git a/arch/sw_64/include/asm/ptrace.h b/arch/sw_64/include/asm/ptrace.h index 1dde5e6cba8ad46753d0db8cccb539d854777d7e..ac99430156639b8e96fe2e0307dcd90cb10a2df7 100644 --- a/arch/sw_64/include/asm/ptrace.h +++ b/arch/sw_64/include/asm/ptrace.h @@ -3,7 +3,56 @@ #define _ASM_SW64_PTRACE_H #include +#include +#include +#include +/* + * This struct defines the way the registers are stored on the + * kernel stack during a system call or other kernel entry + */ + +struct pt_regs { + unsigned long r0; + unsigned long r1; + unsigned long r2; + unsigned long r3; + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + unsigned long r8; + unsigned long r9; + unsigned long 
r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + /* r16 ~ r18 saved by hmcode */ + unsigned long r19; + unsigned long r20; + unsigned long r21; + unsigned long r22; + unsigned long r23; + unsigned long r24; + unsigned long r25; + unsigned long r26; + unsigned long r27; + unsigned long r28; + unsigned long hae; +/* JRP - These are the values provided to a0-a2 by HMcode */ + unsigned long trap_a0; + unsigned long trap_a1; + unsigned long trap_a2; +/* These are saved by HMcode: */ + unsigned long ps; + unsigned long pc; + unsigned long gp; + unsigned long r16; + unsigned long r17; + unsigned long r18; +}; #define arch_has_single_step() (1) #define user_mode(regs) (((regs)->ps & 8) != 0) @@ -14,8 +63,6 @@ #define kernel_stack_pointer(regs) (((regs->ps) >> 4) & (TASK_SIZE - 1)) #define instruction_pointer_set(regs, val) ((regs)->pc = val) -#define task_pt_regs(task) \ - ((struct pt_regs *) (task_stack_page(task) + 2 * PAGE_SIZE) - 1) #define current_pt_regs() \ ((struct pt_regs *) ((char *)current_thread_info() + 2 * PAGE_SIZE) - 1) @@ -24,6 +71,9 @@ #define force_successful_syscall_return() (current_pt_regs()->r0 = 0) #define MAX_REG_OFFSET (offsetof(struct pt_regs, r18)) + +extern short regoffsets[]; + /** * regs_get_register() - get register value from its offset * @regs: pt_regs from which register value is gotten @@ -41,6 +91,8 @@ static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset) return *(unsigned long *)((unsigned long)regs + offset); } extern int regs_query_register_offset(const char *name); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); static inline unsigned long regs_return_value(struct pt_regs *regs) { diff --git a/arch/sw_64/include/asm/seccomp.h b/arch/sw_64/include/asm/seccomp.h deleted file mode 100644 index db2f298862c339d54ae2e9d2ed0cc7a62d598588..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/seccomp.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * arch/sw_64/include/asm/seccomp.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef _ASM_SW64_SECCOMP_H -#define _ASM_SW64_SECCOMP_H - -#include -#include - -#endif /* _ASM_SW64_SECCOMP_H */ diff --git a/arch/sw_64/include/asm/sections.h b/arch/sw_64/include/asm/sections.h deleted file mode 100644 index 37dab4fde720e01d184ddc2b4b335e52660efa74..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/sections.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_SECTIONS_H -#define _ASM_SW64_SECTIONS_H - -/* nothing to see, move along */ -#include - -#endif diff --git a/arch/sw_64/include/asm/segment.h b/arch/sw_64/include/asm/segment.h deleted file mode 100644 index dc90357765e582c5adf6dd8e35c370211aa74b27..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/segment.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_SEGMENT_H -#define _ASM_SW64_SEGMENT_H - -/* Only here because we have some old header files that expect it.. 
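
Note on the ptrace.h hunk above: regs_query_register_offset() and regs_get_register() together let instrumentation (kprobes event arguments, for example) look a register up by name at runtime. A usage sketch (illustrative caller; the register name string is an assumption about the lookup table):

	#include <asm/ptrace.h>

	/* Illustrative: fetch a register by name from a pt_regs snapshot. */
	static unsigned long get_named_reg(struct pt_regs *regs, const char *name)
	{
		int offset = regs_query_register_offset(name);

		if (offset < 0)
			return 0;	/* unknown register name */
		return regs_get_register(regs, offset);
	}

	/* e.g. first argument register: get_named_reg(regs, "r16") */
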
*/ - -#endif diff --git a/arch/sw_64/include/asm/serial.h b/arch/sw_64/include/asm/serial.h deleted file mode 100644 index 059e603642b937420652a5b438a1306c2db87d25..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/serial.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_SERIAL_H -#define _ASM_SW64_SERIAL_H - -#define BASE_BAUD (1843200 / 16) - -/* Standard COM flags (except for COM4, because of the 8514 problem) */ -#ifdef CONFIG_SERIAL_8250_DETECT_IRQ -#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ) -#define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ) -#else -#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST) -#define STD_COM4_FLAGS UPF_BOOT_AUTOCONF -#endif - -#endif /* _ASM_SW64_SERIAL_H */ diff --git a/arch/sw_64/include/asm/shmparam.h b/arch/sw_64/include/asm/shmparam.h deleted file mode 100644 index 15f71533b1ed5cd88263ff040a000f37ede15a02..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/shmparam.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_SHMPARAM_H -#define _ASM_SW64_SHMPARAM_H - -#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ - -#endif /* _ASM_SW64_SHMPARAM_H */ diff --git a/arch/sw_64/include/asm/special_insns.h b/arch/sw_64/include/asm/special_insns.h deleted file mode 100644 index 7f5a52b20444db377fd76660de18a28c8227becc..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/special_insns.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_SPECIAL_INSNS_H -#define _ASM_SW64_SPECIAL_INSNS_H - -enum amask_enum { - AMASK_BWX = (1UL << 0), - AMASK_FIX = (1UL << 1), - AMASK_CIX = (1UL << 2), - AMASK_MAX = (1UL << 8), - AMASK_PRECISE_TRAP = (1UL << 9), -}; - -#define amask(mask) \ -({ \ - unsigned long __amask, __input = (mask); \ - __asm__ ("mov %1, %0" : "=r"(__amask) : "rI"(__input)); \ - __amask; \ -}) - -#endif /* _ASM_SW64_SPECIAL_INSNS_H */ diff --git a/arch/sw_64/include/asm/stacktrace.h b/arch/sw_64/include/asm/stacktrace.h new file mode 100644 index 0000000000000000000000000000000000000000..ed691a72573bd210be25f8c372324a0d2c076b1f --- /dev/null +++ b/arch/sw_64/include/asm/stacktrace.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_STACKTRACE_H +#define _ASM_SW64_STACKTRACE_H + +#include +#include +#include +#include +#include + +struct stackframe { + unsigned long pc; + unsigned long fp; +}; + +enum stack_type { + STACK_TYPE_UNKNOWN, + STACK_TYPE_TASK, +}; + +struct stack_info { + unsigned long low; + unsigned long high; + enum stack_type type; +}; + +/* The form of the top of the frame on the stack */ +struct stack_frame { + unsigned long return_address; + struct stack_frame *next_frame; +}; + +extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); +extern void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data); + +static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp, + struct stack_info *info) +{ + unsigned long low = (unsigned long)task_stack_page(tsk); + unsigned long high = low + THREAD_SIZE; + + if (sp < low || sp >= high) + return false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_TASK; + } + + return true; +} + +/* + * We can only safely access per-cpu stacks from current in a non-preemptible + * context. 
+ */ +static inline bool on_accessible_stack(struct task_struct *tsk, + unsigned long sp, + struct stack_info *info) +{ + if (on_task_stack(tsk, sp, info)) + return true; + if (tsk != current || preemptible()) + return false; + + return false; +} + +#endif /* _ASM_SW64_STACKTRACE_H */ diff --git a/arch/sw_64/include/asm/sw64_init.h b/arch/sw_64/include/asm/sw64_init.h index 15842d22e5bafb66f5acb614f221de9e4c011dba..2d9140605d0b041694cb5c359656c9e8e9735125 100644 --- a/arch/sw_64/include/asm/sw64_init.h +++ b/arch/sw_64/include/asm/sw64_init.h @@ -5,6 +5,7 @@ #include #include +#include struct sw64_early_init_ops { void (*setup_core_start)(struct cpumask *cpumask); diff --git a/arch/sw_64/include/asm/sw64io.h b/arch/sw_64/include/asm/sw64io.h index 7c032070acf0024cf3509b995bc1c9a63947afb1..7d79a5b75090d82b2df8cde97918405cc4dcbb3c 100644 --- a/arch/sw_64/include/asm/sw64io.h +++ b/arch/sw_64/include/asm/sw64io.h @@ -2,6 +2,7 @@ #ifndef _ASM_SW64_SW64IO_H #define _ASM_SW64_SW64IO_H +#include #include extern void setup_chip_clocksource(void); @@ -11,105 +12,87 @@ extern void setup_chip_clocksource(void); #endif #define MK_RC_CFG(nid, idx) \ - (PAGE_OFFSET | SW64_PCI_IO_BASE((nid), (idx)) | PCI_RC_CFG) + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_RC_CFG) #define MK_PIU_IOR0(nid, idx) \ - (PAGE_OFFSET | SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR0_BASE) + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR0_BASE) #define MK_PIU_IOR1(nid, idx) \ - (PAGE_OFFSET | SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR1_BASE) + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR1_BASE) static inline unsigned int -read_rc_conf(unsigned long node, unsigned long rc_index, - unsigned int conf_offset) +read_rc_conf(unsigned long node, unsigned long rc, + unsigned int offset) { - unsigned long addr; - unsigned int value; + void __iomem *addr; - addr = MK_RC_CFG(node, rc_index) | conf_offset; - value = *(volatile unsigned int *)addr; - mb(); - - return value; + addr = __va(MK_RC_CFG(node, rc) | offset); + return readl(addr); } static inline void -write_rc_conf(unsigned long node, unsigned long rc_index, - unsigned int conf_offset, unsigned int data) +write_rc_conf(unsigned long node, unsigned long rc, + unsigned int offset, unsigned int data) { - unsigned long addr; + void __iomem *addr; - addr = MK_RC_CFG(node, rc_index) | conf_offset; - *(unsigned int *)addr = data; - mb(); + addr = __va(MK_RC_CFG(node, rc) | offset); + writel(data, addr); } static inline unsigned long -read_piu_ior0(unsigned long node, unsigned long rc_index, +read_piu_ior0(unsigned long node, unsigned long rc, unsigned int reg) { - unsigned long addr; - unsigned long value; - - addr = MK_PIU_IOR0(node, rc_index) + reg; - value = *(volatile unsigned long __iomem *)addr; - mb(); + void __iomem *addr; - return value; + addr = __va(MK_PIU_IOR0(node, rc) + reg); + return readq(addr); } static inline void -write_piu_ior0(unsigned long node, unsigned long rc_index, +write_piu_ior0(unsigned long node, unsigned long rc, unsigned int reg, unsigned long data) { - unsigned long addr; + void __iomem *addr; - addr = MK_PIU_IOR0(node, rc_index) + reg; - *(unsigned long __iomem *)addr = data; - mb(); + addr = __va(MK_PIU_IOR0(node, rc) + reg); + writeq(data, addr); } static inline unsigned long -read_piu_ior1(unsigned long node, unsigned long rc_index, +read_piu_ior1(unsigned long node, unsigned long rc, unsigned int reg) { - unsigned long addr, value; + void __iomem *addr; - addr = MK_PIU_IOR1(node, rc_index) + reg; - value = *(volatile unsigned long __iomem *)addr; - mb(); - - return value; 
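
Note on the new stacktrace.h above: walk_stackframe() drives the unwinder, starting from a pt_regs snapshot or the given task and invoking the callback once per frame; by the usual convention (an assumption here, not spelled out in the header) a non-zero return from the callback stops the walk. A minimal consumer sketch with a hypothetical callback:

	/* Hypothetical callback: record up to 'max' return addresses. */
	struct trace_buf {
		unsigned long *pcs;
		unsigned int nr, max;
	};

	static int save_pc(unsigned long pc, void *data)
	{
		struct trace_buf *tb = data;

		if (tb->nr >= tb->max)
			return 1;	/* non-zero stops the walk */
		tb->pcs[tb->nr++] = pc;
		return 0;
	}

	/* walk_stackframe(current, NULL, save_pc, &tb); */

on_accessible_stack() bounds each frame pointer to a known stack before the walker dereferences it, which is what keeps the unwind safe against corrupted frames.
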
+ addr = __va(MK_PIU_IOR1(node, rc) + reg); + return readq(addr); } static inline void -write_piu_ior1(unsigned long node, unsigned long rc_index, +write_piu_ior1(unsigned long node, unsigned long rc, unsigned int reg, unsigned long data) { - unsigned long addr; + void __iomem *addr; - addr = MK_PIU_IOR1(node, rc_index) + reg; - *(volatile unsigned long __iomem *)addr = data; - mb(); + addr = __va(MK_PIU_IOR1(node, rc) + reg); + writeq(data, addr); } static inline unsigned long sw64_io_read(unsigned long node, unsigned long reg) { - unsigned long addr, value; - - addr = PAGE_OFFSET | SW64_IO_BASE(node) | reg; - value = *(volatile unsigned long __iomem *)addr; - mb(); + void __iomem *addr; - return value; + addr = __va(SW64_IO_BASE(node) | reg); + return readq(addr); } static inline void sw64_io_write(unsigned long node, unsigned long reg, unsigned long data) { - unsigned long addr; + void __iomem *addr; - addr = PAGE_OFFSET | SW64_IO_BASE(node) | reg; - *(volatile unsigned long __iomem *)addr = data; - mb(); + addr = __va(SW64_IO_BASE(node) | reg); + writeq(data, addr); } #endif diff --git a/arch/sw_64/include/asm/switch_to.h b/arch/sw_64/include/asm/switch_to.h index 22045b24755747308e7e17ced4a6eb9034a622fe..d503fc59390f51d0ba34d21785f0ac1811b453a8 100644 --- a/arch/sw_64/include/asm/switch_to.h +++ b/arch/sw_64/include/asm/switch_to.h @@ -2,12 +2,41 @@ #ifndef _ASM_SW64_SWITCH_TO_H #define _ASM_SW64_SWITCH_TO_H -struct task_struct; -extern struct task_struct *__switch_to(unsigned long, struct task_struct *); +#include + +extern void __fpstate_save(struct task_struct *save_to); +extern void __fpstate_restore(struct task_struct *restore_from); +extern struct task_struct *__switch_to(unsigned long pcb, + struct task_struct *prev, struct task_struct *next); extern void restore_da_match_after_sched(void); -#define switch_to(P, N, L) \ + +static inline void fpstate_save(struct task_struct *task) +{ + if (likely(!(task->flags & PF_KTHREAD))) + __fpstate_save(task); +} + +static inline void fpstate_restore(struct task_struct *task) +{ + if (likely(!(task->flags & PF_KTHREAD))) + __fpstate_restore(task); +} + +static inline void __switch_to_aux(struct task_struct *prev, + struct task_struct *next) +{ + fpstate_save(prev); + fpstate_restore(next); +} + + +#define switch_to(prev, next, last) \ do { \ - (L) = __switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P));\ + struct task_struct *__prev = (prev); \ + struct task_struct *__next = (next); \ + __u64 __nextpcb = virt_to_phys(&task_thread_info(__next)->pcb); \ + __switch_to_aux(__prev, __next); \ + (last) = __switch_to(__nextpcb, __prev, __next); \ check_mmu_context(); \ } while (0) diff --git a/arch/sw_64/include/asm/thread_info.h b/arch/sw_64/include/asm/thread_info.h index cffb09fc6262802ee27b5f9e38afaf411d014add..33b95f815448456b0bdd8723f102dc826285ab96 100644 --- a/arch/sw_64/include/asm/thread_info.h +++ b/arch/sw_64/include/asm/thread_info.h @@ -9,6 +9,11 @@ #include #include +typedef struct { + unsigned long seg; +} mm_segment_t; + + struct pcb_struct { unsigned long ksp; unsigned long usp; diff --git a/arch/sw_64/include/asm/topology.h b/arch/sw_64/include/asm/topology.h index 79af6349fe80bfca3a264a95581b66198770ee22..f8242d00290b8cecef3a59582b69b09756b74cc8 100644 --- a/arch/sw_64/include/asm/topology.h +++ b/arch/sw_64/include/asm/topology.h @@ -32,28 +32,21 @@ static inline int rcid_to_package(int rcid) #ifdef CONFIG_NUMA -#ifndef CONFIG_USE_PERCPU_NUMA_NODE_ID -extern int cpuid_to_nid(int cpuid); -static inline int cpu_to_node(int 
cpu) -{ - int node; - - node = cpuid_to_nid(cpu); - -#ifdef DEBUG_NUMA - BUG_ON(node < 0); -#endif - - return node; -} - -static inline void set_cpu_numa_node(int cpu, int node) { } -#endif /* CONFIG_USE_PERCPU_NUMA_NODE_ID */ - +#ifndef CONFIG_DEBUG_PER_CPU_MAPS +extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +/* Returns a pointer to the cpumask of CPUs on Node 'node'. */ +#define cpumask_of_node(node) ((node) == NUMA_NO_NODE ? \ + cpu_all_mask : \ + node_to_cpumask_map[node]) +#else extern const struct cpumask *cpumask_of_node(int node); +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ + extern void numa_add_cpu(unsigned int cpu); extern void numa_remove_cpu(unsigned int cpu); extern void numa_store_cpu_info(unsigned int cpu); +extern int __node_distance(int from, int to); +#define node_distance(a, b) __node_distance(a, b) #define parent_node(node) (node) #define cpumask_of_pcibus(bus) (cpu_online_mask) #else /* !CONFIG_NUMA */ diff --git a/arch/sw_64/include/asm/trace_clock.h b/arch/sw_64/include/asm/trace_clock.h deleted file mode 100644 index 57324215a83749955f63a087a3901c73fb80436f..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/trace_clock.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_TRACE_CLOCK_H -#define _ASM_SW64_TRACE_CLOCK_H - -#include -#include - -#define ARCH_TRACE_CLOCKS - -#endif /* _ASM_SW64_TRACE_CLOCK_H */ diff --git a/arch/sw_64/include/asm/types.h b/arch/sw_64/include/asm/types.h deleted file mode 100644 index 37d626269a026be08ea074e134acd0c083675a41..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/types.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_TYPES_H -#define _ASM_SW64_TYPES_H - -#include - -#endif /* _ASM_SW64_TYPES_H */ diff --git a/arch/sw_64/include/asm/unaligned.h b/arch/sw_64/include/asm/unaligned.h deleted file mode 100644 index 91fdff923ce5b3712080243718f46caf6f13dfdc..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/unaligned.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_UNALIGNED_H -#define _ASM_SW64_UNALIGNED_H - -#include -#include -#include - -#define get_unaligned __get_unaligned_le -#define put_unaligned __put_unaligned_le - -#endif /* _ASM_SW64_UNALIGNED_H */ diff --git a/arch/sw_64/include/asm/unistd.h b/arch/sw_64/include/asm/unistd.h index c1778adf4fbab44f1b577293d8eb3cd1536a44d0..6d1b8d1e201167d56c5e6ca8e0ecff89160b7af1 100644 --- a/arch/sw_64/include/asm/unistd.h +++ b/arch/sw_64/include/asm/unistd.h @@ -4,7 +4,7 @@ #include -#define NR_SYSCALLS 519 +#define NR_SYSCALLS __NR_syscalls #define NR_syscalls NR_SYSCALLS #define __ARCH_WANT_NEW_STAT @@ -22,5 +22,6 @@ #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_CLONE3 #endif /* _ASM_SW64_UNISTD_H */ diff --git a/arch/sw_64/include/asm/user.h b/arch/sw_64/include/asm/user.h deleted file mode 100644 index a6ff58097ea3ca993bce1287cf064c58cfe7dae8..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/user.h +++ /dev/null @@ -1,53 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_USER_H -#define _ASM_SW64_USER_H - -#include -#include - -#include -#include - -/* - * Core file format: The core file is written in such a way that gdb - * can understand it and provide useful information to the user (under - * linux we use the `trad-core' bfd). 
The file contents are as follows: - * - * upage: 1 page consisting of a user struct that tells gdb - * what is present in the file. Directly after this is a - * copy of the task_struct, which is currently not used by gdb, - * but it may come in handy at some point. All of the registers - * are stored as part of the upage. The upage should always be - * only one page long. - * data: The data segment follows next. We use current->end_text to - * current->brk to pick up all of the user variables, plus any memory - * that may have been sbrk'ed. No attempt is made to determine if a - * page is demand-zero or if a page is totally unused, we just cover - * the entire range. All of the addresses are rounded in such a way - * that an integral number of pages is written. - * stack: We need the stack information in order to get a meaningful - * backtrace. We need to write the data from usp to - * current->start_stack, so we round each of these in order to be able - * to write an integer number of pages. - */ -struct user { - unsigned long regs[EF_SIZE/8+32]; /* integer and fp regs */ - size_t u_tsize; /* text size (pages) */ - size_t u_dsize; /* data size (pages) */ - size_t u_ssize; /* stack size (pages) */ - unsigned long start_code; /* text starting address */ - unsigned long start_data; /* data starting address */ - unsigned long start_stack; /* stack starting address */ - long signal; /* signal causing core dump */ - unsigned long u_ar0; /* help gdb find registers */ - unsigned long magic; /* identifies a core file */ - char u_comm[32]; /* user command name */ -}; - -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_DATA_START_ADDR (u.start_data) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) - -#endif /* _ASM_SW64_USER_H */ diff --git a/arch/sw_64/include/asm/vdso.h b/arch/sw_64/include/asm/vdso.h index 8ecd5add42ad89871fbc23edbc4a165777f4da01..7a2e23c648f3d48c50ab72709e9d2091aee9410c 100644 --- a/arch/sw_64/include/asm/vdso.h +++ b/arch/sw_64/include/asm/vdso.h @@ -41,8 +41,8 @@ struct vdso_data { u64 xtime_sec; u64 xtime_nsec; - u32 wall_to_mono_sec; - u32 wall_to_mono_nsec; + u64 wall_to_mono_sec; + u64 wall_to_mono_nsec; u32 cs_shift; u32 cs_mult; u64 cs_cycle_last; diff --git a/arch/sw_64/include/asm/vga.h b/arch/sw_64/include/asm/vga.h deleted file mode 100644 index 3ca5c397b9460c5fab43e3c0ceb3cd4cc363a738..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/asm/vga.h +++ /dev/null @@ -1,85 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Access to VGA videoram - * - * (c) 1998 Martin Mares - */ - -#ifndef _ASM_SW64_VGA_H -#define _ASM_SW64_VGA_H - -#include - -#define VT_BUF_HAVE_RW -#define VT_BUF_HAVE_MEMSETW -#define VT_BUF_HAVE_MEMCPYW - -static inline void scr_writew(u16 val, volatile u16 *addr) -{ - if (__is_ioaddr(addr)) - __raw_writew(val, (volatile u16 __iomem *) addr); - else - *addr = val; -} - -static inline u16 scr_readw(volatile const u16 *addr) -{ - if (__is_ioaddr(addr)) - return __raw_readw((volatile const u16 __iomem *) addr); - else - return *addr; -} - -static inline void scr_memsetw(u16 *s, u16 c, unsigned int count) -{ - if (__is_ioaddr(s)) - memsetw_io((u16 __iomem *) s, c, count); - else - memsetw(s, c, count); -} - -/* Do not trust that the usage will be correct; analyze the arguments. */ -extern void scr_memcpyw(u16 *d, const u16 *s, unsigned int count); - -/* - * ??? These are currently only used for downloading character sets. As - * such, they don't need memory barriers. 
Is this all they are intended - * to be used for? - */ -#define vga_readb(a) readb((u8 __iomem *)(a)) -#define vga_writeb(v, a) writeb(v, (u8 __iomem *)(a)) - -#ifdef CONFIG_VGA_HOSE -#include -#include - -extern struct pci_controller *pci_vga_hose; - -# define __is_port_vga(a) \ - (((a) >= 0x3b0) && ((a) < 0x3e0) && \ - ((a) != 0x3b3) && ((a) != 0x3d3)) - -# define __is_mem_vga(a) \ - (((a) >= 0xa0000) && ((a) <= 0xc0000)) - -# define FIXUP_IOADDR_VGA(a) do { \ - if (pci_vga_hose && __is_port_vga(a)) \ - (a) += pci_vga_hose->io_space->start; \ -} while (0) - -# define FIXUP_MEMADDR_VGA(a) do { \ - if (pci_vga_hose && __is_mem_vga(a)) \ - (a) += pci_vga_hose->mem_space->start; \ -} while (0) - -#else /* CONFIG_VGA_HOSE */ -# define pci_vga_hose 0 -# define __is_port_vga(a) 0 -# define __is_mem_vga(a) 0 -# define FIXUP_IOADDR_VGA(a) -# define FIXUP_MEMADDR_VGA(a) -#endif /* CONFIG_VGA_HOSE */ - -#define VGA_MAP_MEM(x, s) ((unsigned long)ioremap(x, s)) - -#endif diff --git a/arch/sw_64/include/asm/wrperfmon.h b/arch/sw_64/include/asm/wrperfmon.h index eaa6735b5a257a329005e80b41c001885c313eb4..15f7f6beb07c0606924c0c1e74a88762f8b88ed5 100644 --- a/arch/sw_64/include/asm/wrperfmon.h +++ b/arch/sw_64/include/asm/wrperfmon.h @@ -33,10 +33,12 @@ #define PC0_RAW_BASE 0x0 #define PC1_RAW_BASE 0x100 -#define PC0_MIN 0x0 #define PC0_MAX 0xF -#define PC1_MIN 0x0 -#define PC1_MAX 0x37 +#define PC1_MAX 0x3D + +#define SW64_PERFCTRL_KM 2 +#define SW64_PERFCTRL_UM 3 +#define SW64_PERFCTRL_AM 4 /* pc0 events */ #define PC0_INSTRUCTIONS 0x0 diff --git a/arch/sw_64/include/uapi/asm/Kbuild b/arch/sw_64/include/uapi/asm/Kbuild index a01bfb9600eca9da02d89e9adcea72a20d86a3d9..15700040f13870d24902a9cb9ed60961b6144cca 100644 --- a/arch/sw_64/include/uapi/asm/Kbuild +++ b/arch/sw_64/include/uapi/asm/Kbuild @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 # UAPI Header export list +generic-y += kvm_para.h generated-y += unistd_64.h diff --git a/arch/sw_64/include/uapi/asm/a.out.h b/arch/sw_64/include/uapi/asm/a.out.h deleted file mode 100644 index addb648b8ed67d754d963458778223411aeb0150..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/a.out.h +++ /dev/null @@ -1,88 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_A_OUT_H -#define _UAPI_ASM_SW64_A_OUT_H - -#include - -/* - * ECOFF header structs. ECOFF files consist of: - * - a file header (struct filehdr), - * - an a.out header (struct aouthdr), - * - one or more section headers (struct scnhdr). - * The filhdr's "f_nscns" field contains the - * number of section headers. - */ - -struct filehdr { - /* "file" header */ - __u16 f_magic, f_nscns; - __u32 f_timdat; - __u64 f_symptr; - __u32 f_nsyms; - __u16 f_opthdr, f_flags; -}; - -struct aouthdr { - __u64 info; /* after that it looks quite normal.. */ - __u64 tsize; - __u64 dsize; - __u64 bsize; - __u64 entry; - __u64 text_start; /* with a few additions that actually make sense */ - __u64 data_start; - __u64 bss_start; - __u32 gprmask, fprmask; /* bitmask of general & floating point regs used in binary */ - __u64 gpvalue; -}; - -struct scnhdr { - char s_name[8]; - __u64 s_paddr; - __u64 s_vaddr; - __u64 s_size; - __u64 s_scnptr; - __u64 s_relptr; - __u64 s_lnnoptr; - __u16 s_nreloc; - __u16 s_nlnno; - __u32 s_flags; -}; - -struct exec { - /* "file" header */ - struct filehdr fh; - struct aouthdr ah; -}; - -/* - * Define's so that the kernel exec code can access the a.out header - * fields... 
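The accessor macros that follow map the ECOFF aggregate back onto the classic a.out field names, and N_TXTOFF rounds the combined header size up to the 16-byte SCNROUND boundary with the usual power-of-two mask trick. A standalone sketch of that computation, using hypothetical header sizes rather than the real struct layouts:

#include <stdio.h>

/* the align-up idiom behind N_TXTOFF; the alignment must be a power of two */
static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	/* hypothetical: a 104-byte struct exec plus three 64-byte
	 * section headers */
	unsigned long hdrs = 104 + 3 * 64;

	printf("text offset: %lu\n", align_up(hdrs, 16));	/* 296 -> 304 */
	return 0;
}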
- */ -#define a_info ah.info -#define a_text ah.tsize -#define a_data ah.dsize -#define a_bss ah.bsize -#define a_entry ah.entry -#define a_textstart ah.text_start -#define a_datastart ah.data_start -#define a_bssstart ah.bss_start -#define a_gprmask ah.gprmask -#define a_fprmask ah.fprmask -#define a_gpvalue ah.gpvalue - -#define N_TXTADDR(x) ((x).a_textstart) -#define N_DATADDR(x) ((x).a_datastart) -#define N_BSSADDR(x) ((x).a_bssstart) -#define N_DRSIZE(x) 0 -#define N_TRSIZE(x) 0 -#define N_SYMSIZE(x) 0 - -#define AOUTHSZ sizeof(struct aouthdr) -#define SCNHSZ sizeof(struct scnhdr) -#define SCNROUND 16 - -#define N_TXTOFF(x) \ - ((long) N_MAGIC(x) == ZMAGIC ? 0 : \ - (sizeof(struct exec) + (x).fh.f_nscns * SCNHSZ + SCNROUND - 1) & ~(SCNROUND - 1)) - -#endif /* _UAPI_ASM_SW64_A_OUT_H */ diff --git a/arch/sw_64/include/uapi/asm/auxvec.h b/arch/sw_64/include/uapi/asm/auxvec.h index 59854f3ac501952bd55aa91f823e47af1de89fbb..309a8294be7a839fac7c55fe5959d8b1ad1404fc 100644 --- a/arch/sw_64/include/uapi/asm/auxvec.h +++ b/arch/sw_64/include/uapi/asm/auxvec.h @@ -1,28 +1,11 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_AUXVEC_H #define _UAPI_ASM_SW64_AUXVEC_H -/* Reserve these numbers for any future use of a VDSO. */ -#if 1 -#define AT_SYSINFO 32 +/* VDSO location. */ #define AT_SYSINFO_EHDR 33 -#endif -/* - * More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the - * value is -1, then the cache doesn't exist. Otherwise: - * - * bit 0-3: Cache set-associativity; 0 means fully associative. - * bit 4-7: Log2 of cacheline size. - * bit 8-31: Size of the entire cache >> 8. - * bit 32-63: Reserved. - */ - -#define AT_L1I_CACHESHAPE 34 -#define AT_L1D_CACHESHAPE 35 -#define AT_L2_CACHESHAPE 36 -#define AT_L3_CACHESHAPE 37 - -#define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */ +/* entries in ARCH_DLINFO */ +#define AT_VECTOR_SIZE_ARCH 1 #endif /* _UAPI_ASM_SW64_AUXVEC_H */ diff --git a/arch/sw_64/include/uapi/asm/bitsperlong.h b/arch/sw_64/include/uapi/asm/bitsperlong.h index 5d2c677a86b80ea42add009167429e1aa69f65ed..712c823e23d82cc177f74a77fed021a68e35a941 100644 --- a/arch/sw_64/include/uapi/asm/bitsperlong.h +++ b/arch/sw_64/include/uapi/asm/bitsperlong.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_BITSPERLONG_H #define _UAPI_ASM_SW64_BITSPERLONG_H diff --git a/arch/sw_64/include/uapi/asm/byteorder.h b/arch/sw_64/include/uapi/asm/byteorder.h index 1b1698df58ca68cd11eead76be10a6a9d9347c18..ededdd045e96b2dc915be110f6d278a5bbc58654 100644 --- a/arch/sw_64/include/uapi/asm/byteorder.h +++ b/arch/sw_64/include/uapi/asm/byteorder.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_BYTEORDER_H #define _UAPI_ASM_SW64_BYTEORDER_H diff --git a/arch/sw_64/include/uapi/asm/compiler.h b/arch/sw_64/include/uapi/asm/compiler.h index e5cf0fb170fa2626adfb5a13cf764d9c2f778599..64786df0f2668734957147e7ba30dbb52feb8dd1 100644 --- a/arch/sw_64/include/uapi/asm/compiler.h +++ b/arch/sw_64/include/uapi/asm/compiler.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_COMPILER_H #define _UAPI_ASM_SW64_COMPILER_H diff --git a/arch/sw_64/include/uapi/asm/console.h b/arch/sw_64/include/uapi/asm/console.h deleted file mode 100644 index 
91246b759ecf1c940906a7b0914e821aff23a0ec..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/console.h +++ /dev/null @@ -1,51 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_CONSOLE_H -#define _UAPI_ASM_SW64_CONSOLE_H - -/* - * Console callback routine numbers - */ -#define CCB_GETC 0x01 -#define CCB_PUTS 0x02 -#define CCB_RESET_TERM 0x03 -#define CCB_SET_TERM_INT 0x04 -#define CCB_SET_TERM_CTL 0x05 -#define CCB_PROCESS_KEYCODE 0x06 -#define CCB_OPEN_CONSOLE 0x07 -#define CCB_CLOSE_CONSOLE 0x08 - -#define CCB_OPEN 0x10 -#define CCB_CLOSE 0x11 -#define CCB_IOCTL 0x12 -#define CCB_READ 0x13 -#define CCB_WRITE 0x14 - -#define CCB_SET_ENV 0x20 -#define CCB_RESET_ENV 0x21 -#define CCB_GET_ENV 0x22 -#define CCB_SAVE_ENV 0x23 - -#define CCB_PSWITCH 0x30 -#define CCB_BIOS_EMUL 0x32 - -/* - * Environment variable numbers - */ -#define ENV_AUTO_ACTION 0x01 -#define ENV_BOOT_DEV 0x02 -#define ENV_BOOTDEF_DEV 0x03 -#define ENV_BOOTED_DEV 0x04 -#define ENV_BOOT_FILE 0x05 -#define ENV_BOOTED_FILE 0x06 -#define ENV_BOOT_OSFLAGS 0x07 -#define ENV_BOOTED_OSFLAGS 0x08 -#define ENV_BOOT_RESET 0x09 -#define ENV_DUMP_DEV 0x0A -#define ENV_ENABLE_AUDIT 0x0B -#define ENV_LICENSE 0x0C -#define ENV_CHAR_SET 0x0D -#define ENV_LANGUAGE 0x0E -#define ENV_TTY_DEV 0x0F - - -#endif /* _UAPI_ASM_SW64_CONSOLE_H */ diff --git a/arch/sw_64/include/uapi/asm/errno.h b/arch/sw_64/include/uapi/asm/errno.h index 04b07f30c787d9ab27d00bfd0b82d3f6889e3677..0d8438f6bd402aea665d96c8f7d983271ad91d24 100644 --- a/arch/sw_64/include/uapi/asm/errno.h +++ b/arch/sw_64/include/uapi/asm/errno.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_ERRNO_H #define _UAPI_ASM_SW64_ERRNO_H diff --git a/arch/sw_64/include/uapi/asm/fcntl.h b/arch/sw_64/include/uapi/asm/fcntl.h index 29c3aece8b555dbb265f6db7693ad3827b5bc70d..99e1a31c5e8606000808bd62c6a6db25c0982501 100644 --- a/arch/sw_64/include/uapi/asm/fcntl.h +++ b/arch/sw_64/include/uapi/asm/fcntl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_FCNTL_H #define _UAPI_ASM_SW64_FCNTL_H diff --git a/arch/sw_64/include/uapi/asm/fpu.h b/arch/sw_64/include/uapi/asm/fpu.h index 9b25f97e6a3a0416c533ad8d4995c29b393d671c..035ca65b1ba38a711348e5bb85c1b22f5d699eef 100644 --- a/arch/sw_64/include/uapi/asm/fpu.h +++ b/arch/sw_64/include/uapi/asm/fpu.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_FPU_H #define _UAPI_ASM_SW64_FPU_H diff --git a/arch/sw_64/include/uapi/asm/gentrap.h b/arch/sw_64/include/uapi/asm/gentrap.h index 4345058291fbc6d6735b22cd73b1a140e4085538..3786b8b52add336464589167bf99296d59e74c65 100644 --- a/arch/sw_64/include/uapi/asm/gentrap.h +++ b/arch/sw_64/include/uapi/asm/gentrap.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_GENTRAP_H #define _UAPI_ASM_SW64_GENTRAP_H diff --git a/arch/sw_64/include/uapi/asm/hmcall.h b/arch/sw_64/include/uapi/asm/hmcall.h index 524101102fb8a2bb2b5010fc7d9cdb4fd3d95f8b..f10378ba99c8042db62524c013ba64a44a5c2d48 100644 --- a/arch/sw_64/include/uapi/asm/hmcall.h +++ b/arch/sw_64/include/uapi/asm/hmcall.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef 
_UAPI_ASM_SW64_HMCALL_H #define _UAPI_ASM_SW64_HMCALL_H diff --git a/arch/sw_64/include/uapi/asm/ioctl.h b/arch/sw_64/include/uapi/asm/ioctl.h index d62f10a6fa643c6361476039bb04b7303ecbb760..fb5267b034fca832f44e0c0794b5d0248f0e86b8 100644 --- a/arch/sw_64/include/uapi/asm/ioctl.h +++ b/arch/sw_64/include/uapi/asm/ioctl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_IOCTL_H #define _UAPI_ASM_SW64_IOCTL_H diff --git a/arch/sw_64/include/uapi/asm/ioctls.h b/arch/sw_64/include/uapi/asm/ioctls.h index eab34173f222fd2bab60af216bb66717d7cddb73..db8e456290e6592da5e15e7d68b4449729b9ffb9 100644 --- a/arch/sw_64/include/uapi/asm/ioctls.h +++ b/arch/sw_64/include/uapi/asm/ioctls.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_IOCTLS_H #define _UAPI_ASM_SW64_IOCTLS_H @@ -52,20 +52,20 @@ #define TIOCMBIS 0x5416 #define TIOCMBIC 0x5417 #define TIOCMSET 0x5418 -# define TIOCM_LE 0x001 -# define TIOCM_DTR 0x002 -# define TIOCM_RTS 0x004 -# define TIOCM_ST 0x008 -# define TIOCM_SR 0x010 -# define TIOCM_CTS 0x020 -# define TIOCM_CAR 0x040 -# define TIOCM_RNG 0x080 -# define TIOCM_DSR 0x100 -# define TIOCM_CD TIOCM_CAR -# define TIOCM_RI TIOCM_RNG -# define TIOCM_OUT1 0x2000 -# define TIOCM_OUT2 0x4000 -# define TIOCM_LOOP 0x8000 +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 #define TIOCGSOFTCAR 0x5419 #define TIOCSSOFTCAR 0x541A @@ -74,14 +74,14 @@ #define TIOCGSERIAL 0x541E #define TIOCSSERIAL 0x541F #define TIOCPKT 0x5420 -# define TIOCPKT_DATA 0 -# define TIOCPKT_FLUSHREAD 1 -# define TIOCPKT_FLUSHWRITE 2 -# define TIOCPKT_STOP 4 -# define TIOCPKT_START 8 -# define TIOCPKT_NOSTOP 16 -# define TIOCPKT_DOSTOP 32 -# define TIOCPKT_IOCTL 64 +#define TIOCPKT_DATA 0 +#define TIOCPKT_FLUSHREAD 1 +#define TIOCPKT_FLUSHWRITE 2 +#define TIOCPKT_STOP 4 +#define TIOCPKT_START 8 +#define TIOCPKT_NOSTOP 16 +#define TIOCPKT_DOSTOP 32 +#define TIOCPKT_IOCTL 64 #define TIOCNOTTY 0x5422 @@ -113,7 +113,7 @@ #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ #define TIOCSERGETLSR 0x5459 /* Get line status register */ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ -# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ #define TIOCSERGETMULTI 0x545A /* Get multiport config */ #define TIOCSERSETMULTI 0x545B /* Set multiport config */ diff --git a/arch/sw_64/include/uapi/asm/ipcbuf.h b/arch/sw_64/include/uapi/asm/ipcbuf.h deleted file mode 100644 index f063105ba09f307c72e25f1374e3453497481eca..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/ipcbuf.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_IPCBUF_H -#define _UAPI_ASM_SW64_IPCBUF_H - -#include - -#endif diff --git a/arch/sw_64/include/uapi/asm/kvm.h b/arch/sw_64/include/uapi/asm/kvm.h index 47877b56e980facf32d680898c389a8d879ea75a..ff1b6e7f096f77405394fa307a4f64b304598e33 100644 --- a/arch/sw_64/include/uapi/asm/kvm.h +++ b/arch/sw_64/include/uapi/asm/kvm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: 
GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_KVM_H #define _UAPI_ASM_SW64_KVM_H @@ -114,16 +114,4 @@ struct kvm_sync_regs { struct kvm_sregs { }; - -struct swvm_mem_bank { - unsigned long guest_phys_addr; - unsigned long host_phys_addr; - unsigned long host_addr; - unsigned long size; -}; - -struct swvm_mem { - struct swvm_mem_bank membank[SWVM_NUM_NUMA_MEMBANKS]; -}; - #endif /* _UAPI_ASM_SW64_KVM_H */ diff --git a/arch/sw_64/include/uapi/asm/kvm_para.h b/arch/sw_64/include/uapi/asm/kvm_para.h deleted file mode 100644 index 405840b0e1d8f827e7b1db8166f1cc12079a98e9..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/kvm_para.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_KVM_PARA_H -#define _UAPI_ASM_SW64_KVM_PARA_H - -#include - -#endif diff --git a/arch/sw_64/include/uapi/asm/mman.h b/arch/sw_64/include/uapi/asm/mman.h index f9ac285702a522819102af9e940d32ff9401a2b0..57970e1e3a2cb6514b5650b012738b74f4c82238 100644 --- a/arch/sw_64/include/uapi/asm/mman.h +++ b/arch/sw_64/include/uapi/asm/mman.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_MMAN_H #define _UAPI_ASM_SW64_MMAN_H diff --git a/arch/sw_64/include/uapi/asm/msgbuf.h b/arch/sw_64/include/uapi/asm/msgbuf.h deleted file mode 100644 index d61eea10813d65698f7edeef2a756a6782f833a2..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/msgbuf.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_MSGBUF_H -#define _UAPI_ASM_SW64_MSGBUF_H - -/* - * The msqid64_ds structure for sw64 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. 
- * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct msqid64_ds { - struct ipc64_perm msg_perm; - long msg_stime; /* last msgsnd time */ - long msg_rtime; /* last msgrcv time */ - long msg_ctime; /* last change time */ - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* _UAPI_ASM_SW64_MSGBUF_H */ diff --git a/arch/sw_64/include/uapi/asm/param.h b/arch/sw_64/include/uapi/asm/param.h index 75eeac6a7dc85c0428f5c18e570e5427b6c87fe2..d38e8202dd97e8b4867e70bb3853da9440a1a0c1 100644 --- a/arch/sw_64/include/uapi/asm/param.h +++ b/arch/sw_64/include/uapi/asm/param.h @@ -1,16 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_PARAM_H #define _UAPI_ASM_SW64_PARAM_H -#define HZ 100 - #define EXEC_PAGESIZE 8192 -#ifndef NOGROUP -#define NOGROUP (-1) -#endif - -#define MAXHOSTNAMELEN 64 /* max length of hostname */ - +#include #endif /* _UAPI_ASM_SW64_PARAM_H */ diff --git a/arch/sw_64/include/uapi/asm/perf_regs.h b/arch/sw_64/include/uapi/asm/perf_regs.h index 426ae642fcc8505ce0c9815e6b91ed2b56630116..1378a7397951d6ba3085cf64bdda6ef0453a3932 100644 --- a/arch/sw_64/include/uapi/asm/perf_regs.h +++ b/arch/sw_64/include/uapi/asm/perf_regs.h @@ -13,6 +13,13 @@ enum perf_event_sw64_regs { PERF_REG_SW64_R6, PERF_REG_SW64_R7, PERF_REG_SW64_R8, + PERF_REG_SW64_R9, + PERF_REG_SW64_R10, + PERF_REG_SW64_R11, + PERF_REG_SW64_R12, + PERF_REG_SW64_R13, + PERF_REG_SW64_R14, + PERF_REG_SW64_R15, PERF_REG_SW64_R19, PERF_REG_SW64_R20, PERF_REG_SW64_R21, diff --git a/arch/sw_64/include/uapi/asm/poll.h b/arch/sw_64/include/uapi/asm/poll.h deleted file mode 100644 index 5e2de318205050bc66f0a72ff8b102d6d2042865..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/poll.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_POLL_H -#define _UAPI_ASM_SW64_POLL_H - -#include - -#endif diff --git a/arch/sw_64/include/uapi/asm/posix_types.h b/arch/sw_64/include/uapi/asm/posix_types.h deleted file mode 100644 index fb7badf78c3ccb2db967989760a2f9fd9deb8358..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/posix_types.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_POSIX_TYPES_H -#define _UAPI_ASM_SW64_POSIX_TYPES_H - -/* - * This file is generally used by user-level software, so you need to - * be a little careful about namespace pollution etc. Also, we cannot - * assume GCC is being used. 
- */ - -typedef unsigned long __kernel_ino_t; -#define __kernel_ino_t __kernel_ino_t - -typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ - -#include - -#endif /* _UAPI_ASM_SW64_POSIX_TYPES_H */ diff --git a/arch/sw_64/include/uapi/asm/ptrace.h b/arch/sw_64/include/uapi/asm/ptrace.h index 7cf7bf5a75b4ccf0fc3e005d5364e31f038b3dac..80bad067fc15523e92a61f1cfed0a159e7803677 100644 --- a/arch/sw_64/include/uapi/asm/ptrace.h +++ b/arch/sw_64/include/uapi/asm/ptrace.h @@ -1,75 +1,31 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_PTRACE_H #define _UAPI_ASM_SW64_PTRACE_H +#include +#ifndef __ASSEMBLY__ /* - * This struct defines the way the registers are stored on the - * kernel stack during a system call or other kernel entry - * - * NOTE! I want to minimize the overhead of system calls, so this - * struct has as little information as possible. I does not have - * - * - floating point regs: the kernel doesn't change those - * - r9-15: saved by the C compiler - * - * This makes "fork()" and "exec()" a bit more complex, but should - * give us low system call latency. + * User structures for general purpose, floating point and debug registers. */ +struct user_pt_regs { + __u64 regs[31]; + __u64 pc; + __u64 pstate; +}; -struct pt_regs { - unsigned long r0; - unsigned long r1; - unsigned long r2; - unsigned long r3; - unsigned long r4; - unsigned long r5; - unsigned long r6; - unsigned long r7; - unsigned long r8; - unsigned long r19; - unsigned long r20; - unsigned long r21; - unsigned long r22; - unsigned long r23; - unsigned long r24; - unsigned long r25; - unsigned long r26; - unsigned long r27; - unsigned long r28; - unsigned long hae; -/* JRP - These are the values provided to a0-a2 by HMcode */ - unsigned long trap_a0; - unsigned long trap_a1; - unsigned long trap_a2; -/* These are saved by HMcode: */ - unsigned long ps; - unsigned long pc; - unsigned long gp; - unsigned long r16; - unsigned long r17; - unsigned long r18; +/* 256 bits aligned for simd */ +struct fpreg { + __u64 v[4] __attribute__((aligned(32))); }; -/* - * This is the extended stack used by signal handlers and the context - * switcher: it's pushed after the normal "struct pt_regs". 
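The user_pt_regs layout introduced above is what debuggers now see, fetched through the regset interface rather than the PTRACE_GETREGS-style requests removed just below. A hedged user-space sketch, assuming the kernel exposes user_pt_regs as the NT_PRSTATUS regset (the usual arrangement for architectures that define this layout):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>		/* NT_PRSTATUS */
#include <asm/ptrace.h>		/* struct user_pt_regs */

/* pid must already be ptrace-attached and stopped */
static int dump_pc(pid_t pid)
{
	struct user_pt_regs regs;
	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) < 0)
		return -1;
	printf("pc = %#llx\n", (unsigned long long)regs.pc);
	return 0;
}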
- */ -struct switch_stack { - unsigned long r9; - unsigned long r10; - unsigned long r11; - unsigned long r12; - unsigned long r13; - unsigned long r14; - unsigned long r15; - unsigned long r26; +struct user_fpsimd_state { + struct fpreg fp[31]; + __u64 fpcr; + __u64 __reserved[3]; }; +#endif -#define PTRACE_GETREGS 12 /* get general purpose registers */ -#define PTRACE_SETREGS 13 /* set general purpose registers */ -#define PTRACE_GETFPREGS 14 /* get floating-point registers */ -#define PTRACE_SETFPREGS 15 /* set floating-point registers */ /* PTRACE_ATTACH is 16 */ /* PTRACE_DETACH is 17 */ diff --git a/arch/sw_64/include/uapi/asm/reg.h b/arch/sw_64/include/uapi/asm/reg.h index a19dc4cbf744afb04377a8c66cd2f5a09cc155cb..e692e45a4936f4e492fca3f85aee3344fb572646 100644 --- a/arch/sw_64/include/uapi/asm/reg.h +++ b/arch/sw_64/include/uapi/asm/reg.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_REG_H #define _UAPI_ASM_SW64_REG_H diff --git a/arch/sw_64/include/uapi/asm/regdef.h b/arch/sw_64/include/uapi/asm/regdef.h index 5031abc0947af4aeebde27cbfb3017be2885c9a8..ad4475b7943517e62b8f0935c442a260bed9fa22 100644 --- a/arch/sw_64/include/uapi/asm/regdef.h +++ b/arch/sw_64/include/uapi/asm/regdef.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_REGDEF_H #define _UAPI_ASM_SW64_REGDEF_H diff --git a/arch/sw_64/include/uapi/asm/resource.h b/arch/sw_64/include/uapi/asm/resource.h index ff7dc683c195984e3815b73a92a2fb37cfe27290..fecca2214849d99ad460c547ff93a8c5d2f4e1c9 100644 --- a/arch/sw_64/include/uapi/asm/resource.h +++ b/arch/sw_64/include/uapi/asm/resource.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_RESOURCE_H #define _UAPI_ASM_SW64_RESOURCE_H diff --git a/arch/sw_64/include/uapi/asm/sembuf.h b/arch/sw_64/include/uapi/asm/sembuf.h deleted file mode 100644 index f574390bcd5782840e0e7e3a1a83cd9519cdd090..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/sembuf.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_SEMBUF_H -#define _UAPI_ASM_SW64_SEMBUF_H - -/* - * The semid64_ds structure for sw64 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct semid64_ds { - struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - long sem_otime; /* last semop time */ - long sem_ctime; /* last change time */ - unsigned long sem_nsems; /* no. 
of semaphores in array */ - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* _UAPI_ASM_SW64_SEMBUF_H */ diff --git a/arch/sw_64/include/uapi/asm/setup.h b/arch/sw_64/include/uapi/asm/setup.h index fefd57415a3b7a75b25929706033025597571013..10ce5dba9c3066a3334f8ecfe59f569403024178 100644 --- a/arch/sw_64/include/uapi/asm/setup.h +++ b/arch/sw_64/include/uapi/asm/setup.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SETUP_H #define _UAPI_ASM_SW64_SETUP_H diff --git a/arch/sw_64/include/uapi/asm/shmbuf.h b/arch/sw_64/include/uapi/asm/shmbuf.h deleted file mode 100644 index 66d8cb5b2ba30f67745280f5ac7cae640d7c705b..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/shmbuf.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_SHMBUF_H -#define _UAPI_ASM_SW64_SHMBUF_H - -/* - * The shmid64_ds structure for sw64 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 2 miscellaneous 64-bit values - */ - -struct shmid64_ds { - struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ - long shm_atime; /* last attach time */ - long shm_dtime; /* last detach time */ - long shm_ctime; /* last change time */ - __kernel_pid_t shm_cpid; /* pid of creator */ - __kernel_pid_t shm_lpid; /* pid of last operator */ - unsigned long shm_nattch; /* no. of current attaches */ - unsigned long __unused1; - unsigned long __unused2; -}; - -struct shminfo64 { - unsigned long shmmax; - unsigned long shmmin; - unsigned long shmmni; - unsigned long shmseg; - unsigned long shmall; - unsigned long __unused1; - unsigned long __unused2; - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _UAPI_ASM_SW64_SHMBUF_H */ diff --git a/arch/sw_64/include/uapi/asm/sigcontext.h b/arch/sw_64/include/uapi/asm/sigcontext.h index c2b7cff884ebbbdf9b902588e2468a91ad450c44..facbf34e920d4f57b2ba51069e760920f8cde730 100644 --- a/arch/sw_64/include/uapi/asm/sigcontext.h +++ b/arch/sw_64/include/uapi/asm/sigcontext.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SIGCONTEXT_H #define _UAPI_ASM_SW64_SIGCONTEXT_H diff --git a/arch/sw_64/include/uapi/asm/siginfo.h b/arch/sw_64/include/uapi/asm/siginfo.h index b50afbf15f7cd2ec201cdd2ab190b9da7c14026b..4a58eea9b67c9ab254f7142aeb37bf70758b5d8a 100644 --- a/arch/sw_64/include/uapi/asm/siginfo.h +++ b/arch/sw_64/include/uapi/asm/siginfo.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SIGINFO_H #define _UAPI_ASM_SW64_SIGINFO_H diff --git a/arch/sw_64/include/uapi/asm/signal.h b/arch/sw_64/include/uapi/asm/signal.h index 71471c8c762481b596cd33b6de27908151edbb9b..5bad0adae93f3ab8ff40abe9e3842bc45a6c7503 100644 --- a/arch/sw_64/include/uapi/asm/signal.h +++ b/arch/sw_64/include/uapi/asm/signal.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SIGNAL_H #define _UAPI_ASM_SW64_SIGNAL_H diff --git a/arch/sw_64/include/uapi/asm/socket.h b/arch/sw_64/include/uapi/asm/socket.h index abfa2108522c9a68e22304e0da71fe202973374c..d47041ebe08a2991423ef3ff1e238f6081479e04 100644 --- 
a/arch/sw_64/include/uapi/asm/socket.h +++ b/arch/sw_64/include/uapi/asm/socket.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SOCKET_H #define _UAPI_ASM_SW64_SOCKET_H +#include #include /* For setsockopt(2) */ @@ -51,13 +52,9 @@ #define SO_GET_FILTER SO_ATTACH_FILTER #define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP #define SO_PEERSEC 30 #define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 19 @@ -66,9 +63,6 @@ #define SO_MARK 36 -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - #define SO_RXQ_OVFL 40 #define SO_WIFI_STATUS 41 @@ -124,4 +118,28 @@ #define SO_DETACH_REUSEPORT_BPF 68 +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + #endif /* _UAPI_ASM_SW64_SOCKET_H */ diff --git a/arch/sw_64/include/uapi/asm/sockios.h b/arch/sw_64/include/uapi/asm/sockios.h index 1f30fb881065ad6b38820d71cdf5ea0ed6486b3c..88e89dcf8300ea66394003769c351ed91c14cd55 100644 --- a/arch/sw_64/include/uapi/asm/sockios.h +++ b/arch/sw_64/include/uapi/asm/sockios.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SOCKIOS_H #define _UAPI_ASM_SW64_SOCKIOS_H diff --git a/arch/sw_64/include/uapi/asm/stat.h b/arch/sw_64/include/uapi/asm/stat.h index b1c1c5e3db22c25229a7b65236ef43daf171d8e0..d2b21128c56947bfeef183113ba6a51e149a91a1 100644 --- a/arch/sw_64/include/uapi/asm/stat.h +++ b/arch/sw_64/include/uapi/asm/stat.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_STAT_H #define _UAPI_ASM_SW64_STAT_H diff --git a/arch/sw_64/include/uapi/asm/statfs.h b/arch/sw_64/include/uapi/asm/statfs.h deleted file mode 100644 index 3b8d1e3300a91cf01f09b47603bd95d6d4fad4f7..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/statfs.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_STATFS_H -#define _UAPI_ASM_SW64_STATFS_H - -#include - -#include - -#endif diff --git a/arch/sw_64/include/uapi/asm/swab.h b/arch/sw_64/include/uapi/asm/swab.h index a3d67645aa524c025c6e02c9e35dc8e3841c73c3..275661b346ac202afed612cee1f75bc1a0b6209e 100644 --- a/arch/sw_64/include/uapi/asm/swab.h +++ b/arch/sw_64/include/uapi/asm/swab.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH 
Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_SWAB_H #define _UAPI_ASM_SW64_SWAB_H diff --git a/arch/sw_64/include/uapi/asm/sysinfo.h b/arch/sw_64/include/uapi/asm/sysinfo.h index 9d2112f8bc4d4a1c314828013dd09f1419dd96c2..667405c3447cd841cda75d7e8cf624166db70d2c 100644 --- a/arch/sw_64/include/uapi/asm/sysinfo.h +++ b/arch/sw_64/include/uapi/asm/sysinfo.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * include/asm/sysinfo.h */ diff --git a/arch/sw_64/include/uapi/asm/termbits.h b/arch/sw_64/include/uapi/asm/termbits.h index bcb9adb11e81d4ce995205e6a0e52075c8a5242c..83de6ff63234f69a7ddf3b169c4e204961640d3d 100644 --- a/arch/sw_64/include/uapi/asm/termbits.h +++ b/arch/sw_64/include/uapi/asm/termbits.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_TERMBITS_H #define _UAPI_ASM_SW64_TERMBITS_H diff --git a/arch/sw_64/include/uapi/asm/termios.h b/arch/sw_64/include/uapi/asm/termios.h index d44e218b29b5587dd7d63aab6dcc97362aa2c5da..62f4b40551b241ff1ca7fd9c1c25e65415b976c4 100644 --- a/arch/sw_64/include/uapi/asm/termios.h +++ b/arch/sw_64/include/uapi/asm/termios.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_TERMIOS_H #define _UAPI_ASM_SW64_TERMIOS_H diff --git a/arch/sw_64/include/uapi/asm/types.h b/arch/sw_64/include/uapi/asm/types.h deleted file mode 100644 index 9c605ea7bba92a69d3ac8e77c7915896f12a5734..0000000000000000000000000000000000000000 --- a/arch/sw_64/include/uapi/asm/types.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _UAPI_ASM_SW64_TYPES_H -#define _UAPI_ASM_SW64_TYPES_H - -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. - */ - -/* - * This is here because we used to use l64 for sw64 and we don't want - * to impact user mode with our change to ll64 in the kernel. - * - * However, some user programs are fine with this. They can - * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here. 
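The escape hatch described here is easy to see in action: on architectures that default to the legacy l64 ABI, as this one did, defining __SANE_USERSPACE_TYPES__ before any kernel uapi include selects int-ll64.h, so __u64 becomes unsigned long long and the usual %llu format works without casts. A minimal sketch:

/* must be defined before the first kernel uapi header is pulled in */
#define __SANE_USERSPACE_TYPES__
#include <linux/types.h>
#include <stdio.h>

int main(void)
{
	__u64 v = 1ULL << 40;

	printf("v = %llu\n", v);	/* no cast needed with int-ll64.h */
	return 0;
}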
- */ -#ifndef __KERNEL__ -#ifndef __SANE_USERSPACE_TYPES__ -#include -#else -#include -#endif /* __SANE_USERSPACE_TYPES__ */ -#endif /* __KERNEL__ */ - -#endif /* _UAPI_ASM_SW64_TYPES_H */ diff --git a/arch/sw_64/include/asm/ucontext.h b/arch/sw_64/include/uapi/asm/ucontext.h similarity index 56% rename from arch/sw_64/include/asm/ucontext.h rename to arch/sw_64/include/uapi/asm/ucontext.h index d40eebe988ef1461161921351c6812eb3a3e3d87..c5d6e24e3e5fb0d8e8b9d9db8ab4c7e1a53f4c17 100644 --- a/arch/sw_64/include/asm/ucontext.h +++ b/arch/sw_64/include/uapi/asm/ucontext.h @@ -1,6 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_SW64_UCONTEXT_H -#define _ASM_SW64_UCONTEXT_H +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_UCONTEXT_H +#define _UAPI_ASM_SW64_UCONTEXT_H struct ucontext { unsigned long uc_flags; @@ -11,4 +11,4 @@ struct ucontext { sigset_t uc_sigmask; /* mask last for extensibility */ }; -#endif /* _ASM_SW64_UCONTEXT_H */ +#endif /* _UAPI_ASM_SW64_UCONTEXT_H */ diff --git a/arch/sw_64/include/uapi/asm/unistd.h b/arch/sw_64/include/uapi/asm/unistd.h index 225358536dc9eb9cf358fb2477ab3de5d4056add..be844b2be9d5591b183e6fc54ae2d682a83c89d0 100644 --- a/arch/sw_64/include/uapi/asm/unistd.h +++ b/arch/sw_64/include/uapi/asm/unistd.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_ASM_SW64_UNISTD_H #define _UAPI_ASM_SW64_UNISTD_H @@ -9,9 +9,4 @@ #include -/* sw64 doesn't have protection keys. */ -#define __IGNORE_pkey_mprotect -#define __IGNORE_pkey_alloc -#define __IGNORE_pkey_free - #endif /* _UAPI_ASM_SW64_UNISTD_H */ diff --git a/arch/sw_64/kernel/Makefile b/arch/sw_64/kernel/Makefile index d9e2fcbc1e91aca998bc955be5ac596195d8a431..d4dc9e175d67d21a64b15781e5bf2c09a5df64c3 100644 --- a/arch/sw_64/kernel/Makefile +++ b/arch/sw_64/kernel/Makefile @@ -13,9 +13,9 @@ CFLAGS_REMOVE_insn.o = -pg CFLAGS_REMOVE_printk.o = -pg endif -obj-y := entry.o traps.o process.o sys_sw64.o irq.o \ +obj-y := entry.o fpu.o traps.o process.o sys_sw64.o irq.o \ irq_sw64.o signal.o setup.o ptrace.o time.o \ - systbls.o dup_print.o tc.o \ + systbls.o dup_print.o tc.o timer.o \ insn.o early_init.o topology.o cacheinfo.o \ vdso.o vdso/ @@ -29,10 +29,9 @@ obj-$(CONFIG_SUSPEND) += suspend_asm.o suspend.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_HIBERNATION) += hibernate_asm.o hibernate.o obj-$(CONFIG_AUDIT) += audit.o -obj-$(CONFIG_DIRECT_DMA) += pci_common.o -obj-$(CONFIG_SWIOTLB) += dma_swiotlb.o +obj-$(CONFIG_PCI) += pci_common.o obj-$(CONFIG_RELOCATABLE) += relocate.o -obj-$(CONFIG_DEBUG_FS) += unaligned.o segvdbg.o +obj-$(CONFIG_DEBUG_FS) += segvdbg.o bindvcpu.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o ifndef CONFIG_PCI @@ -44,7 +43,8 @@ obj-y += kvm_cma.o endif # Core logic support -obj-$(CONFIG_SW64) += core.o timer.o +obj-$(CONFIG_SW64_CPUFREQ) += platform.o clock.o +obj-$(CONFIG_SW64_CPUAUTOPLUG) += cpuautoplug.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o diff --git a/arch/sw_64/kernel/acpi.c b/arch/sw_64/kernel/acpi.c index 1c1afe8e812e67e45e52d4fdf12fbe24d73c1c37..a0b5c4a57a07e698a8fd6e05c171e16c14a16c7e 100644 --- a/arch/sw_64/kernel/acpi.c +++ b/arch/sw_64/kernel/acpi.c @@ -2,25 +2,8 @@ #include #include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include + #include int 
acpi_disabled = 1; diff --git a/arch/sw_64/kernel/asm-offsets.c b/arch/sw_64/kernel/asm-offsets.c index 44e7fa77265edd327dbde23ad011556714bebfd6..9e6c338a5edd8bc0ebef67b2ebc1ca3fdbdfc996 100644 --- a/arch/sw_64/kernel/asm-offsets.c +++ b/arch/sw_64/kernel/asm-offsets.c @@ -5,17 +5,16 @@ * and format the required data. */ -#include #include #include -#include #include #include -#include + #include +#include + #include "traps.c" -#include void foo(void) { DEFINE(TI_TASK, offsetof(struct thread_info, task)); @@ -73,6 +72,13 @@ void foo(void) DEFINE(PT_REGS_R6, offsetof(struct pt_regs, r6)); DEFINE(PT_REGS_R7, offsetof(struct pt_regs, r7)); DEFINE(PT_REGS_R8, offsetof(struct pt_regs, r8)); + DEFINE(PT_REGS_R9, offsetof(struct pt_regs, r9)); + DEFINE(PT_REGS_R10, offsetof(struct pt_regs, r10)); + DEFINE(PT_REGS_R11, offsetof(struct pt_regs, r11)); + DEFINE(PT_REGS_R12, offsetof(struct pt_regs, r12)); + DEFINE(PT_REGS_R13, offsetof(struct pt_regs, r13)); + DEFINE(PT_REGS_R14, offsetof(struct pt_regs, r14)); + DEFINE(PT_REGS_R15, offsetof(struct pt_regs, r15)); DEFINE(PT_REGS_R19, offsetof(struct pt_regs, r19)); DEFINE(PT_REGS_R20, offsetof(struct pt_regs, r20)); DEFINE(PT_REGS_R21, offsetof(struct pt_regs, r21)); @@ -94,58 +100,6 @@ void foo(void) DEFINE(PT_REGS_R18, offsetof(struct pt_regs, r18)); BLANK(); - DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack)); - DEFINE(SWITCH_STACK_R9, offsetof(struct switch_stack, r9)); - DEFINE(SWITCH_STACK_R10, offsetof(struct switch_stack, r10)); - DEFINE(SWITCH_STACK_R11, offsetof(struct switch_stack, r11)); - DEFINE(SWITCH_STACK_R12, offsetof(struct switch_stack, r12)); - DEFINE(SWITCH_STACK_R13, offsetof(struct switch_stack, r13)); - DEFINE(SWITCH_STACK_R14, offsetof(struct switch_stack, r14)); - DEFINE(SWITCH_STACK_R15, offsetof(struct switch_stack, r15)); - DEFINE(SWITCH_STACK_RA, offsetof(struct switch_stack, r26)); - BLANK(); - - DEFINE(ALLREGS_SIZE, sizeof(struct allregs)); - DEFINE(ALLREGS_R0, offsetof(struct allregs, regs[0])); - DEFINE(ALLREGS_R1, offsetof(struct allregs, regs[1])); - DEFINE(ALLREGS_R2, offsetof(struct allregs, regs[2])); - DEFINE(ALLREGS_R3, offsetof(struct allregs, regs[3])); - DEFINE(ALLREGS_R4, offsetof(struct allregs, regs[4])); - DEFINE(ALLREGS_R5, offsetof(struct allregs, regs[5])); - DEFINE(ALLREGS_R6, offsetof(struct allregs, regs[6])); - DEFINE(ALLREGS_R7, offsetof(struct allregs, regs[7])); - DEFINE(ALLREGS_R8, offsetof(struct allregs, regs[8])); - DEFINE(ALLREGS_R9, offsetof(struct allregs, regs[9])); - DEFINE(ALLREGS_R10, offsetof(struct allregs, regs[10])); - DEFINE(ALLREGS_R11, offsetof(struct allregs, regs[11])); - DEFINE(ALLREGS_R12, offsetof(struct allregs, regs[12])); - DEFINE(ALLREGS_R13, offsetof(struct allregs, regs[13])); - DEFINE(ALLREGS_R14, offsetof(struct allregs, regs[14])); - DEFINE(ALLREGS_R15, offsetof(struct allregs, regs[15])); - DEFINE(ALLREGS_R16, offsetof(struct allregs, regs[16])); - DEFINE(ALLREGS_R17, offsetof(struct allregs, regs[17])); - DEFINE(ALLREGS_R18, offsetof(struct allregs, regs[18])); - DEFINE(ALLREGS_R19, offsetof(struct allregs, regs[19])); - DEFINE(ALLREGS_R20, offsetof(struct allregs, regs[20])); - DEFINE(ALLREGS_R21, offsetof(struct allregs, regs[21])); - DEFINE(ALLREGS_R22, offsetof(struct allregs, regs[22])); - DEFINE(ALLREGS_R23, offsetof(struct allregs, regs[23])); - DEFINE(ALLREGS_R24, offsetof(struct allregs, regs[24])); - DEFINE(ALLREGS_R25, offsetof(struct allregs, regs[25])); - DEFINE(ALLREGS_R26, offsetof(struct allregs, regs[26])); - DEFINE(ALLREGS_R27, 
offsetof(struct allregs, regs[27])); - DEFINE(ALLREGS_R28, offsetof(struct allregs, regs[28])); - DEFINE(ALLREGS_R29, offsetof(struct allregs, regs[29])); - DEFINE(ALLREGS_R30, offsetof(struct allregs, regs[30])); - DEFINE(ALLREGS_R31, offsetof(struct allregs, regs[31])); - DEFINE(ALLREGS_PS, offsetof(struct allregs, ps)); - DEFINE(ALLREGS_PC, offsetof(struct allregs, pc)); - DEFINE(ALLREGS_GP, offsetof(struct allregs, gp)); - DEFINE(ALLREGS_A0, offsetof(struct allregs, a0)); - DEFINE(ALLREGS_A1, offsetof(struct allregs, a1)); - DEFINE(ALLREGS_A2, offsetof(struct allregs, a2)); - BLANK(); - DEFINE(KVM_REGS_SIZE, sizeof(struct kvm_regs)); DEFINE(KVM_REGS_R0, offsetof(struct kvm_regs, r0)); DEFINE(KVM_REGS_R1, offsetof(struct kvm_regs, r1)); @@ -224,39 +178,48 @@ void foo(void) DEFINE(HOST_INT_R16, offsetof(struct host_int_args, r16)); BLANK(); - DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); - DEFINE(THREAD_CTX_FP, offsetof(struct thread_struct, ctx_fp)); - DEFINE(THREAD_FPCR, offsetof(struct thread_struct, fpcr)); - DEFINE(CTX_FP_F0, offsetof(struct context_fpregs, f0)); - DEFINE(CTX_FP_F1, offsetof(struct context_fpregs, f1)); - DEFINE(CTX_FP_F2, offsetof(struct context_fpregs, f2)); - DEFINE(CTX_FP_F3, offsetof(struct context_fpregs, f3)); - DEFINE(CTX_FP_F4, offsetof(struct context_fpregs, f4)); - DEFINE(CTX_FP_F5, offsetof(struct context_fpregs, f5)); - DEFINE(CTX_FP_F6, offsetof(struct context_fpregs, f6)); - DEFINE(CTX_FP_F7, offsetof(struct context_fpregs, f7)); - DEFINE(CTX_FP_F8, offsetof(struct context_fpregs, f8)); - DEFINE(CTX_FP_F9, offsetof(struct context_fpregs, f9)); - DEFINE(CTX_FP_F10, offsetof(struct context_fpregs, f10)); - DEFINE(CTX_FP_F11, offsetof(struct context_fpregs, f11)); - DEFINE(CTX_FP_F12, offsetof(struct context_fpregs, f12)); - DEFINE(CTX_FP_F13, offsetof(struct context_fpregs, f13)); - DEFINE(CTX_FP_F14, offsetof(struct context_fpregs, f14)); - DEFINE(CTX_FP_F15, offsetof(struct context_fpregs, f15)); - DEFINE(CTX_FP_F16, offsetof(struct context_fpregs, f16)); - DEFINE(CTX_FP_F17, offsetof(struct context_fpregs, f17)); - DEFINE(CTX_FP_F18, offsetof(struct context_fpregs, f18)); - DEFINE(CTX_FP_F19, offsetof(struct context_fpregs, f19)); - DEFINE(CTX_FP_F20, offsetof(struct context_fpregs, f20)); - DEFINE(CTX_FP_F21, offsetof(struct context_fpregs, f21)); - DEFINE(CTX_FP_F22, offsetof(struct context_fpregs, f22)); - DEFINE(CTX_FP_F23, offsetof(struct context_fpregs, f23)); - DEFINE(CTX_FP_F24, offsetof(struct context_fpregs, f24)); - DEFINE(CTX_FP_F25, offsetof(struct context_fpregs, f25)); - DEFINE(CTX_FP_F26, offsetof(struct context_fpregs, f26)); - DEFINE(CTX_FP_F27, offsetof(struct context_fpregs, f27)); - DEFINE(CTX_FP_F28, offsetof(struct context_fpregs, f28)); - DEFINE(CTX_FP_F29, offsetof(struct context_fpregs, f29)); - DEFINE(CTX_FP_F30, offsetof(struct context_fpregs, f30)); + OFFSET(TASK_THREAD, task_struct, thread); + OFFSET(TASK_THREAD_F0, task_struct, thread.fpstate.fp[0]); + OFFSET(TASK_THREAD_F1, task_struct, thread.fpstate.fp[1]); + OFFSET(TASK_THREAD_F2, task_struct, thread.fpstate.fp[2]); + OFFSET(TASK_THREAD_F3, task_struct, thread.fpstate.fp[3]); + OFFSET(TASK_THREAD_F4, task_struct, thread.fpstate.fp[4]); + OFFSET(TASK_THREAD_F5, task_struct, thread.fpstate.fp[5]); + OFFSET(TASK_THREAD_F6, task_struct, thread.fpstate.fp[6]); + OFFSET(TASK_THREAD_F7, task_struct, thread.fpstate.fp[7]); + OFFSET(TASK_THREAD_F8, task_struct, thread.fpstate.fp[8]); + OFFSET(TASK_THREAD_F9, task_struct, thread.fpstate.fp[9]); + 
OFFSET(TASK_THREAD_F10, task_struct, thread.fpstate.fp[10]); + OFFSET(TASK_THREAD_F11, task_struct, thread.fpstate.fp[11]); + OFFSET(TASK_THREAD_F12, task_struct, thread.fpstate.fp[12]); + OFFSET(TASK_THREAD_F13, task_struct, thread.fpstate.fp[13]); + OFFSET(TASK_THREAD_F14, task_struct, thread.fpstate.fp[14]); + OFFSET(TASK_THREAD_F15, task_struct, thread.fpstate.fp[15]); + OFFSET(TASK_THREAD_F16, task_struct, thread.fpstate.fp[16]); + OFFSET(TASK_THREAD_F17, task_struct, thread.fpstate.fp[17]); + OFFSET(TASK_THREAD_F18, task_struct, thread.fpstate.fp[18]); + OFFSET(TASK_THREAD_F19, task_struct, thread.fpstate.fp[19]); + OFFSET(TASK_THREAD_F20, task_struct, thread.fpstate.fp[20]); + OFFSET(TASK_THREAD_F21, task_struct, thread.fpstate.fp[21]); + OFFSET(TASK_THREAD_F22, task_struct, thread.fpstate.fp[22]); + OFFSET(TASK_THREAD_F23, task_struct, thread.fpstate.fp[23]); + OFFSET(TASK_THREAD_F24, task_struct, thread.fpstate.fp[24]); + OFFSET(TASK_THREAD_F25, task_struct, thread.fpstate.fp[25]); + OFFSET(TASK_THREAD_F26, task_struct, thread.fpstate.fp[26]); + OFFSET(TASK_THREAD_F27, task_struct, thread.fpstate.fp[27]); + OFFSET(TASK_THREAD_F28, task_struct, thread.fpstate.fp[28]); + OFFSET(TASK_THREAD_F29, task_struct, thread.fpstate.fp[29]); + OFFSET(TASK_THREAD_F30, task_struct, thread.fpstate.fp[30]); + OFFSET(TASK_THREAD_FPCR, task_struct, thread.fpstate.fpcr); + BLANK(); + OFFSET(TASK_THREAD_RA, task_struct, thread.ra); + OFFSET(TASK_THREAD_SP, task_struct, thread.sp); + OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]); + OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]); + OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]); + OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]); + OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]); + OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]); + OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]); BLANK(); } diff --git a/arch/sw_64/kernel/audit.c b/arch/sw_64/kernel/audit.c index adc4622211d21ce8e9f0114199949f87e7bcdcea..dcf58deee3e2018e0efa6cb355e8802aef04de35 100644 --- a/arch/sw_64/kernel/audit.c +++ b/arch/sw_64/kernel/audit.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include #include + #include static unsigned int dir_class[] = { diff --git a/arch/sw_64/kernel/bindvcpu.c b/arch/sw_64/kernel/bindvcpu.c new file mode 100644 index 0000000000000000000000000000000000000000..611c395c144b69ea5133504214fc18c75065438a --- /dev/null +++ b/arch/sw_64/kernel/bindvcpu.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Wang Yuanheng + * Author: Wang Yuanheng + * + */ + +#include +#include +#include +#include +#include + +extern bool bind_vcpu_enabled; + +static int __init bind_vcpu_init(void) +{ + struct dentry *bindvcpu; + + if (!sw64_debugfs_dir) + return -ENODEV; + + bindvcpu = debugfs_create_bool("bind_vcpu", 0644, + sw64_debugfs_dir, &bind_vcpu_enabled); + if (!bindvcpu) + return -ENOMEM; + return 0; +} +late_initcall(bind_vcpu_init); diff --git a/arch/sw_64/kernel/cacheinfo.c b/arch/sw_64/kernel/cacheinfo.c index 5193d7544b5933d20474115a9f90eaeaddaf29b5..87d3f4bcd10f12efdea915910fbeb2ab9f25c50d 100644 --- a/arch/sw_64/kernel/cacheinfo.c +++ b/arch/sw_64/kernel/cacheinfo.c @@ -14,9 +14,8 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ -#include #include -#include + #include /* Populates leaf and increments to next leaf */ diff --git a/arch/sw_64/kernel/clock.c b/arch/sw_64/kernel/clock.c new file mode 100644 index 0000000000000000000000000000000000000000..f31f596a00521e4b8ea193eb8adc69896f272c92 --- /dev/null +++ b/arch/sw_64/kernel/clock.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/setup.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define CLK_PRT 0x1UL +#define CORE_CLK0_V (0x1UL << 1) +#define CORE_CLK0_R (0x1UL << 2) +#define CORE_CLK2_V (0x1UL << 15) +#define CORE_CLK2_R (0x1UL << 16) + +#define CLK_LV1_SEL_PRT 0x1UL +#define CLK_LV1_SEL_MUXA (0x1UL << 2) +#define CLK_LV1_SEL_MUXB (0x1UL << 3) + +#define CORE_PLL0_CFG_SHIFT 4 +#define CORE_PLL2_CFG_SHIFT 18 + +char curruent_policy[CPUFREQ_NAME_LEN]; + +/* Minimum CLK support */ +enum { + DC_0, DC_1, DC_2, DC_3, DC_4, DC_5, DC_6, DC_7, DC_8, + DC_9, DC_10, DC_11, DC_12, DC_13, DC_14, DC_15, DC_16, DC_RESV +}; + +static int cpu_freq[14] = { + 0, 1200, 1800, 1900, + 1950, 2000, 2050, 2100, + 2150, 2200, 2250, 2300, + 2350, 2400 }; + +struct cpufreq_frequency_table sw64_clockmod_table[] = { + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {0, DC_1, 0}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {0, DC_2, 0}, + {-1, DC_RESV, CPUFREQ_ENTRY_INVALID}, + {0, DC_3, 0}, + {0, DC_4, 0}, + {0, DC_5, 0}, + {0, DC_6, 0}, + {0, DC_7, 0}, + {0, DC_8, 0}, + {0, DC_9, 0}, + {0, DC_10, 0}, + {0, DC_11, 0}, + {0, DC_12, 0}, + {0, DC_13, 0}, +{-1, DC_RESV, CPUFREQ_TABLE_END}, +}; +EXPORT_SYMBOL_GPL(sw64_clockmod_table); + +static struct clk cpu_clk = { + .name = "cpu_clk", + .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES, + .rate = 2400000000, +}; + +struct clk *sw64_clk_get(struct device *dev, const char *id) +{ + return &cpu_clk; +} +EXPORT_SYMBOL(sw64_clk_get); + +unsigned long sw64_clk_get_rate(struct clk *clk) +{ + if (!clk) + return 0; + + return (unsigned long)clk->rate; +} +EXPORT_SYMBOL(sw64_clk_get_rate); + +void sw64_store_policy(struct cpufreq_policy *policy) +{ + memcpy(curruent_policy, policy->governor->name, CPUFREQ_NAME_LEN); +} 
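sw64_clockmod_table above is an ordinary cpufreq_frequency_table (the cpufreq core skips CPUFREQ_ENTRY_INVALID rows), so the consuming cpufreq driver, which is not part of this patch, can register it and route its ->target_index hook through sw64_set_rate() defined just below. A sketch of that glue, with illustrative names only:

/* illustrative ->target_index callback; table frequencies are in kHz,
 * while sw64_set_rate() expects Hz */
static int sw64_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	unsigned long rate;

	rate = (unsigned long)sw64_clockmod_table[index].frequency * 1000;
	if (sw64_set_rate(index, rate) < 0)
		return -EINVAL;
	return 0;
}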
+EXPORT_SYMBOL_GPL(sw64_store_policy); + +int sw64_set_rate(int index, unsigned long rate) +{ + unsigned int i, val; + + rate /= 1000000; + + for (i = 0; i < sizeof(cpu_freq)/sizeof(int); i++) { + if (rate == cpu_freq[i]) { + index = i; + break; + } + } + + if (index < 0) + return -EINVAL; + + sw64_io_write(0, CLK_CTL, CORE_CLK2_R | CORE_CLK2_V | CLK_PRT); + sw64_io_write(1, CLK_CTL, CORE_CLK2_R | CORE_CLK2_V | CLK_PRT); + val = sw64_io_read(0, CLK_CTL); + + sw64_io_write(0, CLK_CTL, val | index << CORE_PLL2_CFG_SHIFT); + sw64_io_write(1, CLK_CTL, val | index << CORE_PLL2_CFG_SHIFT); + + udelay(1); + + sw64_io_write(0, CLK_CTL, CORE_CLK2_V | CLK_PRT + | index << CORE_PLL2_CFG_SHIFT); + sw64_io_write(1, CLK_CTL, CORE_CLK2_V | CLK_PRT + | index << CORE_PLL2_CFG_SHIFT); + val = sw64_io_read(0, CLK_CTL); + + /* LV1 select PLL1/PLL2 */ + sw64_io_write(0, CLU_LV1_SEL, CLK_LV1_SEL_MUXA | CLK_LV1_SEL_PRT); + sw64_io_write(1, CLU_LV1_SEL, CLK_LV1_SEL_MUXA | CLK_LV1_SEL_PRT); + + /* Set CLK_CTL PLL0 */ + sw64_io_write(0, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V); + sw64_io_write(1, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V); + + sw64_io_write(0, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + sw64_io_write(1, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + + udelay(1); + + sw64_io_write(0, CLK_CTL, val | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + sw64_io_write(1, CLK_CTL, val | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + + /* LV1 select PLL0/PLL1 */ + sw64_io_write(0, CLU_LV1_SEL, CLK_LV1_SEL_MUXB | CLK_LV1_SEL_PRT); + sw64_io_write(1, CLU_LV1_SEL, CLK_LV1_SEL_MUXB | CLK_LV1_SEL_PRT); + + return index; +} +EXPORT_SYMBOL_GPL(sw64_set_rate); diff --git a/arch/sw_64/kernel/core.c b/arch/sw_64/kernel/core.c deleted file mode 100644 index 4a35c1dc1e1934b65496e0030e7bb8cb3e095233..0000000000000000000000000000000000000000 --- a/arch/sw_64/kernel/core.c +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef CONFIG_NUMA -#include -#endif -#include "pci_impl.h" - -#ifdef CONFIG_NUMA -#ifdef CONFIG_DISCONTIGMEM -int pa_to_nid(unsigned long pa) -{ - int i = 0; - phys_addr_t pfn_base, pfn_size, pfn; - - pfn = pa >> PAGE_SHIFT; - for (i = 0; i < MAX_NUMNODES; i++) { - if (!NODE_DATA(i)) - continue; - - pfn_base = NODE_DATA(i)->node_start_pfn; - pfn_size = NODE_DATA(i)->node_spanned_pages; - - if (pfn >= pfn_base && pfn < pfn_base + pfn_size) - return i; - } - - pr_err("%s: pa %#lx does not belong to any node, return node 0\n", __func__, pa); - return 0; -} -EXPORT_SYMBOL(pa_to_nid); -#endif /* CONFIG_DISCONTIGMEM */ - -#ifndef CONFIG_USE_PERCPU_NUMA_NODE_ID -extern int cpu_to_node_map[NR_CPUS]; -int cpuid_to_nid(int cpuid) -{ - return cpu_to_node_map[cpuid]; -} -EXPORT_SYMBOL(cpuid_to_nid); -#endif /* CONFIG_USE_PERCPU_NUMA_NODE_ID */ -#else /* !CONFIG_NUMA */ -#ifdef CONFIG_DISCONTIGMEM -int pa_to_nid(unsigned long pa) -{ - return 0; -} -EXPORT_SYMBOL(pa_to_nid); -#endif /* CONFIG_DISCONTIGMEM */ - -#ifndef CONFIG_USE_PERCPU_NUMA_NODE_ID -int cpuid_to_nid(int cpuid) -{ - return 0; -} -EXPORT_SYMBOL(cpuid_to_nid); -#endif /* CONFIG_USE_PERCPU_NUMA_NODE_ID */ -#endif /* CONFIG_NUMA */ diff --git a/arch/sw_64/kernel/cpuautoplug.c b/arch/sw_64/kernel/cpuautoplug.c new file mode 100644 index 
0000000000000000000000000000000000000000..de6f77086185abdc164ffc7cdac79fa89f5f28ef
--- /dev/null
+++ b/arch/sw_64/kernel/cpuautoplug.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/sw_64/kernel/cpuautoplug.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+int autoplug_enabled;
+int autoplug_verbose;
+int autoplug_adjusting;
+
+DEFINE_PER_CPU(int, cpu_adjusting);
+
+struct cpu_autoplug_info {
+	cputime64_t prev_idle;
+	cputime64_t prev_wall;
+	struct delayed_work work;
+	unsigned int sampling_rate;
+	int maxcpus;	/* max cpus for autoplug */
+	int mincpus;	/* min cpus for autoplug */
+	int dec_reqs;	/* continuous core-decreasing requests */
+	int inc_reqs;	/* continuous core-increasing requests */
+};
+
+struct cpu_autoplug_info ap_info;
+
+static ssize_t enabled_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", autoplug_enabled);
+}
+
+static ssize_t enabled_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	long n;
+
+	/* kstrtol() returns an error code and stores the parsed value
+	 * through its last argument; parse straight from buf */
+	if (kstrtol(buf, 0, &n))
+		return -EINVAL;
+
+	if (n > 1 || n < 0)
+		return -EINVAL;
+
+	autoplug_enabled = n;
+
+	return count;
+}
+
+static ssize_t verbose_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", autoplug_verbose);
+}
+
+static ssize_t verbose_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	long n;
+
+	if (kstrtol(buf, 0, &n))
+		return -EINVAL;
+
+	if (n > 1 || n < 0)
+		return -EINVAL;
+
+	autoplug_verbose = n;
+
+	return count;
+}
+
+static ssize_t maxcpus_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", ap_info.maxcpus);
+}
+
+static ssize_t maxcpus_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	long n;
+
+	if (kstrtol(buf, 0, &n))
+		return -EINVAL;
+
+	if (n > num_possible_cpus() || n < ap_info.mincpus)
+		return -EINVAL;
+
+	ap_info.maxcpus = n;
+
+	return count;
+}
+
+static ssize_t mincpus_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", ap_info.mincpus);
+}
+
+static ssize_t mincpus_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	long n;
+
+	if (kstrtol(buf, 0, &n))
+		return -EINVAL;
+
+	if (n > ap_info.maxcpus || n < 1)
+		return -EINVAL;
+
+	ap_info.mincpus = n;
+
+	return count;
+}
+
+static ssize_t sampling_rate_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", ap_info.sampling_rate);
+}
+
+#define SAMPLING_RATE_MAX 1000
+#define SAMPLING_RATE_MIN 600
+
+static ssize_t sampling_rate_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	long n;
+
+	if (kstrtol(buf, 0, &n))
+		return -EINVAL;
+
+	if (n > SAMPLING_RATE_MAX || n < SAMPLING_RATE_MIN)
+		return -EINVAL;
+
+	ap_info.sampling_rate = n;
+
+	return count;
+}
+
+static ssize_t available_value_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "enabled: 0-1\nverbose: 0-1\nmaxcpus: "
+		       "1-%d\nmincpus: 1-%d\nsampling_rate: %d-%d\n",
+		       num_possible_cpus(), num_possible_cpus(),
+		       SAMPLING_RATE_MIN, SAMPLING_RATE_MAX);
+}
+
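Once cpuclass_attr_group below is registered under the cpu subsystem, these knobs surface as files named after the DEVICE_ATTR_RW() declarations that follow. A hedged user-space sketch of driving them, assuming the group lands at /sys/devices/system/cpu/cpuautoplug/ (the path is inferred from the group name and may differ):

#include <stdio.h>

#define AP_DIR "/sys/devices/system/cpu/cpuautoplug/"

static int ap_write(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), AP_DIR "%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	ap_write("enabled", "1");		/* turn autoplug on */
	ap_write("sampling_rate", "800");	/* within the 600..1000 window */
	return 0;
}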
+static DEVICE_ATTR_RW(enabled);
+static DEVICE_ATTR_RW(verbose);
+static DEVICE_ATTR_RW(maxcpus);
+static DEVICE_ATTR_RW(mincpus);
+static DEVICE_ATTR_RW(sampling_rate);
+static DEVICE_ATTR(available_value, 0444, available_value_show, NULL);
+
+static struct attribute *cpuclass_default_attrs[] = {
+	&dev_attr_enabled.attr,
+	&dev_attr_verbose.attr,
+	&dev_attr_maxcpus.attr,
+	&dev_attr_mincpus.attr,
+	&dev_attr_sampling_rate.attr,
+	&dev_attr_available_value.attr,
+	NULL
+};
+
+static struct attribute_group cpuclass_attr_group = {
+	.attrs = cpuclass_default_attrs,
+	.name = "cpuautoplug",
+};
+
+#ifndef MODULE
+static int __init setup_autoplug(char *str)
+{
+	if (!strcmp(str, "off"))
+		autoplug_enabled = 0;
+	else if (!strcmp(str, "on"))
+		autoplug_enabled = 1;
+	else
+		return 0;
+	return 1;
+}
+
+__setup("autoplug=", setup_autoplug);
+#endif
+
+static cputime64_t calc_busy_time(unsigned int cpu)
+{
+	cputime64_t busy_time;
+
+	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+	busy_time += 1;
+
+	return busy_time;
+}
+
+static inline cputime64_t get_idle_time_jiffy(cputime64_t *wall)
+{
+	unsigned int cpu;
+	cputime64_t idle_time = 0;
+	cputime64_t cur_wall_time;
+	cputime64_t busy_time;
+
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+	for_each_online_cpu(cpu) {
+		busy_time = calc_busy_time(cpu);
+
+		idle_time += cur_wall_time - busy_time;
+	}
+
+	if (wall)
+		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+
+	return (cputime64_t)jiffies_to_usecs(idle_time);
+}
+
+static inline cputime64_t sw64_get_idle_time(cputime64_t *wall)
+{
+	unsigned int cpu;
+	u64 idle_time = 0;
+
+	for_each_online_cpu(cpu) {
+		idle_time += get_cpu_idle_time_us(cpu, wall);
+		if (idle_time == -1ULL)
+			return get_idle_time_jiffy(wall);
+	}
+
+	return idle_time;
+}
+
+/* Return the index of the least-busy online CPU recorded in arr[]. */
+static int get_min_busy_cpu(cputime64_t arr[], int size)
+{
+	int loop, min_idx = 0;
+	cputime64_t min_time = arr[0];
+
+	for (loop = 1; loop < size; loop++) {
+		if (arr[loop] > 0 && arr[loop] < min_time) {
+			min_time = arr[loop];
+			min_idx = loop;
+		}
+	}
+	return min_idx;
+}
+
+static int find_min_busy_cpu(void)
+{
+	int nr_all_cpus = num_possible_cpus();
+	unsigned int cpus, target_cpu;
+	cputime64_t busy_time;
+	cputime64_t b_time[NR_CPUS];
+
+	memset(b_time, 0, sizeof(b_time));
+	for_each_online_cpu(cpus) {
+		busy_time = calc_busy_time(cpus);
+		b_time[cpus] = busy_time;
+	}
+	target_cpu = get_min_busy_cpu(b_time, nr_all_cpus);
+	pr_info("The target_cpu is %d, the cpu_num is %d\n",
+		target_cpu, num_online_cpus() - 1);
+	return target_cpu;
+}
+
+static void increase_cores(int cur_cpus)
+{
+	struct device *dev;
+
+	if (cur_cpus == ap_info.maxcpus)
+		return;
+
+	cur_cpus = cpumask_next_zero(0, cpu_online_mask);
+	dev = get_cpu_device(cur_cpus);
+
+	per_cpu(cpu_adjusting, dev->id) = 1;
+	lock_device_hotplug();
+	cpu_device_up(dev);
+	pr_info("The target_cpu is %d, After cpu_up, the cpu_num is %d\n",
+		dev->id, num_online_cpus());
+	get_cpu_device(dev->id)->offline = false;
+	unlock_device_hotplug();
+	per_cpu(cpu_adjusting, dev->id) = 0;
+}
+
+static void decrease_cores(int cur_cpus)
+{
+	struct device *dev;
+
+	if (cur_cpus == ap_info.mincpus)
+		return;
+
+	cur_cpus = find_min_busy_cpu();
+	dev = get_cpu_device(cur_cpus);
+
+	if (dev->id > 0) {
+		per_cpu(cpu_adjusting, dev->id) = -1;
+		lock_device_hotplug();
+		cpu_device_down(dev);
+		get_cpu_device(dev->id)->offline = true;
+		unlock_device_hotplug();
+		per_cpu(cpu_adjusting, dev->id) = 0;
+	}
+}
+
+#define INC_THRESHOLD 80
+#define DEC_THRESHOLD 40
+
+static void do_autoplug_timer(struct work_struct *work)
+{
+	cputime64_t cur_wall_time = 0, cur_idle_time;
+	unsigned long idle_time, wall_time;
+	int delay, load;
+	int nr_cur_cpus = num_online_cpus();
+	int nr_all_cpus = num_possible_cpus();
+	int inc_req = 1, dec_req = 2;
+
+	ap_info.maxcpus =
+		setup_max_cpus > nr_cpu_ids ? nr_cpu_ids : setup_max_cpus;
+	ap_info.mincpus = ap_info.maxcpus / 4;
+
+	if (strcmp(curruent_policy, "performance") == 0) {
+		ap_info.mincpus = ap_info.maxcpus;
+	} else if (strcmp(curruent_policy, "powersave") == 0) {
+		ap_info.maxcpus = ap_info.mincpus;
+	} else if (strcmp(curruent_policy, "ondemand") == 0) {
+		ap_info.sampling_rate = 500;
+		inc_req = 0;
+		dec_req = 2;
+	} else if (strcmp(curruent_policy, "conservative") == 0) {
+		inc_req = 1;
+		dec_req = 3;
+		ap_info.sampling_rate = 1000;	/* 1s */
+	}
+
+	BUG_ON(smp_processor_id() != 0);
+	delay = msecs_to_jiffies(ap_info.sampling_rate);
+	if (!autoplug_enabled || system_state != SYSTEM_RUNNING)
+		goto out;
+
+	autoplug_adjusting = 1;
+
+	if (nr_cur_cpus > ap_info.maxcpus) {
+		decrease_cores(nr_cur_cpus);
+		autoplug_adjusting = 0;
+		goto out;
+	}
+	if (nr_cur_cpus < ap_info.mincpus) {
+		increase_cores(nr_cur_cpus);
+		autoplug_adjusting = 0;
+		goto out;
+	}
+
+	cur_idle_time = sw64_get_idle_time(&cur_wall_time);
+	if (cur_wall_time == 0)
+		cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+	wall_time = (unsigned int)(cur_wall_time - ap_info.prev_wall);
+	ap_info.prev_wall = cur_wall_time;
+
+	idle_time = (unsigned int)(cur_idle_time - ap_info.prev_idle);
+	idle_time += wall_time * (nr_all_cpus - nr_cur_cpus);
+	ap_info.prev_idle = cur_idle_time;
+
+	if (unlikely(!wall_time || wall_time * nr_all_cpus < idle_time)) {
+		autoplug_adjusting = 0;
+		goto out;
+	}
+
+	load = 100 * (wall_time * nr_all_cpus - idle_time) / wall_time;
+
+	if (load < (nr_cur_cpus - 1) * 100 - DEC_THRESHOLD) {
+		ap_info.inc_reqs = 0;
+		if (ap_info.dec_reqs < dec_req)
+			ap_info.dec_reqs++;
+		else {
+			ap_info.dec_reqs = 0;
+			decrease_cores(nr_cur_cpus);
+		}
+	} else {
+		ap_info.dec_reqs = 0;
+		if (load > (nr_cur_cpus - 1) * 100 + INC_THRESHOLD) {
+			if (ap_info.inc_reqs < inc_req)
+				ap_info.inc_reqs++;
+			else {
+				ap_info.inc_reqs = 0;
+				increase_cores(nr_cur_cpus);
+			}
+		}
+	}
+
+	autoplug_adjusting = 0;
out:
+	schedule_delayed_work_on(0, &ap_info.work, delay);
+}
+
+static struct platform_device_id platform_device_ids[] = {
+	{
+		.name = "sw64_cpuautoplug",
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(platform, platform_device_ids);
+
+static struct platform_driver platform_driver = {
+	.driver = {
+		.name = "sw64_cpuautoplug",
+		.owner = THIS_MODULE,
+	},
+	.id_table = platform_device_ids,
+};
+
+static int __init cpuautoplug_init(void)
+{
+	int i, ret, delay;
+
+	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
+				 &cpuclass_attr_group);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&platform_driver);
+	if (ret) {
+		sysfs_remove_group(&cpu_subsys.dev_root->kobj,
+				   &cpuclass_attr_group);
+		return ret;
+	}
+
+	pr_info("cpuautoplug: SW64 CPU autoplug driver.\n");
+
+	ap_info.maxcpus =
+		setup_max_cpus > nr_cpu_ids ? nr_cpu_ids : setup_max_cpus;
+	ap_info.mincpus = 16;
+	ap_info.dec_reqs = 0;
+	ap_info.inc_reqs = 0;
+	ap_info.sampling_rate = 720;	/* 720ms */
+	if (setup_max_cpus == 0) {	/* booted with "nosmp" */
+		ap_info.maxcpus = 1;
+		autoplug_enabled = 0;
+	}
+	if (setup_max_cpus > num_possible_cpus())
+		ap_info.maxcpus = num_possible_cpus();
+
+	pr_info("mincpu = %d, maxcpu = %d, autoplug_enabled = %d, rate = %d\n",
+		ap_info.mincpus, ap_info.maxcpus, autoplug_enabled,
+		ap_info.sampling_rate);
+
+	for_each_possible_cpu(i)
+		per_cpu(cpu_adjusting, i) = 0;
+#ifndef MODULE
+	delay = msecs_to_jiffies(ap_info.sampling_rate * 24);
+#else
+	delay = msecs_to_jiffies(ap_info.sampling_rate * 8);
+#endif
+	INIT_DEFERRABLE_WORK(&ap_info.work, do_autoplug_timer);
+	schedule_delayed_work_on(0, &ap_info.work, delay);
+
+	if (!autoplug_enabled)
+		cancel_delayed_work_sync(&ap_info.work);
+
+	return ret;
+}
+
+static void __exit cpuautoplug_exit(void)
+{
+	cancel_delayed_work_sync(&ap_info.work);
+	platform_driver_unregister(&platform_driver);
+	sysfs_remove_group(&cpu_subsys.dev_root->kobj, &cpuclass_attr_group);
+}
+
+late_initcall(cpuautoplug_init);
+module_exit(cpuautoplug_exit);
+
+MODULE_DESCRIPTION("cpuautoplug driver for SW64");
diff --git a/arch/sw_64/kernel/crash_dump.c b/arch/sw_64/kernel/crash_dump.c
index f3836afe3e25528267a14bcf1424f01f887ac9d6..4484673823b8e6065d9efb5f2299a21df67d421a 100644
--- a/arch/sw_64/kernel/crash_dump.c
+++ b/arch/sw_64/kernel/crash_dump.c
@@ -14,8 +14,6 @@
  * published by the Free Software Foundation.
  */
-#include
-#include
 #include
 #include
diff --git a/arch/sw_64/kernel/dup_print.c b/arch/sw_64/kernel/dup_print.c
index ac0a95d4d30ba3f8b955936bd19c2ab720d8bba7..02639f40a4bc6d5901344c41249f34c8bc8305be 100644
--- a/arch/sw_64/kernel/dup_print.c
+++ b/arch/sw_64/kernel/dup_print.c
@@ -1,11 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
-#include
 #include
-#include
 #include
-#include
 #include
-#include
+
+#include
+#include
 
 #ifdef CONFIG_SW64_RRK
@@ -20,7 +19,7 @@ unsigned long sw64_printk_offset;
  * For output the kernel message on the console
  * with full-system emulator.
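 *
 * The emulator watches a single doorbell word: the low 32 bits carry the
 * printk buffer address and the high 32 bits the length of the message
 * just formatted.  A minimal sketch of that packing (mirroring the store
 * done below; buf, len and doorbell_va are placeholders, and the doorbell
 * address itself is platform-defined):
 *
 *	u64 doorbell = ((u64)buf & 0xffffffffUL) | ((u64)len << 32);
 *	*(volatile u64 *)doorbell_va = doorbell;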
*/ -#define QEMU_PRINTF_BUFF_BASE (0x805000040000ULL | PAGE_OFFSET) +#define QEMU_PRINTF_BUFF_BASE (IO_BASE | MCU_BASE | 0x40000UL) int sw64_printk(const char *fmt, va_list args) { @@ -39,10 +38,11 @@ int sw64_printk(const char *fmt, va_list args) printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); } else { printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); - if (is_guest_or_emul()) { - unsigned long write_addr = QEMU_PRINTF_BUFF_BASE; - *(unsigned long *)write_addr = (unsigned long)((((unsigned long)sw64_printk_buf) & 0xffffffffUL) - | ((unsigned long)printed_len << 32)); + if (is_in_emul()) { + void __iomem *addr = __va(QEMU_PRINTF_BUFF_BASE); + u64 data = ((u64)sw64_printk_buf & 0xffffffffUL) + | ((u64)printed_len << 32); + *(u64 *)addr = data; } } sw64_printk_offset += printed_len; diff --git a/arch/sw_64/kernel/early_printk.c b/arch/sw_64/kernel/early_printk.c index f4d5f2d5c876e6b2f08d0e0c745af100ab3cbbb4..62902175217a6f8f74cad4cdbb3156039009c341 100644 --- a/arch/sw_64/kernel/early_printk.c +++ b/arch/sw_64/kernel/early_printk.c @@ -1,9 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include -#include -#include + #include static unsigned long early_serial_base; /* ttyS0 */ diff --git a/arch/sw_64/kernel/entry.S b/arch/sw_64/kernel/entry.S index 753eb31a76c6b648b1476f60633d467b13b7a4c5..f79c9a6ddf3692f4c5427c7b83a555ac9d68fd71 100644 --- a/arch/sw_64/kernel/entry.S +++ b/arch/sw_64/kernel/entry.S @@ -21,52 +21,84 @@ * the hmcode-provided values are available to the signal handler. */ -#define SAVE_ALL \ - subl $sp, PT_REGS_PS, $sp; \ - stl $0, PT_REGS_R0($sp); \ - stl $1, PT_REGS_R1($sp); \ - stl $2, PT_REGS_R2($sp); \ - stl $3, PT_REGS_R3($sp); \ - stl $4, PT_REGS_R4($sp); \ - stl $28, PT_REGS_R28($sp); \ - stl $5, PT_REGS_R5($sp); \ - stl $6, PT_REGS_R6($sp); \ - stl $7, PT_REGS_R7($sp); \ - stl $8, PT_REGS_R8($sp); \ - stl $19, PT_REGS_R19($sp); \ - stl $20, PT_REGS_R20($sp); \ - stl $21, PT_REGS_R21($sp); \ - stl $22, PT_REGS_R22($sp); \ - stl $23, PT_REGS_R23($sp); \ - stl $24, PT_REGS_R24($sp); \ - stl $25, PT_REGS_R25($sp); \ - stl $26, PT_REGS_R26($sp); \ - stl $27, PT_REGS_R27($sp); \ - stl $16, PT_REGS_TRAP_A0($sp); \ - stl $17, PT_REGS_TRAP_A1($sp); \ + .macro SAVE_COMMON_REGS + ldi $sp, -PT_REGS_PS($sp) + stl $0, PT_REGS_R0($sp) + stl $1, PT_REGS_R1($sp) + stl $2, PT_REGS_R2($sp) + stl $3, PT_REGS_R3($sp) + stl $4, PT_REGS_R4($sp) + stl $28, PT_REGS_R28($sp) + stl $5, PT_REGS_R5($sp) + stl $6, PT_REGS_R6($sp) + stl $7, PT_REGS_R7($sp) + stl $8, PT_REGS_R8($sp) + stl $19, PT_REGS_R19($sp) + stl $20, PT_REGS_R20($sp) + stl $21, PT_REGS_R21($sp) + stl $22, PT_REGS_R22($sp) + stl $23, PT_REGS_R23($sp) + stl $24, PT_REGS_R24($sp) + stl $25, PT_REGS_R25($sp) + stl $26, PT_REGS_R26($sp) + stl $27, PT_REGS_R27($sp) + stl $16, PT_REGS_TRAP_A0($sp) + stl $17, PT_REGS_TRAP_A1($sp) stl $18, PT_REGS_TRAP_A2($sp) + .endm + + .macro RESTORE_COMMON_REGS + ldl $0, PT_REGS_R0($sp) + ldl $1, PT_REGS_R1($sp) + ldl $2, PT_REGS_R2($sp) + ldl $3, PT_REGS_R3($sp) + ldl $4, PT_REGS_R4($sp) + ldl $5, PT_REGS_R5($sp) + ldl $6, PT_REGS_R6($sp) + ldl $7, PT_REGS_R7($sp) + ldl $8, PT_REGS_R8($sp) + ldl $19, PT_REGS_R19($sp) + ldl $20, PT_REGS_R20($sp) + ldl $21, PT_REGS_R21($sp) + ldl $22, PT_REGS_R22($sp) + ldl $23, PT_REGS_R23($sp) + ldl $24, PT_REGS_R24($sp) + ldl $25, PT_REGS_R25($sp) + ldl $26, PT_REGS_R26($sp) + ldl $27, PT_REGS_R27($sp) + ldl $28, PT_REGS_R28($sp) + ldi $sp, PT_REGS_PS($sp) + .endm -#define RESTORE_ALL \ - ldl $0, PT_REGS_R0($sp); \ - ldl $1, 
PT_REGS_R1($sp); \ - ldl $2, PT_REGS_R2($sp); \ - ldl $3, PT_REGS_R3($sp); \ - ldl $4, PT_REGS_R4($sp); \ - ldl $5, PT_REGS_R5($sp); \ - ldl $6, PT_REGS_R6($sp); \ - ldl $7, PT_REGS_R7($sp); \ - ldl $8, PT_REGS_R8($sp); \ - ldl $19, PT_REGS_R19($sp); \ - ldl $20, PT_REGS_R20($sp); \ - ldl $21, PT_REGS_R21($sp); \ - ldl $22, PT_REGS_R22($sp); \ - ldl $23, PT_REGS_R23($sp); \ - ldl $24, PT_REGS_R24($sp); \ - ldl $25, PT_REGS_R25($sp); \ - ldl $26, PT_REGS_R26($sp); \ - ldl $27, PT_REGS_R27($sp); \ - ldl $28, PT_REGS_R28($sp); \ - addl $sp, PT_REGS_PS, $sp + .macro SAVE_CALLEE_REGS + stl $9, PT_REGS_R9($sp) + stl $10, PT_REGS_R10($sp) + stl $11, PT_REGS_R11($sp) + stl $12, PT_REGS_R12($sp) + stl $13, PT_REGS_R13($sp) + stl $14, PT_REGS_R14($sp) + stl $15, PT_REGS_R15($sp) + .endm + + .macro RESTORE_CALLEE_REGS + ldl $9, PT_REGS_R9($sp) + ldl $10, PT_REGS_R10($sp) + ldl $11, PT_REGS_R11($sp) + ldl $12, PT_REGS_R12($sp) + ldl $13, PT_REGS_R13($sp) + ldl $14, PT_REGS_R14($sp) + ldl $15, PT_REGS_R15($sp) + .endm + + .macro SAVE_ALL + SAVE_COMMON_REGS + SAVE_CALLEE_REGS + .endm + + .macro RESTORE_ALL + RESTORE_CALLEE_REGS + RESTORE_COMMON_REGS + .endm /* * Non-syscall kernel entry points. @@ -101,31 +133,11 @@ entArith: .ent entMM entMM: SAVE_ALL -/* save $9 - $15 so the inline exception code can manipulate them. */ - subl $sp, SWITCH_STACK_RA, $sp - stl $9, SWITCH_STACK_R9($sp) - stl $10, SWITCH_STACK_R10($sp) - stl $11, SWITCH_STACK_R11($sp) - stl $12, SWITCH_STACK_R12($sp) - stl $13, SWITCH_STACK_R13($sp) - stl $14, SWITCH_STACK_R14($sp) - stl $15, SWITCH_STACK_R15($sp) - addl $sp, SWITCH_STACK_RA, $19 -/* handle the fault */ ldi $8, 0x3fff + ldi $26, ret_from_sys_call bic $sp, $8, $8 - call $26, do_page_fault -/* reload the registers after the exception code played. */ - ldl $9, SWITCH_STACK_R9($sp) - ldl $10, SWITCH_STACK_R10($sp) - ldl $11, SWITCH_STACK_R11($sp) - ldl $12, SWITCH_STACK_R12($sp) - ldl $13, SWITCH_STACK_R13($sp) - ldl $14, SWITCH_STACK_R14($sp) - ldl $15, SWITCH_STACK_R15($sp) - addl $sp, SWITCH_STACK_RA, $sp -/* finish up the syscall as normal. */ - br ret_from_sys_call + mov $sp, $19 + call $31, do_page_fault .end entMM .align 4 @@ -140,109 +152,32 @@ entIF: call $31, do_entIF .end entIF +/* + * Handle unalignment exception. + * We don't handle the "gp" register correctly, but if we fault on a + * gp-register unaligned load/store, something is _very_ wrong in the + * kernel anyway. + */ .align 4 .globl entUna .ent entUna entUna: - ldi $sp, -ALLREGS_PS($sp) - stl $0, ALLREGS_R0($sp) - ldl $0, ALLREGS_PS($sp) /* get PS */ - stl $1, ALLREGS_R1($sp) - stl $2, ALLREGS_R2($sp) - stl $3, ALLREGS_R3($sp) - and $0, 8, $0 /* user mode? 
*/ - stl $4, ALLREGS_R4($sp) - bne $0, entUnaUser /* yup -> do user-level unaligned fault */ - stl $5, ALLREGS_R5($sp) - stl $6, ALLREGS_R6($sp) - stl $7, ALLREGS_R7($sp) - stl $8, ALLREGS_R8($sp) - stl $9, ALLREGS_R9($sp) - stl $10, ALLREGS_R10($sp) - stl $11, ALLREGS_R11($sp) - stl $12, ALLREGS_R12($sp) - stl $13, ALLREGS_R13($sp) - stl $14, ALLREGS_R14($sp) - stl $15, ALLREGS_R15($sp) - /* 16-18 HMCODE-saved */ - stl $19, ALLREGS_R19($sp) - stl $20, ALLREGS_R20($sp) - stl $21, ALLREGS_R21($sp) - stl $22, ALLREGS_R22($sp) - stl $23, ALLREGS_R23($sp) - stl $24, ALLREGS_R24($sp) - stl $25, ALLREGS_R25($sp) - stl $26, ALLREGS_R26($sp) - stl $27, ALLREGS_R27($sp) - stl $28, ALLREGS_R28($sp) - mov $sp, $19 - stl $gp, ALLREGS_R29($sp) + SAVE_ALL ldi $8, 0x3fff - stl $31, ALLREGS_R31($sp) bic $sp, $8, $8 + mov $sp, $19 + ldl $0, PT_REGS_PS($sp) + and $0, 8, $0 /* user mode ? */ + beq $0, 1f + ldi $26, ret_from_sys_call + call $31, do_entUnaUser /* return to ret_from_syscall */ +1: ldl $9, PT_REGS_GP($sp) call $26, do_entUna - ldl $0, ALLREGS_R0($sp) - ldl $1, ALLREGS_R1($sp) - ldl $2, ALLREGS_R2($sp) - ldl $3, ALLREGS_R3($sp) - ldl $4, ALLREGS_R4($sp) - ldl $5, ALLREGS_R5($sp) - ldl $6, ALLREGS_R6($sp) - ldl $7, ALLREGS_R7($sp) - ldl $8, ALLREGS_R8($sp) - ldl $9, ALLREGS_R9($sp) - ldl $10, ALLREGS_R10($sp) - ldl $11, ALLREGS_R11($sp) - ldl $12, ALLREGS_R12($sp) - ldl $13, ALLREGS_R13($sp) - ldl $14, ALLREGS_R14($sp) - ldl $15, ALLREGS_R15($sp) - /* 16-18 HMCODE-saved */ - ldl $19, ALLREGS_R19($sp) - ldl $20, ALLREGS_R20($sp) - ldl $21, ALLREGS_R21($sp) - ldl $22, ALLREGS_R22($sp) - ldl $23, ALLREGS_R23($sp) - ldl $24, ALLREGS_R24($sp) - ldl $25, ALLREGS_R25($sp) - ldl $26, ALLREGS_R26($sp) - ldl $27, ALLREGS_R27($sp) - ldl $28, ALLREGS_R28($sp) - ldl $gp, ALLREGS_R29($sp) - ldi $sp, ALLREGS_PS($sp) + stl $9, PT_REGS_GP($sp) + RESTORE_ALL sys_call HMC_rti .end entUna - .align 4 - .ent entUnaUser -entUnaUser: - ldl $0, ALLREGS_R0($sp) /* restore original $0 */ - ldi $sp, ALLREGS_PS($sp) /* pop entUna's stack frame */ - SAVE_ALL /* setup normal kernel stack */ - ldi $sp, -SWITCH_STACK_RA($sp) - stl $9, SWITCH_STACK_R9($sp) - stl $10, SWITCH_STACK_R10($sp) - stl $11, SWITCH_STACK_R11($sp) - stl $12, SWITCH_STACK_R12($sp) - stl $13, SWITCH_STACK_R13($sp) - stl $14, SWITCH_STACK_R14($sp) - stl $15, SWITCH_STACK_R15($sp) - ldi $8, 0x3fff - addl $sp, SWITCH_STACK_RA, $19 - bic $sp, $8, $8 - call $26, do_entUnaUser - ldl $9, SWITCH_STACK_R9($sp) - ldl $10, SWITCH_STACK_R10($sp) - ldl $11, SWITCH_STACK_R11($sp) - ldl $12, SWITCH_STACK_R12($sp) - ldl $13, SWITCH_STACK_R13($sp) - ldl $14, SWITCH_STACK_R14($sp) - ldl $15, SWITCH_STACK_R15($sp) - ldi $sp, SWITCH_STACK_RA($sp) - br ret_from_sys_call - .end entUnaUser - - /* * The system call entry point is special. Most importantly, it looks * like a function call to userspace as far as clobbered registers. We @@ -368,9 +303,7 @@ $work_resched: $work_notifysig: mov $sp, $16 - bsr $1, do_switch_stack call $26, do_work_pending - bsr $1, undo_switch_stack br restore_all .end work_pending @@ -384,14 +317,9 @@ $work_notifysig: .ent strace strace: /* set up signal stack, call syscall_trace */ - bsr $1, do_switch_stack mov $0, $9 mov $19, $10 call $26, syscall_trace_enter - mov $9, $18 - mov $10, $19 - bsr $1, undo_switch_stack - blt $0, $syscall_trace_failed /* get the system call number and the arguments back.. 
*/ @@ -420,10 +348,7 @@ ret_from_straced: stl $31, PT_REGS_R19($sp) /* a3=0 => no error */ $strace_success: stl $0, PT_REGS_R0($sp) /* save return value */ - - bsr $1, do_switch_stack call $26, syscall_trace_leave - bsr $1, undo_switch_stack br $31, ret_from_sys_call .align 3 @@ -438,172 +363,66 @@ $strace_error: stl $0, PT_REGS_R0($sp) stl $1, PT_REGS_R19($sp) /* a3 for return */ - bsr $1, do_switch_stack mov $18, $9 /* save old syscall number */ mov $19, $10 /* save old a3 */ call $26, syscall_trace_leave mov $9, $18 mov $10, $19 - bsr $1, undo_switch_stack mov $31, $26 /* tell "ret_from_sys_call" we can restart */ br ret_from_sys_call $syscall_trace_failed: - bsr $1, do_switch_stack - mov $18, $9 - mov $19, $10 call $26, syscall_trace_leave mov $9, $18 mov $10, $19 - bsr $1, undo_switch_stack mov $31, $26 /* tell "ret_from_sys_call" we can restart */ br ret_from_sys_call .end strace - .align 4 - .ent do_switch_stack -do_switch_stack: - ldi $sp, -SWITCH_STACK_SIZE($sp) - flds $f31, 0($sp) /* fillde hint */ - stl $9, SWITCH_STACK_R9($sp) - stl $10, SWITCH_STACK_R10($sp) - stl $11, SWITCH_STACK_R11($sp) - stl $12, SWITCH_STACK_R12($sp) - stl $13, SWITCH_STACK_R13($sp) - stl $14, SWITCH_STACK_R14($sp) - stl $15, SWITCH_STACK_R15($sp) - stl $26, SWITCH_STACK_RA($sp) - // SIMD-FP - ldl $9, TI_TASK($8) - ldi $9, TASK_THREAD($9) - ldi $10, THREAD_CTX_FP($9) - vstd $f0, CTX_FP_F0($10) - vstd $f1, CTX_FP_F1($10) - vstd $f2, CTX_FP_F2($10) - vstd $f3, CTX_FP_F3($10) - vstd $f4, CTX_FP_F4($10) - vstd $f5, CTX_FP_F5($10) - vstd $f6, CTX_FP_F6($10) - vstd $f7, CTX_FP_F7($10) - vstd $f8, CTX_FP_F8($10) - vstd $f9, CTX_FP_F9($10) - vstd $f10, CTX_FP_F10($10) - vstd $f11, CTX_FP_F11($10) - vstd $f12, CTX_FP_F12($10) - vstd $f13, CTX_FP_F13($10) - vstd $f14, CTX_FP_F14($10) - vstd $f15, CTX_FP_F15($10) - vstd $f16, CTX_FP_F16($10) - vstd $f17, CTX_FP_F17($10) - vstd $f18, CTX_FP_F18($10) - vstd $f19, CTX_FP_F19($10) - vstd $f20, CTX_FP_F20($10) - vstd $f21, CTX_FP_F21($10) - vstd $f22, CTX_FP_F22($10) - vstd $f23, CTX_FP_F23($10) - vstd $f24, CTX_FP_F24($10) - vstd $f25, CTX_FP_F25($10) - vstd $f26, CTX_FP_F26($10) - vstd $f27, CTX_FP_F27($10) - rfpcr $f0 - vstd $f28, CTX_FP_F28($10) - vstd $f29, CTX_FP_F29($10) - vstd $f30, CTX_FP_F30($10) - fstd $f0, THREAD_FPCR($9) - vldd $f0, CTX_FP_F0($10) - ldl $9, SWITCH_STACK_R9($sp) - ldl $10, SWITCH_STACK_R10($sp) - ret $31, ($1), 1 - .end do_switch_stack - - .align 4 - .ent undo_switch_stack -undo_switch_stack: -#ifdef CONFIG_SUBARCH_C3B - fillcs 0($sp) /* prefetch */ -#endif - ldl $11, SWITCH_STACK_R11($sp) - ldl $12, SWITCH_STACK_R12($sp) - ldl $13, SWITCH_STACK_R13($sp) - ldl $14, SWITCH_STACK_R14($sp) - ldl $15, SWITCH_STACK_R15($sp) - ldl $26, SWITCH_STACK_RA($sp) - // SIMD-FP - ldl $9, TI_TASK($8) - ldi $9, TASK_THREAD($9) - fldd $f0, THREAD_FPCR($9) - wfpcr $f0 - fimovd $f0, $10 - and $10, 0x3, $10 - beq $10, $setfpec_0 - subl $10, 0x1, $10 - beq $10, $setfpec_1 - subl $10, 0x1, $10 - beq $10, $setfpec_2 - setfpec3 - br $setfpec_over -$setfpec_0: - setfpec0 - br $setfpec_over -$setfpec_1: - setfpec1 - br $setfpec_over -$setfpec_2: - setfpec2 -$setfpec_over: - ldi $10, THREAD_CTX_FP($9) - vldd $f0, CTX_FP_F0($10) - vldd $f1, CTX_FP_F1($10) - vldd $f2, CTX_FP_F2($10) - vldd $f3, CTX_FP_F3($10) - vldd $f4, CTX_FP_F4($10) - vldd $f5, CTX_FP_F5($10) - vldd $f6, CTX_FP_F6($10) - vldd $f7, CTX_FP_F7($10) - vldd $f8, CTX_FP_F8($10) - vldd $f9, CTX_FP_F9($10) - vldd $f10, CTX_FP_F10($10) - vldd $f11, CTX_FP_F11($10) - vldd $f12, CTX_FP_F12($10) - vldd $f13, 
CTX_FP_F13($10) - vldd $f14, CTX_FP_F14($10) - vldd $f15, CTX_FP_F15($10) - vldd $f16, CTX_FP_F16($10) - vldd $f17, CTX_FP_F17($10) - vldd $f18, CTX_FP_F18($10) - vldd $f19, CTX_FP_F19($10) - vldd $f20, CTX_FP_F20($10) - vldd $f21, CTX_FP_F21($10) - vldd $f22, CTX_FP_F22($10) - vldd $f23, CTX_FP_F23($10) - vldd $f24, CTX_FP_F24($10) - vldd $f25, CTX_FP_F25($10) - vldd $f26, CTX_FP_F26($10) - vldd $f27, CTX_FP_F27($10) - vldd $f28, CTX_FP_F28($10) - vldd $f29, CTX_FP_F29($10) - vldd $f30, CTX_FP_F30($10) - ldl $9, SWITCH_STACK_R9($sp) - ldl $10, SWITCH_STACK_R10($sp) - ldi $sp, SWITCH_STACK_SIZE($sp) - ret $31, ($1), 1 - .end undo_switch_stack - /* - * The meat of the context switch code. + * Integer register context switch + * The callee-saved registers must be saved and restored. + * + * a0: physical address of next task's pcb, used by hmcode + * a1: previous task_struct (must be preserved across the switch) + * a2: next task_struct + * + * The value of a1 must be preserved by this function, as that's how + * arguments are passed to schedule_tail. */ - .align 4 .globl __switch_to .ent __switch_to __switch_to: .prologue 0 - bsr $1, do_switch_stack + /* Save context into prev->thread */ + stl $26, TASK_THREAD_RA($17) + stl $30, TASK_THREAD_SP($17) + stl $9, TASK_THREAD_S0($17) + stl $10, TASK_THREAD_S1($17) + stl $11, TASK_THREAD_S2($17) + stl $12, TASK_THREAD_S3($17) + stl $13, TASK_THREAD_S4($17) + stl $14, TASK_THREAD_S5($17) + stl $15, TASK_THREAD_S6($17) + /* Restore context from next->thread */ + ldl $26, TASK_THREAD_RA($18) + ldl $9, TASK_THREAD_S0($18) + ldl $10, TASK_THREAD_S1($18) + ldl $11, TASK_THREAD_S2($18) + ldl $12, TASK_THREAD_S3($18) + ldl $13, TASK_THREAD_S4($18) + ldl $14, TASK_THREAD_S5($18) + ldl $15, TASK_THREAD_S6($18) sys_call HMC_swpctx + /* + * SP has been saved and restored by HMC_swpctx, + * and restore it again here for future expansion. + */ + ldl $30, TASK_THREAD_SP($18) ldi $8, 0x3fff bic $sp, $8, $8 - bsr $1, undo_switch_stack mov $17, $0 ret .end __switch_to @@ -637,29 +456,6 @@ ret_from_kernel_thread: br $31, ret_to_user .end ret_from_kernel_thread -/* - * Special system calls. Most of these are special in that they either - * have to play switch_stack games or in some way use the pt_regs struct. 
- */ - -.macro fork_like name - .align 4 - .globl sw64_\name - .ent sw64_\name -sw64_\name: - .prologue 0 - bsr $1, do_switch_stack - call $26, sys_\name - ldl $26, SWITCH_STACK_RA($sp) - ldi $sp, SWITCH_STACK_SIZE($sp) - ret - .end sw64_\name - .endm - -fork_like fork -fork_like vfork -fork_like clone - .align 4 .globl sys_sigreturn .ent sys_sigreturn @@ -667,12 +463,10 @@ sys_sigreturn: .prologue 0 ldi $9, ret_from_straced cmpult $26, $9, $9 - ldi $sp, -SWITCH_STACK_SIZE($sp) call $26, do_sigreturn bne $9, 1f call $26, syscall_trace_leave -1: br $1, undo_switch_stack - br ret_from_sys_call +1: br ret_from_sys_call .end sys_sigreturn .align 4 @@ -682,12 +476,10 @@ sys_rt_sigreturn: .prologue 0 ldi $9, ret_from_straced cmpult $26, $9, $9 - ldi $sp, -SWITCH_STACK_SIZE($sp) call $26, do_rt_sigreturn bne $9, 1f call $26, syscall_trace_leave -1: br $1, undo_switch_stack - br ret_from_sys_call +1: br ret_from_sys_call .end sys_rt_sigreturn .align 4 diff --git a/arch/sw_64/kernel/fpu.S b/arch/sw_64/kernel/fpu.S new file mode 100644 index 0000000000000000000000000000000000000000..3cb3bfab08e8270e9b0fe60f08e17b0700fc0486 --- /dev/null +++ b/arch/sw_64/kernel/fpu.S @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include + + .text + .set noat +ENTRY(__fpstate_save) + /* a0: prev task */ + vstd $f0, TASK_THREAD_F0(a0) + vstd $f1, TASK_THREAD_F1(a0) + vstd $f2, TASK_THREAD_F2(a0) + vstd $f3, TASK_THREAD_F3(a0) + vstd $f4, TASK_THREAD_F4(a0) + vstd $f5, TASK_THREAD_F5(a0) + vstd $f6, TASK_THREAD_F6(a0) + vstd $f7, TASK_THREAD_F7(a0) + vstd $f8, TASK_THREAD_F8(a0) + vstd $f9, TASK_THREAD_F9(a0) + vstd $f10, TASK_THREAD_F10(a0) + vstd $f11, TASK_THREAD_F11(a0) + vstd $f12, TASK_THREAD_F12(a0) + vstd $f13, TASK_THREAD_F13(a0) + vstd $f14, TASK_THREAD_F14(a0) + vstd $f15, TASK_THREAD_F15(a0) + vstd $f16, TASK_THREAD_F16(a0) + vstd $f17, TASK_THREAD_F17(a0) + vstd $f18, TASK_THREAD_F18(a0) + vstd $f19, TASK_THREAD_F19(a0) + vstd $f20, TASK_THREAD_F20(a0) + vstd $f21, TASK_THREAD_F21(a0) + vstd $f22, TASK_THREAD_F22(a0) + vstd $f23, TASK_THREAD_F23(a0) + vstd $f24, TASK_THREAD_F24(a0) + vstd $f25, TASK_THREAD_F25(a0) + vstd $f26, TASK_THREAD_F26(a0) + vstd $f27, TASK_THREAD_F27(a0) + rfpcr $f0 + vstd $f28, TASK_THREAD_F28(a0) + vstd $f29, TASK_THREAD_F29(a0) + vstd $f30, TASK_THREAD_F30(a0) + fstd $f0, TASK_THREAD_FPCR(a0) + vldd $f0, TASK_THREAD_F0(a0) + ret +END(__fpstate_save) + +ENTRY(__fpstate_restore) + /* a0: next task */ + fldd $f0, TASK_THREAD_FPCR(a0) + wfpcr $f0 + fimovd $f0, t1 + and t1, 0x3, t1 + beq t1, $setfpec_0 + subl t1, 0x1, t1 + beq t1, $setfpec_1 + subl t1, 0x1, t1 + beq t1, $setfpec_2 + setfpec3 + br $setfpec_over +$setfpec_0: + setfpec0 + br $setfpec_over +$setfpec_1: + setfpec1 + br $setfpec_over +$setfpec_2: + setfpec2 +$setfpec_over: + vldd $f0, TASK_THREAD_F0(a0) + vldd $f1, TASK_THREAD_F1(a0) + vldd $f2, TASK_THREAD_F2(a0) + vldd $f3, TASK_THREAD_F3(a0) + vldd $f4, TASK_THREAD_F4(a0) + vldd $f5, TASK_THREAD_F5(a0) + vldd $f6, TASK_THREAD_F6(a0) + vldd $f7, TASK_THREAD_F7(a0) + vldd $f8, TASK_THREAD_F8(a0) + vldd $f9, TASK_THREAD_F9(a0) + vldd $f10, TASK_THREAD_F10(a0) + vldd $f11, TASK_THREAD_F11(a0) + vldd $f12, TASK_THREAD_F12(a0) + vldd $f13, TASK_THREAD_F13(a0) + vldd $f14, TASK_THREAD_F14(a0) + vldd $f15, TASK_THREAD_F15(a0) + vldd $f16, TASK_THREAD_F16(a0) + vldd $f17, TASK_THREAD_F17(a0) + vldd $f18, TASK_THREAD_F18(a0) + vldd $f19, TASK_THREAD_F19(a0) + vldd $f20, TASK_THREAD_F20(a0) + vldd $f21, TASK_THREAD_F21(a0) + vldd $f22, 
TASK_THREAD_F22(a0) + vldd $f23, TASK_THREAD_F23(a0) + vldd $f24, TASK_THREAD_F24(a0) + vldd $f25, TASK_THREAD_F25(a0) + vldd $f26, TASK_THREAD_F26(a0) + vldd $f27, TASK_THREAD_F27(a0) + vldd $f28, TASK_THREAD_F28(a0) + vldd $f29, TASK_THREAD_F29(a0) + vldd $f30, TASK_THREAD_F30(a0) + ret +END(__fpstate_restore) diff --git a/arch/sw_64/kernel/ftrace.c b/arch/sw_64/kernel/ftrace.c index 413562b5d9be939a480a3eb092aa451136fea662..42efca28d3864c84e625e38c803b88499673c3d4 100644 --- a/arch/sw_64/kernel/ftrace.c +++ b/arch/sw_64/kernel/ftrace.c @@ -10,13 +10,8 @@ */ #include -#include -#include -#include -#include #include -#include #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); diff --git a/arch/sw_64/kernel/insn.c b/arch/sw_64/kernel/insn.c index 71d3832d1fe325b88d204757686ba796638d55b4..e8dd41b6b7c420a056c181bcc4e6a6ff8385cbaf 100644 --- a/arch/sw_64/kernel/insn.c +++ b/arch/sw_64/kernel/insn.c @@ -14,22 +14,9 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ -#include -#include -#include -#include -#include -#include #include -#include -#include -#include #include -#include -#include - - //static DEFINE_RAW_SPINLOCK(patch_lock); int __kprobes sw64_insn_read(void *addr, u32 *insnp) diff --git a/arch/sw_64/kernel/irq.c b/arch/sw_64/kernel/irq.c index 6cd26af15b230027f4bd9ea78a7c9ee4c8afc6d9..126fe2f70495e10c9cc313dc2cdecb0e6b65516d 100644 --- a/arch/sw_64/kernel/irq.c +++ b/arch/sw_64/kernel/irq.c @@ -12,24 +12,9 @@ */ #include -#include -#include -#include -#include -#include -#include #include -#include #include -#include #include -#include -#include -#include - -#include -#include -#include volatile unsigned long irq_err_count; DEFINE_PER_CPU(unsigned long, irq_pmi_count); diff --git a/arch/sw_64/kernel/irq_sw64.c b/arch/sw_64/kernel/irq_sw64.c index 376e8397ba3578453576c2dfee7e1813f1966465..8ab845d153eb15ddf1978e069680757d2cdd4136 100644 --- a/arch/sw_64/kernel/irq_sw64.c +++ b/arch/sw_64/kernel/irq_sw64.c @@ -3,18 +3,11 @@ * SW64 specific irq code. */ -#include #include -#include -#include #include -#include -#include + #include #include -#include -#include -#include asmlinkage void do_entInt(unsigned long type, unsigned long vector, diff --git a/arch/sw_64/kernel/jump_label.c b/arch/sw_64/kernel/jump_label.c index a67d16eb3076f9fa50b2dfdc5cd89b6218feaa37..f3bc40370e4de9b77889343338b509d6bdcad8c6 100644 --- a/arch/sw_64/kernel/jump_label.c +++ b/arch/sw_64/kernel/jump_label.c @@ -1,8 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 -#include #include -#include + #include #include diff --git a/arch/sw_64/kernel/kgdb.c b/arch/sw_64/kernel/kgdb.c index c1100ef8fcdd8df15ab2f7ab70cca8c015bbb601..ac2f397f16096b39454c766c48e7054c221474c3 100644 --- a/arch/sw_64/kernel/kgdb.c +++ b/arch/sw_64/kernel/kgdb.c @@ -20,11 +20,8 @@ * along with this program. If not, see . 
*/ -#include #include #include -#include -#include struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { { "r0", 8, offsetof(struct pt_regs, r0)}, @@ -37,13 +34,13 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { { "r7", 8, offsetof(struct pt_regs, r7)}, { "r8", 8, offsetof(struct pt_regs, r8)}, - { "r9", 8, -1 }, - { "r10", 8, -1 }, - { "r11", 8, -1 }, - { "r12", 8, -1 }, - { "r13", 8, -1 }, - { "r14", 8, -1 }, - { "r15", 8, -1 }, + { "r9", 8, offsetof(struct pt_regs, r9)}, + { "r10", 8, offsetof(struct pt_regs, r10)}, + { "r11", 8, offsetof(struct pt_regs, r11)}, + { "r12", 8, offsetof(struct pt_regs, r12)}, + { "r13", 8, offsetof(struct pt_regs, r13)}, + { "r14", 8, offsetof(struct pt_regs, r14)}, + { "r15", 8, offsetof(struct pt_regs, r15)}, { "r16", 8, offsetof(struct pt_regs, r16)}, { "r17", 8, offsetof(struct pt_regs, r17)}, @@ -142,12 +139,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) pr_info("AFTER SET PC IS %lx\n", instruction_pointer(regs)); } -static void kgdb_call_nmi_hook(void *ignored) +void kgdb_call_nmi_hook(void *ignored) { kgdb_nmicallback(raw_smp_processor_id(), NULL); } -void kgdb_roundup_cpus(unsigned long flags) +void kgdb_roundup_cpus(void) { local_irq_enable(); smp_call_function(kgdb_call_nmi_hook, NULL, 0); @@ -231,6 +228,6 @@ void kgdb_arch_exit(void) * sw64 instructions are always in LE. * Break instruction is encoded in LE format */ -struct kgdb_arch arch_kgdb_ops = { +const struct kgdb_arch arch_kgdb_ops = { .gdb_bpt_instr = {0x80, 00, 00, 00} }; diff --git a/arch/sw_64/kernel/kprobes/decode-insn.c b/arch/sw_64/kernel/kprobes/decode-insn.c index e3ab856d60840f096371f1267ebc901d3ca6ed99..d376a7e2bee41a167270f4dd54b1c0a4755d462d 100644 --- a/arch/sw_64/kernel/kprobes/decode-insn.c +++ b/arch/sw_64/kernel/kprobes/decode-insn.c @@ -12,12 +12,8 @@ * General Public License for more details. */ -#include #include -#include -#include -#include -#include + #include "common.h" static bool __kprobes sw64_insn_is_steppable(u32 insn) diff --git a/arch/sw_64/kernel/kprobes/kprobes.c b/arch/sw_64/kernel/kprobes/kprobes.c index 85400f96f9916d5a3bb65f4cd052686c80cc3216..59f040eaa3e17f0f30c26e84f86332363c4a6e58 100644 --- a/arch/sw_64/kernel/kprobes/kprobes.c +++ b/arch/sw_64/kernel/kprobes/kprobes.c @@ -5,13 +5,9 @@ */ #include -#include -#include #include #include -#include -#include #include "common.h" static u32 breakpoint_insn = BREAK_KPROBE; diff --git a/arch/sw_64/kernel/kvm_cma.c b/arch/sw_64/kernel/kvm_cma.c index dc61e2e369e8e91815fce34007979a120969f75d..054dec95b996c0c622501b6244c94a7ebff86142 100644 --- a/arch/sw_64/kernel/kvm_cma.c +++ b/arch/sw_64/kernel/kvm_cma.c @@ -10,12 +10,8 @@ #include #include -#include #include #include -#include -#include -#include #include #include diff --git a/arch/sw_64/kernel/machine_kexec.c b/arch/sw_64/kernel/machine_kexec.c index c778bc1374afb46e05bfabeae3040efd2f408168..c9ca7a728bd458f323575ceae67657b379d6d925 100644 --- a/arch/sw_64/kernel/machine_kexec.c +++ b/arch/sw_64/kernel/machine_kexec.c @@ -5,18 +5,13 @@ * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. 
*/ -#include #include #include -#include #include #include #include + #include -#include -#include -#include -#include extern void *kexec_control_page; extern const unsigned char relocate_new_kernel[]; diff --git a/arch/sw_64/kernel/module.c b/arch/sw_64/kernel/module.c index c75d8a2e43090ff0874907c18f33dda2b3a1c759..2904bb750eb5d0b272e7017bb580bdfdb11c4033 100644 --- a/arch/sw_64/kernel/module.c +++ b/arch/sw_64/kernel/module.c @@ -1,17 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include -#include -#include -#include -#include #include -#if 0 -#define DEBUGP printk -#else #define DEBUGP(fmt...) -#endif /* Allocate the GOT at the end of the core sections. */ diff --git a/arch/sw_64/kernel/msi.c b/arch/sw_64/kernel/msi.c index 644e4010af8a15f69f294b4078325d9eaa82e396..ee1bda3c644741915f6a6cc376d8cf0fd0f41f38 100644 --- a/arch/sw_64/kernel/msi.c +++ b/arch/sw_64/kernel/msi.c @@ -1,14 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include #include -#include -#include -#include -#include -#include - int msi_compose_msg(unsigned int irq, struct msi_msg *msg) { @@ -22,26 +15,8 @@ void sw64_irq_noop(struct irq_data *d) { } -void destroy_irq(unsigned int irq) -{ -#if 0 - int pos; - - irq_init_desc(irq); - - if (irq < RC1_FIRST_MSI_VECTOR) { - pos = irq - RC0_FIRST_MSI_VECTOR; - clear_bit(pos, msi0_irq_in_use); - } else { - pos = irq - RC1_FIRST_MSI_VECTOR; - clear_bit(pos, msi1_irq_in_use); - } -#endif -} - void arch_teardown_msi_irq(unsigned int irq) { - destroy_irq(irq); } static int __init msi_init(void) diff --git a/arch/sw_64/kernel/pci-noop.c b/arch/sw_64/kernel/pci-noop.c index 4ef694e629e8512321b28ba755667055c5f494ce..a0aa2e5bb675d2c181d6711a3973778f61cbf5d7 100644 --- a/arch/sw_64/kernel/pci-noop.c +++ b/arch/sw_64/kernel/pci-noop.c @@ -6,16 +6,8 @@ */ #include -#include #include -#include -#include -#include -#include -#include #include -#include -#include /* * The PCI controller list. diff --git a/arch/sw_64/kernel/pci-sysfs.c b/arch/sw_64/kernel/pci-sysfs.c index 584243922df99e151ac74578f445409ebc93f41d..504fd4a0075491ef16e4b6ccd2a939f526098e57 100644 --- a/arch/sw_64/kernel/pci-sysfs.c +++ b/arch/sw_64/kernel/pci-sysfs.c @@ -10,9 +10,6 @@ * drivers/pci/pci-sysfs.c */ -#include -#include -#include #include static int hose_mmap_page_range(struct pci_controller *hose, diff --git a/arch/sw_64/kernel/pci.c b/arch/sw_64/kernel/pci.c index 36616d31f32fb28274db34b11a9d2abcb6eca493..fcc6e0f02a93aa93c7cfad18ff960973c08dbb50 100644 --- a/arch/sw_64/kernel/pci.c +++ b/arch/sw_64/kernel/pci.c @@ -1,38 +1,16 @@ // SPDX-License-Identifier: GPL-2.0 -/* - * linux/arch/sw_64/kernel/pci.c - * Modified by Suweiqiang 2013-9-30 - */ - -#include #include +#include #include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include #include -#include + #include -#include #include "pci_impl.h" unsigned long rc_linkup; -/* Indicate whether we respect the PCI setup left by console. */ -/* - * Make this long-lived so that we know when shutting down - * whether we probed only or not. - */ -int pci_probe_only; - /* * raw_pci_read/write - Platform-specific PCI config space access. */ @@ -58,12 +36,12 @@ int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, return -EINVAL; } +#ifdef CONFIG_ACPI struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) { - struct pci_bus *bus; - - return bus; + return NULL; } +#endif /* * The PCI controller list. 
@@ -86,6 +64,14 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_i #define MB (1024*KB) #define GB (1024*MB) +resource_size_t pcibios_default_alignment(void) +{ + if (is_in_guest()) + return PAGE_SIZE; + else + return 0; +} + resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { @@ -106,7 +92,7 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, } else if (res->flags & IORESOURCE_MEM) { /* Make sure we start at our min on all hoses */ if (start - hose->mem_space->start < PCIBIOS_MIN_MEM) - start = PCIBIOS_MIN_MEM + hose->mem_space->start; //0xc0000000- 0xffffffff + start = PCIBIOS_MIN_MEM + hose->mem_space->start; /* * The following holds at least for the Low Cost * Sw_64 implementation of the PCI interface: @@ -153,7 +139,6 @@ pcibios_init(void) sw64_init_pci(); return 0; } - subsys_initcall(pcibios_init); char *pcibios_setup(char *str) @@ -164,20 +149,13 @@ char *pcibios_setup(char *str) void pcibios_fixup_bus(struct pci_bus *bus) { /* Propagate hose info into the subordinate devices. */ - struct pci_controller *hose = bus->sysdata; struct pci_dev *dev = bus->self; - if (!dev || bus->number == hose->first_busno) { - /* Root bus. */ - unsigned long end; - + if (!dev || bus->number == hose->first_busno) { bus->resource[0] = hose->io_space; bus->resource[1] = hose->mem_space; bus->resource[2] = hose->pre_mem_space; - } else if (pci_probe_only && - (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { - pci_read_bridge_bases(bus); } } @@ -195,21 +173,6 @@ struct pci_dev *sw64_gendev_to_pci(struct device *dev) return NULL; } -/* - * If we set up a device for bus mastering, we need to check the latency - * timer as certain firmware forgets to set it properly. - */ -void pcibios_set_master(struct pci_dev *dev) -{ - u8 lat; - - pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); - if (lat >= 16) - return; - pr_info("PCI: Setting latency timer of device %s to 64\n", pci_name(dev)); - pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); -} - void __init pcibios_claim_one_bus(struct pci_bus *b) { struct pci_dev *dev; @@ -223,7 +186,7 @@ void __init pcibios_claim_one_bus(struct pci_bus *b) if (r->parent || !r->start || !r->flags) continue; - if (pci_probe_only || (r->flags & IORESOURCE_PCI_FIXED)) { + if (r->flags & IORESOURCE_PCI_FIXED) { if (pci_claim_resource(dev, i) == 0) continue; @@ -258,7 +221,7 @@ void __init common_init_pci(void) struct pci_bus *bus; unsigned int init_busnr; int need_domain_info = 0; - int ret, iov_bus; + int ret; unsigned long offset; /* Scan all of the recorded PCI controllers. 
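 *
 * Each hose's RC_PRIMARY_BUS register is seeded bridge-style; the assumed
 * layout, matching the computation below, is
 *
 *	subordinate << 16 | secondary << 8 | primary
 *
 * so the bootstrap value for last_bus = 0 is (0xff << 16) | (1 << 8) | 0,
 * claiming the whole range up to bus 0xff until the scan writes back the
 * real subordinate bus number.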
*/ @@ -270,13 +233,11 @@ void __init common_init_pci(void) hose->busn_space->start = last_bus; init_busnr = (0xff << 16) + ((last_bus + 1) << 8) + (last_bus); write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); - if (is_in_host()) { - offset = hose->mem_space->start - PCI_32BIT_MEMIO; + offset = hose->mem_space->start - PCI_32BIT_MEMIO; + if (is_in_host()) hose->first_busno = last_bus + 1; - } else { - offset = hose->mem_space->start - PCI_32BIT_VT_MEMIO; + else hose->first_busno = last_bus; - } pci_add_resource_offset(&bridge->windows, hose->mem_space, offset); pci_add_resource_offset(&bridge->windows, hose->io_space, hose->io_space->start); pci_add_resource_offset(&bridge->windows, hose->pre_mem_space, 0); @@ -285,7 +246,7 @@ void __init common_init_pci(void) bridge->sysdata = hose; bridge->busnr = hose->busn_space->start; bridge->ops = &sw64_pci_ops; - bridge->swizzle_irq = sw64_swizzle; + bridge->swizzle_irq = pci_common_swizzle; bridge->map_irq = sw64_map_irq; ret = pci_scan_root_bus_bridge(bridge); @@ -296,20 +257,20 @@ void __init common_init_pci(void) bus = hose->bus = bridge->bus; hose->need_domain_info = need_domain_info; - while (pci_find_bus(pci_domain_nr(bus), last_bus)) - last_bus++; if (is_in_host()) - iov_bus = chip_pcie_configure(hose); - last_bus += iov_bus; + last_bus = chip_pcie_configure(hose); + else + while (pci_find_bus(pci_domain_nr(bus), last_bus)) + last_bus++; - hose->last_busno = hose->busn_space->end = last_bus - 1; + hose->last_busno = hose->busn_space->end = last_bus; init_busnr = read_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS); init_busnr &= ~(0xff << 16); - init_busnr |= (last_bus - 1) << 16; + init_busnr |= last_bus << 16; write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); - pci_bus_update_busn_res_end(bus, last_bus - 1); - + pci_bus_update_busn_res_end(bus, last_bus); + last_bus++; } pcibios_claim_console_setup(); @@ -397,12 +358,8 @@ asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned lon return -EOPNOTSUPP; } -/* Destroy an __iomem token. Not copied from lib/iomap.c. 
*/ - void pci_iounmap(struct pci_dev *dev, void __iomem *addr) { - if (__is_mmio(addr)) - iounmap(addr); } EXPORT_SYMBOL(pci_iounmap); @@ -441,7 +398,7 @@ int sw6_pcie_read_rc_cfg(struct pci_bus *bus, unsigned int devfn, { u32 data; struct pci_controller *hose = bus->sysdata; - void __iomem *cfg_iobase = (void *)hose->rc_config_space_base; + void __iomem *cfg_iobase = hose->rc_config_space_base; if (IS_ENABLED(CONFIG_PCI_DEBUG)) pr_debug("rc read addr:%px bus %d, devfn %#x, where %#x size=%d\t", @@ -592,9 +549,8 @@ static void __iomem *sw6_pcie_map_bus(struct pci_bus *bus, return NULL; relbus = (bus->number << 24) | (devfn << 16) | where; - relbus |= PCI_EP_CFG; - cfg_iobase = (void *)(hose->ep_config_space_base | relbus); + cfg_iobase = hose->ep_config_space_base + relbus; if (IS_ENABLED(CONFIG_PCI_DEBUG)) pr_debug("addr:%px bus %d, devfn %d, where %d\n", @@ -613,11 +569,6 @@ int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) return sw64_chip_init->pci_init.map_irq(dev, slot, pin); } -unsigned char sw64_swizzle(struct pci_dev *dev, u8 *pinp) -{ - return PCI_SLOT(dev->devfn); -} - static void __init sw64_init_host(unsigned long node, unsigned long index) { @@ -649,6 +600,8 @@ sw64_init_host(unsigned long node, unsigned long index) } } +void __weak set_devint_wken(int node) {} + void __init sw64_init_arch(void) { if (IS_ENABLED(CONFIG_PCI)) { @@ -661,6 +614,7 @@ void __init sw64_init_arch(void) cpu_num = sw64_chip->get_cpu_num(); for (node = 0; node < cpu_num; node++) { + set_devint_wken(node); rc_enable = sw64_chip_init->pci_init.get_rc_enable(node); if (rc_enable == 0) { printk("PCIe is disabled on node %ld\n", node); @@ -689,6 +643,8 @@ void __init sw64_init_arch(void) } } +void __weak set_pcieport_service_irq(int node, int index) {} + static void __init sw64_init_intx(struct pci_controller *hose) { unsigned long int_conf, node, val_node; @@ -702,11 +658,13 @@ static void __init sw64_init_intx(struct pci_controller *hose) val_node = next_node_in(node, node_online_map); else val_node = node; - irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 1, val_node); + irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 2, val_node); WARN_ON(irq < 0); irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq); irq_set_status_flags(irq, IRQ_LEVEL); hose->int_irq = irq; + irq_set_chip_and_handler(irq + 1, &dummy_irq_chip, handle_level_irq); + hose->service_irq = irq + 1; rcid = cpu_to_rcid(0); printk_once(KERN_INFO "INTx are directed to node %d core %d.\n", @@ -714,6 +672,8 @@ static void __init sw64_init_intx(struct pci_controller *hose) int_conf = 1UL << 62 | rcid; /* rebase all intx on the first logical cpu */ if (sw64_chip_init->pci_init.set_intx) sw64_chip_init->pci_init.set_intx(node, index, int_conf); + + set_pcieport_service_irq(node, index); } void __init sw64_init_irq(void) @@ -731,3 +691,16 @@ sw64_init_pci(void) { common_init_pci(); } + +static int setup_bus_dma_cb(struct pci_dev *pdev, void *data) +{ + pdev->dev.bus_dma_limit = DMA_BIT_MASK(32); + return 0; +} + +static void fix_bus_dma_limit(struct pci_dev *dev) +{ + pci_walk_bus(dev->subordinate, setup_bus_dma_cb, NULL); + pr_info("Set zx200 bus_dma_limit to 32-bit\n"); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ZHAOXIN, 0x071f, fix_bus_dma_limit); diff --git a/arch/sw_64/kernel/pci_common.c b/arch/sw_64/kernel/pci_common.c index c8c4bf08a4589afbe26772b5e8c824e632dd825e..f996baca9d935a7881a510c1aaf190a2f0975a66 100644 --- a/arch/sw_64/kernel/pci_common.c +++ b/arch/sw_64/kernel/pci_common.c @@ -3,210 +3,113 @@ * 
linux/arch/sw_64/kernel/pci_iommu.c */ -#include -#include #include -#include -#include #include -#include -#include #include -#include -#include #include #include -#include -#include -#include -#include - -#include "pci_impl.h" - -#define DEBUG_ALLOC 0 -#if DEBUG_ALLOC > 0 -# define DBGA(args...) printk(KERN_DEBUG args) -#else -# define DBGA(args...) -#endif -#if DEBUG_ALLOC > 1 -# define DBGA2(args...) printk(KERN_DEBUG args) -#else -# define DBGA2(args...) -#endif - -#define DEBUG_NODIRECT 0 - -#define ISA_DMA_MASK 0x00ffffff - -/* - * Map a single buffer of the indicated size for PCI DMA in streaming - * mode. The 32-bit PCI bus mastering address to use is returned. - * Once the device is given the dma address, the device owns this memory - * until either pci_unmap_single or pci_dma_sync_single is performed. - */ - -static dma_addr_t -pci_direct_map_single_1(struct pci_dev *pdev, void *cpu_addr) -{ - struct pci_controller *hose = pdev->sysdata; - unsigned long paddr; - unsigned long dma_offset; - - if (hose == NULL) { - pr_err("%s: hose does not exist!\n", __func__); - return 0; - } - - dma_offset = read_piu_ior0(hose->node, hose->index, EPDMABAR); - paddr = __pa(cpu_addr) + dma_offset; - return paddr; -} - -/* Helper for generic DMA-mapping functions. */ -static struct pci_dev *sw64_direct_gendev_to_pci(struct device *dev) -{ - if (dev && dev->bus == &pci_bus_type) - return to_pci_dev(dev); - - /* This assumes ISA bus master with dma_mask 0xffffff. */ - return NULL; -} static dma_addr_t sw64_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { - struct pci_dev *pdev = sw64_direct_gendev_to_pci(dev); + dma_addr_t dma_addr = page_to_phys(page) + offset; - if (dir == PCI_DMA_NONE) - BUG(); + if (unlikely(swiotlb_force == SWIOTLB_FORCE)) + return swiotlb_map(dev, dma_addr, size, dir, attrs); - return pci_direct_map_single_1(pdev, (char *)page_address(page) + offset); -} + if (unlikely(!dma_capable(dev, dma_addr, size, true))) { + if (swiotlb_force != SWIOTLB_NO_FORCE) + return swiotlb_map(dev, dma_addr, size, dir, attrs); -/* - * Unmap a single streaming mode DMA translation. The DMA_ADDR and - * SIZE must match what was provided for in a previous pci_map_single - * call. All other usages are undefined. After this call, reads by - * the cpu to the buffer are guaranteed to see whatever the device - * wrote there. - */ + dev_WARN_ONCE(dev, 1, + "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", + &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); + return DMA_MAPPING_ERROR; + } -static inline void sw64_direct_unmap_page(struct device *dev, dma_addr_t dma_addr, + return dma_addr; +} + +static inline void sw64_direct_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { + if (unlikely(is_swiotlb_buffer(addr))) + swiotlb_tbl_unmap_single(dev, addr, size, size, dir, attrs); } -/* Allocate and map kernel buffer using consistent mode DMA for PCI - * device. Returns non-NULL cpu-view pointer to the buffer if - * successful and sets *DMA_ADDRP to the pci side dma address as well, - * else DMA_ADDRP is undefined. 
- */ +static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) +{ + return phys + size - 1 <= + min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); +} static void *sw64_direct_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp, unsigned long attrs) { - struct pci_dev *pdev = sw64_direct_gendev_to_pci(dev); - void *cpu_addr; - long order = get_order(size); - - gfp &= ~GFP_DMA; - -#ifdef CONFIG_ZONE_DMA - if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) - gfp |= GFP_DMA; -#endif - -try_again: - cpu_addr = (void *)__get_free_pages(gfp, order); - if (!cpu_addr) { - pr_info("pci_alloc_consistent: get_free_pages failed from %ps\n", - __builtin_return_address(0)); - /* ??? Really atomic allocation? Otherwise we could play - * with vmalloc and sg if we can't find contiguous memory. - */ - return NULL; + struct page *page; + void *ret; + u64 dma_limit; + + size = PAGE_ALIGN(size); + if (attrs & DMA_ATTR_NO_WARN) + gfp |= __GFP_NOWARN; + + dma_limit = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); + if (dma_limit <= DMA_BIT_MASK(32)) + gfp |= GFP_DMA32; + + /* we always manually zero the memory once we are done */ + gfp &= ~__GFP_ZERO; +again: + page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size)); + if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { + dma_free_contiguous(dev, page, size); + page = NULL; + + if (IS_ENABLED(CONFIG_ZONE_DMA32) && + dma_limit < DMA_BIT_MASK(64) && + !(gfp & (GFP_DMA32 | GFP_DMA))) { + gfp |= GFP_DMA32; + goto again; + } } - memset(cpu_addr, 0, size); - *dma_addrp = pci_direct_map_single_1(pdev, cpu_addr); - if (*dma_addrp == 0) { - free_pages((unsigned long)cpu_addr, order); - if (gfp & GFP_DMA) - return NULL; - /* The address doesn't fit required mask and we - * do not have iommu. Try again with GFP_DMA. - */ - gfp |= GFP_DMA; - goto try_again; - } + if (!page) + return NULL; - DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n", - size, cpu_addr, *dma_addrp, __builtin_return_address(0)); + ret = page_address(page); + memset(ret, 0, size); + *dma_addrp = page_to_phys(page); - return cpu_addr; + return ret; } -/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must - * be values that were returned from pci_alloc_consistent. SIZE must - * be the same as what as passed into pci_alloc_consistent. - * References to the memory and mappings associated with CPU_ADDR or - * DMA_ADDR past this call are illegal. 
- */ - static void sw64_direct_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) { - struct pci_dev *pdev = sw64_direct_gendev_to_pci(dev); - - pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); - free_pages((unsigned long)cpu_addr, get_order(size)); - DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n", - dma_addr, size, __builtin_return_address(0)); -} -#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG))) -#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG)) - -static dma_addr_t sw64_phys_to_dma(struct device *dev, phys_addr_t pa) -{ - unsigned long dma_offset; - struct pci_dev *pdev = sw64_gendev_to_pci(dev); - struct pci_controller *hose = pdev->sysdata; - - if (hose == NULL) { - pr_err("%s: hose does not exist!\n", __func__); - return 0; + if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { + /* cpu_addr is a struct page cookie, not a kernel address */ + dma_free_contiguous(dev, cpu_addr, size); + return; } - dma_offset = read_piu_ior0(hose->node, hose->index, EPDMABAR); - return pa + dma_offset; + free_pages((unsigned long)cpu_addr, get_order(size)); } -static bool -check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, - const char *caller) +static void sw64_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) { - if (unlikely(dev && !dma_capable(dev, dma_addr, size, true))) { - if (!dev->dma_mask) { - dev_err(dev, - "%s: call on device without dma_mask\n", - caller); - return false; - } + struct scatterlist *sg; + int i; - if (*dev->dma_mask >= DMA_BIT_MASK(32)) { - dev_err(dev, - "%s: overflow %pad+%zu of device mask %llx\n", - caller, &dma_addr, size, *dev->dma_mask); - } - return false; - } - return true; + for_each_sg(sgl, sg, nents, i) + sw64_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir, + attrs); } static int sw64_direct_map_sg(struct device *dev, struct scatterlist *sgl, @@ -216,58 +119,16 @@ static int sw64_direct_map_sg(struct device *dev, struct scatterlist *sgl, struct scatterlist *sg; for_each_sg(sgl, sg, nents, i) { - BUG_ON(!sg_page(sg)); - - sg_dma_address(sg) = sw64_phys_to_dma(dev, sg_phys(sg)); - if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__)) - return 0; + sg_dma_address(sg) = sw64_direct_map_page(dev, sg_page(sg), + sg->offset, sg->length, dir, attrs); + if (sg->dma_address == DMA_MAPPING_ERROR) + goto out_unmap; sg_dma_len(sg) = sg->length; } - return nents; -} - -/* Unmap a set of streaming mode DMA translations. Again, cpu read - * rules concerning calls here are the same as for pci_unmap_single() - * above. - */ - -static inline void sw64_direct_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ -} - -/* Return whether the given PCI device DMA address mask can be - * supported properly. - */ - -static int sw64_direct_supported(struct device *dev, u64 mask) -{ - struct pci_dev *pdev = sw64_direct_gendev_to_pci(dev); - struct pci_controller *hose; - - if ((max_low_pfn << PAGE_SHIFT) - 1 <= mask) - return 1; - - /* Check that we have a scatter-gather arena that fits. */ - hose = pdev->sysdata; - if (hose == NULL) { - pr_err("%s: hose does not exist!\n", __func__); - return 0; - } - - /* As last resort try ZONE_DMA. */ - if (MAX_DMA_ADDRESS - PAGE_OFFSET - 1 <= mask) - return 1; - - /* - * Upstream PCI/PCIe bridges or SoC interconnects may not carry - * as many DMA address bits as the device itself supports. 
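With the hand-rolled mask check removed, mask negotiation is delegated to the
generic dma_direct_supported() wired into the ops table just below, so a
driver on this bus keeps using the standard DMA API unchanged. A minimal
sketch of a caller (generic kernel API only; pdev, size and bus_addr are
placeholders):

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;	/* no usable DMA mask */
	cpu_buf = dma_alloc_coherent(&pdev->dev, size, &bus_addr, GFP_KERNEL);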
- */ - if (dev->bus_dma_limit && mask > dev->bus_dma_limit) - return 0; +out_unmap: + sw64_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); return 0; } @@ -278,7 +139,7 @@ const struct dma_map_ops sw64_dma_direct_ops = { .unmap_page = sw64_direct_unmap_page, .map_sg = sw64_direct_map_sg, .unmap_sg = sw64_direct_unmap_sg, - .dma_supported = sw64_direct_supported, + .dma_supported = dma_direct_supported, }; const struct dma_map_ops *dma_ops = &sw64_dma_direct_ops; diff --git a/arch/sw_64/kernel/pci_impl.h b/arch/sw_64/kernel/pci_impl.h index 0cb6d1b1d1e3799cd9ae46dbd52f7fc6be792c63..6025145cb1c5c31ae749d892a92028f75d987e38 100644 --- a/arch/sw_64/kernel/pci_impl.h +++ b/arch/sw_64/kernel/pci_impl.h @@ -6,62 +6,15 @@ #ifndef _SW64_KERNEL_PCI_IMPL_H #define _SW64_KERNEL_PCI_IMPL_H +#include + struct pci_dev; struct pci_controller; -struct pci_iommu_arena; - -/* - * We can't just blindly use 64K for machines with EISA busses; they - * may also have PCI-PCI bridges present, and then we'd configure the - * bridge incorrectly. - * - * Also, we start at 0x8000 or 0x9000, in hopes to get all devices' - * IO space areas allocated *before* 0xC000; this is because certain - * BIOSes (Millennium for one) use PCI Config space "mechanism #2" - * accesses to probe the bus. If a device's registers appear at 0xC000, - * it may see an INx/OUTx at that address during BIOS emulation of the - * VGA BIOS, and some cards, notably Adaptec 2940UW, take mortal offense. - */ - -#define EISA_DEFAULT_IO_BASE 0x9000 /* start above 8th slot */ -#define DEFAULT_IO_BASE 0x0 /* start at 8th slot */ - -/* - * We try to make the DEFAULT_MEM_BASE addresses *always* have more than - * a single bit set. This is so that devices like the broken Myrinet card - * will always have a PCI memory address that will never match a IDSEL - * address in PCI Config space, which can cause problems with early rev cards. - */ - -#define DEFAULT_MEM_BASE 0 - -/* - * A PCI IOMMU allocation arena. There are typically two of these - * regions per bus. - * ??? The 8400 has a 32-byte pte entry, and the entire table apparently - * lives directly on the host bridge (no tlb?). We don't support this - * machine, but if we ever did, we'd need to parameterize all this quite - * a bit further. Probably with per-bus operation tables. - */ - -struct pci_iommu_arena { - spinlock_t lock; - struct pci_controller *hose; -#define IOMMU_INVALID_PTE 0x2 /* 32:63 bits MBZ */ -#define IOMMU_RESERVED_PTE 0xface - unsigned long *ptes; - dma_addr_t dma_base; - unsigned int size; - unsigned int next_entry; - unsigned int align_entry; -}; - /* The hose list. */ extern struct pci_controller *hose_head, **hose_tail; extern void common_init_pci(void); -#define common_swizzle pci_common_swizzle extern struct pci_controller *alloc_pci_controller(void); extern struct resource *alloc_resource(void); diff --git a/arch/sw_64/kernel/perf_event.c b/arch/sw_64/kernel/perf_event.c index dac979d4b09aa679934062ef87cf1456df251573..6e344239917b8d703db5870d33930056aac9fe1e 100644 --- a/arch/sw_64/kernel/perf_event.c +++ b/arch/sw_64/kernel/perf_event.c @@ -6,18 +6,7 @@ */ #include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include +#include /* For tracking PMCs and the hw events they monitor on each CPU. */ struct cpu_hw_events { @@ -255,14 +244,13 @@ static const struct sw64_perf_event *core3_map_cache_event(u64 config) /* * r0xx for counter0, r1yy for counter1. 
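 * For example, config 0x003 ("r003") selects raw event 0x03 on counter 0,
 * while config 0x125 ("r125") selects raw event 0x25 on counter 1, usable
 * as e.g. "perf stat -e r125" (illustrative only; the valid ranges are
 * checked below).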
- * According to the datasheet, 00 <= xx <= 0F, 00 <= yy <= 37 + * According to the datasheet, 00 <= xx <= 0F, 00 <= yy <= 3D */ static bool core3_raw_event_valid(u64 config) { - if ((config >= (PC0_RAW_BASE + PC0_MIN) && config <= (PC0_RAW_BASE + PC0_MAX)) || - (config >= (PC1_RAW_BASE + PC1_MIN) && config <= (PC1_RAW_BASE + PC1_MAX))) { + if ((config >= PC0_RAW_BASE && config <= (PC0_RAW_BASE + PC0_MAX)) || + (config >= PC1_RAW_BASE && config <= (PC1_RAW_BASE + PC1_MAX))) return true; - } pr_info("sw64 pmu: invalid raw event config %#llx\n", config); return false; @@ -309,31 +297,33 @@ static int sw64_perf_event_set_period(struct perf_event *event, { long left = local64_read(&hwc->period_left); long period = hwc->sample_period; - int ret = 0; + int overflow = 0; + unsigned long value; if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; - ret = 1; + overflow = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; - ret = 1; + overflow = 1; } if (left > (long)sw64_pmu->pmc_max_period) left = sw64_pmu->pmc_max_period; - local64_set(&hwc->prev_count, (unsigned long)(-left)); - sw64_write_pmc(idx, (unsigned long)(sw64_pmu->pmc_max_period - left)); + value = sw64_pmu->pmc_max_period - left; + local64_set(&hwc->prev_count, value); + sw64_write_pmc(idx, value); perf_event_update_userpage(event); - return ret; + return overflow; } /* @@ -469,8 +459,8 @@ static void sw64_pmu_start(struct perf_event *event, int flags) hwc->state = 0; - /* counting in all modes, for both counters */ - wrperfmon(PERFMON_CMD_PM, 4); + /* counting in selected modes, for both counters */ + wrperfmon(PERFMON_CMD_PM, hwc->config_base); if (hwc->idx == PERFMON_PC0) { wrperfmon(PERFMON_CMD_EVENT_PC0, hwc->event_base); wrperfmon(PERFMON_CMD_ENABLE, PERFMON_ENABLE_ARGS_PC0); @@ -531,9 +521,12 @@ static int __hw_perf_event_init(struct perf_event *event) const struct sw64_perf_event *event_type; - /* SW64 do not have per-counter usr/os/guest/host bits */ - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_hv || event->attr.exclude_idle || + /* + * SW64 does not have per-counter usr/os/guest/host bits, + * but we can distinguish exclude_user and exclude_kernel by + * sample mode.
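The rewritten sw64_perf_event_set_period() arms an up-counter: it programs pmc_max_period - left so that the PMC overflows after exactly `left` events, and records the same start value in prev_count for the later delta calculation. A worked example, assuming for illustration a 47-bit counter:

/*
 * Worked example (pmc_max_period assumed to be 2^47 - 1, for illustration
 * only; the real width comes from the sw64_pmu description):
 *
 *   left  = 100000                     events until the next sample
 *   value = pmc_max_period - left     start point written via sw64_write_pmc()
 *
 * The counter increments from `value` and wraps after exactly 100000
 * events; the overflow interrupt then re-arms it through the same helper.
 */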
+ */ + if (event->attr.exclude_hv || event->attr.exclude_idle || event->attr.exclude_host || event->attr.exclude_guest) return -EINVAL; @@ -565,6 +558,13 @@ static int __hw_perf_event_init(struct perf_event *event) hwc->event_base = attr->config & 0xff; /* event selector */ } + hwc->config_base = SW64_PERFCTRL_AM; + + if (attr->exclude_user) + hwc->config_base = SW64_PERFCTRL_KM; + if (attr->exclude_kernel) + hwc->config_base = SW64_PERFCTRL_UM; + hwc->config = attr->config; if (!is_sampling_event(event)) @@ -699,6 +699,36 @@ bool valid_dy_addr(unsigned long addr) return ret; } +#ifdef CONFIG_FRAME_POINTER +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + + struct stack_frame frame; + unsigned long __user *fp; + int err; + + perf_callchain_store(entry, regs->pc); + + fp = (unsigned long __user *)regs->r15; + + while (entry->nr < entry->max_stack && (unsigned long)fp < current->mm->start_stack) { + if (!access_ok(fp, sizeof(frame))) + break; + + pagefault_disable(); + err = __copy_from_user_inatomic(&frame, fp, sizeof(frame)); + pagefault_enable(); + + if (err) + break; + + if (valid_utext_addr(frame.return_address) || valid_dy_addr(frame.return_address)) + perf_callchain_store(entry, frame.return_address); + fp = (void __user *)frame.next_frame; + } +} +#else /* !CONFIG_FRAME_POINTER */ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { @@ -711,30 +741,38 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, while (entry->nr < entry->max_stack && usp < current->mm->start_stack) { if (!access_ok(usp, 8)) break; + pagefault_disable(); err = __get_user(user_addr, (unsigned long *)usp); pagefault_enable(); + if (err) break; + if (valid_utext_addr(user_addr) || valid_dy_addr(user_addr)) perf_callchain_store(entry, user_addr); usp = usp + 8; } } +#endif /* CONFIG_FRAME_POINTER */ -void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, - struct pt_regs *regs) +/* + * Gets called by walk_stackframe() for every stackframe. This will be called + * whilst unwinding the stackframe and is like a subroutine return so we use + * the PC.
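The new frame-pointer variant of perf_callchain_user() copies a fixed two-word record from each user frame. Its assumed shape, matching the frame.return_address and frame.next_frame uses above (the real struct stack_frame is defined in the arch headers):

/*
 * Assumed layout of the record copied at each step of the user unwind;
 * field order follows the uses above and the "stl ra,0(sp); stl fp,8(sp)"
 * prologue convention described later in stacktrace.c.
 */
struct stack_frame_sketch {
	unsigned long return_address;	/* saved ra, reported to perf */
	unsigned long next_frame;	/* caller's frame pointer (old r15) */
};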
+ */ +static int callchain_trace(unsigned long pc, void *data) { - unsigned long *sp = (unsigned long *)current_thread_info()->pcb.ksp; - unsigned long addr; + struct perf_callchain_entry_ctx *entry = data; - perf_callchain_store(entry, regs->pc); + perf_callchain_store(entry, pc); + return 0; +} - while (!kstack_end(sp) && entry->nr < entry->max_stack) { - addr = *sp++; - if (__kernel_text_address(addr)) - perf_callchain_store(entry, addr); - } +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + walk_stackframe(NULL, regs, callchain_trace, entry); } /* diff --git a/arch/sw_64/kernel/perf_regs.c b/arch/sw_64/kernel/perf_regs.c index 8eec2179eb863157aaa80f07cd94ba9a719e079d..4c12a2cdf912020c4e49df19f4c9fa99e2ffae36 100644 --- a/arch/sw_64/kernel/perf_regs.c +++ b/arch/sw_64/kernel/perf_regs.c @@ -1,11 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include #include -#include -#include -#include u64 perf_reg_value(struct pt_regs *regs, int idx) { diff --git a/arch/sw_64/kernel/platform.c b/arch/sw_64/kernel/platform.c new file mode 100644 index 0000000000000000000000000000000000000000..f4c880acaa40da8c366675b1b31047d980e3a932 --- /dev/null +++ b/arch/sw_64/kernel/platform.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/setup.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +#include + +static struct platform_device sw64_cpufreq_device = { + .name = "sw64_cpufreq", + .id = -1, +}; + +static int __init sw64_cpufreq_init(void) +{ + return platform_device_register(&sw64_cpufreq_device); +} + +arch_initcall(sw64_cpufreq_init); diff --git a/arch/sw_64/kernel/process.c b/arch/sw_64/kernel/process.c index 8fd493776bec5da719152799f2b7820ae04afdf5..a75ae20205f3215e92257b6edb8075efcc198d70 100644 --- a/arch/sw_64/kernel/process.c +++ b/arch/sw_64/kernel/process.c @@ -3,42 +3,17 @@ * This file handles the architecture-dependent parts of process handling. 
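perf_callchain_kernel() now delegates to walk_stackframe() (added in stacktrace.c further down) and only supplies callchain_trace() as the per-frame callback. Any other consumer can reuse the iterator the same way; a hypothetical depth counter, for illustration:

/* Hypothetical walk_stackframe() consumer, shown only to illustrate the
 * callback contract: return 0 to keep unwinding, non-zero to stop. */
static int count_frame(unsigned long pc, void *data)
{
	unsigned int *depth = data;

	(*depth)++;
	return 0;
}

static unsigned int kernel_stack_depth(struct pt_regs *regs)
{
	unsigned int depth = 0;

	walk_stackframe(NULL, regs, count_frame, &depth);
	return depth;
}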
*/ -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include #include #include -#include -#include #include -#include -#include #include -#include -#include -#include -#include #include -#include +#include #include "proto.h" -#include "pci_impl.h" /* * Power off function, if any @@ -135,7 +110,7 @@ void show_regs(struct pt_regs *regs) { show_regs_print_info(KERN_DEFAULT); - dik_show_regs(regs, NULL); + dik_show_regs(regs); } /* @@ -169,6 +144,13 @@ release_thread(struct task_struct *dead_task) { } +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + fpstate_save(src); + *dst = *src; + return 0; +} + /* * Copy architecture-specific thread state */ @@ -184,19 +166,17 @@ copy_thread(unsigned long clone_flags, unsigned long usp, struct thread_info *childti = task_thread_info(p); struct pt_regs *childregs = task_pt_regs(p); struct pt_regs *regs = current_pt_regs(); - struct switch_stack *childstack, *stack; - childstack = ((struct switch_stack *) childregs) - 1; - childti->pcb.ksp = (unsigned long) childstack; + childti->pcb.ksp = (unsigned long) childregs; childti->pcb.flags = 7; /* set FEN, clear everything else */ + p->thread.sp = (unsigned long) childregs; if (unlikely(p->flags & PF_KTHREAD)) { /* kernel thread */ - memset(childstack, 0, - sizeof(struct switch_stack) + sizeof(struct pt_regs)); - childstack->r26 = (unsigned long) ret_from_kernel_thread; - childstack->r9 = usp; /* function */ - childstack->r10 = kthread_arg; + memset(childregs, 0, sizeof(struct pt_regs)); + p->thread.ra = (unsigned long) ret_from_kernel_thread; + p->thread.s[0] = usp; /* function */ + p->thread.s[1] = kthread_arg; childti->pcb.usp = 0; return 0; } @@ -215,136 +195,36 @@ copy_thread(unsigned long clone_flags, unsigned long usp, *childregs = *regs; childregs->r0 = 0; childregs->r19 = 0; - stack = ((struct switch_stack *) regs) - 1; - *childstack = *stack; - p->thread = current->thread; - childstack->r26 = (unsigned long) ret_from_fork; + p->thread.ra = (unsigned long) ret_from_fork; return 0; } /* * Fill in the user structure for a ELF core dump. + * @regs: should be signal_pt_regs() or task_pt_regs(task) */ -void -dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti) +void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *regs) { - /* switch stack follows right below pt_regs: */ - struct switch_stack *sw = ((struct switch_stack *) pt) - 1; - - dest[0] = pt->r0; - dest[1] = pt->r1; - dest[2] = pt->r2; - dest[3] = pt->r3; - dest[4] = pt->r4; - dest[5] = pt->r5; - dest[6] = pt->r6; - dest[7] = pt->r7; - dest[8] = pt->r8; - dest[9] = sw->r9; - dest[10] = sw->r10; - dest[11] = sw->r11; - dest[12] = sw->r12; - dest[13] = sw->r13; - dest[14] = sw->r14; - dest[15] = sw->r15; - dest[16] = pt->r16; - dest[17] = pt->r17; - dest[18] = pt->r18; - dest[19] = pt->r19; - dest[20] = pt->r20; - dest[21] = pt->r21; - dest[22] = pt->r22; - dest[23] = pt->r23; - dest[24] = pt->r24; - dest[25] = pt->r25; - dest[26] = pt->r26; - dest[27] = pt->r27; - dest[28] = pt->r28; - dest[29] = pt->gp; - dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp; - dest[31] = pt->pc; + int i; + struct thread_info *ti; - /* Once upon a time this was the PS value. Which is stupid - * since that is always 8 for usermode. Usurped for the more - * useful value of the thread's UNIQUE field.
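With switch_stack gone, the kernel-thread case of copy_thread() above parks everything in struct thread_struct: ret_from_kernel_thread in thread.ra and the payload in the callee-saved slots. Roughly what the first switch into such a task ends up doing, assuming ret_from_kernel_thread dispatches the way its counterparts on other architectures do:

/*
 * Sketch of the bootstrap arranged by copy_thread() for PF_KTHREAD tasks;
 * the real dispatch happens in ret_from_kernel_thread (assembly), this is
 * only its assumed C equivalent.
 */
static void kthread_bootstrap_sketch(struct task_struct *p)
{
	int (*fn)(void *) = (int (*)(void *))p->thread.s[0];	/* usp slot */
	void *arg = (void *)p->thread.s[1];			/* kthread_arg */

	fn(arg);
}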
- */ - dest[32] = ti->pcb.unique; -} -EXPORT_SYMBOL(dump_elf_thread); + ti = (void *)((__u64)regs & ~(THREAD_SIZE - 1)); -int -dump_elf_task(elf_greg_t *dest, struct task_struct *task) -{ - dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task)); - return 1; + for (i = 0; i < 30; i++) + dest[i] = *(__u64 *)((void *)regs + regoffsets[i]); + dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp; + dest[31] = regs->pc; + dest[32] = ti->pcb.unique; } -EXPORT_SYMBOL(dump_elf_task); +EXPORT_SYMBOL(sw64_elf_core_copy_regs); -int -dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task) +/* Fill in the fpu structure for a core dump. */ +int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) { - memcpy(dest, &task->thread.ctx_fp, 32 * 8); + memcpy(fpu, &current->thread.fpstate, sizeof(*fpu)); return 1; } -EXPORT_SYMBOL(dump_elf_task_fp); - -/* - * Return saved PC of a blocked thread. This assumes the frame - * pointer is the 6th saved long on the kernel stack and that the - * saved return address is the first long in the frame. This all - * holds provided the thread blocked through a call to schedule() ($15 - * is the frame pointer in schedule() and $15 is saved at offset 48 by - * entry.S:do_switch_stack). - * - * Under heavy swap load I've seen this lose in an ugly way. So do - * some extra sanity checking on the ranges we expect these pointers - * to be in so that we can fail gracefully. This is just for ps after - * all. -- r~ - */ - -unsigned long -thread_saved_pc(struct task_struct *t) -{ - unsigned long base = (unsigned long)task_stack_page(t); - unsigned long fp, sp = task_thread_info(t)->pcb.ksp; - - if (sp > base && sp+6*8 < base + 16*1024) { - fp = ((unsigned long *)sp)[6]; - if (fp > sp && fp < base + 16*1024) - return *(unsigned long *)fp; - } - - return 0; -} - -unsigned long -get_wchan(struct task_struct *p) -{ - unsigned long schedule_frame; - unsigned long pc, base, sp; - - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - /* - * This one depends on the frame size of schedule(). Do a - * "disass schedule" in gdb to find the frame size. Also, the - * code assumes that sleep_on() follows immediately after - * interruptible_sleep_on() and that add_timer() follows - * immediately after interruptible_sleep(). Ugly, isn't it? - * Maybe adding a wchan field to task_struct would be better, - * after all...
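sw64_elf_core_copy_regs() leans on regoffsets[], the byte-offset table defined in ptrace.c below: register i lives regoffsets[i] bytes into struct pt_regs, so a full register-file copy becomes a loop instead of thirty assignments. The access pattern in isolation:

/* The regoffsets[] access pattern shared by the core-dump and regset code:
 * regoffsets[i] == offsetof(struct pt_regs, r<i>), valid for i < 30. */
static inline __u64 sketch_get_gpr(struct pt_regs *regs, int i)
{
	return *(__u64 *)((void *)regs + regoffsets[i]);
}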
- */ - - pc = thread_saved_pc(p); - if (in_sched_functions(pc)) { - base = (unsigned long)task_stack_page(p); - sp = task_thread_info(p)->pcb.ksp; - schedule_frame = ((unsigned long *)sp)[6]; - if (schedule_frame > sp && schedule_frame < base + 16*1024) - return ((unsigned long *)schedule_frame)[12]; - } - return pc; -} +EXPORT_SYMBOL(dump_fpu); unsigned long arch_randomize_brk(struct mm_struct *mm) { diff --git a/arch/sw_64/kernel/proto.h b/arch/sw_64/kernel/proto.h index 1a729a8f21c33f902fcf728fd4e9f0ebc1e98131..189074f8bd5c7892afe3a1f6720a9322a3d3b5ba 100644 --- a/arch/sw_64/kernel/proto.h +++ b/arch/sw_64/kernel/proto.h @@ -5,14 +5,15 @@ #include #include #include +#include /* ptrace.c */ extern int ptrace_set_bpt(struct task_struct *child); extern int ptrace_cancel_bpt(struct task_struct *child); /* traps.c */ -extern void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15); -extern void die_if_kernel(char *str, struct pt_regs *regs, long err, unsigned long *r9_15); +extern void dik_show_regs(struct pt_regs *regs); +extern void die_if_kernel(char *str, struct pt_regs *regs, long err); /* timer.c */ extern void setup_timer(void); diff --git a/arch/sw_64/kernel/ptrace.c b/arch/sw_64/kernel/ptrace.c index 5f29c500c8b1913c9952f3d7b6077bffbe3c7dad..bdbd0d97a130910a170a836b67ee2f82b30ec354 100644 --- a/arch/sw_64/kernel/ptrace.c +++ b/arch/sw_64/kernel/ptrace.c @@ -5,48 +5,20 @@ /* mangled further by Bob Manson (manson@santafe.edu) */ /* more mutilation by David Mosberger (davidm@azstarnet.com) */ -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include #include -#include +#include +#include +#include + +#include +#include -#include -#include -#include #include "proto.h" #define CREATE_TRACE_POINTS #include -#define DEBUG DBG_MEM -#undef DEBUG - -#define DEBUG 0 - -#ifdef DEBUG -enum { - DBG_MEM = (1 << 0), - DBG_BPT = (1 << 1), - DBG_MEM_ALL = (1 << 2) -}; -#define DBG(fac, args) \ -{ \ - if ((fac) & DEBUG) \ - printk args; \ -} -#else -#define DBG(fac, args) -#endif - #define BREAKINST 0x00000080 /* sys_call bpt */ /* @@ -65,10 +37,6 @@ enum { * | frame generated by SAVE_ALL | | * | | v * +================================+ - * | | ^ - * | frame saved by do_switch_stack | | struct switch_stack - * | | v - * +================================+ */ /* @@ -88,27 +56,18 @@ enum { REG_GP = 29 }; -#define PT_REG(reg) \ - (PAGE_SIZE * 2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg)) - -#define SW_REG(reg) \ - (PAGE_SIZE * 2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \ - + offsetof(struct switch_stack, reg)) - -#define FP_REG(fp_regno, vector_regno) \ - (fp_regno * 32 + vector_regno * 8) - -static int regoff[] = { - PT_REG(r0), PT_REG(r1), PT_REG(r2), PT_REG(r3), - PT_REG(r4), PT_REG(r5), PT_REG(r6), PT_REG(r7), - PT_REG(r8), SW_REG(r9), SW_REG(r10), SW_REG(r11), - SW_REG(r12), SW_REG(r13), SW_REG(r14), SW_REG(r15), - PT_REG(r16), PT_REG(r17), PT_REG(r18), PT_REG(r19), - PT_REG(r20), PT_REG(r21), PT_REG(r22), PT_REG(r23), - PT_REG(r24), PT_REG(r25), PT_REG(r26), PT_REG(r27), - PT_REG(r28), PT_REG(gp), -1, -1 +#define R(x) ((size_t) &((struct pt_regs *)0)->x) + +short regoffsets[32] = { + R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8), + R(r9), R(r10), R(r11), R(r12), R(r13), R(r14), R(r15), + R(r16), R(r17), R(r18), + R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26), + R(r27), R(r28), R(gp), 0, 0 }; +#undef R + #define PCB_OFF(var) offsetof(struct pcb_struct, var) static int pcboff[] = { @@ 
-130,8 +89,8 @@ static unsigned long zero; static unsigned long * get_reg_addr(struct task_struct *task, unsigned long regno) { - unsigned long *addr; - int fp_regno, vector_regno; + void *addr; + int fno, vno; switch (regno) { case USP: @@ -144,12 +103,11 @@ get_reg_addr(struct task_struct *task, unsigned long regno) addr = (void *)task_thread_info(task) + pcboff[regno]; break; case REG_BASE ... REG_END: - addr = (void *)task_thread_info(task) + regoff[regno]; + addr = (void *)task_pt_regs(task) + regoffsets[regno]; break; case FPREG_BASE ... FPREG_END: - fp_regno = regno - FPREG_BASE; - vector_regno = 0; - addr = (void *)((unsigned long)&task->thread.ctx_fp + FP_REG(fp_regno, vector_regno)); + fno = regno - FPREG_BASE; + addr = &task->thread.fpstate.fp[fno].v[0]; break; case VECREG_BASE ... VECREG_END: /* @@ -160,15 +118,15 @@ get_reg_addr(struct task_struct *task, unsigned long regno) addr = &zero; break; } - fp_regno = (regno - VECREG_BASE) & 0x1f; - vector_regno = 1 + ((regno - VECREG_BASE) >> 5); - addr = (void *)((unsigned long)&task->thread.ctx_fp + FP_REG(fp_regno, vector_regno)); + fno = (regno - VECREG_BASE) & 0x1f; + vno = 1 + ((regno - VECREG_BASE) >> 5); + addr = &task->thread.fpstate.fp[fno].v[vno]; break; case FPCR: - addr = (void *)&task->thread.fpcr; + addr = &task->thread.fpstate.fpcr; break; case PC: - addr = (void *)task_thread_info(task) + PT_REG(pc); + addr = (void *)task_pt_regs(task) + PT_REGS_PC; break; default: addr = &zero; @@ -243,15 +201,12 @@ ptrace_set_bpt(struct task_struct *child) if (displ) /* guard against unoptimized code */ task_thread_info(child)->bpt_addr[nsaved++] = pc + 4 + displ; - DBG(DBG_BPT, ("execing branch\n")); /*call ret jmp*/ } else if (op_code >= 0x1 && op_code <= 0x3) { reg_b = (insn >> 16) & 0x1f; task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b); - DBG(DBG_BPT, ("execing jump\n")); } else { task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; - DBG(DBG_BPT, ("execing normal insn\n")); } /* install breakpoints: */ @@ -261,8 +216,6 @@ ptrace_set_bpt(struct task_struct *child) if (res < 0) return res; task_thread_info(child)->bpt_insn[i] = insn; - DBG(DBG_BPT, (" -> next_pc=%lx\n", - task_thread_info(child)->bpt_addr[i])); res = write_int(child, task_thread_info(child)->bpt_addr[i], BREAKINST); if (res < 0) @@ -316,114 +269,103 @@ void ptrace_disable(struct task_struct *child) user_disable_single_step(child); } -int ptrace_getregs(struct task_struct *child, __s64 __user *data) +static int gpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) { - int ret, retval = 0; - int i; - unsigned long regval; + struct pt_regs *regs; + struct user_pt_regs uregs; + int i, ret; - if (!access_ok(data, sizeof(long) * 33)) - return -EIO; + regs = task_pt_regs(target); + for (i = 0; i < 30; i++) + uregs.regs[i] = *(__u64 *)((void *)regs + regoffsets[i]); + + uregs.regs[30] = task_thread_info(target)->pcb.usp; + uregs.pc = regs->pc; + uregs.pstate = regs->ps; + + ret = membuf_write(&to, &uregs, sizeof(uregs)); - /* r0-r15 */ - for (i = 0; i < 16; i++) { - regval = get_reg(child, i); - retval |= __put_user((long)regval, data + i); - } - /* r19-r28 */ - for (i = 19; i < 29; i++) { - regval = get_reg(child, i); - retval |= __put_user((long)regval, data + i - 3); - } - /*SP, PS ,PC,GP*/ - retval |= __put_user((long)(get_reg(child, REG_SP)), data + EF_SP); - retval |= __put_user((long)(get_reg(child, REG_PS)), data + EF_PS); - retval |= __put_user((long)(get_reg(child, REG_PC)), data + EF_PC); - retval |= 
__put_user((long)(get_reg(child, REG_GP)), data + EF_GP); - /* r16-r18 */ - retval |= __put_user((long)(get_reg(child, 16)), data + EF_A0); - retval |= __put_user((long)(get_reg(child, 17)), data + EF_A1); - retval |= __put_user((long)(get_reg(child, 18)), data + EF_A2); - - ret = retval ? -EIO : 0; return ret; } -int ptrace_setregs(struct task_struct *child, __s64 __user *data) +static int gpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) { - int ret, retval = 0; - int i; - unsigned long regval; + struct pt_regs *regs; + struct user_pt_regs uregs; + int i, ret; - if (!access_ok(data, sizeof(long) * 33)) - return -EIO; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &uregs, 0, sizeof(uregs)); + if (ret) + return ret; + + regs = task_pt_regs(target); + for (i = 0; i < 30; i++) + *(__u64 *)((void *)regs + regoffsets[i]) = uregs.regs[i]; + + task_thread_info(target)->pcb.usp = uregs.regs[30]; + regs->pc = uregs.pc; + regs->ps = uregs.pstate; - /* r0-r15 */ - for (i = 0; i < 16; i++) { - retval |= __get_user(regval, data + i); - ret = put_reg(child, i, regval); - } - /* r19-r28 */ - for (i = 19; i < 29; i++) { - retval |= __get_user(regval, data + i - 3); - ret = put_reg(child, i, regval); - } - /*SP, PS ,PC,GP*/ - retval |= __get_user(regval, data + EF_SP); - ret = put_reg(child, REG_SP, regval); - retval |= __get_user(regval, data + EF_PS); - ret = put_reg(child, REG_PS, regval); - retval |= __get_user(regval, data + EF_PC); - ret = put_reg(child, REG_PC, regval); - retval |= __get_user(regval, data + EF_GP); - ret = put_reg(child, REG_GP, regval); - /* r16-r18 */ - retval |= __get_user(regval, data + EF_A0); - ret = put_reg(child, 16, regval); - retval |= __get_user(regval, data + EF_A1); - ret = put_reg(child, 17, regval); - retval |= __get_user(regval, data + EF_A2); - ret = put_reg(child, 18, regval); - - ret = retval ? -EIO : 0; return 0; } -int ptrace_getfpregs(struct task_struct *child, __s64 __user *data) +static int fpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) { - int ret, retval = 0; - int i; - unsigned long regval; - - if (!access_ok(data, sizeof(long) * 32)) - return -EIO; - /* fp0-fp31 */ - for (i = 0; i < 32; i++) { - regval = get_reg(child, REG_F0 + i); - retval |= __put_user((long)regval, data + i); - } - - ret = retval ? 
-EIO : 0; - return 0; + return membuf_write(&to, &target->thread.fpstate, + sizeof(struct user_fpsimd_state)); } -int ptrace_setfpregs(struct task_struct *child, __s64 __user *data) +static int fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) { - int ret, retval = 0; - int i; - unsigned long regval; + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpstate, 0, + sizeof(struct user_fpsimd_state)); +} - if (!access_ok(data, sizeof(long) * 32)) - return -EIO; +enum sw64_regset { + REGSET_GPR, + REGSET_FPR, +}; - /* fp0-fp31 */ - for (i = 0; i < 32; i++) { - retval |= __get_user(regval, data + i); - ret = put_reg(child, REG_F0 + i, regval); - } +static const struct user_regset sw64_regsets[] = { + [REGSET_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(elf_greg_t), + .align = sizeof(elf_greg_t), + .regset_get = gpr_get, + .set = gpr_set + }, + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fpsimd_state) / sizeof(u64), + .size = sizeof(u64), + .align = sizeof(u64), + .regset_get = fpr_get, + .set = fpr_set + }, +}; - return ret; +static const struct user_regset_view user_sw64_view = { + .name = "sw64", .e_machine = EM_SW64, + .regsets = sw64_regsets, .n = ARRAY_SIZE(sw64_regsets) +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_sw64_view; } long arch_ptrace(struct task_struct *child, long request, @@ -432,7 +374,6 @@ long arch_ptrace(struct task_struct *child, long request, unsigned long tmp; size_t copied; long ret; - void __user *datavp = (void __user *) data; switch (request) { /* When I and D space are separate, these will need to be fixed. */ @@ -451,7 +392,6 @@ long arch_ptrace(struct task_struct *child, long request, case PTRACE_PEEKUSR: force_successful_syscall_return(); ret = get_reg(child, addr); - DBG(DBG_MEM, ("peek $%lu->%#lx\n", addr, ret)); break; /* When I and D space are separate, this will have to be fixed. 
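Dropping the PTRACE_GETREGS family is safe because the regsets registered above plug into the generic PTRACE_GETREGSET/PTRACE_SETREGSET path through task_user_regset_view(). A hypothetical user-space counterpart (not part of this patch) fetching the GPRs through the new interface:

/* Hypothetical user-space sketch: read the tracee's GPRs via the
 * NT_PRSTATUS regset registered above. */
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static long fetch_gprs(pid_t pid, struct user_pt_regs *uregs)
{
	struct iovec iov = {
		.iov_base = uregs,
		.iov_len = sizeof(*uregs),
	};

	/* NT_PRSTATUS selects the REGSET_GPR entry of the regset view */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
}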
*/ @@ -461,21 +401,8 @@ long arch_ptrace(struct task_struct *child, long request, break; case PTRACE_POKEUSR: /* write the specified register */ - DBG(DBG_MEM, ("poke $%lu<-%#lx\n", addr, data)); ret = put_reg(child, addr, data); break; - case PTRACE_GETREGS: - ret = ptrace_getregs(child, datavp); - break; - case PTRACE_SETREGS: - ret = ptrace_setregs(child, datavp); - break; - case PTRACE_GETFPREGS: - ret = ptrace_getfpregs(child, datavp); - break; - case PTRACE_SETFPREGS: - ret = ptrace_setfpregs(child, datavp); - break; default: ret = ptrace_request(child, request, addr, data); break; @@ -560,7 +487,7 @@ int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_r case MMCSR__DA_MATCH: case MMCSR__DV_MATCH: case MMCSR__DAV_MATCH: - dik_show_regs(regs, (unsigned long *)regs-15); + dik_show_regs(regs); if (!(current->ptrace & PT_PTRACED)) { printk(" pid %d %s not be ptraced, return\n", current->pid, current->comm); @@ -647,8 +574,8 @@ struct pt_regs_offset { int offset; }; -#define REG_OFFSET_NAME(reg, r) { \ - .name = #reg, \ +#define REG_OFFSET_NAME(r) { \ + .name = #r, \ .offset = offsetof(struct pt_regs, r) \ } @@ -658,37 +585,45 @@ struct pt_regs_offset { } static const struct pt_regs_offset regoffset_table[] = { - REG_OFFSET_NAME(r0, r0), - REG_OFFSET_NAME(r1, r1), - REG_OFFSET_NAME(r2, r2), - REG_OFFSET_NAME(r3, r3), - REG_OFFSET_NAME(r4, r4), - REG_OFFSET_NAME(r5, r5), - REG_OFFSET_NAME(r6, r6), - REG_OFFSET_NAME(r7, r7), - REG_OFFSET_NAME(r8, r8), - REG_OFFSET_NAME(r19, r19), - REG_OFFSET_NAME(r20, r20), - REG_OFFSET_NAME(r21, r21), - REG_OFFSET_NAME(r22, r22), - REG_OFFSET_NAME(r23, r23), - REG_OFFSET_NAME(r24, r24), - REG_OFFSET_NAME(r25, r25), - REG_OFFSET_NAME(r26, r26), - REG_OFFSET_NAME(r27, r27), - REG_OFFSET_NAME(r28, r28), - REG_OFFSET_NAME(hae, hae), - REG_OFFSET_NAME(trap_a0, trap_a0), - REG_OFFSET_NAME(trap_a1, trap_a1), - REG_OFFSET_NAME(trap_a2, trap_a2), - REG_OFFSET_NAME(ps, ps), - REG_OFFSET_NAME(pc, pc), - REG_OFFSET_NAME(gp, gp), - REG_OFFSET_NAME(r16, r16), - REG_OFFSET_NAME(r17, r17), - REG_OFFSET_NAME(r18, r18), + REG_OFFSET_NAME(r0), + REG_OFFSET_NAME(r1), + REG_OFFSET_NAME(r2), + REG_OFFSET_NAME(r3), + REG_OFFSET_NAME(r4), + REG_OFFSET_NAME(r5), + REG_OFFSET_NAME(r6), + REG_OFFSET_NAME(r7), + REG_OFFSET_NAME(r8), + REG_OFFSET_NAME(r9), + REG_OFFSET_NAME(r10), + REG_OFFSET_NAME(r11), + REG_OFFSET_NAME(r12), + REG_OFFSET_NAME(r13), + REG_OFFSET_NAME(r14), + REG_OFFSET_NAME(r15), + REG_OFFSET_NAME(r19), + REG_OFFSET_NAME(r20), + REG_OFFSET_NAME(r21), + REG_OFFSET_NAME(r22), + REG_OFFSET_NAME(r23), + REG_OFFSET_NAME(r24), + REG_OFFSET_NAME(r25), + REG_OFFSET_NAME(r26), + REG_OFFSET_NAME(r27), + REG_OFFSET_NAME(r28), + REG_OFFSET_NAME(hae), + REG_OFFSET_NAME(trap_a0), + REG_OFFSET_NAME(trap_a1), + REG_OFFSET_NAME(trap_a2), + REG_OFFSET_NAME(ps), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(gp), + REG_OFFSET_NAME(r16), + REG_OFFSET_NAME(r17), + REG_OFFSET_NAME(r18), REG_OFFSET_END, }; + /** * regs_query_register_offset() - query register offset from its name * @name: the name of a register @@ -705,3 +640,29 @@ int regs_query_register_offset(const char *name) return roff->offset; return -EINVAL; } + +static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + unsigned long ksp = kernel_stack_pointer(regs); + + return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs:pt_regs which contains kernel stack pointer. + * @n:stack entry number. 
+ * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specified by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long addr; + + addr = kernel_stack_pointer(regs) + n * sizeof(long); + if (!regs_within_kernel_stack(regs, addr)) + return 0; + return *(unsigned long *)addr; +} diff --git a/arch/sw_64/kernel/relocate.c b/arch/sw_64/kernel/relocate.c index 36b16d84d5ab38150eafe5cec4614a25bcc8b669..fe403f9c70c74f35be4fa575ac3d3e743ff5b0e5 100644 --- a/arch/sw_64/kernel/relocate.c +++ b/arch/sw_64/kernel/relocate.c @@ -9,21 +9,12 @@ * Copyright (C) 2019 He Sheng * Authors: He Sheng (hesheng05@gmail.com) */ -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include #include #include +#include + #define INITRD_ADDR 0x3000000UL #define KTEXT_MAX 0xffffffffa0000000UL #define RELOCATED(x) ((void *)((unsigned long)x + offset)) diff --git a/arch/sw_64/kernel/segvdbg.c b/arch/sw_64/kernel/segvdbg.c index aee4b38630724595e58827cd3a797429578078ed..5b8a638bf8b93f6aa217da97efb78647e2177256 100644 --- a/arch/sw_64/kernel/segvdbg.c +++ b/arch/sw_64/kernel/segvdbg.c @@ -9,9 +9,7 @@ */ #include -#include -#include -#include + #include extern bool segv_debug_enabled; diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c index cc33a6f3b4f96f804ce972548393b4263fe2baea..0e93643539d32c770a9b7b07f23a469475fa0b47 100644 --- a/arch/sw_64/kernel/setup.c +++ b/arch/sw_64/kernel/setup.c @@ -9,31 +9,13 @@ * Bootup setup stuff. */ -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include #include -#include -#include -#include -#include -#include -#include #include -#include -#include #include #include -#include -#include #ifdef CONFIG_MAGIC_SYSRQ #include #include @@ -41,26 +23,12 @@ #ifdef CONFIG_DEBUG_FS #include #endif -#include -#include -#include #include #include -#include -#include #include #include -#include -#include + #include -#include -#include -#include -#include -#include -#include -#include -#include #include #include @@ -74,14 +42,15 @@ #define DBGDCONT(args...)
#endif + DEFINE_PER_CPU(unsigned long, hard_node_id) = { 0 }; #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) struct cma *sw64_kvm_cma; EXPORT_SYMBOL(sw64_kvm_cma); -static phys_addr_t size_cmdline; -static phys_addr_t base_cmdline; +static phys_addr_t kvm_mem_size; +static phys_addr_t kvm_mem_base; struct gen_pool *sw64_kvm_pool; EXPORT_SYMBOL(sw64_kvm_pool); @@ -133,6 +102,9 @@ static struct resource bss_resource = { struct cpuinfo_sw64 cpu_data[NR_CPUS]; EXPORT_SYMBOL(cpu_data); +DEFINE_STATIC_KEY_TRUE(run_mode_host_key); +DEFINE_STATIC_KEY_FALSE(run_mode_guest_key); +DEFINE_STATIC_KEY_FALSE(run_mode_emul_key); struct cpu_desc_t cpu_desc; struct socket_desc_t socket_desc[MAX_NUMSOCKETS]; int memmap_nr; @@ -175,7 +147,8 @@ static void __init kexec_control_page_init(void) { phys_addr_t addr; - addr = memblock_alloc_base(KEXEC_CONTROL_PAGE_SIZE, PAGE_SIZE, KTEXT_MAX); + addr = memblock_phys_alloc_range(KEXEC_CONTROL_PAGE_SIZE, PAGE_SIZE, + 0, KTEXT_MAX); kexec_control_page = (void *)(__START_KERNEL_map + addr); } @@ -356,7 +329,7 @@ static void * __init move_initrd(unsigned long mem_limit) static int __init memmap_range_valid(phys_addr_t base, phys_addr_t size) { - if (phys_to_virt(base + size - 1) < phys_to_virt(PFN_PHYS(max_low_pfn))) + if ((base + size) <= memblock_end_of_DRAM()) return true; else return false; @@ -367,6 +340,7 @@ void __init process_memmap(void) static int i; // Make it static so we won't start over again every time. int ret; phys_addr_t base, size; + unsigned long dma_end __maybe_unused = virt_to_phys((void *)MAX_DMA_ADDRESS); if (!memblock_initialized) return; @@ -378,24 +352,27 @@ void __init process_memmap(void) case memmap_reserved: if (!memmap_range_valid(base, size)) { pr_err("reserved memmap region [mem %#018llx-%#018llx] extends beyond end of memory (%#018llx)\n", - base, base + size - 1, PFN_PHYS(max_low_pfn)); + base, base + size - 1, memblock_end_of_DRAM()); } else { pr_info("reserved memmap region [mem %#018llx-%#018llx]\n", base, base + size - 1); - ret = memblock_remove(base, size); + ret = memblock_mark_nomap(base, size); if (ret) pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", base, base + size - 1); + else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (base < dma_end)) + pr_warn("memmap region [mem %#018llx-%#018llx] overlapped with DMA32 region\n", + base, base + size - 1); } break; case memmap_pci: if (!memmap_range_valid(base, size)) { pr_info("pci memmap region [mem %#018llx-%#018llx] extends beyond end of memory (%#018llx)\n", - base, base + size - 1, PFN_PHYS(max_low_pfn)); + base, base + size - 1, memblock_end_of_DRAM()); } else { pr_info("pci memmap region [mem %#018llx-%#018llx]\n", base, base + size - 1); - ret = memblock_remove(base, size); + ret = memblock_mark_nomap(base, size); if (ret) pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", base, base + size - 1); @@ -403,10 +380,12 @@ void __init process_memmap(void) break; case memmap_initrd: if (!memmap_range_valid(base, size)) { - base = (unsigned long) move_initrd(PFN_PHYS(max_low_pfn)); + phys_addr_t old_base = base; + + base = (unsigned long) move_initrd(memblock_end_of_DRAM()); if (!base) { pr_err("initrd memmap region [mem %#018llx-%#018llx] extends beyond end of memory (%#018llx)\n", - base, base + size - 1, PFN_PHYS(max_low_pfn)); + old_base, old_base + size - 1, memblock_end_of_DRAM()); } else { memmap_map[i].addr = base; pr_info("initrd memmap region [mem %#018llx-%#018llx]\n", @@ -490,7 +469,6 @@ insert_ram_resource(u64 start, u64 end, bool reserved) 
static int __init request_standard_resources(void) { - int i; struct memblock_region *mblk; extern char _text[], _etext[]; @@ -498,17 +476,12 @@ static int __init request_standard_resources(void) extern char __bss_start[], __bss_stop[]; for_each_mem_region(mblk) { - insert_ram_resource(mblk->base, mblk->base + mblk->size - 1, 0); - } - - for (i = 0; i < memmap_nr; i++) { - switch (memmap_map[i].type) { - case memmap_crashkernel: - break; - default: - insert_ram_resource(memmap_map[i].addr, - memmap_map[i].addr + memmap_map[i].size - 1, 1); - } + if (!memblock_is_nomap(mblk)) + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 0); + else + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 1); } code_resource.start = __pa_symbol(_text); @@ -587,16 +560,20 @@ static void __init setup_machine_fdt(void) #ifdef CONFIG_USE_OF void *dt_virt; const char *name; - unsigned long phys_addr; /* Give a chance to select kernel builtin DTB firstly */ if (IS_ENABLED(CONFIG_SW64_BUILTIN_DTB)) dt_virt = (void *)__dtb_start; - else + else { dt_virt = (void *)sunway_boot_params->dtb_start; + if (virt_to_phys(dt_virt) < virt_to_phys(__bss_stop)) { + pr_emerg("BUG: DTB has been corrupted by kernel image!\n"); + while (true) + cpu_relax(); + } + } - phys_addr = __phys_addr((unsigned long)dt_virt); - if (!phys_addr_valid(phys_addr) || + if (!phys_addr_valid(virt_to_phys(dt_virt)) || !early_init_dt_scan(dt_virt)) { pr_crit("\n" "Error: invalid device tree blob at virtual address %px\n" @@ -639,10 +616,25 @@ static void __init setup_cpu_info(void) cpu_desc.arch_rev = CPUID_ARCH_REV(val); cpu_desc.pa_bits = CPUID_PA_BITS(val); cpu_desc.va_bits = CPUID_VA_BITS(val); - cpu_desc.run_mode = HOST_MODE; - if (*(unsigned long *)MMSIZE) - cpu_desc.run_mode = GUEST_MODE; + if (*(unsigned long *)MMSIZE) { + static_branch_disable(&run_mode_host_key); + if (*(unsigned long *)MMSIZE & EMUL_FLAG) { + pr_info("run mode: emul\n"); + static_branch_disable(&run_mode_guest_key); + static_branch_enable(&run_mode_emul_key); + + } else { + pr_info("run mode: guest\n"); + static_branch_enable(&run_mode_guest_key); + static_branch_disable(&run_mode_emul_key); + } + } else { + pr_info("run mode: host\n"); + static_branch_enable(&run_mode_host_key); + static_branch_disable(&run_mode_guest_key); + static_branch_disable(&run_mode_emul_key); + } for (i = 0; i < VENDOR_ID_MAX; i++) { val = cpuid(GET_VENDOR_ID, i); @@ -729,17 +721,17 @@ static int __init early_kvm_reserved_mem(char *p) return -EINVAL; } - size_cmdline = memparse(p, &p); + kvm_mem_size = memparse(p, &p); if (*p != '@') return -EINVAL; - base_cmdline = memparse(p + 1, &p); + kvm_mem_base = memparse(p + 1, &p); return 0; } early_param("kvm_mem", early_kvm_reserved_mem); void __init sw64_kvm_reserve(void) { - kvm_cma_declare_contiguous(base_cmdline, size_cmdline, 0, + kvm_cma_declare_contiguous(kvm_mem_base, kvm_mem_size, 0, PAGE_SIZE, 0, "sw64_kvm_cma", &sw64_kvm_cma); } #endif @@ -747,6 +739,7 @@ void __init sw64_kvm_reserve(void) void __init setup_arch(char **cmdline_p) { + jump_label_init(); setup_cpu_info(); sw64_chip->fixup(); sw64_chip_init->fixup(); @@ -754,7 +747,6 @@ setup_arch(char **cmdline_p) show_socket_mem_layout(); sw64_chip_init->early_init.setup_core_start(&core_start); - jump_label_init(); setup_sched_clock(); #ifdef CONFIG_GENERIC_SCHED_CLOCK sw64_sched_clock_init(); @@ -913,7 +905,7 @@ show_cpuinfo(struct seq_file *f, void *slot) "physical id\t: %d\n" "bogomips\t: %lu.%02lu\n", cpu_freq, cpu_data[i].tcache.size >> 10, - cpu_to_rcid(i), + 
cpu_topology[i].package_id, loops_per_jiffy / (500000/HZ), (loops_per_jiffy / (5000/HZ)) % 100); @@ -938,6 +930,7 @@ c_start(struct seq_file *f, loff_t *pos) static void * c_next(struct seq_file *f, void *v, loff_t *pos) { + (*pos)++; return NULL; } @@ -983,7 +976,7 @@ static int __init debugfs_sw64(void) { struct dentry *d; - d = debugfs_create_dir("sw_64", NULL); + d = debugfs_create_dir("sw64", NULL); if (!d) return -ENOMEM; sw64_debugfs_dir = d; @@ -1011,14 +1004,14 @@ static int __init sw64_kvm_pool_init(void) if (!sw64_kvm_cma) goto out; - kvm_pool_virt = (unsigned long)base_cmdline; + kvm_pool_virt = (unsigned long)kvm_mem_base; sw64_kvm_pool = gen_pool_create(PAGE_SHIFT, -1); if (!sw64_kvm_pool) goto out; - status = gen_pool_add_virt(sw64_kvm_pool, kvm_pool_virt, base_cmdline, - size_cmdline, -1); + status = gen_pool_add_virt(sw64_kvm_pool, kvm_pool_virt, kvm_mem_base, + kvm_mem_size, -1); if (status < 0) { pr_err("failed to add memory chunks to sw64 kvm pool\n"); gen_pool_destroy(sw64_kvm_pool); @@ -1027,13 +1020,13 @@ static int __init sw64_kvm_pool_init(void) } gen_pool_set_algo(sw64_kvm_pool, gen_pool_best_fit, NULL); - base_page = pfn_to_page(base_cmdline >> PAGE_SHIFT); - end_page = pfn_to_page((base_cmdline + size_cmdline) >> PAGE_SHIFT); + base_page = pfn_to_page(kvm_mem_base >> PAGE_SHIFT); + end_page = pfn_to_page((kvm_mem_base + kvm_mem_size - 1) >> PAGE_SHIFT); p = base_page; - while (page_ref_count(p) == 0 && - (unsigned long)p <= (unsigned long)end_page) { + while (p <= end_page && page_ref_count(p) == 0) { set_page_count(p, 1); + page_mapcount_reset(p); SetPageReserved(p); p++; } diff --git a/arch/sw_64/kernel/signal.c b/arch/sw_64/kernel/signal.c index 74e98063c874a128557012798c5b99e1abbf5c44..6a6203ccb04f489ef0b6b1bbf59b4635c3f88d50 100644 --- a/arch/sw_64/kernel/signal.c +++ b/arch/sw_64/kernel/signal.c @@ -7,26 +7,14 @@ * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson */ -#include -#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include +#include -#include #include #include +#include #include "proto.h" @@ -35,7 +23,20 @@ #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) -asmlinkage void ret_from_sys_call(void); +SYSCALL_DEFINE2(odd_sigprocmask, int, how, unsigned long, newmask) +{ + sigset_t oldmask; + sigset_t mask; + unsigned long res; + + siginitset(&mask, newmask & _BLOCKABLE); + res = sigprocmask(how, &mask, &oldmask); + if (!res) { + force_successful_syscall_return(); + res = oldmask.sig[0]; + } + return res; +} /* * Do a signal return; undo the signal stack. @@ -48,7 +49,6 @@ asmlinkage void ret_from_sys_call(void); struct rt_sigframe { struct siginfo info; struct ucontext uc; - unsigned int retcode[3]; }; /* @@ -59,22 +59,14 @@ struct rt_sigframe { extern char compile_time_assert [offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 
1 : -1]; -#define INSN_MOV_R30_R16 0x47fe0410 -#define INSN_LDI_R0 0x201f0000 -#define INSN_CALLSYS 0x00000083 - static long restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { unsigned long usp; - struct switch_stack *sw = (struct switch_stack *)regs - 1; - unsigned long *ctx_fp = (unsigned long *)&current->thread.ctx_fp; - long i, err = __get_user(regs->pc, &sc->sc_pc); + long err = __get_user(regs->pc, &sc->sc_pc); current->restart_block.fn = do_no_restart_syscall; - sw->r26 = (unsigned long) ret_from_sys_call; - err |= __get_user(regs->r0, sc->sc_regs+0); err |= __get_user(regs->r1, sc->sc_regs+1); err |= __get_user(regs->r2, sc->sc_regs+2); @@ -84,13 +76,13 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) err |= __get_user(regs->r6, sc->sc_regs+6); err |= __get_user(regs->r7, sc->sc_regs+7); err |= __get_user(regs->r8, sc->sc_regs+8); - err |= __get_user(sw->r9, sc->sc_regs+9); - err |= __get_user(sw->r10, sc->sc_regs+10); - err |= __get_user(sw->r11, sc->sc_regs+11); - err |= __get_user(sw->r12, sc->sc_regs+12); - err |= __get_user(sw->r13, sc->sc_regs+13); - err |= __get_user(sw->r14, sc->sc_regs+14); - err |= __get_user(sw->r15, sc->sc_regs+15); + err |= __get_user(regs->r9, sc->sc_regs+9); + err |= __get_user(regs->r10, sc->sc_regs+10); + err |= __get_user(regs->r11, sc->sc_regs+11); + err |= __get_user(regs->r12, sc->sc_regs+12); + err |= __get_user(regs->r13, sc->sc_regs+13); + err |= __get_user(regs->r14, sc->sc_regs+14); + err |= __get_user(regs->r15, sc->sc_regs+15); err |= __get_user(regs->r16, sc->sc_regs+16); err |= __get_user(regs->r17, sc->sc_regs+17); err |= __get_user(regs->r18, sc->sc_regs+18); @@ -108,9 +100,12 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) err |= __get_user(usp, sc->sc_regs+30); wrusp(usp); /* simd-fp */ - for (i = 0; i < 31 * 4; i++) - err |= __get_user(ctx_fp[i], sc->sc_fpregs + i); - err |= __get_user(current->thread.fpcr, &sc->sc_fpcr); + err |= __copy_from_user(&current->thread.fpstate, &sc->sc_fpregs, + offsetof(struct user_fpsimd_state, fpcr)); + err |= __get_user(current->thread.fpstate.fpcr, &sc->sc_fpcr); + + if (likely(!err)) + __fpstate_restore(current); return err; } @@ -140,8 +135,8 @@ do_sigreturn(struct sigcontext __user *sc) /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt(current)) { - send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *) regs->pc, 0, - current); + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void __user *)regs->pc, 0); } return; @@ -171,8 +166,8 @@ do_rt_sigreturn(struct rt_sigframe __user *frame) /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt(current)) { - send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *) regs->pc, 0, - current); + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void __user *)regs->pc, 0); } return; @@ -195,9 +190,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, unsigned long sp) { - struct switch_stack *sw = (struct switch_stack *)regs - 1; - unsigned long *ctx_fp = (unsigned long *)&current->thread.ctx_fp; - long i, err = 0; + long err = 0; err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack); err |= __put_user(mask, &sc->sc_mask); @@ -213,13 +206,13 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, err |= __put_user(regs->r6, sc->sc_regs+6); err |= __put_user(regs->r7, sc->sc_regs+7); err |= __put_user(regs->r8, sc->sc_regs+8); - err |= __put_user(sw->r9, sc->sc_regs+9); - err |= __put_user(sw->r10, sc->sc_regs+10); - err
|= __put_user(sw->r11, sc->sc_regs+11); - err |= __put_user(sw->r12, sc->sc_regs+12); - err |= __put_user(sw->r13, sc->sc_regs+13); - err |= __put_user(sw->r14, sc->sc_regs+14); - err |= __put_user(sw->r15, sc->sc_regs+15); + err |= __put_user(regs->r9, sc->sc_regs+9); + err |= __put_user(regs->r10, sc->sc_regs+10); + err |= __put_user(regs->r11, sc->sc_regs+11); + err |= __put_user(regs->r12, sc->sc_regs+12); + err |= __put_user(regs->r13, sc->sc_regs+13); + err |= __put_user(regs->r14, sc->sc_regs+14); + err |= __put_user(regs->r15, sc->sc_regs+15); err |= __put_user(regs->r16, sc->sc_regs+16); err |= __put_user(regs->r17, sc->sc_regs+17); err |= __put_user(regs->r18, sc->sc_regs+18); @@ -237,9 +230,10 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, err |= __put_user(sp, sc->sc_regs+30); err |= __put_user(0, sc->sc_regs+31); /* simd-fp */ - for (i = 0; i < 31 * 4; i++) - err |= __put_user(ctx_fp[i], sc->sc_fpregs + i); - err |= __put_user(current->thread.fpcr, &sc->sc_fpcr); + __fpstate_save(current); + err |= __copy_to_user(&sc->sc_fpregs, &current->thread.fpstate, + offsetof(struct user_fpsimd_state, fpcr)); + err |= __put_user(current->thread.fpstate.fpcr, &sc->sc_fpcr); err |= __put_user(regs->trap_a0, &sc->sc_traparg_a0); err |= __put_user(regs->trap_a1, &sc->sc_traparg_a1); @@ -259,7 +253,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) if (!access_ok(frame, sizeof(*frame))) return -EFAULT; - err |= copy_siginfo_to_user(&frame->info, &ksig->info); + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err |= copy_siginfo_to_user(&frame->info, &ksig->info); /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); @@ -277,15 +272,19 @@ */ r26 = VDSO_SYMBOL(current->mm->context.vdso, rt_sigreturn); - if (err) - return -EFAULT; - /* "Return" to the handler */ regs->r26 = r26; regs->r27 = regs->pc = (unsigned long) ksig->ka.sa.sa_handler; regs->r16 = ksig->sig; /* a0: signal number */ - regs->r17 = (unsigned long) &frame->info; /* a1: siginfo pointer */ - regs->r18 = (unsigned long) &frame->uc; /* a2: ucontext pointer */ + if (ksig->ka.sa.sa_flags & SA_SIGINFO) { + /* a1: siginfo pointer, a2: ucontext pointer */ + regs->r17 = (unsigned long) &frame->info; + regs->r18 = (unsigned long) &frame->uc; + } else { + /* a1: exception code, a2: sigcontext pointer */ + regs->r17 = 0; + regs->r18 = (unsigned long) &frame->uc.uc_mcontext; + } wrusp((unsigned long) frame); #if DEBUG_SIG diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c index 7d9c5c90f1ac1b946d09bb43240388db4542be0c..fb915d1660691e19d92b9dd648a932b978eed6a7 100644 --- a/arch/sw_64/kernel/smp.c +++ b/arch/sw_64/kernel/smp.c @@ -4,41 +4,18 @@ */ #include -#include -#include -#include #include #include -#include -#include -#include #include -#include -#include #include -#include #include -#include -#include -#include #include -#include -#include - -#include -#include -#include -#include -#include #include #include -#include -#include -#include #include #include -#include + #include "proto.h" struct smp_rcb_struct *smp_rcb; diff --git a/arch/sw_64/kernel/stacktrace.c b/arch/sw_64/kernel/stacktrace.c index bb501c14565b42bb307b7ee2c0c543c51723e465..7b5ddc78bd6d21efd36e70a58f7711ea1ade8e7d 100644 --- a/arch/sw_64/kernel/stacktrace.c +++ b/arch/sw_64/kernel/stacktrace.c @@ -6,41 +6,209 @@ */ #include #include -#include #include #include +#include +#include +#include +#include /* - * Save stack-backtrace
addresses into a stack_trace buffer. + * sw_64 PCS assigns the frame pointer to r15. + * + * A simple function prologue looks like this: + * ldi sp,-xx(sp) + * stl ra,0(sp) + * stl fp,8(sp) + * mov sp,fp + * + * A simple function epilogue looks like this: + * mov fp,sp + * ldl ra,0(sp) + * ldl fp,8(sp) + * ldi sp,+xx(sp) */ -void save_stack_trace(struct stack_trace *trace) + +#ifdef CONFIG_FRAME_POINTER + +int unwind_frame(struct task_struct *tsk, struct stackframe *frame) { - save_stack_trace_tsk(current, trace); + unsigned long fp = frame->fp; + + if (fp & 0x7) + return -EINVAL; + + if (!tsk) + tsk = current; + + if (!on_accessible_stack(tsk, fp, NULL)) + return -EINVAL; + + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); + + /* + * Frames created upon entry from user have NULL FP and PC values, so + * don't bother reporting these. Frames created by __noreturn functions + * might have a valid FP even if PC is bogus, so only terminate where + * both are NULL. + */ + if (!frame->fp && !frame->pc) + return -EINVAL; + + return 0; } -EXPORT_SYMBOL_GPL(save_stack_trace); +EXPORT_SYMBOL_GPL(unwind_frame); +void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data) +{ + unsigned long pc, fp; -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) + struct stackframe frame; + + if (regs) { + pc = regs->pc; + fp = regs->r15; + } else if (tsk == current || tsk == NULL) { + fp = (unsigned long)__builtin_frame_address(0); + pc = (unsigned long)walk_stackframe; + } else { + fp = tsk->thread.s[6]; + pc = tsk->thread.ra; + } + + if (!__kernel_text_address(pc) || fn(pc, data)) + return; + + frame.pc = pc; + frame.fp = fp; + while (1) { + int ret; + ret = unwind_frame(tsk, &frame); + if (ret < 0) + break; + + if (fn(frame.pc, data)) + break; + } +} +EXPORT_SYMBOL_GPL(walk_stackframe); + +#else /* !CONFIG_FRAME_POINTER */ +void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data) { - unsigned long *sp = (unsigned long *)task_thread_info(tsk)->pcb.ksp; - unsigned long addr; - - WARN_ON(trace->nr_entries || !trace->max_entries); - - while (!kstack_end(sp)) { - addr = *sp++; - if (__kernel_text_address(addr) && - !in_sched_functions(addr)) { - if (trace->skip > 0) - trace->skip--; - else - trace->entries[trace->nr_entries++] = addr; - if (trace->nr_entries >= trace->max_entries) - break; - } + unsigned long *ksp; + unsigned long sp, pc; + + if (regs) { + sp = (unsigned long)(regs+1); + pc = regs->pc; + } else if (tsk == current || tsk == NULL) { + register unsigned long current_sp __asm__ ("$30"); + sp = current_sp; + pc = (unsigned long)walk_stackframe; + } else { + sp = tsk->thread.sp; + pc = tsk->thread.ra; + } + + ksp = (unsigned long *)sp; + + while (!kstack_end(ksp)) { + if (__kernel_text_address(pc) && fn(pc, data)) + break; + pc = (*ksp++) - 0x4; } +} +EXPORT_SYMBOL_GPL(walk_stackframe); + +#endif/* CONFIG_FRAME_POINTER */ + +static int print_address_trace(unsigned long pc, void *data) +{ + print_ip_sym((const char *)data, pc); + return 0; +} + +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) +{ + pr_info("Trace:\n"); + walk_stackframe(task, NULL, print_address_trace, (void *)loglvl); +} + +#ifdef CONFIG_STACKTRACE +/* + * Save stack-backtrace addresses into a stack_trace buffer. 
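One detail of the non-FP fallback above: a stack slot that passes the __kernel_text_address() check is a saved return address, so the walker subtracts 0x4 (one sw64 instruction) to report the call site rather than the instruction after it. Isolated for clarity (illustrative helper, not in the patch):

/* Illustrative: the return-address adjustment done by the non-FP walker
 * above as (*ksp++) - 0x4; sw64 instructions are 4 bytes wide. */
static inline unsigned long call_site_of(unsigned long return_address)
{
	return return_address - 0x4;
}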
+ */ +struct stack_trace_data { + struct stack_trace *trace; + unsigned int nosched; +}; + +int save_trace(unsigned long pc, void *d) +{ + struct stack_trace_data *data = d; + struct stack_trace *trace = data->trace; + + if (data->nosched && in_sched_functions(pc)) + return 0; + if (trace->skip > 0) { + trace->skip--; + return 0; + } + + trace->entries[trace->nr_entries++] = pc; + return (trace->nr_entries >= trace->max_entries); +} + +static void __save_stack_trace(struct task_struct *tsk, + struct stack_trace *trace, unsigned int nosched) +{ + struct stack_trace_data data; + + data.trace = trace; + data.nosched = nosched; + + walk_stackframe(tsk, NULL, save_trace, &data); + if (trace->nr_entries < trace->max_entries) trace->entries[trace->nr_entries++] = ULONG_MAX; } + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + __save_stack_trace(tsk, trace, 1); +} EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace(struct stack_trace *trace) +{ + __save_stack_trace(current, trace, 0); +} +EXPORT_SYMBOL_GPL(save_stack_trace); +#endif + +static int save_pc(unsigned long pc, void *data) +{ + unsigned long *p = data; + *p = 0; + + if (!in_sched_functions(pc)) + *p = pc; + + return *p; +} + +unsigned long get_wchan(struct task_struct *tsk) +{ + unsigned long pc; + + if (!tsk || tsk == current || tsk->state == TASK_RUNNING) + return 0; + walk_stackframe(tsk, NULL, save_pc, &pc); + + return pc; +} diff --git a/arch/sw_64/kernel/suspend.c b/arch/sw_64/kernel/suspend.c index b2b07ac3042b8c171ed39f361f3125c6426aec20..369bc1e19b85713cb0ebe9a0719fd3a7a68ec358 100644 --- a/arch/sw_64/kernel/suspend.c +++ b/arch/sw_64/kernel/suspend.c @@ -1,17 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include -#include -#include -#include -#include -#include + #include -#include -#include -#include -#include -#include #include struct processor_state suspend_state; @@ -43,20 +33,14 @@ void sw64_suspend_enter(void) */ disable_local_timer(); -#ifdef CONFIG_PCI - if (sw64_chip->suspend) - sw64_chip->suspend(0); -#endif + #ifdef CONFIG_SW64_SUSPEND_DEEPSLEEP_BOOTCORE sw64_suspend_deep_sleep(&suspend_state); #else mtinten(); asm("halt"); #endif -#ifdef CONFIG_PCI - if (sw64_chip->suspend) - sw64_chip->suspend(1); -#endif + disable_local_timer(); } diff --git a/arch/sw_64/kernel/syscalls/syscall.tbl b/arch/sw_64/kernel/syscalls/syscall.tbl index 37b1e3f9f9e2a3a8174a4a8530913fb6c2039f14..42a179422b6b22fbe14fa10237dfb93ba6dc9261 100644 --- a/arch/sw_64/kernel/syscalls/syscall.tbl +++ b/arch/sw_64/kernel/syscalls/syscall.tbl @@ -55,7 +55,7 @@ 45 common open sys_open #46 is unused 47 common getxgid sys_getxgid -48 common sigprocmask sys_sigprocmask +48 common odd_sigprocmask sys_odd_sigprocmask #49 is unused #50 is unused 51 common acct sys_acct @@ -73,7 +73,7 @@ 63 common getpgrp sys_getpgrp #64 is unused #65 is unused -66 common vfork sw64_vfork +66 common vfork sys_vfork 67 common stat sys_newstat 68 common lstat sys_newlstat #69 is unused @@ -211,13 +211,13 @@ 201 common msgget sys_msgget 202 common msgrcv sys_msgrcv 203 common msgsnd sys_msgsnd -204 common semctl sys_semctl +204 common semctl sys_old_semctl 205 common semget sys_semget 206 common semop sys_semop #207 is unused 208 common lchown sys_lchown 209 common shmat sys_shmat -210 common shmctl sys_shmctl +210 common shmctl sys_old_shmctl 211 common shmdt sys_shmdt 212 common shmget sys_shmget #213 is unused @@ -290,14 +290,14 @@ 280 common fspick sys_fspick 281 common pidfd_open sys_pidfd_open 282 common clone3 
sys_clone3 -283 common close_range sys_close_range -284 common openat2 sys_openat2 -285 common pidfd_getfd sys_pidfd_getfd -286 common faccessat2 sys_faccessat2 -287 common process_madvise sys_process_madvise -#288 is unused -#289 is unused -#290 is unused +283 common close_range sys_close_range +284 common openat2 sys_openat2 +285 common pidfd_getfd sys_pidfd_getfd +286 common faccessat2 sys_faccessat2 +287 common process_madvise sys_process_madvise +288 common pkey_mprotect sys_pkey_mprotect +289 common pkey_alloc sys_pkey_alloc +290 common pkey_free sys_pkey_free #291 is unused #292 is unused #293 is unused @@ -306,7 +306,7 @@ #296 is unused #297 is unused 298 common getpriority sys_getpriority -#299 is unused +299 common sigprocmask sys_sigprocmask 300 common bdflush sys_bdflush #301 is unused 302 common mount sys_mount @@ -319,7 +319,7 @@ 309 common get_kernel_syms sys_ni_syscall 310 common syslog sys_syslog 311 common reboot sys_reboot -312 common clone sw64_clone +312 common clone sys_clone 313 common uselib sys_uselib 314 common mlock sys_mlock 315 common munlock sys_munlock @@ -377,7 +377,7 @@ 367 common getcwd sys_getcwd 368 common capget sys_capget 369 common capset sys_capset -370 common sendfile sys_sendfile +370 common sendfile sys_sendfile64 371 common setresgid sys_setresgid 372 common getresgid sys_getresgid 373 common dipc sys_ni_syscall diff --git a/arch/sw_64/kernel/tc.c b/arch/sw_64/kernel/tc.c index c047d457e55abfec92e29975430d3a181c7aceec..f2de5ac3d9dc440ca8685e33354c5e0b35919f91 100644 --- a/arch/sw_64/kernel/tc.c +++ b/arch/sw_64/kernel/tc.c @@ -5,9 +5,6 @@ #include -#include -#include -#include #include /* diff --git a/arch/sw_64/kernel/time.c b/arch/sw_64/kernel/time.c index 0815d06b03d4d1023f47fe049adbcfb6a9dafce2..15035a01e48a8856b73b40d2b2349f38d19223e1 100644 --- a/arch/sw_64/kernel/time.c +++ b/arch/sw_64/kernel/time.c @@ -1,34 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - #include -#include -#include #include -#include -#include + +#include #include "proto.h" @@ -237,8 +214,8 @@ static int __init sched_clock_debug_init(void) if (!sw64_debugfs_dir) return -ENODEV; - sched_clock_status = debugfs_create_file_unsafe("use_tc_as_sched_clock", - 0666, sw64_debugfs_dir, NULL, + sched_clock_status = debugfs_create_file("tc_sched_clock", + 0644, sw64_debugfs_dir, NULL, &sched_clock_status_fops); if (!sched_clock_status) diff --git a/arch/sw_64/kernel/timer.c b/arch/sw_64/kernel/timer.c index c29e7d1b664bd37a538a72266dce876f12b8b894..268537d5e483956070ef05c21b873f0ddb4debfb 100644 --- a/arch/sw_64/kernel/timer.c +++ b/arch/sw_64/kernel/timer.c @@ -4,20 +4,9 @@ * Description: percpu local timer, based on arch/x86/kernel/apic/apic.c */ -#include -#include -#include -#include #include -#include -#include -#include #include -#include -#include -#include -#include -#include + #include #include @@ -98,6 +87,14 @@ static int timer_set_oneshot(struct clock_event_device *evt) return 0; } +void sw64_update_clockevents(unsigned long cpu, u32 freq) +{ + struct clock_event_device *swevt = &per_cpu(timer_events, cpu); + + if (cpu == smp_processor_id()) + clockevents_update_freq(swevt, freq); +} + /* * Setup the local timer for this CPU. Copy the initialized values * of the boot CPU and register the clock event in the framework.
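sw64_update_clockevents() lets a cpufreq driver keep the local tick device calibrated when the core clock changes; the sw64_cpufreq platform device registered earlier is the intended user. A plausible call site in a frequency-transition notifier (hypothetical; the actual driver is outside this patch):

/* Hypothetical cpufreq transition notifier calling the new helper;
 * CPUFREQ_POSTCHANGE delivers the new frequency in kHz, while the
 * clockevents core wants Hz. */
static int sketch_cpufreq_notifier(struct notifier_block *nb,
				   unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (state == CPUFREQ_POSTCHANGE)
		sw64_update_clockevents(freqs->policy->cpu,
					freqs->new * 1000);

	return NOTIFY_OK;
}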
diff --git a/arch/sw_64/kernel/topology.c b/arch/sw_64/kernel/topology.c index e6df862705831fc0cb51093589a2290aaec8381c..964d6a83d901e10b7f2e35f0276c6a27c856a05f 100644 --- a/arch/sw_64/kernel/topology.c +++ b/arch/sw_64/kernel/topology.c @@ -1,20 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include #include @@ -138,16 +125,10 @@ void remove_cpu_topology(int cpu) } #ifdef CONFIG_ACPI -static bool __init acpi_cpu_is_threaded(int cpu) -{ - return 0; -} - static int __init parse_acpi_topology(void) { return 0; } - #else static inline int __init parse_acpi_topology(void) { diff --git a/arch/sw_64/kernel/traps.c b/arch/sw_64/kernel/traps.c index c736a67ef7b8b49b51fca652b512b16aaf2a1a85..4e95cab13daafa120daad777a81c3811db70f739 100644 --- a/arch/sw_64/kernel/traps.c +++ b/arch/sw_64/kernel/traps.c @@ -9,34 +9,27 @@ * This file initializes the trap entry points */ -#include -#include -#include -#include -#include -#include #include -#include -#include -#include #include #include +#include #include +#include +#include +#include #include -#include -#include #include -#include #include #include #include -#include +#include +#include +#include #include "proto.h" -void -dik_show_regs(struct pt_regs *regs, unsigned long *r9_15) +void dik_show_regs(struct pt_regs *regs) { printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n", regs->pc, regs->r26, regs->ps, print_tainted()); @@ -49,13 +42,12 @@ dik_show_regs(struct pt_regs *regs, unsigned long *r9_15) printk("t5 = %016lx t6 = %016lx t7 = %016lx\n", regs->r6, regs->r7, regs->r8); - if (r9_15) { - printk("s0 = %016lx s1 = %016lx s2 = %016lx\n", - r9_15[9], r9_15[10], r9_15[11]); - printk("s3 = %016lx s4 = %016lx s5 = %016lx\n", - r9_15[12], r9_15[13], r9_15[14]); - printk("s6 = %016lx\n", r9_15[15]); - } + printk("s0 = %016lx s1 = %016lx s2 = %016lx\n", + regs->r9, regs->r10, regs->r11); + printk("s3 = %016lx s4 = %016lx s5 = %016lx\n", + regs->r12, regs->r13, regs->r14); + printk("s6 = %016lx\n", + regs->r15); printk("a0 = %016lx a1 = %016lx a2 = %016lx\n", regs->r16, regs->r17, regs->r18); @@ -83,55 +75,7 @@ dik_show_code(unsigned int *pc) printk("\n"); } -static void -dik_show_trace(unsigned long *sp, const char *loglvl) -{ - long i = 0; - unsigned long tmp; - - printk("%sTrace:\n", loglvl); - while (0x1ff8 & (unsigned long)sp) { - tmp = *sp; - sp++; - if (!__kernel_text_address(tmp)) - continue; - printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp); - if (i > 40) { - printk("%s ...", loglvl); - break; - } - } - printk("\n"); -} - -static int kstack_depth_to_print = 24; - -void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) -{ - unsigned long *stack; - int i; - - /* - * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the - * back trace for this cpu. 
- */ - if (sp == NULL) - sp = (unsigned long *)&sp; - - stack = sp; - for (i = 0; i < kstack_depth_to_print; i++) { - if (((long) stack & (THREAD_SIZE-1)) == 0) - break; - if (i && ((i % 4) == 0)) - printk("%s ", loglvl); - printk("%016lx ", *stack++); - } - printk("\n"); - dik_show_trace(sp, loglvl); -} - -void -die_if_kernel(char *str, struct pt_regs *regs, long err, unsigned long *r9_15) +void die_if_kernel(char *str, struct pt_regs *regs, long err) { if (regs->ps & 8) return; @@ -139,9 +83,9 @@ die_if_kernel(char *str, struct pt_regs *regs, long err, unsigned long *r9_15) printk("CPU %d ", hard_smp_processor_id()); #endif printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err); - dik_show_regs(regs, r9_15); + dik_show_regs(regs); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); - dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT); + show_stack(current, NULL, KERN_EMERG); dik_show_code((unsigned int *)regs->pc); if (test_and_set_thread_flag(TIF_DIE_IF_KERNEL)) { @@ -191,9 +135,9 @@ do_entArith(unsigned long summary, unsigned long write_mask, if (si_code == 0) return; } - die_if_kernel("Arithmetic fault", regs, 0, NULL); + die_if_kernel("Arithmetic fault", regs, 0); - send_sig_fault(SIGFPE, si_code, (void __user *) regs->pc, 0, current); + force_sig_fault(SIGFPE, si_code, (void __user *)regs->pc, 0); } asmlinkage void @@ -218,7 +162,7 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs) return; } die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"), - regs, type, NULL); + regs, type); } switch (type) { @@ -226,13 +170,11 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs) if (ptrace_cancel_bpt(current)) regs->pc -= 4; /* make pc point to former bpt */ - send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc, 0, - current); + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc, 0); return; case 1: /* bugcheck */ - send_sig_fault(SIGTRAP, TRAP_UNK, (void __user *)regs->pc, 0, - current); + force_sig_fault(SIGTRAP, TRAP_UNK, (void __user *)regs->pc, 0); return; case 2: /* gentrap */ @@ -293,8 +235,7 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs) break; } - send_sig_fault(signo, code, (void __user *)regs->pc, 0, - current); + force_sig_fault(signo, code, (void __user *)regs->pc, regs->r16); return; case 4: /* opDEC */ @@ -313,15 +254,14 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs) return; } if ((regs->ps & ~IPL_MAX) == 0) - die_if_kernel("Instruction fault", regs, type, NULL); + die_if_kernel("Instruction fault", regs, type); break; case 3: /* FEN fault */ /* * Irritating users can call HMC_clrfen to disable the - * FPU for the process. The kernel will then trap in - * do_switch_stack and undo_switch_stack when we try - * to save and restore the FP registers. + * FPU for the process. The kernel will then trap to + * save and restore the FP registers. * Given that GCC by default generates code that uses the * FP registers, HMC_clrfen is not useful except for DoS @@ -337,51 +277,18 @@ do_entIF(unsigned long inst_type, struct pt_regs *regs) break; } - send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0, - current); + force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0); } -/* - * entUna has a different register layout to be reasonably simple. It - * needs access to all the integer registers (the kernel doesn't use - * fp-regs), and it needs to have them in order for simpler access. 
- * - * Due to the non-standard register layout (and because we don't want - * to handle floating-point regs), user-mode unaligned accesses are - * handled separately by do_entUnaUser below. - * - * Oh, btw, we don't handle the "gp" register correctly, but if we fault - * on a gp-register unaligned load/store, something is _very_ wrong - * in the kernel anyway.. - */ -struct allregs { - unsigned long regs[32]; - unsigned long ps, pc, gp, a0, a1, a2; -}; - -struct unaligned_stat { - unsigned long count, va, pc; -} unaligned[2]; - - -/* Macro for exception fixup code to access integer registers. */ -#define una_reg(r) (_regs[(r) >= 16 && (r) <= 18 ? (r) + 19 : (r)]) - - asmlinkage void do_entUna(void *va, unsigned long opcode, unsigned long reg, - struct allregs *regs) + struct pt_regs *regs) { long error; unsigned long tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; unsigned long pc = regs->pc - 4; - unsigned long *_regs = regs->regs; const struct exception_table_entry *fixup; - unaligned[0].count++; - unaligned[0].va = (unsigned long) va; - unaligned[0].pc = pc; - /* * We don't want to use the generic get/put unaligned macros as * we want to trap exceptions. Only if we actually get an @@ -407,7 +314,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg, if (error) goto got_exception; - una_reg(reg) = tmp1 | tmp2; + map_regs(reg) = tmp1 | tmp2; return; case 0x22: @@ -428,7 +335,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg, if (error) goto got_exception; - una_reg(reg) = (int)(tmp1 | tmp2); + map_regs(reg) = (int)(tmp1 | tmp2); return; case 0x23: /* ldl */ @@ -449,17 +356,16 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg, if (error) goto got_exception; - una_reg(reg) = tmp1 | tmp2; + map_regs(reg) = tmp1 | tmp2; return; case 0x29: /* sth */ __asm__ __volatile__( " zap %6, 2, %1\n" " srl %6, 8, %2\n" - " stb %1, 0x0(%5)\n" - " stb %2, 0x1(%5)\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" "3:\n" - ".section __ex_table, \"a\"\n" " .long 1b - .\n" " ldi %2, 3b-1b(%0)\n" @@ -468,7 +374,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg, ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) - : "r"(va), "r"(una_reg(reg)), "0"(0)); + : "r"(va), "r"(map_regs(reg)), "0"(0)); if (error) goto got_exception; @@ -500,7 +406,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg, ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) - : "r"(va), "r"(una_reg(reg)), "0"(0)); + : "r"(va), "r"(map_regs(reg)), "0"(0)); if (error) goto got_exception; @@ -552,7 +458,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg, ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) - : "r"(va), "r"(una_reg(reg)), "0"(0)); + : "r"(va), "r"(map_regs(reg)), "0"(0)); if (error) goto got_exception; @@ -571,7 +477,7 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg, if (fixup != 0) { unsigned long newpc; - newpc = fixup_exception(una_reg, fixup, pc); + newpc = fixup_exception(map_regs, fixup, pc); printk("Forwarding unaligned exception at %lx (%lx)\n", pc, newpc); @@ -587,31 +493,9 @@ do_entUna(void *va, unsigned long opcode, unsigned long reg, printk("%s(%d): unhandled unaligned exception\n", current->comm, task_pid_nr(current)); - printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n", - pc, una_reg(26), regs->ps); - printk("r0 = %016lx r1 = %016lx r2 = %016lx\n", - una_reg(0), una_reg(1), una_reg(2)); - 
printk("r3 = %016lx r4 = %016lx r5 = %016lx\n", - una_reg(3), una_reg(4), una_reg(5)); - printk("r6 = %016lx r7 = %016lx r8 = %016lx\n", - una_reg(6), una_reg(7), una_reg(8)); - printk("r9 = %016lx r10= %016lx r11= %016lx\n", - una_reg(9), una_reg(10), una_reg(11)); - printk("r12= %016lx r13= %016lx r14= %016lx\n", - una_reg(12), una_reg(13), una_reg(14)); - printk("r15= %016lx\n", una_reg(15)); - printk("r16= %016lx r17= %016lx r18= %016lx\n", - una_reg(16), una_reg(17), una_reg(18)); - printk("r19= %016lx r20= %016lx r21= %016lx\n", - una_reg(19), una_reg(20), una_reg(21)); - printk("r22= %016lx r23= %016lx r24= %016lx\n", - una_reg(22), una_reg(23), una_reg(24)); - printk("r25= %016lx r27= %016lx r28= %016lx\n", - una_reg(25), una_reg(27), una_reg(28)); - printk("gp = %016lx sp = %p\n", regs->gp, regs+1); - + dik_show_regs(regs); dik_show_code((unsigned int *)pc); - dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT); + show_stack(current, NULL, KERN_EMERG); if (test_and_set_thread_flag(TIF_DIE_IF_KERNEL)) { printk("die_if_kernel recursion detected.\n"); @@ -689,20 +573,6 @@ s_reg_to_mem(unsigned long s_reg) 1L << 0x2c | 1L << 0x2d | /* stw stl */ \ 1L << 0x0d | 1L << 0x0e) /* sth stb */ -#define R(x) ((size_t) &((struct pt_regs *)0)->x) - -static int unauser_reg_offsets[32] = { - R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8), - /* r9 ... r15 are stored in front of regs. */ - -56, -48, -40, -32, -24, -16, -8, - R(r16), R(r17), R(r18), - R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26), - R(r27), R(r28), R(gp), - 0, 0 -}; - -#undef R - asmlinkage void do_entUnaUser(void __user *va, unsigned long opcode, unsigned long reg, struct pt_regs *regs) @@ -747,15 +617,11 @@ do_entUnaUser(void __user *va, unsigned long opcode, if ((unsigned long)va >= TASK_SIZE) goto give_sigsegv; - ++unaligned[1].count; - unaligned[1].va = (unsigned long)va; - unaligned[1].pc = regs->pc - 4; - if ((1L << opcode) & OP_INT_MASK) { /* it's an integer load/store */ if (reg < 30) { reg_addr = (unsigned long *) - ((char *)regs + unauser_reg_offsets[reg]); + ((char *)regs + regoffsets[reg]); } else if (reg == 30) { /* usp in HMCODE regs */ fake_reg = rdusp(); @@ -990,20 +856,16 @@ do_entUnaUser(void __user *va, unsigned long opcode, sw64_read_simd_fp_m_s(reg, fp); if ((unsigned long)va<<61 == 0) { __asm__ __volatile__( - "1: bis %4, %4, %1\n" - "2: bis %5, %5, %2\n" - "3: stl %1, 0(%3)\n" - "4: stl %2, 8(%3)\n" - "5:\n" + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" ".section __ex_table, \"a\"\n\t" " .long 1b - .\n" - " ldi %1, 5b-1b(%0)\n" + " ldi $31, 3b-1b(%0)\n" " .long 2b - .\n" - " ldi %2, 5b-2b(%0)\n" - " .long 3b - .\n" - " ldi $31, 5b-3b(%0)\n" - " .long 4b - .\n" - " ldi $31, 5b-4b(%0)\n" + " ldi $31, 3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "r"(fp[0]), "r"(fp[1]), "0"(0)); @@ -1123,20 +985,16 @@ do_entUnaUser(void __user *va, unsigned long opcode, sw64_read_simd_fp_m_d(reg, fp); if ((unsigned long)va<<61 == 0) { __asm__ __volatile__( - "1: bis %4, %4, %1\n" - "2: bis %5, %5, %2\n" - "3: stl %1, 0(%3)\n" - "4: stl %2, 8(%3)\n" - "5:\n" + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" ".section __ex_table, \"a\"\n\t" " .long 1b - .\n" - " ldi %1, 5b-1b(%0)\n" + " ldi $31, 3b-1b(%0)\n" " .long 2b - .\n" - " ldi %2, 5b-2b(%0)\n" - " .long 3b - .\n" - " ldi $31, 5b-3b(%0)\n" - " .long 4b - .\n" - " ldi $31, 5b-4b(%0)\n" + " ldi $31, 3b-2b(%0)\n" 
".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "r"(fp[0]), "r"(fp[1]), "0"(0)); @@ -1148,20 +1006,16 @@ do_entUnaUser(void __user *va, unsigned long opcode, __asm__ __volatile__( - "1: bis %4, %4, %1\n" - "2: bis %5, %5, %2\n" - "3: stl %1, 0(%3)\n" - "4: stl %2, 8(%3)\n" - "5:\n" + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" ".section __ex_table, \"a\"\n\t" " .long 1b - .\n" - " ldi %1, 5b-1b(%0)\n" + " ldi $31, 3b-1b(%0)\n" " .long 2b - .\n" - " ldi %2, 5b-2b(%0)\n" - " .long 3b - .\n" - " ldi $31, 5b-3b(%0)\n" - " .long 4b - .\n" - " ldi $31, 5b-4b(%0)\n" + " ldi $31, 3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(vb), "r"(fp[2]), "r"(fp[3]), "0"(0)); @@ -1489,10 +1343,9 @@ do_entUnaUser(void __user *va, unsigned long opcode, __asm__ __volatile__( " zap %6, 2, %1\n" " srl %6, 8, %2\n" - " stb %1, 0x0(%5)\n" - " stb %2, 0x1(%5)\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" "3:\n" - ".section __ex_table, \"a\"\n" " .long 1b - .\n" " ldi %2, 3b-1b(%0)\n" @@ -1628,12 +1481,12 @@ do_entUnaUser(void __user *va, unsigned long opcode, si_code = SEGV_MAPERR; up_read(&mm->mmap_lock); } - send_sig_fault(SIGBUS, si_code, va, 0, current); + force_sig_fault(SIGSEGV, si_code, va, 0); return; give_sigbus: regs->pc -= 4; - send_sig_fault(SIGBUS, BUS_ADRALN, va, 0, current); + force_sig_fault(SIGBUS, BUS_ADRALN, va, 0); } void diff --git a/arch/sw_64/kernel/unaligned.c b/arch/sw_64/kernel/unaligned.c deleted file mode 100644 index 4ec1187d6cd00db8213bcccec5d952364beea632..0000000000000000000000000000000000000000 --- a/arch/sw_64/kernel/unaligned.c +++ /dev/null @@ -1,59 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * Copyright (C) 2020 Mao Minkai - * Author: Mao Minkai - * - * This code is taken from arch/mips/kernel/segment.c - * Copyright (C) 2013 Imagination Technologies Ltd. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ - -#include -#include -#include -#include -#include - -static int show_unaligned(struct seq_file *sf, void *v) -{ - extern struct unaligned_stat { - unsigned long count, va, pc; - } unaligned[2]; - - seq_printf(sf, "kernel unaligned acc\t: %ld (pc=%lx, va=%lx)\n", unaligned[0].count, unaligned[0].pc, unaligned[0].va); - seq_printf(sf, "user unaligned acc\t: %ld (pc=%lx, va=%lx)\n", unaligned[1].count, unaligned[1].pc, unaligned[1].va); - - return 0; -} - -static int unaligned_open(struct inode *inode, struct file *file) -{ - return single_open(file, show_unaligned, NULL); -} - -static const struct file_operations unaligned_fops = { - .open = unaligned_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int __init unaligned_info(void) -{ - struct dentry *unaligned; - - if (!sw64_debugfs_dir) - return -ENODEV; - - unaligned = debugfs_create_file("unaligned", S_IRUGO, - sw64_debugfs_dir, NULL, - &unaligned_fops); - if (!unaligned) - return -ENOMEM; - return 0; -} -device_initcall(unaligned_info); diff --git a/arch/sw_64/kernel/uprobes.c b/arch/sw_64/kernel/uprobes.c index d10464d0dcdd3dff06af4da1d696b1bb523cc1d1..786f2e38a59f831cb832840d5a27e2f50788a5c3 100644 --- a/arch/sw_64/kernel/uprobes.c +++ b/arch/sw_64/kernel/uprobes.c @@ -1,14 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include -#include -#include #include #include -#include - #define UPROBE_TRAP_NR ULONG_MAX /** diff --git a/arch/sw_64/kernel/vdso.c b/arch/sw_64/kernel/vdso.c index 32ed952748f056620f2b8e636916464b55d6423b..b4126cbaa4bda220635ac284a6fe526ee02a923b 100644 --- a/arch/sw_64/kernel/vdso.c +++ b/arch/sw_64/kernel/vdso.c @@ -14,20 +14,11 @@ * */ -#include -#include #include -#include -#include -#include #include -#include -#include #include #include -#include -#include #include extern char vdso_start, vdso_end; diff --git a/arch/sw_64/kernel/vdso/so2s.sh b/arch/sw_64/kernel/vdso/so2s.sh index 8f23ac544d1b3ebffa66e0fc19c28c6256a8e716..e1763af8e7301a0ec8ca7e9f901c6bf438c5920d 100755 --- a/arch/sw_64/kernel/vdso/so2s.sh +++ b/arch/sw_64/kernel/vdso/so2s.sh @@ -1,5 +1,4 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0+ -# Copyright 2020 Palmer Dabbelt -grep -v "LINUX" | sed 's/\([0-9a-f]*\) T \([a-z0-9_]*\)/.globl\t\2\n\2:\n.quad\t0x\1/' +grep "__vdso_" | sed 's/\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_.*\)*/.globl\t\2\n\2:\n.quad\t0x\1/' diff --git a/arch/sw_64/kernel/vdso/vdso.S b/arch/sw_64/kernel/vdso/vdso.S index ce5448d00cf7bee127be338a01f351be03c56ebc..edd9be27db9d5b90652553bf48196b4f0f999e3a 100644 --- a/arch/sw_64/kernel/vdso/vdso.S +++ b/arch/sw_64/kernel/vdso/vdso.S @@ -15,9 +15,7 @@ * */ -#include #include -#include #include __PAGE_ALIGNED_DATA diff --git a/arch/sw_64/kernel/vdso/vdso.lds.S b/arch/sw_64/kernel/vdso/vdso.lds.S index 67a635d6dfafaa09c776a076bcfc0cae15acc365..de1782ccb7b678c44497377f2a7985355b94a4a4 100644 --- a/arch/sw_64/kernel/vdso/vdso.lds.S +++ b/arch/sw_64/kernel/vdso/vdso.lds.S @@ -79,7 +79,7 @@ PHDRS */ VERSION { - LINUX_2.6.39 { + LINUX_2.6 { global: __vdso_rt_sigreturn; __vdso_gettimeofday; diff --git a/arch/sw_64/kernel/vdso/vgettimeofday.c b/arch/sw_64/kernel/vdso/vgettimeofday.c index 6ba9ff6e33d505ddd67e0aacbaeaa268e829dce5..b9c9a137f9d3438d500db5f511008652e1f371d6 100644 --- a/arch/sw_64/kernel/vdso/vgettimeofday.c +++ b/arch/sw_64/kernel/vdso/vgettimeofday.c @@ -13,10 +13,26 @@ */ #include -#include + #include #include -#include + +static __always_inline int syscall_fallback(clockid_t clkid, struct timespec64 *ts) +{ + 
register int r0 asm("$0"); + register unsigned long r19 asm("$19"); + asm volatile( + " mov %0, $16\n" + " mov %1, $17\n" + " ldi $0, %2\n" + " sys_call 0x83\n" + :: "r"(clkid), "r"(ts), "i"(__NR_clock_gettime) + : "$0", "$16", "$17", "$19"); + if (unlikely(r19)) + return -r0; + else + return r0; +} static __always_inline int do_realtime_coarse(struct timespec64 *ts, const struct vdso_data *data) @@ -38,8 +54,8 @@ static __always_inline int do_monotonic_coarse(struct timespec64 *ts, const struct vdso_data *data) { u32 start_seq; - u32 to_mono_sec; - u32 to_mono_nsec; + u64 to_mono_sec; + u64 to_mono_nsec; do { start_seq = vdso_data_read_begin(data); @@ -107,8 +123,8 @@ static __always_inline int do_monotonic(struct timespec64 *ts, { u32 start_seq; u64 ns; - u32 to_mono_sec; - u32 to_mono_nsec; + u64 to_mono_sec; + u64 to_mono_nsec; do { start_seq = vdso_data_read_begin(data); @@ -170,10 +186,9 @@ int __vdso_clock_gettime(clockid_t clkid, struct timespec64 *ts) ret = do_monotonic(ts, data); break; default: - ret = -ENOSYS; - break; + /* fall back to a syscall */ + ret = syscall_fallback(clkid, ts); } - /* If we return -ENOSYS libc should fall back to a syscall. */ return ret; } diff --git a/arch/sw_64/kernel/vdso/vrt_sigreturn.S b/arch/sw_64/kernel/vdso/vrt_sigreturn.S index c07eb7244d0c4adc0740c7cda1609a0743c02759..6aa7aa300b4d119a844ea1fcd75a1985b1685573 100644 --- a/arch/sw_64/kernel/vdso/vrt_sigreturn.S +++ b/arch/sw_64/kernel/vdso/vrt_sigreturn.S @@ -20,10 +20,50 @@ #include #include +#define RT_SIGFRAME_SIZE 1600 +#define RT_SIGFRAME_MCTX 176 + .text + .macro SIGCONTEXT_REGS_I base, from = 0 + .cfi_offset \from, \base + (4 + \from) * 8 + .if 30 - \from + SIGCONTEXT_REGS_I \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_F base, from = 32 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + (\from - 32) * 32 + .if 62 - \from + SIGCONTEXT_REGS_F \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_V base, from = 67 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + ((\from - 67) & 0x1f) * 32 + (((\from - 67) >> 5) + 1) * 8 + .if 161 - \from + SIGCONTEXT_REGS_V \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS base + SIGCONTEXT_REGS_I \base + SIGCONTEXT_REGS_F \base + SIGCONTEXT_REGS_V \base + .cfi_offset 63, \base + (4 + 32 + 1) * 8 + 32 * 32 + .cfi_offset 64, \base + 2 * 8 + .endm + + .cfi_startproc + .cfi_return_column 64 + .cfi_signal_frame + SIGCONTEXT_REGS -RT_SIGFRAME_SIZE + RT_SIGFRAME_MCTX + .cfi_def_cfa_offset RT_SIGFRAME_SIZE + + nop ENTRY(__vdso_rt_sigreturn) mov $sp, $16 ldi $0, __NR_rt_sigreturn sys_call 0x83 ENDPROC(__vdso_rt_sigreturn) + .cfi_endproc diff --git a/arch/sw_64/kvm/Kconfig b/arch/sw_64/kvm/Kconfig index 230ac526911c417f91ba578f7956622426d5329b..85323b48f56438f9e237ccf57a9f308cd8d20e1a 100644 --- a/arch/sw_64/kvm/Kconfig +++ b/arch/sw_64/kvm/Kconfig @@ -42,6 +42,13 @@ config KVM_SW64_HOST Provides host support for SW64 processors. To compile this as a module, choose M here. +config KVM_MEMHOTPLUG + bool "Memory hotplug support for guest" + depends on KVM + help + Provides memory hotplug support for SW64 guest. 
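With the vgettimeofday.c change above, __vdso_clock_gettime() no longer hands back -ENOSYS for clock ids it cannot serve from the vDSO data page: syscall_fallback() re-enters the kernel directly, passing the arguments in $16/$17 and the syscall number in $0, and testing the error flag in $19 on return. A quick user-space check of that behaviour, assuming libc dispatches clock_gettime() through the vDSO as usual; CLOCK_BOOTTIME is just one example of an id the vDSO switch does not handle inline:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* fast path: computed from the vDSO data page */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("monotonic: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);

	/* not handled by the vDSO: now served through
	 * syscall_fallback() instead of failing with -ENOSYS
	 */
	if (clock_gettime(CLOCK_BOOTTIME, &ts) == 0)
		printf("boottime:  %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);

	return 0;
}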
+ + source "drivers/vhost/Kconfig" endif # VIRTUALIZATION diff --git a/arch/sw_64/kvm/emulate.c b/arch/sw_64/kvm/emulate.c index 1552119e63463edd0596e527e8436c23d60cba49..bcc06c0dd618757c844a54cfc86c0bc8c9beab58 100644 --- a/arch/sw_64/kvm/emulate.c +++ b/arch/sw_64/kvm/emulate.c @@ -32,6 +32,7 @@ void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, struct kvm_run *run) vcpu->arch.mmio_decode.rt = ra; break; case 0x23: /* LDL */ + case 0x24: /* LDL_U */ run->mmio.is_write = 0; run->mmio.len = 8; vcpu->arch.mmio_decode.rt = ra; @@ -52,6 +53,7 @@ void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, struct kvm_run *run) run->mmio.len = 4; break; case 0x2b: /* STL */ + case 0x2c: /* STL_U */ run->mmio.is_write = 1; *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra); run->mmio.len = 8; diff --git a/arch/sw_64/kvm/entry.S b/arch/sw_64/kvm/entry.S index 76ebdda920cb7ee566d6755017cede7d31623b50..0c02b68ee7d06a6e5d83d5c0c7c8db5afed6b862 100644 --- a/arch/sw_64/kvm/entry.S +++ b/arch/sw_64/kvm/entry.S @@ -15,18 +15,16 @@ ENTRY(__sw64_vcpu_run) /* save host fpregs */ ldl $1, TI_TASK($8) - ldi $1, TASK_THREAD($1) rfpcr $f0 - fstd $f0, THREAD_FPCR($1) - ldi $1, THREAD_CTX_FP($1) - vstd $f2, CTX_FP_F2($1) - vstd $f3, CTX_FP_F3($1) - vstd $f4, CTX_FP_F4($1) - vstd $f5, CTX_FP_F5($1) - vstd $f6, CTX_FP_F6($1) - vstd $f7, CTX_FP_F7($1) - vstd $f8, CTX_FP_F8($1) - vstd $f9, CTX_FP_F9($1) + fstd $f0, TASK_THREAD_FPCR($1) + vstd $f2, TASK_THREAD_F2($1) + vstd $f3, TASK_THREAD_F3($1) + vstd $f4, TASK_THREAD_F4($1) + vstd $f5, TASK_THREAD_F5($1) + vstd $f6, TASK_THREAD_F6($1) + vstd $f7, TASK_THREAD_F7($1) + vstd $f8, TASK_THREAD_F8($1) + vstd $f9, TASK_THREAD_F9($1) ldi sp, -VCPU_RET_SIZE(sp) /* r16 = guest kvm_vcpu_arch.vcb struct pointer */ @@ -34,20 +32,15 @@ ENTRY(__sw64_vcpu_run) /* r18 = hcall args */ /* save host pt_regs to current kernel stack */ ldi sp, -PT_REGS_SIZE(sp) - - stl $8, PT_REGS_R8(sp) + stl $9, PT_REGS_R9(sp) + stl $10, PT_REGS_R10(sp) + stl $11, PT_REGS_R11(sp) + stl $12, PT_REGS_R12(sp) + stl $13, PT_REGS_R13(sp) + stl $14, PT_REGS_R14(sp) + stl $15, PT_REGS_R15(sp) stl $26, PT_REGS_R26(sp) - /* save host switch stack to current kernel stack */ - ldi sp, -SWITCH_STACK_SIZE(sp) - stl $9, SWITCH_STACK_R9(sp) - stl $10, SWITCH_STACK_R10(sp) - stl $11, SWITCH_STACK_R11(sp) - stl $12, SWITCH_STACK_R12(sp) - stl $13, SWITCH_STACK_R13(sp) - stl $14, SWITCH_STACK_R14(sp) - stl $15, SWITCH_STACK_R15(sp) - /* restore guest switch stack from guest kvm_regs struct */ ldl $0, KVM_REGS_R0($17) ldl $1, KVM_REGS_R1($17) @@ -203,27 +196,22 @@ $g_setfpec_over: stl $27, KVM_REGS_R27($17) stl $28, KVM_REGS_R28($17) - /* restore host switch stack from host sp */ - ldl $9, SWITCH_STACK_R9(sp) - ldl $10, SWITCH_STACK_R10(sp) - ldl $11, SWITCH_STACK_R11(sp) - ldl $12, SWITCH_STACK_R12(sp) - ldl $13, SWITCH_STACK_R13(sp) - ldl $14, SWITCH_STACK_R14(sp) - ldl $15, SWITCH_STACK_R15(sp) - - ldi sp, SWITCH_STACK_SIZE(sp) - /* restore host regs from host sp */ - ldl $8, PT_REGS_R8(sp) + ldl $9, PT_REGS_R9(sp) + ldl $10, PT_REGS_R10(sp) + ldl $11, PT_REGS_R11(sp) + ldl $12, PT_REGS_R12(sp) + ldl $13, PT_REGS_R13(sp) + ldl $14, PT_REGS_R14(sp) + ldl $15, PT_REGS_R15(sp) ldl $26, PT_REGS_R26(sp) - ldi sp, PT_REGS_SIZE(sp) + ldi $8, 0x3fff + bic sp, $8, $8 /* restore host fpregs */ ldl $1, TI_TASK($8) - ldi $1, TASK_THREAD($1) - fldd $f0, THREAD_FPCR($1) + fldd $f0, TASK_THREAD_FPCR($1) wfpcr $f0 fimovd $f0, $2 and $2, 0x3, $2 @@ -243,15 +231,14 @@ $setfpec_1: $setfpec_2: setfpec2 $setfpec_over: - ldi 
$1, THREAD_CTX_FP($1) - vldd $f2, CTX_FP_F2($1) - vldd $f3, CTX_FP_F3($1) - vldd $f4, CTX_FP_F4($1) - vldd $f5, CTX_FP_F5($1) - vldd $f6, CTX_FP_F6($1) - vldd $f7, CTX_FP_F7($1) - vldd $f8, CTX_FP_F8($1) - vldd $f9, CTX_FP_F9($1) + vldd $f2, TASK_THREAD_F2($1) + vldd $f3, TASK_THREAD_F3($1) + vldd $f4, TASK_THREAD_F4($1) + vldd $f5, TASK_THREAD_F5($1) + vldd $f6, TASK_THREAD_F6($1) + vldd $f7, TASK_THREAD_F7($1) + vldd $f8, TASK_THREAD_F8($1) + vldd $f9, TASK_THREAD_F9($1) /* if $0 > 0, handle hcall */ bgt $0, $ret_to @@ -261,25 +248,17 @@ $setfpec_over: /* Hmcode will setup in */ /* restore $16 $17 $18, do interrupt trick */ - ldi sp, -(HOST_INT_SIZE + PT_REGS_SIZE + SWITCH_STACK_SIZE)(sp) + ldi sp, -(HOST_INT_SIZE + PT_REGS_SIZE)(sp) ldl $16, HOST_INT_R16(sp) ldl $17, HOST_INT_R17(sp) ldl $18, HOST_INT_R18(sp) - ldi sp, (HOST_INT_SIZE + PT_REGS_SIZE + SWITCH_STACK_SIZE)(sp) + ldi sp, (HOST_INT_SIZE + PT_REGS_SIZE)(sp) - ldi $8, 0x3fff - bic sp, $8, $8 ldi $19, -PT_REGS_SIZE(sp) - - ldi $26, ret_from_do_entInt_noregs - call $31, do_entInt - - /* ret($0) indicate hcall number */ -ret_from_do_entInt_noregs: + call $26, do_entInt ldl $26, VCPU_RET_RA(sp) ldl $0, VCPU_RET_R0(sp) - - /* restore r16 - r19 */ $ret_to: + /* ret($0) indicate hcall number */ ldi sp, VCPU_RET_SIZE(sp) /* pop stack */ ret diff --git a/arch/sw_64/kvm/handle_exit.c b/arch/sw_64/kvm/handle_exit.c index 0d6806051fc744d56c82e52538ea923ca6cccc92..5016bc0eddc2f86fa99f3b9e3247c7f138344e54 100644 --- a/arch/sw_64/kvm/handle_exit.c +++ b/arch/sw_64/kvm/handle_exit.c @@ -34,6 +34,11 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, case SW64_KVM_EXIT_IPI: vcpu_send_ipi(vcpu, hargs->arg0); return 1; +#ifdef CONFIG_KVM_MEMHOTPLUG + case SW64_KVM_EXIT_MEMHOTPLUG: + vcpu_mem_hotplug(vcpu, hargs->arg0); + return 1; +#endif case SW64_KVM_EXIT_FATAL_ERROR: printk("Guest fatal error: Reason=[%lx], EXC_PC=[%lx], DVA=[%lx]", hargs->arg0, hargs->arg1, hargs->arg2); vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; diff --git a/arch/sw_64/kvm/kvm-sw64.c b/arch/sw_64/kvm/kvm-sw64.c index 1481c3dbb211caf9f4c4090f18386fc8895de617..de81f7efe01a5fa219a4aacc3664c6cefed86704 100644 --- a/arch/sw_64/kvm/kvm-sw64.c +++ b/arch/sw_64/kvm/kvm-sw64.c @@ -5,39 +5,26 @@ * linhn */ -#include #include -#include #include #include -#include -#include #include #include -#include -#include #include #include -#include -#include -#include -#include - +#include #include -#include #include -#include #include "../kernel/pci_impl.h" - #include "vmem.c" bool set_msi_flag; unsigned long sw64_kvm_last_vpn[NR_CPUS]; +__read_mostly bool bind_vcpu_enabled; #define cpu_last_vpn(cpuid) sw64_kvm_last_vpn[cpuid] #ifdef CONFIG_SUBARCH_C3B -#define MAX_VPN 255 #define WIDTH_HARDWARE_VPN 8 #endif @@ -70,17 +57,25 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq extern int __sw64_vcpu_run(struct vcpucb *vcb, struct kvm_regs *regs, struct hcall_args *args); -static unsigned long get_vpcr(unsigned long machine_mem_offset, unsigned long memory_size, unsigned long vpn) +#ifdef CONFIG_KVM_MEMHOTPLUG +static u64 get_vpcr_memhp(u64 seg_base, u64 vpn) { - return (machine_mem_offset >> 23) | ((memory_size >> 23) << 16) | ((vpn & HARDWARE_VPN_MASK) << 44); + return seg_base | ((vpn & HARDWARE_VPN_MASK) << 44); } +#else +static u64 get_vpcr(u64 hpa_base, u64 mem_size, u64 vpn) +{ + return (hpa_base >> 23) | ((mem_size >> 23) << 16) + | ((vpn & HARDWARE_VPN_MASK) << 44); +} +#endif static unsigned long __get_new_vpn_context(struct kvm_vcpu *vcpu, long 
cpu) { unsigned long vpn = cpu_last_vpn(cpu); unsigned long next = vpn + 1; - if ((vpn & HARDWARE_VPN_MASK) >= MAX_VPN) { + if ((vpn & HARDWARE_VPN_MASK) >= HARDWARE_VPN_MASK) { tbia(); next = (vpn & ~HARDWARE_VPN_MASK) + VPN_FIRST_VERSION + 1; /* bypass 0 */ } @@ -226,12 +221,38 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { +#ifdef CONFIG_KVM_MEMHOTPLUG + unsigned long *seg_pgd; + + if (kvm->arch.seg_pgd != NULL) { + kvm_err("kvm_arch already initialized?\n"); + return -EINVAL; + } + + seg_pgd = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO); + if (!seg_pgd) + return -ENOMEM; + + kvm->arch.seg_pgd = seg_pgd; +#endif + return 0; } void kvm_arch_destroy_vm(struct kvm *kvm) { int i; +#ifdef CONFIG_KVM_MEMHOTPLUG + void *seg_pgd = NULL; + + if (kvm->arch.seg_pgd) { + seg_pgd = READ_ONCE(kvm->arch.seg_pgd); + kvm->arch.seg_pgd = NULL; + } + + if (seg_pgd) + free_pages_exact(seg_pgd, PAGE_SIZE); +#endif for (i = 0; i < KVM_MAX_VCPUS; ++i) { if (kvm->vcpus[i]) { @@ -241,7 +262,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm) } atomic_set(&kvm->online_vcpus, 0); - } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) @@ -255,6 +275,22 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, return 0; } +#ifdef CONFIG_KVM_MEMHOTPLUG +static void setup_segment_table(struct kvm *kvm, + struct kvm_memory_slot *memslot, unsigned long addr, size_t size) +{ + unsigned long *seg_pgd = kvm->arch.seg_pgd; + unsigned int num_of_entry = size >> 30; + unsigned long base_hpa = addr >> 30; + int i; + + for (i = 0; i < num_of_entry; i++) { + *seg_pgd = base_hpa + i; + seg_pgd++; + } +} +#endif + int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, @@ -267,8 +303,15 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, unsigned long ret; size_t size; - if (change == KVM_MR_FLAGS_ONLY) + if (change == KVM_MR_FLAGS_ONLY || change == KVM_MR_DELETE) + return 0; + +#ifndef CONFIG_KVM_MEMHOTPLUG + if (mem->guest_phys_addr) { + pr_info("%s, No KVM MEMHOTPLUG support!\n", __func__); return 0; + } +#endif if (test_bit(IO_MARK_BIT, &(mem->guest_phys_addr))) return 0; @@ -290,7 +333,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if (!vm_file) { info = kzalloc(sizeof(struct vmem_info), GFP_KERNEL); - size = round_up(mem->memory_size, 8<<20); + size = round_up(mem->memory_size, 8 << 20); addr = gen_pool_alloc(sw64_kvm_pool, size); if (!addr) return -ENOMEM; @@ -298,10 +341,25 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, ret = vm_mmap(vm_file, mem->userspace_addr, mem->memory_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, 0); + if ((long)ret < 0) + return ret; + vma = find_vma(current->mm, mem->userspace_addr); if (!vma) return -ENOMEM; +#ifdef CONFIG_KVM_MEMHOTPLUG + if (memslot->base_gfn == 0x0UL) { + setup_segment_table(kvm, memslot, addr, size); + kvm->arch.host_phys_addr = (u64)addr; + memslot->arch.host_phys_addr = addr; + } else { + /* used for memory hotplug */ + memslot->arch.host_phys_addr = addr; + memslot->arch.valid = false; + } +#endif + info->start = addr; info->size = size; vma->vm_private_data = (void *) info; @@ -309,11 +367,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, vma->vm_ops = &vmem_vm_ops; vma->vm_ops->open(vma); - remap_pfn_range(vma, mem->userspace_addr, - addr >> PAGE_SHIFT, - mem->memory_size, vma->vm_page_prot); - - if ((long)ret < 0) + ret = 
vmem_vm_insert_page(vma); + if ((int)ret < 0) return ret; } else { info = vm_file->private_data; @@ -322,11 +377,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, pr_info("guest phys addr = %#lx, size = %#lx\n", addr, vma->vm_end - vma->vm_start); - kvm->arch.mem.membank[0].guest_phys_addr = 0; - kvm->arch.mem.membank[0].host_phys_addr = (u64)addr; - kvm->arch.mem.membank[0].size = round_up(mem->memory_size, 8<<20); - memset((void *)(PAGE_OFFSET + addr), 0, 0x2000000); +#ifndef CONFIG_KVM_MEMHOTPLUG + kvm->arch.host_phys_addr = (u64)addr; + kvm->arch.size = round_up(mem->memory_size, 8 << 20); +#endif + + memset(__va(addr), 0, 0x2000000); return 0; } @@ -343,7 +400,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* For guest kernel "sys_call HMC_whami", indicate virtual cpu id */ vcpu->arch.vcb.whami = vcpu->vcpu_id; vcpu->arch.vcb.vcpu_irq_disabled = 1; - vcpu->arch.vcb.pcbb = vcpu->kvm->arch.mem.membank[0].host_phys_addr; vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */ return 0; @@ -351,17 +407,16 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) { - unsigned long addr = vcpu->kvm->arch.mem.membank[0].host_phys_addr; + unsigned long addr = vcpu->kvm->arch.host_phys_addr; vcpu->arch.vcb.whami = vcpu->vcpu_id; vcpu->arch.vcb.vcpu_irq_disabled = 1; - vcpu->arch.vcb.pcbb = vcpu->kvm->arch.mem.membank[0].host_phys_addr; vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */ vcpu->arch.power_off = 0; memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending)); if (vcpu->vcpu_id == 0) - memset((void *)(PAGE_OFFSET + addr), 0, 0x2000000); + memset(__va(addr), 0, 0x2000000); return 0; } @@ -448,19 +503,18 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_ void _debug_printk_vcpu(struct kvm_vcpu *vcpu) { unsigned long pc = vcpu->arch.regs.pc; - unsigned long offset = vcpu->kvm->arch.mem.membank[0].host_phys_addr; - unsigned long pc_phys = PAGE_OFFSET | ((pc & 0x7fffffffUL) + offset); + unsigned long offset = vcpu->kvm->arch.host_phys_addr; + unsigned int *pc_phys = __va((pc & 0x7fffffffUL) + offset); unsigned int insn; int opc, ra, disp16; - insn = *(unsigned int *)pc_phys; - + insn = *pc_phys; opc = (insn >> 26) & 0x3f; ra = (insn >> 21) & 0x1f; disp16 = insn & 0xffff; if (opc == 0x06 && disp16 == 0x1000) /* RD_F */ - pr_info("vcpu exit: pc = %#lx (%#lx), insn[%x] : rd_f r%d [%#lx]\n", + pr_info("vcpu exit: pc = %#lx (%px), insn[%x] : rd_f r%d [%#lx]\n", pc, pc_phys, insn, ra, vcpu_get_reg(vcpu, ra)); } @@ -481,8 +535,24 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) /* Set guest vcb */ /* vpn will update later when vcpu is running */ if (vcpu->arch.vcb.vpcr == 0) { +#ifndef CONFIG_KVM_MEMHOTPLUG vcpu->arch.vcb.vpcr - = get_vpcr(vcpu->kvm->arch.mem.membank[0].host_phys_addr, vcpu->kvm->arch.mem.membank[0].size, 0); + = get_vpcr(vcpu->kvm->arch.host_phys_addr, vcpu->kvm->arch.size, 0); + + if (unlikely(bind_vcpu_enabled)) { + int nid; + unsigned long end; + + end = vcpu->kvm->arch.host_phys_addr + vcpu->kvm->arch.size; + nid = pfn_to_nid(PHYS_PFN(vcpu->kvm->arch.host_phys_addr)); + if (pfn_to_nid(PHYS_PFN(end)) == nid) + set_cpus_allowed_ptr(vcpu->arch.tsk, node_to_cpumask_map[nid]); + } +#else + unsigned long seg_base = virt_to_phys(vcpu->kvm->arch.seg_pgd); + + vcpu->arch.vcb.vpcr = get_vpcr_memhp(seg_base, 0); +#endif vcpu->arch.vcb.upcr = 0x7; } @@ -658,6 +728,30 @@ int kvm_dev_ioctl_check_extension(long ext) return r; } +#ifdef CONFIG_KVM_MEMHOTPLUG +void 
vcpu_mem_hotplug(struct kvm_vcpu *vcpu, unsigned long start_addr) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *slot; + unsigned long start_pfn = start_addr >> PAGE_SHIFT; + + kvm_for_each_memslot(slot, kvm_memslots(kvm)) { + if (start_pfn == slot->base_gfn) { + unsigned long *seg_pgd; + unsigned long num_of_entry = slot->npages >> 17; + unsigned long base_hpa = slot->arch.host_phys_addr; + int i; + + seg_pgd = kvm->arch.seg_pgd + (start_pfn >> 17); + for (i = 0; i < num_of_entry; i++) { + *seg_pgd = (base_hpa >> 30) + i; + seg_pgd++; + } + } + } +} +#endif + void vcpu_send_ipi(struct kvm_vcpu *vcpu, int target_vcpuid) { struct kvm_vcpu *target_vcpu = kvm_get_vcpu(vcpu->kvm, target_vcpuid); diff --git a/arch/sw_64/kvm/vmem.c b/arch/sw_64/kvm/vmem.c index b8a585ec1ad1058074980ad6e531fd749321ace0..c6f9d6cdf03b120b32d3e39ac017e412838f9efb 100644 --- a/arch/sw_64/kvm/vmem.c +++ b/arch/sw_64/kvm/vmem.c @@ -28,6 +28,35 @@ static bool addr_in_pool(struct gen_pool *pool, return found; } +static int vmem_vm_insert_page(struct vm_area_struct *vma) +{ + unsigned long addr, uaddr; + struct page *vmem_page; + struct vmem_info *info; + size_t size; + int ret; + + info = vma->vm_private_data; + addr = info->start; + size = info->size; + uaddr = vma->vm_start; + + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP; + vmem_page = pfn_to_page(addr >> PAGE_SHIFT); + do { + ret = vm_insert_page(vma, uaddr, vmem_page); + if (ret < 0) { + pr_info("vm_insert_page failed: %d\n", ret); + return ret; + } + vmem_page++; + uaddr += PAGE_SIZE; + size -= PAGE_SIZE; + } while (size > 0); + + return 0; +} + static void vmem_vm_open(struct vm_area_struct *vma) { struct vmem_info *info = vma->vm_private_data; @@ -83,6 +112,7 @@ static int vmem_mmap(struct file *flip, struct vm_area_struct *vma) unsigned long addr; static struct vmem_info *info; size_t size = vma->vm_end - vma->vm_start; + int ret; if (!(vma->vm_flags & VM_SHARED)) { pr_err("%s: mapping must be shared\n", __func__); @@ -114,10 +144,9 @@ static int vmem_mmap(struct file *flip, struct vm_area_struct *vma) /*to do if size bigger than vm_mem_size*/ pr_info("sw64_vmem: vm_start=%#lx, size= %#lx\n", vma->vm_start, size); - /*remap_pfn_range - remap kernel memory to userspace*/ - if (remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, size, - vma->vm_page_prot)) - return -EAGAIN; + ret = vmem_vm_insert_page(vma); + if (ret < 0) + return ret; return 0; } diff --git a/arch/sw_64/lib/checksum.c b/arch/sw_64/lib/checksum.c index 561bbac59f8dbfd58691ab42d681a0712415104b..d1314caa15bf44591ec1f6121017a77da16c6e2e 100644 --- a/arch/sw_64/lib/checksum.c +++ b/arch/sw_64/lib/checksum.c @@ -7,31 +7,7 @@ #include #include #include - -static inline unsigned short from64to16(unsigned long x) -{ - /* Using extract instructions is a bit more efficient - * than the original shift/bitmask version. - */ - - union { - unsigned long ul; - unsigned int ui[2]; - unsigned short us[4]; - } in_v, tmp_v, out_v; - - in_v.ul = x; - tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1]; - - /* Since the bits of tmp_v.sh[3] are going to always be zero, - *we don't have to bother to add that in. - */ - out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1] - + (unsigned long) tmp_v.us[2]; - - /* Similarly, out_v.us[2] is always zero for the final add.
*/ - return out_v.us[0] + out_v.us[1]; -} +#include /* * computes the checksum of the TCP/UDP pseudo-header @@ -69,73 +45,61 @@ EXPORT_SYMBOL(csum_tcpudp_nofold); /* * Do a 64-bit checksum on an arbitrary memory area.. - * - * This isn't a great routine, but it's not _horrible_ either. The - * inner loop could be unrolled a bit further, and there are better - * ways to do the carry, but this is reasonable. */ static inline unsigned long do_csum(const unsigned char *buff, int len) { - int odd, count; - unsigned long result = 0; - - if (len <= 0) - goto out; - odd = 1 & (unsigned long) buff; - if (odd) { - result = *buff << 8; - len--; - buff++; - } - count = len >> 1; /* nr of 16-bit words.. */ - if (count) { - if (2 & (unsigned long) buff) { - result += *(unsigned short *) buff; - count--; - len -= 2; - buff += 2; - } - count >>= 1; /* nr of 32-bit words.. */ - if (count) { - if (4 & (unsigned long) buff) { - result += *(unsigned int *) buff; - count--; - len -= 4; - buff += 4; - } - count >>= 1; /* nr of 64-bit words.. */ - if (count) { - unsigned long carry = 0; - - do { - unsigned long w = *(unsigned long *) buff; - - count--; - buff += 8; - result += carry; - result += w; - carry = (w > result); - } while (count); - result += carry; - result = (result & 0xffffffff) + (result >> 32); - } - if (len & 4) { - result += *(unsigned int *) buff; - buff += 4; - } + const unsigned long *dst = (unsigned long *)buff; + unsigned long doff = 7 & (unsigned long) dst; + unsigned long checksum = 0; + unsigned long word, patch; + unsigned long partial_dest, second_dest; + + len -= 8; + + if (!doff) { + while (len > 0) { + word = *dst; + checksum += word; + checksum += (checksum < word); + dst++; + len -= 8; } - if (len & 2) { - result += *(unsigned short *) buff; - buff += 2; + + len += 8; + word = *dst; + + if (len != 8) + maskll(word, len, word); + + checksum += word; + checksum += (checksum < word); + } else { + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; } + + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); } - if (len & 1) - result += *buff; - result = from64to16(result); - if (odd) - result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); -out: - return result; + + return from64to16(checksum); } /* diff --git a/arch/sw_64/lib/csum_partial_copy.c b/arch/sw_64/lib/csum_partial_copy.c index 678d9aa78d159929bd1663b7540e32288f70aa31..742dd63cdb702c5980adc5aa9cec898948105303 100644 --- a/arch/sw_64/lib/csum_partial_copy.c +++ b/arch/sw_64/lib/csum_partial_copy.c @@ -11,6 +11,7 @@ #include #include #include +#include #define ldl_u(x, y) \ @@ -37,25 +38,6 @@ static inline void sthl_u(unsigned long data, unsigned long *dst) *((char *)dst + 8 - doff + i) = *((char *)&data + 8 - doff + i); } -#define extll(x, y, z) \ - __asm__ __volatile__("extll %1, %2, %0":"=r" (z):"r" (x), "r" (y)) - -#define exthl(x, y, z) \ - __asm__ __volatile__("exthl %1, %2, %0":"=r" (z):"r" (x), "r" (y)) - -#define maskll(x, y, z) \ - __asm__ __volatile__("maskll %1, %2, %0":"=r" (z):"r" (x), "r" (y)) - -#define maskhl(x, y, z) \ - __asm__ __volatile__("maskhl %1, %2, %0":"=r" (z):"r" (x), "r" (y)) 
- -#define insll(x, y, z) \ - __asm__ __volatile__("insll %1, %2, %0":"=r" (z):"r" (x), "r" (y)) - -#define inshl(x, y, z) \ - __asm__ __volatile__("inshl %1, %2, %0":"=r" (z):"r" (x), "r" (y)) - - #define __get_word(insn, x, ptr) \ ({ \ long __guu_err; \ @@ -71,286 +53,91 @@ static inline void sthl_u(unsigned long data, unsigned long *dst) __guu_err; \ }) -static inline unsigned short from64to16(unsigned long x) -{ - /* Using extract instructions is a bit more efficient - * than the original shift/bitmask version. - */ - - union { - unsigned long ul; - unsigned int ui[2]; - unsigned short us[4]; - } in_v, tmp_v, out_v; - - in_v.ul = x; - tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1]; - - /* Since the bits of tmp_v.sh[3] are going to always be zero, - * we don't have to bother to add that in. - */ - out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1] - + (unsigned long) tmp_v.us[2]; - - /* Similarly, out_v.us[2] is always zero for the final add. */ - return out_v.us[0] + out_v.us[1]; -} - -/* - * Ok. This isn't fun, but this is the EASY case. - */ -static inline unsigned long -csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst, - long len) -{ - unsigned long checksum = ~0U; - unsigned long carry = 0; - - while (len >= 0) { - unsigned long word; - - if (__get_word(ldl, word, src)) - return 0; - checksum += carry; - src++; - checksum += word; - len -= 8; - carry = checksum < word; - *dst = word; - dst++; - } - len += 8; - checksum += carry; - if (len) { - int i = 0; - unsigned long word; - - if (__get_word(ldl, word, src)) - return 0; - maskll(word, len, word); - checksum += word; - carry = checksum < word; - for (; i < len; i++) - *((char *)dst + i) = *((char *)&word + i); - checksum += carry; - } - return checksum; -} - -/* - * This is even less fun, but this is still reasonably - * easy. - */ static inline unsigned long csum_partial_cfu_dest_aligned(const unsigned long __user *src, - unsigned long *dst, unsigned long soff, long len) + unsigned long *dst, long len) { - unsigned long first; - unsigned long word, carry; - unsigned long lastsrc = 7+len+(unsigned long)src; + unsigned long word; unsigned long checksum = ~0U; + int err = 0; - if (__get_word(ldl_u, first, src)) - return 0; - carry = 0; - while (len >= 0) { - unsigned long second; + if (likely(!uaccess_kernel())) + err = __copy_from_user(dst, src, len + 8); + else + memcpy(dst, src, len + 8); - if (__get_word(ldl_u, second, src+1)) - return 0; - extll(first, soff, word); - len -= 8; - src++; - exthl(second, soff, first); - checksum += carry; - word |= first; - first = second; + while (len > 0) { + word = *dst; checksum += word; - *dst = word; + checksum += (checksum < word); dst++; - carry = checksum < word; - } - len += 8; - checksum += carry; - if (len) { - int i = 0; - unsigned long second; - - if (__get_word(ldl_u, second, lastsrc)) - return 0; - extll(first, soff, word); - exthl(second, soff, first); - word |= first; - maskll(word, len, word); - checksum += word; - carry = checksum < word; - for (; i < len; i++) - *((char *)dst + i) = *((char *)&word + i); - checksum += carry; - } - return checksum; -} - -/* - * This is slightly less fun than the above.. 
- */ -static inline unsigned long -csum_partial_cfu_src_aligned(const unsigned long __user *src, - unsigned long *dst, unsigned long doff, - long len, unsigned long partial_dest) -{ - unsigned long carry = 0; - unsigned long word; - unsigned long second_dest; - int i; - unsigned long checksum = ~0U; - - if (len >= 0) { - if (__get_word(ldl, word, src)) - return 0; - checksum += carry; - checksum += word; - carry = checksum < word; - stll_u(word, dst); len -= 8; - src++; - dst++; - - inshl(word, doff, partial_dest); - while (len >= 0) { - if (__get_word(ldl, word, src)) - return 0; - len -= 8; - insll(word, doff, second_dest); - checksum += carry; - stl_u(partial_dest | second_dest, dst); - src++; - checksum += word; - inshl(word, doff, partial_dest); - carry = checksum < word; - dst++; - } - sthl_u(word, dst - 1); } len += 8; + word = *dst; - if (__get_word(ldl, word, src)) - return 0; - maskll(word, len, word); - checksum += carry; + if (len != 8) + maskll(word, len, word); checksum += word; - carry = checksum < word; - for (i = 0; i < len; i++) - *((char *)dst + i) = *((char *)&word + i); + checksum += (checksum < word); - checksum += carry; return checksum; } -/* - * This is so totally un-fun that it's frightening. Don't - * look at this too closely, you'll go blind. - */ static inline unsigned long -csum_partial_cfu_unaligned(const unsigned long __user *src, - unsigned long *dst, unsigned long soff, unsigned long doff, - long len, unsigned long partial_dest) +csum_partial_cfu_dest_unaligned(const unsigned long __user *src, + unsigned long *dst, unsigned long doff, long len) { - unsigned long carry = 0; - unsigned long first; - unsigned long second, word; - unsigned long second_dest; - int i; + unsigned long word, patch; + unsigned long partial_dest, second_dest; unsigned long checksum = ~0U; + int err = 0; - if (__get_word(ldl_u, first, src)) - return 0; - if (len >= 0) { - extll(first, soff, word); - if (__get_word(ldl_u, second, src+1)) - return 0; - exthl(second, soff, first); - word |= first; - checksum += carry; - checksum += word; - carry = checksum < word; - stll_u(word, dst); - sthl_u(word, dst); - len -= 8; - src++; - dst++; + if (likely(!uaccess_kernel())) + err = __copy_from_user(dst, src, len + 8); + else + memcpy(dst, src, len + 8); - if (__get_word(ldl_u, first, src)) - return 0; - ldl_u(partial_dest, dst); - maskll(partial_dest, doff, partial_dest); - while (len >= 0) { - if (__get_word(ldl_u, second, src+1)) - return 0; - extll(first, soff, word); - checksum += carry; - len -= 8; - exthl(second, soff, first); - src++; - word |= first; - first = second; - insll(word, doff, second_dest); - checksum += word; - stl_u(partial_dest | second_dest, dst); - carry = checksum < word; - inshl(word, doff, partial_dest); - dst++; - } - sthl_u(word, dst - 1); + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; } - len += 8; - checksum += carry; - if (__get_word(ldl_u, second, src+1)) - return 0; - extll(first, soff, word); - exthl(second, soff, first); - word |= first; - maskll(word, len, word); - checksum += word; - carry = checksum < word; - for (i = 0; i < len; i++) - *((char *)dst + i) = *((char *)&word + i); + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = 
partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); - checksum += carry; return checksum; } static __wsum __csum_and_copy(const void __user *src, void *dst, int len) { unsigned long checksum; - unsigned long soff = 7 & (unsigned long) src; unsigned long doff = 7 & (unsigned long) dst; if (!doff) { - if (!soff) - checksum = csum_partial_cfu_aligned( - (const unsigned long __user *) src, - (unsigned long *) dst, len-8); - else - checksum = csum_partial_cfu_dest_aligned( - (const unsigned long __user *) src, - (unsigned long *) dst, - soff, len-8); + checksum = csum_partial_cfu_dest_aligned( + (const unsigned long __user *) src, + (unsigned long *) dst, len-8); } else { - unsigned long partial_dest; - - ldl_u(partial_dest, dst); - if (!soff) - checksum = csum_partial_cfu_src_aligned( - (const unsigned long __user *) src, - (unsigned long *) dst, - doff, len-8, partial_dest); - else - checksum = csum_partial_cfu_unaligned( - (const unsigned long __user *) src, - (unsigned long *) dst, - soff, doff, len-8, partial_dest); + checksum = csum_partial_cfu_dest_unaligned( + (const unsigned long __user *) src, + (unsigned long *) dst, doff, len-8); } return (__force __wsum)from64to16(checksum); } diff --git a/arch/sw_64/lib/deep-memcpy.S b/arch/sw_64/lib/deep-memcpy.S index e847ec3d08df6f3976ca101dc15ba7225be1e4b0..83c726d42778ef7d85758236e9d7cac601b8548d 100644 --- a/arch/sw_64/lib/deep-memcpy.S +++ b/arch/sw_64/lib/deep-memcpy.S @@ -1,240 +1,309 @@ /* SPDX-License-Identifier: GPL-2.0 */ + #include - .set noreorder - .set noat - .align 4 +#define NC_STORE_THRESHOLD 2048 + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x60($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23) + +#define RESTORE_SIMD_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + ldi $sp, 0x60($sp) + +#define SAVE_SIMD_U_REGS \ + ldi $sp, -0x120($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + vstd $f4, 0x40($23); \ + vstd $f5, 0x60($23); \ + vstd $f10, 0x80($23); \ + vstd $f11, 0xa0($23); \ + vstd $f20, 0xc0($23); \ + vstd $f21, 0xe0($23) + +#define RESTORE_SIMD_U_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + vldd $f4, 0x40($23); \ + vldd $f5, 0x60($23); \ + vldd $f10, 0x80($23); \ + vldd $f11, 0xa0($23); \ + vldd $f20, 0xc0($23); \ + vldd $f21, 0xe0($23); \ + ldi $sp, 0x120($sp) + + .set noat + .align 4 .globl memcpy .ent memcpy - memcpy: .frame $30, 0, $26, 0 .prologue 0 - subl $sp, 0xa0, $sp - ldi $4, 0x40($sp) - stl $4, 0($sp) - bic $4, 0x1f, $4 - vstd $f4, 0($4) - vstd $f5, 0x20($4) - mov $16, $0 - ble $18, $nomoredata - xor $16, $17, $1 - and $1, 7, $1 - - bne $1, $misaligned - + ble $18, $out and $16, 7, $1 - beq $1, $both_0mod8 + beq $1, $dest_aligned_8 -$head_align: - ldbu $1, 0($17) + .align 4 +$byte_loop_head: + ldbu $2, 0($17) subl $18, 1, $18 addl $17, 1, $17 - stb $1, 0($16) + stb $2, 0($16) addl $16, 1, $16 + ble $18, $out and $16, 7, $1 - ble $18, $nomoredata - bne $1, $head_align + bne $1, $byte_loop_head -$both_0mod8: - cmple $18, 127, $1 - bne $1, $no_unroll - and $16, 63, $1 - beq $1, $do_unroll - -$single_head_quad: - ldl $1, 0($17) +$dest_aligned_8: + and $17, 7, $4 + subl $18, 16, $18 + blt $18, $quad_end + subl $18, 64, $18 + blt $18, $simd_end + and $16, 31, $1 + beq $1, $dest_aligned_32 + bne $4, $quad_u_loop_head + + .align 5 
+$quad_loop_head: + ldl $2, 0($17) subl $18, 8, $18 addl $17, 8, $17 - - stl $1, 0($16) + stl $2, 0($16) addl $16, 8, $16 - and $16, 63, $1 - bne $1, $single_head_quad - -$do_unroll: - addl $16, 64, $7 - cmple $18, 127, $1 - bne $1, $tail_quads - -#JJ - and $17, 31, $1 - bne $1, $unroll_body - -$unroll_body_simd: - ldwe $f31,128*5($17) - vldd $f4, 0($17) - vldd $f5, 32($17) - vstd_nc $f4, 0($16) - vstd_nc $f5, 32($16) + and $16, 31, $1 + blt $18, $simd_end + beq $16, $dest_aligned_32 + br $31, $quad_loop_head + +$dest_aligned_32: + and $17, 31, $5 + bne $5, $prep_simd_u_loop + +$prep_simd_loop: + SAVE_SIMD_REGS + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_loop + + .align 5 +$simd_loop_nc: + fillcs 128 * 5($17) + vldd $f1, 0($17) + vldd $f2, 32($17) + subl $18, 64, $18 + addl $17, 64, $17 + vstd_nc $f1, 0($16) + vstd_nc $f2, 32($16) addl $16, 64, $16 + bge $18, $simd_loop_nc + memb # required for _nc store instructions + br $31, $simd_loop_end + + .align 5 +$simd_loop: + fillcs 128 * 5($17) + vldd $f1, 0($17) + vldd $f2, 32($17) subl $18, 64, $18 addl $17, 64, $17 - cmple $18, 63, $1 - beq $1, $unroll_body_simd - memb - br $no_unroll -#endJJ - -$unroll_body: - #wh64 ($7) - #e_fillcs 0($7) - - ldl $6, 0($17) - #e_fillcs 256($17) - - ldl $4, 8($17) - ldl $5, 16($17) - addl $7, 64, $7 - - ldl $3, 24($17) - addl $16, 64, $1 - + vstd $f1, 0($16) + vstd $f2, 32($16) + addl $16, 64, $16 + bge $18, $simd_loop + +$simd_loop_end: + addl $18, 64, $1 + cmplt $1, 32, $1 + bne $1, $no_more_simd + vldd $f1, 0($17) + subl $18, 32, $18 addl $17, 32, $17 - stl_nc $6, 0($16) - - stl_nc $4, 8($16) - stl_nc $5, 16($16) - subl $18, 192, $2 - - stl_nc $3, 24($16) + vstd $f1, 0($16) addl $16, 32, $16 - ldl $6, 0($17) - ldwe $f31, 4*128($17) - #e_fillcs 288($17) - ldl $4, 8($17) - #cmovlt $2, $1, $7 - sellt $2, $1, $7, $7 +$no_more_simd: + RESTORE_SIMD_REGS - ldl $5, 16($17) - ldl $3, 24($17) - addl $16, 32, $16 - subl $18, 64, $18 - - addl $17, 32, $17 - stl_nc $6, -32($16) - stl_nc $4, -24($16) - cmple $18, 63, $1 - - stl_nc $5, -16($16) - stl_nc $3, -8($16) - beq $1, $unroll_body +$simd_end: + addl $18, 64, $18 + blt $18, $quad_end + bne $4, $prep_quad_u_loop_tail - memb - -$tail_quads: -$no_unroll: .align 4 - subl $18, 8, $18 - blt $18, $less_than_8 - -$move_a_quad: - ldl $1, 0($17) +$quad_loop_tail: + ldl $2, 0($17) + ldl $3, 8($17) + subl $18, 16, $18 + addl $17, 16, $17 + stl $2, 0($16) + stl $3, 8($16) + addl $16, 16, $16 + bge $18, $quad_loop_tail + +$quad_end: + addl $18, 16, $18 + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + bne $4, $move_one_quad_u + +$move_one_quad: + ldl $2, 0($17) subl $18, 8, $18 addl $17, 8, $17 - - stl $1, 0($16) + stl $2, 0($16) addl $16, 8, $16 - bge $18, $move_a_quad + ble $18, $out -$less_than_8: .align 4 - addl $18, 8, $18 - ble $18, $nomoredata - - -$tail_bytes: +$byte_loop_tail: + ldbu $2, 0($17) subl $18, 1, $18 - ldbu $1, 0($17) addl $17, 1, $17 - - stb $1, 0($16) + stb $2, 0($16) addl $16, 1, $16 - bgt $18, $tail_bytes - - ldi $4, 0x40($sp) - bic $4, 0x1f, $4 - vldd $f4, 0($4) - vldd $f5, 0x20($4) - ldl $4, 0($sp) - addl $sp, 0xa0, $sp + bgt $18, $byte_loop_tail +$out: ret $31, ($26), 1 -$misaligned: - mov $0, $4 - and $0, 7, $1 - beq $1, $dest_0mod8 - -$aligndest: - ble $18, $nomoredata - ldbu $1, 0($17) - subl $18, 1, $18 - addl $17, 1, $17 - stb $1, 0($4) - addl $4, 1, $4 - and $4, 7, $1 - bne $1, $aligndest - -$dest_0mod8: + .align 5 +$quad_u_loop_head: + ldl_u $2, 0($17) + ldl_u $3, 7($17) subl $18, 8, $18 - blt $18, 
$misalign_tail - ldl_u $3, 0($17) - -$mis_quad: - ldl_u $16, 8($17) - #extql $3, $17, $3 - fillde 256($17) - and $17, 7, $1 - sll $1, 3, $1 - srl $3, $1, $3 - - #extqh $16, $17, $1 - subl $1, 64, $1 - negl $1, $1 - sll $16, $1, $1 - - bis $3, $1, $1 + addl $17, 8, $17 + extll $2, $4, $2 + exthl $3, $4, $3 + bis $2, $3, $2 + stl $2, 0($16) + addl $16, 8, $16 + blt $18, $simd_end + beq $16, $dest_aligned_32 + br $31, $quad_u_loop_head + +$prep_simd_u_loop: + SAVE_SIMD_U_REGS + andnot $17, 31, $3 + ldi $2, 256($31) + sll $5, 3, $1 + subl $2, $1, $2 + sll $1, 29, $1 + sll $2, 29, $2 + ifmovd $1, $f1 + ifmovd $2, $f2 + vldd $f4, 0($3) + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_u_loop + + .align 5 +$simd_u_loop_nc: + vldd $f5, 32($3) + fillcs 128 * 5($3) + srlow $f4, $f1, $f10 + sllow $f5, $f2, $f11 + vlogfc $f10, $f11, $f31, $f10 + vldd $f4, 64($3) + srlow $f5, $f1, $f20 + sllow $f4, $f2, $f21 + vlogfc $f20, $f21, $f31, $f20 + vstd_nc $f10, 0($16) + vstd_nc $f20, 32($16) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + bge $18, $simd_u_loop_nc + memb # required for _nc store instructions + br $31, $simd_u_loop_end + + .align 5 +$simd_u_loop: + vldd $f5, 32($3) + fillcs 128 * 5($3) + srlow $f4, $f1, $f10 + sllow $f5, $f2, $f11 + vlogfc $f10, $f11, $f31, $f10 + vldd $f4, 64($3) + srlow $f5, $f1, $f20 + sllow $f4, $f2, $f21 + vlogfc $f20, $f21, $f31, $f20 + vstd $f10, 0($16) + vstd $f20, 32($16) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + bge $18, $simd_u_loop + +$simd_u_loop_end: + addl $18, 64, $1 + cmplt $1, 32, $1 + bne $1, $no_more_simd_u + vldd $f5, 32($3) + srlow $f4, $f1, $f10 + sllow $f5, $f2, $f11 + vlogfc $f10, $f11, $f31, $f10 + vstd $f10, 0($16) + subl $18, 32, $18 + addl $3, 32, $3 + addl $16, 32, $16 +$no_more_simd_u: + RESTORE_SIMD_U_REGS + bis $3, $5, $17 + br $31, $simd_end + +$prep_quad_u_loop_tail: + ldl_u $2, 0($17) + .align 5 +$quad_u_loop_tail: + ldl_u $3, 8($17) + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + stl $22, 0($16) + ldl_u $2, 16($17) + extll $3, $4, $24 + exthl $2, $4, $25 + bis $24, $25, $24 + stl $24, 8($16) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + bge $18, $quad_u_loop_tail + br $31, $quad_end + +$move_one_quad_u: + ldl_u $2, 0($17) + ldl_u $3, 8($17) subl $18, 8, $18 addl $17, 8, $17 - fillde 128($4) - stl $1, 0($4) - mov $16, $3 - - addl $4, 8, $4 - bge $18, $mis_quad - -$misalign_tail: - addl $18, 8, $18 - ble $18, $nomoredata - -$misalign_byte: - ldbu $1, 0($17) - subl $18, 1, $18 - addl $17, 1, $17 - - stb $1, 0($4) - addl $4, 1, $4 - bgt $18, $misalign_byte - - -$nomoredata: - ldi $4, 0x40($sp) - bic $4, 0x1f, $4 - vldd $f4, 0($4) - vldd $f5, 0x20($4) - ldl $4, 0($sp) - addl $sp, 0xa0, $sp - - ret $31, ($26), 1 + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + stl $22, 0($16) + addl $16, 8, $16 + ble $18, $out + br $31, $byte_loop_tail .end memcpy - EXPORT_SYMBOL(memcpy) + EXPORT_SYMBOL(memcpy) __memcpy = memcpy .globl __memcpy diff --git a/arch/sw_64/lib/deep-memset.S b/arch/sw_64/lib/deep-memset.S index 4efba2062e119c58849b20af93f1394f5c944a3c..7fbd529c72a84f842f59284399f3089e644b4c79 100644 --- a/arch/sw_64/lib/deep-memset.S +++ b/arch/sw_64/lib/deep-memset.S @@ -27,6 +27,8 @@ #include +#define NC_STORE_THRESHOLD 2048 + .set noat .set noreorder .text @@ -35,6 +37,7 @@ .globl __memset .globl ___memset .globl __memsetw + .globl __constant_c_memset .ent ___memset ___memset: .frame $30, 0, $26, 0 @@ -56,6 +59,7 @@ __constant_c_memset: bne $5, $tail_loop /* 
loop until SRC is 8 bytes aligned */ + .align 5 $head_loop: and $16, 0x7, $1 beq $1, $mod8_aligned @@ -68,6 +72,7 @@ $head_loop: $mod8_aligned: /* set 8 bytes each time */ + .align 5 $mod8_loop: and $16, 0x1f, $1 beq $1, $mod32_aligned @@ -86,23 +91,40 @@ $mod32_aligned: ifmovd $17, $f10 vcpyf $f10, $f10 + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $mod32_loop + /* set 64 bytes each time */ -$mod32_loop: + .align 5 +$mod32_loop_nc: subl $18, 64, $18 - blt $18, $mod32_tail + blt $18, $mod32_tail_memb vstd_nc $f10, 0($16) vstd_nc $f10, 32($16) addl $16, 64, $16 + br $31, $mod32_loop_nc + + .align 5 +$mod32_loop: + subl $18, 64, $18 + blt $18, $mod32_tail + vstd $f10, 0($16) + vstd $f10, 32($16) + addl $16, 64, $16 br $31, $mod32_loop +$mod32_tail_memb: + memb # required for _nc store instructions $mod32_tail: vldd $f10, 0($4) addl $sp, 64, $sp addl $18, 64, $18 + .align 5 $mod32_tail_loop: subl $18, 8, $18 blt $18, $tail - stl_nc $17, 0($16) + stl $17, 0($16) addl $16, 8, $16 br $31, $mod32_tail_loop @@ -110,6 +132,7 @@ $tail: addl $18, 8, $18 /* set one byte each time */ + .align 5 $tail_loop: beq $18, $out stb $17, 0($16) @@ -119,7 +142,6 @@ $tail_loop: /* done, return */ $out: - memb # required for _nc store instructions ret .end ___memset diff --git a/arch/sw_64/lib/fls.c b/arch/sw_64/lib/fls.c index e960b1c06782e483d91522e73d81c5ce8f405c67..aa4231f7e472dc1fdca58ea2d63631b64cc2fc4f 100644 --- a/arch/sw_64/lib/fls.c +++ b/arch/sw_64/lib/fls.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include /* This is fls(x)-1, except zero is held to zero. This allows most * efficient input into extbl, plus it allows easy handling of fls(0)=0. diff --git a/arch/sw_64/lib/iomap.c b/arch/sw_64/lib/iomap.c index 30d24923624d0968bc10467d797ed4548df30ef5..3a8d879ef070e27bc27a5d386ba5708480a3d063 100644 --- a/arch/sw_64/lib/iomap.c +++ b/arch/sw_64/lib/iomap.c @@ -3,11 +3,10 @@ * Sw_64 IO and memory functions. */ -#include -#include -#include #include + #include +#include /* * Here comes the sw64 implementation of the IOMAP interfaces. @@ -459,46 +458,9 @@ void _memset_c_io(volatile void __iomem *to, unsigned long c, long count) } EXPORT_SYMBOL(_memset_c_io); -/* - * A version of memcpy used by the vga console routines to move data around - * arbitrarily between screen and main memory. - */ - -void -scr_memcpyw(u16 *d, const u16 *s, unsigned int count) -{ - const u16 __iomem *ios = (const u16 __iomem *) s; - u16 __iomem *iod = (u16 __iomem *) d; - int s_isio = __is_ioaddr(s); - int d_isio = __is_ioaddr(d); - u16 tmp; - - if (s_isio) { - if (d_isio) { - /* - * FIXME: Should handle unaligned ops and - * operation widening. - */ - - count /= 2; - while (count--) { - tmp = __raw_readw(ios++); - __raw_writew(tmp, iod++); - } - } else - memcpy_fromio(d, ios, count); - } else { - if (d_isio) - memcpy_toio(iod, s, count); - else - memcpy(d, s, count); - } -} -EXPORT_SYMBOL(scr_memcpyw); - void __iomem *ioport_map(unsigned long port, unsigned int size) { - return ioportmap(port); + return sw64_platform->ioportmap(port); } EXPORT_SYMBOL(ioport_map); diff --git a/arch/sw_64/lib/udelay.c b/arch/sw_64/lib/udelay.c index 595887caa7b3cca41de7a32dd7fd0b7e98e7d23a..48356ab8872f89f6f3fb75189c4fa9760f14b1bc 100644 --- a/arch/sw_64/lib/udelay.c +++ b/arch/sw_64/lib/udelay.c @@ -6,11 +6,6 @@ */ #include -#include /* for udelay's use of smp_processor_id */ -#include -#include -#include -#include /* * Use only for very small delays (< 1 msec). 
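The store-selection policy added to both deep-memcpy.S and deep-memset.S above can be summarized in C: cacheable vector stores below NC_STORE_THRESHOLD, non-temporal (_nc) stores plus a trailing memory barrier above it. A sketch of the dispatch only; the fill loops stand in for the 64-bytes-per-iteration vstd/vstd_nc loops, and all names are illustrative:

	#include <stddef.h>

	#define NC_STORE_THRESHOLD	2048	/* matches the value added above */

	static void fill_cacheable(unsigned char *p, int c, size_t n)
	{
		while (n--)
			*p++ = (unsigned char)c;	/* stand-in for the vstd loop */
	}

	static void fill_nontemporal(unsigned char *p, int c, size_t n)
	{
		while (n--)
			*p++ = (unsigned char)c;	/* stand-in for the vstd_nc loop */
		/* memb: non-temporal stores must be fenced before returning */
		__atomic_thread_fence(__ATOMIC_SEQ_CST);
	}

	void *sketch_memset(void *dst, int c, size_t n)
	{
		if (n > NC_STORE_THRESHOLD)
			fill_nontemporal(dst, c, n);	/* $mod32_loop_nc path */
		else
			fill_cacheable(dst, c, n);	/* $mod32_loop path */
		return dst;
	}

Large fills bypass the cache because the data is unlikely to be re-read soon; small fills keep cacheable stores and avoid the barrier entirely, which is why the memb moved out of the common exit path.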
diff --git a/arch/sw_64/math-emu/math.c b/arch/sw_64/math-emu/math.c index 3903b421b8f48273239da06c063eafff57f39956..9f281d82ad83cc76632901f9f2f96aaac7a17b47 100644 --- a/arch/sw_64/math-emu/math.c +++ b/arch/sw_64/math-emu/math.c @@ -8,16 +8,12 @@ * fire3 2008-12-27 Add SIMD floating emulation code for SW64 */ -#include -#include -#include -#include -#include - - #include +#include + #include "sfp-util.h" + #include #include #include diff --git a/arch/sw_64/mm/fault.c b/arch/sw_64/mm/fault.c index c68be4a40d23a039dc202545fd317259c054f135..d596fc50772da73d307540ec66d82481fb5d8a37 100644 --- a/arch/sw_64/mm/fault.c +++ b/arch/sw_64/mm/fault.c @@ -3,28 +3,11 @@ * Copyright (C) 1995 Linus Torvalds */ -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include -#include + +#include __read_mostly bool segv_debug_enabled; @@ -48,8 +31,8 @@ static inline int notify_page_fault(struct pt_regs *regs, unsigned long mmcsr) } #endif -extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *); -extern void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15); +extern void die_if_kernel(char *, struct pt_regs *, long); +extern void dik_show_regs(struct pt_regs *regs); void show_all_vma(void) { @@ -97,7 +80,7 @@ __load_new_mm_context(struct mm_struct *next_mm) pcb = ¤t_thread_info()->pcb; pcb->asn = mmc & HARDWARE_ASN_MASK; - pcb->ptbr = ((unsigned long) next_mm->pgd - PAGE_OFFSET) >> PAGE_SHIFT; + pcb->ptbr = virt_to_pfn(next_mm->pgd); __reload_thread(pcb); } @@ -124,10 +107,6 @@ __load_new_mm_context(struct mm_struct *next_mm) * modify them. */ -/* Macro for exception fixup code to access integer registers. */ -#define dpf_reg(r) \ - (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ - (r) <= 18 ? 
(r)+10 : (r)-10]) unsigned long show_va_to_pa(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd = NULL; @@ -143,7 +122,7 @@ unsigned long show_va_to_pa(struct mm_struct *mm, unsigned long addr) pr_debug("addr = %#lx, pgd = %#lx\n", addr, pgd_val(*pgd)); goto out; } - p4d = pgd_offset(pgd, addr); + p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) { ret = 0; pr_debug("addr = %#lx, pgd = %#lx, p4d = %#lx\n", @@ -167,7 +146,7 @@ unsigned long show_va_to_pa(struct mm_struct *mm, unsigned long addr) } pte = pte_offset_map(pmd, addr); if (pte_present(*pte)) { - ret = ((unsigned long)__va(((pte_val(*pte) >> 32)) << PAGE_SHIFT)); + ret = (unsigned long)pfn_to_virt(pte_val(*pte) >> _PFN_SHIFT); pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx, pmd = %#lx, pte = %#lx, ret = %#lx\n", addr, *(unsigned long *)pgd, *(unsigned long *)pud, *(unsigned long *)pmd, *(unsigned long *)pte, ret); @@ -311,7 +290,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, if (fixup != 0) { unsigned long newpc; - newpc = fixup_exception(dpf_reg, fixup, regs->pc); + newpc = fixup_exception(map_regs, fixup, regs->pc); regs->pc = newpc; return; } @@ -322,7 +301,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, */ pr_alert("Unable to handle kernel paging request at virtual address %016lx\n", address); - die_if_kernel("Oops", regs, cause, (unsigned long *)regs - 16); + die_if_kernel("Oops", regs, cause); do_exit(SIGKILL); /* @@ -353,7 +332,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, if (unlikely(segv_debug_enabled)) { pr_info("fault: want to send_segv: pid %d, cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", current->pid, cause, mmcsr, address, regs->pc); - dik_show_regs(regs, (unsigned long *)regs-16); + dik_show_regs(regs); show_all_vma(); } diff --git a/arch/sw_64/mm/hugetlbpage.c b/arch/sw_64/mm/hugetlbpage.c index 3c03709d441c3e27e5fd6fe0e3fdf0b44597c9d5..2a40225af4d810361068bc5a6d8dcbad9b846e6e 100644 --- a/arch/sw_64/mm/hugetlbpage.c +++ b/arch/sw_64/mm/hugetlbpage.c @@ -3,18 +3,13 @@ * SW64 Huge TLB Page Support for Kernel. */ -#include -#include #include #include #include -#include #include -#include + #include #include -#include -#include /* * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index d0e934356dd5000c92179f711f9fba560ded09f2..82f2414ef7f77f29cac58323185a12d279a6d87e 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -5,38 +5,14 @@ /* 2.3.x zone allocator, 1999 Andrea Arcangeli */ -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include -#include -#include -#include -#include #include -#include #include #include +#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include - -extern void die_if_kernel(char *, struct pt_regs *, long); struct mem_desc_t mem_desc; #ifndef CONFIG_NUMA @@ -58,6 +34,14 @@ static pud_t vmalloc_pud[1024] __attribute__((__aligned__(PAGE_SIZE))); static phys_addr_t mem_start; static phys_addr_t mem_size_limit; +unsigned long memory_block_size_bytes(void) +{ + if (is_in_guest()) + return MIN_MEMORY_BLOCK_SIZE_VM_MEMHP; + else + return MIN_MEMORY_BLOCK_SIZE; +} + static int __init setup_mem_size(char *p) { char *oldp; @@ -112,7 +96,7 @@ switch_to_system_map(void) * the last slot of the L1 page table. 
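The virt_to_pfn()/pfn_to_virt() substitutions in these hunks are the usual linear-map arithmetic; the open-coded "(vaddr - PAGE_OFFSET) >> PAGE_SHIFT" forms they replace compute the same thing. A standalone sketch, with placeholder constants rather than the port's real PAGE_SHIFT/PAGE_OFFSET:

	#include <stdint.h>

	#define PAGE_SHIFT	13			/* placeholder */
	#define PAGE_OFFSET	0xfff0000000000000UL	/* placeholder */

	static inline unsigned long virt_to_pfn(const void *kaddr)
	{
		return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT;
	}

	static inline void *pfn_to_virt(unsigned long pfn)
	{
		return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET);
	}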
*/ memset(swapper_pg_dir, 0, PAGE_SIZE); - newptbr = __pa(swapper_pg_dir) >> PAGE_SHIFT; + newptbr = virt_to_pfn(swapper_pg_dir); /* Also set up the real kernel PCB while we're at it. */ init_thread_info.pcb.ptbr = newptbr; @@ -193,15 +177,27 @@ void __init sw64_memblock_init(void) memblock_remove(1ULL << MAX_PHYSMEM_BITS, PHYS_ADDR_MAX); - /* Make sure kernel text is in memory range. */ - memblock_add(__pa_symbol(_text), (unsigned long)(_end - _text)); - memblock_reserve(__pa_symbol(_text), _end - _text); - max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); memblock_allow_resize(); memblock_initialized = true; process_memmap(); + + /* Make sure kernel text is in memory range. */ + memblock_add(__pa_symbol(_text), _end - _text); + memblock_reserve(__pa_symbol(_text), _end - _text); + + /* Make sure initrd is in memory range. */ + if (sunway_boot_params->initrd_start) { + phys_addr_t base = __pa(sunway_boot_params->initrd_start); + phys_addr_t size = sunway_boot_params->initrd_size; + + memblock_add(base, size); + memblock_reserve(base, size); + } + + /* end of DRAM range may have been changed */ + max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); } #ifndef CONFIG_NUMA @@ -257,18 +253,6 @@ void vmemmap_free(unsigned long start, unsigned long end, } #endif -#ifdef CONFIG_DISCONTIGMEM -int pfn_valid(unsigned long pfn) -{ - phys_addr_t addr = pfn << PAGE_SHIFT; - - if ((addr >> PAGE_SHIFT) != pfn) - return 0; - return memblock_is_map_memory(addr); -} -EXPORT_SYMBOL(pfn_valid); -#endif - #ifdef CONFIG_HAVE_MEMBLOCK #ifndef MIN_MEMBLOCK_ADDR #define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET) @@ -323,14 +307,13 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) #endif #ifdef CONFIG_MEMORY_HOTPLUG -int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, - bool want_memblock) +int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; int ret; - ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); + ret = __add_pages(nid, start_pfn, nr_pages, params); if (ret) printk("%s: Problem encountered in __add_pages() as ret=%d\n", __func__, ret); diff --git a/arch/sw_64/mm/numa.c b/arch/sw_64/mm/numa.c index 97288d91d7bb22fd2f5240e31b783a2301a4b1b3..7cb13587e465d821f8fb56a6ea8e782091921d11 100644 --- a/arch/sw_64/mm/numa.c +++ b/arch/sw_64/mm/numa.c @@ -3,27 +3,11 @@ * DISCONTIGMEM NUMA sw64 support. */ -#include -#include -#include #include -#include -#include -#include -#include #include -#include -#ifdef CONFIG_PCI -#include -#endif #include #include -#include -#include -#include -#include -#include #include int cpu_to_node_map[NR_CPUS]; @@ -417,24 +401,32 @@ void numa_store_cpu_info(unsigned int cpu) set_cpu_numa_node(cpu, cpu_to_node_map[cpu]); } +#ifdef CONFIG_DEBUG_PER_CPU_MAPS /* * Returns a pointer to the bitmask of CPUs on Node 'node'. 
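The guarded accessor that follows warns on each class of misuse and falls back to a safe mask instead of indexing out of bounds. A userspace analogue of the pattern, with all names and values illustrative:

	#include <stdio.h>

	#define NR_NODES	4
	#define NUMA_NO_NODE	(-1)

	static const unsigned long node_cpu_mask[NR_NODES] = {
		0x03, 0x0c, 0x30, 0xc0,		/* 2 CPUs per node, illustrative */
	};

	static unsigned long mask_of_node(int node)
	{
		if (node == NUMA_NO_NODE) {
			fprintf(stderr, "%s: NUMA_NO_NODE\n", __func__);
			return ~0UL;		/* "any CPU" fallback */
		}
		if (node < 0 || node >= NR_NODES) {
			fprintf(stderr, "%s: invalid node %d\n", __func__, node);
			return 0UL;		/* empty mask, nothing to run on */
		}
		return node_cpu_mask[node];
	}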
*/ const struct cpumask *cpumask_of_node(int node) { - if (node == NUMA_NO_NODE) + if (node == NUMA_NO_NODE) { + pr_warn("%s: NUMA_NO_NODE\n", __func__); return cpu_all_mask; + } - if (WARN_ON(node < 0 || node >= nr_node_ids)) + if (WARN_ON(node < 0 || node >= nr_node_ids)) { + pr_warn("%s: invalid node %d\n", __func__, node); return cpu_none_mask; + } - if (WARN_ON(node_to_cpumask_map[node] == NULL)) + if (WARN_ON(node_to_cpumask_map[node] == NULL)) { + pr_warn("%s: uninitialized node %d\n", __func__, node); return cpu_online_mask; + } return node_to_cpumask_map[node]; } EXPORT_SYMBOL(cpumask_of_node); +#endif static void numa_update_cpu(unsigned int cpu, bool remove) { diff --git a/arch/sw_64/mm/physaddr.c b/arch/sw_64/mm/physaddr.c index d5cf83e671ae3cb2ae87919e9edf8ae0c3e4cc31..26769f0bf7bf976a5cee620a4aa77f3f246b15b4 100644 --- a/arch/sw_64/mm/physaddr.c +++ b/arch/sw_64/mm/physaddr.c @@ -1,39 +1,34 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include #include unsigned long __phys_addr(unsigned long x) { - unsigned long y = x; - - if (y >= __START_KERNEL_map) { - y -= __START_KERNEL_map; - VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE); + if (x >= __START_KERNEL_map) { + x -= __START_KERNEL_map; + VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE); } else { - VIRTUAL_BUG_ON(y < PAGE_OFFSET); - y -= PAGE_OFFSET; - VIRTUAL_BUG_ON(!phys_addr_valid(y)); + VIRTUAL_BUG_ON(x < PAGE_OFFSET); + x -= PAGE_OFFSET; + VIRTUAL_BUG_ON(!phys_addr_valid(x)); } - return y; + return x; } EXPORT_SYMBOL(__phys_addr); bool __virt_addr_valid(unsigned long x) { - unsigned long y = x; - - if (y >= __START_KERNEL_map) { - y -= __START_KERNEL_map; - if (y >= KERNEL_IMAGE_SIZE) + if (x >= __START_KERNEL_map) { + x -= __START_KERNEL_map; + if (x >= KERNEL_IMAGE_SIZE) return false; } else { - if (y < PAGE_OFFSET) + if (x < PAGE_OFFSET) return false; - y -= PAGE_OFFSET; + x -= PAGE_OFFSET; } - return pfn_valid(y >> PAGE_SHIFT); + return pfn_valid(x >> PAGE_SHIFT); } EXPORT_SYMBOL(__virt_addr_valid); diff --git a/arch/sw_64/mm/thp.c b/arch/sw_64/mm/thp.c index 68260dd0e837926664ca9918ddb5ffb3a4f430f4..833bb59f79d0e9f01fb9813eec8fc5cb24df9da3 100644 --- a/arch/sw_64/mm/thp.c +++ b/arch/sw_64/mm/thp.c @@ -1,13 +1,4 @@ // SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include diff --git a/arch/sw_64/platform/platform_xuelang.c b/arch/sw_64/platform/platform_xuelang.c index f0e33c664b0e0db2c6f8297c4abfb69b49b6ec60..ae8179b53b4c2f8bba2cf4ca393e9c4e243170c0 100644 --- a/arch/sw_64/platform/platform_xuelang.c +++ b/arch/sw_64/platform/platform_xuelang.c @@ -26,9 +26,25 @@ extern void cpld_write(uint8_t slave_addr, uint8_t reg, uint8_t data); static void xuelang_kill_arch(int mode) { + struct pci_dev *pdev; + struct pci_controller *hose; + int val; + if (is_in_host()) { switch (mode) { case LINUX_REBOOT_CMD_RESTART: + pdev = pci_get_device(PCI_VENDOR_ID_JMICRON, + 0x0585, NULL); + if (pdev) { + hose = (struct pci_controller *)pdev->sysdata; + val = read_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL); + write_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL, val | 0x8); + write_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL, val); + } + cpld_write(0x64, 0x00, 0xc3); mb(); break; @@ -54,7 +70,7 @@ static inline void __iomem *xuelang_ioportmap(unsigned long addr) addr = addr | io_offset; } - return (void __iomem *)(addr | PAGE_OFFSET); + return __va(addr); } struct sw64_platform_ops xuelang_ops = { diff --git 
a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 6a22ead31c5b8159537e12abd2ce1f263a56d5ee..66d5452c2bcb6f2405efc3ebbcd0ce4d640f53b7 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -223,7 +223,7 @@ void mconsole_go(struct mc_request *req) void mconsole_stop(struct mc_request *req) { - deactivate_fd(req->originating_fd, MCONSOLE_IRQ); + block_signals(); os_set_fd_block(req->originating_fd, 1); mconsole_reply(req, "stopped", 0, 0); for (;;) { @@ -246,6 +246,7 @@ void mconsole_stop(struct mc_request *req) } os_set_fd_block(req->originating_fd, 0); mconsole_reply(req, "", 0, 0); + unblock_signals(); } static DEFINE_SPINLOCK(mc_devices_lock); diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index d11b3d41c378552b8890c305ad1437096bca838f..d5d768188b3ba3f19edafc091ee1fe8bc672c849 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -1076,6 +1076,8 @@ static void virtio_uml_release_dev(struct device *d) container_of(d, struct virtio_device, dev); struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev); + time_travel_propagate_time(); + /* might not have been opened due to not negotiating the feature */ if (vu_dev->req_fd >= 0) { um_free_irq(VIRTIO_IRQ, vu_dev); @@ -1109,6 +1111,8 @@ static int virtio_uml_probe(struct platform_device *pdev) vu_dev->pdev = pdev; vu_dev->req_fd = -1; + time_travel_propagate_time(); + do { rc = os_connect_socket(pdata->socket_path); } while (rc == -EINTR); diff --git a/arch/um/include/asm/delay.h b/arch/um/include/asm/delay.h index 56fc2b8f2dd019ec8b75065f2a7f2c791c0c2d7a..e79b2ab6f40c81d6afd54391dbdfabaed82cdaaf 100644 --- a/arch/um/include/asm/delay.h +++ b/arch/um/include/asm/delay.h @@ -14,7 +14,7 @@ static inline void um_ndelay(unsigned long nsecs) ndelay(nsecs); } #undef ndelay -#define ndelay um_ndelay +#define ndelay(n) um_ndelay(n) static inline void um_udelay(unsigned long usecs) { @@ -26,5 +26,5 @@ static inline void um_udelay(unsigned long usecs) udelay(usecs); } #undef udelay -#define udelay um_udelay +#define udelay(n) um_udelay(n) #endif /* __UM_DELAY_H */ diff --git a/arch/um/include/shared/registers.h b/arch/um/include/shared/registers.h index 0c50fa6e8a55b7330f43027c4dea3170e5173d72..fbb709a222839132cec2a2bbf16c4467d88450e6 100644 --- a/arch/um/include/shared/registers.h +++ b/arch/um/include/shared/registers.h @@ -16,8 +16,8 @@ extern int restore_fp_registers(int pid, unsigned long *fp_regs); extern int save_fpx_registers(int pid, unsigned long *fp_regs); extern int restore_fpx_registers(int pid, unsigned long *fp_regs); extern int save_registers(int pid, struct uml_pt_regs *regs); -extern int restore_registers(int pid, struct uml_pt_regs *regs); -extern int init_registers(int pid); +extern int restore_pid_registers(int pid, struct uml_pt_regs *regs); +extern int init_pid_registers(int pid); extern void get_safe_registers(unsigned long *regs, unsigned long *fp_regs); extern unsigned long get_thread_reg(int reg, jmp_buf *buf); extern int get_fp_registers(int pid, unsigned long *regs); diff --git a/arch/um/os-Linux/registers.c b/arch/um/os-Linux/registers.c index 2d9270508e1565620e418f0134bf7573474351eb..b123955be7accf01958684dbbd2fea0ef8130202 100644 --- a/arch/um/os-Linux/registers.c +++ b/arch/um/os-Linux/registers.c @@ -21,7 +21,7 @@ int save_registers(int pid, struct uml_pt_regs *regs) return 0; } -int restore_registers(int pid, struct uml_pt_regs *regs) +int restore_pid_registers(int pid, struct uml_pt_regs *regs) { int err; @@ 
-36,7 +36,7 @@ int restore_registers(int pid, struct uml_pt_regs *regs) static unsigned long exec_regs[MAX_REG_NR]; static unsigned long exec_fp_regs[FP_SIZE]; -int init_registers(int pid) +int init_pid_registers(int pid) { int err; diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c index f79dc338279e65e09653e23c1ac7169bf26076ff..b28373a2b8d2d035c7f96d374c50f9b479250459 100644 --- a/arch/um/os-Linux/start_up.c +++ b/arch/um/os-Linux/start_up.c @@ -336,7 +336,7 @@ void __init os_early_checks(void) check_tmpexec(); pid = start_ptraced_child(); - if (init_registers(pid)) + if (init_pid_registers(pid)) fatal("Failed to initialize default registers"); stop_ptraced_child(pid, 1, 1); } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 040fb77366dd7ed693753524b3cf616dfe56e23a..d17396ef4323d97d584c720eaa0333319065ae92 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1164,10 +1164,6 @@ config X86_MCE_INJECT If you don't know what a machine check is and you don't do kernel QA it is safe to say n. -config X86_THERMAL_VECTOR - def_bool y - depends on X86_MCE_INTEL - source "arch/x86/events/Kconfig" config X86_LEGACY_VM86 @@ -1967,6 +1963,7 @@ config X86_SGX select SRCU select MMU_NOTIFIER select NUMA_KEEP_MEMINFO if NUMA + select XARRAY_MULTI help Intel(R) Software Guard eXtensions (SGX) is a set of CPU instructions that can be used by applications to set aside private regions of code @@ -2908,6 +2905,11 @@ config IA32_AOUT config X86_X32 bool "x32 ABI for 64-bit mode" depends on X86_64 + # llvm-objcopy does not convert x86_64 .note.gnu.property or + # compressed debug sections to x86_x32 properly: + # https://github.com/ClangBuiltLinux/linux/issues/514 + # https://github.com/ClangBuiltLinux/linux/issues/1141 + depends on $(success,$(OBJCOPY) --version | head -n1 | grep -qv llvm) help Include code to run binaries for the x32 native 32-bit ABI for 64-bit processors. An x32 process gets access to the diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 6004047d25fdda7135884cb5f85363caab72d8b4..bf91e0a36d77fa1fb3882e247633873ed18a6bc4 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -28,7 +28,11 @@ KCOV_INSTRUMENT := n targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst -KBUILD_CFLAGS := -m$(BITS) -O2 +# CLANG_FLAGS must come before any cc-disable-warning or cc-option calls in +# case of cross compiling, as it has the '--target=' flag, which is needed to +# avoid errors with '-march=i386', and future flags may depend on the target to +# be valid. +KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS) KBUILD_CFLAGS += -fno-strict-aliasing -fPIE KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING cflags-$(CONFIG_X86_32) := -march=i386 @@ -46,7 +50,6 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS # Disable relocation relaxation in case the link is not PIE. KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no) KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h -KBUILD_CFLAGS += $(CLANG_FLAGS) # sev-es.c indirectly inludes inat-table.h which is generated during # compilation and stored in $(objtree). 
Add the directory to the includes so diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index ce8a6a514afd8b5bc46aef15f4942896c1f889a6..f14356cdc76f5ac7a45f85d91ba79893fec31fda 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -265,3 +265,4 @@ CONFIG_PROVIDE_OHCI1394_DMA_INIT=y CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_BOOT_PARAMS=y CONFIG_OPTIMIZE_INLINING=y +CONFIG_KALLSYMS_ALL=y diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index 64afe7021c45692da862b0664b63f6ed7beef9d6..3eac70518e6f43e638575840a9a30fd1ecba0cf1 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -152,11 +152,13 @@ CONFIG_PAGE_COUNTER=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_MEMCG_KMEM=y +CONFIG_MEMCG_MEMFS_INFO=y CONFIG_BLK_CGROUP=y CONFIG_CGROUP_WRITEBACK=y CONFIG_CGROUP_SCHED=y CONFIG_QOS_SCHED=y CONFIG_FAIR_GROUP_SCHED=y +CONFIG_QOS_SCHED_SMT_EXPELLER=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y @@ -2367,7 +2369,6 @@ CONFIG_SCSI_QLA_FC=m CONFIG_SCSI_QLA_ISCSI=m CONFIG_QEDI=m CONFIG_QEDF=m -CONFIG_SPFC=m CONFIG_SCSI_HUAWEI_FC=m CONFIG_SCSI_FC_HIFC=m CONFIG_SCSI_LPFC=m @@ -2793,8 +2794,6 @@ CONFIG_QED_ISCSI=y CONFIG_QED_FCOE=y CONFIG_QED_OOO=y # CONFIG_NET_VENDOR_QUALCOMM is not set -CONFIG_NET_VENDOR_RAMAXEL=y -CONFIG_SPNIC=m # CONFIG_NET_VENDOR_RDC is not set CONFIG_NET_VENDOR_REALTEK=y # CONFIG_ATP is not set @@ -4054,6 +4053,7 @@ CONFIG_SENSORS_TMP421=m # CONFIG_SENSORS_TMP513 is not set CONFIG_SENSORS_VIA_CPUTEMP=m CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m CONFIG_SENSORS_VT1211=m CONFIG_SENSORS_VT8231=m # CONFIG_SENSORS_W83773G is not set @@ -6371,7 +6371,7 @@ CONFIG_HSU_DMA=y # DMA Clients # CONFIG_ASYNC_TX_DMA=y -CONFIG_DMATEST=y +CONFIG_DMATEST=m CONFIG_DMA_ENGINE_RAID=y # diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 8ee371591a9927951baf12c79f09ecfa91ef6397..ea69f5cc01a6d038a801bd78cc23b6c7f1b31a49 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -261,3 +261,4 @@ CONFIG_PROVIDE_OHCI1394_DMA_INIT=y CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_BOOT_PARAMS=y CONFIG_OPTIMIZE_INLINING=y +CONFIG_KALLSYMS_ALL=y diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 697b8ccfb763e566c386b8d98000ac3616eb24f6..f03ded83dfa246a295f016baa0deb142040c528e 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -92,6 +92,9 @@ nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o +obj-$(CONFIG_CRYPTO_SM3_AVX_X86_64) += sm3-avx-x86_64.o +sm3-avx-x86_64-y := sm3-avx-asm_64.o sm3_avx_glue.o + obj-$(CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64) += sm4-aesni-avx-x86_64.o sm4-aesni-avx-x86_64-y := sm4-aesni-avx-asm_64.o sm4_aesni_avx_glue.o diff --git a/arch/x86/crypto/sm3-avx-asm_64.S b/arch/x86/crypto/sm3-avx-asm_64.S new file mode 100644 index 0000000000000000000000000000000000000000..71e6aae23e17c9637eae112f875e4a7c0643ff0a --- /dev/null +++ b/arch/x86/crypto/sm3-avx-asm_64.S @@ -0,0 +1,517 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM3 AVX accelerated transform. 
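The 64 round constants K0..K63 in the table below are rol32(T, j) with T = 0x79cc4519 for rounds 0-15 and T = 0x7a879d8a for rounds 16-63 (the rotation count is taken mod 32, and the table lists the results as signed decimals). They can be regenerated with this sketch:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t rol32(uint32_t x, unsigned int r)
	{
		r &= 31;
		return (x << r) | (x >> ((32 - r) & 31));
	}

	int main(void)
	{
		for (int j = 0; j < 64; j++) {
			uint32_t t = (j < 16) ? 0x79cc4519u : 0x7a879d8au;
			uint32_t k = rol32(t, j);

			printf("#define K%d %d /* 0x%08x */\n", j, (int32_t)k, k);
		}
		return 0;
	}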
+ * specified in: https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02 + * + * Copyright (C) 2021 Jussi Kivilinna + * Copyright (C) 2021 Tianjia Zhang + */ + +/* Based on SM3 AES/BMI2 accelerated work by libgcrypt at: + * https://gnupg.org/software/libgcrypt/index.html + */ + +#include +#include + +/* Context structure */ + +#define state_h0 0 +#define state_h1 4 +#define state_h2 8 +#define state_h3 12 +#define state_h4 16 +#define state_h5 20 +#define state_h6 24 +#define state_h7 28 + +/* Constants */ + +/* Round constant macros */ + +#define K0 2043430169 /* 0x79cc4519 */ +#define K1 -208106958 /* 0xf3988a32 */ +#define K2 -416213915 /* 0xe7311465 */ +#define K3 -832427829 /* 0xce6228cb */ +#define K4 -1664855657 /* 0x9cc45197 */ +#define K5 965255983 /* 0x3988a32f */ +#define K6 1930511966 /* 0x7311465e */ +#define K7 -433943364 /* 0xe6228cbc */ +#define K8 -867886727 /* 0xcc451979 */ +#define K9 -1735773453 /* 0x988a32f3 */ +#define K10 823420391 /* 0x311465e7 */ +#define K11 1646840782 /* 0x6228cbce */ +#define K12 -1001285732 /* 0xc451979c */ +#define K13 -2002571463 /* 0x88a32f39 */ +#define K14 289824371 /* 0x11465e73 */ +#define K15 579648742 /* 0x228cbce6 */ +#define K16 -1651869049 /* 0x9d8a7a87 */ +#define K17 991229199 /* 0x3b14f50f */ +#define K18 1982458398 /* 0x7629ea1e */ +#define K19 -330050500 /* 0xec53d43c */ +#define K20 -660100999 /* 0xd8a7a879 */ +#define K21 -1320201997 /* 0xb14f50f3 */ +#define K22 1654563303 /* 0x629ea1e7 */ +#define K23 -985840690 /* 0xc53d43ce */ +#define K24 -1971681379 /* 0x8a7a879d */ +#define K25 351604539 /* 0x14f50f3b */ +#define K26 703209078 /* 0x29ea1e76 */ +#define K27 1406418156 /* 0x53d43cec */ +#define K28 -1482130984 /* 0xa7a879d8 */ +#define K29 1330705329 /* 0x4f50f3b1 */ +#define K30 -1633556638 /* 0x9ea1e762 */ +#define K31 1027854021 /* 0x3d43cec5 */ +#define K32 2055708042 /* 0x7a879d8a */ +#define K33 -183551212 /* 0xf50f3b14 */ +#define K34 -367102423 /* 0xea1e7629 */ +#define K35 -734204845 /* 0xd43cec53 */ +#define K36 -1468409689 /* 0xa879d8a7 */ +#define K37 1358147919 /* 0x50f3b14f */ +#define K38 -1578671458 /* 0xa1e7629e */ +#define K39 1137624381 /* 0x43cec53d */ +#define K40 -2019718534 /* 0x879d8a7a */ +#define K41 255530229 /* 0x0f3b14f5 */ +#define K42 511060458 /* 0x1e7629ea */ +#define K43 1022120916 /* 0x3cec53d4 */ +#define K44 2044241832 /* 0x79d8a7a8 */ +#define K45 -206483632 /* 0xf3b14f50 */ +#define K46 -412967263 /* 0xe7629ea1 */ +#define K47 -825934525 /* 0xcec53d43 */ +#define K48 -1651869049 /* 0x9d8a7a87 */ +#define K49 991229199 /* 0x3b14f50f */ +#define K50 1982458398 /* 0x7629ea1e */ +#define K51 -330050500 /* 0xec53d43c */ +#define K52 -660100999 /* 0xd8a7a879 */ +#define K53 -1320201997 /* 0xb14f50f3 */ +#define K54 1654563303 /* 0x629ea1e7 */ +#define K55 -985840690 /* 0xc53d43ce */ +#define K56 -1971681379 /* 0x8a7a879d */ +#define K57 351604539 /* 0x14f50f3b */ +#define K58 703209078 /* 0x29ea1e76 */ +#define K59 1406418156 /* 0x53d43cec */ +#define K60 -1482130984 /* 0xa7a879d8 */ +#define K61 1330705329 /* 0x4f50f3b1 */ +#define K62 -1633556638 /* 0x9ea1e762 */ +#define K63 1027854021 /* 0x3d43cec5 */ + +/* Register macros */ + +#define RSTATE %rdi +#define RDATA %rsi +#define RNBLKS %rdx + +#define t0 %eax +#define t1 %ebx +#define t2 %ecx + +#define a %r8d +#define b %r9d +#define c %r10d +#define d %r11d +#define e %r12d +#define f %r13d +#define g %r14d +#define h %r15d + +#define W0 %xmm0 +#define W1 %xmm1 +#define W2 %xmm2 +#define W3 %xmm3 +#define W4 %xmm4 +#define W5 
%xmm5 + +#define XTMP0 %xmm6 +#define XTMP1 %xmm7 +#define XTMP2 %xmm8 +#define XTMP3 %xmm9 +#define XTMP4 %xmm10 +#define XTMP5 %xmm11 +#define XTMP6 %xmm12 + +#define BSWAP_REG %xmm15 + +/* Stack structure */ + +#define STACK_W_SIZE (32 * 2 * 3) +#define STACK_REG_SAVE_SIZE (64) + +#define STACK_W (0) +#define STACK_REG_SAVE (STACK_W + STACK_W_SIZE) +#define STACK_SIZE (STACK_REG_SAVE + STACK_REG_SAVE_SIZE) + +/* Instruction helpers. */ + +#define roll2(v, reg) \ + roll $(v), reg; + +#define roll3mov(v, src, dst) \ + movl src, dst; \ + roll $(v), dst; + +#define roll3(v, src, dst) \ + rorxl $(32-(v)), src, dst; + +#define addl2(a, out) \ + leal (a, out), out; + +/* Round function macros. */ + +#define GG1(x, y, z, o, t) \ + movl x, o; \ + xorl y, o; \ + xorl z, o; + +#define FF1(x, y, z, o, t) GG1(x, y, z, o, t) + +#define GG2(x, y, z, o, t) \ + andnl z, x, o; \ + movl y, t; \ + andl x, t; \ + addl2(t, o); + +#define FF2(x, y, z, o, t) \ + movl y, o; \ + xorl x, o; \ + movl y, t; \ + andl x, t; \ + andl z, o; \ + xorl t, o; + +#define R(i, a, b, c, d, e, f, g, h, round, widx, wtype) \ + /* rol(a, 12) => t0 */ \ + roll3mov(12, a, t0); /* rorxl here would reduce perf by 6% on zen3 */ \ + /* rol (t0 + e + t), 7) => t1 */ \ + leal K##round(t0, e, 1), t1; \ + roll2(7, t1); \ + /* h + w1 => h */ \ + addl wtype##_W1_ADDR(round, widx), h; \ + /* h + t1 => h */ \ + addl2(t1, h); \ + /* t1 ^ t0 => t0 */ \ + xorl t1, t0; \ + /* w1w2 + d => d */ \ + addl wtype##_W1W2_ADDR(round, widx), d; \ + /* FF##i(a,b,c) => t1 */ \ + FF##i(a, b, c, t1, t2); \ + /* d + t1 => d */ \ + addl2(t1, d); \ + /* GG#i(e,f,g) => t2 */ \ + GG##i(e, f, g, t2, t1); \ + /* h + t2 => h */ \ + addl2(t2, h); \ + /* rol (f, 19) => f */ \ + roll2(19, f); \ + /* d + t0 => d */ \ + addl2(t0, d); \ + /* rol (b, 9) => b */ \ + roll2(9, b); \ + /* P0(h) => h */ \ + roll3(9, h, t2); \ + roll3(17, h, t1); \ + xorl t2, h; \ + xorl t1, h; + +#define R1(a, b, c, d, e, f, g, h, round, widx, wtype) \ + R(1, a, b, c, d, e, f, g, h, round, widx, wtype) + +#define R2(a, b, c, d, e, f, g, h, round, widx, wtype) \ + R(2, a, b, c, d, e, f, g, h, round, widx, wtype) + +/* Input expansion macros. */ + +/* Byte-swapped input address. */ +#define IW_W_ADDR(round, widx, offs) \ + (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))(%rsp) + +/* Expanded input address. */ +#define XW_W_ADDR(round, widx, offs) \ + (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))(%rsp) + +/* Rounds 1-12, byte-swapped input block addresses. */ +#define IW_W1_ADDR(round, widx) IW_W_ADDR(round, widx, 0) +#define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 32) + +/* Rounds 1-12, expanded input block addresses. */ +#define XW_W1_ADDR(round, widx) XW_W_ADDR(round, widx, 0) +#define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 32) + +/* Input block loading. 
*/ +#define LOAD_W_XMM_1() \ + vmovdqu 0*16(RDATA), XTMP0; /* XTMP0: w3, w2, w1, w0 */ \ + vmovdqu 1*16(RDATA), XTMP1; /* XTMP1: w7, w6, w5, w4 */ \ + vmovdqu 2*16(RDATA), XTMP2; /* XTMP2: w11, w10, w9, w8 */ \ + vmovdqu 3*16(RDATA), XTMP3; /* XTMP3: w15, w14, w13, w12 */ \ + vpshufb BSWAP_REG, XTMP0, XTMP0; \ + vpshufb BSWAP_REG, XTMP1, XTMP1; \ + vpshufb BSWAP_REG, XTMP2, XTMP2; \ + vpshufb BSWAP_REG, XTMP3, XTMP3; \ + vpxor XTMP0, XTMP1, XTMP4; \ + vpxor XTMP1, XTMP2, XTMP5; \ + vpxor XTMP2, XTMP3, XTMP6; \ + leaq 64(RDATA), RDATA; \ + vmovdqa XTMP0, IW_W1_ADDR(0, 0); \ + vmovdqa XTMP4, IW_W1W2_ADDR(0, 0); \ + vmovdqa XTMP1, IW_W1_ADDR(4, 0); \ + vmovdqa XTMP5, IW_W1W2_ADDR(4, 0); + +#define LOAD_W_XMM_2() \ + vmovdqa XTMP2, IW_W1_ADDR(8, 0); \ + vmovdqa XTMP6, IW_W1W2_ADDR(8, 0); + +#define LOAD_W_XMM_3() \ + vpshufd $0b00000000, XTMP0, W0; /* W0: xx, w0, xx, xx */ \ + vpshufd $0b11111001, XTMP0, W1; /* W1: xx, w3, w2, w1 */ \ + vmovdqa XTMP1, W2; /* W2: xx, w6, w5, w4 */ \ + vpalignr $12, XTMP1, XTMP2, W3; /* W3: xx, w9, w8, w7 */ \ + vpalignr $8, XTMP2, XTMP3, W4; /* W4: xx, w12, w11, w10 */ \ + vpshufd $0b11111001, XTMP3, W5; /* W5: xx, w15, w14, w13 */ + +/* Message scheduling. Note: 3 words per XMM register. */ +#define SCHED_W_0(round, w0, w1, w2, w3, w4, w5) \ + /* Load (w[i - 16]) => XTMP0 */ \ + vpshufd $0b10111111, w0, XTMP0; \ + vpalignr $12, XTMP0, w1, XTMP0; /* XTMP0: xx, w2, w1, w0 */ \ + /* Load (w[i - 13]) => XTMP1 */ \ + vpshufd $0b10111111, w1, XTMP1; \ + vpalignr $12, XTMP1, w2, XTMP1; \ + /* w[i - 9] == w3 */ \ + /* XMM3 ^ XTMP0 => XTMP0 */ \ + vpxor w3, XTMP0, XTMP0; + +#define SCHED_W_1(round, w0, w1, w2, w3, w4, w5) \ + /* w[i - 3] == w5 */ \ + /* rol(XMM5, 15) ^ XTMP0 => XTMP0 */ \ + vpslld $15, w5, XTMP2; \ + vpsrld $(32-15), w5, XTMP3; \ + vpxor XTMP2, XTMP3, XTMP3; \ + vpxor XTMP3, XTMP0, XTMP0; \ + /* rol(XTMP1, 7) => XTMP1 */ \ + vpslld $7, XTMP1, XTMP5; \ + vpsrld $(32-7), XTMP1, XTMP1; \ + vpxor XTMP5, XTMP1, XTMP1; \ + /* XMM4 ^ XTMP1 => XTMP1 */ \ + vpxor w4, XTMP1, XTMP1; \ + /* w[i - 6] == XMM4 */ \ + /* P1(XTMP0) ^ XTMP1 => XMM0 */ \ + vpslld $15, XTMP0, XTMP5; \ + vpsrld $(32-15), XTMP0, XTMP6; \ + vpslld $23, XTMP0, XTMP2; \ + vpsrld $(32-23), XTMP0, XTMP3; \ + vpxor XTMP0, XTMP1, XTMP1; \ + vpxor XTMP6, XTMP5, XTMP5; \ + vpxor XTMP3, XTMP2, XTMP2; \ + vpxor XTMP2, XTMP5, XTMP5; \ + vpxor XTMP5, XTMP1, w0; + +#define SCHED_W_2(round, w0, w1, w2, w3, w4, w5) \ + /* W1 in XMM12 */ \ + vpshufd $0b10111111, w4, XTMP4; \ + vpalignr $12, XTMP4, w5, XTMP4; \ + vmovdqa XTMP4, XW_W1_ADDR((round), 0); \ + /* W1 ^ W2 => XTMP1 */ \ + vpxor w0, XTMP4, XTMP1; \ + vmovdqa XTMP1, XW_W1W2_ADDR((round), 0); + + +.section .rodata.cst16, "aM", @progbits, 16 +.align 16 + +.Lbe32mask: + .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f + +.text + +/* + * Transform nblocks*64 bytes (nblocks*16 32-bit words) at DATA. 
+ * + * void sm3_transform_avx(struct sm3_state *state, + * const u8 *data, int nblocks); + */ +.align 16 +SYM_FUNC_START(sm3_transform_avx) + /* input: + * %rdi: ctx, CTX + * %rsi: data (64*nblks bytes) + * %rdx: nblocks + */ + vzeroupper; + + pushq %rbp; + movq %rsp, %rbp; + + movq %rdx, RNBLKS; + + subq $STACK_SIZE, %rsp; + andq $(~63), %rsp; + + movq %rbx, (STACK_REG_SAVE + 0 * 8)(%rsp); + movq %r15, (STACK_REG_SAVE + 1 * 8)(%rsp); + movq %r14, (STACK_REG_SAVE + 2 * 8)(%rsp); + movq %r13, (STACK_REG_SAVE + 3 * 8)(%rsp); + movq %r12, (STACK_REG_SAVE + 4 * 8)(%rsp); + + vmovdqa .Lbe32mask (%rip), BSWAP_REG; + + /* Get the values of the chaining variables. */ + movl state_h0(RSTATE), a; + movl state_h1(RSTATE), b; + movl state_h2(RSTATE), c; + movl state_h3(RSTATE), d; + movl state_h4(RSTATE), e; + movl state_h5(RSTATE), f; + movl state_h6(RSTATE), g; + movl state_h7(RSTATE), h; + +.align 16 +.Loop: + /* Load data part1. */ + LOAD_W_XMM_1(); + + leaq -1(RNBLKS), RNBLKS; + + /* Transform 0-3 + Load data part2. */ + R1(a, b, c, d, e, f, g, h, 0, 0, IW); LOAD_W_XMM_2(); + R1(d, a, b, c, h, e, f, g, 1, 1, IW); + R1(c, d, a, b, g, h, e, f, 2, 2, IW); + R1(b, c, d, a, f, g, h, e, 3, 3, IW); LOAD_W_XMM_3(); + + /* Transform 4-7 + Precalc 12-14. */ + R1(a, b, c, d, e, f, g, h, 4, 0, IW); + R1(d, a, b, c, h, e, f, g, 5, 1, IW); + R1(c, d, a, b, g, h, e, f, 6, 2, IW); SCHED_W_0(12, W0, W1, W2, W3, W4, W5); + R1(b, c, d, a, f, g, h, e, 7, 3, IW); SCHED_W_1(12, W0, W1, W2, W3, W4, W5); + + /* Transform 8-11 + Precalc 12-17. */ + R1(a, b, c, d, e, f, g, h, 8, 0, IW); SCHED_W_2(12, W0, W1, W2, W3, W4, W5); + R1(d, a, b, c, h, e, f, g, 9, 1, IW); SCHED_W_0(15, W1, W2, W3, W4, W5, W0); + R1(c, d, a, b, g, h, e, f, 10, 2, IW); SCHED_W_1(15, W1, W2, W3, W4, W5, W0); + R1(b, c, d, a, f, g, h, e, 11, 3, IW); SCHED_W_2(15, W1, W2, W3, W4, W5, W0); + + /* Transform 12-14 + Precalc 18-20 */ + R1(a, b, c, d, e, f, g, h, 12, 0, XW); SCHED_W_0(18, W2, W3, W4, W5, W0, W1); + R1(d, a, b, c, h, e, f, g, 13, 1, XW); SCHED_W_1(18, W2, W3, W4, W5, W0, W1); + R1(c, d, a, b, g, h, e, f, 14, 2, XW); SCHED_W_2(18, W2, W3, W4, W5, W0, W1); + + /* Transform 15-17 + Precalc 21-23 */ + R1(b, c, d, a, f, g, h, e, 15, 0, XW); SCHED_W_0(21, W3, W4, W5, W0, W1, W2); + R2(a, b, c, d, e, f, g, h, 16, 1, XW); SCHED_W_1(21, W3, W4, W5, W0, W1, W2); + R2(d, a, b, c, h, e, f, g, 17, 2, XW); SCHED_W_2(21, W3, W4, W5, W0, W1, W2); + + /* Transform 18-20 + Precalc 24-26 */ + R2(c, d, a, b, g, h, e, f, 18, 0, XW); SCHED_W_0(24, W4, W5, W0, W1, W2, W3); + R2(b, c, d, a, f, g, h, e, 19, 1, XW); SCHED_W_1(24, W4, W5, W0, W1, W2, W3); + R2(a, b, c, d, e, f, g, h, 20, 2, XW); SCHED_W_2(24, W4, W5, W0, W1, W2, W3); + + /* Transform 21-23 + Precalc 27-29 */ + R2(d, a, b, c, h, e, f, g, 21, 0, XW); SCHED_W_0(27, W5, W0, W1, W2, W3, W4); + R2(c, d, a, b, g, h, e, f, 22, 1, XW); SCHED_W_1(27, W5, W0, W1, W2, W3, W4); + R2(b, c, d, a, f, g, h, e, 23, 2, XW); SCHED_W_2(27, W5, W0, W1, W2, W3, W4); + + /* Transform 24-26 + Precalc 30-32 */ + R2(a, b, c, d, e, f, g, h, 24, 0, XW); SCHED_W_0(30, W0, W1, W2, W3, W4, W5); + R2(d, a, b, c, h, e, f, g, 25, 1, XW); SCHED_W_1(30, W0, W1, W2, W3, W4, W5); + R2(c, d, a, b, g, h, e, f, 26, 2, XW); SCHED_W_2(30, W0, W1, W2, W3, W4, W5); + + /* Transform 27-29 + Precalc 33-35 */ + R2(b, c, d, a, f, g, h, e, 27, 0, XW); SCHED_W_0(33, W1, W2, W3, W4, W5, W0); + R2(a, b, c, d, e, f, g, h, 28, 1, XW); SCHED_W_1(33, W1, W2, W3, W4, W5, W0); + R2(d, a, b, c, h, e, f, g, 29, 2, XW); SCHED_W_2(33, W1, W2, W3, W4, W5, W0); + 
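The SCHED_W_0/1/2 triplets interleaved with the round macros above implement SM3 message expansion three words at a time, hiding the expansion latency behind the rounds. In scalar C, the recurrence they compute is (a sketch; rol32 masked to avoid shift-by-32):

	#include <stdint.h>

	static uint32_t rol32(uint32_t x, unsigned int r)
	{
		r &= 31;
		return (x << r) | (x >> ((32 - r) & 31));
	}

	static uint32_t p1(uint32_t x)		/* P1 permutation */
	{
		return x ^ rol32(x, 15) ^ rol32(x, 23);
	}

	/* W[j] for 16 <= j < 68, from the sixteen preceding words */
	static uint32_t expand_w(const uint32_t *w, int j)
	{
		return p1(w[j - 16] ^ w[j - 9] ^ rol32(w[j - 3], 15))
		       ^ rol32(w[j - 13], 7) ^ w[j - 6];
	}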
+ /* Transform 30-32 + Precalc 36-38 */ + R2(c, d, a, b, g, h, e, f, 30, 0, XW); SCHED_W_0(36, W2, W3, W4, W5, W0, W1); + R2(b, c, d, a, f, g, h, e, 31, 1, XW); SCHED_W_1(36, W2, W3, W4, W5, W0, W1); + R2(a, b, c, d, e, f, g, h, 32, 2, XW); SCHED_W_2(36, W2, W3, W4, W5, W0, W1); + + /* Transform 33-35 + Precalc 39-41 */ + R2(d, a, b, c, h, e, f, g, 33, 0, XW); SCHED_W_0(39, W3, W4, W5, W0, W1, W2); + R2(c, d, a, b, g, h, e, f, 34, 1, XW); SCHED_W_1(39, W3, W4, W5, W0, W1, W2); + R2(b, c, d, a, f, g, h, e, 35, 2, XW); SCHED_W_2(39, W3, W4, W5, W0, W1, W2); + + /* Transform 36-38 + Precalc 42-44 */ + R2(a, b, c, d, e, f, g, h, 36, 0, XW); SCHED_W_0(42, W4, W5, W0, W1, W2, W3); + R2(d, a, b, c, h, e, f, g, 37, 1, XW); SCHED_W_1(42, W4, W5, W0, W1, W2, W3); + R2(c, d, a, b, g, h, e, f, 38, 2, XW); SCHED_W_2(42, W4, W5, W0, W1, W2, W3); + + /* Transform 39-41 + Precalc 45-47 */ + R2(b, c, d, a, f, g, h, e, 39, 0, XW); SCHED_W_0(45, W5, W0, W1, W2, W3, W4); + R2(a, b, c, d, e, f, g, h, 40, 1, XW); SCHED_W_1(45, W5, W0, W1, W2, W3, W4); + R2(d, a, b, c, h, e, f, g, 41, 2, XW); SCHED_W_2(45, W5, W0, W1, W2, W3, W4); + + /* Transform 42-44 + Precalc 48-50 */ + R2(c, d, a, b, g, h, e, f, 42, 0, XW); SCHED_W_0(48, W0, W1, W2, W3, W4, W5); + R2(b, c, d, a, f, g, h, e, 43, 1, XW); SCHED_W_1(48, W0, W1, W2, W3, W4, W5); + R2(a, b, c, d, e, f, g, h, 44, 2, XW); SCHED_W_2(48, W0, W1, W2, W3, W4, W5); + + /* Transform 45-47 + Precalc 51-53 */ + R2(d, a, b, c, h, e, f, g, 45, 0, XW); SCHED_W_0(51, W1, W2, W3, W4, W5, W0); + R2(c, d, a, b, g, h, e, f, 46, 1, XW); SCHED_W_1(51, W1, W2, W3, W4, W5, W0); + R2(b, c, d, a, f, g, h, e, 47, 2, XW); SCHED_W_2(51, W1, W2, W3, W4, W5, W0); + + /* Transform 48-50 + Precalc 54-56 */ + R2(a, b, c, d, e, f, g, h, 48, 0, XW); SCHED_W_0(54, W2, W3, W4, W5, W0, W1); + R2(d, a, b, c, h, e, f, g, 49, 1, XW); SCHED_W_1(54, W2, W3, W4, W5, W0, W1); + R2(c, d, a, b, g, h, e, f, 50, 2, XW); SCHED_W_2(54, W2, W3, W4, W5, W0, W1); + + /* Transform 51-53 + Precalc 57-59 */ + R2(b, c, d, a, f, g, h, e, 51, 0, XW); SCHED_W_0(57, W3, W4, W5, W0, W1, W2); + R2(a, b, c, d, e, f, g, h, 52, 1, XW); SCHED_W_1(57, W3, W4, W5, W0, W1, W2); + R2(d, a, b, c, h, e, f, g, 53, 2, XW); SCHED_W_2(57, W3, W4, W5, W0, W1, W2); + + /* Transform 54-56 + Precalc 60-62 */ + R2(c, d, a, b, g, h, e, f, 54, 0, XW); SCHED_W_0(60, W4, W5, W0, W1, W2, W3); + R2(b, c, d, a, f, g, h, e, 55, 1, XW); SCHED_W_1(60, W4, W5, W0, W1, W2, W3); + R2(a, b, c, d, e, f, g, h, 56, 2, XW); SCHED_W_2(60, W4, W5, W0, W1, W2, W3); + + /* Transform 57-59 + Precalc 63 */ + R2(d, a, b, c, h, e, f, g, 57, 0, XW); SCHED_W_0(63, W5, W0, W1, W2, W3, W4); + R2(c, d, a, b, g, h, e, f, 58, 1, XW); + R2(b, c, d, a, f, g, h, e, 59, 2, XW); SCHED_W_1(63, W5, W0, W1, W2, W3, W4); + + /* Transform 60-62 + Precalc 63 */ + R2(a, b, c, d, e, f, g, h, 60, 0, XW); + R2(d, a, b, c, h, e, f, g, 61, 1, XW); SCHED_W_2(63, W5, W0, W1, W2, W3, W4); + R2(c, d, a, b, g, h, e, f, 62, 2, XW); + + /* Transform 63 */ + R2(b, c, d, a, f, g, h, e, 63, 0, XW); + + /* Update the chaining variables. 
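The xor/store tail that follows is the XOR feed-forward of SM3's compression function, V(i+1) = V(i) XOR (a..h). In C, a sketch:

	#include <stdint.h>

	/* fold the working variables back into the 256-bit chaining state */
	static void sm3_chain(uint32_t state[8], const uint32_t work[8])
	{
		int i;

		for (i = 0; i < 8; i++)
			state[i] ^= work[i];
	}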
*/ + xorl state_h0(RSTATE), a; + xorl state_h1(RSTATE), b; + xorl state_h2(RSTATE), c; + xorl state_h3(RSTATE), d; + movl a, state_h0(RSTATE); + movl b, state_h1(RSTATE); + movl c, state_h2(RSTATE); + movl d, state_h3(RSTATE); + xorl state_h4(RSTATE), e; + xorl state_h5(RSTATE), f; + xorl state_h6(RSTATE), g; + xorl state_h7(RSTATE), h; + movl e, state_h4(RSTATE); + movl f, state_h5(RSTATE); + movl g, state_h6(RSTATE); + movl h, state_h7(RSTATE); + + cmpq $0, RNBLKS; + jne .Loop; + + vzeroall; + + movq (STACK_REG_SAVE + 0 * 8)(%rsp), %rbx; + movq (STACK_REG_SAVE + 1 * 8)(%rsp), %r15; + movq (STACK_REG_SAVE + 2 * 8)(%rsp), %r14; + movq (STACK_REG_SAVE + 3 * 8)(%rsp), %r13; + movq (STACK_REG_SAVE + 4 * 8)(%rsp), %r12; + + vmovdqa %xmm0, IW_W1_ADDR(0, 0); + vmovdqa %xmm0, IW_W1W2_ADDR(0, 0); + vmovdqa %xmm0, IW_W1_ADDR(4, 0); + vmovdqa %xmm0, IW_W1W2_ADDR(4, 0); + vmovdqa %xmm0, IW_W1_ADDR(8, 0); + vmovdqa %xmm0, IW_W1W2_ADDR(8, 0); + + movq %rbp, %rsp; + popq %rbp; + ret; +SYM_FUNC_END(sm3_transform_avx) diff --git a/arch/x86/crypto/sm3_avx_glue.c b/arch/x86/crypto/sm3_avx_glue.c new file mode 100644 index 0000000000000000000000000000000000000000..661b6f22ffcd80cf20fe824f8524c03e1691a032 --- /dev/null +++ b/arch/x86/crypto/sm3_avx_glue.c @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * SM3 Secure Hash Algorithm, AVX assembler accelerated. + * specified in: https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02 + * + * Copyright (C) 2021 Tianjia Zhang + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +asmlinkage void sm3_transform_avx(struct sm3_state *state, + const u8 *data, int nblocks); + +static int sm3_avx_update(struct shash_desc *desc, const u8 *data, + unsigned int len) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + + if (!crypto_simd_usable() || + (sctx->count % SM3_BLOCK_SIZE) + len < SM3_BLOCK_SIZE) { + sm3_update(sctx, data, len); + return 0; + } + + /* + * Make sure struct sm3_state begins directly with the SM3 + * 256-bit internal state, as this is what the asm functions expect. 
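The BUILD_BUG_ON just below is a compile-time layout assertion: the build fails if the context struct ever stops beginning with the raw state words the assembly indexes directly. A freestanding sketch using one classic definition of the macro; the struct fields here are assumptions about sm3_state, not its actual definition:

	#include <stddef.h>

	#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

	struct sm3_state_sketch {
		unsigned int state[8];		/* must stay first: asm relies on it */
		unsigned long long count;
		unsigned char buffer[64];
	};

	static inline void sm3_check_layout(void)
	{
		BUILD_BUG_ON(offsetof(struct sm3_state_sketch, state) != 0);
	}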
*/ + BUILD_BUG_ON(offsetof(struct sm3_state, state) != 0); + + kernel_fpu_begin(); + sm3_base_do_update(desc, data, len, sm3_transform_avx); + kernel_fpu_end(); + + return 0; +} + +static int sm3_avx_finup(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) +{ + if (!crypto_simd_usable()) { + struct sm3_state *sctx = shash_desc_ctx(desc); + + if (len) + sm3_update(sctx, data, len); + + sm3_final(sctx, out); + return 0; + } + + kernel_fpu_begin(); + if (len) + sm3_base_do_update(desc, data, len, sm3_transform_avx); + sm3_base_do_finalize(desc, sm3_transform_avx); + kernel_fpu_end(); + + return sm3_base_finish(desc, out); +} + +static int sm3_avx_final(struct shash_desc *desc, u8 *out) +{ + if (!crypto_simd_usable()) { + sm3_final(shash_desc_ctx(desc), out); + return 0; + } + + kernel_fpu_begin(); + sm3_base_do_finalize(desc, sm3_transform_avx); + kernel_fpu_end(); + + return sm3_base_finish(desc, out); +} + +static struct shash_alg sm3_avx_alg = { + .digestsize = SM3_DIGEST_SIZE, + .init = sm3_base_init, + .update = sm3_avx_update, + .final = sm3_avx_final, + .finup = sm3_avx_finup, + .descsize = sizeof(struct sm3_state), + .base = { + .cra_name = "sm3", + .cra_driver_name = "sm3-avx", + .cra_priority = 300, + .cra_blocksize = SM3_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static int __init sm3_avx_mod_init(void) +{ + const char *feature_name; + + if (!boot_cpu_has(X86_FEATURE_AVX)) { + pr_info("AVX instructions are not detected.\n"); + return -ENODEV; + } + + if (!boot_cpu_has(X86_FEATURE_BMI2)) { + pr_info("BMI2 instructions are not detected.\n"); + return -ENODEV; + } + + if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, + &feature_name)) { + pr_info("CPU feature '%s' is not supported.\n", feature_name); + return -ENODEV; + } + + return crypto_register_shash(&sm3_avx_alg); +} + +static void __exit sm3_avx_mod_exit(void) +{ + crypto_unregister_shash(&sm3_avx_alg); +} + +module_init(sm3_avx_mod_init); +module_exit(sm3_avx_mod_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Tianjia Zhang "); +MODULE_DESCRIPTION("SM3 Secure Hash Algorithm, AVX assembler accelerated"); +MODULE_ALIAS_CRYPTO("sm3"); +MODULE_ALIAS_CRYPTO("sm3-avx"); diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 1f5d96ba4866d811247e582e329ab3135034a50b..b79b9f21cbb3be91ecd35838c2ef2b704b21a780 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2545,10 +2545,11 @@ static bool perf_hw_regs(struct pt_regs *regs) void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct unwind_state state; unsigned long addr; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* TODO: We don't support guest os callchain now */ return; } @@ -2648,10 +2649,11 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); struct stack_frame frame; const struct stack_frame __user *fp; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (guest_cbs && guest_cbs->is_in_guest()) { /* TODO: We don't support guest os callchain now */ return; } @@ -2728,18 +2730,21 @@ static unsigned long code_segment_base(struct pt_regs *regs) unsigned long perf_instruction_pointer(struct pt_regs *regs) { - if (perf_guest_cbs && 
perf_guest_cbs->is_in_guest()) - return perf_guest_cbs->get_guest_ip(); + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); + + if (guest_cbs && guest_cbs->is_in_guest()) + return guest_cbs->get_guest_ip(); return regs->ip + code_segment_base(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) { + struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs(); int misc = 0; - if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { - if (perf_guest_cbs->is_user_mode()) + if (guest_cbs && guest_cbs->is_in_guest()) { + if (guest_cbs->is_user_mode()) misc |= PERF_RECORD_MISC_GUEST_USER; else misc |= PERF_RECORD_MISC_GUEST_KERNEL; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index a521135247eb64513fef5762caa3400603c8cb3a..5ba13b00e3a714108b8dca51b5ac106d9e7298bc 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2586,6 +2586,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) { struct perf_sample_data data; struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_guest_info_callbacks *guest_cbs; int bit; int handled = 0; @@ -2651,9 +2652,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) */ if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) { handled++; - if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() && - perf_guest_cbs->handle_intel_pt_intr)) - perf_guest_cbs->handle_intel_pt_intr(); + + guest_cbs = perf_get_guest_cbs(); + if (unlikely(guest_cbs && guest_cbs->is_in_guest() && + guest_cbs->handle_intel_pt_intr)) + guest_cbs->handle_intel_pt_intr(); else intel_pt_interrupt(); } @@ -4350,6 +4353,19 @@ static __initconst const struct x86_pmu intel_pmu = { .lbr_read = intel_pmu_lbr_read_64, .lbr_save = intel_pmu_lbr_save, .lbr_restore = intel_pmu_lbr_restore, + + /* + * SMM has access to all 4 rings and while traditionally SMM code only + * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM. + * + * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction + * between SMM or not, this results in what should be pure userspace + * counters including SMM data. + * + * This is a clear privilege issue, therefore globally disable + * counting SMM by default. + */ + .attr_freeze_on_smi = 1, }; static __init void intel_clovertown_quirk(void) diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index 9c1a013d5682297885704ee425127db6d464b741..bd8516e6c353c75c524df1b8ed84733c61407890 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -1734,6 +1734,9 @@ static bool is_arch_lbr_xsave_available(void) * Check the LBR state with the corresponding software structure. * Disable LBR XSAVES support if the size doesn't match. 
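The perf_get_guest_cbs() conversion above follows one pattern throughout: snapshot the published callbacks pointer once, then do the NULL check and every call through the same snapshot, so a concurrent unregistration (e.g. KVM module unload) cannot slip in between check and use. An illustrative userspace analogue, with all names invented:

	#include <stdatomic.h>

	struct guest_cbs {
		int (*is_in_guest)(void);
		unsigned long (*get_guest_ip)(void);
	};

	static _Atomic(struct guest_cbs *) published_cbs;

	static unsigned long sample_ip(unsigned long host_ip)
	{
		/* one load; check and calls all see the same value */
		struct guest_cbs *cbs = atomic_load_explicit(&published_cbs,
							     memory_order_relaxed);

		if (cbs && cbs->is_in_guest())
			return cbs->get_guest_ip();
		return host_ip;
	}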
*/ + if (xfeature_size(XFEATURE_LBR) == 0) + return false; + if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size())) return false; diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 37129b76135a14b55a9105da3c5cedcb06a20f67..cc3b79c06685303983984ef191973f12ebe7ca90 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -472,7 +472,7 @@ static u64 pt_config_filters(struct perf_event *event) pt->filters.filter[range].msr_b = filter->msr_b; } - rtit_ctl |= filter->config << pt_address_ranges[range].reg_off; + rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off; } return rtit_ctl; @@ -897,8 +897,9 @@ static void pt_handle_status(struct pt *pt) * means we are already losing data; need to let the decoder * know. */ - if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) || - buf->output_off == pt_buffer_region_size(buf)) { + if (!buf->single && + (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) || + buf->output_off == pt_buffer_region_size(buf))) { perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_TRUNCATED); advance++; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index ba26792d96731ae9f441086bba1a9be7e8241516..03c8047bebb38f83cf83f07ce412f5053e1eba14 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -5239,7 +5239,7 @@ static struct intel_uncore_type icx_uncore_imc = { .fixed_ctr_bits = 48, .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, - .event_descs = hswep_uncore_imc_events, + .event_descs = snr_uncore_imc_events, .perf_ctr = SNR_IMC_MMIO_PMON_CTR0, .event_ctl = SNR_IMC_MMIO_PMON_CTL0, .event_mask = SNBEP_PMON_RAW_EVENT_MASK, diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index f5ef2d5b9231cc4e6cf94f28a32761b788c99d07..6e149ab5db2d0728f53d84858290964b69609ec8 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -204,7 +204,7 @@ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ -#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCE for Spectre variant 2 */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ @@ -291,8 +291,11 @@ #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ #define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */ #define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */ +#define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */ +#define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ +#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ @@ -321,6 +324,7 @@ #define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ #define 
X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ #define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ +#define X86_FEATURE_HFI (14*32+19) /* Hardware Feedback Interface */ /* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */ #define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ @@ -376,6 +380,7 @@ #define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ #define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */ +#define X86_FEATURE_AVX512_FP16 (18*32+23) /* AVX512 FP16 */ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ #define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */ @@ -419,5 +424,6 @@ #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ +#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 3be8754408d51905c31c81fe8c61c3cfc1530815..3f58bc3fb5505cda59e544d171792ac7296e4f0c 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -353,7 +353,6 @@ static inline bool efi_is_64bit(void) extern bool efi_reboot_required(void); extern bool efi_is_table_address(unsigned long phys_addr); -extern void efi_find_mirror(void); extern void efi_reserve_boot_services(void); #else static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {} @@ -365,9 +364,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr) { return false; } -static inline void efi_find_mirror(void) -{ -} static inline void efi_reserve_boot_services(void) { } diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 4e5af2b00d89ba9651d3e5a84ef754ce96b68ce1..70b9bc5403c5e175081d3ba7c351af1b7ffa78d5 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -531,9 +531,11 @@ static inline void __fpregs_load_activate(void) * The FPU context is only stored/restored for a user task and * PF_KTHREAD is used to distinguish between kernel and user threads. */ -static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) +static inline void switch_fpu_prepare(struct task_struct *prev, int cpu) { - if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) { + struct fpu *old_fpu = &prev->thread.fpu; + + if (static_cpu_has(X86_FEATURE_FPU) && !(prev->flags & PF_KTHREAD)) { if (!copy_fpregs_to_fpstate(old_fpu)) old_fpu->last_cpu = -1; else @@ -552,10 +554,11 @@ static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) * Load PKRU from the FPU context if available. Delay loading of the * complete FPU state until the return to userland. 
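The cpufeature bits added above (e.g. 14*32+19, 18*32+23) are encoded as word*32 + bit into a flat capability array. A sketch of the encoding and the corresponding test:

	#include <stdbool.h>
	#include <stdint.h>

	#define FEATURE_BIT(word, bit)	((word) * 32 + (bit))
	#define X86_FEATURE_AVX512_FP16	FEATURE_BIT(18, 23)	/* as added above */

	/* caps[] holds one 32-bit leaf per feature word */
	static bool cpu_has(const uint32_t *caps, int feature)
	{
		return caps[feature / 32] & (1u << (feature % 32));
	}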
*/ -static inline void switch_fpu_finish(struct fpu *new_fpu) +static inline void switch_fpu_finish(struct task_struct *next) { u32 pkru_val = init_pkru_value; struct pkru_state *pk; + struct fpu *next_fpu = &next->thread.fpu; if (!static_cpu_has(X86_FEATURE_FPU)) return; @@ -569,7 +572,7 @@ static inline void switch_fpu_finish(struct fpu *new_fpu) * PKRU state is switched eagerly because it needs to be valid before we * return to userland e.g. for a copy_to_user() operation. */ - if (!(current->flags & PF_KTHREAD)) { + if (!(next->flags & PF_KTHREAD)) { /* * If the PKRU bit in xsave.header.xfeatures is not set, * then the PKRU component was in init state, which means @@ -578,7 +581,7 @@ static inline void switch_fpu_finish(struct fpu *new_fpu) * in memory is not valid. This means pkru_val has to be * set to 0 and not to init_pkru_value. */ - pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU); + pk = get_xsave_addr(&next_fpu->state.xsave, XFEATURE_PKRU); pkru_val = pk ? pk->pkru : 0; } __write_pkru(pkru_val); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index fcb5f0090a5fc75c83cfb1962723542d6bd1a669..3d9da462932572c07ee8bf644ed64f17ca92f9e0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -216,6 +216,7 @@ enum x86_intercept_stage; #define PFERR_RSVD_BIT 3 #define PFERR_FETCH_BIT 4 #define PFERR_PK_BIT 5 +#define PFERR_SGX_BIT 15 #define PFERR_GUEST_FINAL_BIT 32 #define PFERR_GUEST_PAGE_BIT 33 @@ -225,6 +226,7 @@ enum x86_intercept_stage; #define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT) #define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT) #define PFERR_PK_MASK (1U << PFERR_PK_BIT) +#define PFERR_SGX_MASK (1U << PFERR_SGX_BIT) #define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT) #define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT) @@ -992,6 +994,9 @@ struct kvm_arch { bool bus_lock_detection_enabled; + /* Guest can access the SGX PROVISIONKEY. 
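 *
 * (Illustrative sketch, not part of the patch: userspace grants this by
 * passing an fd for the SGX provisioning device through a KVM capability,
 * which lands in sgx_set_attribute() declared later in this series. A KVM
 * ioctl handler can then gate enclave attributes roughly as follows;
 * SGX_ATTR_PROVISIONKEY is an assumed attribute-bit name:
 *
 *	if (!kvm->arch.sgx_provisioning_allowed &&
 *	    (secs_attributes & SGX_ATTR_PROVISIONKEY))
 *		return -EACCES;
 * )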
*/ + bool sgx_provisioning_allowed; + /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */ u32 user_space_msr_mask; @@ -1286,6 +1291,7 @@ struct kvm_x86_ops { int (*mem_enc_op)(struct kvm *kvm, void __user *argp); int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); + void (*guest_memory_reclaimed)(struct kvm *kvm); int (*get_msr_feature)(struct kvm_msr_entry *entry); @@ -1299,6 +1305,7 @@ struct kvm_x86_ops { }; struct kvm_x86_nested_ops { + void (*leave_nested)(struct kvm_vcpu *vcpu); int (*check_events)(struct kvm_vcpu *vcpu); bool (*hv_timer_pending)(struct kvm_vcpu *vcpu); int (*get_state)(struct kvm_vcpu *vcpu, @@ -1320,6 +1327,7 @@ struct kvm_x86_init_ops { int (*disabled_by_bios)(void); int (*check_processor_compatibility)(void); int (*hardware_setup)(void); + bool (*intel_pt_intr_in_guest)(void); struct kvm_x86_ops *runtime_ops; }; @@ -1352,8 +1360,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm) return -ENOTSUPP; } -int kvm_mmu_module_init(void); -void kvm_mmu_module_exit(void); +void kvm_mmu_x86_module_init(void); +int kvm_mmu_vendor_module_init(void); +void kvm_mmu_vendor_module_exit(void); void kvm_mmu_destroy(struct kvm_vcpu *vcpu); int kvm_mmu_create(struct kvm_vcpu *vcpu); diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h index e23c2da3c323ae83d3f0a243bc904e3606405f01..b510f935ec111284a06d964153f5b58a666582b9 100644 --- a/arch/x86/include/asm/livepatch.h +++ b/arch/x86/include/asm/livepatch.h @@ -37,9 +37,23 @@ int klp_check_calltrace(struct klp_patch *patch, int enable); #define JMP_E9_INSN_SIZE 5 struct arch_klp_data { unsigned char old_code[JMP_E9_INSN_SIZE]; +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + /* + * Saved opcode at the entry of the old func (which maybe replaced + * with breakpoint). 
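 *
 * Illustrative flow (a sketch only; the arch hooks are declared just
 * below and implemented elsewhere in this series):
 *
 *	arch_data->saved_opcode = *(u8 *)old_func;	// keep original byte
 *	// write an INT3 (0xcc) over it so that racing callers trap into
 *	// klp_int3_handler(), which redirects them to the new function;
 *	// arch_klp_remove_breakpoint() restores saved_opcode afterwards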
+ */ + unsigned char saved_opcode; +#endif }; long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func); +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +int arch_klp_check_breakpoint(struct arch_klp_data *arch_data, void *old_func); +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func); +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func); +int klp_int3_handler(struct pt_regs *regs); +int arch_klp_module_check_calltrace(void *data); +#endif #endif diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 9b5ff423e939857a325b4b22edd03f2d569580e7..28c112695e92054ac87a9d57004e0cf06c3777e3 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -283,28 +283,6 @@ extern void (*mce_threshold_vector)(void); /* Deferred error interrupt handler */ extern void (*deferred_error_int_vector)(void); -/* - * Thermal handler - */ - -void intel_init_thermal(struct cpuinfo_x86 *c); - -/* Interrupt Handler for core thermal thresholds */ -extern int (*platform_thermal_notify)(__u64 msr_val); - -/* Interrupt Handler for package thermal thresholds */ -extern int (*platform_thermal_package_notify)(__u64 msr_val); - -/* Callback support of rate control, return true, if - * callback has rate control */ -extern bool (*platform_thermal_package_rate_control)(void); - -#ifdef CONFIG_X86_THERMAL_VECTOR -extern void mcheck_intel_therm_init(void); -#else -static inline void mcheck_intel_therm_init(void) { } -#endif - /* * Used by APEI to report memory error via /dev/mcelog */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 5de2040b73a77d9f09682bb76a6955d0522a0196..2f0ca77d24bcc872004d8f1e5395a244202b2d43 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -114,6 +114,30 @@ * Not susceptible to * TSX Async Abort (TAA) vulnerabilities. */ +#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* + * Not susceptible to SBDR and SSDP + * variants of Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FBSDP_NO BIT(14) /* + * Not susceptible to FBSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_PSDP_NO BIT(15) /* + * Not susceptible to PSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FB_CLEAR BIT(17) /* + * VERW clears CPU fill buffer + * even on MDS_NO CPUs. + */ +#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* + * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] + * bit available to control VERW + * behavior. 
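 *
 * Illustrative use from kernel context (a sketch; FB_CLEAR_DIS is
 * defined further down in this file):
 *
 *	u64 ctrl;
 *
 *	rdmsrl(MSR_IA32_MCU_OPT_CTRL, ctrl);
 *	ctrl |= FB_CLEAR_DIS;	// opt out of fill-buffer clear on VERW
 *	wrmsrl(MSR_IA32_MCU_OPT_CTRL, ctrl);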
+ */ #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* @@ -131,6 +155,7 @@ /* SRBDS support */ #define MSR_IA32_MCU_OPT_CTRL 0x00000123 #define RNGDS_MITG_DIS BIT(0) +#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 @@ -681,12 +706,14 @@ #define PACKAGE_THERM_STATUS_PROCHOT (1 << 0) #define PACKAGE_THERM_STATUS_POWER_LIMIT (1 << 10) +#define PACKAGE_THERM_STATUS_HFI_UPDATED (1 << 26) #define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2 #define PACKAGE_THERM_INT_HIGH_ENABLE (1 << 0) #define PACKAGE_THERM_INT_LOW_ENABLE (1 << 1) #define PACKAGE_THERM_INT_PLN_ENABLE (1 << 24) +#define PACKAGE_THERM_INT_HFI_ENABLE (1 << 25) /* Thermal Thresholds Support */ #define THERM_INT_THRESHOLD0_ENABLE (1 << 15) @@ -931,4 +958,8 @@ #define MSR_VM_IGNNE 0xc0010115 #define MSR_VM_HSAVE_PA 0xc0010117 +/* Hardware Feedback Interface */ +#define MSR_IA32_HW_FEEDBACK_PTR 0x17d0 +#define MSR_IA32_HW_FEEDBACK_CONFIG 0x17d1 + #endif /* _ASM_X86_MSR_INDEX_H */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index cb9ad6b739737e4e36f47f3e93e0604f5a4c7713..e247151c3dcf241347e1f4db5b39daba3217a815 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -82,7 +82,7 @@ #ifdef CONFIG_RETPOLINE ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \ __stringify(jmp __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \ - __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_LFENCE #else jmp *%\reg #endif @@ -92,7 +92,7 @@ #ifdef CONFIG_RETPOLINE ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \ __stringify(call __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \ - __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD + __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_LFENCE #else call *%\reg #endif @@ -134,7 +134,7 @@ "lfence;\n" \ ANNOTATE_RETPOLINE_SAFE \ "call *%[thunk_target]\n", \ - X86_FEATURE_RETPOLINE_AMD) + X86_FEATURE_RETPOLINE_LFENCE) # define THUNK_TARGET(addr) [thunk_target] "r" (addr) @@ -164,7 +164,7 @@ "lfence;\n" \ ANNOTATE_RETPOLINE_SAFE \ "call *%[thunk_target]\n", \ - X86_FEATURE_RETPOLINE_AMD) + X86_FEATURE_RETPOLINE_LFENCE) # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) #endif @@ -176,9 +176,11 @@ /* The Spectre V2 mitigation variants */ enum spectre_v2_mitigation { SPECTRE_V2_NONE, - SPECTRE_V2_RETPOLINE_GENERIC, - SPECTRE_V2_RETPOLINE_AMD, - SPECTRE_V2_IBRS_ENHANCED, + SPECTRE_V2_RETPOLINE, + SPECTRE_V2_LFENCE, + SPECTRE_V2_EIBRS, + SPECTRE_V2_EIBRS_RETPOLINE, + SPECTRE_V2_EIBRS_LFENCE, }; /* The indirect branch speculation control variants */ @@ -253,6 +255,8 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); DECLARE_STATIC_KEY_FALSE(mds_user_clear); DECLARE_STATIC_KEY_FALSE(mds_idle_clear); +DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); + #include /** diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index d428d611a43a97fa4ce48671c8ac0a89b73e7b55..a3d0152a4361386eab160edab502be504b3531c1 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -858,4 +858,12 @@ enum mds_mitigations { MDS_MITIGATION_VMWERV, }; +#ifdef CONFIG_X86_SGX +int arch_memory_failure(unsigned long pfn, int flags); +#define arch_memory_failure arch_memory_failure + +bool 
arch_is_platform_page(u64 paddr); +#define arch_is_platform_page arch_is_platform_page +#endif + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h index 5db5d083c87322ec4913252d1c22752b85d517f4..331474b150f16964e1ba8788e6a623d0214aee62 100644 --- a/arch/x86/include/asm/realmode.h +++ b/arch/x86/include/asm/realmode.h @@ -89,6 +89,7 @@ static inline void set_real_mode_mem(phys_addr_t mem) } void reserve_real_mode(void); +void load_trampoline_pgtable(void); #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h index 5948218f35c584e00d605a4b76115c2bd9897cda..ef47246d22f5d25126d19db570755b5649a5f197 100644 --- a/arch/x86/include/asm/set_memory.h +++ b/arch/x86/include/asm/set_memory.h @@ -2,6 +2,7 @@ #ifndef _ASM_X86_SET_MEMORY_H #define _ASM_X86_SET_MEMORY_H +#include #include #include @@ -97,6 +98,9 @@ static inline int set_mce_nospec(unsigned long pfn, bool unmap) unsigned long decoy_addr; int rc; + /* SGX pages are not in the 1:1 map */ + if (arch_is_platform_page(pfn << PAGE_SHIFT)) + return 0; /* * We would like to just call: * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1); diff --git a/arch/x86/include/asm/sgx.h b/arch/x86/include/asm/sgx.h index d96e5451d28a1059387a2ff416ae44a06dc98d13..a16e2c9154a31262582fdf8df955394de8fbb17c 100644 --- a/arch/x86/include/asm/sgx.h +++ b/arch/x86/include/asm/sgx.h @@ -27,16 +27,36 @@ /* The bitmask for the EPC section type. */ #define SGX_CPUID_EPC_MASK GENMASK(3, 0) +enum sgx_encls_function { + ECREATE = 0x00, + EADD = 0x01, + EINIT = 0x02, + EREMOVE = 0x03, + EDGBRD = 0x04, + EDGBWR = 0x05, + EEXTEND = 0x06, + ELDU = 0x08, + EBLOCK = 0x09, + EPA = 0x0A, + EWB = 0x0B, + ETRACK = 0x0C, + EAUG = 0x0D, + EMODPR = 0x0E, + EMODT = 0x0F, +}; + /** * enum sgx_return_code - The return code type for ENCLS, ENCLU and ENCLV * %SGX_NOT_TRACKED: Previous ETRACK's shootdown sequence has not * been completed yet. + * %SGX_CHILD_PRESENT SECS has child pages present in the EPC. * %SGX_INVALID_EINITTOKEN: EINITTOKEN is invalid and enclave signer's * public key does not match IA32_SGXLEPUBKEYHASH. * %SGX_UNMASKED_EVENT: An unmasked event, e.g. INTR, was received */ enum sgx_return_code { SGX_NOT_TRACKED = 11, + SGX_CHILD_PRESENT = 13, SGX_INVALID_EINITTOKEN = 16, SGX_UNMASKED_EVENT = 128, }; @@ -345,4 +365,14 @@ struct sgx_sigstruct { * comment! 
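 *
 * (Illustrative sketch of how the CONFIG_X86_SGX_KVM helpers declared
 * just below are meant to be used by a hypervisor; variable names are
 * invented and error handling is elided:
 *
 *	int trapnr;
 *
 *	if (sgx_virt_ecreate(&pginfo, guest_secs, &trapnr))
 *		...	// reflect a fault with vector 'trapnr' into the guest
 * )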
*/ +#ifdef CONFIG_X86_SGX_KVM +int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs, + int *trapnr); +int sgx_virt_einit(void __user *sigstruct, void __user *token, + void __user *secs, u64 *lepubkeyhash, int *trapnr); +#endif + +int sgx_set_attribute(unsigned long *allowed_attributes, + unsigned int attribute_fd); + #endif /* _ASM_X86_SGX_H */ diff --git a/arch/x86/include/asm/thermal.h b/arch/x86/include/asm/thermal.h new file mode 100644 index 0000000000000000000000000000000000000000..ddbdefd5b94f1024dab964540b00b787a81031af --- /dev/null +++ b/arch/x86/include/asm/thermal.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_THERMAL_H +#define _ASM_X86_THERMAL_H + +#ifdef CONFIG_X86_THERMAL_VECTOR +void intel_init_thermal(struct cpuinfo_x86 *c); +bool x86_thermal_enabled(void); +void intel_thermal_interrupt(void); +#else +static inline void intel_init_thermal(struct cpuinfo_x86 *c) { } +#endif + +#endif /* _ASM_X86_THERMAL_H */ diff --git a/arch/x86/include/asm/trap_pf.h b/arch/x86/include/asm/trap_pf.h index 305bc1214aef96e170dfd2613447240eae2ce854..10b1de500ab1c298cfe9d092f6cc5435df3f0571 100644 --- a/arch/x86/include/asm/trap_pf.h +++ b/arch/x86/include/asm/trap_pf.h @@ -11,6 +11,7 @@ * bit 3 == 1: use of reserved bit detected * bit 4 == 1: fault was an instruction fetch * bit 5 == 1: protection keys block access + * bit 15 == 1: SGX MMU page-fault */ enum x86_pf_error_code { X86_PF_PROT = 1 << 0, @@ -19,6 +20,7 @@ enum x86_pf_error_code { X86_PF_RSVD = 1 << 3, X86_PF_INSTR = 1 << 4, X86_PF_PK = 1 << 5, + X86_PF_SGX = 1 << 15, }; #endif /* _ASM_X86_TRAP_PF_H */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 5c95d242f38d708e313f39cc781fa8455fdd16ed..bb1430283c726c1fa5791b56ca88bee298200b94 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -314,11 +314,12 @@ do { \ do { \ __chk_user_ptr(ptr); \ switch (size) { \ - unsigned char x_u8__; \ - case 1: \ + case 1: { \ + unsigned char x_u8__; \ __get_user_asm(x_u8__, ptr, "b", "=q", label); \ (x) = x_u8__; \ break; \ + } \ case 2: \ __get_user_asm(x, ptr, "w", "=r", label); \ break; \ diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index f8ba5289ecb01ed24b149eb5fa5c0a120de7c5ae..c6f028bac3ff869e2e3a65a28575d663a39eda82 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -371,6 +371,7 @@ enum vmcs_field { #define GUEST_INTR_STATE_MOV_SS 0x00000002 #define GUEST_INTR_STATE_SMI 0x00000004 #define GUEST_INTR_STATE_NMI 0x00000008 +#define GUEST_INTR_STATE_ENCLAVE_INTR 0x00000010 /* GUEST_ACTIVITY_STATE flags */ #define GUEST_ACTIVITY_ACTIVE 0 diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h index 791e45334a4a210c0ec6d2810980af454e549f86..c815a6fec9aaf9bd91722a4cb544c2432b3888b6 100644 --- a/arch/x86/include/uapi/asm/sgx.h +++ b/arch/x86/include/uapi/asm/sgx.h @@ -27,6 +27,8 @@ enum sgx_page_flags { _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init) #define SGX_IOC_ENCLAVE_PROVISION \ _IOW(SGX_MAGIC, 0x03, struct sgx_enclave_provision) +#define SGX_IOC_VEPC_REMOVE_ALL \ + _IO(SGX_MAGIC, 0x04) /** * struct sgx_enclave_create - parameter structure for the diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index b8ff9e8ac0d516b183eaf2fc64bcd0ddbaf3b15c..df6707a76a3d06424ac6102aa6dfbfa879136614 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h @@ -27,6 +27,7 @@ #define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 
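/*
 * Illustrative sketch (not part of the patch): the flag added below is
 * ORed into the exit reason when a VM exit occurs while the guest was
 * running inside an SGX enclave, so a consumer can decode it as:
 *
 *	bool from_enclave = exit_reason & VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
 *	u16 basic = exit_reason & 0xffff;	// low 16 bits: basic reason
 */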
+#define VMX_EXIT_REASONS_SGX_ENCLAVE_MODE 0x08000000 #define EXIT_REASON_EXCEPTION_NMI 0 #define EXIT_REASON_EXTERNAL_INTERRUPT 1 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 14cd3186dc77dc7a929a47026bfa8c3152f89308..55562a9b7f92e9d6bc5f5db958dee01753f8321c 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1340,6 +1340,17 @@ static int __init disable_acpi_pci(const struct dmi_system_id *d) return 0; } +static int __init disable_acpi_xsdt(const struct dmi_system_id *d) +{ + if (!acpi_force) { + pr_notice("%s detected: force use of acpi=rsdt\n", d->ident); + acpi_gbl_do_not_use_xsdt = TRUE; + } else { + pr_notice("Warning: DMI blacklist says broken, but acpi XSDT forced\n"); + } + return 0; +} + static int __init dmi_disable_acpi(const struct dmi_system_id *d) { if (!acpi_force) { @@ -1464,6 +1475,19 @@ static const struct dmi_system_id acpi_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), }, }, + /* + * Boxes that need ACPI XSDT use disabled due to corrupted tables + */ + { + .callback = disable_acpi_xsdt, + .ident = "Advantech DAC-BJ01", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "NEC"), + DMI_MATCH(DMI_PRODUCT_NAME, "Bearlake CRB Board"), + DMI_MATCH(DMI_BIOS_VERSION, "V1.12"), + DMI_MATCH(DMI_BIOS_DATE, "02/01/2011"), + }, + }, {} }; diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d41b70fe4918e3f8f9613b87e452d55d01984671..2a21046846b6fd005a59e6a4defbbd789f9b2948 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -40,8 +41,10 @@ static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); static void __init mds_select_mitigation(void); -static void __init mds_print_mitigation(void); +static void __init md_clear_update_mitigation(void); +static void __init md_clear_select_mitigation(void); static void __init taa_select_mitigation(void); +static void __init mmio_select_mitigation(void); static void __init srbds_select_mitigation(void); /* The base value of the SPEC_CTRL MSR that always has to be preserved. */ @@ -76,6 +79,10 @@ EXPORT_SYMBOL_GPL(mds_user_clear); DEFINE_STATIC_KEY_FALSE(mds_idle_clear); EXPORT_SYMBOL_GPL(mds_idle_clear); +/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */ +DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); +EXPORT_SYMBOL_GPL(mmio_stale_data_clear); + void __init check_bugs(void) { identify_boot_cpu(); @@ -108,16 +115,9 @@ void __init check_bugs(void) spectre_v2_select_mitigation(); ssb_select_mitigation(); l1tf_select_mitigation(); - mds_select_mitigation(); - taa_select_mitigation(); + md_clear_select_mitigation(); srbds_select_mitigation(); - /* - * As MDS and TAA mitigations are inter-related, print MDS - * mitigation until after TAA mitigation selection is done. 
- */ - mds_print_mitigation(); - arch_smt_update(); #ifdef CONFIG_X86_32 @@ -257,14 +257,6 @@ static void __init mds_select_mitigation(void) } } -static void __init mds_print_mitigation(void) -{ - if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) - return; - - pr_info("%s\n", mds_strings[mds_mitigation]); -} - static int __init mds_cmdline(char *str) { if (!boot_cpu_has_bug(X86_BUG_MDS)) @@ -319,7 +311,7 @@ static void __init taa_select_mitigation(void) /* TSX previously disabled by tsx=off */ if (!boot_cpu_has(X86_FEATURE_RTM)) { taa_mitigation = TAA_MITIGATION_TSX_DISABLED; - goto out; + return; } if (cpu_mitigations_off()) { @@ -333,7 +325,7 @@ static void __init taa_select_mitigation(void) */ if (taa_mitigation == TAA_MITIGATION_OFF && mds_mitigation == MDS_MITIGATION_OFF) - goto out; + return; if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) taa_mitigation = TAA_MITIGATION_VERW; @@ -365,18 +357,6 @@ static void __init taa_select_mitigation(void) if (taa_nosmt || cpu_mitigations_auto_nosmt()) cpu_smt_disable(false); - - /* - * Update MDS mitigation, if necessary, as the mds_user_clear is - * now enabled for TAA mitigation. - */ - if (mds_mitigation == MDS_MITIGATION_OFF && - boot_cpu_has_bug(X86_BUG_MDS)) { - mds_mitigation = MDS_MITIGATION_FULL; - mds_select_mitigation(); - } -out: - pr_info("%s\n", taa_strings[taa_mitigation]); } static int __init tsx_async_abort_parse_cmdline(char *str) @@ -400,6 +380,151 @@ static int __init tsx_async_abort_parse_cmdline(char *str) } early_param("tsx_async_abort", tsx_async_abort_parse_cmdline); +#undef pr_fmt +#define pr_fmt(fmt) "MMIO Stale Data: " fmt + +enum mmio_mitigations { + MMIO_MITIGATION_OFF, + MMIO_MITIGATION_UCODE_NEEDED, + MMIO_MITIGATION_VERW, +}; + +/* Default mitigation for Processor MMIO Stale Data vulnerabilities */ +static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW; +static bool mmio_nosmt __ro_after_init = false; + +static const char * const mmio_strings[] = { + [MMIO_MITIGATION_OFF] = "Vulnerable", + [MMIO_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", + [MMIO_MITIGATION_VERW] = "Mitigation: Clear CPU buffers", +}; + +static void __init mmio_select_mitigation(void) +{ + u64 ia32_cap; + + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || + cpu_mitigations_off()) { + mmio_mitigation = MMIO_MITIGATION_OFF; + return; + } + + if (mmio_mitigation == MMIO_MITIGATION_OFF) + return; + + ia32_cap = x86_read_arch_cap_msr(); + + /* + * Enable CPU buffer clear mitigation for host and VMM, if also affected + * by MDS or TAA. Otherwise, enable mitigation for VMM only. + */ + if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) && + boot_cpu_has(X86_FEATURE_RTM))) + static_branch_enable(&mds_user_clear); + else + static_branch_enable(&mmio_stale_data_clear); + + /* + * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can + * be propagated to uncore buffers, clearing the Fill buffers on idle + * is required irrespective of SMT state. + */ + if (!(ia32_cap & ARCH_CAP_FBSDP_NO)) + static_branch_enable(&mds_idle_clear); + + /* + * Check if the system has the right microcode. + * + * CPU Fill buffer clear mitigation is enumerated by either an explicit + * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS + * affected systems. 
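 *
 * (Illustrative note: once selection completes,
 * md_clear_update_mitigation() reports the outcome; a sample boot-log
 * line, with the string taken from the mmio_strings[] table in this
 * file:
 *
 *	MMIO Stale Data: Mitigation: Clear CPU buffers
 * )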
+ */ + if ((ia32_cap & ARCH_CAP_FB_CLEAR) || + (boot_cpu_has(X86_FEATURE_MD_CLEAR) && + boot_cpu_has(X86_FEATURE_FLUSH_L1D) && + !(ia32_cap & ARCH_CAP_MDS_NO))) + mmio_mitigation = MMIO_MITIGATION_VERW; + else + mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED; + + if (mmio_nosmt || cpu_mitigations_auto_nosmt()) + cpu_smt_disable(false); +} + +static int __init mmio_stale_data_parse_cmdline(char *str) +{ + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + return 0; + + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) { + mmio_mitigation = MMIO_MITIGATION_OFF; + } else if (!strcmp(str, "full")) { + mmio_mitigation = MMIO_MITIGATION_VERW; + } else if (!strcmp(str, "full,nosmt")) { + mmio_mitigation = MMIO_MITIGATION_VERW; + mmio_nosmt = true; + } + + return 0; +} +early_param("mmio_stale_data", mmio_stale_data_parse_cmdline); + +#undef pr_fmt +#define pr_fmt(fmt) "" fmt + +static void __init md_clear_update_mitigation(void) +{ + if (cpu_mitigations_off()) + return; + + if (!static_key_enabled(&mds_user_clear)) + goto out; + + /* + * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data + * mitigation, if necessary. + */ + if (mds_mitigation == MDS_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MDS)) { + mds_mitigation = MDS_MITIGATION_FULL; + mds_select_mitigation(); + } + if (taa_mitigation == TAA_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_TAA)) { + taa_mitigation = TAA_MITIGATION_VERW; + taa_select_mitigation(); + } + if (mmio_mitigation == MMIO_MITIGATION_OFF && + boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) { + mmio_mitigation = MMIO_MITIGATION_VERW; + mmio_select_mitigation(); + } +out: + if (boot_cpu_has_bug(X86_BUG_MDS)) + pr_info("MDS: %s\n", mds_strings[mds_mitigation]); + if (boot_cpu_has_bug(X86_BUG_TAA)) + pr_info("TAA: %s\n", taa_strings[taa_mitigation]); + if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) + pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); +} + +static void __init md_clear_select_mitigation(void) +{ + mds_select_mitigation(); + taa_select_mitigation(); + mmio_select_mitigation(); + + /* + * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update + * and print their mitigation after MDS, TAA and MMIO Stale Data + * mitigation selection is done. + */ + md_clear_update_mitigation(); +} + #undef pr_fmt #define pr_fmt(fmt) "SRBDS: " fmt @@ -461,11 +586,13 @@ static void __init srbds_select_mitigation(void) return; /* - * Check to see if this is one of the MDS_NO systems supporting - * TSX that are only exposed to SRBDS when TSX is enabled. + * Check to see if this is one of the MDS_NO systems supporting TSX that + * are only exposed to SRBDS when TSX is enabled or when CPU is affected + * by Processor MMIO Stale Data vulnerability. 
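 *
 * (Illustrative predicate; this mirrors the condition in the code just
 * below, with TSX_OFF only chosen when all three hold:
 *
 *	bool tsx_off_ok = (ia32_cap & ARCH_CAP_MDS_NO) &&
 *			  !boot_cpu_has(X86_FEATURE_RTM) &&
 *			  !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA);
 * )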
*/ ia32_cap = x86_read_arch_cap_msr(); - if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM)) + if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) && + !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) srbds_mitigation = SRBDS_MITIGATION_TSX_OFF; else if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR; @@ -613,6 +740,32 @@ static inline const char *spectre_v2_module_string(void) static inline const char *spectre_v2_module_string(void) { return ""; } #endif +#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" +#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" +#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" + +#ifdef CONFIG_BPF_SYSCALL +void unpriv_ebpf_notify(int new_state) +{ + if (new_state) + return; + + /* Unprivileged eBPF is enabled */ + + switch (spectre_v2_enabled) { + case SPECTRE_V2_EIBRS: + pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); + break; + case SPECTRE_V2_EIBRS_LFENCE: + if (sched_smt_active()) + pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); + break; + default: + break; + } +} +#endif + static inline bool match_option(const char *arg, int arglen, const char *opt) { int len = strlen(opt); @@ -627,7 +780,10 @@ enum spectre_v2_mitigation_cmd { SPECTRE_V2_CMD_FORCE, SPECTRE_V2_CMD_RETPOLINE, SPECTRE_V2_CMD_RETPOLINE_GENERIC, - SPECTRE_V2_CMD_RETPOLINE_AMD, + SPECTRE_V2_CMD_RETPOLINE_LFENCE, + SPECTRE_V2_CMD_EIBRS, + SPECTRE_V2_CMD_EIBRS_RETPOLINE, + SPECTRE_V2_CMD_EIBRS_LFENCE, }; enum spectre_v2_user_cmd { @@ -700,6 +856,13 @@ spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd) return SPECTRE_V2_USER_CMD_AUTO; } +static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode) +{ + return (mode == SPECTRE_V2_EIBRS || + mode == SPECTRE_V2_EIBRS_RETPOLINE || + mode == SPECTRE_V2_EIBRS_LFENCE); +} + static void __init spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) { @@ -767,7 +930,7 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) */ if (!boot_cpu_has(X86_FEATURE_STIBP) || !smt_possible || - spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + spectre_v2_in_eibrs_mode(spectre_v2_enabled)) return; /* @@ -787,9 +950,11 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) static const char * const spectre_v2_strings[] = { [SPECTRE_V2_NONE] = "Vulnerable", - [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", - [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", - [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS", + [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", + [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", + [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS", + [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE", + [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines", }; static const struct { @@ -800,8 +965,12 @@ static const struct { { "off", SPECTRE_V2_CMD_NONE, false }, { "on", SPECTRE_V2_CMD_FORCE, true }, { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, - { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, + { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, + { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, { "retpoline,generic", 
SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, + { "eibrs", SPECTRE_V2_CMD_EIBRS, false }, + { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, + { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, { "auto", SPECTRE_V2_CMD_AUTO, false }, }; @@ -838,17 +1007,30 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) } if ((cmd == SPECTRE_V2_CMD_RETPOLINE || - cmd == SPECTRE_V2_CMD_RETPOLINE_AMD || - cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) && + cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || + cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || + cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || + cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && !IS_ENABLED(CONFIG_RETPOLINE)) { - pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option); + pr_err("%s selected but not compiled in. Switching to AUTO select\n", + mitigation_options[i].option); + return SPECTRE_V2_CMD_AUTO; + } + + if ((cmd == SPECTRE_V2_CMD_EIBRS || + cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || + cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && + !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { + pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n", + mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } - if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD && - boot_cpu_data.x86_vendor != X86_VENDOR_HYGON && - boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { - pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); + if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || + cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && + !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { + pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n", + mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } @@ -857,6 +1039,16 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) return cmd; } +static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) +{ + if (!IS_ENABLED(CONFIG_RETPOLINE)) { + pr_err("Kernel not compiled with retpoline; no mitigation available!"); + return SPECTRE_V2_NONE; + } + + return SPECTRE_V2_RETPOLINE; +} + static void __init spectre_v2_select_mitigation(void) { enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); @@ -877,49 +1069,64 @@ static void __init spectre_v2_select_mitigation(void) case SPECTRE_V2_CMD_FORCE: case SPECTRE_V2_CMD_AUTO: if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { - mode = SPECTRE_V2_IBRS_ENHANCED; - /* Force it so VMEXIT will restore correctly */ - x86_spec_ctrl_base |= SPEC_CTRL_IBRS; - wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); - goto specv2_set_mode; + mode = SPECTRE_V2_EIBRS; + break; } - if (IS_ENABLED(CONFIG_RETPOLINE)) - goto retpoline_auto; + + mode = spectre_v2_select_retpoline(); break; - case SPECTRE_V2_CMD_RETPOLINE_AMD: - if (IS_ENABLED(CONFIG_RETPOLINE)) - goto retpoline_amd; + + case SPECTRE_V2_CMD_RETPOLINE_LFENCE: + pr_err(SPECTRE_V2_LFENCE_MSG); + mode = SPECTRE_V2_LFENCE; break; + case SPECTRE_V2_CMD_RETPOLINE_GENERIC: - if (IS_ENABLED(CONFIG_RETPOLINE)) - goto retpoline_generic; + mode = SPECTRE_V2_RETPOLINE; break; + case SPECTRE_V2_CMD_RETPOLINE: - if (IS_ENABLED(CONFIG_RETPOLINE)) - goto retpoline_auto; + mode = spectre_v2_select_retpoline(); + break; + + case SPECTRE_V2_CMD_EIBRS: + mode = SPECTRE_V2_EIBRS; + break; + + case SPECTRE_V2_CMD_EIBRS_LFENCE: + mode = SPECTRE_V2_EIBRS_LFENCE; + break; + + case SPECTRE_V2_CMD_EIBRS_RETPOLINE: + mode = SPECTRE_V2_EIBRS_RETPOLINE; break; } - pr_err("Spectre mitigation: kernel not compiled with 
retpoline; no mitigation available!"); - return; -retpoline_auto: - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - retpoline_amd: - if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { - pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); - goto retpoline_generic; - } - mode = SPECTRE_V2_RETPOLINE_AMD; - setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); - setup_force_cpu_cap(X86_FEATURE_RETPOLINE); - } else { - retpoline_generic: - mode = SPECTRE_V2_RETPOLINE_GENERIC; + if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) + pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); + + if (spectre_v2_in_eibrs_mode(mode)) { + /* Force it so VMEXIT will restore correctly */ + x86_spec_ctrl_base |= SPEC_CTRL_IBRS; + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); + } + + switch (mode) { + case SPECTRE_V2_NONE: + case SPECTRE_V2_EIBRS: + break; + + case SPECTRE_V2_LFENCE: + case SPECTRE_V2_EIBRS_LFENCE: + setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); + fallthrough; + + case SPECTRE_V2_RETPOLINE: + case SPECTRE_V2_EIBRS_RETPOLINE: setup_force_cpu_cap(X86_FEATURE_RETPOLINE); + break; } -specv2_set_mode: spectre_v2_enabled = mode; pr_info("%s\n", spectre_v2_strings[mode]); @@ -945,7 +1152,7 @@ static void __init spectre_v2_select_mitigation(void) * the CPU supports Enhanced IBRS, kernel might un-intentionally not * enable IBRS around firmware calls. */ - if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) { + if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) { setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); pr_info("Enabling Restricted Speculation for firmware calls\n"); } @@ -991,6 +1198,8 @@ static void update_indir_branch_cond(void) /* Update the static key controlling the MDS CPU buffer clear in idle */ static void update_mds_branch_idle(void) { + u64 ia32_cap = x86_read_arch_cap_msr(); + /* * Enable the idle clearing if SMT is active on CPUs which are * affected only by MSBDS and not any other MDS variant. @@ -1002,19 +1211,26 @@ static void update_mds_branch_idle(void) if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) return; - if (sched_smt_active()) + if (sched_smt_active()) { static_branch_enable(&mds_idle_clear); - else + } else if (mmio_mitigation == MMIO_MITIGATION_OFF || + (ia32_cap & ARCH_CAP_FBSDP_NO)) { static_branch_disable(&mds_idle_clear); + } } #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" +#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" void cpu_bugs_smt_update(void) { mutex_lock(&spec_ctrl_mutex); + if (sched_smt_active() && unprivileged_ebpf_enabled() && + spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) + pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); + switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: break; @@ -1050,6 +1266,16 @@ void cpu_bugs_smt_update(void) break; } + switch (mmio_mitigation) { + case MMIO_MITIGATION_VERW: + case MMIO_MITIGATION_UCODE_NEEDED: + if (sched_smt_active()) + pr_warn_once(MMIO_MSG_SMT); + break; + case MMIO_MITIGATION_OFF: + break; + } + mutex_unlock(&spec_ctrl_mutex); } @@ -1619,9 +1845,23 @@ static ssize_t tsx_async_abort_show_state(char *buf) sched_smt_active() ? "vulnerable" : "disabled"); } +static ssize_t mmio_stale_data_show_state(char *buf) +{ + if (mmio_mitigation == MMIO_MITIGATION_OFF) + return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + return sysfs_emit(buf, "%s; SMT Host state unknown\n", + mmio_strings[mmio_mitigation]); + } + + return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], + sched_smt_active() ? "vulnerable" : "disabled"); +} + static char *stibp_state(void) { - if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) + if (spectre_v2_in_eibrs_mode(spectre_v2_enabled)) return ""; switch (spectre_v2_user_stibp) { @@ -1651,6 +1891,27 @@ static char *ibpb_state(void) return ""; } +static ssize_t spectre_v2_show_state(char *buf) +{ + if (spectre_v2_enabled == SPECTRE_V2_LFENCE) + return sprintf(buf, "Vulnerable: LFENCE\n"); + + if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) + return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); + + if (sched_smt_active() && unprivileged_ebpf_enabled() && + spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) + return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); + + return sprintf(buf, "%s%s%s%s%s%s\n", + spectre_v2_strings[spectre_v2_enabled], + ibpb_state(), + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", + stibp_state(), + boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", + spectre_v2_module_string()); +} + static ssize_t srbds_show_state(char *buf) { return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); @@ -1676,12 +1937,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); case X86_BUG_SPECTRE_V2: - return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], - ibpb_state(), - boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", - stibp_state(), - boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? 
", RSB filling" : "", - spectre_v2_module_string()); + return spectre_v2_show_state(buf); case X86_BUG_SPEC_STORE_BYPASS: return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); @@ -1703,6 +1959,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_SRBDS: return srbds_show_state(buf); + case X86_BUG_MMIO_STALE_DATA: + return mmio_stale_data_show_state(buf); + default: break; } @@ -1754,4 +2013,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char * { return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); } + +ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 9c8fc6f513ed3f9b9142a1c253c65f3760345814..4917c2698ac1f2a7aebb34a62716d8d3520c9e35 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1098,18 +1098,42 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { X86_FEATURE_ANY, issues) #define SRBDS BIT(0) +/* CPU is affected by X86_BUG_MMIO_STALE_DATA */ +#define MMIO BIT(1) +/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */ +#define MMIO_SBDS BIT(2) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(HASWELL_X, BIT(2) | BIT(4), MMIO), + VULNBL_INTEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x5), MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO), VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_X, BIT(3) | BIT(4) | BIT(6) | + BIT(7) | BIT(0xB), MMIO), + VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPINGS(0x3, 0x3), SRBDS | MMIO), VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), - VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS), - VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x9, 0xC), SRBDS | MMIO), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0x8), SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x9, 0xD), SRBDS | MMIO), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0x8), SRBDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPINGS(0x5, 0x5), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x1, 0x1), MMIO), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0x6), MMIO), + VULNBL_INTEL_STEPPINGS(COMETLAKE, BIT(2) | BIT(3) | BIT(5), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO), + VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPINGS(0x1, 0x1), MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPINGS(0x1, 0x1), MMIO | MMIO_SBDS), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), + VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPINGS(0x0, 
0x0), MMIO | MMIO_SBDS), {} }; @@ -1130,6 +1154,13 @@ u64 x86_read_arch_cap_msr(void) return ia32_cap; } +static bool arch_cap_mmio_immune(u64 ia32_cap) +{ + return (ia32_cap & ARCH_CAP_FBSDP_NO && + ia32_cap & ARCH_CAP_PSDP_NO && + ia32_cap & ARCH_CAP_SBDR_SSDP_NO); +} + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) { u64 ia32_cap = x86_read_arch_cap_msr(); @@ -1183,12 +1214,27 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) /* * SRBDS affects CPUs which support RDRAND or RDSEED and are listed * in the vulnerability blacklist. + * + * Some of the implications and mitigation of Shared Buffers Data + * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as + * SRBDS. */ if ((cpu_has(c, X86_FEATURE_RDRAND) || cpu_has(c, X86_FEATURE_RDSEED)) && - cpu_matches(cpu_vuln_blacklist, SRBDS)) + cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS)) setup_force_cpu_bug(X86_BUG_SRBDS); + /* + * Processor MMIO Stale Data bug enumeration + * + * Affected CPU list is generally enough to enumerate the vulnerability, + * but for virtualization case check for ARCH_CAP MSR bits also, VMM may + * not want the guest to enumerate the bug. + */ + if (cpu_matches(cpu_vuln_blacklist, MMIO) && + !arch_cap_mmio_immune(ia32_cap)) + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index d502241995a39df7f82ab16a723146d0eecb30d8..defda61f372df532dadf52dd452949135ee0881b 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -69,8 +69,12 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC }, { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC }, { X86_FEATURE_AVX512_BF16, X86_FEATURE_AVX512VL }, + { X86_FEATURE_AVX512_FP16, X86_FEATURE_AVX512BW }, { X86_FEATURE_ENQCMD, X86_FEATURE_XSAVES }, { X86_FEATURE_PER_THREAD_MBA, X86_FEATURE_MBA }, + { X86_FEATURE_SGX_LC, X86_FEATURE_SGX }, + { X86_FEATURE_SGX1, X86_FEATURE_SGX }, + { X86_FEATURE_SGX2, X86_FEATURE_SGX1 }, {} }; diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index 3b1b01f2b248a57cd0dee06fbd35077e42ed1a76..da696eb4821a0b159e14ea0246beea0bcaf22cfc 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -93,15 +93,9 @@ static void init_vmx_capabilities(struct cpuinfo_x86 *c) } #endif /* CONFIG_X86_VMX_FEATURE_NAMES */ -static void clear_sgx_caps(void) -{ - setup_clear_cpu_cap(X86_FEATURE_SGX); - setup_clear_cpu_cap(X86_FEATURE_SGX_LC); -} - static int __init nosgx(char *str) { - clear_sgx_caps(); + setup_clear_cpu_cap(X86_FEATURE_SGX); return 0; } @@ -110,23 +104,30 @@ early_param("nosgx", nosgx); void init_ia32_feat_ctl(struct cpuinfo_x86 *c) { + bool enable_sgx_kvm = false, enable_sgx_driver = false; bool tboot = tboot_enabled(); - bool enable_sgx; + bool enable_vmx; u64 msr; if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) { clear_cpu_cap(c, X86_FEATURE_VMX); - clear_sgx_caps(); + clear_cpu_cap(c, X86_FEATURE_SGX); return; } - /* - * Enable SGX if and only if the kernel supports SGX and Launch Control - * is supported, i.e. disable SGX if the LE hash MSRs can't be written. 
- */ - enable_sgx = cpu_has(c, X86_FEATURE_SGX) && - cpu_has(c, X86_FEATURE_SGX_LC) && - IS_ENABLED(CONFIG_X86_SGX); + enable_vmx = cpu_has(c, X86_FEATURE_VMX) && + IS_ENABLED(CONFIG_KVM_INTEL); + + if (cpu_has(c, X86_FEATURE_SGX) && IS_ENABLED(CONFIG_X86_SGX)) { + /* + * Separate out SGX driver enabling from KVM. This allows KVM + * guests to use SGX even if the kernel SGX driver refuses to + * use it. This happens if flexible Launch Control is not + * available. + */ + enable_sgx_driver = cpu_has(c, X86_FEATURE_SGX_LC); + enable_sgx_kvm = enable_vmx && IS_ENABLED(CONFIG_X86_SGX_KVM); + } if (msr & FEAT_CTL_LOCKED) goto update_caps; @@ -142,15 +143,18 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c) * i.e. KVM is enabled, to avoid unnecessarily adding an attack vector * for the kernel, e.g. using VMX to hide malicious code. */ - if (cpu_has(c, X86_FEATURE_VMX) && IS_ENABLED(CONFIG_KVM_INTEL)) { + if (enable_vmx) { msr |= FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; if (tboot) msr |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX; } - if (enable_sgx) - msr |= FEAT_CTL_SGX_ENABLED | FEAT_CTL_SGX_LC_ENABLED; + if (enable_sgx_kvm || enable_sgx_driver) { + msr |= FEAT_CTL_SGX_ENABLED; + if (enable_sgx_driver) + msr |= FEAT_CTL_SGX_LC_ENABLED; + } wrmsrl(MSR_IA32_FEAT_CTL, msr); @@ -173,10 +177,29 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c) } update_sgx: - if (!(msr & FEAT_CTL_SGX_ENABLED) || - !(msr & FEAT_CTL_SGX_LC_ENABLED) || !enable_sgx) { - if (enable_sgx) - pr_err_once("SGX disabled by BIOS\n"); - clear_sgx_caps(); + if (!(msr & FEAT_CTL_SGX_ENABLED)) { + if (enable_sgx_kvm || enable_sgx_driver) + pr_err_once("SGX disabled by BIOS.\n"); + clear_cpu_cap(c, X86_FEATURE_SGX); + return; + } + + /* + * VMX feature bit may be cleared due to being disabled in BIOS, + * in which case SGX virtualization cannot be supported either. + */ + if (!cpu_has(c, X86_FEATURE_VMX) && enable_sgx_kvm) { + pr_err_once("SGX virtualization disabled due to lack of VMX.\n"); + enable_sgx_kvm = 0; + } + + if (!(msr & FEAT_CTL_SGX_LC_ENABLED) && enable_sgx_driver) { + if (!enable_sgx_kvm) { + pr_err_once("SGX Launch Control is locked. Disable SGX.\n"); + clear_cpu_cap(c, X86_FEATURE_SGX); + } else { + pr_err_once("SGX Launch Control is locked. 
Support SGX virtualization only.\n"); + clear_cpu_cap(c, X86_FEATURE_SGX_LC); + } } } diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 816fdbec795a47333a67188252c2e3c496e3b9bc..0e422a5448351c6d418a3bc40f40b39e5d7c2e1d 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -24,6 +24,7 @@ #include #include #include +#include #ifdef CONFIG_X86_64 #include @@ -719,6 +720,8 @@ static void init_intel(struct cpuinfo_x86 *c) tsx_disable(); split_lock_init(); + + intel_init_thermal(c); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/mce/Makefile b/arch/x86/kernel/cpu/mce/Makefile index 9f020c9941545ed033d41acd94092d6d0a85b6b9..015856abdbb1937ea9526c531c5d249d4eda9437 100644 --- a/arch/x86/kernel/cpu/mce/Makefile +++ b/arch/x86/kernel/cpu/mce/Makefile @@ -9,8 +9,6 @@ obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o mce-inject-y := inject.o obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o -obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o - obj-$(CONFIG_ACPI_APEI) += apei.o obj-$(CONFIG_X86_MCELOG_LEGACY) += dev-mcelog.o diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 0c6b02dd744c1424c3235035aab439d2f1d0d1b8..f73f1184b1c13d9b2ef62fecda27b85138f10869 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -387,7 +387,7 @@ static void threshold_restart_bank(void *_tr) u32 hi, lo; /* sysfs write might race against an offline operation */ - if (this_cpu_read(threshold_banks)) + if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off) return; rdmsr(tr->b->address, lo, hi); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index a0400f02df1fbdcbdf9aa82c4f8642cc402b9383..ed49a4abd20ec7c1b025da10d7c692fc64b02238 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -295,11 +295,17 @@ static void wait_for_panic(void) panic("Panicing machine check CPU died"); } -static void mce_panic(const char *msg, struct mce *final, char *exp) +static noinstr void mce_panic(const char *msg, struct mce *final, char *exp) { - int apei_err = 0; struct llist_node *pending; struct mce_evt_llist *l; + int apei_err = 0; + + /* + * Allow instrumentation around external facilities usage. Not that it + * matters a whole lot since the machine is going to panic anyway. + */ + instrumentation_begin(); if (!fake_panic) { /* @@ -314,7 +320,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp) } else { /* Don't log too much for fake panic */ if (atomic_inc_return(&mce_fake_panicked) > 1) - return; + goto out; } pending = mce_gen_pool_prepare_records(); /* First print corrected ones that are still unlogged */ @@ -352,6 +358,9 @@ static void mce_panic(const char *msg, struct mce *final, char *exp) panic(msg); } else pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg); + +out: + instrumentation_end(); } /* Support code for software error injection */ @@ -684,7 +693,7 @@ static struct notifier_block mce_default_nb = { /* * Read ADDR and MISC registers. */ -static void mce_read_aux(struct mce *m, int i) +static noinstr void mce_read_aux(struct mce *m, int i) { if (m->status & MCI_STATUS_MISCV) m->misc = mce_rdmsrl(msr_ops.misc(i)); @@ -1063,10 +1072,13 @@ static int mce_start(int *no_way_out) * Synchronize between CPUs after main scanning loop. * This invokes the bulk of the Monarch processing. 
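 *
 * (Illustrative note on the noinstr conversions in this hunk: once a
 * function is tagged noinstr, any call into instrumentable facilities
 * must be bracketed explicitly. The recurring pattern, as a sketch:
 *
 *	instrumentation_begin();
 *	...			// pr_emerg(), panic(), apei helpers, ...
 *	instrumentation_end();
 * )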
*/ -static int mce_end(int order) +static noinstr int mce_end(int order) { - int ret = -1; u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC; + int ret = -1; + + /* Allow instrumentation around external facilities. */ + instrumentation_begin(); if (!timeout) goto reset; @@ -1110,7 +1122,8 @@ static int mce_end(int order) /* * Don't reset anything. That's done by the Monarch. */ - return 0; + ret = 0; + goto out; } /* @@ -1125,6 +1138,10 @@ static int mce_end(int order) * Let others run again. */ atomic_set(&mce_executing, 0); + +out: + instrumentation_end(); + return ret; } @@ -1447,6 +1464,14 @@ noinstr void do_machine_check(struct pt_regs *regs) if (worst != MCE_AR_SEVERITY && !kill_it) goto out; + /* + * Enable instrumentation around the external facilities like + * task_work_add() (via queue_task_work()), fixup_exception() etc. + * For now, that is. Fixing this properly would need a lot more involved + * reorganization. + */ + instrumentation_begin(); + /* Fault was in user mode and we need to take some action */ if ((m.cs & 3) == 3) { /* If this triggers there is no way to recover. Die hard. */ @@ -1472,6 +1497,9 @@ noinstr void do_machine_check(struct pt_regs *regs) if (m.kflags & MCE_IN_KERNEL_COPYIN) queue_task_work(&m, msg, kill_it); } + + instrumentation_end(); + out: mce_wrmsrl(MSR_IA32_MCG_STATUS, 0); } @@ -2203,7 +2231,6 @@ __setup("mce", mcheck_enable); int __init mcheck_init(void) { - mcheck_intel_therm_init(); mce_register_decode_chain(&early_nb); mce_register_decode_chain(&mce_uc_nb); mce_register_decode_chain(&mce_default_nb); diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c index 3a44346f2276601a06d9aa5a5aae4b78c48b2418..e7808309d471073d9e3b29dbec5291a01376e451 100644 --- a/arch/x86/kernel/cpu/mce/inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -347,7 +347,7 @@ static ssize_t flags_write(struct file *filp, const char __user *ubuf, char buf[MAX_FLAG_OPT_SIZE], *__buf; int err; - if (cnt > MAX_FLAG_OPT_SIZE) + if (!cnt || cnt > MAX_FLAG_OPT_SIZE) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index eb8282d600b9aab92c7b14470788b6aaa6174e71..1adf67e0fcba8af1875e7e27b1685ee86776498d 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -487,6 +487,8 @@ static void intel_ppin_init(struct cpuinfo_x86 *c) case INTEL_FAM6_BROADWELL_X: case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_ICELAKE_X: + case INTEL_FAM6_ICELAKE_D: + case INTEL_FAM6_SAPPHIRERAPIDS_X: case INTEL_FAM6_XEON_PHI_KNL: case INTEL_FAM6_XEON_PHI_KNM: @@ -512,7 +514,6 @@ static void intel_ppin_init(struct cpuinfo_x86 *c) void mce_intel_feature_init(struct cpuinfo_x86 *c) { - intel_init_thermal(c); intel_init_cmci(); intel_init_lmce(); intel_ppin_init(c); diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 866c9a9bcdee7b1eb4c525aea2fb74ab5ec82882..839b54a08e09e0c4bbb86d00a1159885d10b379c 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -36,6 +36,8 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 }, { X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 }, { X86_FEATURE_PER_THREAD_MBA, CPUID_ECX, 0, 0x00000010, 3 }, + { X86_FEATURE_SGX1, CPUID_EAX, 0, 0x00000012, 0 }, + { X86_FEATURE_SGX2, CPUID_EAX, 1, 0x00000012, 0 }, { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 }, { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 }, { X86_FEATURE_PROC_FEEDBACK, 
CPUID_EDX, 11, 0x80000007, 0 }, diff --git a/arch/x86/kernel/cpu/sgx/Makefile b/arch/x86/kernel/cpu/sgx/Makefile index 91d3dc784a2925ec39d8a2251822df36d41ec017..9c1656779b2a058ff6f15d30bbac00ca557fd148 100644 --- a/arch/x86/kernel/cpu/sgx/Makefile +++ b/arch/x86/kernel/cpu/sgx/Makefile @@ -3,3 +3,4 @@ obj-y += \ encl.o \ ioctl.o \ main.o +obj-$(CONFIG_X86_SGX_KVM) += virt.o diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c index 8ce6d8371cfbf74eba9a4eba624220350d43f1c9..aa9b8b8688676fc66ebc6fdd605fe4467991af13 100644 --- a/arch/x86/kernel/cpu/sgx/driver.c +++ b/arch/x86/kernel/cpu/sgx/driver.c @@ -136,10 +136,6 @@ static const struct file_operations sgx_encl_fops = { .get_unmapped_area = sgx_get_unmapped_area, }; -const struct file_operations sgx_provision_fops = { - .owner = THIS_MODULE, -}; - static struct miscdevice sgx_dev_enclave = { .minor = MISC_DYNAMIC_MINOR, .name = "sgx_enclave", @@ -147,13 +143,6 @@ static struct miscdevice sgx_dev_enclave = { .fops = &sgx_encl_fops, }; -static struct miscdevice sgx_dev_provision = { - .minor = MISC_DYNAMIC_MINOR, - .name = "sgx_provision", - .nodename = "sgx_provision", - .fops = &sgx_provision_fops, -}; - int __init sgx_drv_init(void) { unsigned int eax, ebx, ecx, edx; @@ -187,11 +176,5 @@ int __init sgx_drv_init(void) if (ret) return ret; - ret = misc_register(&sgx_dev_provision); - if (ret) { - misc_deregister(&sgx_dev_enclave); - return ret; - } - return 0; } diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c index 97fb7efce224332c81fb713933b8ddc3a5a067c9..9a1a93ed2562d1e94e5c4cbf5cec30c493cc0871 100644 --- a/arch/x86/kernel/cpu/sgx/encl.c +++ b/arch/x86/kernel/cpu/sgx/encl.c @@ -12,6 +12,116 @@ #include "encls.h" #include "sgx.h" +#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd)) +/* + * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to + * determine the page index associated with the first PCMD entry + * within a PCMD page. + */ +#define PCMD_FIRST_MASK GENMASK(4, 0) + +/** + * reclaimer_writing_to_pcmd() - Query if any enclave page associated with + * a PCMD page is in process of being reclaimed. + * @encl: Enclave to which PCMD page belongs + * @start_addr: Address of enclave page using first entry within the PCMD page + * + * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is + * stored. The PCMD data of a reclaimed enclave page contains enough + * information for the processor to verify the page at the time + * it is loaded back into the Enclave Page Cache (EPC). + * + * The backing storage to which enclave pages are reclaimed is laid out as + * follows: + * Encrypted enclave pages:SECS page:PCMD pages + * + * Each PCMD page contains the PCMD metadata of + * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages. + * + * A PCMD page can only be truncated if it is (a) empty, and (b) not in the + * process of getting data (and thus soon being non-empty). (b) is tested with + * a check if an enclave page sharing the PCMD page is in the process of being + * reclaimed. + * + * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it + * intends to reclaim that enclave page - it means that the PCMD page + * associated with that enclave page is about to get some data and thus + * even if the PCMD page is empty, it should not be truncated. + * + * Context: Enclave mutex (&sgx_encl->lock) must be held. 
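 *
 * Example (illustrative): if the PCMD page covering enclave pages
 * 0x40..0x5f is all zeroes but the reclaimer has marked page 0x43 with
 * SGX_ENCL_PAGE_BEING_RECLAIMED, this returns 1 and the caller must not
 * truncate that PCMD page yet.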
+ * Return: 1 if the reclaimer is about to write to the PCMD page + * 0 if the reclaimer has no intention to write to the PCMD page + */ +static int reclaimer_writing_to_pcmd(struct sgx_encl *encl, + unsigned long start_addr) +{ + int reclaimed = 0; + int i; + + /* + * PCMD_FIRST_MASK is based on number of PCMD entries within + * PCMD page being 32. + */ + BUILD_BUG_ON(PCMDS_PER_PAGE != 32); + + for (i = 0; i < PCMDS_PER_PAGE; i++) { + struct sgx_encl_page *entry; + unsigned long addr; + + addr = start_addr + i * PAGE_SIZE; + + /* + * Stop when reaching the SECS page - it does not + * have a page_array entry and its reclaim is + * started and completed with enclave mutex held so + * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED + * flag. + */ + if (addr == encl->base + encl->size) + break; + + entry = xa_load(&encl->page_array, PFN_DOWN(addr)); + if (!entry) + continue; + + /* + * VA page slot ID uses same bit as the flag so it is important + * to ensure that the page is not already in backing store. + */ + if (entry->epc_page && + (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) { + reclaimed = 1; + break; + } + } + + return reclaimed; +} + +/* + * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's + * follow right after the EPC data in the backing storage. In addition to the + * visible enclave pages, there's one extra page slot for SECS, before PCMD + * structs. + */ +static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl, + unsigned long page_index) +{ + pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs); + + return epc_end_off + page_index * sizeof(struct sgx_pcmd); +} + +/* + * Free a page from the backing storage in the given page index. + */ +static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index) +{ + struct inode *inode = file_inode(encl->backing); + + shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1); +} + /* * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC * Pages" in the SDM. @@ -22,9 +132,12 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page, { unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK; struct sgx_encl *encl = encl_page->encl; + pgoff_t page_index, page_pcmd_off; + unsigned long pcmd_first_page; struct sgx_pageinfo pginfo; struct sgx_backing b; - pgoff_t page_index; + bool pcmd_page_empty; + u8 *pcmd_page; int ret; if (secs_page) @@ -32,14 +145,21 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page, else page_index = PFN_DOWN(encl->size); + /* + * Address of enclave page using the first entry within the PCMD page. 
+ */ + pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base; + + page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index); + ret = sgx_encl_get_backing(encl, page_index, &b); if (ret) return ret; pginfo.addr = encl_page->desc & PAGE_MASK; pginfo.contents = (unsigned long)kmap_atomic(b.contents); - pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) + - b.pcmd_offset; + pcmd_page = kmap_atomic(b.pcmd); + pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset; if (secs_page) pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page); @@ -55,11 +175,25 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page, ret = -EFAULT; } - kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset)); + memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd)); + set_page_dirty(b.pcmd); + + /* + * The area for the PCMD in the page was zeroed above. Check if the + * whole page is now empty meaning that all PCMD's have been zeroed: + */ + pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE); + + kunmap_atomic(pcmd_page); kunmap_atomic((void *)(unsigned long)pginfo.contents); sgx_encl_put_backing(&b, false); + sgx_encl_truncate_backing_page(encl, page_index); + + if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) + sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off)); + return ret; } @@ -78,7 +212,7 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page, ret = __sgx_encl_eldu(encl_page, epc_page, secs_page); if (ret) { - sgx_free_epc_page(epc_page); + sgx_encl_free_epc_page(epc_page); return ERR_PTR(ret); } @@ -404,18 +538,20 @@ void sgx_encl_release(struct kref *ref) if (sgx_unmark_page_reclaimable(entry->epc_page)) continue; - sgx_free_epc_page(entry->epc_page); + sgx_encl_free_epc_page(entry->epc_page); encl->secs_child_cnt--; entry->epc_page = NULL; } kfree(entry); + /* Invoke scheduler to prevent soft lockups. 
*/ + cond_resched(); + } xa_destroy(&encl->page_array); if (!encl->secs_child_cnt && encl->secs.epc_page) { - sgx_free_epc_page(encl->secs.epc_page); + sgx_encl_free_epc_page(encl->secs.epc_page); encl->secs.epc_page = NULL; } @@ -423,7 +559,7 @@ void sgx_encl_release(struct kref *ref) va_page = list_first_entry(&encl->va_pages, struct sgx_va_page, list); list_del(&va_page->list); - sgx_free_epc_page(va_page->epc_page); + sgx_encl_free_epc_page(va_page->epc_page); kfree(va_page); } @@ -577,7 +713,7 @@ static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl, int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, struct sgx_backing *backing) { - pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5); + pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index); struct page *contents; struct page *pcmd; @@ -585,7 +721,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, if (IS_ERR(contents)) return PTR_ERR(contents); - pcmd = sgx_encl_get_backing_page(encl, pcmd_index); + pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off)); if (IS_ERR(pcmd)) { put_page(contents); return PTR_ERR(pcmd); @@ -594,9 +730,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index, backing->page_index = page_index; backing->contents = contents; backing->pcmd = pcmd; - backing->pcmd_offset = - (page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) * - sizeof(struct sgx_pcmd); + backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1); return 0; } @@ -686,7 +820,7 @@ struct sgx_epc_page *sgx_alloc_va_page(void) ret = __epa(sgx_get_epc_virt_addr(epc_page)); if (ret) { WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret); - sgx_free_epc_page(epc_page); + sgx_encl_free_epc_page(epc_page); return ERR_PTR(-EFAULT); } @@ -735,3 +869,24 @@ bool sgx_va_page_full(struct sgx_va_page *va_page) return slot == SGX_VA_SLOT_COUNT; } + +/** + * sgx_encl_free_epc_page - free an EPC page assigned to an enclave + * @page: EPC page to be freed + * + * Free an EPC page assigned to an enclave. It does EREMOVE for the page, and + * only upon success, it puts the page back to the free page list. Otherwise, it + * gives a WARNING to indicate the page is leaked.
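+ * + * Context: @page must no longer be tracked by the reclaimer; the function + * warns once if SGX_EPC_PAGE_RECLAIMER_TRACKED is still set on @page.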
+ */ +void sgx_encl_free_epc_page(struct sgx_epc_page *page) +{ + int ret; + + WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED); + + ret = __eremove(sgx_get_epc_virt_addr(page)); + if (WARN_ONCE(ret, EREMOVE_ERROR_MESSAGE, ret, ret)) + return; + + sgx_free_epc_page(page); +} diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h index d8d30ccbef4c95017bec5926f1fab6d04fb12185..fec43ca65065b0caecf5e05dd261c62cf045494e 100644 --- a/arch/x86/kernel/cpu/sgx/encl.h +++ b/arch/x86/kernel/cpu/sgx/encl.h @@ -91,8 +91,8 @@ static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr, { struct vm_area_struct *result; - result = find_vma(mm, addr); - if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start) + result = vma_lookup(mm, addr); + if (!result || result->vm_ops != &sgx_vm_ops) return -EINVAL; *vma = result; @@ -115,5 +115,6 @@ struct sgx_epc_page *sgx_alloc_va_page(void); unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page); void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset); bool sgx_va_page_full(struct sgx_va_page *va_page); +void sgx_encl_free_epc_page(struct sgx_epc_page *page); #endif /* _X86_ENCL_H */ diff --git a/arch/x86/kernel/cpu/sgx/encls.h b/arch/x86/kernel/cpu/sgx/encls.h index 443188fe7e7057bb0dba10bda49acfdbf42757e4..9b204843b78d3ec7ff3903548358d39c4665dee8 100644 --- a/arch/x86/kernel/cpu/sgx/encls.h +++ b/arch/x86/kernel/cpu/sgx/encls.h @@ -11,21 +11,6 @@ #include #include "sgx.h" -enum sgx_encls_function { - ECREATE = 0x00, - EADD = 0x01, - EINIT = 0x02, - EREMOVE = 0x03, - EDGBRD = 0x04, - EDGBWR = 0x05, - EEXTEND = 0x06, - ELDU = 0x08, - EBLOCK = 0x09, - EPA = 0x0A, - EWB = 0x0B, - ETRACK = 0x0C, -}; - /** * ENCLS_FAULT_FLAG - flag signifying an ENCLS return code is a trapnr * @@ -55,6 +40,19 @@ enum sgx_encls_function { } while (0); \ } +/* + * encls_faulted() - Check if an ENCLS leaf faulted given an error code + * @ret: the return value of an ENCLS leaf function call + * + * Return: + * - true: ENCLS leaf faulted. + * - false: Otherwise. + */ +static inline bool encls_faulted(int ret) +{ + return ret & ENCLS_FAULT_FLAG; +} + /** * encls_failed() - Check if an ENCLS function failed * @ret: the return value of an ENCLS function call @@ -65,7 +63,7 @@ enum sgx_encls_function { */ static inline bool encls_failed(int ret) { - if (ret & ENCLS_FAULT_FLAG) + if (encls_faulted(ret)) return ENCLS_TRAPNR(ret) != X86_TRAP_PF; return !!ret; diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c index 2e10367ea66cf0ea2d615e6210f3373962925757..83df20e3e633353ca0b707cda6cae51cb6f09714 100644 --- a/arch/x86/kernel/cpu/sgx/ioctl.c +++ b/arch/x86/kernel/cpu/sgx/ioctl.c @@ -2,6 +2,7 @@ /* Copyright(c) 2016-20 Intel Corporation. 
*/ #include +#include #include #include #include @@ -47,7 +48,7 @@ static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page) encl->page_cnt--; if (va_page) { - sgx_free_epc_page(va_page->epc_page); + sgx_encl_free_epc_page(va_page->epc_page); list_del(&va_page->list); kfree(va_page); } @@ -117,7 +118,7 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs) return 0; err_out: - sgx_free_epc_page(encl->secs.epc_page); + sgx_encl_free_epc_page(encl->secs.epc_page); encl->secs.epc_page = NULL; err_out_backing: @@ -365,7 +366,7 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src, mmap_read_unlock(current->mm); err_out_free: - sgx_free_epc_page(epc_page); + sgx_encl_free_epc_page(epc_page); kfree(encl_page); return ret; @@ -495,7 +496,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct, void *token) { u64 mrsigner[4]; - int i, j, k; + int i, j; void *addr; int ret; @@ -544,8 +545,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct, preempt_disable(); - for (k = 0; k < 4; k++) - wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + k, mrsigner[k]); + sgx_update_lepubkeyhash(mrsigner); ret = __einit(sigstruct, token, addr); @@ -568,7 +568,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct, } } - if (ret & ENCLS_FAULT_FLAG) { + if (encls_faulted(ret)) { if (encls_failed(ret)) ENCLS_WARN(ret, "EINIT"); @@ -667,24 +667,11 @@ static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg) static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg) { struct sgx_enclave_provision params; - struct file *file; if (copy_from_user(¶ms, arg, sizeof(params))) return -EFAULT; - file = fget(params.fd); - if (!file) - return -EINVAL; - - if (file->f_op != &sgx_provision_fops) { - fput(file); - return -EINVAL; - } - - encl->attributes_mask |= SGX_ATTR_PROVISIONKEY; - - fput(file); - return 0; + return sgx_set_attribute(&encl->attributes_mask, params.fd); } long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c index 13a7599ce7d401cf40c5f8b65dcb748a0240b067..4036f50fc42ceed02848ab46909b4b45f68be6aa 100644 --- a/arch/x86/kernel/cpu/sgx/main.c +++ b/arch/x86/kernel/cpu/sgx/main.c @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2016-20 Intel Corporation. */ +#include #include #include #include +#include #include #include #include #include #include +#include #include "driver.h" #include "encl.h" #include "encls.h" @@ -17,6 +20,7 @@ struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS]; static int sgx_nr_epc_sections; static struct task_struct *ksgxd_tsk; static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq); +static DEFINE_XARRAY(sgx_epc_address_space); /* * These variables are part of the state of the reclaimer, and must be accessed @@ -25,8 +29,7 @@ static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq); static LIST_HEAD(sgx_active_page_list); static DEFINE_SPINLOCK(sgx_reclaimer_lock); -/* The free page list lock protected variables prepend the lock. */ -static unsigned long sgx_nr_free_pages; +static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0); /* Nodes with one or more EPC sections. 
*/ static nodemask_t sgx_numa_mask; @@ -58,6 +61,24 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list) page = list_first_entry(dirty_page_list, struct sgx_epc_page, list); + /* + * Checking page->poison without holding the node->lock + * is racy, but losing the race (i.e. poison is set just + * after the check) just means __eremove() will be uselessly + * called for a page that sgx_free_epc_page() will put onto + * the node->sgx_poison_page_list later. + */ + if (page->poison) { + struct sgx_epc_section *section = &sgx_epc_sections[page->section]; + struct sgx_numa_node *node = section->node; + + spin_lock(&node->lock); + list_move(&page->list, &node->sgx_poison_page_list); + spin_unlock(&node->lock); + + continue; + } + ret = __eremove(sgx_get_epc_virt_addr(page)); if (!ret) { /* @@ -294,7 +315,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page, sgx_encl_ewb(encl->secs.epc_page, &secs_backing); - sgx_free_epc_page(encl->secs.epc_page); + sgx_encl_free_epc_page(encl->secs.epc_page); encl->secs.epc_page = NULL; sgx_encl_put_backing(&secs_backing, true); @@ -400,14 +421,15 @@ static void sgx_reclaim_pages(void) spin_lock(&node->lock); list_add_tail(&epc_page->list, &node->free_page_list); - sgx_nr_free_pages++; spin_unlock(&node->lock); + atomic_long_inc(&sgx_nr_free_pages); } } static bool sgx_should_reclaim(unsigned long watermark) { - return sgx_nr_free_pages < watermark && !list_empty(&sgx_active_page_list); + return atomic_long_read(&sgx_nr_free_pages) < watermark && + !list_empty(&sgx_active_page_list); } static int ksgxd(void *p) @@ -468,9 +490,10 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid) page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list); list_del_init(&page->list); - sgx_nr_free_pages--; + page->flags = 0; spin_unlock(&node->lock); + atomic_long_dec(&sgx_nr_free_pages); return page; } @@ -609,26 +632,27 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim) * sgx_free_epc_page() - Free an EPC page * @page: an EPC page * - * Call EREMOVE for an EPC page and insert it back to the list of free pages. + * Put the EPC page back to the list of free pages. It's the caller's + * responsibility to make sure that the page is in uninitialized state. In other + * words, do EREMOVE, EWB or whatever operation is necessary before calling + * this function. 
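+ * + * For enclave-owned pages use sgx_encl_free_epc_page() instead: it performs + * the EREMOVE and puts the page on the free list only when that succeeds.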
*/ void sgx_free_epc_page(struct sgx_epc_page *page) { struct sgx_epc_section *section = &sgx_epc_sections[page->section]; struct sgx_numa_node *node = section->node; - int ret; - - WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED); - - ret = __eremove(sgx_get_epc_virt_addr(page)); - if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret)) - return; spin_lock(&node->lock); - list_add_tail(&page->list, &node->free_page_list); - sgx_nr_free_pages++; + page->owner = NULL; + if (page->poison) + list_add(&page->list, &node->sgx_poison_page_list); + else + list_add_tail(&page->list, &node->free_page_list); + page->flags = SGX_EPC_PAGE_IS_FREE; spin_unlock(&node->lock); + atomic_long_inc(&sgx_nr_free_pages); } static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size, @@ -649,18 +673,102 @@ static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size, } section->phys_addr = phys_addr; + xa_store_range(&sgx_epc_address_space, section->phys_addr, + phys_addr + size - 1, section, GFP_KERNEL); for (i = 0; i < nr_pages; i++) { section->pages[i].section = index; section->pages[i].flags = 0; section->pages[i].owner = NULL; + section->pages[i].poison = 0; list_add_tail(§ion->pages[i].list, &sgx_dirty_page_list); } - sgx_nr_free_pages += nr_pages; return true; } +bool arch_is_platform_page(u64 paddr) +{ + return !!xa_load(&sgx_epc_address_space, paddr); +} +EXPORT_SYMBOL_GPL(arch_is_platform_page); + +static struct sgx_epc_page *sgx_paddr_to_page(u64 paddr) +{ + struct sgx_epc_section *section; + + section = xa_load(&sgx_epc_address_space, paddr); + if (!section) + return NULL; + + return §ion->pages[PFN_DOWN(paddr - section->phys_addr)]; +} + +/* + * Called in process context to handle a hardware reported + * error in an SGX EPC page. + * If the MF_ACTION_REQUIRED bit is set in flags, then the + * context is the task that consumed the poison data. Otherwise + * this is called from a kernel thread unrelated to the page. + */ +int arch_memory_failure(unsigned long pfn, int flags) +{ + struct sgx_epc_page *page = sgx_paddr_to_page(pfn << PAGE_SHIFT); + struct sgx_epc_section *section; + struct sgx_numa_node *node; + + /* + * mm/memory-failure.c calls this routine for all errors + * where there isn't a "struct page" for the address. But that + * includes other address ranges besides SGX. + */ + if (!page) + return -ENXIO; + + /* + * If poison was consumed synchronously. Send a SIGBUS to + * the task. Hardware has already exited the SGX enclave and + * will not allow re-entry to an enclave that has a memory + * error. The signal may help the task understand why the + * enclave is broken. + */ + if (flags & MF_ACTION_REQUIRED) + force_sig(SIGBUS); + + section = &sgx_epc_sections[page->section]; + node = section->node; + + spin_lock(&node->lock); + + /* Already poisoned? Nothing more to do */ + if (page->poison) + goto out; + + page->poison = 1; + + /* + * If the page is on a free list, move it to the per-node + * poison page list. + */ + if (page->flags & SGX_EPC_PAGE_IS_FREE) { + list_move(&page->list, &node->sgx_poison_page_list); + goto out; + } + + /* + * TBD: Add additional plumbing to enable pre-emptive + * action for asynchronous poison notification. 
Until + * then just hope that the poison: + * a) is not accessed - sgx_free_epc_page() will deal with it + * when the user gives it back + * b) results in a recoverable machine check rather than + * a fatal one + */ +out: + spin_unlock(&node->lock); + return 0; +} + /** * A section metric is concatenated in a way that @low bits 12-31 define the * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the @@ -715,6 +823,7 @@ static bool __init sgx_page_cache_init(void) if (!node_isset(nid, sgx_numa_mask)) { spin_lock_init(&sgx_numa_nodes[nid].lock); INIT_LIST_HEAD(&sgx_numa_nodes[nid].free_page_list); + INIT_LIST_HEAD(&sgx_numa_nodes[nid].sgx_poison_page_list); node_set(nid, sgx_numa_mask); } @@ -731,6 +840,67 @@ static bool __init sgx_page_cache_init(void) return true; } +/* + * Update the SGX_LEPUBKEYHASH MSRs to the values specified by caller. + * Bare-metal driver requires to update them to hash of enclave's signer + * before EINIT. KVM needs to update them to guest's virtual MSR values + * before doing EINIT from guest. + */ +void sgx_update_lepubkeyhash(u64 *lepubkeyhash) +{ + int i; + + WARN_ON_ONCE(preemptible()); + + for (i = 0; i < 4; i++) + wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]); +} + +const struct file_operations sgx_provision_fops = { + .owner = THIS_MODULE, +}; + +static struct miscdevice sgx_dev_provision = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sgx_provision", + .nodename = "sgx_provision", + .fops = &sgx_provision_fops, +}; + +/** + * sgx_set_attribute() - Update allowed attributes given file descriptor + * @allowed_attributes: Pointer to allowed enclave attributes + * @attribute_fd: File descriptor for specific attribute + * + * Append enclave attribute indicated by file descriptor to allowed + * attributes. Currently only SGX_ATTR_PROVISIONKEY indicated by + * /dev/sgx_provision is supported. + * + * Return: + * -0: SGX_ATTR_PROVISIONKEY is appended to allowed_attributes + * -EINVAL: Invalid, or not supported file descriptor + */ +int sgx_set_attribute(unsigned long *allowed_attributes, + unsigned int attribute_fd) +{ + struct file *file; + + file = fget(attribute_fd); + if (!file) + return -EINVAL; + + if (file->f_op != &sgx_provision_fops) { + fput(file); + return -EINVAL; + } + + *allowed_attributes |= SGX_ATTR_PROVISIONKEY; + + fput(file); + return 0; +} +EXPORT_SYMBOL_GPL(sgx_set_attribute); + static int __init sgx_init(void) { int ret; @@ -747,12 +917,28 @@ static int __init sgx_init(void) goto err_page_cache; } - ret = sgx_drv_init(); + ret = misc_register(&sgx_dev_provision); if (ret) goto err_kthread; + /* + * Always try to initialize the native *and* KVM drivers. + * The KVM driver is less picky than the native one and + * can function if the native one is not supported on the + * current system or fails to initialize. + * + * Error out only if both fail to initialize. + */ + ret = sgx_drv_init(); + + if (sgx_vepc_init() && ret) + goto err_provision; + return 0; +err_provision: + misc_deregister(&sgx_dev_provision); + err_kthread: kthread_stop(ksgxd_tsk); diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h index 2a2b5c857451debb4d24ec1d38178359416e6732..9ec3136c780091ca538fb9d152820216118f7894 100644 --- a/arch/x86/kernel/cpu/sgx/sgx.h +++ b/arch/x86/kernel/cpu/sgx/sgx.h @@ -13,6 +13,10 @@ #undef pr_fmt #define pr_fmt(fmt) "sgx: " fmt +#define EREMOVE_ERROR_MESSAGE \ + "EREMOVE returned %d (0x%x) and an EPC page was leaked. SGX may become unusable. 
" \ + "Refer to Documentation/x86/sgx.rst for more information." + #define SGX_MAX_EPC_SECTIONS 8 #define SGX_EEXTEND_BLOCK_SIZE 256 #define SGX_NR_TO_SCAN 16 @@ -22,9 +26,13 @@ /* Pages, which are being tracked by the page reclaimer. */ #define SGX_EPC_PAGE_RECLAIMER_TRACKED BIT(0) +/* Pages on free list */ +#define SGX_EPC_PAGE_IS_FREE BIT(1) + struct sgx_epc_page { unsigned int section; - unsigned int flags; + u16 flags; + u16 poison; struct sgx_encl_page *owner; struct list_head list; }; @@ -35,6 +43,7 @@ struct sgx_epc_page { */ struct sgx_numa_node { struct list_head free_page_list; + struct list_head sgx_poison_page_list; spinlock_t lock; }; @@ -80,4 +89,15 @@ void sgx_mark_page_reclaimable(struct sgx_epc_page *page); int sgx_unmark_page_reclaimable(struct sgx_epc_page *page); struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim); +#ifdef CONFIG_X86_SGX_KVM +int __init sgx_vepc_init(void); +#else +static inline int __init sgx_vepc_init(void) +{ + return -ENODEV; +} +#endif + +void sgx_update_lepubkeyhash(u64 *lepubkeyhash); + #endif /* _X86_SGX_H */ diff --git a/arch/x86/kernel/cpu/sgx/virt.c b/arch/x86/kernel/cpu/sgx/virt.c new file mode 100644 index 0000000000000000000000000000000000000000..b4f9a50de776e29cf058afb1575da4fceec1ad0b --- /dev/null +++ b/arch/x86/kernel/cpu/sgx/virt.c @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device driver to expose SGX enclave memory to KVM guests. + * + * Copyright(c) 2021 Intel Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "encls.h" +#include "sgx.h" + +struct sgx_vepc { + struct xarray page_array; + struct mutex lock; +}; + +/* + * Temporary SECS pages that cannot be EREMOVE'd due to having child in other + * virtual EPC instances, and the lock to protect it. 
+ */ +static struct mutex zombie_secs_pages_lock; +static struct list_head zombie_secs_pages; + +static int __sgx_vepc_fault(struct sgx_vepc *vepc, + struct vm_area_struct *vma, unsigned long addr) +{ + struct sgx_epc_page *epc_page; + unsigned long index, pfn; + int ret; + + WARN_ON(!mutex_is_locked(&vepc->lock)); + + /* Calculate index of EPC page in virtual EPC's page_array */ + index = vma->vm_pgoff + PFN_DOWN(addr - vma->vm_start); + + epc_page = xa_load(&vepc->page_array, index); + if (epc_page) + return 0; + + epc_page = sgx_alloc_epc_page(vepc, false); + if (IS_ERR(epc_page)) + return PTR_ERR(epc_page); + + ret = xa_err(xa_store(&vepc->page_array, index, epc_page, GFP_KERNEL)); + if (ret) + goto err_free; + + pfn = PFN_DOWN(sgx_get_epc_phys_addr(epc_page)); + + ret = vmf_insert_pfn(vma, addr, pfn); + if (ret != VM_FAULT_NOPAGE) { + ret = -EFAULT; + goto err_delete; + } + + return 0; + +err_delete: + xa_erase(&vepc->page_array, index); +err_free: + sgx_free_epc_page(epc_page); + return ret; +} + +static vm_fault_t sgx_vepc_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct sgx_vepc *vepc = vma->vm_private_data; + int ret; + + mutex_lock(&vepc->lock); + ret = __sgx_vepc_fault(vepc, vma, vmf->address); + mutex_unlock(&vepc->lock); + + if (!ret) + return VM_FAULT_NOPAGE; + + if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) { + mmap_read_unlock(vma->vm_mm); + return VM_FAULT_RETRY; + } + + return VM_FAULT_SIGBUS; +} + +const struct vm_operations_struct sgx_vepc_vm_ops = { + .fault = sgx_vepc_fault, +}; + +static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct sgx_vepc *vepc = file->private_data; + + if (!(vma->vm_flags & VM_SHARED)) + return -EINVAL; + + vma->vm_ops = &sgx_vepc_vm_ops; + /* Don't copy VMA in fork() */ + vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY; + vma->vm_private_data = vepc; + + return 0; +} + +static int sgx_vepc_remove_page(struct sgx_epc_page *epc_page) +{ + /* + * Take a previously guest-owned EPC page and return it to the + * general EPC page pool. + * + * Guests can not be trusted to have left this page in a good + * state, so run EREMOVE on the page unconditionally. In the + * case that a guest properly EREMOVE'd this page, a superfluous + * EREMOVE is harmless. + */ + return __eremove(sgx_get_epc_virt_addr(epc_page)); +} + +static int sgx_vepc_free_page(struct sgx_epc_page *epc_page) +{ + int ret = sgx_vepc_remove_page(epc_page); + if (ret) { + /* + * Only SGX_CHILD_PRESENT is expected, which is because of + * EREMOVE'ing an SECS still with children, in which case it can + * be handled by EREMOVE'ing the SECS again after all pages in + * virtual EPC have been EREMOVE'd. See comments below in + * sgx_vepc_release(). + * + * The user of virtual EPC (KVM) needs to guarantee that no + * logical processor is still running in the enclave in the guest, + * otherwise EREMOVE will get SGX_ENCLAVE_ACT which cannot be + * handled here. + */ + WARN_ONCE(ret != SGX_CHILD_PRESENT, EREMOVE_ERROR_MESSAGE, + ret, ret); + return ret; + } + + sgx_free_epc_page(epc_page); + return 0; +} + +static long sgx_vepc_remove_all(struct sgx_vepc *vepc) +{ + struct sgx_epc_page *entry; + unsigned long index; + long failures = 0; + + xa_for_each(&vepc->page_array, index, entry) { + int ret = sgx_vepc_remove_page(entry); + if (ret) { + if (ret == SGX_CHILD_PRESENT) { + /* The page is a SECS, userspace will retry.
*/ + failures++; + } else { + /* + * Report errors due to #GP or SGX_ENCLAVE_ACT; do not + * WARN, as userspace can induce said failures by + * calling the ioctl concurrently on multiple vEPCs or + * while one or more CPUs is running the enclave. Only + * a #PF on EREMOVE indicates a kernel/hardware issue. + */ + WARN_ON_ONCE(encls_faulted(ret) && + ENCLS_TRAPNR(ret) != X86_TRAP_GP); + return -EBUSY; + } + } + cond_resched(); + } + + /* + * Return the number of SECS pages that failed to be removed, so + * userspace knows that it has to retry. + */ + return failures; +} + +static int sgx_vepc_release(struct inode *inode, struct file *file) +{ + struct sgx_vepc *vepc = file->private_data; + struct sgx_epc_page *epc_page, *tmp, *entry; + unsigned long index; + + LIST_HEAD(secs_pages); + + xa_for_each(&vepc->page_array, index, entry) { + /* + * Remove all normal, child pages. sgx_vepc_free_page() + * will fail if EREMOVE fails, but this is OK and expected on + * SECS pages. Those can only be EREMOVE'd *after* all their + * child pages. Retries below will clean them up. + */ + if (sgx_vepc_free_page(entry)) + continue; + + xa_erase(&vepc->page_array, index); + } + + /* + * Retry EREMOVE'ing pages. This will clean up any SECS pages that + * only had children in this 'epc' area. + */ + xa_for_each(&vepc->page_array, index, entry) { + epc_page = entry; + /* + * An EREMOVE failure here means that the SECS page still + * has children. But, since all children in this 'sgx_vepc' + * have been removed, the SECS page must have a child on + * another instance. + */ + if (sgx_vepc_free_page(epc_page)) + list_add_tail(&epc_page->list, &secs_pages); + + xa_erase(&vepc->page_array, index); + } + + /* + * SECS pages are "pinned" by child pages, and "unpinned" once all + * children have been EREMOVE'd. A child page in this instance + * may have pinned an SECS page encountered in an earlier release(), + * creating a zombie. Since some children were EREMOVE'd above, + * try to EREMOVE all zombies in the hopes that one was unpinned. + */ + mutex_lock(&zombie_secs_pages_lock); + list_for_each_entry_safe(epc_page, tmp, &zombie_secs_pages, list) { + /* + * Speculatively remove the page from the list of zombies, + * if the page is successfully EREMOVE'd it will be added to + * the list of free pages. If EREMOVE fails, throw the page + * on the local list, which will be spliced on at the end. 
+ */ + list_del(&epc_page->list); + + if (sgx_vepc_free_page(epc_page)) + list_add_tail(&epc_page->list, &secs_pages); + } + + if (!list_empty(&secs_pages)) + list_splice_tail(&secs_pages, &zombie_secs_pages); + mutex_unlock(&zombie_secs_pages_lock); + + xa_destroy(&vepc->page_array); + kfree(vepc); + + return 0; +} + +static int sgx_vepc_open(struct inode *inode, struct file *file) +{ + struct sgx_vepc *vepc; + + vepc = kzalloc(sizeof(struct sgx_vepc), GFP_KERNEL); + if (!vepc) + return -ENOMEM; + mutex_init(&vepc->lock); + xa_init(&vepc->page_array); + + file->private_data = vepc; + + return 0; +} + +static long sgx_vepc_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct sgx_vepc *vepc = file->private_data; + + switch (cmd) { + case SGX_IOC_VEPC_REMOVE_ALL: + if (arg) + return -EINVAL; + return sgx_vepc_remove_all(vepc); + + default: + return -ENOTTY; + } +} + +static const struct file_operations sgx_vepc_fops = { + .owner = THIS_MODULE, + .open = sgx_vepc_open, + .unlocked_ioctl = sgx_vepc_ioctl, + .compat_ioctl = sgx_vepc_ioctl, + .release = sgx_vepc_release, + .mmap = sgx_vepc_mmap, +}; + +static struct miscdevice sgx_vepc_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sgx_vepc", + .nodename = "sgx_vepc", + .fops = &sgx_vepc_fops, +}; + +int __init sgx_vepc_init(void) +{ + /* SGX virtualization requires KVM to work */ + if (!cpu_feature_enabled(X86_FEATURE_VMX)) + return -ENODEV; + + INIT_LIST_HEAD(&zombie_secs_pages); + mutex_init(&zombie_secs_pages_lock); + + return misc_register(&sgx_vepc_dev); +} + +/** + * sgx_virt_ecreate() - Run ECREATE on behalf of guest + * @pageinfo: Pointer to PAGEINFO structure + * @secs: Userspace pointer to SECS page + * @trapnr: trap number injected to guest in case of ECREATE error + * + * Run ECREATE on behalf of guest after KVM traps ECREATE for the purpose + * of enforcing policies of guest's enclaves, and return the trap number + * which should be injected to guest in case of any ECREATE error. + * + * Return: + * - 0: ECREATE was successful. + * - <0: on error. + */ +int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs, + int *trapnr) +{ + int ret; + + /* + * @secs is an untrusted, userspace-provided address. It comes from + * KVM and is assumed to be a valid pointer which points somewhere in + * userspace. This can fault and call SGX or other fault handlers when + * userspace mapping @secs doesn't exist. + * + * Add a WARN() to make sure @secs is already valid userspace pointer + * from caller (KVM), who should already have handled invalid pointer + * case (for instance, made by malicious guest). All other checks, + * such as alignment of @secs, are deferred to ENCLS itself. + */ + if (WARN_ON_ONCE(!access_ok(secs, PAGE_SIZE))) + return -EINVAL; + + __uaccess_begin(); + ret = __ecreate(pageinfo, (void *)secs); + __uaccess_end(); + + if (encls_faulted(ret)) { + *trapnr = ENCLS_TRAPNR(ret); + return -EFAULT; + } + + /* ECREATE doesn't return an error code, it faults or succeeds. */ + WARN_ON_ONCE(ret); + return 0; +} +EXPORT_SYMBOL_GPL(sgx_virt_ecreate); + +static int __sgx_virt_einit(void __user *sigstruct, void __user *token, + void __user *secs) +{ + int ret; + + /* + * Make sure all userspace pointers from caller (KVM) are valid. + * All other checks deferred to ENCLS itself. Also see comment + * for @secs in sgx_virt_ecreate(). 
+ */ +#define SGX_EINITTOKEN_SIZE 304 + if (WARN_ON_ONCE(!access_ok(sigstruct, sizeof(struct sgx_sigstruct)) || + !access_ok(token, SGX_EINITTOKEN_SIZE) || + !access_ok(secs, PAGE_SIZE))) + return -EINVAL; + + __uaccess_begin(); + ret = __einit((void *)sigstruct, (void *)token, (void *)secs); + __uaccess_end(); + + return ret; +} + +/** + * sgx_virt_einit() - Run EINIT on behalf of guest + * @sigstruct: Userspace pointer to SIGSTRUCT structure + * @token: Userspace pointer to EINITTOKEN structure + * @secs: Userspace pointer to SECS page + * @lepubkeyhash: Pointer to guest's *virtual* SGX_LEPUBKEYHASH MSR values + * @trapnr: trap number injected to guest in case of EINIT error + * + * Run EINIT on behalf of guest after KVM traps EINIT. If SGX_LC is available + * in host, SGX driver may rewrite the hardware values at wish, therefore KVM + * needs to update hardware values to guest's virtual MSR values in order to + * ensure EINIT is executed with expected hardware values. + * + * Return: + * - 0: EINIT was successful. + * - <0: on error. + */ +int sgx_virt_einit(void __user *sigstruct, void __user *token, + void __user *secs, u64 *lepubkeyhash, int *trapnr) +{ + int ret; + + if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) { + ret = __sgx_virt_einit(sigstruct, token, secs); + } else { + preempt_disable(); + + sgx_update_lepubkeyhash(lepubkeyhash); + + ret = __sgx_virt_einit(sigstruct, token, secs); + preempt_enable(); + } + + /* Propagate up the error from the WARN_ON_ONCE in __sgx_virt_einit() */ + if (ret == -EINVAL) + return ret; + + if (encls_faulted(ret)) { + *trapnr = ENCLS_TRAPNR(ret); + return -EFAULT; + } + + return ret; +} +EXPORT_SYMBOL_GPL(sgx_virt_einit); diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 629c4994f1654cd059daa7116b511a3578e22b66..7f57110f958e1cc6e853a697b619117528d7e05b 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -995,8 +995,10 @@ early_param("memmap", parse_memmap_opt); */ void __init e820__reserve_setup_data(void) { + struct setup_indirect *indirect; struct setup_data *data; - u64 pa_data; + u64 pa_data, pa_next; + u32 len; pa_data = boot_params.hdr.setup_data; if (!pa_data) @@ -1004,6 +1006,14 @@ void __init e820__reserve_setup_data(void) while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); + if (!data) { + pr_warn("e820: failed to memremap setup_data entry\n"); + return; + } + + len = sizeof(*data); + pa_next = data->next; + e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); /* @@ -1015,18 +1025,27 @@ void __init e820__reserve_setup_data(void) sizeof(*data) + data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { - e820__range_update(((struct setup_indirect *)data->data)->addr, - ((struct setup_indirect *)data->data)->len, - E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); - e820__range_update_kexec(((struct setup_indirect *)data->data)->addr, - ((struct setup_indirect *)data->data)->len, - E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); + if (data->type == SETUP_INDIRECT) { + len += data->len; + early_memunmap(data, sizeof(*data)); + data = early_memremap(pa_data, len); + if (!data) { + pr_warn("e820: failed to memremap indirect setup_data\n"); + return; + } + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) { + e820__range_update(indirect->addr, indirect->len, + E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); + 
e820__range_update_kexec(indirect->addr, indirect->len, + E820_TYPE_RAM, E820_TYPE_RESERVED_KERN); + } } - pa_data = data->next; - early_memunmap(data, sizeof(*data)); + pa_data = pa_next; + early_memunmap(data, len); } e820__update_table(e820_table); diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 0c6d1dc59fa21c1156dd398f309c9164909f3a62..8e27cbefaa4bf1e33420601adebf9815cc701584 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -515,6 +515,7 @@ static const struct intel_early_ops gen11_early_ops __initconst = { .stolen_size = gen9_stolen_size, }; +/* Intel integrated GPUs for which we need to reserve "stolen memory" */ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_I830_IDS(&i830_early_ops), INTEL_I845G_IDS(&i845_early_ops), @@ -588,6 +589,13 @@ static void __init intel_graphics_quirks(int num, int slot, int func) u16 device; int i; + /* + * Reserve "stolen memory" for an integrated GPU. If we've already + * found one, there's nothing to do for other (discrete) GPUs. + */ + if (resource_size(&intel_graphics_stolen_res)) + return; + device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) { @@ -700,7 +708,7 @@ static struct chipset early_qrk[] __initdata = { { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID, - QFLAG_APPLY_ONCE, intel_graphics_quirks }, + 0, intel_graphics_quirks }, /* * HPET on the current version of the Baytrail platform has accuracy * problems: it will halt in deep idle state - so we disable it. diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index ce904c89c6c7068df8fb20bcf29103a5b5ea3c77..cb23373bffa8e6263927826c19a94c3c1fd7d22f 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -21,6 +21,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -376,3 +377,23 @@ void fixup_irqs(void) } } #endif + +#ifdef CONFIG_X86_THERMAL_VECTOR +static void smp_thermal_vector(void) +{ + if (x86_thermal_enabled()) + intel_thermal_interrupt(); + else + pr_err("CPU%d: Unexpected LVT thermal interrupt!\n", + smp_processor_id()); +} + +DEFINE_IDTENTRY_SYSVEC(sysvec_thermal) +{ + trace_thermal_apic_entry(THERMAL_APIC_VECTOR); + inc_irq_stat(irq_thermal_count); + smp_thermal_vector(); + trace_thermal_apic_exit(THERMAL_APIC_VECTOR); + ack_APIC_irq(); +} +#endif diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c index 64b6da95af984868962777bb4978b3fa8a4eff16..e2e89bebcbc32840357788cbd803805e5f652c62 100644 --- a/arch/x86/kernel/kdebugfs.c +++ b/arch/x86/kernel/kdebugfs.c @@ -88,11 +88,13 @@ create_setup_data_node(struct dentry *parent, int no, static int __init create_setup_data_nodes(struct dentry *parent) { + struct setup_indirect *indirect; struct setup_data_node *node; struct setup_data *data; - int error; + u64 pa_data, pa_next; struct dentry *d; - u64 pa_data; + int error; + u32 len; int no = 0; d = debugfs_create_dir("setup_data", parent); @@ -112,12 +114,29 @@ static int __init create_setup_data_nodes(struct dentry *parent) error = -ENOMEM; goto err_dir; } - - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { - node->paddr = ((struct setup_indirect *)data->data)->addr; - node->type = ((struct setup_indirect *)data->data)->type; - node->len = ((struct setup_indirect *)data->data)->len; + pa_next = 
data->next; + + if (data->type == SETUP_INDIRECT) { + len = sizeof(*data) + data->len; + memunmap(data); + data = memremap(pa_data, len, MEMREMAP_WB); + if (!data) { + kfree(node); + error = -ENOMEM; + goto err_dir; + } + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) { + node->paddr = indirect->addr; + node->type = indirect->type; + node->len = indirect->len; + } else { + node->paddr = pa_data; + node->type = data->type; + node->len = data->len; + } } else { node->paddr = pa_data; node->type = data->type; @@ -125,7 +144,7 @@ static int __init create_setup_data_nodes(struct dentry *parent) } create_setup_data_node(d, no, node); - pa_data = data->next; + pa_data = pa_next; memunmap(data); no++; diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c index d0a19121c6a4f1f4bb1f62e05f12d8350d4710a1..257892fcefa794803d8eaf2d3d1810ebb278957b 100644 --- a/arch/x86/kernel/ksysfs.c +++ b/arch/x86/kernel/ksysfs.c @@ -91,26 +91,41 @@ static int get_setup_data_paddr(int nr, u64 *paddr) static int __init get_setup_data_size(int nr, size_t *size) { - int i = 0; + u64 pa_data = boot_params.hdr.setup_data, pa_next; + struct setup_indirect *indirect; struct setup_data *data; - u64 pa_data = boot_params.hdr.setup_data; + int i = 0; + u32 len; while (pa_data) { data = memremap(pa_data, sizeof(*data), MEMREMAP_WB); if (!data) return -ENOMEM; + pa_next = data->next; + if (nr == i) { - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) - *size = ((struct setup_indirect *)data->data)->len; - else + if (data->type == SETUP_INDIRECT) { + len = sizeof(*data) + data->len; + memunmap(data); + data = memremap(pa_data, len, MEMREMAP_WB); + if (!data) + return -ENOMEM; + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) + *size = indirect->len; + else + *size = data->len; + } else { *size = data->len; + } memunmap(data); return 0; } - pa_data = data->next; + pa_data = pa_next; memunmap(data); i++; } @@ -120,9 +135,11 @@ static int __init get_setup_data_size(int nr, size_t *size) static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { + struct setup_indirect *indirect; + struct setup_data *data; int nr, ret; u64 paddr; - struct setup_data *data; + u32 len; ret = kobj_to_setup_data_nr(kobj, &nr); if (ret) @@ -135,10 +152,20 @@ static ssize_t type_show(struct kobject *kobj, if (!data) return -ENOMEM; - if (data->type == SETUP_INDIRECT) - ret = sprintf(buf, "0x%x\n", ((struct setup_indirect *)data->data)->type); - else + if (data->type == SETUP_INDIRECT) { + len = sizeof(*data) + data->len; + memunmap(data); + data = memremap(paddr, len, MEMREMAP_WB); + if (!data) + return -ENOMEM; + + indirect = (struct setup_indirect *)data->data; + + ret = sprintf(buf, "0x%x\n", indirect->type); + } else { ret = sprintf(buf, "0x%x\n", data->type); + } + memunmap(data); return ret; } @@ -149,9 +176,10 @@ static ssize_t setup_data_data_read(struct file *fp, char *buf, loff_t off, size_t count) { + struct setup_indirect *indirect; + struct setup_data *data; int nr, ret = 0; u64 paddr, len; - struct setup_data *data; void *p; ret = kobj_to_setup_data_nr(kobj, &nr); @@ -165,10 +193,27 @@ static ssize_t setup_data_data_read(struct file *fp, if (!data) return -ENOMEM; - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { - paddr = ((struct setup_indirect *)data->data)->addr; - len = ((struct setup_indirect 
*)data->data)->len; + if (data->type == SETUP_INDIRECT) { + len = sizeof(*data) + data->len; + memunmap(data); + data = memremap(paddr, len, MEMREMAP_WB); + if (!data) + return -ENOMEM; + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) { + paddr = indirect->addr; + len = indirect->len; + } else { + /* + * Even though this is technically undefined, return + * the data as though it is a normal setup_data struct. + * This will at least allow it to be inspected. + */ + paddr += sizeof(*data); + len = data->len; + } } else { paddr += sizeof(*data); len = data->len; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index dc5512a789241b83fc29cc31579eca8aa8e76395..9ada27e1cb4d822888ec7f94fc1cc113df3cf96a 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -532,7 +532,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) { ipi_bitmap <<= min - apic_id; min = apic_id; - } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { + } else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) { max = apic_id < max ? max : apic_id; } else { ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index 2a541c7de167fe205af024703793a064b72cc7ae..d134169488b63b000eced609c3f66e97a8f27057 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -31,6 +31,10 @@ #include #include +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +#include +#endif + #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY /* * The instruction set on x86 is CISC. @@ -66,17 +70,6 @@ static inline unsigned long klp_size_to_check(unsigned long func_size, return size; } -static inline int klp_compare_address(unsigned long stack_addr, - unsigned long func_addr, const char *func_name, - unsigned long check_size) -{ - if (stack_addr >= func_addr && stack_addr < func_addr + check_size) { - pr_err("func %s is in use!\n", func_name); - return -EBUSY; - } - return 0; -} - static bool check_jump_insn(unsigned long func_addr) { int len = JMP_E9_INSN_SIZE; @@ -137,7 +130,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, /* Check func address in stack */ if (enable) { - if (func->force == KLP_ENFORCEMENT) + if (func->patched || func->force == KLP_ENFORCEMENT) continue; /* * When enable, checking the currently @@ -253,8 +246,10 @@ static void klp_print_stack_trace(void *trace_ptr, int trace_len) #endif #define MAX_STACK_ENTRIES 100 -static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc) +static bool check_func_list(void *data, int *ret, unsigned long pc) { + struct klp_func_list *funcs = (struct klp_func_list *)data; + while (funcs != NULL) { *ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name, klp_size_to_check(funcs->func_size, funcs->force)); @@ -267,7 +262,7 @@ static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long } static int klp_check_stack(void *trace_ptr, int trace_len, - struct klp_func_list *check_funcs) + bool (*fn)(void *, int *, unsigned long), void *data) { #ifdef CONFIG_ARCH_STACKWALK unsigned long *trace = trace_ptr; @@ -284,7 +279,7 @@ static int klp_check_stack(void *trace_ptr, int trace_len, for (i = 0; i < trace->nr_entries; i++) { address = trace->entries[i]; #endif - if (!check_func_list(check_funcs, &ret, address)) { + if (!fn(data, &ret, address)) { #ifdef CONFIG_ARCH_STACKWALK 
klp_print_stack_trace(trace_ptr, trace_len); #else @@ -308,11 +303,10 @@ static void free_list(struct klp_func_list **funcs) } } -int klp_check_calltrace(struct klp_patch *patch, int enable) +static int do_check_calltrace(bool (*fn)(void *, int *, unsigned long), void *data) { struct task_struct *g, *t; int ret = 0; - struct klp_func_list *check_funcs = NULL; static unsigned long trace_entries[MAX_STACK_ENTRIES]; #ifdef CONFIG_ARCH_STACKWALK int trace_len; @@ -320,45 +314,148 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) struct stack_trace trace; #endif - ret = klp_check_activeness_func(patch, enable, &check_funcs); - if (ret) - goto out; for_each_process_thread(g, t) { if (!strncmp(t->comm, "migration/", 10)) continue; #ifdef CONFIG_ARCH_STACKWALK ret = stack_trace_save_tsk_reliable(t, trace_entries, MAX_STACK_ENTRIES); - if (ret < 0) - goto out; + if (ret < 0) { + pr_err("%s:%d has an unreliable stack, ret=%d\n", + t->comm, t->pid, ret); + return ret; + } trace_len = ret; - ret = 0; + ret = klp_check_stack(trace_entries, trace_len, fn, data); #else trace.skip = 0; trace.nr_entries = 0; trace.max_entries = MAX_STACK_ENTRIES; trace.entries = trace_entries; ret = save_stack_trace_tsk_reliable(t, &trace); -#endif WARN_ON_ONCE(ret == -ENOSYS); if (ret) { - pr_info("%s: %s:%d has an unreliable stack\n", - __func__, t->comm, t->pid); - goto out; + pr_err("%s: %s:%d has an unreliable stack, ret=%d\n", + __func__, t->comm, t->pid, ret); + return ret; } -#ifdef CONFIG_ARCH_STACKWALK - ret = klp_check_stack(trace_entries, trace_len, check_funcs); -#else - ret = klp_check_stack(&trace, 0, check_funcs); + ret = klp_check_stack(&trace, 0, fn, data); #endif - if (ret) - goto out; + if (ret) { + pr_err("%s:%d check stack failed, ret=%d\n", + t->comm, t->pid, ret); + return ret; + } + } + + return 0; +} + +int klp_check_calltrace(struct klp_patch *patch, int enable) +{ + int ret = 0; + struct klp_func_list *check_funcs = NULL; + + ret = klp_check_activeness_func(patch, enable, &check_funcs); + if (ret) { + pr_err("collect active functions failed, ret=%d\n", ret); + goto out; } + if (!check_funcs) + goto out; + + ret = do_check_calltrace(check_func_list, (void *)check_funcs); + out: free_list(&check_funcs); return ret; } + +static bool check_module_calltrace(void *data, int *ret, unsigned long pc) +{ + struct module *mod = (struct module *)data; + + if (within_module_core(pc, mod)) { + pr_err("module %s is in use!\n", mod->name); + *ret = -EBUSY; + return false; + } + return true; +} + +int arch_klp_module_check_calltrace(void *data) +{ + return do_check_calltrace(check_module_calltrace, data); +} + +int arch_klp_check_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + int ret; + unsigned char opcode; + + ret = copy_from_kernel_nofault(&opcode, old_func, INT3_INSN_SIZE); + if (ret) + return ret; + + /* Another subsystem puts a breakpoint, reject patching at this time */ + if (opcode == INT3_INSN_OPCODE) + return -EBUSY; + + return 0; +} + +int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + unsigned char int3 = INT3_INSN_OPCODE; + int ret; + + ret = copy_from_kernel_nofault(&arch_data->saved_opcode, old_func, + INT3_INSN_SIZE); + if (ret) + return ret; + + text_poke(old_func, &int3, INT3_INSN_SIZE); + /* arch_klp_code_modify_post_process() will do text_poke_sync() */ + + return 0; +} + +void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + unsigned char opcode; + int ret; + + ret = 
copy_from_kernel_nofault(&opcode, old_func, INT3_INSN_SIZE); + if (ret) { + pr_warn("%s: failed to read opcode, ret=%d\n", __func__, ret); + return; + } + + /* instruction have been recovered at arch_klp_unpatch_func() */ + if (opcode != INT3_INSN_OPCODE) + return; + + text_poke(old_func, &arch_data->saved_opcode, INT3_INSN_SIZE); + /* arch_klp_code_modify_post_process() will do text_poke_sync() */ +} + +int klp_int3_handler(struct pt_regs *regs) +{ + unsigned long addr = regs->ip - INT3_INSN_SIZE; + void *brk_func; + + if (user_mode(regs)) + return 0; + + brk_func = klp_get_brk_func((void *)addr); + if (!brk_func) + return 0; + + int3_emulate_jmp(regs, (unsigned long)brk_func); + return 1; +} +NOKPROBE_SYMBOL(klp_int3_handler); #endif #ifdef CONFIG_LIVEPATCH_WO_FTRACE @@ -382,23 +479,37 @@ void arch_klp_code_modify_post_process(void) long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) { - return copy_from_kernel_nofault(arch_data->old_code, - old_func, JMP_E9_INSN_SIZE); + long ret; + + /* Prevent text modification */ + mutex_lock(&text_mutex); + ret = copy_from_kernel_nofault(arch_data->old_code, + old_func, JMP_E9_INSN_SIZE); + mutex_unlock(&text_mutex); + + return ret; } int arch_klp_patch_func(struct klp_func *func) { struct klp_func_node *func_node; unsigned long ip, new_addr; - void *new; + unsigned char *new; func_node = func->func_node; ip = (unsigned long)func->old_func; list_add_rcu(&func->stack_node, &func_node->func_stack); new_addr = (unsigned long)func->new_func; /* replace the text with the new text */ - new = klp_jmp_code(ip, new_addr); + new = (unsigned char *)klp_jmp_code(ip, new_addr); +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + /* update jmp offset */ + text_poke((void *)(ip + 1), new + 1, JMP_E9_INSN_SIZE - 1); + /* update jmp opcode */ + text_poke((void *)ip, new, 1); +#else text_poke((void *)ip, new, JMP_E9_INSN_SIZE); +#endif return 0; } @@ -412,11 +523,10 @@ void arch_klp_unpatch_func(struct klp_func *func) func_node = func->func_node; ip = (unsigned long)func_node->old_func; - if (list_is_singular(&func_node->func_stack)) { - list_del_rcu(&func->stack_node); + list_del_rcu(&func->stack_node); + if (list_empty(&func_node->func_stack)) { new = func_node->arch_data.old_code; } else { - list_del_rcu(&func->stack_node); next_func = list_first_or_null_rcu(&func_node->func_stack, struct klp_func, stack_node); diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c old mode 100644 new mode 100755 index 035cd1e1ede5c879db831f5d0c67ce1017957200..ca588a3ac01bcbf8a44c5c3eae0de0914d473ea0 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "process.h" @@ -918,8 +919,8 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) */ unsigned long get_wchan(struct task_struct *p) { - unsigned long start, bottom, top, sp, fp, ip, ret = 0; - int count = 0; + struct unwind_state state; + unsigned long addr = 0; if (p == current || p->state == TASK_RUNNING) return 0; @@ -927,49 +928,19 @@ unsigned long get_wchan(struct task_struct *p) if (!try_get_task_stack(p)) return 0; - start = (unsigned long)task_stack_page(p); - if (!start) - goto out; - - /* - * Layout of the stack page: - * - * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long) - * PADDING - * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING - * stack - * ----------- bottom = start - * - * The tasks stack pointer points at the location where the - * framepointer is stored. 
The data on the stack is: - * ... IP FP ... IP FP - * - * We need to read FP and IP, so we need to adjust the upper - * bound by another unsigned long. - */ - top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; - top -= 2 * sizeof(unsigned long); - bottom = start; - - sp = READ_ONCE(p->thread.sp); - if (sp < bottom || sp > top) - goto out; - - fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp); - do { - if (fp < bottom || fp > top) - goto out; - ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long))); - if (!in_sched_functions(ip)) { - ret = ip; - goto out; - } - fp = READ_ONCE_NOCHECK(*(unsigned long *)fp); - } while (count++ < 16 && p->state != TASK_RUNNING); - -out: + for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state); + unwind_next_frame(&state)) { + addr = unwind_get_return_address(&state); + if (!addr) + break; + if (in_sched_functions(addr)) + continue; + break; + } + put_task_stack(p); - return ret; + + return addr; } long do_arch_prctl_common(struct task_struct *task, int option, diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 4f2f54e1281c3f1d35d4166c8835f71bf7954f99..98bf8fd189025d02168436669b28f11cfad81b73 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -159,14 +159,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; - struct fpu *prev_fpu = &prev->fpu; - struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) - switch_fpu_prepare(prev_fpu, cpu); + switch_fpu_prepare(prev_p, cpu); /* * Save away %gs. No need to save %fs, as it was saved on the @@ -213,7 +211,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) this_cpu_write(current_task, next_p); - switch_fpu_finish(next_fpu); + switch_fpu_finish(next_p); /* Load the Intel cache allocation PQR MSR. */ resctrl_sched_in(); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index df342bedea88afc1e8571d6e20024bf477569c82..ad3f82a18de9df1e151e41ea3c5f184a493b9cac 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -535,15 +535,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread; struct thread_struct *next = &next_p->thread; - struct fpu *prev_fpu = &prev->fpu; - struct fpu *next_fpu = &next->fpu; int cpu = smp_processor_id(); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(irq_count) != -1); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) - switch_fpu_prepare(prev_fpu, cpu); + switch_fpu_prepare(prev_p, cpu); /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). @@ -595,7 +593,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) this_cpu_write(current_task, next_p); this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p)); - switch_fpu_finish(next_fpu); + switch_fpu_finish(next_p); /* Reload sp0. */ update_task_stack(next_p); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e50da62a67a23f85a926c6f163d052fac1a9e505..df951a8f39d28bd24d5eea0ce986552a9edd49e1 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -113,17 +113,9 @@ void __noreturn machine_real_restart(unsigned int type) spin_unlock(&rtc_lock); /* - * Switch back to the initial page table. 
+ * Switch to the trampoline page table. */ -#ifdef CONFIG_X86_32 - load_cr3(initial_page_table); -#else - write_cr3(real_mode_header->trampoline_pgd); - - /* Exiting long mode will fail if CR4.PCIDE is set. */ - if (boot_cpu_has(X86_FEATURE_PCID)) - cr4_clear_bits(X86_CR4_PCIDE); -#endif + load_trampoline_pgtable(); /* Jump to the identity-mapped low memory code */ #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 62df2aa1ac32ae0f6a82964e906dacf6f52431c3..85979c1a404e933654642d7f07ba062976e9cd2b 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -372,21 +372,41 @@ static void __init parse_setup_data(void) static void __init memblock_x86_reserve_range_setup_data(void) { + struct setup_indirect *indirect; struct setup_data *data; - u64 pa_data; + u64 pa_data, pa_next; + u32 len; pa_data = boot_params.hdr.setup_data; while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); + if (!data) { + pr_warn("setup: failed to memremap setup_data entry\n"); + return; + } + + len = sizeof(*data); + pa_next = data->next; + memblock_reserve(pa_data, sizeof(*data) + data->len); - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) - memblock_reserve(((struct setup_indirect *)data->data)->addr, - ((struct setup_indirect *)data->data)->len); + if (data->type == SETUP_INDIRECT) { + len += data->len; + early_memunmap(data, sizeof(*data)); + data = early_memremap(pa_data, len); + if (!data) { + pr_warn("setup: failed to memremap indirect setup_data\n"); + return; + } - pa_data = data->next; - early_memunmap(data, sizeof(*data)); + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) + memblock_reserve(indirect->addr, indirect->len); + } + + pa_data = pa_next; + early_memunmap(data, len); } } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 5da01819fb479858989a0f5a6c18b4a58fafde0a..696ec85164e626ef32fb2c81d10cacc2cbdeff18 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -62,6 +62,10 @@ #include #include +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +#include +#endif + #ifdef CONFIG_X86_64 #include #include @@ -654,10 +658,17 @@ static bool do_int3(struct pt_regs *regs) if (kprobe_int3_handler(regs)) return true; #endif + +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + if (klp_int3_handler(regs)) + return true; +#endif + res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP); return res == NOTIFY_STOP; } +NOKPROBE_SYMBOL(do_int3); static void do_int3_user(struct pt_regs *regs) { diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index f92dfd8ef10dbca080c0cf21a3bc45792cab4bea..71fb38d83e4ad0bcd74ea5082699b92c30bb4e53 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -84,6 +84,18 @@ config KVM_INTEL To compile this as a module, choose M here: the module will be called kvm-intel. +config X86_SGX_KVM + bool "Software Guard eXtensions (SGX) Virtualization" + depends on X86_SGX && KVM_INTEL + help + + Enables KVM guests to create SGX enclaves. + + This includes support to expose "raw" unreclaimable enclave memory to + guests via a device node, e.g. /dev/sgx_vepc. + + If unsure, say N. 
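# Editorial sketch (comment only): with X86_SGX_KVM=y the host exposes raw
# EPC via a device node, e.g. /dev/sgx_vepc, which a VMM could map for a
# guest roughly as follows, assuming the node name and an "epc_size"
# chosen by the VMM:
#
#	int fd = open("/dev/sgx_vepc", O_RDWR);
#	void *epc = mmap(NULL, epc_size, PROT_READ | PROT_WRITE,
#			 MAP_SHARED, fd, 0);
#	/* hand "epc" to KVM as the guest's virtual EPC section */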
+ config KVM_AMD tristate "KVM for AMD processors support" depends on KVM diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index b804444e16d47d017db4d24c9a52872701199128..1c6c9adcb730f64d0914162f45bc545cbe8e2cfc 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -20,6 +20,8 @@ kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \ vmx/evmcs.o vmx/nested.o vmx/posted_intr.o +kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o + kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o obj-$(CONFIG_KVM) += kvm.o diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 41b0dc37720e0f0b572827c6e50d343aa6b43552..911c6efe60aa97683c586fe28c0afc62916c8110 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "cpuid.h" #include "lapic.h" #include "mmu.h" @@ -28,7 +29,7 @@ * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be * aligned to sizeof(unsigned long) because it's not accessed via bitops. */ -u32 kvm_cpu_caps[NCAPINTS] __read_mostly; +u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly; EXPORT_SYMBOL_GPL(kvm_cpu_caps); static u32 xstate_required_size(u64 xstate_bv, bool compacted) @@ -53,6 +54,7 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted) } #define F feature_bit +#define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0) static inline struct kvm_cpuid_entry2 *cpuid_entry2_find( struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index) @@ -169,6 +171,21 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vcpu->arch.guest_supported_xcr0 = (best->eax | ((u64)best->edx << 32)) & supported_xcr0; + /* + * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate + * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's + * requested XCR0 value. The enclave's XFRM must be a subset of XCR0 + * at the time of EENTER, thus adjust the allowed XFRM by the guest's + * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to + * '1' even on CPUs that don't support XSAVE. + */ + best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1); + if (best) { + best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff; + best->edx &= vcpu->arch.guest_supported_xcr0 >> 32; + best->ecx |= XFEATURE_MASK_FPSSE; + } + kvm_update_pv_runtime(vcpu); vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); @@ -330,13 +347,13 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, return r; } -static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask) +/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */ +static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf) { const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32); struct kvm_cpuid_entry2 entry; reverse_cpuid_check(leaf); - kvm_cpu_caps[leaf] &= mask; cpuid_count(cpuid.function, cpuid.index, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx); @@ -344,6 +361,27 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask) kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg); } +static __always_inline +void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask) +{ + /* Use kvm_cpu_cap_mask for non-scattered leafs.
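(Editorial: "scattered" means the kernel stores the bit outside its hardware-defined CPUID word; e.g. this patch translates X86_FEATURE_SGX1 to KVM_X86_FEATURE_SGX1, i.e. CPUID.0x12.0x0.EAX bit 0, via __feature_translate().)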
*/ + BUILD_BUG_ON(leaf < NCAPINTS); + + kvm_cpu_caps[leaf] = mask; + + __kvm_cpu_cap_mask(leaf); +} + +static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask) +{ + /* Use kvm_cpu_cap_init_scattered for scattered leafs. */ + BUILD_BUG_ON(leaf >= NCAPINTS); + + kvm_cpu_caps[leaf] &= mask; + + __kvm_cpu_cap_mask(leaf); +} + void kvm_set_cpu_caps(void) { unsigned int f_nx = is_efer_nx() ? F(NX) : 0; @@ -354,12 +392,13 @@ void kvm_set_cpu_caps(void) unsigned int f_gbpages = 0; unsigned int f_lm = 0; #endif + memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps)); - BUILD_BUG_ON(sizeof(kvm_cpu_caps) > + BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) > sizeof(boot_cpu_data.x86_capability)); memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability, - sizeof(kvm_cpu_caps)); + sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps))); kvm_cpu_cap_mask(CPUID_1_ECX, /* @@ -390,7 +429,7 @@ void kvm_set_cpu_caps(void) ); kvm_cpu_cap_mask(CPUID_7_0_EBX, - F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) | + F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) | F(BMI2) | F(ERMS) | 0 /*INVPCID*/ | F(RTM) | 0 /*MPX*/ | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) | @@ -401,7 +440,8 @@ void kvm_set_cpu_caps(void) F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) | F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) | F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) | - F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ + F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ | + F(SGX_LC) ); /* Set LA57 based on hardware capability. */ if (cpuid_ecx(7) & F(LA57)) @@ -440,6 +480,10 @@ void kvm_set_cpu_caps(void) F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) ); + kvm_cpu_cap_init_scattered(CPUID_12_EAX, + SF(SGX1) | SF(SGX2) + ); + kvm_cpu_cap_mask(CPUID_8000_0001_ECX, F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | @@ -763,6 +807,38 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->edx = 0; } break; + case 0x12: + /* Intel SGX */ + if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) { + entry->eax = entry->ebx = entry->ecx = entry->edx = 0; + break; + } + + /* + * Index 0: Sub-features, MISCSELECT (a.k.a extended features) + * and max enclave sizes. The SGX sub-features and MISCSELECT + * are restricted by kernel and KVM capabilities (like most + * feature flags), while enclave size is unrestricted. + */ + cpuid_entry_override(entry, CPUID_12_EAX); + entry->ebx &= SGX_MISC_EXINFO; + + entry = do_host_cpuid(array, function, 1); + if (!entry) + goto out; + + /* + * Index 1: SECS.ATTRIBUTES. ATTRIBUTES are restricted a la + * feature flags. Advertise all supported flags, including + * privileged attributes that require explicit opt-in from + * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is + * expected to derive it from supported XCR0. 
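+	 * (Editorial: kvm_vcpu_after_set_cpuid() later clamps this leaf's
+	 * ECX/EDX by the guest's supported XCR0 and forces FP/SSE to '1',
+	 * so the advertised XFRM can never exceed what the guest can set
+	 * in XCR0.)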
+ */ + entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT | + SGX_ATTR_PROVISIONKEY | SGX_ATTR_EINITTOKENKEY | + SGX_ATTR_KSS; + entry->ebx &= 0; + break; /* Intel PT */ case 0x14: if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) { diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index dc921d76e42e8b353989ea0a96962640085e95b8..9fbc0de7c337b8fb4c3e2b3ce27aa659eaf95438 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -7,7 +7,25 @@ #include #include -extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly; +/* + * Hardware-defined CPUID leafs that are scattered in the kernel, but need to + * be directly used by KVM. Note, these word values conflict with the kernel's + * "bug" caps, but KVM doesn't use those. + */ +enum kvm_only_cpuid_leafs { + CPUID_12_EAX = NCAPINTS, + NR_KVM_CPU_CAPS, + + NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, +}; + +#define KVM_X86_FEATURE(w, f) ((w)*32 + (f)) + +/* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */ +#define KVM_X86_FEATURE_SGX1 KVM_X86_FEATURE(CPUID_12_EAX, 0) +#define KVM_X86_FEATURE_SGX2 KVM_X86_FEATURE(CPUID_12_EAX, 1) + +extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly; void kvm_set_cpu_caps(void); void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu); @@ -63,6 +81,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX}, [CPUID_7_EDX] = { 7, 0, CPUID_EDX}, [CPUID_7_1_EAX] = { 7, 1, CPUID_EAX}, + [CPUID_12_EAX] = {0x00000012, 0, CPUID_EAX}, }; /* @@ -83,6 +102,25 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf) BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0); } +/* + * Translate feature bits that are scattered in the kernel's cpufeatures word + * into KVM feature words that align with hardware's definitions. + */ +static __always_inline u32 __feature_translate(int x86_feature) +{ + if (x86_feature == X86_FEATURE_SGX1) + return KVM_X86_FEATURE_SGX1; + else if (x86_feature == X86_FEATURE_SGX2) + return KVM_X86_FEATURE_SGX2; + + return x86_feature; +} + +static __always_inline u32 __feature_leaf(int x86_feature) +{ + return __feature_translate(x86_feature) / 32; +} + /* * Retrieve the bit mask from an X86_FEATURE_* definition. 
Features contain * the hardware defined bit number (stored in bits 4:0) and a software defined @@ -91,6 +129,8 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf) */ static __always_inline u32 __feature_bit(int x86_feature) { + x86_feature = __feature_translate(x86_feature); + reverse_cpuid_check(x86_feature / 32); return 1 << (x86_feature & 31); } @@ -99,7 +139,7 @@ static __always_inline u32 __feature_bit(int x86_feature) static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature) { - unsigned int x86_leaf = x86_feature / 32; + unsigned int x86_leaf = __feature_leaf(x86_feature); reverse_cpuid_check(x86_leaf); return reverse_cpuid[x86_leaf]; @@ -178,7 +218,7 @@ static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry, } static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry, - enum cpuid_leafs leaf) + unsigned int leaf) { u32 *reg = cpuid_entry_get_reg(entry, leaf * 32); @@ -291,7 +331,7 @@ static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu) static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature) { - unsigned int x86_leaf = x86_feature / 32; + unsigned int x86_leaf = __feature_leaf(x86_feature); reverse_cpuid_check(x86_leaf); kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature); @@ -299,7 +339,7 @@ static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature) static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature) { - unsigned int x86_leaf = x86_feature / 32; + unsigned int x86_leaf = __feature_leaf(x86_feature); reverse_cpuid_check(x86_leaf); kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature); @@ -307,7 +347,7 @@ static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature) static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature) { - unsigned int x86_leaf = x86_feature / 32; + unsigned int x86_leaf = __feature_leaf(x86_feature); reverse_cpuid_check(x86_leaf); return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index e82151ba95c091ffce968e3422536a5484ba500c..71e1a2d39f21893e1bc4592095572636f9623c58 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -1718,11 +1718,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, goto exception; } - if (!seg_desc.p) { - err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; - goto exception; - } - dpl = seg_desc.dpl; switch (seg) { @@ -1762,6 +1757,10 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; + if (!seg_desc.p) { + err_vec = NP_VECTOR; + goto exception; + } old_desc = seg_desc; seg_desc.type |= 2; /* busy */ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, @@ -1786,6 +1785,11 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, break; } + if (!seg_desc.p) { + err_vec = (seg == VCPU_SREG_SS) ? 
SS_VECTOR : NP_VECTOR; + goto exception; + } + if (seg_desc.s) { /* mark segment as accessed */ if (!(seg_desc.type & 1)) { @@ -3607,8 +3611,10 @@ static int em_rdpid(struct x86_emulate_ctxt *ctxt) { u64 tsc_aux = 0; - if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux)) + if (!ctxt->ops->guest_has_rdpid(ctxt)) return emulate_ud(ctxt); + + ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux); ctxt->dst.val = tsc_aux; return X86EMUL_CONTINUE; } diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 328f37e4fd3a723a48b198d56141aa2d99be1dbc..d806139377bc684aca5fef4593c34ec86f8151fc 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -207,7 +207,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, struct kvm_vcpu *vcpu = synic_to_vcpu(synic); int ret; - if (!synic->active && !host) + if (!synic->active && (!host || data)) return 1; trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); @@ -253,6 +253,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, case HV_X64_MSR_EOM: { int i; + if (!synic->active) + break; + for (i = 0; i < ARRAY_SIZE(synic->sint); i++) kvm_hv_notify_acked_sint(vcpu, i); break; @@ -636,7 +639,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); - if (!synic->active && !host) + if (!synic->active && (!host || config)) return 1; trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id, @@ -660,7 +663,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer); struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu); - if (!synic->active && !host) + if (!synic->active && (!host || count)) return 1; trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id, diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 7d5be04dc66168ee1510fb8dd99d7649465b6103..aeed6da60e0c722667fc0cee7095fd14e5950828 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -225,6 +225,7 @@ struct x86_emulate_ops { bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt); bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt); bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt); + bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt); void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 70dcb723a0f9d7887a178d1cedb4860b71a9fbd7..d62390885a9d40c00b90933007e6e656a1d5464b 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2232,10 +2232,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8) { - struct kvm_lapic *apic = vcpu->arch.apic; - - apic_set_tpr(apic, ((cr8 & 0x0f) << 4) - | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4)); + apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4); } u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c old mode 100644 new mode 100755 index c2516ddc3cbec958a78473aa46c927d8885018ec..99ae11011ed4e7f0a9ab5683fc7a1f3c34ed1d68 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3631,12 +3631,23 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) walk_shadow_page_lockless_end(vcpu); } +static u32 alloc_apf_token(struct kvm_vcpu *vcpu) +{ + /* make sure the token value is not 0 */ + u32 id = vcpu->arch.apf.id; + + if (id << 12 == 0) + 
vcpu->arch.apf.id = 1; + + return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; +} + static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, gfn_t gfn) { struct kvm_arch_async_pf arch; - arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; + arch.token = alloc_apf_token(vcpu); arch.gfn = gfn; arch.direct_map = vcpu->arch.mmu->direct_map; arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu); @@ -5165,14 +5176,16 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid) uint i; if (pcid == kvm_get_active_pcid(vcpu)) { - mmu->invlpg(vcpu, gva, mmu->root_hpa); + if (mmu->invlpg) + mmu->invlpg(vcpu, gva, mmu->root_hpa); tlb_flush = true; } for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { if (VALID_PAGE(mmu->prev_roots[i].hpa) && pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) { - mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); + if (mmu->invlpg) + mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); tlb_flush = true; } } @@ -5865,12 +5878,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp) return 0; } -int kvm_mmu_module_init(void) +/* + * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as + * its default value of -1 is technically undefined behavior for a boolean. + */ +void kvm_mmu_x86_module_init(void) { - int ret = -ENOMEM; - if (nx_huge_pages == -1) __set_nx_huge_pages(get_nx_auto_mode()); +} + +/* + * The bulk of the MMU initialization is deferred until the vendor module is + * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need + * to be reset when a potentially different vendor module is loaded. + */ +int kvm_mmu_vendor_module_init(void) +{ + int ret = -ENOMEM; /* * MMU roles use union aliasing which is, generally speaking, an @@ -5944,7 +5969,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu) mmu_free_memory_caches(vcpu); } -void kvm_mmu_module_exit(void) +void kvm_mmu_vendor_module_exit(void) { mmu_destroy_caches(); percpu_counter_destroy(&kvm_total_used_mmu_pages); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index f8829134bf3413c8141e1c74cabb888853a90a15..c6daeeff1d9c9d958e08375c8baaedc295f7069f 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -34,9 +34,8 @@ #define PT_HAVE_ACCESSED_DIRTY(mmu) true #ifdef CONFIG_X86_64 #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL - #define CMPXCHG cmpxchg + #define CMPXCHG "cmpxchgq" #else - #define CMPXCHG cmpxchg64 #define PT_MAX_FULL_LEVELS 2 #endif #elif PTTYPE == 32 @@ -52,7 +51,7 @@ #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT #define PT_HAVE_ACCESSED_DIRTY(mmu) true - #define CMPXCHG cmpxchg + #define CMPXCHG "cmpxchgl" #elif PTTYPE == PTTYPE_EPT #define pt_element_t u64 #define guest_walker guest_walkerEPT @@ -65,7 +64,9 @@ #define PT_GUEST_DIRTY_SHIFT 9 #define PT_GUEST_ACCESSED_SHIFT 8 #define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad) - #define CMPXCHG cmpxchg64 + #ifdef CONFIG_X86_64 + #define CMPXCHG "cmpxchgq" + #endif #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL #else #error Invalid PTTYPE value @@ -147,43 +148,39 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, pt_element_t __user *ptep_user, unsigned index, pt_element_t orig_pte, pt_element_t new_pte) { - int npages; - pt_element_t ret; - pt_element_t *table; - struct page *page; - - npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page); - if (likely(npages == 1)) { - table = kmap_atomic(page); - 
ret = CMPXCHG(&table[index], orig_pte, new_pte); - kunmap_atomic(table); - - kvm_release_page_dirty(page); - } else { - struct vm_area_struct *vma; - unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK; - unsigned long pfn; - unsigned long paddr; - - mmap_read_lock(current->mm); - vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE); - if (!vma || !(vma->vm_flags & VM_PFNMAP)) { - mmap_read_unlock(current->mm); - return -EFAULT; - } - pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - paddr = pfn << PAGE_SHIFT; - table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB); - if (!table) { - mmap_read_unlock(current->mm); - return -EFAULT; - } - ret = CMPXCHG(&table[index], orig_pte, new_pte); - memunmap(table); - mmap_read_unlock(current->mm); - } + int r = -EFAULT; + + if (!user_access_begin(ptep_user, sizeof(pt_element_t))) + return -EFAULT; + +#ifdef CMPXCHG + asm volatile("1:" LOCK_PREFIX CMPXCHG " %[new], %[ptr]\n" + "mov $0, %[r]\n" + "setnz %b[r]\n" + "2:" + _ASM_EXTABLE_UA(1b, 2b) + : [ptr] "+m" (*ptep_user), + [old] "+a" (orig_pte), + [r] "+q" (r) + : [new] "r" (new_pte) + : "memory"); +#else + asm volatile("1:" LOCK_PREFIX "cmpxchg8b %[ptr]\n" + "movl $0, %[r]\n" + "jz 2f\n" + "incl %[r]\n" + "2:" + _ASM_EXTABLE_UA(1b, 2b) + : [ptr] "+m" (*ptep_user), + [old] "+A" (orig_pte), + [r] "+rm" (r) + : [new_lo] "b" ((u32)new_pte), + [new_hi] "c" ((u32)(new_pte >> 32)) + : "memory"); +#endif - return (ret != orig_pte); + user_access_end(); + return r; } static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index f2ddf663e72e92a0f871522dc97e776ac283fc29..073514bbb5f715684d8ef584583d6dc27a732c9c 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -902,6 +902,9 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, if (tdp_mmu_iter_cond_resched(kvm, &iter, false)) continue; + if (!is_shadow_present_pte(iter.old_spte)) + continue; + if (spte_ad_need_write_protect(iter.old_spte)) { if (is_writable_pte(iter.old_spte)) new_spte = iter.old_spte & ~PT_WRITABLE_MASK; @@ -1130,12 +1133,12 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, bool spte_set = false; tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) { - if (!is_writable_pte(iter.old_spte)) - break; - new_spte = iter.old_spte & ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE); + if (new_spte == iter.old_spte) + break; + tdp_mmu_set_spte(kvm, &iter, new_spte); spte_set = true; } diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 136dc2f3c5d318741115e4412fc05224610c120d..f2c3869475d962478d6e4f5bb8b67b5608d3b4ec 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -95,7 +95,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event, } static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, - unsigned config, bool exclude_user, + u64 config, bool exclude_user, bool exclude_kernel, bool intr, bool in_tx, bool in_tx_cp) { @@ -170,8 +170,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc) void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) { - unsigned config, type = PERF_TYPE_RAW; - u8 event_select, unit_mask; + u64 config; + u32 type = PERF_TYPE_RAW; struct kvm *kvm = pmc->vcpu->kvm; struct kvm_pmu_event_filter *filter; int i; @@ -203,23 +203,18 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel) if (!allow_event) return; - event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT; - unit_mask = (eventsel & 
ARCH_PERFMON_EVENTSEL_UMASK) >> 8; - if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE | ARCH_PERFMON_EVENTSEL_INV | ARCH_PERFMON_EVENTSEL_CMASK | HSW_IN_TX | HSW_IN_TX_CHECKPOINTED))) { - config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc), - event_select, - unit_mask); + config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc); if (config != PERF_COUNT_HW_MAX) type = PERF_TYPE_HARDWARE; } if (type == PERF_TYPE_RAW) - config = eventsel & X86_RAW_EVENT_MASK; + config = eventsel & AMD64_RAW_EVENT_MASK; if (pmc->current_config == eventsel && pmc_resume_counter(pmc)) return; diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 7b30bc967af38c2da2781cba6dbb0d1fb906ff15..cd35624595bf358fc653ea8808e2f47fbb4ff772 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -24,8 +24,7 @@ struct kvm_event_hw_type_mapping { }; struct kvm_pmu_ops { - unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select, - u8 unit_mask); + unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc); unsigned (*find_fixed_event)(int idx); bool (*pmc_is_enabled)(struct kvm_pmc *pmc); struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 8c550999ace0c26066824c59fe3fb8b616659ace..3e5cb74c0b5386f4b81a3e7845d1353d1e92dbbc 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -344,8 +344,6 @@ int avic_incomplete_ipi_interception(struct vcpu_svm *svm) break; } case AVIC_IPI_FAILURE_INVALID_TARGET: - WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n", - index, svm->vcpu.vcpu_id, icrh, icrl); break; case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE: WARN_ONCE(1, "Invalid backing page\n"); @@ -808,7 +806,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, { struct kvm_kernel_irq_routing_entry *e; struct kvm_irq_routing_table *irq_rt; - int idx, ret = -EINVAL; + int idx, ret = 0; if (!kvm_arch_has_assigned_device(kvm) || !irq_remapping_cap(IRQ_POSTING_CAP)) @@ -819,7 +817,13 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, idx = srcu_read_lock(&kvm->irq_srcu); irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); - WARN_ON(guest_irq >= irq_rt->nr_rt_entries); + + if (guest_irq >= irq_rt->nr_rt_entries || + hlist_empty(&irq_rt->map[guest_irq])) { + pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n", + guest_irq, irq_rt->nr_rt_entries); + goto out; + } hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { struct vcpu_data vcpu_info; diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index f0946872f5e6d5f298db28d4327502f734ffc91b..23910e6a3f011ccdf30b90954fbf253061cc65ec 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -783,8 +783,10 @@ void svm_free_nested(struct vcpu_svm *svm) /* * Forcibly leave nested mode in order to be able to reset the VCPU later on. 
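 * (Editorial: per this patch, callers include svm_set_efer() when the
 * guest clears EFER.SVME and svm_set_nested_state() when userspace
 * restores state without KVM_STATE_NESTED_GUEST_MODE set.)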
*/ -void svm_leave_nested(struct vcpu_svm *svm) +void svm_leave_nested(struct kvm_vcpu *vcpu) { + struct vcpu_svm *svm = to_svm(vcpu); + if (is_guest_mode(&svm->vcpu)) { struct vmcb *hsave = svm->nested.hsave; struct vmcb *vmcb = svm->vmcb; @@ -1185,7 +1187,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, return -EINVAL; if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) { - svm_leave_nested(svm); + svm_leave_nested(vcpu); svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); return 0; } @@ -1238,6 +1240,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, copy_vmcb_control_area(&hsave->control, &svm->vmcb->control); hsave->save = *save; + if (is_guest_mode(vcpu)) + svm_leave_nested(vcpu); + svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; load_nested_vmcb_control(svm, ctl); nested_prepare_vmcb_control(svm); @@ -1252,6 +1257,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu, } struct kvm_x86_nested_ops svm_nested_ops = { + .leave_nested = svm_leave_nested, .check_events = svm_check_nested_events, .get_nested_state_pages = svm_get_nested_state_pages, .get_state = svm_get_nested_state, diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 5a5c165a30ed1fb89e91c78221d1701935df639e..0e9c2322d3988b2d54be8e9f3a3e0dddfd2b7ec8 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -126,10 +126,10 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, return &pmu->gp_counters[msr_to_index(msr)]; } -static unsigned amd_find_arch_event(struct kvm_pmu *pmu, - u8 event_select, - u8 unit_mask) +static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc) { + u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT; + u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; int i; for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++) @@ -253,12 +253,10 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* MSR_EVNTSELn */ pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL); if (pmc) { - if (data == pmc->eventsel) - return 0; - if (!(data & pmu->reserved_bits)) { + data &= ~pmu->reserved_bits; + if (data != pmc->eventsel) reprogram_gp_counter(pmc, data); - return 0; - } + return 0; } return 1; @@ -312,7 +310,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu) } struct kvm_pmu_ops amd_pmu_ops = { - .find_arch_event = amd_find_arch_event, + .pmc_perf_hw_id = amd_pmc_perf_hw_id, .find_fixed_event = amd_find_fixed_event, .pmc_is_enabled = amd_pmc_is_enabled, .pmc_idx_to_pmc = amd_pmc_idx_to_pmc, diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 6c82ef22985d98e8820638c70e396f58345c7a0e..7828b36d67c1f89846549b55b7186481719037ae 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1177,6 +1177,14 @@ void sev_hardware_teardown(void) sev_flush_asids(); } +void sev_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!sev_guest(kvm)) + return; + + wbinvd_on_all_cpus(); +} + void pre_sev_run(struct vcpu_svm *svm, int cpu) { struct svm_cpu_data *sd = per_cpu(svm_data, cpu); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 5e1d7396a6b8a3c1cebf620bbefaf79c643cd137..2124fe54abfb5e2ff655a8793c9813ebade5096a 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -279,7 +279,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer) if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) { if (!(efer & EFER_SVME)) { - svm_leave_nested(svm); + svm_leave_nested(vcpu); svm_set_gif(svm, true); /* @@ -4103,6 +4103,10 @@ static bool 
svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i bool smep, smap, is_user; unsigned long cr4; + /* Emulation is always possible when KVM has access to all guest state. */ + if (!sev_guest(vcpu->kvm)) + return true; + /* * Detect and workaround Errata 1096 Fam_17h_00_0Fh. * @@ -4146,23 +4150,27 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i if (likely(!insn || insn_len)) return true; - /* - * If RIP is invalid, go ahead with emulation which will cause an - * internal error exit. - */ - if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT)) - return true; - cr4 = kvm_read_cr4(vcpu); smep = cr4 & X86_CR4_SMEP; smap = cr4 & X86_CR4_SMAP; is_user = svm_get_cpl(vcpu) == 3; if (smap && (!smep || is_user)) { - if (!sev_guest(vcpu->kvm)) - return true; - pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n"); - kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); + + /* + * If the fault occurred in userspace, arbitrarily inject #GP + * to avoid killing the guest and to hopefully avoid confusing + * the guest kernel too much, e.g. injecting #PF would not be + * coherent with respect to the guest's page tables. Request + * triple fault if the fault occurred in the kernel as there's + * no fault that KVM can inject without confusing the guest. + * In practice, the triple fault is moot as no sane SEV kernel + * will execute from user memory while also running with SMAP=1. + */ + if (is_user) + kvm_inject_gp(vcpu, 0); + else + kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); } return false; @@ -4317,6 +4325,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .mem_enc_op = svm_mem_enc_op, .mem_enc_reg_region = svm_register_enc_region, .mem_enc_unreg_region = svm_unregister_enc_region, + .guest_memory_reclaimed = sev_guest_memory_reclaimed, .can_emulate_instruction = svm_can_emulate_instruction, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index be74e22b82ea7332d80670f355c94cd8b1a7ccc5..c707d689b60ee264741e04ad2800a5c8225ab517 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -393,7 +393,7 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm) int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, struct vmcb *nested_vmcb); -void svm_leave_nested(struct vcpu_svm *svm); +void svm_leave_nested(struct kvm_vcpu *vcpu); void svm_free_nested(struct vcpu_svm *svm); int svm_allocate_nested(struct vcpu_svm *svm); int nested_svm_vmrun(struct vcpu_svm *svm); @@ -491,6 +491,8 @@ int svm_register_enc_region(struct kvm *kvm, struct kvm_enc_region *range); int svm_unregister_enc_region(struct kvm *kvm, struct kvm_enc_region *range); +void sev_guest_memory_reclaimed(struct kvm *kvm); + void pre_sev_run(struct vcpu_svm *svm, int cpu); int __init sev_hardware_setup(void); void sev_hardware_teardown(void); diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c index c0d6fee9225feb9ea672396a634f4813d1409b8d..5b68034ec5f9c336d628ad9036b6589ed5970550 100644 --- a/arch/x86/kvm/vmx/evmcs.c +++ b/arch/x86/kvm/vmx/evmcs.c @@ -361,6 +361,7 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata) case MSR_IA32_VMX_PROCBASED_CTLS2: ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC; break; + case MSR_IA32_VMX_TRUE_PINBASED_CTLS: case MSR_IA32_VMX_PINBASED_CTLS: ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; break; diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h index bd41d9462355fd65909554b41b3ce49654ef3e4d..011929a63823084e21e2dc5575e1567bbc478ee7 100644 --- a/arch/x86/kvm/vmx/evmcs.h +++ 
b/arch/x86/kvm/vmx/evmcs.h @@ -59,7 +59,9 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs); SECONDARY_EXEC_SHADOW_VMCS | \ SECONDARY_EXEC_TSC_SCALING | \ SECONDARY_EXEC_PAUSE_LOOP_EXITING) -#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) +#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL \ + (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \ + VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) #define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) #define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 36661b15c3d047ec7c28a6d4d102a03c15deaf93..d2a0464e95c7a8351b70809b4148277ec3d11c47 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -11,6 +11,7 @@ #include "mmu.h" #include "nested.h" #include "pmu.h" +#include "sgx.h" #include "trace.h" #include "x86.h" @@ -2326,6 +2327,9 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST)) exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; + if (exec_control & SECONDARY_EXEC_ENCLS_EXITING) + vmx_write_encls_bitmap(&vmx->vcpu, vmcs12); + secondary_exec_controls_set(vmx, exec_control); } @@ -4142,6 +4146,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, { /* update exit information fields: */ vmcs12->vm_exit_reason = vm_exit_reason; + if (to_vmx(vcpu)->exit_reason.enclave_mode) + vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE; vmcs12->exit_qualification = exit_qualification; vmcs12->vm_exit_intr_info = exit_intr_info; @@ -5736,6 +5742,21 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, return false; } +static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + u32 encls_leaf; + + if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) || + !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING)) + return false; + + encls_leaf = kvm_rax_read(vcpu); + if (encls_leaf > 62) + encls_leaf = 63; + return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf); +} + static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, gpa_t bitmap) { @@ -5833,9 +5854,6 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, case EXIT_REASON_VMFUNC: /* VM functions are emulated through L2->L0 vmexits. 
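(Editorial: by contrast, ENCLS exits are no longer claimed by L0 here; nested_vmx_exit_handled_encls() above reflects them to L1 based on vmcs12's ENCLS-exiting bitmap, clamping leaves above 62 to bit 63.)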
*/ return true; - case EXIT_REASON_ENCLS: - /* SGX is never exposed to L1 */ - return true; default: break; } @@ -5959,6 +5977,8 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, case EXIT_REASON_TPAUSE: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE); + case EXIT_REASON_ENCLS: + return nested_vmx_exit_handled_encls(vcpu, vmcs12); default: return true; } @@ -6534,6 +6554,9 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) msrs->secondary_ctls_high |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + if (enable_sgx) + msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING; + /* miscellaneous data */ rdmsr(MSR_IA32_VMX_MISC, msrs->misc_low, @@ -6628,6 +6651,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *)) } struct kvm_x86_nested_ops vmx_nested_ops = { + .leave_nested = vmx_leave_nested, .check_events = vmx_check_nested_events, .hv_timer_pending = nested_vmx_preemption_timer_pending, .get_state = vmx_get_nested_state, diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h index 197148d76b8fdb8ab8d0dc370e56525f28426608..184418baeb3cbaaf8bd70c2d2e3ea51f87fafdc2 100644 --- a/arch/x86/kvm/vmx/nested.h +++ b/arch/x86/kvm/vmx/nested.h @@ -244,6 +244,11 @@ static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu) PIN_BASED_EXT_INTR_MASK; } +static inline bool nested_cpu_has_encls_exit(struct vmcs12 *vmcs12) +{ + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING); +} + /* * if fixed0[i] == 1: val[i] must be 1 * if fixed1[i] == 0: val[i] must be 0 diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index 9efc1a6b86930ad5879faa487270c8ab459a2da6..44cd1379081078ac7a6f81ea277d9766c95c75e8 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -68,10 +68,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data) reprogram_counter(pmu, bit); } -static unsigned intel_find_arch_event(struct kvm_pmu *pmu, - u8 event_select, - u8 unit_mask) +static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc) { + struct kvm_pmu *pmu = pmc_to_pmu(pmc); + u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT; + u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8; int i; for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) @@ -706,7 +707,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu) } struct kvm_pmu_ops intel_pmu_ops = { - .find_arch_event = intel_find_arch_event, + .pmc_perf_hw_id = intel_pmc_perf_hw_id, .find_fixed_event = intel_find_fixed_event, .pmc_is_enabled = intel_pmc_is_enabled, .pmc_idx_to_pmc = intel_pmc_idx_to_pmc, diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index fbd9b10354790dd6709c60f70cf5ec98a76361cf..5f8acd2faa7c1ea4848b257a5564e54376fe25ec 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -15,7 +15,7 @@ * can find which vCPU should be waken up. 
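 * (Editorial: the per-CPU lock below is converted to a raw_spinlock_t so
 * that pi_wakeup_handler(), which runs with interrupts disabled, does not
 * take a sleeping lock on PREEMPT_RT kernels.)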
*/ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu); -static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock); +static DEFINE_PER_CPU(raw_spinlock_t, blocked_vcpu_on_cpu_lock); static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu) { @@ -121,9 +121,9 @@ static void __pi_post_block(struct kvm_vcpu *vcpu) new.control) != old.control); if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) { - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); list_del(&vcpu->blocked_vcpu_list); - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); vcpu->pre_pcpu = -1; } } @@ -154,11 +154,11 @@ int pi_pre_block(struct kvm_vcpu *vcpu) local_irq_disable(); if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) { vcpu->pre_pcpu = vcpu->cpu; - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); list_add_tail(&vcpu->blocked_vcpu_list, &per_cpu(blocked_vcpu_on_cpu, vcpu->pre_pcpu)); - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); + raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); } do { @@ -215,7 +215,7 @@ void pi_wakeup_handler(void) struct kvm_vcpu *vcpu; int cpu = smp_processor_id(); - spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); + raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), blocked_vcpu_list) { struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); @@ -223,13 +223,13 @@ void pi_wakeup_handler(void) if (pi_test_on(pi_desc) == 1) kvm_vcpu_kick(vcpu); } - spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); + raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); } void __init pi_init_cpu(int cpu) { INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); - spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); + raw_spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); } bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c new file mode 100644 index 0000000000000000000000000000000000000000..6693ebdc07701449bb297de0ef68873f1f53d3b2 --- /dev/null +++ b/arch/x86/kvm/vmx/sgx.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Intel Corporation. */ + +#include + +#include "cpuid.h" +#include "kvm_cache_regs.h" +#include "nested.h" +#include "sgx.h" +#include "vmx.h" +#include "x86.h" + +bool __read_mostly enable_sgx = 1; +module_param_named(sgx, enable_sgx, bool, 0444); + +/* Initial value of guest's virtual SGX_LEPUBKEYHASHn MSRs */ +static u64 sgx_pubkey_hash[4] __ro_after_init; + +/* + * ENCLS's memory operands use a fixed segment (DS) and a fixed + * address size based on the mode. Related prefixes are ignored. + */ +static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, + int size, int alignment, gva_t *gva) +{ + struct kvm_segment s; + bool fault; + + /* Skip vmcs.GUEST_DS retrieval for 64-bit mode to avoid VMREADs. 
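(Editorial: in 64-bit mode the DS base is architecturally zero for ENCLS memory operands, so the offset is the effective address; in 32-bit mode the segment base is added and the result truncated to 32 bits, as done below.)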
*/ + *gva = offset; + if (!is_long_mode(vcpu)) { + vmx_get_segment(vcpu, &s, VCPU_SREG_DS); + *gva += s.base; + } + + if (!IS_ALIGNED(*gva, alignment)) { + fault = true; + } else if (likely(is_long_mode(vcpu))) { + fault = is_noncanonical_address(*gva, vcpu); + } else { + *gva &= 0xffffffff; + fault = (s.unusable) || + (s.type != 2 && s.type != 3) || + (*gva > s.limit) || + ((s.base != 0 || s.limit != 0xffffffff) && + (((u64)*gva + size - 1) > s.limit + 1)); + } + if (fault) + kvm_inject_gp(vcpu, 0); + return fault ? -EINVAL : 0; +} + +static void sgx_handle_emulation_failure(struct kvm_vcpu *vcpu, u64 addr, + unsigned int size) +{ + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 2; + vcpu->run->internal.data[0] = addr; + vcpu->run->internal.data[1] = size; +} + +static int sgx_read_hva(struct kvm_vcpu *vcpu, unsigned long hva, void *data, + unsigned int size) +{ + if (__copy_from_user(data, (void __user *)hva, size)) { + sgx_handle_emulation_failure(vcpu, hva, size); + return -EFAULT; + } + + return 0; +} + +static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write, + gpa_t *gpa) +{ + struct x86_exception ex; + + if (write) + *gpa = kvm_mmu_gva_to_gpa_write(vcpu, gva, &ex); + else + *gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &ex); + + if (*gpa == UNMAPPED_GVA) { + kvm_inject_emulated_page_fault(vcpu, &ex); + return -EFAULT; + } + + return 0; +} + +static int sgx_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long *hva) +{ + *hva = kvm_vcpu_gfn_to_hva(vcpu, PFN_DOWN(gpa)); + if (kvm_is_error_hva(*hva)) { + sgx_handle_emulation_failure(vcpu, gpa, 1); + return -EFAULT; + } + + *hva |= gpa & ~PAGE_MASK; + + return 0; +} + +static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr) +{ + struct x86_exception ex; + + /* + * A non-EPCM #PF indicates a bad userspace HVA. This *should* check + * for PFEC.SGX and not assume any #PF on SGX2 originated in the EPC, + * but the error code isn't (yet) plumbed through the ENCLS helpers. + */ + if (trapnr == PF_VECTOR && !boot_cpu_has(X86_FEATURE_SGX2)) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; + return 0; + } + + /* + * If the guest thinks it's running on SGX2 hardware, inject an SGX + * #PF if the fault matches an EPCM fault signature (#GP on SGX1, + * #PF on SGX2). The assumption is that EPCM faults are much more + * likely than a bad userspace address. 
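+	 * For illustration, the #PF injected below carries error code 0x8003,
+	 * i.e. PFERR_PRESENT_MASK (bit 0) | PFERR_WRITE_MASK (bit 1) |
+	 * PFERR_SGX_MASK (bit 15).
+	 *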
+ */ + if ((trapnr == PF_VECTOR || !boot_cpu_has(X86_FEATURE_SGX2)) && + guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) { + memset(&ex, 0, sizeof(ex)); + ex.vector = PF_VECTOR; + ex.error_code = PFERR_PRESENT_MASK | PFERR_WRITE_MASK | + PFERR_SGX_MASK; + ex.address = gva; + ex.error_code_valid = true; + ex.nested_page_fault = false; + kvm_inject_page_fault(vcpu, &ex); + } else { + kvm_inject_gp(vcpu, 0); + } + return 1; +} + +static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, + struct sgx_pageinfo *pageinfo, + unsigned long secs_hva, + gva_t secs_gva) +{ + struct sgx_secs *contents = (struct sgx_secs *)pageinfo->contents; + struct kvm_cpuid_entry2 *sgx_12_0, *sgx_12_1; + u64 attributes, xfrm, size; + u32 miscselect; + u8 max_size_log2; + int trapnr, ret; + + sgx_12_0 = kvm_find_cpuid_entry(vcpu, 0x12, 0); + sgx_12_1 = kvm_find_cpuid_entry(vcpu, 0x12, 1); + if (!sgx_12_0 || !sgx_12_1) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; + return 0; + } + + miscselect = contents->miscselect; + attributes = contents->attributes; + xfrm = contents->xfrm; + size = contents->size; + + /* Enforce restriction of access to the PROVISIONKEY. */ + if (!vcpu->kvm->arch.sgx_provisioning_allowed && + (attributes & SGX_ATTR_PROVISIONKEY)) { + if (sgx_12_1->eax & SGX_ATTR_PROVISIONKEY) + pr_warn_once("KVM: SGX PROVISIONKEY advertised but not allowed\n"); + kvm_inject_gp(vcpu, 0); + return 1; + } + + /* Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. */ + if ((u32)miscselect & ~sgx_12_0->ebx || + (u32)attributes & ~sgx_12_1->eax || + (u32)(attributes >> 32) & ~sgx_12_1->ebx || + (u32)xfrm & ~sgx_12_1->ecx || + (u32)(xfrm >> 32) & ~sgx_12_1->edx) { + kvm_inject_gp(vcpu, 0); + return 1; + } + + /* Enforce CPUID restriction on max enclave size. */ + max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? sgx_12_0->edx >> 8 : + sgx_12_0->edx; + if (size >= BIT_ULL(max_size_log2)) + kvm_inject_gp(vcpu, 0); + + /* + * sgx_virt_ecreate() returns: + * 1) 0: ECREATE was successful + * 2) -EFAULT: ECREATE was run but faulted, and trapnr was set to the + * exception number. + * 3) -EINVAL: access_ok() on @secs_hva failed. This should never + * happen as KVM checks host addresses at memslot creation. + * sgx_virt_ecreate() has already warned in this case. + */ + ret = sgx_virt_ecreate(pageinfo, (void __user *)secs_hva, &trapnr); + if (!ret) + return kvm_skip_emulated_instruction(vcpu); + if (ret == -EFAULT) + return sgx_inject_fault(vcpu, secs_gva, trapnr); + + return ret; +} + +static int handle_encls_ecreate(struct kvm_vcpu *vcpu) +{ + gva_t pageinfo_gva, secs_gva; + gva_t metadata_gva, contents_gva; + gpa_t metadata_gpa, contents_gpa, secs_gpa; + unsigned long metadata_hva, contents_hva, secs_hva; + struct sgx_pageinfo pageinfo; + struct sgx_secs *contents; + struct x86_exception ex; + int r; + + if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 32, 32, &pageinfo_gva) || + sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva)) + return 1; + + /* + * Copy the PAGEINFO to local memory, its pointers need to be + * translated, i.e. we need to do a deep copy/translate. 
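+	 * (Editorial sketch of the layout being copied, believed to match the
+	 * SDM: struct sgx_pageinfo { u64 addr; u64 contents; u64 metadata;
+	 * u64 secs; }; "metadata" and "contents" are the embedded pointers
+	 * translated below.)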
+ */ + r = kvm_read_guest_virt(vcpu, pageinfo_gva, &pageinfo, + sizeof(pageinfo), &ex); + if (r == X86EMUL_PROPAGATE_FAULT) { + kvm_inject_emulated_page_fault(vcpu, &ex); + return 1; + } else if (r != X86EMUL_CONTINUE) { + sgx_handle_emulation_failure(vcpu, pageinfo_gva, + sizeof(pageinfo)); + return 0; + } + + if (sgx_get_encls_gva(vcpu, pageinfo.metadata, 64, 64, &metadata_gva) || + sgx_get_encls_gva(vcpu, pageinfo.contents, 4096, 4096, + &contents_gva)) + return 1; + + /* + * Translate the SECINFO, SOURCE and SECS pointers from GVA to GPA. + * Resume the guest on failure to inject a #PF. + */ + if (sgx_gva_to_gpa(vcpu, metadata_gva, false, &metadata_gpa) || + sgx_gva_to_gpa(vcpu, contents_gva, false, &contents_gpa) || + sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa)) + return 1; + + /* + * ...and then to HVA. The order of accesses isn't architectural, i.e. + * KVM doesn't have to fully process one address at a time. Exit to + * userspace if a GPA is invalid. + */ + if (sgx_gpa_to_hva(vcpu, metadata_gpa, &metadata_hva) || + sgx_gpa_to_hva(vcpu, contents_gpa, &contents_hva) || + sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva)) + return 0; + + /* + * Copy contents into kernel memory to prevent TOCTOU attack. E.g. the + * guest could do ECREATE w/ SECS.SGX_ATTR_PROVISIONKEY=0, and + * simultaneously set SGX_ATTR_PROVISIONKEY to bypass the check to + * enforce restriction of access to the PROVISIONKEY. + */ + contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL_ACCOUNT); + if (!contents) + return -ENOMEM; + + /* Exit to userspace if copying from a host userspace address fails. */ + if (sgx_read_hva(vcpu, contents_hva, (void *)contents, PAGE_SIZE)) { + free_page((unsigned long)contents); + return 0; + } + + pageinfo.metadata = metadata_hva; + pageinfo.contents = (u64)contents; + + r = __handle_encls_ecreate(vcpu, &pageinfo, secs_hva, secs_gva); + + free_page((unsigned long)contents); + + return r; +} + +static int handle_encls_einit(struct kvm_vcpu *vcpu) +{ + unsigned long sig_hva, secs_hva, token_hva, rflags; + struct vcpu_vmx *vmx = to_vmx(vcpu); + gva_t sig_gva, secs_gva, token_gva; + gpa_t sig_gpa, secs_gpa, token_gpa; + int ret, trapnr; + + if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 1808, 4096, &sig_gva) || + sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva) || + sgx_get_encls_gva(vcpu, kvm_rdx_read(vcpu), 304, 512, &token_gva)) + return 1; + + /* + * Translate the SIGSTRUCT, SECS and TOKEN pointers from GVA to GPA. + * Resume the guest on failure to inject a #PF. + */ + if (sgx_gva_to_gpa(vcpu, sig_gva, false, &sig_gpa) || + sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa) || + sgx_gva_to_gpa(vcpu, token_gva, false, &token_gpa)) + return 1; + + /* + * ...and then to HVA. The order of accesses isn't architectural, i.e. + * KVM doesn't have to fully process one address at a time. Exit to + * userspace if a GPA is invalid. Note, all structures are aligned and + * cannot split pages. + */ + if (sgx_gpa_to_hva(vcpu, sig_gpa, &sig_hva) || + sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva) || + sgx_gpa_to_hva(vcpu, token_gpa, &token_hva)) + return 0; + + ret = sgx_virt_einit((void __user *)sig_hva, (void __user *)token_hva, + (void __user *)secs_hva, + vmx->msr_ia32_sgxlepubkeyhash, &trapnr); + + if (ret == -EFAULT) + return sgx_inject_fault(vcpu, secs_gva, trapnr); + + /* + * sgx_virt_einit() returns -EINVAL when access_ok() fails on @sig_hva, + * @token_hva or @secs_hva. This should never happen as KVM checks host + * addresses at memslot creation. 
sgx_virt_einit() has already warned + * in this case, so just return. + */ + if (ret < 0) + return ret; + + rflags = vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | + X86_EFLAGS_AF | X86_EFLAGS_SF | + X86_EFLAGS_OF); + if (ret) + rflags |= X86_EFLAGS_ZF; + else + rflags &= ~X86_EFLAGS_ZF; + vmx_set_rflags(vcpu, rflags); + + kvm_rax_write(vcpu, ret); + return kvm_skip_emulated_instruction(vcpu); +} + +static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf) +{ + if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX)) + return false; + + if (leaf >= ECREATE && leaf <= ETRACK) + return guest_cpuid_has(vcpu, X86_FEATURE_SGX1); + + if (leaf >= EAUG && leaf <= EMODT) + return guest_cpuid_has(vcpu, X86_FEATURE_SGX2); + + return false; +} + +static inline bool sgx_enabled_in_guest_bios(struct kvm_vcpu *vcpu) +{ + const u64 bits = FEAT_CTL_SGX_ENABLED | FEAT_CTL_LOCKED; + + return (to_vmx(vcpu)->msr_ia32_feature_control & bits) == bits; +} + +int handle_encls(struct kvm_vcpu *vcpu) +{ + u32 leaf = (u32)kvm_rax_read(vcpu); + + if (!encls_leaf_enabled_in_guest(vcpu, leaf)) { + kvm_queue_exception(vcpu, UD_VECTOR); + } else if (!sgx_enabled_in_guest_bios(vcpu)) { + kvm_inject_gp(vcpu, 0); + } else { + if (leaf == ECREATE) + return handle_encls_ecreate(vcpu); + if (leaf == EINIT) + return handle_encls_einit(vcpu); + WARN(1, "KVM: unexpected exit on ENCLS[%u]", leaf); + vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; + vcpu->run->hw.hardware_exit_reason = EXIT_REASON_ENCLS; + return 0; + } + return 1; +} + +void setup_default_sgx_lepubkeyhash(void) +{ + /* + * Use Intel's default value for Skylake hardware if Launch Control is + * not supported, i.e. Intel's hash is hardcoded into silicon, or if + * Launch Control is supported and enabled, i.e. mimic the reset value + * and let the guest write the MSRs at will. If Launch Control is + * supported but disabled, then use the current MSR values as the hash + * MSRs exist but are read-only (locked and not writable). + */ + if (!enable_sgx || boot_cpu_has(X86_FEATURE_SGX_LC) || + rdmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) { + sgx_pubkey_hash[0] = 0xa6053e051270b7acULL; + sgx_pubkey_hash[1] = 0x6cfbe8ba8b3b413dULL; + sgx_pubkey_hash[2] = 0xc4916d99f2b3735dULL; + sgx_pubkey_hash[3] = 0xd4f8c05909f9bb3bULL; + } else { + /* MSR_IA32_SGXLEPUBKEYHASH0 is read above */ + rdmsrl(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]); + rdmsrl(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]); + rdmsrl(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]); + } +} + +void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu) +{ + struct vcpu_vmx *vmx = to_vmx(vcpu); + + memcpy(vmx->msr_ia32_sgxlepubkeyhash, sgx_pubkey_hash, + sizeof(sgx_pubkey_hash)); +} + +/* + * ECREATE must be intercepted to enforce MISCSELECT, ATTRIBUTES and XFRM + * restrictions if the guest's allowed-1 settings diverge from hardware. 
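+	 * (Editorial: the helper below returns false, i.e. ECREATE is not
+	 * intercepted, only when provisioning is allowed and the guest's
+	 * CPUID.0x12.0x0 and CPUID.0x12.0x1 match raw hardware CPUID.)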
+ */ +static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *guest_cpuid; + u32 eax, ebx, ecx, edx; + + if (!vcpu->kvm->arch.sgx_provisioning_allowed) + return true; + + guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 0); + if (!guest_cpuid) + return true; + + cpuid_count(0x12, 0, &eax, &ebx, &ecx, &edx); + if (guest_cpuid->ebx != ebx || guest_cpuid->edx != edx) + return true; + + guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 1); + if (!guest_cpuid) + return true; + + cpuid_count(0x12, 1, &eax, &ebx, &ecx, &edx); + if (guest_cpuid->eax != eax || guest_cpuid->ebx != ebx || + guest_cpuid->ecx != ecx || guest_cpuid->edx != edx) + return true; + + return false; +} + +void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) +{ + /* + * There is no software enable bit for SGX that is virtualized by + * hardware, e.g. there's no CR4.SGXE, so when SGX is disabled in the + * guest (either by the host or by the guest's BIOS) but enabled in the + * host, trap all ENCLS leafs and inject #UD/#GP as needed to emulate + * the expected system behavior for ENCLS. + */ + u64 bitmap = -1ull; + + /* Nothing to do if hardware doesn't support SGX */ + if (!cpu_has_vmx_encls_vmexit()) + return; + + if (guest_cpuid_has(vcpu, X86_FEATURE_SGX) && + sgx_enabled_in_guest_bios(vcpu)) { + if (guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) { + bitmap &= ~GENMASK_ULL(ETRACK, ECREATE); + if (sgx_intercept_encls_ecreate(vcpu)) + bitmap |= (1 << ECREATE); + } + + if (guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) + bitmap &= ~GENMASK_ULL(EMODT, EAUG); + + /* + * Trap and execute EINIT if launch control is enabled in the + * host using the guest's values for launch control MSRs, even + * if the guest's values are fixed to hardware default values. + * The MSRs are not loaded/saved on VM-Enter/VM-Exit as writing + * the MSRs is extraordinarily expensive. 
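+	 * Illustrative flow (editorial): guest EINIT -> ENCLS VM-exit ->
+	 * handle_encls_einit() -> sgx_virt_einit(..., vmx->msr_ia32_sgxlepubkeyhash, ...),
+	 * i.e. the guest's hash MSR values are consumed by the host-side EINIT.
+	 *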
+ */ + if (boot_cpu_has(X86_FEATURE_SGX_LC)) + bitmap |= (1 << EINIT); + + if (!vmcs12 && is_guest_mode(vcpu)) + vmcs12 = get_vmcs12(vcpu); + if (vmcs12 && nested_cpu_has_encls_exit(vmcs12)) + bitmap |= vmcs12->encls_exiting_bitmap; + } + vmcs_write64(ENCLS_EXITING_BITMAP, bitmap); +} diff --git a/arch/x86/kvm/vmx/sgx.h b/arch/x86/kvm/vmx/sgx.h new file mode 100644 index 0000000000000000000000000000000000000000..a400888b376d31527d8d2ffb0747e8eada015835 --- /dev/null +++ b/arch/x86/kvm/vmx/sgx.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_SGX_H +#define __KVM_X86_SGX_H + +#include <linux/kvm_host.h> + +#include "capabilities.h" +#include "vmx_ops.h" + +#ifdef CONFIG_X86_SGX_KVM +extern bool __read_mostly enable_sgx; + +int handle_encls(struct kvm_vcpu *vcpu); + +void setup_default_sgx_lepubkeyhash(void); +void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu); + +void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12); +#else +#define enable_sgx 0 + +static inline void setup_default_sgx_lepubkeyhash(void) { } +static inline void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu) { } + +static inline void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + /* Nothing to do if hardware doesn't support SGX */ + if (cpu_has_vmx_encls_vmexit()) + vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); +} +#endif + +#endif /* __KVM_X86_SGX_H */ diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c index c8e51c004f78232a0e8ae66eb5d108e359112e8a..034adb6404dcaf0062998b43d2d7e88c4521e16a 100644 --- a/arch/x86/kvm/vmx/vmcs12.c +++ b/arch/x86/kvm/vmx/vmcs12.c @@ -50,6 +50,7 @@ const unsigned short vmcs_field_to_offset_table[] = { FIELD64(VMREAD_BITMAP, vmread_bitmap), FIELD64(VMWRITE_BITMAP, vmwrite_bitmap), FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap), + FIELD64(ENCLS_EXITING_BITMAP, encls_exiting_bitmap), FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address), FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer), FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl), diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h index 80232daf00ff1368df3e4f01a2732b49e678e86a..13494956d0e97f7cb6435de3e58c590b343c7093 100644 --- a/arch/x86/kvm/vmx/vmcs12.h +++ b/arch/x86/kvm/vmx/vmcs12.h @@ -69,7 +69,8 @@ struct __packed vmcs12 { u64 vm_function_control; u64 eptp_list_address; u64 pml_address; - u64 padding64[3]; /* room for future expansion */ + u64 encls_exiting_bitmap; + u64 padding64[2]; /* room for future expansion */ /* * To allow migration of L1 (complete with its L2 guests) between * machines of different natural widths (32 or 64 bit), we cannot have @@ -256,6 +257,7 @@ static inline void vmx_check_vmcs12_offsets(void) CHECK_OFFSET(vm_function_control, 296); CHECK_OFFSET(eptp_list_address, 304); CHECK_OFFSET(pml_address, 312); + CHECK_OFFSET(encls_exiting_bitmap, 320); CHECK_OFFSET(cr0_guest_host_mask, 344); CHECK_OFFSET(cr4_guest_host_mask, 352); CHECK_OFFSET(cr0_read_shadow, 360); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 70b7aae5b9ed8ba519584d3a676a832770ca347b..6bb07e495ecac87abc4dc90a850e41eb40ad8e88 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -58,6 +58,7 @@ #include "mmu.h" #include "nested.h" #include "pmu.h" +#include "sgx.h" #include "trace.h" #include "vmcs.h" #include "vmcs12.h" @@ -226,6 +227,9 @@ static const struct { #define L1D_CACHE_ORDER 4 static void *vmx_l1d_flush_pages; +/* Control for disabling CPU Fill buffer clear */ +static bool __read_mostly vmx_fb_clear_ctrl_available; +
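The flag declared above is driven by the FB_CLEAR helpers added below (vmx_setup_fb_clear_ctrl() and vmx_update_fb_clear_dis()). As orientation, the guest-facing policy condenses to roughly the predicate in the following sketch; the helper name is hypothetical and the code is illustrative, not part of the patch:

/*
 * Condensed from vmx_update_fb_clear_dis(): FB_CLEAR is left enabled when
 * the guest enumerates FB_CLEAR (it will rely on VERW), and the
 * MCU_OPT_CTRL toggling is skipped when the guest is enumerated immune to
 * the whole MDS/TAA/PSDP/FBSDP/SBDR-SSDP class.
 */
static bool want_fb_clear_dis(u64 arch_capabilities)
{
	const u64 immune = ARCH_CAP_MDS_NO | ARCH_CAP_TAA_NO |
			   ARCH_CAP_PSDP_NO | ARCH_CAP_FBSDP_NO |
			   ARCH_CAP_SBDR_SSDP_NO;

	if (arch_capabilities & ARCH_CAP_FB_CLEAR)
		return false;

	return (arch_capabilities & immune) != immune;
}

In the real code the result is additionally gated on vmx_fb_clear_ctrl_available, i.e. on the host exposing ARCH_CAP_FB_CLEAR_CTRL in the first place.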
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) { struct page *page; @@ -357,6 +361,60 @@ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp) return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option); } +static void vmx_setup_fb_clear_ctrl(void) +{ + u64 msr; + + if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) && + !boot_cpu_has_bug(X86_BUG_MDS) && + !boot_cpu_has_bug(X86_BUG_TAA)) { + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr); + if (msr & ARCH_CAP_FB_CLEAR_CTRL) + vmx_fb_clear_ctrl_available = true; + } +} + +static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx) +{ + u64 msr; + + if (!vmx->disable_fb_clear) + return; + + rdmsrl(MSR_IA32_MCU_OPT_CTRL, msr); + msr |= FB_CLEAR_DIS; + wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr); + /* Cache the MSR value to avoid reading it later */ + vmx->msr_ia32_mcu_opt_ctrl = msr; +} + +static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx) +{ + if (!vmx->disable_fb_clear) + return; + + vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS; + wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); +} + +static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) +{ + vmx->disable_fb_clear = vmx_fb_clear_ctrl_available; + + /* + * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS + * at VMEntry. Skip the MSR read/write when a guest has no use case to + * execute VERW. + */ + if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) || + ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) && + (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO))) + vmx->disable_fb_clear = false; +} + static const struct kernel_param_ops vmentry_l1d_flush_ops = { .set = vmentry_l1d_flush_set, .get = vmentry_l1d_flush_get, @@ -367,11 +425,6 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var); static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type); -void kvm_arch_vcpu_stat_reset(struct kvm_vcpu_stat *vcpu_stat) -{ - vcpu_stat->st_max = 0; -} - void vmx_vmexit(void); #define vmx_insn_failed(fmt...) \ @@ -1578,12 +1631,25 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data) static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len) { + /* + * Emulation of instructions in SGX enclaves is impossible as RIP does + * not point at the failing instruction, and even if it did, the code + * stream is inaccessible. Inject #UD instead of exiting to userspace + * so that guest userspace can't DoS the guest simply by triggering + * emulation (enclaves are CPL3 only). + */ + if (to_vmx(vcpu)->exit_reason.enclave_mode) { + kvm_queue_exception(vcpu, UD_VECTOR); + return false; + } return true; } static int skip_emulated_instruction(struct kvm_vcpu *vcpu) { + union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason; unsigned long rip, orig_rip; + u32 instr_len; /* * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on @@ -1594,9 +1660,33 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) * i.e. we end up advancing IP with some random value.
*/ if (!static_cpu_has(X86_FEATURE_HYPERVISOR) || - to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) { + exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) { + instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + + /* + * Emulating an enclave's instructions isn't supported as KVM + * cannot access the enclave's memory or its true RIP, e.g. the + * vmcs.GUEST_RIP points at the exit point of the enclave, not + * the RIP that actually triggered the VM-Exit. But, because + * most instructions that cause VM-Exit will #UD in an enclave, + * most instruction-based VM-Exits simply do not occur. + * + * There are a few exceptions, notably the debug instructions + * INT1 (a.k.a. ICEBP) and INT3, as they are allowed in debug enclaves + * and generate #DB/#BP as expected, which KVM might intercept. + * But again, the CPU does the dirty work and saves an instr + * length of zero so VMMs don't shoot themselves in the foot. + * WARN if KVM tries to skip a non-zero length instruction on + * a VM-Exit from an enclave. + */ + if (!instr_len) + goto rip_updated; + + WARN(exit_reason.enclave_mode, + "KVM: skipping instruction after SGX enclave VM-Exit"); + orig_rip = kvm_rip_read(vcpu); - rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN); + rip = orig_rip + instr_len; #ifdef CONFIG_X86_64 /* * We need to mask out the high 32 bits of RIP if not in 64-bit @@ -1612,6 +1702,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu) return 0; } +rip_updated: /* skipping an emulated instruction also counts */ vmx_set_interrupt_shadow(vcpu, 0); @@ -1873,6 +1964,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_FEAT_CTL: msr_info->data = vmx->msr_ia32_feature_control; break; + case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: + if (!msr_info->host_initiated && + !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC)) + return 1; + msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash + [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0]; + break; case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: if (!nested_vmx_allowed(vcpu)) return 1; @@ -2167,6 +2265,29 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmx->msr_ia32_feature_control = data; if (msr_info->host_initiated && data == 0) vmx_leave_nested(vcpu); + + /* SGX may be enabled/disabled by guest's firmware */ + vmx_write_encls_bitmap(vcpu, NULL); + break; + case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: + /* + * On real hardware, the LE hash MSRs are writable before + * the firmware sets bit 0 in MSR 0x7a ("activating" SGX), + * at which point SGX related bits in IA32_FEATURE_CONTROL + * become writable. + * + * KVM does not emulate SGX activation for simplicity, so + * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL + * is unlocked. This is technically not architectural + * behavior, but it's close enough. + */ + if (!msr_info->host_initiated && + (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) || + ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) && + !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED)))) + return 1; + vmx->msr_ia32_sgxlepubkeyhash + [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data; break; case MSR_IA32_VMX_BASIC ...
MSR_IA32_VMX_VMFUNC: if (!msr_info->host_initiated) @@ -2264,6 +2385,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ret = kvm_set_msr_common(vcpu, msr_info); } + /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */ + if (msr_index == MSR_IA32_ARCH_CAPABILITIES) + vmx_update_fb_clear_dis(vcpu, vmx); + return ret; } @@ -4432,8 +4557,7 @@ static void init_vmcs(struct vcpu_vmx *vmx) vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); } - if (cpu_has_vmx_encls_vmexit()) - vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); + vmx_write_encls_bitmap(&vmx->vcpu, NULL); if (vmx_pt_mode_is_host_guest()) { memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); @@ -4536,6 +4660,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vpid_sync_context(vmx->vpid); if (init_event) vmx_clear_hlt(vcpu); + + vmx_update_fb_clear_dis(vcpu, vmx); } static void enable_irq_window(struct kvm_vcpu *vcpu) @@ -4899,8 +5025,33 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) dr6 = vmx_get_exit_qual(vcpu); if (!(vcpu->guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { + /* + * If the #DB was due to ICEBP, a.k.a. INT1, skip the + * instruction. ICEBP generates a trap-like #DB, but + * despite its interception control being tied to #DB, + * is an instruction intercept, i.e. the VM-Exit occurs + * on the ICEBP itself. Note, skipping ICEBP also + * clears STI and MOVSS blocking. + * + * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS + * if single-step is enabled in RFLAGS and STI or MOVSS + * blocking is active, as the CPU doesn't set the bit + * on VM-Exit due to #DB interception. VM-Entry has a + * consistency check that a single-step #DB is pending + * in this scenario as the previous instruction cannot + * have toggled RFLAGS.TF 0=>1 (because STI and MOV/POP SS + * don't modify RFLAGS), therefore the one instruction + * delay when activating single-step breakpoints must + * have already expired. Note, the CPU sets/clears BS + * as appropriate for all other VM-Exit types. + */ if (is_icebp(intr_info)) WARN_ON(!skip_emulated_instruction(vcpu)); + else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) && + (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & + (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS))) + vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, + vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS); kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); return 1; @@ -5425,6 +5576,9 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu) { gpa_t gpa; + if (!vmx_can_emulate_instruction(vcpu, NULL, 0)) + return 1; + /* * A nested guest cannot optimize MMIO vmexits, because we have an * nGPA here instead of the required GPA. @@ -5676,16 +5830,18 @@ static int handle_vmx_instruction(struct kvm_vcpu *vcpu) return 1; } +#ifndef CONFIG_X86_SGX_KVM static int handle_encls(struct kvm_vcpu *vcpu) { /* - * SGX virtualization is not yet supported. There is no software - * enable bit for SGX, so we have to trap ENCLS and inject a #UD - * to prevent the guest from executing ENCLS. + * SGX virtualization is disabled. There is no software enable bit for + * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent + * the guest from executing ENCLS (when SGX is supported by hardware).
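+ * The !CONFIG_X86_SGX_KVM stub of vmx_write_encls_bitmap() in sgx.h + * sets the exiting bitmap to -1ull, so every ENCLS leaf lands here.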
*/ kvm_queue_exception(vcpu, UD_VECTOR); return 1; } +#endif /* CONFIG_X86_SGX_KVM */ /* * The exit handlers return 1 if the exit was handled fully and guest execution @@ -6690,6 +6846,11 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, vmx_l1d_flush(vcpu); else if (static_branch_unlikely(&mds_user_clear)) mds_clear_cpu_buffers(); + else if (static_branch_unlikely(&mmio_stale_data_clear) && + kvm_arch_has_assigned_device(vcpu->kvm)) + mds_clear_cpu_buffers(); + + vmx_disable_fb_clear(vmx); if (vcpu->arch.cr2 != native_read_cr2()) native_write_cr2(vcpu->arch.cr2); @@ -6699,6 +6860,8 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, vcpu->arch.cr2 = native_read_cr2(); + vmx_enable_fb_clear(vmx); + /* * VMEXIT disables interrupts (host state), but tracing and lockdep * have them in state 'on' as recorded before entering guest mode. @@ -7011,6 +7174,8 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu) else memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs)); + vcpu_setup_sgx_lepubkeyhash(vcpu); + vmx->nested.posted_intr_nv = -1; vmx->nested.current_vmptr = -1ull; @@ -7337,6 +7502,19 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) set_cr4_guest_host_mask(vmx); + vmx_write_encls_bitmap(vcpu, NULL); + if (guest_cpuid_has(vcpu, X86_FEATURE_SGX)) + vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED; + else + vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED; + + if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC)) + vmx->msr_ia32_feature_control_valid_bits |= + FEAT_CTL_SGX_LC_ENABLED; + else + vmx->msr_ia32_feature_control_valid_bits &= + ~FEAT_CTL_SGX_LC_ENABLED; + /* Refresh #PF interception to account for MAXPHYADDR changes. */ update_exception_bitmap(vcpu); } @@ -7357,6 +7535,13 @@ static __init void vmx_set_cpu_caps(void) if (vmx_pt_mode_is_host_guest()) kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT); + if (!enable_sgx) { + kvm_cpu_cap_clear(X86_FEATURE_SGX); + kvm_cpu_cap_clear(X86_FEATURE_SGX_LC); + kvm_cpu_cap_clear(X86_FEATURE_SGX1); + kvm_cpu_cap_clear(X86_FEATURE_SGX2); + } + if (vmx_umip_emulated()) kvm_cpu_cap_set(X86_FEATURE_UMIP); @@ -7953,6 +8138,8 @@ static __init int hardware_setup(void) if (!enable_ept || !cpu_has_vmx_intel_pt()) pt_mode = PT_MODE_SYSTEM; + setup_default_sgx_lepubkeyhash(); + if (nested) { nested_vmx_setup_ctls_msrs(&vmcs_config.nested, vmx_capability.ept); @@ -7978,6 +8165,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = { .disabled_by_bios = vmx_disabled_by_bios, .check_processor_compatibility = vmx_check_processor_compat, .hardware_setup = hardware_setup, + .intel_pt_intr_in_guest = vmx_pt_mode_is_host_guest, .runtime_ops = &vmx_x86_ops, }; @@ -8084,6 +8272,8 @@ static int __init vmx_init(void) return r; } + vmx_setup_fb_clear_ctrl(); + for_each_possible_cpu(cpu) { INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu)); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index c0b52498e4bb465c77c9f3a84b8cb32e778e158a..1848fef1f96ebb8b07b27833a9d7e653206103f8 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -324,7 +324,12 @@ struct vcpu_vmx { */ u64 msr_ia32_feature_control; u64 msr_ia32_feature_control_valid_bits; + /* SGX Launch Control public key hash */ + u64 msr_ia32_sgxlepubkeyhash[4]; + u64 ept_pointer; + u64 msr_ia32_mcu_opt_ctrl; + bool disable_fb_clear; struct pt_desc pt_desc; struct lbr_desc lbr_desc; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 
db64955c6e5b110c0823f84987a3180f809b832e..e43760895eec76fd910038d2e628cb9bb7fd6fd6 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -74,6 +74,7 @@ #include <asm/tlbflush.h> #include <asm/intel_pt.h> #include <asm/emulate_prefix.h> +#include <asm/sgx.h> #include <clocksource/hyperv_timer.h> #define CREATE_TRACE_POINTS @@ -1273,7 +1274,7 @@ static const u32 msrs_to_save_all[] = { MSR_IA32_UMWAIT_CONTROL, MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1, - MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3, + MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS, MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL, MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1, @@ -1459,6 +1460,9 @@ static u64 kvm_get_arch_capabilities(void) */ } + /* Guests don't need to know "Fill buffer clear control" exists */ + data &= ~ARCH_CAP_FB_CLEAR_CTRL; + return data; } @@ -3227,6 +3231,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) if (data & ~supported_xss) return 1; vcpu->arch.ia32_xss = data; + kvm_update_cpuid_runtime(vcpu); break; case MSR_SMI_COUNT: if (!msr_info->host_initiated) @@ -3838,6 +3843,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_X86_USER_SPACE_MSR: case KVM_CAP_X86_MSR_FILTER: case KVM_CAP_ENFORCE_PV_FEATURE_CPUID: +#ifdef CONFIG_X86_SGX_KVM + case KVM_CAP_SGX_ATTRIBUTE: +#endif r = 1; break; case KVM_CAP_SYNC_REGS: @@ -4450,6 +4458,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, vcpu->arch.hflags |= HF_SMM_MASK; else vcpu->arch.hflags &= ~HF_SMM_MASK; + + kvm_x86_ops.nested_ops->leave_nested(vcpu); kvm_smm_changed(vcpu); } @@ -5388,6 +5398,23 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, kvm->arch.user_space_msr_mask = cap->args[0]; r = 0; break; +#ifdef CONFIG_X86_SGX_KVM + case KVM_CAP_SGX_ATTRIBUTE: { + unsigned long allowed_attributes = 0; + + r = sgx_set_attribute(&allowed_attributes, cap->args[0]); + if (r) + break; + + /* KVM only supports the PROVISIONKEY privileged attribute. */ + if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) && + !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY)) + kvm->arch.sgx_provisioning_allowed = true; + else + r = -EINVAL; + break; + } +#endif default: r = -EINVAL; break; @@ -5999,6 +6026,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ?
PFERR_USER_MASK : 0; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } +EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read); gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) @@ -6015,6 +6043,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } +EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write); /* uses this to access any guest's mapped memory without checking CPL */ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, @@ -6932,6 +6961,11 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt) return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR); } +static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt) +{ + return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID); +} + static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg) { return kvm_register_read(emul_to_vcpu(ctxt), reg); @@ -7015,6 +7049,7 @@ static const struct x86_emulate_ops emulate_ops = { .guest_has_long_mode = emulator_guest_has_long_mode, .guest_has_movbe = emulator_guest_has_movbe, .guest_has_fxsr = emulator_guest_has_fxsr, + .guest_has_rdpid = emulator_guest_has_rdpid, .set_nmi_mask = emulator_set_nmi_mask, .get_hflags = emulator_get_hflags, .set_hflags = emulator_set_hflags, @@ -7346,7 +7381,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); -static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) +static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r) { if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { @@ -7415,25 +7450,23 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt) } /* - * Decode to be emulated instruction. Return EMULATION_OK if success. + * Decode an instruction for emulation. The caller is responsible for handling + * code breakpoints. Note, manually detecting code breakpoints is unnecessary + * (and wrong) when emulating on an intercepted fault-like exception[*], as + * code breakpoints have higher priority and thus have already been checked by + * hardware. + * + * [*] Except #MC, which is higher priority, but KVM should never emulate in + * response to a machine check. */ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, void *insn, int insn_len) { - int r = EMULATION_OK; struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; + int r; init_emulate_ctxt(vcpu); - /* - * We will reenter on the same instruction since we do not set - * complete_userspace_io. This does not handle watchpoints yet, - * those would be handled in the emulate_ops. - */ - if (!(emulation_type & EMULTYPE_SKIP) && - kvm_vcpu_check_breakpoint(vcpu, &r)) - return r; - ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; r = x86_decode_insn(ctxt, insn, insn_len); @@ -7468,6 +7501,15 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, if (!(emulation_type & EMULTYPE_NO_DECODE)) { kvm_clear_exception_queue(vcpu); + /* + * Return immediately if RIP hits a code breakpoint; such #DBs + * are fault-like and are higher priority than any faults on + * the code fetch itself.
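+ * (x86_decode_emulated_instruction() deliberately no longer + * performs this check; see the comment above that helper.)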
+ */ + if (!(emulation_type & EMULTYPE_SKIP) && + kvm_vcpu_check_code_breakpoint(vcpu, &r)) + return r; + r = x86_decode_emulated_instruction(vcpu, emulation_type, insn, insn_len); if (r != EMULATION_OK) { @@ -7942,7 +7984,7 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = { .is_in_guest = kvm_is_in_guest, .is_user_mode = kvm_is_user_mode, .get_guest_ip = kvm_get_guest_ip, - .handle_intel_pt_intr = kvm_handle_intel_pt_intr, + .handle_intel_pt_intr = NULL, }; #ifdef CONFIG_X86_64 @@ -8056,7 +8098,7 @@ int kvm_arch_init(void *opaque) goto out_free_x86_emulator_cache; } - r = kvm_mmu_module_init(); + r = kvm_mmu_vendor_module_init(); if (r) goto out_free_percpu; @@ -8065,6 +8107,8 @@ int kvm_arch_init(void *opaque) PT_PRESENT_MASK, 0, sme_me_mask); kvm_timer_init(); + if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest()) + kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr; perf_register_guest_info_callbacks(&kvm_guest_cbs); if (boot_cpu_has(X86_FEATURE_XSAVE)) { @@ -8102,6 +8146,7 @@ void kvm_arch_exit(void) #endif kvm_lapic_exit(); perf_unregister_guest_info_callbacks(&kvm_guest_cbs); + kvm_guest_cbs.handle_intel_pt_intr = NULL; if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, @@ -8113,7 +8158,7 @@ void kvm_arch_exit(void) cancel_work_sync(&pvclock_gtod_work); #endif kvm_x86_ops.hardware_enable = NULL; - kvm_mmu_module_exit(); + kvm_mmu_vendor_module_exit(); free_percpu(user_return_msrs); kmem_cache_destroy(x86_emulator_cache); kmem_cache_destroy(x86_fpu_cache); @@ -8889,6 +8934,14 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); } +void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!kvm_x86_ops.guest_memory_reclaimed) + return; + + kvm_x86_ops.guest_memory_reclaimed(kvm); +} + void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) { if (!lapic_in_kernel(vcpu)) @@ -11469,6 +11522,11 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) } EXPORT_SYMBOL_GPL(kvm_handle_invpcid); +void kvm_arch_vcpu_stat_reset(struct kvm_vcpu_stat *vcpu_stat) +{ + vcpu_stat->st_max = 0; +} + EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq); @@ -11491,3 +11549,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request); + +static int __init kvm_x86_init(void) +{ + kvm_mmu_x86_module_init(); + return 0; +} +module_init(kvm_x86_init); + +static void __exit kvm_x86_exit(void) +{ + /* + * If module_init() is implemented, module_exit() must also be + * implemented to allow module unload. + */ +} +module_exit(kvm_x86_exit); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 34112c63b34798d99e2ee6e672353d98e9a64614..058ff3f6944c840778d54f0377e873836b23b14b 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1123,6 +1123,18 @@ access_error(unsigned long error_code, struct vm_area_struct *vma) if (error_code & X86_PF_PK) return 1; + /* + * SGX hardware blocked the access. This usually happens + * when the enclave memory contents have been destroyed, like + * after a suspend/resume cycle. In any case, the kernel can't + * fix the cause of the fault. Handle the fault as an access + * error even in cases where no actual access violation + * occurred. 
This allows userspace to rebuild the enclave in + * response to the signal. + */ + if (unlikely(error_code & X86_PF_SGX)) + return 1; + /* * Make sure to check the VMA so that we do not perform * faults just to hit a X86_PF_PK as soon as we fill in a diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 60ade7dd71bd99b99ca9cee5b2cd95a506212d9c..7ce9b8dd875773bc480a7cbd26bb1ebfa4c97ccd 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -614,6 +614,7 @@ static bool memremap_is_efi_data(resource_size_t phys_addr, static bool memremap_is_setup_data(resource_size_t phys_addr, unsigned long size) { + struct setup_indirect *indirect; struct setup_data *data; u64 paddr, paddr_next; @@ -626,6 +627,10 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, data = memremap(paddr, sizeof(*data), MEMREMAP_WB | MEMREMAP_DEC); + if (!data) { + pr_warn("failed to memremap setup_data entry\n"); + return false; + } paddr_next = data->next; len = data->len; @@ -635,10 +640,21 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, return true; } - if (data->type == SETUP_INDIRECT && - ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) { - paddr = ((struct setup_indirect *)data->data)->addr; - len = ((struct setup_indirect *)data->data)->len; + if (data->type == SETUP_INDIRECT) { + memunmap(data); + data = memremap(paddr, sizeof(*data) + len, + MEMREMAP_WB | MEMREMAP_DEC); + if (!data) { + pr_warn("failed to memremap indirect setup_data\n"); + return false; + } + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) { + paddr = indirect->addr; + len = indirect->len; + } } memunmap(data); @@ -659,22 +675,51 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, static bool __init early_memremap_is_setup_data(resource_size_t phys_addr, unsigned long size) { + struct setup_indirect *indirect; struct setup_data *data; u64 paddr, paddr_next; paddr = boot_params.hdr.setup_data; while (paddr) { - unsigned int len; + unsigned int len, size; if (phys_addr == paddr) return true; data = early_memremap_decrypted(paddr, sizeof(*data)); + if (!data) { + pr_warn("failed to early memremap setup_data entry\n"); + return false; + } + + size = sizeof(*data); paddr_next = data->next; len = data->len; - early_memunmap(data, sizeof(*data)); + if ((phys_addr > paddr) && (phys_addr < (paddr + len))) { + early_memunmap(data, sizeof(*data)); + return true; + } + + if (data->type == SETUP_INDIRECT) { + size += len; + early_memunmap(data, sizeof(*data)); + data = early_memremap_decrypted(paddr, size); + if (!data) { + pr_warn("failed to early memremap indirect setup_data\n"); + return false; + } + + indirect = (struct setup_indirect *)data->data; + + if (indirect->type != SETUP_INDIRECT) { + paddr = indirect->addr; + len = indirect->len; + } + } + + early_memunmap(data, size); if ((phys_addr > paddr) && (phys_addr < (paddr + len))) return true; diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index db1378c6ff2621dcf5b5363424b79d63aea67991..decebcd8ee1c7ed95c80f2792b42ae161b9a33ef 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -40,7 +40,8 @@ static void msr_save_context(struct saved_context *ctxt) struct saved_msr *end = msr + ctxt->saved_msrs.num; while (msr < end) { - msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q); + if (msr->valid) + rdmsrl(msr->info.msr_no, msr->info.reg.q); msr++; } } @@ -427,8 +428,10 @@ static int msr_build_context(const u32 *msr_id, const int num) } for (i = 
saved_msrs->num, j = 0; i < total_num; i++, j++) { + u64 dummy; + msr_array[i].info.msr_no = msr_id[j]; - msr_array[i].valid = false; + msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy); msr_array[i].info.reg.q = 0; } saved_msrs->num = total_num; @@ -503,10 +506,24 @@ static int pm_cpu_check(const struct x86_cpu_id *c) return ret; } +static void pm_save_spec_msr(void) +{ + u32 spec_msr_id[] = { + MSR_IA32_SPEC_CTRL, + MSR_IA32_TSX_CTRL, + MSR_TSX_FORCE_ABORT, + MSR_IA32_MCU_OPT_CTRL, + MSR_AMD64_LS_CFG, + }; + + msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id)); +} + static int pm_check_save_msr(void) { dmi_check_system(msr_save_dmi_table); pm_cpu_check(msr_save_cpu_table); + pm_save_spec_msr(); return 0; } diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c index 3313bffbecd4d3d8b179ceaef3cab7c73d83f977..1a702c6a226ec2daff44df71e742c0cc126bc370 100644 --- a/arch/x86/realmode/init.c +++ b/arch/x86/realmode/init.c @@ -17,6 +17,32 @@ u32 *trampoline_cr4_features; /* Hold the pgd entry used on booting additional CPUs */ pgd_t trampoline_pgd_entry; +void load_trampoline_pgtable(void) +{ +#ifdef CONFIG_X86_32 + load_cr3(initial_page_table); +#else + /* + * This function is called before exiting to real-mode and that will + * fail with CR4.PCIDE still set. + */ + if (boot_cpu_has(X86_FEATURE_PCID)) + cr4_clear_bits(X86_CR4_PCIDE); + + write_cr3(real_mode_header->trampoline_pgd); +#endif + + /* + * The CR3 write above will not flush global TLB entries. + * Stale, global entries from previous page tables may still be + * present. Flush those stale entries. + * + * This ensures that memory accessed while running with + * trampoline_pgd is *actually* mapped into trampoline_pgd. + */ + __flush_tlb_all(); +} + void __init reserve_real_mode(void) { phys_addr_t mem; diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c index 58f51667e2e4beb82ec794cf618bf7026cfcb66c..8249685b409605a486d19621c2a78a9713856c58 100644 --- a/arch/x86/um/syscalls_64.c +++ b/arch/x86/um/syscalls_64.c @@ -11,6 +11,7 @@ #include <linux/uaccess.h> #include <asm/prctl.h> /* XXX This should get the constants from libc */ #include <os.h> +#include <registers.h> long arch_prctl(struct task_struct *task, int option, unsigned long __user *arg2) @@ -35,7 +36,7 @@ long arch_prctl(struct task_struct *task, int option, switch (option) { case ARCH_SET_FS: case ARCH_SET_GS: - ret = restore_registers(pid, &current->thread.regs.regs); + ret = restore_pid_registers(pid, &current->thread.regs.regs); if (ret) return ret; break; diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 16ff25d6935e78d69a42c4a24174fd5702c280b9..804c65d2b95f3c066f9040d5d5580e1b6256f0ce 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1387,10 +1387,6 @@ asmlinkage __visible void __init xen_start_kernel(void) xen_acpi_sleep_register(); - /* Avoid searching for BIOS MP tables */ - x86_init.mpparse.find_smp_config = x86_init_noop; - x86_init.mpparse.get_smp_config = x86_init_uint_noop; - xen_boot_params_init_edd(); #ifdef CONFIG_ACPI diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c index e13b0b49fcdfc181c19f76de1c968b9a344e8a70..d7249f4c90f1b9c48d68f70f51e6d7d03d6a65ca 100644 --- a/arch/x86/xen/pmu.c +++ b/arch/x86/xen/pmu.c @@ -512,10 +512,7 @@ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id) return ret; } -bool is_xen_pmu(int cpu) -{ - return (get_xenpmu_data() != NULL); -} +bool is_xen_pmu; void xen_pmu_init(int cpu) { @@ -526,7 +523,7 @@ void xen_pmu_init(int cpu) BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE); - if
(xen_hvm_domain()) + if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu)) return; xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL); @@ -547,7 +544,8 @@ void xen_pmu_init(int cpu) per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data; per_cpu(xenpmu_shared, cpu).flags = 0; - if (cpu == 0) { + if (!is_xen_pmu) { + is_xen_pmu = true; perf_register_guest_info_callbacks(&xen_guest_cbs); xen_pmu_arch_init(); } diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h index 0e83a160589bc2e6c9149bbbae7e049c08e0e8c0..65c58894fc79f6d94a1ce860112ded726e0801f2 100644 --- a/arch/x86/xen/pmu.h +++ b/arch/x86/xen/pmu.h @@ -4,6 +4,8 @@ #include +extern bool is_xen_pmu; + irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id); #ifdef CONFIG_XEN_HAVE_VPMU void xen_pmu_init(int cpu); @@ -12,7 +14,6 @@ void xen_pmu_finish(int cpu); static inline void xen_pmu_init(int cpu) {} static inline void xen_pmu_finish(int cpu) {} #endif -bool is_xen_pmu(int cpu); bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err); bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err); int pmu_apic_update(uint32_t reg); diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c index 6ff3c887e0b99523cd69774b8de8f3009f69d92f..b70afdff419ca3cc9591e7d5e180b765271ed34a 100644 --- a/arch/x86/xen/smp_hvm.c +++ b/arch/x86/xen/smp_hvm.c @@ -19,6 +19,12 @@ static void __init xen_hvm_smp_prepare_boot_cpu(void) */ xen_vcpu_setup(0); + /* + * Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS. + * Refer to comments in xen_hvm_init_time_ops(). + */ + xen_hvm_init_time_ops(); + /* * The alternative logic (which patches the unlock/lock) runs before * the smp bootup up code is activated. Hence we need to set this up diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index c2ac319f11a4b785e07ac52041f54d8adc2c4e85..35b6d15d874d046cc9c0a4a73aad541dbfcfa963 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -130,7 +130,7 @@ int xen_smp_intr_init_pv(unsigned int cpu) per_cpu(xen_irq_work, cpu).irq = rc; per_cpu(xen_irq_work, cpu).name = callfunc_name; - if (is_xen_pmu(cpu)) { + if (is_xen_pmu) { pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, xen_pmu_irq_handler, @@ -149,28 +149,12 @@ int xen_smp_intr_init_pv(unsigned int cpu) return rc; } -static void __init xen_fill_possible_map(void) -{ - int i, rc; - - if (xen_initial_domain()) - return; - - for (i = 0; i < nr_cpu_ids; i++) { - rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); - if (rc >= 0) { - num_processors++; - set_cpu_possible(i, true); - } - } -} - -static void __init xen_filter_cpu_maps(void) +static void __init _get_smp_config(unsigned int early) { int i, rc; unsigned int subtract = 0; - if (!xen_initial_domain()) + if (early) return; num_processors = 0; @@ -211,7 +195,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void) * sure the old memory can be recycled. 
*/ make_lowmem_page_readwrite(xen_initial_gdt); - xen_filter_cpu_maps(); xen_setup_vcpu_info_placement(); /* @@ -491,5 +474,8 @@ static const struct smp_ops xen_smp_ops __initconst = { void __init xen_smp_init(void) { smp_ops = xen_smp_ops; - xen_fill_possible_map(); + + /* Avoid searching for BIOS MP tables */ + x86_init.mpparse.find_smp_config = x86_init_noop; + x86_init.mpparse.get_smp_config = _get_smp_config; } diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 91f5b330dcc6db596b5ef19bb1c7f03d3abeeb3c..8183d17e1cf1769dee6da556805f5276d26dbc0a 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -556,6 +556,11 @@ static void xen_hvm_setup_cpu_clockevents(void) void __init xen_hvm_init_time_ops(void) { + static bool hvm_time_initialized; + + if (hvm_time_initialized) + return; + /* * vector callback is needed otherwise we cannot receive interrupts * on cpu > 0 and at this point we don't know how many cpus are @@ -565,7 +570,22 @@ void __init xen_hvm_init_time_ops(void) return; if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { - pr_info("Xen doesn't support pvclock on HVM, disable pv timer"); + pr_info_once("Xen doesn't support pvclock on HVM, disable pv timer"); + return; + } + + /* + * Only MAX_VIRT_CPUS 'vcpu_info' are embedded inside 'shared_info'. + * __this_cpu_read(xen_vcpu) is still NULL when a Xen HVM guest + * boots on a vcpu >= MAX_VIRT_CPUS (e.g., kexec); accessing + * __this_cpu_read(xen_vcpu) via xen_clocksource_read() would then + * panic. + * + * Hence xen_hvm_init_time_ops() should be called again later, once + * __this_cpu_read(xen_vcpu) is available. + */ + if (!__this_cpu_read(xen_vcpu)) { + pr_info("Delay xen_init_time_common() as kernel is running on vcpu=%d\n", + xen_vcpu_nr(0)); return; } @@ -577,6 +597,8 @@ void __init xen_hvm_init_time_ops(void) x86_platform.calibrate_tsc = xen_tsc_khz; x86_platform.get_wallclock = xen_get_wallclock; x86_platform.set_wallclock = xen_set_wallclock; + + hvm_time_initialized = true; } #endif diff --git a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi index 9bf8bad1dd18afcf2ac4e64264784067e5863efe..c33932568aa73e618397095d30707db5913825f5 100644 --- a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi +++ b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi @@ -8,19 +8,19 @@ reg = <0x00000000 0x08000000>; bank-width = <2>; device-width = <2>; - partition@0x0 { + partition@0 { label = "data"; reg = <0x00000000 0x06000000>; }; - partition@0x6000000 { + partition@6000000 { label = "boot loader area"; reg = <0x06000000 0x00800000>; }; - partition@0x6800000 { + partition@6800000 { label = "kernel image"; reg = <0x06800000 0x017e0000>; }; - partition@0x7fe0000 { + partition@7fe0000 { label = "boot environment"; reg = <0x07fe0000 0x00020000>; }; diff --git a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi index 40c2f81f7cb66f8f31b700070017698069b9c3c1..7bde2ab2d6fb5e49292a072c86ed665ef044fe56 100644 --- a/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi +++ b/arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi @@ -8,19 +8,19 @@ reg = <0x08000000 0x01000000>; bank-width = <2>; device-width = <2>; - partition@0x0 { + partition@0 { label = "boot loader area"; reg = <0x00000000 0x00400000>; }; - partition@0x400000 { + partition@400000 { label = "kernel image"; reg = <0x00400000 0x00600000>; }; - partition@0xa00000 { + partition@a00000 { label = "data"; reg = <0x00a00000 0x005e0000>; }; - partition@0xfe0000 { + partition@fe0000 { label = "boot environment"; reg = <0x00fe0000 0x00020000>;
}; diff --git a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi index fb8d3a9f33c2308ca0e57ba7fd0caca83ed99b90..0655b868749a47ecbce044e9206b3215da3e0be3 100644 --- a/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi +++ b/arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi @@ -8,11 +8,11 @@ reg = <0x08000000 0x00400000>; bank-width = <2>; device-width = <2>; - partition@0x0 { + partition@0 { label = "boot loader area"; reg = <0x00000000 0x003f0000>; }; - partition@0x3f0000 { + partition@3f0000 { label = "boot environment"; reg = <0x003f0000 0x00010000>; }; diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 7f63aca6a0d340632646ccceb9f31ae63b878402..9dd4efe1bf0bd0108f1c809636aa78776858876e 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -226,8 +226,8 @@ extern unsigned long get_wchan(struct task_struct *p); #define xtensa_set_sr(x, sr) \ ({ \ - unsigned int v = (unsigned int)(x); \ - __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: "a"(v)); \ + __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: \ + "a"((unsigned int)(x))); \ }) #define xtensa_get_sr(sr) \ diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c index 61cf6497a646b7ec0273f0c8e823df64cffd9824..0dde21e0d3de4c2836bbce5c7fee361811863ec8 100644 --- a/arch/xtensa/kernel/jump_label.c +++ b/arch/xtensa/kernel/jump_label.c @@ -61,7 +61,7 @@ static void patch_text(unsigned long addr, const void *data, size_t sz) .data = data, }; stop_machine_cpuslocked(patch_text_stop_machine, - &patch, NULL); + &patch, cpu_online_mask); } else { unsigned long flags; diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 70f2aeadd21c321d2e080c0f86e51bb37f8848a1..195ab5219736259e37b2292c4b9c2399b646fdcd 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -1871,7 +1871,11 @@ static void bfq_add_request(struct request *rq) bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq)); bfqq->queued[rq_is_sync(rq)]++; - bfqd->queued++; + /* + * Updating of 'bfqd->queued' is protected by 'bfqd->lock'; however, it + * may be read without holding the lock in bfq_has_work(). + */ + WRITE_ONCE(bfqd->queued, bfqd->queued + 1); if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) { /* @@ -2164,7 +2168,11 @@ static void bfq_remove_request(struct request_queue *q, if (rq->queuelist.prev != &rq->queuelist) list_del_init(&rq->queuelist); bfqq->queued[sync]--; - bfqd->queued--; + /* + * Updating of 'bfqd->queued' is protected by 'bfqd->lock'; however, it + * may be read without holding the lock in bfq_has_work(). + */ + WRITE_ONCE(bfqd->queued, bfqd->queued - 1); elv_rb_del(&bfqq->sort_list, rq); elv_rqhash_del(q, rq); @@ -2534,6 +2542,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) * are likely to increase the throughput. */ bfqq->new_bfqq = new_bfqq; + /* + * The above assignment schedules the following redirections: + * each time some I/O for bfqq arrives, the process that + * generated that I/O is disassociated from bfqq and + * associated with new_bfqq. Here we increase new_bfqq->ref + * in advance, adding the number of processes that are + * expected to be associated with new_bfqq as they happen to + * issue I/O.
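+ * (bfq_setup_cooperator() now returns this scheduled merge + * directly instead of re-running its checks; see the early + * return added below.)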
+ */ new_bfqq->ref += process_refs; return new_bfqq; } @@ -2593,6 +2610,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, { struct bfq_queue *in_service_bfqq, *new_bfqq; + /* if a merge has already been setup, then proceed with that first */ + if (bfqq->new_bfqq) + return bfqq->new_bfqq; + /* * Do not perform queue merging if the device is non * rotational and performs internal queueing. In fact, such a @@ -2647,9 +2668,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (bfq_too_late_for_merging(bfqq)) return NULL; - if (bfqq->new_bfqq) - return bfqq->new_bfqq; - if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) return NULL; @@ -4651,15 +4669,12 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) { struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; - if (!atomic_read(&hctx->elevator_queued)) - return false; - /* - * Avoiding lock: a race on bfqd->busy_queues should cause at + * Avoiding lock: a race on bfqd->queued should cause at * most a call to dispatch for nothing */ return !list_empty_careful(&bfqd->dispatch) || - bfq_tot_busy_queues(bfqd) > 0; + READ_ONCE(bfqd->queued); } static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) @@ -4807,7 +4822,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; struct request *rq; struct bfq_queue *in_serv_queue; - bool waiting_rq, idle_timer_disabled; + bool waiting_rq, idle_timer_disabled = false; spin_lock_irq(&bfqd->lock); @@ -4815,14 +4830,15 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue); rq = __bfq_dispatch_request(hctx); - - idle_timer_disabled = - waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); + if (in_serv_queue == bfqd->in_service_queue) { + idle_timer_disabled = + waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); + } spin_unlock_irq(&bfqd->lock); - - bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, - idle_timer_disabled); + bfq_update_dispatch_stats(hctx->queue, rq, + idle_timer_disabled ? 
in_serv_queue : NULL, + idle_timer_disabled); return rq; } @@ -5570,7 +5586,6 @@ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); bfq_insert_request(hctx, rq, at_head); - atomic_inc(&hctx->elevator_queued); } } @@ -5935,7 +5950,6 @@ static void bfq_finish_requeue_request(struct request *rq) bfq_update_inject_limit(bfqd, bfqq); bfq_completed_request(bfqq, bfqd); - atomic_dec(&rq->mq_hctx->elevator_queued); } bfq_finish_requeue_request_body(bfqq); spin_unlock_irqrestore(&bfqd->lock, flags); @@ -6392,6 +6406,8 @@ static void bfq_exit_queue(struct elevator_queue *e) spin_unlock_irq(&bfqd->lock); #endif + wbt_enable_default(bfqd->queue); + kfree(bfqd); /* Re-enable throttling in case elevator disabled it */ diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 9ffd7e2895547676ed801c099306f5a728918840..4f6f140a44e064f18d9c6b3726e3fdcb8572ed40 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -384,7 +384,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) struct blk_integrity *bi = blk_get_integrity(bio->bi_disk); unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); - bip->bip_iter.bi_sector += bytes_done >> 9; + bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9); bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes); } diff --git a/block/bio.c b/block/bio.c index b2e0f0c94c5f71b526aa2a570f639b8d6d18fd00..28191e7035f4f46bb04317372c2c22676d219fb6 100644 --- a/block/bio.c +++ b/block/bio.c @@ -575,7 +575,8 @@ void bio_truncate(struct bio *bio, unsigned new_size) offset = new_size - done; else offset = 0; - zero_user(bv.bv_page, offset, bv.bv_len - offset); + zero_user(bv.bv_page, bv.bv_offset + offset, + bv.bv_len - offset); truncated = true; } done += bv.bv_len; @@ -1110,6 +1111,9 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter) * fit into the bio, or are requested in @iter, whatever is smaller. If * MM encounters an error pinning the requested pages, it stops. Error * is returned only if 0 pages could be pinned. + * + * It's intended for direct IO, so it doesn't do PSI tracking; the caller is + * responsible for setting BIO_WORKINGSET if necessary. */ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) { @@ -1134,6 +1138,9 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) if (is_bvec) bio_set_flag(bio, BIO_NO_PAGE_REF); + + /* don't account direct I/O as memory stall */ + bio_clear_flag(bio, BIO_WORKINGSET); return bio->bi_vcnt ? 0 : ret; } EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages); diff --git a/block/blk-core.c b/block/blk-core.c index 019d583b355cd02bcf9b5c908d2640a8f603cc3a..109fb2750453a32913b9ff531fec43c44e913497 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -398,8 +398,10 @@ void blk_cleanup_queue(struct request_queue *q) del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer); blk_sync_queue(q); - if (queue_is_mq(q)) + if (queue_is_mq(q)) { + blk_mq_cancel_work_sync(q); blk_mq_exit_queue(q); + } /* * In theory, request pool of sched_tags belongs to request queue.
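As a sanity check on the bio_integrity_advance() fix above: bio_integrity_intervals() converts 512-byte data sectors into protection intervals, so the old and new expressions only coincide for 512-byte intervals. A worked example with hypothetical numbers (4096-byte intervals, i.e. interval_exp = 12), illustrative only:

/* Advance one 4KiB chunk of data. */
unsigned int bytes_done = 4096;
unsigned int sectors = bytes_done >> 9;		/* 8 data sectors */
/* bio_integrity_intervals(bi, sectors) is sectors >> (interval_exp - 9) */
unsigned int intervals = sectors >> (12 - 9);	/* 1 protection interval */

bip->bip_iter.bi_sector += intervals;	/* was: += sectors, 8x too far */

With 512-byte protection intervals the two values happen to match, which is presumably why the bug could go unnoticed on common configurations.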
@@ -517,13 +519,15 @@ static void blk_timeout_work(struct work_struct *work) struct request_queue *blk_alloc_queue(int node_id) { struct request_queue *q; + struct request_queue_wrapper *q_wrapper; int ret; - q = kmem_cache_alloc_node(blk_requestq_cachep, + q_wrapper = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO, node_id); - if (!q) + if (!q_wrapper) return NULL; + q = &q_wrapper->q; q->last_merge = NULL; q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL); @@ -594,7 +598,7 @@ struct request_queue *blk_alloc_queue(int node_id) fail_id: ida_simple_remove(&blk_queue_ida, q->id); fail_q: - kmem_cache_free(blk_requestq_cachep, q); + kmem_cache_free(blk_requestq_cachep, q_wrapper); return NULL; } EXPORT_SYMBOL(blk_alloc_queue); @@ -1288,13 +1292,32 @@ void blk_account_io_done(struct request *req, u64 now) !(req->rq_flags & RQF_FLUSH_SEQ)) { const int sgrp = op_stat_group(req_op(req)); struct hd_struct *part; +#ifdef CONFIG_64BIT + u64 stat_time; + struct request_wrapper *rq_wrapper = request_to_wrapper(req); +#endif part_stat_lock(); part = req->part; - update_io_ticks(part, jiffies, true); part_stat_inc(part, ios[sgrp]); +#ifdef CONFIG_64BIT + stat_time = READ_ONCE(rq_wrapper->stat_time_ns); + /* + * This might fail if 'stat_time_ns' is updated + * in blk_mq_check_inflight_with_stat(). + */ + if (likely(now > stat_time && + cmpxchg64(&rq_wrapper->stat_time_ns, stat_time, now) + == stat_time)) { + u64 duration = stat_time ? now - stat_time : + now - req->start_time_ns; + + part_stat_add(req->part, nsecs[sgrp], duration); + } +#else part_stat_add(part, nsecs[sgrp], now - req->start_time_ns); +#endif part_stat_unlock(); hd_struct_put(part); @@ -1796,7 +1819,7 @@ int __init blk_dev_init(void) panic("Failed to create kblockd\n"); blk_requestq_cachep = kmem_cache_create("request_queue", - sizeof(struct request_queue), 0, SLAB_PANIC, NULL); + sizeof(struct request_queue_wrapper), 0, SLAB_PANIC, NULL); blk_debugfs_root = debugfs_create_dir("block", NULL); diff --git a/block/blk-flush.c b/block/blk-flush.c index 82919829bc4d593e2b3c12047477e3127cad30b9..71faf07a626f887aaa01b220ca7dafbbf68818bf 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -470,7 +470,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size, gfp_t flags) { struct blk_flush_queue *fq; - int rq_sz = sizeof(struct request); + int rq_sz = sizeof(struct request_wrapper); fq = kzalloc_node(sizeof(*fq), flags, node); if (!fq) diff --git a/block/blk-map.c b/block/blk-map.c index 21630dccac628c3fac3131c5d900fc144ea962e1..ede73f4f70147eec51e7c89ed48a6534c19eef99 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -488,7 +488,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data, if (bytes > len) bytes = len; - page = alloc_page(q->bounce_gfp | gfp_mask); + page = alloc_page(q->bounce_gfp | __GFP_ZERO | gfp_mask); if (!page) goto cleanup; diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index b5f26082b959453a7ac728bfb4ac5bcc6272516e..f3a263a1bb43d28864ac8cf04d47fef2901a3d94 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -204,6 +204,39 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf, return count; } +static int queue_tag_set_show(void *data, struct seq_file *m) +{ + struct request_queue *q = data; + struct blk_mq_tag_set *set = q->tag_set; + + seq_printf(m, "nr_hw_queues=%u\n", set->nr_hw_queues); + seq_printf(m, "queue_depth=%u\n", set->queue_depth); + seq_printf(m, "reserved_tags=%u\n", set->reserved_tags); +
seq_printf(m, "cmd_size=%u\n", set->cmd_size); + seq_printf(m, "numa_node=%d\n", set->numa_node); + seq_printf(m, "timeout=%u\n", set->timeout); + seq_printf(m, "flags=%u\n", set->flags); + seq_printf(m, "active_queues_shared_sbitmap=%d\n", + atomic_read(&set->active_queues_shared_sbitmap)); + seq_printf(m, "pending_queues_shared_sbitmap=%d\n", + atomic_read(&set->pending_queues_shared_sbitmap)); + + return 0; +} + +static int queue_dtag_wait_time_show(void *data, struct seq_file *m) +{ + struct request_queue *q = data; + unsigned int time = 0; + + if (test_bit(QUEUE_FLAG_HCTX_WAIT, &q->queue_flags)) + time = jiffies_to_msecs(jiffies - READ_ONCE(q->dtag_wait_time)); + + seq_printf(m, "%u\n", time); + + return 0; +} + static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = { { "poll_stat", 0400, queue_poll_stat_show }, { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops }, @@ -211,6 +244,8 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = { { "state", 0600, queue_state_show, queue_state_write }, { "write_hints", 0600, queue_write_hint_show, queue_write_hint_store }, { "zone_wlock", 0400, queue_zone_wlock_show, NULL }, + { "tag_set", 0400, queue_tag_set_show, NULL }, + { "dtag_wait_time_ms", 0400, queue_dtag_wait_time_show, NULL }, { }, }; @@ -453,6 +488,8 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m, seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags); seq_printf(m, "active_queues=%d\n", atomic_read(&tags->active_queues)); + seq_printf(m, "pending_queues=%d\n", + atomic_read(&tags->pending_queues)); seq_puts(m, "\nbitmap_tags:\n"); sbitmap_queue_show(tags->bitmap_tags, m); @@ -628,6 +665,20 @@ static int hctx_dispatch_busy_show(void *data, struct seq_file *m) return 0; } +static int hctx_dtag_wait_time_show(void *data, struct seq_file *m) +{ + struct blk_mq_hw_ctx *hctx = data; + unsigned int time = 0; + + if (test_bit(BLK_MQ_S_DTAG_WAIT, &hctx->state)) + time = jiffies_to_msecs(jiffies - + READ_ONCE(hctx->dtag_wait_time)); + + seq_printf(m, "%u\n", time); + + return 0; +} + #define CTX_RQ_SEQ_OPS(name, type) \ static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \ __acquires(&ctx->lock) \ @@ -798,6 +849,7 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = { {"active", 0400, hctx_active_show}, {"dispatch_busy", 0400, hctx_dispatch_busy_show}, {"type", 0400, hctx_type_show}, + {"dtag_wait_time_ms", 0400, hctx_dtag_wait_time_show}, {}, }; diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 606bef13f1c210d88dd7c970b3670125cf63c6d8..0aa2069d95d5e470cbee7b14d44faf8b0c934504 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -194,11 +194,18 @@ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) { + unsigned long end = jiffies + HZ; int ret; do { ret = __blk_mq_do_dispatch_sched(hctx); - } while (ret == 1); + if (ret != 1) + break; + if (need_resched() || time_is_before_jiffies(end)) { + blk_mq_delay_run_hw_queue(hctx, 0); + break; + } + } while (1); return ret; } @@ -512,18 +519,16 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q, unsigned int hctx_idx) { struct blk_mq_tag_set *set = q->tag_set; - /* Clear HCTX_SHARED so tags are init'ed */ - unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED; int ret; hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, - set->reserved_tags, flags); + set->reserved_tags, set->flags); if (!hctx->sched_tags) 
return -ENOMEM; ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); if (ret) { - blk_mq_free_rq_map(hctx->sched_tags, flags); + blk_mq_free_rq_map(hctx->sched_tags, set->flags); hctx->sched_tags = NULL; } @@ -537,16 +542,53 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q) int i; queue_for_each_hw_ctx(q, hctx, i) { - /* Clear HCTX_SHARED so tags are freed */ - unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED; - if (hctx->sched_tags) { - blk_mq_free_rq_map(hctx->sched_tags, flags); + blk_mq_free_rq_map(hctx->sched_tags, hctx->flags); hctx->sched_tags = NULL; } } } +static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue) +{ + struct blk_mq_tag_set *set = queue->tag_set; + int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); + struct blk_mq_hw_ctx *hctx; + int ret, i; + struct request_queue_wrapper *q_wrapper = queue_to_wrapper(queue); + + /* + * Set initial depth at max so that we don't need to reallocate for + * updating nr_requests. + */ + ret = blk_mq_init_bitmaps(&q_wrapper->sched_bitmap_tags, + &q_wrapper->sched_breserved_tags, + MAX_SCHED_RQ, set->reserved_tags, + set->numa_node, alloc_policy); + if (ret) + return ret; + + queue_for_each_hw_ctx(queue, hctx, i) { + hctx->sched_tags->bitmap_tags = + &q_wrapper->sched_bitmap_tags; + hctx->sched_tags->breserved_tags = + &q_wrapper->sched_breserved_tags; + } + + sbitmap_queue_resize(&q_wrapper->sched_bitmap_tags, + queue->nr_requests - set->reserved_tags); + + return 0; +} + +static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue) +{ + struct request_queue_wrapper *q_wrapper = queue_to_wrapper(queue); + + sbitmap_queue_free(&q_wrapper->sched_bitmap_tags); + sbitmap_queue_free(&q_wrapper->sched_breserved_tags); +} + int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) { struct blk_mq_hw_ctx *hctx; @@ -571,12 +613,18 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_sched_alloc_tags(q, hctx, i); if (ret) - goto err; + goto err_free_tags; + } + + if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { + ret = blk_mq_init_sched_shared_sbitmap(q); + if (ret) + goto err_free_tags; } ret = e->ops.init_sched(q, e); if (ret) - goto err; + goto err_free_sbitmap; blk_mq_debugfs_register_sched(q); @@ -596,7 +644,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) return 0; -err: +err_free_sbitmap: + if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) + blk_mq_exit_sched_shared_sbitmap(q); +err_free_tags: blk_mq_sched_free_requests(q); blk_mq_sched_tags_teardown(q); q->elevator = NULL; @@ -622,6 +673,7 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) { struct blk_mq_hw_ctx *hctx; unsigned int i; + unsigned int flags = 0; queue_for_each_hw_ctx(q, hctx, i) { blk_mq_debugfs_unregister_sched_hctx(hctx); @@ -629,10 +681,13 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) e->type->ops.exit_hctx(hctx, i); hctx->sched_data = NULL; } + flags = hctx->flags; } blk_mq_debugfs_unregister_sched(q); if (e->type->ops.exit_sched) e->type->ops.exit_sched(e); blk_mq_sched_tags_teardown(q); + if (blk_mq_is_sbitmap_shared(flags)) + blk_mq_exit_sched_shared_sbitmap(q); q->elevator = NULL; } diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 15f3d611db1048760d71710f290c69e60643d7ce..b228ee0674912b65a6e1b4009f22741b55fc40d4 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -5,6 +5,8 @@ #include 
"blk-mq.h" #include "blk-mq-tag.h" +#define MAX_SCHED_RQ (16 * BLKDEV_MAX_RQ) + void blk_mq_sched_assign_ioc(struct request *rq); void blk_mq_sched_request_inserted(struct request *rq); diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 16ad9e65661086f982ce7efa437825aa9b731c39..98e4edd03ad4c607998307d375941fd7203c179a 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -13,8 +13,11 @@ #include #include "blk.h" #include "blk-mq.h" +#include "blk-mq-sched.h" #include "blk-mq-tag.h" +#define BLK_MQ_DTAG_WAIT_EXPIRE (5 * HZ) + /* * If a previously inactive queue goes active, bump the active user count. * We need to do this before try to allocate driver tag, then even if fail @@ -73,6 +76,64 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) blk_mq_tag_wakeup_all(tags, false); } +void __blk_mq_dtag_busy(struct blk_mq_hw_ctx *hctx) +{ + if (blk_mq_is_sbitmap_shared(hctx->flags)) { + struct request_queue *q = hctx->queue; + struct blk_mq_tag_set *set = q->tag_set; + + if (!test_bit(QUEUE_FLAG_HCTX_WAIT, &q->queue_flags) && + !test_and_set_bit(QUEUE_FLAG_HCTX_WAIT, &q->queue_flags)) { + WRITE_ONCE(q->dtag_wait_time, jiffies); + atomic_inc(&set->pending_queues_shared_sbitmap); + } + } else { + if (!test_bit(BLK_MQ_S_DTAG_WAIT, &hctx->state) && + !test_and_set_bit(BLK_MQ_S_DTAG_WAIT, &hctx->state)) { + WRITE_ONCE(hctx->dtag_wait_time, jiffies); + atomic_inc(&hctx->tags->pending_queues); + } + } +} + +void __blk_mq_dtag_idle(struct blk_mq_hw_ctx *hctx, bool force) +{ + struct blk_mq_tags *tags = hctx->tags; + struct request_queue *q = hctx->queue; + struct blk_mq_tag_set *set = q->tag_set; + + if (blk_mq_is_sbitmap_shared(hctx->flags)) { + if (!test_bit(QUEUE_FLAG_HCTX_WAIT, &q->queue_flags)) + return; + + if (!force && time_before(jiffies, + READ_ONCE(q->dtag_wait_time) + + BLK_MQ_DTAG_WAIT_EXPIRE)) + return; + + if (!test_and_clear_bit(QUEUE_FLAG_HCTX_WAIT, + &q->queue_flags)) + return; + + WRITE_ONCE(q->dtag_wait_time, jiffies); + atomic_dec(&set->pending_queues_shared_sbitmap); + } else { + if (!test_bit(BLK_MQ_S_DTAG_WAIT, &hctx->state)) + return; + + if (!force && time_before(jiffies, + READ_ONCE(hctx->dtag_wait_time) + + BLK_MQ_DTAG_WAIT_EXPIRE)) + return; + + if (!test_and_clear_bit(BLK_MQ_S_DTAG_WAIT, &hctx->state)) + return; + + WRITE_ONCE(hctx->dtag_wait_time, jiffies); + atomic_dec(&tags->pending_queues); + } +} + static int __blk_mq_get_tag(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt) { @@ -111,8 +172,11 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) if (tag != BLK_MQ_NO_TAG) goto found_tag; - if (data->flags & BLK_MQ_REQ_NOWAIT) + if (data->flags & BLK_MQ_REQ_NOWAIT) { + if (!data->q->elevator) + blk_mq_dtag_busy(data->hctx); return BLK_MQ_NO_TAG; + } ws = bt_wait_ptr(bt, data->hctx); do { @@ -139,6 +203,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) if (tag != BLK_MQ_NO_TAG) break; + if (!data->q->elevator) + blk_mq_dtag_busy(data->hctx); bt_prev = bt; io_schedule(); @@ -167,6 +233,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) sbitmap_finish_wait(bt, ws, &wait); found_tag: + if (!data->q->elevator) + blk_mq_dtag_idle(data->hctx, false); /* * Give up this allocation if the hctx is inactive. The caller will * retry on an active hctx. 
@@ -471,39 +539,54 @@ static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, node); } -static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, - int node, int alloc_policy) +int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, + struct sbitmap_queue *breserved_tags, + unsigned int queue_depth, unsigned int reserved, + int node, int alloc_policy) { - unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; + unsigned int depth = queue_depth - reserved; bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR; - if (bt_alloc(&tags->__bitmap_tags, depth, round_robin, node)) + if (bt_alloc(bitmap_tags, depth, round_robin, node)) return -ENOMEM; - if (bt_alloc(&tags->__breserved_tags, tags->nr_reserved_tags, - round_robin, node)) + if (bt_alloc(breserved_tags, reserved, round_robin, node)) goto free_bitmap_tags; + return 0; + +free_bitmap_tags: + sbitmap_queue_free(bitmap_tags); + return -ENOMEM; +} + +static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, + int node, int alloc_policy) +{ + int ret; + + ret = blk_mq_init_bitmaps(&tags->__bitmap_tags, + &tags->__breserved_tags, + tags->nr_tags, tags->nr_reserved_tags, + node, alloc_policy); + if (ret) + return ret; + tags->bitmap_tags = &tags->__bitmap_tags; tags->breserved_tags = &tags->__breserved_tags; return 0; -free_bitmap_tags: - sbitmap_queue_free(&tags->__bitmap_tags); - return -ENOMEM; } -int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags) +int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set) { - unsigned int depth = set->queue_depth - set->reserved_tags; int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); - bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR; - int i, node = set->numa_node; + int i, ret; - if (bt_alloc(&set->__bitmap_tags, depth, round_robin, node)) - return -ENOMEM; - if (bt_alloc(&set->__breserved_tags, set->reserved_tags, - round_robin, node)) - goto free_bitmap_tags; + ret = blk_mq_init_bitmaps(&set->__bitmap_tags, &set->__breserved_tags, + set->queue_depth, set->reserved_tags, + set->numa_node, alloc_policy); + if (ret) + return ret; for (i = 0; i < set->nr_hw_queues; i++) { struct blk_mq_tags *tags = set->tags[i]; @@ -513,9 +596,6 @@ int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int flags) } return 0; -free_bitmap_tags: - sbitmap_queue_free(&set->__bitmap_tags); - return -ENOMEM; } void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set) @@ -578,8 +658,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, */ if (tdepth > tags->nr_tags) { struct blk_mq_tag_set *set = hctx->queue->tag_set; - /* Only sched tags can grow, so clear HCTX_SHARED flag */ - unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED; struct blk_mq_tags *new; bool ret; @@ -590,21 +668,21 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, * We need some sort of upper limit, set it high enough that * no valid use cases should require more. 
*/ - if (tdepth > 16 * BLKDEV_MAX_RQ) + if (tdepth > MAX_SCHED_RQ) return -EINVAL; new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, - tags->nr_reserved_tags, flags); + tags->nr_reserved_tags, set->flags); if (!new) return -ENOMEM; ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth); if (ret) { - blk_mq_free_rq_map(new, flags); + blk_mq_free_rq_map(new, set->flags); return -ENOMEM; } blk_mq_free_rqs(set, *tagsptr, hctx->queue_num); - blk_mq_free_rq_map(*tagsptr, flags); + blk_mq_free_rq_map(*tagsptr, set->flags); *tagsptr = new; } else { /* diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 674b0d80f4d27d09e422f991ab2ab1d428108da6..baa36e5f495d447db1580925207abf414859afcc 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -4,6 +4,7 @@ #include +extern bool mq_unfair_dtag; /* * Tag address space map. */ @@ -12,6 +13,11 @@ struct blk_mq_tags { unsigned int nr_reserved_tags; atomic_t active_queues; + /* + * If multiple queues share a tag set, pending_queues records the + * number of queues that cannot get a driver tag. + */ + atomic_t pending_queues; struct sbitmap_queue *bitmap_tags; struct sbitmap_queue *breserved_tags; @@ -39,11 +45,14 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, unsigned int flags); extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags); +extern int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, + struct sbitmap_queue *breserved_tags, + unsigned int queue_depth, + unsigned int reserved, + int node, int alloc_policy); -extern int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set, - unsigned int flags); +extern int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set); extern void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set); - extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag); @@ -73,8 +82,11 @@ enum { BLK_MQ_TAG_MAX = BLK_MQ_NO_TAG - 1, }; -extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *); -extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *); +extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx); +extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx); +extern void __blk_mq_dtag_busy(struct blk_mq_hw_ctx *hctx); +extern void __blk_mq_dtag_idle(struct blk_mq_hw_ctx *hctx, bool force); + static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) { @@ -92,6 +104,22 @@ static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) __blk_mq_tag_idle(hctx); } +static inline void blk_mq_dtag_busy(struct blk_mq_hw_ctx *hctx) +{ + if (!(mq_unfair_dtag && (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))) + return; + + __blk_mq_dtag_busy(hctx); +} + +static inline void blk_mq_dtag_idle(struct blk_mq_hw_ctx *hctx, bool force) +{ + if (!(mq_unfair_dtag && (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))) + return; + + __blk_mq_dtag_idle(hctx, force); +} + static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags, unsigned int tag) { diff --git a/block/blk-mq.c b/block/blk-mq.c index b8cf684030dce724f1da77b71e05932c4b2d2e3d..1941ffc4db85c2819047fd95cb429878f9a38a5e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -41,6 +41,9 @@ #include "blk-mq-sched.h" #include "blk-rq-qos.h" +bool mq_unfair_dtag = true; +module_param_named(unfair_dtag, mq_unfair_dtag, bool, 0444); + static DEFINE_PER_CPU(struct list_head, blk_cpu_done); static void blk_mq_poll_stats_start(struct request_queue *q); @@ -99,6 +102,61 @@ struct mq_inflight {
unsigned int inflight[2]; }; +#ifdef CONFIG_64BIT +static bool blk_mq_check_inflight_with_stat(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, + bool reserved) +{ + struct mq_inflight *mi = priv; + + if ((!mi->part->partno || rq->part == mi->part) && + blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) { + u64 stat_time; + struct request_wrapper *rq_wrapper; + + mi->inflight[rq_data_dir(rq)]++; + if (!rq->part) + return true; + + /* + * If the request was started after 'part->stat_time' was set, + * don't update 'nsecs' here. + */ + if (rq->part->stat_time <= rq->start_time_ns) + return true; + + rq_wrapper = request_to_wrapper(rq); + stat_time = READ_ONCE(rq_wrapper->stat_time_ns); + /* + * This might fail if 'stat_time_ns' is updated in + * blk_account_io_done(). + */ + if (likely(rq->part->stat_time > stat_time && + cmpxchg64(&rq_wrapper->stat_time_ns, stat_time, + rq->part->stat_time) == stat_time)) { + int sgrp = op_stat_group(req_op(rq)); + u64 duration = stat_time ? + rq->part->stat_time - stat_time : + rq->part->stat_time - rq->start_time_ns; + + part_stat_add(rq->part, nsecs[sgrp], duration); + } + } + + return true; +} + +unsigned int blk_mq_in_flight_with_stat(struct request_queue *q, + struct hd_struct *part) +{ + struct mq_inflight mi = { .part = part }; + + blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_with_stat, &mi); + + return mi.inflight[0] + mi.inflight[1]; +} +#endif + static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) @@ -321,6 +379,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, #ifdef CONFIG_BLK_RQ_ALLOC_TIME rq->alloc_time_ns = alloc_time_ns; #endif + request_to_wrapper(rq)->stat_time_ns = 0; if (blk_mq_need_time_stamp(rq)) rq->start_time_ns = ktime_get_ns(); else @@ -535,8 +594,13 @@ void blk_mq_free_request(struct request *rq) } ctx->rq_completed[rq_is_sync(rq)]++; - if (rq->rq_flags & RQF_MQ_INFLIGHT) + if (rq->rq_flags & RQF_MQ_INFLIGHT) { __blk_mq_dec_active_requests(hctx); + if (mq_unfair_dtag && !__blk_mq_active_requests(hctx)) { + blk_mq_tag_idle(hctx); + blk_mq_dtag_idle(hctx, true); + } + } if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) laptop_io_completion(q->backing_dev_info); @@ -1003,8 +1067,10 @@ static void blk_mq_timeout_work(struct work_struct *work) */ queue_for_each_hw_ctx(q, hctx, i) { /* the hctx may be unmapped, so check it here */ - if (blk_mq_hw_queue_mapped(hctx)) + if (blk_mq_hw_queue_mapped(hctx)) { blk_mq_tag_idle(hctx); + blk_mq_dtag_idle(hctx, true); + } } } blk_queue_exit(q); @@ -1109,9 +1175,12 @@ static bool __blk_mq_get_driver_tag(struct request *rq) } tag = __sbitmap_queue_get(bt); - if (tag == BLK_MQ_NO_TAG) + if (tag == BLK_MQ_NO_TAG) { + blk_mq_dtag_busy(rq->mq_hctx); return false; + } + + blk_mq_dtag_idle(rq->mq_hctx, false); rq->tag = tag + tag_offset; return true; } @@ -1614,8 +1683,16 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, put_cpu(); } + /* + * No need to queue work if there is no IO; this also avoids a race + * with blk_cleanup_queue().
+ */ + if (!percpu_ref_tryget(&hctx->queue->q_usage_counter)) + return; + kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, msecs_to_jiffies(msecs)); + percpu_ref_put(&hctx->queue->q_usage_counter); } /** @@ -1663,6 +1740,42 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) } EXPORT_SYMBOL(blk_mq_run_hw_queue); +/* + * Is the request queue handled by an IO scheduler that does not respect + * hardware queues when dispatching? + */ +static bool blk_mq_has_sqsched(struct request_queue *q) +{ + struct elevator_queue *e = q->elevator; + + if (e && e->type->ops.dispatch_request && + !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE)) + return true; + return false; +} + +/* + * Return the preferred queue to dispatch from (if any) for a non-mq-aware + * IO scheduler. + */ +static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + + /* + * If the IO scheduler does not respect hardware queues when + * dispatching, we just don't bother with multiple HW queues and + * dispatch from hctx for the current CPU since running multiple queues + * just causes lock contention inside the scheduler and pointless cache + * bouncing. + */ + hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, + raw_smp_processor_id()); + if (!blk_mq_hctx_stopped(hctx)) + return hctx; + return NULL; +} + /** * blk_mq_run_hw_queues - Run all hardware queues in a request queue. * @q: Pointer to the request queue to run. @@ -1670,14 +1783,23 @@ EXPORT_SYMBOL(blk_mq_run_hw_queue); */ void blk_mq_run_hw_queues(struct request_queue *q, bool async) { - struct blk_mq_hw_ctx *hctx; + struct blk_mq_hw_ctx *hctx, *sq_hctx; int i; + sq_hctx = NULL; + if (blk_mq_has_sqsched(q)) + sq_hctx = blk_mq_get_sq_hctx(q); queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_hctx_stopped(hctx)) continue; - - blk_mq_run_hw_queue(hctx, async); + /* + * Dispatch from this hctx if there's no hctx preferred by the + * IO scheduler or if it has requests that bypass the + * scheduler. + */ + if (!sq_hctx || sq_hctx == hctx || + !list_empty_careful(&hctx->dispatch)) + blk_mq_run_hw_queue(hctx, async); } } EXPORT_SYMBOL(blk_mq_run_hw_queues); @@ -1689,14 +1811,23 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues); */ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) { - struct blk_mq_hw_ctx *hctx; + struct blk_mq_hw_ctx *hctx, *sq_hctx; int i; + sq_hctx = NULL; + if (blk_mq_has_sqsched(q)) + sq_hctx = blk_mq_get_sq_hctx(q); queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_hctx_stopped(hctx)) continue; - - blk_mq_delay_run_hw_queue(hctx, msecs); + /* + * Dispatch from this hctx if there's no hctx preferred by the + * IO scheduler or if it has requests that bypass the + * scheduler.
+ */ + if (!sq_hctx || sq_hctx == hctx || + !list_empty_careful(&hctx->dispatch)) + blk_mq_delay_run_hw_queue(hctx, msecs); } } EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); @@ -2435,7 +2566,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, * rq_size is the size of the request plus driver payload, rounded * to the cacheline size */ - rq_size = round_up(sizeof(struct request) + set->cmd_size, + rq_size = round_up(sizeof(struct request_wrapper) + set->cmd_size, cache_line_size()); left = rq_size * depth; @@ -2657,11 +2788,14 @@ static void blk_mq_exit_hctx(struct request_queue *q, { struct request *flush_rq = hctx->fq->flush_rq; - if (blk_mq_hw_queue_mapped(hctx)) + if (blk_mq_hw_queue_mapped(hctx)) { blk_mq_tag_idle(hctx); + blk_mq_dtag_idle(hctx, true); + } - blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], - set->queue_depth, flush_rq); + if (blk_queue_init_done(q)) + blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], + set->queue_depth, flush_rq); if (set->ops->exit_request) set->ops->exit_request(set, flush_rq, hctx_idx); @@ -2748,7 +2882,6 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, goto free_hctx; atomic_set(&hctx->nr_active, 0); - atomic_set(&hctx->elevator_queued, 0); if (node == NUMA_NO_NODE) node = set->numa_node; hctx->numa_node = node; @@ -2758,6 +2891,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, INIT_LIST_HEAD(&hctx->dispatch); hctx->queue = q; hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; + hctx->dtag_wait_time = jiffies; INIT_LIST_HEAD(&hctx->hctx_list); @@ -2783,12 +2917,16 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, if (!hctx->fq) goto free_bitmap; - if (hctx->flags & BLK_MQ_F_BLOCKING) - init_srcu_struct(hctx->srcu); + if (hctx->flags & BLK_MQ_F_BLOCKING) { + if (init_srcu_struct(hctx->srcu) != 0) + goto free_flush_queue; + } blk_mq_hctx_kobj_init(hctx); return hctx; + free_flush_queue: + blk_free_flush_queue(hctx->fq); free_bitmap: sbitmap_free(&hctx->ctx_map); free_ctxs: @@ -2980,6 +3118,7 @@ static void queue_set_hctx_shared(struct request_queue *q, bool shared) hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; } else { blk_mq_tag_idle(hctx); + blk_mq_dtag_idle(hctx, true); hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; } } @@ -3206,7 +3345,15 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, if (hctxs) memcpy(new_hctxs, hctxs, q->nr_hw_queues * sizeof(*hctxs)); - q->queue_hw_ctx = new_hctxs; + + rcu_assign_pointer(q->queue_hw_ctx, new_hctxs); + /* + * Make sure concurrent readers of the old queue_hw_ctx in other + * contexts cannot trigger a use-after-free. At start-up time there + * are no concurrent readers yet, so no RCU synchronization is needed.
+ */ + if (hctxs) + synchronize_rcu(); kfree(hctxs); hctxs = new_hctxs; } @@ -3307,6 +3454,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, spin_lock_init(&q->requeue_lock); q->nr_requests = set->queue_depth; + q->dtag_wait_time = jiffies; /* * Default to classic polling @@ -3536,8 +3684,9 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) if (blk_mq_is_sbitmap_shared(set->flags)) { atomic_set(&set->active_queues_shared_sbitmap, 0); + atomic_set(&set->pending_queues_shared_sbitmap, 0); - if (blk_mq_init_shared_sbitmap(set, set->flags)) { + if (blk_mq_init_shared_sbitmap(set)) { ret = -ENOMEM; goto out_free_mq_rq_maps; } @@ -3587,6 +3736,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) struct blk_mq_tag_set *set = q->tag_set; struct blk_mq_hw_ctx *hctx; int i, ret; + struct request_queue_wrapper *q_wrapper = queue_to_wrapper(q); if (!set) return -EINVAL; @@ -3613,15 +3763,24 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) } else { ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, nr, true); + if (blk_mq_is_sbitmap_shared(set->flags)) { + hctx->sched_tags->bitmap_tags = + &q_wrapper->sched_bitmap_tags; + hctx->sched_tags->breserved_tags = + &q_wrapper->sched_breserved_tags; + } } if (ret) break; if (q->elevator && q->elevator->type->ops.depth_updated) q->elevator->type->ops.depth_updated(hctx); } - - if (!ret) + if (!ret) { q->nr_requests = nr; + if (q->elevator && blk_mq_is_sbitmap_shared(set->flags)) + sbitmap_queue_resize(&q_wrapper->sched_bitmap_tags, + nr - set->reserved_tags); + } blk_mq_unquiesce_queue(q); blk_mq_unfreeze_queue(q); diff --git a/block/blk-mq.h b/block/blk-mq.h index 7f3194657dffbd150fd33fc72c316e1d47c56f22..ad2d74f887f20e7909e1a1ae0e1c9f619a555acc 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -187,6 +187,10 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx) unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part); void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, unsigned int inflight[2]); +#ifdef CONFIG_64BIT +unsigned int blk_mq_in_flight_with_stat(struct request_queue *q, + struct hd_struct *part); +#endif static inline void blk_mq_put_dispatch_budget(struct request_queue *q) { @@ -316,10 +320,15 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, struct request_queue *q = hctx->queue; struct blk_mq_tag_set *set = q->tag_set; + if (mq_unfair_dtag && + !atomic_read(&set->pending_queues_shared_sbitmap)) + return true; if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) return true; users = atomic_read(&set->active_queues_shared_sbitmap); } else { + if (mq_unfair_dtag && !atomic_read(&hctx->tags->pending_queues)) + return true; if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) return true; users = atomic_read(&hctx->tags->active_queues); diff --git a/block/blk-pm.c b/block/blk-pm.c index 17bd020268d421434a3724ad63552f2df49da0f4..2dad62cc157272e114eb31255026d2d49b30d550 100644 --- a/block/blk-pm.c +++ b/block/blk-pm.c @@ -163,27 +163,19 @@ EXPORT_SYMBOL(blk_pre_runtime_resume); /** * blk_post_runtime_resume - Post runtime resume processing * @q: the queue of the device - * @err: return value of the device's runtime_resume function * * Description: - * Update the queue's runtime status according to the return value of the - * device's runtime_resume function. If the resume was successful, call - * blk_set_runtime_active() to do the real work of restarting the queue. 
+ * For historical reasons, this routine merely calls blk_set_runtime_active() + * to do the real work of restarting the queue. It does this regardless of + * whether the device's runtime-resume succeeded; even if it failed, the + * driver or error handler will need to communicate with the device. * * This function should be called near the end of the device's * runtime_resume callback. */ -void blk_post_runtime_resume(struct request_queue *q, int err) +void blk_post_runtime_resume(struct request_queue *q) { - if (!q->dev) - return; - if (!err) { - blk_set_runtime_active(q); - } else { - spin_lock_irq(&q->queue_lock); - q->rpm_status = RPM_SUSPENDED; - spin_unlock_irq(&q->queue_lock); - } + blk_set_runtime_active(q); } EXPORT_SYMBOL(blk_post_runtime_resume); @@ -201,7 +193,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume); * runtime PM status and re-enable peeking requests from the queue. It * should be called before first request is added to the queue. * - * This function is also called by blk_post_runtime_resume() for successful + * This function is also called by blk_post_runtime_resume() for * runtime resumes. It does everything necessary to restart the queue. */ void blk_set_runtime_active(struct request_queue *q) diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 66765740902bb6d911719c1fbe07393bc26a692f..0a4fcbda8ab45b9f235995b52b1ef5271f6a0112 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -726,7 +726,7 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head) { struct request_queue *q = container_of(rcu_head, struct request_queue, rcu_head); - kmem_cache_free(blk_requestq_cachep, q); + kmem_cache_free(blk_requestq_cachep, queue_to_wrapper(q)); } /* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */ @@ -790,16 +790,6 @@ static void blk_release_queue(struct kobject *kobj) blk_free_queue_stats(q->stats); - if (queue_is_mq(q)) { - struct blk_mq_hw_ctx *hctx; - int i; - - cancel_delayed_work_sync(&q->requeue_work); - - queue_for_each_hw_ctx(q, hctx, i) - cancel_delayed_work_sync(&hctx->run_work); - } - blk_exit_queue(q); blk_queue_free_zone_bitmaps(q); @@ -965,15 +955,17 @@ void blk_unregister_queue(struct gendisk *disk) */ if (queue_is_mq(q)) blk_mq_unregister_dev(disk_to_dev(disk), q); - - kobject_uevent(&q->kobj, KOBJ_REMOVE); - kobject_del(&q->kobj); blk_trace_remove_sysfs(disk_to_dev(disk)); mutex_lock(&q->sysfs_lock); if (q->elevator) elv_unregister_queue(q); mutex_unlock(&q->sysfs_lock); + + /* Now that we've deleted all child objects, we can delete the queue. */ + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); + mutex_unlock(&q->sysfs_dir_lock); kobject_put(&disk_to_dev(disk)->kobj); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 942776a10599bd6481e6dc0f7088ee728455bef4..0427c9c63e811bbf2059fd3faf006d987683e756 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1421,7 +1421,57 @@ static int tg_print_conf_uint(struct seq_file *sf, void *v) return 0; } -static void tg_conf_updated(struct throtl_grp *tg, bool global) +static u64 throtl_update_bytes_disp(u64 dispatched, u64 new_limit, + u64 old_limit) +{ + if (new_limit == old_limit) + return dispatched; + + if (!dispatched) + return 0; + + /* + * If the multiplication would overflow, just return 0. This only + * lets bios be dispatched earlier.
+ */ + if (div64_u64(U64_MAX, dispatched) < new_limit) + return 0; + + dispatched *= new_limit; + return div64_u64(dispatched, old_limit); +} + +static u32 throtl_update_io_disp(u32 dispatched, u32 new_limit, u32 old_limit) +{ + if (new_limit == old_limit) + return dispatched; + + if (!dispatched) + return 0; + /* + * If the multiplication would overflow, just return 0. This only + * lets bios be dispatched earlier. + */ + if (UINT_MAX / dispatched < new_limit) + return 0; + + dispatched *= new_limit; + return dispatched / old_limit; +} + +static void throtl_update_slice(struct throtl_grp *tg, u64 *old_limits) +{ + tg->bytes_disp[READ] = throtl_update_bytes_disp(tg->bytes_disp[READ], + tg_bps_limit(tg, READ), old_limits[0]); + tg->bytes_disp[WRITE] = throtl_update_bytes_disp(tg->bytes_disp[WRITE], + tg_bps_limit(tg, WRITE), old_limits[1]); + tg->io_disp[READ] = throtl_update_io_disp(tg->io_disp[READ], + tg_iops_limit(tg, READ), (u32)old_limits[2]); + tg->io_disp[WRITE] = throtl_update_io_disp(tg->io_disp[WRITE], + tg_iops_limit(tg, WRITE), (u32)old_limits[3]); +} + +static void tg_conf_updated(struct throtl_grp *tg, u64 *old_limits, bool global) { struct throtl_service_queue *sq = &tg->service_queue; struct cgroup_subsys_state *pos_css; @@ -1460,16 +1510,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global) parent_tg->latency_target); } - /* - * We're already holding queue_lock and know @tg is valid. Let's - * apply the new config directly. - * - * Restart the slices for both READ and WRITES. It might happen - * that a group's limit are dropped suddenly and we don't want to - * account recently dispatched IO with new low rate. - */ - throtl_start_new_slice(tg, READ); - throtl_start_new_slice(tg, WRITE); + throtl_update_slice(tg, old_limits); if (tg->flags & THROTL_TG_PENDING) { tg_update_disptime(tg); @@ -1502,6 +1543,14 @@ static inline int throtl_restart_syscall_when_busy(int errno) return ret; } +static void tg_get_limits(struct throtl_grp *tg, u64 *limits) +{ + limits[0] = tg_bps_limit(tg, READ); + limits[1] = tg_bps_limit(tg, WRITE); + limits[2] = tg_iops_limit(tg, READ); + limits[3] = tg_iops_limit(tg, WRITE); +} + static ssize_t tg_set_conf(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off, bool is_u64) { @@ -1510,6 +1559,7 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of, struct throtl_grp *tg; int ret; u64 v; + u64 old_limits[4]; ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx); if (ret) @@ -1526,13 +1576,14 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of, v = U64_MAX; tg = blkg_to_tg(ctx.blkg); + tg_get_limits(tg, old_limits); if (is_u64) *(u64 *)((void *)tg + of_cft(of)->private) = v; else *(unsigned int *)((void *)tg + of_cft(of)->private) = v; - tg_conf_updated(tg, false); + tg_conf_updated(tg, old_limits, false); ret = 0; out_finish: blkg_conf_finish(&ctx); @@ -1703,6 +1754,7 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of, struct blkg_conf_ctx ctx; struct throtl_grp *tg; u64 v[4]; + u64 old_limits[4]; unsigned long idle_time; unsigned long latency_time; int ret; @@ -1721,6 +1773,7 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of, v[1] = tg->bps_conf[WRITE][index]; v[2] = tg->iops_conf[READ][index]; v[3] = tg->iops_conf[WRITE][index]; + tg_get_limits(tg, old_limits); idle_time = tg->idletime_threshold_conf; latency_time = tg->latency_target_conf; @@ -1807,7 +1860,7 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of, tg->td->limit_index = LIMIT_LOW; } else tg->td->limit_index =
LIMIT_MAX; - tg_conf_updated(tg, index == LIMIT_LOW && + tg_conf_updated(tg, old_limits, index == LIMIT_LOW && tg->td->limit_valid[LIMIT_LOW]); ret = 0; out_finish: @@ -2266,13 +2319,16 @@ bool blk_throtl_bio(struct bio *bio) struct throtl_service_queue *sq; bool rw = bio_data_dir(bio); bool throttled = false; + bool locked = true; struct throtl_data *td = tg->td; rcu_read_lock(); /* see throtl_charge_bio() */ - if (bio_flagged(bio, BIO_THROTTLED)) + if (bio_flagged(bio, BIO_THROTTLED)) { + locked = false; goto out; + } if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) { blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf, @@ -2280,8 +2336,10 @@ bool blk_throtl_bio(struct bio *bio) blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1); } - if (!tg->has_rules[rw]) + if (!tg->has_rules[rw]) { + locked = false; goto out; + } spin_lock_irq(&q->queue_lock); @@ -2336,7 +2394,7 @@ bool blk_throtl_bio(struct bio *bio) sq = sq->parent_sq; tg = sq_to_tg(sq); if (!tg) - goto out_unlock; + goto out; } /* out-of-limit, queue to @tg */ @@ -2364,8 +2422,6 @@ bool blk_throtl_bio(struct bio *bio) throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true); } -out_unlock: - spin_unlock_irq(&q->queue_lock); out: bio_set_flag(bio, BIO_THROTTLED); @@ -2373,6 +2429,9 @@ bool blk_throtl_bio(struct bio *bio) if (throttled || !td->track_bio_latency) bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY; #endif + if (locked) + spin_unlock_irq(&q->queue_lock); + rcu_read_unlock(); return throttled; } diff --git a/block/blk.h b/block/blk.h index 3165c16725d53496bddae46a391b3bef8b4de4ae..b8948fda06e11b4b0b732c2f538728c40accc767 100644 --- a/block/blk.h +++ b/block/blk.h @@ -28,6 +28,19 @@ struct blk_flush_queue { spinlock_t mq_flush_lock; }; +/* + * Wrapper around struct request_queue that preserves kABI while new + * members are added.
+ */ +struct request_queue_wrapper { + struct request_queue q; + + struct sbitmap_queue sched_bitmap_tags; + struct sbitmap_queue sched_breserved_tags; +}; + +#define queue_to_wrapper(queue) \ + container_of(queue, struct request_queue_wrapper, q) + extern struct kmem_cache *blk_requestq_cachep; extern struct kobj_type blk_queue_ktype; extern struct ida blk_queue_ida; diff --git a/block/genhd.c b/block/genhd.c index f94152e99876b5314f1b0201f142afdffdd8d949..8b37fcfa10d18995c83f14810f479c1cf77f31f0 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1293,25 +1293,56 @@ ssize_t part_size_show(struct device *dev, (unsigned long long)part_nr_sects_read(p)); } +#ifdef CONFIG_64BIT +static void part_set_stat_time(struct hd_struct *hd) +{ + u64 now = ktime_get_ns(); + +again: + hd->stat_time = now; + if (hd->partno) { + hd = &part_to_disk(hd)->part0; + goto again; + } +} +#endif + +static void part_get_stat_info(struct hd_struct *hd, struct disk_stats *stat, + unsigned int *inflight) +{ +#ifdef CONFIG_64BIT + struct request_queue *q = part_to_disk(hd)->queue; + if (queue_is_mq(q)) { + mutex_lock(&part_to_dev(hd)->mutex); + part_stat_lock(); + part_set_stat_time(hd); + *inflight = blk_mq_in_flight_with_stat(q, hd); + part_stat_unlock(); + mutex_unlock(&part_to_dev(hd)->mutex); + } else { + *inflight = part_in_flight(hd); + } +#else + *inflight = part_in_flight(hd); +#endif + if (*inflight) { + part_stat_lock(); + update_io_ticks(hd, jiffies, true); + part_stat_unlock(); + } + + part_stat_read_all(hd, stat); +} + ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); - struct request_queue *q = part_to_disk(p)->queue; struct disk_stats stat; unsigned int inflight; - if (queue_is_mq(q)) - inflight = blk_mq_in_flight(q, p); - else - inflight = part_in_flight(p); + part_get_stat_info(p, &stat, &inflight); - if (inflight) { - part_stat_lock(); - update_io_ticks(p, jiffies, true); - part_stat_unlock(); - } - part_stat_read_all(p, &stat); return sprintf(buf, "%8lu %8lu %8llu %8u " "%8lu %8lu %8llu %8u " @@ -1628,17 +1659,7 @@ static int diskstats_show(struct seq_file *seqf, void *v) disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0); while ((hd = disk_part_iter_next(&piter))) { - if (queue_is_mq(gp->queue)) - inflight = blk_mq_in_flight(gp->queue, hd); - else - inflight = part_in_flight(hd); - - if (inflight) { - part_stat_lock(); - update_io_ticks(hd, jiffies, true); - part_stat_unlock(); - } - part_stat_read_all(hd, &stat); + part_get_stat_info(hd, &stat, &inflight); seq_printf(seqf, "%4d %7d %s " "%lu %lu %lu %u " "%lu %lu %lu %u " diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index d2648356e430070fb4c81f9a2e4005638c2539f0..448ae410f5105f15656783ad8fcd552a87d38bfb 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -1027,6 +1027,7 @@ static struct elevator_type kyber_sched = { #endif .elevator_attrs = kyber_sched_attrs, .elevator_name = "kyber", + .elevator_features = ELEVATOR_F_MQ_AWARE, .elevator_owner = THIS_MODULE, }; diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 43994cce1eb26ba65f23a0f25da72c96362291ea..42b6e9dbe7c705ca245ed0f9bfd79ec0606a64cd 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -386,8 +386,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) spin_lock(&dd->lock); rq = __dd_dispatch_request(dd); spin_unlock(&dd->lock); - if (rq) - atomic_dec(&rq->mq_hctx->elevator_queued); return rq; } @@ -539,7 +537,6 @@ static void 
dd_insert_requests(struct blk_mq_hw_ctx *hctx, rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); dd_insert_request(hctx, rq, at_head); - atomic_inc(&hctx->elevator_queued); } spin_unlock(&dd->lock); } @@ -586,9 +583,6 @@ static bool dd_has_work(struct blk_mq_hw_ctx *hctx) { struct deadline_data *dd = hctx->queue->elevator->elevator_data; - if (!atomic_read(&hctx->elevator_queued)) - return false; - return !list_empty_careful(&dd->dispatch) || !list_empty_careful(&dd->fifo_list[0]) || !list_empty_careful(&dd->fifo_list[1]); diff --git a/block/partitions/core.c b/block/partitions/core.c index 569b0ca9f6e1a115f1b517207d2d8ef4b66c0fe6..8f32f3cd0edebb94bf6a9be98a31b89303b3b218 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -415,6 +415,7 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno, p->nr_sects = len; p->partno = partno; p->read_only = get_disk_ro(disk) | test_bit(partno, disk->user_ro_bitmap); + p->stat_time = 0; if (info) { struct partition_meta_info *pinfo; diff --git a/crypto/Kconfig b/crypto/Kconfig index 9a4878cb0141efb1ef8e75dd4de047b0eed2452c..ef9000c5db4d210e970b6cda6022d2a326f55f2e 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1039,8 +1039,12 @@ config CRYPTO_SHA3 http://keccak.noekeon.org/ config CRYPTO_SM3 + tristate + +config CRYPTO_SM3_GENERIC tristate "SM3 digest algorithm" select CRYPTO_HASH + select CRYPTO_SM3 help SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3). It is part of the Chinese Commercial Cryptography suite. @@ -1049,6 +1053,19 @@ config CRYPTO_SM3 http://www.oscca.gov.cn/UpFile/20101222141857786.pdf https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash +config CRYPTO_SM3_AVX_X86_64 + tristate "SM3 digest algorithm (x86_64/AVX)" + depends on X86 && 64BIT + select CRYPTO_HASH + select CRYPTO_SM3 + help + SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3). + It is part of the Chinese Commercial Cryptography suite. This is + SM3 optimized implementation using Advanced Vector Extensions (AVX) + when available. + + If unsure, say N. + config CRYPTO_STREEBOG tristate "Streebog Hash Function" select CRYPTO_HASH @@ -1617,9 +1634,12 @@ config CRYPTO_SERPENT_AVX2_X86_64 config CRYPTO_SM4 + tristate + +config CRYPTO_SM4_GENERIC tristate "SM4 cipher algorithm" select CRYPTO_ALGAPI - select CRYPTO_LIB_SM4 + select CRYPTO_SM4 help SM4 cipher algorithms (OSCCA GB/T 32907-2016). @@ -1648,7 +1668,7 @@ config CRYPTO_SM4_AESNI_AVX_X86_64 select CRYPTO_SKCIPHER select CRYPTO_SIMD select CRYPTO_ALGAPI - select CRYPTO_LIB_SM4 + select CRYPTO_SM4 help SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX). @@ -1669,7 +1689,7 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64 select CRYPTO_SKCIPHER select CRYPTO_SIMD select CRYPTO_ALGAPI - select CRYPTO_LIB_SM4 + select CRYPTO_SM4 select CRYPTO_SM4_AESNI_AVX_X86_64 help SM4 cipher algorithms (OSCCA GB/T 32907-2016) (x86_64/AES-NI/AVX2). 
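The Kconfig rework above turns CRYPTO_SM3 and CRYPTO_SM4 into hidden library symbols that the generic shash/cipher templates and the AVX/AES-NI implementations all select. As a hedged sketch of what "select CRYPTO_SM4" buys a caller, assuming the sm4_expandkey()/sm4_crypt_block() helpers and struct sm4_ctx from <crypto/sm4.h> (none of which appear in this hunk):

#include <crypto/sm4.h>
#include <linux/string.h>

/* Encrypt a single 16-byte block with the bare SM4 library, no tfm needed. */
static int sm4_encrypt_one(const u8 *key, const u8 in[SM4_BLOCK_SIZE],
			   u8 out[SM4_BLOCK_SIZE])
{
	struct sm4_ctx ctx;
	int err;

	err = sm4_expandkey(&ctx, key, SM4_KEY_SIZE);	/* derive round keys */
	if (err)
		return err;

	sm4_crypt_block(ctx.rkey_enc, out, in);		/* one ECB block */
	memzero_explicit(&ctx, sizeof(ctx));		/* scrub round keys */
	return 0;
}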
diff --git a/crypto/Makefile b/crypto/Makefile index 982066c6bdfb1438e87c3620a5ee77251ca095a6..58dac31a3367611e0fbe1dd2a377fb71f927a501 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -81,7 +81,8 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o -obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o +obj-$(CONFIG_CRYPTO_SM3) += sm3.o +obj-$(CONFIG_CRYPTO_SM3_GENERIC) += sm3_generic.o obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o obj-$(CONFIG_CRYPTO_WP512) += wp512.o CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 @@ -136,7 +137,8 @@ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356 -obj-$(CONFIG_CRYPTO_SM4) += sm4_generic.o +obj-$(CONFIG_CRYPTO_SM4) += sm4.o +obj-$(CONFIG_CRYPTO_SM4_GENERIC) += sm4_generic.o obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o diff --git a/crypto/algapi.c b/crypto/algapi.c index fdabf2675b63fefed7e93ad994e35e7f7b3844d5..9de27daa98b47b7c8503c5615dd5f4c2fea9fb60 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -1295,3 +1295,4 @@ module_exit(crypto_algapi_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cryptographic algorithms API"); +MODULE_SOFTDEP("pre: cryptomgr"); diff --git a/crypto/api.c b/crypto/api.c index c4eda56cff8917e08c2de3b0ada13df291c785ab..5ffcd3ab4a75309edcd605122d9cf9b1c7b5b978 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -603,4 +603,3 @@ EXPORT_SYMBOL_GPL(crypto_req_done); MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); -MODULE_SOFTDEP("pre: cryptomgr"); diff --git a/crypto/asymmetric_keys/pgp_public_key.c b/crypto/asymmetric_keys/pgp_public_key.c index 27b9efeafc4fc70ae484b25f536a062257aae28d..928029a1343576c5adcf4d0bbe56a864562c2df5 100644 --- a/crypto/asymmetric_keys/pgp_public_key.c +++ b/crypto/asymmetric_keys/pgp_public_key.c @@ -152,8 +152,10 @@ static int pgp_generate_fingerprint(struct pgp_key_data_parse_context *ctx, digest_size = crypto_shash_digestsize(tfm); raw_fingerprint = kmalloc(digest_size, GFP_KERNEL); - if (!raw_fingerprint) + if (!raw_fingerprint) { + ret = -ENOMEM; goto cleanup_hash; + } ret = crypto_shash_final(digest, raw_fingerprint); if (ret < 0) @@ -161,8 +163,10 @@ static int pgp_generate_fingerprint(struct pgp_key_data_parse_context *ctx, ctx->fingerprint_len = digest_size * 2; fingerprint = kmalloc(digest_size * 2 + 1, GFP_KERNEL); - if (!fingerprint) + if (!fingerprint) { + ret = -ENOMEM; goto cleanup_raw_fingerprint; + } offset = digest_size - 8; pr_debug("offset %u/%u\n", offset, digest_size); @@ -279,7 +283,8 @@ static struct asymmetric_key_ids *pgp_key_generate_id( goto error; kids->id[0] = kid; - kids->id[1] = kmemdup(kid, sizeof(kid) + fingerprint_len, GFP_KERNEL); + kids->id[1] = kmemdup(kid, struct_size(kid, data, fingerprint_len), + GFP_KERNEL); if (!kids->id[1]) goto error; @@ -311,6 +316,11 @@ static int pgp_key_parse(struct key_preparsed_payload *prep) if (ret < 0) goto error; + if (!ctx.fingerprint) { + ret = -EINVAL; + goto error; + } + if (ctx.user_id && ctx.user_id_len > 0) { /* Propose a description for the key * 
(user ID without the comment) diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 967329e0a07b7958a60b334d24d89273868cb5ba..6cf6c4552c1138190d1de0efa231ef023e44f05a 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -248,6 +248,9 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen, case OID_sha224: ctx->sinfo->sig->hash_algo = "sha224"; break; + case OID_sm3: + ctx->sinfo->sig->hash_algo = "sm3"; + break; default: printk("Unsupported digest algo: %u\n", ctx->last_oid); return -ENOPKG; @@ -269,6 +272,10 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen, ctx->sinfo->sig->pkey_algo = "rsa"; ctx->sinfo->sig->encoding = "pkcs1"; break; + case OID_SM2_with_SM3: + ctx->sinfo->sig->pkey_algo = "sm2"; + ctx->sinfo->sig->encoding = "raw"; + break; default: printk("Unsupported pkey algo: %u\n", ctx->last_oid); return -ENOPKG; diff --git a/crypto/authenc.c b/crypto/authenc.c index 670bf1a01d00e4c3fd1cda495973a78da9ae6db2..17f674a7cdff5434a213f66f66ca5c6976b565de 100644 --- a/crypto/authenc.c +++ b/crypto/authenc.c @@ -253,7 +253,7 @@ static int crypto_authenc_decrypt_tail(struct aead_request *req, dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); skcipher_request_set_tfm(skreq, ctx->enc); - skcipher_request_set_callback(skreq, aead_request_flags(req), + skcipher_request_set_callback(skreq, flags, req->base.complete, req->base.data); skcipher_request_set_crypt(skreq, src, dst, req->cryptlen - authsize, req->iv); diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 6e147c43fc186c2c540c6a23c8b995c1da413c2d..37c4c308339e4f225ce8b224c7058edd543f3bfe 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -265,7 +265,6 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) { __u64 delta2 = jent_delta(ec->last_delta, current_delta); __u64 delta3 = jent_delta(ec->last_delta2, delta2); - unsigned int delta_masked = current_delta & JENT_APT_WORD_MASK; ec->last_delta = current_delta; ec->last_delta2 = delta2; @@ -274,7 +273,7 @@ static int jent_stuck(struct rand_data *ec, __u64 current_delta) * Insert the result of the comparison of two back-to-back time * deltas. */ - jent_apt_insert(ec, delta_masked); + jent_apt_insert(ec, current_delta); if (!current_delta || !delta2 || !delta3) { /* RCT with a stuck bit */ diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 8ac3e73e8ea65121a9ccfbe9e598d757f426e933..9d804831c8b3f9d1dba390cd6f92aab574af7967 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -476,6 +476,8 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) pos++; if (digest_info) { + if (digest_info->size > dst_len - pos) + goto done; if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size)) goto done; @@ -495,7 +497,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) sg_nents_for_len(req->src, req->src_len + req->dst_len), req_ctx->out_buf + ctx->key_size, - req->dst_len, ctx->key_size); + req->dst_len, req->src_len); /* Do the actual verification step. 
*/ if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos, req->dst_len) != 0) @@ -538,7 +540,7 @@ static int pkcs1pad_verify(struct akcipher_request *req) if (WARN_ON(req->dst) || WARN_ON(!req->dst_len) || - !ctx->key_size || req->src_len < ctx->key_size) + !ctx->key_size || req->src_len != ctx->key_size) return -EINVAL; req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL); @@ -621,6 +623,11 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb) rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn); + if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) { + err = -EINVAL; + goto err_free_inst; + } + err = -ENAMETOOLONG; hash_name = crypto_attr_alg_name(tb[2]); if (IS_ERR(hash_name)) { diff --git a/crypto/sm2.c b/crypto/sm2.c index db8a4a265669d9e85b6f56f7305b419b5c9c80a7..ae3f77a66070317e4f667dac7889e42969a1e5eb 100644 --- a/crypto/sm2.c +++ b/crypto/sm2.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include "sm2signature.asn1.h" @@ -213,7 +213,7 @@ int sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag, return 0; } -static int sm2_z_digest_update(struct shash_desc *desc, +static int sm2_z_digest_update(struct sm3_state *sctx, MPI m, unsigned int pbytes) { static const unsigned char zero[32]; @@ -226,20 +226,20 @@ static int sm2_z_digest_update(struct shash_desc *desc, if (inlen < pbytes) { /* padding with zero */ - crypto_sm3_update(desc, zero, pbytes - inlen); - crypto_sm3_update(desc, in, inlen); + sm3_update(sctx, zero, pbytes - inlen); + sm3_update(sctx, in, inlen); } else if (inlen > pbytes) { /* skip the starting zero */ - crypto_sm3_update(desc, in + inlen - pbytes, pbytes); + sm3_update(sctx, in + inlen - pbytes, pbytes); } else { - crypto_sm3_update(desc, in, inlen); + sm3_update(sctx, in, inlen); } kfree(in); return 0; } -static int sm2_z_digest_update_point(struct shash_desc *desc, +static int sm2_z_digest_update_point(struct sm3_state *sctx, MPI_POINT point, struct mpi_ec_ctx *ec, unsigned int pbytes) { MPI x, y; @@ -249,8 +249,8 @@ static int sm2_z_digest_update_point(struct shash_desc *desc, y = mpi_new(0); if (!mpi_ec_get_affine(x, y, point, ec) && - !sm2_z_digest_update(desc, x, pbytes) && - !sm2_z_digest_update(desc, y, pbytes)) + !sm2_z_digest_update(sctx, x, pbytes) && + !sm2_z_digest_update(sctx, y, pbytes)) ret = 0; mpi_free(x); @@ -265,7 +265,7 @@ int sm2_compute_z_digest(struct crypto_akcipher *tfm, struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm); uint16_t bits_len; unsigned char entl[2]; - SHASH_DESC_ON_STACK(desc, NULL); + struct sm3_state sctx; unsigned int pbytes; if (id_len > (USHRT_MAX / 8) || !ec->Q) @@ -278,17 +278,17 @@ int sm2_compute_z_digest(struct crypto_akcipher *tfm, pbytes = MPI_NBYTES(ec->p); /* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA) */ - sm3_base_init(desc); - crypto_sm3_update(desc, entl, 2); - crypto_sm3_update(desc, id, id_len); - - if (sm2_z_digest_update(desc, ec->a, pbytes) || - sm2_z_digest_update(desc, ec->b, pbytes) || - sm2_z_digest_update_point(desc, ec->G, ec, pbytes) || - sm2_z_digest_update_point(desc, ec->Q, ec, pbytes)) + sm3_init(&sctx); + sm3_update(&sctx, entl, 2); + sm3_update(&sctx, id, id_len); + + if (sm2_z_digest_update(&sctx, ec->a, pbytes) || + sm2_z_digest_update(&sctx, ec->b, pbytes) || + sm2_z_digest_update_point(&sctx, ec->G, ec, pbytes) || + sm2_z_digest_update_point(&sctx, ec->Q, ec, pbytes)) return -EINVAL; - crypto_sm3_final(desc, dgst); + sm3_final(&sctx, dgst); return 0; } EXPORT_SYMBOL(sm2_compute_z_digest); 
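With the shash descriptor gone, sm2.c now drives SM3 through the bare library interface exported by the new crypto/sm3.c below. A minimal usage sketch; sm3_init() is assumed to be the inline state initializer from <crypto/sm3.h>, which is not part of this hunk:

#include <crypto/sm3.h>

/* One-shot SM3 digest of a flat buffer, with no crypto API allocations. */
static void example_sm3_digest(const u8 *data, unsigned int len,
			       u8 out[SM3_DIGEST_SIZE])
{
	struct sm3_state sctx;

	sm3_init(&sctx);		/* load the GM/T 0004-2012 IV */
	sm3_update(&sctx, data, len);	/* absorb blocks, buffer the tail */
	sm3_final(&sctx, out);		/* pad, append bit count, wipe sctx */
}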
diff --git a/crypto/sm3.c b/crypto/sm3.c new file mode 100644 index 0000000000000000000000000000000000000000..d473e358a873a82cdabbb322e90ea24a193c0604 --- /dev/null +++ b/crypto/sm3.c @@ -0,0 +1,246 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described + * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02 + * + * Copyright (C) 2017 ARM Limited or its affiliates. + * Copyright (C) 2017 Gilad Ben-Yossef + * Copyright (C) 2021 Tianjia Zhang + */ + +#include +#include +#include + +static const u32 ____cacheline_aligned K[64] = { + 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb, + 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc, + 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce, + 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6, + 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, + 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, + 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, + 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5, + 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53, + 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d, + 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4, + 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43, + 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, + 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, + 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, + 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 +}; + +/* + * Transform the message X which consists of 16 32-bit-words. See + * GM/T 004-2012 for details. + */ +#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \ + do { \ + ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \ + ss2 = ss1 ^ rol32((a), 12); \ + d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \ + h += GG ## i(e, f, g) + ss1 + (w1); \ + b = rol32((b), 9); \ + f = rol32((f), 19); \ + h = P0((h)); \ + } while (0) + +#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \ + R(1, a, b, c, d, e, f, g, h, t, w1, w2) +#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \ + R(2, a, b, c, d, e, f, g, h, t, w1, w2) + +#define FF1(x, y, z) (x ^ y ^ z) +#define FF2(x, y, z) ((x & y) | (x & z) | (y & z)) + +#define GG1(x, y, z) FF1(x, y, z) +#define GG2(x, y, z) ((x & y) | (~x & z)) + +/* Message expansion */ +#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17)) +#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23)) +#define I(i) (W[i] = get_unaligned_be32(data + i * 4)) +#define W1(i) (W[i & 0x0f]) +#define W2(i) (W[i & 0x0f] = \ + P1(W[i & 0x0f] \ + ^ W[(i-9) & 0x0f] \ + ^ rol32(W[(i-3) & 0x0f], 15)) \ + ^ rol32(W[(i-13) & 0x0f], 7) \ + ^ W[(i-6) & 0x0f]) + +static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16]) +{ + u32 a, b, c, d, e, f, g, h, ss1, ss2; + + a = sctx->state[0]; + b = sctx->state[1]; + c = sctx->state[2]; + d = sctx->state[3]; + e = sctx->state[4]; + f = sctx->state[5]; + g = sctx->state[6]; + h = sctx->state[7]; + + R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4)); + R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5)); + R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6)); + R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7)); + R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8)); + R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9)); + R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10)); + R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11)); + R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12)); + R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13)); + R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14)); + R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15)); + R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16)); + R1(d, a, b, c, h, e, f, g, 
K[13], W1(13), W2(17)); + R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18)); + R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19)); + + R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20)); + R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21)); + R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22)); + R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23)); + R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24)); + R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25)); + R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26)); + R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27)); + R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28)); + R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29)); + R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30)); + R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31)); + R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32)); + R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33)); + R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34)); + R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35)); + + R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36)); + R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37)); + R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38)); + R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39)); + R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40)); + R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41)); + R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42)); + R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43)); + R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44)); + R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45)); + R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46)); + R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47)); + R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48)); + R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49)); + R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50)); + R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51)); + + R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52)); + R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53)); + R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54)); + R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55)); + R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56)); + R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57)); + R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58)); + R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59)); + R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60)); + R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61)); + R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62)); + R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63)); + R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64)); + R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65)); + R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66)); + R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67)); + + sctx->state[0] ^= a; + sctx->state[1] ^= b; + sctx->state[2] ^= c; + sctx->state[3] ^= d; + sctx->state[4] ^= e; + sctx->state[5] ^= f; + sctx->state[6] ^= g; + sctx->state[7] ^= h; +} +#undef R +#undef R1 +#undef R2 +#undef I +#undef W1 +#undef W2 + +static inline void sm3_block(struct sm3_state *sctx, + u8 const *data, int blocks, u32 W[16]) +{ + while (blocks--) { + sm3_transform(sctx, data, W); + data += SM3_BLOCK_SIZE; + } +} + +void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len) +{ + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + u32 W[16]; + + sctx->count += len; + + if ((partial + len) >= SM3_BLOCK_SIZE) { + int blocks; + + if (partial) { + int p = SM3_BLOCK_SIZE - partial; + + memcpy(sctx->buffer + partial, data, p); + data += p; + len -= p; + + sm3_block(sctx, sctx->buffer, 1, W); + } + + blocks = len / SM3_BLOCK_SIZE; 
+ len %= SM3_BLOCK_SIZE; + + if (blocks) { + sm3_block(sctx, data, blocks, W); + data += blocks * SM3_BLOCK_SIZE; + } + + memzero_explicit(W, sizeof(W)); + + partial = 0; + } + if (len) + memcpy(sctx->buffer + partial, data, len); +} +EXPORT_SYMBOL_GPL(sm3_update); + +void sm3_final(struct sm3_state *sctx, u8 *out) +{ + const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64); + __be64 *bits = (__be64 *)(sctx->buffer + bit_offset); + __be32 *digest = (__be32 *)out; + unsigned int partial = sctx->count % SM3_BLOCK_SIZE; + u32 W[16]; + int i; + + sctx->buffer[partial++] = 0x80; + if (partial > bit_offset) { + memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial); + partial = 0; + + sm3_block(sctx, sctx->buffer, 1, W); + } + + memset(sctx->buffer + partial, 0, bit_offset - partial); + *bits = cpu_to_be64(sctx->count << 3); + sm3_block(sctx, sctx->buffer, 1, W); + + for (i = 0; i < 8; i++) + put_unaligned_be32(sctx->state[i], digest++); + + /* Zeroize sensitive information. */ + memzero_explicit(W, sizeof(W)); + memzero_explicit(sctx, sizeof(*sctx)); +} +EXPORT_SYMBOL_GPL(sm3_final); + +MODULE_DESCRIPTION("Generic SM3 library"); +MODULE_LICENSE("GPL v2"); diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c index 193c4584bd00462da87fe1eb0bf1e0aad3eba467..a215c1c37e730a08174afe8c697b8ab6e39d3e2c 100644 --- a/crypto/sm3_generic.c +++ b/crypto/sm3_generic.c @@ -5,6 +5,7 @@ * * Copyright (C) 2017 ARM Limited or its affiliates. * Written by Gilad Ben-Yossef + * Copyright (C) 2021 Tianjia Zhang */ #include @@ -26,143 +27,29 @@ const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { }; EXPORT_SYMBOL_GPL(sm3_zero_message_hash); -static inline u32 p0(u32 x) -{ - return x ^ rol32(x, 9) ^ rol32(x, 17); -} - -static inline u32 p1(u32 x) -{ - return x ^ rol32(x, 15) ^ rol32(x, 23); -} - -static inline u32 ff(unsigned int n, u32 a, u32 b, u32 c) -{ - return (n < 16) ? (a ^ b ^ c) : ((a & b) | (a & c) | (b & c)); -} - -static inline u32 gg(unsigned int n, u32 e, u32 f, u32 g) -{ - return (n < 16) ? (e ^ f ^ g) : ((e & f) | ((~e) & g)); -} - -static inline u32 t(unsigned int n) -{ - return (n < 16) ? 
SM3_T1 : SM3_T2; -} - -static void sm3_expand(u32 *t, u32 *w, u32 *wt) -{ - int i; - unsigned int tmp; - - /* load the input */ - for (i = 0; i <= 15; i++) - w[i] = get_unaligned_be32((__u32 *)t + i); - - for (i = 16; i <= 67; i++) { - tmp = w[i - 16] ^ w[i - 9] ^ rol32(w[i - 3], 15); - w[i] = p1(tmp) ^ (rol32(w[i - 13], 7)) ^ w[i - 6]; - } - - for (i = 0; i <= 63; i++) - wt[i] = w[i] ^ w[i + 4]; -} - -static void sm3_compress(u32 *w, u32 *wt, u32 *m) -{ - u32 ss1; - u32 ss2; - u32 tt1; - u32 tt2; - u32 a, b, c, d, e, f, g, h; - int i; - - a = m[0]; - b = m[1]; - c = m[2]; - d = m[3]; - e = m[4]; - f = m[5]; - g = m[6]; - h = m[7]; - - for (i = 0; i <= 63; i++) { - - ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7); - - ss2 = ss1 ^ rol32(a, 12); - - tt1 = ff(i, a, b, c) + d + ss2 + *wt; - wt++; - - tt2 = gg(i, e, f, g) + h + ss1 + *w; - w++; - - d = c; - c = rol32(b, 9); - b = a; - a = tt1; - h = g; - g = rol32(f, 19); - f = e; - e = p0(tt2); - } - - m[0] = a ^ m[0]; - m[1] = b ^ m[1]; - m[2] = c ^ m[2]; - m[3] = d ^ m[3]; - m[4] = e ^ m[4]; - m[5] = f ^ m[5]; - m[6] = g ^ m[6]; - m[7] = h ^ m[7]; - - a = b = c = d = e = f = g = h = ss1 = ss2 = tt1 = tt2 = 0; -} - -static void sm3_transform(struct sm3_state *sst, u8 const *src) -{ - unsigned int w[68]; - unsigned int wt[64]; - - sm3_expand((u32 *)src, w, wt); - sm3_compress(w, wt, sst->state); - - memzero_explicit(w, sizeof(w)); - memzero_explicit(wt, sizeof(wt)); -} - -static void sm3_generic_block_fn(struct sm3_state *sst, u8 const *src, - int blocks) -{ - while (blocks--) { - sm3_transform(sst, src); - src += SM3_BLOCK_SIZE; - } -} - -int crypto_sm3_update(struct shash_desc *desc, const u8 *data, +static int crypto_sm3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sm3_base_do_update(desc, data, len, sm3_generic_block_fn); + sm3_update(shash_desc_ctx(desc), data, len); + return 0; } -EXPORT_SYMBOL(crypto_sm3_update); -int crypto_sm3_final(struct shash_desc *desc, u8 *out) +static int crypto_sm3_final(struct shash_desc *desc, u8 *out) { - sm3_base_do_finalize(desc, sm3_generic_block_fn); - return sm3_base_finish(desc, out); + sm3_final(shash_desc_ctx(desc), out); + return 0; } -EXPORT_SYMBOL(crypto_sm3_final); -int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, +static int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash) { - sm3_base_do_update(desc, data, len, sm3_generic_block_fn); - return crypto_sm3_final(desc, hash); + struct sm3_state *sctx = shash_desc_ctx(desc); + + if (len) + sm3_update(sctx, data, len); + sm3_final(sctx, hash); + return 0; } -EXPORT_SYMBOL(crypto_sm3_finup); static struct shash_alg sm3_alg = { .digestsize = SM3_DIGEST_SIZE, @@ -174,6 +61,7 @@ static struct shash_alg sm3_alg = { .base = { .cra_name = "sm3", .cra_driver_name = "sm3-generic", + .cra_priority = 100, .cra_blocksize = SM3_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/lib/crypto/sm4.c b/crypto/sm4.c similarity index 94% rename from lib/crypto/sm4.c rename to crypto/sm4.c index 284e62576d0c621692b368bed91936bfaeb6fe4b..2c44193bc27e4a0369a795ed641a6e32ca33cb6b 100644 --- a/lib/crypto/sm4.c +++ b/crypto/sm4.c @@ -11,7 +11,7 @@ #include #include -static const u32 fk[4] = { +static const u32 ____cacheline_aligned fk[4] = { 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc }; @@ -61,6 +61,14 @@ static const u8 ____cacheline_aligned sbox[256] = { 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48 }; +extern const u32 crypto_sm4_fk[4] __alias(fk); +extern const u32 
crypto_sm4_ck[32] __alias(ck); +extern const u8 crypto_sm4_sbox[256] __alias(sbox); + +EXPORT_SYMBOL(crypto_sm4_fk); +EXPORT_SYMBOL(crypto_sm4_ck); +EXPORT_SYMBOL(crypto_sm4_sbox); + static inline u32 sm4_t_non_lin_sub(u32 x) { u32 out; diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 41b3443f56a06ae2fcf8f65b0ff46e385da1b3de..50efdfbe71e3b747995a807f99f4e256a8e78f56 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -2619,31 +2619,35 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) if (mode > 400 && mode < 500) break; fallthrough; case 422: + test_ahash_speed("sm3", sec, generic_hash_speed_template); + if (mode > 400 && mode < 500) break; + fallthrough; + case 450: test_mb_ahash_speed("sha1", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; fallthrough; - case 423: + case 451: test_mb_ahash_speed("sha256", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; fallthrough; - case 424: + case 452: test_mb_ahash_speed("sha512", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; fallthrough; - case 425: + case 453: test_mb_ahash_speed("sm3", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; fallthrough; - case 426: + case 454: test_mb_ahash_speed("streebog256", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; fallthrough; - case 427: + case 455: test_mb_ahash_speed("streebog512", sec, generic_hash_speed_template, num_mb); if (mode > 400 && mode < 500) break; diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c index ab6d61e80b1cbac8d66951190a89d5f6c99ee7f7..d689ec5e276f12aa8fb8630bf7cdddcf9aad86a7 100644 --- a/drivers/accessibility/speakup/speakup_dectlk.c +++ b/drivers/accessibility/speakup/speakup_dectlk.c @@ -44,6 +44,7 @@ static struct var_t vars[] = { { CAPS_START, .u.s = {"[:dv ap 160] " } }, { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } }, { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } }, + { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } }, { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } }, { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } }, { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } }, diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index 3323a2ba6a3138472403a0a4b700a77825dbe839..b3230e511870aad7ec54e49c0cad6cb901ca9aa1 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -326,12 +326,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, obj_desc->field.base_byte_offset, source_desc->buffer.pointer, data_length); - if ((obj_desc->field.region_obj->region.address == - PCC_MASTER_SUBSPACE - && MASTER_SUBSPACE_COMMAND(obj_desc->field. - base_byte_offset)) - || GENERIC_SUBSPACE_COMMAND(obj_desc->field. 
- base_byte_offset)) { + if (MASTER_SUBSPACE_COMMAND(obj_desc->field.base_byte_offset)) { /* Perform the write */ diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index a46d685a3ffcf0888571e89cbdde6e0263197adc..9d67dfd93d5b652af943cd8e3317c699413d1f07 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -1007,7 +1007,8 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) (walk_state, return_desc, &temp_desc); if (ACPI_FAILURE(status)) { - goto cleanup; + return_ACPI_STATUS + (status); } return_desc = temp_desc; diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index 4836a4b8b38b87709091009efe209d29fd400d03..142a755be68811445ebef9b90660c5ac8924c421 100644 --- a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c @@ -104,7 +104,9 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state) /* Flush caches, as per ACPI specification */ - ACPI_FLUSH_CPU_CACHE(); + if (sleep_state < ACPI_STATE_S4) { + ACPI_FLUSH_CPU_CACHE(); + } status = acpi_os_enter_sleep(sleep_state, sleep_control, 0); if (status == AE_CTRL_TERMINATE) { diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index fcc84d196238a90c6c4a6e6d46895f7a8d157114..6a20bb5059c1d815a33ab83149dca348065e5a44 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -110,7 +110,9 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state) /* Flush caches, as per ACPI specification */ - ACPI_FLUSH_CPU_CACHE(); + if (sleep_state < ACPI_STATE_S4) { + ACPI_FLUSH_CPU_CACHE(); + } status = acpi_os_enter_sleep(sleep_state, pm1a_control, pm1b_control); if (status == AE_CTRL_TERMINATE) { diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index f1645d87864c3dcd6dc32ca3f35e6c6e69b4fbb1..3948c34d85830e0e671ff4fd2008431d21ffe5e0 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -162,8 +162,6 @@ acpi_status acpi_enter_sleep_state_s4bios(void) return_ACPI_STATUS(status); } - ACPI_FLUSH_CPU_CACHE(); - status = acpi_hw_write_port(acpi_gbl_FADT.smi_command, (u32)acpi_gbl_FADT.s4_bios_request, 8); if (ACPI_FAILURE(status)) { diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index b7f3e8603ad841f8af776d88415a960087a5ca1d..901fa5ca284d2f54f973a25b9529e88090d69193 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -169,6 +169,9 @@ acpi_ns_walk_namespace(acpi_object_type type, if (start_node == ACPI_ROOT_OBJECT) { start_node = acpi_gbl_root_node; + if (!start_node) { + return_ACPI_STATUS(AE_NO_NAMESPACE); + } } /* Null child means "get first node" */ diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 72d2c0b656339c1f06473b252765713c2053039f..cb1750e7a6281ae9a91bed0d586dffc6b605db6d 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -422,6 +422,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) ACPI_WARNING((AE_INFO, "Obj %p, Reference Count is already zero, cannot decrement\n", object)); + return; } ACPI_DEBUG_PRINT_RAW((ACPI_DB_ALLOCATIONS, diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index 19e50fcbf4d6f52771f573094f4fa635bcfd5ff6..598fd19b65fa489d1cc4d44da8db7ee072612e1a 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -29,6 +29,7 @@ #undef pr_fmt #define pr_fmt(fmt) "BERT: " fmt +#define ACPI_BERT_PRINT_MAX_LEN 1024 static int bert_disable; @@ -58,8 +59,11 @@ 
static void __init bert_print_all(struct acpi_bert_region *region, } pr_info_once("Error records from previous boot:\n"); - - cper_estatus_print(KERN_INFO HW_ERR, estatus); + if (region_len < ACPI_BERT_PRINT_MAX_LEN) + cper_estatus_print(KERN_INFO HW_ERR, estatus); + else + pr_info_once("Max print length exceeded, table data is available at:\n" + "/sys/firmware/acpi/tables/data/BERT"); /* * Because the boot error source is "one-time polled" type, @@ -77,7 +81,7 @@ static int __init setup_bert_disable(char *str) { bert_disable = 1; - return 0; + return 1; } __setup("bert_disable", setup_bert_disable); diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index 1331567595512f7ddc6b880a0d1e7ffd499677e1..4384269ad1591ec688aa43dc79a7a02b8b346c78 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -544,7 +544,8 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) != REGION_INTERSECTS) && (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY) - != REGION_INTERSECTS))) + != REGION_INTERSECTS) && + !arch_is_platform_page(base_addr))) return -EINVAL; inject: diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 2e0b0fcad9607c3803c6db616be62967e725d512..83efb52a3f31d070cfde3fe9e34166ba3bed64de 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(erst_clear); static int __init setup_erst_disable(char *str) { erst_disable = 1; - return 0; + return 1; } __setup("erst_disable", setup_erst_disable); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 9ac1e45dbba0f83bf77319543db878dcf09990ae..9c38c2cdd2fd218356112ccf26fb5b1cfec31c9f 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -452,7 +452,7 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags) return false; pfn = PHYS_PFN(physical_addr); - if (!pfn_valid(pfn)) { + if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) { pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid address in generic error data: %#llx\n", physical_addr); diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 6e980fe16772cc194e8c8daa0589c31102d31b52..7bf48c2776fbf5e417ccbb795fc59ffa9e3a2f4b 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -219,7 +219,7 @@ static int __init hest_ghes_dev_register(unsigned int ghes_count) static int __init setup_hest_disable(char *str) { hest_disable = HEST_DISABLED; - return 0; + return 1; } __setup("hest_disable", setup_hest_disable); diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 0828f70cb782f3a06a0124460bbe24a688b353e2..d6e1569a72b15637a84e26dd8cd01e0c8867277c 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1457,9 +1457,17 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res, res[0].start = pmcg->page0_base_address; res[0].end = pmcg->page0_base_address + SZ_4K - 1; res[0].flags = IORESOURCE_MEM; - res[1].start = pmcg->page1_base_address; - res[1].end = pmcg->page1_base_address + SZ_4K - 1; - res[1].flags = IORESOURCE_MEM; + /* + * The initial version in DEN0049C lacked a way to describe register + * page 1, which makes it broken for most PMCG implementations; in + * that case, just let the driver fail gracefully if it expects to + * find a second memory resource. 
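For reference, the reason the setup_*_disable() handlers in the apei hunks switch from returning 0 to returning 1: obsolete-style __setup() handlers return 1 to tell the early parameter parser that the option was consumed, while 0 means "not handled", in which case the kernel treats the option as an unknown argument and passes it on to init, typically with a warning. A minimal sketch of the pattern, using a made-up parameter name:

	static int example_disabled;

	static int __init setup_example_disable(char *str)
	{
		example_disabled = 1;
		return 1;	/* consumed: don't pass "example_disable" on to init */
	}
	__setup("example_disable", setup_example_disable);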
+ */ + if (node->revision > 0) { + res[1].start = pmcg->page1_base_address; + res[1].end = pmcg->page1_base_address + SZ_4K - 1; + res[1].flags = IORESOURCE_MEM; + } if (pmcg->overflow_gsiv) acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow", diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index e04352c1dc2ce825b981f4e066633951b85aedbd..be743d177bcbf8ddc5089d6079c651550fb40179 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -59,12 +59,17 @@ static int battery_bix_broken_package; static int battery_notification_delay_ms; static int battery_ac_is_broken; static int battery_check_pmic = 1; +static int battery_quirk_notcharging; static unsigned int cache_time = 1000; module_param(cache_time, uint, 0644); MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); static const struct acpi_device_id battery_device_ids[] = { {"PNP0C0A", 0}, + + /* Microsoft Surface Go 3 */ + {"MSHW0146", 0}, + {"", 0}, }; @@ -222,6 +227,8 @@ static int acpi_battery_get_property(struct power_supply *psy, val->intval = POWER_SUPPLY_STATUS_CHARGING; else if (acpi_battery_is_charged(battery)) val->intval = POWER_SUPPLY_STATUS_FULL; + else if (battery_quirk_notcharging) + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; else val->intval = POWER_SUPPLY_STATUS_UNKNOWN; break; @@ -1105,6 +1112,12 @@ battery_do_not_check_pmic_quirk(const struct dmi_system_id *d) return 0; } +static int __init battery_quirk_not_charging(const struct dmi_system_id *d) +{ + battery_quirk_notcharging = 1; + return 0; +} + static const struct dmi_system_id bat_dmi_table[] __initconst = { { /* NEC LZ750/LS */ @@ -1149,6 +1162,27 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), }, }, + { + /* + * On Lenovo ThinkPads the BIOS specification defines + * a state when the bits for charging and discharging + * are both set to 0. That state is "Not Charging". 
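Pulling the battery.c pieces together: the DMI table entry installs battery_quirk_not_charging() as an init-time callback that sets a flag, and the POWER_SUPPLY_PROP_STATUS getter then reports "not charging" instead of "unknown" when neither the charging nor the discharging bit is set. A simplified consolidation of those hunks; battery_is_charging() is a hypothetical helper standing in for the ACPI charging-bit test:

	static int battery_status(struct acpi_battery *battery)
	{
		if (battery_is_charging(battery))	/* hypothetical helper */
			return POWER_SUPPLY_STATUS_CHARGING;
		if (acpi_battery_is_charged(battery))
			return POWER_SUPPLY_STATUS_FULL;
		if (battery_quirk_notcharging)
			return POWER_SUPPLY_STATUS_NOT_CHARGING;
		return POWER_SUPPLY_STATUS_UNKNOWN;
	}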
+ */ + .callback = battery_quirk_not_charging, + .ident = "Lenovo ThinkPad", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"), + }, + }, + { + /* Microsoft Surface Go 3 */ + .callback = battery_notification_delay_quirk, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), + }, + }, {}, }; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index e317214aabec5570fcc1b5bbe1a89211fabe0198..5e14288fcabe904e493e7b68af3968cccddc5f80 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -98,8 +98,8 @@ int acpi_bus_get_status(struct acpi_device *device) acpi_status status; unsigned long long sta; - if (acpi_device_always_present(device)) { - acpi_set_device_status(device, ACPI_STA_DEFAULT); + if (acpi_device_override_status(device, &sta)) { + acpi_set_device_status(device, sta); return 0; } diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index dc8ac435dea1611fb95c7bb1c07485f8ee932651..e4ef64988761f84240530cd304f17c3b3f5cd4b2 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -804,6 +804,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) cpc_obj = &out_obj->package.elements[0]; if (cpc_obj->type == ACPI_TYPE_INTEGER) { num_ent = cpc_obj->integer.value; + if (num_ent <= 1) { + pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n", + num_ent, pr->id); + goto out_free; + } } else { pr_debug("Unexpected entry type(%d) for NumEntries\n", cpc_obj->type); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index be3e0921a6c006f820d2cfa1fea47e995e3c8fa7..3f2e5ea9ab6b7a62b99c225c68cf1b9ae9458b79 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -166,6 +166,7 @@ struct acpi_ec_query { struct transaction transaction; struct work_struct work; struct acpi_ec_query_handler *handler; + struct acpi_ec *ec; }; static int acpi_ec_query(struct acpi_ec *ec, u8 *data); @@ -469,6 +470,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec) ec_dbg_evt("Command(%s) submitted/blocked", acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); ec->nr_pending_queries++; + ec->events_in_progress++; queue_work(ec_wq, &ec->work); } } @@ -535,7 +537,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec) #ifdef CONFIG_PM_SLEEP static void __acpi_ec_flush_work(void) { - drain_workqueue(ec_wq); /* flush ec->work */ + flush_workqueue(ec_wq); /* flush ec->work */ flush_workqueue(ec_query_wq); /* flush queries */ } @@ -1116,7 +1118,7 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) } EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); -static struct acpi_ec_query *acpi_ec_create_query(u8 *pval) +static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval) { struct acpi_ec_query *q; struct transaction *t; @@ -1124,11 +1126,13 @@ static struct acpi_ec_query *acpi_ec_create_query(u8 *pval) q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL); if (!q) return NULL; + INIT_WORK(&q->work, acpi_ec_event_processor); t = &q->transaction; t->command = ACPI_EC_COMMAND_QUERY; t->rdata = pval; t->rlen = 1; + q->ec = ec; return q; } @@ -1145,13 +1149,21 @@ static void acpi_ec_event_processor(struct work_struct *work) { struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work); struct acpi_ec_query_handler *handler = q->handler; + struct acpi_ec *ec = q->ec; ec_dbg_evt("Query(0x%02x) started", handler->query_bit); + if (handler->func) handler->func(handler->data); else if (handler->handle) 
acpi_evaluate_object(handler->handle, NULL, NULL, NULL); + ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); + + spin_lock_irq(&ec->lock); + ec->queries_in_progress--; + spin_unlock_irq(&ec->lock); + acpi_ec_delete_query(q); } @@ -1161,7 +1173,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) int result; struct acpi_ec_query *q; - q = acpi_ec_create_query(&value); + q = acpi_ec_create_query(ec, &value); if (!q) return -ENOMEM; @@ -1183,19 +1195,20 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) } /* - * It is reported that _Qxx are evaluated in a parallel way on - * Windows: + * It is reported that _Qxx are evaluated in a parallel way on Windows: * https://bugzilla.kernel.org/show_bug.cgi?id=94411 * - * Put this log entry before schedule_work() in order to make - * it appearing before any other log entries occurred during the - * work queue execution. + * Put this log entry before queue_work() to make it appear in the log + * before any other messages emitted during workqueue handling. */ ec_dbg_evt("Query(0x%02x) scheduled", value); - if (!queue_work(ec_query_wq, &q->work)) { - ec_dbg_evt("Query(0x%02x) overlapped", value); - result = -EBUSY; - } + + spin_lock_irq(&ec->lock); + + ec->queries_in_progress++; + queue_work(ec_query_wq, &q->work); + + spin_unlock_irq(&ec->lock); err_exit: if (result) @@ -1253,6 +1266,10 @@ static void acpi_ec_event_handler(struct work_struct *work) ec_dbg_evt("Event stopped"); acpi_ec_check_event(ec); + + spin_lock_irqsave(&ec->lock, flags); + ec->events_in_progress--; + spin_unlock_irqrestore(&ec->lock, flags); } static void acpi_ec_handle_interrupt(struct acpi_ec *ec) @@ -2034,6 +2051,7 @@ void acpi_ec_set_gpe_wake_mask(u8 action) bool acpi_ec_dispatch_gpe(void) { + bool work_in_progress; u32 ret; if (!first_ec) @@ -2054,8 +2072,19 @@ bool acpi_ec_dispatch_gpe(void) if (ret == ACPI_INTERRUPT_HANDLED) pm_pr_dbg("ACPI EC GPE dispatched\n"); - /* Flush the event and query workqueues. */ - acpi_ec_flush_work(); + /* Drain EC work. 
*/ + do { + acpi_ec_flush_work(); + + pm_pr_dbg("ACPI EC work flushed\n"); + + spin_lock_irq(&first_ec->lock); + + work_in_progress = first_ec->events_in_progress + + first_ec->queries_in_progress > 0; + + spin_unlock_irq(&first_ec->lock); + } while (work_in_progress && !pm_wakeup_pending()); return false; } diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index a958ad60a3394d4398ed774c6b42187329ec13b7..125e4901c9b47834238778e61a7315108ab976a9 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -184,6 +184,8 @@ struct acpi_ec { struct work_struct work; unsigned long timestamp; unsigned long nr_pending_queries; + unsigned int events_in_progress; + unsigned int queries_in_progress; bool busy_polling; unsigned int polling_guard; }; diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 8377c3ed10ffa148cdb852e0cec8f3bf89932645..9921b481c7ee1b089ea8346132bfbc1923896a06 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -1080,6 +1080,11 @@ static int flatten_lpi_states(struct acpi_processor *pr, return 0; } +int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) +{ + return -EOPNOTSUPP; +} + static int acpi_processor_get_lpi_info(struct acpi_processor *pr) { int ret, i; @@ -1088,6 +1093,11 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) struct acpi_device *d = NULL; struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; + /* make sure our architecture has support */ + ret = acpi_processor_ffh_lpi_probe(pr->id); + if (ret == -EOPNOTSUPP) + return ret; + if (!osc_pc_lpi_support_confirmed) return -EOPNOTSUPP; @@ -1139,11 +1149,6 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr) return 0; } -int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) -{ - return -ENODEV; -} - int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) { return -ENODEV; diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 18bd428f11ac03dc7d8b34df308816ae3451da9f..bd16340088389a5733f5c815234a37b09c019455 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -685,7 +685,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, */ if (obj->type == ACPI_TYPE_LOCAL_REFERENCE) { if (index) - return -EINVAL; + return -ENOENT; ret = acpi_bus_get_device(obj->reference.handle, &device); if (ret) diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index de0533bd4e086a0804aea4abe5c79424ddc83d57..67a5ee2fedfd3daf9305c3a028b2793703c13790 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1577,6 +1577,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) { struct list_head resource_list; bool is_serial_bus_slave = false; + static const struct acpi_device_id ignore_serial_bus_ids[] = { /* * These devices have multiple I2cSerialBus resources and an i2c-client * must be instantiated for each, each with its own i2c_device_id. @@ -1585,11 +1586,18 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows * which i2c_device_id to use for each resource. 
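The EC hunks above all serve one pattern, worth seeing in isolation: each queued event or query is counted under ec->lock, the work function drops the count as its last step, and the suspend path keeps flushing until the count reaches zero or a wakeup is pending. A single flush_workqueue() is not enough, because a flushed work item may itself queue new work. A generic sketch of that bookkeeping, with hypothetical names:

	struct tracked {
		spinlock_t lock;
		unsigned int in_progress;	/* work items in flight */
		struct workqueue_struct *wq;
	};

	static void tracked_queue(struct tracked *t, struct work_struct *w)
	{
		spin_lock_irq(&t->lock);
		t->in_progress++;		/* counted before it is queued */
		queue_work(t->wq, w);
		spin_unlock_irq(&t->lock);
	}

	static void tracked_done(struct tracked *t)
	{
		/* last statement of the work function */
		spin_lock_irq(&t->lock);
		t->in_progress--;
		spin_unlock_irq(&t->lock);
	}

	static void tracked_drain(struct tracked *t)
	{
		bool busy;

		do {
			flush_workqueue(t->wq);	/* may queue more work */
			spin_lock_irq(&t->lock);
			busy = t->in_progress > 0;
			spin_unlock_irq(&t->lock);
		} while (busy && !pm_wakeup_pending());
	}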
*/ - static const struct acpi_device_id i2c_multi_instantiate_ids[] = { {"BSG1160", }, {"BSG2150", }, {"INT33FE", }, {"INT3515", }, + /* + * HIDs of device with an UartSerialBusV2 resource for which userspace + * expects a regular tty cdev to be created (instead of the in kernel + * serdev) and which have a kernel driver which expects a platform_dev + * such as the rfkill-gpio driver. + */ + {"BCM4752", }, + {"LNV4752", }, {} }; @@ -1603,8 +1611,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) fwnode_property_present(&device->fwnode, "baud"))) return true; - /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */ - if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids)) + if (!acpi_match_device_ids(device, ignore_serial_bus_ids)) return false; INIT_LIST_HEAD(&resource_list); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 31c9d0c8ae11f67092e9fef9ed9a953cee0a6e28..503935b1deeb1efba7ec001123b6db402d446425 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -1040,6 +1040,7 @@ static bool acpi_s2idle_wake(void) return true; } + pm_wakeup_clear(acpi_sci_irq); rearm_wake_irq(acpi_sci_irq); } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 33474fd969913c513d1acfd1577a0135f1c8a8d9..7b9793cb55c504c6b027d031d7e435677a61cf66 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -409,6 +409,81 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "GA503"), }, }, + /* + * Clevo NL5xRU and NL5xNU/TUXEDO Aura 15 Gen1 and Gen2 have both a + * working native and video interface. However the default detection + * mechanism first registers the video interface before unregistering + * it again and switching to the native interface during boot. This + * results in a dangling SBIOS request for backlight change for some + * reason, causing the backlight to switch to ~2% once per boot on the + * first power cord connect or disconnect event. Setting the native + * interface explicitly circumvents this buggy behaviour, by avoiding + * the unregistering process. 
+ */ + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xRU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, + { + .callback = video_detect_force_native, + .ident = "Clevo NL5xNU", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), + DMI_MATCH(DMI_BOARD_NAME, "NL5xNU"), + }, + }, /* * Desktops which falsely report a backlight and which our heuristics diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index bdc1ba00aee9f7e54f8b3c20e04577135cdbb430..3f9a162be84e373648ac0b01ff5b54a21feeeb3e 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -22,58 +22,71 @@ * Some BIOS-es (temporarily) hide specific APCI devices to work around Windows * driver bugs. We use DMI matching to match known cases of this. * - * We work around this by always reporting ACPI_STA_DEFAULT for these - * devices. Note this MUST only be done for devices where this is safe. + * Likewise sometimes some not-actually present devices are sometimes + * reported as present, which may cause issues. * - * This forcing of devices to be present is limited to specific CPU (SoC) - * models both to avoid potentially causing trouble on other models and - * because some HIDs are re-used on different SoCs for completely - * different devices. + * We work around this by using the below quirk list to override the status + * reported by the _STA method with a fixed value (ACPI_STA_DEFAULT or 0). + * Note this MUST only be done for devices where this is safe. + * + * This status overriding is limited to specific CPU (SoC) models both to + * avoid potentially causing trouble on other models and because some HIDs + * are re-used on different SoCs for completely different devices. */ -struct always_present_id { +struct override_status_id { struct acpi_device_id hid[2]; struct x86_cpu_id cpu_ids[2]; struct dmi_system_id dmi_ids[2]; /* Optional */ const char *uid; + const char *path; + unsigned long long status; }; -#define X86_MATCH(model) X86_MATCH_INTEL_FAM6_MODEL(model, NULL) - -#define ENTRY(hid, uid, cpu_models, dmi...) { \ +#define ENTRY(status, hid, uid, path, cpu_model, dmi...) { \ { { hid, }, {} }, \ - { cpu_models, {} }, \ + { X86_MATCH_INTEL_FAM6_MODEL(cpu_model, NULL), {} }, \ { { .matches = dmi }, {} }, \ uid, \ + path, \ + status, \ } -static const struct always_present_id always_present_ids[] = { +#define PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) 
\ + ENTRY(ACPI_STA_DEFAULT, hid, uid, NULL, cpu_model, dmi) + +#define NOT_PRESENT_ENTRY_HID(hid, uid, cpu_model, dmi...) \ + ENTRY(0, hid, uid, NULL, cpu_model, dmi) + +#define PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \ + ENTRY(ACPI_STA_DEFAULT, "", NULL, path, cpu_model, dmi) + +#define NOT_PRESENT_ENTRY_PATH(path, cpu_model, dmi...) \ + ENTRY(0, "", NULL, path, cpu_model, dmi) + +static const struct override_status_id override_status_ids[] = { /* * Bay / Cherry Trail PWM directly poked by GPU driver in win10, * but Linux uses a separate PWM driver, harmless if not used. */ - ENTRY("80860F09", "1", X86_MATCH(ATOM_SILVERMONT), {}), - ENTRY("80862288", "1", X86_MATCH(ATOM_AIRMONT), {}), + PRESENT_ENTRY_HID("80860F09", "1", ATOM_SILVERMONT, {}), + PRESENT_ENTRY_HID("80862288", "1", ATOM_AIRMONT, {}), - /* Lenovo Yoga Book uses PWM2 for keyboard backlight control */ - ENTRY("80862289", "2", X86_MATCH(ATOM_AIRMONT), { - DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), - }), /* * The INT0002 device is necessary to clear wakeup interrupt sources * on Cherry Trail devices, without it we get nobody cared IRQ msgs. */ - ENTRY("INT0002", "1", X86_MATCH(ATOM_AIRMONT), {}), + PRESENT_ENTRY_HID("INT0002", "1", ATOM_AIRMONT, {}), /* * On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides * the touchscreen ACPI device until a certain time * after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed * *and* _STA has been called at least 3 times since. */ - ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), { + PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"), }), - ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), { + PRESENT_ENTRY_HID("SYNA7500", "1", HASWELL_L, { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"), }), @@ -81,54 +94,83 @@ static const struct always_present_id always_present_ids[] = { /* * The GPD win BIOS dated 20170221 has disabled the accelerometer, the * drivers sometimes cause crashes under Windows and this is how the - * manufacturer has solved this :| Note that the the DMI data is less - * generic then it seems, a board_vendor of "AMI Corporation" is quite - * rare and a board_name of "Default String" also is rare. + * manufacturer has solved this :| The DMI match may not seem unique, + * but it is. In the 67000+ DMI decode dumps from linux-hardware.org + * only 116 have board_vendor set to "AMI Corporation" and of those 116 + * only the GPD win and pocket entries' board_name is "Default string". * * Unfortunately the GPD pocket also uses these strings and its BIOS * was copy-pasted from the GPD win, so it has a disabled KIOX000A * node which we should not enable, thus we also check the BIOS date. 
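The ENTRY() macro stack above is easier to check when expanded by hand. Under the definitions in this hunk, PRESENT_ENTRY_HID("80860F09", "1", ATOM_SILVERMONT, {}) should expand to an override_status_id initializer along these lines:

	{
		{ { "80860F09", }, {} },	/* hid[2] */
		{ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL), {} },	/* cpu_ids */
		{ { .matches = {} }, {} },	/* dmi_ids (empty: no DMI check) */
		"1",				/* uid */
		NULL,				/* path: this entry matches by HID+UID */
		ACPI_STA_DEFAULT,		/* status to report */
	}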
*/ - ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { + PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "02/21/2017") }), - ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { + PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "03/20/2017") }), - ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), { + PRESENT_ENTRY_HID("KIOX000A", "1", ATOM_AIRMONT, { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_MATCH(DMI_BOARD_NAME, "Default string"), DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), DMI_MATCH(DMI_BIOS_DATE, "05/25/2017") }), + + /* + * The GPD win/pocket have a PCI wifi card, but its DSDT has the SDIO + * mmc controller enabled and that has a child-device which _PS3 + * method sets a GPIO causing the PCI wifi card to turn off. + * See above remark about uniqueness of the DMI match. + */ + NOT_PRESENT_ENTRY_PATH("\\_SB_.PCI0.SDHB.BRC1", ATOM_AIRMONT, { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_EXACT_MATCH(DMI_BOARD_SERIAL, "Default string"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"), + }), }; -bool acpi_device_always_present(struct acpi_device *adev) +bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status) { bool ret = false; unsigned int i; - for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) { - if (acpi_match_device_ids(adev, always_present_ids[i].hid)) + for (i = 0; i < ARRAY_SIZE(override_status_ids); i++) { + if (!x86_match_cpu(override_status_ids[i].cpu_ids)) continue; - if (!adev->pnp.unique_id || - strcmp(adev->pnp.unique_id, always_present_ids[i].uid)) + if (override_status_ids[i].dmi_ids[0].matches[0].slot && + !dmi_check_system(override_status_ids[i].dmi_ids)) continue; - if (!x86_match_cpu(always_present_ids[i].cpu_ids)) - continue; + if (override_status_ids[i].path) { + struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL }; + bool match; - if (always_present_ids[i].dmi_ids[0].matches[0].slot && - !dmi_check_system(always_present_ids[i].dmi_ids)) - continue; + if (acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &path)) + continue; + + match = strcmp((char *)path.pointer, override_status_ids[i].path) == 0; + kfree(path.pointer); + + if (!match) + continue; + } else { + if (acpi_match_device_ids(adev, override_status_ids[i].hid)) + continue; + + if (!adev->pnp.unique_id || + strcmp(adev->pnp.unique_id, override_status_ids[i].uid)) + continue; + } + *status = override_status_ids[i].status; ret = true; break; } diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 8f4ae6e967e3925538b6d747df2d6394b4a76633..47c72447ccd5948280b511db47faaa039921ff58 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -299,11 +299,10 @@ static int amba_remove(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); struct amba_driver *drv = to_amba_driver(dev->driver); - int ret = 0; pm_runtime_get_sync(dev); if (drv->remove) - ret = drv->remove(pcdev); + drv->remove(pcdev); pm_runtime_put_noidle(dev); /* Undo the runtime PM settings in amba_probe() */ @@ -314,7 +313,7 @@ static int amba_remove(struct device *dev) amba_put_disable_pclk(pcdev); dev_pm_domain_detach(dev, true); - return ret; + return 0; } static void 
amba_shutdown(struct device *dev) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 80e2bbb36422ed0195a49b29a69df77e6baacb9a..366b12405708187e0116337a4a316cdec45fb9d3 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2657,8 +2657,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, if (!ret) ret = binder_translate_fd(fd, offset, t, thread, in_reply_to); - if (ret < 0) - return ret; + if (ret) + return ret > 0 ? -EINVAL : ret; } return 0; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 1f54f82d22d61450d747985ec997994721161bf9..f963a0a7da46ac91084a362b672233122297533f 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3974,6 +3974,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 840 EVO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_NO_DMA_LOG | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | @@ -3989,6 +3992,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* devices that don't properly handle TRIM commands */ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, + { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, }, /* * As defined, the DRAT (Deterministic Read After Trim) and RZAT diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index fad6c6a873130a5b4af26e117a36ba94ec9fe42f..fef46de2f6b23c29f2c8aba3d26850c1222d542e 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -917,6 +917,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) irqmask &= ~0x10; pci_write_config_byte(dev, 0x5a, irqmask); + /* + * HPT371 chips physically have only one channel, the secondary one, + * but the primary channel registers do exist! Go figure... + * So, we manually disable the non-existing channel here + * (if the BIOS hasn't done this already). + */ + if (dev->device == PCI_DEVICE_ID_TTI_HPT371) { + u8 mcr1; + + pci_read_config_byte(dev, 0x50, &mcr1); + mcr1 &= ~0x04; + pci_write_config_byte(dev, 0x50, mcr1); + } + /* * default to pci clock. make sure MA15/16 are set to output * to prevent drives having problems with 40-pin cables. Needed @@ -948,14 +962,14 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id) if ((freq >> 12) != 0xABCDE) { int i; - u8 sr; + u16 sr; u32 total = 0; pr_warn("BIOS has not set timing clocks\n"); /* This is the process the HPT371 BIOS is reported to use */ for (i = 0; i < 128; i++) { - pci_read_config_byte(dev, 0x78, &sr); + pci_read_config_word(dev, 0x78, &sr); total += sr & 0x1FF; udelay(15); } diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 982fe91125322bbda1be54c676ea5f1ca2acb769..464260f6687082c9552f5bfd23a0dd3d75c67abf 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -145,7 +145,11 @@ struct sata_dwc_device { #endif }; -#define SATA_DWC_QCMD_MAX 32 +/* + * Allow one extra special slot for commands and DMA management + * to account for libata internal commands. 
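One subtlety in the pata_hpt37x hunk above deserves a note: the value sampled from config offset 0x78 is masked with 0x1FF, i.e. it is a 9-bit field, so the old 8-bit read silently dropped bit 8 and skewed the clock calibration. The corrected sampling loop, lightly condensed from the patch:

	u16 sr;		/* was u8: too narrow for the 9-bit field */
	u32 total = 0;
	int i;

	for (i = 0; i < 128; i++) {
		pci_read_config_word(dev, 0x78, &sr);	/* 16-bit read keeps bit 8 */
		total += sr & 0x1FF;
		udelay(15);
	}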
+ */ +#define SATA_DWC_QCMD_MAX (ATA_MAX_QUEUE + 1) struct sata_dwc_device_port { struct sata_dwc_device *hsdev; diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b574cce98dc3686ce473b1f2cc132b4de8616ed2..9fcc49be499f18a3cdfcf607f79891a345e36111 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -1112,6 +1112,8 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags); skb_data3 = skb->data[3]; paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&eni_dev->pci_dev->dev, paddr)) + return enq_next; ENI_PRV_PADDR(skb) = paddr; /* prepare DMA queue entries */ j = 0; diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 0ddd611b427766f4f4d01eb17334a1e7ff13124b..43a34aee33b82059983637eb259b200ea46f3e64 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1675,6 +1675,8 @@ static int fs_init(struct fs_dev *dev) dev->hw_base = pci_resource_start(pci_dev, 0); dev->base = ioremap(dev->hw_base, 0x1000); + if (!dev->base) + return 1; reset_chip (dev); diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 21e63b6fab8360b3f6024183d6ceaa93a638e9bb..0d19fe5c99d4d72ec3abb8cd564783cf10f5ae71 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -586,6 +586,8 @@ void remove_cpu_topology(unsigned int cpu) cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); for_each_cpu(sibling, topology_sibling_cpumask(cpu)) cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); + for_each_cpu(sibling, topology_cluster_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling)); for_each_cpu(sibling, topology_llc_cpumask(cpu)) cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); diff --git a/drivers/base/core.c b/drivers/base/core.c index 389d13616d1df5371576399be1e276efcc92dda0..c0566aff535513601666083c93c3fafb69948d67 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -348,8 +348,7 @@ static void device_link_release_fn(struct work_struct *work) /* Ensure that all references to the link object have been dropped. 
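The eni.c change above is an instance of a general DMA-API rule: dma_map_single() can fail, and the returned handle must be checked with dma_mapping_error() before it is used or unmapped. A minimal sketch of the required pattern; the failure return value is driver-specific:

	dma_addr_t paddr;

	paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, paddr))
		return -ENOMEM;		/* never use or unmap a failed mapping */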
*/ device_link_synchronize_removal(); - while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put(link->supplier); + pm_runtime_release_supplier(link, true); put_device(link->consumer); put_device(link->supplier); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 8f1d6569564c4099deea31b1ebfd2371481ee36a..8ecb9f90f467b0ebfe2bd471c9cce3b6a1ddb96e 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -566,6 +566,12 @@ ssize_t __weak cpu_show_srbds(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -575,6 +581,7 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); +static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -586,6 +593,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_tsx_async_abort.attr, &dev_attr_itlb_multihit.attr, &dev_attr_srbds.attr, + &dev_attr_mmio_stale_data.attr, NULL }; diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 81ad4f867f02d584eb08a34cd4a082c8e44ba34d..c72f6f5b3297ded6172e63edb59a8c9ce7d5f36a 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -592,6 +592,9 @@ static int really_probe(struct device *dev, struct device_driver *drv) drv->remove(dev); devres_release_all(dev); + arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; driver_sysfs_remove(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); @@ -768,7 +771,7 @@ static int __init save_async_options(char *buf) pr_warn("Too long list of driver names for 'driver_async_probe'!\n"); strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN); - return 0; + return 1; } __setup("driver_async_probe=", save_async_options); @@ -894,6 +897,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) static int __device_attach(struct device *dev, bool allow_async) { int ret = 0; + bool async = false; device_lock(dev); if (dev->p->dead) { @@ -932,7 +936,7 @@ static int __device_attach(struct device *dev, bool allow_async) */ dev_dbg(dev, "scheduling asynchronous probe\n"); get_device(dev); - async_schedule_dev(__device_attach_async_helper, dev); + async = true; } else { pm_request_idle(dev); } @@ -942,6 +946,8 @@ static int __device_attach(struct device *dev, bool allow_async) } out_unlock: device_unlock(dev); + if (async) + async_schedule_dev(__device_attach_async_helper, dev); return ret; } @@ -1056,6 +1062,7 @@ static int __driver_attach(struct device *dev, void *data) { struct device_driver *drv = data; int ret; + bool async = false; /* * Lock device and try to bind to it. 
We drop the error @@ -1092,9 +1099,11 @@ static int __driver_attach(struct device *dev, void *data) if (!dev->driver) { get_device(dev); dev->p->async_driver = drv; - async_schedule_dev(__driver_attach_async_helper, dev); + async = true; } device_unlock(dev); + if (async) + async_schedule_dev(__driver_attach_async_helper, dev); return 0; } @@ -1168,6 +1177,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) devres_release_all(dev); arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index a71d1411794394d0781ec22412c5bf87109950ea..b5cbaa61cbea7ad51c85df214b546a70f5b07b76 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -59,8 +59,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla const char *dev_name, void *data) { struct super_block *s = mnt->mnt_sb; + int err; + atomic_inc(&s->s_active); down_write(&s->s_umount); + err = reconfigure_single(s, flags, data); + if (err < 0) { + deactivate_locked_super(s); + return ERR_PTR(err); + } return dget(s->s_root); } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 4167e2aef397519645b6be5c1cf42bd6f3571321..1dbaaddf540e162c9147fd6bd24b8f2de9a7b506 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1994,7 +1994,9 @@ static bool pm_ops_is_empty(const struct dev_pm_ops *ops) void device_pm_check_callbacks(struct device *dev) { - spin_lock_irq(&dev->power.lock); + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); dev->power.no_pm_callbacks = (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && !dev->bus->suspend && !dev->bus->resume)) && @@ -2003,7 +2005,7 @@ void device_pm_check_callbacks(struct device *dev) (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && !dev->driver->suspend && !dev->driver->resume)); - spin_unlock_irq(&dev->power.lock); + spin_unlock_irqrestore(&dev->power.lock, flags); } bool dev_pm_skip_suspend(struct device *dev) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index bc649da4899a0b873884226c40af571c6cfed8a2..1573319404888ad8b07bffdb4f0620d4dc553a4b 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -305,19 +305,40 @@ static int rpm_get_suppliers(struct device *dev) return 0; } +/** + * pm_runtime_release_supplier - Drop references to device link's supplier. + * @link: Target device link. + * @check_idle: Whether or not to check if the supplier device is idle. + * + * Drop all runtime PM references associated with @link to its supplier device + * and if @check_idle is set, check if that device is idle (and so it can be + * suspended). + */ +void pm_runtime_release_supplier(struct device_link *link, bool check_idle) +{ + struct device *supplier = link->supplier; + + /* + * The additional power.usage_count check is a safety net in case + * the rpm_active refcount becomes saturated, in which case + * refcount_dec_not_one() would return true forever, but it is not + * strictly necessary. 
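Both dd.c hunks that touch __device_attach() and __driver_attach() apply the same fix, which reads more clearly in isolation: the decision to probe asynchronously is recorded in a local flag while the device lock is held, but async_schedule_dev() is only called after device_unlock(), so an async helper that happens to run synchronously and takes the same lock cannot deadlock. Schematically, with a hypothetical predicate:

	bool async = false;

	device_lock(dev);
	if (want_async_probe(dev))	/* hypothetical predicate */
		async = true;
	device_unlock(dev);

	if (async)
		async_schedule_dev(probe_helper, dev);	/* helper as in the hunks */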
+ */ + while (refcount_dec_not_one(&link->rpm_active) && + atomic_read(&supplier->power.usage_count) > 0) + pm_runtime_put_noidle(supplier); + + if (check_idle) + pm_request_idle(supplier); +} + static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) { struct device_link *link; list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, - device_links_read_lock_held()) { - - while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put_noidle(link->supplier); - - if (try_to_suspend) - pm_request_idle(link->supplier); - } + device_links_read_lock_held()) + pm_runtime_release_supplier(link, try_to_suspend); } static void rpm_put_suppliers(struct device *dev) @@ -1755,9 +1776,7 @@ void pm_runtime_drop_link(struct device_link *link) return; pm_runtime_drop_link_count(link->consumer); - - while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put(link->supplier); + pm_runtime_release_supplier(link, true); } static bool pm_runtime_need_not_resume(struct device *dev) diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 92073ac68473c5c677bd5d48c8701a0d11a52440..8997e0227eb9d83f65d7664cedf53d0054cda5b3 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state; bool events_check_enabled __read_mostly; /* First wakeup IRQ seen by the kernel in the last cycle. */ -unsigned int pm_wakeup_irq __read_mostly; +static unsigned int wakeup_irq[2] __read_mostly; +static DEFINE_RAW_SPINLOCK(wakeup_irq_lock); /* If greater than 0 and the system is suspending, terminate the suspend. */ static atomic_t pm_abort_suspend __read_mostly; @@ -941,19 +942,45 @@ void pm_system_cancel_wakeup(void) atomic_dec_if_positive(&pm_abort_suspend); } -void pm_wakeup_clear(bool reset) +void pm_wakeup_clear(unsigned int irq_number) { - pm_wakeup_irq = 0; - if (reset) + raw_spin_lock_irq(&wakeup_irq_lock); + + if (irq_number && wakeup_irq[0] == irq_number) + wakeup_irq[0] = wakeup_irq[1]; + else + wakeup_irq[0] = 0; + + wakeup_irq[1] = 0; + + raw_spin_unlock_irq(&wakeup_irq_lock); + + if (!irq_number) atomic_set(&pm_abort_suspend, 0); } void pm_system_irq_wakeup(unsigned int irq_number) { - if (pm_wakeup_irq == 0) { - pm_wakeup_irq = irq_number; + unsigned long flags; + + raw_spin_lock_irqsave(&wakeup_irq_lock, flags); + + if (wakeup_irq[0] == 0) + wakeup_irq[0] = irq_number; + else if (wakeup_irq[1] == 0) + wakeup_irq[1] = irq_number; + else + irq_number = 0; + + raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); + + if (irq_number) pm_system_wakeup(); - } +} + +unsigned int pm_wakeup_irq(void) +{ + return wakeup_irq[0]; } /** diff --git a/drivers/base/property.c b/drivers/base/property.c index 4c43d30145c6b086dfeb6aa57ef5d9c2a53f5353..cf88a5554d9c5111a5686ca9488b0fbd679f3c76 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -1195,8 +1195,10 @@ fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id, fwnode_graph_for_each_endpoint(fwnode, ep) { node = fwnode_graph_get_remote_port_parent(ep); - if (!fwnode_device_is_available(node)) + if (!fwnode_device_is_available(node)) { + fwnode_handle_put(node); continue; + } ret = match(node, con_id, data); fwnode_handle_put(node); diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index ad5c2de395d1f11690e4b6dd03fc435565a51a85..87c5c421e0f461a307c484821243f82aef75a1b0 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -170,11 +170,9 @@ static void 
regmap_irq_sync_unlock(struct irq_data *data) ret = regmap_write(map, reg, d->mask_buf[i]); if (d->chip->clear_ack) { if (d->chip->ack_invert && !ret) - ret = regmap_write(map, reg, - d->mask_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~d->mask_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", @@ -509,11 +507,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) data->status_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - data->status_buf[i]); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~data->status_buf[i]); + ret = regmap_write(map, reg, 0); } if (ret != 0) dev_err(map->dev, "Failed to ack 0x%x: %d\n", @@ -745,13 +741,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, d->status_buf[i] & d->mask_buf[i]); if (chip->clear_ack) { if (chip->ack_invert && !ret) - ret = regmap_write(map, reg, - (d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, UINT_MAX); else if (!ret) - ret = regmap_write(map, reg, - ~(d->status_buf[i] & - d->mask_buf[i])); + ret = regmap_write(map, reg, 0); } if (ret != 0) { dev_err(map->dev, "Failed to ack 0x%x: %d\n", diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 456a1787e18d06b5ad5eec33a46a65eae7cb93bd..55a30afc14a00cc3dd3bba15281f623561442b62 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -620,6 +620,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, if (ret) return ret; + regmap_debugfs_exit(map); regmap_debugfs_init(map); /* Add a devres resource for dev_get_regmap() */ diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 206bd4d7d7e237beaf36531833787e6005135767..d2fb3eb5816c3518aa0ad4ad1af666230a4f7bc5 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -519,7 +519,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, return -ENOENT; if (nargs_prop) { - error = property_entry_read_int_array(swnode->node->properties, + error = property_entry_read_int_array(ref->node->properties, nargs_prop, sizeof(u32), &nargs_prop_val, 1); if (error) diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index f2548049aa0e92137f2f59728d517ffdc6c53cc2..40c53632512b764eee3b720a973b55930096e06c 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -39,6 +39,22 @@ config BLK_DEV_FD To compile this driver as a module, choose M here: the module will be called floppy. +config BLK_DEV_FD_RAWCMD + bool "Support for raw floppy disk commands (DEPRECATED)" + depends on BLK_DEV_FD + help + If you want to use actual physical floppies and expect to do + special low-level hardware accesses to them (access and use + non-standard formats, for example), then enable this. + + Note that the code enabled by this option is rarely used and + might be unstable or insecure, and distros should not enable it. + + Note: FDRAWCMD is deprecated and will be removed from the kernel + in the near future. + + If unsure, say N. 
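A new bool Kconfig symbol like BLK_DEV_FD_RAWCMD is consumed in C as a CONFIG_* ifdef. A hedged sketch of how a driver would typically compile the deprecated path out; the function name here is illustrative, not taken from floppy.c:

	#ifdef CONFIG_BLK_DEV_FD_RAWCMD
	static int handle_raw_cmd_ioctl(void __user *param)
	{
		/* legacy FDRAWCMD handling */
		return 0;
	}
	#else
	static inline int handle_raw_cmd_ioctl(void __user *param)
	{
		return -EOPNOTSUPP;	/* raw commands compiled out */
	}
	#endif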
+ config AMIGA_FLOPPY tristate "Amiga floppy support" depends on AMIGA diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 8f879e5c2f67061f8ce035c6e76c7761c58f14c1..60b9ca53c0a354ec6260b92463b616d11538a3ed 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1644,22 +1644,22 @@ struct sib_info { }; void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib); -extern void notify_resource_state(struct sk_buff *, +extern int notify_resource_state(struct sk_buff *, unsigned int, struct drbd_resource *, struct resource_info *, enum drbd_notification_type); -extern void notify_device_state(struct sk_buff *, +extern int notify_device_state(struct sk_buff *, unsigned int, struct drbd_device *, struct device_info *, enum drbd_notification_type); -extern void notify_connection_state(struct sk_buff *, +extern int notify_connection_state(struct sk_buff *, unsigned int, struct drbd_connection *, struct connection_info *, enum drbd_notification_type); -extern void notify_peer_device_state(struct sk_buff *, +extern int notify_peer_device_state(struct sk_buff *, unsigned int, struct drbd_peer_device *, struct peer_device_info *, diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index bf7de4c7b96c19397ca43577f9858b1a8fe356ca..f8d0146bf7852e031bd044427ad4f229906b7f3c 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -4614,7 +4614,7 @@ static int nla_put_notification_header(struct sk_buff *msg, return drbd_notification_header_to_skb(msg, &nh, true); } -void notify_resource_state(struct sk_buff *skb, +int notify_resource_state(struct sk_buff *skb, unsigned int seq, struct drbd_resource *resource, struct resource_info *resource_info, @@ -4656,16 +4656,17 @@ void notify_resource_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_device_state(struct sk_buff *skb, +int notify_device_state(struct sk_buff *skb, unsigned int seq, struct drbd_device *device, struct device_info *device_info, @@ -4705,16 +4706,17 @@ void notify_device_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_connection_state(struct sk_buff *skb, +int notify_connection_state(struct sk_buff *skb, unsigned int seq, struct drbd_connection *connection, struct connection_info *connection_info, @@ -4754,16 +4756,17 @@ void notify_connection_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n", err, seq); + return err; } -void notify_peer_device_state(struct sk_buff *skb, +int notify_peer_device_state(struct sk_buff *skb, unsigned int seq, struct drbd_peer_device *peer_device, struct peer_device_info *peer_device_info, @@ -4804,13 +4807,14 @@ void notify_peer_device_state(struct sk_buff *skb, if (err && err != -ESRCH) goto failed; } - return; + return 0; nla_put_failure: nlmsg_free(skb); failed: drbd_err(peer_device, "Error %d while broadcasting event. 
Event seq:%u\n", err, seq); + return err; } void notify_helper(enum drbd_notification_type type, @@ -4861,7 +4865,7 @@ void notify_helper(enum drbd_notification_type type, err, seq); } -static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq) +static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq) { struct drbd_genlmsghdr *dh; int err; @@ -4875,11 +4879,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq) if (nla_put_notification_header(skb, NOTIFY_EXISTS)) goto nla_put_failure; genlmsg_end(skb, dh); - return; + return 0; nla_put_failure: nlmsg_free(skb); pr_err("Error %d sending event. Event seq:%u\n", err, seq); + return err; } static void free_state_changes(struct list_head *list) @@ -4906,6 +4911,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) unsigned int seq = cb->args[2]; unsigned int n; enum drbd_notification_type flags = 0; + int err = 0; /* There is no need for taking notification_mutex here: it doesn't matter if the initial state events mix with later state chage @@ -4914,32 +4920,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) cb->args[5]--; if (cb->args[5] == 1) { - notify_initial_state_done(skb, seq); + err = notify_initial_state_done(skb, seq); goto out; } n = cb->args[4]++; if (cb->args[4] < cb->args[3]) flags |= NOTIFY_CONTINUES; if (n < 1) { - notify_resource_state_change(skb, seq, state_change->resource, + err = notify_resource_state_change(skb, seq, state_change->resource, NOTIFY_EXISTS | flags); goto next; } n--; if (n < state_change->n_connections) { - notify_connection_state_change(skb, seq, &state_change->connections[n], + err = notify_connection_state_change(skb, seq, &state_change->connections[n], NOTIFY_EXISTS | flags); goto next; } n -= state_change->n_connections; if (n < state_change->n_devices) { - notify_device_state_change(skb, seq, &state_change->devices[n], + err = notify_device_state_change(skb, seq, &state_change->devices[n], NOTIFY_EXISTS | flags); goto next; } n -= state_change->n_devices; if (n < state_change->n_devices * state_change->n_connections) { - notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], + err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n], NOTIFY_EXISTS | flags); goto next; } @@ -4954,7 +4960,10 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) cb->args[4] = 0; } out: - return skb->len; + if (err) + return err; + else + return skb->len; } int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb) diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 330f851cb8f0b0acccb15160f7a8f9cc64f723b8..69638146f949cfa339e6c428de6d8af1b0dd1a12 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -177,7 +177,8 @@ void start_new_tl_epoch(struct drbd_connection *connection) void complete_master_bio(struct drbd_device *device, struct bio_and_error *m) { - m->bio->bi_status = errno_to_blk_status(m->error); + if (unlikely(m->error)) + m->bio->bi_status = errno_to_blk_status(m->error); bio_endio(m->bio); dec_ap_bio(device); } diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 0067d328f0b56aad6c5bd4624a45f13187ecea07..5fbaea6b77b14e9fb5898b54cdff57fb570bc9e9 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device, 
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 0067d328f0b56aad6c5bd4624a45f13187ecea07..5fbaea6b77b14e9fb5898b54cdff57fb570bc9e9 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
 	return rv;
 }

-void notify_resource_state_change(struct sk_buff *skb,
+int notify_resource_state_change(struct sk_buff *skb,
 				  unsigned int seq,
 				  struct drbd_resource_state_change *resource_state_change,
 				  enum drbd_notification_type type)
@@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb,
 		.res_susp_fen = resource_state_change->susp_fen[NEW],
 	};

-	notify_resource_state(skb, seq, resource, &resource_info, type);
+	return notify_resource_state(skb, seq, resource, &resource_info, type);
 }

-void notify_connection_state_change(struct sk_buff *skb,
+int notify_connection_state_change(struct sk_buff *skb,
 				    unsigned int seq,
 				    struct drbd_connection_state_change *connection_state_change,
 				    enum drbd_notification_type type)
@@ -1564,10 +1564,10 @@ void notify_connection_state_change(struct sk_buff *skb,
 		.conn_role = connection_state_change->peer_role[NEW],
 	};

-	notify_connection_state(skb, seq, connection, &connection_info, type);
+	return notify_connection_state(skb, seq, connection, &connection_info, type);
 }

-void notify_device_state_change(struct sk_buff *skb,
+int notify_device_state_change(struct sk_buff *skb,
 				unsigned int seq,
 				struct drbd_device_state_change *device_state_change,
 				enum drbd_notification_type type)
@@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb,
 		.dev_disk_state = device_state_change->disk_state[NEW],
 	};

-	notify_device_state(skb, seq, device, &device_info, type);
+	return notify_device_state(skb, seq, device, &device_info, type);
 }

-void notify_peer_device_state_change(struct sk_buff *skb,
+int notify_peer_device_state_change(struct sk_buff *skb,
 				     unsigned int seq,
 				     struct drbd_peer_device_state_change *p,
 				     enum drbd_notification_type type)
@@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb,
 		.peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
 	};

-	notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
+	return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
 }

 static void broadcast_state_change(struct drbd_state_change *state_change)
@@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
 	struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
 	bool resource_state_has_changed;
 	unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
-	void (*last_func)(struct sk_buff *, unsigned int, void *,
+	int (*last_func)(struct sk_buff *, unsigned int, void *,
 			  enum drbd_notification_type) = NULL;
 	void *last_arg = NULL;

diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h
index ba80f612d6abbc185dc9a73ee5e72dbb86932b8f..d5b0479bc9a6649e6bd423d5793861e5fa0022f6 100644
--- a/drivers/block/drbd/drbd_state_change.h
+++ b/drivers/block/drbd/drbd_state_change.h
@@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_
 extern void copy_old_to_new_state_change(struct drbd_state_change *);
 extern void forget_state_change(struct drbd_state_change *);

-extern void notify_resource_state_change(struct sk_buff *,
+extern int notify_resource_state_change(struct sk_buff *,
 					unsigned int,
 					struct drbd_resource_state_change *,
 					enum drbd_notification_type type);
-extern void notify_connection_state_change(struct sk_buff *,
+extern int notify_connection_state_change(struct sk_buff *,
 					  unsigned int,
 					  struct drbd_connection_state_change *,
 					  enum drbd_notification_type type);
-extern void notify_device_state_change(struct sk_buff *,
+extern int notify_device_state_change(struct sk_buff *,
 				      unsigned int,
 				      struct drbd_device_state_change *,
 				      enum drbd_notification_type type);
-extern void notify_peer_device_state_change(struct sk_buff *,
+extern int notify_peer_device_state_change(struct sk_buff *,
 					   unsigned int,
 					   struct drbd_peer_device_state_change *,
 					   enum drbd_notification_type type);
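broadcast_state_change() stores the most recently used notifier in last_func, so the function-pointer type has to change together with the functions it aliases; calling through a mismatched pointer type is undefined behaviour. A small standalone illustration (names illustrative):

#include <stdio.h>

static int notify_a(int seq) { printf("a %d\n", seq); return 0; }
static int notify_b(int seq) { printf("b %d\n", seq); return -1; }

int main(void)
{
	/* was: void (*last_func)(int); must now match the int return */
	int (*last_func)(int) = notify_a;
	int err = last_func(1);

	last_func = notify_b;
	if (!err)
		err = last_func(2);
	printf("err=%d\n", err);
	return 0;
}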
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 7df79ae6b0a1e194834b938844697520bc7e8852..4ef407a33996aaf95e8297374e623fae8c81ba28 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -509,8 +509,8 @@ static unsigned long fdc_busy;
 static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
 static DECLARE_WAIT_QUEUE_HEAD(command_done);

-/* Errors during formatting are counted here. */
-static int format_errors;
+/* errors encountered on the current (or last) request */
+static int floppy_errors;

 /* Format request descriptor. */
 static struct format_descr format_req;
@@ -530,7 +530,6 @@ static struct format_descr format_req;
 static char *floppy_track_buffer;
 static int max_buffer_sectors;

-static int *errors;
 typedef void (*done_f)(int);
 static const struct cont_t {
 	void (*interrupt)(void);
@@ -1015,7 +1014,7 @@ static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
 static void cancel_activity(void)
 {
 	do_floppy = NULL;
-	cancel_delayed_work_sync(&fd_timer);
+	cancel_delayed_work(&fd_timer);
 	cancel_work_sync(&floppy_work);
 }

@@ -1455,7 +1454,7 @@ static int interpret_errors(void)
 			if (drive_params[current_drive].flags & FTD_MSG)
 				DPRINT("Over/Underrun - retrying\n");
 			bad = 0;
-		} else if (*errors >= drive_params[current_drive].max_errors.reporting) {
+		} else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
 			print_errors();
 		}
 		if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
@@ -2095,7 +2094,7 @@ static void bad_flp_intr(void)
 		if (!next_valid_format(current_drive))
 			return;
 	}
-	err_count = ++(*errors);
+	err_count = ++floppy_errors;
 	INFBOUND(write_errors[current_drive].badness, err_count);
 	if (err_count > drive_params[current_drive].max_errors.abort)
 		cont->done(0);
@@ -2240,9 +2239,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
 		return -EINVAL;
 	}
 	format_req = *tmp_format_req;
-	format_errors = 0;
 	cont = &format_cont;
-	errors = &format_errors;
+	floppy_errors = 0;
 	ret = wait_til_done(redo_format, true);
 	if (ret == -EINTR)
 		return -EINTR;
@@ -2721,7 +2719,7 @@ static int make_raw_rw_request(void)
 	 */
 	if (!direct ||
 	    (indirect * 2 > direct * 3 &&
-	     *errors < drive_params[current_drive].max_errors.read_track &&
+	     floppy_errors < drive_params[current_drive].max_errors.read_track &&
 	     ((!probing ||
 	       (drive_params[current_drive].read_track & (1 << drive_state[current_drive].probed_format)))))) {
 		max_size = blk_rq_sectors(current_req);
@@ -2846,10 +2844,11 @@ static int set_next_request(void)
 	current_req = list_first_entry_or_null(&floppy_reqs, struct request,
 					       queuelist);
 	if (current_req) {
-		current_req->error_count = 0;
+		floppy_errors = 0;
 		list_del_init(&current_req->queuelist);
+		return 1;
 	}
-	return current_req != NULL;
+	return 0;
 }

 /* Starts or continues processing request. Will automatically unlock the
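The floppy hunks above retire the roaming errors pointer, which could be left pointing into a request that had already completed and been freed, in favour of one driver-owned counter. A userspace sketch of why the owned counter is the safer shape (illustrative, not the driver code):

#include <stdio.h>
#include <stdlib.h>

struct request { int error_count; };

static int floppy_errors;	/* driver-owned, always valid */

static void start_request(struct request *rq)
{
	floppy_errors = 0;	/* per-request reset, stored locally */
	(void)rq;		/* no pointer into rq is retained */
}

int main(void)
{
	struct request *rq = malloc(sizeof(*rq));

	start_request(rq);
	free(rq);		/* the request may be freed early... */
	floppy_errors++;	/* ...but the counter stays valid */
	printf("errors=%d\n", floppy_errors);
	return 0;
}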
 * driver at end of request.
 */
@@ -2908,7 +2907,6 @@ static void redo_fd_request(void)
 			_floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
 	} else
 		probing = 0;
-	errors = &(current_req->error_count);
 	tmp = make_raw_rw_request();
 	if (tmp < 2) {
 		request_done(tmp);
@@ -3069,6 +3067,8 @@ static const char *drive_name(int type, int drive)
 		return "(null)";
 }

+#ifdef CONFIG_BLK_DEV_FD_RAWCMD
+
 /* raw commands */
 static void raw_cmd_done(int flag)
 {
@@ -3169,6 +3169,8 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
 	}
 }

+#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT)
+
 static int raw_cmd_copyin(int cmd, void __user *param,
 			  struct floppy_raw_cmd **rcmd)
 {
@@ -3198,7 +3200,7 @@ static int raw_cmd_copyin(int cmd, void __user *param,
 	ptr->resultcode = 0;

 	if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
-		if (ptr->length <= 0)
+		if (ptr->length <= 0 || ptr->length >= MAX_LEN)
 			return -EINVAL;
 		ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
 		fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
@@ -3271,6 +3273,35 @@ static int raw_cmd_ioctl(int cmd, void __user *param)
 	return ret;
 }

+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
+				void __user *param)
+{
+	int ret;
+
+	pr_warn_once("Note: FDRAWCMD is deprecated and will be removed from the kernel in the near future.\n");
+
+	if (type)
+		return -EINVAL;
+	if (lock_fdc(drive))
+		return -EINTR;
+	set_floppy(drive);
+	ret = raw_cmd_ioctl(cmd, param);
+	if (ret == -EINTR)
+		return -EINTR;
+	process_fd_request();
+	return ret;
+}
+
+#else /* CONFIG_BLK_DEV_FD_RAWCMD */
+
+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
+				void __user *param)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif
+
 static int invalidate_drive(struct block_device *bdev)
 {
 	/* invalidate the buffer track to force a reread */
@@ -3459,7 +3490,6 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
 {
 	int drive = (long)bdev->bd_disk->private_data;
 	int type = ITYPE(drive_state[drive].fd_device);
-	int i;
 	int ret;
 	int size;
 	union inparam {
@@ -3610,16 +3640,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
 		outparam = &write_errors[drive];
 		break;
 	case FDRAWCMD:
-		if (type)
-			return -EINVAL;
-		if (lock_fdc(drive))
-			return -EINTR;
-		set_floppy(drive);
-		i = raw_cmd_ioctl(cmd, (void __user *)param);
-		if (i == -EINTR)
-			return -EINTR;
-		process_fd_request();
-		return i;
+		return floppy_raw_cmd_ioctl(type, drive, cmd, (void __user *)param);
 	case FDTWADDLE:
 		if (lock_fdc(drive))
 			return -EINTR;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e354faf7c9e6fca8430bc47d25f2fe93b973a521..9ee97e5933471b6dfec315529e38a38e0b5d33ce 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -797,33 +797,33 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)

 static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
 {
-	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
+	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset);
 }

 static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
 {
-	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
+	return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
 }

 static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
 {
 	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);

-	return sprintf(buf, "%s\n", autoclear ? "1" : "0");
+	return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0");
 }

 static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
 {
 	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);

-	return sprintf(buf, "%s\n", partscan ? "1" : "0");
+	return sysfs_emit(buf, "%s\n", partscan ? "1" : "0");
 }

 static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
 {
 	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);

-	return sprintf(buf, "%s\n", dio ? "1" : "0");
+	return sysfs_emit(buf, "%s\n", dio ? "1" : "0");
 }

 LOOP_ATTR_RO(backing_file);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1bb9f45f2e3d7478515fa1d70597e945dd13221e..b45f4e5585b51dc29acda4e0fe423b885b6358b8 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -395,13 +395,14 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 	if (!mutex_trylock(&cmd->lock))
 		return BLK_EH_RESET_TIMER;

-	if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+	if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
 		mutex_unlock(&cmd->lock);
 		return BLK_EH_DONE;
 	}

 	if (!refcount_inc_not_zero(&nbd->config_refs)) {
 		cmd->status = BLK_STS_TIMEOUT;
+		__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
 		mutex_unlock(&cmd->lock);
 		goto done;
 	}
@@ -470,6 +471,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
 	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
 	cmd->status = BLK_STS_IOERR;
+	__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
 	mutex_unlock(&cmd->lock);
 	sock_shutdown(nbd);
 	nbd_config_put(nbd);
@@ -737,7 +739,7 @@ static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
 	cmd = blk_mq_rq_to_pdu(req);

 	mutex_lock(&cmd->lock);
-	if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+	if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
 		dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
 			tag, cmd->status, cmd->flags);
 		ret = -ENOENT;
@@ -844,8 +846,16 @@ static void recv_work(struct work_struct *work)
 		}

 		rq = blk_mq_rq_from_pdu(cmd);
-		if (likely(!blk_should_fake_timeout(rq->q)))
-			blk_mq_complete_request(rq);
+		if (likely(!blk_should_fake_timeout(rq->q))) {
+			bool complete;
+
+			mutex_lock(&cmd->lock);
+			complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
+							&cmd->flags);
+			mutex_unlock(&cmd->lock);
+			if (complete)
+				blk_mq_complete_request(rq);
+		}
 		percpu_ref_put(&q->q_usage_counter);
 	}
@@ -1414,7 +1424,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
 static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
 				 struct block_device *bdev)
 {
-	sock_shutdown(nbd);
+	nbd_clear_sock(nbd);
 	__invalidate_device(bdev, true);
 	nbd_bdev_reset(bdev);
 	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
@@ -1530,15 +1540,20 @@ static struct nbd_config *nbd_alloc_config(void)
 {
 	struct nbd_config *config;

+	if (!try_module_get(THIS_MODULE))
+		return ERR_PTR(-ENODEV);
+
 	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
-	if (!config)
-		return NULL;
+	if (!config) {
+		module_put(THIS_MODULE);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	atomic_set(&config->recv_threads, 0);
 	init_waitqueue_head(&config->recv_wq);
 	init_waitqueue_head(&config->conn_wait);
 	config->blksize = NBD_DEF_BLKSIZE;
 	atomic_set(&config->live_connections, 0);
-	try_module_get(THIS_MODULE);
 	return config;
 }

@@ -1565,12 +1580,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
 			mutex_unlock(&nbd->config_lock);
 			goto out;
 		}
-		config = nbd->config = nbd_alloc_config();
-		if (!config) {
-			ret = -ENOMEM;
+		config = nbd_alloc_config();
+		if (IS_ERR(config)) {
+			ret = PTR_ERR(config);
 			mutex_unlock(&nbd->config_lock);
 			goto out;
 		}
+		nbd->config = config;
 		refcount_set(&nbd->config_refs, 1);
 		refcount_inc(&nbd->refs);
 		mutex_unlock(&nbd->config_lock);
@@ -1760,14 +1776,6 @@ static int nbd_dev_add(int index)
 	int err = -ENOMEM;
 	int first_minor = index << part_shift;

-	/*
-	 * Too big index can cause duplicate creation of sysfs files/links,
-	 * because MKDEV() expect that the max first minor is MINORMASK, or
-	 * index << part_shift can overflow.
-	 */
-	if (first_minor < index || first_minor > MINORMASK)
-		return -EINVAL;
-
 	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
 	if (!nbd)
 		goto out;
@@ -1924,8 +1932,20 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
 		return -EPERM;

-	if (info->attrs[NBD_ATTR_INDEX])
+	if (info->attrs[NBD_ATTR_INDEX]) {
 		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
+
+		/*
+		 * Too big first_minor can cause duplicate creation of
+		 * sysfs files/links, since index << part_shift might
+		 * overflow, or MKDEV() expect that the max bits of
+		 * first_minor is 20.
+		 */
+		if (index < 0 || index > MINORMASK >> part_shift) {
+			printk(KERN_ERR "nbd: illegal input index %d\n", index);
+			return -EINVAL;
+		}
+	}
 	if (!info->attrs[NBD_ATTR_SOCKETS]) {
 		printk(KERN_ERR "nbd: must specify at least one socket\n");
 		return -EINVAL;
@@ -2001,13 +2021,14 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
 		nbd_put(nbd);
 		return -EINVAL;
 	}
-	config = nbd->config = nbd_alloc_config();
-	if (!nbd->config) {
+	config = nbd_alloc_config();
+	if (IS_ERR(config)) {
 		mutex_unlock(&nbd->config_lock);
 		nbd_put(nbd);
 		printk(KERN_ERR "nbd: couldn't allocate config\n");
-		return -ENOMEM;
+		return PTR_ERR(config);
 	}
+	nbd->config = config;
 	refcount_set(&nbd->config_refs, 1);
 	set_bit(NBD_RT_BOUND, &config->runtime_flags);
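Both nbd_open() and nbd_genl_connect() above switch nbd_alloc_config() from returning NULL to the ERR_PTR convention, so callers can distinguish -ENODEV (module unloading) from -ENOMEM. A sketch of that convention using a hypothetical foo object, not nbd's types:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

struct foo { int dummy; };

static struct foo *foo_alloc(void)
{
	struct foo *f;

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-ENODEV);	/* module is going away */

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f) {
		module_put(THIS_MODULE);	/* undo step 1 on failure */
		return ERR_PTR(-ENOMEM);
	}
	return f;
}

static int foo_open(void)
{
	struct foo *f = foo_alloc();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* propagate the real errno */
	/* ...only now publish f to shared state... */
	return 0;
}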
@@ -2533,6 +2554,12 @@ static void __exit nbd_cleanup(void)
 	struct nbd_device *nbd;
 	LIST_HEAD(del_list);

+	/*
+	 * Unregister netlink interface prior to waiting
+	 * for the completion of netlink commands.
+	 */
+	genl_unregister_family(&nbd_genl_family);
+
 	nbd_dbg_close();

 	mutex_lock(&nbd_index_mutex);
@@ -2542,13 +2569,15 @@ static void __exit nbd_cleanup(void)
 	while (!list_empty(&del_list)) {
 		nbd = list_first_entry(&del_list, struct nbd_device, list);
 		list_del_init(&nbd->list);
+		if (refcount_read(&nbd->config_refs))
+			printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n",
+					refcount_read(&nbd->config_refs));
 		if (refcount_read(&nbd->refs) != 1)
 			printk(KERN_ERR "nbd: possibly leaking a device\n");
 		nbd_put(nbd);
 	}

 	idr_destroy(&nbd_index_idr);
-	genl_unregister_family(&nbd_genl_family);
 	unregister_blkdev(NBD_MAJOR, "nbd");
 }
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 42acf9587ef38947a68485dd884f5c2507b6cc45..02e2056780ad2287f3cbb6b42514fa5cc9ebf7b3 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -825,9 +825,17 @@ static int virtblk_probe(struct virtio_device *vdev)
 	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
 				   struct virtio_blk_config, blk_size,
 				   &blk_size);
-	if (!err)
+	if (!err) {
+		err = blk_validate_block_size(blk_size);
+		if (err) {
+			dev_err(&vdev->dev,
+				"virtio_blk: invalid block size: 0x%x\n",
+				blk_size);
+			goto out_free_tags;
+		}
+
 		blk_queue_logical_block_size(q, blk_size);
-	else
+	} else
 		blk_size = queue_logical_block_size(q);

 	/* Use topology information if available */
@@ -869,9 +877,15 @@ static int virtblk_probe(struct virtio_device *vdev)

 		virtio_cread(vdev, struct virtio_blk_config,
 			     max_discard_seg, &v);
+
+		/*
+		 * max_discard_seg == 0 is out of spec but we always
+		 * handled it.
+		 */
+		if (!v)
+			v = sg_elems - 2;
 		blk_queue_max_discard_segments(q,
-					       min_not_zero(v,
-							    MAX_DISCARD_SEGMENTS));
+					       min(v, MAX_DISCARD_SEGMENTS));

 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	}
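virtblk_probe() now rejects nonsensical device-reported block sizes via blk_validate_block_size(). A userspace model of the check as I read it (the kernel helper is authoritative): a logical block size must be a power of two between 512 and PAGE_SIZE.

#include <stdio.h>

#define SECTOR_SIZE 512
#define PAGE_SIZE 4096

static int validate_block_size(unsigned int bsize)
{
	if (bsize < SECTOR_SIZE || bsize > PAGE_SIZE)
		return -1;
	if (bsize & (bsize - 1))	/* not a power of two */
		return -1;
	return 0;
}

int main(void)
{
	unsigned int sizes[] = { 512, 520, 4096, 65536 };

	for (int i = 0; i < 4; i++)
		printf("%u -> %s\n", sizes[i],
		       validate_block_size(sizes[i]) ? "invalid" : "ok");
	return 0;
}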
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 22842d2938c2819e192480dff4ce3ed18a675c5d..abbb68b6d9bd53b5428c9ba3f4ee5173f576739e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -151,6 +151,10 @@ static unsigned int xen_blkif_max_ring_order;
 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

+static bool __read_mostly xen_blkif_trusted = true;
+module_param_named(trusted, xen_blkif_trusted, bool, 0644);
+MODULE_PARM_DESC(trusted, "Is the backend trusted");
+
 #define BLK_RING_SIZE(info)	\
 	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)

@@ -208,6 +212,7 @@ struct blkfront_info
 	unsigned int feature_discard:1;
 	unsigned int feature_secdiscard:1;
 	unsigned int feature_persistent:1;
+	unsigned int bounce:1;
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
 	/* Number of 4KB segments handled */
@@ -310,8 +315,8 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
 		if (!gnt_list_entry)
 			goto out_of_memory;

-		if (info->feature_persistent) {
-			granted_page = alloc_page(GFP_NOIO);
+		if (info->bounce) {
+			granted_page = alloc_page(GFP_NOIO | __GFP_ZERO);
 			if (!granted_page) {
 				kfree(gnt_list_entry);
 				goto out_of_memory;
@@ -330,7 +335,7 @@ static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
 	list_for_each_entry_safe(gnt_list_entry, n,
 				 &rinfo->grants, node) {
 		list_del(&gnt_list_entry->node);
-		if (info->feature_persistent)
+		if (info->bounce)
 			__free_page(gnt_list_entry->page);
 		kfree(gnt_list_entry);
 		i--;
@@ -376,7 +381,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
-	if (info->feature_persistent)
+	if (info->bounce)
 		grant_foreign_access(gnt_list_entry, info);
 	else {
 		/* Grant access to the GFN passed by the caller */
@@ -400,7 +405,7 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
 	/* Assign a gref to this page */
 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
-	if (!info->feature_persistent) {
+	if (!info->bounce) {
 		struct page *indirect_page;

 		/* Fetch a pre-allocated page to use for indirect grefs */
@@ -715,7 +720,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
 		.grant_idx = 0,
 		.segments = NULL,
 		.rinfo = rinfo,
-		.need_copy = rq_data_dir(req) && info->feature_persistent,
+		.need_copy = rq_data_dir(req) && info->bounce,
 	};

 	/*
@@ -1035,11 +1040,12 @@ static void xlvbd_flush(struct blkfront_info *info)
 {
 	blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
 			      info->feature_fua ? true : false);
-	pr_info("blkfront: %s: %s %s %s %s %s\n",
+	pr_info("blkfront: %s: %s %s %s %s %s %s %s\n",
 		info->gd->disk_name, flush_info(info),
 		"persistent grants:", info->feature_persistent ?
 		"enabled;" : "disabled;", "indirect descriptors:",
-		info->max_indirect_segments ? "enabled;" : "disabled;");
+		info->max_indirect_segments ? "enabled;" : "disabled;",
+		"bounce buffer:", info->bounce ? "enabled" : "disabled;");
 }

 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -1273,7 +1279,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 	if (!list_empty(&rinfo->indirect_pages)) {
 		struct page *indirect_page, *n;

-		BUG_ON(info->feature_persistent);
+		BUG_ON(info->bounce);
 		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
 			list_del(&indirect_page->lru);
 			__free_page(indirect_page);
@@ -1290,7 +1296,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 							  0, 0UL);
 				rinfo->persistent_gnts_c--;
 			}
-			if (info->feature_persistent)
+			if (info->bounce)
 				__free_page(persistent_gnt->page);
 			kfree(persistent_gnt);
 		}
@@ -1311,7 +1317,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 		for (j = 0; j < segs; j++) {
 			persistent_gnt = rinfo->shadow[i].grants_used[j];
 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
-			if (info->feature_persistent)
+			if (info->bounce)
 				__free_page(persistent_gnt->page);
 			kfree(persistent_gnt);
 		}
@@ -1352,7 +1358,8 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 			rinfo->ring_ref[i] = GRANT_INVALID_REF;
 		}
 	}
-	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
+	free_pages_exact(rinfo->ring.sring,
+			 info->nr_ring_pages * XEN_PAGE_SIZE);
 	rinfo->ring.sring = NULL;

 	if (rinfo->irq)
@@ -1436,9 +1443,15 @@ static int blkif_get_final_status(enum blk_req_status s1,
 	return BLKIF_RSP_OKAY;
 }

-static bool blkif_completion(unsigned long *id,
-			     struct blkfront_ring_info *rinfo,
-			     struct blkif_response *bret)
+/*
+ * Return values:
+ *  1 response processed.
+ *  0 missing further responses.
+ * -1 error while processing.
+ */
+static int blkif_completion(unsigned long *id,
+			    struct blkfront_ring_info *rinfo,
+			    struct blkif_response *bret)
 {
 	int i = 0;
 	struct scatterlist *sg;
@@ -1461,7 +1474,7 @@ static bool blkif_completion(unsigned long *id,

 		/* Wait the second response if not yet here. */
 		if (s2->status < REQ_DONE)
-			return false;
+			return 0;

 		bret->status = blkif_get_final_status(s->status,
 						      s2->status);
@@ -1494,7 +1507,7 @@ static bool blkif_completion(unsigned long *id,
 	data.s = s;
 	num_sg = s->num_sg;

-	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
+	if (bret->operation == BLKIF_OP_READ && info->bounce) {
 		for_each_sg(s->sg, sg, num_sg, i) {
 			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
@@ -1512,47 +1525,48 @@ static bool blkif_completion(unsigned long *id,
 	}
 	/* Add the persistent grant into the list of free grants */
 	for (i = 0; i < num_grant; i++) {
-		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
+		if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
 			/*
 			 * If the grant is still mapped by the backend (the
 			 * backend has chosen to make this grant persistent)
 			 * we add it at the head of the list, so it will be
 			 * reused first.
 			 */
-			if (!info->feature_persistent)
-				pr_alert_ratelimited("backed has not unmapped grant: %u\n",
-						     s->grants_used[i]->gref);
+			if (!info->feature_persistent) {
+				pr_alert("backed has not unmapped grant: %u\n",
+					 s->grants_used[i]->gref);
+				return -1;
+			}
 			list_add(&s->grants_used[i]->node, &rinfo->grants);
 			rinfo->persistent_gnts_c++;
 		} else {
 			/*
-			 * If the grant is not mapped by the backend we end the
-			 * foreign access and add it to the tail of the list,
-			 * so it will not be picked again unless we run out of
-			 * persistent grants.
+			 * If the grant is not mapped by the backend we add it
+			 * to the tail of the list, so it will not be picked
+			 * again unless we run out of persistent grants.
 			 */
-			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
 			s->grants_used[i]->gref = GRANT_INVALID_REF;
 			list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
 		}
 	}
 	if (s->req.operation == BLKIF_OP_INDIRECT) {
 		for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
-			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
-				if (!info->feature_persistent)
-					pr_alert_ratelimited("backed has not unmapped grant: %u\n",
-							     s->indirect_grants[i]->gref);
+			if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
+				if (!info->feature_persistent) {
+					pr_alert("backed has not unmapped grant: %u\n",
+						 s->indirect_grants[i]->gref);
+					return -1;
+				}
 				list_add(&s->indirect_grants[i]->node, &rinfo->grants);
 				rinfo->persistent_gnts_c++;
 			} else {
 				struct page *indirect_page;

-				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
 				/*
 				 * Add the used indirect page back to the list of
 				 * available pages for indirect grefs.
 				 */
-				if (!info->feature_persistent) {
+				if (!info->bounce) {
 					indirect_page = s->indirect_grants[i]->page;
 					list_add(&indirect_page->lru, &rinfo->indirect_pages);
 				}
@@ -1562,7 +1576,7 @@ static bool blkif_completion(unsigned long *id,
 		}
 	}

-	return true;
+	return 1;
 }

 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
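blkif_completion() above moves from bool to a 1/0/-1 tri-state, because a bool could not distinguish "second response still pending" from "protocol violation by the backend". A trivial standalone illustration of how the caller discriminates the three cases:

#include <stdio.h>

enum { RESP_DONE = 1, RESP_MISSING = 0, RESP_ERROR = -1 };

static int handle(int ret)
{
	if (ret == RESP_MISSING)
		return 0;	/* keep polling the ring */
	if (ret < 0)
		return -1;	/* tear down the connection */
	return 1;		/* free the request id */
}

int main(void)
{
	printf("%d %d %d\n", handle(RESP_DONE), handle(RESP_MISSING),
	       handle(RESP_ERROR));
	return 0;
}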
@@ -1628,12 +1642,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 		}

 		if (bret.operation != BLKIF_OP_DISCARD) {
+			int ret;
+
 			/*
 			 * We may need to wait for an extra response if the
 			 * I/O request is split in 2
 			 */
-			if (!blkif_completion(&id, rinfo, &bret))
+			ret = blkif_completion(&id, rinfo, &bret);
+			if (!ret)
 				continue;
+			if (unlikely(ret < 0))
+				goto err;
 		}

 		if (add_id_to_freelist(rinfo, id)) {
@@ -1740,8 +1759,7 @@ static int setup_blkring(struct xenbus_device *dev,
 	for (i = 0; i < info->nr_ring_pages; i++)
 		rinfo->ring_ref[i] = GRANT_INVALID_REF;

-	sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
-						       get_order(ring_size));
+	sring = alloc_pages_exact(ring_size, GFP_NOIO | __GFP_ZERO);
 	if (!sring) {
 		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
 		return -ENOMEM;
@@ -1751,7 +1769,7 @@ static int setup_blkring(struct xenbus_device *dev,

 	err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
 	if (err < 0) {
-		free_pages((unsigned long)sring, get_order(ring_size));
+		free_pages_exact(sring, ring_size);
 		rinfo->ring.sring = NULL;
 		goto fail;
 	}
@@ -1845,6 +1863,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	if (!info)
 		return -ENODEV;

+	/* Check if backend is trusted. */
+	info->bounce = !xen_blkif_trusted ||
+		       !xenbus_read_unsigned(dev->nodename, "trusted", 1);
+
 	max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
 					      "max-ring-page-order", 0);
 	ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
@@ -2271,17 +2293,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 	if (err)
 		goto out_of_memory;

-	if (!info->feature_persistent && info->max_indirect_segments) {
+	if (!info->bounce && info->max_indirect_segments) {
 		/*
-		 * We are using indirect descriptors but not persistent
-		 * grants, we need to allocate a set of pages that can be
+		 * We are using indirect descriptors but don't have a bounce
+		 * buffer, we need to allocate a set of pages that can be
 		 * used for mapping indirect grefs
 		 */
 		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);

 		BUG_ON(!list_empty(&rinfo->indirect_pages));
 		for (i = 0; i < num; i++) {
-			struct page *indirect_page = alloc_page(GFP_KERNEL);
+			struct page *indirect_page = alloc_page(GFP_KERNEL |
+								__GFP_ZERO);
 			if (!indirect_page)
 				goto out_of_memory;
 			list_add(&indirect_page->lru, &rinfo->indirect_pages);
@@ -2374,6 +2397,8 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
 	info->feature_persistent =
 		!!xenbus_read_unsigned(info->xbdev->otherend,
 				       "feature-persistent", 0);
+	if (info->feature_persistent)
+		info->bounce = true;

 	indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
 					"feature-max-indirect-segments", 0);
@@ -2729,11 +2754,10 @@ static void purge_persistent_grants(struct blkfront_info *info)
 		list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
 					 node) {
 			if (gnt_list_entry->gref == GRANT_INVALID_REF ||
-			    gnttab_query_foreign_access(gnt_list_entry->gref))
+			    !gnttab_try_end_foreign_access(gnt_list_entry->gref))
 				continue;

 			list_del(&gnt_list_entry->node);
-			gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
 			rinfo->persistent_gnts_c--;
 			gnt_list_entry->gref = GRANT_INVALID_REF;
 			list_add_tail(&gnt_list_entry->node, &rinfo->grants);
@@ -2748,6 +2772,13 @@ static void blkfront_delay_work(struct work_struct *work)
 	struct blkfront_info *info;
 	bool need_schedule_work = false;

+	/*
+	 * Note that when using bounce buffers but not persistent grants
+	 * there's no need to run blkfront_delay_work because grants are
+	 * revoked in blkif_completion or else an error is reported and the
+	 * connection is closed.
+	 */
+
 	mutex_lock(&blkfront_mutex);

 	list_for_each_entry(info, &info_list, info_list) {
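Taken together, the xen-blkfront hunks make data bouncing an explicit policy decision instead of a side effect of persistent grants. A minimal userspace sketch of the resulting policy, as wired up in talk_to_blkback() and blkfront_gather_backend_features() (boolean logic only; names illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool module_trusted = true;	/* blkfront "trusted" module parameter */

static bool need_bounce(bool xenstore_trusted, bool feature_persistent)
{
	/* bounce unless both knobs say trusted; always with persistent grants */
	return !module_trusted || !xenstore_trusted || feature_persistent;
}

int main(void)
{
	printf("untrusted backend      -> bounce=%d\n", need_bounce(false, false));
	printf("trusted + persistent   -> bounce=%d\n", need_bounce(true, true));
	printf("trusted, no persistent -> bounce=%d\n", need_bounce(true, false));
	return 0;
}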
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 5a321b4076aababf90bdaac7b6bdb1cae173e4f6..cab93935cc7f16aabb2688dad98fe2065940b622 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -628,6 +628,9 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
 	data->bulk_out_ep   = bulk_out_ep->desc.bEndpointAddress;
 	data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize);

+	if (!data->bulk_pkt_size)
+		goto done;
+
 	rwlock_init(&data->lock);

 	data->reassembly = NULL;
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 5f9f027956317ef3a5bb71f6753f39d7522023cf..c41560be39fb6bcacb6a58a8a7b7e789b90c40d6 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -981,6 +981,8 @@ static int btmtksdio_probe(struct sdio_func *func,
 	hdev->manufacturer = 70;
 	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);

+	sdio_set_drvdata(func, bdev);
+
 	err = hci_register_dev(hdev);
 	if (err < 0) {
 		dev_err(&func->dev, "Can't register HCI device\n");
@@ -988,8 +990,6 @@ static int btmtksdio_probe(struct sdio_func *func,
 		return err;
 	}

-	sdio_set_drvdata(func, bdev);
-
 	/* pm_runtime_enable would be done after the firmware is being
 	 * downloaded because the core layer probably already enables
 	 * runtime PM for this func such as the case host->caps &
@@ -1042,6 +1042,8 @@ static int btmtksdio_runtime_suspend(struct device *dev)
 	if (!bdev)
 		return 0;

+	sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
 	sdio_claim_host(bdev->func);

 	sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index e0859f4e2807351dc8d6cf4900e1c1c7ebce44b6..538232b4c42ac165de8d5c4b1086097a4be80cdb 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -379,6 +379,15 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW |
 						     BTUSB_WIDEBAND_SPEECH |
 						     BTUSB_VALID_LE_STATES },
+	{ USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH |
+						     BTUSB_VALID_LE_STATES },
+	{ USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH |
+						     BTUSB_VALID_LE_STATES },
+	{ USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 |
+						     BTUSB_WIDEBAND_SPEECH |
+						     BTUSB_VALID_LE_STATES },

 	/* Other Intel Bluetooth devices */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
@@ -400,6 +409,14 @@ static const struct usb_device_id blacklist_table[] = {
 						     BTUSB_WIDEBAND_SPEECH |
 						     BTUSB_VALID_LE_STATES },

+	/* MediaTek MT7922A Bluetooth devices */
+	{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
+						     BTUSB_WIDEBAND_SPEECH |
+						     BTUSB_VALID_LE_STATES },
+	{ USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
+						     BTUSB_WIDEBAND_SPEECH |
+						     BTUSB_VALID_LE_STATES },
+
 	/* Additional Realtek 8723AE Bluetooth devices */
 	{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
@@ -2845,6 +2862,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
 		skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
 		if (!skb) {
 			hdev->stat.err_rx++;
+			kfree(urb->setup_packet);
 			return;
 		}

@@ -2865,6 +2883,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
 			data->evt_skb = skb_clone(skb, GFP_ATOMIC);
 			if (!data->evt_skb) {
 				kfree_skb(skb);
+				kfree(urb->setup_packet);
 				return;
 			}
 		}
@@ -2873,6 +2892,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
 		if (err < 0) {
 			kfree_skb(data->evt_skb);
 			data->evt_skb = NULL;
+			kfree(urb->setup_packet);
 			return;
 		}

@@ -2883,6 +2903,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
 			wake_up_bit(&data->flags,
 				    BTUSB_TX_WAIT_VND_EVT);
 		}
+		kfree(urb->setup_packet);
 		return;
 	} else if (urb->status == -ENOENT) {
 		/* Avoid suspend failed when usb_kill_urb */
@@ -2903,6 +2924,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
 	usb_anchor_urb(urb, &data->ctrl_anchor);
 	err = usb_submit_urb(urb, GFP_ATOMIC);
 	if (err < 0) {
+		kfree(urb->setup_packet);
 		/* -EPERM: urb is being killed;
 		 * -ENODEV: device got disconnected
 		 */
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8ea5ca8d71d6d2260965cfc1c9f4ae0e0a645d13..259a643377c242d8409cf4f8a03c423d982c55c4 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -1164,7 +1164,12 @@ static int bcm_probe(struct platform_device *pdev)
 		return -ENOMEM;

 	dev->dev = &pdev->dev;
-	dev->irq = platform_get_irq(pdev, 0);
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0)
+		return ret;
+
+	dev->irq = ret;

 	/* Initialize routing field to an unused value */
 	dev->pcm_int_params[0] = 0xff;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 4184faef9f169b9defdfede96734f9486487a40d..dc7ee5dd2eeca3f15685ea97ba85e8396a3f77c4 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1844,6 +1844,9 @@ static int qca_power_off(struct hci_dev *hdev)
 	hu->hdev->hw_error = NULL;
 	hu->hdev->cmd_timeout = NULL;

+	del_timer_sync(&qca->wake_retrans_timer);
+	del_timer_sync(&qca->tx_idle_timer);
+
 	/* Stop sending shutdown command if soc crashes. */
 	if (soc_type != QCA_ROME
 		&& qca->memdump_state == QCA_MEMDUMP_IDLE) {
@@ -1987,7 +1990,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)

 		qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
 					       GPIOD_OUT_LOW);
-		if (!qcadev->bt_en) {
+		if (IS_ERR_OR_NULL(qcadev->bt_en)) {
 			dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
 			power_ctrl_enabled = false;
 		}
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 9e03402ef1b378c2f613df7697f1708beba1f9b9..e9a44ab3812df06e1714cb237eb15d4b72f3b5bd 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -305,6 +305,8 @@ int hci_uart_register_device(struct hci_uart *hu,
 	if (err)
 		return err;

+	percpu_init_rwsem(&hu->proto_lock);
+
 	err = p->open(hu);
 	if (err)
 		goto err_open;
@@ -327,7 +329,6 @@ int hci_uart_register_device(struct hci_uart *hu,

 	INIT_WORK(&hu->init_ready, hci_uart_init_work);
 	INIT_WORK(&hu->write_work, hci_uart_write_work);
-	percpu_init_rwsem(&hu->proto_lock);

 	/* Only when vendor specific setup callback is provided, consider
 	 * the manufacturer information valid. This avoids filling in the
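The hci_bcm hunk applies the standard platform_get_irq() idiom: the return value can be a negative errno (including -EPROBE_DEFER) and must be checked before being stored as an IRQ number. A hedged sketch of the idiom with an illustrative probe function:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER; propagate as-is */

	/* only now is irq a valid Linux IRQ number to store/request */
	return 0;
}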
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 8ab26dec5f6e8ccdd1a9c796f4c66ebdd13a63c7..8469f9876dd2674f86d4f02705013140c1c1f060 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -121,6 +121,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
 	if (opcode & 0x80)
 		set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);

+	set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+
 	if (hci_register_dev(hdev) < 0) {
 		BT_ERR("Can't register HCI device");
 		hci_free_dev(hdev);
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 626dedd110cbc2463b67cf48599242740a037e21..fca0d0669aa97e78168b20aa503a60fa9cdf72d1 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -351,6 +351,7 @@ phys_addr_t __weak mips_cdmm_phys_base(void)
 	np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm");
 	if (np) {
 		err = of_address_to_resource(np, 0, &res);
+		of_node_put(np);
 		if (!err)
 			return res.start;
 	}
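The mips_cdmm hunk fixes an of_node refcount leak: of_find_compatible_node() returns the node with an elevated refcount, and every exit path must drop it with of_node_put(). A sketch mirroring the patched flow:

#include <linux/of.h>
#include <linux/of_address.h>

static phys_addr_t example_phys_base(void)
{
	struct device_node *np;
	struct resource res;
	int err;

	np = of_find_compatible_node(NULL, NULL, "mti,mips-cdmm");
	if (np) {
		err = of_address_to_resource(np, 0, &res);
		of_node_put(np);	/* balance the implicit get */
		if (!err)
			return res.start;
	}
	return 0;
}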
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 244b8f3b38b402a0aa375853a81903f3c9956bfa..c5eb46cbf388b507608f96ed1b55082d42d54981 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -16,33 +16,7 @@

 static int simple_pm_bus_probe(struct platform_device *pdev)
 {
-	const struct device *dev = &pdev->dev;
-	struct device_node *np = dev->of_node;
-	const struct of_device_id *match;
-
-	/*
-	 * Allow user to use driver_override to bind this driver to a
-	 * transparent bus device which has a different compatible string
-	 * that's not listed in simple_pm_bus_of_match. We don't want to do any
-	 * of the simple-pm-bus tasks for these devices, so return early.
-	 */
-	if (pdev->driver_override)
-		return 0;
-
-	match = of_match_device(dev->driver->of_match_table, dev);
-	/*
-	 * These are transparent bus devices (not simple-pm-bus matches) that
-	 * have their child nodes populated automatically.  So, don't need to
-	 * do anything more. We only match with the device if this driver is
-	 * the most specific match because we don't want to incorrectly bind to
-	 * a device that has a more specific driver.
-	 */
-	if (match && match->data) {
-		if (of_property_match_string(np, "compatible", match->compatible) == 0)
-			return 0;
-		else
-			return -ENODEV;
-	}
+	struct device_node *np = pdev->dev.of_node;

 	dev_dbg(&pdev->dev, "%s\n", __func__);

@@ -56,25 +30,14 @@ static int simple_pm_bus_probe(struct platform_device *pdev)

 static int simple_pm_bus_remove(struct platform_device *pdev)
 {
-	const void *data = of_device_get_match_data(&pdev->dev);
-
-	if (pdev->driver_override || data)
-		return 0;
-
 	dev_dbg(&pdev->dev, "%s\n", __func__);

 	pm_runtime_disable(&pdev->dev);

 	return 0;
 }

-#define ONLY_BUS	((void *) 1) /* Match if the device is only a bus. */
-
 static const struct of_device_id simple_pm_bus_of_match[] = {
 	{ .compatible = "simple-pm-bus", },
-	{ .compatible = "simple-bus",	.data = ONLY_BUS },
-	{ .compatible = "simple-mfd",	.data = ONLY_BUS },
-	{ .compatible = "isa",		.data = ONLY_BUS },
-	{ .compatible = "arm,amba-bus",	.data = ONLY_BUS },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index dda4a9dfad2e8f609170d2d6dd006ab1676030f8..87cebb34aeccdad84d581343250dd4ea57cc2e57 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -427,7 +427,7 @@ config HW_RANDOM_MESON

 config HW_RANDOM_CAVIUM
 	tristate "Cavium ThunderX Random Number Generator support"
-	depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+	depends on HW_RANDOM && PCI && ARCH_THUNDER
 	default HW_RANDOM
 	help
 	  This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index ecb71c4317a503a8772fb078cfec2950ce3a39d8..8cf0ef501341eafbdeb6ae31d95bc2327f7a0532 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -114,6 +114,7 @@ static int atmel_trng_probe(struct platform_device *pdev)

 err_register:
 	clk_disable_unprepare(trng->clk);
+	atmel_trng_disable(trng);
 	return ret;
 }

diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index 3de4a6a443ef98ac3369228593147afd7cd1b862..6f66919652bf571b25a8c2762de0fe12398aab67 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Hardware Random Number Generator support for Cavium, Inc.
- * Thunder processor family.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
+ * Hardware Random Number Generator support.
+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
  *
  * Copyright (C) 2016 Cavium, Inc.
  */
@@ -15,16 +12,146 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>

+#include <asm/arch_timer.h>
+
+/* PCI device IDs */
+#define	PCI_DEVID_CAVIUM_RNG_PF		0xA018
+#define	PCI_DEVID_CAVIUM_RNG_VF		0xA033
+
+#define HEALTH_STATUS_REG		0x38
+
+/* RST device info */
+#define PCI_DEVICE_ID_RST_OTX2		0xA085
+#define RST_BOOT_REG			0x1600ULL
+#define CLOCK_BASE_RATE			50000000ULL
+#define MSEC_TO_NSEC(x)			(x * 1000000)
+
 struct cavium_rng {
 	struct hwrng ops;
 	void __iomem *result;
+	void __iomem *pf_regbase;
+	struct pci_dev *pdev;
+	u64  clock_rate;
+	u64  prev_error;
+	u64  prev_time;
 };

+static inline bool is_octeontx(struct pci_dev *pdev)
+{
+	if (midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_83XX,
+				    MIDR_CPU_VAR_REV(0, 0),
+				    MIDR_CPU_VAR_REV(3, 0)) ||
+	    midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_81XX,
+				    MIDR_CPU_VAR_REV(0, 0),
+				    MIDR_CPU_VAR_REV(3, 0)) ||
+	    midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX,
+				    MIDR_CPU_VAR_REV(0, 0),
+				    MIDR_CPU_VAR_REV(3, 0)))
+		return true;
+
+	return false;
+}
+
+static u64 rng_get_coprocessor_clkrate(void)
+{
+	u64 ret = CLOCK_BASE_RATE * 16; /* Assume 800Mhz as default */
+	struct pci_dev *pdev;
+	void __iomem *base;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+			      PCI_DEVICE_ID_RST_OTX2, NULL);
+	if (!pdev)
+		goto error;
+
+	base = pci_ioremap_bar(pdev, 0);
+	if (!base)
+		goto error_put_pdev;
+
+	/* RST: PNR_MUL * 50Mhz gives clockrate */
+	ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT_REG) >> 33) & 0x3F);
+
+	iounmap(base);
+
+error_put_pdev:
+	pci_dev_put(pdev);
+
+error:
+	return ret;
+}
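rng_get_coprocessor_clkrate() above feeds check_rng_health(), which converts the failure-age cycle count into nanoseconds. A standalone rework of that arithmetic using the 800 MHz default (CLOCK_BASE_RATE * 16, values from the hunks above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t clock_rate = 16ULL * 50000000ULL;	/* 800 MHz default */
	uint64_t cycles = 1600;		/* HEALTH_STATUS >> 1, then / 2 */
	uint64_t ns = (cycles * 1000000000ULL) / clock_rate;

	printf("%llu cycles at %llu Hz = %llu ns since failure\n",
	       (unsigned long long)cycles,
	       (unsigned long long)clock_rate,
	       (unsigned long long)ns);
	return 0;
}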
+static int check_rng_health(struct cavium_rng *rng)
+{
+	u64 cur_err, cur_time;
+	u64 status, cycles;
+	u64 time_elapsed;
+
+
+	/* Skip checking health for OcteonTx */
+	if (!rng->pf_regbase)
+		return 0;
+
+	status = readq(rng->pf_regbase + HEALTH_STATUS_REG);
+	if (status & BIT_ULL(0)) {
+		dev_err(&rng->pdev->dev, "HWRNG: Startup health test failed\n");
+		return -EIO;
+	}
+
+	cycles = status >> 1;
+	if (!cycles)
+		return 0;
+
+	cur_time = arch_timer_read_counter();
+
+	/* RNM_HEALTH_STATUS[CYCLES_SINCE_HEALTH_FAILURE]
+	 * Number of coprocessor cycles times 2 since the last failure.
+	 * This field doesn't get cleared/updated until another failure.
+	 */
+	cycles = cycles / 2;
+	cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */
+
+	/* Ignore errors that happenned a long time ago, these
+	 * are most likely false positive errors.
+	 */
+	if (cur_err > MSEC_TO_NSEC(10)) {
+		rng->prev_error = 0;
+		rng->prev_time = 0;
+		return 0;
+	}
+
+	if (rng->prev_error) {
+		/* Calculate time elapsed since last error
+		 * '1' tick of CNTVCT is 10ns, since it runs at 100Mhz.
+		 */
+		time_elapsed = (cur_time - rng->prev_time) * 10;
+		time_elapsed += rng->prev_error;
+
+		/* Check if current error is a new one or the old one itself.
+		 * If error is a new one then consider there is a persistent
+		 * issue with entropy, declare hardware failure.
+		 */
+		if (cur_err < time_elapsed) {
+			dev_err(&rng->pdev->dev, "HWRNG failure detected\n");
+			rng->prev_error = cur_err;
+			rng->prev_time = cur_time;
+			return -EIO;
+		}
+	}
+
+	rng->prev_error = cur_err;
+	rng->prev_time = cur_time;
+	return 0;
+}
+
 /* Read data from the RNG unit */
 static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
 {
 	struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
 	unsigned int size = max;
+	int err = 0;
+
+	err = check_rng_health(p);
+	if (err)
+		return err;

 	while (size >= 8) {
 		*((u64 *)dat) = readq(p->result);
@@ -39,6 +166,39 @@ static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
 	return max;
 }

+static int cavium_map_pf_regs(struct cavium_rng *rng)
+{
+	struct pci_dev *pdev;
+
+	/* Health status is not supported on 83xx, skip mapping PF CSRs */
+	if (is_octeontx(rng->pdev)) {
+		rng->pf_regbase = NULL;
+		return 0;
+	}
+
+	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+			      PCI_DEVID_CAVIUM_RNG_PF, NULL);
+	if (!pdev) {
+		dev_err(&pdev->dev, "Cannot find RNG PF device\n");
+		return -EIO;
+	}
+
+	rng->pf_regbase = ioremap(pci_resource_start(pdev, 0),
+				  pci_resource_len(pdev, 0));
+	if (!rng->pf_regbase) {
+		dev_err(&pdev->dev, "Failed to map PF CSR region\n");
+		pci_dev_put(pdev);
+		return -ENOMEM;
+	}
+
+	pci_dev_put(pdev);
+
+	/* Get co-processor clock rate */
+	rng->clock_rate = rng_get_coprocessor_clkrate();
+
+	return 0;
+}
+
 /* Map Cavium RNG to an HWRNG object */
 static int cavium_rng_probe_vf(struct	pci_dev		*pdev,
 			 const struct	pci_device_id	*id)
@@ -50,6 +210,8 @@ static int cavium_rng_probe_vf(struct	pci_dev		*pdev,
 	if (!rng)
 		return -ENOMEM;

+	rng->pdev = pdev;
+
 	/* Map the RNG result */
 	rng->result = pcim_iomap(pdev, 0, 0);
 	if (!rng->result) {
@@ -67,6 +229,11 @@ static int cavium_rng_probe_vf(struct	pci_dev		*pdev,

 	pci_set_drvdata(pdev, rng);

+	/* Health status is available only at PF, hence map PF registers. */
+	ret = cavium_map_pf_regs(rng);
+	if (ret)
+		return ret;
+
 	ret = devm_hwrng_register(&pdev->dev, &rng->ops);
 	if (ret) {
 		dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
@@ -76,10 +243,18 @@ static int cavium_rng_probe_vf(struct	pci_dev		*pdev,
 	return 0;
 }

+/* Remove the VF */
+static void cavium_rng_remove_vf(struct pci_dev *pdev)
+{
+	struct cavium_rng *rng;
+
+	rng = pci_get_drvdata(pdev);
+	iounmap(rng->pf_regbase);
+}

 static const struct pci_device_id cavium_rng_vf_id_table[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0},
-	{0,},
+	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_VF) },
+	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table);

@@ -87,8 +262,9 @@ static struct pci_driver cavium_rng_vf_driver = {
 	.name		= "cavium_rng_vf",
 	.id_table	= cavium_rng_vf_id_table,
 	.probe		= cavium_rng_probe_vf,
+	.remove		= cavium_rng_remove_vf,
 };
 module_pci_driver(cavium_rng_vf_driver);

 MODULE_AUTHOR("Omer Khaliq ");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
index 63d6e68c24d2fc979f54345c2f71ad2e58b36236..b96579222408ba0bd24c4cdf2f063aa1bedf7514 100644
--- a/drivers/char/hw_random/cavium-rng.c
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Hardware Random Number Generator support for Cavium Inc.
- * Thunder processor family.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
+ * Hardware Random Number Generator support.
+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
  *
  * Copyright (C) 2016 Cavium, Inc.
  */
@@ -91,4 +88,4 @@ static struct pci_driver cavium_rng_pf_driver = {
 module_pci_driver(cavium_rng_pf_driver);

 MODULE_AUTHOR("Omer Khaliq ");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index b0ded41eb865f56483c2fa80dbf7361832680b3c..e8f9621e795410a21fdc6760e8c7b949c67172e7 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -65,15 +65,14 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
 out_release:
 	amba_release_regions(dev);
 out_clk:
-	clk_disable(rng_clk);
+	clk_disable_unprepare(rng_clk);
 	return ret;
 }

-static int nmk_rng_remove(struct amba_device *dev)
+static void nmk_rng_remove(struct amba_device *dev)
 {
 	amba_release_regions(dev);
-	clk_disable(rng_clk);
-	return 0;
+	clk_disable_unprepare(rng_clk);
 }

 static const struct amba_id nmk_rng_ids[] = {
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 92eda5b2f1341ba46053a67c715b6ecc67be7516..883b4a3410122b84a143e6c73d517597663f5d7d 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -503,7 +503,7 @@ static void panic_halt_ipmi_heartbeat(void)
 	msg.cmd = IPMI_WDOG_RESET_TIMER;
 	msg.data = NULL;
 	msg.data_len = 0;
-	atomic_add(1, &panic_done_count);
+	atomic_add(2, &panic_done_count);
 	rv = ipmi_request_supply_msgs(watchdog_user,
 				      (struct ipmi_addr *) &addr,
 				      0,
@@ -513,7 +513,7 @@ static void panic_halt_ipmi_heartbeat(void)
 				      &panic_halt_heartbeat_recv_msg,
 				      1);
 	if (rv)
-		atomic_sub(1, &panic_done_count);
+		atomic_sub(2, &panic_done_count);
 }

 static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -537,12 +537,12 @@ static void panic_halt_ipmi_set_timeout(void)
 	/* Wait for the messages to be free. */
 	while (atomic_read(&panic_done_count) != 0)
 		ipmi_poll_interface(watchdog_user);
-	atomic_add(1, &panic_done_count);
+	atomic_add(2, &panic_done_count);
 	rv = __ipmi_set_timeout(&panic_halt_smi_msg,
 				&panic_halt_recv_msg,
 				&send_heartbeat_now);
 	if (rv) {
-		atomic_sub(1, &panic_done_count);
+		atomic_sub(2, &panic_done_count);
 		pr_warn("Unable to extend the watchdog timeout\n");
 	} else {
 		if (send_heartbeat_now)
diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h
index 9ccb6b270b071cfbf03b250d1af442f28d75bafe..95164246afd1a313d3fb3a2726c6abcff35c77b0 100644
--- a/drivers/char/mwave/3780i.h
+++ b/drivers/char/mwave/3780i.h
@@ -68,7 +68,7 @@ typedef struct {
 	unsigned char ClockControl:1;	/* RW: Clock control: 0=normal, 1=stop 3780i clocks */
 	unsigned char SoftReset:1;	/* RW: Soft reset 0=normal, 1=soft reset active */
 	unsigned char ConfigMode:1;	/* RW: Configuration mode, 0=normal, 1=config mode */
-	unsigned char Reserved:5;	/* 0: Reserved */
+	unsigned short Reserved:13;	/* 0: Reserved */
 } DSP_ISA_SLAVE_CONTROL;
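The 3780i.h hunk widens the reserved bitfield so the declared bits cover the full 16-bit register instead of a single byte. A userspace demonstration; the exact layout is compiler/ABI dependent, which is precisely why the widths are made to add up explicitly:

#include <stdio.h>

struct slave_control_old {
	unsigned char ClockControl:1;
	unsigned char SoftReset:1;
	unsigned char ConfigMode:1;
	unsigned char Reserved:5;	/* 1 + 1 + 1 + 5 = 8 bits */
};

struct slave_control_new {
	unsigned char ClockControl:1;
	unsigned char SoftReset:1;
	unsigned char ConfigMode:1;
	unsigned short Reserved:13;	/* 1 + 1 + 1 + 13 = 16 bits */
};

int main(void)
{
	printf("old: %zu byte(s), new: %zu byte(s)\n",
	       sizeof(struct slave_control_old),
	       sizeof(struct slave_control_new));
	return 0;
}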
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4d659c4fc2edd83ddd9c36806cd0059798e80b04..8f29cbc0849e9dd02b85c6ec5875a5e7822446de 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -461,6 +461,7 @@ static struct crng_state primary_crng = {
 * its value (from 0->1->2).
 */
 static int crng_init = 0;
+static bool crng_need_final_init = false;
 #define crng_ready() (likely(crng_init > 1))
 static int crng_init_cnt = 0;
 static unsigned long crng_global_init_time = 0;
@@ -838,6 +839,36 @@ static void __init crng_initialize_primary(struct crng_state *crng)
 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }

+static void crng_finalize_init(struct crng_state *crng)
+{
+	if (crng != &primary_crng || crng_init >= 2)
+		return;
+	if (!system_wq) {
+		/* We can't call numa_crng_init until we have workqueues,
+		 * so mark this for processing later. */
+		crng_need_final_init = true;
+		return;
+	}
+
+	invalidate_batched_entropy();
+	numa_crng_init();
+	crng_init = 2;
+	process_random_ready_list();
+	wake_up_interruptible(&crng_init_wait);
+	kill_fasync(&fasync, SIGIO, POLL_IN);
+	pr_notice("crng init done\n");
+	if (unseeded_warning.missed) {
+		pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
+			  unseeded_warning.missed);
+		unseeded_warning.missed = 0;
+	}
+	if (urandom_warning.missed) {
+		pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
+			  urandom_warning.missed);
+		urandom_warning.missed = 0;
+	}
+}
+
 #ifdef CONFIG_NUMA
 static void do_numa_crng_init(struct work_struct *work)
 {
@@ -853,8 +884,8 @@ static void do_numa_crng_init(struct work_struct *work)
 		crng_initialize_secondary(crng);
 		pool[i] = crng;
 	}
-	mb();
-	if (cmpxchg(&crng_node_pool, NULL, pool)) {
+	/* pairs with READ_ONCE() in select_crng() */
+	if (cmpxchg_release(&crng_node_pool, NULL, pool) != NULL) {
 		for_each_node(i)
 			kfree(pool[i]);
 		kfree(pool);
@@ -867,18 +898,38 @@ static void numa_crng_init(void)
 {
 	schedule_work(&numa_crng_init_work);
 }
+
+static struct crng_state *select_crng(void)
+{
+	struct crng_state **pool;
+	int nid = numa_node_id();
+
+	/* pairs with cmpxchg_release() in do_numa_crng_init() */
+	pool = READ_ONCE(crng_node_pool);
+	if (pool && pool[nid])
+		return pool[nid];
+
+	return &primary_crng;
+}
 #else
 static void numa_crng_init(void) {}
+
+static struct crng_state *select_crng(void)
+{
+	return &primary_crng;
+}
 #endif
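The cmpxchg_release()/READ_ONCE() pairing above publishes the fully built crng_node_pool: everything written before the release-cmpxchg is visible to a reader that observes the pointer. A C11-atomics model of the same shape (acquire is shown as the stronger, portable analogue of the kernel's dependency-ordered READ_ONCE()):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(int *) pool;

static void publisher(void)
{
	int *p = malloc(sizeof(*p));
	int *expected = NULL;

	*p = 42;	/* fully initialise before publishing */
	/* release: prior writes become visible to an acquiring reader */
	if (!atomic_compare_exchange_strong_explicit(&pool, &expected, p,
						     memory_order_release,
						     memory_order_relaxed))
		free(p);	/* somebody else won the race */
}

static void reader(void)
{
	int *p = atomic_load_explicit(&pool, memory_order_acquire);

	if (p)
		printf("%d\n", *p);	/* guaranteed to see 42 */
}

int main(void)
{
	publisher();
	reader();
	return 0;
}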
 /*
  * crng_fast_load() can be called by code in the interrupt service
- * path.  So we can't afford to dilly-dally.
+ * path.  So we can't afford to dilly-dally. Returns the number of
+ * bytes processed from cp.
  */
-static int crng_fast_load(const char *cp, size_t len)
+static size_t crng_fast_load(const char *cp, size_t len)
 {
 	unsigned long flags;
 	char *p;
+	size_t ret = 0;

 	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
 		return 0;
@@ -889,7 +940,7 @@ static int crng_fast_load(const char *cp, size_t len)
 	p = (unsigned char *) &primary_crng.state[4];
 	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
 		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
-		cp++; crng_init_cnt++; len--;
+		cp++; crng_init_cnt++; len--; ret++;
 	}
 	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
@@ -897,7 +948,7 @@ static int crng_fast_load(const char *cp, size_t len)
 		crng_init = 1;
 		pr_notice("fast init done\n");
 	}
-	return 1;
+	return ret;
 }

 /*
@@ -972,38 +1023,23 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 		crng->state[i+4] ^= buf.key[i] ^ rv;
 	}
 	memzero_explicit(&buf, sizeof(buf));
-	crng->init_time = jiffies;
+	WRITE_ONCE(crng->init_time, jiffies);
 	spin_unlock_irqrestore(&crng->lock, flags);
-	if (crng == &primary_crng && crng_init < 2) {
-		invalidate_batched_entropy();
-		numa_crng_init();
-		crng_init = 2;
-		process_random_ready_list();
-		wake_up_interruptible(&crng_init_wait);
-		kill_fasync(&fasync, SIGIO, POLL_IN);
-		pr_notice("crng init done\n");
-		if (unseeded_warning.missed) {
-			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
-				  unseeded_warning.missed);
-			unseeded_warning.missed = 0;
-		}
-		if (urandom_warning.missed) {
-			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
-				  urandom_warning.missed);
-			urandom_warning.missed = 0;
-		}
-	}
+	crng_finalize_init(crng);
 }

 static void _extract_crng(struct crng_state *crng,
 			  __u8 out[CHACHA_BLOCK_SIZE])
 {
-	unsigned long v, flags;
-
-	if (crng_ready() &&
-	    (time_after(crng_global_init_time, crng->init_time) ||
-	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
-		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
+	unsigned long v, flags, init_time;
+
+	if (crng_ready()) {
+		init_time = READ_ONCE(crng->init_time);
+		if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
+		    time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
+			crng_reseed(crng, crng == &primary_crng ?
+				    &input_pool : NULL);
+	}
 	spin_lock_irqsave(&crng->lock, flags);
 	if (arch_get_random_long(&v))
 		crng->state[14] ^= v;
@@ -1015,15 +1051,7 @@ static void _extract_crng(struct crng_state *crng,

 static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
 {
-	struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
-	if (crng_node_pool)
-		crng = crng_node_pool[numa_node_id()];
-	if (crng == NULL)
-#endif
-		crng = &primary_crng;
-	_extract_crng(crng, out);
+	_extract_crng(select_crng(), out);
 }

 /*
@@ -1052,15 +1080,7 @@ static void _crng_backtrack_protect(struct crng_state *crng,

 static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
 {
-	struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
-	if (crng_node_pool)
-		crng = crng_node_pool[numa_node_id()];
-	if (crng == NULL)
-#endif
-		crng = &primary_crng;
-	_crng_backtrack_protect(crng, tmp, used);
+	_crng_backtrack_protect(select_crng(), tmp, used);
 }

 static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
@@ -1281,7 +1301,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	if (unlikely(crng_init == 0)) {
 		if ((fast_pool->count >= 64) &&
 		    crng_fast_load((char *) fast_pool->pool,
-				   sizeof(fast_pool->pool))) {
+				   sizeof(fast_pool->pool)) > 0) {
 			fast_pool->count = 0;
 			fast_pool->last = now;
 		}
@@ -1800,6 +1820,8 @@ static void __init init_std_data(struct entropy_store *r)
 int __init rand_initialize(void)
 {
 	init_std_data(&input_pool);
+	if (crng_need_final_init)
+		crng_finalize_init(&primary_crng);
 	crng_initialize_primary(&primary_crng);
 	crng_global_init_time = jiffies;
 	if (ratelimit_disable) {
@@ -1966,7 +1988,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 		 */
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		input_pool.entropy_count = 0;
+		if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
+			wake_up_interruptible(&random_write_wait);
+			kill_fasync(&fasync, SIGIO, POLL_OUT);
+		}
 		return 0;
 	case RNDRESEEDCRNG:
 		if (!capable(CAP_SYS_ADMIN))
@@ -1974,7 +1999,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 		if (crng_init < 2)
 			return -ENODATA;
 		crng_reseed(&primary_crng, &input_pool);
-		crng_global_init_time = jiffies - 1;
+		WRITE_ONCE(crng_global_init_time, jiffies - 1);
 		return 0;
 	default:
 		return -EINVAL;
@@ -2300,15 +2325,19 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
 	struct entropy_store *poolp = &input_pool;

 	if (unlikely(crng_init == 0)) {
-		crng_fast_load(buffer, count);
-		return;
+		size_t ret = crng_fast_load(buffer, count);
+		count -= ret;
+		buffer += ret;
+		if (!count || crng_init == 0)
+			return;
 	}

 	/* Suspend writing if we're above the trickle threshold.
 	 * We'll be woken up again once below random_write_wakeup_thresh,
 	 * or when the calling thread is about to terminate.
 	 */
-	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
+	wait_event_interruptible(random_write_wait,
+			!system_wq || kthread_should_stop() ||
 			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
 	mix_pool_bytes(poolp, buffer, count);
 	credit_entropy_bits(poolp, entropy);
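crng_fast_load() now reports how many bytes it actually consumed, and add_hwgenerator_randomness() advances past them instead of discarding the remainder. A userspace model of that consume-what-you-can contract (illustrative names):

#include <stdio.h>

static size_t sink_load(const char *p, size_t len)
{
	static size_t room = 16;	/* pretend remaining capacity */
	size_t take = len < room ? len : room;

	(void)p;
	room -= take;
	return take;			/* bytes actually consumed */
}

int main(void)
{
	char buf[32] = "0123456789abcdef0123456789abcdef";
	const char *p = buf;
	size_t count = sizeof(buf);
	size_t ret;

	ret = sink_load(p, count);
	count -= ret;		/* advance, as the patched caller does */
	p += ret;
	printf("consumed %zu, %zu left for the pool\n", ret, count);
	return 0;
}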
*/ - wait_event_interruptible(random_write_wait, kthread_should_stop() || + wait_event_interruptible(random_write_wait, + !system_wq || kthread_should_stop() || ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); mix_pool_bytes(poolp, buffer, count); credit_entropy_bits(poolp, entropy); diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index ddaeceb7e109105297f6baf53d74138911246fed..ed600473ad7e3e63d4d27a7e634bb96dc59320fa 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -274,14 +274,6 @@ static void tpm_dev_release(struct device *dev) kfree(chip); } -static void tpm_devs_release(struct device *dev) -{ - struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); - - /* release the master device reference */ - put_device(&chip->dev); -} - /** * tpm_class_shutdown() - prepare the TPM device for loss of power. * @dev: device to which the chip is associated. @@ -344,7 +336,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->dev_num = rc; device_initialize(&chip->dev); - device_initialize(&chip->devs); chip->dev.class = tpm_class; chip->dev.class->shutdown_pre = tpm_class_shutdown; @@ -352,29 +343,12 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->dev.parent = pdev; chip->dev.groups = chip->groups; - chip->devs.parent = pdev; - chip->devs.class = tpmrm_class; - chip->devs.release = tpm_devs_release; - /* get extra reference on main device to hold on - * behalf of devs. This holds the chip structure - * while cdevs is in use. The corresponding put - * is in the tpm_devs_release (TPM2 only) - */ - if (chip->flags & TPM_CHIP_FLAG_TPM2) - get_device(&chip->dev); - if (chip->dev_num == 0) chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); else chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); - chip->devs.devt = - MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); - rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num); - if (rc) - goto out; - rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); if (rc) goto out; @@ -382,9 +356,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, chip->flags |= TPM_CHIP_FLAG_VIRTUAL; cdev_init(&chip->cdev, &tpm_fops); - cdev_init(&chip->cdevs, &tpmrm_fops); chip->cdev.owner = THIS_MODULE; - chip->cdevs.owner = THIS_MODULE; rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); if (rc) { @@ -396,7 +368,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, return chip; out: - put_device(&chip->devs); put_device(&chip->dev); return ERR_PTR(rc); } @@ -445,14 +416,9 @@ static int tpm_add_char_device(struct tpm_chip *chip) } if (chip->flags & TPM_CHIP_FLAG_TPM2) { - rc = cdev_device_add(&chip->cdevs, &chip->devs); - if (rc) { - dev_err(&chip->devs, - "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", - dev_name(&chip->devs), MAJOR(chip->devs.devt), - MINOR(chip->devs.devt), rc); - return rc; - } + rc = tpm_devs_add(chip); + if (rc) + goto err_del_cdev; } /* Make the chip available. 
*/ @@ -460,6 +426,10 @@ static int tpm_add_char_device(struct tpm_chip *chip) idr_replace(&dev_nums_idr, chip, chip->dev_num); mutex_unlock(&idr_lock); + return 0; + +err_del_cdev: + cdev_device_del(&chip->cdev, &chip->dev); return rc; } @@ -641,7 +611,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) hwrng_unregister(&chip->hwrng); tpm_bios_log_teardown(chip); if (chip->flags & TPM_CHIP_FLAG_TPM2) - cdev_device_del(&chip->cdevs, &chip->devs); + tpm_devs_remove(chip); tpm_del_char_device(chip); } EXPORT_SYMBOL_GPL(tpm_chip_unregister); diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 1784530b8387bb46bec8694a49ddb5385833be80..b99e1941c52c98359863ed1bfb9c785ed22ceabe 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -70,7 +70,13 @@ static void tpm_dev_async_work(struct work_struct *work) ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); - if (ret > 0) { + + /* + * If ret is > 0 then tpm_dev_transmit returned the size of the + * response. If ret is < 0 then tpm_dev_transmit failed and + * returned an error code. + */ + if (ret != 0) { priv->response_length = ret; mod_timer(&priv->user_read_timer, jiffies + (120 * HZ)); } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 283f78211c3a7bc09092085d12834c48571f93f3..2163c6ee0d364f3f8ee935e5abae8f547b4a886f 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -234,6 +234,8 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, size_t cmdsiz); int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, size_t *bufsiz); +int tpm_devs_add(struct tpm_chip *chip); +void tpm_devs_remove(struct tpm_chip *chip); void tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index 97e916856cf3e25445c94256dbe94569f5e84524..ffb35f0154c16c463082962426dcc7bdaa4c3a38 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -58,12 +58,12 @@ int tpm2_init_space(struct tpm_space *space, unsigned int buf_size) void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space) { - mutex_lock(&chip->tpm_mutex); - if (!tpm_chip_start(chip)) { + + if (tpm_try_get_ops(chip) == 0) { tpm2_flush_sessions(chip, space); - tpm_chip_stop(chip); + tpm_put_ops(chip); } - mutex_unlock(&chip->tpm_mutex); + kfree(space->context_buf); kfree(space->session_buf); } @@ -574,3 +574,68 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, dev_err(&chip->dev, "%s: error %d\n", __func__, rc); return rc; } + +/* + * Put the reference to the main device. + */ +static void tpm_devs_release(struct device *dev) +{ + struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs); + + /* release the master device reference */ + put_device(&chip->dev); +} + +/* + * Remove the device file for exposed TPM spaces and release the device + * reference. This may also release the reference to the master device. + */ +void tpm_devs_remove(struct tpm_chip *chip) +{ + cdev_device_del(&chip->cdevs, &chip->devs); + put_device(&chip->devs); +} + +/* + * Add a device file to expose TPM spaces. Also take a reference to the + * main device. 
+ */ +int tpm_devs_add(struct tpm_chip *chip) +{ + int rc; + + device_initialize(&chip->devs); + chip->devs.parent = chip->dev.parent; + chip->devs.class = tpmrm_class; + + /* + * Get extra reference on main device to hold on behalf of devs. + * This holds the chip structure while cdevs is in use. The + * corresponding put is in the tpm_devs_release. + */ + get_device(&chip->dev); + chip->devs.release = tpm_devs_release; + chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES); + cdev_init(&chip->cdevs, &tpmrm_fops); + chip->cdevs.owner = THIS_MODULE; + + rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num); + if (rc) + goto err_put_devs; + + rc = cdev_device_add(&chip->cdevs, &chip->devs); + if (rc) { + dev_err(&chip->devs, + "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", + dev_name(&chip->devs), MAJOR(chip->devs.devt), + MINOR(chip->devs.devt), rc); + goto err_put_devs; + } + + return 0; + +err_put_devs: + put_device(&chip->devs); + + return rc; +} diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index b2659a4c401687342f2ce21bfa80ceea3e238842..dc56b976d8162cff62ad6e6d3af5ba60a239606a 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -950,9 +950,11 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->timeout_max = TPM_TIMEOUT_USECS_MAX; priv->phy_ops = phy_ops; + dev_set_drvdata(&chip->dev, priv); + rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor); if (rc < 0) - goto out_err; + return rc; priv->manufacturer_id = vendor; @@ -962,8 +964,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->timeout_max = TIS_TIMEOUT_MAX_ATML; } - dev_set_drvdata(&chip->dev, priv); - if (is_bsw()) { priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, ILB_REMAP_SIZE); @@ -994,7 +994,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT; intmask &= ~TPM_GLOBAL_INT_ENABLE; + + rc = request_locality(chip, 0); + if (rc < 0) { + rc = -ENODEV; + goto out_err; + } + tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); + release_locality(chip, 0); rc = tpm_chip_start(chip); if (rc) diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 673522874cec43e6e502b83c9795978671c0ef96..6d361420ffe827f615b3b06ebc68335758b7e292 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1959,6 +1959,13 @@ static void virtcons_remove(struct virtio_device *vdev) list_del(&portdev->list); spin_unlock_irq(&pdrvdata_lock); + /* Device is going away, exit any polling for buffers */ + virtio_break_device(vdev); + if (use_multiport(portdev)) + flush_work(&portdev->control_work); + else + flush_work(&portdev->config_work); + /* Disable interrupts for vqs */ vdev->config->reset(vdev); /* Finish up work that's lined up */ @@ -2232,7 +2239,7 @@ static struct virtio_driver virtio_rproc_serial = { .remove = virtcons_remove, }; -static int __init init(void) +static int __init virtio_console_init(void) { int err; @@ -2269,7 +2276,7 @@ static int __init init(void) return err; } -static void __exit fini(void) +static void __exit virtio_console_fini(void) { reclaim_dma_bufs(); @@ -2279,8 +2286,8 @@ static void __exit fini(void) class_destroy(pdrvdata.class); debugfs_remove_recursive(pdrvdata.debugfs_dir); } -module_init(init); -module_exit(fini); 
+module_init(virtio_console_init); +module_exit(virtio_console_fini); MODULE_DESCRIPTION("Virtio console driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/clk/actions/owl-s700.c b/drivers/clk/actions/owl-s700.c index a2f34d13fb54304357eed076ceb11e31863e76d2..6ea7da1d6d755e25c191fc91d5e2780343c08680 100644 --- a/drivers/clk/actions/owl-s700.c +++ b/drivers/clk/actions/owl-s700.c @@ -162,6 +162,7 @@ static struct clk_div_table hdmia_div_table[] = { static struct clk_div_table rmii_div_table[] = { {0, 4}, {1, 10}, + {0, 0} }; /* divider clocks */ diff --git a/drivers/clk/actions/owl-s900.c b/drivers/clk/actions/owl-s900.c index 790890978424a241c8229c7592c2843b1fdb694d..5144ada2c7e1a46ac2617e0cd1af7f9dcc1c37c7 100644 --- a/drivers/clk/actions/owl-s900.c +++ b/drivers/clk/actions/owl-s900.c @@ -140,7 +140,7 @@ static struct clk_div_table rmii_ref_div_table[] = { static struct clk_div_table usb3_mac_div_table[] = { { 1, 2 }, { 2, 3 }, { 3, 4 }, - { 0, 8 }, + { 0, 0 } }; static struct clk_div_table i2s_div_table[] = { diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c index a092a940baa40bc857ff5f26d3bd1d07c4f06ec8..9d25b23fb99d7f8c706aa4d36e9b18afca2bb3d1 100644 --- a/drivers/clk/at91/sama7g5.c +++ b/drivers/clk/at91/sama7g5.c @@ -606,16 +606,16 @@ static const struct { { .n = "pdmc0_gclk", .id = 68, .r = { .max = 50000000 }, - .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, - .pp_mux_table = { 5, 8, }, + .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, + .pp_mux_table = { 5, 9, }, .pp_count = 2, .pp_chg_id = INT_MIN, }, { .n = "pdmc1_gclk", .id = 69, .r = { .max = 50000000, }, - .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, - .pp_mux_table = { 5, 8, }, + .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, + .pp_mux_table = { 5, 9, }, .pp_count = 2, .pp_chg_id = INT_MIN, }, diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 1ac803e14fa3e17bcc8775dcb12aed891e7ea629..178886823b90ca2a8449c3ae00f992607030c1f7 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -933,8 +933,7 @@ static int bcm2835_clock_is_on(struct clk_hw *hw) static u32 bcm2835_clock_choose_div(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate, - bool round_up) + unsigned long parent_rate) { struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw); const struct bcm2835_clock_data *data = clock->data; @@ -946,10 +945,6 @@ static u32 bcm2835_clock_choose_div(struct clk_hw *hw, rem = do_div(temp, rate); div = temp; - - /* Round up and mask off the unused bits */ - if (round_up && ((div & unused_frac_mask) != 0 || rem != 0)) - div += unused_frac_mask + 1; div &= ~unused_frac_mask; /* different clamping limits apply for a mash clock */ @@ -1080,7 +1075,7 @@ static int bcm2835_clock_set_rate(struct clk_hw *hw, struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw); struct bcm2835_cprman *cprman = clock->cprman; const struct bcm2835_clock_data *data = clock->data; - u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate, false); + u32 div = bcm2835_clock_choose_div(hw, rate, parent_rate); u32 ctl; spin_lock(&cprman->regs_lock); @@ -1131,7 +1126,7 @@ static unsigned long bcm2835_clock_choose_div_and_prate(struct clk_hw *hw, if (!(BIT(parent_idx) & data->set_rate_parent)) { *prate = clk_hw_get_rate(parent); - *div = bcm2835_clock_choose_div(hw, rate, *prate, true); + *div = bcm2835_clock_choose_div(hw, rate, *prate); *avgrate = bcm2835_clock_rate_from_divisor(clock, *prate, *div); @@ -1217,7 +1212,7 @@ static int 
bcm2835_clock_determine_rate(struct clk_hw *hw, rate = bcm2835_clock_choose_div_and_prate(hw, i, req->rate, &div, &prate, &avgrate); - if (rate > best_rate && rate <= req->rate) { + if (abs(req->rate - rate) < abs(req->rate - best_rate)) { best_parent = parent; best_prate = prate; best_rate = rate; diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c index e6d6599d310a1da8b2011151c81d13efb8d42cb7..fad78a22218e8d7cb12a05bf49ed60bbf2b91072 100644 --- a/drivers/clk/clk-bm1880.c +++ b/drivers/clk/clk-bm1880.c @@ -522,14 +522,6 @@ static struct clk_hw *bm1880_clk_register_pll(struct bm1880_pll_hw_clock *pll_cl return hw; } -static void bm1880_clk_unregister_pll(struct clk_hw *hw) -{ - struct bm1880_pll_hw_clock *pll_hw = to_bm1880_pll_clk(hw); - - clk_hw_unregister(hw); - kfree(pll_hw); -} - static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks, int num_clks, struct bm1880_clock_data *data) @@ -555,7 +547,7 @@ static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks, err_clk: while (i--) - bm1880_clk_unregister_pll(data->hw_data.hws[clks[i].pll.id]); + clk_hw_unregister(data->hw_data.hws[clks[i].pll.id]); return PTR_ERR(hw); } @@ -695,14 +687,6 @@ static struct clk_hw *bm1880_clk_register_div(struct bm1880_div_hw_clock *div_cl return hw; } -static void bm1880_clk_unregister_div(struct clk_hw *hw) -{ - struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw); - - clk_hw_unregister(hw); - kfree(div_hw); -} - static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks, int num_clks, struct bm1880_clock_data *data) @@ -729,7 +713,7 @@ static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks, err_clk: while (i--) - bm1880_clk_unregister_div(data->hw_data.hws[clks[i].div.id]); + clk_hw_unregister(data->hw_data.hws[clks[i].div.id]); return PTR_ERR(hw); } diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c index a2c6486ef1708bb387fd88b33e8bbd1fd04c730d..f8417ee2961aaabc9c46e9007716d83b25a3c7bc 100644 --- a/drivers/clk/clk-clps711x.c +++ b/drivers/clk/clk-clps711x.c @@ -28,11 +28,13 @@ static const struct clk_div_table spi_div_table[] = { { .val = 1, .div = 8, }, { .val = 2, .div = 2, }, { .val = 3, .div = 1, }, + { /* sentinel */ } }; static const struct clk_div_table timer_div_table[] = { { .val = 0, .div = 256, }, { .val = 1, .div = 1, }, + { /* sentinel */ } }; struct clps711x_clk { diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c index eb22f4fdbc6b42c2f3384bafdc33ef93c8c8375e..382a0619a04883deab26619e80012fe060c842f1 100644 --- a/drivers/clk/clk-si5341.c +++ b/drivers/clk/clk-si5341.c @@ -789,6 +789,15 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw, u32 r_divider; u8 r[3]; + err = regmap_read(output->data->regmap, + SI5341_OUT_CONFIG(output), &val); + if (err < 0) + return err; + + /* If SI5341_OUT_CFG_RDIV_FORCE2 is set, r_divider is 2 */ + if (val & SI5341_OUT_CFG_RDIV_FORCE2) + return parent_rate / 2; + err = regmap_bulk_read(output->data->regmap, SI5341_OUT_R_REG(output), r, 3); if (err < 0) @@ -805,13 +814,6 @@ static unsigned long si5341_output_clk_recalc_rate(struct clk_hw *hw, r_divider += 1; r_divider <<= 1; - err = regmap_read(output->data->regmap, - SI5341_OUT_CONFIG(output), &val); - if (err < 0) - return err; - - if (val & SI5341_OUT_CFG_RDIV_FORCE2) - r_divider = 2; return parent_rate / r_divider; } @@ -1576,7 +1578,7 @@ static int si5341_probe(struct i2c_client *client, clk_prepare(data->clk[i].hw.clk); } - err = of_clk_add_hw_provider(client->dev.of_node, 
of_clk_si5341_get, + err = devm_of_clk_add_hw_provider(&client->dev, of_clk_si5341_get, data); if (err) { dev_err(&client->dev, "unable to add clk provider\n"); diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index 5c75e3d906c209767b5716992b8f6b718cd3d098..682a18b392f0843ae4aeab2b56caaa7a62153add 100644 --- a/drivers/clk/clk-stm32f4.c +++ b/drivers/clk/clk-stm32f4.c @@ -129,7 +129,6 @@ static const struct stm32f4_gate_data stm32f429_gates[] __initconst = { { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" }, { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, }; static const struct stm32f4_gate_data stm32f469_gates[] __initconst = { @@ -211,7 +210,6 @@ static const struct stm32f4_gate_data stm32f469_gates[] __initconst = { { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" }, { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, }; static const struct stm32f4_gate_data stm32f746_gates[] __initconst = { @@ -286,7 +284,6 @@ static const struct stm32f4_gate_data stm32f746_gates[] __initconst = { { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, { STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, }; static const struct stm32f4_gate_data stm32f769_gates[] __initconst = { @@ -364,7 +361,6 @@ static const struct stm32f4_gate_data stm32f769_gates[] __initconst = { { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, { STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, { STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" }, }; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 515ef39c4610cf01068df106a3044971aefd73cb..2e56cc0a3bce60d7860071d22d98c48e642ea4cc 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -631,6 +631,24 @@ static void clk_core_get_boundaries(struct clk_core *core, *max_rate = min(*max_rate, clk_user->max_rate); } +static bool clk_core_check_boundaries(struct clk_core *core, + unsigned long min_rate, + unsigned long max_rate) +{ + struct clk *user; + + lockdep_assert_held(&prepare_lock); + + if (min_rate > core->max_rate || max_rate < core->min_rate) + return false; + + hlist_for_each_entry(user, &core->clks, clks_node) + if (min_rate > user->max_rate || max_rate < user->min_rate) + return false; + + return true; +} + void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate, unsigned long max_rate) { @@ -2332,6 +2350,11 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) clk->min_rate = min; clk->max_rate = max; + if (!clk_core_check_boundaries(clk->core, min, max)) { + ret = -EINVAL; + goto out; + } + rate = clk_core_get_rate_nolock(clk->core); if (rate < min || rate > max) { /* @@ -2360,6 +2383,7 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) } } +out: if (clk->exclusive_count) clk_core_rate_protect(clk->core); @@ -3314,6 +3338,24 @@ static int __init clk_debug_init(void) { struct clk_core *core; +#ifdef CLOCK_ALLOW_WRITE_DEBUGFS + pr_warn("\n"); + pr_warn("********************************************************************\n"); + pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); + pr_warn("** **\n"); + pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n"); + 
pr_warn("** **\n"); + pr_warn("** This means that this kernel is built to expose clk operations **\n"); + pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n"); + pr_warn("** to userspace, which may compromise security on your system. **\n"); + pr_warn("** **\n"); + pr_warn("** If you see this message and you are not debugging the **\n"); + pr_warn("** kernel, report this immediately to your vendor! **\n"); + pr_warn("** **\n"); + pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); + pr_warn("********************************************************************\n"); +#endif + rootdir = debugfs_create_dir("clk", NULL); debugfs_create_file("clk_summary", 0444, rootdir, &all_lists, @@ -3366,6 +3408,19 @@ static void clk_core_reparent_orphans_nolock(void) __clk_set_parent_after(orphan, parent, NULL); __clk_recalc_accuracies(orphan); __clk_recalc_rates(orphan, 0); + + /* + * __clk_init_parent() will set the initial req_rate to + * 0 if the clock doesn't have clk_ops::recalc_rate and + * is an orphan when it's registered. + * + * 'req_rate' is used by clk_set_rate_range() and + * clk_put() to trigger a clk_set_rate() call whenever + * the boundaries are modified. Let's make sure + * 'req_rate' is set to something non-zero so that + * clk_set_rate_range() doesn't drop the frequency. + */ + orphan->req_rate = orphan->rate; } } } diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index c4e0f1c07192f2d8ccd9c059940c104fe6205291..3f6fd7ef2a68fcca4c38de27afcbfaafa61af218 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -849,7 +849,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) hws[IMX7D_WDOG4_ROOT_CLK] = imx_clk_hw_gate4("wdog4_root_clk", "wdog_post_div", base + 0x49f0, 0); hws[IMX7D_KPP_ROOT_CLK] = imx_clk_hw_gate4("kpp_root_clk", "ipg_root_clk", base + 0x4aa0, 0); hws[IMX7D_CSI_MCLK_ROOT_CLK] = imx_clk_hw_gate4("csi_mclk_root_clk", "csi_mclk_post_div", base + 0x4490, 0); - hws[IMX7D_AUDIO_MCLK_ROOT_CLK] = imx_clk_hw_gate4("audio_mclk_root_clk", "audio_mclk_post_div", base + 0x4790, 0); hws[IMX7D_WRCLK_ROOT_CLK] = imx_clk_hw_gate4("wrclk_root_clk", "wrclk_post_div", base + 0x47a0, 0); hws[IMX7D_USB_CTRL_CLK] = imx_clk_hw_gate4("usb_ctrl_clk", "ahb_root_clk", base + 0x4680, 0); hws[IMX7D_USB_PHY1_CLK] = imx_clk_hw_gate4("usb_phy1_clk", "pll_usb1_main_clk", base + 0x46a0, 0); diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index 33a7ddc23cd24e7e643faf5b93b0dfabd98113a9..db122d94db583364a321094774ee4b055342d626 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -274,9 +274,9 @@ static const char * const imx8mn_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audi static const char * const imx8mn_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; -static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", - "sys_pll1_200m", "audio_pll2_out", "vpu_pll", - "sys_pll1_80m", }; +static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "dummy", + "sys_pll1_200m", "audio_pll2_out", "sys_pll2_500m", + "dummy", "sys_pll1_80m", }; static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m", "sys_pll2_166m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out", "osc_32k", }; diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index 8c38e72d14a79919f51569ee226f4b94558f6722..786e361a4a6a456ef090fa770a27704f4ba5c783 100644 --- 
a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { }, [JZ4725B_CLK_I2S] = { - "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + "i2s", CGU_CLK_MUX | CGU_CLK_DIV, .parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 }, .mux = { CGU_REG_CPCCR, 31, 1 }, .div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 }, - .gate = { CGU_REG_CLKGR, 6 }, }, [JZ4725B_CLK_SPI] = { diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c index 703f87622cf5f7042c216a27b5842ec83e146a99..1ebf740380efbd38f6a912e59e3488c4fc76a2e9 100644 --- a/drivers/clk/loongson1/clk-loongson1c.c +++ b/drivers/clk/loongson1/clk-loongson1c.c @@ -37,6 +37,7 @@ static const struct clk_div_table ahb_div_table[] = { [1] = { .val = 1, .div = 4 }, [2] = { .val = 2, .div = 3 }, [3] = { .val = 3, .div = 3 }, + [4] = { /* sentinel */ } }; void __init ls1x_clk_init(void) diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 0a68af6eec3ddf31e470fe52031410a297d53fb8..d42551a46ec913c73c4d049050e9c039bc0a5106 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -712,6 +712,35 @@ static struct clk_regmap gxbb_mpll_prediv = { }; static struct clk_regmap gxbb_mpll0_div = { + .data = &(struct meson_clk_mpll_data){ + .sdm = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 0, + .width = 14, + }, + .sdm_en = { + .reg_off = HHI_MPLL_CNTL, + .shift = 25, + .width = 1, + }, + .n2 = { + .reg_off = HHI_MPLL_CNTL7, + .shift = 16, + .width = 9, + }, + .lock = &meson_clk_lock, + }, + .hw.init = &(struct clk_init_data){ + .name = "mpll0_div", + .ops = &meson_clk_mpll_ops, + .parent_hws = (const struct clk_hw *[]) { + &gxbb_mpll_prediv.hw + }, + .num_parents = 1, + }, +}; + +static struct clk_regmap gxl_mpll0_div = { .data = &(struct meson_clk_mpll_data){ .sdm = { .reg_off = HHI_MPLL_CNTL7, @@ -748,7 +777,16 @@ static struct clk_regmap gxbb_mpll0 = { .hw.init = &(struct clk_init_data){ .name = "mpll0", .ops = &clk_regmap_gate_ops, - .parent_hws = (const struct clk_hw *[]) { &gxbb_mpll0_div.hw }, + .parent_data = &(const struct clk_parent_data) { + /* + * Note: + * GXL and GXBB have different SDM_EN registers. We + * fallback to the global naming string mechanism so + * mpll0_div picks up the appropriate one. 
+ */ + .name = "mpll0_div", + .index = -1, + }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, }, @@ -3043,7 +3081,7 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = { [CLKID_VAPB_1] = &gxbb_vapb_1.hw, [CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw, [CLKID_VAPB] = &gxbb_vapb.hw, - [CLKID_MPLL0_DIV] = &gxbb_mpll0_div.hw, + [CLKID_MPLL0_DIV] = &gxl_mpll0_div.hw, [CLKID_MPLL1_DIV] = &gxbb_mpll1_div.hw, [CLKID_MPLL2_DIV] = &gxbb_mpll2_div.hw, [CLKID_MPLL_PREDIV] = &gxbb_mpll_prediv.hw, @@ -3438,7 +3476,7 @@ static struct clk_regmap *const gxl_clk_regmaps[] = { &gxbb_mpll0, &gxbb_mpll1, &gxbb_mpll2, - &gxbb_mpll0_div, + &gxl_mpll0_div, &gxbb_mpll1_div, &gxbb_mpll2_div, &gxbb_cts_amclk_div, diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 59a5a0f261f336098748b02453a4b59a9910e7f1..71a0d30cf44dffc7c44708b32c869197360335ee 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -264,7 +264,7 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw, static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) { - u32 cfg, mask; + u32 cfg, mask, d_val, not2d_val, n_minus_m; struct clk_hw *hw = &rcg->clkr.hw; int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src); @@ -283,8 +283,17 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) if (ret) return ret; + /* Calculate 2d value */ + d_val = f->n; + + n_minus_m = f->n - f->m; + n_minus_m *= 2; + + d_val = clamp_t(u32, d_val, f->m, n_minus_m); + not2d_val = ~d_val & mask; + ret = regmap_update_bits(rcg->clkr.regmap, - RCG_D_OFFSET(rcg), mask, ~f->n); + RCG_D_OFFSET(rcg), mask, not2d_val); if (ret) return ret; } @@ -639,6 +648,7 @@ static const struct frac_entry frac_table_pixel[] = { { 2, 9 }, { 4, 9 }, { 1, 1 }, + { 2, 3 }, { } }; diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index 108fe27bee10f611f7417f01ffd52aec2c3fd4aa..541016db3c4bbae76a6374eebe383bd39f95bb6d 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -60,11 +60,6 @@ static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = { { P_GPLL0_DIV2, 4 }, }; -static const char * const gcc_xo_gpll0[] = { - "xo", - "gpll0", -}; - static const struct parent_map gcc_xo_gpll0_map[] = { { P_XO, 0 }, { P_GPLL0, 1 }, @@ -956,6 +951,11 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = { }, }; +static const struct clk_parent_data gcc_xo_gpll0[] = { + { .fw_name = "xo" }, + { .hw = &gpll0.clkr.hw }, +}; + static const struct freq_tbl ftbl_pcie_axi_clk_src[] = { F(19200000, P_XO, 1, 0, 0), F(200000000, P_GPLL0, 4, 0, 0), @@ -969,7 +969,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct clk_init_data){ .name = "pcie0_axi_clk_src", - .parent_names = gcc_xo_gpll0, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, @@ -1016,7 +1016,7 @@ static struct clk_rcg2 pcie1_axi_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct clk_init_data){ .name = "pcie1_axi_clk_src", - .parent_names = gcc_xo_gpll0, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, @@ -1074,7 +1074,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { .name = "sdcc1_apps_clk_src", .parent_names = gcc_xo_gpll0_gpll2_gpll0_out_main_div2, .num_parents = 4, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1330,7 +1330,7 @@ static struct clk_rcg2 nss_ce_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct 
clk_init_data){ .name = "nss_ce_clk_src", - .parent_names = gcc_xo_gpll0, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, @@ -4329,8 +4329,7 @@ static struct clk_rcg2 pcie0_rchng_clk_src = { .parent_map = gcc_xo_gpll0_map, .clkr.hw.init = &(struct clk_init_data){ .name = "pcie0_rchng_clk_src", - .parent_hws = (const struct clk_hw *[]) { - &gpll0.clkr.hw }, + .parent_data = gcc_xo_gpll0, .num_parents = 2, .ops = &clk_rcg2_ops, }, diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c index 144d2ba7a9bef10660884cf4185dc551694260f8..463a444c8a7e4dad1e349a9c658dafd07b947c73 100644 --- a/drivers/clk/qcom/gcc-msm8994.c +++ b/drivers/clk/qcom/gcc-msm8994.c @@ -108,6 +108,7 @@ static struct clk_alpha_pll gpll4_early = { static struct clk_alpha_pll_postdiv gpll4 = { .offset = 0x1dc0, + .width = 4, .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], .clkr.hw.init = &(struct clk_init_data) { diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 4ece326ea233e90e51c7dd4e5eecf48b8d6e424d..cf23cfd7e46743703776d12b0d593a05524df31c 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. */ #include @@ -34,9 +34,14 @@ #define CFG_GDSCR_OFFSET 0x4 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */ -#define EN_REST_WAIT_VAL (0x2 << 20) -#define EN_FEW_WAIT_VAL (0x8 << 16) -#define CLK_DIS_WAIT_VAL (0x2 << 12) +#define EN_REST_WAIT_VAL 0x2 +#define EN_FEW_WAIT_VAL 0x8 +#define CLK_DIS_WAIT_VAL 0x2 + +/* Transition delay shifts */ +#define EN_REST_WAIT_SHIFT 20 +#define EN_FEW_WAIT_SHIFT 16 +#define CLK_DIS_WAIT_SHIFT 12 #define RETAIN_MEM BIT(14) #define RETAIN_PERIPH BIT(13) @@ -341,7 +346,18 @@ static int gdsc_init(struct gdsc *sc) */ mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK | EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK; - val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL; + + if (!sc->en_rest_wait_val) + sc->en_rest_wait_val = EN_REST_WAIT_VAL; + if (!sc->en_few_wait_val) + sc->en_few_wait_val = EN_FEW_WAIT_VAL; + if (!sc->clk_dis_wait_val) + sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL; + + val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT | + sc->en_few_wait_val << EN_FEW_WAIT_SHIFT | + sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT; + ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val); if (ret) return ret; diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h index 5bb396b344d16f814f23c0ce7f717eac859c7b54..762f1b5e1ec51b1a4d09f23630d1988ed8f7144f 100644 --- a/drivers/clk/qcom/gdsc.h +++ b/drivers/clk/qcom/gdsc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved. 
*/ #ifndef __QCOM_GDSC_H__ @@ -22,6 +22,9 @@ struct reset_controller_dev; * @cxcs: offsets of branch registers to toggle mem/periph bits in * @cxc_count: number of @cxcs * @pwrsts: Possible powerdomain power states + * @en_rest_wait_val: transition delay value for receiving enr ack signal + * @en_few_wait_val: transition delay value for receiving enf ack signal + * @clk_dis_wait_val: transition delay value for halting clock * @resets: ids of resets associated with this gdsc * @reset_count: number of @resets * @rcdev: reset controller @@ -35,6 +38,9 @@ struct gdsc { unsigned int clamp_io_ctrl; unsigned int *cxcs; unsigned int cxc_count; + unsigned int en_rest_wait_val; + unsigned int en_few_wait_val; + unsigned int clk_dis_wait_val; const u8 pwrsts; /* Powerdomain allowable state bitfields */ #define PWRSTS_OFF BIT(0) diff --git a/drivers/clk/tegra/clk-tegra124-emc.c b/drivers/clk/tegra/clk-tegra124-emc.c index 745f9faa98d8ef00e7bcc3483bfdf4584ba525d8..733a962ff521ac53d5f1310abd740955c90d4872 100644 --- a/drivers/clk/tegra/clk-tegra124-emc.c +++ b/drivers/clk/tegra/clk-tegra124-emc.c @@ -191,6 +191,7 @@ static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra) tegra->emc = platform_get_drvdata(pdev); if (!tegra->emc) { + put_device(&pdev->dev); pr_err("%s: cannot find EMC driver\n", __func__); return NULL; } diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 3da33c786d77ce397db85ec185aa5f77530e321c..29eafab4353ef16c6006f2887838617a1e67ba72 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -131,7 +131,7 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops) void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) { struct ti_dt_clk *c; - struct device_node *node, *parent; + struct device_node *node, *parent, *child; struct clk *clk; struct of_phandle_args clkspec; char buf[64]; @@ -171,10 +171,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[]) node = of_find_node_by_name(NULL, buf); if (num_args && compat_mode) { parent = node; - node = of_get_child_by_name(parent, "clock"); - if (!node) - node = of_get_child_by_name(parent, "clk"); - of_node_put(parent); + child = of_get_child_by_name(parent, "clock"); + if (!child) + child = of_get_child_by_name(parent, "clk"); + if (child) { + of_node_put(parent); + node = child; + } } clkspec.np = node; diff --git a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c index 5319cd3804801f03bf181e0807b965ab4ee0c1fd..3bc55ab75314bf8d87e05490d95ae0c1c0c4c174 100644 --- a/drivers/clk/uniphier/clk-uniphier-fixed-rate.c +++ b/drivers/clk/uniphier/clk-uniphier-fixed-rate.c @@ -24,6 +24,7 @@ struct clk_hw *uniphier_clk_register_fixed_rate(struct device *dev, init.name = name; init.ops = &clk_fixed_rate_ops; + init.flags = 0; init.parent_names = NULL; init.num_parents = 0; diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index eb596ff9e7bb30908e82f80c04e8046893ce2b8c..279ddff81ab4955a7106dd3504433fa41f546a4e 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -229,8 +229,10 @@ static int __init parse_pmtmr(char *arg) int ret; ret = kstrtouint(arg, 16, &base); - if (ret) - return ret; + if (ret) { + pr_warn("PMTMR: invalid 'pmtmr=' value: '%s'\n", arg); + return 1; + } pr_info("PMTMR IOPort override: 0x%04x -> 0x%04x\n", pmtmr_ioport, base); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 
fabad79baafce28ea848a1b42538bed66b3d6893..df194b05e944c631415207f19aa166f36fddf691 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -494,11 +494,14 @@ static int exynos4_mct_dying_cpu(unsigned int cpu) return 0; } -static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) +static int __init exynos4_timer_resources(struct device_node *np) { - int err, cpu; struct clk *mct_clk, *tick_clk; + reg_base = of_iomap(np, 0); + if (!reg_base) + panic("%s: unable to ioremap mct address space\n", __func__); + tick_clk = of_clk_get_by_name(np, "fin_pll"); if (IS_ERR(tick_clk)) panic("%s: unable to determine tick clock rate\n", __func__); @@ -509,9 +512,32 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * panic("%s: unable to retrieve mct clock instance\n", __func__); clk_prepare_enable(mct_clk); - reg_base = base; - if (!reg_base) - panic("%s: unable to ioremap mct address space\n", __func__); + return 0; +} + +static int __init exynos4_timer_interrupts(struct device_node *np, + unsigned int int_type) +{ + int nr_irqs, i, err, cpu; + + mct_int_type = int_type; + + /* This driver uses only one global timer interrupt */ + mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); + + /* + * Find out the number of local irqs specified. The local + * timer irqs are specified after the four global timer + * irqs are specified. + */ + nr_irqs = of_irq_count(np); + if (nr_irqs > ARRAY_SIZE(mct_irqs)) { + pr_err("exynos-mct: too many (%d) interrupts configured in DT\n", + nr_irqs); + nr_irqs = ARRAY_SIZE(mct_irqs); + } + for (i = MCT_L0_IRQ; i < nr_irqs; i++) + mct_irqs[i] = irq_of_parse_and_map(np, i); if (mct_int_type == MCT_INT_PPI) { @@ -522,11 +548,14 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * mct_irqs[MCT_L0_IRQ], err); } else { for_each_possible_cpu(cpu) { - int mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; + int mct_irq; struct mct_clock_event_device *pcpu_mevt = per_cpu_ptr(&percpu_mct_tick, cpu); pcpu_mevt->evt.irq = -1; + if (MCT_L0_IRQ + cpu >= ARRAY_SIZE(mct_irqs)) + break; + mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; irq_set_status_flags(mct_irq, IRQ_NOAUTOEN); if (request_irq(mct_irq, @@ -571,24 +600,13 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * static int __init mct_init_dt(struct device_node *np, unsigned int int_type) { - u32 nr_irqs, i; int ret; - mct_int_type = int_type; - - /* This driver uses only one global timer interrupt */ - mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ); - - /* - * Find out the number of local irqs specified. The local - * timer irqs are specified after the four global timer - * irqs are specified. 
- */ - nr_irqs = of_irq_count(np); - for (i = MCT_L0_IRQ; i < nr_irqs; i++) - mct_irqs[i] = irq_of_parse_and_map(np, i); + ret = exynos4_timer_resources(np); + if (ret) + return ret; - ret = exynos4_timer_resources(np, of_iomap(np, 0)); + ret = exynos4_timer_interrupts(np, int_type); if (ret) return ret; diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c index 59e11ca8ee73e02186335c9707f02b2030b511bc..5c9485cb4e0590bfde56ba72d4c20afdc288fc76 100644 --- a/drivers/clocksource/timer-microchip-pit64b.c +++ b/drivers/clocksource/timer-microchip-pit64b.c @@ -121,7 +121,7 @@ static u64 mchp_pit64b_clksrc_read(struct clocksource *cs) return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); } -static u64 mchp_pit64b_sched_read_clk(void) +static u64 notrace mchp_pit64b_sched_read_clk(void) { return mchp_pit64b_cnt_read(mchp_pit64b_cs_base); } diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index 572da477c6d35c5edc64f16b5d8562cde5311d53..b965f20174e3aadf405ccf4998bd63ba1ec647ae 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c @@ -157,9 +157,9 @@ static __init int timer_of_base_init(struct device_node *np, of_base->base = of_base->name ? of_io_request_and_map(np, of_base->index, of_base->name) : of_iomap(np, of_base->index); - if (IS_ERR(of_base->base)) { - pr_err("Failed to iomap (%s)\n", of_base->name); - return PTR_ERR(of_base->base); + if (IS_ERR_OR_NULL(of_base->base)) { + pr_err("Failed to iomap (%s:%s)\n", np->name, of_base->name); + return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM; } return 0; diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index b6f97960d8ee020f908dbd260b6c04c188c0b4d8..2737407ff0698033daa955a41abd65cba914af6f 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -241,8 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void) bool quirk_unreliable_oscillator = false; /* Quirk unreliable 32 KiHz oscillator with incomplete dts */ - if (of_machine_is_compatible("ti,omap3-beagle") || - of_machine_is_compatible("timll,omap3-devkit8000")) { + if (of_machine_is_compatible("ti,omap3-beagle-ab4")) { quirk_unreliable_oscillator = true; counter_32k = -ENODEV; } @@ -695,9 +694,9 @@ static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa) return 0; } - if (pa == 0x48034000) /* dra7 dmtimer3 */ + if (pa == 0x4882c000) /* dra7 dmtimer15 */ return dmtimer_percpu_timer_init(np, 0); - else if (pa == 0x48036000) /* dra7 dmtimer4 */ + else if (pa == 0x4882e000) /* dra7 dmtimer16 */ return dmtimer_percpu_timer_init(np, 1); return 0; diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig index 2de53ab0dd252be4bb89f018b0827d3bf1fd6627..cbdf84200e278695d9da6149655c70ce0f66694a 100644 --- a/drivers/counter/Kconfig +++ b/drivers/counter/Kconfig @@ -41,7 +41,7 @@ config STM32_TIMER_CNT config STM32_LPTIMER_CNT tristate "STM32 LP Timer encoder counter driver" - depends on (MFD_STM32_LPTIMER || COMPILE_TEST) && IIO + depends on MFD_STM32_LPTIMER || COMPILE_TEST help Select this option to enable STM32 Low-Power Timer quadrature encoder and counter driver. 
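[Editor's note] The Kconfig hunk above can drop the IIO dependency because the diff that follows removes the driver's IIO interface entirely, leaving only the counter subsystem API. The counting rules themselves are unchanged; they are documented by the quadrature table moved into the kernel-doc comment below. As a rough illustration of the "both edges" row of that table, here is a minimal, self-contained user-space sketch (not driver code; all names are hypothetical): an edge on IN1 counts up or down depending on the level of IN2 at that instant.

#include <stdio.h>

enum edge { EDGE_RISING, EDGE_FALLING };

/*
 * Direction contributed by one IN1 edge in both-edges quadrature mode,
 * per the table in stm32-lptimer-cnt.c below:
 *   Rising  edge: IN2 high -> Down, IN2 low -> Up
 *   Falling edge: IN2 high -> Up,   IN2 low -> Down
 * Returns +1 to count up, -1 to count down.
 */
static int in1_edge_direction(enum edge e, int in2_level)
{
	if (e == EDGE_RISING)
		return in2_level ? -1 : 1;
	return in2_level ? 1 : -1;
}

int main(void)
{
	int count = 0;

	/* Forward rotation: IN1 rises while IN2 is low, falls while IN2 is high. */
	count += in1_edge_direction(EDGE_RISING, 0);	/* Up */
	count += in1_edge_direction(EDGE_FALLING, 1);	/* Up */

	/* Reverse rotation: the same IN1 edges with the IN2 levels swapped. */
	count += in1_edge_direction(EDGE_RISING, 1);	/* Down */
	count += in1_edge_direction(EDGE_FALLING, 0);	/* Down */

	printf("count = %d\n", count);	/* back to 0 */
	return 0;
}

In both-edges mode each full quadrature cycle produces four counts across the IN1 and IN2 edges, which is why the IIO read_raw code being deleted below reported a SCALE of 0.25 for that mode and 0.5 when only a single edge polarity is active.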
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c index fd6828e2d34f532629217c5167d188c6e4241e2f..937439635d53f4a6fcc0fd334ca70caee19a521d 100644 --- a/drivers/counter/stm32-lptimer-cnt.c +++ b/drivers/counter/stm32-lptimer-cnt.c @@ -12,8 +12,8 @@ #include #include -#include #include +#include #include #include #include @@ -107,249 +107,27 @@ static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable) return regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, val); } -static int stm32_lptim_write_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - int val, int val2, long mask) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - int ret; - - switch (mask) { - case IIO_CHAN_INFO_ENABLE: - if (val < 0 || val > 1) - return -EINVAL; - - /* Check nobody uses the timer, or already disabled/enabled */ - ret = stm32_lptim_is_enabled(priv); - if ((ret < 0) || (!ret && !val)) - return ret; - if (val && ret) - return -EBUSY; - - ret = stm32_lptim_setup(priv, val); - if (ret) - return ret; - return stm32_lptim_set_enable_state(priv, val); - - default: - return -EINVAL; - } -} - -static int stm32_lptim_read_raw(struct iio_dev *indio_dev, - struct iio_chan_spec const *chan, - int *val, int *val2, long mask) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - u32 dat; - int ret; - - switch (mask) { - case IIO_CHAN_INFO_RAW: - ret = regmap_read(priv->regmap, STM32_LPTIM_CNT, &dat); - if (ret) - return ret; - *val = dat; - return IIO_VAL_INT; - - case IIO_CHAN_INFO_ENABLE: - ret = stm32_lptim_is_enabled(priv); - if (ret < 0) - return ret; - *val = ret; - return IIO_VAL_INT; - - case IIO_CHAN_INFO_SCALE: - /* Non-quadrature mode: scale = 1 */ - *val = 1; - *val2 = 0; - if (priv->quadrature_mode) { - /* - * Quadrature encoder mode: - * - both edges, quarter cycle, scale is 0.25 - * - either rising/falling edge scale is 0.5 - */ - if (priv->polarity > 1) - *val2 = 2; - else - *val2 = 1; - } - return IIO_VAL_FRACTIONAL_LOG2; - - default: - return -EINVAL; - } -} - -static const struct iio_info stm32_lptim_cnt_iio_info = { - .read_raw = stm32_lptim_read_raw, - .write_raw = stm32_lptim_write_raw, -}; - -static const char *const stm32_lptim_quadrature_modes[] = { - "non-quadrature", - "quadrature", -}; - -static int stm32_lptim_get_quadrature_mode(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - return priv->quadrature_mode; -} - -static int stm32_lptim_set_quadrature_mode(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, - unsigned int type) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - if (stm32_lptim_is_enabled(priv)) - return -EBUSY; - - priv->quadrature_mode = type; - - return 0; -} - -static const struct iio_enum stm32_lptim_quadrature_mode_en = { - .items = stm32_lptim_quadrature_modes, - .num_items = ARRAY_SIZE(stm32_lptim_quadrature_modes), - .get = stm32_lptim_get_quadrature_mode, - .set = stm32_lptim_set_quadrature_mode, -}; - -static const char * const stm32_lptim_cnt_polarity[] = { - "rising-edge", "falling-edge", "both-edges", -}; - -static int stm32_lptim_cnt_get_polarity(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - return priv->polarity; -} - -static int stm32_lptim_cnt_set_polarity(struct iio_dev *indio_dev, - const struct iio_chan_spec *chan, - unsigned int type) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - if 
(stm32_lptim_is_enabled(priv)) - return -EBUSY; - - priv->polarity = type; - - return 0; -} - -static const struct iio_enum stm32_lptim_cnt_polarity_en = { - .items = stm32_lptim_cnt_polarity, - .num_items = ARRAY_SIZE(stm32_lptim_cnt_polarity), - .get = stm32_lptim_cnt_get_polarity, - .set = stm32_lptim_cnt_set_polarity, -}; - -static ssize_t stm32_lptim_cnt_get_ceiling(struct stm32_lptim_cnt *priv, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%u\n", priv->ceiling); -} - -static ssize_t stm32_lptim_cnt_set_ceiling(struct stm32_lptim_cnt *priv, - const char *buf, size_t len) -{ - int ret; - - if (stm32_lptim_is_enabled(priv)) - return -EBUSY; - - ret = kstrtouint(buf, 0, &priv->ceiling); - if (ret) - return ret; - - if (priv->ceiling > STM32_LPTIM_MAX_ARR) - return -EINVAL; - - return len; -} - -static ssize_t stm32_lptim_cnt_get_preset_iio(struct iio_dev *indio_dev, - uintptr_t private, - const struct iio_chan_spec *chan, - char *buf) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - return stm32_lptim_cnt_get_ceiling(priv, buf); -} - -static ssize_t stm32_lptim_cnt_set_preset_iio(struct iio_dev *indio_dev, - uintptr_t private, - const struct iio_chan_spec *chan, - const char *buf, size_t len) -{ - struct stm32_lptim_cnt *priv = iio_priv(indio_dev); - - return stm32_lptim_cnt_set_ceiling(priv, buf, len); -} - -/* LP timer with encoder */ -static const struct iio_chan_spec_ext_info stm32_lptim_enc_ext_info[] = { - { - .name = "preset", - .shared = IIO_SEPARATE, - .read = stm32_lptim_cnt_get_preset_iio, - .write = stm32_lptim_cnt_set_preset_iio, - }, - IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en), - IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en), - IIO_ENUM("quadrature_mode", IIO_SEPARATE, - &stm32_lptim_quadrature_mode_en), - IIO_ENUM_AVAILABLE("quadrature_mode", &stm32_lptim_quadrature_mode_en), - {} -}; - -static const struct iio_chan_spec stm32_lptim_enc_channels = { - .type = IIO_COUNT, - .channel = 0, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_ENABLE) | - BIT(IIO_CHAN_INFO_SCALE), - .ext_info = stm32_lptim_enc_ext_info, - .indexed = 1, -}; - -/* LP timer without encoder (counter only) */ -static const struct iio_chan_spec_ext_info stm32_lptim_cnt_ext_info[] = { - { - .name = "preset", - .shared = IIO_SEPARATE, - .read = stm32_lptim_cnt_get_preset_iio, - .write = stm32_lptim_cnt_set_preset_iio, - }, - IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en), - IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en), - {} -}; - -static const struct iio_chan_spec stm32_lptim_cnt_channels = { - .type = IIO_COUNT, - .channel = 0, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | - BIT(IIO_CHAN_INFO_ENABLE) | - BIT(IIO_CHAN_INFO_SCALE), - .ext_info = stm32_lptim_cnt_ext_info, - .indexed = 1, -}; - /** * enum stm32_lptim_cnt_function - enumerates LPTimer counter & encoder modes * @STM32_LPTIM_COUNTER_INCREASE: up count on IN1 rising, falling or both edges * @STM32_LPTIM_ENCODER_BOTH_EDGE: count on both edges (IN1 & IN2 quadrature) + * + * In non-quadrature mode, device counts up on active edge. 
+ * In quadrature mode, encoder counting scenarios are as follows: + * +---------+----------+--------------------+--------------------+ + * | Active | Level on | IN1 signal | IN2 signal | + * | edge | opposite +----------+---------+----------+---------+ + * | | signal | Rising | Falling | Rising | Falling | + * +---------+----------+----------+---------+----------+---------+ + * | Rising | High -> | Down | - | Up | - | + * | edge | Low -> | Up | - | Down | - | + * +---------+----------+----------+---------+----------+---------+ + * | Falling | High -> | - | Up | - | Down | + * | edge | Low -> | - | Down | - | Up | + * +---------+----------+----------+---------+----------+---------+ + * | Both | High -> | Down | Up | Up | Down | + * | edges | Low -> | Up | Down | Down | Up | + * +---------+----------+----------+---------+----------+---------+ */ enum stm32_lptim_cnt_function { STM32_LPTIM_COUNTER_INCREASE, @@ -484,7 +262,7 @@ static ssize_t stm32_lptim_cnt_ceiling_read(struct counter_device *counter, { struct stm32_lptim_cnt *const priv = counter->priv; - return stm32_lptim_cnt_get_ceiling(priv, buf); + return snprintf(buf, PAGE_SIZE, "%u\n", priv->ceiling); } static ssize_t stm32_lptim_cnt_ceiling_write(struct counter_device *counter, @@ -493,8 +271,22 @@ static ssize_t stm32_lptim_cnt_ceiling_write(struct counter_device *counter, const char *buf, size_t len) { struct stm32_lptim_cnt *const priv = counter->priv; + unsigned int ceiling; + int ret; + + if (stm32_lptim_is_enabled(priv)) + return -EBUSY; + + ret = kstrtouint(buf, 0, &ceiling); + if (ret) + return ret; + + if (ceiling > STM32_LPTIM_MAX_ARR) + return -EINVAL; + + priv->ceiling = ceiling; - return stm32_lptim_cnt_set_ceiling(priv, buf, len); + return len; } static const struct counter_count_ext stm32_lptim_cnt_ext[] = { @@ -630,32 +422,19 @@ static int stm32_lptim_cnt_probe(struct platform_device *pdev) { struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent); struct stm32_lptim_cnt *priv; - struct iio_dev *indio_dev; - int ret; if (IS_ERR_OR_NULL(ddata)) return -EINVAL; - indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv)); - if (!indio_dev) + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) return -ENOMEM; - priv = iio_priv(indio_dev); priv->dev = &pdev->dev; priv->regmap = ddata->regmap; priv->clk = ddata->clk; priv->ceiling = STM32_LPTIM_MAX_ARR; - /* Initialize IIO device */ - indio_dev->name = dev_name(&pdev->dev); - indio_dev->dev.of_node = pdev->dev.of_node; - indio_dev->info = &stm32_lptim_cnt_iio_info; - if (ddata->has_encoder) - indio_dev->channels = &stm32_lptim_enc_channels; - else - indio_dev->channels = &stm32_lptim_cnt_channels; - indio_dev->num_channels = 1; - /* Initialize Counter device */ priv->counter.name = dev_name(&pdev->dev); priv->counter.parent = &pdev->dev; @@ -673,10 +452,6 @@ static int stm32_lptim_cnt_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - ret = devm_iio_device_register(&pdev->dev, indio_dev); - if (ret) - return ret; - return devm_counter_register(&pdev->dev, &priv->counter); } diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index f1b7e3dd6e5daf38c249f15ec2aeb2baa636369c..7c762e105146420872b0f6f43eeeff68f45d90e8 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -109,3 +109,4 @@ obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o 
+obj-$(CONFIG_SW64_CPUFREQ) += sw64_cpufreq.o diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 8e159fb6af9cdab10ebfab8f6fa0eb32fa767eb0..30dafe8fc5054eadbdc3a592f89576f18a066718 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1400,7 +1400,7 @@ static int cpufreq_online(unsigned int cpu) ret = freq_qos_add_request(&policy->constraints, policy->min_freq_req, FREQ_QOS_MIN, - policy->min); + FREQ_QOS_MIN_DEFAULT_VALUE); if (ret < 0) { /* * So we don't call freq_qos_remove_request() for an @@ -1420,7 +1420,7 @@ static int cpufreq_online(unsigned int cpu) ret = freq_qos_add_request(&policy->constraints, policy->max_freq_req, FREQ_QOS_MAX, - policy->max); + FREQ_QOS_MAX_DEFAULT_VALUE); if (ret < 0) { policy->max_freq_req = NULL; goto out_destroy_policy; diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index fba9937a406b387164a3836c69af2b1ebf2aa0c1..7fdd30e92e42973bfb549500f77a06a4638fe9b4 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -130,7 +130,7 @@ static void get_krait_bin_format_b(struct device *cpu_dev, } /* Check PVS_BLOW_STATUS */ - pte_efuse = *(((u32 *)buf) + 4); + pte_efuse = *(((u32 *)buf) + 1); pte_efuse &= BIT(21); if (pte_efuse) { dev_dbg(cpu_dev, "PVS bin: %d\n", *pvs); diff --git a/drivers/cpufreq/sw64_cpufreq.c b/drivers/cpufreq/sw64_cpufreq.c new file mode 100644 index 0000000000000000000000000000000000000000..5f49b5175d34f634d13e71c18c87a6e142186359 --- /dev/null +++ b/drivers/cpufreq/sw64_cpufreq.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/setup.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +/* + * Cpufreq driver for the sw64 processors + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include /* set_cpus_allowed() */ +#include +#include +#include + +#include +#include + +static uint nowait; + +static struct clk *cpuclk; + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data); + +static struct notifier_block sw64_cpufreq_notifier_block = { + .notifier_call = sw64_cpu_freq_notifier +}; + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freqs = (struct cpufreq_freqs *)data; + unsigned long cpu; + + for_each_online_cpu(cpu) { + if (val == CPUFREQ_POSTCHANGE) { + sw64_update_clockevents(cpu, freqs->new * 1000); + current_cpu_data.loops_per_jiffy = loops_per_jiffy; + } + } + + return 0; +} + +static unsigned int sw64_cpufreq_get(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + + if (!policy || IS_ERR(policy->clk)) { + pr_err("%s: No %s associated to cpu: %d\n", + __func__, policy ? "clk" : "policy", cpu); + return 0; + } + + return sw64_clk_get_rate(policy->clk); +} + +/* + * Here we notify other drivers of the proposed change and the final change. 
+ */ +static int sw64_cpufreq_target(struct cpufreq_policy *policy, + unsigned int index) +{ + unsigned long freq; + + freq = (get_cpu_freq() / 1000) * index / 48; + + sw64_store_policy(policy); + + /* setting the cpu frequency */ + sw64_set_rate(-1, freq * 1000); + + return 0; +} + +static int sw64_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + unsigned long rate; + int i; + + cpuclk = sw64_clk_get(NULL, "cpu_clk"); + if (IS_ERR(cpuclk)) { + pr_err("couldn't get CPU clk\n"); + return PTR_ERR(cpuclk); + } + + rate = get_cpu_freq() / 1000; + + /* clock table init */ + for (i = 0; + (sw64_clockmod_table[i].frequency != CPUFREQ_TABLE_END); + i++) + if (sw64_clockmod_table[i].frequency == 0) + sw64_clockmod_table[i].frequency = (rate * i) / 48; + + sw64_set_rate(-1, rate * 1000); + + policy->clk = cpuclk; + + cpufreq_generic_init(policy, &sw64_clockmod_table[0], 0); + + return 0; +} + +static int sw64_cpufreq_verify(struct cpufreq_policy_data *policy) +{ + return cpufreq_frequency_table_verify(policy, &sw64_clockmod_table[0]); +} + +static int sw64_cpufreq_exit(struct cpufreq_policy *policy) +{ + return 0; +} + +static struct freq_attr *sw64_table_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, NULL, +}; + +static struct cpufreq_driver sw64_cpufreq_driver = { + .name = "sw64", + .init = sw64_cpufreq_cpu_init, + .verify = sw64_cpufreq_verify, + .target_index = sw64_cpufreq_target, + .get = sw64_cpufreq_get, + .exit = sw64_cpufreq_exit, + .attr = sw64_table_attr, +}; + +static const struct platform_device_id platform_device_ids[] = { + { + .name = "sw64_cpufreq", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "sw64_cpufreq", + }, + .id_table = platform_device_ids, +}; + + +static int __init cpufreq_init(void) +{ + int ret; + + /* Register platform stuff */ + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("SW-64 CPU frequency driver\n"); + + cpufreq_register_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + return cpufreq_register_driver(&sw64_cpufreq_driver); +} + +static void __exit cpufreq_exit(void) +{ + cpufreq_unregister_driver(&sw64_cpufreq_driver); + cpufreq_unregister_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + platform_driver_unregister(&platform_driver); +} + +module_init(cpufreq_init); +module_exit(cpufreq_exit); + +module_param(nowait, uint, 0644); +MODULE_PARM_DESC(nowait, "Disable SW-64 specific wait"); + +MODULE_DESCRIPTION("cpufreq driver for sw64"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 33707a2e55ff0c2dd0e2f16c1487c708038db867..64133d4da3d566b88fdb905e5ecc3c39cbe74a98 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -11,6 +11,7 @@ * You could find a link for the datasheet in Documentation/arm/sunxi.rst */ +#include #include #include #include @@ -280,7 +281,9 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) flow = rctx->flow; err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm)); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 
2f09a37306e28412c81c3f4a7576526c5312af10..7f16b9406a41fc38f3f824fa98e478a6f1b8c538 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -9,6 +9,7 @@ * * You could find the datasheet in Documentation/arm/sunxi.rst */ +#include #include #include #include @@ -413,6 +414,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) theend: kfree(buf); kfree(result); + local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 7c355bc2fb0664172bd3c1f499bafd55a82234d1..f783748462f94781f5553dff988afd2b0f51d877 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -11,6 +11,7 @@ * You could find a link for the datasheet in Documentation/arm/sunxi.rst */ +#include #include #include #include @@ -271,7 +272,9 @@ static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *ar struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); err = sun8i_ss_cipher(breq); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 80e89066dbd1ae60f3a646a8a69481267b1d5074..319fe3279a7162e5bc5811822878fe87a9543129 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -30,6 +30,8 @@ static const struct ss_variant ss_a80_variant = { .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, }, + .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, + }, .op_mode = { SS_OP_ECB, SS_OP_CBC, }, .ss_clks = { diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 64446b86c927f15dbc21e8dbb7cc52929a1f3a23..7b1d00fbbeb0f3aa56874fdfdf0c5734f004b43c 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -9,6 +9,7 @@ * * You could find the datasheet in Documentation/arm/sunxi.rst */ +#include #include #include #include @@ -441,6 +442,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) theend: kfree(pad); kfree(result); + local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index 8b5e07316352c3df6ac52ea27bebd11c0897fe72..652e72d030bb09a51f2d772e6817283b489b2ad8 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -265,7 +265,9 @@ static int meson_handle_cipher_request(struct crypto_engine *engine, struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); err = meson_cipher(breq); + local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); + local_bh_enable(); return 0; } diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index a780e627838ae9a3aa41835a32d353c4d4ea00ff..5a40c7d10cc9a6912e3dba2104c0d6990dc78111 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -5467,7 +5467,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) dpaa2_fd_set_len(&fd, 
dpaa2_fl_get_len(&req->fd_flt[1])); dpaa2_fd_set_flc(&fd, req->flc_dma); - ppriv = this_cpu_ptr(priv->ppriv); + ppriv = raw_cpu_ptr(priv->ppriv); for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, &fd); diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index 0770a83bf1a570c1b0392ab82f76e323ea4636b5..b3eea329f840fbc974aed0b93c2412c11efd9895 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c @@ -633,6 +633,20 @@ static int ccp_terminate_all(struct dma_chan *dma_chan) return 0; } +static void ccp_dma_release(struct ccp_device *ccp) +{ + struct ccp_dma_chan *chan; + struct dma_chan *dma_chan; + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) { + chan = ccp->ccp_dma_chan + i; + dma_chan = &chan->dma_chan; + tasklet_kill(&chan->cleanup_tasklet); + list_del_rcu(&dma_chan->device_node); + } +} + int ccp_dmaengine_register(struct ccp_device *ccp) { struct ccp_dma_chan *chan; @@ -737,6 +751,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp) return 0; err_reg: + ccp_dma_release(ccp); kmem_cache_destroy(ccp->dma_desc_cache); err_cache: @@ -753,6 +768,7 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp) return; dma_async_device_unregister(dma_dev); + ccp_dma_release(ccp); kmem_cache_destroy(ccp->dma_desc_cache); kmem_cache_destroy(ccp->dma_cmd_cache); diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index a5e041d9d2cf132516aa43db7bb3d13ec04f703a..11e0278c8631d2e0b2ea5f0e406b02074218a658 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -258,6 +258,13 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg, { int ret = 0; + if (!nbytes) { + *mapped_nents = 0; + *lbytes = 0; + *nents = 0; + return 0; + } + *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); if (*nents > max_sg_nents) { *nents = 0; diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index dafa6577a8451f2c175f44070a10be4760b34b92..c289e4d5cbdc0517a9fba64ca05c0015400cefae 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -254,8 +254,8 @@ static void cc_cipher_exit(struct crypto_tfm *tfm) &ctx_p->user.key_dma_addr); /* Free key buffer in context */ - kfree_sensitive(ctx_p->user.key); dev_dbg(dev, "Free key buffer in context. 
key=@%p\n", ctx_p->user.key); + kfree_sensitive(ctx_p->user.key); } struct tdes_keys { diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index ae0bd02f5c40ebac9f17cb61eb35abb8d6cc76a7..f9f749e96aae266179a048323d00865752c72b34 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -26,6 +26,7 @@ config CRYPTO_DEV_HISI_SEC2 select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 + select CRYPTO_SM4 depends on PCI && PCI_MSI depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h index e0b4a1982ee9ef709db645b5e755a2edaaa1605b..9a0558ed82f904c419850fbade118133c81cd9cd 100644 --- a/drivers/crypto/hisilicon/hpre/hpre.h +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -4,7 +4,7 @@ #define __HISI_HPRE_H #include -#include "../qm.h" +#include #define HPRE_SQE_SIZE sizeof(struct hpre_sqe) #define HPRE_PF_DEF_Q_NUM 64 diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index a032c192ef1d6f0b149c52c13308b4fdc0a750a5..97d54c1465c2b05c629133b76117ec6a1594521c 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -1177,13 +1177,10 @@ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) static void hpre_key_to_big_end(u8 *data, int len) { int i, j; - u8 tmp; for (i = 0; i < len / 2; i++) { j = len - i - 1; - tmp = data[j]; - data[j] = data[i]; - data[i] = tmp; + swap(data[j], data[i]); } } @@ -1865,7 +1862,7 @@ static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req, */ if (memcmp(ptr, p, ctx->key_sz) == 0) { dev_err(dev, "gx is p!\n"); - return -EINVAL; + goto err; } else if (memcmp(ptr, p, ctx->key_sz) > 0) { hpre_curve25519_src_modulo_p(ptr); } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 65a641396c07fe88043c707a49066f1584084842..8200793aa15c290c2ba360aef0e6e3f1cd7f3c26 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -36,6 +36,12 @@ #define HPRE_DATA_WUSER_CFG 0x301040 #define HPRE_INT_MASK 0x301400 #define HPRE_INT_STATUS 0x301800 +#define HPRE_HAC_INT_MSK 0x301400 +#define HPRE_HAC_RAS_CE_ENB 0x301410 +#define HPRE_HAC_RAS_NFE_ENB 0x301414 +#define HPRE_HAC_RAS_FE_ENB 0x301418 +#define HPRE_HAC_INT_SET 0x301500 +#define HPRE_RNG_TIMEOUT_NUM 0x301A34 #define HPRE_CORE_INT_ENABLE 0 #define HPRE_CORE_INT_DISABLE GENMASK(21, 0) #define HPRE_RDCHN_INI_ST 0x301a00 @@ -68,8 +74,7 @@ #define HPRE_REG_RD_INTVRL_US 10 #define HPRE_REG_RD_TMOUT_US 1000 #define HPRE_DBGFS_VAL_MAX_LEN 20 -#define HPRE_PCI_DEVICE_ID 0xa258 -#define HPRE_PCI_VF_DEVICE_ID 0xa259 +#define PCI_DEVICE_ID_HUAWEI_HPRE_PF 0xa258 #define HPRE_QM_USR_CFG_MASK GENMASK(31, 1) #define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0) #define HPRE_QM_VFG_AX_MASK GENMASK(7, 0) @@ -103,16 +108,25 @@ #define HPRE_QM_PM_FLR BIT(11) #define HPRE_QM_SRIOV_FLR BIT(12) -#define HPRE_SHAPER_TYPE_RATE 128 +#define HPRE_SHAPER_TYPE_RATE 640 #define HPRE_VIA_MSI_DSM 1 #define HPRE_SQE_MASK_OFFSET 8 #define HPRE_SQE_MASK_LEN 24 +#define HPRE_DFX_BASE 0x301000 +#define HPRE_DFX_COMMON1 0x301400 +#define HPRE_DFX_COMMON2 0x301A00 +#define HPRE_DFX_CORE 0x302000 +#define HPRE_DFX_BASE_LEN 0x55 +#define HPRE_DFX_COMMON1_LEN 0x41 +#define HPRE_DFX_COMMON2_LEN 0xE +#define HPRE_DFX_CORE_LEN 0x43 + static const char hpre_name[] = "hisi_hpre"; static struct dentry 
*hpre_debugfs_root; static const struct pci_device_id hpre_dev_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID) }, - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_VF_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_PF) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) }, { 0, } }; @@ -193,28 +207,32 @@ static const u64 hpre_cluster_offsets[] = { }; static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = { - {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET}, - {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET}, - {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET}, - {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET}, - {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET}, + {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET}, + {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET}, + {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET}, + {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET}, + {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET}, }; static const struct debugfs_reg32 hpre_com_dfx_regs[] = { - {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE}, - {"AXQOS ", HPRE_VFG_AXQOS}, - {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG}, - {"QM_ARUSR_MCFG1 ", QM_ARUSER_M_CFG_1}, - {"QM_AWUSR_MCFG1 ", QM_AWUSER_M_CFG_1}, - {"BD_ENDIAN ", HPRE_BD_ENDIAN}, - {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS}, - {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG}, - {"POISON_BYPASS ", HPRE_POISON_BYPASS}, - {"BD_ARUSER ", HPRE_BD_ARUSR_CFG}, - {"BD_AWUSER ", HPRE_BD_AWUSR_CFG}, - {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG}, - {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG}, - {"INT_STATUS ", HPRE_INT_STATUS}, + {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE}, + {"AXQOS ", HPRE_VFG_AXQOS}, + {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG}, + {"BD_ENDIAN ", HPRE_BD_ENDIAN}, + {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS}, + {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG}, + {"POISON_BYPASS ", HPRE_POISON_BYPASS}, + {"BD_ARUSER ", HPRE_BD_ARUSR_CFG}, + {"BD_AWUSER ", HPRE_BD_AWUSR_CFG}, + {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG}, + {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG}, + {"INT_STATUS ", HPRE_INT_STATUS}, + {"INT_MASK ", HPRE_HAC_INT_MSK}, + {"RAS_CE_ENB ", HPRE_HAC_RAS_CE_ENB}, + {"RAS_NFE_ENB ", HPRE_HAC_RAS_NFE_ENB}, + {"RAS_FE_ENB ", HPRE_HAC_RAS_FE_ENB}, + {"INT_SET ", HPRE_HAC_INT_SET}, + {"RNG_TIMEOUT_NUM ", HPRE_RNG_TIMEOUT_NUM}, }; static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = { @@ -227,6 +245,53 @@ static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = { "invalid_req_cnt" }; +/* define the HPRE's dfx regs region and region length */ +static struct dfx_diff_registers hpre_diff_regs[] = { + { + .reg_offset = HPRE_DFX_BASE, + .reg_len = HPRE_DFX_BASE_LEN, + }, { + .reg_offset = HPRE_DFX_COMMON1, + .reg_len = HPRE_DFX_COMMON1_LEN, + }, { + .reg_offset = HPRE_DFX_COMMON2, + .reg_len = HPRE_DFX_COMMON2_LEN, + }, { + .reg_offset = HPRE_DFX_CORE, + .reg_len = HPRE_DFX_CORE_LEN, + }, +}; + +static int hpre_diff_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + + hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs, + ARRAY_SIZE(hpre_diff_regs)); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(hpre_diff_regs); + +static int hpre_com_regs_show(struct seq_file *s, void *unused) +{ + hisi_qm_regs_dump(s, s->private); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(hpre_com_regs); + +static int hpre_cluster_regs_show(struct seq_file *s, void *unused) +{ + hisi_qm_regs_dump(s, s->private); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs); + static const struct kernel_param_ops hpre_uacce_mode_ops = { .set = uacce_mode_set, .get = param_get_int, @@ -242,7 
+307,7 @@ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); static int pf_q_num_set(const char *val, const struct kernel_param *kp) { - return q_num_set(val, kp, HPRE_PCI_DEVICE_ID); + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF); } static const struct kernel_param_ops hpre_pf_q_num_ops = { @@ -780,24 +845,6 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val) DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get, hpre_debugfs_atomic64_set, "%llu\n"); -static int hpre_com_regs_show(struct seq_file *s, void *unused) -{ - hisi_qm_regs_dump(s, s->private); - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(hpre_com_regs); - -static int hpre_cluster_regs_show(struct seq_file *s, void *unused) -{ - hisi_qm_regs_dump(s, s->private); - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs); - static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, enum hpre_ctrl_dbgfs_file type, int indx) { @@ -896,6 +943,7 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm) static void hpre_dfx_debug_init(struct hisi_qm *qm) { + struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs; struct hpre *hpre = container_of(qm, struct hpre, qm); struct hpre_dfx *dfx = hpre->debug.dfx; struct dentry *parent; @@ -907,6 +955,10 @@ static void hpre_dfx_debug_init(struct hisi_qm *qm) debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i], &hpre_atomic64_ops); } + + if (qm->fun_type == QM_HW_PF && hpre_regs) + debugfs_create_file("diff_regs", 0444, parent, + qm, &hpre_diff_regs_fops); } static int hpre_debugfs_init(struct hisi_qm *qm) @@ -919,9 +971,16 @@ static int hpre_debugfs_init(struct hisi_qm *qm) qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; + ret = hisi_qm_diff_regs_init(qm, hpre_diff_regs, + ARRAY_SIZE(hpre_diff_regs)); + if (ret) { + dev_warn(dev, "Failed to init HPRE diff regs!\n"); + goto debugfs_remove; + } + hisi_qm_debug_init(qm); - if (qm->pdev->device == HPRE_PCI_DEVICE_ID) { + if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) { ret = hpre_ctrl_debug_init(qm); if (ret) goto failed_to_create; @@ -932,12 +991,16 @@ static int hpre_debugfs_init(struct hisi_qm *qm) return 0; failed_to_create: + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); +debugfs_remove: debugfs_remove_recursive(qm->debug.debug_root); return ret; } static void hpre_debugfs_exit(struct hisi_qm *qm) { + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); + debugfs_remove_recursive(qm->debug.debug_root); } @@ -958,7 +1021,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->sqe_size = HPRE_SQE_SIZE; qm->dev_name = hpre_name; - qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ? + qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ? 
QM_HW_PF : QM_HW_VF; if (qm->fun_type == QM_HW_PF) { qm->qp_base = HPRE_PF_DEF_Q_BASE; @@ -970,6 +1033,82 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) return hisi_qm_init(qm); } +static int hpre_show_last_regs_init(struct hisi_qm *qm) +{ + int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs); + int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs); + u8 clusters_num = hpre_cluster_num(qm); + struct qm_debug *debug = &qm->debug; + void __iomem *io_base; + int i, j, idx; + + debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num + + com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); + if (!debug->last_words) + return -ENOMEM; + + for (i = 0; i < com_dfx_regs_num; i++) + debug->last_words[i] = readl_relaxed(qm->io_base + + hpre_com_dfx_regs[i].offset); + + for (i = 0; i < clusters_num; i++) { + io_base = qm->io_base + hpre_cluster_offsets[i]; + for (j = 0; j < cluster_dfx_regs_num; j++) { + idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j; + debug->last_words[idx] = readl_relaxed( + io_base + hpre_cluster_dfx_regs[j].offset); + } + } + + return 0; +} + +static void hpre_show_last_regs_uninit(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + kfree(debug->last_words); + debug->last_words = NULL; +} + +static void hpre_show_last_dfx_regs(struct hisi_qm *qm) +{ + int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs); + int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs); + u8 clusters_num = hpre_cluster_num(qm); + struct qm_debug *debug = &qm->debug; + struct pci_dev *pdev = qm->pdev; + void __iomem *io_base; + int i, j, idx; + u32 val; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + /* dumps last word of the debugging registers during controller reset */ + for (i = 0; i < com_dfx_regs_num; i++) { + val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset); + if (debug->last_words[i] != val) + pci_info(pdev, "Common_core:%s \t= 0x%08x => 0x%08x\n", + hpre_com_dfx_regs[i].name, debug->last_words[i], val); + } + + for (i = 0; i < clusters_num; i++) { + io_base = qm->io_base + hpre_cluster_offsets[i]; + for (j = 0; j < cluster_dfx_regs_num; j++) { + val = readl_relaxed(io_base + + hpre_cluster_dfx_regs[j].offset); + idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j; + if (debug->last_words[idx] != val) + pci_info(pdev, "cluster-%d:%s \t= 0x%08x => 0x%08x\n", + i, hpre_cluster_dfx_regs[j].name, debug->last_words[idx], val); + } + } +} + static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) { const struct hpre_hw_error *err = hpre_hw_errors; @@ -1028,6 +1167,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = { .open_axi_master_ooo = hpre_open_axi_master_ooo, .open_sva_prefetch = hpre_open_sva_prefetch, .close_sva_prefetch = hpre_close_sva_prefetch, + .show_last_dfx_regs = hpre_show_last_dfx_regs, .err_info_init = hpre_err_info_init, }; @@ -1045,8 +1185,11 @@ static int hpre_pf_probe_init(struct hpre *hpre) qm->err_ini = &hpre_err_ini; qm->err_ini->err_info_init(qm); hisi_qm_dev_err_init(qm); + ret = hpre_show_last_regs_init(qm); + if (ret) + pci_err(qm->pdev, "Failed to init last word regs!\n"); - return 0; + return ret; } static int hpre_probe_init(struct hpre *hpre) @@ -1132,6 +1275,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) hisi_qm_stop(qm, QM_NORMAL); err_with_err_init: + hpre_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); err_with_qm_init: @@ -1162,6 +1306,7 @@ static void 
hpre_remove(struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF) { hpre_cnt_regs_clear(qm); qm->debug.curr_qm_qp_num = 0; + hpre_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); } diff --git a/drivers/crypto/hisilicon/migration/acc_vf_migration.h b/drivers/crypto/hisilicon/migration/acc_vf_migration.h index 28aa7e318ca9ec205cadd818b6833a879ceb1dd9..2f459eecb8f03ea7b5f14409d4e4437c85e72ac8 100644 --- a/drivers/crypto/hisilicon/migration/acc_vf_migration.h +++ b/drivers/crypto/hisilicon/migration/acc_vf_migration.h @@ -8,7 +8,7 @@ #include #include -#include "../qm.h" +#include #define VFIO_PCI_OFFSET_SHIFT 40 #define VFIO_PCI_OFFSET_TO_INDEX(off) ((off) >> VFIO_PCI_OFFSET_SHIFT) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index edcd4282d95112bcf0f1c857ee211f70cc49a4f5..f75e14e64c3c292d780c105b9e563b88d2b2fbb3 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -15,7 +15,7 @@ #include #include #include -#include "qm.h" +#include /* eq/aeq irq enable */ #define QM_VF_AEQ_INT_SOURCE 0x0 @@ -33,23 +33,6 @@ #define QM_ABNORMAL_EVENT_IRQ_VECTOR 3 /* mailbox */ -#define QM_MB_CMD_SQC 0x0 -#define QM_MB_CMD_CQC 0x1 -#define QM_MB_CMD_EQC 0x2 -#define QM_MB_CMD_AEQC 0x3 -#define QM_MB_CMD_SQC_BT 0x4 -#define QM_MB_CMD_CQC_BT 0x5 -#define QM_MB_CMD_SQC_VFT_V2 0x6 -#define QM_MB_CMD_STOP_QP 0x8 -#define QM_MB_CMD_SRC 0xc -#define QM_MB_CMD_DST 0xd - -#define QM_MB_CMD_SEND_BASE 0x300 -#define QM_MB_EVENT_SHIFT 8 -#define QM_MB_BUSY_SHIFT 13 -#define QM_MB_OP_SHIFT 14 -#define QM_MB_CMD_DATA_ADDR_L 0x304 -#define QM_MB_CMD_DATA_ADDR_H 0x308 #define QM_MB_PING_ALL_VFS 0xffff #define QM_MB_CMD_DATA_SHIFT 32 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0) @@ -89,6 +72,10 @@ #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1) #define QM_AEQE_TYPE_SHIFT 17 +#define QM_AEQE_CQN_MASK GENMASK(15, 0) +#define QM_CQ_OVERFLOW 0 +#define QM_EQ_OVERFLOW 1 +#define QM_CQE_ERROR 2 #define QM_DOORBELL_CMD_SQ 0 #define QM_DOORBELL_CMD_CQ 1 @@ -99,19 +86,12 @@ #define QM_DB_CMD_SHIFT_V1 16 #define QM_DB_INDEX_SHIFT_V1 32 #define QM_DB_PRIORITY_SHIFT_V1 48 -#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 -#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 #define QM_QUE_ISO_CFG_V 0x0030 #define QM_PAGE_SIZE 0x0034 #define QM_QUE_ISO_EN 0x100154 #define QM_CAPBILITY 0x100158 #define QM_QP_NUN_MASK GENMASK(10, 0) #define QM_QP_DB_INTERVAL 0x10000 -#define QM_QP_MAX_NUM_SHIFT 11 -#define QM_DB_CMD_SHIFT_V2 12 -#define QM_DB_RAND_SHIFT_V2 16 -#define QM_DB_INDEX_SHIFT_V2 32 -#define QM_DB_PRIORITY_SHIFT_V2 48 #define QM_MEM_START_INIT 0x100040 #define QM_MEM_INIT_DONE 0x100044 @@ -122,6 +102,8 @@ #define QM_CQC_VFT 0x1 #define QM_VFT_CFG 0x100060 #define QM_VFT_CFG_OP_ENABLE 0x100054 +#define QM_PM_CTRL 0x100148 +#define QM_IDLE_DISABLE BIT(9) #define QM_VFT_CFG_DATA_L 0x100064 #define QM_VFT_CFG_DATA_H 0x100068 @@ -271,7 +253,15 @@ #define QM_QOS_MAX_CIR_U 6 #define QM_QOS_MAX_CIR_S 11 #define QM_QOS_VAL_MAX_LEN 32 - +#define QM_DFX_BASE 0x0100000 +#define QM_DFX_STATE1 0x0104000 +#define QM_DFX_STATE2 0x01040C8 +#define QM_DFX_COMMON 0x0000 +#define QM_DFX_BASE_LEN 0x5A +#define QM_DFX_STATE1_LEN 0x2E +#define QM_DFX_STATE2_LEN 0x11 +#define QM_DFX_COMMON_LEN 0xC3 +#define QM_DFX_REGS_LEN 4UL #define QM_AUTOSUSPEND_DELAY 3000 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ @@ -485,6 +475,23 @@ static const struct hisi_qm_hw_error qm_hw_error[] = { { /* sentinel */ } }; +/* define the QM's dfx regs region and region length */ +static struct 
dfx_diff_registers qm_diff_regs[] = { + { + .reg_offset = QM_DFX_BASE, + .reg_len = QM_DFX_BASE_LEN, + }, { + .reg_offset = QM_DFX_STATE1, + .reg_len = QM_DFX_STATE1_LEN, + }, { + .reg_offset = QM_DFX_STATE2, + .reg_len = QM_DFX_STATE2_LEN, + }, { + .reg_offset = QM_DFX_COMMON, + .reg_len = QM_DFX_COMMON_LEN, + }, +}; + static const char * const qm_db_timeout[] = { "sq", "cq", "eq", "aeq", }; @@ -501,10 +508,30 @@ static const char * const qp_s[] = { "none", "init", "start", "stop", "close", }; -static const u32 typical_qos_val[QM_QOS_TYPICAL_NUM] = {100, 250, 500, 1000, - 10000, 25000, 50000, 100000}; -static const u32 typical_qos_cbs_s[QM_QOS_TYPICAL_NUM] = {9, 10, 11, 12, 16, - 17, 18, 19}; +struct qm_typical_qos_table { + u32 start; + u32 end; + u32 val; +}; + +/* the qos step is 100 */ +static struct qm_typical_qos_table shaper_cir_s[] = { + {100, 100, 4}, + {200, 200, 3}, + {300, 500, 2}, + {600, 1000, 1}, + {1100, 100000, 0}, +}; + +static struct qm_typical_qos_table shaper_cbs_s[] = { + {100, 200, 9}, + {300, 500, 11}, + {600, 1000, 12}, + {1100, 10000, 16}, + {10100, 25000, 17}, + {25100, 50000, 18}, + {50100, 100000, 19} +}; static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) { @@ -585,6 +612,75 @@ static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp, return avail; } +static u32 qm_get_hw_error_status(struct hisi_qm *qm) +{ + return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); +} + +static u32 qm_get_dev_err_status(struct hisi_qm *qm) +{ + return qm->err_ini->get_dev_hw_err_status(qm); +} + +/* Check if the error causes the master ooo block */ +static int qm_check_dev_error(struct hisi_qm *qm) +{ + u32 val, dev_val; + + if (qm->fun_type == QM_HW_VF) + return 0; + + val = qm_get_hw_error_status(qm); + dev_val = qm_get_dev_err_status(qm); + + if (qm->ver < QM_HW_V3) + return (val & QM_ECC_MBIT) || + (dev_val & qm->err_info.ecc_2bits_mask); + + return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) || + (dev_val & (~qm->err_info.dev_ce_mask)); +} + +static int qm_wait_reset_finish(struct hisi_qm *qm) +{ + int delay = 0; + + /* All reset requests need to be queued for processing */ + while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { + msleep(++delay); + if (delay > QM_RESET_WAIT_TIMEOUT) + return -EBUSY; + } + + return 0; +} + +static int qm_reset_prepare_ready(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + + /* + * PF and VF on host doesnot support resetting at the + * same time on Kunpeng920. 
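[Editor's sketch] qm_wait_reset_finish() above serializes reset requests on a single RESETTING bit: each contender loops on an atomic test-and-set, sleeping 1 ms, then 2 ms, and so on, so the total wait before giving up with -EBUSY grows roughly quadratically with the iteration count. Below is a compilable userspace model of that pattern, using C11 atomics and usleep() in place of test_and_set_bit()/msleep(); the timeout constant is a stand-in, not the kernel's QM_RESET_WAIT_TIMEOUT value.

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>

#define RESET_WAIT_TIMEOUT 400          /* stand-in timeout, in iterations */

static atomic_flag resetting = ATOMIC_FLAG_INIT;

/* Mirrors qm_wait_reset_finish(): queue on the bit, back off, give up. */
static int wait_reset_finish(void)
{
    int delay = 0;

    while (atomic_flag_test_and_set(&resetting)) {
        usleep(++delay * 1000);         /* msleep(++delay) analogue */
        if (delay > RESET_WAIT_TIMEOUT)
            return -EBUSY;
    }
    return 0;                           /* we now own the reset */
}

static void reset_bit_clear(void)
{
    atomic_flag_clear(&resetting);
}

int main(void)
{
    if (wait_reset_finish() == 0) {
        puts("reset in progress");
        reset_bit_clear();
    }
    return 0;
}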
+ */ + if (qm->ver < QM_HW_V3) + return qm_wait_reset_finish(pf_qm); + + return qm_wait_reset_finish(qm); +} + +static void qm_reset_bit_clear(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); + + if (qm->ver < QM_HW_V3) + clear_bit(QM_RESETTING, &pf_qm->misc_ctl); + + clear_bit(QM_RESETTING, &qm->misc_ctl); +} + static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd, u64 base, u16 queue, bool op) { @@ -598,7 +694,7 @@ static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd, } /* return 0 mailbox ready, -ETIMEDOUT hardware timeout */ -static int qm_wait_mb_ready(struct hisi_qm *qm) +int hisi_qm_wait_mb_ready(struct hisi_qm *qm) { u32 val; @@ -606,6 +702,7 @@ static int qm_wait_mb_ready(struct hisi_qm *qm) val, !((val >> QM_MB_BUSY_SHIFT) & 0x1), POLL_PERIOD, POLL_TIMEOUT); } +EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready); /* 128 bit should be written to hardware at one time to trigger a mailbox */ static void qm_mb_write(struct hisi_qm *qm, const void *src) @@ -615,13 +712,13 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src) if (!IS_ENABLED(CONFIG_ARM64)) { memcpy_toio(fun_base, src, 16); - wmb(); + dma_wmb(); return; } asm volatile("ldp %0, %1, %3\n" "stp %0, %1, %2\n" - "dsb sy\n" + "dmb oshst\n" : "=&r" (tmp0), "=&r" (tmp1), "+Q" (*((char __iomem *)fun_base)) @@ -631,14 +728,14 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src) static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) { - if (unlikely(qm_wait_mb_ready(qm))) { + if (unlikely(hisi_qm_wait_mb_ready(qm))) { dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); goto mb_busy; } qm_mb_write(qm, mailbox); - if (unlikely(qm_wait_mb_ready(qm))) { + if (unlikely(hisi_qm_wait_mb_ready(qm))) { dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); goto mb_busy; } @@ -650,8 +747,8 @@ static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) return -EBUSY; } -static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, - bool op) +int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, + bool op) { struct qm_mailbox mailbox; int ret; @@ -667,6 +764,7 @@ static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, return ret; } +EXPORT_SYMBOL_GPL(hisi_qm_mb); static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) { @@ -707,6 +805,19 @@ static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) qm->ops->qm_db(qm, qn, cmd, index, priority); } +static void qm_disable_clock_gate(struct hisi_qm *qm) +{ + u32 val; + + /* if qm enables clock gating in Kunpeng930, qos will be inaccurate. 
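[Editor's sketch] The comment above ends mid-block; the function body that follows is a plain read-modify-write of the QM_PM_CTRL register, OR-ing in QM_IDLE_DISABLE on QM_HW_V3 parts so clock gating cannot skew QoS accounting. The generic MMIO set-bits idiom, simulated here with an ordinary variable standing in for the ioremapped register (readl()/writel() are what the real code uses):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)       (1u << (n))
#define IDLE_DISABLE BIT(9)             /* mirrors QM_IDLE_DISABLE */

/* Stand-in for the ioremapped QM_PM_CTRL register. */
static uint32_t pm_ctrl_reg = 0x00000041;

static uint32_t reg_read(volatile uint32_t *r)              { return *r; }
static void     reg_write(volatile uint32_t *r, uint32_t v) { *r = v; }

/* Read-modify-write: set one control bit without clobbering its neighbors. */
static void disable_clock_gate(void)
{
    uint32_t val = reg_read(&pm_ctrl_reg);

    val |= IDLE_DISABLE;
    reg_write(&pm_ctrl_reg, val);
}

int main(void)
{
    disable_clock_gate();
    printf("PM_CTRL = 0x%08x\n", pm_ctrl_reg);  /* prints 0x00000241 */
    return 0;
}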
*/ + if (qm->ver < QM_HW_V3) + return; + + val = readl(qm->io_base + QM_PM_CTRL); + val |= QM_IDLE_DISABLE; + writel(val, qm->io_base + QM_PM_CTRL); +} + static int qm_dev_mem_reset(struct hisi_qm *qm) { u32 val; @@ -896,27 +1007,74 @@ static void qm_set_qp_disable(struct hisi_qp *qp, int offset) *addr = 1; /* make sure setup is completed */ - mb(); + smp_wmb(); } -static irqreturn_t qm_aeq_irq(int irq, void *data) +static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id) +{ + struct hisi_qp *qp = &qm->qp_array[qp_id]; + + qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET); + hisi_qm_stop_qp(qp); + qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET); +} + +static void qm_reset_function(struct hisi_qm *qm) +{ + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); + struct device *dev = &qm->pdev->dev; + int ret; + + if (qm_check_dev_error(pf_qm)) + return; + + ret = qm_reset_prepare_ready(qm); + if (ret) { + dev_err(dev, "reset function not ready\n"); + return; + } + + ret = hisi_qm_stop(qm, QM_FLR); + if (ret) { + dev_err(dev, "failed to stop qm when reset function\n"); + goto clear_bit; + } + + ret = hisi_qm_start(qm); + if (ret) + dev_err(dev, "failed to start qm when reset function\n"); + +clear_bit: + qm_reset_bit_clear(qm); +} + +static irqreturn_t qm_aeq_thread(int irq, void *data) { struct hisi_qm *qm = data; struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; - u32 type; - - atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); - if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) - return IRQ_NONE; + u32 type, qp_id; while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT; - if (type < ARRAY_SIZE(qm_fifo_overflow)) - dev_err(&qm->pdev->dev, "%s overflow\n", - qm_fifo_overflow[type]); - else + qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK; + + switch (type) { + case QM_EQ_OVERFLOW: + dev_err(&qm->pdev->dev, "eq overflow, reset function\n"); + qm_reset_function(qm); + return IRQ_HANDLED; + case QM_CQ_OVERFLOW: + dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n", + qp_id); + fallthrough; + case QM_CQE_ERROR: + qm_disable_qp(qm, qp_id); + break; + default: dev_err(&qm->pdev->dev, "unknown error type %u\n", type); + break; + } if (qm->status.aeq_head == QM_Q_DEPTH - 1) { qm->status.aeqc_phase = !qm->status.aeqc_phase; @@ -926,13 +1084,24 @@ static irqreturn_t qm_aeq_irq(int irq, void *data) aeqe++; qm->status.aeq_head++; } - - qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); } + qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); + return IRQ_HANDLED; } +static irqreturn_t qm_aeq_irq(int irq, void *data) +{ + struct hisi_qm *qm = data; + + atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); + if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) + return IRQ_NONE; + + return IRQ_WAKE_THREAD; +} + static void qm_irq_unregister(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -988,12 +1157,14 @@ static void qm_init_prefetch(struct hisi_qm *qm) } /* + * acc_shaper_para_calc() Get the IR value by the qos formula, the return value + * is the expected qos calculated. 
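[Editor's sketch] A worked example of the shaper math this comment introduces (the formula text continues just below): with hardware parameters IR_b, IR_u and IR_s, the achievable rate is IR(Mbps) = IR_b * 2^IR_u * 8000 / (Tick * 2^IR_s), and qm_get_shaper_para() searches the (cir_b, cir_u) space, with cir_s fixed from the lookup table, for a combination whose computed rate lands within the allowed error of the request. The Tick value and search bounds below are assumed placeholders, not the Kunpeng constants, and the percent-style error check only approximates the kernel's QM_QOS_* macros.

#include <stdio.h>
#include <stdlib.h>

#define TICK      7680    /* assumed placeholder for the device tick */
#define MAX_CIR_B 67      /* assumed search bounds */
#define MAX_CIR_U 6
#define ERROR_PCT 10      /* accept results within 10% of the target */

/* IR(Mbps) = IR_b * 2^IR_u * 8000 / (Tick * 2^IR_s) */
static unsigned int shaper_ir(unsigned long b, unsigned long u, unsigned long s)
{
    return (unsigned int)((b * (1UL << u) * 8000) / (TICK * (1UL << s)));
}

/* Scan (cir_b, cir_u) for a rate close enough to the request. */
static int find_shaper_para(unsigned int target, unsigned long cir_s)
{
    for (unsigned long b = 1; b <= MAX_CIR_B; b++) {
        for (unsigned long u = 0; u <= MAX_CIR_U; u++) {
            unsigned int ir = shaper_ir(b, u, cir_s);
            unsigned int err = 100u * abs((int)ir - (int)target) / target;

            if (err <= ERROR_PCT) {
                printf("target %u Mbps -> b=%lu u=%lu s=%lu (ir=%u)\n",
                       target, b, u, cir_s, ir);
                return 0;
            }
        }
    }
    return -1;
}

int main(void)
{
    return find_shaper_para(100, 4);
}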
* the formula: * IR = X Mbps if ir = 1 means IR = 100 Mbps, if ir = 10000 means = 10Gbps * - * IR_b * (2 ^ IR_u) * 8 - * IR(Mbps) * 10 ^ -3 = ------------------------- - * Tick * (2 ^ IR_s) + * IR_b * (2 ^ IR_u) * 8000 + * IR(Mbps) = ------------------------- + * Tick * (2 ^ IR_s) */ static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s) { @@ -1003,17 +1174,28 @@ static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s) static u32 acc_shaper_calc_cbs_s(u32 ir) { + int table_size = ARRAY_SIZE(shaper_cbs_s); int i; - if (ir < typical_qos_val[0]) - return QM_SHAPER_MIN_CBS_S; + for (i = 0; i < table_size; i++) { + if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end) + return shaper_cbs_s[i].val; + } - for (i = 1; i < QM_QOS_TYPICAL_NUM; i++) { - if (ir >= typical_qos_val[i - 1] && ir < typical_qos_val[i]) - return typical_qos_cbs_s[i - 1]; + return QM_SHAPER_MIN_CBS_S; +} + +static u32 acc_shaper_calc_cir_s(u32 ir) +{ + int table_size = ARRAY_SIZE(shaper_cir_s); + int i; + + for (i = 0; i < table_size; i++) { + if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end) + return shaper_cir_s[i].val; } - return typical_qos_cbs_s[QM_QOS_TYPICAL_NUM - 1]; + return 0; } static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor) @@ -1022,25 +1204,18 @@ static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor) u32 error_rate; factor->cbs_s = acc_shaper_calc_cbs_s(ir); + cir_s = acc_shaper_calc_cir_s(ir); for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) { for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) { - for (cir_s = 0; cir_s <= QM_QOS_MAX_CIR_S; cir_s++) { - /** the formula is changed to: - * IR_b * (2 ^ IR_u) * DIVISOR_CLK - * IR(Mbps) = ------------------------- - * 768 * (2 ^ IR_s) - */ - ir_calc = acc_shaper_para_calc(cir_b, cir_u, - cir_s); - error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; - if (error_rate <= QM_QOS_MIN_ERROR_RATE) { - factor->cir_b = cir_b; - factor->cir_u = cir_u; - factor->cir_s = cir_s; - - return 0; - } + ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); + + error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; + if (error_rate <= QM_QOS_MIN_ERROR_RATE) { + factor->cir_b = cir_b; + factor->cir_u = cir_u; + factor->cir_s = cir_s; + return 0; } } } @@ -1126,10 +1301,10 @@ static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num) { + u32 qos = qm->factor[fun_num].func_qos; int ret, i; - qm->factor[fun_num].func_qos = QM_QOS_MAX_VAL; - ret = qm_get_shaper_para(QM_QOS_MAX_VAL * QM_QOS_RATE, &qm->factor[fun_num]); + ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); if (ret) { dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); return ret; @@ -1179,7 +1354,7 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) u64 sqc_vft; int ret; - ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); + ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); if (ret) return ret; @@ -1475,6 +1650,156 @@ static int qm_regs_show(struct seq_file *s, void *unused) DEFINE_SHOW_ATTRIBUTE(qm_regs); +static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm, + const struct dfx_diff_registers *cregs, int reg_len) +{ + struct dfx_diff_registers *diff_regs; + u32 j, base_offset; + int i; + + diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL); + if (!diff_regs) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < reg_len; i++) { + if (!cregs[i].reg_len) + continue; + + 
diff_regs[i].reg_offset = cregs[i].reg_offset; + diff_regs[i].reg_len = cregs[i].reg_len; + diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len, + GFP_KERNEL); + if (!diff_regs[i].regs) + goto alloc_error; + + for (j = 0; j < diff_regs[i].reg_len; j++) { + base_offset = diff_regs[i].reg_offset + + j * QM_DFX_REGS_LEN; + diff_regs[i].regs[j] = readl(qm->io_base + base_offset); + } + } + + return diff_regs; + +alloc_error: + while (i > 0) { + i--; + kfree(diff_regs[i].regs); + } + kfree(diff_regs); + return ERR_PTR(-ENOMEM); +} + +static void dfx_regs_uninit(struct hisi_qm *qm, + struct dfx_diff_registers *dregs, int reg_len) +{ + int i; + + /* Setting the pointer is NULL to prevent double free */ + for (i = 0; i < reg_len; i++) { + kfree(dregs[i].regs); + dregs[i].regs = NULL; + } + kfree(dregs); + dregs = NULL; +} + +/** + * hisi_qm_diff_regs_init() - Allocate memory for registers. + * @qm: device qm handle. + * @dregs: diff registers handle. + * @reg_len: diff registers region length. + */ +int hisi_qm_diff_regs_init(struct hisi_qm *qm, + struct dfx_diff_registers *dregs, int reg_len) +{ + if (!qm || !dregs || reg_len <= 0) + return -EINVAL; + + if (qm->fun_type != QM_HW_PF) + return 0; + + qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, + ARRAY_SIZE(qm_diff_regs)); + if (IS_ERR(qm->debug.qm_diff_regs)) + return PTR_ERR(qm->debug.qm_diff_regs); + + qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len); + if (IS_ERR(qm->debug.acc_diff_regs)) { + dfx_regs_uninit(qm, qm->debug.qm_diff_regs, + ARRAY_SIZE(qm_diff_regs)); + return PTR_ERR(qm->debug.acc_diff_regs); + } + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_init); + +/** + * hisi_qm_diff_regs_uninit() - Free memory for registers. + * @qm: device qm handle. + * @reg_len: diff registers region length. + */ +void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len) +{ + if (!qm || reg_len <= 0 || qm->fun_type != QM_HW_PF) + return; + + dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len); + dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); +} +EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_uninit); + +/** + * hisi_qm_acc_diff_regs_dump() - Dump registers's value. + * @qm: device qm handle. + * @s: Debugfs file handle. + * @dregs: diff registers handle. + * @regs_len: diff registers region length. 
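[Editor's sketch] hisi_qm_diff_regs_init() and its uninit counterpart above implement a snapshot-and-compare debugging aid: capture a baseline copy of selected register regions at init time, then later dump only the words that changed. Stripped of MMIO and debugfs, the core technique reduces to a region table, an allocated baseline, and a compare loop; all names in this sketch are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

struct diff_region {
    unsigned int offset;    /* first register index in the region */
    unsigned int len;       /* number of 32-bit registers */
    uint32_t *baseline;     /* snapshot taken at init time */
};

/* Fake register file standing in for the ioremapped BAR. */
static uint32_t regs[64];

static int region_snapshot(struct diff_region *r)
{
    r->baseline = malloc(r->len * sizeof(*r->baseline));
    if (!r->baseline)
        return -1;
    memcpy(r->baseline, &regs[r->offset], r->len * sizeof(uint32_t));
    return 0;
}

/* Print only the registers whose value moved since the snapshot. */
static void region_dump_diff(const struct diff_region *r)
{
    for (unsigned int j = 0; j < r->len; j++) {
        uint32_t now = regs[r->offset + j];

        if (now != r->baseline[j])
            printf("reg[%u] = 0x%08x ---> 0x%08x\n",
                   r->offset + j, r->baseline[j], now);
    }
}

int main(void)
{
    struct diff_region r = { .offset = 8, .len = 4 };

    if (region_snapshot(&r))
        return 1;
    regs[9] = 0xdeadbeef;           /* simulate hardware activity */
    region_dump_diff(&r);
    free(r.baseline);
    return 0;
}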
+ */ +void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s, + struct dfx_diff_registers *dregs, int regs_len) +{ + u32 j, val, base_offset; + int i, ret; + + if (!qm || !s || !dregs || regs_len <= 0) + return; + + ret = hisi_qm_get_dfx_access(qm); + if (ret) + return; + + down_read(&qm->qps_lock); + for (i = 0; i < regs_len; i++) { + if (!dregs[i].reg_len) + continue; + + for (j = 0; j < dregs[i].reg_len; j++) { + base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN; + val = readl(qm->io_base + base_offset); + if (val != dregs[i].regs[j]) + seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n", + base_offset, dregs[i].regs[j], val); + } + } + up_read(&qm->qps_lock); + + hisi_qm_put_dfx_access(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump); + +static int qm_diff_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + + hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs, + ARRAY_SIZE(qm_diff_regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(qm_diff_regs); + static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *pos) { @@ -1553,12 +1878,12 @@ static int dump_show(struct hisi_qm *qm, void *info, static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) { - return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); + return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); } static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) { - return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); + return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); } static int qm_sqc_dump(struct hisi_qm *qm, const char *s) @@ -1670,7 +1995,7 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size, if (IS_ERR(xeqc)) return PTR_ERR(xeqc); - ret = qm_mb(qm, cmd, xeqc_dma, 0, 1); + ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1); if (ret) goto err_free_ctx; @@ -2082,35 +2407,6 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) return ACC_ERR_RECOVERED; } -static u32 qm_get_hw_error_status(struct hisi_qm *qm) -{ - return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); -} - -static u32 qm_get_dev_err_status(struct hisi_qm *qm) -{ - return qm->err_ini->get_dev_hw_err_status(qm); -} - -/* Check if the error causes the master ooo block */ -static int qm_check_dev_error(struct hisi_qm *qm) -{ - u32 val, dev_val; - - if (qm->fun_type == QM_HW_VF) - return 0; - - val = qm_get_hw_error_status(qm); - dev_val = qm_get_dev_err_status(qm); - - if (qm->ver < QM_HW_V3) - return (val & QM_ECC_MBIT) || - (dev_val & qm->err_info.ecc_2bits_mask); - - return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) || - (dev_val & (~qm->err_info.dev_ce_mask)); -} - static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num) { struct qm_mailbox mailbox; @@ -2352,7 +2648,7 @@ static int qm_ping_pf(struct hisi_qm *qm, u64 cmd) static int qm_stop_qp(struct hisi_qp *qp) { - return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); + return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); } static int qm_set_msi(struct hisi_qm *qm, bool set) @@ -2539,7 +2835,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating * qp memory fails. 
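[Editor's sketch] The doc comment above describes the usual kernel convention of returning either a valid object pointer or a negated errno encoded in the pointer itself (the ERR_PTR/IS_ERR/PTR_ERR idiom), which is also how callers elsewhere in this series test sw64_clk_get() and the fallback tfms. A minimal userspace reimplementation of the idiom, for illustration only:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Encode -errno at the top of the address space, as the kernel does. */
static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static int pool_full = 1;

static void *create_qp(void)
{
    if (pool_full)
        return ERR_PTR(-EBUSY);     /* all queues taken */
    return ERR_PTR(-ENOMEM);        /* (allocation path elided) */
}

int main(void)
{
    void *qp = create_qp();

    if (IS_ERR(qp)) {
        printf("create_qp failed: %ld\n", PTR_ERR(qp));
        return 1;
    }
    return 0;
}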
*/ -struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) +static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) { struct hisi_qp *qp; int ret; @@ -2557,7 +2853,6 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) return qp; } -EXPORT_SYMBOL_GPL(hisi_qm_create_qp); /** * hisi_qm_release_qp() - Release a qp back to its qm. @@ -2565,7 +2860,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_create_qp); * * This function releases the resource of a qp. */ -void hisi_qm_release_qp(struct hisi_qp *qp) +static void hisi_qm_release_qp(struct hisi_qp *qp) { struct hisi_qm *qm = qp->qm; @@ -2583,7 +2878,6 @@ void hisi_qm_release_qp(struct hisi_qp *qp) qm_pm_put_sync(qm); } -EXPORT_SYMBOL_GPL(hisi_qm_release_qp); static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { @@ -2620,7 +2914,7 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) return -ENOMEM; } - ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE); kfree(sqc); @@ -2661,7 +2955,7 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) return -ENOMEM; } - ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE); kfree(cqc); @@ -2756,6 +3050,7 @@ static int qm_drain_qp(struct hisi_qp *qp) { size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc); struct hisi_qm *qm = qp->qm; + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); struct device *dev = &qm->pdev->dev; struct qm_sqc *sqc; struct qm_cqc *cqc; @@ -2764,7 +3059,7 @@ static int qm_drain_qp(struct hisi_qp *qp) void *addr; /* No need to judge if master OOO is blocked. */ - if (qm_check_dev_error(qm)) + if (qm_check_dev_error(pf_qm)) return 0; /* Kunpeng930 supports drain qp by device */ @@ -2932,9 +3227,17 @@ static void qm_qp_event_notifier(struct hisi_qp *qp) wake_up_interruptible(&qp->uacce_q->wait); } + /* This function returns free number of qp in qm. */ static int hisi_qm_get_available_instances(struct uacce_device *uacce) { - return hisi_qm_get_free_qp_num(uacce->priv); + struct hisi_qm *qm = uacce->priv; + int ret; + + down_read(&qm->qps_lock); + ret = qm->qp_num - qm->qp_in_used; + up_read(&qm->qps_lock); + + return ret; } static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) @@ -2971,7 +3274,6 @@ static void hisi_qm_uacce_put_queue(struct uacce_queue *q) { struct hisi_qp *qp = q->priv; - hisi_qm_cache_wb(qp->qm); hisi_qm_release_qp(qp); } @@ -3118,7 +3420,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm) }; int ret; - ret = strscpy(interface.name, pdev->driver->name, + ret = strscpy(interface.name, dev_driver_string(&pdev->dev), sizeof(interface.name)); if (ret < 0) return -ENAMETOOLONG; @@ -3246,24 +3548,6 @@ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) } EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); -/** - * hisi_qm_get_free_qp_num() - Get free number of qp in qm. - * @qm: The qm which want to get free qp. - * - * This function return free number of qp in qm. 
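[Editor's sketch] hisi_qm_get_available_instances() above folds the old hisi_qm_get_free_qp_num() helper into the uacce callback: take qps_lock for reading, compute qp_num - qp_in_used, drop the lock. The same pattern with a pthread read-write lock (the struct and field names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct qm {
    pthread_rwlock_t qps_lock;
    int qp_num;                     /* total queue pairs */
    int qp_in_used;                 /* currently allocated */
};

/* Readers may run concurrently; writers (alloc/release) take it exclusive. */
static int get_available_instances(struct qm *qm)
{
    int free_qps;

    pthread_rwlock_rdlock(&qm->qps_lock);
    free_qps = qm->qp_num - qm->qp_in_used;
    pthread_rwlock_unlock(&qm->qps_lock);

    return free_qps;
}

int main(void)
{
    struct qm qm = { .qp_num = 1024, .qp_in_used = 40 };

    pthread_rwlock_init(&qm.qps_lock, NULL);
    printf("free qps: %d\n", get_available_instances(&qm));
    pthread_rwlock_destroy(&qm.qps_lock);
    return 0;
}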
- */ -int hisi_qm_get_free_qp_num(struct hisi_qm *qm) -{ - int ret; - - down_read(&qm->qps_lock); - ret = qm->qp_num - qm->qp_in_used; - up_read(&qm->qps_lock); - - return ret; -} -EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num); - static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) { struct device *dev = &qm->pdev->dev; @@ -3308,6 +3592,17 @@ static void hisi_qm_set_state(struct hisi_qm *qm, enum vf_state state) writel(state, qm->io_base + QM_VF_STATE); } +static void qm_last_regs_uninit(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + + if (qm->fun_type == QM_HW_VF || !debug->qm_last_words) + return; + + kfree(debug->qm_last_words); + debug->qm_last_words = NULL; +} + static void hisi_qm_pre_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -3389,6 +3684,8 @@ void hisi_qm_uninit(struct hisi_qm *qm) struct pci_dev *pdev = qm->pdev; struct device *dev = &pdev->dev; + qm_last_regs_uninit(qm); + qm_cmd_uninit(qm); kfree(qm->factor); down_write(&qm->qps_lock); @@ -3406,8 +3703,8 @@ void hisi_qm_uninit(struct hisi_qm *qm) dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); } - hisi_qm_set_state(qm, VF_NOT_READY); + up_write(&qm->qps_lock); qm_irq_unregister(qm); hisi_qm_pci_uninit(qm); @@ -3415,8 +3712,6 @@ void hisi_qm_uninit(struct hisi_qm *qm) uacce_remove(qm->uacce); qm->uacce = NULL; } - - up_write(&qm->qps_lock); } EXPORT_SYMBOL_GPL(hisi_qm_uninit); @@ -3427,12 +3722,11 @@ EXPORT_SYMBOL_GPL(hisi_qm_uninit); * @number: The number of queues in vft. * * We can allocate multiple queues to a qm by configuring virtual function - * table. We get related configures by this function. Normally, we call this * function in VF driver to get the queue information. * * qm hw v1 does not support this interface. */ -int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) +static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) { if (!base || !number) return -EINVAL; @@ -3444,7 +3738,6 @@ int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) return qm->ops->get_vft(qm, base, number); } -EXPORT_SYMBOL_GPL(hisi_qm_get_vft); /** * hisi_qm_set_vft() - Set vft to a qm. 
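[Editor's sketch] qm_last_regs_uninit() above follows the defensive kfree-then-NULL pattern so that a second uninit call (for instance, one from an error path and one from remove) is harmless. The same guard in portable C:

#include <stdlib.h>

struct qm_debug {
    unsigned int *qm_last_words;
};

/* Free the snapshot and clear the pointer so repeat calls are no-ops. */
static void last_regs_uninit(struct qm_debug *debug)
{
    if (!debug->qm_last_words)
        return;                     /* already released, or never allocated */

    free(debug->qm_last_words);
    debug->qm_last_words = NULL;
}

int main(void)
{
    struct qm_debug debug = { 0 };

    debug.qm_last_words = calloc(16, sizeof(unsigned int));
    last_regs_uninit(&debug);
    last_regs_uninit(&debug);       /* safe: pointer was NULLed */
    return 0;
}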
@@ -3482,6 +3775,22 @@ static void qm_init_eq_aeq_status(struct hisi_qm *qm) status->aeqc_phase = true; } +static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm) +{ + /* Clear eq/aeq interrupt source */ + qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); + qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); + + writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); + writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); +} + +static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) +{ + writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); + writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); +} + static int qm_eq_ctx_cfg(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; @@ -3506,7 +3815,7 @@ static int qm_eq_ctx_cfg(struct hisi_qm *qm) return -ENOMEM; } - ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE); kfree(eqc); @@ -3535,7 +3844,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm) return -ENOMEM; } - ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE); kfree(aeqc); @@ -3565,10 +3874,6 @@ static int __hisi_qm_start(struct hisi_qm *qm) WARN_ON(!qm->qdma.va); if (qm->fun_type == QM_HW_PF) { - ret = qm_dev_mem_reset(qm); - if (ret) - return ret; - ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); if (ret) return ret; @@ -3578,18 +3883,16 @@ static int __hisi_qm_start(struct hisi_qm *qm) if (ret) return ret; - ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); if (ret) return ret; - ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); + ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); if (ret) return ret; qm_init_prefetch(qm); - - writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); - writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); + qm_enable_eq_aeq_interrupts(qm); return 0; } @@ -3699,7 +4002,7 @@ static void qm_clear_queues(struct hisi_qm *qm) for (i = 0; i < qm->qp_num; i++) { qp = &qm->qp_array[i]; - if (qp->is_resetting) + if (qp->is_in_kernel && qp->is_resetting) memset(qp->qdma.va, 0, qp->qdma.size); } @@ -3741,10 +4044,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); } - /* Mask eq and aeq irq */ - writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); - writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); - + qm_disable_eq_aeq_interrupts(qm); if (qm->fun_type == QM_HW_PF) { ret = hisi_qm_set_vft(qm, 0, 0, 0); if (ret < 0) { @@ -4159,7 +4459,7 @@ static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) static int qm_vf_read_qos(struct hisi_qm *qm) { int cnt = 0; - int ret; + int ret = -EINVAL; /* reset mailbox qos val */ qm->mb_qos = 0; @@ -4244,66 +4544,69 @@ static ssize_t qm_qos_value_init(const char *buf, unsigned long *val) return 0; } +static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, + unsigned long *val, + unsigned int *fun_index) +{ + char tbuf_bdf[QM_DBG_READ_LEN] = {0}; + char val_buf[QM_QOS_VAL_MAX_LEN] = {0}; + u32 tmp1, device, function; + int ret, bus; + + ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf); + if (ret != QM_QOS_PARAM_NUM) + return -EINVAL; + + ret = qm_qos_value_init(val_buf, val); + if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) { + pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); + return -EINVAL; + } + + ret = sscanf(tbuf_bdf, "%u:%x:%u.%u", &tmp1, 
&bus, &device, &function); + if (ret != QM_QOS_BDF_PARAM_NUM) { + pci_err(qm->pdev, "input pci bdf value is error!\n"); + return -EINVAL; + } + + *fun_index = PCI_DEVFN(device, function); + + return 0; +} + static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct hisi_qm *qm = filp->private_data; char tbuf[QM_DBG_READ_LEN]; - int tmp1, bus, device, function; - char tbuf_bdf[QM_DBG_READ_LEN] = {0}; - char val_buf[QM_QOS_VAL_MAX_LEN] = {0}; unsigned int fun_index; - unsigned long val = 0; + unsigned long val; int len, ret; if (qm->fun_type == QM_HW_VF) return -EINVAL; - /* Mailbox and reset cannot be operated at the same time */ - if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { - pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); - return -EAGAIN; - } - - if (*pos != 0) { - ret = 0; - goto err_get_status; - } + if (*pos != 0) + return 0; - if (count >= QM_DBG_READ_LEN) { - ret = -ENOSPC; - goto err_get_status; - } + if (count >= QM_DBG_READ_LEN) + return -ENOSPC; len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); - if (len < 0) { - ret = len; - goto err_get_status; - } + if (len < 0) + return len; tbuf[len] = '\0'; - ret = sscanf(tbuf, "%s %s", tbuf_bdf, val_buf); - if (ret != QM_QOS_PARAM_NUM) { - ret = -EINVAL; - goto err_get_status; - } - - ret = qm_qos_value_init(val_buf, &val); - if (val == 0 || val > QM_QOS_MAX_VAL || ret) { - pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); - ret = -EINVAL; - goto err_get_status; - } + ret = qm_get_qos_value(qm, tbuf, &val, &fun_index); + if (ret) + return ret; - ret = sscanf(tbuf_bdf, "%d:%x:%d.%d", &tmp1, &bus, &device, &function); - if (ret != QM_QOS_BDF_PARAM_NUM) { - pci_err(qm->pdev, "input pci bdf value is error!\n"); - ret = -EINVAL; - goto err_get_status; + /* Mailbox and reset cannot be operated at the same time */ + if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { + pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); + return -EAGAIN; } - fun_index = device * 8 + function; - ret = qm_pm_get_sync(qm); if (ret) { ret = -EINVAL; @@ -4317,6 +4620,8 @@ static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, goto err_put_sync; } + pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", + fun_index, val); ret = count; err_put_sync: @@ -4357,6 +4662,7 @@ static void hisi_qm_set_algqos_init(struct hisi_qm *qm) */ void hisi_qm_debug_init(struct hisi_qm *qm) { + struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs; struct qm_dfx *dfx = &qm->debug.dfx; struct dentry *qm_d; void *data; @@ -4372,6 +4678,10 @@ void hisi_qm_debug_init(struct hisi_qm *qm) qm_create_debugfs_file(qm, qm->debug.qm_d, i); } + if (qm_regs) + debugfs_create_file("diff_regs", 0444, qm->debug.qm_d, + qm, &qm_diff_regs_fops); + debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops); @@ -4741,46 +5051,6 @@ static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd, return ret; } -static int qm_wait_reset_finish(struct hisi_qm *qm) -{ - int delay = 0; - - /* All reset requests need to be queued for processing */ - while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { - msleep(++delay); - if (delay > QM_RESET_WAIT_TIMEOUT) - return -EBUSY; - } - - return 0; -} - -static int qm_reset_prepare_ready(struct hisi_qm *qm) -{ - struct pci_dev *pdev = qm->pdev; - struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); - - /* - * PF and VF on host doesnot 
support resetting at the - * same time on Kunpeng920. - */ - if (qm->ver < QM_HW_V3) - return qm_wait_reset_finish(pf_qm); - - return qm_wait_reset_finish(qm); -} - -static void qm_reset_bit_clear(struct hisi_qm *qm) -{ - struct pci_dev *pdev = qm->pdev; - struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); - - if (qm->ver < QM_HW_V3) - clear_bit(QM_RESETTING, &pf_qm->misc_ctl); - - clear_bit(QM_RESETTING, &qm->misc_ctl); -} - static int qm_controller_reset_prepare(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5066,6 +5336,12 @@ static int qm_controller_reset_done(struct hisi_qm *qm) if (qm->err_ini->open_axi_master_ooo) qm->err_ini->open_axi_master_ooo(qm); + ret = qm_dev_mem_reset(qm); + if (ret) { + pci_err(pdev, "failed to reset device memory\n"); + return ret; + } + ret = qm_restart(qm); if (ret) { pci_err(pdev, "Failed to start QM!\n"); @@ -5088,6 +5364,24 @@ static int qm_controller_reset_done(struct hisi_qm *qm) return 0; } +static void qm_show_last_dfx_regs(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + struct pci_dev *pdev = qm->pdev; + u32 val; + int i; + + if (qm->fun_type == QM_HW_VF || !debug->qm_last_words) + return; + + for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) { + val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset); + if (debug->qm_last_words[i] != val) + pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n", + qm_dfx_regs[i].name, debug->qm_last_words[i], val); + } +} + static int qm_controller_reset(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5103,6 +5397,10 @@ static int qm_controller_reset(struct hisi_qm *qm) return ret; } + qm_show_last_dfx_regs(qm); + if (qm->err_ini->show_last_dfx_regs) + qm->err_ini->show_last_dfx_regs(qm); + ret = qm_soft_reset(qm); if (ret) { pci_err(pdev, "Controller reset failed (%d)\n", ret); @@ -5280,8 +5578,10 @@ static int qm_irq_register(struct hisi_qm *qm) return ret; if (qm->ver > QM_HW_V1) { - ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), - qm_aeq_irq, 0, qm->dev_name, qm); + ret = request_threaded_irq(pci_irq_vector(pdev, + QM_AEQ_EVENT_IRQ_VECTOR), + qm_aeq_irq, qm_aeq_thread, + 0, qm->dev_name, qm); if (ret) goto err_aeq_irq; @@ -5370,11 +5670,14 @@ static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, atomic_set(&qm->status.flags, QM_STOP); cmd = QM_VF_PREPARE_FAIL; goto err_prepare; + } else { + goto out; } err_prepare: hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); +out: pci_save_state(pdev); ret = qm->ops->ping_pf(qm, cmd); if (ret) @@ -5762,13 +6065,15 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm) static int hisi_qm_memory_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; - int ret, total_vfs; + int ret, total_func, i; size_t off = 0; - total_vfs = pci_sriov_get_totalvfs(qm->pdev); - qm->factor = kcalloc(total_vfs + 1, sizeof(struct qm_shaper_factor), GFP_KERNEL); + total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; + qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); if (!qm->factor) return -ENOMEM; + for (i = 0; i < total_func; i++) + qm->factor[i].func_qos = QM_QOS_MAX_VAL; #define QM_INIT_BUF(qm, type, num) do { \ (qm)->type = ((qm)->qdma.va + (off)); \ @@ -5808,6 +6113,26 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) return ret; } +static void qm_last_regs_init(struct hisi_qm *qm) +{ + int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs); + struct qm_debug *debug = &qm->debug; + int i; + + if (qm->fun_type == QM_HW_VF) + return; + + debug->qm_last_words 
= kcalloc(dfx_regs_num, sizeof(unsigned int), + GFP_KERNEL); + if (!debug->qm_last_words) + return; + + for (i = 0; i < dfx_regs_num; i++) { + debug->qm_last_words[i] = readl_relaxed(qm->io_base + + qm_dfx_regs[i].offset); + } +} + /** * hisi_qm_init() - Initialize configures about qm. * @qm: The qm needing init. @@ -5837,6 +6162,15 @@ int hisi_qm_init(struct hisi_qm *qm) goto err_irq_register; } + if (qm->fun_type == QM_HW_PF) { + qm_disable_clock_gate(qm); + ret = qm_dev_mem_reset(qm); + if (ret) { + dev_err(dev, "failed to reset device memory\n"); + goto err_irq_register; + } + } + if (qm->mode == UACCE_MODE_SVA) { ret = qm_alloc_uacce(qm); if (ret < 0) @@ -5851,6 +6185,8 @@ int hisi_qm_init(struct hisi_qm *qm) qm_cmd_init(qm); atomic_set(&qm->status.flags, QM_INIT); + qm_last_regs_init(qm); + return 0; err_alloc_uacce: @@ -5994,8 +6330,12 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm) qm_cmd_init(qm); hisi_qm_dev_err_init(qm); + qm_disable_clock_gate(qm); + ret = qm_dev_mem_reset(qm); + if (ret) + pci_err(pdev, "failed to reset device memory\n"); - return 0; + return ret; } /** @@ -6050,7 +6390,7 @@ int hisi_qm_resume(struct device *dev) if (ret) pci_err(pdev, "failed to start qm(%d)\n", ret); - return 0; + return ret; } EXPORT_SYMBOL_GPL(hisi_qm_resume); diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index d97cf02b1df7509ebf77d9a888569b17de57ed8e..a44c8dba3cda65e2cc59092a3f1b40acf3153984 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -4,7 +4,7 @@ #ifndef __HISI_SEC_V2_H #define __HISI_SEC_V2_H -#include "../qm.h" +#include #include "sec_crypto.h" /* Algorithm resource per hardware SEC queue */ @@ -119,7 +119,7 @@ struct sec_qp_ctx { struct idr req_idr; struct sec_alg_res res[QM_Q_DEPTH]; struct sec_ctx *ctx; - struct mutex req_lock; + spinlock_t req_lock; struct list_head backlog; struct hisi_acc_sgl_pool *c_in_pool; struct hisi_acc_sgl_pool *c_out_pool; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index f77be0e6cf6530131e393cdb9a21b309fc8f7065..71dfa7db639478ab8850245e6dc72ebd7736c6c9 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -127,11 +127,11 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) { int req_id; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, QM_Q_DEPTH, GFP_ATOMIC); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); if (unlikely(req_id < 0)) { dev_err(req->ctx->dev, "alloc req id fail!\n"); return req_id; @@ -156,9 +156,9 @@ static void sec_free_req_id(struct sec_req *req) qp_ctx->req_list[req_id] = NULL; req->qp_ctx = NULL; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); idr_remove(&qp_ctx->req_idr, req_id); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); } static u8 pre_parse_finished_bd(struct bd_status *status, void *resp) @@ -240,7 +240,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) if (unlikely(type != type_supported)) { atomic64_inc(&dfx->err_bd_cnt); - pr_err("err bd type [%d]\n", type); + pr_err("err bd type [%u]\n", type); return; } @@ -273,7 +273,7 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) return -EBUSY; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, 
&req->sec_sqe); if (ctx->fake_req_limit <= @@ -281,10 +281,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) list_add_tail(&req->backlog_head, &qp_ctx->backlog); atomic64_inc(&ctx->sec->debug.dfx.send_cnt); atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); return -EBUSY; } - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); if (unlikely(ret == -EBUSY)) return -ENOBUFS; @@ -487,7 +487,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, qp->req_cb = sec_req_cb; - mutex_init(&qp_ctx->req_lock); + spin_lock_init(&qp_ctx->req_lock); idr_init(&qp_ctx->req_idr); INIT_LIST_HEAD(&qp_ctx->backlog); @@ -644,13 +644,15 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm) struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; c_ctx->fallback = false; + + /* Currently, only XTS mode need fallback tfm when using 192bit key */ if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ))) return 0; c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(c_ctx->fbtfm)) { - pr_err("failed to alloc fallback tfm!\n"); + pr_err("failed to alloc xts mode fallback tfm!\n"); return PTR_ERR(c_ctx->fbtfm); } @@ -811,7 +813,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } memcpy(c_ctx->c_key, key, keylen); - if (c_ctx->fallback) { + if (c_ctx->fallback && c_ctx->fbtfm) { ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); if (ret) { dev_err(dev, "failed to set fallback skcipher key!\n"); @@ -1380,7 +1382,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, { struct sec_req *backlog_req = NULL; - mutex_lock(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->req_lock); if (ctx->fake_req_limit >= atomic_read(&qp_ctx->qp->qp_status.used) && !list_empty(&qp_ctx->backlog)) { @@ -1388,7 +1390,7 @@ static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, typeof(*backlog_req), backlog_head); list_del(&backlog_req->backlog_head); } - mutex_unlock(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->req_lock); return backlog_req; } @@ -2039,13 +2041,12 @@ static int sec_skcipher_soft_crypto(struct sec_ctx *ctx, struct skcipher_request *sreq, bool encrypt) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm); struct device *dev = ctx->dev; int ret; - SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm); - if (!c_ctx->fbtfm) { - dev_err(dev, "failed to check fallback tfm\n"); + dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n"); return -EINVAL; } @@ -2112,7 +2113,6 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ .cra_flags = CRYPTO_ALG_ASYNC |\ - CRYPTO_ALG_ALLOCATES_MEMORY |\ CRYPTO_ALG_NEED_FALLBACK,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ @@ -2267,7 +2267,6 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) if (ctx->sec->qm.ver == QM_HW_V2) { if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && req->cryptlen <= authsize))) { - dev_err(dev, "Kunpeng920 not support 0 length!\n"); ctx->a_ctx.fallback = true; return -EINVAL; } @@ -2295,9 +2294,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, struct aead_request *aead_req, bool encrypt) { - struct aead_request *subreq = aead_request_ctx(aead_req); struct sec_auth_ctx *a_ctx = &ctx->a_ctx; struct device *dev = ctx->dev; + struct 
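The hunks above tidy the XTS software-fallback path (on-stack sync request, ratelimited error message). The general shape of a sync-skcipher fallback, sketched with hypothetical names and assuming fbtfm was allocated with CRYPTO_ALG_NEED_FALLBACK:

#include <crypto/skcipher.h>

static int my_soft_crypt(struct crypto_sync_skcipher *fbtfm,
			 struct skcipher_request *req, bool encrypt)
{
	/* Stack buffer large enough for any sync skcipher's reqsize. */
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fbtfm);

	skcipher_request_set_sync_tfm(subreq, fbtfm);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);

	return encrypt ? crypto_skcipher_encrypt(subreq) :
			 crypto_skcipher_decrypt(subreq);
}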
aead_request *subreq; + int ret; /* Kunpeng920 aead mode not support input 0 size */ if (!a_ctx->fallback_aead_tfm) { @@ -2305,6 +2305,10 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, return -EINVAL; } + subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL); + if (!subreq) + return -ENOMEM; + aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm); aead_request_set_callback(subreq, aead_req->base.flags, aead_req->base.complete, aead_req->base.data); @@ -2312,8 +2316,13 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, aead_req->cryptlen, aead_req->iv); aead_request_set_ad(subreq, aead_req->assoclen); - return encrypt ? crypto_aead_encrypt(subreq) : - crypto_aead_decrypt(subreq); + if (encrypt) + ret = crypto_aead_encrypt(subreq); + else + ret = crypto_aead_decrypt(subreq); + aead_request_free(subreq); + + return ret; } static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) @@ -2356,7 +2365,6 @@ static int sec_aead_decrypt(struct aead_request *a_req) .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ .cra_flags = CRYPTO_ALG_ASYNC |\ - CRYPTO_ALG_ALLOCATES_MEMORY |\ CRYPTO_ALG_NEED_FALLBACK,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 90551bf38b523a3e3f67974c93a4a855e73de7d5..eb6474d3f2cc8eb403df1b6058667241c1e0d32d 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -20,8 +20,7 @@ #define SEC_VF_NUM 63 #define SEC_QUEUE_NUM_V1 4096 -#define SEC_PF_PCI_DEVICE_ID 0xa255 -#define SEC_VF_PCI_DEVICE_ID 0xa256 +#define PCI_DEVICE_ID_HUAWEI_SEC_PF 0xa255 #define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF #define SEC_BD_ERR_CHK_EN1 0x7ffff7fd @@ -90,6 +89,10 @@ SEC_USER1_WB_DATA_SSV) #define SEC_USER1_SMMU_SVA (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET) #define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET) +#define SEC_INTERFACE_USER_CTRL0_REG_V3 0x302220 +#define SEC_INTERFACE_USER_CTRL1_REG_V3 0x302224 +#define SEC_USER1_SMMU_NORMAL_V3 (BIT(23) | BIT(17) | BIT(11) | BIT(5)) +#define SEC_USER1_SMMU_MASK_V3 0xFF79E79E #define SEC_CORE_INT_STATUS_M_ECC BIT(2) #define SEC_PREFETCH_CFG 0x301130 @@ -105,7 +108,16 @@ #define SEC_SQE_MASK_OFFSET 64 #define SEC_SQE_MASK_LEN 48 -#define SEC_SHAPER_TYPE_RATE 128 +#define SEC_SHAPER_TYPE_RATE 400 + +#define SEC_DFX_BASE 0x301000 +#define SEC_DFX_CORE 0x302100 +#define SEC_DFX_COMMON1 0x301600 +#define SEC_DFX_COMMON2 0x301C00 +#define SEC_DFX_BASE_LEN 0x9D +#define SEC_DFX_CORE_LEN 0x32B +#define SEC_DFX_COMMON1_LEN 0x45 +#define SEC_DFX_COMMON2_LEN 0xBA struct sec_hw_error { u32 int_msk; @@ -223,9 +235,37 @@ static const struct debugfs_reg32 sec_dfx_regs[] = { {"SEC_BD_SAA8 ", 0x301C40}, }; +/* define the SEC's dfx regs region and region length */ +static struct dfx_diff_registers sec_diff_regs[] = { + { + .reg_offset = SEC_DFX_BASE, + .reg_len = SEC_DFX_BASE_LEN, + }, { + .reg_offset = SEC_DFX_COMMON1, + .reg_len = SEC_DFX_COMMON1_LEN, + }, { + .reg_offset = SEC_DFX_COMMON2, + .reg_len = SEC_DFX_COMMON2_LEN, + }, { + .reg_offset = SEC_DFX_CORE, + .reg_len = SEC_DFX_CORE_LEN, + }, +}; + +static int sec_diff_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + + hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs, + ARRAY_SIZE(sec_diff_regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(sec_diff_regs); + static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp) { - return 
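sec_aead_soft_crypto() above stops carving the fallback request out of the parent's request context and heap-allocates it instead, since the fallback tfm's reqsize need not fit there. The same pattern as a standalone sketch:

#include <crypto/aead.h>

static int my_aead_fallback(struct crypto_aead *fb_tfm,
			    struct aead_request *req, bool encrypt)
{
	struct aead_request *subreq;
	int ret;

	/* Allocation is sized for the fallback tfm's own reqsize. */
	subreq = aead_request_alloc(fb_tfm, GFP_KERNEL);
	if (!subreq)
		return -ENOMEM;

	aead_request_set_tfm(subreq, fb_tfm);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	ret = encrypt ? crypto_aead_encrypt(subreq) :
			crypto_aead_decrypt(subreq);
	aead_request_free(subreq);
	return ret;
}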
q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID); + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF); } static const struct kernel_param_ops sec_pf_q_num_ops = { @@ -313,8 +353,8 @@ module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444); MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); static const struct pci_device_id sec_dev_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) }, - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_PF) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) }, { 0, } }; MODULE_DEVICE_TABLE(pci, sec_dev_ids); @@ -335,6 +375,41 @@ static void sec_set_endian(struct hisi_qm *qm) writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); } +static void sec_engine_sva_config(struct hisi_qm *qm) +{ + u32 reg; + + if (qm->ver > QM_HW_V2) { + reg = readl_relaxed(qm->io_base + + SEC_INTERFACE_USER_CTRL0_REG_V3); + reg |= SEC_USER0_SMMU_NORMAL; + writel_relaxed(reg, qm->io_base + + SEC_INTERFACE_USER_CTRL0_REG_V3); + + reg = readl_relaxed(qm->io_base + + SEC_INTERFACE_USER_CTRL1_REG_V3); + reg &= SEC_USER1_SMMU_MASK_V3; + reg |= SEC_USER1_SMMU_NORMAL_V3; + writel_relaxed(reg, qm->io_base + + SEC_INTERFACE_USER_CTRL1_REG_V3); + } else { + reg = readl_relaxed(qm->io_base + + SEC_INTERFACE_USER_CTRL0_REG); + reg |= SEC_USER0_SMMU_NORMAL; + writel_relaxed(reg, qm->io_base + + SEC_INTERFACE_USER_CTRL0_REG); + reg = readl_relaxed(qm->io_base + + SEC_INTERFACE_USER_CTRL1_REG); + reg &= SEC_USER1_SMMU_MASK; + if (qm->use_sva) + reg |= SEC_USER1_SMMU_SVA; + else + reg |= SEC_USER1_SMMU_NORMAL; + writel_relaxed(reg, qm->io_base + + SEC_INTERFACE_USER_CTRL1_REG); + } +} + static void sec_open_sva_prefetch(struct hisi_qm *qm) { u32 val; @@ -426,26 +501,18 @@ static int sec_engine_init(struct hisi_qm *qm) reg |= (0x1 << SEC_TRNG_EN_SHIFT); writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); - reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); - reg |= SEC_USER0_SMMU_NORMAL; - writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); - - reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); - reg &= SEC_USER1_SMMU_MASK; - if (qm->use_sva && qm->ver == QM_HW_V2) - reg |= SEC_USER1_SMMU_SVA; - else - reg |= SEC_USER1_SMMU_NORMAL; - writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); + sec_engine_sva_config(qm); writel(SEC_SINGLE_PORT_MAX_TRANS, qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); - /* Enable sm4 extra mode, as ctr/ecb */ - writel_relaxed(SEC_BD_ERR_CHK_EN0, - qm->io_base + SEC_BD_ERR_CHK_EN_REG0); + /* HW V2 enable sm4 extra mode, as ctr/ecb */ + if (qm->ver < QM_HW_V3) + writel_relaxed(SEC_BD_ERR_CHK_EN0, + qm->io_base + SEC_BD_ERR_CHK_EN_REG0); + /* Enable sm4 xts mode multiple iv */ writel_relaxed(SEC_BD_ERR_CHK_EN1, qm->io_base + SEC_BD_ERR_CHK_EN_REG1); @@ -699,6 +766,7 @@ DEFINE_SHOW_ATTRIBUTE(sec_regs); static int sec_core_debug_init(struct hisi_qm *qm) { + struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs; struct sec_dev *sec = container_of(qm, struct sec_dev, qm); struct device *dev = &qm->pdev->dev; struct sec_dfx *dfx = &sec->debug.dfx; @@ -717,8 +785,11 @@ static int sec_core_debug_init(struct hisi_qm *qm) regset->base = qm->io_base; regset->dev = dev; - if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) + if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops); + if (qm->fun_type == QM_HW_PF && 
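The new diff_regs entries lean on stock seq_file plumbing: a show() callback plus DEFINE_SHOW_ATTRIBUTE() to generate the fops, handed to debugfs_create_file() with the device as private data. A minimal sketch with a hypothetical my_dev:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct my_dev {
	unsigned int reset_count;
};

static int my_stats_show(struct seq_file *s, void *unused)
{
	struct my_dev *mydev = s->private;

	seq_printf(s, "resets: %u\n", mydev->reset_count);
	return 0;
}
/* Generates my_stats_open() and my_stats_fops via single_open(). */
DEFINE_SHOW_ATTRIBUTE(my_stats);

static void my_debugfs_init(struct my_dev *mydev, struct dentry *root)
{
	debugfs_create_file("stats", 0444, root, mydev, &my_stats_fops);
}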
sec_regs) + debugfs_create_file("diff_regs", 0444, tmp_d, + qm, &sec_diff_regs_fops); for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) { atomic64_t *data = (atomic64_t *)((uintptr_t)dfx + @@ -735,7 +806,7 @@ static int sec_debug_init(struct hisi_qm *qm) struct sec_dev *sec = container_of(qm, struct sec_dev, qm); int i; - if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { + if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) { for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) { spin_lock_init(&sec->debug.files[i].lock); sec->debug.files[i].index = i; @@ -760,6 +831,14 @@ static int sec_debugfs_init(struct hisi_qm *qm) sec_debugfs_root); qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; + + ret = hisi_qm_diff_regs_init(qm, sec_diff_regs, + ARRAY_SIZE(sec_diff_regs)); + if (ret) { + dev_warn(dev, "Failed to init SEC diff regs!\n"); + goto debugfs_remove; + } + hisi_qm_debug_init(qm); ret = sec_debug_init(qm); @@ -769,15 +848,66 @@ static int sec_debugfs_init(struct hisi_qm *qm) return 0; failed_to_create: + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); +debugfs_remove: debugfs_remove_recursive(sec_debugfs_root); return ret; } static void sec_debugfs_exit(struct hisi_qm *qm) { + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); + debugfs_remove_recursive(qm->debug.debug_root); } +static int sec_show_last_regs_init(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + int i; + + debug->last_words = kcalloc(ARRAY_SIZE(sec_dfx_regs), + sizeof(unsigned int), GFP_KERNEL); + if (!debug->last_words) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) + debug->last_words[i] = readl_relaxed(qm->io_base + + sec_dfx_regs[i].offset); + + return 0; +} + +static void sec_show_last_regs_uninit(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + kfree(debug->last_words); + debug->last_words = NULL; +} + +static void sec_show_last_dfx_regs(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + struct pci_dev *pdev = qm->pdev; + u32 val; + int i; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + /* dumps last word of the debugging registers during controller reset */ + for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) { + val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset); + if (val != debug->last_words[i]) + pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n", + sec_dfx_regs[i].name, debug->last_words[i], val); + } +} + static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) { const struct sec_hw_error *errs = sec_hw_errors; @@ -844,6 +974,7 @@ static const struct hisi_qm_err_ini sec_err_ini = { .open_axi_master_ooo = sec_open_axi_master_ooo, .open_sva_prefetch = sec_open_sva_prefetch, .close_sva_prefetch = sec_close_sva_prefetch, + .show_last_dfx_regs = sec_show_last_dfx_regs, .err_info_init = sec_err_info_init, }; @@ -862,8 +993,11 @@ static int sec_pf_probe_init(struct sec_dev *sec) sec_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); sec_debug_regs_clear(qm); + ret = sec_show_last_regs_init(qm); + if (ret) + pci_err(qm->pdev, "Failed to init last word regs!\n"); - return 0; + return ret; } static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) @@ -877,7 +1011,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->sqe_size = SEC_SQE_SIZE; qm->dev_name = sec_name; - qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ? 
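sec_debugfs_init() above gains a second failure label so that each init step is unwound in reverse order. The canonical shape of that error path, with hypothetical my_a/my_b stages:

#include <linux/errno.h>

int my_a_init(void);
void my_a_uninit(void);
int my_b_init(void);

/* Init A then B; on B's failure, unwind A before propagating. */
static int my_init(void)
{
	int ret;

	ret = my_a_init();
	if (ret)
		return ret;

	ret = my_b_init();
	if (ret)
		goto err_uninit_a;

	return 0;

err_uninit_a:
	my_a_uninit();
	return ret;
}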
+ qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) ? QM_HW_PF : QM_HW_VF; if (qm->fun_type == QM_HW_PF) { qm->qp_base = SEC_PF_DEF_Q_BASE; @@ -1037,6 +1171,7 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) sec_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_probe_uninit: + sec_show_last_regs_uninit(qm); sec_probe_uninit(qm); err_qm_uninit: sec_qm_uninit(qm); @@ -1061,6 +1196,7 @@ static void sec_remove(struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF) sec_debug_regs_clear(qm); + sec_show_last_regs_uninit(qm); sec_probe_uninit(qm); diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index 057273769f264eebe026942718ac26de93bedf83..2b6f2281cfd6c02a3ea3e4369f62279b4fd96894 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -1,9 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ +#include #include +#include #include #include -#include "qm.h" #define HISI_ACC_SGL_SGE_NR_MIN 1 #define HISI_ACC_SGL_NR_MAX 256 @@ -64,8 +65,9 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX) return ERR_PTR(-EINVAL); - sgl_size = sizeof(struct acc_hw_sge) * sge_nr + - sizeof(struct hisi_acc_hw_sgl); + sgl_size = ALIGN(sizeof(struct acc_hw_sge) * sge_nr + + sizeof(struct hisi_acc_hw_sgl), + HISI_ACC_SGL_ALIGN_SIZE); /* * the pool may allocate a block of memory of size PAGE_SIZE * 2^(MAX_ORDER - 1), diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 517fdbdff3ea476c81c327b2295638b1b646b1d8..3dfd3bac5a33552a7fe5e85a290a085db1b3c027 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -7,7 +7,7 @@ #define pr_fmt(fmt) "hisi_zip: " fmt #include -#include "../qm.h" +#include enum hisi_zip_error_type { /* negative compression */ diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 9520a4113c81e5fba635b54b48cc2cc0b87362cb..67869513e48c1a09579634ef3a8e1ce7e0c7123a 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -521,7 +521,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) { hisi_qm_stop_qp(ctx->qp); - hisi_qm_release_qp(ctx->qp); + hisi_qm_free_qps(&ctx->qp, 1); } static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = { diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 9c4fb0af5ebd9b695eba0f75e71f7a7c0bec82b8..727030e1a57eaf7087d481b70e5ddbb990776606 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -15,8 +15,7 @@ #include #include "zip.h" -#define PCI_DEVICE_ID_ZIP_PF 0xa250 -#define PCI_DEVICE_ID_ZIP_VF 0xa251 +#define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250 #define HZIP_QUEUE_NUM_V1 4096 @@ -50,14 +49,18 @@ #define HZIP_QM_IDEL_STATUS 0x3040e4 -#define HZIP_CORE_DEBUG_COMP_0 0x302000 -#define HZIP_CORE_DEBUG_COMP_1 0x303000 -#define HZIP_CORE_DEBUG_DECOMP_0 0x304000 -#define HZIP_CORE_DEBUG_DECOMP_1 0x305000 -#define HZIP_CORE_DEBUG_DECOMP_2 0x306000 -#define HZIP_CORE_DEBUG_DECOMP_3 0x307000 -#define HZIP_CORE_DEBUG_DECOMP_4 0x308000 -#define HZIP_CORE_DEBUG_DECOMP_5 0x309000 +#define HZIP_CORE_DFX_BASE 0x301000 +#define HZIP_CLOCK_GATED_CONTL 0X301004 +#define HZIP_CORE_DFX_COMP_0 0x302000 +#define HZIP_CORE_DFX_COMP_1 
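The sgl.c change above rounds each pool element up with ALIGN() so that base + n * sgl_size keeps the hardware alignment for every element, not just the first one. A sketch with hypothetical sizes:

#include <linux/kernel.h>

#define MY_SGE_SIZE	32	/* hypothetical per-entry size */
#define MY_HDR_SIZE	64	/* hypothetical header size */
#define MY_ALIGN	64	/* hardware DMA alignment requirement */

static size_t my_sgl_elem_size(unsigned int sge_nr)
{
	/*
	 * Round the element size up so that every element's address,
	 * base + n * size, stays MY_ALIGN-aligned.
	 */
	return ALIGN(MY_SGE_SIZE * sge_nr + MY_HDR_SIZE, MY_ALIGN);
}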
0x303000 +#define HZIP_CORE_DFX_DECOMP_0 0x304000 +#define HZIP_CORE_DFX_DECOMP_1 0x305000 +#define HZIP_CORE_DFX_DECOMP_2 0x306000 +#define HZIP_CORE_DFX_DECOMP_3 0x307000 +#define HZIP_CORE_DFX_DECOMP_4 0x308000 +#define HZIP_CORE_DFX_DECOMP_5 0x309000 +#define HZIP_CORE_REGS_BASE_LEN 0xB0 +#define HZIP_CORE_REGS_DFX_LEN 0x28 #define HZIP_CORE_INT_SOURCE 0x3010A0 #define HZIP_CORE_INT_MASK_REG 0x3010A4 @@ -103,8 +106,8 @@ #define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0))) #define HZIP_SVA_PREFETCH_DISABLE BIT(26) #define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30)) -#define HZIP_SHAPER_RATE_COMPRESS 252 -#define HZIP_SHAPER_RATE_DECOMPRESS 229 +#define HZIP_SHAPER_RATE_COMPRESS 750 +#define HZIP_SHAPER_RATE_DECOMPRESS 140 #define HZIP_DELAY_1_US 1 #define HZIP_POLL_TIMEOUT_US 1000 @@ -231,6 +234,64 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = { {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull}, }; +static const struct debugfs_reg32 hzip_com_dfx_regs[] = { + {"HZIP_CLOCK_GATE_CTRL ", 0x301004}, + {"HZIP_CORE_INT_RAS_CE_ENB ", 0x301160}, + {"HZIP_CORE_INT_RAS_NFE_ENB ", 0x301164}, + {"HZIP_CORE_INT_RAS_FE_ENB ", 0x301168}, + {"HZIP_UNCOM_ERR_RAS_CTRL ", 0x30116C}, +}; + +static const struct debugfs_reg32 hzip_dump_dfx_regs[] = { + {"HZIP_GET_BD_NUM ", 0x00ull}, + {"HZIP_GET_RIGHT_BD ", 0x04ull}, + {"HZIP_GET_ERROR_BD ", 0x08ull}, + {"HZIP_DONE_BD_NUM ", 0x0cull}, + {"HZIP_MAX_DELAY ", 0x20ull}, +}; + +/* define the ZIP's dfx regs region and region length */ +static struct dfx_diff_registers hzip_diff_regs[] = { + { + .reg_offset = HZIP_CORE_DFX_BASE, + .reg_len = HZIP_CORE_REGS_BASE_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_COMP_0, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_COMP_1, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_DECOMP_0, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_DECOMP_1, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_DECOMP_2, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_DECOMP_3, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_DECOMP_4, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_DECOMP_5, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, +}; + +static int hzip_diff_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + + hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs, + ARRAY_SIZE(hzip_diff_regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs); static const struct kernel_param_ops zip_uacce_mode_ops = { .set = uacce_mode_set, .get = param_get_int, @@ -246,7 +307,7 @@ MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); static int pf_q_num_set(const char *val, const struct kernel_param *kp) { - return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF); + return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF); } static const struct kernel_param_ops pf_q_num_ops = { @@ -268,8 +329,8 @@ module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); static const struct pci_device_id hisi_zip_dev_ids[] = { - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) }, - { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_VF) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_PF) }, + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) }, { 0, } }; MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); @@ -622,6 +683,7 @@ static int hisi_zip_core_debug_init(struct 
hisi_qm *qm) static void hisi_zip_dfx_debug_init(struct hisi_qm *qm) { + struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs; struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm); struct hisi_zip_dfx *dfx = &zip->dfx; struct dentry *tmp_dir; @@ -635,6 +697,10 @@ static void hisi_zip_dfx_debug_init(struct hisi_qm *qm) 0644, tmp_dir, data, &zip_atomic64_ops); } + + if (qm->fun_type == QM_HW_PF && hzip_regs) + debugfs_create_file("diff_regs", 0444, tmp_dir, + qm, &hzip_diff_regs_fops); } static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm) @@ -667,6 +733,13 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm) qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN; qm->debug.debug_root = dev_d; + ret = hisi_qm_diff_regs_init(qm, hzip_diff_regs, + ARRAY_SIZE(hzip_diff_regs)); + if (ret) { + dev_warn(dev, "Failed to init ZIP diff regs!\n"); + goto debugfs_remove; + } + hisi_qm_debug_init(qm); if (qm->fun_type == QM_HW_PF) { @@ -680,6 +753,8 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm) return 0; failed_to_create: + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); +debugfs_remove: debugfs_remove_recursive(hzip_debugfs_root); return ret; } @@ -704,6 +779,8 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm) static void hisi_zip_debugfs_exit(struct hisi_qm *qm) { + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); + debugfs_remove_recursive(qm->debug.debug_root); if (qm->fun_type == QM_HW_PF) { @@ -712,6 +789,87 @@ static void hisi_zip_debugfs_exit(struct hisi_qm *qm) } } +static int hisi_zip_show_last_regs_init(struct hisi_qm *qm) +{ + int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs); + int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs); + struct qm_debug *debug = &qm->debug; + void __iomem *io_base; + int i, j, idx; + + debug->last_words = kcalloc(core_dfx_regs_num * HZIP_CORE_NUM + + com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); + if (!debug->last_words) + return -ENOMEM; + + for (i = 0; i < com_dfx_regs_num; i++) { + io_base = qm->io_base + hzip_com_dfx_regs[i].offset; + debug->last_words[i] = readl_relaxed(io_base); + } + + for (i = 0; i < HZIP_CORE_NUM; i++) { + io_base = qm->io_base + core_offsets[i]; + for (j = 0; j < core_dfx_regs_num; j++) { + idx = com_dfx_regs_num + i * core_dfx_regs_num + j; + debug->last_words[idx] = readl_relaxed( + io_base + hzip_dump_dfx_regs[j].offset); + } + } + + return 0; +} + +static void hisi_zip_show_last_regs_uninit(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + kfree(debug->last_words); + debug->last_words = NULL; +} + +static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm) +{ + int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs); + int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs); + struct qm_debug *debug = &qm->debug; + char buf[HZIP_BUF_SIZE]; + void __iomem *base; + int i, j, idx; + u32 val; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + for (i = 0; i < com_dfx_regs_num; i++) { + val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset); + if (debug->last_words[i] != val) + pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n", + hzip_com_dfx_regs[i].name, debug->last_words[i], val); + } + + for (i = 0; i < HZIP_CORE_NUM; i++) { + if (i < HZIP_COMP_CORE_NUM) + scnprintf(buf, sizeof(buf), "Comp_core-%d", i); + else + scnprintf(buf, sizeof(buf), "Decomp_core-%d", + i - HZIP_COMP_CORE_NUM); + base = qm->io_base + 
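hisi_zip_show_last_regs_init() above flattens a two-level register set (a common block plus per-core blocks) into one kcalloc'd array, indexed as com_n + core * core_n + reg. The same layout as a standalone sketch:

#include <linux/io.h>
#include <linux/slab.h>

/*
 * Snapshot layout (one flat array):
 *   [0 .. com_n)                        common registers
 *   [com_n + i * core_n .. + core_n)    registers of core i
 */
static u32 *my_snapshot_regs(void __iomem *io, const u32 *com_offs, int com_n,
			     const u32 *core_bases, int cores,
			     const u32 *core_offs, int core_n)
{
	u32 *snap;
	int i, j;

	snap = kcalloc(com_n + cores * core_n, sizeof(*snap), GFP_KERNEL);
	if (!snap)
		return NULL;

	for (i = 0; i < com_n; i++)
		snap[i] = readl_relaxed(io + com_offs[i]);

	for (i = 0; i < cores; i++)
		for (j = 0; j < core_n; j++)
			snap[com_n + i * core_n + j] =
				readl_relaxed(io + core_bases[i] + core_offs[j]);

	return snap;
}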
core_offsets[i]; + + pci_info(qm->pdev, "==>%s:\n", buf); + /* dump last word for dfx regs during control resetting */ + for (j = 0; j < core_dfx_regs_num; j++) { + idx = com_dfx_regs_num + i * core_dfx_regs_num + j; + val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset); + if (debug->last_words[idx] != val) + pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n", + hzip_dump_dfx_regs[j].name, debug->last_words[idx], val); + } + } +} + static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts) { const struct hisi_zip_hw_error *err = zip_hw_error; @@ -799,6 +957,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = { .close_axi_master_ooo = hisi_zip_close_axi_master_ooo, .open_sva_prefetch = hisi_zip_open_sva_prefetch, .close_sva_prefetch = hisi_zip_close_sva_prefetch, + .show_last_dfx_regs = hisi_zip_show_last_dfx_regs, .err_info_init = hisi_zip_err_info_init, }; @@ -806,6 +965,7 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) { struct hisi_qm *qm = &hisi_zip->qm; struct hisi_zip_ctrl *ctrl; + int ret; ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL); if (!ctrl) @@ -821,7 +981,11 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(qm); - return 0; + ret = hisi_zip_show_last_regs_init(qm); + if (ret) + pci_err(qm->pdev, "Failed to init last word regs!\n"); + + return ret; } static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) @@ -838,7 +1002,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) qm->sqe_size = HZIP_SQE_SIZE; qm->dev_name = hisi_zip_name; - qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? + qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_ZIP_PF) ? QM_HW_PF : QM_HW_VF; if (qm->fun_type == QM_HW_PF) { qm->qp_base = HZIP_PF_DEF_Q_BASE; @@ -965,6 +1129,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) hisi_qm_stop(qm, QM_NORMAL); err_dev_err_uninit: + hisi_zip_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); err_qm_uninit: @@ -986,6 +1151,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); + hisi_zip_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); hisi_zip_qm_uninit(qm); } diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d19e5ffb5104b953c90e771401ca5c5a3778fd48..d6f9e2fe863d771d64146b8cc203e857174c394c 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -331,7 +331,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq) memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128); } - for_each_sg(req->src, src, sg_nents(src), i) { + for_each_sg(req->src, src, sg_nents(req->src), i) { src_buf = sg_virt(src); len = sg_dma_len(src); tlen += len; diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 9b968ac4ee7b617302ae4748b4b130a10ae580e3..a196bb8b170103a28d2a9140d3dee7a3c3d51e35 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -1302,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev) static int omap_aes_resume(struct device *dev) { - pm_runtime_resume_and_get(dev); + pm_runtime_get_sync(dev); return 0; } #endif diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index d7ca222f0df18b427b1e5059e5111151836aa209..74afafc84c7164ca4dc2bfe873f2682df64b84a7 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -111,37 
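The mxs-dcp one-liner above is a subtle iterator fix: for_each_sg() re-evaluates its nents argument on every pass, and src doubles as the loop cursor, so sg_nents(src) shrinks while walking and truncates the loop. Illustration of the correct form:

#include <linux/scatterlist.h>

static size_t my_sg_total_len(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	size_t total = 0;
	int i;

	/* Correct: bound computed from the list head, not the cursor. */
	for_each_sg(sgl, sg, sg_nents(sgl), i)
		total += sg->length;

	return total;
}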
+111,19 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) mutex_lock(lock); - /* Check if PF2VF CSR is in use by remote function */ + /* Check if the PFVF CSR is in use by remote function */ val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); if ((val & remote_in_use_mask) == remote_in_use_pattern) { dev_dbg(&GET_DEV(accel_dev), - "PF2VF CSR in use by remote function\n"); + "PFVF CSR in use by remote function\n"); ret = -EBUSY; goto out; } - /* Attempt to get ownership of PF2VF CSR */ msg &= ~local_in_use_mask; msg |= local_in_use_pattern; - ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg); - /* Wait in case remote func also attempting to get ownership */ - msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY); - - val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); - if ((val & local_in_use_mask) != local_in_use_pattern) { - dev_dbg(&GET_DEV(accel_dev), - "PF2VF CSR in use by remote - collision detected\n"); - ret = -EBUSY; - goto out; - } - - /* - * This function now owns the PV2VF CSR. The IN_USE_BY pattern must - * remain in the PF2VF CSR for all writes including ACK from remote - * until this local function relinquishes the CSR. Send the message - * by interrupting the remote. - */ + /* Attempt to get ownership of the PFVF CSR */ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit); /* Wait for confirmation from remote func it received the message */ @@ -150,6 +132,12 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset); } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY)); + if (val & int_bit) { + dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n"); + val &= ~int_bit; + ret = -EIO; + } + if (val != msg) { dev_dbg(&GET_DEV(accel_dev), "Collision - PFVF CSR overwritten by remote function\n"); @@ -157,13 +145,7 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) goto out; } - if (val & int_bit) { - dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n"); - val &= ~int_bit; - ret = -EIO; - } - - /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */ + /* Finished with the PFVF CSR; relinquish it and leave msg in CSR */ ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask); out: mutex_unlock(lock); @@ -171,12 +153,13 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) } /** - * adf_iov_putmsg() - send PF2VF message + * adf_iov_putmsg() - send PFVF message * @accel_dev: Pointer to acceleration device. * @msg: Message to send - * @vf_nr: VF number to which the message will be sent + * @vf_nr: VF number to which the message will be sent if on PF, ignored + * otherwise * - * Function sends a messge from the PF to a VF + * Function sends a message through the PFVF channel * * Return: 0 on success, error code otherwise. */ diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c index 54b738da829d899e4558ccbb4d7e07ac8c36e5d5..3e25fac051b25f005a29cf6275fc9aa4a539f4bf 100644 --- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c @@ -8,7 +8,7 @@ * adf_vf2pf_notify_init() - send init msg to PF * @accel_dev: Pointer to acceleration VF device. * - * Function sends an init messge from the VF to a PF + * Function sends an init message from the VF to a PF * * Return: 0 on success, error code otherwise. 
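The reordered __adf_iov_putmsg() above writes the message with the interrupt bit set and then polls until the remote clears that bit, turning a stuck bit into -EIO after a bounded retry count. The wait loop in isolation, with a hypothetical CSR accessor:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_ACK_DELAY_US		5
#define MY_ACK_MAX_RETRY	100

/* 'read_csr' is a hypothetical accessor for the shared mailbox CSR. */
static int my_wait_for_ack(u32 (*read_csr)(void), u32 int_bit)
{
	int count = 0;
	u32 val;

	do {
		udelay(MY_ACK_DELAY_US);
		val = read_csr();
	} while ((val & int_bit) && (count++ < MY_ACK_MAX_RETRY));

	/* The remote acknowledges by clearing the interrupt bit. */
	return (val & int_bit) ? -EIO : 0;
}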
*/ @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init); * adf_vf2pf_notify_shutdown() - send shutdown msg to PF * @accel_dev: Pointer to acceleration VF device. * - * Function sends a shutdown messge from the VF to a PF + * Function sends a shutdown message from the VF to a PF * * Return: void */ diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index ab621b7dbd203e700b1f252ac3c33a9d7b69318b..9210af8a1f58ccda41fca0f0983750abc5e03aef 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -126,6 +126,14 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev) goto err; if (adf_cfg_section_add(accel_dev, "Accelerator0")) goto err; + + /* Temporarily set the number of crypto instances to zero to avoid + * registering the crypto algorithms. + * This will be removed when the algorithms will support the + * CRYPTO_TFM_REQ_MAY_BACKLOG flag + */ + instances = 0; + for (i = 0; i < instances; i++) { val = i; snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i); diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 87be96a0b0bba65f2f4c58dbb4994fa6fa42d027..8b4e79d882af45c6a9e1effa893f57becba194bc 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -533,8 +533,8 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, ret = crypto_register_ahash(alg); if (ret) { - kfree(tmpl); dev_err(qce->dev, "%s registration failed\n", base->cra_name); + kfree(tmpl); return ret; } diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index d8053789c8828a6dc6ac0e0a02ead50e3eacc248..89c7fc3efbd717511d72bbf7ef52333320641314 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -433,8 +433,8 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, ret = crypto_register_skcipher(alg); if (ret) { - kfree(tmpl); dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name); + kfree(tmpl); return ret; } diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c index 99ba8d51d10209de2d99c666bb790c181d800c64..11f30fd48c1414780006ec57b7fca020ce1e891d 100644 --- a/drivers/crypto/qcom-rng.c +++ b/drivers/crypto/qcom-rng.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -43,16 +44,19 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) { unsigned int currsize = 0; u32 val; + int ret; /* read random data from hardware */ do { - val = readl_relaxed(rng->base + PRNG_STATUS); - if (!(val & PRNG_STATUS_DATA_AVAIL)) - break; + ret = readl_poll_timeout(rng->base + PRNG_STATUS, val, + val & PRNG_STATUS_DATA_AVAIL, + 200, 10000); + if (ret) + return ret; val = readl_relaxed(rng->base + PRNG_DATA_OUT); if (!val) - break; + return -EINVAL; if ((max - currsize) >= WORD_SZ) { memcpy(data, &val, WORD_SZ); @@ -61,11 +65,10 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max) } else { /* copy only remaining bytes */ memcpy(data, &val, max - currsize); - break; } } while (currsize < max); - return currsize; + return 0; } static int qcom_rng_generate(struct crypto_rng *tfm, @@ -87,7 +90,7 @@ static int qcom_rng_generate(struct crypto_rng *tfm, mutex_unlock(&rng->lock); clk_disable_unprepare(rng->clk); - return 0; + return ret; } static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed, diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c index 
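The qcom-rng hunk above replaces a single status read with readl_poll_timeout(), so the driver actually waits for data rather than returning short. The helper's shape, with hypothetical register and bit names:

#include <linux/bits.h>
#include <linux/iopoll.h>

#define MY_STATUS_REG	0x0	/* hypothetical offset */
#define MY_DATA_AVAIL	BIT(0)	/* hypothetical status bit */

static int my_wait_data_avail(void __iomem *base)
{
	u32 val;

	/*
	 * Poll every 200 us, give up after 10 ms; returns 0 once the
	 * condition holds, -ETIMEDOUT otherwise.
	 */
	return readl_poll_timeout(base + MY_STATUS_REG, val,
				  val & MY_DATA_AVAIL, 200, 10000);
}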
1cece1a7d3f008fb60a2d2ecedfffc5ec8757e36..5bbf0d2722e11cffef500ad42a27cb7d5e65980a 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c @@ -506,7 +506,6 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = { .exit = rk_ablk_exit_tfm, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, .setkey = rk_tdes_setkey, .encrypt = rk_des3_ede_ecb_encrypt, .decrypt = rk_des3_ede_ecb_decrypt, diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c index 75867c0b00172a013edcb2bfea5070e785ec5557..be1bf39a317de15b335d6289bf556e7e1205371b 100644 --- a/drivers/crypto/stm32/stm32-crc32.c +++ b/drivers/crypto/stm32/stm32-crc32.c @@ -279,7 +279,7 @@ static struct shash_alg algs[] = { .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32", - .cra_driver_name = DRIVER_NAME, + .cra_driver_name = "stm32-crc32-crc32", .cra_priority = 200, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, @@ -301,7 +301,7 @@ static struct shash_alg algs[] = { .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32c", - .cra_driver_name = DRIVER_NAME, + .cra_driver_name = "stm32-crc32-crc32c", .cra_priority = 200, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 7999b26a16ed057217a570cfe780c29d0d5a229c..81eb136b6c11df0308969c9ee0ec91f696a0213f 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -37,7 +37,6 @@ /* Mode mask = bits [15..0] */ #define FLG_MODE_MASK GENMASK(15, 0) /* Bit [31..16] status */ -#define FLG_CCM_PADDED_WA BIT(16) /* Registers */ #define CRYP_CR 0x00000000 @@ -105,8 +104,6 @@ /* Misc */ #define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32)) #define GCM_CTR_INIT 2 -#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset) -#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset) #define CRYP_AUTOSUSPEND_DELAY 50 struct stm32_cryp_caps { @@ -144,26 +141,16 @@ struct stm32_cryp { size_t authsize; size_t hw_blocksize; - size_t total_in; - size_t total_in_save; - size_t total_out; - size_t total_out_save; + size_t payload_in; + size_t header_in; + size_t payload_out; - struct scatterlist *in_sg; struct scatterlist *out_sg; - struct scatterlist *out_sg_save; - - struct scatterlist in_sgl; - struct scatterlist out_sgl; - bool sgs_copied; - - int in_sg_len; - int out_sg_len; struct scatter_walk in_walk; struct scatter_walk out_walk; - u32 last_ctr[4]; + __be32 last_ctr[4]; u32 gcm_ctr; }; @@ -262,6 +249,7 @@ static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp) } static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp); +static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err); static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) { @@ -283,103 +271,6 @@ static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) return cryp; } -static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total, - size_t align) -{ - int len = 0; - - if (!total) - return 0; - - if (!IS_ALIGNED(total, align)) - return -EINVAL; - - while (sg) { - if (!IS_ALIGNED(sg->offset, sizeof(u32))) - return -EINVAL; - - if (!IS_ALIGNED(sg->length, align)) - return -EINVAL; - - len += sg->length; - sg = sg_next(sg); - } - - if (len != total) - return -EINVAL; - - return 0; -} - -static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp) -{ - int ret; 
- - ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in, - cryp->hw_blocksize); - if (ret) - return ret; - - ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out, - cryp->hw_blocksize); - - return ret; -} - -static void sg_copy_buf(void *buf, struct scatterlist *sg, - unsigned int start, unsigned int nbytes, int out) -{ - struct scatter_walk walk; - - if (!nbytes) - return; - - scatterwalk_start(&walk, sg); - scatterwalk_advance(&walk, start); - scatterwalk_copychunks(buf, &walk, nbytes, out); - scatterwalk_done(&walk, out, 0); -} - -static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp) -{ - void *buf_in, *buf_out; - int pages, total_in, total_out; - - if (!stm32_cryp_check_io_aligned(cryp)) { - cryp->sgs_copied = 0; - return 0; - } - - total_in = ALIGN(cryp->total_in, cryp->hw_blocksize); - pages = total_in ? get_order(total_in) : 1; - buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages); - - total_out = ALIGN(cryp->total_out, cryp->hw_blocksize); - pages = total_out ? get_order(total_out) : 1; - buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages); - - if (!buf_in || !buf_out) { - dev_err(cryp->dev, "Can't allocate pages when unaligned\n"); - cryp->sgs_copied = 0; - return -EFAULT; - } - - sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0); - - sg_init_one(&cryp->in_sgl, buf_in, total_in); - cryp->in_sg = &cryp->in_sgl; - cryp->in_sg_len = 1; - - sg_init_one(&cryp->out_sgl, buf_out, total_out); - cryp->out_sg_save = cryp->out_sg; - cryp->out_sg = &cryp->out_sgl; - cryp->out_sg_len = 1; - - cryp->sgs_copied = 1; - - return 0; -} - static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, __be32 *iv) { if (!iv) @@ -481,16 +372,99 @@ static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg) /* Wait for end of processing */ ret = stm32_cryp_wait_enable(cryp); - if (ret) + if (ret) { dev_err(cryp->dev, "Timeout (gcm init)\n"); + return ret; + } - return ret; + /* Prepare next phase */ + if (cryp->areq->assoclen) { + cfg |= CR_PH_HEADER; + stm32_cryp_write(cryp, CRYP_CR, cfg); + } else if (stm32_cryp_get_input_text_len(cryp)) { + cfg |= CR_PH_PAYLOAD; + stm32_cryp_write(cryp, CRYP_CR, cfg); + } + + return 0; +} + +static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp) +{ + u32 cfg; + int err; + + /* Check if whole header written */ + if (!cryp->header_in) { + /* Wait for completion */ + err = stm32_cryp_wait_busy(cryp); + if (err) { + dev_err(cryp->dev, "Timeout (gcm/ccm header)\n"); + stm32_cryp_write(cryp, CRYP_IMSCR, 0); + stm32_cryp_finish_req(cryp, err); + return; + } + + if (stm32_cryp_get_input_text_len(cryp)) { + /* Phase 3 : payload */ + cfg = stm32_cryp_read(cryp, CRYP_CR); + cfg &= ~CR_CRYPEN; + stm32_cryp_write(cryp, CRYP_CR, cfg); + + cfg &= ~CR_PH_MASK; + cfg |= CR_PH_PAYLOAD | CR_CRYPEN; + stm32_cryp_write(cryp, CRYP_CR, cfg); + } else { + /* + * Phase 4 : tag. 
+ * Nothing to read, nothing to write, caller have to + * end request + */ + } + } +} + +static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp) +{ + unsigned int i; + size_t written; + size_t len; + u32 alen = cryp->areq->assoclen; + u32 block[AES_BLOCK_32] = {0}; + u8 *b8 = (u8 *)block; + + if (alen <= 65280) { + /* Write first u32 of B1 */ + b8[0] = (alen >> 8) & 0xFF; + b8[1] = alen & 0xFF; + len = 2; + } else { + /* Build the two first u32 of B1 */ + b8[0] = 0xFF; + b8[1] = 0xFE; + b8[2] = (alen & 0xFF000000) >> 24; + b8[3] = (alen & 0x00FF0000) >> 16; + b8[4] = (alen & 0x0000FF00) >> 8; + b8[5] = alen & 0x000000FF; + len = 6; + } + + written = min_t(size_t, AES_BLOCK_SIZE - len, alen); + + scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0); + for (i = 0; i < AES_BLOCK_32; i++) + stm32_cryp_write(cryp, CRYP_DIN, block[i]); + + cryp->header_in -= written; + + stm32_crypt_gcmccm_end_header(cryp); } static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) { int ret; - u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE]; + u32 iv_32[AES_BLOCK_32], b0_32[AES_BLOCK_32]; + u8 *iv = (u8 *)iv_32, *b0 = (u8 *)b0_32; __be32 *bd; u32 *d; unsigned int i, textlen; @@ -531,10 +505,24 @@ static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) /* Wait for end of processing */ ret = stm32_cryp_wait_enable(cryp); - if (ret) + if (ret) { dev_err(cryp->dev, "Timeout (ccm init)\n"); + return ret; + } - return ret; + /* Prepare next phase */ + if (cryp->areq->assoclen) { + cfg |= CR_PH_HEADER | CR_CRYPEN; + stm32_cryp_write(cryp, CRYP_CR, cfg); + + /* Write first (special) block (may move to next phase [payload]) */ + stm32_cryp_write_ccm_first_header(cryp); + } else if (stm32_cryp_get_input_text_len(cryp)) { + cfg |= CR_PH_PAYLOAD; + stm32_cryp_write(cryp, CRYP_CR, cfg); + } + + return 0; } static int stm32_cryp_hw_init(struct stm32_cryp *cryp) @@ -542,7 +530,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) int ret; u32 cfg, hw_mode; - pm_runtime_resume_and_get(cryp->dev); + pm_runtime_get_sync(cryp->dev); /* Disable interrupt */ stm32_cryp_write(cryp, CRYP_IMSCR, 0); @@ -605,16 +593,6 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) if (ret) return ret; - /* Phase 2 : header (authenticated data) */ - if (cryp->areq->assoclen) { - cfg |= CR_PH_HEADER; - } else if (stm32_cryp_get_input_text_len(cryp)) { - cfg |= CR_PH_PAYLOAD; - stm32_cryp_write(cryp, CRYP_CR, cfg); - } else { - cfg |= CR_PH_INIT; - } - break; case CR_DES_CBC: @@ -633,8 +611,6 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) stm32_cryp_write(cryp, CRYP_CR, cfg); - cryp->flags &= ~FLG_CCM_PADDED_WA; - return 0; } @@ -644,28 +620,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err) /* Phase 4 : output tag */ err = stm32_cryp_read_auth_tag(cryp); - if (!err && (!(is_gcm(cryp) || is_ccm(cryp)))) + if (!err && (!(is_gcm(cryp) || is_ccm(cryp) || is_ecb(cryp)))) stm32_cryp_get_iv(cryp); - if (cryp->sgs_copied) { - void *buf_in, *buf_out; - int pages, len; - - buf_in = sg_virt(&cryp->in_sgl); - buf_out = sg_virt(&cryp->out_sgl); - - sg_copy_buf(buf_out, cryp->out_sg_save, 0, - cryp->total_out_save, 1); - - len = ALIGN(cryp->total_in_save, cryp->hw_blocksize); - pages = len ? get_order(len) : 1; - free_pages((unsigned long)buf_in, pages); - - len = ALIGN(cryp->total_out_save, cryp->hw_blocksize); - pages = len ? 
get_order(len) : 1; - free_pages((unsigned long)buf_out, pages); - } - pm_runtime_mark_last_busy(cryp->dev); pm_runtime_put_autosuspend(cryp->dev); @@ -674,8 +631,6 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err) else crypto_finalize_skcipher_request(cryp->engine, cryp->req, err); - - memset(cryp->ctx->key, 0, cryp->ctx->keylen); } static int stm32_cryp_cpu_start(struct stm32_cryp *cryp) @@ -801,7 +756,20 @@ static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { - return authsize == AES_BLOCK_SIZE ? 0 : -EINVAL; + switch (authsize) { + case 4: + case 8: + case 12: + case 13: + case 14: + case 15: + case 16: + break; + default: + return -EINVAL; + } + + return 0; } static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm, @@ -825,31 +793,61 @@ static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm, static int stm32_cryp_aes_ecb_encrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT); } static int stm32_cryp_aes_ecb_decrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_ECB); } static int stm32_cryp_aes_cbc_encrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT); } static int stm32_cryp_aes_cbc_decrypt(struct skcipher_request *req) { + if (req->cryptlen % AES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CBC); } static int stm32_cryp_aes_ctr_encrypt(struct skcipher_request *req) { + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT); } static int stm32_cryp_aes_ctr_decrypt(struct skcipher_request *req) { + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_AES | FLG_CTR); } @@ -863,53 +861,122 @@ static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req) return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM); } +static inline int crypto_ccm_check_iv(const u8 *iv) +{ + /* 2 <= L <= 8, so 1 <= L' <= 7. 
*/ + if (iv[0] < 1 || iv[0] > 7) + return -EINVAL; + + return 0; +} + static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req) { + int err; + + err = crypto_ccm_check_iv(req->iv); + if (err) + return err; + return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT); } static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req) { + int err; + + err = crypto_ccm_check_iv(req->iv); + if (err) + return err; + return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM); } static int stm32_cryp_des_ecb_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT); } static int stm32_cryp_des_ecb_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_ECB); } static int stm32_cryp_des_cbc_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT); } static int stm32_cryp_des_cbc_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_DES | FLG_CBC); } static int stm32_cryp_tdes_ecb_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT); } static int stm32_cryp_tdes_ecb_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB); } static int stm32_cryp_tdes_cbc_encrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT); } static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req) { + if (req->cryptlen % DES_BLOCK_SIZE) + return -EINVAL; + + if (req->cryptlen == 0) + return 0; + return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC); } @@ -919,6 +986,7 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req, struct stm32_cryp_ctx *ctx; struct stm32_cryp *cryp; struct stm32_cryp_reqctx *rctx; + struct scatterlist *in_sg; int ret; if (!req && !areq) @@ -944,76 +1012,55 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req, if (req) { cryp->req = req; cryp->areq = NULL; - cryp->total_in = req->cryptlen; - cryp->total_out = cryp->total_in; + cryp->header_in = 0; + cryp->payload_in = req->cryptlen; + cryp->payload_out = req->cryptlen; + cryp->authsize = 0; } else { /* * Length of input and output data: * Encryption case: - * INPUT = AssocData || PlainText + * INPUT = AssocData || PlainText * <- assoclen -> <- cryptlen -> - * <------- total_in -----------> * - * OUTPUT = AssocData || CipherText || AuthTag - * <- assoclen -> <- cryptlen -> <- authsize -> - * <---------------- total_out -----------------> + * OUTPUT = AssocData || CipherText || AuthTag + * <- assoclen -> <-- cryptlen --> <- authsize -> * * Decryption case: - * INPUT = AssocData || CipherText || AuthTag - * <- assoclen -> <--------- cryptlen ---------> - * <- authsize -> - * <---------------- total_in ------------------> + * INPUT = AssocData || CipherTex || AuthTag + * <- assoclen ---> 
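crypto_ccm_check_iv() above enforces RFC 3610's constraint that iv[0] = L' lies in 1..7. A small companion sketch deriving the nonce length from the same field:

#include <linux/errno.h>
#include <linux/types.h>

/*
 * RFC 3610 counter block layout: flags | nonce N | message length.
 * iv[0] holds L' = L - 1, where L (2..8) is the byte width of the
 * length field, so the nonce occupies 15 - L bytes.
 */
static int my_ccm_nonce_len(const u8 *iv)
{
	unsigned int l = iv[0] + 1;

	if (l < 2 || l > 8)
		return -EINVAL;

	return 15 - l;
}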
<---------- cryptlen ----------> * - * OUTPUT = AssocData || PlainText - * <- assoclen -> <- crypten - authsize -> - * <---------- total_out -----------------> + * OUTPUT = AssocData || PlainText + * <- assoclen -> <- cryptlen - authsize -> */ cryp->areq = areq; cryp->req = NULL; cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq)); - cryp->total_in = areq->assoclen + areq->cryptlen; - if (is_encrypt(cryp)) - /* Append auth tag to output */ - cryp->total_out = cryp->total_in + cryp->authsize; - else - /* No auth tag in output */ - cryp->total_out = cryp->total_in - cryp->authsize; + if (is_encrypt(cryp)) { + cryp->payload_in = areq->cryptlen; + cryp->header_in = areq->assoclen; + cryp->payload_out = areq->cryptlen; + } else { + cryp->payload_in = areq->cryptlen - cryp->authsize; + cryp->header_in = areq->assoclen; + cryp->payload_out = cryp->payload_in; + } } - cryp->total_in_save = cryp->total_in; - cryp->total_out_save = cryp->total_out; + in_sg = req ? req->src : areq->src; + scatterwalk_start(&cryp->in_walk, in_sg); - cryp->in_sg = req ? req->src : areq->src; cryp->out_sg = req ? req->dst : areq->dst; - cryp->out_sg_save = cryp->out_sg; - - cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in); - if (cryp->in_sg_len < 0) { - dev_err(cryp->dev, "Cannot get in_sg_len\n"); - ret = cryp->in_sg_len; - return ret; - } - - cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out); - if (cryp->out_sg_len < 0) { - dev_err(cryp->dev, "Cannot get out_sg_len\n"); - ret = cryp->out_sg_len; - return ret; - } - - ret = stm32_cryp_copy_sgs(cryp); - if (ret) - return ret; - - scatterwalk_start(&cryp->in_walk, cryp->in_sg); scatterwalk_start(&cryp->out_walk, cryp->out_sg); if (is_gcm(cryp) || is_ccm(cryp)) { /* In output, jump after assoc data */ - scatterwalk_advance(&cryp->out_walk, cryp->areq->assoclen); - cryp->total_out -= cryp->areq->assoclen; + scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2); } + if (is_ctr(cryp)) + memset(cryp->last_ctr, 0, sizeof(cryp->last_ctr)); + ret = stm32_cryp_hw_init(cryp); return ret; } @@ -1061,8 +1108,7 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq) if (!cryp) return -ENODEV; - if (unlikely(!cryp->areq->assoclen && - !stm32_cryp_get_input_text_len(cryp))) { + if (unlikely(!cryp->payload_in && !cryp->header_in)) { /* No input data to process: get tag and finish */ stm32_cryp_finish_req(cryp, 0); return 0; @@ -1071,43 +1117,10 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq) return stm32_cryp_cpu_start(cryp); } -static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst, - unsigned int n) -{ - scatterwalk_advance(&cryp->out_walk, n); - - if (unlikely(cryp->out_sg->length == _walked_out)) { - cryp->out_sg = sg_next(cryp->out_sg); - if (cryp->out_sg) { - scatterwalk_start(&cryp->out_walk, cryp->out_sg); - return (sg_virt(cryp->out_sg) + _walked_out); - } - } - - return (u32 *)((u8 *)dst + n); -} - -static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src, - unsigned int n) -{ - scatterwalk_advance(&cryp->in_walk, n); - - if (unlikely(cryp->in_sg->length == _walked_in)) { - cryp->in_sg = sg_next(cryp->in_sg); - if (cryp->in_sg) { - scatterwalk_start(&cryp->in_walk, cryp->in_sg); - return (sg_virt(cryp->in_sg) + _walked_in); - } - } - - return (u32 *)((u8 *)src + n); -} - static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) { - u32 cfg, size_bit, *dst, d32; - u8 *d8; - unsigned int i, j; + u32 cfg, size_bit; + unsigned int i; int ret = 
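Note the scatterwalk_copychunks(NULL, ..., 2) call above: a NULL buffer with out=2 advances the walk without copying any data, which is how the output walk jumps over the associated data. Wrapped as a helper:

#include <crypto/scatterwalk.h>

/* Skip 'len' bytes of a scatter_walk; out=2 means "advance only". */
static void my_sg_skip(struct scatter_walk *walk, size_t len)
{
	scatterwalk_copychunks(NULL, walk, len, 2);
}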
0; /* Update Config */ @@ -1130,7 +1143,7 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) stm32_cryp_write(cryp, CRYP_DIN, size_bit); size_bit = is_encrypt(cryp) ? cryp->areq->cryptlen : - cryp->areq->cryptlen - AES_BLOCK_SIZE; + cryp->areq->cryptlen - cryp->authsize; size_bit *= 8; if (cryp->caps->swap_final) size_bit = (__force u32)cpu_to_be32(size_bit); @@ -1139,11 +1152,9 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) stm32_cryp_write(cryp, CRYP_DIN, size_bit); } else { /* CCM: write CTR0 */ - u8 iv[AES_BLOCK_SIZE]; - u32 *iv32 = (u32 *)iv; - __be32 *biv; - - biv = (void *)iv; + u32 iv32[AES_BLOCK_32]; + u8 *iv = (u8 *)iv32; + __be32 *biv = (__be32 *)iv32; memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE); memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1); @@ -1165,39 +1176,18 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) } if (is_encrypt(cryp)) { + u32 out_tag[AES_BLOCK_32]; + /* Get and write tag */ - dst = sg_virt(cryp->out_sg) + _walked_out; + for (i = 0; i < AES_BLOCK_32; i++) + out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT); - for (i = 0; i < AES_BLOCK_32; i++) { - if (cryp->total_out >= sizeof(u32)) { - /* Read a full u32 */ - *dst = stm32_cryp_read(cryp, CRYP_DOUT); - - dst = stm32_cryp_next_out(cryp, dst, - sizeof(u32)); - cryp->total_out -= sizeof(u32); - } else if (!cryp->total_out) { - /* Empty fifo out (data from input padding) */ - stm32_cryp_read(cryp, CRYP_DOUT); - } else { - /* Read less than an u32 */ - d32 = stm32_cryp_read(cryp, CRYP_DOUT); - d8 = (u8 *)&d32; - - for (j = 0; j < cryp->total_out; j++) { - *((u8 *)dst) = *(d8++); - dst = stm32_cryp_next_out(cryp, dst, 1); - } - cryp->total_out = 0; - } - } + scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1); } else { /* Get and check tag */ u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32]; - scatterwalk_map_and_copy(in_tag, cryp->in_sg, - cryp->total_in_save - cryp->authsize, - cryp->authsize, 0); + scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0); for (i = 0; i < AES_BLOCK_32; i++) out_tag[i] = stm32_cryp_read(cryp, CRYP_DOUT); @@ -1217,115 +1207,59 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp) { u32 cr; - if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) { - cryp->last_ctr[3] = 0; - cryp->last_ctr[2]++; - if (!cryp->last_ctr[2]) { - cryp->last_ctr[1]++; - if (!cryp->last_ctr[1]) - cryp->last_ctr[0]++; - } + if (unlikely(cryp->last_ctr[3] == cpu_to_be32(0xFFFFFFFF))) { + /* + * In this case, we need to increment manually the ctr counter, + * as HW doesn't handle the U32 carry. 
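The rewritten stm32_cryp_check_ctr_counter() keeps last_ctr in big-endian form and leans on crypto_inc(), which propagates the carry across the whole 16-byte counter that the hardware drops when only the low 32 bits wrap. The core of that fixup, sketched without the IV-register rewrite:

#include <crypto/algapi.h>	/* crypto_inc() */
#include <linux/kernel.h>

/*
 * crypto_inc() increments a big-endian byte counter with full carry
 * propagation, covering the 32-bit wrap the hardware mishandles.
 */
static void my_ctr_fix_carry(__be32 ctr[4])
{
	if (ctr[3] == cpu_to_be32(0xFFFFFFFF))
		crypto_inc((u8 *)ctr, 4 * sizeof(*ctr));
}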
+ */ + crypto_inc((u8 *)cryp->last_ctr, sizeof(cryp->last_ctr)); cr = stm32_cryp_read(cryp, CRYP_CR); stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN); - stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr); + stm32_cryp_hw_write_iv(cryp, cryp->last_ctr); stm32_cryp_write(cryp, CRYP_CR, cr); } - cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR); - cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR); - cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR); - cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR); + /* The IV registers are BE */ + cryp->last_ctr[0] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR)); + cryp->last_ctr[1] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR)); + cryp->last_ctr[2] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR)); + cryp->last_ctr[3] = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR)); } -static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp) +static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp) { - unsigned int i, j; - u32 d32, *dst; - u8 *d8; - size_t tag_size; - - /* Do no read tag now (if any) */ - if (is_encrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp))) - tag_size = cryp->authsize; - else - tag_size = 0; - - dst = sg_virt(cryp->out_sg) + _walked_out; + unsigned int i; + u32 block[AES_BLOCK_32]; - for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { - if (likely(cryp->total_out - tag_size >= sizeof(u32))) { - /* Read a full u32 */ - *dst = stm32_cryp_read(cryp, CRYP_DOUT); + for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) + block[i] = stm32_cryp_read(cryp, CRYP_DOUT); - dst = stm32_cryp_next_out(cryp, dst, sizeof(u32)); - cryp->total_out -= sizeof(u32); - } else if (cryp->total_out == tag_size) { - /* Empty fifo out (data from input padding) */ - d32 = stm32_cryp_read(cryp, CRYP_DOUT); - } else { - /* Read less than an u32 */ - d32 = stm32_cryp_read(cryp, CRYP_DOUT); - d8 = (u8 *)&d32; - - for (j = 0; j < cryp->total_out - tag_size; j++) { - *((u8 *)dst) = *(d8++); - dst = stm32_cryp_next_out(cryp, dst, 1); - } - cryp->total_out = tag_size; - } - } - - return !(cryp->total_out - tag_size) || !cryp->total_in; + scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out), 1); + cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, + cryp->payload_out); } static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) { - unsigned int i, j; - u32 *src; - u8 d8[4]; - size_t tag_size; - - /* Do no write tag (if any) */ - if (is_decrypt(cryp) && (is_gcm(cryp) || is_ccm(cryp))) - tag_size = cryp->authsize; - else - tag_size = 0; - - src = sg_virt(cryp->in_sg) + _walked_in; + unsigned int i; + u32 block[AES_BLOCK_32] = {0}; - for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) { - if (likely(cryp->total_in - tag_size >= sizeof(u32))) { - /* Write a full u32 */ - stm32_cryp_write(cryp, CRYP_DIN, *src); + scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize, + cryp->payload_in), 0); + for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) + stm32_cryp_write(cryp, CRYP_DIN, block[i]); - src = stm32_cryp_next_in(cryp, src, sizeof(u32)); - cryp->total_in -= sizeof(u32); - } else if (cryp->total_in == tag_size) { - /* Write padding data */ - stm32_cryp_write(cryp, CRYP_DIN, 0); - } else { - /* Write less than an u32 */ - memset(d8, 0, sizeof(u32)); - for (j = 0; j < cryp->total_in - tag_size; j++) { - d8[j] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - } - - stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); - cryp->total_in = tag_size; - } - } + 
cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in); } static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) { int err; - u32 cfg, tmp[AES_BLOCK_32]; - size_t total_in_ori = cryp->total_in; - struct scatterlist *out_sg_ori = cryp->out_sg; + u32 cfg, block[AES_BLOCK_32] = {0}; unsigned int i; /* 'Special workaround' procedure described in the datasheet */ @@ -1350,18 +1284,25 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) /* b) pad and write the last block */ stm32_cryp_irq_write_block(cryp); - cryp->total_in = total_in_ori; + /* wait end of process */ err = stm32_cryp_wait_output(cryp); if (err) { - dev_err(cryp->dev, "Timeout (write gcm header)\n"); + dev_err(cryp->dev, "Timeout (write gcm last data)\n"); return stm32_cryp_finish_req(cryp, err); } /* c) get and store encrypted data */ - stm32_cryp_irq_read_data(cryp); - scatterwalk_map_and_copy(tmp, out_sg_ori, - cryp->total_in_save - total_in_ori, - total_in_ori, 0); + /* + * Same code as stm32_cryp_irq_read_data(), but we want to store + * block value + */ + for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) + block[i] = stm32_cryp_read(cryp, CRYP_DOUT); + + scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out), 1); + cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, + cryp->payload_out); /* d) change mode back to AES GCM */ cfg &= ~CR_ALGO_MASK; @@ -1374,19 +1315,13 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) stm32_cryp_write(cryp, CRYP_CR, cfg); /* f) write padded data */ - for (i = 0; i < AES_BLOCK_32; i++) { - if (cryp->total_in) - stm32_cryp_write(cryp, CRYP_DIN, tmp[i]); - else - stm32_cryp_write(cryp, CRYP_DIN, 0); - - cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in); - } + for (i = 0; i < AES_BLOCK_32; i++) + stm32_cryp_write(cryp, CRYP_DIN, block[i]); /* g) Empty fifo out */ err = stm32_cryp_wait_output(cryp); if (err) { - dev_err(cryp->dev, "Timeout (write gcm header)\n"); + dev_err(cryp->dev, "Timeout (write gcm padded data)\n"); return stm32_cryp_finish_req(cryp, err); } @@ -1399,16 +1334,14 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp) { - u32 cfg, payload_bytes; + u32 cfg; /* disable ip, set NPBLB and reneable ip */ cfg = stm32_cryp_read(cryp, CRYP_CR); cfg &= ~CR_CRYPEN; stm32_cryp_write(cryp, CRYP_CR, cfg); - payload_bytes = is_decrypt(cryp) ? 
cryp->total_in - cryp->authsize : - cryp->total_in; - cfg |= (cryp->hw_blocksize - payload_bytes) << CR_NBPBL_SHIFT; + cfg |= (cryp->hw_blocksize - cryp->payload_in) << CR_NBPBL_SHIFT; cfg |= CR_CRYPEN; stm32_cryp_write(cryp, CRYP_CR, cfg); } @@ -1417,13 +1350,11 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) { int err = 0; u32 cfg, iv1tmp; - u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32], tmp[AES_BLOCK_32]; - size_t last_total_out, total_in_ori = cryp->total_in; - struct scatterlist *out_sg_ori = cryp->out_sg; + u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32]; + u32 block[AES_BLOCK_32] = {0}; unsigned int i; /* 'Special workaround' procedure described in the datasheet */ - cryp->flags |= FLG_CCM_PADDED_WA; /* a) disable ip */ stm32_cryp_write(cryp, CRYP_IMSCR, 0); @@ -1453,7 +1384,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) /* b) pad and write the last block */ stm32_cryp_irq_write_block(cryp); - cryp->total_in = total_in_ori; + /* wait end of process */ err = stm32_cryp_wait_output(cryp); if (err) { dev_err(cryp->dev, "Timeout (wite ccm padded data)\n"); @@ -1461,13 +1392,16 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) } /* c) get and store decrypted data */ - last_total_out = cryp->total_out; - stm32_cryp_irq_read_data(cryp); + /* + * Same code as stm32_cryp_irq_read_data(), but we want to store + * block value + */ + for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) + block[i] = stm32_cryp_read(cryp, CRYP_DOUT); - memset(tmp, 0, sizeof(tmp)); - scatterwalk_map_and_copy(tmp, out_sg_ori, - cryp->total_out_save - last_total_out, - last_total_out, 0); + scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out), 1); + cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); /* d) Load again CRYP_CSGCMCCMxR */ for (i = 0; i < ARRAY_SIZE(cstmp2); i++) @@ -1484,10 +1418,10 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) stm32_cryp_write(cryp, CRYP_CR, cfg); /* g) XOR and write padded data */ - for (i = 0; i < ARRAY_SIZE(tmp); i++) { - tmp[i] ^= cstmp1[i]; - tmp[i] ^= cstmp2[i]; - stm32_cryp_write(cryp, CRYP_DIN, tmp[i]); + for (i = 0; i < ARRAY_SIZE(block); i++) { + block[i] ^= cstmp1[i]; + block[i] ^= cstmp2[i]; + stm32_cryp_write(cryp, CRYP_DIN, block[i]); } /* h) wait for completion */ @@ -1501,30 +1435,34 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp) { - if (unlikely(!cryp->total_in)) { + if (unlikely(!cryp->payload_in)) { dev_warn(cryp->dev, "No more data to process\n"); return; } - if (unlikely(cryp->total_in < AES_BLOCK_SIZE && + if (unlikely(cryp->payload_in < AES_BLOCK_SIZE && (stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) && is_encrypt(cryp))) { /* Padding for AES GCM encryption */ - if (cryp->caps->padding_wa) + if (cryp->caps->padding_wa) { /* Special case 1 */ - return stm32_cryp_irq_write_gcm_padded_data(cryp); + stm32_cryp_irq_write_gcm_padded_data(cryp); + return; + } /* Setting padding bytes (NBBLB) */ stm32_cryp_irq_set_npblb(cryp); } - if (unlikely((cryp->total_in - cryp->authsize < AES_BLOCK_SIZE) && + if (unlikely((cryp->payload_in < AES_BLOCK_SIZE) && (stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) && is_decrypt(cryp))) { /* Padding for AES CCM decryption */ - if (cryp->caps->padding_wa) + if (cryp->caps->padding_wa) { /* Special case 2 */ - return stm32_cryp_irq_write_ccm_padded_data(cryp); + 
stm32_cryp_irq_write_ccm_padded_data(cryp); + return; + } /* Setting padding bytes (NBBLB) */ stm32_cryp_irq_set_npblb(cryp); @@ -1536,192 +1474,60 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp) stm32_cryp_irq_write_block(cryp); } -static void stm32_cryp_irq_write_gcm_header(struct stm32_cryp *cryp) +static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp) { - int err; - unsigned int i, j; - u32 cfg, *src; - - src = sg_virt(cryp->in_sg) + _walked_in; - - for (i = 0; i < AES_BLOCK_32; i++) { - stm32_cryp_write(cryp, CRYP_DIN, *src); - - src = stm32_cryp_next_in(cryp, src, sizeof(u32)); - cryp->total_in -= min_t(size_t, sizeof(u32), cryp->total_in); - - /* Check if whole header written */ - if ((cryp->total_in_save - cryp->total_in) == - cryp->areq->assoclen) { - /* Write padding if needed */ - for (j = i + 1; j < AES_BLOCK_32; j++) - stm32_cryp_write(cryp, CRYP_DIN, 0); - - /* Wait for completion */ - err = stm32_cryp_wait_busy(cryp); - if (err) { - dev_err(cryp->dev, "Timeout (gcm header)\n"); - return stm32_cryp_finish_req(cryp, err); - } - - if (stm32_cryp_get_input_text_len(cryp)) { - /* Phase 3 : payload */ - cfg = stm32_cryp_read(cryp, CRYP_CR); - cfg &= ~CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); - - cfg &= ~CR_PH_MASK; - cfg |= CR_PH_PAYLOAD; - cfg |= CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); - } else { - /* Phase 4 : tag */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); - stm32_cryp_finish_req(cryp, 0); - } - - break; - } - - if (!cryp->total_in) - break; - } -} + unsigned int i; + u32 block[AES_BLOCK_32] = {0}; + size_t written; -static void stm32_cryp_irq_write_ccm_header(struct stm32_cryp *cryp) -{ - int err; - unsigned int i = 0, j, k; - u32 alen, cfg, *src; - u8 d8[4]; - - src = sg_virt(cryp->in_sg) + _walked_in; - alen = cryp->areq->assoclen; - - if (!_walked_in) { - if (cryp->areq->assoclen <= 65280) { - /* Write first u32 of B1 */ - d8[0] = (alen >> 8) & 0xFF; - d8[1] = alen & 0xFF; - d8[2] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - d8[3] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - - stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); - i++; - - cryp->total_in -= min_t(size_t, 2, cryp->total_in); - } else { - /* Build the two first u32 of B1 */ - d8[0] = 0xFF; - d8[1] = 0xFE; - d8[2] = alen & 0xFF000000; - d8[3] = alen & 0x00FF0000; - - stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); - i++; - - d8[0] = alen & 0x0000FF00; - d8[1] = alen & 0x000000FF; - d8[2] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - d8[3] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - - stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); - i++; - - cryp->total_in -= min_t(size_t, 2, cryp->total_in); - } - } + written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in); - /* Write next u32 */ - for (; i < AES_BLOCK_32; i++) { - /* Build an u32 */ - memset(d8, 0, sizeof(u32)); - for (k = 0; k < sizeof(u32); k++) { - d8[k] = *((u8 *)src); - src = stm32_cryp_next_in(cryp, src, 1); - - cryp->total_in -= min_t(size_t, 1, cryp->total_in); - if ((cryp->total_in_save - cryp->total_in) == alen) - break; - } + scatterwalk_copychunks(block, &cryp->in_walk, written, 0); + for (i = 0; i < AES_BLOCK_32; i++) + stm32_cryp_write(cryp, CRYP_DIN, block[i]); - stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8); - - if ((cryp->total_in_save - cryp->total_in) == alen) { - /* Write padding if needed */ - for (j = i + 1; j < AES_BLOCK_32; j++) - stm32_cryp_write(cryp, CRYP_DIN, 0); - - /* Wait for completion */ - err = stm32_cryp_wait_busy(cryp); 
- if (err) { - dev_err(cryp->dev, "Timeout (ccm header)\n"); - return stm32_cryp_finish_req(cryp, err); - } - - if (stm32_cryp_get_input_text_len(cryp)) { - /* Phase 3 : payload */ - cfg = stm32_cryp_read(cryp, CRYP_CR); - cfg &= ~CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); - - cfg &= ~CR_PH_MASK; - cfg |= CR_PH_PAYLOAD; - cfg |= CR_CRYPEN; - stm32_cryp_write(cryp, CRYP_CR, cfg); - } else { - /* Phase 4 : tag */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); - stm32_cryp_finish_req(cryp, 0); - } + cryp->header_in -= written; - break; - } - } + stm32_crypt_gcmccm_end_header(cryp); } static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg) { struct stm32_cryp *cryp = arg; u32 ph; + u32 it_mask = stm32_cryp_read(cryp, CRYP_IMSCR); if (cryp->irq_status & MISR_OUT) /* Output FIFO IRQ: read data */ - if (unlikely(stm32_cryp_irq_read_data(cryp))) { - /* All bytes processed, finish */ - stm32_cryp_write(cryp, CRYP_IMSCR, 0); - stm32_cryp_finish_req(cryp, 0); - return IRQ_HANDLED; - } + stm32_cryp_irq_read_data(cryp); if (cryp->irq_status & MISR_IN) { - if (is_gcm(cryp)) { + if (is_gcm(cryp) || is_ccm(cryp)) { ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK; if (unlikely(ph == CR_PH_HEADER)) /* Write Header */ - stm32_cryp_irq_write_gcm_header(cryp); - else - /* Input FIFO IRQ: write data */ - stm32_cryp_irq_write_data(cryp); - cryp->gcm_ctr++; - } else if (is_ccm(cryp)) { - ph = stm32_cryp_read(cryp, CRYP_CR) & CR_PH_MASK; - if (unlikely(ph == CR_PH_HEADER)) - /* Write Header */ - stm32_cryp_irq_write_ccm_header(cryp); + stm32_cryp_irq_write_gcmccm_header(cryp); else /* Input FIFO IRQ: write data */ stm32_cryp_irq_write_data(cryp); + if (is_gcm(cryp)) + cryp->gcm_ctr++; } else { /* Input FIFO IRQ: write data */ stm32_cryp_irq_write_data(cryp); } } + /* Mask useless interrupts */ + if (!cryp->payload_in && !cryp->header_in) + it_mask &= ~IMSCR_IN; + if (!cryp->payload_out) + it_mask &= ~IMSCR_OUT; + stm32_cryp_write(cryp, CRYP_IMSCR, it_mask); + + if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) + stm32_cryp_finish_req(cryp, 0); + return IRQ_HANDLED; } @@ -1742,7 +1548,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1759,7 +1565,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1777,7 +1583,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1795,7 +1601,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1812,7 +1618,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + 
.base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1830,7 +1636,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1847,7 +1653,7 @@ static struct skcipher_alg crypto_algs[] = { .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .base.cra_alignmask = 0xf, + .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, @@ -1877,7 +1683,7 @@ static struct aead_alg aead_algs[] = { .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .cra_alignmask = 0xf, + .cra_alignmask = 0, .cra_module = THIS_MODULE, }, }, @@ -1897,7 +1703,7 @@ static struct aead_alg aead_algs[] = { .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct stm32_cryp_ctx), - .cra_alignmask = 0xf, + .cra_alignmask = 0, .cra_module = THIS_MODULE, }, }, @@ -2025,8 +1831,6 @@ static int stm32_cryp_probe(struct platform_device *pdev) list_del(&cryp->list); spin_unlock(&cryp_list.lock); - pm_runtime_disable(dev); - pm_runtime_put_noidle(dev); pm_runtime_disable(dev); pm_runtime_put_noidle(dev); diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 389de9e3302d5658c92e1eb5c7cc3cc4f100a2ac..d33006d43f761b2b3dafd68ad888dd7b71b80a04 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err) static int stm32_hash_hw_init(struct stm32_hash_dev *hdev, struct stm32_hash_request_ctx *rctx) { - pm_runtime_resume_and_get(hdev->dev); + pm_runtime_get_sync(hdev->dev); if (!(HASH_FLAGS_INIT & hdev->flags)) { stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT); @@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out) u32 *preg; unsigned int i; - pm_runtime_resume_and_get(hdev->dev); + pm_runtime_get_sync(hdev->dev); while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY)) cpu_relax(); @@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in) preg = rctx->hw_context; - pm_runtime_resume_and_get(hdev->dev); + pm_runtime_get_sync(hdev->dev); stm32_hash_write(hdev, HASH_IMR, *preg++); stm32_hash_write(hdev, HASH_STR, *preg++); diff --git a/drivers/crypto/vmx/Kconfig b/drivers/crypto/vmx/Kconfig index c85fab7ef0bdd21754ad1a30ad833778b92832f6..b2c28b87f14b3d1566cbca862441f80bcc3e1ad9 100644 --- a/drivers/crypto/vmx/Kconfig +++ b/drivers/crypto/vmx/Kconfig @@ -2,7 +2,11 @@ config CRYPTO_DEV_VMX_ENCRYPT tristate "Encryption acceleration support on P8 CPU" depends on CRYPTO_DEV_VMX + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_CTR select CRYPTO_GHASH + select CRYPTO_XTS default m help Support for VMX cryptographic acceleration instructions on Power8 CPU. 
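Note on the stm32-cryp rework above: the driver moves from hand-rolled scatterlist pointer arithmetic (stm32_cryp_next_in()/stm32_cryp_next_out() plus a full sg bounce copy) to staging one hardware block in a stack buffer and letting scatterwalk_copychunks() absorb scatterlist fragmentation, short tails, and FIFO padding words. The sketch below shows that pattern in isolation; it is a minimal illustration, not the driver's code — cryp_read_fifo(), BLOCK_* and struct ctx are invented stand-ins, while scatterwalk_start(), scatterwalk_copychunks() and min_t() are the real kernel interfaces the patch relies on.

    #include <linux/kernel.h>
    #include <crypto/scatterwalk.h>

    #define BLOCK_BYTES 16                     /* AES block size */
    #define BLOCK_WORDS (BLOCK_BYTES / sizeof(u32))

    struct ctx {
        struct scatter_walk out_walk;          /* scatterwalk_start()ed once per request */
        size_t payload_out;                    /* payload bytes still expected */
    };

    static u32 cryp_read_fifo(void)
    {
        return 0;                              /* stand-in for readl(base + CRYP_DOUT) */
    }

    static void drain_one_block(struct ctx *c)
    {
        u32 block[BLOCK_WORDS];
        size_t n = min_t(size_t, BLOCK_BYTES, c->payload_out);
        unsigned int i;

        /* Always pop a full hardware block: the FIFO emits padding
         * words when fewer payload bytes remain, and those words
         * still have to be consumed. */
        for (i = 0; i < BLOCK_WORDS; i++)
            block[i] = cryp_read_fifo();

        /* Copy only the live bytes out; the walk copes with fragmented
         * and unaligned destinations (out=1 copies buffer -> sglist;
         * out=2, as used above to skip assoc data, only advances). */
        scatterwalk_copychunks(block, &c->out_walk, n, 1);
        c->payload_out -= n;
    }

Because the walk keeps its own cursor, the header, payload and tag phases can share it, which is what lets the patch retire the total_in/total_out bookkeeping and relax cra_alignmask from 0xf to 0.

The CTR change in the same series deserves a note as well: the peripheral only increments the low 32 bits of the counter, so when that word is about to wrap, the driver now performs the carry in software with crypto_inc(), which increments a big-endian byte string — hence last_ctr becoming __be32-valued. Another hedged sketch, with the disable/rewrite-IV/re-enable register sequence elided:

    #include <linux/kernel.h>
    #include <crypto/algapi.h>                 /* crypto_inc() */

    /* last_ctr mirrors the four IV registers as one big-endian
     * 128-bit counter, most significant word first. */
    static void ctr_carry_if_needed(__be32 last_ctr[4])
    {
        /* The low word is about to wrap and the hardware will not
         * carry into last_ctr[0..2], so do the increment here ... */
        if (last_ctr[3] == cpu_to_be32(0xFFFFFFFF))
            crypto_inc((u8 *)last_ctr, 4 * sizeof(__be32));
        /* ... then the caller rewrites the IV registers from last_ctr
         * with CRYPEN briefly cleared (omitted in this sketch). */
    }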
diff --git a/drivers/dax/super.c b/drivers/dax/super.c index cadbd0a1a1ef0220296faa93736f53b1b359783c..260a247c60d2da5c56c93144cdaac1340fd2750a 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -723,6 +723,7 @@ static int dax_fs_init(void) static void dax_fs_exit(void) { kern_unmount(dax_mnt); + rcu_barrier(); kmem_cache_destroy(dax_cache); } diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index d3fbd950be944f7a3bc7c2d231a0573cea5eac70..3e07f961e2f3d75af19b0e88f9346318fc3e6eb2 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -104,7 +104,11 @@ static bool dma_fence_array_signaled(struct dma_fence *fence) { struct dma_fence_array *array = to_dma_fence_array(fence); - return atomic_read(&array->num_pending) <= 0; + if (atomic_read(&array->num_pending) > 0) + return false; + + dma_fence_array_clear_pending_error(array); + return true; } static void dma_fence_array_release(struct dma_fence *fence) diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index afd22c9dbdcfa16864f07e2fdab2e1ed8f5992fb..798f86fcd50fa47c61a2103cf3c7e806a8ea0c53 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -123,6 +124,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd, if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds)) return -EINVAL; + nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds)); /* Get the kernel ioctl cmd that matches */ kcmd = dma_heap_ioctl_cmds[nr]; diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index db732f71e59aded339b3b0199579864436aa5510..cfbf10128aaedbc04ec6be9d6b6ab164c6e5ea79 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -181,6 +181,10 @@ static long udmabuf_create(struct miscdevice *device, if (ubuf->pagecount > pglimit) goto err; } + + if (!ubuf->pagecount) + goto err; + ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages), GFP_KERNEL); if (!ubuf->pages) { diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 627ad74c879fdb6276d13085e3fe1c936a693be6..90afba0b36fe97514b661d8489be85b20b95966e 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -89,6 +89,7 @@ #define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */ #define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */ #define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */ +#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27) #define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */ #define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */ #define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */ @@ -220,15 +221,15 @@ struct at_xdmac { /* Linked List Descriptor */ struct at_xdmac_lld { - dma_addr_t mbr_nda; /* Next Descriptor Member */ - u32 mbr_ubc; /* Microblock Control Member */ - dma_addr_t mbr_sa; /* Source Address Member */ - dma_addr_t mbr_da; /* Destination Address Member */ - u32 mbr_cfg; /* Configuration Register */ - u32 mbr_bc; /* Block Control Register */ - u32 mbr_ds; /* Data Stride Register */ - u32 mbr_sus; /* Source Microblock Stride Register */ - u32 mbr_dus; /* Destination Microblock Stride Register */ + u32 mbr_nda; /* Next Descriptor Member */ + u32 mbr_ubc; /* Microblock Control Member */ + u32 mbr_sa; /* Source Address Member */ + u32 mbr_da; /* 
Destination Address Member */ + u32 mbr_cfg; /* Configuration Register */ + u32 mbr_bc; /* Block Control Register */ + u32 mbr_ds; /* Data Stride Register */ + u32 mbr_sus; /* Source Microblock Stride Register */ + u32 mbr_dus; /* Destination Microblock Stride Register */ }; /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */ @@ -338,9 +339,6 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); - if (at_xdmac_chan_is_enabled(atchan)) - return; - /* Set transfer as active to not try to start it again. */ first->active_xfer = true; @@ -356,7 +354,8 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, */ if (at_xdmac_chan_is_cyclic(atchan)) reg = AT_XDMAC_CNDC_NDVIEW_NDV1; - else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) + else if ((first->lld.mbr_ubc & + AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3) reg = AT_XDMAC_CNDC_NDVIEW_NDV3; else reg = AT_XDMAC_CNDC_NDVIEW_NDV2; @@ -427,13 +426,12 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&atchan->lock, irqflags); cookie = dma_cookie_assign(tx); + list_add_tail(&desc->xfer_node, &atchan->xfers_list); + spin_unlock_irqrestore(&atchan->lock, irqflags); + dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", __func__, atchan, desc); - list_add_tail(&desc->xfer_node, &atchan->xfers_list); - if (list_is_singular(&atchan->xfers_list)) - at_xdmac_start_xfer(atchan, desc); - spin_unlock_irqrestore(&atchan->lock, irqflags); return cookie; } @@ -1563,14 +1561,17 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan) struct at_xdmac_desc *desc; struct dma_async_tx_descriptor *txd; - if (!list_empty(&atchan->xfers_list)) { - desc = list_first_entry(&atchan->xfers_list, - struct at_xdmac_desc, xfer_node); - txd = &desc->tx_dma_desc; - - if (txd->flags & DMA_PREP_INTERRUPT) - dmaengine_desc_get_callback_invoke(txd, NULL); + spin_lock_irq(&atchan->lock); + if (list_empty(&atchan->xfers_list)) { + spin_unlock_irq(&atchan->lock); + return; } + desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, + xfer_node); + spin_unlock_irq(&atchan->lock); + txd = &desc->tx_dma_desc; + if (txd->flags & DMA_PREP_INTERRUPT) + dmaengine_desc_get_callback_invoke(txd, NULL); } static void at_xdmac_handle_error(struct at_xdmac_chan *atchan) @@ -1724,11 +1725,9 @@ static void at_xdmac_issue_pending(struct dma_chan *chan) dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__); - if (!at_xdmac_chan_is_cyclic(atchan)) { - spin_lock_irqsave(&atchan->lock, flags); - at_xdmac_advance_work(atchan); - spin_unlock_irqrestore(&atchan->lock, flags); - } + spin_lock_irqsave(&atchan->lock, flags); + at_xdmac_advance_work(atchan); + spin_unlock_irqrestore(&atchan->lock, flags); return; } diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c index e1a958ae7925477b1d7c2ba310721ac046e094cd..3e83769615d1cb71d335093aa2b8aeae276b5e0e 100644 --- a/drivers/dma/hisi_dma.c +++ b/drivers/dma/hisi_dma.c @@ -30,7 +30,7 @@ #define HISI_DMA_MODE 0x217c #define HISI_DMA_OFFSET 0x100 -#define HISI_DMA_MSI_NUM 30 +#define HISI_DMA_MSI_NUM 32 #define HISI_DMA_CHAN_NUM 30 #define HISI_DMA_Q_DEPTH_VAL 1024 diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index b84303be8edf55d5ed1dfdf80e5705d8f30fc918..4eb63f1ad224765d690256e9b5eef55d0abe50c7 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -728,12 +728,6 @@ static int mmp_pdma_config_write(struct 
dma_chan *dchan, chan->dir = direction; chan->dev_addr = addr; - /* FIXME: drivers should be ported over to use the filter - * function. Once that's done, the following two lines can - * be removed. - */ - if (cfg->slave_id) - chan->drcmr = cfg->slave_id; return 0; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index dfbf514188f3702cf9fbaa9fb9b90e5e580117c8..6dca548f4dab1bf71f579b7eb6e4f53bdf445cce 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -3199,7 +3199,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int pl330_remove(struct amba_device *adev) +static void pl330_remove(struct amba_device *adev) { struct pl330_dmac *pl330 = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; @@ -3239,7 +3239,6 @@ static int pl330_remove(struct amba_device *adev) if (pl330->rstc) reset_control_assert(pl330->rstc); - return 0; } static const struct amba_id pl330_ids[] = { diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 349fb312c8725678a2a1268a4371047dbcb2ed11..b4ef4f19f7decb9c007a3ecfa369ba974fd1faca 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -911,13 +911,6 @@ static void pxad_get_config(struct pxad_chan *chan, *dcmd |= PXA_DCMD_BURST16; else if (maxburst == 32) *dcmd |= PXA_DCMD_BURST32; - - /* FIXME: drivers should be ported over to use the filter - * function. Once that's done, the following two lines can - * be removed. - */ - if (chan->cfg.slave_id) - chan->drcmr = chan->cfg.slave_id; } static struct dma_async_tx_descriptor * diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 991a7b5da29f085625356abf60344f4e8be8fc20..7c268d1bd205071d70f638d915c05f50fedd5cb2 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1844,8 +1844,13 @@ static int rcar_dmac_probe(struct platform_device *pdev) dmac->dev = &pdev->dev; platform_set_drvdata(pdev, dmac); - dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); - dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK); + if (ret) + return ret; + + ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; ret = rcar_dmac_parse_of(&pdev->dev, dmac); if (ret < 0) diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index bddd3b23f33fc8dc54ff25b1667ae1d4ad93297a..f04bcffd3c24a847ca2a88a80baf1c0345a04f14 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev) ret = of_dma_router_register(node, stm32_dmamux_route_allocate, &stm32_dmamux->dmarouter); if (ret) - goto err_clk; + goto pm_disable; return 0; +pm_disable: + pm_runtime_disable(&pdev->dev); err_clk: clk_disable_unprepare(stm32_dmamux->clk); diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 9d473923712adab957e8dcbc200d496b2c786c37..fe36738f2dd7e35f71b46669e6d1bfd761236add 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -184,7 +184,7 @@ #define STM32_MDMA_CTBR(x) (0x68 + 0x40 * (x)) #define STM32_MDMA_CTBR_DBUS BIT(17) #define STM32_MDMA_CTBR_SBUS BIT(16) -#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(7, 0) +#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(5, 0) #define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \ STM32_MDMA_CTBR_TSEL_MASK) diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c index d6b8a202474f4a8f817da25fb689701538fa7460..290836b7e1be2b9ac86dbd437038d8c828a0832e 100644 --- 
a/drivers/dma/uniphier-xdmac.c +++ b/drivers/dma/uniphier-xdmac.c @@ -131,8 +131,9 @@ uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc) static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc, struct uniphier_xdmac_desc *xd) { - u32 src_mode, src_addr, src_width; - u32 dst_mode, dst_addr, dst_width; + u32 src_mode, src_width; + u32 dst_mode, dst_width; + dma_addr_t src_addr, dst_addr; u32 val, its, tnum; enum dma_slave_buswidth buswidth; diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index e91cf1147a4e0f4740a65f39560c35bc5a5ecec0..be38fd71f731a5c4989cff9a340734e4d552adba 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -349,7 +349,7 @@ static int altr_sdram_probe(struct platform_device *pdev) if (irq < 0) { edac_printk(KERN_ERR, EDAC_MC, "No irq %d in DT\n", irq); - return -ENODEV; + return irq; } /* Arria10 has a 2nd IRQ */ diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 01ff71f7b64562f37459b5dd6dd86835458d215e..f4eb071327be08163e9463d9abbbb3ac1885f9c1 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -210,7 +210,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems) else return (char *)ptr; - r = (unsigned long)p % align; + r = (unsigned long)ptr % align; if (r == 0) return (char *)ptr; diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c index 1a801a5d3b08b3c66bd117a6a55b7cf475992e0c..92906b56b1a2b9f64045fe2acacbed2b7a39fa7a 100644 --- a/drivers/edac/synopsys_edac.c +++ b/drivers/edac/synopsys_edac.c @@ -1351,8 +1351,7 @@ static int mc_probe(struct platform_device *pdev) } } - if (of_device_is_compatible(pdev->dev.of_node, - "xlnx,zynqmp-ddrc-2.40a")) + if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) setup_address_map(priv); #endif diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c index 1d2c27a00a4a8348eb5e0d0c203eafa4520c7030..cd1eefeff1923192735c787d05143eb6fc7661e2 100644 --- a/drivers/edac/xgene_edac.c +++ b/drivers/edac/xgene_edac.c @@ -1919,7 +1919,7 @@ static int xgene_edac_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, i); if (irq < 0) { dev_err(&pdev->dev, "No IRQ resource\n"); - rc = -EINVAL; + rc = irq; goto out_err; } rc = devm_request_irq(&pdev->dev, irq, diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 4645677d86f1b1648bccfb99c9f88a2393a98dff..a45678cd9b74004a28ebcced068d8ca576f9bb4f 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -202,7 +202,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id, if (rate_discrete && rate) { clk->list.num_rates = tot_rate_cnt; - sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL); + sort(clk->list.rates, tot_rate_cnt, sizeof(*rate), + rate_cmp_func, NULL); } clk->rate_discrete = rate_discrete; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 763223248664530fa2c9d799ef60fd74a8fc6a2a..745b7f9eb335108823cd0103423422d4c37e2a5b 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -979,7 +979,7 @@ static void __exit scmi_driver_exit(void) } module_exit(scmi_driver_exit); -MODULE_ALIAS("platform: arm-scmi"); +MODULE_ALIAS("platform:arm-scmi"); MODULE_AUTHOR("Sudeep Holla "); MODULE_DESCRIPTION("ARM SCMI protocol driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c index 
e1926483ae2fdc910a1cc785ac390c11c2ebd408..e51838d749e2e415f4fc891b9ab0fc8f36831e4f 100644 --- a/drivers/firmware/efi/apple-properties.c +++ b/drivers/firmware/efi/apple-properties.c @@ -24,7 +24,7 @@ static bool dump_properties __initdata; static int __init dump_properties_enable(char *arg) { dump_properties = true; - return 0; + return 1; } __setup("dump_apple_properties", dump_properties_enable); diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 0ef086e43090bb14f3789466bedf0af049e3d18d..7e771c56c13c6194cbda0ac31a1359dd9d2d8d8f 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -266,7 +266,7 @@ static int efi_pstore_write(struct pstore_record *record) efi_name[i] = name[i]; ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES, - preemptible(), record->size, record->psi->buf); + false, record->size, record->psi->buf); if (record->reason == KMSG_DUMP_OOPS && try_module_get(THIS_MODULE)) if (!schedule_work(&efivar_work)) diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 098d8796113a4a933e782fb81b8bb05fb6be7b8d..c406de00883aea7044a14fda3338b82fcdb8cc94 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -209,7 +209,7 @@ static int __init efivar_ssdt_setup(char *str) memcpy(efivar_ssdt, str, strlen(str)); else pr_warn("efivar_ssdt: name too long: %s\n", str); - return 0; + return 1; } __setup("efivar_ssdt=", efivar_ssdt_setup); @@ -745,6 +745,13 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr, systab_hdr->revision >> 16, systab_hdr->revision & 0xffff, vendor); + + if (IS_ENABLED(CONFIG_X86_64) && + systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION && + !strcmp(vendor, "Apple")) { + pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n"); + efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION; + } } static __initdata char memory_type_name[][13] = { diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index c1b57dfb127763dbe4a2b7062f57406e42e0abae..4ee5ced0c6a4d9522703d0319aabd83d4a05649d 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -79,6 +79,18 @@ static bool check_image_region(u64 base, u64 size) return ret; } +/* + * Although relocatable kernels can fix up the misalignment with respect to + * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of + * sync with those recorded in the vmlinux when kaslr is disabled but the + * image required relocation anyway. Therefore retain 2M alignment unless + * KASLR is in use. + */ +static u64 min_kimg_align(void) +{ + return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; +} + efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long *image_size, unsigned long *reserve_addr, @@ -89,16 +101,6 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, unsigned long kernel_size, kernel_memsize = 0; u32 phys_seed = 0; - /* - * Although relocatable kernels can fix up the misalignment with - * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are - * subtly out of sync with those recorded in the vmlinux when kaslr is - * disabled but the image required relocation anyway. Therefore retain - * 2M alignment if KASLR was explicitly disabled, even if it was not - * going to be activated to begin with. - */ - u64 min_kimg_align = efi_nokaslr ? 
MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; - if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { if (!efi_nokaslr) { status = efi_get_random_bytes(sizeof(phys_seed), @@ -119,9 +121,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, if (image->image_base != _text) efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n"); - if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN)) - efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n", - EFI_KIMG_ALIGN >> 10); + if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN)) + efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n", + SEGMENT_ALIGN >> 10); kernel_size = _edata - _text; kernel_memsize = kernel_size + (_end - _edata); @@ -132,7 +134,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, * If KASLR is enabled, and we have some randomness available, * locate the kernel at a randomized offset in physical memory. */ - status = efi_random_alloc(*reserve_size, min_kimg_align, + status = efi_random_alloc(*reserve_size, min_kimg_align(), reserve_addr, phys_seed); } else { status = EFI_OUT_OF_RESOURCES; @@ -141,7 +143,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, if (status != EFI_SUCCESS) { if (!check_image_region((u64)_text, kernel_memsize)) { efi_err("FIRMWARE BUG: Image BSS overlaps adjacent EFI memory region\n"); - } else if (IS_ALIGNED((u64)_text, min_kimg_align)) { + } else if (IS_ALIGNED((u64)_text, min_kimg_align())) { /* * Just execute from wherever we were loaded by the * UEFI PE/COFF loader if the alignment is suitable. @@ -152,7 +154,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, } status = efi_allocate_pages_aligned(*reserve_size, reserve_addr, - ULONG_MAX, min_kimg_align); + ULONG_MAX, min_kimg_align()); if (status != EFI_SUCCESS) { efi_err("Failed to relocate kernel\n"); diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c index 380e4e2513994e9d21fa7ecc520c00218d645ce3..9c460843442f5ad56a8865a19088d7a2cb3dbcbd 100644 --- a/drivers/firmware/efi/libstub/riscv-stub.c +++ b/drivers/firmware/efi/libstub/riscv-stub.c @@ -25,7 +25,7 @@ typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long); static u32 hartid; -static u32 get_boot_hartid_from_fdt(void) +static int get_boot_hartid_from_fdt(void) { const void *fdt; int chosen_node, len; @@ -33,23 +33,26 @@ static u32 get_boot_hartid_from_fdt(void) fdt = get_efi_config_table(DEVICE_TREE_GUID); if (!fdt) - return U32_MAX; + return -EINVAL; chosen_node = fdt_path_offset(fdt, "/chosen"); if (chosen_node < 0) - return U32_MAX; + return -EINVAL; prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len); if (!prop || len != sizeof(u32)) - return U32_MAX; + return -EINVAL; - return fdt32_to_cpu(*prop); + hartid = fdt32_to_cpu(*prop); + return 0; } efi_status_t check_platform_features(void) { - hartid = get_boot_hartid_from_fdt(); - if (hartid == U32_MAX) { + int ret; + + ret = get_boot_hartid_from_fdt(); + if (ret) { efi_err("/chosen/boot-hartid missing or invalid!\n"); return EFI_UNSUPPORTED; } diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index abdc8a6a396318a915455dd73e26439b88c1b003..cae590bd08f27c3c769459802d3546d1542c2ff8 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -742,6 +742,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, { const struct efivar_operations *ops; efi_status_t status; + unsigned long varsize; if (!__efivars) return -EINVAL; @@ -764,15 +765,17 @@ int 
efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes, return efivar_entry_set_nonblocking(name, vendor, attributes, size, data); + varsize = size + ucs2_strsize(name, 1024); if (!block) { if (down_trylock(&efivars_lock)) return -EBUSY; + status = check_var_size_nonblocking(attributes, varsize); } else { if (down_interruptible(&efivars_lock)) return -EINTR; + status = check_var_size(attributes, varsize); } - status = check_var_size(attributes, size + ucs2_strsize(name, 1024)); if (status != EFI_SUCCESS) { up(&efivars_lock); return -ENOSPC; diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig index 97968aece54f889bbb6083fe956aceb1b2e9f162..983e07dc022ede84376add92c25bc651b0b16bb2 100644 --- a/drivers/firmware/google/Kconfig +++ b/drivers/firmware/google/Kconfig @@ -3,9 +3,9 @@ menuconfig GOOGLE_FIRMWARE bool "Google Firmware Drivers" default n help - These firmware drivers are used by Google's servers. They are - only useful if you are working directly on one of their - proprietary servers. If in doubt, say "N". + These firmware drivers are used by Google servers, + Chromebooks and other devices using coreboot firmware. + If in doubt, say "N". if GOOGLE_FIRMWARE @@ -21,7 +21,7 @@ config GOOGLE_SMI config GOOGLE_COREBOOT_TABLE tristate "Coreboot Table Access" - depends on ACPI || OF + depends on HAS_IOMEM && (ACPI || OF) help This option enables the coreboot_table module, which provides other firmware modules access to the coreboot table. The coreboot table diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index e10a99860ca4b489544f14bbd9c9895ab1377517..d417199f8fe9433bc069c125c6da93340b03b9fc 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -749,12 +749,6 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) }; int ret; - desc.args[0] = addr; - desc.args[1] = size; - desc.args[2] = spare; - desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, - QCOM_SCM_VAL); - ret = qcom_scm_call(__scm->dev, &desc, NULL); /* the pg table has been initialized already, ignore the error */ diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 172c751a4f6c24acc1e5e8cb04b594b1843a5b86..f08e056ed0ae4506db5f6336534b4095ea4cc388 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -388,9 +388,7 @@ static void fw_cfg_sysfs_cache_cleanup(void) struct fw_cfg_sysfs_entry *entry, *next; list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) { - /* will end up invoking fw_cfg_sysfs_cache_delist() - * via each object's release() method (i.e. 
destructor) - */ + fw_cfg_sysfs_cache_delist(entry); kobject_put(&entry->kobj); } } @@ -448,7 +446,6 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj) { struct fw_cfg_sysfs_entry *entry = to_entry(kobj); - fw_cfg_sysfs_cache_delist(entry); kfree(entry); } @@ -601,20 +598,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) /* set file entry information */ entry->size = be32_to_cpu(f->size); entry->select = be16_to_cpu(f->select); - memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); + strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH); /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */ err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype, fw_cfg_sel_ko, "%d", entry->select); - if (err) { - kobject_put(&entry->kobj); - return err; - } + if (err) + goto err_put_entry; /* add raw binary content access */ err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw); if (err) - goto err_add_raw; + goto err_del_entry; /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */ fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name); @@ -623,9 +618,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f) fw_cfg_sysfs_cache_enlist(entry); return 0; -err_add_raw: +err_del_entry: kobject_del(&entry->kobj); - kfree(entry); +err_put_entry: + kobject_put(&entry->kobj); return err; } diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 2a7687911c097c964bed2444d45262441f2872e3..53c7e3f8cfde2925fba08d52c3292450ad79db74 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -477,7 +477,7 @@ static int svc_normal_to_secure_thread(void *data) case INTEL_SIP_SMC_RSU_ERROR: pr_err("%s: STATUS_ERROR\n", __func__); cbdata->status = BIT(SVC_STATUS_ERROR); - cbdata->kaddr1 = NULL; + cbdata->kaddr1 = &res.a1; cbdata->kaddr2 = NULL; cbdata->kaddr3 = NULL; pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c index dbad73162c8333bf5161b688d9a53a4eb4ae71b4..87edc77260d20598ac5851c7e6098db674654db7 100644 --- a/drivers/fsi/fsi-master-aspeed.c +++ b/drivers/fsi/fsi-master-aspeed.c @@ -525,7 +525,6 @@ static int tacoma_cabled_fsi_fixup(struct device *dev) static int fsi_master_aspeed_probe(struct platform_device *pdev) { struct fsi_master_aspeed *aspeed; - struct resource *res; int rc, links, reg; __be32 raw; @@ -535,26 +534,28 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) return rc; } - aspeed = devm_kzalloc(&pdev->dev, sizeof(*aspeed), GFP_KERNEL); + aspeed = kzalloc(sizeof(*aspeed), GFP_KERNEL); if (!aspeed) return -ENOMEM; aspeed->dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - aspeed->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(aspeed->base)) - return PTR_ERR(aspeed->base); + aspeed->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(aspeed->base)) { + rc = PTR_ERR(aspeed->base); + goto err_free_aspeed; + } aspeed->clk = devm_clk_get(aspeed->dev, NULL); if (IS_ERR(aspeed->clk)) { dev_err(aspeed->dev, "couldn't get clock\n"); - return PTR_ERR(aspeed->clk); + rc = PTR_ERR(aspeed->clk); + goto err_free_aspeed; } rc = clk_prepare_enable(aspeed->clk); if (rc) { dev_err(aspeed->dev, "couldn't enable clock\n"); - return rc; + goto err_free_aspeed; } rc = setup_cfam_reset(aspeed); @@ -589,7 +590,7 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) rc = opb_readl(aspeed, ctrl_base + FSI_MVER, &raw); if (rc) 
{ dev_err(&pdev->dev, "failed to read hub version\n"); - return rc; + goto err_release; } reg = be32_to_cpu(raw); @@ -628,6 +629,8 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) err_release: clk_disable_unprepare(aspeed->clk); +err_free_aspeed: + kfree(aspeed); return rc; } diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c index dfd8a4876a27ac4baa5cac2aaffdbfb0223aab54..d5f25246404d9e28e434046f921b5052525172c2 100644 --- a/drivers/gpio/gpio-aggregator.c +++ b/drivers/gpio/gpio-aggregator.c @@ -330,7 +330,8 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset) { struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - return gpiod_get_value(fwd->descs[offset]); + return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset]) + : gpiod_get_value(fwd->descs[offset]); } static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, @@ -349,7 +350,10 @@ static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, for_each_set_bit(i, mask, fwd->chip.ngpio) descs[j++] = fwd->descs[i]; - error = gpiod_get_array_value(j, descs, NULL, values); + if (fwd->chip.can_sleep) + error = gpiod_get_array_value_cansleep(j, descs, NULL, values); + else + error = gpiod_get_array_value(j, descs, NULL, values); if (error) return error; @@ -384,7 +388,10 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value) { struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - gpiod_set_value(fwd->descs[offset], value); + if (chip->can_sleep) + gpiod_set_value_cansleep(fwd->descs[offset], value); + else + gpiod_set_value(fwd->descs[offset], value); } static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, @@ -403,7 +410,10 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, descs[j++] = fwd->descs[i]; } - gpiod_set_array_value(j, descs, NULL, values); + if (fwd->chip.can_sleep) + gpiod_set_array_value_cansleep(j, descs, NULL, values); + else + gpiod_set_array_value(j, descs, NULL, values); } static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip, diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index b966f5e28ebffd5f3a97bcd43f1001156641b43d..e0d5d80ec8e0f2f650804c523f4f624b48f4ded7 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -53,7 +53,7 @@ struct aspeed_gpio_config { struct aspeed_gpio { struct gpio_chip chip; struct irq_chip irqc; - spinlock_t lock; + raw_spinlock_t lock; void __iomem *base; int irq; const struct aspeed_gpio_config *config; @@ -413,14 +413,14 @@ static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, unsigned long flags; bool copro; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); copro = aspeed_gpio_copro_request(gpio, offset); __aspeed_gpio_set(gc, offset, val); if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); } static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) @@ -435,7 +435,7 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) if (!have_input(gpio, offset)) return -ENOTSUPP; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); reg = ioread32(addr); reg &= ~GPIO_BIT(offset); @@ -445,7 +445,7 @@ static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) if (copro) aspeed_gpio_copro_release(gpio, offset); - 
spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return 0; } @@ -463,7 +463,7 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc, if (!have_output(gpio, offset)) return -ENOTSUPP; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); reg = ioread32(addr); reg |= GPIO_BIT(offset); @@ -474,7 +474,7 @@ static int aspeed_gpio_dir_out(struct gpio_chip *gc, if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return 0; } @@ -492,11 +492,11 @@ static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) if (!have_output(gpio, offset)) return GPIO_LINE_DIRECTION_IN; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); val = ioread32(bank_reg(gpio, bank, reg_dir)) & GPIO_BIT(offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return val ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; } @@ -539,14 +539,14 @@ static void aspeed_gpio_irq_ack(struct irq_data *d) status_addr = bank_reg(gpio, bank, reg_irq_status); - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); copro = aspeed_gpio_copro_request(gpio, offset); iowrite32(bit, status_addr); if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); } static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) @@ -565,7 +565,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) addr = bank_reg(gpio, bank, reg_irq_enable); - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); copro = aspeed_gpio_copro_request(gpio, offset); reg = ioread32(addr); @@ -577,7 +577,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); } static void aspeed_gpio_irq_mask(struct irq_data *d) @@ -629,7 +629,7 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type) return -EINVAL; } - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); copro = aspeed_gpio_copro_request(gpio, offset); addr = bank_reg(gpio, bank, reg_irq_type0); @@ -649,7 +649,7 @@ static int aspeed_gpio_set_type(struct irq_data *d, unsigned int type) if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); irq_set_handler_locked(d, handler); @@ -719,7 +719,7 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip, treg = bank_reg(gpio, to_bank(offset), reg_tolerance); - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); copro = aspeed_gpio_copro_request(gpio, offset); val = readl(treg); @@ -733,7 +733,7 @@ static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip, if (copro) aspeed_gpio_copro_release(gpio, offset); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return 0; } @@ -859,7 +859,7 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset, return rc; } - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); if (timer_allocation_registered(gpio, offset)) { rc = unregister_allocated_timer(gpio, offset); @@ -919,7 
+919,7 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset, configure_timer(gpio, offset, i); out: - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return rc; } @@ -930,13 +930,13 @@ static int disable_debounce(struct gpio_chip *chip, unsigned int offset) unsigned long flags; int rc; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); rc = unregister_allocated_timer(gpio, offset); if (!rc) configure_timer(gpio, offset, 0); - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return rc; } @@ -1018,7 +1018,7 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc, return -EINVAL; bindex = offset >> 3; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); /* Sanity check, this shouldn't happen */ if (gpio->cf_copro_bankmap[bindex] == 0xff) { @@ -1039,7 +1039,7 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc, if (bit) *bit = GPIO_OFFSET(offset); bail: - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return rc; } EXPORT_SYMBOL_GPL(aspeed_gpio_copro_grab_gpio); @@ -1063,7 +1063,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc) return -EINVAL; bindex = offset >> 3; - spin_lock_irqsave(&gpio->lock, flags); + raw_spin_lock_irqsave(&gpio->lock, flags); /* Sanity check, this shouldn't happen */ if (gpio->cf_copro_bankmap[bindex] == 0) { @@ -1077,7 +1077,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc) aspeed_gpio_change_cmd_source(gpio, bank, bindex, GPIO_CMDSRC_ARM); bail: - spin_unlock_irqrestore(&gpio->lock, flags); + raw_spin_unlock_irqrestore(&gpio->lock, flags); return rc; } EXPORT_SYMBOL_GPL(aspeed_gpio_copro_release_gpio); @@ -1151,7 +1151,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev) if (IS_ERR(gpio->base)) return PTR_ERR(gpio->base); - spin_lock_init(&gpio->lock); + raw_spin_lock_init(&gpio->lock); gpio_id = of_match_node(aspeed_gpio_of_table, pdev->dev.of_node); if (!gpio_id) diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c index d5eb9ca119016c6ba7882334ee2eaf16ce77949a..4f28fa73450c175bb7baa5ed8e78db0db161f0b0 100644 --- a/drivers/gpio/gpio-sifive.c +++ b/drivers/gpio/gpio-sifive.c @@ -206,7 +206,7 @@ static int sifive_gpio_probe(struct platform_device *pdev) NULL, chip->base + SIFIVE_GPIO_OUTPUT_EN, chip->base + SIFIVE_GPIO_INPUT_EN, - 0); + BGPIOF_READ_OUTPUT_REG_SET); if (ret) { dev_err(dev, "unable to init generic GPIO\n"); return ret; diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index 9500074b1f1b55e00951af9b7b9f2d956f37a7ef..7fbe5f0681b956b582a03f12c5b409e9f7f6de14 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -337,9 +337,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip, return offset + pin; } +#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio) + static void tegra186_irq_ack(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; base = tegra186_gpio_get_base(gpio, data->hwirq); @@ -351,7 +354,8 @@ static void tegra186_irq_ack(struct irq_data *data) static void tegra186_irq_mask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct 
tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -366,7 +370,8 @@ static void tegra186_irq_mask(struct irq_data *data) static void tegra186_irq_unmask(struct irq_data *data) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; @@ -381,7 +386,8 @@ static int tegra186_irq_set_type(struct irq_data *data, unsigned int type) { - struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct tegra_gpio *gpio = to_tegra_gpio(gc); void __iomem *base; u32 value; diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c index d885032cf814d89e184f7a10ab51fa09b2efc033..d918d2df4de2cbf536a67483b496f5079640ebfd 100644 --- a/drivers/gpio/gpio-ts4900.c +++ b/drivers/gpio/gpio-ts4900.c @@ -1,7 +1,7 @@ /* * Digital I/O driver for Technologic Systems I2C FPGA Core * - * Copyright (C) 2015 Technologic Systems + * Copyright (C) 2015, 2018 Technologic Systems * Copyright (C) 2016 Savoir-Faire Linux * * This program is free software; you can redistribute it and/or @@ -55,19 +55,33 @@ static int ts4900_gpio_direction_input(struct gpio_chip *chip, { struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); - /* - * This will clear the output enable bit, the other bits are - * dontcare when this is cleared + /* Only clear the OE bit here, which requires a RMW. This prevents a + * potential issue with OE and data getting to the physical pin at different times. */ - return regmap_write(priv->regmap, offset, 0); + return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OE, 0); } static int ts4900_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) { struct ts4900_gpio_priv *priv = gpiochip_get_data(chip); + unsigned int reg; int ret; + /* If changing from an input to an output, we need to first set the + * proper data bit to what is requested and then set the OE bit. This + * prevents a glitch that can occur on the IO line + */ + regmap_read(priv->regmap, offset, &reg); + if (!(reg & TS4900_GPIO_OE)) { + if (value) + reg = TS4900_GPIO_OUT; + else + reg &= ~TS4900_GPIO_OUT; + + regmap_write(priv->regmap, offset, reg); + } + if (value) ret = regmap_write(priv->regmap, offset, TS4900_GPIO_OE | TS4900_GPIO_OUT); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 6f11714ce02396abcc0268019336d542080b2a2a..44ee319da1b357c9cd105419c5593683373d40d1 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -276,8 +276,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, pin = agpio->pin_table[0]; if (pin <= 255) { - char ev_name[5]; - sprintf(ev_name, "_%c%02hhX", + char ev_name[8]; + sprintf(ev_name, "_%c%02X", agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', pin); if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) @@ -969,10 +969,17 @@ int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int ind irq_flags = acpi_dev_get_irq_type(info.triggering, info.polarity); - /* Set type if specified and different than the current one */ - if (irq_flags != IRQ_TYPE_NONE && - irq_flags != irq_get_trigger_type(irq)) - irq_set_irq_type(irq, irq_flags); + /* + * If the IRQ is not already in use then set type + * if specified and different from the current one. 
+ */ + if (can_request_irq(irq, irq_flags)) { + if (irq_flags != IRQ_TYPE_NONE && + irq_flags != irq_get_trigger_type(irq)) + irq_set_irq_type(irq, irq_flags); + } else { + dev_dbg(&adev->dev, "IRQ %d already in use\n", irq); + } return irq; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index af5bb8fedfea78cfea00eb6d22f804f0b8a4a9b2..d18078748200933391555f6dccf834f06f22570c 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1411,6 +1411,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned offset) { struct irq_domain *domain = gc->irq.domain; +#ifdef CONFIG_GPIOLIB_IRQCHIP + /* + * Avoid race condition with other code, which tries to lookup + * an IRQ before the irqchip has been properly registered, + * i.e. while gpiochip is still being brought up. + */ + if (!gc->irq.initialized) + return -EPROBE_DEFER; +#endif + if (!gpiochip_irqchip_irq_valid(gc, offset)) return -ENXIO; @@ -1604,6 +1614,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc, acpi_gpiochip_request_interrupts(gc); + /* + * Using barrier() here to prevent compiler from reordering + * gc->irq.initialized before initialization of above + * GPIO chip irq members. + */ + barrier(); + + gc->irq.initialized = true; + return 0; } @@ -3215,6 +3234,16 @@ int gpiod_to_irq(const struct gpio_desc *desc) return retirq; } +#ifdef CONFIG_GPIOLIB_IRQCHIP + if (gc->irq.chip) { + /* + * Avoid race condition with other code, which tries to lookup + * an IRQ before the irqchip has been properly registered, + * i.e. while gpiochip is still being brought up. + */ + return -EPROBE_DEFER; + } +#endif return -ENXIO; } EXPORT_SYMBOL_GPL(gpiod_to_irq); diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h index 5b393622f59205700acf687179841e7e0b0b8f5b..a0f0a17e224fe554aeff766a5dc3dcc5b4b2144d 100644 --- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h +++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h @@ -119,6 +119,7 @@ #define CONNECTOR_OBJECT_ID_eDP 0x14 #define CONNECTOR_OBJECT_ID_MXM 0x15 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 +#define CONNECTOR_OBJECT_ID_USBC 0x17 /* deleted */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 26f8a21383774337ca09128072cc05ec170de53f..1b4c7ced8b92c78667d82f51aa83b0fd08932c5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1024,11 +1024,15 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, struct dma_fence **ef) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct drm_file *drm_priv = filp->private_data; - struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv; - struct amdgpu_vm *avm = &drv_priv->vm; + struct amdgpu_fpriv *drv_priv; + struct amdgpu_vm *avm; int ret; + ret = amdgpu_file_to_fpriv(filp, &drv_priv); + if (ret) + return ret; + avm = &drv_priv->vm; + /* Already a compute VM? 
*/ if (avm->process_info) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 0de66f59adb8ab0aaf4e555ea8220ae1716f45e9..df1f9b88a53f9fb04d5c20c5ad703a99a1208e45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -387,6 +387,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) native_mode->vdisplay != 0 && native_mode->clock != 0) { mode = drm_mode_duplicate(dev, native_mode); + if (!mode) + return NULL; + mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; drm_mode_set_name(mode); @@ -401,6 +404,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder) * simpler. */ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); + if (!mode) + return NULL; + mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 12598a4b5c788f5473d08aad318e1aa1a0b35508..867fcee6b0d3be9d160d1987ea2b817d4a283478 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1484,6 +1484,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, return 0; default: + dma_fence_put(fence); return -EINVAL; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index ed13a2f76884c9e93a40308287f6d71742e35cd5..30659c1776e81a1e0ede20624fcc1cea5de9ef9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -632,7 +632,7 @@ MODULE_PARM_DESC(sched_policy, * Maximum number of processes that HWS can schedule concurrently. The maximum is the * number of VMIDs assigned to the HWS, which is also the default. */ -int hws_max_conc_proc = 8; +int hws_max_conc_proc = -1; module_param(hws_max_conc_proc, int, 0444); MODULE_PARM_DESC(hws_max_conc_proc, "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 9f9f55a2b257cb705a729654c62209ea0d0c83de..f84582b70d0edcc61cd2fe8fbcd92109002d20d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -263,7 +263,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev, * adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; - while (queue_bit-- >= 0) { + while (--queue_bit >= 0) { if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap)) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 2f70fdd6104f20df7192687ed1fbe28028c5f4da..582055136cdbfc9c7765fe696fbcc444437c7e0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -267,7 +267,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev) if (!amdgpu_device_has_dc_support(adev)) { if (!adev->enable_virtual_display) /* Disable vblank IRQs aggressively for power-saving */ - /* XXX: can this be enabled for DC? 
*/ adev_to_drm(adev)->vblank_disable_immediate = true; r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index efda38349a0322f108c2eb7d1abd2a4fa85d233a..917b94002f4b70170cc04616670fd55d7e9b9817 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -766,9 +766,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.high_va_offset = AMDGPU_GMC_HOLE_END; dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size; } - dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); + dev_info.virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE; - dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; + dev_info.gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); dev_info.cu_active_number = adev->gfx.cu_info.number; dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask; dev_info.ce_ram_size = adev->gfx.ce_ram_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index ad9863b84f1fcca28984b9a5c8b74865330339f1..f615ecc06a22306d768786a0abba982d032beede 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1338,7 +1338,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) return; - dma_resv_lock(bo->base.resv, NULL); + if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv))) + return; r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence); if (!WARN_ON(r)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5207ad654f18e9e8afd96f7155044a515dc45a77..0b162928a248b971c2baf0e846c3811325b09fd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2120,7 +2120,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, unsigned i; int r; - if (direct_submit && !ring->sched.ready) { + if (!direct_submit && !ring->sched.ready) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index b47829ff30af79237cad7cdf2b463d6b583b0e5b..635601d8b131052c50ed1f2bd7d5463c0348be18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -715,11 +715,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Check if all VM PDs/PTs are ready for updates * * Returns: - * True if eviction list is empty. + * True if VM is not evicting. 
*/ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - return list_empty(&vm->evicted); + bool ret; + + amdgpu_vm_eviction_lock(vm); + ret = !vm->evicting; + amdgpu_vm_eviction_unlock(vm); + + return ret && list_empty(&vm->evicted); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index c36258d56b4455ca3b6270be5216fcc4029c815d..28c4e1fe5cd4cc2d401732917ee8a2c650539bfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1354,7 +1354,11 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) return r; } +#if IS_ENABLED(CONFIG_SW64) + memset_io(hpd, 0, mec_hpd_size); +#else memset(hpd, 0, mec_hpd_size); +#endif amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); @@ -4649,7 +4653,11 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } else { +#if IS_ENABLED(CONFIG_SW64) + memset_io((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); +#else memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); +#endif ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; mutex_lock(&adev->srbm_mutex); @@ -4660,7 +4668,11 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) mutex_unlock(&adev->srbm_mutex); if (adev->gfx.mec.mqd_backup[mqd_idx]) +#if IS_ENABLED(CONFIG_SW64) + memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation)); +#else memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation)); +#endif } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 8dd7587fea2667712942b1ab882fb1d02c8242ae..c621ebd9003101c0fc0fdc44e2d236c730e32346 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1248,6 +1248,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 }, /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 }, + /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */ + { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 }, { 0, 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9ab65ca7df777f02c519b3c2d3b3a9cee45eac3f..873bc33912e23e74fee63e8404537dfb942a8dd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -524,10 +524,10 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) static int gmc_v8_0_mc_init(struct amdgpu_device *adev) { int r; + u32 tmp; adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev); if (!adev->gmc.vram_width) { - u32 tmp; int chansize, numchan; /* Get VRAM information */ @@ -571,8 +571,15 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) adev->gmc.vram_width = numchan * chansize; } /* size in MB on si */ - adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; - adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; + tmp = RREG32(mmCONFIG_MEMSIZE); + /* some boards may have garbage in the upper 16 bits */ + if (tmp & 0xffff0000) { + DRM_INFO("Probable bad vram size: 0x%08x\n", tmp); + if (tmp & 0xffff) + tmp &= 0xffff; + } + adev->gmc.mc_vram_size = tmp * 1024ULL * 1024ULL; + adev->gmc.real_vram_size = adev->gmc.mc_vram_size; if (!(adev->flags & AMD_IS_APU)) { r 
= amdgpu_device_resize_fb_bar(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 37226cbbbd11a4c3d79dbe41ac9b581eb6f049e0..7212b9900e0abafdf8386235789ec6f25167384d 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1194,8 +1194,11 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; + /* + * MMHUB PG needs to be disabled for Picasso for + * stability reasons. + */ adev->pg_flags = AMD_PG_SUPPORT_SDMA | - AMD_PG_SUPPORT_MMHUB | AMD_PG_SUPPORT_VCN; } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 2099f6ebd83386873738719dcfab641cc8a94362..bdb8e596bda6abd27574d3daa56f75335fb4fd64 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1429,8 +1429,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx) { + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; uint32_t tmp; + vcn_v3_0_pause_dpg_mode(adev, 0, &state); + /* Wait for power status to be 1 */ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 31d793ee0836e1e32f7eb447c3ff58bd97b62e75..86b4dadf772e3dba438a0998101b028b06177bec 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -784,7 +784,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size) /* Fetch the CRAT table from ACPI */ status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table); if (status == AE_NOT_FOUND) { - pr_warn("CRAT table not found\n"); + pr_info("CRAT table not found\n"); return -ENODATA; } else if (ACPI_FAILURE(status)) { const char *err = acpi_format_exception(status); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 84313135c2eae0f6ce5134a2ce2e6c387f40f2a7..148e43dee657ac0217306a2ba519f2150873965a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -664,15 +664,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, - kfd->vm_info.first_vmid_kfd + 1; /* Verify module parameters regarding mapped process number*/ - if ((hws_max_conc_proc < 0) - || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) { - dev_err(kfd_device, - "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n", - hws_max_conc_proc, kfd->vm_info.vmid_num_kfd, - kfd->vm_info.vmid_num_kfd); + if (hws_max_conc_proc >= 0) + kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd); + else kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd; - } else - kfd->max_proc_per_quantum = hws_max_conc_proc; /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index ba2c2ce0c55afc43b33519a9eec48f079e200e34..159be13ef20bb63629a3e9ad30f1ab5d0741bcdf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -531,6 +531,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events) event_waiters = kmalloc_array(num_events, sizeof(struct kfd_event_waiter), GFP_KERNEL); + if (!event_waiters) + return NULL; for (i = 0; 
(event_waiters) && (i < num_events) ; i++) { init_wait(&event_waiters[i].wait); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index 17d1736367ea3e7ab9dddb123594434b57504b2e..bd4caa36ab2e2c26602aafafb69766b28ec522ee 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -270,15 +270,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) return ret; } - ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, - O_RDWR); - if (ret < 0) { - kfifo_free(&client->fifo); - kfree(client); - return ret; - } - *fd = ret; - init_waitqueue_head(&client->wait_queue); spin_lock_init(&client->lock); client->events = 0; @@ -288,5 +279,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd) list_add_rcu(&client->list, &dev->smi_clients); spin_unlock(&dev->smi_lock); + ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client, + O_RDWR); + if (ret < 0) { + spin_lock(&dev->smi_lock); + list_del_rcu(&client->list); + spin_unlock(&dev->smi_lock); + + synchronize_rcu(); + + kfifo_free(&client->fifo); + kfree(client); + return ret; + } + *fd = ret; + return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index a5b6f36fe1d722226f16f7e0f7f219bd9299cf97..7bb151283f44bb894f91b6f61c70fd1f243c8083 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1069,6 +1069,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; + /* Disable vblank IRQs aggressively for power-saving */ + adev_to_drm(adev)->vblank_disable_immediate = true; + if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); @@ -2019,7 +2022,8 @@ static int dm_resume(void *handle) * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->mst_port) + if (aconnector->dc_link && + aconnector->dc_link->type == dc_connection_mst_branch) continue; mutex_lock(&aconnector->hpd_lock); @@ -6393,6 +6397,9 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, mode = amdgpu_dm_create_common_mode(encoder, common_modes[i].name, common_modes[i].w, common_modes[i].h); + if (!mode) + continue; + drm_mode_probed_add(connector, mode); amdgpu_dm_connector->num_modes++; } @@ -8609,10 +8616,13 @@ static int dm_update_plane_state(struct dc *dc, static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) { struct drm_connector *connector; - struct drm_connector_state *conn_state; + struct drm_connector_state *conn_state, *old_conn_state; struct amdgpu_dm_connector *aconnector = NULL; int i; - for_each_new_connector_in_state(state, connector, conn_state, i) { + for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { + if (!conn_state->crtc) + conn_state = old_conn_state; + if (conn_state->crtc != crtc) continue; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 284ed1c8a35acef795887ec7b2abc42fc9cf2751..93f5229c303e7bc484244f71bfd03edd5c6a51af 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ 
-2436,7 +2436,8 @@ static void commit_planes_for_stream(struct dc *dc, } if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) - if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { + if (top_pipe_to_program && + top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { if (should_use_dmub_lock(stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index a7f8caf1086b95a62f15aa39b08c2ca807eb306c..0e359a299f9ecb68198cf1965918f7713da41401 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -3587,6 +3587,26 @@ static bool retrieve_link_cap(struct dc_link *link) dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); + /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */ + { + uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; + uint8_t fwrev_mbp_2018[] = { 7, 4 }; + uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; + + /* We also check for the firmware revision as 16,1 models have an + * identical device id and are incorrectly quirked otherwise. + */ + if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && + !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, + sizeof(str_mbp_2018)) && + (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, + sizeof(fwrev_mbp_2018)) || + !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, + sizeof(fwrev_mbp_2018_vega)))) { + link->reported_link_cap.link_rate = LINK_RATE_RBR2; + } + } + memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 5f4cdb05c4db90ebe0efa7076095beab2bc2fca9..1e47afc4ccc1deea8efb4edc265f8f626588bde7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1674,6 +1674,9 @@ static bool are_stream_backends_same( if (is_timing_changed(stream_a, stream_b)) return false; + if (stream_a->signal != stream_b->signal) + return false; + if (stream_a->dpms_off != stream_b->dpms_off) return false; @@ -1698,8 +1701,8 @@ bool dc_is_stream_unchanged( if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) return false; - // Only Have Audio left to check whether it is same or not. 
This is a corner case for Tiled sinks - if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) + /* compare audio info */ + if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0) return false; return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 532f6a1145b55772a11d4bbe568d85df842c4db0..31a13daf4289c44df5422caf2c3d78982cbb00ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2387,14 +2387,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) &blnd_cfg.black_color); } - if (per_pixel_alpha) - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; - else - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; - blnd_cfg.overlap_only = false; blnd_cfg.global_gain = 0xff; + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else if (per_pixel_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + if (pipe_ctx->plane_state->global_alpha) blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 79a2b9c785f05820bb1435cb333a6f75e0c3cdca..3d778760a3b55bdf3ebb024246efa3bed7224eef 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2270,14 +2270,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx, &blnd_cfg.black_color); } - if (per_pixel_alpha) - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; - else - blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; - blnd_cfg.overlap_only = false; blnd_cfg.global_gain = 0xff; + if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else if (per_pixel_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + if (pipe_ctx->plane_state->global_alpha) blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; else diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 0e0f494fbb5e138b1f739cd664252d2899c2fc12..b037fd57fd366ce2237592ebf2e0b03928f53bf4 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -227,14 +227,6 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = { .funcs = &pflip_irq_info_funcs\ } -#define vupdate_int_entry(reg_num)\ - [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\ - IRQ_REG_ENTRY(OTG, reg_num,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\ - OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\ - .funcs = &vblank_irq_info_funcs\ - } - /* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the semantics 
*/ @@ -348,12 +340,6 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = { dc_underflow_int_entry(6), [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(), [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(), - vupdate_int_entry(0), - vupdate_int_entry(1), - vupdate_int_entry(2), - vupdate_int_entry(3), - vupdate_int_entry(4), - vupdate_int_entry(5), vupdate_no_lock_int_entry(0), vupdate_no_lock_int_entry(1), vupdate_no_lock_int_entry(2), diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 0fdf7a3e96deaca5c5e5a0f0f9fb3cf1b04d6d95..96e18050a6175e70fa040e49db33ffc64259c44b 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -100,7 +100,8 @@ enum vsc_packet_revision { //PB7 = MD0 #define MASK_VTEM_MD0__VRR_EN 0x01 #define MASK_VTEM_MD0__M_CONST 0x02 -#define MASK_VTEM_MD0__RESERVED2 0x0C +#define MASK_VTEM_MD0__QMS_EN 0x04 +#define MASK_VTEM_MD0__RESERVED2 0x08 #define MASK_VTEM_MD0__FVA_FACTOR_M1 0xF0 //MD1 @@ -109,7 +110,7 @@ enum vsc_packet_revision { //MD2 #define MASK_VTEM_MD2__BASE_REFRESH_RATE_98 0x03 #define MASK_VTEM_MD2__RB 0x04 -#define MASK_VTEM_MD2__RESERVED3 0xF8 +#define MASK_VTEM_MD2__NEXT_TFR 0xF8 //MD3 #define MASK_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 9f383b9041d28c4e585060792974cb7bcdc16076..5abb68017f6ed6df2165114a36881264be9c4437 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2098,6 +2098,12 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } } + /* setting should not be allowed from VF if not in one VF mode */ + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { + dev_attr->attr.mode &= ~S_IWUGO; + dev_attr->store = NULL; + } + #undef DEVICE_ATTR_IS return 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index e6f40ee9f31340ae05cbd00c3f48c93ee56d9366..9d97938bd49eff7c6bedf2589153418f15224023 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -709,13 +709,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, hwmgr->display_config->num_display > 3 ? 
- data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk : + (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) : min_mclk, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, - data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk, + data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinVcn, @@ -728,11 +728,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk, + data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxSocclkByFreq, - data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk, + data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxVcn, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index e5893218fa4bb5c62fcd7b76a9336b5645527565..ee27970cfff952d38b5d02cd8e368f8851d8ae57 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -115,7 +115,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu, uint32_t *min, uint32_t *max) { - int ret = 0; + int ret = -ENOTSUPP; if (!min && !max) return -EINVAL; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index a9bb734366ae6ffa1172b394b851047170ff7e2c..a0f6ee15c24859a48f39c7120ad36a48ca28eec0 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -169,6 +169,7 @@ #define ADV7511_PACKET_ENABLE_SPARE2 BIT(1) #define ADV7511_PACKET_ENABLE_SPARE1 BIT(0) +#define ADV7535_REG_POWER2_HPD_OVERRIDE BIT(6) #define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0 #define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00 #define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40 diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index a0d392c338da5241744835e0ed3d89ba14440e8b..c6f059be4b8976a38dfb169df9e652da1bbc5c81 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -351,11 +351,17 @@ static void __adv7511_power_on(struct adv7511 *adv7511) * from standby or are enabled. When the HPD goes low the adv7511 is * reset and the outputs are disabled which might cause the monitor to * go to standby again. To avoid this we ignore the HPD pin for the - * first few seconds after enabling the output. + * first few seconds after enabling the output. On the other hand, + * the ADV7535 requires the HPD Override bit to be enabled for proper HPD. 
*/ - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HPD_SRC_MASK, - ADV7511_REG_POWER2_HPD_SRC_NONE); + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, + ADV7535_REG_POWER2_HPD_OVERRIDE); + else + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_NONE); } static void adv7511_power_on(struct adv7511 *adv7511) @@ -375,6 +381,10 @@ static void adv7511_power_on(struct adv7511 *adv7511) static void __adv7511_power_off(struct adv7511 *adv7511) { /* TODO: setup additional power down modes */ + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, 0); + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN); @@ -672,9 +682,14 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector) status = connector_status_disconnected; } else { /* Re-enable HPD sensing */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HPD_SRC_MASK, - ADV7511_REG_POWER2_HPD_SRC_BOTH); + if (adv7511->type == ADV7535) + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7535_REG_POWER2_HPD_OVERRIDE, + ADV7535_REG_POWER2_HPD_OVERRIDE); + else + regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_BOTH); } adv7511->status = status; diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c index 914c569ab8c15d10e58e3194f7b4b921816cdb93..cab3f5c4e2fc8368e8819fc5fc27d6cae1df16c9 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -1086,11 +1086,21 @@ int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, if (!blocking) return 0; + /* + * db[1]!=0: entering PSR, wait for fully active remote frame buffer. + * db[1]==0: exiting PSR, wait for either + * (a) ACTIVE_RESYNC - the sink "must display the + * incoming active frames from the Source device with no visible + * glitches and/or artifacts", even though timings may still be + * re-synchronizing; or + * (b) INACTIVE - the transition is fully complete. 
+ */ ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status, psr_status >= 0 && ((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) || - (!vsc->db[1] && psr_status == DP_PSR_SINK_INACTIVE)), 1500, - DP_TIMEOUT_PSR_LOOP_MS * 1000); + (!vsc->db[1] && (psr_status == DP_PSR_SINK_ACTIVE_RESYNC || + psr_status == DP_PSR_SINK_INACTIVE))), + 1500, DP_TIMEOUT_PSR_LOOP_MS * 1000); if (ret) { dev_warn(dp->dev, "Failed to apply PSR %d\n", ret); return ret; diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c index b31281f76117c02335393fc9b447a6e166b2570b..0ced08d81d7a262dc3c36af5b1f81ca460be2864 100644 --- a/drivers/gpu/drm/bridge/cdns-dsi.c +++ b/drivers/gpu/drm/bridge/cdns-dsi.c @@ -1286,6 +1286,7 @@ static const struct of_device_id cdns_dsi_of_match[] = { { .compatible = "cdns,dsi" }, { }, }; +MODULE_DEVICE_TABLE(of, cdns_dsi_of_match); static struct platform_driver cdns_dsi_platform_driver = { .probe = cdns_dsi_drm_probe, diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index 4d278573cdb99817bab4e58705fedcd9f43f8949..544a47335cac4a8f130af61c4c03467c7205cb3a 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -104,7 +104,7 @@ static int display_connector_probe(struct platform_device *pdev) { struct display_connector *conn; unsigned int type; - const char *label; + const char *label = NULL; int ret; conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL); diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index d2808c4a6fb1cfa12ddaab5e4ada9ae578955bb7..cce98bf2a4e7324f85c1d4e113e44ae80abed7a7 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -306,19 +306,10 @@ static void ge_b850v3_lvds_remove(void) mutex_unlock(&ge_b850v3_lvds_dev_mutex); } -static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, - const struct i2c_device_id *id) +static int ge_b850v3_register(void) { + struct i2c_client *stdp4028_i2c = ge_b850v3_lvds_ptr->stdp4028_i2c; struct device *dev = &stdp4028_i2c->dev; - int ret; - - ret = ge_b850v3_lvds_init(dev); - - if (ret) - return ret; - - ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c; - i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr); /* drm bridge initialization */ ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs; @@ -343,6 +334,27 @@ static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, "ge-b850v3-lvds-dp", ge_b850v3_lvds_ptr); } +static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c, + const struct i2c_device_id *id) +{ + struct device *dev = &stdp4028_i2c->dev; + int ret; + + ret = ge_b850v3_lvds_init(dev); + + if (ret) + return ret; + + ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c; + i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr); + + /* Only register after both bridges are probed */ + if (!ge_b850v3_lvds_ptr->stdp2690_i2c) + return 0; + + return ge_b850v3_register(); +} + static int stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c) { ge_b850v3_lvds_remove(); @@ -386,7 +398,11 @@ static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c, ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c; i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr); - return 0; + /* Only register after both bridges are probed */ + if (!ge_b850v3_lvds_ptr->stdp4028_i2c) + return 0; + + return 
ge_b850v3_register(); } static int stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c) diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index 6cac2e58cd15fb15e51b7026aca23ac9200c8421..b68d335981588e42d884959a2e5fc4c1d0b1af10 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -1188,6 +1188,7 @@ static int nwl_dsi_probe(struct platform_device *pdev) ret = nwl_dsi_select_input(dsi); if (ret < 0) { + pm_runtime_disable(dev); mipi_dsi_host_unregister(&dsi->dsi_host); return ret; } diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 843265d7f1b123b3dee5c07cc4830314b8168c4e..ec7745c31da07aaccdbde0a90cd220b6dd441e19 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -2120,7 +2120,7 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx) if (ret) { dev_err(ctx->dev, "Failed to register RC device\n"); ctx->error = ret; - rc_free_device(ctx->rc_dev); + rc_free_device(rc_dev); return; } ctx->rc_dev = rc_dev; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index d0db1acf11d7319ec722f33202cb91db27971620..7d2ed0ed2fe26c75cebb8f484028ec2be13973ab 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -320,13 +320,17 @@ static int dw_hdmi_open(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime = substream->runtime; struct snd_dw_hdmi *dw = substream->private_data; void __iomem *base = dw->data.base; + u8 *eld; int ret; runtime->hw = dw_hdmi_hw; - ret = snd_pcm_hw_constraint_eld(runtime, dw->data.eld); - if (ret < 0) - return ret; + eld = dw->data.get_eld(dw->data.hdmi); + if (eld) { + ret = snd_pcm_hw_constraint_eld(runtime, eld); + if (ret < 0) + return ret; + } ret = snd_pcm_limit_hw_rates(runtime); if (ret < 0) diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h index cb07dc0da5a70462972ae0592370b6ef22a35e16..f72d27208ebef5fb14825df3afa11a772be32ce5 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h @@ -9,15 +9,15 @@ struct dw_hdmi_audio_data { void __iomem *base; int irq; struct dw_hdmi *hdmi; - u8 *eld; + u8 *(*get_eld)(struct dw_hdmi *hdmi); }; struct dw_hdmi_i2s_audio_data { struct dw_hdmi *hdmi; - u8 *eld; void (*write)(struct dw_hdmi *hdmi, u8 val, int offset); u8 (*read)(struct dw_hdmi *hdmi, int offset); + u8 *(*get_eld)(struct dw_hdmi *hdmi); }; #endif diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c index 9fef6413741dcbd674d78cae5d5478aa0bb63ce0..9682416056ed61929ab81e348152025932360756 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c @@ -135,8 +135,15 @@ static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) { struct dw_hdmi_i2s_audio_data *audio = data; + u8 *eld; + + eld = audio->get_eld(audio->hdmi); + if (eld) + memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len)); + else + /* Pass an empty ELD if the connector is not available */ + memset(buf, 0, len); - memcpy(buf, audio->eld, min_t(size_t, MAX_ELD_BYTES, len)); return 0; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 
0c79a9ba48bb6407d7a001b2e03cca139d4ed28d..b10228b9e3a93bf4144ce6d6d3d818907edf9b92 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -756,6 +756,14 @@ static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable) hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); } +static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi) +{ + if (!hdmi->curr_conn) + return NULL; + + return hdmi->curr_conn->eld; +} + static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi) { hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n); @@ -2558,8 +2566,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, if (!output_fmts) return NULL; - /* If dw-hdmi is the only bridge, avoid negociating with ourselves */ - if (list_is_singular(&bridge->encoder->bridge_chain)) { + /* If dw-hdmi is the first or only bridge, avoid negotiating with ourselves */ + if (list_is_singular(&bridge->encoder->bridge_chain) || + list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) { *num_output_fmts = 1; output_fmts[0] = MEDIA_BUS_FMT_FIXED; @@ -3395,7 +3404,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, audio.base = hdmi->regs; audio.irq = irq; audio.hdmi = hdmi; - audio.eld = hdmi->connector.eld; + audio.get_eld = hdmi_audio_get_eld; hdmi->enable_audio = dw_hdmi_ahb_audio_enable; hdmi->disable_audio = dw_hdmi_ahb_audio_disable; @@ -3408,7 +3417,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, struct dw_hdmi_i2s_audio_data audio; audio.hdmi = hdmi; - audio.eld = hdmi->connector.eld; + audio.get_eld = hdmi_audio_get_eld; audio.write = hdmi_writeb; audio.read = hdmi_readb; hdmi->enable_audio = dw_hdmi_i2s_audio_enable; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index 6b268f9445b36aea903d13fdd8c76822d50c66dc..376fa6eb46f6978afff4fd4d4b63c37c4818a8ad 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -1172,6 +1172,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev, ret = mipi_dsi_host_register(&dsi->dsi_host); if (ret) { dev_err(dev, "Failed to register MIPI host: %d\n", ret); + pm_runtime_disable(dev); dw_mipi_dsi_debugfs_remove(dsi); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index ecdf9b01340f5e6413f10c26054a722560ea72d2..1a58481037b3fec5b2b762031855f7856de619f4 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -171,6 +171,7 @@ static const struct regmap_config ti_sn_bridge_regmap_config = { .val_bits = 8, .volatile_table = &ti_sn_bridge_volatile_table, .cache_type = REGCACHE_NONE, + .max_register = 0xFF, }; static void ti_sn_bridge_write_u16(struct ti_sn_bridge *pdata, diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 717c4e7271b0422692c485d8c8cb426de0bc006f..5163433ac561b884e2dfd60f1aa4509c9399b5c3 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2155,6 +2155,9 @@ EXPORT_SYMBOL(drm_connector_attach_max_bpc_property); void drm_connector_set_vrr_capable_property( struct drm_connector *connector, bool capable) { + if (!connector->vrr_capable_property) + return; + drm_object_property_set_value(&connector->base, connector->vrr_capable_property, capable); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 
cd162d406078aa6d6aa802e5f90ee6124be61b4b..006e3b896caea7ed28a9c3df19bb5b134eef9ab9 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -577,6 +577,7 @@ static int drm_dev_init(struct drm_device *dev, struct drm_driver *driver, struct device *parent) { + struct inode *inode; int ret; if (!drm_core_init_complete) { @@ -613,13 +614,15 @@ static int drm_dev_init(struct drm_device *dev, if (ret) return ret; - dev->anon_inode = drm_fs_inode_new(); - if (IS_ERR(dev->anon_inode)) { - ret = PTR_ERR(dev->anon_inode); + inode = drm_fs_inode_new(); + if (IS_ERR(inode)) { + ret = PTR_ERR(inode); DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret); goto err; } + dev->anon_inode = inode; + if (drm_core_check_feature(dev, DRIVER_RENDER)) { ret = drm_minor_alloc(dev, DRM_MINOR_RENDER); if (ret) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index add317bd8d55c4c423c239aefa4d66f472235f33..862e173d34315e7d3ccadaa2b7d604ce07d34fc2 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -4806,7 +4806,8 @@ bool drm_detect_monitor_audio(struct edid *edid) if (!edid_ext) goto end; - has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0); + has_audio = (edid_ext[0] == CEA_EXT && + (edid_ext[3] & EDID_BASIC_AUDIO) != 0); if (has_audio) { DRM_DEBUG_KMS("Monitor has basic audio support\n"); @@ -4959,16 +4960,8 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector, connector->name, dc_bpc); info->bpc = dc_bpc; - /* - * Deep color support mandates RGB444 support for all video - * modes and forbids YCRCB422 support for all video modes per - * HDMI 1.3 spec. - */ - info->color_formats = DRM_COLOR_FORMAT_RGB444; - /* YCRCB444 is optional according to spec. */ if (hdmi[6] & DRM_EDID_HDMI_DC_Y444) { - info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; DRM_DEBUG("%s: HDMI sink does YCRCB444 in deep color.\n", connector->name); } @@ -5132,6 +5125,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) return quirks; + info->color_formats |= DRM_COLOR_FORMAT_RGB444; drm_parse_cea_ext(connector, edid); /* @@ -5180,7 +5174,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n", connector->name, info->bpc); - info->color_formats |= DRM_COLOR_FORMAT_RGB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444) info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index a950d5db211c51547fbb3f4fa98a4cb68d41af64..f5ab891731d0b3821ed123c4e5548bace813cc9c 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -115,6 +115,12 @@ static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = { + .width = 1600, + .height = 2560, + .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP, +}; + static const struct dmi_system_id orientation_data[] = { { /* Acer One 10 (S1003) */ .matches = { @@ -160,6 +166,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"), }, .driver_data = (void *)&lcd720x1280_rightside_up, + }, { /* GPD Win Max */ + .matches = { + 
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1619-01"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* * GPD Pocket, note that the DMI data is less generic than * it seems, devices with a board-vendor of "AMI Corporation" @@ -248,6 +260,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Lenovo Yoga Book X90F / X91F / X91L */ + .matches = { + /* Non-exact match to match all versions */ + DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* OneGX1 Pro */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"), @@ -255,6 +273,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"), }, .driver_data = (void *)&onegx1_pro, + }, { /* OneXPlayer */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"), + }, + .driver_data = (void *)&lcd1600x2560_leftside_up, }, { /* Samsung GalaxyBook 10.6 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 5f24cc52c2878660179a73e656c226c8db30bc09..ddf539f26f2dad3891fac462060b60ea7f7317c7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -469,6 +469,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, return -EINVAL; } + if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K || + args->nr_bos > SZ_128K || args->nr_pmrs > 128) { + DRM_ERROR("submit arguments out of size limits\n"); + return -EINVAL; + } + /* * Copy the command submission and bo array to kernel space in * one go, and do this outside of any locks. 
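The etnaviv hunk above illustrates a standard ioctl-hardening pattern: bound every userspace-controlled count before it is used to size a kernel allocation, so a hostile submit can neither trigger an enormous allocation nor overflow the size computation. The sketch below restates that idea as a standalone C program; the struct, the helper, and the 128K cap are illustrative stand-ins, not etnaviv's actual interface.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ENTRIES (128u * 1024u)	/* mirrors the SZ_128K cap above */

/* Hypothetical stand-in for the userspace-supplied ioctl arguments. */
struct fake_submit_args {
	uint32_t nr_bos;
	uint32_t nr_relocs;
};

/*
 * Validate the counts first, then compute the allocation size with an
 * explicit overflow check. Only after both checks pass is memory sized
 * from user-controlled values.
 */
static int submit_alloc(const struct fake_submit_args *args,
			size_t elem_size, void **out)
{
	size_t total;

	if (args->nr_bos > MAX_ENTRIES || args->nr_relocs > MAX_ENTRIES)
		return -EINVAL;

	total = (size_t)args->nr_bos + args->nr_relocs;
	if (elem_size && total > SIZE_MAX / elem_size)
		return -EINVAL;	/* multiplication would overflow */

	*out = malloc(total * elem_size);
	return *out ? 0 : -ENOMEM;
}

int main(void)
{
	struct fake_submit_args ok = { .nr_bos = 16, .nr_relocs = 4 };
	struct fake_submit_args bad = { .nr_bos = UINT32_MAX, .nr_relocs = 0 };
	void *buf = NULL;

	printf("ok:  %d\n", submit_alloc(&ok, 64, &buf));	/* prints 0 */
	free(buf);
	printf("bad: %d\n", submit_alloc(&bad, 64, &buf));	/* prints -EINVAL */
	return 0;
}

The in-kernel check does the same thing one step earlier: by rejecting oversized counts outright, the subsequent copy-to-kernel of the command stream and bo array never has to reason about overflow at all.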
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 1c75c8ed5bcea2c5bec1d923ed35fc9a78884c67..85eddd492774d59c34189f6768e24e3f42dd8766 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -130,6 +130,7 @@ struct etnaviv_gpu { /* hang detection */ u32 hangcheck_dma_addr; + u32 hangcheck_fence; void __iomem *mmio; int irq; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index cd46c882269cc959070822de22ea9a61d09cf544..026b6c0731198093cb33b444aa85c688fafcc949 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -106,8 +106,10 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) */ dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS); change = dma_addr - gpu->hangcheck_dma_addr; - if (change < 0 || change > 16) { + if (gpu->completed_fence != gpu->hangcheck_fence || + change < 0 || change > 16) { gpu->hangcheck_dma_addr = dma_addr; + gpu->hangcheck_fence = gpu->completed_fence; goto out_no_timeout; } diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 1e1cb245fca778e71d0553cb15e66693f1f0f2c6..8eb9bf3a1617e6bed7bf08e16ea914969a58a12c 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -100,6 +100,7 @@ config DRM_I915_USERPTR config DRM_I915_GVT bool "Enable Intel GVT-g graphics virtualization host support" depends on DRM_I915 + depends on X86 depends on 64BIT default n help diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index de995362f42832206be9f512aa6f0effc68cda6e..abff2d6cedd12ca99ecb9004635d5d20026d1240 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -361,6 +361,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, port++; } + /* + * The port numbering and mapping here is bizarre. The now-obsolete + * swsci spec supports ports numbered [0..4]. Port E is handled as a + * special case, but port F and beyond are not. The functionality is + * supposed to be obsolete for new platforms. Just bail out if the port + * number is out of bounds after mapping. 
+ */ + if (port > 4) { + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n", + intel_encoder->base.base.id, intel_encoder->base.name, + port_name(intel_encoder->port), port); + return -EINVAL; + } + if (!enable) parm |= 4 << 8; diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 0e60aec0bb19122a161899f6190a54f752454ca0..b561e9e00153e6ee0140fb380995cbd69078feac 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -932,6 +932,9 @@ static int check_overlay_dst(struct intel_overlay *overlay, const struct intel_crtc_state *pipe_config = overlay->crtc->config; + if (rec->dst_height == 0 || rec->dst_width == 0) + return -EINVAL; + if (rec->dst_x < pipe_config->pipe_src_w && rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w && rec->dst_y < pipe_config->pipe_src_h && diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 5754bccff4d15030dd1a81d10a222e0b29c1fc2b..92dd65befbcb833244edfe0b88f3a1082e54ae68 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -423,7 +423,7 @@ vm_access(struct vm_area_struct *area, unsigned long addr, return -EACCES; addr -= area->vm_start; - if (addr >= obj->base.size) + if (range_overflows_t(u64, addr, len, obj->base.size)) return -EINVAL; /* As this is primarily for debugging, let's focus on simplicity */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index d6711caa7f399564120d7eb9dca039bcf3183bd0..dbc88fc7136bfeaff9916635a063bf0ae5928f27 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -159,6 +159,7 @@ struct drm_i915_gem_object { #define I915_BO_ALLOC_VOLATILE BIT(1) #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE) #define I915_BO_READONLY BIT(2) +#define I915_BO_WAS_BOUND_BIT 3 /* * Is the object to be mapped as read-only to the GPU diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index f60ca6dc911f29d7d8881b566bc51f11e4554bb4..27d24cb38c0d2c42b406160e8b95f6189202f6c9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -10,6 +10,8 @@ #include "i915_gem_lmem.h" #include "i915_gem_mman.h" +#include "gt/intel_gt.h" + void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages, unsigned int sg_page_sizes) @@ -186,6 +188,14 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) __i915_gem_object_reset_page_iter(obj); obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; + if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + intel_wakeref_t wakeref; + + with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref) + intel_gt_invalidate_tlbs(&i915->gt); + } + return pages; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 39b428c5049c04f1dbb27a917e20c58b015acd03..6615eb5147e234795b8014cc9846aa794c1823c9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -26,6 +26,8 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) spin_lock_init(&gt->irq_lock); + mutex_init(&gt->tlb_invalidate_lock); + 
INIT_LIST_HEAD(>->closed_vma); spin_lock_init(>->closed_lock); @@ -661,3 +663,103 @@ void intel_gt_info_print(const struct intel_gt_info *info, intel_sseu_dump(&info->sseu, p); } + +struct reg_and_bit { + i915_reg_t reg; + u32 bit; +}; + +static struct reg_and_bit +get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8, + const i915_reg_t *regs, const unsigned int num) +{ + const unsigned int class = engine->class; + struct reg_and_bit rb = { }; + + if (drm_WARN_ON_ONCE(&engine->i915->drm, + class >= num || !regs[class].reg)) + return rb; + + rb.reg = regs[class]; + if (gen8 && class == VIDEO_DECODE_CLASS) + rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */ + else + rb.bit = engine->instance; + + rb.bit = BIT(rb.bit); + + return rb; +} + +void intel_gt_invalidate_tlbs(struct intel_gt *gt) +{ + static const i915_reg_t gen8_regs[] = { + [RENDER_CLASS] = GEN8_RTCR, + [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */ + [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR, + [COPY_ENGINE_CLASS] = GEN8_BTCR, + }; + static const i915_reg_t gen12_regs[] = { + [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR, + [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR, + [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR, + [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR, + }; + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + struct intel_engine_cs *engine; + enum intel_engine_id id; + const i915_reg_t *regs; + unsigned int num = 0; + + if (I915_SELFTEST_ONLY(gt->awake == -ENODEV)) + return; + + if (INTEL_GEN(i915) == 12) { + regs = gen12_regs; + num = ARRAY_SIZE(gen12_regs); + } else if (INTEL_GEN(i915) >= 8 && INTEL_GEN(i915) <= 11) { + regs = gen8_regs; + num = ARRAY_SIZE(gen8_regs); + } else if (INTEL_GEN(i915) < 8) { + return; + } + + if (drm_WARN_ONCE(&i915->drm, !num, + "Platform does not implement TLB invalidation!")) + return; + + GEM_TRACE("\n"); + + assert_rpm_wakelock_held(&i915->runtime_pm); + + mutex_lock(>->tlb_invalidate_lock); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + for_each_engine(engine, gt, id) { + /* + * HW architecture suggest typical invalidation time at 40us, + * with pessimistic cases up to 100us and a recommendation to + * cap at 1ms. We go a bit higher just in case. 
+ */ + const unsigned int timeout_us = 100; + const unsigned int timeout_ms = 4; + struct reg_and_bit rb; + + rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num); + if (!i915_mmio_reg_offset(rb.reg)) + continue; + + intel_uncore_write_fw(uncore, rb.reg, rb.bit); + if (__intel_wait_for_register_fw(uncore, + rb.reg, rb.bit, 0, + timeout_us, timeout_ms, + NULL)) + drm_err_ratelimited(>->i915->drm, + "%s TLB invalidation did not complete in %ums!\n", + engine->name, timeout_ms); + } + + intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL); + mutex_unlock(>->tlb_invalidate_lock); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 9157c7411f60398cf6daabb029a7135cbd5b70fa..d9a1168172ae33af369c39e530a181c5f324f1cf 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -77,4 +77,6 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt) void intel_gt_info_print(const struct intel_gt_info *info, struct drm_printer *p); +void intel_gt_invalidate_tlbs(struct intel_gt *gt); + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 6d39a4a11bf3983e81a57915f129c23e6f28fa1e..78c061614d8bba9e7c6a32fcdb5e929877be7301 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -36,6 +36,8 @@ struct intel_gt { struct intel_uc uc; + struct mutex tlb_invalidate_lock; + struct intel_gt_timelines { spinlock_t lock; /* protects active_list */ struct list_head active_list; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ce8c91c5fdd3b4cc2285523a50c95da8e10e9237..12488996a7f4f759bb966f7da7320384b3d8b49c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2639,6 +2639,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28) #define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24) +#define GEN8_RTCR _MMIO(0x4260) +#define GEN8_M1TCR _MMIO(0x4264) +#define GEN8_M2TCR _MMIO(0x4268) +#define GEN8_BTCR _MMIO(0x426c) +#define GEN8_VTCR _MMIO(0x4270) + #if 0 #define PRB0_TAIL _MMIO(0x2030) #define PRB0_HEAD _MMIO(0x2034) @@ -2728,6 +2734,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define FAULT_VA_HIGH_BITS (0xf << 0) #define FAULT_GTT_SEL (1 << 4) +#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8) +#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc) +#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0) +#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4) + #define GEN12_AUX_ERR_DBG _MMIO(0x43f4) #define FPGA_DBG _MMIO(0x42300) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index caa9b041616b0d0903788589ad4a7ae7965339c6..9aa4a6ce9fbf0bd0a171cbc25c12440e54f753a3 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -230,7 +230,7 @@ vma_create(struct drm_i915_gem_object *obj, } static struct i915_vma * -vma_lookup(struct drm_i915_gem_object *obj, +i915_vma_lookup(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_ggtt_view *view) { @@ -278,7 +278,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj, GEM_BUG_ON(!atomic_read(&vm->open)); spin_lock(&obj->vma.lock); - vma = vma_lookup(obj, vm, view); + vma = i915_vma_lookup(obj, vm, view); spin_unlock(&obj->vma.lock); /* vma_create() will resolve the race if another creates the vma */ @@ -439,6 +439,9 @@ int i915_vma_bind(struct i915_vma *vma, 
vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags); } + if (vma->obj) + set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags); + atomic_or(bind_flags, &vma->flags); return 0; } diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index 6c97192e9ca87ea923d522ca0fcc75c40f7f0a79..a0d5e95234fd0a340405e3f51d39fda4e22271e6 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -110,6 +110,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) /* Comet Lake V PCH is based on KBP, which is SPT compatible */ return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: + case INTEL_PCH_ICP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); return PCH_ICP; @@ -124,7 +125,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) !IS_ROCKETLAKE(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: - case INTEL_PCH_JSP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv)); return PCH_JSP; diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 06d2cd50af0b9d4d8b28ee48a9d1fa23cdfd476f..49325022b3c9628b605723735043556e35296cfc 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -48,11 +48,11 @@ enum intel_pch { #define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680 #define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 +#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 #define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080 #define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380 #define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80 -#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1f23cb6ece58854a29a82f254b8250dc0b00399c..472aaea75ef84303ec901d4b81df104c25c15e3d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3044,9 +3044,9 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) * The BIOS provided WM memory latency values are often * inadequate for high resolution displays. Adjust them. 
*/ - changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | - ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | - ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); + changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); if (!changed) return; @@ -3996,6 +3996,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) return ret; } + if (intel_can_enable_sagv(dev_priv, new_bw_state) != + intel_can_enable_sagv(dev_priv, old_bw_state)) { + ret = intel_atomic_serialize_global_state(&new_bw_state->base); + if (ret) + return ret; + } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { + ret = intel_atomic_lock_global_state(&new_bw_state->base); + if (ret) + return ret; + } + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal; @@ -4010,17 +4021,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state) intel_can_enable_sagv(dev_priv, new_bw_state); } - if (intel_can_enable_sagv(dev_priv, new_bw_state) != - intel_can_enable_sagv(dev_priv, old_bw_state)) { - ret = intel_atomic_serialize_global_state(&new_bw_state->base); - if (ret) - return ret; - } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) { - ret = intel_atomic_lock_global_state(&new_bw_state->base); - if (ret) - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 97ded2a59cf4c50838148c09c0004013bcb091ea..01849840ac560064d5fab2b285c98d0f21e3f5a1 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -694,7 +694,8 @@ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, } static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, - enum forcewake_domains fw_domains) + enum forcewake_domains fw_domains, + bool delayed) { struct intel_uncore_forcewake_domain *domain; unsigned int tmp; @@ -709,7 +710,11 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, continue; } - uncore->funcs.force_wake_put(uncore, domain->mask); + if (delayed && + !(domain->uncore->fw_domains_timer & domain->mask)) + fw_domain_arm_timer(domain); + else + uncore->funcs.force_wake_put(uncore, domain->mask); } } @@ -730,7 +735,20 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore, return; spin_lock_irqsave(&uncore->lock, irqflags); - __intel_uncore_forcewake_put(uncore, fw_domains); + __intel_uncore_forcewake_put(uncore, fw_domains, false); + spin_unlock_irqrestore(&uncore->lock, irqflags); +} + +void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore, + enum forcewake_domains fw_domains) +{ + unsigned long irqflags; + + if (!uncore->funcs.force_wake_put) + return; + + spin_lock_irqsave(&uncore->lock, irqflags); + __intel_uncore_forcewake_put(uncore, fw_domains, true); spin_unlock_irqrestore(&uncore->lock, irqflags); } @@ -772,7 +790,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, if (!uncore->funcs.force_wake_put) return; - __intel_uncore_forcewake_put(uncore, fw_domains); + __intel_uncore_forcewake_put(uncore, fw_domains, false); } void assert_forcewakes_inactive(struct intel_uncore *uncore) diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 
c4b22d9d0b45108aab6a3d0dee5f5f7cea8efac4..034f04e0de8b7774ff8cd6eab16c9e577cb375f9 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -211,6 +211,8 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains domains); void intel_uncore_forcewake_put(struct intel_uncore *uncore, enum forcewake_domains domains); +void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore, + enum forcewake_domains domains); void intel_uncore_forcewake_flush(struct intel_uncore *uncore, enum forcewake_domains fw_domains); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 75036aaa0c63959dd06cc94106635c2ba18c4005..efd13e533726be8d1b9005f725f6d99027062fe5 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -553,6 +553,8 @@ static int imx_ldb_panel_ddc(struct device *dev, edidp = of_get_property(child, "edid", &edid_len); if (edidp) { channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL); + if (!channel->edid) + return -ENOMEM; } else if (!channel->panel) { /* fallback to display-timings node */ ret = of_get_drm_display_mode(child, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 2eb8df4697dfac6c0bd1e6a2098a62a296e0498c..b61bfa84b6bbd152688113b4cb56929fb12bd5c4 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -70,8 +70,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) ret = of_get_drm_display_mode(np, &imxpd->mode, &imxpd->bus_flags, OF_USE_NATIVE_MODE); - if (ret) + if (ret) { + drm_mode_destroy(connector->dev, mode); return ret; + } drm_mode_copy(mode, &imxpd->mode); mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, @@ -212,14 +214,6 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, if (!imx_pd_format_supported(bus_fmt)) return -EINVAL; - if (bus_flags & - ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH | - DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | - DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) { - dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags); - return -EINVAL; - } - bridge_state->output_bus_cfg.flags = bus_flags; bridge_state->input_bus_cfg.flags = bus_flags; imx_crtc_state->bus_flags = bus_flags; diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c index 65fdca366e41f00d940f13a0865234fc47df5595..36c99058942788fe6e31cc127b7b22c666bc5c21 100644 --- a/drivers/gpu/drm/lima/lima_device.c +++ b/drivers/gpu/drm/lima/lima_device.c @@ -357,6 +357,7 @@ int lima_device_init(struct lima_device *ldev) int err, i; dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32)); + dma_set_max_seg_size(ldev->dev, UINT_MAX); err = lima_clk_init(ldev); if (err) diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c index 8cee2591e7284ba06e4a02475eba09331e47e1a2..ccc742dc78bd99a262385c0088c6c7ca48abc270 100644 --- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c +++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c @@ -147,6 +147,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) return -ENOMEM; mipi_tx->driver_data = of_device_get_match_data(dev); + if (!mipi_tx->driver_data) + return -ENODEV; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); mipi_tx->regs = devm_ioremap_resource(dev, mem); diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 2753067c08e68bfe22e483fae825fbfed9af5934..728fea50941243f15c377f4bc40fd6ca99bf3952 100644 --- 
a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -396,10 +396,8 @@ static void meson_drv_unbind(struct device *dev) drm_irq_uninstall(drm); drm_dev_put(drm); - if (priv->afbcd.ops) { - priv->afbcd.ops->reset(priv); - meson_rdma_free(priv); - } + if (priv->afbcd.ops) + priv->afbcd.ops->exit(priv); } static const struct component_master_ops meson_drv_master_ops = { diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c index ffc6b584dbf85d82b778b37ebc3b0a069063ea08..0cdbe899402f84571e508f09f7c4a978d9d77d31 100644 --- a/drivers/gpu/drm/meson/meson_osd_afbcd.c +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c @@ -79,11 +79,6 @@ static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format) return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0; } -static int meson_gxm_afbcd_init(struct meson_drm *priv) -{ - return 0; -} - static int meson_gxm_afbcd_reset(struct meson_drm *priv) { writel_relaxed(VIU_SW_RESET_OSD1_AFBCD, @@ -93,6 +88,16 @@ static int meson_gxm_afbcd_reset(struct meson_drm *priv) return 0; } +static int meson_gxm_afbcd_init(struct meson_drm *priv) +{ + return 0; +} + +static void meson_gxm_afbcd_exit(struct meson_drm *priv) +{ + meson_gxm_afbcd_reset(priv); +} + static int meson_gxm_afbcd_enable(struct meson_drm *priv) { writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) | @@ -172,6 +177,7 @@ static int meson_gxm_afbcd_setup(struct meson_drm *priv) struct meson_afbcd_ops meson_afbcd_gxm_ops = { .init = meson_gxm_afbcd_init, + .exit = meson_gxm_afbcd_exit, .reset = meson_gxm_afbcd_reset, .enable = meson_gxm_afbcd_enable, .disable = meson_gxm_afbcd_disable, @@ -269,6 +275,18 @@ static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format) return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0; } +static int meson_g12a_afbcd_reset(struct meson_drm *priv) +{ + meson_rdma_reset(priv); + + meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB | + VIU_SW_RESET_G12A_OSD1_AFBCD, + VIU_SW_RESET); + meson_rdma_writel_sync(priv, 0, VIU_SW_RESET); + + return 0; +} + static int meson_g12a_afbcd_init(struct meson_drm *priv) { int ret; @@ -286,16 +304,10 @@ static int meson_g12a_afbcd_init(struct meson_drm *priv) return 0; } -static int meson_g12a_afbcd_reset(struct meson_drm *priv) +static void meson_g12a_afbcd_exit(struct meson_drm *priv) { - meson_rdma_reset(priv); - - meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB | - VIU_SW_RESET_G12A_OSD1_AFBCD, - VIU_SW_RESET); - meson_rdma_writel_sync(priv, 0, VIU_SW_RESET); - - return 0; + meson_g12a_afbcd_reset(priv); + meson_rdma_free(priv); } static int meson_g12a_afbcd_enable(struct meson_drm *priv) @@ -380,6 +392,7 @@ static int meson_g12a_afbcd_setup(struct meson_drm *priv) struct meson_afbcd_ops meson_afbcd_g12a_ops = { .init = meson_g12a_afbcd_init, + .exit = meson_g12a_afbcd_exit, .reset = meson_g12a_afbcd_reset, .enable = meson_g12a_afbcd_enable, .disable = meson_g12a_afbcd_disable, diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h index 5e5523304f42f95c305a850bcde238fc57988ca5..e77ddeb6416f3c2998c45131e14dd9f3fb503f6d 100644 --- a/drivers/gpu/drm/meson/meson_osd_afbcd.h +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h @@ -14,6 +14,7 @@ struct meson_afbcd_ops { int (*init)(struct meson_drm *priv); + void (*exit)(struct meson_drm *priv); int (*reset)(struct meson_drm *priv); int (*enable)(struct meson_drm *priv); int (*disable)(struct meson_drm *priv); diff --git 
a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 509968c0d16bc7ad8874b33baf362078c3e29db9..2a13e297e16df5a529e9a52ec6cbe283c4285ea7 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1243,7 +1243,10 @@ static void mgag200_set_format_regs(struct mga_device *mdev, WREG_GFX(3, 0x00); WREG_GFX(4, 0x00); WREG_GFX(5, 0x40); - WREG_GFX(6, 0x05); + /* GCTL6 should be 0x05, but we configure memmapsl to 0xb8000 (text mode), + * so that it doesn't hang when running kexec/kdump on G200_SE rev42. + */ + WREG_GFX(6, 0x0d); WREG_GFX(7, 0x0f); WREG_GFX(8, 0x0f); diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index dabb4a1ccdcf70150db023a2815e460e87ea6333..1aad34b5ffd7f42d9e605ed79de1cec10bcbb5c6 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -60,6 +60,7 @@ config DRM_MSM_HDMI_HDCP config DRM_MSM_DP bool "Enable DisplayPort support in MSM DRM driver" depends on DRM_MSM + select RATIONAL default y help Compile in support for DP driver in MSM DRM driver. DP external diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 9e09805575db40792313cfe280b026b8fa899406..39563daff4a0b64800fb038ca5bd79b2ccbfe956 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1222,7 +1222,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu) return ERR_CAST(mmu); return msm_gem_address_space_create(mmu, - "gpu", 0x100000000ULL, 0x1ffffffffULL); + "gpu", 0x100000000ULL, SZ_4G); } static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index f7f5c258b5537b38946e29150b99c4710abcd28c..a0274fcfe9c9d58e92f2c8118e6ad3bfae004efb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -1113,7 +1113,7 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc) } - if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort && + if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS && dpu_enc->cur_master->hw_mdptop && dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select) dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select( diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c index a7a24539921f3a58f18aa4c7c87021dd166d68b2..a6efc11eba93fdc2e7786312d71d52717e374fcf 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c @@ -26,9 +26,16 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx, struct dpu_hw_pcc_cfg *cfg) { - u32 base = ctx->cap->sblk->pcc.base; + u32 base; - if (!ctx || !base) { + if (!ctx) { + DRM_ERROR("invalid ctx %pK\n", ctx); + return; + } + + base = ctx->cap->sblk->pcc.base; + + if (!base) { DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base); return; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index b4a2e8eb35dd25f905a6eb23959c86e0a3b16554..08e082d0443af22af0087f49d60ba5f14110c2f2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -71,8 +71,8 @@ static int _dpu_danger_signal_status(struct seq_file *s, &status); } else { seq_puts(s, "\nSafe signal status:\n"); - if (kms->hw_mdp->ops.get_danger_status) - kms->hw_mdp->ops.get_danger_status(kms->hw_mdp, + if 
(kms->hw_mdp->ops.get_safe_status) + kms->hw_mdp->ops.get_safe_status(kms->hw_mdp, &status); } pm_runtime_put_sync(&kms->pdev->dev); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 9b2b5044e8e05490003f534a6d34f10204304d22..74a13ccad34c0bb942983385ec46db5b5554241f 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -34,6 +34,14 @@ int dpu_rm_destroy(struct dpu_rm *rm) { int i; + for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) { + struct dpu_hw_dspp *hw; + + if (rm->dspp_blks[i]) { + hw = to_dpu_hw_dspp(rm->dspp_blks[i]); + dpu_hw_dspp_destroy(hw); + } + } for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) { struct dpu_hw_pingpong *hw; diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 66f2ea3d42fc2074f430d598a6887a79b6105a7a..6cd6934c8c9f10760a2b89b2393a43ba721d7cf2 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -1336,6 +1336,7 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, struct drm_encoder *encoder) { struct msm_drm_private *priv; + struct dp_display_private *dp_priv; int ret; if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev)) @@ -1344,6 +1345,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, priv = dev->dev_private; dp_display->drm_dev = dev; + dp_priv = container_of(dp_display, struct dp_display_private, dp_display); + ret = dp_display_request_irq(dp_display); if (ret) { DRM_ERROR("request_irq failed, ret=%d\n", ret); @@ -1361,6 +1364,8 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, return ret; } + dp_priv->panel->connector = dp_display->connector; + priv->connectors[priv->num_connectors++] = dp_display->connector; return 0; } diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 1adead764feed8558a6ef5191175251a1c94bdce..f845333593daa7d1821f54536e8dbc41fb901786 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -33,7 +33,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi) of_node_put(phy_node); - if (!phy_pdev || !msm_dsi->phy) { + if (!phy_pdev) { + DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); + return -EPROBE_DEFER; + } + if (!msm_dsi->phy) { + put_device(&phy_pdev->dev); DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__); return -EPROBE_DEFER; } diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 1d28dfba2c9bb5d67fdadb2f8c82411df7c47ac4..fb421ca56b3da7e84389e2f85296d3303bc31186 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -644,7 +644,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) return connector; fail: - connector->funcs->destroy(msm_dsi->connector); + connector->funcs->destroy(connector); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index e8c1a727179cc29b8afceda742353b71c72a7a96..e07986ab52c2235a115b61b70541dc002dfd1c1d 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -769,12 +769,14 @@ void __exit msm_dsi_phy_driver_unregister(void) int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, struct msm_dsi_phy_clk_request *clk_req) { - struct device *dev = &phy->pdev->dev; + struct device *dev; int ret; if (!phy || !phy->cfg->ops.enable) return -EINVAL; + 
dev = &phy->pdev->dev; + ret = dsi_phy_enable_resource(phy); if (ret) { DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n", diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 737453b6e596646b9a530ee16a18901af83c69e8..94f948ef279d107408e1c4b820176f5d78245c60 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -97,10 +97,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi) of_node_put(phy_node); - if (!phy_pdev || !hdmi->phy) { + if (!phy_pdev) { DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n"); return -EPROBE_DEFER; } + if (!hdmi->phy) { + DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n"); + put_device(&phy_pdev->dev); + return -EPROBE_DEFER; + } hdmi->phy_dev = get_device(&phy_pdev->dev); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 33e42b2f9cfcb4a79cda66378ab6ac4d0205770a..e37e5afc680a2b49bf0afcc3e232ca1bc58f8384 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -350,7 +350,7 @@ static int msm_init_vram(struct drm_device *dev) of_node_put(node); if (ret) return ret; - size = r.end - r.start; + size = r.end - r.start + 1; DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start); /* if we have no IOMMU, then we need to use carveout allocator. diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 819567e40565c092819f7de1e7970dee9e347fa5..9c05bf6c45510dd36de2031ca585ac60c6a720eb 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -849,6 +849,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) get_pid_task(aspace->pid, PIDTYPE_PID); if (task) { comm = kstrdup(task->comm, GFP_KERNEL); + put_task_struct(task); } else { comm = NULL; } diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 7739f46470d3e1215402e541f55194d6764d34a9..99fee4d8cd3187dc0549f412ff384926f82a10b6 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -205,7 +205,7 @@ nv04_display_destroy(struct drm_device *dev) nvif_notify_dtor(&disp->flip); nouveau_display(dev)->priv = NULL; - kfree(disp); + vfree(disp); nvif_object_unmap(&drm->client.device.object); } @@ -223,7 +223,7 @@ nv04_display_create(struct drm_device *dev) struct nv04_display *disp; int i, ret; - disp = kzalloc(sizeof(*disp), GFP_KERNEL); + disp = vzalloc(sizeof(*disp)); if (!disp) return -ENOMEM; diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c index c6a3448180d6fdb9102026bbbd17bffad1ca1775..93d9575181c67f7ae01203ff9b34e3c91e3cc6ec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c @@ -119,8 +119,12 @@ nvkm_falcon_disable(struct nvkm_falcon *falcon) int nvkm_falcon_reset(struct nvkm_falcon *falcon) { - nvkm_falcon_disable(falcon); - return nvkm_falcon_enable(falcon); + if (!falcon->func->reset) { + nvkm_falcon_disable(falcon); + return nvkm_falcon_enable(falcon); + } + + return falcon->func->reset(falcon); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c index 667fa016496eeb11e61b774f8db98bb9565829b3..a6ea89a5d51ab90806de88908bb7e7d961412c3b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c @@ -142,11 +142,12 @@ nvkm_acr_hsfw_load_bl(struct nvkm_acr *acr, const char *name, int ver, hsfw->imem_size = 
desc->code_size; hsfw->imem_tag = desc->start_tag; - hsfw->imem = kmalloc(desc->code_size, GFP_KERNEL); - memcpy(hsfw->imem, data + desc->code_off, desc->code_size); - + hsfw->imem = kmemdup(data + desc->code_off, desc->code_size, GFP_KERNEL); nvkm_firmware_put(fw); - return 0; + if (!hsfw->imem) + return -ENOMEM; + else + return 0; } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c index f3c30b2a788e8534fe72c23729779afd5e39d573..8bff14ae16b0e5bd76ef0f7541040dda3cb80e70 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c @@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size) *addr += bios->imaged_addr; } - if (unlikely(*addr + size >= bios->size)) { + if (unlikely(*addr + size > bios->size)) { nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr); return false; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c index a0fe607c9c07fc5f59f88c45ff4c4538dc5badc2..3bfc55c571b5e0aca53846dd68a0eb460d6b743e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c @@ -94,20 +94,13 @@ nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend) return 0; } -static int +static void nvkm_pmu_reset(struct nvkm_pmu *pmu) { struct nvkm_device *device = pmu->subdev.device; if (!pmu->func->enabled(pmu)) - return 0; - - /* Inhibit interrupts, and wait for idle. */ - nvkm_wr32(device, 0x10a014, 0x0000ffff); - nvkm_msec(device, 2000, - if (!nvkm_rd32(device, 0x10a04c)) - break; - ); + return; /* Reset. */ if (pmu->func->reset) @@ -118,25 +111,37 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu) if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006)) break; ); - - return 0; } static int nvkm_pmu_preinit(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - return nvkm_pmu_reset(pmu); + nvkm_pmu_reset(pmu); + return 0; } static int nvkm_pmu_init(struct nvkm_subdev *subdev) { struct nvkm_pmu *pmu = nvkm_pmu(subdev); - int ret = nvkm_pmu_reset(pmu); - if (ret == 0 && pmu->func->init) - ret = pmu->func->init(pmu); - return ret; + struct nvkm_device *device = pmu->subdev.device; + + if (!pmu->func->init) + return 0; + + if (pmu->func->enabled(pmu)) { + /* Inhibit interrupts, and wait for idle. 
*/
+		nvkm_wr32(device, 0x10a014, 0x0000ffff);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x10a04c))
+				break;
+		);
+
+		nvkm_pmu_reset(pmu);
+	}
+
+	return pmu->func->init(pmu);
 }
 
 static void *
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
index 383376addb41c4a55dd1261ea65c748c846233fa..a9d6c36195ed1f175cdb83b32f203b8695302f72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm200.c
@@ -23,9 +23,38 @@
  */
 #include "priv.h"
 
+static int
+gm200_pmu_flcn_reset(struct nvkm_falcon *falcon)
+{
+	struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
+
+	nvkm_falcon_wr32(falcon, 0x014, 0x0000ffff);
+	pmu->func->reset(pmu);
+	return nvkm_falcon_enable(falcon);
+}
+
+const struct nvkm_falcon_func
+gm200_pmu_flcn = {
+	.debug = 0xc08,
+	.fbif = 0xe00,
+	.load_imem = nvkm_falcon_v1_load_imem,
+	.load_dmem = nvkm_falcon_v1_load_dmem,
+	.read_dmem = nvkm_falcon_v1_read_dmem,
+	.bind_context = nvkm_falcon_v1_bind_context,
+	.wait_for_halt = nvkm_falcon_v1_wait_for_halt,
+	.clear_interrupt = nvkm_falcon_v1_clear_interrupt,
+	.set_start_addr = nvkm_falcon_v1_set_start_addr,
+	.start = nvkm_falcon_v1_start,
+	.enable = nvkm_falcon_v1_enable,
+	.disable = nvkm_falcon_v1_disable,
+	.reset = gm200_pmu_flcn_reset,
+	.cmdq = { 0x4a0, 0x4b0, 4 },
+	.msgq = { 0x4c8, 0x4cc, 0 },
+};
+
 static const struct nvkm_pmu_func
 gm200_pmu = {
-	.flcn = &gt215_pmu_flcn,
+	.flcn = &gm200_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.reset = gf100_pmu_reset,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 8f6ed5373ea16e046a7748717639a304ac1327f9..d82529becfdc951725c3123bcdf476fb84f33730 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -211,11 +211,12 @@ gm20b_pmu_recv(struct nvkm_pmu *pmu)
 
 static const struct nvkm_pmu_func
 gm20b_pmu = {
-	.flcn = &gt215_pmu_flcn,
+	.flcn = &gm200_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.intr = gt215_pmu_intr,
 	.recv = gm20b_pmu_recv,
 	.initmsg = gm20b_pmu_initmsg,
+	.reset = gf100_pmu_reset,
 };
 
 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
index 3d8ce14dba7bf1fb2b7a1d4c54dd9ced9f2f6ed0..9f32982216b6f8d1e7ca8c9c0f1b0410faa2420f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
@@ -23,7 +23,7 @@
  */
 #include "priv.h"
 
-static void
+void
 gp102_pmu_reset(struct nvkm_pmu *pmu)
 {
 	struct nvkm_device *device = pmu->subdev.device;
@@ -39,7 +39,7 @@ gp102_pmu_enabled(struct nvkm_pmu *pmu)
 
 static const struct nvkm_pmu_func
 gp102_pmu = {
-	.flcn = &gt215_pmu_flcn,
+	.flcn = &gm200_pmu_flcn,
 	.enabled = gp102_pmu_enabled,
 	.reset = gp102_pmu_reset,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index 9c237c426599b543e38f39e3d2fd3888bb70e510..0bd4b32ad863fa09bdf492e726c60bdae2643672 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -78,11 +78,12 @@ gp10b_pmu_acr = {
 
 static const struct nvkm_pmu_func
 gp10b_pmu = {
-	.flcn = &gt215_pmu_flcn,
+	.flcn = &gm200_pmu_flcn,
 	.enabled = gf100_pmu_enabled,
 	.intr = gt215_pmu_intr,
 	.recv = gm20b_pmu_recv,
 	.initmsg = gm20b_pmu_initmsg,
+	.reset = gp102_pmu_reset,
 };
 
 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff
--git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h index 276b6d778e532fc54b0049833ddb88d498c6f21e..80c4cb861d40e6d987cbd8dfdd5894c60690d665 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h @@ -41,9 +41,12 @@ int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32); bool gf100_pmu_enabled(struct nvkm_pmu *); void gf100_pmu_reset(struct nvkm_pmu *); +void gp102_pmu_reset(struct nvkm_pmu *pmu); void gk110_pmu_pgob(struct nvkm_pmu *, bool); +extern const struct nvkm_falcon_func gm200_pmu_flcn; + void gm20b_pmu_acr_bld_patch(struct nvkm_acr *, u32, s64); void gm20b_pmu_acr_bld_write(struct nvkm_acr *, u32, struct nvkm_acr_lsfw *); int gm20b_pmu_acr_boot(struct nvkm_falcon *); diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c index aea31622539146ebbf9bfbcb9f08a3c65b92e633..f194b62e290cae561b2c2b188701481f6aa4ee73 100644 --- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c +++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c @@ -484,6 +484,7 @@ static void innolux_panel_del(struct innolux_panel *innolux) static int innolux_panel_probe(struct mipi_dsi_device *dsi) { const struct panel_desc *desc; + struct innolux_panel *innolux; int err; desc = of_device_get_match_data(&dsi->dev); @@ -495,7 +496,14 @@ static int innolux_panel_probe(struct mipi_dsi_device *dsi) if (err < 0) return err; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err < 0) { + innolux = mipi_dsi_get_drvdata(dsi); + innolux_panel_del(innolux); + return err; + } + + return 0; } static int innolux_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index 86e4213e8bb13f6e834c91bb755d599143f3adc4..daccb1fd5fdad82dcdfd8caacfe89e43a5bb5746 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -406,7 +406,13 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi) if (err < 0) return err; - return mipi_dsi_attach(dsi); + err = mipi_dsi_attach(dsi); + if (err < 0) { + kingdisplay_panel_del(kingdisplay); + return err; + } + + return 0; } static int kingdisplay_panel_remove(struct mipi_dsi_device *dsi) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 204674fccd6465fed6c03166c2008efbefc011ee..959dcbd8a29c175b3b697e23b784779a365f04ef 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -557,6 +557,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) err = panel_dpi_probe(dev, panel); if (err) goto free_ddc; + desc = panel->desc; } else { if (!of_get_display_timing(dev->of_node, "panel-timing", &dt)) panel_simple_parse_panel_timing_node(dev, panel, &dt); @@ -2131,7 +2132,7 @@ static const struct display_timing innolux_g070y2_l01_timing = { static const struct panel_desc innolux_g070y2_l01 = { .timings = &innolux_g070y2_l01_timing, .num_timings = 1, - .bpc = 6, + .bpc = 8, .size = { .width = 152, .height = 91, diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 2aae636f1cf5cbe10f0f51724a1062c19c6ab29b..107ad2d764ec0de1fef6911ec691f9d457eb5f1b 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -359,8 +359,11 @@ int panfrost_gpu_init(struct 
panfrost_device *pfdev) panfrost_gpu_init_features(pfdev); - dma_set_mask_and_coherent(pfdev->dev, + err = dma_set_mask_and_coherent(pfdev->dev, DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features))); + if (err) + return err; + dma_set_max_seg_size(pfdev->dev, UINT_MAX); irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu"); diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 46b0d1c4a16c65547092e7c60a49d3550f2cce5c..d5e8e3a8bff3ec198972f6cc6261b5322de20034 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -324,7 +324,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev, return ret; } -static int pl111_amba_remove(struct amba_device *amba_dev) +static void pl111_amba_remove(struct amba_device *amba_dev) { struct device *dev = &amba_dev->dev; struct drm_device *drm = amba_get_drvdata(amba_dev); @@ -335,8 +335,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev) drm_panel_bridge_remove(priv->bridge); drm_dev_put(drm); of_reserved_mem_device_release(dev); - - return 0; } /* diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index cc5ee1b3af84f27462ddcaf64b33d20ceab3c96a..12aa7877a625a389a418cd3e8f3f9835595bc86e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -197,7 +197,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder, * so don't register a backlight device */ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && - (rdev->pdev->device == 0x6741)) + (rdev->pdev->device == 0x6741) && + !dmi_match(DMI_PRODUCT_NAME, "iMac12,1")) return; if (!radeon_encoder->enc_priv) diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 8c0a572940e826aa05d49645f0a28b14cf8e285a..32070e94f6c4941120a0a31a7a1f4691278a7376 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -634,6 +634,8 @@ void radeon_driver_lastclose_kms(struct drm_device *dev) int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) { struct radeon_device *rdev = dev->dev_private; + struct radeon_fpriv *fpriv; + struct radeon_vm *vm; int r; file_priv->driver_priv = NULL; @@ -646,48 +648,52 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) /* new gpu have virtual address space support */ if (rdev->family >= CHIP_CAYMAN) { - struct radeon_fpriv *fpriv; - struct radeon_vm *vm; fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { r = -ENOMEM; - goto out_suspend; + goto err_suspend; } if (rdev->accel_working) { vm = &fpriv->vm; r = radeon_vm_init(rdev, vm); - if (r) { - kfree(fpriv); - goto out_suspend; - } + if (r) + goto err_fpriv; r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (r) { - radeon_vm_fini(rdev, vm); - kfree(fpriv); - goto out_suspend; - } + if (r) + goto err_vm_fini; /* map the ib pool buffer read only into * virtual address space */ vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo); + if (!vm->ib_bo_va) { + r = -ENOMEM; + goto err_vm_fini; + } + r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, RADEON_VA_IB_OFFSET, RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); - if (r) { - radeon_vm_fini(rdev, vm); - kfree(fpriv); - goto out_suspend; - } + if (r) + goto err_vm_fini; } file_priv->driver_priv = fpriv; } -out_suspend: + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + return 0; + 
+err_vm_fini: + radeon_vm_fini(rdev, vm); +err_fpriv: + kfree(fpriv); + +err_suspend: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 1f4e3396d097cd9d6c01408754053f41359350e4..a42ea2b76985264eba78b21aa0f39418eefab1de 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -275,6 +275,11 @@ int radeon_uvd_suspend(struct radeon_device *rdev) } } +#if IS_ENABLED(CONFIG_SW64) + /* Finish executing delayed work */ + flush_delayed_work(&rdev->uvd.idle_work); +#endif + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index 68cc5a347d3bfe2ef6015ac9cbd2d259ced0b21d..b9680d38d9242322b3290234560f855ac8d9aec8 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -240,7 +240,7 @@ int radeon_vce_resume(struct radeon_device *rdev) } #ifdef __sw_64__ - _memset_c_io(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo)); + memset_io(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo)); #else memset(cpu_addr, 0, radeon_bo_size(rdev->vce.vcpu_bo)); #endif diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c index bd75bbcf5bf6368dcb3623aba159fd67f0eb2004..fbd8a5d9a691f3795467178a79d012ff1af19599 100644 --- a/drivers/gpu/drm/radeon/vce_v1_0.c +++ b/drivers/gpu/drm/radeon/vce_v1_0.c @@ -193,8 +193,13 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data) data[3] = sign->val[i].nonce[3]; data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64); +#if IS_ENABLED(CONFIG_SW64) + memset_io(&data[5], 0, 44); + memcpy_toio(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign)); +#else memset(&data[5], 0, 44); memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign)); +#endif data += (le32_to_cpu(sign->len) + 64) / 4; data[0] = sign->val[i].sigval[0]; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 1b9738e44909d45a3942671a8872a8daa846adb9..065604c5837de3ab4870c862570cd9b8b85a702a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -215,6 +215,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; struct rcar_du_device *rcdu = rcrtc->dev; unsigned long mode_clock = mode->clock * 1000; + unsigned int hdse_offset; u32 dsmr; u32 escr; @@ -298,10 +299,15 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) | DSMR_DIPM_DISP | DSMR_CSPM; rcar_du_crtc_write(rcrtc, DSMR, dsmr); + hdse_offset = 19; + if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2)) + hdse_offset += 25; + /* Display timings */ - rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19); + rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - + hdse_offset); rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start + - mode->hdisplay - 19); + mode->hdisplay - hdse_offset); rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end - mode->hsync_start - 1); rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1); @@ -831,6 +837,7 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc, struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct rcar_du_device *rcdu = rcrtc->dev; bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + unsigned int min_sync_porch; unsigned int vbp; if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED)) @@ -838,9 
+845,14 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc, /* * The hardware requires a minimum combined horizontal sync and back - * porch of 20 pixels and a minimum vertical back porch of 3 lines. + * porch of 20 pixels (when CMM isn't used) or 45 pixels (when CMM is + * used), and a minimum vertical back porch of 3 lines. */ - if (mode->htotal - mode->hsync_start < 20) + min_sync_porch = 20; + if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2)) + min_sync_porch += 25; + + if (mode->htotal - mode->hsync_start < min_sync_porch) return MODE_HBLANK_NARROW; vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1); diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index d0c9610ad22025140f9e15c84a54e681eccc73ac..b0fb3c3cba5962a151263af9365677838737d23a 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -243,6 +243,8 @@ struct dw_mipi_dsi_rockchip { struct dw_mipi_dsi *dmd; const struct rockchip_dw_dsi_chip_data *cdata; struct dw_mipi_dsi_plat_data pdata; + + bool dsi_bound; }; struct dphy_pll_parameter_map { @@ -753,10 +755,6 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder) if (mux < 0) return; - pm_runtime_get_sync(dsi->dev); - if (dsi->slave) - pm_runtime_get_sync(dsi->slave->dev); - /* * For the RK3399, the clk of grf must be enabled before writing grf * register. And for RK3288 or other soc, this grf_clk must be NULL, @@ -775,20 +773,10 @@ static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder) clk_disable_unprepare(dsi->grf_clk); } -static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder) -{ - struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder); - - if (dsi->slave) - pm_runtime_put(dsi->slave->dev); - pm_runtime_put(dsi->dev); -} - static const struct drm_encoder_helper_funcs dw_mipi_dsi_encoder_helper_funcs = { .atomic_check = dw_mipi_dsi_encoder_atomic_check, .enable = dw_mipi_dsi_encoder_enable, - .disable = dw_mipi_dsi_encoder_disable, }; static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi, @@ -918,10 +906,14 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, put_device(second); } + pm_runtime_get_sync(dsi->dev); + if (dsi->slave) + pm_runtime_get_sync(dsi->slave->dev); + ret = clk_prepare_enable(dsi->pllref_clk); if (ret) { DRM_DEV_ERROR(dev, "Failed to enable pllref_clk: %d\n", ret); - return ret; + goto out_pm_runtime; } /* @@ -933,7 +925,7 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, ret = clk_prepare_enable(dsi->grf_clk); if (ret) { DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret); - return ret; + goto out_pll_clk; } dw_mipi_dsi_rockchip_config(dsi); @@ -945,16 +937,27 @@ static int dw_mipi_dsi_rockchip_bind(struct device *dev, ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev); if (ret) { DRM_DEV_ERROR(dev, "Failed to create drm encoder\n"); - return ret; + goto out_pll_clk; } ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder); if (ret) { DRM_DEV_ERROR(dev, "Failed to bind: %d\n", ret); - return ret; + goto out_pll_clk; } + dsi->dsi_bound = true; + return 0; + +out_pll_clk: + clk_disable_unprepare(dsi->pllref_clk); +out_pm_runtime: + pm_runtime_put(dsi->dev); + if (dsi->slave) + pm_runtime_put(dsi->slave->dev); + + return ret; } static void dw_mipi_dsi_rockchip_unbind(struct device *dev, @@ -966,9 +969,15 @@ static void dw_mipi_dsi_rockchip_unbind(struct device *dev, if (dsi->is_slave) return; + dsi->dsi_bound = false; + 
dw_mipi_dsi_unbind(dsi->dmd); clk_disable_unprepare(dsi->pllref_clk); + + pm_runtime_put(dsi->dev); + if (dsi->slave) + pm_runtime_put(dsi->slave->dev); } static const struct component_ops dw_mipi_dsi_rockchip_ops = { @@ -1026,6 +1035,36 @@ static const struct dw_mipi_dsi_host_ops dw_mipi_dsi_rockchip_host_ops = { .detach = dw_mipi_dsi_rockchip_host_detach, }; +static int __maybe_unused dw_mipi_dsi_rockchip_resume(struct device *dev) +{ + struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev); + int ret; + + /* + * Re-configure DSI state, if we were previously initialized. We need + * to do this before rockchip_drm_drv tries to re-enable() any panels. + */ + if (dsi->dsi_bound) { + ret = clk_prepare_enable(dsi->grf_clk); + if (ret) { + DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret); + return ret; + } + + dw_mipi_dsi_rockchip_config(dsi); + if (dsi->slave) + dw_mipi_dsi_rockchip_config(dsi->slave); + + clk_disable_unprepare(dsi->grf_clk); + } + + return 0; +} + +static const struct dev_pm_ops dw_mipi_dsi_rockchip_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, dw_mipi_dsi_rockchip_resume) +}; + static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1126,14 +1165,10 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dev, "Failed to probe dw_mipi_dsi: %d\n", ret); - goto err_clkdisable; + return ret; } return 0; - -err_clkdisable: - clk_disable_unprepare(dsi->pllref_clk); - return ret; } static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev) @@ -1249,6 +1284,7 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = { .remove = dw_mipi_dsi_rockchip_remove, .driver = { .of_match_table = dw_mipi_dsi_rockchip_dt_ids, + .pm = &dw_mipi_dsi_rockchip_pm_ops, .name = "dw-mipi-dsi-rockchip", }, }; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 23de359a1dec60116f29a22f045f663b2b7ec707..515e6f187dc777aa6abcdab46a45733c058d8b9a 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } - ret = clk_prepare_enable(hdmi->vpll_clk); - if (ret) { - DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", - ret); - return ret; - } - hdmi->phy = devm_phy_optional_get(dev, "hdmi"); if (IS_ERR(hdmi->phy)) { ret = PTR_ERR(hdmi->phy); @@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, return ret; } + ret = clk_prepare_enable(hdmi->vpll_clk); + if (ret) { + DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n", + ret); + return ret; + } + drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs); drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index a6fe03c3748aad414911cddccc6062c712940432..39e1e1ebea92841dd0ca037da8d9975d50a1ab0d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -873,6 +873,7 @@ static const struct vop_win_phy rk3399_win01_data = { .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0), .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1), .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12), + .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21), .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22), .act_info = 
VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
 	.dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
@@ -883,6 +884,7 @@ static const struct vop_win_phy rk3399_win01_data = {
 	.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
 	.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
 	.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+	.channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
 };
 
 /*
@@ -893,11 +895,11 @@ static const struct vop_win_phy rk3399_win01_data = {
 static const struct vop_win_data rk3399_vop_win_data[] = {
 	{ .base = 0x00, .phy = &rk3399_win01_data,
 	  .type = DRM_PLANE_TYPE_PRIMARY },
-	{ .base = 0x40, .phy = &rk3288_win01_data,
+	{ .base = 0x40, .phy = &rk3368_win01_data,
 	  .type = DRM_PLANE_TYPE_OVERLAY },
-	{ .base = 0x00, .phy = &rk3288_win23_data,
+	{ .base = 0x00, .phy = &rk3368_win23_data,
 	  .type = DRM_PLANE_TYPE_OVERLAY },
-	{ .base = 0x50, .phy = &rk3288_win23_data,
+	{ .base = 0x50, .phy = &rk3368_win23_data,
 	  .type = DRM_PLANE_TYPE_CURSOR },
 };
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 7576b523fdbb1409c720ca5874425a71be9cc411..b0178c045267c6f6e584d46a076e5780c8144bb8 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -113,10 +113,10 @@
 /* format 13 is semi-planar YUV411 VUVU */
 #define SUN8I_MIXER_FBFMT_YUV411 14
 /* format 15 doesn't exist */
-/* format 16 is P010 YVU */
-#define SUN8I_MIXER_FBFMT_P010_YUV 17
-/* format 18 is P210 YVU */
-#define SUN8I_MIXER_FBFMT_P210_YUV 19
+#define SUN8I_MIXER_FBFMT_P010_YUV 16
+/* format 17 is P010 YVU */
+#define SUN8I_MIXER_FBFMT_P210_YUV 18
+/* format 19 is P210 YVU */
 /* format 20 is packed YVU444 10-bit */
 /* format 21 is packed YUV444 10-bit */
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index f46d377f0c3046309c2bf1d12415f593f6aed467..de1333dc0d8670aa30dc9867d5c13a41691ea4a3 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1538,8 +1538,10 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
 		dsi->slave = platform_get_drvdata(gangster);
 		of_node_put(np);
 
-		if (!dsi->slave)
+		if (!dsi->slave) {
+			put_device(&gangster->dev);
 			return -EPROBE_DEFER;
+		}
 
 		dsi->slave->master = dsi;
 	}
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index b77f726303d89d186427e6d6285ab3563e367a20..ec0e4d8f0aade7e9f8f2b89ba687cdda1886f92f 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -5,6 +5,7 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/host1x.h>
 #include <linux/iommu.h>
 #include <linux/module.h>
@@ -265,10 +266,8 @@ static int vic_load_firmware(struct vic *vic)
 
 	if (!client->group) {
 		virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
-
-		err = dma_mapping_error(vic->dev, iova);
-		if (err < 0)
-			return err;
+		if (!virt)
+			return -ENOMEM;
 	} else {
 		virt = tegra_drm_alloc(tegra, size, &iova);
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index eb4b7df02ca034ab55c963e7c5d00430bd302b19..f673292eec9dbb6b5c050bfca006cbc4c6a151cc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -789,6 +789,8 @@ int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	ret = ttm_bo_evict(bo, ctx);
 	if (locked)
 		ttm_bo_unreserve(bo);
+	else
+		ttm_bo_move_to_lru_tail_unlocked(bo);
 
 	ttm_bo_put(bo);
 	return ret;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index d68d9bad76747015c446b3f5908014ca81dd3b70..c5ea880d17b29bc227d523e59a7bb07502ea4751 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++
b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -123,8 +123,8 @@ int vbox_hw_init(struct vbox_private *vbox) /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */ vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1, "vboxvideo-accel"); - if (!vbox->guest_pool) - return -ENOMEM; + if (IS_ERR(vbox->guest_pool)) + return PTR_ERR(vbox->guest_pool); ret = gen_pool_add_virt(vbox->guest_pool, (unsigned long)vbox->guest_heap, diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index ee293f061f0a8706a2997bece79927c6809733fd..a308f2d05d17386c9b9e48351c5439bc01c99def 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -79,6 +79,7 @@ # define VC4_HD_M_SW_RST BIT(2) # define VC4_HD_M_ENABLE BIT(0) +#define HSM_MIN_CLOCK_FREQ 120000000 #define CEC_CLOCK_FREQ 40000 #define VC4_HSM_MID_CLOCK 149985000 @@ -799,6 +800,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder, unsigned long long tmds_rate; if (vc4_hdmi->variant->unsupported_odd_h_timings && + !(mode->flags & DRM_MODE_FLAG_DBLCLK) && ((mode->hdisplay % 2) || (mode->hsync_start % 2) || (mode->hsync_end % 2) || (mode->htotal % 2))) return -EINVAL; @@ -833,6 +835,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder, struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); if (vc4_hdmi->variant->unsupported_odd_h_timings && + !(mode->flags & DRM_MODE_FLAG_DBLCLK) && ((mode->hdisplay % 2) || (mode->hsync_start % 2) || (mode->hsync_end % 2) || (mode->htotal % 2))) return MODE_H_ILLEGAL; @@ -1806,6 +1809,19 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) vc4_hdmi->disable_wifi_frequencies = of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence"); + /* + * If we boot without any cable connected to the HDMI connector, + * the firmware will skip the HSM initialization and leave it + * with a rate of 0, resulting in a bus lockup when we're + * accessing the registers even if it's enabled. + * + * Let's put a sensible default at runtime_resume so that we + * don't end up in this situation. 
+ */ + ret = clk_set_min_rate(vc4_hdmi->hsm_clock, HSM_MIN_CLOCK_FREQ); + if (ret) + goto err_put_ddc; + if (vc4_hdmi->variant->reset) vc4_hdmi->variant->reset(vc4_hdmi); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 1523b51a7284c03928195c67063db5ab96762f8d..ad208a5f4ebe52e1f14c982bd0c75a2aa3e5fef0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1088,15 +1088,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, struct vmw_private *dev_priv, struct vmw_fence_obj **p_fence, uint32_t *p_handle); -extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, +extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, struct vmw_fpriv *vmw_fp, int ret, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj *fence, uint32_t fence_handle, - int32_t out_fence_fd, - struct sync_file *sync_file); + int32_t out_fence_fd); bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd); /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 83e1b54eb8647d0a9cc3e0d8493172ed16d12349..739cbc77d8867cdb98fb4555467096c8eca4c6ea 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3816,17 +3816,17 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, * Also if copying fails, user-space will be unable to signal the fence object * so we wait for it immediately, and then unreference the user-space reference. */ -void +int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, struct vmw_fpriv *vmw_fp, int ret, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj *fence, uint32_t fence_handle, - int32_t out_fence_fd, struct sync_file *sync_file) + int32_t out_fence_fd) { struct drm_vmw_fence_rep fence_rep; if (user_fence_rep == NULL) - return; + return 0; memset(&fence_rep, 0, sizeof(fence_rep)); @@ -3854,20 +3854,14 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, * handle. */ if (unlikely(ret != 0) && (fence_rep.error == 0)) { - if (sync_file) - fput(sync_file->file); - - if (fence_rep.fd != -1) { - put_unused_fd(fence_rep.fd); - fence_rep.fd = -1; - } - ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle, TTM_REF_USAGE); VMW_DEBUG_USER("Fence copy error. Syncing.\n"); (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); } + + return ret ? 
-EFAULT : 0; } /** @@ -4209,16 +4203,23 @@ int vmw_execbuf_process(struct drm_file *file_priv, (void) vmw_fence_obj_wait(fence, false, false, VMW_FENCE_WAIT_TIMEOUT); + } + } + + ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, + user_fence_rep, fence, handle, out_fence_fd); + + if (sync_file) { + if (ret) { + /* usercopy of fence failed, put the file object */ + fput(sync_file->file); + put_unused_fd(out_fence_fd); } else { /* Link the fence with the FD created earlier */ fd_install(out_fence_fd, sync_file->file); } } - vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, - user_fence_rep, fence, handle, out_fence_fd, - sync_file); - /* Don't unreference when handing fence out */ if (unlikely(out_fence != NULL)) { *out_fence = fence; @@ -4236,7 +4237,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, */ vmw_validation_unref_lists(&val_ctx); - return 0; + return ret; out_unlock_binding: mutex_unlock(&dev_priv->binding_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 0f8d293971576a4f6d2faf78c6e24e8825457a58..8bc41ec97d71a29fb9bf804ca8fc41671e716e82 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -1171,7 +1171,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data, } vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence, - handle, -1, NULL); + handle, -1); vmw_fence_obj_unreference(&fence); return 0; out_no_create: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 312ed0881a99b63c106abe60ab5bf2daa43a8aa8..e58112997c88136e4ecce5c238e99d366f2f5b6c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2479,7 +2479,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, if (file_priv) vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, user_fence_rep, fence, - handle, -1, NULL); + handle, -1); if (out_fence) *out_fence = fence; else diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index d0ebb70e2fdd6e56750db8a0772b8491f8b7bf54..8659558b518d6afc3100c19631363c42e6fe1b43 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -18,6 +18,10 @@ #include #undef CREATE_TRACE_POINTS +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) +#include +#endif + #include "bus.h" #include "channel.h" #include "debug.h" @@ -232,6 +236,17 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host) struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev); int err; +#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) + if (host->dev->archdata.mapping) { + struct dma_iommu_mapping *mapping = + to_dma_iommu_mapping(host->dev); + arm_iommu_detach_device(host->dev); + arm_iommu_release_mapping(mapping); + + domain = iommu_get_domain_for_dev(host->dev); + } +#endif + /* * We may not always want to enable IOMMU support (for example if the * host1x firewall is already enabled and we don't support addressing @@ -505,6 +520,7 @@ static int host1x_remove(struct platform_device *pdev) host1x_syncpt_deinit(host); reset_control_assert(host->rst); clk_disable_unprepare(host->clk); + host1x_channel_list_free(&host->channel_list); host1x_iommu_exit(host); return 0; diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c index b4a31d506fccf0cf733e91ea5c39ed39f959af45..74eca68891add5b371486d2b6b8d896411507071 100644 --- a/drivers/gpu/ipu-v3/ipu-di.c +++ b/drivers/gpu/ipu-v3/ipu-di.c @@ -451,8 +451,9 @@ 
static void ipu_di_config_clock(struct ipu_di *di, error = rate / (sig->mode.pixelclock / 1000); - dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %d.%u%%\n", - rate, div, (signed)(error - 1000) / 10, error % 10); + dev_dbg(di->ipu->dev, " IPU clock can give %lu with divider %u, error %c%d.%d%%\n", + rate, div, error < 1000 ? '-' : '+', + abs(error - 1000) / 10, abs(error - 1000) % 10); /* Allow a 1% error */ if (error < 1010 && error >= 990) { diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c index ce7740ef449babaf0f7bfd1afd2021cb6c7a7aec..51d0875a34800aec4ea11347060e4d1956443aba 100644 --- a/drivers/greybus/svc.c +++ b/drivers/greybus/svc.c @@ -866,8 +866,14 @@ static int gb_svc_hello(struct gb_operation *op) gb_svc_debugfs_init(svc); - return gb_svc_queue_deferred_request(op); + ret = gb_svc_queue_deferred_request(op); + if (ret) + goto err_remove_debugfs; + + return 0; +err_remove_debugfs: + gb_svc_debugfs_exit(svc); err_unregister_device: gb_svc_watchdog_destroy(svc); device_del(&svc->dev); diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 5c1d33cda863b2be27caaf278ee835d5b0d244e2..e5d2e7e9541b8296b421c2933ec299a46bc99fe7 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -415,7 +415,7 @@ static int apple_input_configured(struct hid_device *hdev, if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n"); - asc->quirks = 0; + asc->quirks &= ~APPLE_HAS_FN; } return 0; diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 982737827b871278239c2372984e8a0e29ca0cd0..f4e2e6937758952ed499e6adbaf89ed147221945 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -823,7 +823,9 @@ static const char *keys[KEY_MAX + 1] = { [KEY_F22] = "F22", [KEY_F23] = "F23", [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD", [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3", - [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend", + [KEY_PROG4] = "Prog4", + [KEY_ALL_APPLICATIONS] = "AllApplications", + [KEY_SUSPEND] = "Suspend", [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play", [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost", [KEY_PRINT] = "Print", [KEY_HP] = "HP", @@ -930,6 +932,7 @@ static const char *keys[KEY_MAX + 1] = { [KEY_SCREENSAVER] = "ScreenSaver", [KEY_VOICECOMMAND] = "VoiceCommand", [KEY_EMOJI_PICKER] = "EmojiPicker", + [KEY_DICTATE] = "Dictate", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", [KEY_BRIGHTNESS_MAX] = "BrightnessMax", [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 370ec4402ebe3b01dd705a203029ab066dcc3dbd..d2e4f9f5507d53070a124d29e46554935d0effb4 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -1318,6 +1318,7 @@ #define USB_VENDOR_ID_UGTIZER 0x2179 #define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053 #define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077 +#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004 #define USB_VENDOR_ID_VIEWSONIC 0x0543 #define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 580d378342c41d7d99dcf60587ace66ba0807258..a17d1dda95703718087f59aa1ed349f9a1af6c4d 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -956,6 +956,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break; + case 0x0d8: 
map_key_clear(KEY_DICTATE); break; case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break; case 0x0e0: map_abs_clear(ABS_VOLUME); break; @@ -1047,6 +1048,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break; + case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS); break; + case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break; case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break; case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break; @@ -1288,6 +1291,12 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct input = field->hidinput->input; + if (usage->type == EV_ABS && + (((*quirks & HID_QUIRK_X_INVERT) && usage->code == ABS_X) || + ((*quirks & HID_QUIRK_Y_INVERT) && usage->code == ABS_Y))) { + value = field->logical_maximum - value; + } + if (usage->hat_min < usage->hat_max || usage->hat_dir) { int hat_dir = usage->hat_dir; if (!hat_dir) diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index a311b0a33eba7ff45501c396281daba4919a79b8..587259b3db97c5c547b049ea81ba5017252e4771 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -1000,6 +1000,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev, workitem.reports_supported |= STD_KEYBOARD; break; case 0x0f: + case 0x11: device_type = "eQUAD Lightspeed 1.2"; logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem); workitem.reports_supported |= STD_KEYBOARD; diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 84a30202e3dbe3f5d84912c5227e40563dd1f977..2ab71d717bb03935688b2282bb1de4e28343f7f8 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c index dd05bed4ca53a9696409303d21e7494761b3dc57..38f9bbad81c1774508f6291e16b3159fe5e3844e 100644 --- a/drivers/hid/hid-uclogic-params.c +++ b/drivers/hid/hid-uclogic-params.c @@ -65,7 +65,7 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev, __u8 idx, size_t len) { int rc; - struct usb_device *udev = hid_to_usb_dev(hdev); + struct usb_device *udev; __u8 *buf = NULL; /* Check arguments */ @@ -74,6 +74,8 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev, goto cleanup; } + udev = hid_to_usb_dev(hdev); + buf = kmalloc(len, GFP_KERNEL); if (buf == NULL) { rc = -ENOMEM; @@ -449,7 +451,7 @@ static int uclogic_params_frame_init_v1_buttonpad( { int rc; bool found = false; - struct usb_device *usb_dev = hid_to_usb_dev(hdev); + struct usb_device *usb_dev; char *str_buf = NULL; const size_t str_len = 16; @@ -459,6 +461,8 @@ static int 
uclogic_params_frame_init_v1_buttonpad( goto cleanup; } + usb_dev = hid_to_usb_dev(hdev); + /* * Enable generic button mode */ @@ -705,9 +709,9 @@ static int uclogic_params_huion_init(struct uclogic_params *params, struct hid_device *hdev) { int rc; - struct usb_device *udev = hid_to_usb_dev(hdev); - struct usb_interface *iface = to_usb_interface(hdev->dev.parent); - __u8 bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber; + struct usb_device *udev; + struct usb_interface *iface; + __u8 bInterfaceNumber; bool found; /* The resulting parameters (noop) */ struct uclogic_params p = {0, }; @@ -721,6 +725,10 @@ static int uclogic_params_huion_init(struct uclogic_params *params, goto cleanup; } + udev = hid_to_usb_dev(hdev); + iface = to_usb_interface(hdev->dev.parent); + bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber; + /* If it's not a pen interface */ if (bInterfaceNumber != 0) { /* TODO: Consider marking the interface invalid */ @@ -832,10 +840,10 @@ int uclogic_params_init(struct uclogic_params *params, struct hid_device *hdev) { int rc; - struct usb_device *udev = hid_to_usb_dev(hdev); - __u8 bNumInterfaces = udev->config->desc.bNumInterfaces; - struct usb_interface *iface = to_usb_interface(hdev->dev.parent); - __u8 bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber; + struct usb_device *udev; + __u8 bNumInterfaces; + struct usb_interface *iface; + __u8 bInterfaceNumber; bool found; /* The resulting parameters (noop) */ struct uclogic_params p = {0, }; @@ -846,6 +854,11 @@ int uclogic_params_init(struct uclogic_params *params, goto cleanup; } + udev = hid_to_usb_dev(hdev); + bNumInterfaces = udev->config->desc.bNumInterfaces; + iface = to_usb_interface(hdev->dev.parent); + bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber; + /* * Set replacement report descriptor if the original matches the * specified size. Otherwise keep interface unchanged. diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c index 72957a9f7117052fb7b7e9bdf0fd87d14018400e..d57ec17670379cf75b28dfd8bab713332fa30b4a 100644 --- a/drivers/hid/hid-vivaldi.c +++ b/drivers/hid/hid-vivaldi.c @@ -74,10 +74,11 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, struct hid_usage *usage) { struct vivaldi_data *drvdata = hid_get_drvdata(hdev); + struct hid_report *report = field->report; int fn_key; int ret; u32 report_len; - u8 *buf; + u8 *report_data, *buf; if (field->logical != HID_USAGE_FN_ROW_PHYSMAP || (usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL) @@ -89,12 +90,24 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, if (fn_key > drvdata->max_function_row_key) drvdata->max_function_row_key = fn_key; - buf = hid_alloc_report_buf(field->report, GFP_KERNEL); - if (!buf) + report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL); + if (!report_data) return; - report_len = hid_report_len(field->report); - ret = hid_hw_raw_request(hdev, field->report->id, buf, + report_len = hid_report_len(report); + if (!report->id) { + /* + * hid_hw_raw_request() will stuff report ID (which will be 0) + * into the first byte of the buffer even for unnumbered + * reports, so we need to account for this to avoid getting + * -EOVERFLOW in return. + * Note that hid_alloc_report_buf() adds 7 bytes to the size + * so we can safely say that we have space for an extra byte. 
+ */ + report_len++; + } + + ret = hid_hw_raw_request(hdev, report->id, report_data, report_len, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) { @@ -103,7 +116,16 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, goto out; } - ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, buf, + if (!report->id) { + /* + * Undo the damage from hid_hw_raw_request() for unnumbered + * reports. + */ + report_data++; + report_len--; + } + + ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data, report_len, 0); if (ret) { dev_warn(&hdev->dev, "failed to report feature %d\n", @@ -121,7 +143,7 @@ static void vivaldi_feature_mapping(struct hid_device *hdev, static int vivaldi_input_configured(struct hid_device *hdev, struct hid_input *hidinput) { - return sysfs_create_group(&hdev->dev.kobj, &input_attribute_group); + return devm_device_add_group(&hdev->dev, &input_attribute_group); } static const struct hid_device_id vivaldi_table[] = { diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 998aad8a9e60893d5b5b5830245ea311fc6068c4..14811d42a5a918bfdd2ba09c7dedaaf50317917a 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -620,6 +620,17 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, if (report_type == HID_OUTPUT_REPORT) return -EINVAL; + /* + * In case of unnumbered reports the response from the device will + * not have the report ID that the upper layers expect, so we need + * to stash it in the buffer ourselves and adjust the data size. + */ + if (!report_number) { + buf[0] = 0; + buf++; + count--; + } + /* +2 bytes to include the size of the reply in the query buffer */ ask_count = min(count + 2, (size_t)ihid->bufsize); @@ -641,6 +652,9 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, count = min(count, ret_count - 2); memcpy(buf, ihid->rawbuf + 2, count); + if (!report_number) + count++; + return count; } @@ -657,17 +671,19 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf, mutex_lock(&ihid->reset_lock); - if (report_id) { - buf++; - count--; - } - + /* + * Note that both numbered and unnumbered reports passed here + * are supposed to have the report ID stored in the 1st byte of the + * buffer, so we strip it off unconditionally before passing payload + * to i2c_hid_set_or_send_report which takes care of encoding + * everything properly. + */ ret = i2c_hid_set_or_send_report(client, report_type == HID_FEATURE_REPORT ?
0x03 : 0x02, - report_id, buf, count, use_data); + report_id, buf + 1, count - 1, use_data); - if (report_id && ret >= 0) - ret++; /* add report_id to the number of transfered bytes */ + if (ret >= 0) + ret++; /* add report_id to the number of transferred bytes */ mutex_unlock(&ihid->reset_lock); diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c index 6cf59fd26ad784356923115c3c28054a344990b0..b6d6d119035ca0405a46c3b94d64f35ea5159be0 100644 --- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c +++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c @@ -656,21 +656,12 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data, */ payload_max_size &= ~(L1_CACHE_BYTES - 1); - dma_buf = kmalloc(payload_max_size, GFP_KERNEL | GFP_DMA32); + dma_buf = dma_alloc_coherent(devc, payload_max_size, &dma_buf_phy, GFP_KERNEL); if (!dma_buf) { client_data->flag_retry = true; return -ENOMEM; } - dma_buf_phy = dma_map_single(devc, dma_buf, payload_max_size, - DMA_TO_DEVICE); - if (dma_mapping_error(devc, dma_buf_phy)) { - dev_err(cl_data_to_dev(client_data), "DMA map failed\n"); - client_data->flag_retry = true; - rv = -ENOMEM; - goto end_err_dma_buf_release; - } - ldr_xfer_dma_frag.fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT; ldr_xfer_dma_frag.fragment.xfer_mode = LOADER_XFER_MODE_DIRECT_DMA; ldr_xfer_dma_frag.ddr_phys_addr = (u64)dma_buf_phy; @@ -690,14 +681,7 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data, ldr_xfer_dma_frag.fragment.size = fragment_size; memcpy(dma_buf, &fw->data[fragment_offset], fragment_size); - dma_sync_single_for_device(devc, dma_buf_phy, - payload_max_size, - DMA_TO_DEVICE); - - /* - * Flush cache here because the dma_sync_single_for_device() - * does not do for x86. - */ + /* Flush cache to be sure the data is in main memory. */ clflush_cache_range(dma_buf, payload_max_size); dev_dbg(cl_data_to_dev(client_data), @@ -720,15 +704,8 @@ static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data, fragment_offset += fragment_size; } - dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE); - kfree(dma_buf); - return 0; - end_err_resp_buf_release: - /* Free ISH buffer if not done already, in error case */ - dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE); -end_err_dma_buf_release: - kfree(dma_buf); + dma_free_coherent(devc, payload_max_size, dma_buf, dma_buf_phy); return rv; } diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index 8fe3efcb832715c4a978625387dc0bb0ea5b00a2..fc06d8bb42e0fe94e75167fe360c600283f80a4d 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -28,11 +28,22 @@ struct uhid_device { struct mutex devlock; + + /* This flag tracks whether the HID device is usable for commands from + * userspace. The flag is already set before hid_add_device(), which + * runs in workqueue context, to allow hid_add_device() to communicate + * with userspace. + * However, if hid_add_device() fails, the flag is cleared without + * holding devlock. + * We guarantee that if @running changes from true to false while you're + * holding @devlock, it's still fine to access @hid. + */ bool running; __u8 *rd_data; uint rd_size; + /* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. 
*/ struct hid_device *hid; struct uhid_event input_buf; @@ -63,9 +74,18 @@ static void uhid_device_add_worker(struct work_struct *work) if (ret) { hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); - hid_destroy_device(uhid->hid); - uhid->hid = NULL; + /* We used to call hid_destroy_device() here, but that's really + * messy to get right because we have to coordinate with + * concurrent writes from userspace that might be in the middle + * of using uhid->hid. + * Just leave uhid->hid as-is for now, and clean it up when + * userspace tries to close or reinitialize the uhid instance. + * + * However, we do have to clear the ->running flag and do a + * wakeup to make sure userspace knows that the device is gone. + */ uhid->running = false; + wake_up_interruptible(&uhid->report_wait); } } @@ -474,7 +494,7 @@ static int uhid_dev_create2(struct uhid_device *uhid, void *rd_data; int ret; - if (uhid->running) + if (uhid->hid) return -EALREADY; rd_size = ev->u.create2.rd_size; @@ -556,7 +576,7 @@ static int uhid_dev_create(struct uhid_device *uhid, static int uhid_dev_destroy(struct uhid_device *uhid) { - if (!uhid->running) + if (!uhid->hid) return -EINVAL; uhid->running = false; @@ -565,6 +585,7 @@ static int uhid_dev_destroy(struct uhid_device *uhid) cancel_work_sync(&uhid->worker); hid_destroy_device(uhid->hid); + uhid->hid = NULL; kfree(uhid->rd_data); return 0; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index c25274275258f92344acf355f56dfcd45836593d..d90bfa8b7313e4b05a6d697d7a14bc81ac952c45 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2566,6 +2566,24 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, } } +static bool wacom_wac_slot_is_active(struct input_dev *dev, int key) +{ + struct input_mt *mt = dev->mt; + struct input_mt_slot *s; + + if (!mt) + return false; + + for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { + if (s->key == key && + input_mt_get_value(s, ABS_MT_TRACKING_ID) >= 0) { + return true; + } + } + + return false; +} + static void wacom_wac_finger_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { @@ -2613,9 +2631,14 @@ static void wacom_wac_finger_event(struct hid_device *hdev, } if (usage->usage_index + 1 == field->report_count) { - if (equivalent_usage == wacom_wac->hid_data.last_slot_field && - wacom_wac->hid_data.confidence) - wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); + if (equivalent_usage == wacom_wac->hid_data.last_slot_field) { + bool touch_removed = wacom_wac_slot_is_active(wacom_wac->touch_input, + wacom_wac->hid_data.id) && !wacom_wac->hid_data.tipswitch; + + if (wacom_wac->hid_data.confidence || touch_removed) { + wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); + } + } } } @@ -2631,6 +2654,10 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, hid_data->confidence = true; + hid_data->cc_report = 0; + hid_data->cc_index = -1; + hid_data->cc_value_index = -1; + for (i = 0; i < report->maxfield; i++) { struct hid_field *field = report->field[i]; int j; @@ -2664,11 +2691,14 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev, hid_data->cc_index >= 0) { struct hid_field *field = report->field[hid_data->cc_index]; int value = field->value[hid_data->cc_value_index]; - if (value) + if (value) { hid_data->num_expected = value; + hid_data->num_received = 0; + } } else { hid_data->num_expected = wacom_wac->features.touch_max; + hid_data->num_received = 0; } } @@ -2692,6 +2722,7 @@ static 
void wacom_wac_finger_report(struct hid_device *hdev, input_sync(input); wacom_wac->hid_data.num_received = 0; + wacom_wac->hid_data.num_expected = 0; /* keep touch state for pen event */ wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac); diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c index a5f92e2889cb81c58e66048b25d145502ea2a1bb..a330f58d45fc637d502a9efc331b9d9bb7948963 100644 --- a/drivers/hsi/hsi_core.c +++ b/drivers/hsi/hsi_core.c @@ -102,6 +102,7 @@ struct hsi_client *hsi_new_client(struct hsi_port *port, if (device_register(&cl->device) < 0) { pr_err("hsi: failed to register client: %s\n", info->name); put_device(&cl->device); + goto err; } return cl; diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 6476bfe193afdefdabb14e2752b6b281e6982b9e..5dbb949b1afd8f55aad130a76d8c67cf923fba2d 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -350,7 +350,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel) * execute: * * (a) In the "normal (i.e., not resuming from hibernation)" path, - * the full barrier in smp_store_mb() guarantees that the store + * the full barrier in virt_store_mb() guarantees that the store * is propagated to all CPUs before the add_channel_work work * is queued. In turn, add_channel_work is queued before the * channel's ring buffer is allocated/initialized and the @@ -362,14 +362,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel) * recv_int_page before retrieving the channel pointer from the * array of channels. * - * (b) In the "resuming from hibernation" path, the smp_store_mb() + * (b) In the "resuming from hibernation" path, the virt_store_mb() * guarantees that the store is propagated to all CPUs before * the VMBus connection is marked as ready for the resume event * (cf. check_ready_for_resume_event()). The interrupt handler * of the VMBus driver and vmbus_chan_sched() can not run before * vmbus_bus_resume() has completed execution (cf. resume_noirq). */ - smp_store_mb( + virt_store_mb( vmbus_connection.channels[channel->offermsg.child_relid], channel); } diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index eb56e09ae15f3999478b0e75f601836c92ad175a..6a716996a6250b30ac458a8a95714b44c9f04d8e 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1558,7 +1558,7 @@ static void balloon_onchannelcallback(void *context) break; default: - pr_warn("Unhandled message: type: %d\n", dm_hdr->type); + pr_warn_ratelimited("Unhandled message: type: %d\n", dm_hdr->type); } } diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 356e22159e8348e9119e7f35e255d5a3aaa762b6..769851b6e74c5c5d15d38446493b98654365a199 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -378,7 +378,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel, static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi) { u32 priv_read_loc = rbi->priv_read_index; - u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index); + u32 write_loc; + + /* + * The Hyper-V host writes the packet data, then uses + * store_release() to update the write_index. Use load_acquire() + * here to prevent loads of the packet data from being re-ordered + * before the read of the write_index and potentially getting + * stale data. 
+ */ + write_loc = virt_load_acquire(&rbi->ring_buffer->write_index); if (write_loc >= priv_read_loc) return write_loc - priv_read_loc; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index a5a402e776c77f2640a1de5b09a13b3f0ff330e2..b9ac357e465db91d2420bfe71ec7b37506bb0bad 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1944,8 +1944,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) kobj->kset = dev->channels_kset; ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL, "%u", relid); - if (ret) + if (ret) { + kobject_put(kobj); return ret; + } ret = sysfs_create_group(kobj, &vmbus_chan_group); @@ -1954,6 +1956,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) * The calling functions' error handling paths will cleanup the * empty channel directory. */ + kobject_put(kobj); dev_err(device, "Unable to set up channel sysfs files\n"); return ret; } @@ -2670,10 +2673,15 @@ static void __exit vmbus_exit(void) if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) { kmsg_dump_unregister(&hv_kmsg_dumper); unregister_die_notifier(&hyperv_die_block); - atomic_notifier_chain_unregister(&panic_notifier_list, - &hyperv_panic_block); } + /* + * The panic notifier is always registered, hence we should + * unconditionally unregister it here as well. + */ + atomic_notifier_chain_unregister(&panic_notifier_list, + &hyperv_panic_block); + free_page((unsigned long)hv_panic_page); unregister_sysctl_table(hv_ctl_table_hdr); hv_ctl_table_hdr = NULL; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 0c2b032ee6176c755e319139dd9c612154eec9d8..f2fe56e6f8bdfd72db21b234500dce8ea33eca01 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -51,6 +51,16 @@ config SENSORS_AB8500 This driver can also be built as a module. If so, the module will be called abx500-temp. +config SENSORS_PVT + tristate "SW64 PVT monitor" + depends on SW64 + help + If you say yes here you get support for the voltage + sensor inside your CPU. + + This driver can also be built as a module. If so, the module + will be called sw64_pvt. + config SENSORS_ABITUGURU tristate "Abit uGuru (rev 1 & 2)" depends on X86 && DMI @@ -1889,6 +1899,15 @@ config SENSORS_VIA_CPUTEMP sensor inside your CPU. Supported are all known variants of the VIA C7 and Nano. +config SENSORS_ZHAOXIN_CPUTEMP + tristate "Zhaoxin CPU temperature sensor" + depends on X86 + select HWMON_VID + help + If you say yes here you get support for the temperature + sensor inside your CPU. Supported are all known variants of + the Zhaoxin processors. 
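As an aside on the Zhaoxin entry above: the driver added later in this patch reads the die temperature through a model-specific register. A minimal sketch of that read path, mirroring the MSR number (0x1423) and the 24-bit scaling used by the driver below; the function name here is illustrative and not part of the patch:

#include <asm/msr.h>

/* Return the core temperature of @cpu in millidegrees Celsius. */
static int zhaoxin_read_temp_mc(unsigned int cpu, unsigned long *temp_mc)
{
	u32 eax, edx;
	int err;

	/*
	 * rdmsr_safe_on_cpu() executes the read on the target CPU and
	 * returns an error instead of faulting if the MSR is missing.
	 */
	err = rdmsr_safe_on_cpu(cpu, 0x1423, &eax, &edx);
	if (err)
		return err;

	/* The low 24 bits hold degrees C; hwmon reports millidegrees. */
	*temp_mc = ((unsigned long)eax & 0xffffff) * 1000;
	return 0;
}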
+ config SENSORS_VIA686A tristate "VIA686A" depends on PCI diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 9db2903b61e5b7a45eb9a197b2677166e281f0b4..95908c478b94554dedb5c20d0a27b565348d6d9c 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -184,6 +184,7 @@ obj-$(CONFIG_SENSORS_TMP421) += tmp421.o obj-$(CONFIG_SENSORS_TMP513) += tmp513.o obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o +obj-$(CONFIG_SENSORS_ZHAOXIN_CPUTEMP) += zhaoxin-cputemp.o obj-$(CONFIG_SENSORS_VIA686A) += via686a.o obj-$(CONFIG_SENSORS_VT1211) += vt1211.o obj-$(CONFIG_SENSORS_VT8231) += vt8231.o @@ -193,6 +194,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o obj-$(CONFIG_SENSORS_XGENE) += xgene-hwmon.o +obj-$(CONFIG_SENSORS_PVT) += sw64_pvt.o obj-$(CONFIG_SENSORS_OCC) += occ/ obj-$(CONFIG_PMBUS) += pmbus/ diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 87f401100466d3bd66082bb021d4ccf257ea16b3..10c7b6295b02e8b2819e2fa61cc09ce467d19e35 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -317,7 +317,7 @@ static int i8k_enable_fan_auto_mode(bool enable) } /* - * Set the fan speed (off, low, high). Returns the new fan status. + * Set the fan speed (off, low, high, ...). */ static int i8k_set_fan(int fan, int speed) { @@ -329,7 +329,7 @@ static int i8k_set_fan(int fan, int speed) speed = (speed < 0) ? 0 : ((speed > i8k_fan_max) ? i8k_fan_max : speed); regs.ebx = (fan & 0xff) | (speed << 8); - return i8k_smm(®s) ? : i8k_get_fan_status(fan); + return i8k_smm(®s); } static int i8k_get_temp_type(int sensor) @@ -443,7 +443,7 @@ static int i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) { int val = 0; - int speed; + int speed, err; unsigned char buff[16]; int __user *argp = (int __user *)arg; @@ -504,7 +504,11 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) if (copy_from_user(&speed, argp + 1, sizeof(int))) return -EFAULT; - val = i8k_set_fan(val, speed); + err = i8k_set_fan(val, speed); + if (err < 0) + return err; + + val = i8k_get_fan_status(val); break; default: diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index e5a83f74926772ac138e3479945612669c5ec2bd..d649fea8299948a183b582669df670c728877927 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -178,12 +178,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index) tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata, &hwmon_thermal_ops); - /* - * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, - * so ignore that error but forward any other error. 
- */ - if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV)) - return PTR_ERR(tzd); + if (IS_ERR(tzd)) { + if (PTR_ERR(tzd) != -ENODEV) + return PTR_ERR(tzd); + dev_info(dev, "temp%d_input not attached to any thermal zone\n", + index + 1); + devm_kfree(dev, tdata); + return 0; + } err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node); if (err) diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index 959446b0137bcba40ab1c54d1c2f8c46efb368f5..a7142c32889c056bc25572cce925afbbe0546bff 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c @@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT, .alert_alarms = 0x7c, - .max_convrate = 8, + .max_convrate = 7, }, [lm86] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT @@ -394,12 +394,13 @@ static const struct lm90_params lm90_params[] = { .max_convrate = 9, }, [max6646] = { - .flags = LM90_HAVE_CRIT, + .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT, .alert_alarms = 0x7c, .max_convrate = 6, .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, }, [max6654] = { + .flags = LM90_HAVE_BROKEN_ALERT, .alert_alarms = 0x7c, .max_convrate = 7, .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL, @@ -418,7 +419,7 @@ static const struct lm90_params lm90_params[] = { }, [max6680] = { .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT - | LM90_HAVE_CRIT_ALRM_SWP, + | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT, .alert_alarms = 0x7c, .max_convrate = 7, }, diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c index 18da5a25e89abfb950fe228ebe243d98ec93abb6..046523d47c29b75dfb045e95ad6a8ea86a42752e 100644 --- a/drivers/hwmon/mr75203.c +++ b/drivers/hwmon/mr75203.c @@ -93,7 +93,7 @@ #define VM_CH_REQ BIT(21) #define IP_TMR 0x05 -#define POWER_DELAY_CYCLE_256 0x80 +#define POWER_DELAY_CYCLE_256 0x100 #define POWER_DELAY_CYCLE_64 0x40 #define PVT_POLL_DELAY_US 20 diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 88a5df2633fb254cbbf6474eae59899eb7ec0081..de27837e852717a42e98b902295bf2c8c79b628f 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -319,6 +319,7 @@ enum pmbus_fan_mode { percent = 0, rpm }; /* * STATUS_VOUT, STATUS_INPUT */ +#define PB_VOLTAGE_VIN_OFF BIT(3) #define PB_VOLTAGE_UV_FAULT BIT(4) #define PB_VOLTAGE_UV_WARNING BIT(5) #define PB_VOLTAGE_OV_WARNING BIT(6) diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index b0e2820a2d578f62ca0db7ac565ca5404065c790..117e3ce9c76ad8a775b4b6f75c0285f98cf1133b 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -898,6 +898,11 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b, pmbus_update_sensor_data(client, s2); regval = status & mask; + if (regval) { + ret = pmbus_write_byte_data(client, page, reg, regval); + if (ret) + goto unlock; + } if (s1 && s2) { s64 v1, v2; @@ -1355,7 +1360,7 @@ static const struct pmbus_limit_attr vin_limit_attrs[] = { .reg = PMBUS_VIN_UV_FAULT_LIMIT, .attr = "lcrit", .alarm = "lcrit_alarm", - .sbit = PB_VOLTAGE_UV_FAULT, + .sbit = PB_VOLTAGE_UV_FAULT | PB_VOLTAGE_VIN_OFF, }, { .reg = PMBUS_VIN_OV_WARN_LIMIT, .attr = "max", @@ -2250,10 +2255,14 @@ static int pmbus_regulator_is_enabled(struct regulator_dev *rdev) { struct device *dev = rdev_get_dev(rdev); struct i2c_client *client = to_i2c_client(dev->parent); + struct pmbus_data *data = i2c_get_clientdata(client); u8 page = rdev_get_id(rdev); int ret; + 
mutex_lock(&data->update_lock); ret = pmbus_read_byte_data(client, page, PMBUS_OPERATION); + mutex_unlock(&data->update_lock); + if (ret < 0) return ret; @@ -2264,11 +2273,17 @@ static int _pmbus_regulator_on_off(struct regulator_dev *rdev, bool enable) { struct device *dev = rdev_get_dev(rdev); struct i2c_client *client = to_i2c_client(dev->parent); + struct pmbus_data *data = i2c_get_clientdata(client); u8 page = rdev_get_id(rdev); + int ret; - return pmbus_update_byte_data(client, page, PMBUS_OPERATION, - PB_OPERATION_CONTROL_ON, - enable ? PB_OPERATION_CONTROL_ON : 0); + mutex_lock(&data->update_lock); + ret = pmbus_update_byte_data(client, page, PMBUS_OPERATION, + PB_OPERATION_CONTROL_ON, + enable ? PB_OPERATION_CONTROL_ON : 0); + mutex_unlock(&data->update_lock); + + return ret; } static int pmbus_regulator_enable(struct regulator_dev *rdev) diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c index 6c84780e358e8d12cc21a73deaa4644ae637ec0c..066b12990fbfb6f97ee7f16beb174ab388595013 100644 --- a/drivers/hwmon/sch56xx-common.c +++ b/drivers/hwmon/sch56xx-common.c @@ -424,7 +424,7 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent, if (nowayout) set_bit(WDOG_NO_WAY_OUT, &data->wddev.status); if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE) - set_bit(WDOG_ACTIVE, &data->wddev.status); + set_bit(WDOG_HW_RUNNING, &data->wddev.status); /* Since the watchdog uses a downcounter there is no register to read the BIOS set timeout from (if any was set at all) -> diff --git a/drivers/hwmon/sw64_pvt.c b/drivers/hwmon/sw64_pvt.c new file mode 100644 index 0000000000000000000000000000000000000000..9e292a90af38887bed826284ac64f6b06c1c7e8b --- /dev/null +++ b/drivers/hwmon/sw64_pvt.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PVT device driver. + * + * Part of lm_sensors, Linux kernel modules + * for hardware monitoring on Sunway platforms. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +#define PVT_VSYS 0 +#define PVT0_CTRL 0x7c00 +#define PVT02SPBU_DATA_OUT (0x1 << 26) +#define PVT_READ 0xc000 +#define PVT_WADDR 0xc800 +#define PVT_WDATA 0xcc00 + +/* The PVT registers */ +#define PVT_SAFECTRL 0x0 +#define CLK_SEL 0x1 +#define PVT_RUN 0x2 +#define PVT_CONFIG 0x3 +#define PVT_WAIT_TIME 0x4 +#define TS_ALARM_HVALUE_L 0x5 +#define TS_ALARM_HVALUE_H 0x6 +#define TS_ALARM_LVALUE_L 0x7 +#define TS_ALARM_LVALUE_H 0x8 +#define TS_ALARM_TIMES 0x9 +#define TRIMG 0xa +#define TRIMO 0xb +#define VS_ALARM_HVALUE_L 0xc +#define VS_ALARM_HVALUE_H 0xd +#define VS_ALARM_LVALUE_L 0xe +#define VS_ALARM_LVALUE_H 0xf +#define VS_ALARM_TIMES 0x10 +#define PVT_ALARM_CLEAR 0x11 +#define PVT_ALARM_MASK 0x12 +#define PVT_DATA_OUT_L 0x13 +#define PVT_DATA_OUT_H 0x14 +#define PVT_STATE_INFO 0x15 +#define PVT_ALARM_INFO 0x16 +#define COEFFICIENT 71 +#define FIXEDVAL 45598 + +#define vol_algorithm(m, n) (((((m >> 16) & 0x3) * 0x100) +\ + ((n >> 16) & 0xff)) * COEFFICIENT + FIXEDVAL) + + +struct pvt_hwmon { + struct pvt *pvt; + void __iomem *base; +}; + +static const char * const input_names[] = { + [PVT_VSYS] = "voltage", +}; + +static inline void pvt_write_reg(struct pvt_hwmon *pvtvol, u64 a, + u64 b, unsigned int offset) +{ + writel(a | b, pvtvol->base + offset); +} + +static inline u64 pvt_read_reg(struct pvt_hwmon *pvtvol, unsigned int offset) +{ + u64 value; + + value = readl(pvtvol->base + offset); + return value; +} + +void pvt_configure(struct pvt_hwmon *pvtvol, u64 value, u64 reg) +{ + pvt_write_reg(pvtvol, PVT_WDATA, value, PVT0_CTRL); + pvt_write_reg(pvtvol, PVT_WADDR, reg, PVT0_CTRL); +} + +static inline u64 pvt_read_vol(struct pvt_hwmon *pvtvol, u64 data, + u64 reg, unsigned int offset) +{ + unsigned int value; + + pvt_write_reg(pvtvol, data, reg, offset); + msleep(100); + value = pvt_read_reg(pvtvol, offset); + return value; +} + +static int pvt_get_vol(struct pvt_hwmon *pvtvol) +{ + unsigned long long data_h, data_l; + + pvt_configure(pvtvol, 0x1, PVT_SAFECTRL); + + /* configure PVT mode */ + pvt_configure(pvtvol, 0x3, PVT_CONFIG); + + /* PVT monitor enable */ + pvt_configure(pvtvol, 0x1, PVT_RUN); + + /* get the upper 2 bits of the PVT voltage */ + data_h = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_H, PVT0_CTRL); + if ((data_h & PVT02SPBU_DATA_OUT) == 0) { + pr_err("error: failed to read Voltage_h\n"); + return 0; + } + + /* get the lower 8 bits of the PVT voltage */ + data_l = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_L, PVT0_CTRL); + if ((data_l & PVT02SPBU_DATA_OUT) == 0) { + pr_err("error: failed to read Voltage_l\n"); + return 0; + } + + return vol_algorithm(data_h, data_l); +} + +static ssize_t pvt_read(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct pvt_hwmon *pvtvol = dev_get_drvdata(dev); + unsigned long long pvt_vol; + + pvt_vol = pvt_get_vol(pvtvol); + return sprintf(buf, "%lld\n", (pvt_vol / 100)); +} + +static ssize_t show_label(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + return sprintf(buf, "%s\n", + input_names[to_sensor_dev_attr(devattr)->index]); +} + +static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, pvt_read, NULL, + PVT_VSYS); +static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, show_label, NULL, + PVT_VSYS); + +static struct attribute *pvt_attrs[] = { + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in0_label.dev_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(pvt); + +static int pvt_vol_plat_probe(struct platform_device 
*pdev) +{ + struct resource *res; + struct pvt_hwmon *pvtvol; + struct device *hwmon_dev; + unsigned long long value; + struct device *dev = &pdev->dev; + + pvtvol = devm_kzalloc(&pdev->dev, sizeof(*pvtvol), GFP_KERNEL); + if (!pvtvol) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + goto err; + + pvtvol->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pvtvol->base)) + return PTR_ERR(pvtvol->base); + + platform_set_drvdata(pdev, pvtvol); + hwmon_dev = devm_hwmon_device_register_with_groups(dev, "pvt", + pvtvol, pvt_groups); + + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + value = pvt_get_vol(pvtvol); + if (!value) { + dev_err(&pdev->dev, "pvt_vol get failed\n"); + return -ENODEV; + } + + return 0; + +err: + dev_err(&pdev->dev, "no PVT resource\n"); + return -ENXIO; +} + +#ifdef CONFIG_OF +static const struct of_device_id pvt_vol_of_match[] = { + { .compatible = "sw64,pvt-vol", }, + {}, +}; +MODULE_DEVICE_TABLE(of, pvt_vol_of_match); +#endif + +static struct platform_driver pvt_vol_driver = { + .probe = pvt_vol_plat_probe, + .driver = { + .name = "pvt-sw64", + .of_match_table = of_match_ptr(pvt_vol_of_match), + }, +}; + +static int __init pvt_vol_init_driver(void) +{ + return platform_driver_register(&pvt_vol_driver); +} +subsys_initcall(pvt_vol_init_driver); + +static void __exit pvt_vol_exit_driver(void) +{ + platform_driver_unregister(&pvt_vol_driver); +} +module_exit(pvt_vol_exit_driver); + +MODULE_AUTHOR("Wang Yingying "); +MODULE_DESCRIPTION("pvt controller"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c index e5d18dac8ee7ba91682cc9c421baa6632169443b..0a5057dbe51a63fae6b5a92e22e3ee630a129927 100644 --- a/drivers/hwmon/via-cputemp.c +++ b/drivers/hwmon/via-cputemp.c @@ -273,7 +273,6 @@ static const struct x86_cpu_id __initconst cputemp_ids[] = { X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_A, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_D, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_NANO, NULL), - X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, X86_MODEL_ANY, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); diff --git a/drivers/hwmon/zhaoxin-cputemp.c b/drivers/hwmon/zhaoxin-cputemp.c new file mode 100644 index 0000000000000000000000000000000000000000..39e729590ebac9fa49dce32bf690738b3fbc228f --- /dev/null +++ b/drivers/hwmon/zhaoxin-cputemp.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * zhaoxin-cputemp.c - Driver for Zhaoxin CPU core temperature monitoring + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRVNAME "zhaoxin_cputemp" + +enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME }; + +/* Function declarations */ + +struct zhaoxin_cputemp_data { + struct device *hwmon_dev; + const char *name; + u8 vrm; + u32 id; + u32 msr_temp; + u32 msr_vid; +}; + +/* Sysfs stuff */ + +static ssize_t name_show(struct device *dev, struct device_attribute *devattr, + char *buf) +{ + int ret; + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + + if (attr->index == SHOW_NAME) + ret = sprintf(buf, "%s\n", data->name); + else /* show label */ + ret = sprintf(buf, "Core %d\n", data->id); + return ret; +} + +static ssize_t temp_show(struct device *dev, struct device_attribute *devattr, char 
*buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000); +} + +static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_vid, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%d\n", vid_from_reg(~edx & 0x7f, data->vrm)); +} + +static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, SHOW_TEMP); +static SENSOR_DEVICE_ATTR_RO(temp1_label, name, SHOW_LABEL); +static SENSOR_DEVICE_ATTR_RO(name, name, SHOW_NAME); + +static struct attribute *zhaoxin_cputemp_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + NULL +}; + +static const struct attribute_group zhaoxin_cputemp_group = { + .attrs = zhaoxin_cputemp_attributes, +}; + +/* Optional attributes */ +static DEVICE_ATTR_RO(cpu0_vid); + +static int zhaoxin_cputemp_probe(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data; + int err; + u32 eax, edx; + + data = devm_kzalloc(&pdev->dev, sizeof(struct zhaoxin_cputemp_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->id = pdev->id; + data->name = "zhaoxin_cputemp"; + data->msr_temp = 0x1423; + + /* test if we can access the TEMPERATURE MSR */ + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) { + dev_err(&pdev->dev, "Unable to access TEMPERATURE MSR, giving up\n"); + return err; + } + + platform_set_drvdata(pdev, data); + + err = sysfs_create_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + if (err) + return err; + + if (data->msr_vid) + data->vrm = vid_which_vrm(); + + if (data->vrm) { + err = device_create_file(&pdev->dev, &dev_attr_cpu0_vid); + if (err) + goto exit_remove; + } + + data->hwmon_dev = hwmon_device_register(&pdev->dev); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + dev_err(&pdev->dev, "Class registration failed (%d)\n", err); + goto exit_remove; + } + + return 0; + +exit_remove: + if (data->vrm) + device_remove_file(&pdev->dev, &dev_attr_cpu0_vid); + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return err; +} + +static int zhaoxin_cputemp_remove(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data = platform_get_drvdata(pdev); + + hwmon_device_unregister(data->hwmon_dev); + if (data->vrm) + device_remove_file(&pdev->dev, &dev_attr_cpu0_vid); + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return 0; +} + +static struct platform_driver zhaoxin_cputemp_driver = { + .driver = { + .name = DRVNAME, + }, + .probe = zhaoxin_cputemp_probe, + .remove = zhaoxin_cputemp_remove, +}; + +struct pdev_entry { + struct list_head list; + struct platform_device *pdev; + unsigned int cpu; +}; + +static LIST_HEAD(pdev_list); +static DEFINE_MUTEX(pdev_list_mutex); + +static int zhaoxin_cputemp_online(unsigned int cpu) +{ + int err; + struct platform_device *pdev; + struct pdev_entry *pdev_entry; + + pdev = platform_device_alloc(DRVNAME, cpu); + if (!pdev) { + err = -ENOMEM; + pr_err("Device allocation failed\n"); + goto exit; + } + + pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); + if (!pdev_entry) { + err = -ENOMEM; + goto exit_device_put; + } + + err = platform_device_add(pdev); + if 
(err) { + pr_err("Device addition failed (%d)\n", err); + goto exit_device_free; + } + + pdev_entry->pdev = pdev; + pdev_entry->cpu = cpu; + mutex_lock(&pdev_list_mutex); + list_add_tail(&pdev_entry->list, &pdev_list); + mutex_unlock(&pdev_list_mutex); + + return 0; + +exit_device_free: + kfree(pdev_entry); +exit_device_put: + platform_device_put(pdev); +exit: + return err; +} + +static int zhaoxin_cputemp_down_prep(unsigned int cpu) +{ + struct pdev_entry *p; + + mutex_lock(&pdev_list_mutex); + list_for_each_entry(p, &pdev_list, list) { + if (p->cpu == cpu) { + platform_device_unregister(p->pdev); + list_del(&p->list); + mutex_unlock(&pdev_list_mutex); + kfree(p); + return 0; + } + } + mutex_unlock(&pdev_list_mutex); + return 0; +} + +static const struct x86_cpu_id __initconst cputemp_ids[] = { + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, X86_MODEL_ANY, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, X86_MODEL_ANY, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); + +static enum cpuhp_state zhaoxin_temp_online; + +static int __init zhaoxin_cputemp_init(void) +{ + int err; + + if (!x86_match_cpu(cputemp_ids)) + return -ENODEV; + + err = platform_driver_register(&zhaoxin_cputemp_driver); + if (err) + goto exit; + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/zhaoxin:online", + zhaoxin_cputemp_online, zhaoxin_cputemp_down_prep); + if (err < 0) + goto exit_driver_unreg; + zhaoxin_temp_online = err; + +#ifndef CONFIG_HOTPLUG_CPU + if (list_empty(&pdev_list)) { + err = -ENODEV; + goto exit_hp_unreg; + } +#endif + return 0; + +#ifndef CONFIG_HOTPLUG_CPU +exit_hp_unreg: + cpuhp_remove_state_nocalls(zhaoxin_temp_online); +#endif +exit_driver_unreg: + platform_driver_unregister(&zhaoxin_cputemp_driver); +exit: + return err; +} + +static void __exit zhaoxin_cputemp_exit(void) +{ + cpuhp_remove_state(zhaoxin_temp_online); + platform_driver_unregister(&zhaoxin_cputemp_driver); +} + +MODULE_DESCRIPTION("Zhaoxin CPU temperature monitor"); +MODULE_LICENSE("GPL"); + +module_init(zhaoxin_cputemp_init) +module_exit(zhaoxin_cputemp_exit) diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c index a61313f320bda220e54c41e7ef536edffb91474e..8e19e8cdcce5e735f946bf510a5dab2b45ed3e6d 100644 --- a/drivers/hwtracing/coresight/coresight-catu.c +++ b/drivers/hwtracing/coresight/coresight-catu.c @@ -567,12 +567,11 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int catu_remove(struct amba_device *adev) +static void catu_remove(struct amba_device *adev) { struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev); coresight_unregister(drvdata->csdev); - return 0; } static struct amba_id catu_ids[] = { diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c index e1d232411d8d753b992eccdcc70014d5356751fa..2dcf13de751fc2296ad5e0c26f8d8a92aaa2049e 100644 --- a/drivers/hwtracing/coresight/coresight-cpu-debug.c +++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c @@ -627,7 +627,7 @@ static int debug_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int debug_remove(struct amba_device *adev) +static void debug_remove(struct amba_device *adev) { struct device *dev = &adev->dev; struct debug_drvdata *drvdata = amba_get_drvdata(adev); @@ -642,8 +642,6 @@ static int debug_remove(struct amba_device *adev) if (!--debug_count) debug_func_exit(); - - return 0; } static const struct amba_cs_uci_id uci_id_debug[] = { diff --git 
a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c index 7ea93598f0eeafd00db4890961dd5cf28085e30c..0276700c246d59351c4792b09c19b5f793f1a2bc 100644 --- a/drivers/hwtracing/coresight/coresight-cti-core.c +++ b/drivers/hwtracing/coresight/coresight-cti-core.c @@ -836,7 +836,7 @@ static void cti_device_release(struct device *dev) if (drvdata->csdev_release) drvdata->csdev_release(dev); } -static int cti_remove(struct amba_device *adev) +static void cti_remove(struct amba_device *adev) { struct cti_drvdata *drvdata = dev_get_drvdata(&adev->dev); @@ -845,8 +845,6 @@ static int cti_remove(struct amba_device *adev) mutex_unlock(&ect_mutex); coresight_unregister(drvdata->csdev); - - return 0; } static int cti_probe(struct amba_device *adev, const struct amba_id *id) diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 0cf6f0b947b6f8849d3732f9e0fa7a062e245323..51c801c05e5c30cac3e4f1b84bb79e01a5da4eb5 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -803,7 +803,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int etb_remove(struct amba_device *adev) +static void etb_remove(struct amba_device *adev) { struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev); @@ -814,8 +814,6 @@ static int etb_remove(struct amba_device *adev) */ misc_deregister(&drvdata->miscdev); coresight_unregister(drvdata->csdev); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c index 5bf5a5a4ce6d15265831afac116f193ca2ea4026..683a69e88efda89ab5e100d92a048a47c323181c 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c @@ -909,7 +909,7 @@ static void clear_etmdrvdata(void *info) etmdrvdata[cpu] = NULL; } -static int etm_remove(struct amba_device *adev) +static void etm_remove(struct amba_device *adev) { struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev); @@ -932,8 +932,6 @@ static int etm_remove(struct amba_device *adev) cpus_read_unlock(); coresight_unregister(drvdata->csdev); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 02d0b92cf510183ddc458b6040891e4067aa7ea6..d4d9c8bb88cad79d1fb7a15356bd87a0776f1c6c 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -1680,7 +1680,7 @@ static void clear_etmdrvdata(void *info) etmdrvdata[cpu] = NULL; } -static int etm4_remove(struct amba_device *adev) +static void etm4_remove(struct amba_device *adev) { struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev); @@ -1703,8 +1703,6 @@ static int etm4_remove(struct amba_device *adev) cpus_read_unlock(); coresight_unregister(drvdata->csdev); - - return 0; } static const struct amba_id etm4_ids[] = { diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c index 4682f26139961a2478045acfab3c8e492f6fb0ad..42cc38c89f3ba54e8fc46eb66b1b570e295ebd1d 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c @@ -364,8 +364,12 @@ static ssize_t mode_store(struct device *dev, mode = ETM_MODE_QELEM(config->mode); /* start by clearing QE bits */ config->cfg &= 
~(BIT(13) | BIT(14)); - /* if supported, Q elements with instruction counts are enabled */ - if ((mode & BIT(0)) && (drvdata->q_support & BIT(0))) + /* + * if supported, Q elements with instruction counts are enabled. + * Always set the low bit for any requested mode. Valid combos are + * 0b00, 0b01 and 0b11. + */ + if (mode && drvdata->q_support) config->cfg |= BIT(13); /* * if supported, Q elements with and without instruction diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index 3fc6c678b51d8ebb1f9d60504d68feb2844189f4..b2fb853776d791721034ee9dff4f3134f0f74bf9 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -370,9 +370,9 @@ static int dynamic_funnel_probe(struct amba_device *adev, return funnel_probe(&adev->dev, &adev->res); } -static int dynamic_funnel_remove(struct amba_device *adev) +static void dynamic_funnel_remove(struct amba_device *adev) { - return funnel_remove(&adev->dev); + funnel_remove(&adev->dev); } static const struct amba_id dynamic_funnel_ids[] = { diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 38008aca2c0f4b618168e8f231829c04d1d19d9f..da2bfeeabc1b48c3c79f4006aa66aba009798235 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -388,9 +388,9 @@ static int dynamic_replicator_probe(struct amba_device *adev, return replicator_probe(&adev->dev, &adev->res); } -static int dynamic_replicator_remove(struct amba_device *adev) +static void dynamic_replicator_remove(struct amba_device *adev) { - return replicator_remove(&adev->dev); + replicator_remove(&adev->dev); } static const struct amba_id dynamic_replicator_ids[] = { diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index 587c1d7f252081d4e24f63a45ac3caa85fe68a7c..0ecca9f93f3a1b9c8d814b76bd62619f2ca583db 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -951,15 +951,13 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int stm_remove(struct amba_device *adev) +static void stm_remove(struct amba_device *adev) { struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev); coresight_unregister(drvdata->csdev); stm_unregister_device(&drvdata->stm); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c index 8169dff5a9f6a789552d61bf0db7f5bcc2c11185..e29b3914fc0ff14fbdeba40c2ad296d8fcfd3df4 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-core.c +++ b/drivers/hwtracing/coresight/coresight-tmc-core.c @@ -559,7 +559,7 @@ static void tmc_shutdown(struct amba_device *adev) spin_unlock_irqrestore(&drvdata->spinlock, flags); } -static int tmc_remove(struct amba_device *adev) +static void tmc_remove(struct amba_device *adev) { struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev); @@ -570,8 +570,6 @@ static int tmc_remove(struct amba_device *adev) */ misc_deregister(&drvdata->miscdev); coresight_unregister(drvdata->csdev); - - return 0; } static const struct amba_id tmc_ids[] = { diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 5b35029461a0cb2605c3ff43f9d72f1a07c19dbe..0ca39d905d0b3d70decf54ede4780f40b8fc2e1a 100644 --- 
a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -173,13 +173,11 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) return PTR_ERR(drvdata->csdev); } -static int tpiu_remove(struct amba_device *adev) +static void tpiu_remove(struct amba_device *adev) { struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev); coresight_unregister(drvdata->csdev); - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 036fdcee5eb377487ec55b7d82553bbac4d0c7b4..9535e995ecc92bfab66f73f865c981e1dc224bed 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -488,7 +488,7 @@ config I2C_BRCMSTB config I2C_CADENCE tristate "Cadence I2C Controller" - depends on ARCH_ZYNQ || ARM64 || XTENSA + depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST help Say yes here to select Cadence I2C Host Controller. This controller is e.g. used by Xilinx Zynq. @@ -946,7 +946,7 @@ config I2C_QCOM_GENI config I2C_QUP tristate "Qualcomm QUP based I2C controller" - depends on ARCH_QCOM + depends on ARCH_QCOM || COMPILE_TEST help If you say yes to this option, support will be included for the built-in I2C interface on the Qualcomm SoCs. diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index 37443edbf75464083693878983790c93d3457709..ad3b124a2e3768c9eb132fd2208ce421ed1da631 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -23,6 +23,11 @@ #define BCM2835_I2C_FIFO 0x10 #define BCM2835_I2C_DIV 0x14 #define BCM2835_I2C_DEL 0x18 +/* + * 16-bit field for the number of SCL cycles to wait after rising SCL + * before deciding the slave is not responding. 0 disables the + * timeout detection. + */ #define BCM2835_I2C_CLKT 0x1c #define BCM2835_I2C_C_READ BIT(0) @@ -477,6 +482,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) adap->dev.of_node = pdev->dev.of_node; adap->quirks = of_device_get_match_data(&pdev->dev); + /* + * Disable the hardware clock stretching timeout. SMBUS + * specifies a limit for how long the device can stretch the + * clock, but core I2C doesn't. 
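Note on the tpiu_remove() hunk above and its siblings earlier in this series (cti, etb10, etm3x, etm4x, funnel, replicator, stm, tmc, and nmk_i2c further down): the AMBA bus core ignores the remove callback's return value, so returning an error could only leak resources while pretending to fail. Converting the callbacks to void makes that explicit. A minimal sketch of the resulting driver shape, with hypothetical foo_* names standing in for any of the converted drivers:

    #include <linux/amba/bus.h>
    #include <linux/coresight.h>
    #include <linux/device.h>

    struct foo_drvdata {
            struct coresight_device *csdev;
    };

    static int foo_probe(struct amba_device *adev, const struct amba_id *id)
    {
            return 0;       /* stub; real probes register a csdev */
    }

    static void foo_remove(struct amba_device *adev)
    {
            struct foo_drvdata *drvdata = dev_get_drvdata(&adev->dev);

            /* Tear down unconditionally; there is no error to propagate. */
            coresight_unregister(drvdata->csdev);
    }

    static struct amba_driver foo_driver = {
            .drv    = { .name = "foo" },
            .probe  = foo_probe,    /* still returns int */
            .remove = foo_remove,   /* now returns void */
    };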
+ */ + bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0); bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0); ret = i2c_add_adapter(adap); diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index ba766d24219ef0ea5cbfc3c433c1f4115b53ded5..44e2466f3c6744c21190f73475d8dcce106e9ddc 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -674,7 +674,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev) /* set the data in/out register size for compatible SoCs */ if (of_device_is_compatible(dev->device->of_node, - "brcmstb,brcmper-i2c")) + "brcm,brcmper-i2c")) dev->data_regsz = sizeof(u8); else dev->data_regsz = sizeof(u32); diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 55c83a7a24f368fa969618acfe813b2fb9e486e3..56c87ade0e89d29d877b97b2063097585dd596f1 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -37,10 +37,10 @@ enum dw_pci_ctl_id_t { }; struct dw_scl_sda_cfg { - u32 ss_hcnt; - u32 fs_hcnt; - u32 ss_lcnt; - u32 fs_lcnt; + u16 ss_hcnt; + u16 fs_hcnt; + u16 ss_lcnt; + u16 fs_lcnt; u32 sda_hold; }; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index eab6fd6b890ebe3e7750a2333f223c886e6d250f..5618c1ff34dc3a88c6e65fbdf179c435b7d9512f 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -797,6 +797,11 @@ static int i801_block_transaction(struct i801_priv *priv, int result = 0; unsigned char hostc; + if (read_write == I2C_SMBUS_READ && command == I2C_SMBUS_BLOCK_DATA) + data->block[0] = I2C_SMBUS_BLOCK_MAX; + else if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) + return -EPROTO; + if (command == I2C_SMBUS_I2C_BLOCK_DATA) { if (read_write == I2C_SMBUS_WRITE) { /* set I2C_EN bit in configuration register */ @@ -810,16 +815,6 @@ static int i801_block_transaction(struct i801_priv *priv, } } - if (read_write == I2C_SMBUS_WRITE - || command == I2C_SMBUS_I2C_BLOCK_DATA) { - if (data->block[0] < 1) - data->block[0] = 1; - if (data->block[0] > I2C_SMBUS_BLOCK_MAX) - data->block[0] = I2C_SMBUS_BLOCK_MAX; - } else { - data->block[0] = 32; /* max for SMBus block reads */ - } - /* Experience has shown that the block buffer can only be used for SMBus (not I2C) block transactions, even though the datasheet doesn't mention this limitation. 
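The i801 hunk above tightens block-length handling: instead of silently clamping a caller-supplied length into range, out-of-range lengths are rejected with -EPROTO, and only SMBus block reads (where the slave supplies the count) get the buffer sized to the maximum. A sketch of that policy as a standalone helper (the helper name is illustrative, not part of the driver):

    #include <linux/errno.h>
    #include <linux/i2c.h>

    static int check_block_len(union i2c_smbus_data *data, char read_write,
                               int command)
    {
            /* SMBus block read: the slave tells us the count afterwards. */
            if (read_write == I2C_SMBUS_READ && command == I2C_SMBUS_BLOCK_DATA) {
                    data->block[0] = I2C_SMBUS_BLOCK_MAX;
                    return 0;
            }

            /* Writes and I2C block reads carry a caller-supplied length. */
            if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
                    return -EPROTO; /* reject, don't silently clamp */

            return 0;
    }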
*/ diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index ef73a42577cc7b3cedff721df31b6c94f707f31c..07eb819072c4fdf44a2a570b08aded0ae83c02e4 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -465,18 +465,18 @@ static int meson_i2c_probe(struct platform_device *pdev) */ meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_START, 0); - ret = i2c_add_adapter(&i2c->adap); - if (ret < 0) { - clk_disable_unprepare(i2c->clk); - return ret; - } - /* Disable filtering */ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0); meson_i2c_set_clk_div(i2c, timings.bus_freq_hz); + ret = i2c_add_adapter(&i2c->adap); + if (ret < 0) { + clk_disable_unprepare(i2c->clk); + return ret; + } + return 0; } diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index af349661fd7694382a6d4221677715ade8e2b3d4..8de8296d258318437441ee7e8aa4529c15516c09 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -105,23 +105,30 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id) /* Sometimes 9th clock pulse isn't generated, and slave doesn't release * the bus, because it wants to send ACK. * Following sequence of enabling/disabling and sending start/stop generates - * the 9 pulses, so it's all OK. + * the 9 pulses, each with a START then ending with STOP, so it's all OK. */ static void mpc_i2c_fixup(struct mpc_i2c *i2c) { int k; - u32 delay_val = 1000000 / i2c->real_clk + 1; - - if (delay_val < 2) - delay_val = 2; + unsigned long flags; for (k = 9; k; k--) { writeccr(i2c, 0); - writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN); + writeb(0, i2c->base + MPC_I2C_SR); /* clear any status bits */ + writeccr(i2c, CCR_MEN | CCR_MSTA); /* START */ + readb(i2c->base + MPC_I2C_DR); /* init xfer */ + udelay(15); /* let it hit the bus */ + local_irq_save(flags); /* should not be delayed further */ + writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSTA); /* delay SDA */ readb(i2c->base + MPC_I2C_DR); - writeccr(i2c, CCR_MEN); - udelay(delay_val << 1); + if (k != 1) + udelay(5); + local_irq_restore(flags); } + writeccr(i2c, CCR_MEN); /* Initiate STOP */ + readb(i2c->base + MPC_I2C_DR); + udelay(15); /* Let STOP propagate */ + writeccr(i2c, 0); } static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing) diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index d4b1b0865f6768b166693b4aa319981aca10ab16..a3363b20f168a458901d1eaeb1232d10169082db 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -1055,7 +1055,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int nmk_i2c_remove(struct amba_device *adev) +static void nmk_i2c_remove(struct amba_device *adev) { struct resource *res = &adev->res; struct nmk_i2c_dev *dev = amba_get_drvdata(adev); @@ -1068,8 +1068,6 @@ static int nmk_i2c_remove(struct amba_device *adev) i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE); clk_disable_unprepare(dev->clk); release_mem_region(res->start, resource_size(res)); - - return 0; } static struct i2c_vendor_data vendor_stn8815 = { diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c index 20f2772c0e79b75a0fbc201e10b7c0006b1e5303..2c909522f0f387e3158f452a48c57e24cd6cf98c 100644 --- a/drivers/i2c/busses/i2c-pasemi.c +++ b/drivers/i2c/busses/i2c-pasemi.c @@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter, TXFIFO_WR(smbus, msg->buf[msg->len-1] | (stop ? 
MTXFIFO_STOP : 0)); + + if (stop) { + err = pasemi_smb_waitready(smbus); + if (err) + goto reset_out; + } } return 0; diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index 1c259b5188de8b2e2b7d6e202d088ee51c7f002c..09e599069a81d2133127c1ebba763bd84b174a4c 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev) cci->master[idx].adap.quirks = &cci->data->quirks; cci->master[idx].adap.algo = &cci_algo; cci->master[idx].adap.dev.parent = dev; - cci->master[idx].adap.dev.of_node = child; + cci->master[idx].adap.dev.of_node = of_node_get(child); cci->master[idx].master = idx; cci->master[idx].cci = cci; @@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev) continue; ret = i2c_add_adapter(&cci->master[i].adap); - if (ret < 0) + if (ret < 0) { + of_node_put(cci->master[i].adap.dev.of_node); goto error_i2c; + } } pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); @@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev) return 0; error_i2c: - for (; i >= 0; i--) { - if (cci->master[i].cci) + for (--i ; i >= 0; i--) { + if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); + of_node_put(cci->master[i].adap.dev.of_node); + } } error: disable_irq(cci->irq); @@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev) int i; for (i = 0; i < cci->data->num_masters; i++) { - if (cci->master[i].cci) + if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); + of_node_put(cci->master[i].adap.dev.of_node); + } cci_halt(cci, i); } diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 2a8568b97c14d5452a532c6aede85b4734611b1d..8dabb6ffb1a4f088adc2b507c2ea89ad411cdbbe 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -756,7 +756,6 @@ static const struct i2c_adapter_quirks xiic_quirks = { static const struct i2c_adapter xiic_adapter = { .owner = THIS_MODULE, - .name = DRIVER_NAME, .class = I2C_CLASS_DEPRECATED, .algo = &xiic_algorithm, .quirks = &xiic_quirks, @@ -793,6 +792,8 @@ static int xiic_i2c_probe(struct platform_device *pdev) i2c_set_adapdata(&i2c->adap, i2c); i2c->adap.dev.parent = &pdev->dev; i2c->adap.dev.of_node = pdev->dev.of_node; + snprintf(i2c->adap.name, sizeof(i2c->adap.name), + DRIVER_NAME " %s", pdev->name); mutex_init(&i2c->lock); init_waitqueue_head(&i2c->wait); diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 5365199a31f419b9db442ba3fa555e3a3ed044fc..f7a7405d4350a10718b7948d546ca31fc11dabdb 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -261,7 +261,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) err = device_create_file(&pdev->dev, &dev_attr_available_masters); if (err) - goto err_rollback; + goto err_rollback_activation; err = device_create_file(&pdev->dev, &dev_attr_current_master); if (err) @@ -271,8 +271,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) err_rollback_available: device_remove_file(&pdev->dev, &dev_attr_available_masters); -err_rollback: +err_rollback_activation: i2c_demux_deactivate_master(priv); +err_rollback: for (j = 0; j < i; j++) { of_node_put(priv->chan[j].parent_np); of_changeset_destroy(&priv->chan[j].chgset); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index d79335506ecd3c3f5aa3beabde8e0dd74cef10a3..47551ab73ca8a03ab60448adb8fe0d8cee9aed5d 100644 
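The i2c-qcom-cci hunks above fix an OF-node reference imbalance: a node published through adap.dev.of_node must be pinned with of_node_get() and released on every teardown path, including the partially-initialized error unwind (note the error loop now starts at --i, so only adapters that were actually added get deleted). The general pattern, sketched with hypothetical demo_* names:

    #include <linux/i2c.h>
    #include <linux/of.h>

    static int demo_add_adapter(struct i2c_adapter *adap,
                                struct device_node *child)
    {
            int ret;

            adap->dev.of_node = of_node_get(child); /* pin while published */

            ret = i2c_add_adapter(adap);
            if (ret < 0)
                    of_node_put(adap->dev.of_node); /* undo on failure */
            return ret;
    }

    static void demo_del_adapter(struct i2c_adapter *adap)
    {
            i2c_del_adapter(adap);
            of_node_put(adap->dev.of_node);         /* drop the probe-time ref */
    }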
--- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -37,7 +37,7 @@ */ /* un-comment DEBUG to enable pr_debug() statements */ -#define DEBUG +/* #define DEBUG */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -64,11 +64,17 @@ static struct cpuidle_driver intel_idle_driver = { /* intel_idle.max_cstate=0 disables driver */ static int max_cstate = CPUIDLE_STATE_MAX - 1; static unsigned int disabled_states_mask; +static unsigned int preferred_states_mask; static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; static unsigned long auto_demotion_disable_flags; -static bool disable_promotion_to_c1e; + +static enum { + C1E_PROMOTION_PRESERVE, + C1E_PROMOTION_ENABLE, + C1E_PROMOTION_DISABLE +} c1e_promotion = C1E_PROMOTION_PRESERVE; struct idle_cpu { struct cpuidle_state *state_table; @@ -88,6 +94,12 @@ static struct cpuidle_state *cpuidle_state_table __initdata; static unsigned int mwait_substates __initdata; +/* + * Enable interrupts before entering the C-state. On some platforms and for + * some C-states, this may measurably decrease interrupt latency. + */ +#define CPUIDLE_FLAG_IRQ_ENABLE BIT(14) + /* * Enable this state by default even if the ACPI _CST does not list it. */ @@ -115,9 +127,6 @@ static unsigned int mwait_substates __initdata; * If the local APIC timer is not known to be reliable in the target idle state, * enable one-shot tick broadcasting for the target CPU before executing MWAIT. * - * Optionally call leave_mm() for the target CPU upfront to avoid wakeups due to - * flushing user TLBs. - * * Must be called under local_irq_disable(). */ static __cpuidle int intel_idle(struct cpuidle_device *dev, @@ -127,6 +136,9 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev, unsigned long eax = flg2MWAIT(state->flags); unsigned long ecx = 1; /* break on interrupt flag */ + if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) + local_irq_enable(); + mwait_idle_with_hints(eax, ecx); return index; @@ -698,7 +710,7 @@ static struct cpuidle_state skx_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", - .flags = MWAIT2flg(0x00), + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE, .exit_latency = 2, .target_residency = 2, .enter = &intel_idle, @@ -727,7 +739,7 @@ static struct cpuidle_state icx_cstates[] __initdata = { { .name = "C1", .desc = "MWAIT 0x00", - .flags = MWAIT2flg(0x00), + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE, .exit_latency = 1, .target_residency = 1, .enter = &intel_idle, @@ -744,8 +756,48 @@ static struct cpuidle_state icx_cstates[] __initdata = { .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 128, - .target_residency = 384, + .exit_latency = 170, + .target_residency = 600, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + +/* + * On Sapphire Rapids Xeon C1 has to be disabled if C1E is enabled, and vice + * versa. On SPR C1E is enabled only if "C1E promotion" bit is set in + * MSR_IA32_POWER_CTL. But in this case there effectively no C1, because C1 + * requests are promoted to C1E. If the "C1E promotion" bit is cleared, then + * both C1 and C1E requests end up with C1, so there is effectively no C1E. + * + * By default we enable C1 and disable C1E by marking it with + * 'CPUIDLE_FLAG_UNUSABLE'. 
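Two things are worth calling out in the intel_idle changes above. First, CPUIDLE_FLAG_IRQ_ENABLE lets the shallow C1 states on Skylake-X and Ice Lake-X enter MWAIT with interrupts already enabled, so a pending interrupt is serviced before the mwait rather than through the full wakeup path, which measurably lowers exit latency. Second, the old disable_promotion_to_c1e bool becomes a tri-state, because Sapphire Rapids needs the driver to be able to actively set the C1E-promotion bit as well as clear it or leave the firmware's choice alone. The per-CPU init added later in this patch condenses to the following sketch (equivalent to the if/else ladder in intel_idle_cpu_init()):

    switch (c1e_promotion) {
    case C1E_PROMOTION_ENABLE:
            c1e_promotion_enable();         /* set MSR_IA32_POWER_CTL bit 1 */
            break;
    case C1E_PROMOTION_DISABLE:
            c1e_promotion_disable();        /* clear it */
            break;
    case C1E_PROMOTION_PRESERVE:
            break;                          /* leave the firmware setting */
    }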
+ */ +static struct cpuidle_state spr_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE | + CPUIDLE_FLAG_UNUSABLE, + .exit_latency = 2, + .target_residency = 4, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 290, + .target_residency = 800, .enter = &intel_idle, .enter_s2idle = intel_idle_s2idle, }, { @@ -963,6 +1015,39 @@ static struct cpuidle_state dnv_cstates[] __initdata = { .enter = NULL } }; +/* + * Note, depending on HW and FW revision, SnowRidge SoC may or may not support + * C6, and this is indicated in the CPUID mwait leaf. + */ +static struct cpuidle_state snr_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 2, + .target_residency = 2, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 15, + .target_residency = 25, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 130, + .target_residency = 500, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static const struct idle_cpu idle_cpu_nehalem __initconst = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, @@ -1062,6 +1147,12 @@ static const struct idle_cpu idle_cpu_icx __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_spr __initconst = { + .state_table = spr_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_avn __initconst = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, @@ -1084,6 +1175,12 @@ static const struct idle_cpu idle_cpu_dnv __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_snr __initconst = { + .state_table = snr_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx), X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem), @@ -1117,12 +1214,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &idle_cpu_skl), X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &idle_cpu_icx), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx), + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_dnv), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr), {} }; @@ -1444,6 +1543,68 @@ static void __init sklh_idle_state_table_update(void) skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */ } +/** + * 
skx_idle_state_table_update - Adjust the Sky Lake/Cascade Lake + * idle states table. + */ +static void __init skx_idle_state_table_update(void) +{ + unsigned long long msr; + + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); + + /* + * 000b: C0/C1 (no package C-state support) + * 001b: C2 + * 010b: C6 (non-retention) + * 011b: C6 (retention) + * 111b: No Package C state limits. + */ + if ((msr & 0x7) < 2) { + /* + * Uses the CC6 + PC0 latency and 3 times of + * latency for target_residency if the PC6 + * is disabled in BIOS. This is consistent + * with how intel_idle driver uses _CST + * to set the target_residency. + */ + skx_cstates[2].exit_latency = 92; + skx_cstates[2].target_residency = 276; + } +} + +/** + * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table. + */ +static void __init spr_idle_state_table_update(void) +{ + unsigned long long msr; + + /* Check if user prefers C1E over C1. */ + if ((preferred_states_mask & BIT(2)) && + !(preferred_states_mask & BIT(1))) { + /* Disable C1 and enable C1E. */ + spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE; + spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE; + + /* Enable C1E using the "C1E promotion" bit. */ + c1e_promotion = C1E_PROMOTION_ENABLE; + } + + /* + * By default, the C6 state assumes the worst-case scenario of package + * C6. However, if PC6 is disabled, we update the numbers to match + * core C6. + */ + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); + + /* Limit value 2 and above allow for PC6. */ + if ((msr & 0x7) < 2) { + spr_cstates[2].exit_latency = 190; + spr_cstates[2].target_residency = 600; + } +} + static bool __init intel_idle_verify_cstate(unsigned int mwait_hint) { unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1; @@ -1475,6 +1636,12 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) case INTEL_FAM6_SKYLAKE: sklh_idle_state_table_update(); break; + case INTEL_FAM6_SKYLAKE_X: + skx_idle_state_table_update(); + break; + case INTEL_FAM6_SAPPHIRERAPIDS_X: + spr_idle_state_table_update(); + break; } for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) { @@ -1547,6 +1714,15 @@ static void auto_demotion_disable(void) wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); } +static void c1e_promotion_enable(void) +{ + unsigned long long msr_bits; + + rdmsrl(MSR_IA32_POWER_CTL, msr_bits); + msr_bits |= 0x2; + wrmsrl(MSR_IA32_POWER_CTL, msr_bits); +} + static void c1e_promotion_disable(void) { unsigned long long msr_bits; @@ -1578,7 +1754,9 @@ static int intel_idle_cpu_init(unsigned int cpu) if (auto_demotion_disable_flags) auto_demotion_disable(); - if (disable_promotion_to_c1e) + if (c1e_promotion == C1E_PROMOTION_ENABLE) + c1e_promotion_enable(); + else if (c1e_promotion == C1E_PROMOTION_DISABLE) c1e_promotion_disable(); return 0; @@ -1657,7 +1835,8 @@ static int __init intel_idle_init(void) if (icpu) { cpuidle_state_table = icpu->state_table; auto_demotion_disable_flags = icpu->auto_demotion_disable_flags; - disable_promotion_to_c1e = icpu->disable_promotion_to_c1e; + if (icpu->disable_promotion_to_c1e) + c1e_promotion = C1E_PROMOTION_DISABLE; if (icpu->use_acpi || force_use_acpi) intel_idle_acpi_cst_extract(); } else if (!intel_idle_acpi_cst_extract()) { @@ -1716,3 +1895,14 @@ module_param(max_cstate, int, 0444); */ module_param_named(states_off, disabled_states_mask, uint, 0444); MODULE_PARM_DESC(states_off, "Mask of disabled idle states"); +/* + * Some platforms come with mutually exclusive C-states, so that if one is + * enabled, the other C-states must not be used. 
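For Sapphire Rapids the new mask is decoded in spr_idle_state_table_update() above: bit N of preferred_states_mask corresponds to C-state index N in the table, so BIT(1) means C1 and BIT(2) means C1E, and only the combination "C1E preferred, C1 not" changes the default. In sketch form, with a hypothetical helper name:

    #include <linux/bits.h>
    #include <linux/types.h>

    /* bit 1 = C1, bit 2 = C1E; only C1E-without-C1 flips the default */
    static bool spr_prefers_c1e(unsigned int preferred_states_mask)
    {
            return (preferred_states_mask & BIT(2)) &&
                   !(preferred_states_mask & BIT(1));
    }

So, assuming the module parameter documented at the end of this file, booting with intel_idle.preferred_cstates=4 marks C1 unusable, clears CPUIDLE_FLAG_UNUSABLE on C1E, and sets the C1E-promotion bit in MSR_IA32_POWER_CTL.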
Example: C1 and C1E on + * Sapphire Rapids platform. This parameter allows for selecting the + * preferred C-states among the groups of mutually exclusive C-states - the + * selected C-states will be registered, the other C-states from the mutually + * exclusive group won't be registered. If the platform has no mutually + * exclusive C-states, this parameter has no effect. + */ +module_param_named(preferred_cstates, preferred_states_mask, uint, 0444); +MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states"); diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 48435865fdaf34387b563b9ce025e00fddedcc0b..792526462f1c9c4275edda27526cc7eb555e97a0 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -1648,11 +1648,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "Unable to register iio device\n"); - goto err_trigger_unregister; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_trigger_unregister: bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1); err_buffer_cleanup: diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index 2eaf85b6e39f4c399ea971451649042fd82c6712..89e0a89d95d6bdadc34dcda832345ae95e4c89d8 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1429,11 +1429,14 @@ static int kxcjk1013_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index a7208704d31c90d15b7e40bff45e43ab7dd783ec..e7e280282774089830e0f922d781f8ba3f31d7d1 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -176,6 +176,7 @@ static const struct mma8452_event_regs trans_ev_regs = { * @enabled_events: event flags enabled and handled by this driver */ struct mma_chip_info { + const char *name; u8 chip_id; const struct iio_chan_spec *channels; int num_channels; @@ -1303,6 +1304,7 @@ enum { static const struct mma_chip_info mma_chip_info_table[] = { [mma8451] = { + .name = "mma8451", .chip_id = MMA8451_DEVICE_ID, .channels = mma8451_channels, .num_channels = ARRAY_SIZE(mma8451_channels), @@ -1327,6 +1329,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { MMA8452_INT_FF_MT, }, [mma8452] = { + .name = "mma8452", .chip_id = MMA8452_DEVICE_ID, .channels = mma8452_channels, .num_channels = ARRAY_SIZE(mma8452_channels), @@ -1343,6 +1346,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { MMA8452_INT_FF_MT, }, [mma8453] = { + .name = "mma8453", .chip_id = MMA8453_DEVICE_ID, .channels = mma8453_channels, .num_channels = ARRAY_SIZE(mma8453_channels), @@ -1359,6 +1363,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { MMA8452_INT_FF_MT, }, [mma8652] = { + .name = "mma8652", .chip_id = MMA8652_DEVICE_ID, .channels = mma8652_channels, .num_channels = ARRAY_SIZE(mma8652_channels), @@ -1368,6 +1373,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { .enabled_events = MMA8452_INT_FF_MT, }, [mma8653] = { + .name 
= "mma8653", .chip_id = MMA8653_DEVICE_ID, .channels = mma8653_channels, .num_channels = ARRAY_SIZE(mma8653_channels), @@ -1382,6 +1388,7 @@ static const struct mma_chip_info mma_chip_info_table[] = { .enabled_events = MMA8452_INT_FF_MT, }, [fxls8471] = { + .name = "fxls8471", .chip_id = FXLS8471_DEVICE_ID, .channels = mma8451_channels, .num_channels = ARRAY_SIZE(mma8451_channels), @@ -1525,13 +1532,6 @@ static int mma8452_probe(struct i2c_client *client, struct mma8452_data *data; struct iio_dev *indio_dev; int ret; - const struct of_device_id *match; - - match = of_match_device(mma8452_dt_ids, &client->dev); - if (!match) { - dev_err(&client->dev, "unknown device model\n"); - return -ENODEV; - } indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) @@ -1540,7 +1540,14 @@ static int mma8452_probe(struct i2c_client *client, data = iio_priv(indio_dev); data->client = client; mutex_init(&data->lock); - data->chip_info = match->data; + + data->chip_info = device_get_match_data(&client->dev); + if (!data->chip_info && id) { + data->chip_info = &mma_chip_info_table[id->driver_data]; + } else { + dev_err(&client->dev, "unknown device model\n"); + return -ENODEV; + } data->vdd_reg = devm_regulator_get(&client->dev, "vdd"); if (IS_ERR(data->vdd_reg)) @@ -1584,11 +1591,11 @@ static int mma8452_probe(struct i2c_client *client, } dev_info(&client->dev, "registering %s accelerometer; ID 0x%x\n", - match->compatible, data->chip_info->chip_id); + data->chip_info->name, data->chip_info->chip_id); i2c_set_clientdata(client, indio_dev); indio_dev->info = &mma8452_info; - indio_dev->name = id->name; + indio_dev->name = data->chip_info->name; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->channels = data->chip_info->channels; indio_dev->num_channels = data->chip_info->num_channels; @@ -1814,7 +1821,7 @@ MODULE_DEVICE_TABLE(i2c, mma8452_id); static struct i2c_driver mma8452_driver = { .driver = { .name = "mma8452", - .of_match_table = of_match_ptr(mma8452_dt_ids), + .of_match_table = mma8452_dt_ids, .pm = &mma8452_pm_ops, }, .probe = mma8452_probe, diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c index 08a2303cc9df3c08022bb32730faa41c01f49816..26421e8e8263958b1585d0b8d176a56bca0c4951 100644 --- a/drivers/iio/accel/mma9551.c +++ b/drivers/iio/accel/mma9551.c @@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c index c15908faa38167b0d6ff68d53ceca8de0ed2d403..a23a7685d1f93c893da306c14760eeea03966e94 100644 --- a/drivers/iio/accel/mma9553.c +++ b/drivers/iio/accel/mma9553.c @@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(&client->dev, "unable to register iio device\n"); - goto out_poweroff; + goto err_pm_cleanup; } dev_dbg(&indio_dev->dev, "Registered device %s\n", name); return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); out_poweroff: mma9551_set_device_state(client, false); return ret; diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 
9c2401c5848ece566c7bc8944d2699c7f4cd20fa..bd350099503765cc2213470bbb137057be211924 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -74,7 +74,7 @@ #define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x) #define AD7124_CONFIG_PGA_MSK GENMASK(2, 0) #define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x) -#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6) +#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5) #define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x) /* AD7124_FILTER_X */ diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c index 42ea8bc7e78051c16bc8f1dcd7546393c99153d5..adc5ceaef8c93a373daef26dbaded4d4be1904ca 100644 --- a/drivers/iio/adc/men_z188_adc.c +++ b/drivers/iio/adc/men_z188_adc.c @@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev, struct z188_adc *adc; struct iio_dev *indio_dev; struct resource *mem; + int ret; indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc)); if (!indio_dev) @@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev, adc->mem = mem; mcb_set_drvdata(dev, indio_dev); - return iio_device_register(indio_dev); + ret = iio_device_register(indio_dev); + if (ret) + goto err_unmap; + + return 0; +err_unmap: + iounmap(adc->base); err: mcb_release_mem(mem); return -ENXIO; diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c index b64718daa2017c91707cc30763ba85d692616e68..c79cd88cd42314a9a4ed658c382524e2df57f849 100644 --- a/drivers/iio/adc/ti-adc081c.c +++ b/drivers/iio/adc/ti-adc081c.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -151,13 +152,16 @@ static int adc081c_probe(struct i2c_client *client, { struct iio_dev *iio; struct adc081c *adc; - struct adcxx1c_model *model; + const struct adcxx1c_model *model; int err; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) return -EOPNOTSUPP; - model = &adcxx1c_models[id->driver_data]; + if (dev_fwnode(&client->dev)) + model = device_get_match_data(&client->dev); + else + model = &adcxx1c_models[id->driver_data]; iio = devm_iio_device_alloc(&client->dev, sizeof(*adc)); if (!iio) @@ -224,10 +228,17 @@ static const struct i2c_device_id adc081c_id[] = { }; MODULE_DEVICE_TABLE(i2c, adc081c_id); +static const struct acpi_device_id adc081c_acpi_match[] = { + /* Used on some AAEON boards */ + { "ADC081C", (kernel_ulong_t)&adcxx1c_models[ADC081C] }, + { } +}; +MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match); + static const struct of_device_id adc081c_of_match[] = { - { .compatible = "ti,adc081c" }, - { .compatible = "ti,adc101c" }, - { .compatible = "ti,adc121c" }, + { .compatible = "ti,adc081c", .data = &adcxx1c_models[ADC081C] }, + { .compatible = "ti,adc101c", .data = &adcxx1c_models[ADC101C] }, + { .compatible = "ti,adc121c", .data = &adcxx1c_models[ADC121C] }, { } }; MODULE_DEVICE_TABLE(of, adc081c_of_match); @@ -236,6 +247,7 @@ static struct i2c_driver adc081c_driver = { .driver = { .name = "adc081c", .of_match_table = adc081c_of_match, + .acpi_match_table = adc081c_acpi_match, }, .probe = adc081c_probe, .remove = adc081c_remove, diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c index c6416ad795ca48ae2a24cd02c8ecd5e8d4fb74e3..256177b15c511de57c9e93b31a9e690d09bb7780 100644 --- a/drivers/iio/adc/twl6030-gpadc.c +++ b/drivers/iio/adc/twl6030-gpadc.c @@ -911,6 +911,8 @@ static int twl6030_gpadc_probe(struct platform_device *pdev) ret = devm_request_threaded_irq(dev, irq, NULL, 
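Both the mma8452 and ti-adc081c probe changes above move to the same firmware-agnostic lookup: try device_get_match_data() first (it works for both OF and ACPI enumeration), fall back to the i2c_device_id table, and fail only when neither yields chip data. Written out as the conventional two-step check (a sketch of the intended control flow, using the mma8452 names):

    data->chip_info = device_get_match_data(&client->dev);
    if (!data->chip_info && id)
            data->chip_info = &mma_chip_info_table[id->driver_data];
    if (!data->chip_info) {
            dev_err(&client->dev, "unknown device model\n");
            return -ENODEV;
    }

Note these are two independent tests, not an if/else pair: a successful firmware match must not fall through into the error branch.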
twl6030_gpadc_irq_handler, IRQF_ONESHOT, "twl6030_gpadc", indio_dev); + if (ret) + return ret; ret = twl6030_gpadc_enable_irq(TWL6030_GPADC_RT_SW1_EOC_MASK); if (ret < 0) { diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c index e42ea2b1707db9eb4628387a1db3a2b6fbbdebc7..3809f98894a515fd100167dc5c865bd7928a8bd5 100644 --- a/drivers/iio/afe/iio-rescale.c +++ b/drivers/iio/afe/iio-rescale.c @@ -38,7 +38,7 @@ static int rescale_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { struct rescale *rescale = iio_priv(indio_dev); - unsigned long long tmp; + s64 tmp; int ret; switch (mask) { @@ -59,10 +59,10 @@ static int rescale_read_raw(struct iio_dev *indio_dev, *val2 = rescale->denominator; return IIO_VAL_FRACTIONAL; case IIO_VAL_FRACTIONAL_LOG2: - tmp = *val * 1000000000LL; - do_div(tmp, rescale->denominator); + tmp = (s64)*val * 1000000000LL; + tmp = div_s64(tmp, rescale->denominator); tmp *= rescale->numerator; - do_div(tmp, 1000000000LL); + tmp = div_s64(tmp, 1000000000LL); *val = tmp; return ret; default: diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index 39fe0b1785920c5150005a7af38a702087cdc564..b6b90eebec0b99b290490ae5b7f46c713f06fac9 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -1170,11 +1170,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to register iio device\n"); - goto err_buffer_cleanup; + goto err_pm_cleanup; } return 0; +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); err_trigger_unregister: diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c index 61885e99d3fc14db2655e7ac955b1adb4dd7a618..89133315e6aaf07bf509916a857233e04275acc9 100644 --- a/drivers/iio/imu/kmx61.c +++ b/drivers/iio/imu/kmx61.c @@ -1392,7 +1392,7 @@ static int kmx61_probe(struct i2c_client *client, ret = iio_device_register(data->acc_indio_dev); if (ret < 0) { dev_err(&client->dev, "Failed to register acc iio device\n"); - goto err_buffer_cleanup_mag; + goto err_pm_cleanup; } ret = iio_device_register(data->mag_indio_dev); @@ -1405,6 +1405,9 @@ static int kmx61_probe(struct i2c_client *client, err_iio_unregister_acc: iio_device_unregister(data->acc_indio_dev); +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(&client->dev); + pm_runtime_disable(&client->dev); err_buffer_cleanup_mag: if (client->irq > 0) iio_triggered_buffer_cleanup(data->mag_indio_dev); diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 558ca3843bb95fe858932ebf363b1ad4c6a915a0..2c528425b03b4c0583c156a12cab7745dbae4d72 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -1558,8 +1558,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor, if (err < 0) return err; + /* + * we need to wait for sensor settling time before + * reading data in order to avoid corrupted samples + */ delay = 1000000000 / sensor->odr; - usleep_range(delay, 2 * delay); + usleep_range(3 * delay, 4 * delay); err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data)); if (err < 0) diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index ede99e0d53714d4d7d9dce825c8551598d3d7f5a..8c3faa7972842db38a55531c18cf626bfb4119fa 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -561,28 +561,50 @@ 
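The iio-rescale change above is a correctness fix, not style: do_div() performs an unsigned 64-by-32 division, so a negative intermediate (raw IIO values may legitimately be negative) would be treated as a huge positive number. div_s64() preserves the sign. The corrected IIO_VAL_FRACTIONAL_LOG2 arithmetic as a self-contained sketch (the function name is illustrative):

    #include <linux/math64.h>
    #include <linux/types.h>

    /* Scale val by numerator/denominator, preserving sign and precision. */
    static int rescale_frac_log2(int val, s32 numerator, s32 denominator)
    {
            s64 tmp = (s64)val * 1000000000LL;      /* widen before dividing */

            tmp = div_s64(tmp, denominator);        /* signed, unlike do_div() */
            tmp *= numerator;
            tmp = div_s64(tmp, 1000000000LL);
            return (int)tmp;
    }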
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw); static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan, int raw, int *processed, unsigned int scale) { - int scale_type, scale_val, scale_val2, offset; + int scale_type, scale_val, scale_val2; + int offset_type, offset_val, offset_val2; s64 raw64 = raw; - int ret; - ret = iio_channel_read(chan, &offset, NULL, IIO_CHAN_INFO_OFFSET); - if (ret >= 0) - raw64 += offset; + offset_type = iio_channel_read(chan, &offset_val, &offset_val2, + IIO_CHAN_INFO_OFFSET); + if (offset_type >= 0) { + switch (offset_type) { + case IIO_VAL_INT: + break; + case IIO_VAL_INT_PLUS_MICRO: + case IIO_VAL_INT_PLUS_NANO: + /* + * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO + * implicitely truncate the offset to it's integer form. + */ + break; + case IIO_VAL_FRACTIONAL: + offset_val /= offset_val2; + break; + case IIO_VAL_FRACTIONAL_LOG2: + offset_val >>= offset_val2; + break; + default: + return -EINVAL; + } + + raw64 += offset_val; + } scale_type = iio_channel_read(chan, &scale_val, &scale_val2, IIO_CHAN_INFO_SCALE); if (scale_type < 0) { /* - * Just pass raw values as processed if no scaling is - * available. + * If no channel scaling is available apply consumer scale to + * raw value and return. */ - *processed = raw; + *processed = raw * scale; return 0; } switch (scale_type) { case IIO_VAL_INT: - *processed = raw64 * scale_val; + *processed = raw64 * scale_val * scale; break; case IIO_VAL_INT_PLUS_MICRO: if (scale_val2 < 0) diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index 8eacfaf584cfd0690fb4020a6ff74cca8bb490eb..620537d0104d4b756fcaa17f8e1c93bb390ca444 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c @@ -941,13 +941,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap, ret = iio_device_register(indio_dev); if (ret < 0) { dev_err(dev, "unable to register iio device\n"); - goto err_disable_runtime_pm; + goto err_pm_cleanup; } dev_dbg(dev, "Registered device %s\n", name); return 0; -err_disable_runtime_pm: +err_pm_cleanup: + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); err_buffer_cleanup: iio_triggered_buffer_cleanup(indio_dev); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 8e54184566f7fe5bb7c389bd8907c446aac23ac3..3c40aa50cd60c17064c4187b1a92fe307c0ebd1c 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -68,8 +68,8 @@ static const char * const cma_events[] = { [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", }; -static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr, - union ib_gid *mgid); +static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, + enum ib_gid_type gid_type); const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) { @@ -775,6 +775,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) unsigned int p; u16 pkey, index; enum ib_port_state port_state; + int ret; int i; cma_dev = NULL; @@ -793,9 +794,14 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) continue; - for (i = 0; !rdma_query_gid(cur_dev->device, - p, i, &gid); - i++) { + + for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; + ++i) { + ret = rdma_query_gid(cur_dev->device, p, i, + &gid); + if (ret) + continue; + if (!memcmp(&gid, dgid, sizeof(gid))) { cma_dev = cur_dev; sgid = gid; @@ -1834,17 
+1840,19 @@ static void destroy_mc(struct rdma_id_private *id_priv, if (dev_addr->bound_dev_if) ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); - if (ndev) { + if (ndev && !send_only) { + enum ib_gid_type gid_type; union ib_gid mgid; - cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr, - &mgid); - - if (!send_only) - cma_igmp_send(ndev, &mgid, false); - - dev_put(ndev); + gid_type = id_priv->cma_dev->default_gid_type + [id_priv->id.port_num - + rdma_start_port( + id_priv->cma_dev->device)]; + cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid, + gid_type); + cma_igmp_send(ndev, &mgid, false); } + dev_put(ndev); cancel_work_sync(&mc->iboe_join.work); } @@ -2627,7 +2635,7 @@ int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout) { struct rdma_id_private *id_priv; - if (id->qp_type != IB_QPT_RC) + if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI) return -EINVAL; id_priv = container_of(id, struct rdma_id_private, id); @@ -3313,22 +3321,30 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, const struct sockaddr *dst_addr) { - if (!src_addr || !src_addr->sa_family) { - src_addr = (struct sockaddr *) &id->route.addr.src_addr; - src_addr->sa_family = dst_addr->sa_family; - if (IS_ENABLED(CONFIG_IPV6) && - dst_addr->sa_family == AF_INET6) { - struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; - struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; - src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; - if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) - id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; - } else if (dst_addr->sa_family == AF_IB) { - ((struct sockaddr_ib *) src_addr)->sib_pkey = - ((struct sockaddr_ib *) dst_addr)->sib_pkey; - } - } - return rdma_bind_addr(id, src_addr); + struct sockaddr_storage zero_sock = {}; + + if (src_addr && src_addr->sa_family) + return rdma_bind_addr(id, src_addr); + + /* + * When the src_addr is not specified, automatically supply an any addr + */ + zero_sock.ss_family = dst_addr->sa_family; + if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) { + struct sockaddr_in6 *src_addr6 = + (struct sockaddr_in6 *)&zero_sock; + struct sockaddr_in6 *dst_addr6 = + (struct sockaddr_in6 *)dst_addr; + + src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; + if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) + id->route.addr.dev_addr.bound_dev_if = + dst_addr6->sin6_scope_id; + } else if (dst_addr->sa_family == AF_IB) { + ((struct sockaddr_ib *)&zero_sock)->sib_pkey = + ((struct sockaddr_ib *)dst_addr)->sib_pkey; + } + return rdma_bind_addr(id, (struct sockaddr *)&zero_sock); } /* diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 76b9c436edcd2f1ffaf7175d09133a26b0b93141..aa526c5ca0cf3789892cf82310e9708d810c1898 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2411,7 +2411,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, ++i) { ret = rdma_query_gid(device, port, i, &tmp_gid); if (ret) - return ret; + continue; + if (!memcmp(&tmp_gid, gid, sizeof *gid)) { *port_num = port; if (index) diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 2cc785c1970b4cce98f669b10be85b9470290dba..d12018c4c86e968f01ee8240c7f870d8f5f40167 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -95,6 +95,7 @@ struct ucma_context { u64 
uid; struct list_head list; + struct list_head mc_list; struct work_struct close_work; }; @@ -105,6 +106,7 @@ struct ucma_multicast { u64 uid; u8 join_state; + struct list_head list; struct sockaddr_storage addr; }; @@ -198,6 +200,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) INIT_WORK(&ctx->close_work, ucma_close_id); init_completion(&ctx->comp); + INIT_LIST_HEAD(&ctx->mc_list); /* So list_del() will work if we don't do ucma_finish_ctx() */ INIT_LIST_HEAD(&ctx->list); ctx->file = file; @@ -484,19 +487,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, static void ucma_cleanup_multicast(struct ucma_context *ctx) { - struct ucma_multicast *mc; - unsigned long index; + struct ucma_multicast *mc, *tmp; - xa_for_each(&multicast_table, index, mc) { - if (mc->ctx != ctx) - continue; + xa_lock(&multicast_table); + list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { + list_del(&mc->list); /* * At this point mc->ctx->ref is 0 so the mc cannot leave the * lock on the reader and this is enough serialization */ - xa_erase(&multicast_table, index); + __xa_erase(&multicast_table, mc->id); kfree(mc); } + xa_unlock(&multicast_table); } static void ucma_cleanup_mc_events(struct ucma_multicast *mc) @@ -1469,12 +1472,16 @@ static ssize_t ucma_process_join(struct ucma_file *file, mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); - if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, + xa_lock(&multicast_table); + if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL)) { ret = -ENOMEM; goto err_free_mc; } + list_add_tail(&mc->list, &ctx->mc_list); + xa_unlock(&multicast_table); + mutex_lock(&ctx->mutex); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); @@ -1500,8 +1507,11 @@ static ssize_t ucma_process_join(struct ucma_file *file, mutex_unlock(&ctx->mutex); ucma_cleanup_mc_events(mc); err_xa_erase: - xa_erase(&multicast_table, mc->id); + xa_lock(&multicast_table); + list_del(&mc->list); + __xa_erase(&multicast_table, mc->id); err_free_mc: + xa_unlock(&multicast_table); kfree(mc); err_put_ctx: ucma_put_ctx(ctx); @@ -1569,15 +1579,17 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file, mc = ERR_PTR(-EINVAL); else if (!refcount_inc_not_zero(&mc->ctx->ref)) mc = ERR_PTR(-ENXIO); - else - __xa_erase(&multicast_table, mc->id); - xa_unlock(&multicast_table); if (IS_ERR(mc)) { + xa_unlock(&multicast_table); ret = PTR_ERR(mc); goto out; } + list_del(&mc->list); + __xa_erase(&multicast_table, mc->id); + xa_unlock(&multicast_table); + mutex_lock(&mc->ctx->mutex); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); mutex_unlock(&mc->ctx->mutex); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 3d895cc41c3ad93cf2df789035e01304d2039e47..597e889ba83126ff337125782aeac8ea038ac8a4 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2078,6 +2078,7 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, return mr; mr->device = pd->device; + mr->type = IB_MR_TYPE_USER; mr->pd = pd; mr->dm = NULL; atomic_inc(&pd->usecnt); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 441eb421e5e59a7d088611b52ad28c3bc44c8c94..5759027914b01a5badc44866f89ab0d43d51cfa4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -614,8 +614,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct 
bnxt_qplib_res *res, if (!cmdq->cmdq_bitmap) goto fail; - cmdq->bmap_size = bmap_size; - /* Allocate one extra to hold the QP1 entries */ rcfw->qp_tbl_size = qp_tbl_sz + 1; rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), @@ -663,8 +661,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) iounmap(cmdq->cmdq_mbox.reg.bar_reg); iounmap(creq->creq_db.reg.bar_reg); - indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size); - if (indx != cmdq->bmap_size) + indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth); + if (indx != rcfw->cmdq_depth) dev_err(&rcfw->pdev->dev, "disabling RCFW with pending cmd-bit %lx\n", indx); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 5f2f0a5a3560ff89902a04d3390f303c161e7e4a..6953f4e53dd2090e7b4e55e18b6572a8b9eb7971 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -150,7 +150,6 @@ struct bnxt_qplib_cmdq_ctx { wait_queue_head_t waitq; unsigned long flags; unsigned long *cmdq_bitmap; - u32 bmap_size; u32 seq_num; }; diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 861e19fdfeb469fff7d75143cfbc80738ae70ecf..12e5461581cb455be2d8f19c26e46568fcc6cd7f 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2469,6 +2469,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, memset(attr, 0, sizeof(*attr)); memset(init_attr, 0, sizeof(*init_attr)); attr->qp_state = to_ib_qp_state(qhp->attr.state); + attr->cur_qp_state = to_ib_qp_state(qhp->attr.state); init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c index a1cce33f97de0e7fe9cf36910e5d9974c7b33b85..c7779d9be03b854de613be1ffc295f5422c8824e 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_main.c +++ b/drivers/infiniband/hw/hfi1/ipoib_main.c @@ -185,12 +185,6 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev) free_percpu(priv->netstats); } -static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev) -{ - hfi1_ipoib_netdev_dtor(dev); - free_netdev(dev); -} - static void hfi1_ipoib_set_id(struct net_device *dev, int id) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); @@ -227,24 +221,23 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device, priv->port_num = port_num; priv->netdev_ops = netdev->netdev_ops; - netdev->netdev_ops = &hfi1_ipoib_netdev_ops; - ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey); rc = hfi1_ipoib_txreq_init(priv); if (rc) { dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc); - hfi1_ipoib_free_rdma_netdev(netdev); return rc; } rc = hfi1_ipoib_rxq_init(netdev); if (rc) { dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc); - hfi1_ipoib_free_rdma_netdev(netdev); + hfi1_ipoib_txreq_deinit(priv); return rc; } + netdev->netdev_ops = &hfi1_ipoib_netdev_ops; + netdev->priv_destructor = hfi1_ipoib_netdev_dtor; netdev->needs_free_netdev = true; diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index d213f65d4cdd0ff5fe9a34d5617e3ccb149d94d9..ed8a96ae61cefc4d0e9613cbc39f4174ecf8322b 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -121,6 +121,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) unsigned long flags; 
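The hfi1 IPoIB rework above fixes the error-path ownership: setup_rn() no longer frees the net_device itself (its caller owns that allocation), unwinds any partially built TX state by hand, and installs the netdev ops and priv_destructor only once every sub-initialization has succeeded, so core netdev teardown can safely own the cleanup from then on. The shape of the pattern, with hypothetical demo_* helpers assumed defined elsewhere:

    #include <linux/netdevice.h>

    static int demo_setup(struct net_device *netdev)
    {
            int rc;

            rc = demo_txreq_init(netdev);
            if (rc)
                    return rc;              /* nothing else to undo yet */

            rc = demo_rxq_init(netdev);
            if (rc) {
                    demo_txreq_deinit(netdev);      /* unwind partial state */
                    return rc;
            }

            /* From here on, core netdev teardown owns the cleanup. */
            netdev->netdev_ops = &demo_netdev_ops;
            netdev->priv_destructor = demo_netdev_dtor;
            netdev->needs_free_netdev = true;
            return 0;
    }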
struct list_head del_list; + /* Prevent freeing of mm until we are completely finished. */ + mmgrab(handler->mn.mm); + /* Unregister first so we don't get any more notifications. */ mmu_notifier_unregister(&handler->mn, handler->mn.mm); @@ -143,6 +146,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler) do_remove(handler, &del_list); + /* Now the mm may be freed. */ + mmdrop(handler->mn.mm); + kfree(handler); } diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 3591923abebb9b172396136f186d3b053711ccbb..5f3edd255ca3cab845b8cc0019b86132c37a0b29 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1439,8 +1439,7 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num, 4096 : hfi1_max_mtu), IB_MTU_4096); props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu : mtu_to_enum(ppd->ibmtu, IB_MTU_4096); - props->phys_mtu = HFI1_CAP_IS_KSET(AIP) ? hfi1_max_mtu : - ib_mtu_enum_to_int(props->max_mtu); + props->phys_mtu = hfi1_max_mtu; return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index 4b693d542aceef1566de7d82766d939890025dc3..8644136075719ca6d54bbab7fa95114b5987526d 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -38,45 +38,36 @@ #define CMD_POLL_TOKEN 0xffff #define CMD_MAX_NUM 32 -static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, - u8 op_modifier, u16 op, u16 token, - int event) +static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { - return hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, token, event); + return hr_dev->hw->post_mbox(hr_dev, mbox_msg); } /* this should be called with "poll_sem" */ -static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, - unsigned int timeout) +static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { int ret; - ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - CMD_POLL_TOKEN, 0); + ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg); if (ret) { dev_err_ratelimited(hr_dev->dev, "failed to post mailbox 0x%x in poll mode, ret = %d.\n", - op, ret); + mbox_msg->cmd, ret); return ret; } - return hr_dev->hw->poll_mbox_done(hr_dev, timeout); + return hr_dev->hw->poll_mbox_done(hr_dev); } -static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, unsigned int timeout) +static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { int ret; down(&hr_dev->cmd.poll_sem); - ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, timeout); + ret = __hns_roce_cmd_mbox_poll(hr_dev, mbox_msg); up(&hr_dev->cmd.poll_sem); return ret; @@ -100,10 +91,8 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status, complete(&context->done); } -static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, - unsigned int timeout) +static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { struct hns_roce_cmdq *cmd = &hr_dev->cmd; 
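The mmu_rb fix above uses the standard idiom for keeping a struct mm_struct's bookkeeping alive across teardown: mmgrab() pins the mm structure itself (not the address space, which mmget() would pin), so handler->mn.mm remains valid while the notifier is unregistered and cached entries are freed, and mmdrop() releases it at the end. Condensed, with illustrative names:

    #include <linux/mmu_notifier.h>
    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    static void demo_unregister(struct demo_handler *handler)
    {
            mmgrab(handler->mn.mm);         /* pin struct mm_struct */

            /* No new notifications past this point. */
            mmu_notifier_unregister(&handler->mn, handler->mn.mm);

            /* ... flush pending work, remove and free cached ranges ... */

            mmdrop(handler->mn.mm);         /* now the mm may be freed */
            kfree(handler);
    }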
struct hns_roce_cmd_context *context; @@ -124,20 +113,19 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, reinit_completion(&context->done); - ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - context->token, 1); + mbox_msg->token = context->token; + ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg); if (ret) { dev_err_ratelimited(dev, "failed to post mailbox 0x%x in event mode, ret = %d.\n", - op, ret); + mbox_msg->cmd, ret); goto out; } if (!wait_for_completion_timeout(&context->done, - msecs_to_jiffies(timeout))) { + msecs_to_jiffies(HNS_ROCE_CMD_TIMEOUT_MSECS))) { dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n", - context->token, op); + context->token, mbox_msg->cmd); ret = -EBUSY; goto out; } @@ -145,45 +133,50 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, ret = context->result; if (ret) dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n", - context->token, op, ret); + context->token, mbox_msg->cmd, ret); out: context->busy = 0; return ret; } -static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, unsigned long in_modifier, - u8 op_modifier, u16 op, unsigned int timeout) +static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { int ret; down(&hr_dev->cmd.event_sem); - ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, timeout); + ret = __hns_roce_cmd_mbox_wait(hr_dev, mbox_msg); up(&hr_dev->cmd.event_sem); return ret; } int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, - unsigned long in_modifier, u8 op_modifier, u16 op, - unsigned int timeout) + u8 cmd, unsigned long tag) { + struct hns_roce_mbox_msg mbox_msg = {}; bool is_busy; if (hr_dev->hw->chk_mbox_avail) if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy)) return is_busy ? 
-EBUSY : 0; - if (hr_dev->cmd.use_events) - return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - timeout); - else - return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - timeout); + mbox_msg.in_param = in_param; + mbox_msg.out_param = out_param; + mbox_msg.cmd = cmd; + mbox_msg.tag = tag; + + if (hr_dev->cmd.use_events) { + mbox_msg.event_en = 1; + + return hns_roce_cmd_mbox_wait(hr_dev, &mbox_msg); + } else { + mbox_msg.event_en = 0; + mbox_msg.token = CMD_POLL_TOKEN; + + return hns_roce_cmd_mbox_poll(hr_dev, &mbox_msg); + } } int hns_roce_cmd_init(struct hns_roce_dev *hr_dev) @@ -269,3 +262,15 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev, dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma); kfree(mailbox); } + +int hns_roce_create_hw_ctx(struct hns_roce_dev *dev, + struct hns_roce_cmd_mailbox *mailbox, + u8 cmd, unsigned long idx) +{ + return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cmd, idx); +} + +int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd, unsigned long idx) +{ + return hns_roce_cmd_mbox(dev, 0, 0, cmd, idx); +} diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h index 8025e7f657fa668b6a60f886080d5981aea52d54..052a3d60905aa3063f00596c529d6facf52de74e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.h +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h @@ -140,12 +140,16 @@ enum { }; int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, - unsigned long in_modifier, u8 op_modifier, u16 op, - unsigned int timeout); + u8 cmd, unsigned long tag); struct hns_roce_cmd_mailbox * hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev); void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev, struct hns_roce_cmd_mailbox *mailbox); +int hns_roce_create_hw_ctx(struct hns_roce_dev *dev, + struct hns_roce_cmd_mailbox *mailbox, + u8 cmd, unsigned long idx); +int hns_roce_destroy_hw_ctx(struct hns_roce_dev *dev, u8 cmd, + unsigned long idx); #endif /* _HNS_ROCE_CMD_H */ diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 65e1e6126d95096f1ebb76cc83708be2c23f3962..5320f4a4c31295895ae0594eed6030ae5aa6bfc1 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -100,12 +100,39 @@ static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn) mutex_unlock(&cq_table->bank_mutex); } +static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev, + struct hns_roce_cq *hr_cq, + u64 *mtts, dma_addr_t dma_handle) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) { + ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n"); + return PTR_ERR(mailbox); + } + + hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle); + + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC, + hr_cq->cqn); + if (ret) + ibdev_err(ibdev, + "failed to send create cmd for CQ(0x%lx), ret = %d.\n", + hr_cq->cqn, ret); + + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + + return ret; +} + static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) { struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct ib_device *ibdev = &hr_dev->ib_dev; - struct hns_roce_cmd_mailbox *mailbox; - u64 mtts[MTT_MIN_COUNT] = { 0 }; + u64 mtts[MTT_MIN_COUNT] = {}; dma_addr_t dma_handle; int ret; @@ -121,7 
+148,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) if (ret) { ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n", hr_cq->cqn, ret); - goto err_out; + return ret; } ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL)); @@ -130,41 +157,17 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) goto err_put; } - /* Allocate mailbox memory */ - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) { - ret = PTR_ERR(mailbox); - goto err_xa; - } - - hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle); - - /* Send mailbox to hw */ - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0, - HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS); - hns_roce_free_cmd_mailbox(hr_dev, mailbox); - if (ret) { - ibdev_err(ibdev, - "failed to send create cmd for CQ(0x%lx), ret = %d.\n", - hr_cq->cqn, ret); + ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle); + if (ret) goto err_xa; - } - - hr_cq->cons_index = 0; - hr_cq->arm_sn = 1; - - refcount_set(&hr_cq->refcount, 1); - init_completion(&hr_cq->free); return 0; err_xa: xa_erase(&cq_table->array, hr_cq->cqn); - err_put: hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn); -err_out: return ret; } @@ -174,9 +177,8 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) struct device *dev = hr_dev->dev; int ret; - ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn, 1, - HNS_ROCE_CMD_DESTROY_CQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC, + hr_cq->cqn); if (ret) dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret, hr_cq->cqn); @@ -414,6 +416,11 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, goto err_cqc; } + hr_cq->cons_index = 0; + hr_cq->arm_sn = 1; + refcount_set(&hr_cq->refcount, 1); + init_completion(&hr_cq->free); + return 0; err_cqc: diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 8bea6de7f9552d5fb9b271471c27372745e46573..0d160432fa6588ff6b9a4c95d55e409a6be1b4ca 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -106,16 +106,6 @@ enum { SERV_TYPE_XRC = 5, }; -enum hns_roce_qp_state { - HNS_ROCE_QP_STATE_RST, - HNS_ROCE_QP_STATE_INIT, - HNS_ROCE_QP_STATE_RTR, - HNS_ROCE_QP_STATE_RTS, - HNS_ROCE_QP_STATE_SQD, - HNS_ROCE_QP_STATE_ERR, - HNS_ROCE_QP_NUM_STATE, -}; - enum hns_roce_event { HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01, HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02, @@ -139,8 +129,6 @@ enum hns_roce_event { HNS_ROCE_EVENT_TYPE_INVALID_XRCETH = 0x17, }; -#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12 - enum { HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0), HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1), @@ -535,6 +523,11 @@ struct hns_roce_cmd_context { u16 busy; }; +enum hns_roce_cmdq_state { + HNS_ROCE_CMDQ_STATE_NORMAL, + HNS_ROCE_CMDQ_STATE_FATAL_ERR, +}; + struct hns_roce_cmdq { struct dma_pool *pool; struct semaphore poll_sem; @@ -554,6 +547,7 @@ struct hns_roce_cmdq { * close device, switch into poll mode(non event mode) */ u8 use_events; + enum hns_roce_cmdq_state state; }; struct hns_roce_cmd_mailbox { @@ -561,6 +555,15 @@ struct hns_roce_cmd_mailbox { dma_addr_t dma; }; +struct hns_roce_mbox_msg { + u64 in_param; + u64 out_param; + u8 cmd; + u32 tag; + u16 token; + u8 event_en; +}; + struct hns_roce_dev; struct hns_roce_rinl_sge { @@ -647,6 +650,11 @@ struct hns_roce_ceqe { __le32 rsv[15]; }; 
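/*
 * The FIELD_LOC(high, low) coordinates used by the new *_FIELD_LOC macros
 * (CEQE_*, AEQE_*, and the rest of this series) are absolute bit positions
 * within the whole structure, not within a single __le32 word. Conceptually
 * the hr_reg_read()/hr_reg_write() accessors resolve them like this sketch
 * (illustrative only, not the driver's actual macro definitions):
 *
 *	// word index and in-word mask derived from absolute bit positions
 *	u32 word = low / 32;
 *	u32 mask = GENMASK(high % 32, low % 32);
 *	__le32 *reg = (__le32 *)base + word;
 *
 *	*reg &= ~cpu_to_le32(mask);			  // clear the field
 *	*reg |= cpu_to_le32((val << (low % 32)) & mask);  // write new value
 *
 * e.g. CEQE_CQN at FIELD_LOC(23, 0) is bits 0-23 of the first word, and
 * AEQE_EVENT_QUEUE_NUM at FIELD_LOC(55, 32) is bits 0-23 of the second.
 */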
+#define CEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_ceqe, h, l) + +#define CEQE_CQN CEQE_FIELD_LOC(23, 0) +#define CEQE_OWNER CEQE_FIELD_LOC(31, 31) + struct hns_roce_aeqe { __le32 asyn; union { @@ -666,6 +674,13 @@ struct hns_roce_aeqe { __le32 rsv[12]; }; +#define AEQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_aeqe, h, l) + +#define AEQE_EVENT_TYPE AEQE_FIELD_LOC(7, 0) +#define AEQE_SUB_TYPE AEQE_FIELD_LOC(15, 8) +#define AEQE_OWNER AEQE_FIELD_LOC(31, 31) +#define AEQE_EVENT_QUEUE_NUM AEQE_FIELD_LOC(55, 32) + struct hns_roce_eq { struct hns_roce_dev *hr_dev; void __iomem *db_reg; @@ -715,7 +730,6 @@ struct hns_roce_caps { u32 num_pi_qps; u32 reserved_qps; int num_qpc_timer; - int num_cqc_timer; u32 num_srqs; u32 max_wqes; u32 max_srq_wrs; @@ -851,11 +865,9 @@ struct hns_roce_hw { int (*hw_profile)(struct hns_roce_dev *hr_dev); int (*hw_init)(struct hns_roce_dev *hr_dev); void (*hw_exit)(struct hns_roce_dev *hr_dev); - int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, - u16 token, int event); - int (*poll_mbox_done)(struct hns_roce_dev *hr_dev, - unsigned int timeout); + int (*post_mbox)(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg); + int (*poll_mbox_done)(struct hns_roce_dev *hr_dev); bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy); int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index, const union ib_gid *gid, const struct ib_gid_attr *attr); @@ -873,10 +885,10 @@ struct hns_roce_hw { struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, dma_addr_t dma_handle); int (*set_hem)(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, int step_idx); + struct hns_roce_hem_table *table, int obj, u32 step_idx); int (*clear_hem)(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, - int step_idx); + u32 step_idx); int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state); @@ -1140,9 +1152,6 @@ struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); -int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index); unsigned long key_to_hw_index(u32 key); int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata); @@ -1180,7 +1189,6 @@ void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n); void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n); bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq, struct ib_cq *ib_cq); -enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state); void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq); void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 7cc45a332fc0215a6c6013d88d48abe9894eb239..a5f7b87757568e418a33850f2ab56f22bebdb77f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -488,7 +488,7 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_index *index) { struct ib_device *ibdev = &hr_dev->ib_dev; - int step_idx; + u32 step_idx; int ret = 0; if (index->inited & HEM_INDEX_L0) { @@ -618,7 +618,7 @@ static void 
clear_mhop_hem(struct hns_roce_dev *hr_dev, struct ib_device *ibdev = &hr_dev->ib_dev; u32 hop_num = mhop->hop_num; u32 chunk_ba_num; - int step_idx; + u32 step_idx; index->inited = HEM_INDEX_BUF; chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 587b46ddecfc0187cb47ee4a1b264c2bb431b5b0..b5ed2aee578b07a89f75ebdeb78b14778da1bb27 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -149,8 +149,7 @@ static void set_atomic_seg(const struct ib_send_wr *wr, aseg->cmp_data = 0; } - roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge); } static int fill_ext_sge_inl_data(struct hns_roce_qp *qp, @@ -271,8 +270,7 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr, dseg += sizeof(struct hns_roce_v2_rc_send_wqe); if (msg_len <= HNS_ROCE_V2_MAX_RC_INL_INN_SZ) { - roce_set_bit(rc_sq_wqe->byte_20, - V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 0); + hr_reg_clear(rc_sq_wqe, RC_SEND_WQE_INL_TYPE); for (i = 0; i < wr->num_sge; i++) { memcpy(dseg, ((void *)wr->sg_list[i].addr), @@ -280,17 +278,13 @@ static int set_rc_inl(struct hns_roce_qp *qp, const struct ib_send_wr *wr, dseg += wr->sg_list[i].length; } } else { - roce_set_bit(rc_sq_wqe->byte_20, - V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S, 1); + hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_INL_TYPE); ret = fill_ext_sge_inl_data(qp, wr, &curr_idx, msg_len); if (ret) return ret; - roce_set_field(rc_sq_wqe->byte_16, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, - curr_idx - *sge_idx); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx); } *sge_idx = curr_idx; @@ -309,12 +303,10 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, int j = 0; int i; - roce_set_field(rc_sq_wqe->byte_20, - V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, - V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, - (*sge_ind) & (qp->sge.sge_cnt - 1)); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX, + (*sge_ind) & (qp->sge.sge_cnt - 1)); - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE, !!(wr->send_flags & IB_SEND_INLINE)); if (wr->send_flags & IB_SEND_INLINE) return set_rc_inl(qp, wr, rc_sq_wqe, sge_ind); @@ -339,9 +331,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, valid_num_sge - HNS_ROCE_SGE_IN_WQE); } - roce_set_field(rc_sq_wqe->byte_16, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge); return 0; } @@ -412,8 +402,7 @@ static int set_ud_opcode(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe, ud_sq_wqe->immtdata = get_immtdata(wr); - roce_set_field(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OPCODE_M, - V2_UD_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op)); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OPCODE, to_hr_opcode(ib_op)); return 0; } @@ -424,21 +413,15 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe, struct ib_device *ib_dev = ah->ibah.device; struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); - roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M, - V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, ah->av.udp_sport); - - roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M, - V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, 
ah->av.hop_limit); - roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M, - V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass); - roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M, - V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel); if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL)) return -EINVAL; - roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M, - V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl); ud_sq_wqe->sgid_index = ah->av.gid_index; @@ -448,10 +431,8 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe, if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) return 0; - roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S, - ah->av.vlan_en); - roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M, - V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id); return 0; } @@ -476,27 +457,19 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp, ud_sq_wqe->msg_len = cpu_to_le32(msg_len); - roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S, + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_CQE, !!(wr->send_flags & IB_SEND_SIGNALED)); - - roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S, + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SE, !!(wr->send_flags & IB_SEND_SOLICITED)); - roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M, - V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn); - - roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M, - V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge); - - roce_set_field(ud_sq_wqe->byte_20, - V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M, - V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S, - curr_idx & (qp->sge.sge_cnt - 1)); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SGE_NUM, valid_num_sge); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_MSG_START_SGE_IDX, + curr_idx & (qp->sge.sge_cnt - 1)); ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ? 
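/*
 * Worked example of the conversion pattern used throughout this function
 * (numbers taken from the UD WQE layout changed later in this patch): the
 * first member of struct hns_roce_v2_ud_send_wqe is named byte_4, so member
 * byte_40 sits at struct offset 36 and its bit 20 is absolute bit
 * 36 * 8 + 20 = 308. The old shift/mask pair therefore maps onto a single
 * FIELD_LOC range:
 *
 *	// before: per-word shift and mask
 *	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
 *		       V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);
 *	// after: absolute bit range, bits 308-311 of the WQE
 *	hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);
 */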
qp->qkey : ud_wr(wr)->remote_qkey); - roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M, - V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn); ret = fill_ud_av(ud_sq_wqe, ah); if (ret) @@ -516,8 +489,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp, dma_wmb(); *sge_idx = curr_idx; - roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S, - owner_bit); + hr_reg_write(ud_sq_wqe, UD_SEND_WQE_OWNER, owner_bit); return 0; } @@ -553,7 +525,7 @@ static int set_rc_opcode(struct hns_roce_dev *hr_dev, ret = -EOPNOTSUPP; break; case IB_WR_LOCAL_INV: - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1); + hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_SO); fallthrough; case IB_WR_SEND_WITH_INV: rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey); @@ -565,11 +537,11 @@ static int set_rc_opcode(struct hns_roce_dev *hr_dev, if (unlikely(ret)) return ret; - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M, - V2_RC_SEND_WQE_BYTE_4_OPCODE_S, to_hr_opcode(ib_op)); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OPCODE, to_hr_opcode(ib_op)); return ret; } + static inline int set_rc_wqe(struct hns_roce_qp *qp, const struct ib_send_wr *wr, void *wqe, unsigned int *sge_idx, @@ -590,13 +562,13 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp, if (WARN_ON(ret)) return ret; - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE, (wr->send_flags & IB_SEND_FENCE) ? 1 : 0); - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE, (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0); - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE, (wr->send_flags & IB_SEND_SIGNALED) ? 
1 : 0); if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || @@ -616,8 +588,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp, dma_wmb(); *sge_idx = curr_idx; - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S, - owner_bit); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_OWNER, owner_bit); return ret; } @@ -682,14 +653,11 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe; /* All kinds of DirectWQE have the same header field layout */ - roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FLAG_S, 1); - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M, - V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl); - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M, - V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, - qp->sl >> HNS_ROCE_SL_SHIFT); - roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M, - V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head); + hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_H, + qp->sl >> HNS_ROCE_SL_SHIFT); + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head); hns_roce_write512(hr_dev, wqe, qp->sq.db_reg); } @@ -1265,6 +1233,16 @@ static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev) return tail == priv->cmq.csq.head; } +static void update_cmdq_status(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + + if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT || + handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) + hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR; +} + static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, struct hns_roce_cmq_desc *desc, int num) { @@ -1296,7 +1274,8 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, } while (++timeout < priv->cmq.tx_timeout); if (hns_roce_cmq_csq_done(hr_dev)) { - for (ret = 0, i = 0; i < num; i++) { + ret = 0; + for (i = 0; i < num; i++) { /* check the result of hardware write back */ desc[i] = csq->desc[tail++]; if (tail == csq->desc_num) @@ -1318,6 +1297,8 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, csq->head, tail); csq->head = tail; + update_cmdq_status(hr_dev); + ret = -EAGAIN; } @@ -1332,6 +1313,9 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, bool busy; int ret; + if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) + return -EIO; + if (!v2_chk_mbox_is_avail(hr_dev, &busy)) return busy ? 
-EBUSY : 0; @@ -1344,17 +1328,17 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, return ret; } -static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj, - dma_addr_t base_addr, u16 op) +static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, + dma_addr_t base_addr, u8 cmd, unsigned long tag) { - struct hns_roce_cmd_mailbox *mbox = hns_roce_alloc_cmd_mailbox(hr_dev); + struct hns_roce_cmd_mailbox *mbox; int ret; + mbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mbox)) return PTR_ERR(mbox); - ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, obj, 0, op, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag); hns_roce_free_cmd_mailbox(hr_dev, mbox); return ret; } @@ -1499,7 +1483,7 @@ static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id) if (ret) continue; - if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) { + if (hr_reg_read(resp, FUNC_CLEAR_RST_FUN_DONE)) { if (vf_id == 0) hr_dev->is_reset = true; return; @@ -1510,7 +1494,7 @@ static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id) hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag); } -static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id) +static int hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id) { enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES; struct hns_roce_cmq_desc desc[2]; @@ -1521,17 +1505,29 @@ static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id) desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false); hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id); - hns_roce_cmq_send(hr_dev, desc, 2); + + return hns_roce_cmq_send(hr_dev, desc, 2); } static void hns_roce_function_clear(struct hns_roce_dev *hr_dev) { + int ret; int i; + if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) + return; + for (i = hr_dev->func_num - 1; i >= 0; i--) { __hns_roce_function_clear(hr_dev, i); - if (i != 0) - hns_roce_free_vf_resource(hr_dev, i); + + if (i == 0) + continue; + + ret = hns_roce_free_vf_resource(hr_dev, i); + if (ret) + ibdev_err(&hr_dev->ib_dev, + "failed to free vf resource, vf_id = %d, ret = %d.\n", + i, ret); } } @@ -1751,17 +1747,16 @@ static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, swt = (struct hns_roce_vf_switch *)desc.data; hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true); swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL); - roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M, - VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id); + hr_reg_write(swt, VF_SWITCH_VF_ID, vf_id); ret = hns_roce_cmq_send(hr_dev, &desc, 1); if (ret) return ret; desc.flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN); desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR); - roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1); - roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0); - roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1); + hr_reg_enable(swt, VF_SWITCH_ALW_LPBK); + hr_reg_clear(swt, VF_SWITCH_ALW_LCL_LPBK); + hr_reg_enable(swt, VF_SWITCH_ALW_DST_OVRD); return hns_roce_cmq_send(hr_dev, &desc, 1); } @@ -1941,7 +1936,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM; caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM; caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM; - caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM; + caps->cqc_timer_bt_num = 
HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM; caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA; caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA; @@ -2219,7 +2214,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg); caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg); caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer); - caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer); caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges); caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges); caps->num_aeq_vectors = resp_a->num_aeq_vectors; @@ -2246,87 +2240,39 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) ctx_hop_num = resp_b->ctx_hop_num; pbl_hop_num = resp_b->pbl_hop_num; - caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds, - V2_QUERY_PF_CAPS_C_NUM_PDS_M, - V2_QUERY_PF_CAPS_C_NUM_PDS_S); - caps->flags = roce_get_field(resp_c->cap_flags_num_pds, - V2_QUERY_PF_CAPS_C_CAP_FLAGS_M, - V2_QUERY_PF_CAPS_C_CAP_FLAGS_S); + caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS); + + caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS); caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) << HNS_ROCE_CAP_FLAGS_EX_SHIFT; - caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs, - V2_QUERY_PF_CAPS_C_NUM_CQS_M, - V2_QUERY_PF_CAPS_C_NUM_CQS_S); - caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs, - V2_QUERY_PF_CAPS_C_MAX_GID_M, - V2_QUERY_PF_CAPS_C_MAX_GID_S); - - caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth, - V2_QUERY_PF_CAPS_C_CQ_DEPTH_M, - V2_QUERY_PF_CAPS_C_CQ_DEPTH_S); - caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws, - V2_QUERY_PF_CAPS_C_NUM_MRWS_M, - V2_QUERY_PF_CAPS_C_NUM_MRWS_S); - caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps, - V2_QUERY_PF_CAPS_C_NUM_QPS_M, - V2_QUERY_PF_CAPS_C_NUM_QPS_S); - caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps, - V2_QUERY_PF_CAPS_C_MAX_ORD_M, - V2_QUERY_PF_CAPS_C_MAX_ORD_S); + caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS); + caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID); + caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH); + caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS); + caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS); + caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD); caps->max_qp_dest_rdma = caps->max_qp_init_rdma; caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth); - caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_NUM_SRQS_M, - V2_QUERY_PF_CAPS_D_NUM_SRQS_S); - caps->cong_type = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_CONG_TYPE_M, - V2_QUERY_PF_CAPS_D_CONG_TYPE_S); - caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); - caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth, - V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M, - V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S); - caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth, - V2_QUERY_PF_CAPS_D_NUM_CEQS_M, - V2_QUERY_PF_CAPS_D_NUM_CEQS_S); - - caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth, - V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M, - V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S); - caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth, - V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M, - V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S); - caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth, - V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M, - V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S); - caps->reserved_pds = 
roce_get_field(resp_d->num_uars_rsv_pds, - V2_QUERY_PF_CAPS_D_RSV_PDS_M, - V2_QUERY_PF_CAPS_D_RSV_PDS_S); - caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds, - V2_QUERY_PF_CAPS_D_NUM_UARS_M, - V2_QUERY_PF_CAPS_D_NUM_UARS_S); - caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps, - V2_QUERY_PF_CAPS_D_RSV_QPS_M, - V2_QUERY_PF_CAPS_D_RSV_QPS_S); - caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps, - V2_QUERY_PF_CAPS_D_RSV_UARS_M, - V2_QUERY_PF_CAPS_D_RSV_UARS_S); - caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws, - V2_QUERY_PF_CAPS_E_RSV_MRWS_M, - V2_QUERY_PF_CAPS_E_RSV_MRWS_S); - caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws, - V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M, - V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S); - caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs, - V2_QUERY_PF_CAPS_E_RSV_CQS_M, - V2_QUERY_PF_CAPS_E_RSV_CQS_S); - caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs, - V2_QUERY_PF_CAPS_E_RSV_SRQS_M, - V2_QUERY_PF_CAPS_E_RSV_SRQS_S); - caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey, - V2_QUERY_PF_CAPS_E_RSV_LKEYS_M, - V2_QUERY_PF_CAPS_E_RSV_LKEYS_S); + caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS); + caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE); + caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); + caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH); + caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS); + caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH); + caps->default_aeq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_AEQ_ARM_ST); + caps->default_ceq_arm_st = hr_reg_read(resp_d, PF_CAPS_D_CEQ_ARM_ST); + caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS); + caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS); + caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS); + caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS); + + caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS); + caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT); + caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS); + caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS); + caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS); caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt); caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period); caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt); @@ -2341,15 +2287,9 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) caps->cqe_hop_num = pbl_hop_num; caps->srqwqe_hop_num = pbl_hop_num; caps->idx_hop_num = pbl_hop_num; - caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M, - V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S); - caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M, - V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S); - caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, - V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M, - V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S); + caps->wqe_sq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_SQWQE_HOP_NUM); + caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM); + caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM); return 0; } @@ -2756,21 +2696,21 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev) free_dip_list(hr_dev); } -static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 
op_modifier, - u16 op, u16 token, int event) +static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { struct hns_roce_cmq_desc desc; struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data; hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false); - mb->in_param_l = cpu_to_le32(in_param); - mb->in_param_h = cpu_to_le32(in_param >> 32); - mb->out_param_l = cpu_to_le32(out_param); - mb->out_param_h = cpu_to_le32(out_param >> 32); - mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op); - mb->token_event_en = cpu_to_le32(event << 16 | token); + mb->in_param_l = cpu_to_le32(mbox_msg->in_param); + mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32); + mb->out_param_l = cpu_to_le32(mbox_msg->out_param); + mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32); + mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd); + mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 | + mbox_msg->token); return hns_roce_cmq_send(hr_dev, &desc, 1); } @@ -2788,6 +2728,9 @@ static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout, mb_st = (struct hns_roce_mbox_status *)desc.data; end = msecs_to_jiffies(timeout) + jiffies; while (v2_chk_mbox_is_avail(hr_dev, &busy)) { + if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) + return -EIO; + status = 0; hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true); @@ -2823,9 +2766,8 @@ static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout, return ret; } -static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 op_modifier, - u16 op, u16 token, int event) +static int v2_post_mbox(struct hns_roce_dev *hr_dev, + struct hns_roce_mbox_msg *mbox_msg) { u8 status = 0; int ret; @@ -2841,8 +2783,7 @@ static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, } /* Post new message to mbox */ - ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier, - op_modifier, op, token, event); + ret = hns_roce_mbox_post(hr_dev, mbox_msg); if (ret) dev_err_ratelimited(hr_dev->dev, "failed to post mailbox, ret = %d.\n", ret); @@ -2850,12 +2791,13 @@ static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, return ret; } -static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev, unsigned int timeout) +static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev) { u8 status = 0; int ret; - ret = v2_wait_mbox_complete(hr_dev, timeout, &status); + ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_CMD_TIMEOUT_MSECS, + &status); if (!ret) { if (status != MB_ST_COMPLETE_SUCC) return -EBUSY; @@ -2892,10 +2834,8 @@ static int config_sgid_table(struct hns_roce_dev *hr_dev, hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false); - roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M, - CFG_SGID_TB_TABLE_IDX_S, gid_index); - roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M, - CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type); + hr_reg_write(sgid_tb, CFG_SGID_TB_TABLE_IDX, gid_index); + hr_reg_write(sgid_tb, CFG_SGID_TB_VF_SGID_TYPE, sgid_type); copy_gid(&sgid_tb->vf_sgid_l, gid); @@ -2930,19 +2870,14 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev, copy_gid(&tb_a->vf_sgid_l, gid); - roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_SGID_TYPE_M, - CFG_GMV_TB_VF_SGID_TYPE_S, sgid_type); - roce_set_bit(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_EN_S, - vlan_id < VLAN_CFI_MASK); - roce_set_field(tb_a->vf_sgid_type_vlan, CFG_GMV_TB_VF_VLAN_ID_M, - 
CFG_GMV_TB_VF_VLAN_ID_S, vlan_id); + hr_reg_write(tb_a, GMV_TB_A_VF_SGID_TYPE, sgid_type); + hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_EN, vlan_id < VLAN_CFI_MASK); + hr_reg_write(tb_a, GMV_TB_A_VF_VLAN_ID, vlan_id); tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac); - roce_set_field(tb_b->vf_smac_h, CFG_GMV_TB_SMAC_H_M, - CFG_GMV_TB_SMAC_H_S, *(u16 *)&mac[4]); - roce_set_field(tb_b->table_idx_rsv, CFG_GMV_TB_SGID_IDX_M, - CFG_GMV_TB_SGID_IDX_S, gid_index); + hr_reg_write(tb_b, GMV_TB_B_SMAC_H, *(u16 *)&mac[4]); + hr_reg_write(tb_b, GMV_TB_B_SGID_IDX, gid_index); return hns_roce_cmq_send(hr_dev, desc, 2); } @@ -2991,10 +2926,8 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, reg_smac_l = *(u32 *)(&addr[0]); reg_smac_h = *(u16 *)(&addr[4]); - roce_set_field(smac_tb->tb_idx_rsv, CFG_SMAC_TB_IDX_M, - CFG_SMAC_TB_IDX_S, phy_port); - roce_set_field(smac_tb->vf_smac_h_rsv, CFG_SMAC_TB_VF_SMAC_H_M, - CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h); + hr_reg_write(smac_tb, CFG_SMAC_TB_IDX, phy_port); + hr_reg_write(smac_tb, CFG_SMAC_TB_VF_SMAC_H, reg_smac_h); smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l); return hns_roce_cmq_send(hr_dev, &desc, 1); @@ -3023,21 +2956,15 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, mpt_entry->pbl_size = cpu_to_le32(mr->npages); mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3); - roce_set_field(mpt_entry->byte_48_mode_ba, - V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S, - upper_32_bits(pbl_ba >> 3)); + hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); - roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M, - V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0])); + hr_reg_write(mpt_entry, MPT_PA0_H, upper_32_bits(pages[0])); mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); - roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M, - V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1])); - roce_set_field(mpt_entry->byte_64_buf_pa1, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); + hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1])); + hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, + to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); return 0; } @@ -3046,7 +2973,6 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf, struct hns_roce_mr *mr) { struct hns_roce_v2_mpt_entry *mpt_entry; - int ret; mpt_entry = mb_buf; memset(mpt_entry, 0, sizeof(*mpt_entry)); @@ -3085,9 +3011,7 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD); - ret = set_mtpt_pbl(hr_dev, mpt_entry, mr); - - return ret; + return set_mtpt_pbl(hr_dev, mpt_entry, mr); } static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, @@ -3098,24 +3022,19 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, u32 mr_access_flags = mr->access; int ret = 0; - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, - V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID); - - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, - V2_MPT_BYTE_4_PD_S, mr->pd); + hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID); + hr_reg_write(mpt_entry, MPT_PD, mr->pd); if (flags & IB_MR_REREG_ACCESS) { - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, - V2_MPT_BYTE_8_BIND_EN_S, + hr_reg_write(mpt_entry, MPT_BIND_EN, (mr_access_flags & IB_ACCESS_MW_BIND ? 
1 : 0)); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, - V2_MPT_BYTE_8_ATOMIC_EN_S, + hr_reg_write(mpt_entry, MPT_ATOMIC_EN, mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S, + hr_reg_write(mpt_entry, MPT_RR_EN, mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S, + hr_reg_write(mpt_entry, MPT_RW_EN, mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, + hr_reg_write(mpt_entry, MPT_LW_EN, mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0); } @@ -3146,37 +3065,28 @@ static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev, return -ENOBUFS; } - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, - V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M, - V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1); - roce_set_field(mpt_entry->byte_4_pd_hop_st, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_M, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, - V2_MPT_BYTE_4_PD_S, mr->pd); + hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); + hr_reg_write(mpt_entry, MPT_PD, mr->pd); + + hr_reg_enable(mpt_entry, MPT_RA_EN); + hr_reg_enable(mpt_entry, MPT_R_INV_EN); + hr_reg_enable(mpt_entry, MPT_L_INV_EN); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); + hr_reg_enable(mpt_entry, MPT_FRE); + hr_reg_clear(mpt_entry, MPT_MR_MW); + hr_reg_enable(mpt_entry, MPT_BPD); + hr_reg_clear(mpt_entry, MPT_PA); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1); + hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1); + hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, + to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); + hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, + to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); mpt_entry->pbl_size = cpu_to_le32(mr->npages); mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> 3)); - roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M, - V2_MPT_BYTE_48_PBL_BA_H_S, - upper_32_bits(pbl_ba >> 3)); - - roce_set_field(mpt_entry->byte_64_buf_pa1, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, - to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); + hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3)); return 0; } @@ -3188,36 +3098,29 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw) mpt_entry = mb_buf; memset(mpt_entry, 0, sizeof(*mpt_entry)); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, - V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, - V2_MPT_BYTE_4_PD_S, mw->pdn); - roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M, - V2_MPT_BYTE_4_PBL_HOP_NUM_S, - mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 
0 : - mw->pbl_hop_num); - roce_set_field(mpt_entry->byte_4_pd_hop_st, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_M, - V2_MPT_BYTE_4_PBL_BA_PG_SZ_S, - mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET); - - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1); - - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1); - roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S, - mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1); + hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); + hr_reg_write(mpt_entry, MPT_PD, mw->pdn); - roce_set_field(mpt_entry->byte_64_buf_pa1, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M, - V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S, - mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + hr_reg_enable(mpt_entry, MPT_R_INV_EN); + hr_reg_enable(mpt_entry, MPT_L_INV_EN); + hr_reg_enable(mpt_entry, MPT_LW_EN); + + hr_reg_enable(mpt_entry, MPT_MR_MW); + hr_reg_enable(mpt_entry, MPT_BPD); + hr_reg_clear(mpt_entry, MPT_PA); + hr_reg_write(mpt_entry, MPT_BQP, + mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1); mpt_entry->lkey = cpu_to_le32(mw->rkey); + hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, + mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : + mw->pbl_hop_num); + hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ, + mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET); + hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ, + mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET); + return 0; } @@ -3794,38 +3697,38 @@ static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries, } static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type, - int step_idx, u16 *mbox_op) + u32 step_idx, u8 *mbox_cmd) { - u16 op; + u8 cmd; switch (type) { case HEM_TYPE_QPC: - op = HNS_ROCE_CMD_WRITE_QPC_BT0; + cmd = HNS_ROCE_CMD_WRITE_QPC_BT0; break; case HEM_TYPE_MTPT: - op = HNS_ROCE_CMD_WRITE_MPT_BT0; + cmd = HNS_ROCE_CMD_WRITE_MPT_BT0; break; case HEM_TYPE_CQC: - op = HNS_ROCE_CMD_WRITE_CQC_BT0; + cmd = HNS_ROCE_CMD_WRITE_CQC_BT0; break; case HEM_TYPE_SRQC: - op = HNS_ROCE_CMD_WRITE_SRQC_BT0; + cmd = HNS_ROCE_CMD_WRITE_SRQC_BT0; break; case HEM_TYPE_SCCC: - op = HNS_ROCE_CMD_WRITE_SCCC_BT0; + cmd = HNS_ROCE_CMD_WRITE_SCCC_BT0; break; case HEM_TYPE_QPC_TIMER: - op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; + cmd = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; break; case HEM_TYPE_CQC_TIMER: - op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; + cmd = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; break; default: dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type); return -EINVAL; } - *mbox_op = op + step_idx; + *mbox_cmd = cmd + step_idx; return 0; } @@ -3848,10 +3751,10 @@ static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj, } static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, - dma_addr_t base_addr, u32 hem_type, int step_idx) + dma_addr_t base_addr, u32 hem_type, u32 step_idx) { int ret; - u16 op; + u8 cmd; if (unlikely(hem_type == HEM_TYPE_GMV)) return config_gmv_ba_to_hw(hr_dev, obj, base_addr); @@ -3859,16 +3762,16 @@ static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx)) return 0; - ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &op); + ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd); if (ret < 0) return ret; - return config_hem_ba_to_hw(hr_dev, obj, base_addr, op); + return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj); } 
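/*
 * A short sketch of the mailbox command/tag scheme introduced above
 * (condensed from set_hem_to_hw() below): each HEM type has a base write
 * command, the hop level (step_idx) is folded into the command byte, and
 * the object index rides in the tag, so one helper covers every table
 * level:
 *
 *	u8 cmd;
 *	int ret;
 *
 *	ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &cmd);
 *	if (ret < 0)
 *		return ret;
 *	// e.g. HEM_TYPE_QPC, step 1 -> HNS_ROCE_CMD_WRITE_QPC_BT0 + 1
 *	return config_hem_ba_to_hw(hr_dev, base_addr, cmd, obj);
 */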
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, int obj, - int step_idx) + u32 step_idx) { struct hns_roce_hem_iter iter; struct hns_roce_hem_mhop mhop; @@ -3926,29 +3829,29 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, } static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, - int step_idx) + struct hns_roce_hem_table *table, + int tag, u32 step_idx) { - struct device *dev = hr_dev->dev; struct hns_roce_cmd_mailbox *mailbox; + struct device *dev = hr_dev->dev; + u8 cmd = 0xff; int ret; - u16 op = 0xff; if (!hns_roce_check_whether_mhop(hr_dev, table->type)) return 0; switch (table->type) { case HEM_TYPE_QPC: - op = HNS_ROCE_CMD_DESTROY_QPC_BT0; + cmd = HNS_ROCE_CMD_DESTROY_QPC_BT0; break; case HEM_TYPE_MTPT: - op = HNS_ROCE_CMD_DESTROY_MPT_BT0; + cmd = HNS_ROCE_CMD_DESTROY_MPT_BT0; break; case HEM_TYPE_CQC: - op = HNS_ROCE_CMD_DESTROY_CQC_BT0; + cmd = HNS_ROCE_CMD_DESTROY_CQC_BT0; break; case HEM_TYPE_SRQC: - op = HNS_ROCE_CMD_DESTROY_SRQC_BT0; + cmd = HNS_ROCE_CMD_DESTROY_SRQC_BT0; break; case HEM_TYPE_SCCC: case HEM_TYPE_QPC_TIMER: @@ -3961,15 +3864,13 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, return 0; } - op += step_idx; + cmd += step_idx; mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - /* configure the tag and op */ - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag); hns_roce_free_cmd_mailbox(hr_dev, mailbox); return ret; @@ -3993,9 +3894,8 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev, memcpy(mailbox->buf, context, qpc_size); memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size); - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0, - HNS_ROCE_CMD_MODIFY_QPC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, + HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); @@ -4654,9 +4554,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, if (ret) return ret; - if (gid_attr) - is_udp = (gid_attr->gid_type == - IB_GID_TYPE_ROCE_UDP_ENCAP); + is_udp = (gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP); } /* Only HIP08 needs to set the vlan_en bits in QPC */ @@ -5040,9 +4938,8 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0, - HNS_ROCE_CMD_QUERY_QPC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC, + hr_qp->qpn); if (ret) goto out; @@ -5408,9 +5305,8 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit); hr_reg_clear(srqc_mask, SRQC_LIMIT_WL); - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0, - HNS_ROCE_CMD_MODIFY_SRQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, + HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) { ibdev_err(&hr_dev->ib_dev, @@ -5436,9 +5332,8 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) return PTR_ERR(mailbox); srq_context = mailbox->buf; - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0, - HNS_ROCE_CMD_QUERY_SRQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, + 
HNS_ROCE_CMD_QUERY_SRQC, srq->srqn); if (ret) { ibdev_err(&hr_dev->ib_dev, "failed to process cmd of querying SRQ, ret = %d.\n", @@ -5478,9 +5373,8 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period); hr_reg_clear(cqc_mask, CQC_CQ_PERIOD); - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1, - HNS_ROCE_CMD_MODIFY_CQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, + HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) ibdev_err(&hr_dev->ib_dev, @@ -5603,7 +5497,7 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq) (eq->cons_index & (eq->entries - 1)) * eq->eqe_size); - return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^ + return (hr_reg_read(aeqe, AEQE_OWNER) ^ !!(eq->cons_index & eq->entries)) ? aeqe : NULL; } @@ -5623,15 +5517,9 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, */ dma_rmb(); - event_type = roce_get_field(aeqe->asyn, - HNS_ROCE_V2_AEQE_EVENT_TYPE_M, - HNS_ROCE_V2_AEQE_EVENT_TYPE_S); - sub_type = roce_get_field(aeqe->asyn, - HNS_ROCE_V2_AEQE_SUB_TYPE_M, - HNS_ROCE_V2_AEQE_SUB_TYPE_S); - queue_num = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M, - HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S); + event_type = hr_reg_read(aeqe, AEQE_EVENT_TYPE); + sub_type = hr_reg_read(aeqe, AEQE_SUB_TYPE); + queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM); switch (event_type) { case HNS_ROCE_EVENT_TYPE_PATH_MIG: @@ -5691,8 +5579,8 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq) (eq->cons_index & (eq->entries - 1)) * eq->eqe_size); - return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^ - (!!(eq->cons_index & eq->entries)) ? ceqe : NULL; + return (hr_reg_read(ceqe, CEQE_OWNER) ^ + !!(eq->cons_index & eq->entries)) ? 
ceqe : NULL; } static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, @@ -5708,8 +5596,7 @@ static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev, */ dma_rmb(); - cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M, - HNS_ROCE_V2_CEQE_COMP_CQN_S); + cqn = hr_reg_read(ceqe, CEQE_CQN); hns_roce_cq_completion(hr_dev, cqn); @@ -5807,15 +5694,14 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn) { struct device *dev = hr_dev->dev; int ret; + u8 cmd; if (eqn < hr_dev->caps.num_comp_vectors) - ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, - 0, HNS_ROCE_CMD_DESTROY_CEQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + cmd = HNS_ROCE_CMD_DESTROY_CEQC; else - ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M, - 0, HNS_ROCE_CMD_DESTROY_AEQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + cmd = HNS_ROCE_CMD_DESTROY_AEQC; + + ret = hns_roce_destroy_hw_ctx(hr_dev, cmd, eqn & HNS_ROCE_V2_EQN_M); if (ret) dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn); } @@ -5912,16 +5798,15 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) } static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq, - unsigned int eq_cmd) + struct hns_roce_eq *eq, u8 eq_cmd) { struct hns_roce_cmd_mailbox *mailbox; int ret; /* Allocate mailbox memory */ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR_OR_NULL(mailbox)) - return -ENOMEM; + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); ret = alloc_eq_buf(hr_dev, eq); if (ret) @@ -5931,8 +5816,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev, if (ret) goto err_cmd_mbox; - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0, - eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn); if (ret) { dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n"); goto err_cmd_mbox; @@ -6043,14 +5927,14 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; struct device *dev = hr_dev->dev; struct hns_roce_eq *eq; - unsigned int eq_cmd; - int irq_num; - int eq_num; int other_num; int comp_num; int aeq_num; - int i; + int irq_num; + int eq_num; + u8 eq_cmd; int ret; + int i; other_num = hr_dev->caps.num_other_vectors; comp_num = hr_dev->caps.num_comp_vectors; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index e9a73c34389bd846c65e4aaff129eff81c1c910d..a3a2524a5e25e911ba786527a863910a9c9ea1bf 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -41,7 +41,7 @@ #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 #define HNS_ROCE_V2_MAX_SRQ_SGE 64 #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 -#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100 +#define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100 #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64 @@ -291,33 +291,6 @@ struct hns_roce_v2_cq_context { #define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0 #define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0 -#define V2_CQC_BYTE_4_ARM_ST_S 6 -#define V2_CQC_BYTE_4_ARM_ST_M GENMASK(7, 6) - -#define V2_CQC_BYTE_4_CEQN_S 15 -#define V2_CQC_BYTE_4_CEQN_M GENMASK(23, 15) - -#define V2_CQC_BYTE_8_CQN_S 0 -#define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0) - -#define V2_CQC_BYTE_16_CQE_HOP_NUM_S 30 -#define V2_CQC_BYTE_16_CQE_HOP_NUM_M GENMASK(31, 30) - -#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S 0 -#define V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M 
GENMASK(23, 0) - -#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S 0 -#define V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M GENMASK(23, 0) - -#define V2_CQC_BYTE_52_CQE_CNT_S 0 -#define V2_CQC_BYTE_52_CQE_CNT_M GENMASK(23, 0) - -#define V2_CQC_BYTE_56_CQ_MAX_CNT_S 0 -#define V2_CQC_BYTE_56_CQ_MAX_CNT_M GENMASK(15, 0) - -#define V2_CQC_BYTE_56_CQ_PERIOD_S 16 -#define V2_CQC_BYTE_56_CQ_PERIOD_M GENMASK(31, 16) - #define CQC_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_cq_context, h, l) #define CQC_CQ_ST CQC_FIELD_LOC(1, 0) @@ -776,12 +749,15 @@ struct hns_roce_v2_mpt_entry { #define MPT_LKEY MPT_FIELD_LOC(223, 192) #define MPT_VA MPT_FIELD_LOC(287, 224) #define MPT_PBL_SIZE MPT_FIELD_LOC(319, 288) -#define MPT_PBL_BA MPT_FIELD_LOC(380, 320) +#define MPT_PBL_BA_L MPT_FIELD_LOC(351, 320) +#define MPT_PBL_BA_H MPT_FIELD_LOC(380, 352) #define MPT_BLK_MODE MPT_FIELD_LOC(381, 381) #define MPT_RSV0 MPT_FIELD_LOC(383, 382) -#define MPT_PA0 MPT_FIELD_LOC(441, 384) +#define MPT_PA0_L MPT_FIELD_LOC(415, 384) +#define MPT_PA0_H MPT_FIELD_LOC(441, 416) #define MPT_BOUND_VA MPT_FIELD_LOC(447, 442) -#define MPT_PA1 MPT_FIELD_LOC(505, 448) +#define MPT_PA1_L MPT_FIELD_LOC(479, 448) +#define MPT_PA1_H MPT_FIELD_LOC(505, 480) #define MPT_PERSIST_EN MPT_FIELD_LOC(506, 506) #define MPT_RSV2 MPT_FIELD_LOC(507, 507) #define MPT_PBL_BUF_PG_SZ MPT_FIELD_LOC(511, 508) @@ -887,48 +863,24 @@ struct hns_roce_v2_ud_send_wqe { u8 dgid[GID_LEN_V2]; }; -#define V2_UD_SEND_WQE_BYTE_4_OPCODE_S 0 -#define V2_UD_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0) - -#define V2_UD_SEND_WQE_BYTE_4_OWNER_S 7 - -#define V2_UD_SEND_WQE_BYTE_4_CQE_S 8 - -#define V2_UD_SEND_WQE_BYTE_4_SE_S 11 - -#define V2_UD_SEND_WQE_BYTE_16_PD_S 0 -#define V2_UD_SEND_WQE_BYTE_16_PD_M GENMASK(23, 0) - -#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S 24 -#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24) - -#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0 -#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0) - -#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_S 16 -#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_M GENMASK(31, 16) - -#define V2_UD_SEND_WQE_BYTE_32_DQPN_S 0 -#define V2_UD_SEND_WQE_BYTE_32_DQPN_M GENMASK(23, 0) - -#define V2_UD_SEND_WQE_BYTE_36_VLAN_S 0 -#define V2_UD_SEND_WQE_BYTE_36_VLAN_M GENMASK(15, 0) - -#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S 16 -#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M GENMASK(23, 16) - -#define V2_UD_SEND_WQE_BYTE_36_TCLASS_S 24 -#define V2_UD_SEND_WQE_BYTE_36_TCLASS_M GENMASK(31, 24) - -#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S 0 -#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M GENMASK(19, 0) - -#define V2_UD_SEND_WQE_BYTE_40_SL_S 20 -#define V2_UD_SEND_WQE_BYTE_40_SL_M GENMASK(23, 20) - -#define V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30 - -#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31 +#define UD_SEND_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_ud_send_wqe, h, l) + +#define UD_SEND_WQE_OPCODE UD_SEND_WQE_FIELD_LOC(4, 0) +#define UD_SEND_WQE_OWNER UD_SEND_WQE_FIELD_LOC(7, 7) +#define UD_SEND_WQE_CQE UD_SEND_WQE_FIELD_LOC(8, 8) +#define UD_SEND_WQE_SE UD_SEND_WQE_FIELD_LOC(11, 11) +#define UD_SEND_WQE_PD UD_SEND_WQE_FIELD_LOC(119, 96) +#define UD_SEND_WQE_SGE_NUM UD_SEND_WQE_FIELD_LOC(127, 120) +#define UD_SEND_WQE_MSG_START_SGE_IDX UD_SEND_WQE_FIELD_LOC(151, 128) +#define UD_SEND_WQE_UDPSPN UD_SEND_WQE_FIELD_LOC(191, 176) +#define UD_SEND_WQE_DQPN UD_SEND_WQE_FIELD_LOC(247, 224) +#define UD_SEND_WQE_VLAN UD_SEND_WQE_FIELD_LOC(271, 256) +#define UD_SEND_WQE_HOPLIMIT UD_SEND_WQE_FIELD_LOC(279, 272) +#define UD_SEND_WQE_TCLASS UD_SEND_WQE_FIELD_LOC(287, 
280) +#define UD_SEND_WQE_FLOW_LABEL UD_SEND_WQE_FIELD_LOC(307, 288) +#define UD_SEND_WQE_SL UD_SEND_WQE_FIELD_LOC(311, 308) +#define UD_SEND_WQE_VLAN_EN UD_SEND_WQE_FIELD_LOC(318, 318) +#define UD_SEND_WQE_LBI UD_SEND_WQE_FIELD_LOC(319, 319) struct hns_roce_v2_rc_send_wqe { __le32 byte_4; @@ -943,42 +895,23 @@ struct hns_roce_v2_rc_send_wqe { __le64 va; }; -#define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0 -#define V2_RC_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0) - -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S 5 -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M GENMASK(6, 5) - -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S 13 -#define V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M GENMASK(14, 13) - -#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S 15 -#define V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M GENMASK(30, 15) - -#define V2_RC_SEND_WQE_BYTE_4_OWNER_S 7 - -#define V2_RC_SEND_WQE_BYTE_4_CQE_S 8 - -#define V2_RC_SEND_WQE_BYTE_4_FENCE_S 9 - -#define V2_RC_SEND_WQE_BYTE_4_SO_S 10 - -#define V2_RC_SEND_WQE_BYTE_4_SE_S 11 - -#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12 - -#define V2_RC_SEND_WQE_BYTE_4_FLAG_S 31 - -#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0 -#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0) - -#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S 24 -#define V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24) - -#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0 -#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0) - -#define V2_RC_SEND_WQE_BYTE_20_INL_TYPE_S 31 +#define RC_SEND_WQE_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_v2_rc_send_wqe, h, l) + +#define RC_SEND_WQE_OPCODE RC_SEND_WQE_FIELD_LOC(4, 0) +#define RC_SEND_WQE_DB_SL_L RC_SEND_WQE_FIELD_LOC(6, 5) +#define RC_SEND_WQE_DB_SL_H RC_SEND_WQE_FIELD_LOC(14, 13) +#define RC_SEND_WQE_OWNER RC_SEND_WQE_FIELD_LOC(7, 7) +#define RC_SEND_WQE_CQE RC_SEND_WQE_FIELD_LOC(8, 8) +#define RC_SEND_WQE_FENCE RC_SEND_WQE_FIELD_LOC(9, 9) +#define RC_SEND_WQE_SO RC_SEND_WQE_FIELD_LOC(10, 10) +#define RC_SEND_WQE_SE RC_SEND_WQE_FIELD_LOC(11, 11) +#define RC_SEND_WQE_INLINE RC_SEND_WQE_FIELD_LOC(12, 12) +#define RC_SEND_WQE_WQE_INDEX RC_SEND_WQE_FIELD_LOC(30, 15) +#define RC_SEND_WQE_FLAG RC_SEND_WQE_FIELD_LOC(31, 31) +#define RC_SEND_WQE_XRC_SRQN RC_SEND_WQE_FIELD_LOC(119, 96) +#define RC_SEND_WQE_SGE_NUM RC_SEND_WQE_FIELD_LOC(127, 120) +#define RC_SEND_WQE_MSG_START_SGE_IDX RC_SEND_WQE_FIELD_LOC(151, 128) +#define RC_SEND_WQE_INL_TYPE RC_SEND_WQE_FIELD_LOC(159, 159) struct hns_roce_wqe_frmr_seg { __le32 pbl_size; @@ -1021,7 +954,10 @@ struct hns_roce_func_clear { __le32 rsv[4]; }; -#define FUNC_CLEAR_RST_FUN_DONE_S 0 +#define FUNC_CLEAR_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_func_clear, h, l) + +#define FUNC_CLEAR_RST_FUN_DONE FUNC_CLEAR_FIELD_LOC(32, 32) + /* Each physical function manages up to 248 virtual functions, it takes up to * 100ms for each function to execute clear. If an abnormal reset occurs, it is * executed twice at most, so it takes up to 249 * 2 * 100ms. 
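The comment above sizes the worst-case function-clear timeout: 248 VFs plus the PF itself gives 249 functions, each pass takes up to 100 ms, and an abnormal reset can force a second pass. A minimal sketch of that arithmetic (the macro names here are illustrative, not the driver's actual identifiers):

/* 248 VFs + the PF itself */
#define FUNC_CLEAR_MAX_FUNCS	(248 + 1)
/* each function needs up to 100 ms per clear pass */
#define FUNC_CLEAR_PASS_MS	100
/* an abnormal reset retries once, so at most two passes */
#define FUNC_CLEAR_MAX_PASSES	2
/* 249 * 2 * 100 = 49800 ms, just under 50 seconds */
#define FUNC_CLEAR_TIMEOUT_MS \
	(FUNC_CLEAR_MAX_FUNCS * FUNC_CLEAR_MAX_PASSES * FUNC_CLEAR_PASS_MS)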
@@ -1100,12 +1036,12 @@ struct hns_roce_vf_switch { __le32 resv3; }; -#define VF_SWITCH_DATA_FUN_ID_VF_ID_S 3 -#define VF_SWITCH_DATA_FUN_ID_VF_ID_M GENMASK(10, 3) +#define VF_SWITCH_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_vf_switch, h, l) -#define VF_SWITCH_DATA_CFG_ALW_LPBK_S 1 -#define VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S 2 -#define VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S 3 +#define VF_SWITCH_VF_ID VF_SWITCH_FIELD_LOC(42, 35) +#define VF_SWITCH_ALW_LPBK VF_SWITCH_FIELD_LOC(65, 65) +#define VF_SWITCH_ALW_LCL_LPBK VF_SWITCH_FIELD_LOC(66, 66) +#define VF_SWITCH_ALW_DST_OVRD VF_SWITCH_FIELD_LOC(67, 67) struct hns_roce_post_mbox { __le32 in_param_l; @@ -1168,11 +1104,10 @@ struct hns_roce_cfg_sgid_tb { __le32 vf_sgid_type_rsv; }; -#define CFG_SGID_TB_TABLE_IDX_S 0 -#define CFG_SGID_TB_TABLE_IDX_M GENMASK(7, 0) +#define SGID_TB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_sgid_tb, h, l) -#define CFG_SGID_TB_VF_SGID_TYPE_S 0 -#define CFG_SGID_TB_VF_SGID_TYPE_M GENMASK(1, 0) +#define CFG_SGID_TB_TABLE_IDX SGID_TB_FIELD_LOC(7, 0) +#define CFG_SGID_TB_VF_SGID_TYPE SGID_TB_FIELD_LOC(161, 160) struct hns_roce_cfg_smac_tb { __le32 tb_idx_rsv; @@ -1180,11 +1115,11 @@ struct hns_roce_cfg_smac_tb { __le32 vf_smac_h_rsv; __le32 rsv[3]; }; -#define CFG_SMAC_TB_IDX_S 0 -#define CFG_SMAC_TB_IDX_M GENMASK(7, 0) -#define CFG_SMAC_TB_VF_SMAC_H_S 0 -#define CFG_SMAC_TB_VF_SMAC_H_M GENMASK(15, 0) +#define SMAC_TB_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_smac_tb, h, l) + +#define CFG_SMAC_TB_IDX SMAC_TB_FIELD_LOC(7, 0) +#define CFG_SMAC_TB_VF_SMAC_H SMAC_TB_FIELD_LOC(79, 64) struct hns_roce_cfg_gmv_tb_a { __le32 vf_sgid_l; @@ -1195,16 +1130,11 @@ struct hns_roce_cfg_gmv_tb_a { __le32 resv; }; -#define CFG_GMV_TB_SGID_IDX_S 0 -#define CFG_GMV_TB_SGID_IDX_M GENMASK(7, 0) - -#define CFG_GMV_TB_VF_SGID_TYPE_S 0 -#define CFG_GMV_TB_VF_SGID_TYPE_M GENMASK(1, 0) +#define GMV_TB_A_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_gmv_tb_a, h, l) -#define CFG_GMV_TB_VF_VLAN_EN_S 2 - -#define CFG_GMV_TB_VF_VLAN_ID_S 16 -#define CFG_GMV_TB_VF_VLAN_ID_M GENMASK(27, 16) +#define GMV_TB_A_VF_SGID_TYPE GMV_TB_A_FIELD_LOC(129, 128) +#define GMV_TB_A_VF_VLAN_EN GMV_TB_A_FIELD_LOC(130, 130) +#define GMV_TB_A_VF_VLAN_ID GMV_TB_A_FIELD_LOC(155, 144) struct hns_roce_cfg_gmv_tb_b { __le32 vf_smac_l; @@ -1213,8 +1143,10 @@ struct hns_roce_cfg_gmv_tb_b { __le32 resv[3]; }; -#define CFG_GMV_TB_SMAC_H_S 0 -#define CFG_GMV_TB_SMAC_H_M GENMASK(15, 0) +#define GMV_TB_B_FIELD_LOC(h, l) FIELD_LOC(struct hns_roce_cfg_gmv_tb_b, h, l) + +#define GMV_TB_B_SMAC_H GMV_TB_B_FIELD_LOC(47, 32) +#define GMV_TB_B_SGID_IDX GMV_TB_B_FIELD_LOC(71, 64) #define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5 struct hns_roce_query_pf_caps_a { @@ -1266,29 +1198,17 @@ struct hns_roce_query_pf_caps_c { __le16 rq_depth; }; -#define V2_QUERY_PF_CAPS_C_NUM_PDS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_PDS_M GENMASK(19, 0) +#define PF_CAPS_C_FIELD_LOC(h, l) \ + FIELD_LOC(struct hns_roce_query_pf_caps_c, h, l) -#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_S 20 -#define V2_QUERY_PF_CAPS_C_CAP_FLAGS_M GENMASK(31, 20) - -#define V2_QUERY_PF_CAPS_C_NUM_CQS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_CQS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_C_MAX_GID_S 20 -#define V2_QUERY_PF_CAPS_C_MAX_GID_M GENMASK(28, 20) - -#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_S 0 -#define V2_QUERY_PF_CAPS_C_CQ_DEPTH_M GENMASK(22, 0) - -#define V2_QUERY_PF_CAPS_C_NUM_MRWS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_MRWS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_C_NUM_QPS_S 0 -#define V2_QUERY_PF_CAPS_C_NUM_QPS_M GENMASK(19, 0) - -#define 
V2_QUERY_PF_CAPS_C_MAX_ORD_S 20 -#define V2_QUERY_PF_CAPS_C_MAX_ORD_M GENMASK(27, 20) +#define PF_CAPS_C_NUM_PDS PF_CAPS_C_FIELD_LOC(19, 0) +#define PF_CAPS_C_CAP_FLAGS PF_CAPS_C_FIELD_LOC(31, 20) +#define PF_CAPS_C_NUM_CQS PF_CAPS_C_FIELD_LOC(51, 32) +#define PF_CAPS_C_MAX_GID PF_CAPS_C_FIELD_LOC(60, 52) +#define PF_CAPS_C_CQ_DEPTH PF_CAPS_C_FIELD_LOC(86, 64) +#define PF_CAPS_C_NUM_MRWS PF_CAPS_C_FIELD_LOC(115, 96) +#define PF_CAPS_C_NUM_QPS PF_CAPS_C_FIELD_LOC(147, 128) +#define PF_CAPS_C_MAX_ORD PF_CAPS_C_FIELD_LOC(155, 148) struct hns_roce_query_pf_caps_d { __le32 wq_hop_num_max_srqs; @@ -1299,20 +1219,26 @@ struct hns_roce_query_pf_caps_d { __le32 num_uars_rsv_pds; __le32 rsv_uars_rsv_qps; }; -#define V2_QUERY_PF_CAPS_D_NUM_SRQS_S 0 -#define V2_QUERY_PF_CAPS_D_NUM_SRQS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S 20 -#define V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M GENMASK(21, 20) - -#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S 22 -#define V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M GENMASK(23, 22) - -#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S 24 -#define V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M GENMASK(25, 24) -#define V2_QUERY_PF_CAPS_D_CONG_TYPE_S 26 -#define V2_QUERY_PF_CAPS_D_CONG_TYPE_M GENMASK(29, 26) +#define PF_CAPS_D_FIELD_LOC(h, l) \ + FIELD_LOC(struct hns_roce_query_pf_caps_d, h, l) + +#define PF_CAPS_D_NUM_SRQS PF_CAPS_D_FIELD_LOC(19, 0) +#define PF_CAPS_D_RQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(21, 20) +#define PF_CAPS_D_EX_SGE_HOP_NUM PF_CAPS_D_FIELD_LOC(23, 22) +#define PF_CAPS_D_SQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(25, 24) +#define PF_CAPS_D_CONG_TYPE PF_CAPS_D_FIELD_LOC(29, 26) +#define PF_CAPS_D_CEQ_DEPTH PF_CAPS_D_FIELD_LOC(85, 64) +#define PF_CAPS_D_NUM_CEQS PF_CAPS_D_FIELD_LOC(95, 86) +#define PF_CAPS_D_AEQ_DEPTH PF_CAPS_D_FIELD_LOC(117, 96) +#define PF_CAPS_D_AEQ_ARM_ST PF_CAPS_D_FIELD_LOC(119, 118) +#define PF_CAPS_D_CEQ_ARM_ST PF_CAPS_D_FIELD_LOC(121, 120) +#define PF_CAPS_D_RSV_PDS PF_CAPS_D_FIELD_LOC(147, 128) +#define PF_CAPS_D_NUM_UARS PF_CAPS_D_FIELD_LOC(155, 148) +#define PF_CAPS_D_RSV_QPS PF_CAPS_D_FIELD_LOC(179, 160) +#define PF_CAPS_D_RSV_UARS PF_CAPS_D_FIELD_LOC(187, 180) + +#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12 struct hns_roce_congestion_algorithm { u8 alg_sel; @@ -1321,33 +1247,6 @@ struct hns_roce_congestion_algorithm { u8 wnd_mode_sel; }; -#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S 0 -#define V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M GENMASK(21, 0) - -#define V2_QUERY_PF_CAPS_D_NUM_CEQS_S 22 -#define V2_QUERY_PF_CAPS_D_NUM_CEQS_M GENMASK(31, 22) - -#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S 0 -#define V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M GENMASK(21, 0) - -#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S 22 -#define V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M GENMASK(23, 22) - -#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S 24 -#define V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M GENMASK(25, 24) - -#define V2_QUERY_PF_CAPS_D_RSV_PDS_S 0 -#define V2_QUERY_PF_CAPS_D_RSV_PDS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_D_NUM_UARS_S 20 -#define V2_QUERY_PF_CAPS_D_NUM_UARS_M GENMASK(27, 20) - -#define V2_QUERY_PF_CAPS_D_RSV_QPS_S 0 -#define V2_QUERY_PF_CAPS_D_RSV_QPS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_D_RSV_UARS_S 20 -#define V2_QUERY_PF_CAPS_D_RSV_UARS_M GENMASK(27, 20) - struct hns_roce_query_pf_caps_e { __le32 chunk_size_shift_rsv_mrws; __le32 rsv_cqs; @@ -1359,20 +1258,14 @@ struct hns_roce_query_pf_caps_e { __le16 aeq_period; }; -#define V2_QUERY_PF_CAPS_E_RSV_MRWS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_MRWS_M GENMASK(19, 0) +#define PF_CAPS_E_FIELD_LOC(h, l) \ + FIELD_LOC(struct hns_roce_query_pf_caps_e, h, l) 
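Throughout these hunks, pairs of _S (shift) and _M (mask) macros are collapsed into single FIELD_LOC(high, low) descriptors that name a field by its absolute bit positions inside the structure and are read through hr_reg_read(). A minimal userspace sketch of that pattern (the kernel's actual hr_reg_* helpers in hns_roce_common.h take a struct type argument, operate on __le32 words, and differ in detail):

#include <stdint.h>
#include <stdio.h>

/* A field is identified by its absolute high/low bit positions within
 * an array of 32-bit words; the accessor derives the word index, the
 * shift, and the mask from those two numbers. Assumes a field never
 * crosses a word boundary, which holds for the definitions above.
 */
#define FIELD_LOC(h, l) (h), (l)

static uint32_t reg_read(const uint32_t *regs, unsigned int h, unsigned int l)
{
	uint32_t mask = (h - l == 31) ? ~0U : (1U << (h - l + 1)) - 1;

	return (regs[l / 32] >> (l % 32)) & mask;
}

/* mirrors PF_CAPS_C_NUM_CQS above: bits 51..32 = word 1, low 20 bits */
#define DEMO_NUM_CQS FIELD_LOC(51, 32)

int main(void)
{
	uint32_t caps[6] = { 0, 0xabcde, 0, 0, 0, 0 };

	printf("num_cqs = 0x%x\n", (unsigned int)reg_read(caps, DEMO_NUM_CQS));
	return 0;
}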
-#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S 20 -#define V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M GENMASK(31, 20) - -#define V2_QUERY_PF_CAPS_E_RSV_CQS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_CQS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_E_RSV_SRQS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_SRQS_M GENMASK(19, 0) - -#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_S 0 -#define V2_QUERY_PF_CAPS_E_RSV_LKEYS_M GENMASK(19, 0) +#define PF_CAPS_E_RSV_MRWS PF_CAPS_E_FIELD_LOC(19, 0) +#define PF_CAPS_E_CHUNK_SIZE_SHIFT PF_CAPS_E_FIELD_LOC(31, 20) +#define PF_CAPS_E_RSV_CQS PF_CAPS_E_FIELD_LOC(51, 32) +#define PF_CAPS_E_RSV_SRQS PF_CAPS_E_FIELD_LOC(83, 64) +#define PF_CAPS_E_RSV_LKEYS PF_CAPS_E_FIELD_LOC(115, 96) struct hns_roce_cmq_req { __le32 data[6]; @@ -1457,9 +1350,6 @@ struct hns_roce_dip { #define HNS_ROCE_EQ_INIT_CONS_IDX 0 #define HNS_ROCE_EQ_INIT_NXT_EQE_BA 0 -#define HNS_ROCE_V2_CEQ_CEQE_OWNER_S 31 -#define HNS_ROCE_V2_AEQ_AEQE_OWNER_S 31 - #define HNS_ROCE_V2_COMP_EQE_NUM 0x1000 #define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000 @@ -1516,18 +1406,6 @@ struct hns_roce_eq_context { #define EQC_NEX_EQE_BA_H EQC_FIELD_LOC(339, 320) #define EQC_EQE_SIZE EQC_FIELD_LOC(341, 340) -#define HNS_ROCE_V2_CEQE_COMP_CQN_S 0 -#define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0) - -#define HNS_ROCE_V2_AEQE_EVENT_TYPE_S 0 -#define HNS_ROCE_V2_AEQE_EVENT_TYPE_M GENMASK(7, 0) - -#define HNS_ROCE_V2_AEQE_SUB_TYPE_S 8 -#define HNS_ROCE_V2_AEQE_SUB_TYPE_M GENMASK(15, 8) - -#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0 -#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0) - #define MAX_SERVICE_LEVEL 0x7 struct hns_roce_wqe_atomic_seg { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c index 5a97b5a0b7be34040ad2888ea97f7c4dacdfdd01..f7a75a7cda7491e93f1aa6134facad61623f52a3 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c @@ -18,9 +18,8 @@ int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn, return PTR_ERR(mailbox); cq_context = mailbox->buf; - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0, - HNS_ROCE_CMD_QUERY_CQC, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_CQC, + cqn); if (ret) { dev_err(hr_dev->dev, "QUERY cqc cmd process error\n"); goto err_mailbox; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 8aa0af069042af86e0dafc5e32b1eda9f6876493..11f42cebfa40a6ba7598758f229004665fc3962f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -774,7 +774,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table, HEM_TYPE_CQC_TIMER, hr_dev->caps.cqc_timer_entry_sz, - hr_dev->caps.num_cqc_timer, 1); + hr_dev->caps.cqc_timer_bt_num, 1); if (ret) { dev_err(dev, "Failed to init CQC timer memory, aborting.\n"); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 44e0ee3b5b6c0b0f8fce10ef94fc18f9ee298a41..1e36ac383ea3042fcbb186d5a51aefbc0db91665 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -47,24 +47,6 @@ unsigned long key_to_hw_index(u32 key) return (key << 24) | (key >> 8); } -static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index) -{ - return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0, 
- HNS_ROCE_CMD_CREATE_MPT, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - -int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long mpt_index) -{ - return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0, - mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr) { struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida; @@ -144,7 +126,7 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, int ret; if (mr->enabled) { - ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT, key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1)); if (ret) @@ -166,10 +148,8 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, /* Allocate mailbox memory */ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) { - ret = PTR_ERR(mailbox); - return ret; - } + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); if (mr->type != MR_TYPE_FRMR) ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr); @@ -180,7 +160,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, goto err_page; } - ret = hns_roce_hw_create_mpt(hr_dev, mailbox, + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT, mtpt_idx & (hr_dev->caps.num_mtpts - 1)); if (ret) { dev_err(dev, "failed to create mpt, ret = %d.\n", ret); @@ -302,13 +282,13 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, return PTR_ERR(mailbox); mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1); - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0, - HNS_ROCE_CMD_QUERY_MPT, - HNS_ROCE_CMD_TIMEOUT_MSECS); + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT, + mtpt_idx); if (ret) goto free_cmd_mbox; - ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx); + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT, + mtpt_idx); if (ret) ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret); @@ -338,7 +318,8 @@ int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length, goto free_cmd_mbox; } - ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx); + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT, + mtpt_idx); if (ret) { ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret); goto free_cmd_mbox; @@ -356,12 +337,11 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); struct hns_roce_mr *mr = to_hr_mr(ibmr); - int ret = 0; hns_roce_mr_free(hr_dev, mr); kfree(mr); - return ret; + return 0; } struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, @@ -477,7 +457,7 @@ static void hns_roce_mw_free(struct hns_roce_dev *hr_dev, int ret; if (mw->enabled) { - ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT, key_to_hw_index(mw->rkey) & (hr_dev->caps.num_mtpts - 1)); if (ret) @@ -517,7 +497,7 @@ static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev, goto err_page; } - ret = hns_roce_hw_create_mpt(hr_dev, mailbox, + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT, mtpt_idx & (hr_dev->caps.num_mtpts - 1)); if (ret) { dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret); diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 
1099963db1b62c23552e04514ae3cd95623a1ff9..43530a7c8304d1377f40650aff5b7439e5b5c431 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -243,26 +243,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) return 0; } -enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) -{ - switch (state) { - case IB_QPS_RESET: - return HNS_ROCE_QP_STATE_RST; - case IB_QPS_INIT: - return HNS_ROCE_QP_STATE_INIT; - case IB_QPS_RTR: - return HNS_ROCE_QP_STATE_RTR; - case IB_QPS_RTS: - return HNS_ROCE_QP_STATE_RTS; - case IB_QPS_SQD: - return HNS_ROCE_QP_STATE_SQD; - case IB_QPS_ERR: - return HNS_ROCE_QP_STATE_ERR; - default: - return HNS_ROCE_QP_NUM_STATE; - } -} - static void add_qp_to_list(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_cq *send_cq, struct ib_cq *recv_cq) diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index 259444c0a6301a547e76843bb9a488fa14b5fff8..24a154d646304de3b2bd48bd416568da2324542f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -13,61 +13,40 @@ static int hns_roce_fill_cq(struct sk_buff *msg, struct hns_roce_v2_cq_context *context) { if (rdma_nl_put_driver_u32(msg, "state", - roce_get_field(context->byte_4_pg_ceqn, - V2_CQC_BYTE_4_ARM_ST_M, - V2_CQC_BYTE_4_ARM_ST_S))) + hr_reg_read(context, CQC_ARM_ST))) + goto err; if (rdma_nl_put_driver_u32(msg, "ceqn", - roce_get_field(context->byte_4_pg_ceqn, - V2_CQC_BYTE_4_CEQN_M, - V2_CQC_BYTE_4_CEQN_S))) + hr_reg_read(context, CQC_CEQN))) goto err; if (rdma_nl_put_driver_u32(msg, "cqn", - roce_get_field(context->byte_8_cqn, - V2_CQC_BYTE_8_CQN_M, - V2_CQC_BYTE_8_CQN_S))) + hr_reg_read(context, CQC_CQN))) goto err; if (rdma_nl_put_driver_u32(msg, "hopnum", - roce_get_field(context->byte_16_hop_addr, - V2_CQC_BYTE_16_CQE_HOP_NUM_M, - V2_CQC_BYTE_16_CQE_HOP_NUM_S))) + hr_reg_read(context, CQC_CQE_HOP_NUM))) goto err; - if (rdma_nl_put_driver_u32( - msg, "pi", - roce_get_field(context->byte_28_cq_pi, - V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M, - V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S))) + if (rdma_nl_put_driver_u32(msg, "pi", + hr_reg_read(context, CQC_CQ_PRODUCER_IDX))) goto err; - if (rdma_nl_put_driver_u32( - msg, "ci", - roce_get_field(context->byte_32_cq_ci, - V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M, - V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S))) + if (rdma_nl_put_driver_u32(msg, "ci", + hr_reg_read(context, CQC_CQ_CONSUMER_IDX))) goto err; - if (rdma_nl_put_driver_u32( - msg, "coalesce", - roce_get_field(context->byte_56_cqe_period_maxcnt, - V2_CQC_BYTE_56_CQ_MAX_CNT_M, - V2_CQC_BYTE_56_CQ_MAX_CNT_S))) + if (rdma_nl_put_driver_u32(msg, "coalesce", + hr_reg_read(context, CQC_CQ_MAX_CNT))) goto err; - if (rdma_nl_put_driver_u32( - msg, "period", - roce_get_field(context->byte_56_cqe_period_maxcnt, - V2_CQC_BYTE_56_CQ_PERIOD_M, - V2_CQC_BYTE_56_CQ_PERIOD_S))) + if (rdma_nl_put_driver_u32(msg, "period", + hr_reg_read(context, CQC_CQ_PERIOD))) goto err; if (rdma_nl_put_driver_u32(msg, "cnt", - roce_get_field(context->byte_52_cqe_cnt, - V2_CQC_BYTE_52_CQE_CNT_M, - V2_CQC_BYTE_52_CQE_CNT_S))) + hr_reg_read(context, CQC_CQE_CNT))) goto err; return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 21962e5472438534f3f360ee6699e5b756b2bb4f..f3e19c66283f96b55afeaf413e2d481198365427 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -59,58 +59,39 
@@ static void hns_roce_ib_srq_event(struct hns_roce_srq *srq, } } -static int hns_roce_hw_create_srq(struct hns_roce_dev *dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long srq_num) +static int alloc_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) { - return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0, - HNS_ROCE_CMD_CREATE_SRQ, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - -static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev, - struct hns_roce_cmd_mailbox *mailbox, - unsigned long srq_num) -{ - return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num, - mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ, - HNS_ROCE_CMD_TIMEOUT_MSECS); -} - -static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) -{ - struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida; - struct ib_device *ibdev = &hr_dev->ib_dev; - struct hns_roce_cmd_mailbox *mailbox; - int ret; int id; id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max, GFP_KERNEL); if (id < 0) { - ibdev_err(ibdev, "failed to alloc srq(%d).\n", id); + ibdev_err(&hr_dev->ib_dev, "failed to alloc srq(%d).\n", id); return -ENOMEM; } - srq->srqn = (unsigned long)id; - ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn); - if (ret) { - ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret); - goto err_out; - } + srq->srqn = id; - ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL)); - if (ret) { - ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret); - goto err_put; - } + return 0; +} + +static void free_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) +{ + ida_free(&hr_dev->srq_table.srq_ida.ida, (int)srq->srqn); +} + +static int hns_roce_create_srqc(struct hns_roce_dev *hr_dev, + struct hns_roce_srq *srq) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_cmd_mailbox *mailbox; + int ret; mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR_OR_NULL(mailbox)) { + if (IS_ERR(mailbox)) { ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n"); - ret = -ENOMEM; - goto err_xa; + return PTR_ERR(mailbox); } ret = hr_dev->hw->write_srqc(srq, mailbox->buf); @@ -119,24 +100,44 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) goto err_mbox; } - ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn); - if (ret) { + ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_SRQ, + srq->srqn); + if (ret) ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret); - goto err_mbox; - } +err_mbox: hns_roce_free_cmd_mailbox(hr_dev, mailbox); + return ret; +} + +static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) +{ + struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; + struct ib_device *ibdev = &hr_dev->ib_dev; + int ret; + + ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn); + if (ret) { + ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret); + return ret; + } + + ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL)); + if (ret) { + ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret); + goto err_put; + } + + ret = hns_roce_create_srqc(hr_dev, srq); + if (ret) + goto err_xa; return 0; -err_mbox: - hns_roce_free_cmd_mailbox(hr_dev, mailbox); err_xa: xa_erase(&srq_table->xa, srq->srqn); err_put: hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); -err_out: - ida_free(&srq_ida->ida, id); return ret; } @@ -146,7 +147,8 @@ static void free_srqc(struct 
hns_roce_dev *hr_dev, struct hns_roce_srq *srq) struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; int ret; - ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn); + ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_SRQ, + srq->srqn); if (ret) dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n", ret, srq->srqn); @@ -158,7 +160,6 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) wait_for_completion(&srq->free); hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn); - ida_free(&srq_table->srq_ida.ida, (int)srq->srqn); } static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, @@ -406,10 +407,14 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, if (ret) return ret; - ret = alloc_srqc(hr_dev, srq); + ret = alloc_srqn(hr_dev, srq); if (ret) goto err_srq_buf; + ret = alloc_srqc(hr_dev, srq); + if (ret) + goto err_srqn; + if (udata) { resp.srqn = srq->srqn; if (ib_copy_to_udata(udata, &resp, @@ -428,6 +433,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, err_srqc: free_srqc(hr_dev, srq); +err_srqn: + free_srqn(hr_dev, srq); err_srq_buf: free_srq_buf(hr_dev, srq); @@ -440,6 +447,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) struct hns_roce_srq *srq = to_hr_srq(ibsrq); free_srqc(hr_dev, srq); + free_srqn(hr_dev, srq); free_srq_buf(hr_dev, srq); return 0; } diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 7b11aff8a5ea7cb0d37f1d09149468b10b4882d6..05c7200751e50a834ac948a9c690b0a9b7cf68f5 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -3273,7 +3273,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: ew = kmalloc(sizeof *ew, GFP_ATOMIC); if (!ew) - break; + return; INIT_WORK(&ew->work, handle_port_mgmt_change_event); memcpy(&ew->ib_eqe, eqe, sizeof *eqe); diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 343e6709d9fc3878e994b2a4962c619b821a78e5..2f053f48f1bebcb0dba263e5a5df20501cc83cd8 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1792,8 +1792,10 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table, key_level2, obj_event, GFP_KERNEL); - if (err) + if (err) { + kfree(obj_event); return err; + } INIT_LIST_HEAD(&obj_event->obj_sub_list); } diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 19346693c1da4a3b6068b6aeb3de2feda3227e89..d827a4e44c946647d21ca908ed23e1e2116d7d66 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -531,8 +531,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) spin_lock_irq(&ent->lock); if (ent->disabled) goto out; - if (need_delay) + if (need_delay) { queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); + goto out; + } remove_cache_mr_locked(ent); queue_adjust_cache_locked(ent); } @@ -575,6 +577,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, ent = &cache->ent[entry]; spin_lock_irq(&ent->lock); if (list_empty(&ent->head)) { + queue_adjust_cache_locked(ent); + ent->miss++; spin_unlock_irq(&ent->lock); mr = create_cache_mr(ent); if (IS_ERR(mr)) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 16d52836518943775e4d49b68380345929621425..eeb87f31cd2523f71bf5f141fa7f7c5d8ab9a909 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1918,6 +1918,7 @@ 
static int qedr_create_user_qp(struct qedr_dev *dev, /* db offset was calculated in copy_qp_uresp, now set in the user q */ if (qedr_qp_has_sq(qp)) { qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset; + qp->sq.max_wr = attrs->cap.max_send_wr; rc = qedr_db_recovery_add(dev, qp->usq.db_addr, &qp->usq.db_rec_data->db_data, DB_REC_WIDTH_32B, @@ -1928,6 +1929,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, if (qedr_qp_has_rq(qp)) { qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset; + qp->rq.max_wr = attrs->cap.max_recv_wr; rc = qedr_db_recovery_add(dev, qp->urq.db_addr, &qp->urq.db_rec_data->db_data, DB_REC_WIDTH_32B, diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index ee48befc8978619b477cfa7325d66be1f622773f..d8d52a00a1be9328dffd3c666d1dd6e12a05d9e1 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -3124,6 +3124,8 @@ void rvt_ruc_loopback(struct rvt_qp *sqp) case IB_WR_ATOMIC_FETCH_AND_ADD: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) goto inv_err; + if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1))) + goto inv_err; if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), wqe->atomic_wr.remote_addr, wqe->atomic_wr.rkey, @@ -3239,7 +3241,11 @@ void rvt_ruc_loopback(struct rvt_qp *sqp) spin_lock_irqsave(&sqp->s_lock, flags); rvt_send_complete(sqp, wqe, send_status); if (sqp->ibqp.qp_type == IB_QPT_RC) { - int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + int lastwqe; + + spin_lock(&sqp->r_lock); + lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + spin_unlock(&sqp->r_lock); sqp->s_flags &= ~RVT_S_BUSY; spin_unlock_irqrestore(&sqp->s_lock, flags); diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c index 0cb4b01fd9101e235bdb39052e5dc17c7b1ecd83..66ffb516bdaf0c1625b50d15d8686b0e9d9527c7 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.c +++ b/drivers/infiniband/sw/rxe/rxe_opcode.c @@ -110,7 +110,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { } }, [IB_OPCODE_RC_SEND_MIDDLE] = { - .name = "IB_OPCODE_RC_SEND_MIDDLE]", + .name = "IB_OPCODE_RC_SEND_MIDDLE", .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK | RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 368959ae9a8cc382bb0bf89f66bce731ddbf3f24..df03d84c6868ace3d5fe68c47e62cdd33e8b44d2 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp) return &qp->orq[qp->orq_get % qp->attrs.orq_size]; } -static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp) -{ - return &qp->orq[qp->orq_put % qp->attrs.orq_size]; -} - static inline struct siw_sqe *orq_get_free(struct siw_qp *qp) { - struct siw_sqe *orq_e = orq_get_tail(qp); + struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size]; if (READ_ONCE(orq_e->flags) == 0) return orq_e; diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 60116f20653c776c3de38cbb3c954d9bdffbb112..875ea6f1b04a29034b6b41f8bfedaefc01af28c6 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) spin_lock_irqsave(&qp->orq_lock, flags); - rreq = orq_get_current(qp); - /* free current orq entry */ + rreq = orq_get_current(qp); WRITE_ONCE(rreq->flags, 0); + qp->orq_get++; + if 
(qp->tx_ctx.orq_fence) { if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) { pr_warn("siw: [QP %u]: fence resume: bad status %d\n", @@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) rv = -EPROTO; goto out; } - /* resume SQ processing */ + /* resume SQ processing, if possible */ if (tx_waiting->sqe.opcode == SIW_OP_READ || tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) { - rreq = orq_get_tail(qp); + + /* SQ processing was stopped because of a full ORQ */ + rreq = orq_get_free(qp); if (unlikely(!rreq)) { pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp)); rv = -EPROTO; @@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp) resume_tx = 1; } else if (siw_orq_empty(qp)) { + /* + * SQ processing was stopped by fenced work request. + * Resume since all previous Read's are now completed. + */ qp->tx_ctx.orq_fence = 0; resume_tx = 1; - } else { - pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n", - qp_id(qp), qp->orq_get, qp->orq_put); - rv = -EPROTO; } } - qp->orq_get++; out: spin_unlock_irqrestore(&qp->orq_lock, flags); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 3690e28cc7ea2a1917971c4c15c9e4e188ba6be1..4884b122e41373090d6096c85e05f2a3a32e6718 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -499,6 +499,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, iser_conn->iscsi_conn = conn; out: + iscsi_put_endpoint(ep); mutex_unlock(&iser_conn->state_mutex); return error; } @@ -978,16 +979,22 @@ static struct scsi_host_template iscsi_iser_sht = { .track_queue_depth = 1, }; +static struct iscsi_transport_expand iscsi_iser_expand = { + .unbind_conn = iscsi_conn_unbind, +}; + static struct iscsi_transport iscsi_iser_transport = { .owner = THIS_MODULE, .name = "iser", - .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_TEXT_NEGO, + .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_TEXT_NEGO + | CAP_OPS_EXPAND, /* session management */ .create_session = iscsi_iser_session_create, .destroy_session = iscsi_iser_session_destroy, /* connection management */ .create_conn = iscsi_iser_conn_create, .bind_conn = iscsi_iser_conn_bind, + .ops_expand = &iscsi_iser_expand, .destroy_conn = iscsi_conn_teardown, .attr_is_visible = iser_attr_is_visible, .set_param = iscsi_iser_set_param, diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index 46fad202a380e6043a0d94066827ccf099e953a3..13634eda833de3d2729fba2b9ada55c721276da9 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -1328,6 +1328,12 @@ static int alloc_permits(struct rtrs_clt *clt) static void free_permits(struct rtrs_clt *clt) { + if (clt->permits_map) { + size_t sz = clt->queue_depth; + + wait_event(clt->permits_wait, + find_first_bit(clt->permits_map, sz) >= sz); + } kfree(clt->permits_map); clt->permits_map = NULL; kfree(clt->permits); @@ -2540,6 +2546,8 @@ static void rtrs_clt_dev_release(struct device *dev) { struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); + mutex_destroy(&clt->paths_ev_mutex); + mutex_destroy(&clt->paths_mutex); kfree(clt); } @@ -2571,6 +2579,8 @@ static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, return ERR_PTR(-ENOMEM); } + clt->dev.class = rtrs_clt_dev_class; + clt->dev.release = rtrs_clt_dev_release; uuid_gen(&clt->paths_uuid); INIT_LIST_HEAD_RCU(&clt->paths_list); clt->paths_num = paths_num; @@ -2588,64 +2598,51 @@ static struct rtrs_clt 
*alloc_clt(const char *sessname, size_t paths_num, init_waitqueue_head(&clt->permits_wait); mutex_init(&clt->paths_ev_mutex); mutex_init(&clt->paths_mutex); + device_initialize(&clt->dev); - clt->dev.class = rtrs_clt_dev_class; - clt->dev.release = rtrs_clt_dev_release; err = dev_set_name(&clt->dev, "%s", sessname); if (err) - goto err; + goto err_put; + /* * Suppress user space notification until * sysfs files are created */ dev_set_uevent_suppress(&clt->dev, true); - err = device_register(&clt->dev); - if (err) { - put_device(&clt->dev); - goto err; - } + err = device_add(&clt->dev); + if (err) + goto err_put; clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj); if (!clt->kobj_paths) { err = -ENOMEM; - goto err_dev; + goto err_del; } err = rtrs_clt_create_sysfs_root_files(clt); if (err) { kobject_del(clt->kobj_paths); kobject_put(clt->kobj_paths); - goto err_dev; + goto err_del; } dev_set_uevent_suppress(&clt->dev, false); kobject_uevent(&clt->dev.kobj, KOBJ_ADD); return clt; -err_dev: - device_unregister(&clt->dev); -err: +err_del: + device_del(&clt->dev); +err_put: free_percpu(clt->pcpu_path); - kfree(clt); + put_device(&clt->dev); return ERR_PTR(err); } -static void wait_for_inflight_permits(struct rtrs_clt *clt) -{ - if (clt->permits_map) { - size_t sz = clt->queue_depth; - - wait_event(clt->permits_wait, - find_first_bit(clt->permits_map, sz) >= sz); - } -} - static void free_clt(struct rtrs_clt *clt) { - wait_for_inflight_permits(clt); - free_permits(clt); free_percpu(clt->pcpu_path); - mutex_destroy(&clt->paths_ev_mutex); - mutex_destroy(&clt->paths_mutex); - /* release callback will free clt in last put */ + + /* + * release callback will free clt and destroy mutexes in last put + */ device_unregister(&clt->dev); } @@ -2761,6 +2758,7 @@ void rtrs_clt_close(struct rtrs_clt *clt) rtrs_clt_destroy_sess_files(sess, NULL); kobject_put(&sess->kobj); } + free_permits(clt); free_clt(clt); } EXPORT_SYMBOL(rtrs_clt_close); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 86d5c4c92b363c4fc6fb9832ca7da626496a7200..b4ccb333a834201e0e67cac4337383293cacd780 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -4045,9 +4045,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data) spin_unlock(&host->target_lock); /* - * Wait for tl_err and target port removal tasks. + * srp_queue_remove_work() queues a call to + * srp_remove_target(). The latter function cancels + * target->tl_err_work so waiting for the remove works to + * finish is sufficient. 
*/ - flush_workqueue(system_long_wq); flush_workqueue(srp_remove_wq); kfree(host); diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 9f60f1559e49926b161b2a5b472e30445f843c59..3f7a5ff17a9a3c23405b19aafbce729d5972fa42 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -556,7 +556,7 @@ config KEYBOARD_PMIC8XXX config KEYBOARD_SAMSUNG tristate "Samsung keypad support" - depends on HAVE_CLK + depends on HAS_IOMEM && HAVE_CLK select INPUT_MATRIXKMAP help Say Y here if you want to use the keypad on your Samsung mobile diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 11a9ee32c98cc89ab58d981162c1c7f0f1fd7219..6f59c8b245f240e19922c05e870ae7b735144f4c 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -153,55 +153,21 @@ static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count, return 0; } -static int elan_enable_power(struct elan_tp_data *data) +static int elan_set_power(struct elan_tp_data *data, bool on) { int repeat = ETP_RETRY_COUNT; int error; - error = regulator_enable(data->vcc); - if (error) { - dev_err(&data->client->dev, - "failed to enable regulator: %d\n", error); - return error; - } - do { - error = data->ops->power_control(data->client, true); + error = data->ops->power_control(data->client, on); if (error >= 0) return 0; msleep(30); } while (--repeat > 0); - dev_err(&data->client->dev, "failed to enable power: %d\n", error); - return error; -} - -static int elan_disable_power(struct elan_tp_data *data) -{ - int repeat = ETP_RETRY_COUNT; - int error; - - do { - error = data->ops->power_control(data->client, false); - if (!error) { - error = regulator_disable(data->vcc); - if (error) { - dev_err(&data->client->dev, - "failed to disable regulator: %d\n", - error); - /* Attempt to power the chip back up */ - data->ops->power_control(data->client, true); - break; - } - - return 0; - } - - msleep(30); - } while (--repeat > 0); - - dev_err(&data->client->dev, "failed to disable power: %d\n", error); + dev_err(&data->client->dev, "failed to set power %s: %d\n", + on ? 
"on" : "off", error); return error; } @@ -1361,9 +1327,19 @@ static int __maybe_unused elan_suspend(struct device *dev) /* Enable wake from IRQ */ data->irq_wake = (enable_irq_wake(client->irq) == 0); } else { - ret = elan_disable_power(data); + ret = elan_set_power(data, false); + if (ret) + goto err; + + ret = regulator_disable(data->vcc); + if (ret) { + dev_err(dev, "error %d disabling regulator\n", ret); + /* Attempt to power the chip back up */ + elan_set_power(data, true); + } } +err: mutex_unlock(&data->sysfs_mutex); return ret; } @@ -1374,12 +1350,18 @@ static int __maybe_unused elan_resume(struct device *dev) struct elan_tp_data *data = i2c_get_clientdata(client); int error; - if (device_may_wakeup(dev) && data->irq_wake) { + if (!device_may_wakeup(dev)) { + error = regulator_enable(data->vcc); + if (error) { + dev_err(dev, "error %d enabling regulator\n", error); + goto err; + } + } else if (data->irq_wake) { disable_irq_wake(client->irq); data->irq_wake = false; } - error = elan_enable_power(data); + error = elan_set_power(data, true); if (error) { dev_err(dev, "power up when resuming failed: %d\n", error); goto err; diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c index ecdeca147ed717c77615f89d9b3bf9126eae8669..4408245b61d2c6707bf136fc0d82a7e11795916a 100644 --- a/drivers/input/serio/ambakmi.c +++ b/drivers/input/serio/ambakmi.c @@ -159,7 +159,7 @@ static int amba_kmi_probe(struct amba_device *dev, return ret; } -static int amba_kmi_remove(struct amba_device *dev) +static void amba_kmi_remove(struct amba_device *dev) { struct amba_kmi_port *kmi = amba_get_drvdata(dev); @@ -168,7 +168,6 @@ static int amba_kmi_remove(struct amba_device *dev) iounmap(kmi->base); kfree(kmi); amba_release_regions(dev); - return 0; } static int __maybe_unused amba_kmi_resume(struct device *dev) diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index e08b0ef078e8198474120972a656b09a0150ee09..8afeefcea67bb1ad498569a2c8b8a3b78f5285e5 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1801,15 +1801,13 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0); input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); - /* Verify that a device really has an endpoint */ - if (intf->cur_altsetting->desc.bNumEndpoints < 1) { + err = usb_find_common_endpoints(intf->cur_altsetting, + NULL, NULL, &endpoint, NULL); + if (err) { dev_err(&intf->dev, - "interface has %d endpoints, but must have minimum 1\n", - intf->cur_altsetting->desc.bNumEndpoints); - err = -EINVAL; + "interface has no int in endpoints, but must have minimum 1\n"); goto fail3; } - endpoint = &intf->cur_altsetting->endpoint[0].desc; /* Go set up our URB, which is called when the tablet receives * input. 
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c index 6df6f07f1ac66a036737e1957a5ed8007cd7f2bd..17b10b81c71319947ff95542cbdc794fb68a1112 100644 --- a/drivers/input/touchscreen/zinitix.c +++ b/drivers/input/touchscreen/zinitix.c @@ -135,7 +135,7 @@ struct point_coord { struct touch_event { __le16 status; - u8 finger_cnt; + u8 finger_mask; u8 time_stamp; struct point_coord point_coord[MAX_SUPPORTED_FINGER_NUM]; }; @@ -311,11 +311,32 @@ static int zinitix_send_power_on_sequence(struct bt541_ts_data *bt541) static void zinitix_report_finger(struct bt541_ts_data *bt541, int slot, const struct point_coord *p) { + u16 x, y; + + if (unlikely(!(p->sub_status & + (SUB_BIT_UP | SUB_BIT_DOWN | SUB_BIT_MOVE)))) { + dev_dbg(&bt541->client->dev, "unknown finger event %#02x\n", + p->sub_status); + return; + } + + x = le16_to_cpu(p->x); + y = le16_to_cpu(p->y); + input_mt_slot(bt541->input_dev, slot); - input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, true); - touchscreen_report_pos(bt541->input_dev, &bt541->prop, - le16_to_cpu(p->x), le16_to_cpu(p->y), true); - input_report_abs(bt541->input_dev, ABS_MT_TOUCH_MAJOR, p->width); + if (input_mt_report_slot_state(bt541->input_dev, MT_TOOL_FINGER, + !(p->sub_status & SUB_BIT_UP))) { + touchscreen_report_pos(bt541->input_dev, + &bt541->prop, x, y, true); + input_report_abs(bt541->input_dev, + ABS_MT_TOUCH_MAJOR, p->width); + dev_dbg(&bt541->client->dev, "finger %d %s (%u, %u)\n", + slot, p->sub_status & SUB_BIT_DOWN ? "down" : "move", + x, y); + } else { + dev_dbg(&bt541->client->dev, "finger %d up (%u, %u)\n", + slot, x, y); + } } static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) @@ -323,6 +344,7 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) struct bt541_ts_data *bt541 = bt541_handler; struct i2c_client *client = bt541->client; struct touch_event touch_event; + unsigned long finger_mask; int error; int i; @@ -335,10 +357,14 @@ static irqreturn_t zinitix_ts_irq_handler(int irq, void *bt541_handler) goto out; } - for (i = 0; i < MAX_SUPPORTED_FINGER_NUM; i++) - if (touch_event.point_coord[i].sub_status & SUB_BIT_EXIST) - zinitix_report_finger(bt541, i, - &touch_event.point_coord[i]); + finger_mask = touch_event.finger_mask; + for_each_set_bit(i, &finger_mask, MAX_SUPPORTED_FINGER_NUM) { + const struct point_coord *p = &touch_event.point_coord[i]; + + /* Only process contacts that are actually reported */ + if (p->sub_status & SUB_BIT_EXIST) + zinitix_report_finger(bt541, i, p); + } input_mt_sync_frame(bt541->input_dev); input_sync(bt541->input_dev); diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index b4adab69856323b3f9158720dac5dc673249974e..0c40d22409f232f46becc79a25891f1cb42df70c 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -17,6 +17,7 @@ extern int amd_iommu_init_passthrough(void); extern irqreturn_t amd_iommu_int_thread(int irq, void *data); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); extern void amd_iommu_apply_erratum_63(u16 devid); +extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu); extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); extern int amd_iommu_init_devices(void); extern void amd_iommu_uninit_devices(void); diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 33446c9d3bac81cc4d7c3c66165be776193376e5..690c5976575c68ea0398ebb8ae900b160d3ef2bb 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ 
b/drivers/iommu/amd/amd_iommu_types.h @@ -109,6 +109,7 @@ #define PASID_MASK 0x0000ffff /* MMIO status bits */ +#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK (1 << 0) #define MMIO_STATUS_EVT_INT_MASK (1 << 1) #define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2) #define MMIO_STATUS_PPR_INT_MASK (1 << 6) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 28de889aa516408a9b398449beda05706b0a8749..6eaefc9e7b3d60f6ef2bb0ab629466cc66c3f3d6 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -655,6 +656,16 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu) return iommu->cmd_buf ? 0 : -ENOMEM; } +/* + * This function restarts event logging in case the IOMMU experienced + * an event log buffer overflow. + */ +void amd_iommu_restart_event_logging(struct amd_iommu *iommu) +{ + iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); + iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); +} + /* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. @@ -805,16 +816,27 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu) { #ifdef CONFIG_IRQ_REMAP u32 status, i; + u64 entry; if (!iommu->ga_log) return -EINVAL; - status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); - /* Check if already running */ - if (status & (MMIO_STATUS_GALOG_RUN_MASK)) + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK))) return 0; + entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; + memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, + &entry, sizeof(entry)); + entry = (iommu_virt_to_phys(iommu->ga_log_tail) & + (BIT_ULL(52)-1)) & ~7ULL; + memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, + &entry, sizeof(entry)); + writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); + writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); + + iommu_feature_enable(iommu, CONTROL_GAINT_EN); iommu_feature_enable(iommu, CONTROL_GALOG_EN); @@ -822,19 +844,18 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu) status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); if (status & (MMIO_STATUS_GALOG_RUN_MASK)) break; + udelay(10); } - if (i >= LOOP_TIMEOUT) + if (WARN_ON(i >= LOOP_TIMEOUT)) return -EINVAL; #endif /* CONFIG_IRQ_REMAP */ return 0; } -#ifdef CONFIG_IRQ_REMAP static int iommu_init_ga_log(struct amd_iommu *iommu) { - u64 entry; - +#ifdef CONFIG_IRQ_REMAP if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) return 0; @@ -848,32 +869,13 @@ static int iommu_init_ga_log(struct amd_iommu *iommu) if (!iommu->ga_log_tail) goto err_out; - entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; - memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, - &entry, sizeof(entry)); - entry = (iommu_virt_to_phys(iommu->ga_log_tail) & - (BIT_ULL(52)-1)) & ~7ULL; - memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, - &entry, sizeof(entry)); - writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); - return 0; err_out: free_ga_log(iommu); return -EINVAL; -} -#endif /* CONFIG_IRQ_REMAP */ - -static int iommu_init_ga(struct amd_iommu *iommu) -{ - int ret = 0; - -#ifdef CONFIG_IRQ_REMAP - ret = iommu_init_ga_log(iommu); +#else + return 0; #endif /* CONFIG_IRQ_REMAP */ - - return ret; } static int __init alloc_cwwb_sem(struct amd_iommu *iommu) @@ -1860,7 +1862,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) if (iommu_feature(iommu, FEATURE_PPR) 
&& alloc_ppr_log(iommu)) return -ENOMEM; - ret = iommu_init_ga(iommu); + ret = iommu_init_ga_log(iommu); if (ret) return ret; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 5f1195791cb18a30ba695bd983f919e5e0cf2e97..200cf5da5e0ad1c9ec3acf34fe06566d5e1f14dd 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -813,7 +813,8 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { } #endif /* !CONFIG_IRQ_REMAP */ #define AMD_IOMMU_INT_MASK \ - (MMIO_STATUS_EVT_INT_MASK | \ + (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \ + MMIO_STATUS_EVT_INT_MASK | \ MMIO_STATUS_PPR_INT_MASK | \ MMIO_STATUS_GALOG_INT_MASK) @@ -823,7 +824,7 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); while (status & AMD_IOMMU_INT_MASK) { - /* Enable EVT and PPR and GA interrupts again */ + /* Enable interrupt sources again */ writel(AMD_IOMMU_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET); @@ -844,6 +845,11 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) } #endif + if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) { + pr_info_ratelimited("IOMMU event log overflow\n"); + amd_iommu_restart_event_logging(iommu); + } + /* * Hardware bug: ERBT1312 * When re-enabling interrupt (by writing 1 diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index f6868511ad0151dce9299fb1555526f901000a40..b22d0187ea8a31685adde0f20ad456c645984bd2 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1852,6 +1852,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev) dev_info(smmu->dev, "\t0x%016llx\n", (unsigned long long)evt[i]); + cond_resched(); } /* @@ -3533,7 +3534,7 @@ static int arm_smmu_switch_dirty_log(struct iommu_domain *domain, bool enable, if (!(smmu->features & ARM_SMMU_FEAT_HD)) return -ENODEV; - if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) + if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) return -EINVAL; if (enable) { @@ -3574,7 +3575,7 @@ static int arm_smmu_sync_dirty_log(struct iommu_domain *domain, if (!(smmu->features & ARM_SMMU_FEAT_HD)) return -ENODEV; - if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) + if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) return -EINVAL; if (!ops || !ops->sync_dirty_log) { @@ -3603,7 +3604,7 @@ static int arm_smmu_clear_dirty_log(struct iommu_domain *domain, if (!(smmu->features & ARM_SMMU_FEAT_HD)) return -ENODEV; - if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) + if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) return -EINVAL; if (!ops || !ops->clear_dirty_log) { diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index aedaae4630bc8117ed9432daf2d7ecaffd28c773..b853888774e65511ae957b542e30139059c09c3e 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -576,9 +576,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) fn, &intel_ir_domain_ops, iommu); if (!iommu->ir_domain) { - irq_domain_free_fwnode(fn); pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); - goto out_free_bitmap; + goto out_free_fwnode; } iommu->ir_msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, @@ -602,7 +601,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) if (dmar_enable_qi(iommu)) { pr_err("Failed to enable queued invalidation\n"); - goto out_free_bitmap; + goto out_free_ir_domain; } } @@ -626,6 +625,14 @@ static int 
intel_setup_irq_remapping(struct intel_iommu *iommu) return 0; +out_free_ir_domain: + if (iommu->ir_msi_domain) + irq_domain_remove(iommu->ir_msi_domain); + iommu->ir_msi_domain = NULL; + irq_domain_remove(iommu->ir_domain); + iommu->ir_domain = NULL; +out_free_fwnode: + irq_domain_free_fwnode(fn); out_free_bitmap: bitmap_free(bitmap); out_free_pages: diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 3cf72c100add918b356651994d56a9693afa617a..701efdbdc12b34ffc070a29988f5d91274396eb3 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -242,13 +242,17 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size)); else if (lvl == 2) table = kmem_cache_zalloc(data->l2_tables, gfp); + + if (!table) + return NULL; + phys = virt_to_phys(table); if (phys != (arm_v7s_iopte)phys) { /* Doesn't fit in PTE */ dev_err(dev, "Page table does not fit in PTE: %pa", &phys); goto out_free; } - if (table && !cfg->coherent_walk) { + if (!cfg->coherent_walk) { dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto out_free; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 6c1280215b314c952e69f56068094865a7317815..0969224aff7b5c01e53c10e4b3fda9b984645171 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -322,11 +322,12 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, arm_lpae_iopte *ptep, arm_lpae_iopte curr, - struct io_pgtable_cfg *cfg) + struct arm_lpae_io_pgtable *data) { arm_lpae_iopte old, new; + struct io_pgtable_cfg *cfg = &data->iop.cfg; - new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; + new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE; if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) new |= ARM_LPAE_PTE_NSTABLE; @@ -377,7 +378,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, if (!cptep) return -ENOMEM; - pte = arm_lpae_install_table(cptep, ptep, 0, cfg); + pte = arm_lpae_install_table(cptep, ptep, 0, data); if (pte) __arm_lpae_free_pages(cptep, tblsz, cfg); } else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) { @@ -575,7 +576,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]); } - pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg); + pte = arm_lpae_install_table(tablep, ptep, blk_pte, data); if (pte != blk_pte) { __arm_lpae_free_pages(tablep, tablesz, cfg); /* @@ -738,7 +739,7 @@ static size_t arm_lpae_do_split_blk(struct arm_lpae_io_pgtable *data, io_pgtable_tlb_flush_walk(&data->iop, iova, size, size); } /* Race does not exist */ - pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg); + pte = arm_lpae_install_table(tablep, ptep, blk_pte, data); /* Have splited it into page? 
*/ if (lvl == (ARM_LPAE_MAX_LEVELS - 1)) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 25b3b8386ca9df8499282d0647d9c8755c18e57f..97953fa276301bcd8504f54cfbbf1dc38813b5ce 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -198,9 +198,14 @@ static struct dev_iommu *dev_iommu_get(struct device *dev) static void dev_iommu_free(struct device *dev) { - iommu_fwspec_free(dev); - kfree(dev->iommu); + struct dev_iommu *param = dev->iommu; + dev->iommu = NULL; + if (param->fwspec) { + fwnode_handle_put(param->fwspec->iommu_fwnode); + kfree(param->fwspec); + } + kfree(param); } static int __iommu_probe_device(struct device *dev, struct list_head *group_list) @@ -1079,39 +1084,6 @@ int iommu_group_unregister_notifier(struct iommu_group *group, } EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier); -static void iommu_dev_fault_timer_fn(struct timer_list *t) -{ - struct iommu_fault_param *fparam = from_timer(fparam, t, timer); - struct iommu_fault_event *evt; - struct iommu_fault_page_request *prm; - - u64 now; - - now = get_jiffies_64(); - - /* The goal is to ensure driver or guest page fault handler(via vfio) - * send page response on time. Otherwise, limited queue resources - * may be occupied by some irresponsive guests or drivers. - * When per device pending fault list is not empty, we periodically checks - * if any anticipated page response time has expired. - * - * TODO: - * We could do the following if response time expires: - * 1. send page response code FAILURE to all pending PRQ - * 2. inform device driver or vfio - * 3. drain in-flight page requests and responses for this device - * 4. clear pending fault list such that driver can unregister fault - * handler(otherwise blocked when pending faults are present). - */ - list_for_each_entry(evt, &fparam->faults, list) { - prm = &evt->fault.prm; - if (time_after64(now, evt->expire)) - pr_err("Page response time expired!, pasid %d gid %d exp %llu now %llu\n", - prm->pasid, prm->grpid, evt->expire, now); - } - mod_timer(t, now + prq_timeout); -} - /** * iommu_register_device_fault_handler() - Register a device fault handler * @dev: the device @@ -1159,9 +1131,6 @@ int iommu_register_device_fault_handler(struct device *dev, mutex_init(&param->fault_param->lock); INIT_LIST_HEAD(&param->fault_param->faults); - if (prq_timeout) - timer_setup(&param->fault_param->timer, iommu_dev_fault_timer_fn, - TIMER_DEFERRABLE); done_unlock: mutex_unlock(&param->lock); @@ -1301,9 +1270,7 @@ int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) struct dev_iommu *param = dev->iommu; struct iommu_fault_event *evt_pending = NULL; struct iommu_fault_param *fparam; - struct timer_list *tmr; int ret = 0; - u64 exp; if (!param || !evt || WARN_ON_ONCE(!iommu_fault_valid(&evt->fault))) return -EINVAL; @@ -1324,17 +1291,7 @@ int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) ret = -ENOMEM; goto done_unlock; } - /* Keep track of response expiration time */ - exp = get_jiffies_64() + prq_timeout; - evt_pending->expire = exp; mutex_lock(&fparam->lock); - if (list_empty(&fparam->faults)) { - /* First pending event, start timer */ - tmr = &fparam->timer; - WARN_ON(timer_pending(tmr)); - mod_timer(tmr, exp); - } - list_add_tail(&evt_pending->list, &fparam->faults); mutex_unlock(&fparam->lock); } @@ -1412,13 +1369,6 @@ int iommu_page_response(struct device *dev, break; } - /* stop response timer if no more pending request */ - if (list_empty(&param->fault_param->faults) && - timer_pending(&param->fault_param->timer)) {
- pr_debug("no pending PRQ, stop timer\n"); - del_timer(&param->fault_param->timer); - } - done_unlock: mutex_unlock(&param->fault_param->lock); return ret; @@ -3017,13 +2967,6 @@ int iommu_switch_dirty_log(struct iommu_domain *domain, bool enable, } mutex_lock(&domain->switch_log_lock); - if (enable && domain->dirty_log_tracking) { - ret = -EBUSY; - goto out; - } else if (!enable && !domain->dirty_log_tracking) { - ret = -EINVAL; - goto out; - } pr_debug("switch_dirty_log %s for: iova 0x%lx size 0x%zx\n", enable ? "enable" : "disable", iova, size); @@ -3046,11 +2989,9 @@ int iommu_switch_dirty_log(struct iommu_domain *domain, bool enable, if (flush) iommu_flush_iotlb_all(domain); - if (!ret) { - domain->dirty_log_tracking = enable; + if (!ret) trace_switch_dirty_log(orig_iova, orig_size, enable); - } -out: + mutex_unlock(&domain->switch_log_lock); return ret; } @@ -3077,10 +3018,6 @@ int iommu_sync_dirty_log(struct iommu_domain *domain, unsigned long iova, } mutex_lock(&domain->switch_log_lock); - if (!domain->dirty_log_tracking) { - ret = -EINVAL; - goto out; - } pr_debug("sync_dirty_log for: iova 0x%lx size 0x%zx\n", iova, size); @@ -3101,7 +3038,7 @@ int iommu_sync_dirty_log(struct iommu_domain *domain, unsigned long iova, if (!ret) trace_sync_dirty_log(orig_iova, orig_size); -out: + mutex_unlock(&domain->switch_log_lock); return ret; } @@ -3150,9 +3087,8 @@ int iommu_clear_dirty_log(struct iommu_domain *domain, unsigned long bitmap_pgshift) { unsigned long riova, rsize; - unsigned int min_pagesz; + unsigned int min_pagesz, rs, re, start, end; bool flush = false; - int rs, re, start, end; int ret = 0; min_pagesz = 1 << __ffs(domain->pgsize_bitmap); @@ -3163,17 +3099,13 @@ int iommu_clear_dirty_log(struct iommu_domain *domain, } mutex_lock(&domain->switch_log_lock); - if (!domain->dirty_log_tracking) { - ret = -EINVAL; - goto out; - } start = (iova - base_iova) >> bitmap_pgshift; end = start + (size >> bitmap_pgshift); bitmap_for_each_set_region(bitmap, rs, re, start, end) { flush = true; - riova = base_iova + (rs << bitmap_pgshift); - rsize = (re - rs) << bitmap_pgshift; + riova = base_iova + ((unsigned long)rs << bitmap_pgshift); + rsize = (unsigned long)(re - rs) << bitmap_pgshift; ret = __iommu_clear_dirty_log(domain, riova, rsize, bitmap, base_iova, bitmap_pgshift); if (ret) @@ -3182,7 +3114,7 @@ int iommu_clear_dirty_log(struct iommu_domain *domain, if (flush) iommu_flush_iotlb_all(domain); -out: + mutex_unlock(&domain->switch_log_lock); return ret; } diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 82504049f8e445e95b4ae5777a1df2122ecf2d4b..1246e8f8bf08601725485c6c2c58dd1107f22e31 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -158,10 +158,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) cached_iova = rb_entry(iovad->cached32_node, struct iova, node); if (free == cached_iova || (free->pfn_hi < iovad->dma_32bit_pfn && - free->pfn_lo >= cached_iova->pfn_lo)) { + free->pfn_lo >= cached_iova->pfn_lo)) iovad->cached32_node = rb_next(&free->node); + + if (free->pfn_lo < iovad->dma_32bit_pfn) iovad->max32_alloc_size = iovad->dma_32bit_pfn; - } cached_iova = rb_entry(iovad->cached_node, struct iova, node); if (free->pfn_lo >= cached_iova->pfn_lo) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index d71f10257f15929f6337b9cc13cc147998c59df5..d9068e8f2db4fcc7484ed6fe8d4b56589b6f7159 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1012,7 +1012,9 @@ static int ipmmu_probe(struct
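The iommu_clear_dirty_log() hunk above widens rs before shifting: bitmap_for_each_set_region() yields unsigned int bounds, so rs << bitmap_pgshift would be evaluated in 32 bits and can wrap before the result ever reaches the unsigned long riova. A minimal userspace model of the difference (values are made up; assumes a 64-bit unsigned long):

#include <stdio.h>

int main(void)
{
    unsigned int rs = 0x100000;    /* region start, in bitmap pages */
    unsigned int shift = 12;       /* 4K pages */

    /* 32-bit shift: 0x100000 << 12 wraps to 0 before widening */
    unsigned long bad  = rs << shift;
    /* widened first, as the patch does: 0x100000000 survives */
    unsigned long good = (unsigned long)rs << shift;

    printf("bad=%lx good=%lx\n", bad, good);
    return 0;
}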
platform_device *pdev) bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); mmu->features = of_device_get_match_data(&pdev->dev); memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs); - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (ret) + return ret; /* Map I/O memory and request IRQ. */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 71f29c0927fc710aa5ff65318fbc3b2702d2700e..ff2c692c0db473fc2e5da897519037d082baf8d9 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1665,7 +1665,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev) num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus", sizeof(phandle)); if (num_iommus < 0) - return 0; + return ERR_PTR(-ENODEV); arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL); if (!arch_data) diff --git a/drivers/iommu/sw64/sunway_iommu.c b/drivers/iommu/sw64/sunway_iommu.c index dd3382ee007f0f67181eca5c8cee881c78ca16a2..b6c8f1272d28fc9de9575fa9d245eb8434396704 100644 --- a/drivers/iommu/sw64/sunway_iommu.c +++ b/drivers/iommu/sw64/sunway_iommu.c @@ -40,6 +40,9 @@ #define SW64_IOMMU_PGSIZES (((1ULL) << PAGE_SHIFT) | ((1ULL) << PAGE_8M_SHIFT)) +#define IDENTMAP_ALL ((1U) << 0) +#define DMA_MASK64 ((1U) << 1) + /* IOMMU Exceptional Status */ enum exceptype { DTE_LEVEL1 = 0x0, @@ -383,7 +386,7 @@ set_dte_entry(struct sunway_iommu_dev *sdev, struct sunway_iommu_domain *sdomain dte_l2_val = (__pa(sdomain->pt_root) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; if (sdomain->type == IOMMU_DOMAIN_IDENTITY) { dte_l2_val |= 0x1; - sdev->passthrough = true; + sdev->passthrough = IDENTMAP_ALL; } *dte_l2 = dte_l2_val; @@ -645,10 +648,21 @@ irqreturn_t iommu_interrupt(int irq, void *dev) type = (iommu_status >> 59) & 0x7; devid = (iommu_status >> 37) & 0xffff; dva = iommu_status & 0xffffffff; - sdev = search_dev_data(devid); - sdomain = sdev->domain; pr_info("%s, iommu_status = %#lx, devid %#lx, dva %#lx, ", __func__, iommu_status, devid, dva); + + sdev = search_dev_data(devid); + if (sdev == NULL) { + pr_info("no such dev!!!\n"); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + + return IRQ_HANDLED; + } + + sdomain = sdev->domain; switch (type) { case DTE_LEVEL1: pr_info("invalid level1 dte, addr:%#lx, val:%#lx\n", @@ -671,7 +685,6 @@ irqreturn_t iommu_interrupt(int irq, void *dev) fetch_pte(sdomain, dva, PTE_LEVEL2_VAL)); iommu_status &= ~(1UL << 62); - iommu_status = iommu_status | (1UL << 63); write_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS, iommu_status); break; @@ -1058,18 +1071,24 @@ static void *sunway_alloc_coherent(struct device *dev, if (!(hose->iommu_enable)) return cpu_addr; - sdomain = get_sunway_domain(dev); sdev = dev_iommu_priv_get(dev); - if (sdev->passthrough) - if (pdev->dma_mask > DMA_BIT_MASK(32)) + if (sdev->passthrough & DMA_MASK64) + return cpu_addr; + else if (sdev->passthrough) { + if (min_not_zero(*dev->dma_mask, dev->coherent_dma_mask) + > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; return cpu_addr; + } - dma_dom = to_dma_domain(sdomain); - if (sdomain->type == IOMMU_DOMAIN_IDENTITY) { - sdomain->type = IOMMU_DOMAIN_DMA; - set_dte_entry(sdev, sdomain); + __free_pages(page, get_order(size)); + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->alloc(dev, size, dma_addr, gfp, attrs); } + sdomain = 
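The omap_iommu_probe_device() fix matters because .probe_device implementations report failure through the returned pointer itself, not through NULL: callers decode errors with IS_ERR()/PTR_ERR(), so returning 0 made a missing "iommus" property look like success. A standalone sketch of that encoding, mirroring the convention from the kernel's <linux/err.h> (simplified model, not the real header):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    /* errnos live in the top 4095 addresses, which are never valid */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
    void *dev = ERR_PTR(-19 /* -ENODEV */);

    if (IS_ERR(dev))
        printf("probe failed: %ld\n", PTR_ERR(dev));
    return 0;
}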
get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + *dma_addr = pci_iommu_map_single(pdev, dma_dom, cpu_addr, size); if (*dma_addr == 0) { free_pages((unsigned long)cpu_addr, get_order(size)); @@ -1164,16 +1183,22 @@ sunway_map_page(struct device *dev, struct page *page, return paddr; sdev = dev_iommu_priv_get(dev); - if (sdev->passthrough) - if (pdev->dma_mask > DMA_BIT_MASK(32)) + if (sdev->passthrough & DMA_MASK64) + return paddr; + else if (sdev->passthrough) { + if (min_not_zero(*dev->dma_mask, dev->coherent_dma_mask) + > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; return paddr; + } + + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_page(dev, page, offset, + size, dir, attrs); + } sdomain = get_sunway_domain(dev); dma_dom = to_dma_domain(sdomain); - if (sdomain->type == IOMMU_DOMAIN_IDENTITY) { - sdomain->type = IOMMU_DOMAIN_DMA; - set_dte_entry(sdev, sdomain); - } return pci_iommu_map_single(pdev, dma_dom, (char *)page_address(page) + offset, size); @@ -1243,13 +1268,18 @@ sunway_map_sg(struct device *dev, struct scatterlist *sgl, goto check; sdev = dev_iommu_priv_get(dev); - if (sdev->passthrough) - if (pdev->dma_mask > DMA_BIT_MASK(32)) + if (sdev->passthrough & DMA_MASK64) + goto check; + else if (sdev->passthrough) { + if (min_not_zero(*dev->dma_mask, dev->coherent_dma_mask) + > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; goto check; + } - if (sdomain->type == IOMMU_DOMAIN_IDENTITY) { - sdomain->type = IOMMU_DOMAIN_DMA; - set_dte_entry(sdev, sdomain); + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_sg(dev, sgl, nents, + dir, attrs); } sg_dma_address(sg) = @@ -1489,6 +1519,9 @@ sunway_iommu_iova_to_phys(struct iommu_domain *dom, dma_addr_t iova) struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); unsigned long paddr, grn; + if (iova > SW64_BAR_ADDRESS) + return iova; + paddr = fetch_pte(sdomain, iova, PTE_LEVEL2_VAL); if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) @@ -1524,7 +1557,7 @@ sunway_iommu_map(struct iommu_domain *dom, unsigned long iova, * to avoid VFIO trying to map pci config space. */ if (iova > SW64_BAR_ADDRESS) - return -EINVAL; + return 0; mutex_lock(&sdomain->api_lock); ret = sunway_iommu_map_page(sdomain, iova, paddr, page_size); diff --git a/drivers/iommu/sw64/sunway_iommu.h b/drivers/iommu/sw64/sunway_iommu.h index 5ad1dc7c406f1c4472a33fc2b6d3659aa4373cc9..bc9e13466f0697843f33a10227f588b067179109 100644 --- a/drivers/iommu/sw64/sunway_iommu.h +++ b/drivers/iommu/sw64/sunway_iommu.h @@ -30,7 +30,7 @@ struct sunway_iommu_dev { struct llist_node dev_data_list; /* Global device list */ u16 devid; int alias; - bool passthrough; + unsigned int passthrough; struct sunway_iommu *iommu; struct pci_dev *pdev; diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 214d7fd1fdd1e36b910360e9e2cec267a23817c6..dd35895c92f3855c151f93f5c38c34971a70f516 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -1,14 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only menu "IRQ chip support" -config SW64_INTC - bool "SW64 Platform-Level Interrupt Controller" - depends on ACPI && SW64 - help - This enables support for the INTC chip found in SW systems. - The INTC controls devices interrupts and connects them to each - core's local interrupt controller. 
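The sunway hunks replace the boolean passthrough with a small flag word so one field records both "identity mapped" (IDENTMAP_ALL) and "proven 64-bit capable" (DMA_MASK64), letting later requests skip the mask test entirely. A compressed userspace model of that decision logic (map_path() is invented for the demo; the flag values come from the patch):

#include <stdio.h>

#define IDENTMAP_ALL (1U << 0)
#define DMA_MASK64   (1U << 1)

/* Sketch: pick the DMA path for one request on a passthrough device. */
static const char *map_path(unsigned int *passthrough, unsigned long long mask)
{
    if (*passthrough & DMA_MASK64)
        return "direct";                /* capability already cached */
    if (*passthrough & IDENTMAP_ALL) {
        if (mask > 0xffffffffULL) {     /* > DMA_BIT_MASK(32) */
            *passthrough |= DMA_MASK64; /* remember for next time */
            return "direct";
        }
        return "arch dma_ops";          /* 32-bit-only device falls back */
    }
    return "iommu";
}

int main(void)
{
    unsigned int pt = IDENTMAP_ALL;
    printf("%s\n", map_path(&pt, 0xffffffffffffULL)); /* direct, sets flag */
    printf("%s\n", map_path(&pt, 0));                 /* direct, flag cached */
    return 0;
}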
- config IRQCHIP def_bool y depends on OF_IRQ @@ -19,6 +11,24 @@ config ARM_GIC select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_EFFECTIVE_AFF_MASK +config SW64_INTC_V2 + bool "SW64 Interrupt Controller V2" + depends on SW64_CHIP3 + default y + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + help + This enables support for the INTC chip found in SW CHIP3 systems. + The INTC controls devices interrupts and connects them to each + core's local interrupt controller. + +config SW64_LPC_INTC + bool "SW64 cpu builtin LPC Interrupt Controller" + depends on SW64_INTC_V2 + help + Say yes here to add support for the SW64 cpu builtin LPC + IRQ controller. + config ARM_GIC_PM bool depends on PM @@ -178,6 +188,16 @@ config HISILICON_IRQ_MBIGEN select ARM_GIC_V3 select ARM_GIC_V3_ITS +if ASCEND_FEATURES + +config ASCEND_INIT_ALL_GICR + bool "Enable init all GICR for Ascend" + depends on ARM_GIC_V3 + depends on ARM_GIC_V3_ITS + default n + +endif + config IMGPDC_IRQ bool select GENERIC_IRQ_CHIP diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 4c78b0f64e6cf9983c40bd5368acf3142b7845fc..14a022c074ce37c054c89b4175f187e6edc0cf3e 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -27,7 +27,8 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o -obj-$(CONFIG_SW64_INTC) += irq-intc-v1.o +obj-$(CONFIG_SW64_INTC_V2) += irq-sw64-intc-v2.o +obj-$(CONFIG_SW64_LPC_INTC) += irq-sw64-lpc-intc.o obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c index 4d2758fbad2286d10347631c90ab05921736125c..cb9962c4debb58ea680736eee390f45c6b7d194b 100644 --- a/drivers/irqchip/irq-gic-phytium-2500-its.c +++ b/drivers/irqchip/irq-gic-phytium-2500-its.c @@ -1675,9 +1675,7 @@ static int its_cpumask_select(struct its_device *its_dev, } cpu = cpumask_any_and(mask_val, cpu_mask); - if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id]))) { - cpus = cpu; - } + cpus = cpus + cpu % skt_cpu_cnt[skt_id]; if (is_kdump_kernel()) { skt = (cpu_logical_map(cpu) >> 16) & 0xff; diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c index dbdb778b5b4bde8c7437408ab251fbcd22d16342..a0c622fb20392594d2b583117d5f5fed645e6f4b 100644 --- a/drivers/irqchip/irq-gic-phytium-2500.c +++ b/drivers/irqchip/irq-gic-phytium-2500.c @@ -1345,9 +1345,7 @@ static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val } cpu = cpumask_any_and(mask_val, cpu_online_mask); - if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[irq_skt]))) { - cpus = cpu; - } + cpus = cpus + cpu % skt_cpu_cnt[irq_skt]; if (is_kdump_kernel()) { skt = (cpu_logical_map(cpu) >> 16) & 0xff; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 6b46cfdcb402062eeaa396f8f788dc95c6ff67cb..81271fd8954fadd969264a75911d92d042dcb6f0 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -195,6 +195,14 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock); static DEFINE_IDA(its_vpeid_ida); +#ifdef CONFIG_ASCEND_INIT_ALL_GICR +static bool init_all_gicr; +static int nr_gicr; +#else +#define init_all_gicr false +#define nr_gicr 0 +#endif + #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) #define gic_data_rdist_cpu(cpu) 
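The Phytium selectors above now fold whatever CPU the mask search returns into the target socket, instead of keeping it only when it already happened to fall inside the socket's range: cpus is the socket's first CPU and cpu % skt_cpu_cnt picks a stable offset within it. A toy model (fold_into_socket() and the values are illustrative only):

#include <stdio.h>

/* cpus: first CPU of the socket; skt_cpu_cnt: CPUs per socket. */
static int fold_into_socket(int cpus, int skt_cpu_cnt, int cpu)
{
    return cpus + cpu % skt_cpu_cnt;
}

int main(void)
{
    /* socket owns CPUs 8..15; a pick of CPU 21 lands on 8 + 21 % 8 = 13 */
    printf("%d\n", fold_into_socket(8, 8, 21));
    return 0;
}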
(per_cpu_ptr(gic_rdists->rdist, cpu)) #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) @@ -1624,7 +1632,7 @@ static int its_select_cpu(struct irq_data *d, cpu = cpumask_pick_least_loaded(d, tmpmask); } else { - cpumask_and(tmpmask, irq_data_get_affinity_mask(d), cpu_online_mask); + cpumask_copy(tmpmask, aff_mask); /* If we cannot cross sockets, limit the search to that node */ if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && @@ -1640,6 +1648,26 @@ static int its_select_cpu(struct irq_data *d, return cpu; } +#ifdef CONFIG_ASCEND_INIT_ALL_GICR +static int its_select_cpu_other(const struct cpumask *mask_val) +{ + int cpu; + + if (!init_all_gicr) + return -EINVAL; + + cpu = find_first_bit(cpumask_bits(mask_val), NR_CPUS); + if (cpu >= nr_gicr) + cpu = -EINVAL; + return cpu; +} +#else +static int its_select_cpu_other(const struct cpumask *mask_val) +{ + return -EINVAL; +} +#endif + static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { @@ -1661,6 +1689,9 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, cpu = cpumask_pick_least_loaded(d, mask_val); if (cpu < 0 || cpu >= nr_cpu_ids) + cpu = its_select_cpu_other(mask_val); + + if (cpu < 0) goto err; /* don't set the affinity when the target cpu is same as current one */ @@ -2928,8 +2959,12 @@ static int allocate_vpe_l1_table(void) static int its_alloc_collections(struct its_node *its) { int i; + int cpu_nr = nr_cpu_ids; - its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), + if (init_all_gicr) + cpu_nr = CONFIG_NR_CPUS; + + its->collections = kcalloc(cpu_nr, sizeof(*its->collections), GFP_KERNEL); if (!its->collections) return -ENOMEM; @@ -3225,6 +3260,213 @@ static void its_cpu_init_collections(void) raw_spin_unlock(&its_lock); } +#ifdef CONFIG_ASCEND_INIT_ALL_GICR +void its_set_gicr_nr(int nr) +{ + nr_gicr = nr; +} + +static int __init its_enable_init_all_gicr(char *str) +{ + init_all_gicr = true; + return 1; +} + +__setup("init_all_gicr", its_enable_init_all_gicr); + +bool its_init_all_gicr(void) +{ + return init_all_gicr; +} + +static void its_cpu_init_lpis_others(void __iomem *rbase, int cpu) +{ + struct page *pend_page; + phys_addr_t paddr; + u64 val, tmp; + + if (!init_all_gicr) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. + */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); + + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + + goto out; + } + + /* If we didn't allocate the pending table yet, do it now */ + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for GICR:%p\n", rbase); + return; + } + + paddr = page_to_phys(pend_page); + pr_info("GICR:%p using LPI pending table @%pa\n", + rbase, &paddr); + + WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); + + /* Disable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* + * Make sure any change to the table is observable by the GIC. 
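its_select_cpu_other() is a last-chance fallback for the Ascend all-GICR mode: when the normal affinity search fails, take the first CPU in the mask, but only if a probed redistributor backs it (cpu < nr_gicr). A userspace model of the check (uses the GCC ctz builtin as a stand-in for find_first_bit() and skips its empty-mask handling):

#include <stdio.h>

static int init_all_gicr = 1;  /* would come from the init_all_gicr= option */
static int nr_gicr = 4;        /* redistributors counted at probe time */

static int select_cpu_other(unsigned long mask)
{
    int cpu;

    if (!init_all_gicr)
        return -1;                 /* -EINVAL in the kernel */
    cpu = __builtin_ctzl(mask);    /* first set bit; mask assumed nonzero */
    return cpu < nr_gicr ? cpu : -1;
}

int main(void)
{
    printf("%d\n", select_cpu_other(0x8));   /* cpu 3: backed, ok */
    printf("%d\n", select_cpu_other(0x10));  /* cpu 4: beyond nr_gicr, -1 */
    return 0;
}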
+ */ + dsb(sy); + + /* set PROPBASE */ + val = (gic_rdists->prop_table_pa | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_RaWaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); + + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { + if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. + */ + val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | + GICR_PROPBASER_CACHEABILITY_MASK); + val |= GICR_PROPBASER_nC; + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + } + pr_info_once("GIC: using cache flushing for LPI property table\n"); + gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; + } + + /* set PENDBASE */ + val = (page_to_phys(pend_page) | + GICR_PENDBASER_InnerShareable | + GICR_PENDBASER_RaWaWb); + + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must remove the + * cacheability attributes as well. + */ + val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | + GICR_PENDBASER_CACHEABILITY_MASK); + val |= GICR_PENDBASER_nC; + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + } + + /* Enable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure the GIC has seen the above */ + dsb(sy); +out: + pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n", + cpu, pend_page ? "allocated" : "reserved", &paddr); +} + +static void its_cpu_init_collection_others(void __iomem *rbase, + phys_addr_t phys_base, int cpu) +{ + u32 count; + struct its_node *its; + + if (!init_all_gicr) + return; + + raw_spin_lock(&its_lock); + + list_for_each_entry(its, &its_nodes, entry) { + u64 target; + + /* + * We now have to bind each collection to its target + * redistributor. + */ + if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + /* + * This ITS wants the physical address of the + * redistributor. + */ + target = phys_base; + } else { + /* + * This ITS wants a linear CPU number. 
+ */ + target = gic_read_typer(rbase + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target) << 16; + } + + dsb(sy); + + /* In FPGA, We need to check if the gicr has been cut, + * and if it is, it can't be initialized + */ + count = 2000; + while (1) { + if (readl_relaxed(rbase + GICR_SYNCR) == 0) + break; + + count--; + if (!count) { + pr_err("this gicr does not exist, or it's abnormal:%pK\n", + &phys_base); + break; + } + cpu_relax(); + udelay(1); + } + + if (count == 0) + break; + + pr_info("its init other collection table, ITS:%pK, GICR:%pK, coreId:%u\n", + &its->phys_base, &phys_base, cpu); + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + its->collections[cpu].col_id = cpu; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); + } + + raw_spin_unlock(&its_lock); +} + +int its_cpu_init_others(void __iomem *base, phys_addr_t phys_base, int cpu) +{ + if (!list_empty(&its_nodes)) { + its_cpu_init_lpis_others(base, cpu); + its_cpu_init_collection_others(base, phys_base, cpu); + } + + return 0; +} +#endif + static struct its_device *its_find_device(struct its_node *its, u32 dev_id) { struct its_device *its_dev = NULL, *tmp; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 31687aba427d2e434368877d03c390568a9fdef7..64d7811a2b77d5bd5ed2a4b81517e3e23e1d2944 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -206,11 +206,11 @@ static inline void __iomem *gic_dist_base(struct irq_data *d) } } -static void gic_do_wait_for_rwp(void __iomem *base) +static void gic_do_wait_for_rwp(void __iomem *base, u32 bit) { u32 count = 1000000; /* 1s! */ - while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { + while (readl_relaxed(base + GICD_CTLR) & bit) { count--; if (!count) { pr_err_ratelimited("RWP timeout, gone fishing\n"); @@ -224,13 +224,13 @@ static void gic_do_wait_for_rwp(void __iomem *base) /* Wait for completion of a distributor change */ static void gic_dist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data.dist_base); + gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP); } /* Wait for completion of a redistributor change */ static void gic_redist_wait_for_rwp(void) { - gic_do_wait_for_rwp(gic_data_rdist_rd_base()); + gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP); } #ifdef CONFIG_ARM64 @@ -244,17 +244,11 @@ static u64 __maybe_unused gic_read_iar(void) } #endif -static void gic_enable_redist(bool enable) +static void __gic_enable_redist(void __iomem *rbase, bool enable) { - void __iomem *rbase; u32 count = 1000000; /* 1s! */ u32 val; - if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) - return; - - rbase = gic_data_rdist_rd_base(); - val = readl_relaxed(rbase + GICR_WAKER); if (enable) /* Wake up this CPU redistributor */ @@ -281,6 +275,14 @@ static void gic_enable_redist(bool enable) enable ? 
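The collection setup above polls GICR_SYNCR with a bounded retry budget so a redistributor that was fused off ("cut") cannot hang boot. The shape of that loop as a standalone sketch, with the register read stubbed out (fake_syncr is invented for the demo):

#include <stdio.h>

static unsigned int fake_syncr = 3;  /* pretend busy for a few reads */
static unsigned int read_syncr(void) { return fake_syncr ? fake_syncr-- : 0; }

/* Poll until the register reads 0; give up after 'count' tries. */
static int wait_syncr(unsigned int count)
{
    while (read_syncr() != 0) {
        if (--count == 0)
            return -1;   /* GICR absent or abnormal */
        /* kernel version also does: cpu_relax(); udelay(1); */
    }
    return 0;
}

int main(void)
{
    printf("%d\n", wait_syncr(2000));  /* 0: became idle within budget */
    return 0;
}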
"wakeup" : "sleep"); } +static void gic_enable_redist(bool enable) +{ + if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) + return; + + __gic_enable_redist(gic_data_rdist_rd_base(), enable); +} + /* * Routines to disable, enable, EOI and route interrupts */ @@ -923,6 +925,22 @@ static int __gic_update_rdist_properties(struct redist_region *region, { u64 typer = gic_read_typer(ptr + GICR_TYPER); + /* Boot-time cleanip */ + if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { + u64 val; + + /* Deactivate any present vPE */ + val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); + if (val & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + ptr + SZ_128K + GICR_VPENDBASER); + + /* Mark the VPE table as invalid */ + val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_VALID; + gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); + } + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */ @@ -1126,6 +1144,89 @@ static void gic_cpu_init(void) gic_cpu_sys_reg_init(); } +#ifdef CONFIG_ASCEND_INIT_ALL_GICR +static int __gic_compute_nr_gicr(struct redist_region *region, void __iomem *ptr) +{ + static int gicr_nr = 0; + + its_set_gicr_nr(++gicr_nr); + + return 1; +} + +static void gic_compute_nr_gicr(void) +{ + gic_iterate_rdists(__gic_compute_nr_gicr); +} + +static int gic_rdist_cpu(void __iomem *ptr, unsigned int cpu) +{ + unsigned long mpidr = cpu_logical_map(cpu); + u64 typer; + u32 aff; + + /* + * Convert affinity to a 32bit value that can be matched to + * GICR_TYPER bits [63:32]. + */ + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + typer = gic_read_typer(ptr + GICR_TYPER); + if ((typer >> 32) == aff) + return 0; + + return 1; +} + +static int gic_rdist_cpus(void __iomem *ptr) +{ + unsigned int i; + + for (i = 0; i < nr_cpu_ids; i++) { + if (gic_rdist_cpu(ptr, i) == 0) + return 0; + } + + return 1; +} + +static int gic_cpu_init_other(struct redist_region *region, void __iomem *ptr) +{ + u64 offset; + phys_addr_t phys_base; + static int cpu = 0; + + if (cpu == 0) + cpu = nr_cpu_ids; + + if (gic_rdist_cpus(ptr) == 1) { + offset = ptr - region->redist_base; + phys_base = region->phys_base + offset; + __gic_enable_redist(ptr, true); + if (gic_dist_supports_lpis()) + its_cpu_init_others(ptr, phys_base, cpu); + cpu++; + } + + return 1; +} + +static void gic_cpu_init_others(void) +{ + if (!its_init_all_gicr()) + return; + + gic_iterate_rdists(gic_cpu_init_other); +} +#else +static inline void gic_compute_nr_gicr(void) {} + +static inline void gic_cpu_init_others(void) {} +#endif + #ifdef CONFIG_SMP #define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) @@ -1459,6 +1560,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, if(fwspec->param_count != 2) return -EINVAL; + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + *hwirq = fwspec->param[0]; *type = fwspec->param[1]; @@ -1747,6 +1854,7 @@ static int __init gic_init_bases(void __iomem *dist_base, gic_data.rdists.has_vlpis = true; gic_data.rdists.has_direct_lpi = true; gic_data.rdists.has_vpend_valid_dirty = true; + gic_compute_nr_gicr(); if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { err = -ENOMEM; @@ -1783,6 +1891,8 @@ static 
int __init gic_init_bases(void __iomem *dist_base, gicv2m_init(handle, gic_data.domain); } + gic_cpu_init_others(); + return 0; out_free: diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 176f5f06432d1acb2e762a223fff9cdb16cab079..205cbd24ff20916028a09328d3db21514a7a88c5 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1094,6 +1094,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, if(fwspec->param_count != 2) return -EINVAL; + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + *hwirq = fwspec->param[0]; *type = fwspec->param[1]; diff --git a/drivers/irqchip/irq-intc-v1.c b/drivers/irqchip/irq-intc-v1.c deleted file mode 100644 index 4519e96526fbded25fcab9aae38308081e949fe5..0000000000000000000000000000000000000000 --- a/drivers/irqchip/irq-intc-v1.c +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -static void fake_irq_mask(struct irq_data *data) -{ -} - -static void fake_irq_unmask(struct irq_data *data) -{ -} - -static struct irq_chip onchip_intc = { - .name = "SW fake Intc", - .irq_mask = fake_irq_mask, - .irq_unmask = fake_irq_unmask, -}; - -static int sw_intc_domain_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hw) -{ - irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq); - irq_set_status_flags(irq, IRQ_LEVEL); - return 0; -} - -static const struct irq_domain_ops intc_irq_domain_ops = { - .xlate = irq_domain_xlate_onecell, - .map = sw_intc_domain_map, -}; - -#ifdef CONFIG_ACPI - -static int __init -intc_parse_madt(union acpi_subtable_headers *header, - const unsigned long end) -{ - struct acpi_madt_io_sapic *its_entry; - static struct irq_domain *root_domain; - int intc_irqs = 8, irq_base = NR_IRQS_LEGACY; - irq_hw_number_t hwirq_base = 0; - int irq_start = -1; - - its_entry = (struct acpi_madt_io_sapic *)header; - - intc_irqs -= hwirq_base; /* calculate # of irqs to allocate */ - - irq_base = irq_alloc_descs(irq_start, 16, intc_irqs, - numa_node_id()); - if (irq_base < 0) { - WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", - irq_start); - irq_base = irq_start; - } - - root_domain = irq_domain_add_legacy(NULL, intc_irqs, irq_base, - hwirq_base, &intc_irq_domain_ops, NULL); - - if (!root_domain) - pr_err("Failed to create irqdomain"); - - irq_set_default_host(root_domain); - - sw64_io_write(0, MCU_DVC_INT_EN, 0xff); - - return 0; -} - -static int __init acpi_intc_init(void) -{ - int count = 0; - - count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_SAPIC, - intc_parse_madt, 0); - - if (count <= 0) { - pr_err("No valid intc entries exist\n"); - return -EINVAL; - } - return 0; -} -#else -static int __init acpi_intc_init(void) -{ - return 0; -} -#endif - -static int __init intc_init(void) -{ - acpi_intc_init(); - - return 0; -} -subsys_initcall(intc_init); diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c index 8729b8a6b54dcf215e0ca5d23b393879db7554d0..fc05e23938cd256906e7f73f2eb8c46c7409b534 100644 --- a/drivers/irqchip/irq-mbigen.c +++ b/drivers/irqchip/irq-mbigen.c @@ -402,7 +402,18 @@ static struct platform_driver mbigen_platform_driver = { .probe = mbigen_device_probe, }; -module_platform_driver(mbigen_platform_driver); +static int __init mbigen_init(void) +{ + return platform_driver_register(&mbigen_platform_driver); +} + +static void __exit 
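Both GIC translate hooks now reject fwspec entries below 16 because hwirqs 0-15 are SGIs, which firmware tables must never hand out as wired interrupts. The guard, reduced to a self-contained sketch (translate() is a simplified stand-in for the irq_domain callback):

#include <stdio.h>

/* Two-cell translate: cell 0 is the GSI, cell 1 the trigger type. */
static int translate(const unsigned int *param, int count,
                     unsigned long *hwirq, unsigned int *type)
{
    if (count != 2)
        return -1;        /* -EINVAL */
    if (param[0] < 16)
        return -1;        /* illegal GSI: SGI range */
    *hwirq = param[0];
    *type = param[1];
    return 0;
}

int main(void)
{
    unsigned long hwirq; unsigned int type;
    unsigned int bad[2] = { 5, 4 }, ok[2] = { 23, 4 };

    printf("%d %d\n", translate(bad, 2, &hwirq, &type),
           translate(ok, 2, &hwirq, &type));   /* -1 0 */
    return 0;
}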
mbigen_exit(void) +{ + platform_driver_unregister(&mbigen_platform_driver); +} + +arch_initcall(mbigen_init); +module_exit(mbigen_exit); MODULE_AUTHOR("Jun Ma "); MODULE_AUTHOR("Yun Wu "); diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c index 21cb31ff2bbf25777d34c6b12ed86c6458724461..e903c44edb64ab474d35097daed4ebcbd44ba913 100644 --- a/drivers/irqchip/irq-nvic.c +++ b/drivers/irqchip/irq-nvic.c @@ -94,6 +94,7 @@ static int __init nvic_of_init(struct device_node *node, if (!nvic_irq_domain) { pr_warn("Failed to allocate irq domain\n"); + iounmap(nvic_base); return -ENOMEM; } @@ -103,6 +104,7 @@ static int __init nvic_of_init(struct device_node *node, if (ret) { pr_warn("Failed to allocate irq chips\n"); irq_domain_remove(nvic_irq_domain); + iounmap(nvic_base); return ret; } diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 926e55d838cb1a3d176ff605546bc0fa60f6d825..bd99ee0ae433d9d0a014831c61c3f7119409d326 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -400,3 +400,4 @@ static int __init plic_init(struct device_node *node, IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ +IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */ diff --git a/arch/sw_64/chip/chip3/irq_chip.c b/drivers/irqchip/irq-sw64-intc-v2.c similarity index 60% rename from arch/sw_64/chip/chip3/irq_chip.c rename to drivers/irqchip/irq-sw64-intc-v2.c index ee43e87c554b5d20e33a6bd4e027edb7672bc995..8640c4aa9506c71c0c0f1209171e09e23218c145 100644 --- a/arch/sw_64/chip/chip3/irq_chip.c +++ b/drivers/irqchip/irq-sw64-intc-v2.c @@ -1,19 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include #include -#include +#include +#include +#include #include -#include static void fake_irq_mask(struct irq_data *data) { @@ -43,6 +34,64 @@ static const struct irq_domain_ops sw64_intc_domain_ops = { .map = sw64_intc_domain_map, }; +static int __init +intc_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_io_sapic *its_entry; + static struct irq_domain *root_domain; + int intc_irqs = 8, irq_base = NR_IRQS_LEGACY; + irq_hw_number_t hwirq_base = 0; + int irq_start = -1; + + its_entry = (struct acpi_madt_io_sapic *)header; + + intc_irqs -= hwirq_base; /* calculate # of irqs to allocate */ + + irq_base = irq_alloc_descs(irq_start, 16, intc_irqs, + numa_node_id()); + if (irq_base < 0) { + WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", + irq_start); + irq_base = irq_start; + } + + root_domain = irq_domain_add_legacy(NULL, intc_irqs, irq_base, + hwirq_base, &sw64_intc_domain_ops, NULL); + + if (!root_domain) + pr_err("Failed to create irqdomain"); + + irq_set_default_host(root_domain); + + sw64_io_write(0, MCU_DVC_INT_EN, 0xff); + + return 0; +} + +static int __init acpi_intc_init(void) +{ + int count = 0; + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_SAPIC, + intc_parse_madt, 0); + + if (count <= 0) { + pr_err("No valid intc entries exist\n"); + return -EINVAL; + } + return 0; +} + +static int __init intc_init(void) +{ + acpi_intc_init(); + + return 0; +} + +subsys_initcall(intc_init); + static struct irq_domain *root_domain; static int __init diff --git a/drivers/irqchip/irq-sw64-lpc-intc.c b/drivers/irqchip/irq-sw64-lpc-intc.c new file mode 100644 
index 0000000000000000000000000000000000000000..1cbf8747824232bb62bce64432320ea50ad50451 --- /dev/null +++ b/drivers/irqchip/irq-sw64-lpc-intc.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#define LPC_NR_IRQS 16 +#define LPC_IRQ 0x4 +#define LPC_IRQ_MASK 0x8 + +struct lpc_intc_data { + struct irq_domain *domain; + struct irq_chip_generic *gc; +}; + +static void lpc_irq_mask_ack(struct irq_data *data) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct irq_chip_type *ct = irq_data_get_chip_type(data); + unsigned int mask = data->mask; + + irq_gc_lock(gc); + *ct->mask_cache |= mask; + irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); + irq_reg_writel(gc, mask, ct->regs.ack); + irq_gc_unlock(gc); +} + +static void lpc_irq_handler(struct irq_desc *desc) +{ + struct lpc_intc_data *b = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int irq; + u32 status; + + chained_irq_enter(chip, desc); + + status = irq_reg_readl(b->gc, LPC_IRQ); + + if (status == 0) { + raw_spin_lock(&desc->lock); + handle_bad_irq(desc); + raw_spin_unlock(&desc->lock); + goto out; + } + + while (status) { + irq = __ffs(status); + status &= ~BIT(irq); + generic_handle_irq(irq_find_mapping(b->domain, irq)); + } + +out: + chained_irq_exit(chip, desc); +} + +static int __init lpc_intc_of_init(struct device_node *np, + struct device_node *parent) +{ + unsigned int set = IRQ_NOPROBE | IRQ_LEVEL; + struct lpc_intc_data *data; + struct irq_chip_type *ct; + int parent_irq, ret; + void __iomem *base; + int hwirq = 0; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + base = of_iomap(np, 0); + if (!base) { + pr_err("failed to remap lpc intc registers\n"); + ret = -ENOMEM; + goto out_free; + } + + parent_irq = irq_of_parse_and_map(np, 0); + if (!parent_irq) { + pr_err("failed to find parent interrupt\n"); + ret = -EINVAL; + goto out_unmap; + } + + data->domain = irq_domain_add_linear(np, LPC_NR_IRQS, + &irq_generic_chip_ops, NULL); + if (!data->domain) { + ret = -ENOMEM; + goto out_unmap; + } + + /* Allocate a single Generic IRQ chip for this node */ + ret = irq_alloc_domain_generic_chips(data->domain, 16, 1, np->name, + handle_level_irq, 0, set, + IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("failed to allocate generic irq chip\n"); + goto out_free_domain; + } + + /* Set the IRQ chaining logic */ + irq_set_chained_handler_and_data(parent_irq, + lpc_irq_handler, data); + + data->gc = irq_get_domain_generic_chip(data->domain, 0); + data->gc->reg_base = base; + data->gc->private = data; + + ct = data->gc->chip_types; + + ct->regs.ack = LPC_IRQ; + ct->regs.mask = LPC_IRQ_MASK; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask_ack = lpc_irq_mask_ack; + + for (hwirq = 0 ; hwirq < 16 ; hwirq++) + irq_create_mapping(data->domain, hwirq); + + /* Enable LPC interrupts */ + writel(0xffffebdd, base + LPC_IRQ_MASK); + + return 0; + +out_free_domain: + irq_domain_remove(data->domain); +out_unmap: + iounmap(base); +out_free: + kfree(data); + return ret; +} +IRQCHIP_DECLARE(sw_lpc_intc, "sw64,lpc_intc", lpc_intc_of_init); diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index 5dc63c20b67ea93730c27161015c4f7f0672b51f..fc747b7f498302e8b4daf0d2a6b01a28abddcb69 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -74,17 +74,18 
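lpc_irq_handler() in the new driver drains the LPC status register with the usual find-lowest-bit loop: each set bit is cleared from the local copy and dispatched to the mapped Linux IRQ. A userspace model of that loop (dispatch() stands in for generic_handle_irq(irq_find_mapping(...)), and the ctz builtin for __ffs()):

#include <stdio.h>

static void dispatch(unsigned int hwirq) { printf("irq %u\n", hwirq); }

static void handle_status(unsigned int status)
{
    while (status) {
        unsigned int irq = __builtin_ctz(status);  /* lowest set bit */
        status &= ~(1U << irq);                    /* clear it locally */
        dispatch(irq);
    }
}

int main(void)
{
    handle_status(0x0A);   /* fires irq 1, then irq 3 */
    return 0;
}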
@@ static int qcom_pdc_gic_set_irqchip_state(struct irq_data *d, static void pdc_enable_intr(struct irq_data *d, bool on) { int pin_out = d->hwirq; + unsigned long flags; u32 index, mask; u32 enable; index = pin_out / 32; mask = pin_out % 32; - raw_spin_lock(&pdc_lock); + raw_spin_lock_irqsave(&pdc_lock, flags); enable = pdc_reg_read(IRQ_ENABLE_BANK, index); enable = on ? ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask); pdc_reg_write(IRQ_ENABLE_BANK, index, enable); - raw_spin_unlock(&pdc_lock); + raw_spin_unlock_irqrestore(&pdc_lock, flags); } static void qcom_pdc_gic_disable(struct irq_data *d) diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index bd087cca1c1d2d0aca56d7479598d0c2f50c228b..af17459c1a5c02d847d801be97f72605c1870e51 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2005,7 +2005,11 @@ setup_hw(struct hfc_pci *hc) } /* Allocate memory for FIFOS */ /* the memory needs to be on a 32k boundary within the first 4G */ - dma_set_mask(&hc->pdev->dev, 0xFFFF8000); + if (dma_set_mask(&hc->pdev->dev, 0xFFFF8000)) { + printk(KERN_WARNING + "HFC-PCI: No usable DMA configuration!\n"); + return -EIO; + } buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle, GFP_KERNEL); /* We silently assume the address is okay if nonzero */ diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index 40588692cec74ed5861b32c193f238d320e8e24b..c3b2c99b5cd5ceaf12c9fc7dcd929840ec6b3870 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c @@ -17,9 +17,6 @@ #include "dsp.h" #include "dsp_hwec.h" -/* uncomment for debugging */ -/*#define PIPELINE_DEBUG*/ - struct dsp_pipeline_entry { struct mISDN_dsp_element *elem; void *p; @@ -104,10 +101,6 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) } } -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name); -#endif - return 0; err2: @@ -129,10 +122,6 @@ void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem) list_for_each_entry_safe(entry, n, &dsp_elements, list) if (entry->elem == elem) { device_unregister(&entry->dev); -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: %s unregistered\n", - __func__, elem->name); -#endif return; } printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name); @@ -145,10 +134,6 @@ int dsp_pipeline_module_init(void) if (IS_ERR(elements_class)) return PTR_ERR(elements_class); -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: dsp pipeline module initialized\n", __func__); -#endif - dsp_hwec_init(); return 0; @@ -168,10 +153,6 @@ void dsp_pipeline_module_exit(void) __func__, entry->elem->name); kfree(entry); } - -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__); -#endif } int dsp_pipeline_init(struct dsp_pipeline *pipeline) @@ -181,10 +162,6 @@ int dsp_pipeline_init(struct dsp_pipeline *pipeline) INIT_LIST_HEAD(&pipeline->list); -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: dsp pipeline ready\n", __func__); -#endif - return 0; } @@ -210,16 +187,12 @@ void dsp_pipeline_destroy(struct dsp_pipeline *pipeline) return; _dsp_pipeline_destroy(pipeline); - -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: dsp pipeline destroyed\n", __func__); -#endif } int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) { - int incomplete = 0, found = 0; - char *dup, *tok, *name, *args; + int found = 0; + char *dup, *next, *tok, *name, *args; struct dsp_element_entry *entry, 
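The qcom-pdc change switches to the irqsave lock variant because pdc_enable_intr() can be reached both with interrupts enabled and disabled; saving the flags means the unlock restores the caller's state instead of force-enabling IRQs. A toy model of why restoring, not enabling, is the point (all names here are invented for the demo):

#include <stdio.h>

static int irqs_enabled = 0;   /* pretend the caller already had IRQs off */

static unsigned long fake_lock_irqsave(void)
{
    unsigned long flags = irqs_enabled;
    irqs_enabled = 0;          /* taking the lock disables IRQs */
    return flags;
}

static void fake_unlock_irqrestore(unsigned long flags)
{
    irqs_enabled = flags;      /* restore; a plain unlock would force 1 */
}

int main(void)
{
    unsigned long flags = fake_lock_irqsave();
    /* ... critical section ... */
    fake_unlock_irqrestore(flags);
    printf("irqs_enabled=%d\n", irqs_enabled);  /* still 0: state preserved */
    return 0;
}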
*n; struct dsp_pipeline_entry *pipeline_entry; struct mISDN_dsp_element *elem; @@ -230,10 +203,10 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) if (!list_empty(&pipeline->list)) _dsp_pipeline_destroy(pipeline); - dup = kstrdup(cfg, GFP_ATOMIC); + dup = next = kstrdup(cfg, GFP_ATOMIC); if (!dup) return 0; - while ((tok = strsep(&dup, "|"))) { + while ((tok = strsep(&next, "|"))) { if (!strlen(tok)) continue; name = strsep(&tok, "("); @@ -251,7 +224,6 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) printk(KERN_ERR "%s: failed to add " "entry to pipeline: %s (out of " "memory)\n", __func__, elem->name); - incomplete = 1; goto _out; } pipeline_entry->elem = elem; @@ -268,20 +240,12 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) if (pipeline_entry->p) { list_add_tail(&pipeline_entry-> list, &pipeline->list); -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: created " - "instance of %s%s%s\n", - __func__, name, args ? - " with args " : "", args ? - args : ""); -#endif } else { printk(KERN_ERR "%s: failed " "to add entry to pipeline: " "%s (new() returned NULL)\n", __func__, elem->name); kfree(pipeline_entry); - incomplete = 1; } } found = 1; @@ -290,11 +254,9 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) if (found) found = 0; - else { + else printk(KERN_ERR "%s: element not found, skipping: " "%s\n", __func__, name); - incomplete = 1; - } } _out: @@ -303,10 +265,6 @@ int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg) else pipeline->inuse = 0; -#ifdef PIPELINE_DEBUG - printk(KERN_DEBUG "%s: dsp pipeline built%s: %s\n", - __func__, incomplete ? " incomplete" : "", cfg); -#endif kfree(dup); return 0; } diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c index 2543c7b6948b6d9ce701eccdd0e6430ef629a869..c5663398c6b7d7ec49b0314d2f564e23d62dd900 100644 --- a/drivers/mailbox/imx-mailbox.c +++ b/drivers/mailbox/imx-mailbox.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x))) @@ -66,6 +67,7 @@ struct imx_mu_priv { const struct imx_mu_dcfg *dcfg; struct clk *clk; int irq; + bool suspend; u32 xcr; @@ -277,6 +279,9 @@ static irqreturn_t imx_mu_isr(int irq, void *p) return IRQ_NONE; } + if (priv->suspend) + pm_system_wakeup(); + return IRQ_HANDLED; } @@ -326,6 +331,8 @@ static int imx_mu_startup(struct mbox_chan *chan) break; } + priv->suspend = true; + return 0; } @@ -543,6 +550,8 @@ static int imx_mu_probe(struct platform_device *pdev) clk_disable_unprepare(priv->clk); + priv->suspend = false; + return 0; disable_runtime_pm: diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index e07091d71986a5f1ca30dfdfd38c785356e6647b..4895d8074002220ef7baa5dc1c0e6bd628cdf6db 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -410,6 +410,11 @@ static int tegra_hsp_mailbox_flush(struct mbox_chan *chan, value = tegra_hsp_channel_readl(ch, HSP_SM_SHRD_MBOX); if ((value & HSP_SM_SHRD_MBOX_FULL) == 0) { mbox_chan_txdone(chan, 0); + + /* Wait until channel is empty */ + if (chan->active_req != NULL) + continue; + return 0; } diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 8c371d5eef8eb96becfc9da08a32545552591bae..097577ae3c47177a6ec0706e106aa14e5a0b66e2 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -482,8 +482,7 @@ void bch_bucket_free(struct cache_set *c, struct bkey *k) unsigned int i; for (i = 0; i < KEY_PTRS(k); i++) - 
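The dsp_pipeline_build() fix above is the classic strsep() ownership bug: strsep() advances the pointer it is handed, so the original kstrdup() return must be kept aside for kfree(). The same pattern in plain C (glibc/BSD strsep(); the pipeline string is just an example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *dup, *next, *tok;

    dup = next = strdup("hwec|delay(100)|bypass");
    if (!dup)
        return 1;

    while ((tok = strsep(&next, "|")))   /* advances next, never dup */
        if (*tok)
            printf("element: %s\n", tok);

    free(dup);   /* still the pointer the allocator handed back */
    return 0;
}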
__bch_bucket_free(PTR_CACHE(c, k, i), - PTR_BUCKET(c, k, i)); + __bch_bucket_free(c->cache, PTR_BUCKET(c, k, i)); } int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve, @@ -674,7 +673,7 @@ bool bch_alloc_sectors(struct cache_set *c, SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); atomic_long_add(sectors, - &PTR_CACHE(c, &b->key, i)->sectors_written); + &c->cache->sectors_written); } if (b->sectors_free < c->cache->sb.block_size) diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index e8bf4f752e8beebcad27756fb533b7550ba53686..0563a40812fa5a35208b74bacf4d62ce0dc14998 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -178,7 +178,6 @@ #define pr_fmt(fmt) "bcache: %s() " fmt, __func__ -#include #include #include #include @@ -190,6 +189,7 @@ #include #include +#include "bcache_ondisk.h" #include "bset.h" #include "util.h" #include "closure.h" @@ -364,7 +364,6 @@ struct cached_dev { /* The rest of this all shows up in sysfs */ unsigned int sequential_cutoff; - unsigned int readahead; unsigned int io_disable:1; unsigned int verify:1; @@ -373,6 +372,7 @@ struct cached_dev { unsigned int partial_stripes_expensive:1; unsigned int writeback_metadata:1; unsigned int writeback_running:1; + unsigned int writeback_consider_fragment:1; unsigned char writeback_percent; unsigned int writeback_delay; @@ -385,6 +385,9 @@ struct cached_dev { unsigned int writeback_rate_update_seconds; unsigned int writeback_rate_i_term_inverse; unsigned int writeback_rate_p_term_inverse; + unsigned int writeback_rate_fp_term_low; + unsigned int writeback_rate_fp_term_mid; + unsigned int writeback_rate_fp_term_high; unsigned int writeback_rate_minimum; enum stop_on_failure stop_when_cache_set_failed; @@ -393,6 +396,13 @@ struct cached_dev { unsigned int error_limit; unsigned int offline_seconds; + /* + * Retry to update writeback_rate if contention happens for + * down_read(dc->writeback_lock) in update_writeback_rate() + */ +#define BCH_WBRATE_UPDATE_MAX_SKIPS 15 + unsigned int rate_update_retry; + char backing_dev_name[BDEVNAME_SIZE]; }; @@ -800,13 +810,6 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s) return s & (c->cache->sb.bucket_size - 1); } -static inline struct cache *PTR_CACHE(struct cache_set *c, - const struct bkey *k, - unsigned int ptr) -{ - return c->cache; -} - static inline size_t PTR_BUCKET_NR(struct cache_set *c, const struct bkey *k, unsigned int ptr) @@ -818,7 +821,7 @@ static inline struct bucket *PTR_BUCKET(struct cache_set *c, const struct bkey *k, unsigned int ptr) { - return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr); + return c->cache->buckets + PTR_BUCKET_NR(c, k, ptr); } static inline uint8_t gen_after(uint8_t a, uint8_t b) @@ -837,7 +840,7 @@ static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k, static inline bool ptr_available(struct cache_set *c, const struct bkey *k, unsigned int i) { - return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i); + return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && c->cache; } /* Btree key macros */ diff --git a/include/uapi/linux/bcache.h b/drivers/md/bcache/bcache_ondisk.h similarity index 100% rename from include/uapi/linux/bcache.h rename to drivers/md/bcache/bcache_ondisk.h diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 67a2c47f4201ae03fd9d41febeb507ea623a351d..94d38e8a59b323aa576cc1b0f6ed6ef95ebef49d 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -712,8 +712,10 @@ void 
bch_bset_build_written_tree(struct btree_keys *b) for (j = inorder_next(0, t->size); j; j = inorder_next(j, t->size)) { - while (bkey_to_cacheline(t, k) < cacheline) - prev = k, k = bkey_next(k); + while (bkey_to_cacheline(t, k) < cacheline) { + prev = k; + k = bkey_next(k); + } t->prev[j] = bkey_u64s(prev); t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k); @@ -901,8 +903,10 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, status = BTREE_INSERT_STATUS_INSERT; while (m != bset_bkey_last(i) && - bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) - prev = m, m = bkey_next(m); + bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) { + prev = m; + m = bkey_next(m); + } /* prev is in the tree, if we merge we're done */ status = BTREE_INSERT_STATUS_BACK_MERGE; diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index a50dcfda656f5f9426eb66ac6620348aa23e2a0d..d795c84246b0184b60d98d01f798cdf36876d99b 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -2,10 +2,10 @@ #ifndef _BCACHE_BSET_H #define _BCACHE_BSET_H -#include #include #include +#include "bcache_ondisk.h" #include "util.h" /* for time_stats */ /* diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index fe6dce125aba226e5f7c05cad39c78b38829389d..98daa9d200f79a92603e420a5438604b91c5df69 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -426,7 +426,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent) do_btree_node_write(b); atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size, - &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); + &b->c->cache->btree_sectors_written); b->written += set_blocks(i, block_bytes(b->c->cache)); } @@ -1161,7 +1161,7 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k) for (i = 0; i < KEY_PTRS(k); i++) SET_PTR_GEN(k, i, - bch_inc_gen(PTR_CACHE(b->c, &b->key, i), + bch_inc_gen(b->c->cache, PTR_BUCKET(b->c, &b->key, i))); mutex_unlock(&b->c->bucket_lock); @@ -2006,8 +2006,7 @@ int bch_btree_check(struct cache_set *c) int i; struct bkey *k = NULL; struct btree_iter iter; - struct btree_check_state *check_state; - char name[32]; + struct btree_check_state check_state; /* check and mark root node keys */ for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) @@ -2018,61 +2017,59 @@ int bch_btree_check(struct cache_set *c) if (c->root->level == 0) return 0; - check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL); - if (!check_state) - return -ENOMEM; - - check_state->c = c; - check_state->total_threads = bch_btree_chkthread_nr(); - check_state->key_idx = 0; - spin_lock_init(&check_state->idx_lock); - atomic_set(&check_state->started, 0); - atomic_set(&check_state->enough, 0); - init_waitqueue_head(&check_state->wait); + memset(&check_state, 0, sizeof(struct btree_check_state)); + check_state.c = c; + check_state.total_threads = bch_btree_chkthread_nr(); + check_state.key_idx = 0; + spin_lock_init(&check_state.idx_lock); + atomic_set(&check_state.started, 0); + atomic_set(&check_state.enough, 0); + init_waitqueue_head(&check_state.wait); + rw_lock(0, c->root, c->root->level); /* * Run multiple threads to check btree nodes in parallel, - * if check_state->enough is non-zero, it means current + * if check_state.enough is non-zero, it means current * running check threads are enough, unncessary to create * more. 
*/ - for (i = 0; i < check_state->total_threads; i++) { - /* fetch latest check_state->enough earlier */ + for (i = 0; i < check_state.total_threads; i++) { + /* fetch latest check_state.enough earlier */ smp_mb__before_atomic(); - if (atomic_read(&check_state->enough)) + if (atomic_read(&check_state.enough)) break; - check_state->infos[i].result = 0; - check_state->infos[i].state = check_state; - snprintf(name, sizeof(name), "bch_btrchk[%u]", i); - atomic_inc(&check_state->started); + check_state.infos[i].result = 0; + check_state.infos[i].state = &check_state; - check_state->infos[i].thread = + check_state.infos[i].thread = kthread_run(bch_btree_check_thread, - &check_state->infos[i], - name); - if (IS_ERR(check_state->infos[i].thread)) { + &check_state.infos[i], + "bch_btrchk[%d]", i); + if (IS_ERR(check_state.infos[i].thread)) { pr_err("fails to run thread bch_btrchk[%d]\n", i); for (--i; i >= 0; i--) - kthread_stop(check_state->infos[i].thread); + kthread_stop(check_state.infos[i].thread); ret = -ENOMEM; goto out; } + atomic_inc(&check_state.started); } - wait_event_interruptible(check_state->wait, - atomic_read(&check_state->started) == 0 || - test_bit(CACHE_SET_IO_DISABLE, &c->flags)); + /* + * Must wait for all threads to stop. + */ + wait_event(check_state.wait, atomic_read(&check_state.started) == 0); - for (i = 0; i < check_state->total_threads; i++) { - if (check_state->infos[i].result) { - ret = check_state->infos[i].result; + for (i = 0; i < check_state.total_threads; i++) { + if (check_state.infos[i].result) { + ret = check_state.infos[i].result; goto out; } } out: - kfree(check_state); + rw_unlock(0, c->root); return ret; } diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index 50482107134f12745066dfab31750693c6bba04a..1b5fdbc0d83eba863d4915ee47b3a8c27643694e 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -226,7 +226,7 @@ struct btree_check_info { int result; }; -#define BCH_BTR_CHKTHREAD_MAX 64 +#define BCH_BTR_CHKTHREAD_MAX 12 struct btree_check_state { struct cache_set *c; int total_threads; diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index b00fd08d696b5f187db535ef9f97fb3e9bee6802..45e7d54a40ff72b2abc1a43ce0818adb5ec0e5da 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -50,7 +50,7 @@ void bch_btree_verify(struct btree *b) v->keys.ops = b->keys.ops; bio = bch_bbio_alloc(b->c); - bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev); + bio_set_dev(bio, b->c->cache->bdev); bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; bio->bi_opf = REQ_OP_READ | REQ_META; diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index f4658a1f37b862efc543c514da21958598d3ec11..d626ffcbecb99c040e3a5ee05e4ba67b525a2847 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -50,7 +50,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) for (i = 0; i < KEY_PTRS(k); i++) if (ptr_available(c, k, i)) { - struct cache *ca = PTR_CACHE(c, k, i); + struct cache *ca = c->cache; size_t bucket = PTR_BUCKET_NR(c, k, i); size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); @@ -71,7 +71,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) for (i = 0; i < KEY_PTRS(k); i++) if (ptr_available(c, k, i)) { - struct cache *ca = PTR_CACHE(c, k, i); + struct cache *ca = c->cache; size_t bucket = PTR_BUCKET_NR(c, k, i); size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); diff --git 
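Moving btree_check_state from the heap onto bch_btree_check()'s stack, as the hunk above does, is only safe because the join logic now waits unconditionally for every checker thread; with the old interruptible wait, a signal could unwind the frame while workers were still writing into it. A pthread model of that invariant (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

struct check_state {            /* lives on the caller's stack */
    int results[NTHREADS];
};

static void *worker(void *p)
{
    int *slot = p;
    *slot = 0;                  /* writes into the caller's stack frame */
    return NULL;
}

int main(void)
{
    struct check_state st;
    pthread_t tid[NTHREADS];
    int i;

    for (i = 0; i < NTHREADS; i++)
        pthread_create(&tid[i], NULL, worker, &st.results[i]);

    /* Must join every worker before 'st' goes out of scope. */
    for (i = 0; i < NTHREADS; i++)
        pthread_join(tid[i], NULL);

    printf("all checks done\n");
    return 0;
}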
a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c index d636b7b2d070c49608aeb910096edfb32bcb4469..634922c5601db7d9f836ea1a49568db1b9876861 100644 --- a/drivers/md/bcache/features.c +++ b/drivers/md/bcache/features.c @@ -6,7 +6,7 @@ * Copyright 2020 Coly Li * */ -#include +#include "bcache_ondisk.h" #include "bcache.h" #include "features.h" @@ -19,7 +19,7 @@ struct feature { static struct feature feature_list[] = { {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE, "large_bucket"}, - {0, 0, 0 }, + {0, 0, NULL }, }; #define compose_feature_string(type) \ diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h index d1c8fd3977fc64461215673c18e67ef9f937d03d..09161b89c63edf993b2a8ac815590fb414b92ef1 100644 --- a/drivers/md/bcache/features.h +++ b/drivers/md/bcache/features.h @@ -2,10 +2,11 @@ #ifndef _BCACHE_FEATURES_H #define _BCACHE_FEATURES_H -#include #include #include +#include "bcache_ondisk.h" + #define BCH_FEATURE_COMPAT 0 #define BCH_FEATURE_RO_COMPAT 1 #define BCH_FEATURE_INCOMPAT 2 diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index dad71a6b78891c149c428d9ebc1758c5b9c357de..e4388fe3ab7ef96ead612d775ce64270ec9ea318 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -36,7 +36,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c) struct bbio *b = container_of(bio, struct bbio, bio); bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); - bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev); + bio_set_dev(bio, c->cache->bdev); b->submit_time_us = local_clock_us(); closure_bio_submit(c, bio, bio->bi_private); @@ -137,7 +137,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, blk_status_t error, const char *m) { struct bbio *b = container_of(bio, struct bbio, bio); - struct cache *ca = PTR_CACHE(c, &b->key, 0); + struct cache *ca = c->cache; int is_read = (bio_data_dir(bio) == READ ? 1 : 0); unsigned int threshold = op_is_write(bio_op(bio)) diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index c6613e817333765aa240053f381a374a7d8f1db8..346a92c43858224dc6ce7ed9216936fe8e915333 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -111,7 +111,7 @@ reread: left = ca->sb.bucket_size - offset; * Check from the oldest jset for last_seq. If * i->j.seq < j->last_seq, it means the oldest jset * in list is expired and useless, remove it from - * this list. Otherwise, j is a condidate jset for + * this list. Otherwise, j is a candidate jset for * further following checks. */ while (!list_empty(list)) { @@ -407,6 +407,11 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list) return ret; } +void bch_journal_space_reserve(struct journal *j) +{ + j->do_reserve = true; +} + /* Journalling */ static void btree_flush_write(struct cache_set *c) @@ -498,7 +503,7 @@ static void btree_flush_write(struct cache_set *c) * - If there are matched nodes recorded in btree_nodes[], * they are clean now (this is why and how the oldest * journal entry can be reclaimed). These selected nodes - * will be ignored and skipped in the folowing for-loop. + * will be ignored and skipped in the following for-loop. 
*/ if (((btree_current_write(b)->journal - fifo_front_p) & mask) != 0) { @@ -625,12 +630,30 @@ static void do_journal_discard(struct cache *ca) } } +static unsigned int free_journal_buckets(struct cache_set *c) +{ + struct journal *j = &c->journal; + struct cache *ca = c->cache; + struct journal_device *ja = &c->cache->journal; + unsigned int n; + + /* In case njournal_buckets is not a power of 2 */ + if (ja->cur_idx >= ja->discard_idx) + n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx; + else + n = ja->discard_idx - ja->cur_idx; + + if (n > (1 + j->do_reserve)) + return n - (1 + j->do_reserve); + + return 0; +} + static void journal_reclaim(struct cache_set *c) { struct bkey *k = &c->journal.key; struct cache *ca = c->cache; uint64_t last_seq; - unsigned int next; struct journal_device *ja = &ca->journal; atomic_t p __maybe_unused; @@ -653,12 +676,10 @@ static void journal_reclaim(struct cache_set *c) if (c->journal.blocks_free) goto out; - next = (ja->cur_idx + 1) % ca->sb.njournal_buckets; - /* No space available on this device */ - if (next == ja->discard_idx) + if (!free_journal_buckets(c)) goto out; - ja->cur_idx = next; + ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets; k->ptr[0] = MAKE_PTR(0, bucket_to_sector(c, ca->sb.d[ja->cur_idx]), ca->sb.nr_this_dev); @@ -768,7 +789,7 @@ static void journal_write_unlocked(struct closure *cl) w->data->csum = csum_set(w->data); for (i = 0; i < KEY_PTRS(k); i++) { - ca = PTR_CACHE(c, k, i); + ca = c->cache; bio = &ca->journal.bio; atomic_long_add(sectors, &ca->meta_sectors_written); diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h index f2ea34d5f431ba860904d074fca567e772e9daa9..cd316b4a1e95f5b2e5ee260599e44d35fe25a805 100644 --- a/drivers/md/bcache/journal.h +++ b/drivers/md/bcache/journal.h @@ -105,6 +105,7 @@ struct journal { spinlock_t lock; spinlock_t flush_write_lock; bool btree_flushing; + bool do_reserve; /* used when waiting because the journal was full */ struct closure_waitlist wait; struct closure io; @@ -182,5 +183,6 @@ int bch_journal_replay(struct cache_set *c, struct list_head *list); void bch_journal_free(struct cache_set *c); int bch_journal_alloc(struct cache_set *c); +void bch_journal_space_reserve(struct journal *j); #endif /* _BCACHE_JOURNAL_H */ diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 2143263831456217af9cd1676dda4a79722c8d7e..c1a1bd7aa9ec47e2a0f6241356240a5ae132de85 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -878,9 +878,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, struct bio *bio, unsigned int sectors) { int ret = MAP_CONTINUE; - unsigned int reada = 0; struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct bio *miss, *cache_bio; + unsigned int size_limit; s->cache_missed = 1; @@ -890,13 +890,10 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, goto out_submit; } - if (!(bio->bi_opf & REQ_RAHEAD) && - !(bio->bi_opf & (REQ_META|REQ_PRIO)) && - s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA) - reada = min_t(sector_t, dc->readahead >> 9, - get_capacity(bio->bi_disk) - bio_end_sector(bio)); - - s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); + /* Limit for a valid replace key size and the number of cache_bio bvecs */ + size_limit = min_t(unsigned int, BIO_MAX_PAGES * PAGE_SECTORS, + (1 << KEY_SIZE_BITS) - 1); + s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio)); s->iop.replace_key = KEY(s->iop.inode,
bio->bi_iter.bi_sector + s->insert_bio_sectors, @@ -908,7 +905,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, s->iop.replace = true; - miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split); + miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO, + &s->d->bio_split); /* btree_search_recurse()'s btree iterator is no good anymore */ ret = miss == bio ? MAP_DONE : -EINTR; @@ -930,9 +928,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO)) goto out_put; - if (reada) - bch_mark_cache_readahead(s->iop.c, s->d); - s->cache_miss = miss; s->iop.bio = cache_bio; bio_get(cache_bio); @@ -1109,6 +1104,12 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio) * which would call closure_get(&dc->disk.cl) */ ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO); + if (!ddip) { + bio->bi_status = BLK_STS_RESOURCE; + bio->bi_end_io(bio); + return; + } + ddip->d = d; /* Count on the bcache device */ ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio); diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c index 503aafe188dce4b6b1ca4a9361ec0a3f637967f6..68b02216033d039b76be143ced490f65f9dc6565 100644 --- a/drivers/md/bcache/stats.c +++ b/drivers/md/bcache/stats.c @@ -46,7 +46,6 @@ read_attribute(cache_misses); read_attribute(cache_bypass_hits); read_attribute(cache_bypass_misses); read_attribute(cache_hit_ratio); -read_attribute(cache_readaheads); read_attribute(cache_miss_collisions); read_attribute(bypassed); @@ -64,7 +63,6 @@ SHOW(bch_stats) DIV_SAFE(var(cache_hits) * 100, var(cache_hits) + var(cache_misses))); - var_print(cache_readaheads); var_print(cache_miss_collisions); sysfs_hprint(bypassed, var(sectors_bypassed) << 9); #undef var @@ -80,17 +78,17 @@ static void bch_stats_release(struct kobject *k) { } -static struct attribute *bch_stats_files[] = { +static struct attribute *bch_stats_attrs[] = { &sysfs_cache_hits, &sysfs_cache_misses, &sysfs_cache_bypass_hits, &sysfs_cache_bypass_misses, &sysfs_cache_hit_ratio, - &sysfs_cache_readaheads, &sysfs_cache_miss_collisions, &sysfs_bypassed, NULL }; +ATTRIBUTE_GROUPS(bch_stats); static KTYPE(bch_stats); int bch_cache_accounting_add_kobjs(struct cache_accounting *acc, @@ -113,7 +111,6 @@ void bch_cache_accounting_clear(struct cache_accounting *acc) acc->total.cache_misses = 0; acc->total.cache_bypass_hits = 0; acc->total.cache_bypass_misses = 0; - acc->total.cache_readaheads = 0; acc->total.cache_miss_collisions = 0; acc->total.sectors_bypassed = 0; } @@ -145,7 +142,6 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at) scale_stat(&stats->cache_misses); scale_stat(&stats->cache_bypass_hits); scale_stat(&stats->cache_bypass_misses); - scale_stat(&stats->cache_readaheads); scale_stat(&stats->cache_miss_collisions); scale_stat(&stats->sectors_bypassed); } @@ -168,7 +164,6 @@ static void scale_accounting(struct timer_list *t) move_stat(cache_misses); move_stat(cache_bypass_hits); move_stat(cache_bypass_misses); - move_stat(cache_readaheads); move_stat(cache_miss_collisions); move_stat(sectors_bypassed); @@ -209,14 +204,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, mark_cache_stats(&c->accounting.collector, hit, bypass); } -void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) -{ - struct cached_dev *dc = container_of(d, struct cached_dev, disk); - - 
atomic_inc(&dc->accounting.collector.cache_readaheads); - atomic_inc(&c->accounting.collector.cache_readaheads); -} - void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d) { struct cached_dev *dc = container_of(d, struct cached_dev, disk); diff --git a/drivers/md/bcache/stats.h b/drivers/md/bcache/stats.h index abfaabf7e7fcf8fee33c8370074551bf5fc91768..ca4f435f7216a7231cf9e1dd4ee8cae56ea93841 100644 --- a/drivers/md/bcache/stats.h +++ b/drivers/md/bcache/stats.h @@ -7,7 +7,6 @@ struct cache_stat_collector { atomic_t cache_misses; atomic_t cache_bypass_hits; atomic_t cache_bypass_misses; - atomic_t cache_readaheads; atomic_t cache_miss_collisions; atomic_t sectors_bypassed; }; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 81f1cc5b34999578b06020875cb2cca30f12e3fb..b5601f200c090ca32a5819c2f8fd74e9e3e67798 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1058,6 +1058,7 @@ static int cached_dev_status_update(void *arg) int bch_cached_dev_run(struct cached_dev *dc) { + int ret = 0; struct bcache_device *d = &dc->disk; char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL); char *env[] = { @@ -1070,19 +1071,15 @@ int bch_cached_dev_run(struct cached_dev *dc) if (dc->io_disable) { pr_err("I/O disabled on cached dev %s\n", dc->backing_dev_name); - kfree(env[1]); - kfree(env[2]); - kfree(buf); - return -EIO; + ret = -EIO; + goto out; } if (atomic_xchg(&dc->running, 1)) { - kfree(env[1]); - kfree(env[2]); - kfree(buf); pr_info("cached dev %s is running already\n", dc->backing_dev_name); - return -EBUSY; + ret = -EBUSY; + goto out; } if (!d->c && @@ -1103,15 +1100,13 @@ int bch_cached_dev_run(struct cached_dev *dc) * only class / kset properties are persistent */ kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env); - kfree(env[1]); - kfree(env[2]); - kfree(buf); if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") || sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache")) { pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n"); - return -ENOMEM; + ret = -ENOMEM; + goto out; } dc->status_update_thread = kthread_run(cached_dev_status_update, @@ -1120,7 +1115,11 @@ int bch_cached_dev_run(struct cached_dev *dc) pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n"); } - return 0; +out: + kfree(env[1]); + kfree(env[2]); + kfree(buf); + return ret; } /* @@ -1151,9 +1150,7 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) static void cached_dev_detach_finish(struct work_struct *w) { struct cached_dev *dc = container_of(w, struct cached_dev, detach); - struct closure cl; - - closure_init_stack(&cl); + struct cache_set *c = dc->disk.c; BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)); BUG_ON(refcount_read(&dc->count)); @@ -1167,17 +1164,11 @@ static void cached_dev_detach_finish(struct work_struct *w) dc->writeback_thread = NULL; } - memset(&dc->sb.set_uuid, 0, 16); - SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); - - bch_write_bdev_super(dc, &cl); - closure_sync(&cl); - mutex_lock(&bch_register_lock); - calc_cached_dev_sectors(dc->disk.c); bcache_device_detach(&dc->disk); list_move(&dc->list, &uncached_devices); + calc_cached_dev_sectors(c); clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags); clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags); @@ -1956,7 +1947,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) goto err; if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, 
bio), - BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) + BIOSET_NEED_RESCUER)) goto err; c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb); @@ -2150,6 +2141,7 @@ static int run_cache_set(struct cache_set *c) flash_devs_run(c); + bch_journal_space_reserve(&c->journal); set_bit(CACHE_SET_RUNNING, &c->flags); return 0; err: @@ -2536,7 +2528,7 @@ static void register_cache_worker(struct work_struct *work) module_put(THIS_MODULE); } -static void register_device_aync(struct async_reg_args *args) +static void register_device_async(struct async_reg_args *args) { if (SB_IS_BDEV(args->sb)) INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker); @@ -2597,8 +2589,6 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, else err = "device busy"; mutex_unlock(&bch_register_lock); - if (!IS_ERR(bdev)) - bdput(bdev); if (attr == &ksysfs_register_quiet) goto done; } @@ -2630,7 +2620,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, args->sb = sb; args->sb_disk = sb_disk; args->bdev = bdev; - register_device_aync(args); + register_device_async(args); /* No wait and returns to user space */ goto async_done; } @@ -2638,8 +2628,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (SB_IS_BDEV(sb)) { struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL); - if (!dc) + if (!dc) { + ret = -ENOMEM; + err = "cannot allocate memory"; goto out_put_sb_page; + } mutex_lock(&bch_register_lock); ret = register_bdev(sb, sb_disk, bdev, dc); @@ -2650,11 +2643,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, } else { struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); - if (!ca) + if (!ca) { + ret = -ENOMEM; + err = "cannot allocate memory"; goto out_put_sb_page; + } /* blkdev_put() will be called in bch_cache_release() */ - if (register_cache(sb, sb_disk, bdev, ca) != 0) + ret = register_cache(sb, sb_disk, bdev, ca); + if (ret) goto out_free_sb; } @@ -2708,8 +2705,8 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k, } list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) { + char *pdev_set_uuid = pdev->dc->sb.set_uuid; list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { - char *pdev_set_uuid = pdev->dc->sb.set_uuid; char *set_uuid = c->set_uuid; if (!memcmp(pdev_set_uuid, set_uuid, 16)) { @@ -2771,7 +2768,7 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x) * The reason bch_register_lock is not held to call * bch_cache_set_stop() and bcache_device_stop() is to * avoid potential deadlock during reboot, because cache - * set or bcache device stopping process will acqurie + * set or bcache device stopping process will acquire * bch_register_lock too. 
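+ *
+ * Editor's illustration (hypothetical call chain, not taken from
+ * the patch) of the self-deadlock being avoided here:
+ *   bcache_reboot()
+ *     mutex_lock(&bch_register_lock);    /* if we held it here... */
+ *     bcache_device_stop(d);
+ *       ...                              /* stopping path */
+ *       mutex_lock(&bch_register_lock);  /* ...this blocks forever */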
* * We are safe here because bcache_is_reboot sets to diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 554e3afc9b688be1b3415d7fb1fdf407f5c192b0..8467e37411a7c65a41d0abd6f0f476a0b39a3b46 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -117,10 +117,14 @@ rw_attribute(writeback_running); rw_attribute(writeback_percent); rw_attribute(writeback_delay); rw_attribute(writeback_rate); +rw_attribute(writeback_consider_fragment); rw_attribute(writeback_rate_update_seconds); rw_attribute(writeback_rate_i_term_inverse); rw_attribute(writeback_rate_p_term_inverse); +rw_attribute(writeback_rate_fp_term_low); +rw_attribute(writeback_rate_fp_term_mid); +rw_attribute(writeback_rate_fp_term_high); rw_attribute(writeback_rate_minimum); read_attribute(writeback_rate_debug); @@ -133,7 +137,6 @@ rw_attribute(io_disable); rw_attribute(discard); rw_attribute(running); rw_attribute(label); -rw_attribute(readahead); rw_attribute(errors); rw_attribute(io_error_limit); rw_attribute(io_error_halflife); @@ -195,6 +198,7 @@ SHOW(__bch_cached_dev) var_printf(bypass_torture_test, "%i"); var_printf(writeback_metadata, "%i"); var_printf(writeback_running, "%i"); + var_printf(writeback_consider_fragment, "%i"); var_print(writeback_delay); var_print(writeback_percent); sysfs_hprint(writeback_rate, @@ -205,6 +209,9 @@ SHOW(__bch_cached_dev) var_print(writeback_rate_update_seconds); var_print(writeback_rate_i_term_inverse); var_print(writeback_rate_p_term_inverse); + var_print(writeback_rate_fp_term_low); + var_print(writeback_rate_fp_term_mid); + var_print(writeback_rate_fp_term_high); var_print(writeback_rate_minimum); if (attr == &sysfs_writeback_rate_debug) { @@ -252,7 +259,6 @@ SHOW(__bch_cached_dev) var_printf(partial_stripes_expensive, "%u"); var_hprint(sequential_cutoff); - var_hprint(readahead); sysfs_print(running, atomic_read(&dc->running)); sysfs_print(state, states[BDEV_STATE(&dc->sb)]); @@ -303,6 +309,7 @@ STORE(__cached_dev) sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test); sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata); sysfs_strtoul_bool(writeback_running, dc->writeback_running); + sysfs_strtoul_bool(writeback_consider_fragment, dc->writeback_consider_fragment); sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX); sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, @@ -331,6 +338,16 @@ STORE(__cached_dev) sysfs_strtoul_clamp(writeback_rate_p_term_inverse, dc->writeback_rate_p_term_inverse, 1, UINT_MAX); + sysfs_strtoul_clamp(writeback_rate_fp_term_low, + dc->writeback_rate_fp_term_low, + 1, dc->writeback_rate_fp_term_mid - 1); + sysfs_strtoul_clamp(writeback_rate_fp_term_mid, + dc->writeback_rate_fp_term_mid, + dc->writeback_rate_fp_term_low + 1, + dc->writeback_rate_fp_term_high - 1); + sysfs_strtoul_clamp(writeback_rate_fp_term_high, + dc->writeback_rate_fp_term_high, + dc->writeback_rate_fp_term_mid + 1, UINT_MAX); sysfs_strtoul_clamp(writeback_rate_minimum, dc->writeback_rate_minimum, 1, UINT_MAX); @@ -346,7 +363,6 @@ STORE(__cached_dev) sysfs_strtoul_clamp(sequential_cutoff, dc->sequential_cutoff, 0, UINT_MAX); - d_strtoi_h(readahead); if (attr == &sysfs_clear_stats) bch_cache_accounting_clear(&dc->accounting); @@ -404,7 +420,7 @@ STORE(__cached_dev) if (!env) return -ENOMEM; add_uevent_var(env, "DRIVER=bcache"); - add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid), + add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid); add_uevent_var(env, "CACHED_LABEL=%s", buf); 
kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, @@ -484,7 +500,7 @@ STORE(bch_cached_dev) return size; } -static struct attribute *bch_cached_dev_files[] = { +static struct attribute *bch_cached_dev_attrs[] = { &sysfs_attach, &sysfs_detach, &sysfs_stop, @@ -499,9 +515,13 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_writeback_delay, &sysfs_writeback_percent, &sysfs_writeback_rate, + &sysfs_writeback_consider_fragment, &sysfs_writeback_rate_update_seconds, &sysfs_writeback_rate_i_term_inverse, &sysfs_writeback_rate_p_term_inverse, + &sysfs_writeback_rate_fp_term_low, + &sysfs_writeback_rate_fp_term_mid, + &sysfs_writeback_rate_fp_term_high, &sysfs_writeback_rate_minimum, &sysfs_writeback_rate_debug, &sysfs_io_errors, @@ -515,7 +535,6 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_running, &sysfs_state, &sysfs_label, - &sysfs_readahead, #ifdef CONFIG_BCACHE_DEBUG &sysfs_verify, &sysfs_bypass_torture_test, @@ -524,6 +543,7 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_backing_dev_uuid, NULL }; +ATTRIBUTE_GROUPS(bch_cached_dev); KTYPE(bch_cached_dev); SHOW(bch_flash_dev) @@ -581,7 +601,7 @@ STORE(__bch_flash_dev) } STORE_LOCKED(bch_flash_dev) -static struct attribute *bch_flash_dev_files[] = { +static struct attribute *bch_flash_dev_attrs[] = { &sysfs_unregister, #if 0 &sysfs_data_csum, @@ -590,6 +610,7 @@ static struct attribute *bch_flash_dev_files[] = { &sysfs_size, NULL }; +ATTRIBUTE_GROUPS(bch_flash_dev); KTYPE(bch_flash_dev); struct bset_stats_op { @@ -936,7 +957,7 @@ static void bch_cache_set_internal_release(struct kobject *k) { } -static struct attribute *bch_cache_set_files[] = { +static struct attribute *bch_cache_set_attrs[] = { &sysfs_unregister, &sysfs_stop, &sysfs_synchronous, @@ -961,9 +982,10 @@ static struct attribute *bch_cache_set_files[] = { &sysfs_clear_stats, NULL }; +ATTRIBUTE_GROUPS(bch_cache_set); KTYPE(bch_cache_set); -static struct attribute *bch_cache_set_internal_files[] = { +static struct attribute *bch_cache_set_internal_attrs[] = { &sysfs_active_journal_entries, sysfs_time_stats_attribute_list(btree_gc, sec, ms) @@ -1003,6 +1025,7 @@ static struct attribute *bch_cache_set_internal_files[] = { &sysfs_feature_incompat, NULL }; +ATTRIBUTE_GROUPS(bch_cache_set_internal); KTYPE(bch_cache_set_internal); static int __bch_cache_cmp(const void *l, const void *r) @@ -1071,8 +1094,10 @@ SHOW(__bch_cache) --n; while (cached < p + n && - *cached == BTREE_PRIO) - cached++, n--; + *cached == BTREE_PRIO) { + cached++; + n--; + } for (i = 0; i < n; i++) sum += INITIAL_PRIO - cached[i]; @@ -1161,7 +1186,7 @@ STORE(__bch_cache) } STORE_LOCKED(bch_cache) -static struct attribute *bch_cache_files[] = { +static struct attribute *bch_cache_attrs[] = { &sysfs_bucket_size, &sysfs_block_size, &sysfs_nbuckets, @@ -1175,4 +1200,5 @@ static struct attribute *bch_cache_files[] = { &sysfs_cache_replacement_policy, NULL }; +ATTRIBUTE_GROUPS(bch_cache); KTYPE(bch_cache); diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h index 215df32f567b9143c192cc2d67b63a5831b150e7..a2ff6447b699f595f6b7a1b0e1fe2504972b067c 100644 --- a/drivers/md/bcache/sysfs.h +++ b/drivers/md/bcache/sysfs.h @@ -9,7 +9,7 @@ struct kobj_type type ## _ktype = { \ .show = type ## _show, \ .store = type ## _store \ }), \ - .default_attrs = type ## _files \ + .default_groups = type ## _groups \ } #define SHOW(fn) \ @@ -51,13 +51,27 @@ STORE(fn) \ #define sysfs_printf(file, fmt, ...) 
\ do { \ if (attr == &sysfs_ ## file) \ - return snprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__); \ + return sysfs_emit(buf, fmt "\n", __VA_ARGS__); \ } while (0) #define sysfs_print(file, var) \ do { \ if (attr == &sysfs_ ## file) \ - return snprint(buf, PAGE_SIZE, var); \ + return sysfs_emit(buf, \ + __builtin_types_compatible_p(typeof(var), int) \ ? "%i\n" : \ __builtin_types_compatible_p(typeof(var), unsigned int) \ ? "%u\n" : \ __builtin_types_compatible_p(typeof(var), long) \ ? "%li\n" : \ __builtin_types_compatible_p(typeof(var), unsigned long)\ ? "%lu\n" : \ __builtin_types_compatible_p(typeof(var), int64_t) \ ? "%lli\n" : \ __builtin_types_compatible_p(typeof(var), uint64_t) \ ? "%llu\n" : \ __builtin_types_compatible_p(typeof(var), const char *) \ ? "%s\n" : "%i\n", var); \ } while (0) #define sysfs_hprint(file, val) \ diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index c029f744319080570704de408d0cbcfff2c788e5..97c524679c8ad87ac5530cc390c17d83966a5dad 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -27,7 +27,7 @@ struct closure; #else /* DEBUG */ -#define EBUG_ON(cond) do { if (cond); } while (0) +#define EBUG_ON(cond) do { if (cond) do {} while (0); } while (0) #define atomic_dec_bug(v) atomic_dec(v) #define atomic_inc_bug(v, i) atomic_inc(v) @@ -342,23 +342,6 @@ static inline int bch_strtoul_h(const char *cp, long *res) _r; \ }) -#define snprint(buf, size, var) \ - snprintf(buf, size, \ - __builtin_types_compatible_p(typeof(var), int) \ - ? "%i\n" : \ - __builtin_types_compatible_p(typeof(var), unsigned int) \ - ? "%u\n" : \ - __builtin_types_compatible_p(typeof(var), long) \ - ? "%li\n" : \ - __builtin_types_compatible_p(typeof(var), unsigned long)\ - ? "%lu\n" : \ - __builtin_types_compatible_p(typeof(var), int64_t) \ - ? "%lli\n" : \ - __builtin_types_compatible_p(typeof(var), uint64_t) \ - ? "%llu\n" : \ - __builtin_types_compatible_p(typeof(var), const char *) \ - ? "%s\n" : "%i\n", var) - ssize_t bch_hprint(char *buf, int64_t v); bool bch_is_zero(const char *p, size_t n); diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 3c74996978dadef2642c3ac716abb4c46abda3c9..3e879e98537343f7db2e2633f511e84dffee048a 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -88,6 +88,44 @@ static void __update_writeback_rate(struct cached_dev *dc) int64_t integral_scaled; uint32_t new_rate; + /* + * We need to consider the number of dirty buckets as well + * when calculating proportional_scaled; otherwise we might end up + * with an unreasonably small writeback rate in a highly fragmented + * situation, where very few dirty sectors consume a lot of dirty + * buckets. The worst case is when the dirty buckets reach + * cutoff_writeback_sync while the dirty data has still not reached + * writeback_percent: the rate then stays at the minimum value, + * which leaves writes stuck in a non-writeback mode.
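+ *
+ * Worked example (editor's sketch, hypothetical numbers): with
+ * bucket_size = 1024 sectors, dirty_buckets = 1000 and dirty = 4000
+ * sectors, fragment = (1000 * 1024) / 4000 = 256 > 3. If
+ * gc_stats.in_use = 60 (between the MID and HIGH thresholds) and
+ * writeback_rate_fp_term_mid keeps its default of 10, then
+ *   fp_term = 10 * (60 - 57) = 30
+ *   fps     = (4000 / 1000) * 30 = 120
+ * and fps replaces proportional_scaled whenever it is larger.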
+ */ + struct cache_set *c = dc->disk.c; + + int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets; + + if (dc->writeback_consider_fragment && + c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) { + int64_t fragment = + div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty); + int64_t fp_term; + int64_t fps; + + if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) { + fp_term = (int64_t)dc->writeback_rate_fp_term_low * + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW); + } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) { + fp_term = (int64_t)dc->writeback_rate_fp_term_mid * + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID); + } else { + fp_term = (int64_t)dc->writeback_rate_fp_term_high * + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH); + } + fps = div_s64(dirty, dirty_buckets) * fp_term; + if (fragment > 3 && fps > proportional_scaled) { + /* Only overwrite the p when fragment > 3 */ + proportional_scaled = fps; + } + } + if ((error < 0 && dc->writeback_rate_integral > 0) || (error > 0 && time_before64(local_clock(), dc->writeback_rate.next + NSEC_PER_MSEC))) { @@ -197,19 +235,27 @@ static void update_writeback_rate(struct work_struct *work) return; } - if (atomic_read(&dc->has_dirty) && dc->writeback_percent) { - /* - * If the whole cache set is idle, set_at_max_writeback_rate() - * will set writeback rate to a max number. Then it is - * unncessary to update writeback rate for an idle cache set - * in maximum writeback rate number(s). - */ - if (!set_at_max_writeback_rate(c, dc)) { - down_read(&dc->writeback_lock); + /* + * If the whole cache set is idle, set_at_max_writeback_rate() + * will set writeback rate to a max number. Then it is + * unnecessary to update writeback rate for an idle cache set + * in maximum writeback rate number(s).
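+ *
+ * Editor's condensed sketch of the lock-contention handling below
+ * (the do { } while (0) wrapper exists only so that break can skip
+ * a single update):
+ *   if (!down_read_trylock(&dc->writeback_lock)) {
+ *       if (++dc->rate_update_retry <= BCH_WBRATE_UPDATE_MAX_SKIPS)
+ *           break;     /* skip this period, retry on the next one */
+ *       down_read(&dc->writeback_lock); /* too many skips: block */
+ *       dc->rate_update_retry = 0;
+ *   }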
+ */ + if (atomic_read(&dc->has_dirty) && dc->writeback_percent && + !set_at_max_writeback_rate(c, dc)) { + do { + if (!down_read_trylock((&dc->writeback_lock))) { + dc->rate_update_retry++; + if (dc->rate_update_retry <= + BCH_WBRATE_UPDATE_MAX_SKIPS) + break; + down_read(&dc->writeback_lock); + dc->rate_update_retry = 0; + } __update_writeback_rate(dc); update_gc_after_writeback(c); up_read(&dc->writeback_lock); - } + } while (0); } @@ -378,7 +424,7 @@ static void read_dirty_endio(struct bio *bio) struct dirty_io *io = w->private; /* is_read = 1 */ - bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0), + bch_count_io_errors(io->dc->disk.c->cache, bio->bi_status, 1, "reading dirty data from cache"); @@ -472,8 +518,7 @@ static void read_dirty(struct cached_dev *dc) dirty_init(w); bio_set_op_attrs(&io->bio, REQ_OP_READ, 0); io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); - bio_set_dev(&io->bio, - PTR_CACHE(dc->disk.c, &w->key, 0)->bdev); + bio_set_dev(&io->bio, dc->disk.c->cache->bdev); io->bio.bi_end_io = read_dirty_endio; if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL)) @@ -548,10 +593,13 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, sectors_dirty = atomic_add_return(s, d->stripe_sectors_dirty + stripe); - if (sectors_dirty == d->stripe_size) - set_bit(stripe, d->full_dirty_stripes); - else - clear_bit(stripe, d->full_dirty_stripes); + if (sectors_dirty == d->stripe_size) { + if (!test_bit(stripe, d->full_dirty_stripes)) + set_bit(stripe, d->full_dirty_stripes); + } else { + if (test_bit(stripe, d->full_dirty_stripes)) + clear_bit(stripe, d->full_dirty_stripes); + } nr_sectors -= s; stripe_offset = 0; @@ -705,6 +753,15 @@ static int bch_writeback_thread(void *arg) * bch_cached_dev_detach(). */ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) { + struct closure cl; + + closure_init_stack(&cl); + memset(&dc->sb.set_uuid, 0, 16); + SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); + + bch_write_bdev_super(dc, &cl); + closure_sync(&cl); + up_write(&dc->writeback_lock); break; } @@ -756,13 +813,11 @@ static int bch_writeback_thread(void *arg) /* Init */ #define INIT_KEYS_EACH_TIME 500000 -#define INIT_KEYS_SLEEP_MS 100 struct sectors_dirty_init { struct btree_op op; unsigned int inode; size_t count; - struct bkey start; }; static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, @@ -778,11 +833,8 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, KEY_START(k), KEY_SIZE(k)); op->count++; - if (atomic_read(&b->c->search_inflight) && - !(op->count % INIT_KEYS_EACH_TIME)) { - bkey_copy_key(&op->start, k); - return -EAGAIN; - } + if (!(op->count % INIT_KEYS_EACH_TIME)) + cond_resched(); return MAP_CONTINUE; } @@ -797,24 +849,16 @@ static int bch_root_node_dirty_init(struct cache_set *c, bch_btree_op_init(&op.op, -1); op.inode = d->id; op.count = 0; - op.start = KEY(op.inode, 0, 0); - - do { - ret = bcache_btree(map_keys_recurse, - k, - c->root, - &op.op, - &op.start, - sectors_dirty_init_fn, - 0); - if (ret == -EAGAIN) - schedule_timeout_interruptible( - msecs_to_jiffies(INIT_KEYS_SLEEP_MS)); - else if (ret < 0) { - pr_warn("sectors dirty init failed, ret=%d!\n", ret); - break; - } - } while (ret == -EAGAIN); + + ret = bcache_btree(map_keys_recurse, + k, + c->root, + &op.op, + &KEY(op.inode, 0, 0), + sectors_dirty_init_fn, + 0); + if (ret < 0) + pr_warn("sectors dirty init failed, ret=%d!\n", ret); return ret; } @@ -858,7 +902,6 @@ static int bch_dirty_init_thread(void *arg) goto out; } skip_nr--; - cond_resched(); } if (p) 
{ @@ -868,7 +911,6 @@ static int bch_dirty_init_thread(void *arg) p = NULL; prev_idx = cur_idx; - cond_resched(); } out: @@ -899,65 +941,56 @@ void bch_sectors_dirty_init(struct bcache_device *d) struct btree_iter iter; struct sectors_dirty_init op; struct cache_set *c = d->c; - struct bch_dirty_init_state *state; - char name[32]; + struct bch_dirty_init_state state; /* Just count root keys if no leaf node */ + rw_lock(0, c->root, c->root->level); if (c->root->level == 0) { bch_btree_op_init(&op.op, -1); op.inode = d->id; op.count = 0; - op.start = KEY(op.inode, 0, 0); for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) sectors_dirty_init_fn(&op.op, c->root, k); - return; - } - state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL); - if (!state) { - pr_warn("sectors dirty init failed: cannot allocate memory\n"); + rw_unlock(0, c->root); return; } - state->c = c; - state->d = d; - state->total_threads = bch_btre_dirty_init_thread_nr(); - state->key_idx = 0; - spin_lock_init(&state->idx_lock); - atomic_set(&state->started, 0); - atomic_set(&state->enough, 0); - init_waitqueue_head(&state->wait); - - for (i = 0; i < state->total_threads; i++) { - /* Fetch latest state->enough earlier */ + memset(&state, 0, sizeof(struct bch_dirty_init_state)); + state.c = c; + state.d = d; + state.total_threads = bch_btre_dirty_init_thread_nr(); + state.key_idx = 0; + spin_lock_init(&state.idx_lock); + atomic_set(&state.started, 0); + atomic_set(&state.enough, 0); + init_waitqueue_head(&state.wait); + + for (i = 0; i < state.total_threads; i++) { + /* Fetch latest state.enough earlier */ smp_mb__before_atomic(); - if (atomic_read(&state->enough)) + if (atomic_read(&state.enough)) break; - state->infos[i].state = state; - atomic_inc(&state->started); - snprintf(name, sizeof(name), "bch_dirty_init[%d]", i); - - state->infos[i].thread = - kthread_run(bch_dirty_init_thread, - &state->infos[i], - name); - if (IS_ERR(state->infos[i].thread)) { + state.infos[i].state = &state; + state.infos[i].thread = + kthread_run(bch_dirty_init_thread, &state.infos[i], + "bch_dirtcnt[%d]", i); + if (IS_ERR(state.infos[i].thread)) { pr_err("fails to run thread bch_dirty_init[%d]\n", i); for (--i; i >= 0; i--) - kthread_stop(state->infos[i].thread); + kthread_stop(state.infos[i].thread); goto out; } + atomic_inc(&state.started); } - wait_event_interruptible(state->wait, - atomic_read(&state->started) == 0 || - test_bit(CACHE_SET_IO_DISABLE, &c->flags)); - out: - kfree(state); + /* Must wait for all threads to stop. 
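+ * (Editor's sketch of the counting protocol, assuming the worker
+ * exit path matches upstream bch_dirty_init_thread():)
+ *   creator: atomic_inc(&state.started);  /* after kthread_run() succeeds */
+ *   worker:  if (atomic_dec_and_test(&state->started))
+ *                wake_up(&state->wait);
+ * so started reaches 0 exactly once, after every spawned thread has
+ * stopped touching the on-stack state.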
*/ + wait_event(state.wait, atomic_read(&state.started) == 0); + rw_unlock(0, c->root); } void bch_cached_dev_writeback_init(struct cached_dev *dc) @@ -968,6 +1001,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_metadata = true; dc->writeback_running = false; + dc->writeback_consider_fragment = true; dc->writeback_percent = 10; dc->writeback_delay = 30; atomic_long_set(&dc->writeback_rate.rate, 1024); @@ -975,8 +1009,14 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT; dc->writeback_rate_p_term_inverse = 40; + dc->writeback_rate_fp_term_low = 1; + dc->writeback_rate_fp_term_mid = 10; + dc->writeback_rate_fp_term_high = 1000; dc->writeback_rate_i_term_inverse = 10000; + /* For dc->writeback_lock contention in update_writeback_rate() */ + dc->rate_update_retry = 0; + WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); } diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 3f1230e22de013a67a8bc332a53d52dacba8990c..31df716951f66b127303da51be522ce943de1064 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -16,7 +16,11 @@ #define BCH_AUTO_GC_DIRTY_THRESHOLD 50 -#define BCH_DIRTY_INIT_THRD_MAX 64 +#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW 50 +#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57 +#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64 + +#define BCH_DIRTY_INIT_THRD_MAX 12 /* * 14 (16384ths) is chosen here as something that each backing device * should be a reasonable fraction of the share, and not to blow up diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 3db92d9a030b9051e1337661b017a719ac998293..595e87e03d4344ce27aa49ec2ad27b5b00697531 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -122,6 +122,19 @@ struct mapped_device { struct srcu_struct io_barrier; }; +/* + * Bits for the flags field of struct mapped_device. + */ +#define DMF_BLOCK_IO_FOR_SUSPEND 0 +#define DMF_SUSPENDED 1 +#define DMF_FROZEN 2 +#define DMF_FREEING 3 +#define DMF_DELETING 4 +#define DMF_NOFLUSH_SUSPENDING 5 +#define DMF_DEFERRED_REMOVE 6 +#define DMF_SUSPENDED_INTERNALLY 7 +#define DMF_POST_SUSPENDING 8 + void disable_discard(struct mapped_device *md); void disable_write_same(struct mapped_device *md); void disable_write_zeroes(struct mapped_device *md); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 2aa4acd33af3903506824331181179f856502b97..b9677f701b6a11a0011683dc01f0721aa8f07285 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -2561,7 +2561,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string static int get_key_size(char **key_string) { - return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1; + return (*key_string[0] == ':') ? 
-EINVAL : (int)(strlen(*key_string) >> 1); } #endif /* CONFIG_KEYS */ diff --git a/drivers/md/dm-historical-service-time.c b/drivers/md/dm-historical-service-time.c index 186f91e2752c1862be6d23b038329b845581b611..06fe43c13ba38b396eb7268d945ee11d97cd5526 100644 --- a/drivers/md/dm-historical-service-time.c +++ b/drivers/md/dm-historical-service-time.c @@ -429,7 +429,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps, { struct selector *s = ps->context; struct path_info *pi = NULL, *best = NULL; - u64 time_now = sched_clock(); + u64 time_now = ktime_get_ns(); struct dm_path *ret = NULL; unsigned long flags; @@ -470,7 +470,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path, static u64 path_service_time(struct path_info *pi, u64 start_time) { - u64 sched_now = ktime_get_ns(); + u64 now = ktime_get_ns(); /* if a previous disk request has finished after this IO was * sent to the hardware, pretend the submission happened * @@ -479,11 +479,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time) if (time_after64(pi->last_finish, start_time)) start_time = pi->last_finish; - pi->last_finish = sched_now; - if (time_before64(sched_now, start_time)) + pi->last_finish = now; + if (time_before64(now, start_time)) return 0; - return sched_now - start_time; + return now - start_time; } static int hst_end_io(struct path_selector *ps, struct dm_path *path, diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 4c7da1c4e6cb9e50c2b828e41a0716c3ad12a4c1..6f085e96c3f335f2bd0d37883a6fe17b4524dd91 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -2354,9 +2354,11 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, dm_integrity_io_error(ic, "invalid sector in journal", -EIO); sec &= ~(sector_t)(ic->sectors_per_block - 1); } + if (unlikely(sec >= ic->provided_data_sectors)) { + journal_entry_set_unused(je); + continue; + } } - if (unlikely(sec >= ic->provided_data_sectors)) - continue; get_area_and_offset(ic, sec, &area, &offset); restore_last_bytes(ic, access_journal_data(ic, i, j), je); for (k = j + 1; k < ic->journal_section_entries; k++) { @@ -4230,6 +4232,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) } if (ic->internal_hash) { + size_t recalc_tags_size; ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); if (!ic->recalc_wq ) { ti->error = "Cannot allocate workqueue"; @@ -4243,8 +4246,10 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) r = -ENOMEM; goto bad; } - ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, - ic->tag_size, GFP_KERNEL); + recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size; + if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size) + recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size; + ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL); if (!ic->recalc_tags) { ti->error = "Cannot allocate tags for recalculating"; r = -ENOMEM; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 1ca65b434f1faef16ec606319e4afd5dbdc64405..b839705654d4ed1fc14e2c7ae44ccc690780eec7 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -17,6 +17,7 @@ #include <linux/dm-ioctl.h> #include <linux/hdreg.h> #include <linux/compat.h> +#include <linux/nospec.h> #include <linux/uaccess.h> @@ -1696,6 +1697,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) if (unlikely(cmd >= ARRAY_SIZE(_ioctls))) return NULL; + cmd = array_index_nospec(cmd,
ARRAY_SIZE(_ioctls)); *ioctl_flags = _ioctls[cmd].flags; return _ioctls[cmd].fn; } diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index be708b7c66a1200805d73ed80e3e3532a64fcff7..a75929b222a435867e6b89576196ab6bd4f36cb6 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -490,6 +490,14 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx, struct mapped_device *md = tio->md; struct dm_target *ti = md->immutable_target; + /* + * blk-mq's unquiesce may come from outside events, such as + * elevator switch, updating nr_requests or others, and request may + * come during suspend, so simply ask for blk-mq to requeue it. + */ + if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) + return BLK_STS_RESOURCE; + if (unlikely(!ti)) { int srcu_idx; struct dm_table *map; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3403722b1688225ace20223fbadeda2872a78c1b..f1db4a9d1dff4442d13cc4ea33ca3050117efd93 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -132,19 +132,6 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); #define MINOR_ALLOCED ((void *)-1) -/* - * Bits for the md->flags field. - */ -#define DMF_BLOCK_IO_FOR_SUSPEND 0 -#define DMF_SUSPENDED 1 -#define DMF_FROZEN 2 -#define DMF_FREEING 3 -#define DMF_DELETING 4 -#define DMF_NOFLUSH_SUSPENDING 5 -#define DMF_DEFERRED_REMOVE 6 -#define DMF_SUSPENDED_INTERNALLY 7 -#define DMF_POST_SUSPENDING 8 - #define DM_NUMA_NODE NUMA_NO_NODE static int dm_numa_node = DM_NUMA_NODE; @@ -607,18 +594,17 @@ static void start_io_acct(struct dm_io *io) false, 0, &io->stats_aux); } -static void end_io_acct(struct dm_io *io) +static void end_io_acct(struct mapped_device *md, struct bio *bio, + unsigned long start_time, struct dm_stats_aux *stats_aux) { - struct mapped_device *md = io->md; - struct bio *bio = io->orig_bio; - unsigned long duration = jiffies - io->start_time; + unsigned long duration = jiffies - start_time; - bio_end_io_acct(bio, io->start_time); + bio_end_io_acct(bio, start_time); if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), bio->bi_iter.bi_sector, bio_sectors(bio), - true, duration, &io->stats_aux); + true, duration, stats_aux); /* nudge anyone waiting on suspend queue */ if (unlikely(wq_has_sleeper(&md->wait))) @@ -903,6 +889,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error) blk_status_t io_error; struct bio *bio; struct mapped_device *md = io->md; + unsigned long start_time = 0; + struct dm_stats_aux stats_aux; /* Push-back supersedes any I/O errors */ if (unlikely(error)) { @@ -929,8 +917,10 @@ static void dec_pending(struct dm_io *io, blk_status_t error) io_error = io->status; bio = io->orig_bio; - end_io_acct(io); + start_time = io->start_time; + stats_aux = io->stats_aux; free_io(md, io); + end_io_acct(md, bio, start_time, &stats_aux); if (io_error == BLK_STS_DM_REQUEUE) return; @@ -1894,8 +1884,10 @@ static struct mapped_device *alloc_dev(int minor) if (IS_ENABLED(CONFIG_DAX_DRIVER)) { md->dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops, 0); - if (IS_ERR(md->dax_dev)) + if (IS_ERR(md->dax_dev)) { + md->dax_dev = NULL; goto bad; + } } add_disk_no_queue_reg(md->disk); diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index ef6e78d45d5b8bfe8e63200611b97131bbebd45e..ee3e63aa864bfd9ffbf79282018025e781505dd8 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -83,14 +83,16 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, } static 
int insert_at(size_t value_size, struct btree_node *node, unsigned index, - uint64_t key, void *value) - __dm_written_to_disk(value) + uint64_t key, void *value) + __dm_written_to_disk(value) { uint32_t nr_entries = le32_to_cpu(node->header.nr_entries); + uint32_t max_entries = le32_to_cpu(node->header.max_entries); __le64 key_le = cpu_to_le64(key); if (index > nr_entries || - index >= le32_to_cpu(node->header.max_entries)) { + index >= max_entries || + nr_entries >= max_entries) { DMERR("too many entries in btree node for insert"); __dm_unbless_for_disk(value); return -ENOMEM; } diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index a213bf11738fbbb10448d09abfb8d532679f011e..85853ab6297171c755114929712322fc682170c3 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -281,6 +281,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result) struct disk_index_entry ie_disk; struct dm_block *blk; + if (b >= ll->nr_blocks) { + DMERR_LIMIT("metadata block out of bounds"); + return -EINVAL; + } + b = do_div(index, ll->entries_per_block); r = ll->load_ie(ll, index, &ie_disk); if (r < 0) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index da6772f49f076d01953fcc1e96974e681772aa76..9fccbf9160156fb5a17d3b1c529d8805534fc3c8 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1326,6 +1326,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, struct raid1_plug_cb *plug = NULL; int first_clone; int max_sectors; + bool write_behind = false; if (mddev_is_clustered(mddev) && md_cluster_ops->area_resyncing(mddev, WRITE, @@ -1378,6 +1379,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, max_sectors = r1_bio->sectors; for (i = 0; i < disks; i++) { struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev); + + /* + * The write-behind io is only attempted on drives marked as + * write-mostly, which means we may allocate a write-behind + * bio later. + */ + if (rdev && test_bit(WriteMostly, &rdev->flags)) + write_behind = true; + if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { atomic_inc(&rdev->nr_pending); blocked_rdev = rdev; @@ -1452,6 +1462,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, goto retry_write; } + /* + * When using a bitmap, we may call alloc_behind_master_bio below. + * alloc_behind_master_bio allocates a copy of the data payload a page + * at a time and thus needs a new bio that can fit the whole payload + * of this bio in page sized chunks.
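+ *
+ * Editor's sketch of the clamp applied right after this comment:
+ * one bio can carry at most BIO_MAX_PAGES full pages, so assuming
+ * 4 KiB pages,
+ *   BIO_MAX_PAGES * (PAGE_SIZE >> 9) = 256 * 8 = 2048 sectors (1 MiB)
+ * bounds max_sectors before the write-behind copy is allocated.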
+ */ + if (write_behind && bitmap) + max_sectors = min_t(int, max_sectors, + BIO_MAX_PAGES * (PAGE_SIZE >> 9)); if (max_sectors < bio_sectors(bio)) { struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, &conf->bio_split); @@ -1479,6 +1498,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, * allocate memory, or a reader on WriteMostly * is waiting for behind writes to flush */ if (bitmap && + test_bit(WriteMostly, &rdev->flags) && (atomic_read(&bitmap->behind_writes) < mddev->bitmap_info.max_write_behind) && !waitqueue_active(&bitmap->behind_wait)) { diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index a6d073f2e036aa34e6a3fd0b14f3a6755544c1a8..d157af63be417ab40fc85eb60b04838e5c90f020 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -142,10 +142,10 @@ config MEDIA_TEST_SUPPORT prompt "Test drivers" if MEDIA_SUPPORT_FILTER default y if !MEDIA_SUPPORT_FILTER help - Those drivers should not be used on production Kernels, but - can be useful on debug ones. It enables several dummy drivers - that simulate a real hardware. Very useful to test userspace - applications and to validate if the subsystem core is doesn't + These drivers should not be used on production kernels, but + can be useful on debug ones. This option enables several dummy drivers + that simulate real hardware. Very useful to test userspace + applications and to validate if the subsystem core doesn't have regressions. Say Y if you want to use some virtual test driver. diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c index f006bd8eec63c0808f8dd09fb3ced709ae3642a5..f8452a1f9fc6c13bd14a22ec1b2679d8b3b78712 100644 --- a/drivers/media/cec/core/cec-pin.c +++ b/drivers/media/cec/core/cec-pin.c @@ -1033,6 +1033,7 @@ static int cec_pin_thread_func(void *_adap) { struct cec_adapter *adap = _adap; struct cec_pin *pin = adap->pin; + bool irq_enabled = false; for (;;) { wait_event_interruptible(pin->kthread_waitq, @@ -1060,6 +1061,7 @@ static int cec_pin_thread_func(void *_adap) ns_to_ktime(pin->work_rx_msg.rx_ts)); msg->len = 0; } + if (pin->work_tx_status) { unsigned int tx_status = pin->work_tx_status; @@ -1083,27 +1085,39 @@ static int cec_pin_thread_func(void *_adap) switch (atomic_xchg(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED)) { case CEC_PIN_IRQ_DISABLE: - pin->ops->disable_irq(adap); + if (irq_enabled) { + pin->ops->disable_irq(adap); + irq_enabled = false; + } cec_pin_high(pin); cec_pin_to_idle(pin); hrtimer_start(&pin->timer, ns_to_ktime(0), HRTIMER_MODE_REL); break; case CEC_PIN_IRQ_ENABLE: + if (irq_enabled) + break; pin->enable_irq_failed = !pin->ops->enable_irq(adap); if (pin->enable_irq_failed) { cec_pin_to_idle(pin); hrtimer_start(&pin->timer, ns_to_ktime(0), HRTIMER_MODE_REL); + } else { + irq_enabled = true; } break; default: break; } - if (kthread_should_stop()) break; } + if (pin->ops->disable_irq && irq_enabled) + pin->ops->disable_irq(adap); + hrtimer_cancel(&pin->timer); + cec_pin_read(pin); + cec_pin_to_idle(pin); + pin->state = CEC_ST_OFF; return 0; } @@ -1130,13 +1144,7 @@ static int cec_pin_adap_enable(struct cec_adapter *adap, bool enable) hrtimer_start(&pin->timer, ns_to_ktime(0), HRTIMER_MODE_REL); } else { - if (pin->ops->disable_irq) - pin->ops->disable_irq(adap); - hrtimer_cancel(&pin->timer); kthread_stop(pin->kthread); - cec_pin_read(pin); - cec_pin_to_idle(pin); - pin->state = CEC_ST_OFF; } return 0; } @@ -1157,11 +1165,8 @@ void cec_pin_start_timer(struct cec_pin *pin) if (pin->state != CEC_ST_RX_IRQ) return; - 
atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_UNCHANGED); - pin->ops->disable_irq(pin->adap); - cec_pin_high(pin); - cec_pin_to_idle(pin); - hrtimer_start(&pin->timer, ns_to_ktime(0), HRTIMER_MODE_REL); + atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE); + wake_up_interruptible(&pin->kthread_waitq); } static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts, diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c index d6531874faa650f0e8367ac8d37e0490e299b7db..8047e305f3d014c768c91e83e3ff491409ea8460 100644 --- a/drivers/media/common/saa7146/saa7146_fops.c +++ b/drivers/media/common/saa7146/saa7146_fops.c @@ -523,7 +523,7 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv) ERR("out of memory. aborting.\n"); kfree(vv); v4l2_ctrl_handler_free(hdl); - return -1; + return -ENOMEM; } saa7146_video_uops.init(dev,vv); diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c index 2f3a5996d3fc90a62e01f1e6c62876ace0c75ddb..fe626109ef4db62b381d5c0959fa0a693bea5d5e 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c @@ -150,7 +150,7 @@ static void *vb2_dc_alloc(struct device *dev, unsigned long attrs, buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr, GFP_KERNEL | gfp_flags, buf->attrs); if (!buf->cookie) { - dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size); + dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size); kfree(buf); return ERR_PTR(-ENOMEM); } @@ -196,9 +196,9 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) vma->vm_ops->open(vma); - pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n", - __func__, (unsigned long)buf->dma_addr, vma->vm_start, - buf->size); + pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n", + __func__, (unsigned long)buf->dma_addr, vma->vm_start, + buf->size); return 0; } diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index f14a872d126872c3fc695cbd3f8460c32d317b14..e58cb8434dafeea3323ea892028be6ffde3ca155 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -1413,7 +1413,7 @@ static const struct dvb_device dvbdev_dvr = { }; int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter) { - int i; + int i, ret; if (dmxdev->demux->open(dmxdev->demux) < 0) return -EUSERS; @@ -1432,14 +1432,26 @@ int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter) DMXDEV_STATE_FREE); } - dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev, + ret = dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev, DVB_DEVICE_DEMUX, dmxdev->filternum); - dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr, + if (ret < 0) + goto err_register_dvbdev; + + ret = dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr, dmxdev, DVB_DEVICE_DVR, dmxdev->filternum); + if (ret < 0) + goto err_register_dvr_dvbdev; dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192); return 0; + +err_register_dvr_dvbdev: + dvb_unregister_device(dmxdev->dvbdev); +err_register_dvbdev: + vfree(dmxdev->filter); + dmxdev->filter = NULL; + return ret; } EXPORT_SYMBOL(dvb_dmxdev_init); diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index bb02354a48b81b4c6c6ff3c23558eb1df31afc34..d67f2dd997d06188bba43fb266829ffd298e8a82 100644 --- 
a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -4473,8 +4473,10 @@ static struct dvb_frontend *dib8000_init(struct i2c_adapter *i2c_adap, u8 i2c_ad state->timf_default = cfg->pll->timf; - if (dib8000_identify(&state->i2c) == 0) + if (dib8000_identify(&state->i2c) == 0) { + kfree(fe); goto error; + } dibx000_init_i2c_master(&state->i2c_master, DIB8000, state->i2c.adap, state->i2c.addr); diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c index ab7883cff8b2236064f6d8870d398f8eabc1407f..9f5713b76794d686c17bff3b7d0a3dedc7d10505 100644 --- a/drivers/media/i2c/adv7511-v4l2.c +++ b/drivers/media/i2c/adv7511-v4l2.c @@ -555,7 +555,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_ buffer[3] = 0; buffer[3] = hdmi_infoframe_checksum(buffer, len + 4); - if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) { + if (hdmi_infoframe_unpack(&frame, buffer, len + 4) < 0) { v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc); return; } diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index d1f58795794fd0a2c2278c1692cdfc7ed01e5f81..8cf1704308bf5aaef736debb1a45dad5d930445a 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -2454,7 +2454,7 @@ static int adv76xx_read_infoframe(struct v4l2_subdev *sd, int index, buffer[i + 3] = infoframe_read(sd, adv76xx_cri[index].payload_addr + i); - if (hdmi_infoframe_unpack(frame, buffer, sizeof(buffer)) < 0) { + if (hdmi_infoframe_unpack(frame, buffer, len + 3) < 0) { v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, adv76xx_cri[index].desc); return -ENOENT; diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index f7d2b6cd3008b3e4e9bf0e46de05b581cd779aea..a870117feb44c550b65c02c8b7b4b1d8e5afeb2b 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -2574,7 +2574,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7842_cfg_read_ for (i = 0; i < len; i++) buffer[i + 3] = infoframe_read(sd, cri->payload_addr + i); - if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) { + if (hdmi_infoframe_unpack(&frame, buffer, len + 3) < 0) { v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc); return; } diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c index a9d9520a94c6d87437aa6e839099e569b28f91c8..c9e6c7d6637682cbf53c651a604b79cd9e0824df 100644 --- a/drivers/media/pci/b2c2/flexcop-pci.c +++ b/drivers/media/pci/b2c2/flexcop-pci.c @@ -185,6 +185,8 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id) dma_addr_t cur_addr = fc->read_ibi_reg(fc,dma1_008).dma_0x8.dma_cur_addr << 2; u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0; + if (cur_pos > fc_pci->dma[0].size * 2) + goto error; deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, last_cur_pos: %08x ", jiffies_to_usecs(jiffies - fc_pci->last_irq), @@ -225,6 +227,7 @@ static irqreturn_t flexcop_pci_isr(int irq, void *dev_id) ret = IRQ_NONE; } +error: spin_unlock_irqrestore(&fc_pci->irq_lock, flags); return ret; } diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index 35a51e9b539da194c766e72b7156225db043f9a2..1f0e4b913a053485493a90bef6b049fbefc46a42 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -3898,7 +3898,7 @@ static int bttv_register_video(struct bttv *btv) /* video */ vdev_init(btv, &btv->video_dev, 
&bttv_video_template, "video"); - btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | + btv->video_dev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; if (btv->tuner_type != TUNER_ABSENT) btv->video_dev.device_caps |= V4L2_CAP_TUNER; @@ -3919,7 +3919,7 @@ static int bttv_register_video(struct bttv *btv) /* vbi */ vdev_init(btv, &btv->vbi_dev, &bttv_video_template, "vbi"); btv->vbi_dev.device_caps = V4L2_CAP_VBI_CAPTURE | V4L2_CAP_READWRITE | - V4L2_CAP_STREAMING | V4L2_CAP_TUNER; + V4L2_CAP_STREAMING; if (btv->tuner_type != TUNER_ABSENT) btv->vbi_dev.device_caps |= V4L2_CAP_TUNER; diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c index a57c991b165b1ab84ddcd4edf79cd68dd62c8ac0..10d2971ef0624ff2b87119567d7a099565245b81 100644 --- a/drivers/media/pci/cx88/cx88-mpeg.c +++ b/drivers/media/pci/cx88/cx88-mpeg.c @@ -162,6 +162,9 @@ int cx8802_start_dma(struct cx8802_dev *dev, cx_write(MO_TS_GPCNTRL, GP_COUNT_CONTROL_RESET); q->count = 0; + /* clear interrupt status register */ + cx_write(MO_TS_INTSTAT, 0x1f1111); + /* enable irqs */ dprintk(1, "setting the interrupt mask\n"); cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_TSINT); diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h index e5efe525ad7bf91dc726c78581d91a5ddce9a75e..00caf60ff98903f4fcf9c0209768225ee883d565 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.h +++ b/drivers/media/pci/ivtv/ivtv-driver.h @@ -332,7 +332,6 @@ struct ivtv_stream { struct ivtv *itv; /* for ease of use */ const char *name; /* name of the stream */ int type; /* stream type */ - u32 caps; /* V4L2 capabilities */ struct v4l2_fh *fh; /* pointer to the streaming filehandle */ spinlock_t qlock; /* locks access to the queues */ diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c index 35dccb31174c1e82d77ba72d6a58b7b9af7b34e8..a9d69b253516b610d9b772b7dd56d67ba79478b2 100644 --- a/drivers/media/pci/ivtv/ivtv-ioctl.c +++ b/drivers/media/pci/ivtv/ivtv-ioctl.c @@ -443,7 +443,7 @@ static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_f struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; struct v4l2_window *winfmt = &fmt->fmt.win; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; if (!itv->osd_video_pbase) return -EINVAL; @@ -554,7 +554,7 @@ static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2 u32 chromakey = fmt->fmt.win.chromakey; u8 global_alpha = fmt->fmt.win.global_alpha; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -EINVAL; if (!itv->osd_video_pbase) return -EINVAL; @@ -1388,7 +1388,7 @@ static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb) 0, }; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -ENOTTY; if (!itv->osd_video_pbase) return -ENOTTY; @@ -1455,7 +1455,7 @@ static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffe struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; struct yuv_playback_info *yi = &itv->yuv_info; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -ENOTTY; if (!itv->osd_video_pbase) return -ENOTTY; @@ -1475,7 +1475,7 @@ static int ivtv_overlay(struct file *file, void *fh, unsigned int on) struct 
ivtv *itv = id->itv; struct ivtv_stream *s = &itv->streams[fh2id(fh)->type]; - if (!(s->caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) + if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY)) return -ENOTTY; if (!itv->osd_video_pbase) return -ENOTTY; diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c index f04ee84bab5fd38690914860ea6c7db7b51ae318..f9de5d1605fe37aea734d0e1f24963ea099cab11 100644 --- a/drivers/media/pci/ivtv/ivtv-streams.c +++ b/drivers/media/pci/ivtv/ivtv-streams.c @@ -176,7 +176,7 @@ static void ivtv_stream_init(struct ivtv *itv, int type) s->itv = itv; s->type = type; s->name = ivtv_stream_info[type].name; - s->caps = ivtv_stream_info[type].v4l2_caps; + s->vdev.device_caps = ivtv_stream_info[type].v4l2_caps; if (ivtv_stream_info[type].pio) s->dma = PCI_DMA_NONE; @@ -299,12 +299,9 @@ static int ivtv_reg_dev(struct ivtv *itv, int type) if (s_mpg->vdev.v4l2_dev) num = s_mpg->vdev.num + ivtv_stream_info[type].num_offset; } - s->vdev.device_caps = s->caps; - if (itv->osd_video_pbase) { - itv->streams[IVTV_DEC_STREAM_TYPE_YUV].vdev.device_caps |= - V4L2_CAP_VIDEO_OUTPUT_OVERLAY; - itv->streams[IVTV_DEC_STREAM_TYPE_MPG].vdev.device_caps |= - V4L2_CAP_VIDEO_OUTPUT_OVERLAY; + if (itv->osd_video_pbase && (type == IVTV_DEC_STREAM_TYPE_YUV || + type == IVTV_DEC_STREAM_TYPE_MPG)) { + s->vdev.device_caps |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY; itv->v4l2_cap |= V4L2_CAP_VIDEO_OUTPUT_OVERLAY; } video_set_drvdata(&s->vdev, s); diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c index 7a1fb067b0e09b81e6453abd006edcc623e05978..d3cde05a6ebab80027cb5722c9d3eccb836022a6 100644 --- a/drivers/media/pci/saa7134/saa7134-alsa.c +++ b/drivers/media/pci/saa7134/saa7134-alsa.c @@ -1214,16 +1214,14 @@ static int alsa_device_exit(struct saa7134_dev *dev) static int saa7134_alsa_init(void) { - struct saa7134_dev *dev = NULL; - struct list_head *list; + struct saa7134_dev *dev; saa7134_dmasound_init = alsa_device_init; saa7134_dmasound_exit = alsa_device_exit; pr_info("saa7134 ALSA driver for DMA sound loaded\n"); - list_for_each(list,&saa7134_devlist) { - dev = list_entry(list, struct saa7134_dev, devlist); + list_for_each_entry(dev, &saa7134_devlist, devlist) { if (dev->pci->device == PCI_DEVICE_ID_PHILIPS_SAA7130) pr_info("%s/alsa: %s doesn't support digital audio\n", dev->name, saa7134_boards[dev->board].name); @@ -1231,7 +1229,7 @@ static int saa7134_alsa_init(void) alsa_device_init(dev); } - if (dev == NULL) + if (list_empty(&saa7134_devlist)) pr_info("saa7134 ALSA: no saa7134 cards found\n"); return 0; diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c index 2214c74bbbf1548b12b8122827a25c0b61a45756..3947701cd6c7e76835d3d8e58321a5a75c7d9c46 100644 --- a/drivers/media/pci/saa7146/hexium_gemini.c +++ b/drivers/media/pci/saa7146/hexium_gemini.c @@ -284,7 +284,12 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d hexium_set_input(hexium, 0); hexium->cur_input = 0; - saa7146_vv_init(dev, &vv_data); + ret = saa7146_vv_init(dev, &vv_data); + if (ret) { + i2c_del_adapter(&hexium->i2c_adapter); + kfree(hexium); + return ret; + } vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input; vv_data.vid_ops.vidioc_g_input = vidioc_g_input; diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c index 39d14c179d229da31c1d76a7c067b56a24c2883d..2eb4bee16b71f28175854cc636472c903f0f4228 100644 --- 
a/drivers/media/pci/saa7146/hexium_orion.c +++ b/drivers/media/pci/saa7146/hexium_orion.c @@ -355,10 +355,16 @@ static struct saa7146_ext_vv vv_data; static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info) { struct hexium *hexium = (struct hexium *) dev->ext_priv; + int ret; DEB_EE("\n"); - saa7146_vv_init(dev, &vv_data); + ret = saa7146_vv_init(dev, &vv_data); + if (ret) { + pr_err("Error in saa7146_vv_init()\n"); + return ret; + } + vv_data.vid_ops.vidioc_enum_input = vidioc_enum_input; vv_data.vid_ops.vidioc_g_input = vidioc_g_input; vv_data.vid_ops.vidioc_s_input = vidioc_s_input; diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c index 73fc901ecf3db5ecd7312bb29af338886717244a..bf0b9b0914cd53cddf78c56eb4db2099302f7742 100644 --- a/drivers/media/pci/saa7146/mxb.c +++ b/drivers/media/pci/saa7146/mxb.c @@ -683,10 +683,16 @@ static struct saa7146_ext_vv vv_data; static int mxb_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info) { struct mxb *mxb; + int ret; DEB_EE("dev:%p\n", dev); - saa7146_vv_init(dev, &vv_data); + ret = saa7146_vv_init(dev, &vv_data); + if (ret) { + ERR("Error in saa7146_vv_init()"); + return ret; + } + if (mxb_probe(dev)) { saa7146_vv_release(dev); return -1; diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c index 7bb6babdcade049bf7e6a806d19026a687f15a31..757a58829a512a21602f7902ee997c631a5e19c5 100644 --- a/drivers/media/platform/aspeed-video.c +++ b/drivers/media/platform/aspeed-video.c @@ -151,7 +151,7 @@ #define VE_SRC_TB_EDGE_DET_BOT GENMASK(28, VE_SRC_TB_EDGE_DET_BOT_SHF) #define VE_MODE_DETECT_STATUS 0x098 -#define VE_MODE_DETECT_H_PIXELS GENMASK(11, 0) +#define VE_MODE_DETECT_H_PERIOD GENMASK(11, 0) #define VE_MODE_DETECT_V_LINES_SHF 16 #define VE_MODE_DETECT_V_LINES GENMASK(27, VE_MODE_DETECT_V_LINES_SHF) #define VE_MODE_DETECT_STATUS_VSYNC BIT(28) @@ -162,6 +162,8 @@ #define VE_SYNC_STATUS_VSYNC_SHF 16 #define VE_SYNC_STATUS_VSYNC GENMASK(27, VE_SYNC_STATUS_VSYNC_SHF) +#define VE_H_TOTAL_PIXELS 0x0A0 + #define VE_INTERRUPT_CTRL 0x304 #define VE_INTERRUPT_STATUS 0x308 #define VE_INTERRUPT_MODE_DETECT_WD BIT(0) @@ -500,6 +502,10 @@ static void aspeed_video_enable_mode_detect(struct aspeed_video *video) aspeed_video_update(video, VE_INTERRUPT_CTRL, 0, VE_INTERRUPT_MODE_DETECT); + /* Disable mode detect in order to re-trigger */ + aspeed_video_update(video, VE_SEQ_CTRL, + VE_SEQ_CTRL_TRIG_MODE_DET, 0); + /* Trigger mode detect */ aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_TRIG_MODE_DET); } @@ -552,6 +558,8 @@ static void aspeed_video_irq_res_change(struct aspeed_video *video, ulong delay) set_bit(VIDEO_RES_CHANGE, &video->flags); clear_bit(VIDEO_FRAME_INPRG, &video->flags); + video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; + aspeed_video_off(video); aspeed_video_bufs_done(video, VB2_BUF_STATE_ERROR); @@ -759,6 +767,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) u32 src_lr_edge; u32 src_tb_edge; u32 sync; + u32 htotal; struct v4l2_bt_timings *det = &video->detected_timings; det->width = MIN_WIDTH; @@ -786,10 +795,6 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) return; } - /* Disable mode detect in order to re-trigger */ - aspeed_video_update(video, VE_SEQ_CTRL, - VE_SEQ_CTRL_TRIG_MODE_DET, 0); - aspeed_video_check_and_set_polarity(video); aspeed_video_enable_mode_detect(video); @@ -807,6 +812,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) src_tb_edge = 
aspeed_video_read(video, VE_SRC_TB_EDGE_DET); mds = aspeed_video_read(video, VE_MODE_DETECT_STATUS); sync = aspeed_video_read(video, VE_SYNC_STATUS); + htotal = aspeed_video_read(video, VE_H_TOTAL_PIXELS); video->frame_bottom = (src_tb_edge & VE_SRC_TB_EDGE_DET_BOT) >> VE_SRC_TB_EDGE_DET_BOT_SHF; @@ -823,8 +829,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) VE_SRC_LR_EDGE_DET_RT_SHF; video->frame_left = src_lr_edge & VE_SRC_LR_EDGE_DET_LEFT; det->hfrontporch = video->frame_left; - det->hbackporch = (mds & VE_MODE_DETECT_H_PIXELS) - - video->frame_right; + det->hbackporch = htotal - video->frame_right; det->hsync = sync & VE_SYNC_STATUS_HSYNC; if (video->frame_left > video->frame_right) continue; @@ -1337,7 +1342,6 @@ static void aspeed_video_resolution_work(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct aspeed_video *video = container_of(dwork, struct aspeed_video, res_work); - u32 input_status = video->v4l2_input_status; aspeed_video_on(video); @@ -1350,8 +1354,7 @@ static void aspeed_video_resolution_work(struct work_struct *work) aspeed_video_get_resolution(video); if (video->detected_timings.width != video->active_timings.width || - video->detected_timings.height != video->active_timings.height || - input_status != video->v4l2_input_status) { + video->detected_timings.height != video->active_timings.height) { static const struct v4l2_event ev = { .type = V4L2_EVENT_SOURCE_CHANGE, .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 87a2c706f7477db4003ebcfe0c6578a34a4677b0..2333079a83c71330412f64e930e257a1f3132cc1 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -408,6 +408,7 @@ static struct vdoa_data *coda_get_vdoa_data(void) if (!vdoa_data) vdoa_data = ERR_PTR(-EPROBE_DEFER); + put_device(&vdoa_pdev->dev); out: of_node_put(vdoa_node); @@ -1537,11 +1538,13 @@ static void coda_pic_run_work(struct work_struct *work) if (!wait_for_completion_timeout(&ctx->completion, msecs_to_jiffies(1000))) { - dev_err(dev->dev, "CODA PIC_RUN timeout\n"); + if (ctx->use_bit) { + dev_err(dev->dev, "CODA PIC_RUN timeout\n"); - ctx->hold = true; + ctx->hold = true; - coda_hw_reset(ctx); + coda_hw_reset(ctx); + } if (ctx->ops->run_timeout) ctx->ops->run_timeout(ctx); diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c index b11cfbe166dd3141568c10a16d02406385fa8f80..a72f4655e5ad56312d8fe4c113452f5b3baebe86 100644 --- a/drivers/media/platform/coda/coda-jpeg.c +++ b/drivers/media/platform/coda/coda-jpeg.c @@ -1127,7 +1127,8 @@ static int coda9_jpeg_prepare_encode(struct coda_ctx *ctx) coda_write(dev, 0, CODA9_REG_JPEG_GBU_BT_PTR); coda_write(dev, 0, CODA9_REG_JPEG_GBU_WD_PTR); coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBSR); - coda_write(dev, 0, CODA9_REG_JPEG_BBC_STRM_CTRL); + coda_write(dev, BIT(31) | ((end_addr - start_addr - header_len) / 256), + CODA9_REG_JPEG_BBC_STRM_CTRL); coda_write(dev, 0, CODA9_REG_JPEG_GBU_CTRL); coda_write(dev, 0, CODA9_REG_JPEG_GBU_FF_RPTR); coda_write(dev, 127, CODA9_REG_JPEG_GBU_BBER); @@ -1257,6 +1258,23 @@ static void coda9_jpeg_finish_encode(struct coda_ctx *ctx) coda_hw_reset(ctx); } +static void coda9_jpeg_encode_timeout(struct coda_ctx *ctx) +{ + struct coda_dev *dev = ctx->dev; + u32 end_addr, wr_ptr; + + /* Handle missing BBC overflow interrupt via timeout */ + end_addr = coda_read(dev, 
CODA9_REG_JPEG_BBC_END_ADDR); + wr_ptr = coda_read(dev, CODA9_REG_JPEG_BBC_WR_PTR); + if (wr_ptr >= end_addr - 256) { + v4l2_err(&dev->v4l2_dev, "JPEG too large for capture buffer\n"); + coda9_jpeg_finish_encode(ctx); + return; + } + + coda_hw_reset(ctx); +} + static void coda9_jpeg_release(struct coda_ctx *ctx) { int i; @@ -1276,6 +1294,7 @@ const struct coda_context_ops coda9_jpeg_encode_ops = { .start_streaming = coda9_jpeg_start_encoding, .prepare_run = coda9_jpeg_prepare_encode, .finish_run = coda9_jpeg_finish_encode, + .run_timeout = coda9_jpeg_encode_timeout, .release = coda9_jpeg_release, }; diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c index 8bc0d83718193f3028693cd566ab69133a3f274d..dd6e2e320264ef3eae9b70ea25691b6fb5c0396f 100644 --- a/drivers/media/platform/coda/imx-vdoa.c +++ b/drivers/media/platform/coda/imx-vdoa.c @@ -287,7 +287,11 @@ static int vdoa_probe(struct platform_device *pdev) struct resource *res; int ret; - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, "DMA enable failed\n"); + return ret; + } vdoa = devm_kzalloc(&pdev->dev, sizeof(*vdoa), GFP_KERNEL); if (!vdoa) diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c index 5e67994e62ccac50a0205f06d520e3be469174a6..ee610daf90a3c9a93805cd7be44d4996d87e6cdf 100644 --- a/drivers/media/platform/davinci/vpif.c +++ b/drivers/media/platform/davinci/vpif.c @@ -428,6 +428,7 @@ static int vpif_probe(struct platform_device *pdev) static struct resource *res, *res_irq; struct platform_device *pdev_capture, *pdev_display; struct device_node *endpoint = NULL; + int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); vpif_base = devm_ioremap_resource(&pdev->dev, res); @@ -458,8 +459,8 @@ static int vpif_probe(struct platform_device *pdev) res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res_irq) { dev_warn(&pdev->dev, "Missing IRQ resource.\n"); - pm_runtime_put(&pdev->dev); - return -EINVAL; + ret = -EINVAL; + goto err_put_rpm; } pdev_capture = devm_kzalloc(&pdev->dev, sizeof(*pdev_capture), @@ -493,10 +494,17 @@ static int vpif_probe(struct platform_device *pdev) } return 0; + +err_put_rpm: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return ret; } static int vpif_remove(struct platform_device *pdev) { + pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c index 08d76eb05ed1a446d9827c13aa4a20d999fbb353..62356adebc39ed15c6855c92729ca102937ad943 100644 --- a/drivers/media/platform/imx-pxp.c +++ b/drivers/media/platform/imx-pxp.c @@ -1664,6 +1664,8 @@ static int pxp_probe(struct platform_device *pdev) if (irq < 0) return irq; + spin_lock_init(&dev->irqlock); + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, pxp_irq_handler, IRQF_ONESHOT, dev_name(&pdev->dev), dev); if (ret < 0) { @@ -1681,8 +1683,6 @@ static int pxp_probe(struct platform_device *pdev) goto err_clk; } - spin_lock_init(&dev->irqlock); - ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) goto err_clk; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c index 219c2c5b78efc3c1f7520d1989ffaa3f7d05cbed..5f93bc670edb20e56eef04967e881c07666f7d9d 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c +++ 
b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c @@ -237,11 +237,11 @@ static int fops_vcodec_release(struct file *file) mtk_v4l2_debug(1, "[%d] encoder", ctx->id); mutex_lock(&dev->dev_mutex); + v4l2_m2m_ctx_release(ctx->m2m_ctx); mtk_vcodec_enc_release(ctx); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); v4l2_ctrl_handler_free(&ctx->ctrl_hdl); - v4l2_m2m_ctx_release(ctx->m2m_ctx); list_del_init(&ctx->list); kfree(ctx); diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c index cd27f637dbe7c8690baf3b7d0cd9790839118d61..cfc7ebed8fb7ab7a828ff1357b5cc5fa04afbe2a 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c @@ -102,6 +102,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev, vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_handler, dev, rst_id); fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL); + if (!fw) + return ERR_PTR(-ENOMEM); fw->type = VPU; fw->ops = &mtk_vcodec_vpu_msg; fw->pdev = fw_pdev; diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index 58ddebbb84468a08363da2f45e72bfa03abe280f..62d11c6e41d60d2b18b068260160d2e4afc51189 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -222,7 +222,6 @@ static int venus_probe(struct platform_device *pdev) return -ENOMEM; core->dev = dev; - platform_set_drvdata(pdev, core); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); core->base = devm_ioremap_resource(dev, r); @@ -252,7 +251,7 @@ static int venus_probe(struct platform_device *pdev) return -ENODEV; if (core->pm_ops->core_get) { - ret = core->pm_ops->core_get(dev); + ret = core->pm_ops->core_get(core); if (ret) return ret; } @@ -277,6 +276,12 @@ static int venus_probe(struct platform_device *pdev) if (ret) goto err_core_put; + ret = v4l2_device_register(dev, &core->v4l2_dev); + if (ret) + goto err_core_deinit; + + platform_set_drvdata(pdev, core); + pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); @@ -289,11 +294,11 @@ static int venus_probe(struct platform_device *pdev) ret = venus_firmware_init(core); if (ret) - goto err_runtime_disable; + goto err_of_depopulate; ret = venus_boot(core); if (ret) - goto err_runtime_disable; + goto err_firmware_deinit; ret = hfi_core_resume(core, true); if (ret) @@ -311,10 +316,6 @@ static int venus_probe(struct platform_device *pdev) if (ret) goto err_venus_shutdown; - ret = v4l2_device_register(dev, &core->v4l2_dev); - if (ret) - goto err_core_deinit; - ret = pm_runtime_put_sync(dev); if (ret) { pm_runtime_get_noresume(dev); @@ -327,18 +328,22 @@ static int venus_probe(struct platform_device *pdev) err_dev_unregister: v4l2_device_unregister(&core->v4l2_dev); -err_core_deinit: - hfi_core_deinit(core, false); err_venus_shutdown: venus_shutdown(core); +err_firmware_deinit: + venus_firmware_deinit(core); +err_of_depopulate: + of_platform_depopulate(dev); err_runtime_disable: pm_runtime_put_noidle(dev); pm_runtime_set_suspended(dev); pm_runtime_disable(dev); hfi_destroy(core); +err_core_deinit: + hfi_core_deinit(core, false); err_core_put: if (core->pm_ops->core_put) - core->pm_ops->core_put(dev); + core->pm_ops->core_put(core); return ret; } @@ -364,11 +369,12 @@ static int venus_remove(struct platform_device *pdev) pm_runtime_disable(dev); if (pm_ops->core_put) - pm_ops->core_put(dev); + pm_ops->core_put(core); + + v4l2_device_unregister(&core->v4l2_dev); 
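(Editorial aside, not part of the patch series: the venus hunks above convert every venus_pm_ops callback from taking a struct device pointer to taking the struct venus_core pointer directly. Each callback used to recover the core via dev_get_drvdata(), but probe() now calls core_get() before platform_set_drvdata() has run, so that lookup would yield NULL. A minimal sketch of the pattern; core_get_old()/core_get_new() are hypothetical stand-ins for the driver's core_get_v1():)

/* Before: the callback had to recover the core from drvdata. */
static int core_get_old(struct device *dev)
{
	struct venus_core *core = dev_get_drvdata(dev); /* not yet set this early in probe() */

	return core_clks_get(core);
}

/* After: the caller passes the core pointer it already owns. */
static int core_get_new(struct venus_core *core)
{
	return core_clks_get(core);
}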
hfi_destroy(core); - v4l2_device_unregister(&core->v4l2_dev); mutex_destroy(&core->pm_lock); mutex_destroy(&core->lock); venus_dbgfs_deinit(core); @@ -387,7 +393,7 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev) return ret; if (pm_ops->core_power) { - ret = pm_ops->core_power(dev, POWER_OFF); + ret = pm_ops->core_power(core, POWER_OFF); if (ret) return ret; } @@ -405,7 +411,8 @@ static __maybe_unused int venus_runtime_suspend(struct device *dev) err_video_path: icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0); err_cpucfg_path: - pm_ops->core_power(dev, POWER_ON); + if (pm_ops->core_power) + pm_ops->core_power(core, POWER_ON); return ret; } @@ -425,7 +432,7 @@ static __maybe_unused int venus_runtime_resume(struct device *dev) return ret; if (pm_ops->core_power) { - ret = pm_ops->core_power(dev, POWER_ON); + ret = pm_ops->core_power(core, POWER_ON); if (ret) return ret; } diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index 05c9fbd51f0c057586afbe588541c4a93c9e3487..f2a0ef9ee884e488ea7ac3eb6418e1d73b408a82 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -123,7 +123,6 @@ struct venus_caps { * @clks: an array of struct clk pointers * @vcodec0_clks: an array of vcodec0 struct clk pointers * @vcodec1_clks: an array of vcodec1 struct clk pointers - * @pd_dl_venus: pmdomain device-link for venus domain * @pmdomains: an array of pmdomains struct device pointers * @vdev_dec: a reference to video device structure for decoder instances * @vdev_enc: a reference to video device structure for encoder instances @@ -161,7 +160,6 @@ struct venus_core { struct icc_path *cpucfg_path; struct opp_table *opp_table; bool has_opp_table; - struct device_link *pd_dl_venus; struct device *pmdomains[VIDC_PMDOMAINS_NUM_MAX]; struct device_link *opp_dl_venus; struct device *opp_pmdomain; diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c index 2946547a0df4a445cfd105847849bf076584f8d2..710f9a2b132b07ca89ca33cc983854be37a9f811 100644 --- a/drivers/media/platform/qcom/venus/pm_helpers.c +++ b/drivers/media/platform/qcom/venus/pm_helpers.c @@ -147,14 +147,12 @@ static u32 load_per_type(struct venus_core *core, u32 session_type) struct venus_inst *inst = NULL; u32 mbs_per_sec = 0; - mutex_lock(&core->lock); list_for_each_entry(inst, &core->instances, list) { if (inst->session_type != session_type) continue; mbs_per_sec += load_per_instance(inst); } - mutex_unlock(&core->lock); return mbs_per_sec; } @@ -203,14 +201,12 @@ static int load_scale_bw(struct venus_core *core) struct venus_inst *inst = NULL; u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0; - mutex_lock(&core->lock); list_for_each_entry(inst, &core->instances, list) { mbs_per_sec = load_per_instance(inst); mbs_to_bw(inst, mbs_per_sec, &avg, &peak); total_avg += avg; total_peak += peak; } - mutex_unlock(&core->lock); /* * keep minimum bandwidth vote for "video-mem" path, @@ -237,8 +233,9 @@ static int load_scale_v1(struct venus_inst *inst) struct device *dev = core->dev; u32 mbs_per_sec; unsigned int i; - int ret; + int ret = 0; + mutex_lock(&core->lock); mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) + load_per_type(core, VIDC_SESSION_TYPE_DEC); @@ -263,29 +260,28 @@ static int load_scale_v1(struct venus_inst *inst) if (ret) { dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret); - return ret; + goto exit; } ret = load_scale_bw(core); if (ret) { 
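		/*
		 * (Editorial note, not in the source: core->lock is now
		 * taken once here in load_scale_v1() instead of inside
		 * load_per_type() and load_scale_bw(), so every error
		 * path must leave through the common "exit" label to
		 * guarantee the mutex is released.)
		 */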
dev_err(dev, "failed to set bandwidth (%d)\n", ret); - return ret; + goto exit; } - return 0; +exit: + mutex_unlock(&core->lock); + return ret; } -static int core_get_v1(struct device *dev) +static int core_get_v1(struct venus_core *core) { - struct venus_core *core = dev_get_drvdata(dev); - return core_clks_get(core); } -static int core_power_v1(struct device *dev, int on) +static int core_power_v1(struct venus_core *core, int on) { - struct venus_core *core = dev_get_drvdata(dev); int ret = 0; if (on == POWER_ON) @@ -752,12 +748,12 @@ static int venc_power_v4(struct device *dev, int on) return ret; } -static int vcodec_domains_get(struct device *dev) +static int vcodec_domains_get(struct venus_core *core) { int ret; struct opp_table *opp_table; struct device **opp_virt_dev; - struct venus_core *core = dev_get_drvdata(dev); + struct device *dev = core->dev; const struct venus_resources *res = core->res; struct device *pd; unsigned int i; @@ -773,13 +769,6 @@ static int vcodec_domains_get(struct device *dev) core->pmdomains[i] = pd; } - core->pd_dl_venus = device_link_add(dev, core->pmdomains[0], - DL_FLAG_PM_RUNTIME | - DL_FLAG_STATELESS | - DL_FLAG_RPM_ACTIVE); - if (!core->pd_dl_venus) - return -ENODEV; - skip_pmdomains: if (!core->has_opp_table) return 0; @@ -806,29 +795,23 @@ static int vcodec_domains_get(struct device *dev) opp_dl_add_err: dev_pm_opp_detach_genpd(core->opp_table); opp_attach_err: - if (core->pd_dl_venus) { - device_link_del(core->pd_dl_venus); - for (i = 0; i < res->vcodec_pmdomains_num; i++) { - if (IS_ERR_OR_NULL(core->pmdomains[i])) - continue; - dev_pm_domain_detach(core->pmdomains[i], true); - } + for (i = 0; i < res->vcodec_pmdomains_num; i++) { + if (IS_ERR_OR_NULL(core->pmdomains[i])) + continue; + dev_pm_domain_detach(core->pmdomains[i], true); } + return ret; } -static void vcodec_domains_put(struct device *dev) +static void vcodec_domains_put(struct venus_core *core) { - struct venus_core *core = dev_get_drvdata(dev); const struct venus_resources *res = core->res; unsigned int i; if (!res->vcodec_pmdomains_num) goto skip_pmdomains; - if (core->pd_dl_venus) - device_link_del(core->pd_dl_venus); - for (i = 0; i < res->vcodec_pmdomains_num; i++) { if (IS_ERR_OR_NULL(core->pmdomains[i])) continue; @@ -845,9 +828,9 @@ static void vcodec_domains_put(struct device *dev) dev_pm_opp_detach_genpd(core->opp_table); } -static int core_get_v4(struct device *dev) +static int core_get_v4(struct venus_core *core) { - struct venus_core *core = dev_get_drvdata(dev); + struct device *dev = core->dev; const struct venus_resources *res = core->res; int ret; @@ -886,7 +869,7 @@ static int core_get_v4(struct device *dev) } } - ret = vcodec_domains_get(dev); + ret = vcodec_domains_get(core); if (ret) { if (core->has_opp_table) dev_pm_opp_of_remove_table(dev); @@ -897,14 +880,14 @@ static int core_get_v4(struct device *dev) return 0; } -static void core_put_v4(struct device *dev) +static void core_put_v4(struct venus_core *core) { - struct venus_core *core = dev_get_drvdata(dev); + struct device *dev = core->dev; if (legacy_binding) return; - vcodec_domains_put(dev); + vcodec_domains_put(core); if (core->has_opp_table) dev_pm_opp_of_remove_table(dev); @@ -913,19 +896,33 @@ static void core_put_v4(struct device *dev) } -static int core_power_v4(struct device *dev, int on) +static int core_power_v4(struct venus_core *core, int on) { - struct venus_core *core = dev_get_drvdata(dev); + struct device *dev = core->dev; + struct device *pmctrl = core->pmdomains[0]; int ret = 0; if (on == 
POWER_ON) { + if (pmctrl) { + ret = pm_runtime_get_sync(pmctrl); + if (ret < 0) { + pm_runtime_put_noidle(pmctrl); + return ret; + } + } + ret = core_clks_enable(core); + if (ret < 0 && pmctrl) + pm_runtime_put_sync(pmctrl); } else { /* Drop the performance state vote */ if (core->opp_pmdomain) dev_pm_opp_set_rate(dev, 0); core_clks_disable(core); + + if (pmctrl) + pm_runtime_put_sync(pmctrl); } return ret; @@ -962,13 +959,13 @@ static int load_scale_v4(struct venus_inst *inst) struct device *dev = core->dev; unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0; unsigned long filled_len = 0; - int i, ret; + int i, ret = 0; for (i = 0; i < inst->num_input_bufs; i++) filled_len = max(filled_len, inst->payloads[i]); if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len) - return 0; + return ret; freq = calculate_inst_freq(inst, filled_len); inst->clk_data.freq = freq; @@ -984,7 +981,6 @@ static int load_scale_v4(struct venus_inst *inst) freq_core2 += inst->clk_data.freq; } } - mutex_unlock(&core->lock); freq = max(freq_core1, freq_core2); @@ -1008,17 +1004,19 @@ static int load_scale_v4(struct venus_inst *inst) if (ret) { dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret); - return ret; + goto exit; } ret = load_scale_bw(core); if (ret) { dev_err(dev, "failed to set bandwidth (%d)\n", ret); - return ret; + goto exit; } - return 0; +exit: + mutex_unlock(&core->lock); + return ret; } static const struct venus_pm_ops pm_ops_v4 = { diff --git a/drivers/media/platform/qcom/venus/pm_helpers.h b/drivers/media/platform/qcom/venus/pm_helpers.h index aa2f6afa2354432fc0f58b6735f45356214f3579..a492c50c5543c5036a13cef7a72891b788b7e1c9 100644 --- a/drivers/media/platform/qcom/venus/pm_helpers.h +++ b/drivers/media/platform/qcom/venus/pm_helpers.h @@ -4,14 +4,15 @@ #define __VENUS_PM_HELPERS_H__ struct device; +struct venus_core; #define POWER_ON 1 #define POWER_OFF 0 struct venus_pm_ops { - int (*core_get)(struct device *dev); - void (*core_put)(struct device *dev); - int (*core_power)(struct device *dev, int on); + int (*core_get)(struct venus_core *core); + void (*core_put)(struct venus_core *core); + int (*core_power)(struct venus_core *core, int on); int (*vdec_get)(struct device *dev); void (*vdec_put)(struct device *dev); diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c index d2d87a204e9185af52430fd09d634c144a5ff6d2..5e8e48a721a049c9faabaa8c3e4d6770f99e6eaa 100644 --- a/drivers/media/platform/rcar-vin/rcar-csi2.c +++ b/drivers/media/platform/rcar-vin/rcar-csi2.c @@ -436,16 +436,23 @@ static int rcsi2_wait_phy_start(struct rcar_csi2 *priv, static int rcsi2_set_phypll(struct rcar_csi2 *priv, unsigned int mbps) { const struct rcsi2_mbps_reg *hsfreq; + const struct rcsi2_mbps_reg *hsfreq_prev = NULL; - for (hsfreq = priv->info->hsfreqrange; hsfreq->mbps != 0; hsfreq++) + for (hsfreq = priv->info->hsfreqrange; hsfreq->mbps != 0; hsfreq++) { if (hsfreq->mbps >= mbps) break; + hsfreq_prev = hsfreq; + } if (!hsfreq->mbps) { dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps); return -ERANGE; } + if (hsfreq_prev && + ((mbps - hsfreq_prev->mbps) <= (hsfreq->mbps - mbps))) + hsfreq = hsfreq_prev; + rcsi2_write(priv, PHYPLL_REG, PHYPLL_HSFREQRANGE(hsfreq->reg)); return 0; @@ -969,10 +976,17 @@ static int rcsi2_phtw_write_mbps(struct rcar_csi2 *priv, unsigned int mbps, const struct rcsi2_mbps_reg *values, u16 code) { const struct rcsi2_mbps_reg *value; + const struct rcsi2_mbps_reg *prev_value = NULL; - for (value = values; 
value->mbps; value++) + for (value = values; value->mbps; value++) { if (value->mbps >= mbps) break; + prev_value = value; + } + + if (prev_value && + ((mbps - prev_value->mbps) <= (value->mbps - mbps))) + value = prev_value; if (!value->mbps) { dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps); diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c index 3e7a3ae2a6b97045bdf9d653e5560802e02f8422..0bbe6f9f92062fdbd5bfe3fa43533a03959f1002 100644 --- a/drivers/media/platform/rcar-vin/rcar-v4l2.c +++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c @@ -175,20 +175,27 @@ static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix) break; } - /* HW limit width to a multiple of 32 (2^5) for NV12/16 else 2 (2^1) */ + /* Hardware limits width alignment based on format. */ switch (pix->pixelformat) { + /* Multiple of 32 (2^5) for NV12/16. */ case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV16: walign = 5; break; - default: + /* Multiple of 2 (2^1) for YUV. */ + case V4L2_PIX_FMT_YUYV: + case V4L2_PIX_FMT_UYVY: walign = 1; break; + /* No multiple for RGB. */ + default: + walign = 0; + break; } /* Limit to VIN capabilities */ - v4l_bound_align_image(&pix->width, 2, vin->info->max_width, walign, - &pix->height, 4, vin->info->max_height, 2, 0); + v4l_bound_align_image(&pix->width, 5, vin->info->max_width, walign, + &pix->height, 2, vin->info->max_height, 0, 0); pix->bytesperline = rvin_format_bytesperline(vin, pix); pix->sizeimage = rvin_format_sizeimage(pix); diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c index 6759091b15e091834f00385f3caa8c7e7dd0d4dc..d99ea8973b6788353d164c197efb00d82014f04d 100644 --- a/drivers/media/platform/rockchip/rga/rga.c +++ b/drivers/media/platform/rockchip/rga/rga.c @@ -895,7 +895,7 @@ static int rga_probe(struct platform_device *pdev) } rga->dst_mmu_pages = (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3); - if (rga->dst_mmu_pages) { + if (!rga->dst_mmu_pages) { ret = -ENOMEM; goto free_src_pages; } diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index a972c0705ac79c9d90aa051610766d6b7becd872..76d39e2e877068e0a8fbe65c42f8de585cc95d9f 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -368,7 +368,7 @@ static int si470x_i2c_probe(struct i2c_client *client) if (radio->hdl.error) { retval = radio->hdl.error; dev_err(&client->dev, "couldn't register control\n"); - goto err_dev; + goto err_all; } /* video device initialization */ @@ -463,7 +463,6 @@ static int si470x_i2c_probe(struct i2c_client *client) return 0; err_all: v4l2_ctrl_handler_free(&radio->hdl); -err_dev: v4l2_device_unregister(&radio->v4l2_dev); err_initial: return retval; diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c index c6cd2e6d8e654d8cb5182f1bb7fc3436d7cdecf5..a50701cfbbd7b37c5462347c9e74926091f1fdee 100644 --- a/drivers/media/rc/gpio-ir-tx.c +++ b/drivers/media/rc/gpio-ir-tx.c @@ -48,11 +48,29 @@ static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier) return 0; } +static void delay_until(ktime_t until) +{ + /* + * delta should never exceed 0.5 seconds (IR_MAX_DURATION) and on + * m68k ndelay(s64) does not compile; so use s32 rather than s64. 
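+	 *
+	 * (Editorial aside, not part of the original patch: the loop
+	 * below re-reads ktime_get() on every iteration and waits in
+	 * chunks of at most 1000 us, because udelay() is only reliable
+	 * for short delays; working against the absolute "until" time
+	 * also keeps rounding error from accumulating across chunks.)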
+ */ + s32 delta; + + while (true) { + delta = ktime_us_delta(until, ktime_get()); + if (delta <= 0) + return; + + /* udelay more than 1ms may not work */ + delta = min(delta, 1000); + udelay(delta); + } +} + static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf, uint count) { ktime_t edge; - s32 delta; int i; local_irq_disable(); @@ -63,9 +81,7 @@ static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf, gpiod_set_value(gpio_ir->gpio, !(i % 2)); edge = ktime_add_us(edge, txbuf[i]); - delta = ktime_us_delta(edge, ktime_get()); - if (delta > 0) - udelay(delta); + delay_until(edge); } gpiod_set_value(gpio_ir->gpio, 0); @@ -97,9 +113,7 @@ static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf, if (i % 2) { // space edge = ktime_add_us(edge, txbuf[i]); - delta = ktime_us_delta(edge, ktime_get()); - if (delta > 0) - udelay(delta); + delay_until(edge); } else { // pulse ktime_t last = ktime_add_us(edge, txbuf[i]); diff --git a/drivers/media/rc/igorplugusb.c b/drivers/media/rc/igorplugusb.c index effaa5751d6c98099cd6cb745ddbcb40411eb889..3e9988ee785f0cf4f6a3804c253a8defb53f3586 100644 --- a/drivers/media/rc/igorplugusb.c +++ b/drivers/media/rc/igorplugusb.c @@ -64,9 +64,11 @@ static void igorplugusb_irdata(struct igorplugusb *ir, unsigned len) if (start >= len) { dev_err(ir->dev, "receive overflow invalid: %u", overflow); } else { - if (overflow > 0) + if (overflow > 0) { dev_warn(ir->dev, "receive overflow, at least %u lost", overflow); + ir_raw_event_reset(ir->rc); + } do { rawir.duration = ir->buf_in[i] * 85; diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c index 1aa7989e756ccd967ca376ce0201911f3f5c4708..7f394277478b32ae0c461dfc3949249194775b90 100644 --- a/drivers/media/rc/ir_toy.c +++ b/drivers/media/rc/ir_toy.c @@ -429,7 +429,7 @@ static int irtoy_probe(struct usb_interface *intf, err = usb_submit_urb(irtoy->urb_in, GFP_KERNEL); if (err != 0) { dev_err(irtoy->dev, "fail to submit in urb: %d\n", err); - return err; + goto free_rcdev; } err = irtoy_setup(irtoy); diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 8870c4e6c5f445d1c67a00a3271d7787cfcdbf27..dbb5a4f44bda50631da1a72b95d2e3ca79e402ba 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -1430,7 +1430,7 @@ static void mceusb_gen1_init(struct mceusb_dev *ir) */ ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0), USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0, - data, USB_CTRL_MSG_SZ, HZ * 3); + data, USB_CTRL_MSG_SZ, 3000); dev_dbg(dev, "set address - ret = %d", ret); dev_dbg(dev, "set address - data[0] = %d, data[1] = %d", data[0], data[1]); @@ -1438,20 +1438,20 @@ static void mceusb_gen1_init(struct mceusb_dev *ir) /* set feature: bit rate 38400 bps */ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), USB_REQ_SET_FEATURE, USB_TYPE_VENDOR, - 0xc04e, 0x0000, NULL, 0, HZ * 3); + 0xc04e, 0x0000, NULL, 0, 3000); dev_dbg(dev, "set feature - ret = %d", ret); /* bRequest 4: set char length to 8 bits */ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), 4, USB_TYPE_VENDOR, - 0x0808, 0x0000, NULL, 0, HZ * 3); + 0x0808, 0x0000, NULL, 0, 3000); dev_dbg(dev, "set char length - retB = %d", ret); /* bRequest 2: set handshaking to use DTR/DSR */ ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0), 2, USB_TYPE_VENDOR, - 0x0000, 0x0100, NULL, 0, HZ * 3); + 0x0000, 0x0100, NULL, 0, 3000); dev_dbg(dev, "set handshake - retC = %d", ret); /* device resume */ diff --git 
a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c index 2cf3377ec63a7c0b8b61c17e0ce68d61ffecdae8..a61f9820ade951c943580c909d64380b32ec9779 100644 --- a/drivers/media/rc/redrat3.c +++ b/drivers/media/rc/redrat3.c @@ -404,7 +404,7 @@ static int redrat3_send_cmd(int cmd, struct redrat3_dev *rr3) udev = rr3->udev; res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), cmd, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - 0x0000, 0x0000, data, sizeof(u8), HZ * 10); + 0x0000, 0x0000, data, sizeof(u8), 10000); if (res < 0) { dev_err(rr3->dev, "%s: Error sending rr3 cmd res %d, data %d", @@ -480,7 +480,7 @@ static u32 redrat3_get_timeout(struct redrat3_dev *rr3) pipe = usb_rcvctrlpipe(rr3->udev, 0); ret = usb_control_msg(rr3->udev, pipe, RR3_GET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, HZ * 5); + RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, 5000); if (ret != len) dev_warn(rr3->dev, "Failed to read timeout from hardware\n"); else { @@ -510,7 +510,7 @@ static int redrat3_set_timeout(struct rc_dev *rc_dev, unsigned int timeoutus) ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), RR3_SET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, RR3_IR_IO_SIG_TIMEOUT, 0, timeout, sizeof(*timeout), - HZ * 25); + 25000); dev_dbg(dev, "set ir parm timeout %d ret 0x%02x\n", be32_to_cpu(*timeout), ret); @@ -542,32 +542,32 @@ static void redrat3_reset(struct redrat3_dev *rr3) *val = 0x01; rc = usb_control_msg(udev, rxpipe, RR3_RESET, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - RR3_CPUCS_REG_ADDR, 0, val, len, HZ * 25); + RR3_CPUCS_REG_ADDR, 0, val, len, 25000); dev_dbg(dev, "reset returned 0x%02x\n", rc); *val = length_fuzz; rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, - RR3_IR_IO_LENGTH_FUZZ, 0, val, len, HZ * 25); + RR3_IR_IO_LENGTH_FUZZ, 0, val, len, 25000); dev_dbg(dev, "set ir parm len fuzz %d rc 0x%02x\n", *val, rc); *val = (65536 - (minimum_pause * 2000)) / 256; rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, - RR3_IR_IO_MIN_PAUSE, 0, val, len, HZ * 25); + RR3_IR_IO_MIN_PAUSE, 0, val, len, 25000); dev_dbg(dev, "set ir parm min pause %d rc 0x%02x\n", *val, rc); *val = periods_measure_carrier; rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, - RR3_IR_IO_PERIODS_MF, 0, val, len, HZ * 25); + RR3_IR_IO_PERIODS_MF, 0, val, len, 25000); dev_dbg(dev, "set ir parm periods measure carrier %d rc 0x%02x", *val, rc); *val = RR3_DRIVER_MAXLENS; rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, - RR3_IR_IO_MAX_LENGTHS, 0, val, len, HZ * 25); + RR3_IR_IO_MAX_LENGTHS, 0, val, len, 25000); dev_dbg(dev, "set ir parm max lens %d rc 0x%02x\n", *val, rc); kfree(val); @@ -585,7 +585,7 @@ static void redrat3_get_firmware_rev(struct redrat3_dev *rr3) rc = usb_control_msg(rr3->udev, usb_rcvctrlpipe(rr3->udev, 0), RR3_FW_VERSION, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - 0, 0, buffer, RR3_FW_VERSION_LEN, HZ * 5); + 0, 0, buffer, RR3_FW_VERSION_LEN, 5000); if (rc >= 0) dev_info(rr3->dev, "Firmware rev: %s", buffer); @@ -825,14 +825,14 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf, pipe = usb_sndbulkpipe(rr3->udev, rr3->ep_out->bEndpointAddress); ret = usb_bulk_msg(rr3->udev, pipe, irdata, - sendbuf_len, &ret_len, 10 * HZ); + sendbuf_len, &ret_len, 10000); dev_dbg(dev, "sent %d bytes, (ret %d)\n", ret_len, ret); 
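(Editorial aside, not part of the patch series: the mceusb and redrat3 hunks above all fix the same bug. The last argument of usb_control_msg() and usb_bulk_msg() is a timeout in milliseconds, not jiffies, so "HZ * 10" only meant ten seconds on a CONFIG_HZ=1000 kernel; with HZ=100 it silently became one second. A minimal sketch of the pattern:)

/* Wrong: HZ is ticks per second, so this passes jiffies as ms. */
ret = usb_bulk_msg(udev, pipe, buf, len, &actual_length, 10 * HZ);

/* Right: the USB core expects milliseconds. */
ret = usb_bulk_msg(udev, pipe, buf, len, &actual_length, 10000);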
/* now tell the hardware to transmit what we sent it */ pipe = usb_rcvctrlpipe(rr3->udev, 0); ret = usb_control_msg(rr3->udev, pipe, RR3_TX_SEND_SIGNAL, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - 0, 0, irdata, 2, HZ * 10); + 0, 0, irdata, 2, 10000); if (ret < 0) dev_err(dev, "Error: control msg send failed, rc %d\n", ret); diff --git a/drivers/media/test-drivers/vidtv/vidtv_s302m.c b/drivers/media/test-drivers/vidtv/vidtv_s302m.c index d79b65854627cc18c60c5172430c40369e687c68..4676083cee3b8aa58f01845c9839d91505d24850 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_s302m.c +++ b/drivers/media/test-drivers/vidtv/vidtv_s302m.c @@ -455,6 +455,9 @@ struct vidtv_encoder e->name = kstrdup(args.name, GFP_KERNEL); e->encoder_buf = vzalloc(VIDTV_S302M_BUF_SZ); + if (!e->encoder_buf) + goto out_kfree_e; + e->encoder_buf_sz = VIDTV_S302M_BUF_SZ; e->encoder_buf_offset = 0; @@ -467,10 +470,8 @@ struct vidtv_encoder e->is_video_encoder = false; ctx = kzalloc(priv_sz, GFP_KERNEL); - if (!ctx) { - kfree(e); - return NULL; - } + if (!ctx) + goto out_kfree_buf; e->ctx = ctx; ctx->last_duration = 0; @@ -498,6 +499,14 @@ struct vidtv_encoder e->next = NULL; return e; + +out_kfree_buf: + kfree(e->encoder_buf); + +out_kfree_e: + kfree(e->name); + kfree(e); + return NULL; } void vidtv_s302m_encoder_destroy(struct vidtv_encoder *e) diff --git a/drivers/media/tuners/msi001.c b/drivers/media/tuners/msi001.c index 78e6fd600d8ef77ec60fd1e55aa96feaaf90827f..44247049a31903f5cbb9159d9be8ae934d5fb9ae 100644 --- a/drivers/media/tuners/msi001.c +++ b/drivers/media/tuners/msi001.c @@ -442,6 +442,13 @@ static int msi001_probe(struct spi_device *spi) V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 1, 1); dev->bandwidth = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops, V4L2_CID_RF_TUNER_BANDWIDTH, 200000, 8000000, 1, 200000); + if (dev->hdl.error) { + ret = dev->hdl.error; + dev_err(&spi->dev, "Could not initialize controls\n"); + /* control init failed, free handler */ + goto err_ctrl_handler_free; + } + v4l2_ctrl_auto_cluster(2, &dev->bandwidth_auto, 0, false); dev->lna_gain = v4l2_ctrl_new_std(&dev->hdl, &msi001_ctrl_ops, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 1, 1, 1); diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index fefb2625f6558e340488586395607781f2becf2c..75ddf7ed1faff25f255d45b58ef5ee1e68f59109 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -90,7 +90,7 @@ static int si2157_init(struct dvb_frontend *fe) dev_dbg(&client->dev, "\n"); /* Try to get Xtal trim property, to verify tuner still running */ - memcpy(cmd.args, "\x15\x00\x04\x02", 4); + memcpy(cmd.args, "\x15\x00\x02\x04", 4); cmd.wlen = 4; cmd.rlen = 4; ret = si2157_cmd_execute(client, &cmd); diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c index e731243267e4904b8ceed06f004a0e62352e44cf..a2563c25408086d013c4a7a9306d913343aa1eda 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.c +++ b/drivers/media/usb/b2c2/flexcop-usb.c @@ -87,7 +87,7 @@ static int flexcop_usb_readwrite_dw(struct flexcop_device *fc, u16 wRegOffsPCI, 0, fc_usb->data, sizeof(u32), - B2C2_WAIT_FOR_OPERATION_RDW * HZ); + B2C2_WAIT_FOR_OPERATION_RDW); if (ret != sizeof(u32)) { err("error while %s dword from %d (%d).", read ? 
"reading" : @@ -155,7 +155,7 @@ static int flexcop_usb_v8_memory_req(struct flexcop_usb *fc_usb, wIndex, fc_usb->data, buflen, - nWaitTime * HZ); + nWaitTime); if (ret != buflen) ret = -EIO; @@ -249,13 +249,13 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, /* DKT 020208 - add this to support special case of DiSEqC */ case USB_FUNC_I2C_CHECKWRITE: pipe = B2C2_USB_CTRL_PIPE_OUT; - nWaitTime = 2; + nWaitTime = 2000; request_type |= USB_DIR_OUT; break; case USB_FUNC_I2C_READ: case USB_FUNC_I2C_REPEATREAD: pipe = B2C2_USB_CTRL_PIPE_IN; - nWaitTime = 2; + nWaitTime = 2000; request_type |= USB_DIR_IN; break; default: @@ -282,7 +282,7 @@ static int flexcop_usb_i2c_req(struct flexcop_i2c_adapter *i2c, wIndex, fc_usb->data, buflen, - nWaitTime * HZ); + nWaitTime); if (ret != buflen) ret = -EIO; diff --git a/drivers/media/usb/b2c2/flexcop-usb.h b/drivers/media/usb/b2c2/flexcop-usb.h index 2f230bf72252be0d022e003e032784b0b5a9dce1..c7cca1a5ee59d0e420180a5d1c8325cf5351b21b 100644 --- a/drivers/media/usb/b2c2/flexcop-usb.h +++ b/drivers/media/usb/b2c2/flexcop-usb.h @@ -91,13 +91,13 @@ typedef enum { UTILITY_SRAM_TESTVERIFY = 0x16, } flexcop_usb_utility_function_t; -#define B2C2_WAIT_FOR_OPERATION_RW (1*HZ) -#define B2C2_WAIT_FOR_OPERATION_RDW (3*HZ) -#define B2C2_WAIT_FOR_OPERATION_WDW (1*HZ) +#define B2C2_WAIT_FOR_OPERATION_RW 1000 +#define B2C2_WAIT_FOR_OPERATION_RDW 3000 +#define B2C2_WAIT_FOR_OPERATION_WDW 1000 -#define B2C2_WAIT_FOR_OPERATION_V8READ (3*HZ) -#define B2C2_WAIT_FOR_OPERATION_V8WRITE (3*HZ) -#define B2C2_WAIT_FOR_OPERATION_V8FLASH (3*HZ) +#define B2C2_WAIT_FOR_OPERATION_V8READ 3000 +#define B2C2_WAIT_FOR_OPERATION_V8WRITE 3000 +#define B2C2_WAIT_FOR_OPERATION_V8FLASH 3000 typedef enum { V8_MEMORY_PAGE_DVB_CI = 0x20, diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index 76aac06f9fb8e2cb5b85ada5a77be10900e37dfc..cba03b286473858851310c678f047ffc772f6e8f 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c @@ -550,7 +550,7 @@ static int write_packet(struct usb_device *udev, 0, /* index */ buf, /* buffer */ size, - HZ); + 1000); kfree(buf); return ret; @@ -582,7 +582,7 @@ static int read_packet(struct usb_device *udev, 0, /* index */ buf, /* buffer */ size, - HZ); + 1000); if (ret >= 0) memcpy(registers, buf, size); diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c index 70219b3e85666de2772509149396b85b4fa95a5c..7ea8f68b0f458418276bffd1c595cf8c2e094dc3 100644 --- a/drivers/media/usb/dvb-usb/dib0700_core.c +++ b/drivers/media/usb/dvb-usb/dib0700_core.c @@ -618,8 +618,6 @@ int dib0700_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) deb_info("the endpoint number (%i) is not correct, use the adapter id instead", adap->fe_adap[0].stream.props.endpoint); if (onoff) st->channel_state |= 1 << (adap->id); - else - st->channel_state |= 1 << ~(adap->id); } else { if (onoff) st->channel_state |= 1 << (adap->fe_adap[0].stream.props.endpoint-2); diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c index a27a6844032528437561278a5816d0fe6513a4e7..aa929db56db1f4721799aae7b3da1afc9e95215a 100644 --- a/drivers/media/usb/dvb-usb/dw2102.c +++ b/drivers/media/usb/dvb-usb/dw2102.c @@ -2148,46 +2148,153 @@ static struct dvb_usb_device_properties s6x0_properties = { } }; -static const struct dvb_usb_device_description d1100 = { - "Prof 1100 USB ", - {&dw2102_table[PROF_1100], NULL}, - {NULL}, -}; +static struct 
dvb_usb_device_properties p1100_properties = { + .caps = DVB_USB_IS_AN_I2C_ADAPTER, + .usb_ctrl = DEVICE_SPECIFIC, + .size_of_priv = sizeof(struct dw2102_state), + .firmware = P1100_FIRMWARE, + .no_reconnect = 1, -static const struct dvb_usb_device_description d660 = { - "TeVii S660 USB", - {&dw2102_table[TEVII_S660], NULL}, - {NULL}, -}; + .i2c_algo = &s6x0_i2c_algo, + .rc.core = { + .rc_interval = 150, + .rc_codes = RC_MAP_TBS_NEC, + .module_name = "dw2102", + .allowed_protos = RC_PROTO_BIT_NEC, + .rc_query = prof_rc_query, + }, -static const struct dvb_usb_device_description d480_1 = { - "TeVii S480.1 USB", - {&dw2102_table[TEVII_S480_1], NULL}, - {NULL}, + .generic_bulk_ctrl_endpoint = 0x81, + .num_adapters = 1, + .download_firmware = dw2102_load_firmware, + .read_mac_address = s6x0_read_mac_address, + .adapter = { + { + .num_frontends = 1, + .fe = {{ + .frontend_attach = stv0288_frontend_attach, + .stream = { + .type = USB_BULK, + .count = 8, + .endpoint = 0x82, + .u = { + .bulk = { + .buffersize = 4096, + } + } + }, + } }, + } + }, + .num_device_descs = 1, + .devices = { + {"Prof 1100 USB ", + {&dw2102_table[PROF_1100], NULL}, + {NULL}, + }, + } }; -static const struct dvb_usb_device_description d480_2 = { - "TeVii S480.2 USB", - {&dw2102_table[TEVII_S480_2], NULL}, - {NULL}, -}; +static struct dvb_usb_device_properties s660_properties = { + .caps = DVB_USB_IS_AN_I2C_ADAPTER, + .usb_ctrl = DEVICE_SPECIFIC, + .size_of_priv = sizeof(struct dw2102_state), + .firmware = S660_FIRMWARE, + .no_reconnect = 1, -static const struct dvb_usb_device_description d7500 = { - "Prof 7500 USB DVB-S2", - {&dw2102_table[PROF_7500], NULL}, - {NULL}, -}; + .i2c_algo = &s6x0_i2c_algo, + .rc.core = { + .rc_interval = 150, + .rc_codes = RC_MAP_TEVII_NEC, + .module_name = "dw2102", + .allowed_protos = RC_PROTO_BIT_NEC, + .rc_query = dw2102_rc_query, + }, -static const struct dvb_usb_device_description d421 = { - "TeVii S421 PCI", - {&dw2102_table[TEVII_S421], NULL}, - {NULL}, + .generic_bulk_ctrl_endpoint = 0x81, + .num_adapters = 1, + .download_firmware = dw2102_load_firmware, + .read_mac_address = s6x0_read_mac_address, + .adapter = { + { + .num_frontends = 1, + .fe = {{ + .frontend_attach = ds3000_frontend_attach, + .stream = { + .type = USB_BULK, + .count = 8, + .endpoint = 0x82, + .u = { + .bulk = { + .buffersize = 4096, + } + } + }, + } }, + } + }, + .num_device_descs = 3, + .devices = { + {"TeVii S660 USB", + {&dw2102_table[TEVII_S660], NULL}, + {NULL}, + }, + {"TeVii S480.1 USB", + {&dw2102_table[TEVII_S480_1], NULL}, + {NULL}, + }, + {"TeVii S480.2 USB", + {&dw2102_table[TEVII_S480_2], NULL}, + {NULL}, + }, + } }; -static const struct dvb_usb_device_description d632 = { - "TeVii S632 USB", - {&dw2102_table[TEVII_S632], NULL}, - {NULL}, +static struct dvb_usb_device_properties p7500_properties = { + .caps = DVB_USB_IS_AN_I2C_ADAPTER, + .usb_ctrl = DEVICE_SPECIFIC, + .size_of_priv = sizeof(struct dw2102_state), + .firmware = P7500_FIRMWARE, + .no_reconnect = 1, + + .i2c_algo = &s6x0_i2c_algo, + .rc.core = { + .rc_interval = 150, + .rc_codes = RC_MAP_TBS_NEC, + .module_name = "dw2102", + .allowed_protos = RC_PROTO_BIT_NEC, + .rc_query = prof_rc_query, + }, + + .generic_bulk_ctrl_endpoint = 0x81, + .num_adapters = 1, + .download_firmware = dw2102_load_firmware, + .read_mac_address = s6x0_read_mac_address, + .adapter = { + { + .num_frontends = 1, + .fe = {{ + .frontend_attach = prof_7500_frontend_attach, + .stream = { + .type = USB_BULK, + .count = 8, + .endpoint = 0x82, + .u = { + .bulk = { + .buffersize 
= 4096, + } + } + }, + } }, + } + }, + .num_device_descs = 1, + .devices = { + {"Prof 7500 USB DVB-S2", + {&dw2102_table[PROF_7500], NULL}, + {NULL}, + }, + } }; static struct dvb_usb_device_properties su3000_properties = { @@ -2267,6 +2374,59 @@ static struct dvb_usb_device_properties su3000_properties = { } }; +static struct dvb_usb_device_properties s421_properties = { + .caps = DVB_USB_IS_AN_I2C_ADAPTER, + .usb_ctrl = DEVICE_SPECIFIC, + .size_of_priv = sizeof(struct dw2102_state), + .power_ctrl = su3000_power_ctrl, + .num_adapters = 1, + .identify_state = su3000_identify_state, + .i2c_algo = &su3000_i2c_algo, + + .rc.core = { + .rc_interval = 150, + .rc_codes = RC_MAP_SU3000, + .module_name = "dw2102", + .allowed_protos = RC_PROTO_BIT_RC5, + .rc_query = su3000_rc_query, + }, + + .read_mac_address = su3000_read_mac_address, + + .generic_bulk_ctrl_endpoint = 0x01, + + .adapter = { + { + .num_frontends = 1, + .fe = {{ + .streaming_ctrl = su3000_streaming_ctrl, + .frontend_attach = m88rs2000_frontend_attach, + .stream = { + .type = USB_BULK, + .count = 8, + .endpoint = 0x82, + .u = { + .bulk = { + .buffersize = 4096, + } + } + } + } }, + } + }, + .num_device_descs = 2, + .devices = { + { "TeVii S421 PCI", + { &dw2102_table[TEVII_S421], NULL }, + { NULL }, + }, + { "TeVii S632 USB", + { &dw2102_table[TEVII_S632], NULL }, + { NULL }, + }, + } +}; + static struct dvb_usb_device_properties t220_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = DEVICE_SPECIFIC, @@ -2384,101 +2544,33 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = { static int dw2102_probe(struct usb_interface *intf, const struct usb_device_id *id) { - int retval = -ENOMEM; - struct dvb_usb_device_properties *p1100; - struct dvb_usb_device_properties *s660; - struct dvb_usb_device_properties *p7500; - struct dvb_usb_device_properties *s421; - - p1100 = kmemdup(&s6x0_properties, - sizeof(struct dvb_usb_device_properties), GFP_KERNEL); - if (!p1100) - goto err0; - - /* copy default structure */ - /* fill only different fields */ - p1100->firmware = P1100_FIRMWARE; - p1100->devices[0] = d1100; - p1100->rc.core.rc_query = prof_rc_query; - p1100->rc.core.rc_codes = RC_MAP_TBS_NEC; - p1100->adapter->fe[0].frontend_attach = stv0288_frontend_attach; - - s660 = kmemdup(&s6x0_properties, - sizeof(struct dvb_usb_device_properties), GFP_KERNEL); - if (!s660) - goto err1; - - s660->firmware = S660_FIRMWARE; - s660->num_device_descs = 3; - s660->devices[0] = d660; - s660->devices[1] = d480_1; - s660->devices[2] = d480_2; - s660->adapter->fe[0].frontend_attach = ds3000_frontend_attach; - - p7500 = kmemdup(&s6x0_properties, - sizeof(struct dvb_usb_device_properties), GFP_KERNEL); - if (!p7500) - goto err2; - - p7500->firmware = P7500_FIRMWARE; - p7500->devices[0] = d7500; - p7500->rc.core.rc_query = prof_rc_query; - p7500->rc.core.rc_codes = RC_MAP_TBS_NEC; - p7500->adapter->fe[0].frontend_attach = prof_7500_frontend_attach; - - - s421 = kmemdup(&su3000_properties, - sizeof(struct dvb_usb_device_properties), GFP_KERNEL); - if (!s421) - goto err3; - - s421->num_device_descs = 2; - s421->devices[0] = d421; - s421->devices[1] = d632; - s421->adapter->fe[0].frontend_attach = m88rs2000_frontend_attach; - - if (0 == dvb_usb_device_init(intf, &dw2102_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, &dw2104_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, &dw3101_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, 
&s6x0_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, p1100, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, s660, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, p7500, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, s421, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, &su3000_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, &t220_properties, - THIS_MODULE, NULL, adapter_nr) || - 0 == dvb_usb_device_init(intf, &tt_s2_4600_properties, - THIS_MODULE, NULL, adapter_nr)) { - - /* clean up copied properties */ - kfree(s421); - kfree(p7500); - kfree(s660); - kfree(p1100); + if (!(dvb_usb_device_init(intf, &dw2102_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &dw2104_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &dw3101_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &s6x0_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &p1100_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &s660_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &p7500_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &s421_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &su3000_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &t220_properties, + THIS_MODULE, NULL, adapter_nr) && + dvb_usb_device_init(intf, &tt_s2_4600_properties, + THIS_MODULE, NULL, adapter_nr))) { return 0; } - retval = -ENODEV; - kfree(s421); -err3: - kfree(p7500); -err2: - kfree(s660); -err1: - kfree(p1100); -err0: - return retval; + return -ENODEV; } static void dw2102_disconnect(struct usb_interface *intf) diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c index 4bb5b82599a790c615422fe360ce20f4bdf37a9d..691e05833db1959e27858445e7e3594f5b4df7fc 100644 --- a/drivers/media/usb/dvb-usb/m920x.c +++ b/drivers/media/usb/dvb-usb/m920x.c @@ -274,6 +274,13 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu /* Should check for ack here, if we knew how. */ } if (msg[i].flags & I2C_M_RD) { + char *read = kmalloc(1, GFP_KERNEL); + if (!read) { + ret = -ENOMEM; + kfree(read); + goto unlock; + } + for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? * Send STOP, otherwise send ACK. */ @@ -281,9 +288,12 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu if ((ret = m920x_read(d->udev, M9206_I2C, 0x0, 0x20 | stop, - &msg[i].buf[j], 1)) != 0) + read, 1)) != 0) goto unlock; + msg[i].buf[j] = read[0]; } + + kfree(read); } else { for (j = 0; j < msg[i].len; j++) { /* Last byte of transaction? Then send STOP. 
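	 * (Editorial aside, not in the source: the read branch above now
	 * bounces each byte through a small kmalloc'ed buffer before
	 * copying it into msg[i].buf, because USB transfer buffers must
	 * be DMA-capable and an i2c_msg buffer may live on the caller's
	 * stack.)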
*/ diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c index cf45cc566cbe2e7a2a6fde6422fc0c134b392b09..26408a972b443e7d90751aa3ad8beb9362b51be1 100644 --- a/drivers/media/usb/em28xx/em28xx-cards.c +++ b/drivers/media/usb/em28xx/em28xx-cards.c @@ -3575,8 +3575,10 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev, if (dev->is_audio_only) { retval = em28xx_audio_setup(dev); - if (retval) - return -ENODEV; + if (retval) { + retval = -ENODEV; + goto err_deinit_media; + } em28xx_init_extension(dev); return 0; @@ -3595,7 +3597,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev, dev_err(&dev->intf->dev, "%s: em28xx_i2c_register bus 0 - error [%d]!\n", __func__, retval); - return retval; + goto err_deinit_media; } /* register i2c bus 1 */ @@ -3611,9 +3613,7 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev, "%s: em28xx_i2c_register bus 1 - error [%d]!\n", __func__, retval); - em28xx_i2c_unregister(dev, 0); - - return retval; + goto err_unreg_i2c; } } @@ -3621,6 +3621,12 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev, em28xx_card_setup(dev); return 0; + +err_unreg_i2c: + em28xx_i2c_unregister(dev, 0); +err_deinit_media: + em28xx_unregister_media_device(dev); + return retval; } static int em28xx_duplicate_dev(struct em28xx *dev) @@ -3875,6 +3881,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, goto err_free; } + kref_init(&dev->ref); + dev->devno = nr; dev->model = id->driver_info; dev->alt = -1; @@ -3975,6 +3983,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, } if (dev->board.has_dual_ts && em28xx_duplicate_dev(dev) == 0) { + kref_init(&dev->dev_next->ref); + dev->dev_next->ts = SECONDARY_TS; dev->dev_next->alt = -1; dev->dev_next->is_audio_only = has_vendor_audio && @@ -4029,12 +4039,8 @@ static int em28xx_usb_probe(struct usb_interface *intf, em28xx_write_reg(dev, 0x0b, 0x82); mdelay(100); } - - kref_init(&dev->dev_next->ref); } - kref_init(&dev->ref); - request_modules(dev); /* @@ -4089,11 +4095,8 @@ static void em28xx_usb_disconnect(struct usb_interface *intf) em28xx_close_extension(dev); - if (dev->dev_next) { - em28xx_close_extension(dev->dev_next); + if (dev->dev_next) em28xx_release_resources(dev->dev_next); - } - em28xx_release_resources(dev); if (dev->dev_next) { diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c index af9216278024f7c4ddb13e22fc817210c1cfed80..308bc029099d9da5de834f76e8e7b1b8fc4831f5 100644 --- a/drivers/media/usb/em28xx/em28xx-core.c +++ b/drivers/media/usb/em28xx/em28xx-core.c @@ -89,7 +89,7 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg, mutex_lock(&dev->ctrl_urb_lock); ret = usb_control_msg(udev, pipe, req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x0000, reg, dev->urb_buf, len, HZ); + 0x0000, reg, dev->urb_buf, len, 1000); if (ret < 0) { em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x failed with error %i\n", pipe, @@ -158,7 +158,7 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf, memcpy(dev->urb_buf, buf, len); ret = usb_control_msg(udev, pipe, req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x0000, reg, dev->urb_buf, len, HZ); + 0x0000, reg, dev->urb_buf, len, 1000); mutex_unlock(&dev->ctrl_urb_lock); if (ret < 0) { diff --git a/drivers/media/usb/go7007/s2250-board.c b/drivers/media/usb/go7007/s2250-board.c index 
b9e45124673b6b72955ceb0003658e183e0ed5ee..2e5913bccb38f696cadd93ea5bd1c7b17862a7b1 100644 --- a/drivers/media/usb/go7007/s2250-board.c +++ b/drivers/media/usb/go7007/s2250-board.c @@ -504,6 +504,7 @@ static int s2250_probe(struct i2c_client *client, u8 *data; struct go7007 *go = i2c_get_adapdata(adapter); struct go7007_usb *usb = go->hpi_context; + int err = -EIO; audio = i2c_new_dummy_device(adapter, TLV320_ADDRESS >> 1); if (IS_ERR(audio)) @@ -532,11 +533,8 @@ static int s2250_probe(struct i2c_client *client, V4L2_CID_HUE, -512, 511, 1, 0); sd->ctrl_handler = &state->hdl; if (state->hdl.error) { - int err = state->hdl.error; - - v4l2_ctrl_handler_free(&state->hdl); - kfree(state); - return err; + err = state->hdl.error; + goto fail; } state->std = V4L2_STD_NTSC; @@ -600,7 +598,7 @@ static int s2250_probe(struct i2c_client *client, i2c_unregister_device(audio); v4l2_ctrl_handler_free(&state->hdl); kfree(state); - return -EIO; + return err; } static int s2250_remove(struct i2c_client *client) diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c index 563128d117317f40eef5661aed370616e1ed026d..60e57e0f192725a8dae8500b911c8b2426dd94ae 100644 --- a/drivers/media/usb/hdpvr/hdpvr-video.c +++ b/drivers/media/usb/hdpvr/hdpvr-video.c @@ -308,7 +308,6 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev) dev->status = STATUS_STREAMING; - INIT_WORK(&dev->worker, hdpvr_transmit_buffers); schedule_work(&dev->worker); v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, @@ -1165,6 +1164,9 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent, bool ac3 = dev->flags & HDPVR_FLAG_AC3_CAP; int res; + // initialize dev->worker + INIT_WORK(&dev->worker, hdpvr_transmit_buffers); + dev->cur_std = V4L2_STD_525_60; dev->width = 720; dev->height = 480; diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index d38dee1792e41f21b2f21758fe393e92b5a061d4..3915d551d59e7b2f564a2d222d5ee9d21cf1028b 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -1467,7 +1467,7 @@ static int pvr2_upload_firmware1(struct pvr2_hdw *hdw) for (address = 0; address < fwsize; address += 0x800) { memcpy(fw_ptr, fw_entry->data + address, 0x800); ret += usb_control_msg(hdw->usb_dev, pipe, 0xa0, 0x40, address, - 0, fw_ptr, 0x800, HZ); + 0, fw_ptr, 0x800, 1000); } trace_firmware("Upload done, releasing device's CPU"); @@ -1605,7 +1605,7 @@ int pvr2_upload_firmware2(struct pvr2_hdw *hdw) ((u32 *)fw_ptr)[icnt] = swab32(((u32 *)fw_ptr)[icnt]); ret |= usb_bulk_msg(hdw->usb_dev, pipe, fw_ptr,bcnt, - &actual_length, HZ); + &actual_length, 1000); ret |= (actual_length != bcnt); if (ret) break; fw_done += bcnt; @@ -3438,7 +3438,7 @@ void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw, 0xa0,0xc0, address,0, hdw->fw_buffer+address, - 0x800,HZ); + 0x800,1000); if (ret < 0) break; } @@ -3977,7 +3977,7 @@ void pvr2_hdw_cpureset_assert(struct pvr2_hdw *hdw,int val) /* Write the CPUCS register on the 8051. The lsb of the register is the reset bit; a 1 asserts reset while a 0 clears it. 
*/ pipe = usb_sndctrlpipe(hdw->usb_dev, 0); - ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,HZ); + ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,1000); if (ret < 0) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "cpureset_assert(%d) error=%d",val,ret); diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c index 4af55e2478be19ef0cd85c723bdb8e3596455426..cb15eb32d2a6be0bb6a360e842bfa134c7a73ba5 100644 --- a/drivers/media/usb/s2255/s2255drv.c +++ b/drivers/media/usb/s2255/s2255drv.c @@ -1884,7 +1884,7 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, Value, Index, buf, - TransferBufferLength, HZ * 5); + TransferBufferLength, USB_CTRL_SET_TIMEOUT); if (r >= 0) memcpy(TransferBuffer, buf, TransferBufferLength); @@ -1893,7 +1893,7 @@ static long s2255_vendor_req(struct s2255_dev *dev, unsigned char Request, r = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), Request, USB_TYPE_VENDOR | USB_RECIP_DEVICE, Value, Index, buf, - TransferBufferLength, HZ * 5); + TransferBufferLength, USB_CTRL_SET_TIMEOUT); } kfree(buf); return r; diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c index b4f8bc5db138917baa34d827cc209554c15ef43e..ce717502ea4c393096646c92b25e380e86d9d4df 100644 --- a/drivers/media/usb/stk1160/stk1160-core.c +++ b/drivers/media/usb/stk1160/stk1160-core.c @@ -65,7 +65,7 @@ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value) return -ENOMEM; ret = usb_control_msg(dev->udev, pipe, 0x00, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - 0x00, reg, buf, sizeof(u8), HZ); + 0x00, reg, buf, sizeof(u8), 1000); if (ret < 0) { stk1160_err("read failed on reg 0x%x (%d)\n", reg, ret); @@ -85,7 +85,7 @@ int stk1160_write_reg(struct stk1160 *dev, u16 reg, u16 value) ret = usb_control_msg(dev->udev, pipe, 0x01, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - value, reg, NULL, 0, HZ); + value, reg, NULL, 0, 1000); if (ret < 0) { stk1160_err("write failed on reg 0x%x (%d)\n", reg, ret); @@ -403,7 +403,7 @@ static void stk1160_disconnect(struct usb_interface *interface) /* Here is the only place where isoc get released */ stk1160_uninit_isoc(dev); - stk1160_clear_queue(dev); + stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR); video_unregister_device(&dev->vdev); v4l2_device_disconnect(&dev->v4l2_dev); diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c index 6a4eb616d5160ec822acace9122bef200827792d..1aa953469402f50590a3b92590ed83871d88c140 100644 --- a/drivers/media/usb/stk1160/stk1160-v4l.c +++ b/drivers/media/usb/stk1160/stk1160-v4l.c @@ -258,7 +258,7 @@ static int stk1160_start_streaming(struct stk1160 *dev) stk1160_uninit_isoc(dev); out_stop_hw: usb_set_interface(dev->udev, 0, 0); - stk1160_clear_queue(dev); + stk1160_clear_queue(dev, VB2_BUF_STATE_QUEUED); mutex_unlock(&dev->v4l_lock); @@ -306,7 +306,7 @@ static int stk1160_stop_streaming(struct stk1160 *dev) stk1160_stop_hw(dev); - stk1160_clear_queue(dev); + stk1160_clear_queue(dev, VB2_BUF_STATE_ERROR); stk1160_dbg("streaming stopped\n"); @@ -745,7 +745,7 @@ static const struct video_device v4l_template = { /********************************************************************/ /* Must be called with both v4l_lock and vb_queue_lock hold */ -void stk1160_clear_queue(struct stk1160 *dev) +void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state) { struct stk1160_buffer *buf; unsigned long flags; 
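The new vb2_state argument reflects a videobuf2 contract: when start_streaming() fails, the driver must hand back every buffer it still owns as VB2_BUF_STATE_QUEUED so userspace can retry, while on stop or disconnect the buffers are completed as VB2_BUF_STATE_ERROR. A minimal sketch of a caller under that contract (example_start_streaming() and example_start_hw() are hypothetical and not part of this patch):

static int example_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct stk1160 *dev = vb2_get_drv_priv(vq);
	int ret;

	ret = example_start_hw(dev);	/* hypothetical hardware start helper */
	if (ret)
		/* Failed start: return the buffers as QUEUED, not ERROR. */
		stk1160_clear_queue(dev, VB2_BUF_STATE_QUEUED);

	return ret;
}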
@@ -756,7 +756,7 @@ void stk1160_clear_queue(struct stk1160 *dev) buf = list_first_entry(&dev->avail_bufs, struct stk1160_buffer, list); list_del(&buf->list); - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + vb2_buffer_done(&buf->vb.vb2_buf, vb2_state); stk1160_dbg("buffer [%p/%d] aborted\n", buf, buf->vb.vb2_buf.index); } @@ -766,7 +766,7 @@ void stk1160_clear_queue(struct stk1160 *dev) buf = dev->isoc_ctl.buf; dev->isoc_ctl.buf = NULL; - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + vb2_buffer_done(&buf->vb.vb2_buf, vb2_state); stk1160_dbg("buffer [%p/%d] aborted\n", buf, buf->vb.vb2_buf.index); } diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h index a31ea1c80f25569a9312e228601c3ac58665bd36..a70963ce875337f3929e45582f800dc1251d6848 100644 --- a/drivers/media/usb/stk1160/stk1160.h +++ b/drivers/media/usb/stk1160/stk1160.h @@ -166,7 +166,7 @@ struct regval { int stk1160_vb2_setup(struct stk1160 *dev); int stk1160_video_register(struct stk1160 *dev); void stk1160_video_unregister(struct stk1160 *dev); -void stk1160_clear_queue(struct stk1160 *dev); +void stk1160_clear_queue(struct stk1160 *dev, enum vb2_buffer_state vb2_state); /* Provided by stk1160-video.c */ int stk1160_alloc_isoc(struct stk1160 *dev); diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 447b6a198926ec088df047306117a778410de7a2..282f3d2388cc20612d612fa39622d99f208f4095 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -2065,7 +2065,6 @@ int uvc_register_video_device(struct uvc_device *dev, const struct v4l2_file_operations *fops, const struct v4l2_ioctl_ops *ioctl_ops) { - const char *name; int ret; /* Initialize the video buffers queue. */ @@ -2094,20 +2093,16 @@ int uvc_register_video_device(struct uvc_device *dev, case V4L2_BUF_TYPE_VIDEO_CAPTURE: default: vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; - name = "Video Capture"; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; - name = "Video Output"; break; case V4L2_BUF_TYPE_META_CAPTURE: vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING; - name = "Metadata"; break; } - snprintf(vdev->name, sizeof(vdev->name), "%s %u", name, - stream->header.bTerminalLink); + strscpy(vdev->name, dev->name, sizeof(vdev->name)); /* * Set the driver data before calling video_register_device, otherwise diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index b8477fa93b7d7733c8226634b282a8545ee45f71..f6373d678d256361b14024406688680af7d3a91e 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -1915,6 +1915,10 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream, if (ep == NULL) return -EIO; + /* Reject broken descriptors. */ + if (usb_endpoint_maxp(&ep->desc) == 0) + return -EIO; + ret = uvc_init_video_bulk(stream, ep, gfp_flags); } diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index a3dfacf069c44dede79b361f4c4e0d21002932b7..c884020b287843270872f88b36e0856ff0f18f0d 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -183,7 +183,7 @@ /* Maximum status buffer size in bytes of interrupt URB. 
*/ #define UVC_MAX_STATUS_SIZE 16 -#define UVC_CTRL_CONTROL_TIMEOUT 500 +#define UVC_CTRL_CONTROL_TIMEOUT 5000 #define UVC_CTRL_STREAMING_TIMEOUT 5000 /* Maximum allowed number of control mappings per device */ diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 4ffa14e44efe4d54a2d2e36d1d1f5cb89ccbaa67..6d6d30dbbe68b416224c24134db0ab31a0d54027 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -2127,6 +2127,7 @@ static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops, static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, struct file *file, void *fh, void *arg) { + struct video_device *vfd = video_devdata(file); struct v4l2_streamparm *p = arg; v4l2_std_id std; int ret = check_fmt(file, p->type); @@ -2138,7 +2139,8 @@ static int v4l_g_parm(const struct v4l2_ioctl_ops *ops, if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; - p->parm.capture.readbuffers = 2; + if (vfd->device_caps & V4L2_CAP_READWRITE) + p->parm.capture.readbuffers = 2; ret = ops->vidioc_g_std(file, fh, &std); if (ret == 0) v4l2_video_std_frame_period(std, &p->parm.capture.timeperframe); diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index b221b4e438a1a91f8e8bf31164390189288e1cdb..73190652c267bd61cddafa2ede59a3e98ac1f3ab 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -585,19 +585,14 @@ int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, } EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); -int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, - struct v4l2_buffer *buf) +static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq, + struct v4l2_buffer *buf) { - struct vb2_queue *vq; - int ret = 0; - unsigned int i; - - vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - ret = vb2_querybuf(vq, buf); - /* Adjust MMAP memory offsets for the CAPTURE queue */ if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) { if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { + unsigned int i; + for (i = 0; i < buf->length; ++i) buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE; @@ -605,8 +600,23 @@ int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, buf->m.offset += DST_QUEUE_OFF_BASE; } } +} - return ret; +int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, + struct v4l2_buffer *buf) +{ + struct vb2_queue *vq; + int ret; + + vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); + ret = vb2_querybuf(vq, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); @@ -763,6 +773,9 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, if (ret) return ret; + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + /* * If the capture queue is streaming, but streaming hasn't started * on the device, but was asked to stop, mark the previously queued @@ -784,9 +797,17 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf) { struct vb2_queue *vq; + int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); + ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 
0; } EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); @@ -795,9 +816,17 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, { struct video_device *vdev = video_devdata(file); struct vb2_queue *vq; + int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); - return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); + ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); + if (ret) + return ret; + + /* Adjust MMAP memory offsets for the CAPTURE queue */ + v4l2_m2m_adjust_mem_offset(vq, buf); + + return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c index c267283b01fdaf74c2ef094926fd65da72af0ca5..e749dcb3ddea93335a6ca5f5dd6e1de0e9a52418 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c @@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev) smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0); ebi->smc.regmap = syscon_node_to_regmap(smc_np); - if (IS_ERR(ebi->smc.regmap)) - return PTR_ERR(ebi->smc.regmap); + if (IS_ERR(ebi->smc.regmap)) { + ret = PTR_ERR(ebi->smc.regmap); + goto put_node; + } ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np); - if (IS_ERR(ebi->smc.layout)) - return PTR_ERR(ebi->smc.layout); + if (IS_ERR(ebi->smc.layout)) { + ret = PTR_ERR(ebi->smc.layout); + goto put_node; + } ebi->smc.clk = of_clk_get(smc_np, 0); if (IS_ERR(ebi->smc.clk)) { - if (PTR_ERR(ebi->smc.clk) != -ENOENT) - return PTR_ERR(ebi->smc.clk); + if (PTR_ERR(ebi->smc.clk) != -ENOENT) { + ret = PTR_ERR(ebi->smc.clk); + goto put_node; + } ebi->smc.clk = NULL; } + of_node_put(smc_np); ret = clk_prepare_enable(ebi->smc.clk); if (ret) return ret; @@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev) } return of_platform_populate(np, NULL, NULL, dev); + +put_node: + of_node_put(smc_np); + return ret; } static __maybe_unused int atmel_ebi_resume(struct device *dev) diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index ddb1879f07d3f7a9b657307719ca6bd05c9bdd92..5a059be3516c9f2c1082c05b5ebb7fef54204fe6 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -1403,7 +1403,7 @@ static struct emif_data *__init_or_module get_device_details( temp = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL); - if (!emif || !pd || !dev_info) { + if (!emif || !temp || !dev_info) { dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__); goto error; } @@ -1495,7 +1495,7 @@ static int __init_or_module emif_probe(struct platform_device *pdev) { struct emif_data *emif; struct resource *res; - int irq; + int irq, ret; if (pdev->dev.of_node) emif = of_get_memory_device_details(pdev->dev.of_node, &pdev->dev); @@ -1526,7 +1526,9 @@ static int __init_or_module emif_probe(struct platform_device *pdev) emif_onetime_settings(emif); emif_debugfs_init(emif); disable_and_clear_all_interrupts(emif); - setup_interrupts(emif, irq); + ret = setup_interrupts(emif, irq); + if (ret) + goto error; /* One-time actions taken on probing the first device */ if (!emif1) { diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c index 575fadbffa3062ec74af5bb3b9a4300e07281858..9eb8cc7de494a8d6d558e17f3ae519635abdf7bc 100644 --- a/drivers/memory/pl172.c +++ b/drivers/memory/pl172.c @@ -273,14 +273,12 @@ static int pl172_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int pl172_remove(struct amba_device *adev) +static void pl172_remove(struct amba_device *adev) { struct pl172_data *pl172 = 
amba_get_drvdata(adev); clk_disable_unprepare(pl172->clk); amba_release_regions(adev); - - return 0; } static const struct amba_id pl172_ids[] = { diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c index cc01979780d87eb04fffdaae933d5ecd240bac16..b0b251bb207f3a68d8a8f1969567ac1275f0ce0e 100644 --- a/drivers/memory/pl353-smc.c +++ b/drivers/memory/pl353-smc.c @@ -427,14 +427,12 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) return err; } -static int pl353_smc_remove(struct amba_device *adev) +static void pl353_smc_remove(struct amba_device *adev) { struct pl353_smc_data *pl353_smc = amba_get_drvdata(adev); clk_disable_unprepare(pl353_smc->memclk); clk_disable_unprepare(pl353_smc->aclk); - - return 0; } static const struct amba_id pl353_ids[] = { diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c index a760ab08256ff47aeffe8a7e3fa4e88a99683022..781af51e3f79325b57b1b21f810b42d88801cc4d 100644 --- a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -245,7 +245,7 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap"); rpc->dirmap = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(rpc->dirmap)) - rpc->dirmap = NULL; + return PTR_ERR(rpc->dirmap); rpc->size = resource_size(res); rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); @@ -592,6 +592,7 @@ static int rpcif_probe(struct platform_device *pdev) struct platform_device *vdev; struct device_node *flash; const char *name; + int ret; flash = of_get_next_child(pdev->dev.of_node, NULL); if (!flash) { @@ -615,7 +616,14 @@ static int rpcif_probe(struct platform_device *pdev) return -ENOMEM; vdev->dev.parent = &pdev->dev; platform_set_drvdata(pdev, vdev); - return platform_device_add(vdev); + + ret = platform_device_add(vdev); + if (ret) { + platform_device_put(vdev); + return ret; + } + + return 0; } static int rpcif_remove(struct platform_device *pdev) diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 15680c3c92792aaca3b3b8e83b8b4e87bf82d1fe..3f9f84f9f2880c68c8cf0e0cac511585d7aa401e 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -699,6 +699,16 @@ config MFD_INTEL_PMC_BXT Register and P-unit access. In addition this creates devices for iTCO watchdog and telemetry that are part of the PMC. +config MFD_INTEL_PMT + tristate "Intel Platform Monitoring Technology (PMT) support" + depends on PCI + select MFD_CORE + help + The Intel Platform Monitoring Technology (PMT) is an interface that + provides access to hardware monitor registers. This driver supports + Telemetry, Watcher, and Crashlog PMT capabilities/devices for + platforms starting from Tiger Lake. 
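For context: the intel_pmt.c driver added later in this patch enumerates Intel DVSEC capabilities and registers one MFD cell per capability, named "pmt_telemetry", "pmt_watcher" or "pmt_crashlog", handing each cell its discovery tables as MEM resources. A hedged sketch of how a capability driver might bind to one of those cells (the probe body is illustrative only and not a driver in this series):

#include <linux/module.h>
#include <linux/platform_device.h>

static int pmt_telem_example_probe(struct platform_device *pdev)
{
	/* The parent MFD driver passes the discovery-table region along
	 * as a MEM resource on the cell's platform device. */
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	return res ? 0 : -ENODEV;
}

static struct platform_driver pmt_telem_example_driver = {
	.driver = { .name = "pmt_telemetry" },	/* matches the cell name */
	.probe = pmt_telem_example_probe,
};
module_platform_driver(pmt_telem_example_driver);

MODULE_LICENSE("GPL v2");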
+ config MFD_IPAQ_MICRO bool "Atmel Micro ASIC (iPAQ h3100/h3600/h3700) Support" depends on SA1100_H3100 || SA1100_H3600 diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index fb1df45a301e5d0d1ed21eaeb15419f79ede0558..ce8f1c0583d5ccf741f663bcb7d3563d09767b42 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -216,6 +216,7 @@ obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o obj-$(CONFIG_MFD_INTEL_PMC_BXT) += intel_pmc_bxt.o +obj-$(CONFIG_MFD_INTEL_PMT) += intel_pmt.o obj-$(CONFIG_MFD_PALMAS) += palmas.o obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o obj-$(CONFIG_MFD_RC5T583) += rc5t583.o rc5t583-irq.o diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index a6bd2134cea2ae8c8a2720347b8523965e8e8cd4..14e4bbe6a9da3d007bdf44ad65ea3f4b44e343ad 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -914,14 +914,14 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, ret = mfd_add_devices(&pdev->dev, pdev->id, &asic3_cell_ds1wm, 1, mem, asic->irq_base, NULL); if (ret < 0) - goto out; + goto out_unmap; } if (mem_sdio && (irq >= 0)) { ret = mfd_add_devices(&pdev->dev, pdev->id, &asic3_cell_mmc, 1, mem_sdio, irq, NULL); if (ret < 0) - goto out; + goto out_unmap; } ret = 0; @@ -935,8 +935,12 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, ret = mfd_add_devices(&pdev->dev, 0, asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0, NULL); } + return ret; - out: +out_unmap: + if (asic->tmio_cnf) + iounmap(asic->tmio_cnf); +out: return ret; } diff --git a/drivers/mfd/atmel-flexcom.c b/drivers/mfd/atmel-flexcom.c index d2f5c073fdf31553d4d107c7d605f0fe155671d8..559eb4d352b68b21c69fe3ad5002b37ac7cd6c22 100644 --- a/drivers/mfd/atmel-flexcom.c +++ b/drivers/mfd/atmel-flexcom.c @@ -87,8 +87,7 @@ static const struct of_device_id atmel_flexcom_of_match[] = { }; MODULE_DEVICE_TABLE(of, atmel_flexcom_of_match); -#ifdef CONFIG_PM_SLEEP -static int atmel_flexcom_resume(struct device *dev) +static int __maybe_unused atmel_flexcom_resume_noirq(struct device *dev) { struct atmel_flexcom *ddata = dev_get_drvdata(dev); int err; @@ -105,16 +104,16 @@ static int atmel_flexcom_resume(struct device *dev) return 0; } -#endif -static SIMPLE_DEV_PM_OPS(atmel_flexcom_pm_ops, NULL, - atmel_flexcom_resume); +static const struct dev_pm_ops atmel_flexcom_pm_ops = { + .resume_noirq = atmel_flexcom_resume_noirq, +}; static struct platform_driver atmel_flexcom_driver = { .probe = atmel_flexcom_probe, .driver = { .name = "atmel_flexcom", - .pm = &atmel_flexcom_pm_ops, + .pm = pm_ptr(&atmel_flexcom_pm_ops), .of_match_table = atmel_flexcom_of_match, }, }; diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c index c8fe334b5fe8bb425833bd57baf24c59b71afefe..045cbf0cbe53ae55f334f0a0d83aa25da96bf5cc 100644 --- a/drivers/mfd/intel-lpss-acpi.c +++ b/drivers/mfd/intel-lpss-acpi.c @@ -102,6 +102,7 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev) { struct intel_lpss_platform_info *info; const struct acpi_device_id *id; + int ret; id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev); if (!id) @@ -115,10 +116,14 @@ static int intel_lpss_acpi_probe(struct platform_device *pdev) info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); info->irq = platform_get_irq(pdev, 0); + ret = intel_lpss_probe(&pdev->dev, info); + if (ret) + return ret; + pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); - return intel_lpss_probe(&pdev->dev, info); + 
return 0; } static int intel_lpss_acpi_remove(struct platform_device *pdev) diff --git a/drivers/mfd/intel_pmt.c b/drivers/mfd/intel_pmt.c new file mode 100644 index 0000000000000000000000000000000000000000..dd7eb614c28e47d56fd88fb80cf501e0e67fd12b --- /dev/null +++ b/drivers/mfd/intel_pmt.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Platform Monitoring Technology PMT driver + * + * Copyright (c) 2020, Intel Corporation. + * All Rights Reserved. + * + * Author: David E. Box + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Intel DVSEC capability vendor space offsets */ +#define INTEL_DVSEC_ENTRIES 0xA +#define INTEL_DVSEC_SIZE 0xB +#define INTEL_DVSEC_TABLE 0xC +#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0)) +#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3)) +#define INTEL_DVSEC_ENTRY_SIZE 4 + +/* PMT capabilities */ +#define DVSEC_INTEL_ID_TELEMETRY 2 +#define DVSEC_INTEL_ID_WATCHER 3 +#define DVSEC_INTEL_ID_CRASHLOG 4 + +struct intel_dvsec_header { + u16 length; + u16 id; + u8 num_entries; + u8 entry_size; + u8 tbir; + u32 offset; +}; + +enum pmt_quirks { + /* Watcher capability not supported */ + PMT_QUIRK_NO_WATCHER = BIT(0), + + /* Crashlog capability not supported */ + PMT_QUIRK_NO_CRASHLOG = BIT(1), + + /* Use shift instead of mask to read discovery table offset */ + PMT_QUIRK_TABLE_SHIFT = BIT(2), + + /* DVSEC not present (provided in driver data) */ + PMT_QUIRK_NO_DVSEC = BIT(3), +}; + +struct pmt_platform_info { + unsigned long quirks; + struct intel_dvsec_header **capabilities; +}; + +static const struct pmt_platform_info tgl_info = { + .quirks = PMT_QUIRK_NO_WATCHER | PMT_QUIRK_NO_CRASHLOG | + PMT_QUIRK_TABLE_SHIFT, +}; + +/* DG1 Platform with DVSEC quirk*/ +static struct intel_dvsec_header dg1_telemetry = { + .length = 0x10, + .id = 2, + .num_entries = 1, + .entry_size = 3, + .tbir = 0, + .offset = 0x466000, +}; + +static struct intel_dvsec_header *dg1_capabilities[] = { + &dg1_telemetry, + NULL +}; + +static const struct pmt_platform_info dg1_info = { + .quirks = PMT_QUIRK_NO_DVSEC, + .capabilities = dg1_capabilities, +}; + +static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header, + unsigned long quirks) +{ + struct device *dev = &pdev->dev; + struct resource *res, *tmp; + struct mfd_cell *cell; + const char *name; + int count = header->num_entries; + int size = header->entry_size; + int id = header->id; + int i; + + switch (id) { + case DVSEC_INTEL_ID_TELEMETRY: + name = "pmt_telemetry"; + break; + case DVSEC_INTEL_ID_WATCHER: + if (quirks & PMT_QUIRK_NO_WATCHER) { + dev_info(dev, "Watcher not supported\n"); + return -EINVAL; + } + name = "pmt_watcher"; + break; + case DVSEC_INTEL_ID_CRASHLOG: + if (quirks & PMT_QUIRK_NO_CRASHLOG) { + dev_info(dev, "Crashlog not supported\n"); + return -EINVAL; + } + name = "pmt_crashlog"; + break; + default: + return -EINVAL; + } + + if (!header->num_entries || !header->entry_size) { + dev_err(dev, "Invalid count or size for %s header\n", name); + return -EINVAL; + } + + cell = devm_kzalloc(dev, sizeof(*cell), GFP_KERNEL); + if (!cell) + return -ENOMEM; + + res = devm_kcalloc(dev, count, sizeof(*res), GFP_KERNEL); + if (!res) + return -ENOMEM; + + if (quirks & PMT_QUIRK_TABLE_SHIFT) + header->offset >>= 3; + + /* + * The PMT DVSEC contains the starting offset and count for a block of + * discovery tables, each providing access to monitoring facilities for + * a section of the device. 
Create a resource list of these tables to + * provide to the driver. + */ + for (i = 0, tmp = res; i < count; i++, tmp++) { + tmp->start = pdev->resource[header->tbir].start + + header->offset + i * (size << 2); + tmp->end = tmp->start + (size << 2) - 1; + tmp->flags = IORESOURCE_MEM; + } + + cell->resources = res; + cell->num_resources = count; + cell->name = name; + + return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cell, 1, NULL, 0, + NULL); +} + +static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct pmt_platform_info *info; + unsigned long quirks = 0; + bool found_devices = false; + int ret, pos = 0; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + info = (struct pmt_platform_info *)id->driver_data; + + if (info) + quirks = info->quirks; + + if (info && (info->quirks & PMT_QUIRK_NO_DVSEC)) { + struct intel_dvsec_header **header; + + header = info->capabilities; + while (*header) { + ret = pmt_add_dev(pdev, *header, quirks); + if (ret) + dev_warn(&pdev->dev, + "Failed to add device for DVSEC id %d\n", + (*header)->id); + else + found_devices = true; + + ++header; + } + } else { + do { + struct intel_dvsec_header header; + u32 table; + u16 vid; + + pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC); + if (!pos) + break; + + pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vid); + if (vid != PCI_VENDOR_ID_INTEL) + continue; + + pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, + &header.id); + pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, + &header.num_entries); + pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, + &header.entry_size); + pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, + &table); + + header.tbir = INTEL_DVSEC_TABLE_BAR(table); + header.offset = INTEL_DVSEC_TABLE_OFFSET(table); + + ret = pmt_add_dev(pdev, &header, quirks); + if (ret) + continue; + + found_devices = true; + } while (true); + } + + if (!found_devices) + return -ENODEV; + + pm_runtime_put(&pdev->dev); + pm_runtime_allow(&pdev->dev); + + return 0; +} + +static void pmt_pci_remove(struct pci_dev *pdev) +{ + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); +} + +#define PCI_DEVICE_ID_INTEL_PMT_ADL 0x467d +#define PCI_DEVICE_ID_INTEL_PMT_DG1 0x490e +#define PCI_DEVICE_ID_INTEL_PMT_OOBMSM 0x09a7 +#define PCI_DEVICE_ID_INTEL_PMT_TGL 0x9a0d +static const struct pci_device_id pmt_pci_ids[] = { + { PCI_DEVICE_DATA(INTEL, PMT_ADL, &tgl_info) }, + { PCI_DEVICE_DATA(INTEL, PMT_DG1, &dg1_info) }, + { PCI_DEVICE_DATA(INTEL, PMT_OOBMSM, NULL) }, + { PCI_DEVICE_DATA(INTEL, PMT_TGL, &tgl_info) }, + { } +}; +MODULE_DEVICE_TABLE(pci, pmt_pci_ids); + +static struct pci_driver pmt_pci_driver = { + .name = "intel-pmt", + .id_table = pmt_pci_ids, + .probe = pmt_pci_probe, + .remove = pmt_pci_remove, +}; +module_pci_driver(pmt_pci_driver); + +MODULE_AUTHOR("David E. 
Box "); +MODULE_DESCRIPTION("Intel Platform Monitoring Technology PMT driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/lpc_sunway_chip3.c b/drivers/mfd/lpc_sunway_chip3.c index 793b557195c268113c9e808f15c9cee11a45af35..878aff87c99299730cd09f436a0a8e377f6324cd 100644 --- a/drivers/mfd/lpc_sunway_chip3.c +++ b/drivers/mfd/lpc_sunway_chip3.c @@ -75,53 +75,16 @@ enum { LPC_DMA_SWRST = 0x70, }; -enum { - LPC_IRQ0 = 0, /* 8254 Timer */ - LPC_IRQ1, /* Keyboard */ - LPC_IRQ2, /* Reserved */ - LPC_IRQ3, /* UART */ - LPC_IRQ4, /* UART */ - LPC_IRQ5, /* LPC Parallel Port2 */ - LPC_IRQ6, /* FDC-Floppy Disk Controller */ - LPC_IRQ7, /* LPT-Parallel Port1 */ - LPC_NR_IRQS, - LPC_IRQ8, /* RTC */ - LPC_IRQ9, /* Undefined */ - LPC_IRQ10, /* Undefined */ - LPC_IRQ11, /* Undefined */ - LPC_IRQ12, /* Mouse */ - LPC_IRQ13, /* Undefined */ - LPC_IRQ14, /* Undefined */ - LPC_IRQ15, /* Undefined */ -}; - struct lpc_chip3_adapter { void __iomem *hst_regs; struct device *dev; int irq; - struct irq_chip_generic *gc; unsigned int features; }; static struct resource superio_chip3_resources[] = { { .flags = IORESOURCE_IO, - }, { - .start = LPC_IRQ1, - .flags = IORESOURCE_IRQ, - .name = "i8042_kbd_irq", - }, { - .start = LPC_IRQ12, - .flags = IORESOURCE_IRQ, - .name = "i8042_aux_irq", - }, { - .start = LPC_IRQ5, - .flags = IORESOURCE_IRQ, - .name = "uart0_irq", - }, { - .start = LPC_IRQ4, - .flags = IORESOURCE_IRQ, - .name = "uart1_irq", } }; @@ -218,75 +181,9 @@ static void lpc_fw_flash_init(struct platform_device *pdev, } -static u32 lpc_do_irq(struct lpc_chip3_adapter *lpc_adapter) -{ - u32 irq_status = readl_relaxed(lpc_adapter->hst_regs + LPC_IRQ); - u32 ret = irq_status; - - DBG_LPC("%s irq_status=%#x\n", __func__, irq_status); - while (irq_status) { - int hwirq = fls(irq_status) - 1; - - generic_handle_irq(hwirq); - irq_status &= ~BIT(hwirq); - } - - lpc_writel(lpc_adapter->hst_regs, LPC_IRQ, ret); - return 1; -} - -static void lpc_irq_handler_mfd(struct irq_desc *desc) -{ - unsigned int irq = irq_desc_get_irq(desc); - struct lpc_chip3_adapter *lpc_adapter = irq_get_handler_data(irq); - u32 worked = 0; - - DBG_LPC("enter %s line:%d\n", __func__, __LINE__); - - worked = lpc_do_irq(lpc_adapter); - if (worked == IRQ_HANDLED) - dev_dbg(lpc_adapter->dev, "LPC irq handled.\n"); - - DBG_LPC("leave %s line:%d\n", __func__, __LINE__); -} - -static void lpc_unmask_interrupt_all(struct lpc_chip3_adapter *lpc_adapter) -{ - lpc_writel(lpc_adapter->hst_regs, LPC_IRQ_MASK, 0); -} - -static void lpc_irq_mask_ack(struct irq_data *d) -{ - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - struct irq_chip_type *ct = irq_data_get_chip_type(d); - u32 mask = d->mask; - - irq_gc_lock(gc); - *ct->mask_cache |= mask; - irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); - irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); -} - -static void lpc_enable_irqs(struct lpc_chip3_adapter *lpc_adapter) -{ - int interrupt = 0; - - lpc_unmask_interrupt_all(lpc_adapter); - - interrupt = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ); - - lpc_writel(lpc_adapter->hst_regs, LPC_CTL, 0x1600); - interrupt = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ); -} - static int lpc_chip3_probe(struct platform_device *pdev) { int ret; - int num_ct = 1; - int irq_base; - struct irq_chip_generic *gc; - struct irq_chip_type *ct; struct lpc_chip3_adapter *lpc_adapter; struct resource *mem; @@ -312,32 +209,6 @@ static int lpc_chip3_probe(struct platform_device *pdev) lpc_adapter->dev = &pdev->dev; lpc_adapter->features = 0; - lpc_adapter->irq 
= platform_get_irq(pdev, 0); - if (lpc_adapter->irq < 0) { - dev_err(&pdev->dev, "no irq resource?\n"); - return lpc_adapter->irq; /* -ENXIO */ - } - - irq_base = LPC_IRQ0; - gc = irq_alloc_generic_chip("LPC_CHIP3", num_ct, irq_base, - lpc_adapter->hst_regs, handle_level_irq); - - ct = gc->chip_types; - ct->regs.mask = LPC_IRQ_MASK; - ct->regs.ack = LPC_IRQ; - ct->chip.irq_mask = irq_gc_mask_set_bit; - ct->chip.irq_unmask = irq_gc_mask_clr_bit; - ct->chip.irq_ack = irq_gc_ack_set_bit; - ct->chip.irq_mask_ack = lpc_irq_mask_ack; - irq_setup_generic_chip(gc, IRQ_MSK(LPC_NR_IRQS), 0, 0, - IRQ_NOPROBE | IRQ_LEVEL); - - lpc_adapter->gc = gc; - - irq_set_handler_data(lpc_adapter->irq, lpc_adapter); - irq_set_chained_handler(lpc_adapter->irq, - (irq_flow_handler_t) lpc_irq_handler_mfd); - lpc_enable(lpc_adapter); lpc_mem_flash_init(pdev, lpc_adapter); @@ -350,7 +221,6 @@ static int lpc_chip3_probe(struct platform_device *pdev) goto out_dev; dev_info(lpc_adapter->dev, "probe succeed !\n"); - lpc_enable_irqs(lpc_adapter); return ret; diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c index 1abe7432aad82a308d6b3d8f4c977e9a91964edd..e281a9202f110f3c5b98ba3cbf304bb12115e56e 100644 --- a/drivers/mfd/mc13xxx-core.c +++ b/drivers/mfd/mc13xxx-core.c @@ -323,8 +323,10 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode, adc1 |= MC13783_ADC1_ATOX; dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__); - mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE, + ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE, mc13xxx_handler_adcdone, __func__, &adcdone_data); + if (ret) + goto out; mc13xxx_reg_write(mc13xxx, MC13XXX_ADC0, adc0); mc13xxx_reg_write(mc13xxx, MC13XXX_ADC1, adc1); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index fafa8b0d809960e7867d6e60201f33d0a86a65d3..140716083ab87afa2218cbb7144af7b22a5601b6 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -351,6 +351,14 @@ config HMC6352 This driver provides support for the Honeywell HMC6352 compass, providing configuration and heading data via sysfs. +config SUNWAY_GED + tristate "Sunway generic event device for memory hotplug" + depends on SW64 + depends on MEMORY_HOTPLUG + help + This driver provides support for the Sunway generic event device, + which notifies the kernel of memory hotplug events signalled by + the platform and adds or removes the affected memory ranges.
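The driver itself (added later in this patch as drivers/misc/sunway-ged.c) reacts to a hotplug interrupt by reading a start address and length from its MMIO window and onlining that range. A hedged sketch of the core add path, assuming the three-argument __add_memory() interface this series uses (example_hot_add() is illustrative only):

#include <linux/device.h>
#include <linux/memory_hotplug.h>

static int example_hot_add(u64 start, u64 size)
{
	int ret;

	lock_device_hotplug();
	/* Node 0 is assumed here, exactly as in the driver below. */
	ret = __add_memory(0, start, size);
	unlock_device_hotplug();

	/* -EEXIST means the kernel already knows this range; not fatal. */
	return (ret && ret != -EEXIST) ? ret : 0;
}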
+ config DS1682 tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm" depends on I2C diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index d23231e73330373cf13f81eff6ff4e5ce7ef4769..3615763234a643f221b6876abaa6e573e106e8a1 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o obj-$(CONFIG_DS1682) += ds1682.o obj-$(CONFIG_C2PORT) += c2port/ obj-$(CONFIG_HMC6352) += hmc6352.o +obj-$(CONFIG_SUNWAY_GED) += sunway-ged.o obj-y += eeprom/ obj-y += cb710/ obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c index de6d44a158bbae69f22ed2deed3000928f7cb6fa..3f514d77a843f3c5bb13934343945dde101628c0 100644 --- a/drivers/misc/cardreader/alcor_pci.c +++ b/drivers/misc/cardreader/alcor_pci.c @@ -266,7 +266,7 @@ static int alcor_pci_probe(struct pci_dev *pdev, if (!priv) return -ENOMEM; - ret = ida_simple_get(&alcor_pci_idr, 0, 0, GFP_KERNEL); + ret = ida_alloc(&alcor_pci_idr, GFP_KERNEL); if (ret < 0) return ret; priv->id = ret; @@ -280,7 +280,8 @@ static int alcor_pci_probe(struct pci_dev *pdev, ret = pci_request_regions(pdev, DRV_NAME_ALCOR_PCI); if (ret) { dev_err(&pdev->dev, "Cannot request region\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error_free_ida; } if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { @@ -324,6 +325,8 @@ static int alcor_pci_probe(struct pci_dev *pdev, error_release_regions: pci_release_regions(pdev); +error_free_ida: + ida_free(&alcor_pci_idr, priv->id); return ret; } @@ -337,7 +340,7 @@ static void alcor_pci_remove(struct pci_dev *pdev) mfd_remove_devices(&pdev->dev); - ida_simple_remove(&alcor_pci_idr, priv->id); + ida_free(&alcor_pci_idr, priv->id); pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c index 252e15ba65e111c0595dc2683dec25620cfb5f3a..d9f90332aaf65ecadf5b28e8c4191980d9c39f7e 100644 --- a/drivers/misc/eeprom/ee1004.c +++ b/drivers/misc/eeprom/ee1004.c @@ -82,6 +82,9 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf, if (unlikely(offset + count > EE1004_PAGE_SIZE)) count = EE1004_PAGE_SIZE - offset; + if (count > I2C_SMBUS_BLOCK_MAX) + count = I2C_SMBUS_BLOCK_MAX; + status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf); dev_dbg(&client->dev, "read %zu@%d --> %d\n", count, offset, status); diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index ef49ac8d910192dc242e3d7219e727e16af588bf..d0471fec37fbb30d6519c7b7749ddea8058ef523 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -1284,7 +1284,14 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) } if (copy_to_user(argp, &bp, sizeof(bp))) { - dma_buf_put(buf->dmabuf); + /* + * The usercopy failed, but we can't do much about it, as + * dma_buf_fd() already called fd_install() and made the + * file descriptor accessible for the current process. It + * might already be closed and dmabuf no longer valid when + * we reach this point. Therefore "leak" the fd and rely on + * the process exit path to do any required cleanup. 
+ */ return -EFAULT; } diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 912ddfa360b1375e79d8b6d37e69bda169485ad5..9716b0728b306aeac90d9f50fb3e25ed28281a53 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -859,6 +859,8 @@ static ssize_t hl_set_power_state(struct file *f, const char __user *buf, pci_set_power_state(hdev->pdev, PCI_D0); pci_restore_state(hdev->pdev); rc = pci_enable_device(hdev->pdev); + if (rc < 0) + return rc; } else if (value == 2) { pci_save_state(hdev->pdev); pci_disable_device(hdev->pdev); diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index f764684367153e004ebed4a154dd981c5862353a..954f7230b3886a48d0313cd82a22fcd7075ac825 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -1061,10 +1061,10 @@ static int kgdbts_option_setup(char *opt) { if (strlen(opt) >= MAX_CONFIG_LEN) { printk(KERN_ERR "kgdbts: config string too long\n"); - return -ENOSPC; + return 1; } strcpy(config, opt); - return 0; + return 1; } __setup("kgdbts=", kgdbts_option_setup); diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index 5eaf74447ca1e48764b446512e42d538579874eb..556bb7d705f532c7465917bc35de01187d263912 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c @@ -76,12 +76,12 @@ static void firmware_load(const struct firmware *fw, void *context) if (fw == NULL) { dev_err(&spi->dev, "Cannot load firmware, aborting\n"); - return; + goto out; } if (fw->size == 0) { dev_err(&spi->dev, "Error: Firmware size is 0!\n"); - return; + goto out; } /* Fill dummy data (24 stuffing bits for commands) */ @@ -103,7 +103,7 @@ static void firmware_load(const struct firmware *fw, void *context) dev_err(&spi->dev, "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n", jedec_id); - return; + goto out; } dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name); @@ -116,7 +116,7 @@ static void firmware_load(const struct firmware *fw, void *context) buffer = kzalloc(fw->size + 8, GFP_KERNEL); if (!buffer) { dev_err(&spi->dev, "Error: Can't allocate memory!\n"); - return; + goto out; } /* @@ -155,7 +155,7 @@ static void firmware_load(const struct firmware *fw, void *context) "Error: Timeout waiting for FPGA to clear (status=%08x)!\n", status); kfree(buffer); - return; + goto out; } dev_info(&spi->dev, "Configuring the FPGA...\n"); @@ -181,7 +181,7 @@ static void firmware_load(const struct firmware *fw, void *context) release_firmware(fw); kfree(buffer); - +out: complete(&data->fw_loaded); } diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index 30c8ac24635d4cb14ef9f8534687f9611ce09ce7..4405fb2bc7a0004b4346f3faf649eb8cf5c4586c 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -16,7 +16,7 @@ KCOV_INSTRUMENT_rodata.o := n OBJCOPYFLAGS := OBJCOPYFLAGS_rodata_objcopy.o := \ - --rename-section .noinstr.text=.rodata,alloc,readonly,load + --rename-section .noinstr.text=.rodata,alloc,readonly,load,contents targets += rodata.o rodata_objcopy.o $(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE $(call if_changed,objcopy) diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 67bb6a25fd0a020c55c310fcd171b1fc566982f9..d81d75a20b8f211f6fd312b4282d086a10cecc67 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -107,6 +107,7 @@ #define MEI_DEV_ID_ADP_S 0x7AE8 /* Alder Lake Point S */ #define MEI_DEV_ID_ADP_LP 0x7A60 /* Alder Lake 
Point LP */ #define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */ +#define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */ /* * MEI HW Section diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index fee603039e872430736dedb51a934494d103d5b6..ca3067fa6f0e07df9e2d8ce3fc11ab0a53ef550c 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -427,31 +427,26 @@ int mei_irq_read_handler(struct mei_device *dev, list_for_each_entry(cl, &dev->file_list, link) { if (mei_cl_hbm_equal(cl, mei_hdr)) { cl_dbg(dev, cl, "got a message\n"); - break; + ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list); + goto reset_slots; } } /* if no recipient cl was found we assume corrupted header */ - if (&cl->link == &dev->file_list) { - /* A message for not connected fixed address clients - * should be silently discarded - * On power down client may be force cleaned, - * silently discard such messages - */ - if (hdr_is_fixed(mei_hdr) || - dev->dev_state == MEI_DEV_POWER_DOWN) { - mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length); - ret = 0; - goto reset_slots; - } - dev_err(dev->dev, "no destination client found 0x%08X\n", - dev->rd_msg_hdr[0]); - ret = -EBADMSG; - goto end; + /* A message for not connected fixed address clients + * should be silently discarded + * On power down client may be force cleaned, + * silently discard such messages + */ + if (hdr_is_fixed(mei_hdr) || + dev->dev_state == MEI_DEV_POWER_DOWN) { + mei_irq_discard_msg(dev, mei_hdr, mei_hdr->length); + ret = 0; + goto reset_slots; } - - ret = mei_cl_irq_read_msg(cl, mei_hdr, meta_hdr, cmpl_list); - + dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]); + ret = -EBADMSG; + goto end; reset_slots: /* reset the number of slots and header */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 3a45aaf002ac8523e3955e23e21603acfffeb280..a738253dbd056171fc8db0e16b60cbb3407450df 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -113,6 +113,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_S, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_LP, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)}, /* required last entry */ {0, } diff --git a/drivers/misc/sunway-ged.c b/drivers/misc/sunway-ged.c new file mode 100644 index 0000000000000000000000000000000000000000..b4e4ca31585257961b54120024bdf049af9ae8bd --- /dev/null +++ b/drivers/misc/sunway-ged.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Generic Event Device for ACPI. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define OFFSET_START_ADDR 0 +#define OFFSET_LENGTH 8 +#define OFFSET_STATUS 16 +#define OFFSET_SLOT 24 + +/* Memory hotplug event */ +#define SUNWAY_MEMHOTPLUG_ADD 0x1 +#define SUNWAY_MEMHOTPLUG_REMOVE 0x2 + +struct sunway_memory_device { + struct sunway_ged_device *device; + unsigned int state; /* State of the memory device */ + struct list_head list; + + u64 start_addr; /* Memory Range start physical addr */ + u64 length; /* Memory Range length */ + u64 slot; /* Memory Range slot */ + unsigned int enabled:1; +}; + +struct sunway_ged_device { + struct device *dev; + void __iomem *membase; + void *driver_data; + spinlock_t lock; + struct list_head dev_list; +}; + +static int sunway_memory_enable_device(struct sunway_memory_device *mem_device) +{ + int num_enabled = 0; + int result = 0; + + if (mem_device->enabled) { /* just sanity check...*/ + num_enabled++; + goto out; + } + + /* + * If the memory block size is zero, please ignore it. + * Don't try to do the following memory hotplug flowchart. + */ + if (!mem_device->length) + goto out; + + lock_device_hotplug(); + /* suppose node = 0, fix me! */ + result = __add_memory(0, mem_device->start_addr, mem_device->length); + unlock_device_hotplug(); + /* + * If the memory block has been used by the kernel, add_memory() + * returns -EEXIST. If add_memory() returns the other error, it + * means that this memory block is not used by the kernel. + */ + if (result && result != -EEXIST) + goto out; + + mem_device->enabled = 1; + + /* + * Add num_enable even if add_memory() returns -EEXIST, so the + * device is bound to this driver. + */ + num_enabled++; +out: + if (!num_enabled) { + dev_err(mem_device->device->dev, "add_memory failed\n"); + return -EINVAL; + } + + return 0; +} + +static int sunway_memory_get_meminfo(struct sunway_memory_device *mem_device) +{ + struct sunway_ged_device *geddev; + + if (!mem_device) + return -EINVAL; + + if (mem_device->enabled) + return 0; + + geddev = mem_device->device; + + mem_device->start_addr = readq(geddev->membase + OFFSET_START_ADDR); + mem_device->length = readq(geddev->membase + OFFSET_LENGTH); + + return 0; +} + +static void sunway_memory_device_remove(struct sunway_ged_device *device) +{ + struct sunway_memory_device *mem_dev, *n; + unsigned long start_addr, length, slot; + + if (!device) + return; + + start_addr = readq(device->membase + OFFSET_START_ADDR); + length = readq(device->membase + OFFSET_LENGTH); + slot = readq(device->membase + OFFSET_SLOT); + + list_for_each_entry_safe(mem_dev, n, &device->dev_list, list) { + if (!mem_dev->enabled) + continue; + + if ((start_addr == mem_dev->start_addr) && + (length == mem_dev->length)) { + /* suppose node = 0, fix me! 
*/ + remove_memory(0, start_addr, length); + list_del(&mem_dev->list); + kfree(mem_dev); + } + } + + writeq(slot, device->membase + OFFSET_SLOT); +} + +static int sunway_memory_device_add(struct sunway_ged_device *device) +{ + struct sunway_memory_device *mem_device; + int result; + + if (!device) + return -EINVAL; + + mem_device = kzalloc(sizeof(struct sunway_memory_device), GFP_KERNEL); + if (!mem_device) + return -ENOMEM; + + INIT_LIST_HEAD(&mem_device->list); + mem_device->device = device; + + /* Get the range from the IO */ + mem_device->start_addr = readq(device->membase + OFFSET_START_ADDR); + mem_device->length = readq(device->membase + OFFSET_LENGTH); + mem_device->slot = readq(device->membase + OFFSET_SLOT); + + result = sunway_memory_enable_device(mem_device); + if (result) { + dev_err(device->dev, "sunway_memory_enable_device() error\n"); + sunway_memory_device_remove(device); + + return result; + } + + list_add_tail(&mem_device->list, &device->dev_list); + dev_dbg(device->dev, "Memory device configured\n"); + + hcall(HCALL_MEMHOTPLUG, mem_device->start_addr, 0, 0); + + return 1; +} + +static irqreturn_t sunwayged_ist(int irq, void *data) +{ + struct sunway_ged_device *sunwayged_dev = data; + unsigned int status; + + status = readl(sunwayged_dev->membase + OFFSET_STATUS); + + /* through IO status to add or remove memory device */ + if (status & SUNWAY_MEMHOTPLUG_ADD) + sunway_memory_device_add(sunwayged_dev); + + if (status & SUNWAY_MEMHOTPLUG_REMOVE) + sunway_memory_device_remove(sunwayged_dev); + + return IRQ_HANDLED; +} + +static irqreturn_t sunwayged_irq_handler(int irq, void *data) +{ + return IRQ_WAKE_THREAD; +} + +static int sunwayged_probe(struct platform_device *pdev) +{ + struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0); + struct sunway_ged_device *geddev; + struct device *dev = &pdev->dev; + int irqflags; + + if (!regs) { + dev_err(dev, "no registers defined\n"); + return -EINVAL; + } + + geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL); + if (!geddev) + return -ENOMEM; + + spin_lock_init(&geddev->lock); + geddev->membase = devm_ioremap(&pdev->dev, + regs->start, resource_size(regs)); + if (!geddev->membase) + return -ENOMEM; + + INIT_LIST_HEAD(&geddev->dev_list); + geddev->dev = &pdev->dev; + irqflags = IRQF_SHARED; + + if (request_threaded_irq(irq, sunwayged_irq_handler, sunwayged_ist, + irqflags, "SUNWAY:Ged", geddev)) { + dev_err(dev, "failed to setup event handler for irq %u\n", irq); + + return -EINVAL; + } + + platform_set_drvdata(pdev, geddev); + + return 0; +} + +static int sunwayged_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id sunwayged_of_match[] = { + {.compatible = "sw6,sunway-ged", }, + { } +}; +MODULE_DEVICE_TABLE(of, sunwayged_of_match); + +static struct platform_driver sunwayged_platform_driver = { + .driver = { + .name = "sunway-ged", + .of_match_table = sunwayged_of_match, + }, + .probe = sunwayged_probe, + .remove = sunwayged_remove, +}; +module_platform_driver(sunwayged_platform_driver); + +MODULE_AUTHOR("Lu Feifei"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Sunway ged driver"); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 94caee49da99c37c53d679d08e135c408ea9de51..99b981a05b6c0ed6c94f9f53a2a3c4b6754cf915 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1642,31 +1642,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) struct mmc_card *card = mq->card; struct
mmc_host *host = card->host; blk_status_t error = BLK_STS_OK; - int retries = 0; do { u32 status; int err; + int retries = 0; - mmc_blk_rw_rq_prep(mqrq, card, 1, mq); + while (retries++ <= MMC_READ_SINGLE_RETRIES) { + mmc_blk_rw_rq_prep(mqrq, card, 1, mq); - mmc_wait_for_req(host, mrq); + mmc_wait_for_req(host, mrq); - err = mmc_send_status(card, &status); - if (err) - goto error_exit; - - if (!mmc_host_is_spi(host) && - !mmc_ready_for_data(status)) { - err = mmc_blk_fix_state(card, req); + err = mmc_send_status(card, &status); if (err) goto error_exit; - } - if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES) - continue; + if (!mmc_host_is_spi(host) && + !mmc_ready_for_data(status)) { + err = mmc_blk_fix_state(card, req); + if (err) + goto error_exit; + } - retries = 0; + if (!mrq->cmd->error) + break; + } if (mrq->cmd->error || mrq->data->error || diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 864c8c205ff782908e6f137c82e13fdf3739a3a6..03e2f965a96a89aa6e4d5667221c68a1e8f74db1 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -513,6 +513,16 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) EXPORT_SYMBOL(mmc_alloc_host); +static int mmc_validate_host_caps(struct mmc_host *host) +{ + if (host->caps & MMC_CAP_SDIO_IRQ && !host->ops->enable_sdio_irq) { + dev_warn(host->parent, "missing ->enable_sdio_irq() ops\n"); + return -EINVAL; + } + + return 0; +} + /** * mmc_add_host - initialise host hardware * @host: mmc host @@ -525,8 +535,9 @@ int mmc_add_host(struct mmc_host *host) { int err; - WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && - !host->ops->enable_sdio_irq); + err = mmc_validate_host_caps(host); + if (err) + return err; err = device_add(&host->class_dev); if (err) diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 1b0853a82189aceefcd70a2e491fc0ac1ddcf183..99a4ce68d82f1e2c753745a86027526c475dd317 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -708,6 +708,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, if (host->ops->init_card) host->ops->init_card(host, card); + card->ocr = ocr_card; + /* * If the host and card support UHS-I mode request the card * to switch to 1.8V signaling level. 
No 1.8v signalling if @@ -820,7 +822,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, goto mismatch; } } - card->ocr = ocr_card; + mmc_fixup_device(card, sdio_fixup_methods); if (card->type == MMC_TYPE_SD_COMBO) { diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 90cd179625fc27be597ab4e4bcc21f2b626a9ead..647928ab00a30db117ce9a65d52a765cf364999a 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1375,8 +1375,12 @@ static int davinci_mmcsd_suspend(struct device *dev) static int davinci_mmcsd_resume(struct device *dev) { struct mmc_davinci_host *host = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(host->clk); + if (ret) + return ret; - clk_enable(host->clk); mmc_davinci_reset_ctrl(host, 0); return 0; diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index b274083a6e63540f9bd6dc6cdd2fdf6f4e64d40e..091e0e051d109609f363bb102f806e4f6c28996a 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -173,6 +173,8 @@ struct meson_host { int irq; bool vqmmc_enabled; + bool needs_pre_post_req; + }; #define CMD_CFG_LENGTH_MASK GENMASK(8, 0) @@ -652,6 +654,8 @@ static void meson_mmc_request_done(struct mmc_host *mmc, struct meson_host *host = mmc_priv(mmc); host->cmd = NULL; + if (host->needs_pre_post_req) + meson_mmc_post_req(mmc, mrq, 0); mmc_request_done(host->mmc, mrq); } @@ -869,7 +873,7 @@ static int meson_mmc_validate_dram_access(struct mmc_host *mmc, struct mmc_data static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct meson_host *host = mmc_priv(mmc); - bool needs_pre_post_req = mrq->data && + host->needs_pre_post_req = mrq->data && !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE); /* @@ -885,22 +889,19 @@ static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) } } - if (needs_pre_post_req) { + if (host->needs_pre_post_req) { meson_mmc_get_transfer_mode(mmc, mrq); if (!meson_mmc_desc_chain_mode(mrq->data)) - needs_pre_post_req = false; + host->needs_pre_post_req = false; } - if (needs_pre_post_req) + if (host->needs_pre_post_req) meson_mmc_pre_req(mmc, mrq); /* Stop execution */ writel(0, host->regs + SD_EMMC_START); meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd); - - if (needs_pre_post_req) - meson_mmc_post_req(mmc, mrq, 0); } static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd) diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c index 8fdd0bbbfa21fb0ee77a769446f0769f04749ec4..28aa78aa08f3f2752d3da3b2db2a51e98ab80e6a 100644 --- a/drivers/mmc/host/meson-mx-sdhc-mmc.c +++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c @@ -854,6 +854,11 @@ static int meson_mx_sdhc_probe(struct platform_device *pdev) goto err_disable_pclk; irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err_disable_pclk; + } + ret = devm_request_threaded_irq(dev, irq, meson_mx_sdhc_irq, meson_mx_sdhc_irq_thread, IRQF_ONESHOT, NULL, host); diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index 1c5299cd0cbe1333dc394e0c432a102a884616fd..264aae2a2b0cfa62bf68b6984399cc15a83fc305 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -663,6 +663,11 @@ static int meson_mx_mmc_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto error_free_mmc; + } + ret = devm_request_threaded_irq(host->controller_dev, irq, meson_mx_mmc_irq, 
meson_mx_mmc_irq_thread, IRQF_ONESHOT, diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 9bde0def114b5714330632e5d7c08590accfd67b..b5684e5d79e60d3c67f18bf38353df68a4c01acc 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -2203,7 +2203,7 @@ static int mmci_probe(struct amba_device *dev, return ret; } -static int mmci_remove(struct amba_device *dev) +static void mmci_remove(struct amba_device *dev) { struct mmc_host *mmc = amba_get_drvdata(dev); @@ -2231,8 +2231,6 @@ static int mmci_remove(struct amba_device *dev) clk_disable_unprepare(host->clk); mmc_free_host(mmc); } - - return 0; } #ifdef CONFIG_PM diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c index a75d3dd34d18cb41f24f4f201613916a3110fb09..4cceb9bab0361e311c38ea4800df8d355b557f70 100644 --- a/drivers/mmc/host/mmci_stm32_sdmmc.c +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -62,8 +62,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host, * excepted the last element which has no constraint on idmasize */ for_each_sg(data->sg, sg, data->sg_len - 1, i) { - if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) || - !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) { + if (!IS_ALIGNED(sg->offset, sizeof(u32)) || + !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) { dev_err(mmc_dev(host->mmc), "unaligned scatterlist: ofst:%x length:%d\n", data->sg->offset, data->sg->length); @@ -71,7 +71,7 @@ static int sdmmc_idma_validate_data(struct mmci_host *host, } } - if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) { + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { dev_err(mmc_dev(host->mmc), "unaligned last scatterlist: ofst:%x length:%d\n", data->sg->offset, data->sg->length); diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 7697068ad9695144efacf999f4f4b2941f91275b..ea67a7ef2390c834d2118f58d610be5a557ef859 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -708,12 +708,12 @@ static int moxart_remove(struct platform_device *pdev) if (!IS_ERR_OR_NULL(host->dma_chan_rx)) dma_release_channel(host->dma_chan_rx); mmc_remove_host(mmc); - mmc_free_host(mmc); writel(0, host->base + REG_INTERRUPT_MASK); writel(0, host->base + REG_POWER_CONTROL); writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF, host->base + REG_CLOCK_CONTROL); + mmc_free_host(mmc); return 0; } diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 782879d46ff4849d8efb72afb2eac5d38085c4a2..ac01fb518386a046c039dfaef7548ef10d519c1a 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -390,10 +390,10 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc) SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) | sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2)); - /* Set the sampling clock selection range of HS400 mode */ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL, SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN | - 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT); + sd_scc_read32(host, priv, + SH_MOBILE_SDHI_SCC_DTCNTL)); /* Avoid bad TAP */ if (bad_taps & BIT(priv->tap_set)) { diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index ab5ab969f711deecdbca38e27c758b2f2b416bd4..343648fcbc31f1b228b3f8ca4a3cea6fbd8207c6 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -524,12 +524,16 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask) static int esdhc_of_enable_dma(struct sdhci_host *host) { + int 
ret; u32 value; struct device *dev = mmc_dev(host->mmc); if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") || - of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (ret) + return ret; + } value = sdhci_readl(host, ESDHC_DMA_SYSCTL); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index bf04a08eeba13424827477c48d2a8124770d4013..a78b060ce847158505ae2cc2c4c3151feee69478 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1932,6 +1932,7 @@ static const struct pci_device_id pci_ids[] = { SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd), SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc), SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, ADL_EMMC, intel_glk_emmc), SDHCI_PCI_DEVICE(O2, 8120, o2), SDHCI_PCI_DEVICE(O2, 8220, o2), SDHCI_PCI_DEVICE(O2, 8221, o2), diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 8f90c4163bb5c7a1faf5705af925cc09da193181..dcd99d5057ee1ba66047bff826f7c3f6626aba2d 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -59,6 +59,7 @@ #define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8 #define PCI_DEVICE_ID_INTEL_LKF_EMMC 0x98c4 #define PCI_DEVICE_ID_INTEL_LKF_SD 0x98f8 +#define PCI_DEVICE_ID_INTEL_ADL_EMMC 0x54c4 #define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000 #define PCI_DEVICE_ID_VIA_95D0 0x95d0 diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 0e5234a5ca2247bb76efa83083307a63f2200bf1..d509198c00c8af6b67630cad924e1a54ff8afc50 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -240,16 +240,6 @@ static void xenon_voltage_switch(struct sdhci_host *host) { /* Wait for 5ms after set 1.8V signal enable bit */ usleep_range(5000, 5500); - - /* - * For some reason the controller's Host Control2 register reports - * the bit representing 1.8V signaling as 0 when read after it was - * written as 1. Subsequent read reports 1. - * - * Since this may cause some issues, do an empty read of the Host - * Control2 register here to circumvent this. - */ - sdhci_readw(host, SDHCI_HOST_CONTROL2); } static const struct sdhci_ops sdhci_xenon_ops = { diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig index aef14990e5f7cfc6c0e115f66a6047e03e4edf9a..19726ebd973d0bbde3d118688401baedc1169f76 100644 --- a/drivers/mtd/chips/Kconfig +++ b/drivers/mtd/chips/Kconfig @@ -55,12 +55,14 @@ choice LITTLE_ENDIAN_BYTE, if the bytes are reversed. 
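The mmci_stm32_sdmmc hunk above fixes a classic cursor-vs-head bug: the loop advanced the for_each_sg() cursor but kept testing data->sg, the head of the scatterlist, so only the first element was ever validated. Below is a minimal user-space sketch of the corrected shape of that check; struct seg, IDMA_BURST and the helper name are illustrative stand-ins, not the driver's types.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
#define IDMA_BURST 8 /* stand-in for SDMMC_IDMA_BURST */

struct seg {
	size_t offset;
	size_t length;
};

/* Check the element under the cursor (sg[i]), never the list head (sg[0]).
 * Every element needs a 32-bit aligned offset; all but the last also need
 * a burst-aligned length, mirroring the idmasize constraint in the fix. */
static bool validate_segs(const struct seg *sg, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (!IS_ALIGNED(sg[i].offset, sizeof(uint32_t)))
			return false;
		if (i + 1 < n && !IS_ALIGNED(sg[i].length, IDMA_BURST))
			return false;
	}
	return true;
}

int main(void)
{
	struct seg ok[]  = { { 0, 16 }, { 4, 5 } };
	struct seg bad[] = { { 0, 16 }, { 2, 8 } }; /* bad[1].offset unaligned */

	printf("ok=%d bad=%d\n", validate_segs(ok, 2), validate_segs(bad, 2));
	return 0;
}

With the head-pointer version, bad[] would have passed, which is exactly the failure mode the driver fix closes.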
config MTD_CFI_NOSWAP + depends on !ARCH_IXP4XX || CPU_BIG_ENDIAN bool "NO" config MTD_CFI_BE_BYTE_SWAP bool "BIG_ENDIAN_BYTE" config MTD_CFI_LE_BYTE_SWAP + depends on !ARCH_IXP4XX bool "LITTLE_ENDIAN_BYTE" endchoice diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index aa88558c7edb100b60b5f23f99f1edec0fd0eb8d..d7d17a4b61f522020af5329f92364e94e2846299 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -270,6 +270,7 @@ static int phram_setup(const char *val) if (len == 0 || erasesize == 0 || erasesize > len || erasesize > UINT_MAX || rem) { parse_err("illegal erasesize or len\n"); + ret = -EINVAL; goto error; } diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c index ecb050ba95cdff52e22469672b60e366ca265abf..dc164c18f8429fe5d632b264084af0bc9b7e574c 100644 --- a/drivers/mtd/hyperbus/rpc-if.c +++ b/drivers/mtd/hyperbus/rpc-if.c @@ -124,7 +124,9 @@ static int rpcif_hb_probe(struct platform_device *pdev) if (!hyperbus) return -ENOMEM; - rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent); + error = rpcif_sw_init(&hyperbus->rpc, pdev->dev.parent); + if (error) + return error; platform_set_drvdata(pdev, hyperbus); @@ -150,9 +152,9 @@ static int rpcif_hb_remove(struct platform_device *pdev) { struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev); int error = hyperbus_unregister_device(&hyperbus->hbdev); - struct rpcif *rpc = dev_get_drvdata(pdev->dev.parent); - rpcif_disable_rpm(rpc); + rpcif_disable_rpm(&hyperbus->rpc); + return error; } diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 6650acbc961e972089aa97fdf56f90b987075a71..fc0aaa03c524272ed065837c438b277c921b4fb0 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -325,7 +325,7 @@ config MTD_DC21285 config MTD_IXP4XX tristate "CFI Flash device mapped on Intel IXP4xx based systems" - depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX + depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX && MTD_CFI_ADV_OPTIONS help This enables MTD access to flash devices on platforms based on Intel's IXP4xx family of network processors such as the diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 95d47422bbf2039f9e92bc7d09e08c548d7e3b0a..5725818fa199f72c056e9f1bb5be10eb2126e93a 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -313,7 +313,7 @@ static int __mtd_del_partition(struct mtd_info *mtd) if (err) return err; - list_del(&child->part.node); + list_del(&mtd->part.node); free_partition(mtd); return 0; diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c index 044adf91385465ca73e54f53238fb68b03c8a604..64af6898131d656401a42135855c22e183df4109 100644 --- a/drivers/mtd/nand/bbt.c +++ b/drivers/mtd/nand/bbt.c @@ -123,7 +123,7 @@ int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry, unsigned int rbits = bits_per_block + offs - BITS_PER_LONG; pos[1] &= ~GENMASK(rbits - 1, 0); - pos[1] |= val >> rbits; + pos[1] |= val >> (bits_per_block - rbits); } return 0; diff --git a/drivers/mtd/nand/onenand/generic.c b/drivers/mtd/nand/onenand/generic.c index 8b6f4da5d72011b878732242d0041dc6b6c0b2aa..a4b8b65fe15f50faeaa1225203c52cd50f6a9288 100644 --- a/drivers/mtd/nand/onenand/generic.c +++ b/drivers/mtd/nand/onenand/generic.c @@ -53,7 +53,12 @@ static int generic_onenand_probe(struct platform_device *pdev) } info->onenand.mmcontrol = pdata ? 
pdata->mmcontrol : NULL; - info->onenand.irq = platform_get_irq(pdev, 0); + + err = platform_get_irq(pdev, 0); + if (err < 0) + goto out_iounmap; + + info->onenand.irq = err; info->mtd.dev.parent = &pdev->dev; info->mtd.priv = &info->onenand; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 8aab1017b460037d1ba7d5036a89bad20dce0e99..c048e826746a9d92cd2a9d455bd417f91da62630 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -2057,13 +2057,15 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc, nc->mck = of_clk_get(dev->parent->of_node, 0); if (IS_ERR(nc->mck)) { dev_err(dev, "Failed to retrieve MCK clk\n"); - return PTR_ERR(nc->mck); + ret = PTR_ERR(nc->mck); + goto out_release_dma; } np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0); if (!np) { dev_err(dev, "Missing or invalid atmel,smc property\n"); - return -EINVAL; + ret = -EINVAL; + goto out_release_dma; } nc->smc = syscon_node_to_regmap(np); @@ -2071,10 +2073,16 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc, if (IS_ERR(nc->smc)) { ret = PTR_ERR(nc->smc); dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret); - return ret; + goto out_release_dma; } return 0; + +out_release_dma: + if (nc->dmac) + dma_release_channel(nc->dmac); + + return ret; } static int diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 909b14cc8e55cf880ebd0e040774e85a7102161c..580b91cbd18defc7a085cc9dad965c4b9b7159f4 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -2062,7 +2062,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, mtd->oobsize / trans, host->hwcfg.sector_size_1k); - if (!ret) { + if (ret != -EBADMSG) { *err_addr = brcmnand_get_uncorrecc_addr(ctrl); if (*err_addr) diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index f8c36d19ab47f36aba5d2c1696bd795058bb084b..bfd3f440aca574931e30e22e96e4a6e891ea0f4b 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -372,17 +372,15 @@ static int nand_davinci_correct_4bit(struct nand_chip *chip, u_char *data, } /** - * nand_read_page_hwecc_oob_first - hw ecc, read oob first + * nand_davinci_read_page_hwecc_oob_first - Hardware ECC page read with ECC + * data read from OOB area * @chip: nand chip info structure * @buf: buffer to store read data * @oob_required: caller requires OOB data read to chip->oob_poi * @page: page number to read * - * Hardware ECC for large page chips, require OOB to be read first. For this - * ECC mode, the write_page method is re-used from ECC_HW. These methods - * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with - * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from - * the data area, by overwriting the NAND manufacturer bad block markings. + * Hardware ECC for large page chips, which requires the ECC data to be + * extracted from the OOB before the actual data is read. 
*/ static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf, @@ -394,7 +392,6 @@ static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip, int eccsteps = chip->ecc.steps; uint8_t *p = buf; uint8_t *ecc_code = chip->ecc.code_buf; - uint8_t *ecc_calc = chip->ecc.calc_buf; unsigned int max_bitflips = 0; /* Read the OOB area first */ @@ -402,7 +399,8 @@ static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip, if (ret) return ret; - ret = nand_read_page_op(chip, page, 0, NULL, 0); + /* Move read cursor to start of page */ + ret = nand_change_read_column_op(chip, 0, NULL, 0, false); if (ret) return ret; @@ -420,8 +418,6 @@ static int nand_davinci_read_page_hwecc_oob_first(struct nand_chip *chip, if (ret) return ret; - chip->ecc.calculate(chip, p, &ecc_calc[i]); - stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL); if (stat == -EBADMSG && (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) { diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index a6658567d55c0a637c8b484abc5f938d49728d2c..92e8ca56f56653761a502b383fe8a8a86150a952 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -646,6 +646,7 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, const struct nand_sdr_timings *sdr) { struct gpmi_nfc_hardware_timing *hw = &this->hw; + struct resources *r = &this->resources; unsigned int dll_threshold_ps = this->devdata->max_chain_delay; unsigned int period_ps, reference_period_ps; unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles; @@ -669,6 +670,8 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; } + hw->clk_rate = clk_round_rate(r->clock[0], hw->clk_rate); + /* SDR core timings are given in picoseconds */ period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate); @@ -711,14 +714,32 @@ static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this, (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0); } -static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this) +static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this) { struct gpmi_nfc_hardware_timing *hw = &this->hw; struct resources *r = &this->resources; void __iomem *gpmi_regs = r->gpmi_regs; unsigned int dll_wait_time_us; + int ret; + + /* Clock dividers do NOT guarantee a clean clock signal on their outputs + * during a change of the divide factor on i.MX6Q/UL/SX. On i.MX7/8, + * all clock dividers do provide this guarantee. + */ + if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) + clk_disable_unprepare(r->clock[0]); - clk_set_rate(r->clock[0], hw->clk_rate); + ret = clk_set_rate(r->clock[0], hw->clk_rate); + if (ret) { + dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret); + return ret; + } + + if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) { + ret = clk_prepare_enable(r->clock[0]); + if (ret) + return ret; + } writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0); writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1); @@ -737,6 +758,8 @@ static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this) /* Wait for the DLL to settle. */ udelay(dll_wait_time_us); + + return 0; } static int gpmi_setup_interface(struct nand_chip *chip, int chipnr, @@ -1032,15 +1055,6 @@ static int gpmi_get_clks(struct gpmi_nand_data *this) r->clock[i] = clk; } - if (GPMI_IS_MX6(this)) - /* - * Set the default value for the gpmi clock. 
- * - * If you want to use the ONFI nand which is in the - * Synchronous Mode, you should change the clock as you need. - */ - clk_set_rate(r->clock[0], 22000000); - return 0; err_clock: @@ -2278,7 +2292,9 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, */ if (this->hw.must_apply_timings) { this->hw.must_apply_timings = false; - gpmi_nfc_apply_timings(this); + ret = gpmi_nfc_apply_timings(this); + if (ret) + goto out_pm; } dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs); @@ -2407,6 +2423,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, this->bch = false; +out_pm: pm_runtime_mark_last_busy(this->dev); pm_runtime_put_autosuspend(this->dev); diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c index cb293c50acb876fe565adfd93aec2f024e857e44..5b9271b9c32655b4d4d8f2d972c3d478d7b3b6d9 100644 --- a/drivers/mtd/nand/raw/mpc5121_nfc.c +++ b/drivers/mtd/nand/raw/mpc5121_nfc.c @@ -291,7 +291,6 @@ static int ads5121_chipselect_init(struct mtd_info *mtd) /* Control chips select signal on ADS5121 board */ static void ads5121_select_chip(struct nand_chip *nand, int chip) { - struct mtd_info *mtd = nand_to_mtd(nand); struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand); u8 v; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 1f0d542d59230c06d5277f5741d66831d23958fb..c41c0ff611b1bf312310c5e66ed0c5b464d6cee6 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -297,16 +297,19 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) * * Return: -EBUSY if the chip has been suspended, 0 otherwise */ -static int nand_get_device(struct nand_chip *chip) +static void nand_get_device(struct nand_chip *chip) { - mutex_lock(&chip->lock); - if (chip->suspended) { + /* Wait until the device is resumed. 
*/ + while (1) { + mutex_lock(&chip->lock); + if (!chip->suspended) { + mutex_lock(&chip->controller->lock); + return; + } mutex_unlock(&chip->lock); - return -EBUSY; - } - mutex_lock(&chip->controller->lock); - return 0; + wait_event(chip->resume_wq, !chip->suspended); + } } /** @@ -531,9 +534,7 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs) nand_erase_nand(chip, &einfo, 0); /* Write bad block marker to OOB */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); ret = nand_markbad_bbm(chip, ofs); nand_release_device(chip); @@ -3534,9 +3535,7 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from, ops->mode != MTD_OPS_RAW) return -ENOTSUPP; - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); if (!ops->datbuf) ret = nand_do_read_oob(chip, from, ops); @@ -4119,13 +4118,11 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { struct nand_chip *chip = mtd_to_nand(mtd); - int ret; + int ret = 0; ops->retlen = 0; - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); switch (ops->mode) { case MTD_OPS_PLACE_OOB: @@ -4181,9 +4178,7 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, return -EINVAL; /* Grab the lock and see if the device is available */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); /* Shift to get first page */ page = (int)(instr->addr >> chip->page_shift); @@ -4270,7 +4265,7 @@ static void nand_sync(struct mtd_info *mtd) pr_debug("%s: called\n", __func__); /* Grab the lock and see if the device is available */ - WARN_ON(nand_get_device(chip)); + nand_get_device(chip); /* Release it and go back */ nand_release_device(chip); } @@ -4287,9 +4282,7 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) int ret; /* Select the NAND device */ - ret = nand_get_device(chip); - if (ret) - return ret; + nand_get_device(chip); nand_select_target(chip, chipnr); @@ -4360,6 +4353,8 @@ static void nand_resume(struct mtd_info *mtd) __func__); } mutex_unlock(&chip->lock); + + wake_up_all(&chip->resume_wq); } /** @@ -5068,6 +5063,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, chip->cur_cs = -1; mutex_init(&chip->lock); + init_waitqueue_head(&chip->resume_wq); /* Enforce the right timings for reset/detection */ chip->current_interface_config = nand_get_reset_interface_config(); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index b99d2e9d1e2c49a86157c8e06d08df067928a87e..bb181e18c7c52b80ca2fe7fff6c03f0f70324a3e 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2,7 +2,6 @@ /* * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
*/ - #include #include #include @@ -2968,10 +2967,6 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (!nandc->base_dma) return -ENXIO; - ret = qcom_nandc_alloc(nandc); - if (ret) - goto err_nandc_alloc; - ret = clk_prepare_enable(nandc->core_clk); if (ret) goto err_core_clk; @@ -2980,6 +2975,10 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (ret) goto err_aon_clk; + ret = qcom_nandc_alloc(nandc); + if (ret) + goto err_nandc_alloc; + ret = qcom_nandc_setup(nandc); if (ret) goto err_setup; @@ -2991,15 +2990,14 @@ static int qcom_nandc_probe(struct platform_device *pdev) return 0; err_setup: + qcom_nandc_unalloc(nandc); +err_nandc_alloc: clk_disable_unprepare(nandc->aon_clk); err_aon_clk: clk_disable_unprepare(nandc->core_clk); err_core_clk: - qcom_nandc_unalloc(nandc); -err_nandc_alloc: dma_unmap_resource(dev, res->start, resource_size(res), DMA_BIDIRECTIONAL, 0); - return ret; } diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 28f55f9cf71533a54f2f83578b2f8f30a176d55a..053ab52668e8bf3c1b8f39b8430e56fbdd9161a5 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -97,6 +97,33 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor) return e; } +/* + * has_enough_free_count - whether ubi has enough free pebs to fill fm pools + * @ubi: UBI device description object + * @is_wl_pool: whether UBI is filling wear leveling pool + * + * This helper function checks whether there are enough free pebs (after + * deducting the pebs reserved for fastmap) to fill fm_pool and fm_wl_pool; + * the rule applies once at least one free peb has been placed into + * fm_wl_pool. For the wear leveling pool, UBI must also reserve free pebs + * for bad peb handling, because otherwise there may not be enough free + * pebs left for user volumes after new bad pebs are produced. + */ +static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool) +{ + int fm_used = 0; // fastmap non-anchor pebs + int beb_rsvd_pebs; + + if (!ubi->free.rb_node) + return false; + + beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0; + if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled)) + fm_used = ubi->fm_size / ubi->leb_size - 1; + + return ubi->free_count - beb_rsvd_pebs > fm_used; +} + /** * ubi_refill_pools - refills all fastmap PEB pools. * @ubi: UBI device description object @@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi) wl_tree_add(ubi->fm_anchor, &ubi->free); ubi->free_count++; } - if (ubi->fm_next_anchor) { - wl_tree_add(ubi->fm_next_anchor, &ubi->free); - ubi->free_count++; - } - /* All available PEBs are in ubi->free, now is the time to get + /* + * All available PEBs are in ubi->free, now is the time to get * the best anchor PEBs. 
*/ ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); - ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1); for (;;) { enough = 0; if (pool->size < pool->max_size) { - if (!ubi->free.rb_node) + if (!has_enough_free_count(ubi, false)) break; e = wl_get_wle(ubi); @@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi) enough++; if (wl_pool->size < wl_pool->max_size) { - if (!ubi->free.rb_node || - (ubi->free_count - ubi->beb_rsvd_pebs < 5)) + if (!has_enough_free_count(ubi, true)) break; e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); @@ -286,20 +308,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) int ubi_ensure_anchor_pebs(struct ubi_device *ubi) { struct ubi_work *wrk; + struct ubi_wl_entry *anchor; spin_lock(&ubi->wl_lock); - /* Do we have a next anchor? */ - if (!ubi->fm_next_anchor) { - ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1); - if (!ubi->fm_next_anchor) - /* Tell wear leveling to produce a new anchor PEB */ - ubi->fm_do_produce_anchor = 1; + /* Do we already have an anchor? */ + if (ubi->fm_anchor) { + spin_unlock(&ubi->wl_lock); + return 0; } - /* Do wear leveling to get a new anchor PEB or check the - * existing next anchor candidate. - */ + /* See if we can find an anchor PEB on the list of free PEBs */ + anchor = ubi_wl_get_fm_peb(ubi, 1); + if (anchor) { + ubi->fm_anchor = anchor; + spin_unlock(&ubi->wl_lock); + return 0; + } + + ubi->fm_do_produce_anchor = 1; + /* No luck, trigger wear leveling to produce a new anchor PEB. */ if (ubi->wl_scheduled) { spin_unlock(&ubi->wl_lock); return 0; @@ -381,11 +409,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi) ubi->fm_anchor = NULL; } - if (ubi->fm_next_anchor) { - return_unused_peb(ubi, ubi->fm_next_anchor); - ubi->fm_next_anchor = NULL; - } - if (ubi->fm) { for (i = 0; i < ubi->fm->used_blocks; i++) kfree(ubi->fm->e[i]); diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 88fdf8f5709fa0d25f3de95e9cc119c41358ff45..cdc2d713d3eb3f815632b5502ad5f71ccb86f62e 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -1219,17 +1219,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi, fm_pos += sizeof(*fec); ubi_assert(fm_pos <= ubi->fm_size); } - if (ubi->fm_next_anchor) { - fec = (struct ubi_fm_ec *)(fm_raw + fm_pos); - - fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum); - set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs); - fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec); - - free_peb_count++; - fm_pos += sizeof(*fec); - ubi_assert(fm_pos <= ubi->fm_size); - } fmh->free_peb_count = cpu_to_be32(free_peb_count); ubi_for_each_used_peb(ubi, wl_e, tmp_rb) { diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 04d0e217ea1c246c5c0907d614b8abd0fff25658..90e5f8d2fe0a9a373c4ab61552d4c8b945c1943b 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -309,7 +309,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) ubi->volumes[vol_id] = NULL; ubi->vol_count -= 1; spin_unlock(&ubi->volumes_lock); - ubi_eba_destroy_table(eba_tbl); out_acc: spin_lock(&ubi->volumes_lock); ubi->rsvd_pebs -= vol->reserved_pebs; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 7847de75a74ca7b47bb5bf6e636ac65eb330c096..e2e70efc02fb7dd56b8937766d43a98180c3d3ed 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -688,16 +688,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, #ifdef CONFIG_MTD_UBI_FASTMAP e1 = find_anchor_wl_entry(&ubi->used); - if (e1 && ubi->fm_next_anchor && - 
(ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) { + if (e1 && ubi->fm_anchor && + (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) { ubi->fm_do_produce_anchor = 1; - /* fm_next_anchor is no longer considered a good anchor - * candidate. + /* + * fm_anchor is no longer considered a good anchor. * NULL assignment also prevents multiple wear level checks * of this PEB. */ - wl_tree_add(ubi->fm_next_anchor, &ubi->free); - ubi->fm_next_anchor = NULL; + wl_tree_add(ubi->fm_anchor, &ubi->free); + ubi->fm_anchor = NULL; ubi->free_count++; } @@ -968,11 +968,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, spin_lock(&ubi->wl_lock); ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; + wl_entry_destroy(ubi, e1); + wl_entry_destroy(ubi, e2); spin_unlock(&ubi->wl_lock); ubi_free_vid_buf(vidb); - wl_entry_destroy(ubi, e1); - wl_entry_destroy(ubi, e2); out_ro: ubi_ro_mode(ubi); @@ -1086,12 +1086,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) if (!err) { spin_lock(&ubi->wl_lock); - if (!ubi->fm_disabled && !ubi->fm_next_anchor && + if (!ubi->fm_disabled && !ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) { - /* Abort anchor production, if needed it will be + /* + * Abort anchor production, if needed it will be * enabled again in the wear leveling started below. */ - ubi->fm_next_anchor = e; + ubi->fm_anchor = e; ubi->fm_do_produce_anchor = 0; } else { wl_tree_add(e, &ubi->free); @@ -1243,6 +1244,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, retry: spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; + if (!e) { + /* + * This wl entry has already been removed due to an error by + * another process (e.g. the wear leveling worker); that + * process (any except __erase_worker, which cannot run + * concurrently with ubi_wl_put_peb) will have switched UBI + * to read-only mode, so just ignore this wl entry. 
+ */ + spin_unlock(&ubi->wl_lock); + up_read(&ubi->fm_protect); + return 0; + } if (e == ubi->move_from) { /* * User is putting the physical eraseblock which was selected to diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index eb7f76753c9c0dbdd2ca07985d9c407b433ab506..9f44e2e458df17e08b2ab9238d5dca39c52110bb 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -136,6 +136,9 @@ static int com20020pci_probe(struct pci_dev *pdev, return -ENOMEM; ci = (struct com20020_pci_card_info *)id->driver_data; + if (!ci) + return -EINVAL; + priv->ci = ci; mm = &ci->misc_map; diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 39b128205f2551b97fb4213fe6127ea4fd3d4b85..53ef48588e59a1510ab5ac4d16a76f26d7728ca1 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -140,14 +140,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) oiph = skb_network_header(skb); skb_reset_network_header(skb); - if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET) + if (!ipv6_mod_enabled() || family == AF_INET) err = IP_ECN_decapsulate(oiph, skb); else err = IP6_ECN_decapsulate(oiph, skb); if (unlikely(err)) { if (log_ecn_error) { - if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET) + if (!ipv6_mod_enabled() || family == AF_INET) net_info_ratelimited("non-ECT from %pI4 " "with TOS=%#x\n", &((struct iphdr *)oiph)->saddr, @@ -213,11 +213,12 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port) int err; memset(&udp_conf, 0, sizeof(udp_conf)); -#if IS_ENABLED(CONFIG_IPV6) - udp_conf.family = AF_INET6; -#else - udp_conf.family = AF_INET; -#endif + + if (ipv6_mod_enabled()) + udp_conf.family = AF_INET6; + else + udp_conf.family = AF_INET; + udp_conf.local_udp_port = port; /* Open UDP socket */ err = udp_sock_create(net, &udp_conf, &sock); @@ -246,12 +247,6 @@ static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port) tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg); - /* As the setup_udp_tunnel_sock does not call udp_encap_enable if the - * socket type is v6 an explicit call to udp_encap_enable is needed. - */ - if (sock->sk->sk_family == AF_INET6) - udp_encap_enable(); - rcu_assign_pointer(bareudp->sock, sock); return 0; } @@ -445,7 +440,7 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev) } rcu_read_lock(); - if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6) + if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6) err = bareudp6_xmit_skb(skb, dev, bareudp, info); else err = bareudp_xmit_skb(skb, dev, bareudp, info); @@ -475,7 +470,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev, use_cache = ip_tunnel_dst_cache_usable(skb, info); - if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) { + if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; __be32 saddr; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index aa001b16765ae152e666fb1fa0d2ebd4ef8784e6..c2cef7ba2671969c74737d26ec3b6ea992d3b694 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -223,7 +223,7 @@ static inline int __check_agg_selection_timer(struct port *port) if (bond == NULL) return 0; - return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0; + return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 
1 : 0; } /** @@ -1003,8 +1003,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr) if (port->aggregator && port->aggregator->is_active && !__port_is_enabled(port)) { - __enable_port(port); + *update_slave_arr = true; } } break; @@ -1760,6 +1760,7 @@ static void ad_agg_selection_logic(struct aggregator *agg, port = port->next_port_in_aggregator) { __enable_port(port); } + *update_slave_arr = true; } } @@ -1975,7 +1976,7 @@ static void ad_marker_response_received(struct bond_marker *marker, */ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout) { - BOND_AD_INFO(bond).agg_select_timer = timeout; + atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout); } /** @@ -2258,6 +2259,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond) spin_unlock_bh(&bond->mode_lock); } +/** + * bond_agg_timer_advance - advance agg_select_timer + * @bond: bonding structure + * + * Return true when agg_select_timer reaches 0. + */ +static bool bond_agg_timer_advance(struct bonding *bond) +{ + int val, nval; + + while (1) { + val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer); + if (!val) + return false; + nval = val - 1; + if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer, + val, nval) == val) + break; + } + return nval == 0; +} + /** * bond_3ad_state_machine_handler - handle state machines timeout * @work: work context to fetch bonding struct to work on from @@ -2293,9 +2316,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) if (!bond_has_slaves(bond)) goto re_arm; - /* check if agg_select_timer timer after initialize is timed out */ - if (BOND_AD_INFO(bond).agg_select_timer && - !(--BOND_AD_INFO(bond).agg_select_timer)) { + if (bond_agg_timer_advance(bond)) { slave = bond_first_slave_rcu(bond); port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 4804264c012fdcba3001178a4124491593b1ce25..d3827905535ba84c2af50b924fa20eb068e6c26c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1062,9 +1062,6 @@ static bool bond_should_notify_peers(struct bonding *bond) slave = rcu_dereference(bond->curr_active_slave); rcu_read_unlock(); - netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", - slave ? slave->dev->name : "NULL"); - if (!slave || !bond->send_peer_notif || bond->send_peer_notif % max(1, bond->params.peer_notif_delay) != 0 || @@ -1072,6 +1069,9 @@ static bool bond_should_notify_peers(struct bonding *bond) test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) return false; + netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", + slave ? 
slave->dev->name : "NULL"); + return true; } @@ -4575,25 +4575,39 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, struct bonding *bond = netdev_priv(bond_dev); struct slave *slave = NULL; struct list_head *iter; + bool xmit_suc = false; + bool skb_used = false; bond_for_each_slave_rcu(bond, slave, iter) { - if (bond_is_last_slave(bond, slave)) - break; - if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { - struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); + struct sk_buff *skb2; + + if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)) + continue; + if (bond_is_last_slave(bond, slave)) { + skb2 = skb; + skb_used = true; + } else { + skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) { net_err_ratelimited("%s: Error: %s: skb_clone() failed\n", bond_dev->name, __func__); continue; } - bond_dev_queue_xmit(bond, skb2, slave->dev); } + + if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK) + xmit_suc = true; } - if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) - return bond_dev_queue_xmit(bond, skb, slave->dev); - return bond_tx_drop(bond_dev, skb); + if (!skb_used) + dev_kfree_skb_any(skb); + + if (xmit_suc) + return NETDEV_TX_OK; + + atomic_long_inc(&bond_dev->tx_dropped); + return NET_XMIT_DROP; } /*------------------------- Device initialization ---------------------------*/ diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 19a7e4adb9338d945490fb68117221a23ba3529b..19a19a7b7deb87b3a73895f044f4cd0e387650bb 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1491,8 +1491,6 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) M_CAN_FIFO_DATA(i / 4), *(u32 *)(cf->data + i)); - can_put_echo_skb(skb, dev, 0); - if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) { cccr = m_can_read(cdev, M_CAN_CCCR); cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); @@ -1509,6 +1507,9 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) m_can_write(cdev, M_CAN_CCCR, cccr); } m_can_write(cdev, M_CAN_TXBTIE, 0x1); + + can_put_echo_skb(skb, dev, 0); + m_can_write(cdev, M_CAN_TXBAR, 0x1); /* End of xmit function for version 3.0.x */ } else { diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index de59dd6aad29918633ccfc4a237fccc26643b1d5..67f0f14e2bf4e1eadcd9409aa80d0a4ac6ece0dd 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1598,15 +1598,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll, RCANFD_NAPI_WEIGHT); + spin_lock_init(&priv->tx_lock); + devm_can_led_init(ndev); + gpriv->ch[priv->channel] = priv; err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed, error %d\n", err); goto fail_candev; } - spin_lock_init(&priv->tx_lock); - devm_can_led_init(ndev); - gpriv->ch[priv->channel] = priv; dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel); return 0; diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c index 2e93ee79237395e310b5f16e0ef15aa77e427111..e5c939b63fa65889e34e274aaee1426a349b056e 100644 --- a/drivers/net/can/softing/softing_cs.c +++ b/drivers/net/can/softing/softing_cs.c @@ -293,7 +293,7 @@ static int softingcs_probe(struct pcmcia_device *pcmcia) return 0; platform_failed: - kfree(dev); + platform_device_put(pdev); mem_failed: pcmcia_bad: pcmcia_failed: diff --git a/drivers/net/can/softing/softing_fw.c 
b/drivers/net/can/softing/softing_fw.c index ccd649a8e37bdc23de9449c42db7cdda2e714fd4..bad69a4abec1043e5d62e87725ccca07627eb8dd 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c @@ -565,18 +565,19 @@ int softing_startstop(struct net_device *dev, int up) if (ret < 0) goto failed; } - /* enable_error_frame */ - /* + + /* enable_error_frame + * * Error reporting is switched off at the moment since * the receiving of them is not yet 100% verified * This should be enabled sooner or later - * - if (error_reporting) { + */ + if (0 && error_reporting) { ret = softing_fct_cmd(card, 51, "enable_error_frame"); if (ret < 0) goto failed; } - */ + /* initialize interface */ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 4e13f6dfb91a2eecf603c8abc19dfc9e2e77e6e9..189d226588133dcdf2d6442fecc33e2c3bf496c9 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -1288,7 +1288,7 @@ mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv, len > tx_ring->obj_num || offset + len > tx_ring->obj_num)) { netdev_err(priv->ndev, - "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n", + "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n", tx_ring->obj_num, offset, len); return -ERANGE; } @@ -2497,7 +2497,7 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv) if (!mcp251xfd_is_251X(priv) && priv->devtype_data.model != devtype_data->model) { netdev_info(ndev, - "Detected %s, but firmware specifies a %s. Fixing up.", + "Detected %s, but firmware specifies a %s. Fixing up.\n", __mcp251xfd_get_model_str(devtype_data->model), mcp251xfd_get_model_str(priv)); } @@ -2534,7 +2534,7 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv) return 0; netdev_info(priv->ndev, - "RX_INT active after softreset, disabling RX_INT support."); + "RX_INT active after softreset, disabling RX_INT support.\n"); devm_gpiod_put(&priv->spi->dev, priv->rx_int); priv->rx_int = NULL; @@ -2578,7 +2578,7 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, out_kfree_buf_rx: kfree(buf_rx); - return 0; + return err; } #define MCP251XFD_QUIRK_ACTIVE(quirk) \ diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 249d2fba28c7f4d18fab66ea7346967c77dbb4cb..6458da9c13b95500422a0beb364839db1f094122 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -823,7 +823,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne usb_unanchor_urb(urb); usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); - dev_kfree_skb(skb); atomic_dec(&dev->active_tx_urbs); diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 018ca3b057a3baeb59d460385b464c5faa7b7747..e023c401f4f77387826eff3f3f71a6fb862a89f5 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -190,8 +190,8 @@ struct gs_can { struct gs_usb { struct gs_can *canch[GS_MAX_INTF]; struct usb_anchor rx_submitted; - atomic_t active_channels; struct usb_device *udev; + u8 active_channels; }; /* 'allocate' a tx context. 
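The bond_3ad hunks a little earlier convert agg_select_timer to an atomic_t, and bond_agg_timer_advance() counts it down with a compare-and-swap loop so the periodic worker never decrements a counter that another context has just reset to zero. A self-contained C11 sketch of that countdown pattern follows; the function and variable names are illustrative, not the bonding driver's API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Decrement a shared countdown without a lock: retry until we either
 * observe zero (nothing to do) or win the race to move val -> val - 1.
 * Returns true exactly once, on the transition to zero. */
static bool timer_advance(atomic_int *timer)
{
	int val, nval;

	for (;;) {
		val = atomic_load(timer);
		if (!val)
			return false;
		nval = val - 1;
		if (atomic_compare_exchange_weak(timer, &val, nval))
			break;
	}
	return nval == 0;
}

int main(void)
{
	atomic_int t = 3;
	int ticks = 0;

	while (!timer_advance(&t))
		ticks++;
	printf("expired after %d early ticks, timer=%d\n", ticks, atomic_load(&t));
	return 0;
}

The early return on zero is what makes a concurrent reset (writing the timeout back into the atomic) safe: a stale worker simply sees zero or loses the compare-and-swap and retries against the fresh value.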
@@ -320,7 +320,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) /* device reports out of range channel id */ if (hf->channel >= GS_MAX_INTF) - goto resubmit_urb; + goto device_detach; dev = usbcan->canch[hf->channel]; @@ -405,6 +405,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) /* USB failure take down all interfaces */ if (rc == -ENODEV) { + device_detach: for (rc = 0; rc < GS_MAX_INTF; rc++) { if (usbcan->canch[rc]) netif_device_detach(usbcan->canch[rc]->netdev); @@ -506,6 +507,8 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, hf->echo_id = idx; hf->channel = dev->channel; + hf->flags = 0; + hf->reserved = 0; cf = (struct can_frame *)skb->data; @@ -585,7 +588,7 @@ static int gs_can_open(struct net_device *netdev) if (rc) return rc; - if (atomic_add_return(1, &parent->active_channels) == 1) { + if (!parent->active_channels) { for (i = 0; i < GS_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; @@ -686,6 +689,7 @@ static int gs_can_open(struct net_device *netdev) dev->can.state = CAN_STATE_ERROR_ACTIVE; + parent->active_channels++; if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) netif_start_queue(netdev); @@ -701,7 +705,8 @@ static int gs_can_close(struct net_device *netdev) netif_stop_queue(netdev); /* Stop polling */ - if (atomic_dec_and_test(&parent->active_channels)) + parent->active_channels--; + if (!parent->active_channels) usb_kill_anchored_urbs(&parent->rx_submitted); /* Stop sending URBs */ @@ -980,8 +985,6 @@ static int gs_usb_probe(struct usb_interface *intf, init_usb_anchor(&dev->rx_submitted); - atomic_set(&dev->active_channels, 0); - usb_set_intfdata(intf, dev); dev->udev = interface_to_usbdev(intf); diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 912160fd2ca02457f44c688e6040a1ff16bf6b09..21063335ab599cde2e410e7ca0389f6b3ae1bdb6 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -33,10 +33,6 @@ #define MCBA_USB_RX_BUFF_SIZE 64 #define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg)) -/* MCBA endpoint numbers */ -#define MCBA_USB_EP_IN 1 -#define MCBA_USB_EP_OUT 1 - /* Microchip command id */ #define MBCA_CMD_RECEIVE_MESSAGE 0xE3 #define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5 @@ -84,6 +80,8 @@ struct mcba_priv { atomic_t free_ctx_cnt; void *rxbuf[MCBA_MAX_RX_URBS]; dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS]; + int rx_pipe; + int tx_pipe; }; /* CAN frame */ @@ -272,10 +270,8 @@ static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv, memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE); - usb_fill_bulk_urb(urb, priv->udev, - usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf, - MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback, - ctx); + usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf, MCBA_USB_TX_BUFF_SIZE, + mcba_usb_write_bulk_callback, ctx); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &priv->tx_submitted); @@ -368,7 +364,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, xmit_failed: can_free_echo_skb(priv->netdev, ctx->ndx); mcba_usb_free_ctx(ctx); - dev_kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; @@ -611,7 +606,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb) resubmit_urb: usb_fill_bulk_urb(urb, priv->udev, - usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_OUT), + priv->rx_pipe, urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE, mcba_usb_read_bulk_callback, priv); @@ -656,7 +651,7 @@ static int mcba_usb_start(struct mcba_priv *priv) urb->transfer_dma = buf_dma; usb_fill_bulk_urb(urb, priv->udev, - 
usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN), + priv->rx_pipe, buf, MCBA_USB_RX_BUFF_SIZE, mcba_usb_read_bulk_callback, priv); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; @@ -810,6 +805,13 @@ static int mcba_usb_probe(struct usb_interface *intf, struct mcba_priv *priv; int err; struct usb_device *usbdev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *in, *out; + + err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL); + if (err) { + dev_err(&intf->dev, "Can't find endpoints\n"); + return err; + } netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS); if (!netdev) { @@ -855,6 +857,9 @@ static int mcba_usb_probe(struct usb_interface *intf, goto cleanup_free_candev; } + priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress); + priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress); + devm_can_led_init(netdev); /* Start USB dev only if we have successfully registered CAN device */ diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index ca7c55d6a41dbe2001655be026f063ed8e47bb39..985e00aee4ee1253fd2d7399f1d551859e422843 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -670,9 +670,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, atomic_inc(&priv->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); - if (unlikely(err)) - goto failed; - else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) + if (unlikely(err)) { + can_free_echo_skb(netdev, context->echo_index); + + usb_unanchor_urb(urb); + usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); + + atomic_dec(&priv->active_tx_urbs); + + if (err == -ENODEV) + netif_device_detach(netdev); + else + netdev_warn(netdev, "failed tx_urb %d\n", err); + stats->tx_dropped++; + } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) /* Slow down tx path */ netif_stop_queue(netdev); @@ -691,19 +702,6 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; -failed: - can_free_echo_skb(netdev, context->echo_index); - - usb_unanchor_urb(urb); - usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); - - atomic_dec(&priv->active_tx_urbs); - - if (err == -ENODEV) - netif_device_detach(netdev); - else - netdev_warn(netdev, "failed tx_urb %d\n", err); - nomembuf: usb_free_urb(urb); diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 7000c6cd1e48bc5d8fbee7bd3b69523cc69f9186..282c53ef76d233535938091043c3d52072c36e9b 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -148,7 +148,7 @@ static void vxcan_setup(struct net_device *dev) dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 0; - dev->flags = (IFF_NOARP|IFF_ECHO); + dev->flags = IFF_NOARP; dev->netdev_ops = &vxcan_netdev_ops; dev->needs_free_netdev = true; diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 48d746e18f30219e49ed04c24165a789d0bc71fc..375998263af7a2abb70f5abca7d86c8619541346 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1762,7 +1762,12 @@ static int xcan_probe(struct platform_device *pdev) spin_lock_init(&priv->tx_lock); /* Get IRQ for the device */ - ndev->irq = platform_get_irq(pdev, 0); + ret = platform_get_irq(pdev, 0); + if (ret < 0) + goto err_free; + + ndev->irq = ret; + ndev->flags |= IFF_ECHO; /* We support local echo */ platform_set_drvdata(pdev, ndev); diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 
2451f61a38e4ae3582c41fe6a313e012d87031ef..9e32ea9c116473d532784e58606f64b6839974f2 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -36,6 +36,7 @@ config NET_DSA_MT7530 tristate "MediaTek MT753x and MT7621 Ethernet switch support" depends on NET_DSA select NET_DSA_TAG_MTK + select MEDIATEK_GE_PHY help This enables support for the MediaTek MT7530, MT7531, and MT7621 Ethernet switch chips. diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 690e9d9495e7593aa2eda07ff1dae9c2049fef3a..08a675a5328d718091ee6189e186d6dd06fec394 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -504,7 +504,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) get_device(&priv->master_mii_bus->dev); priv->master_mii_dn = dn; - priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); + priv->slave_mii_bus = mdiobus_alloc(); if (!priv->slave_mii_bus) { of_node_put(dn); return -ENOMEM; @@ -564,8 +564,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) } err = mdiobus_register(priv->slave_mii_bus); - if (err && dn) + if (err && dn) { + mdiobus_free(priv->slave_mii_bus); of_node_put(dn); + } return err; } @@ -573,6 +575,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) { mdiobus_unregister(priv->slave_mii_bus); + mdiobus_free(priv->slave_mii_bus); of_node_put(priv->master_mii_dn); } diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index d82cee5d92022be1ee39e1ab1a314c2352cf1f3a..cbf44fc7d03aa4f650c8b22e8d332096a13e6220 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -567,14 +567,14 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv, static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv, int port, u32 location) { - struct cfp_rule *rule = NULL; + struct cfp_rule *rule; list_for_each_entry(rule, &priv->cfp.rules_list, next) { if (rule->port == port && rule->fs.location == location) - break; + return rule; } - return rule; + return NULL; } static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port, diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index dcf1fc89451f28f98f501536b1ce334f717b4d49..2044d440d7de4cc15c841c4f331a62d6d2cebc70 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1305,7 +1305,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip, struct device_node *np) { chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset", - GPIOD_OUT_LOW); + GPIOD_OUT_HIGH); if (IS_ERR(chip->reset_gpio)) return PTR_ERR(chip->reset_gpio); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 4d23a7aba79615c6bdd360a63d8511c6dbe36d63..80ef7ea779545c3db1d0bfb64256a30aaac82a96 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -495,8 +495,9 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg) static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np) { struct dsa_switch *ds = priv->ds; + int err; - ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev); + ds->slave_mii_bus = mdiobus_alloc(); if (!ds->slave_mii_bus) return -ENOMEM; @@ -509,7 +510,11 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np) ds->slave_mii_bus->parent = priv->dev; ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; - return of_mdiobus_register(ds->slave_mii_bus, mdio_np); + err = of_mdiobus_register(ds->slave_mii_bus, mdio_np); + 
if (err) + mdiobus_free(ds->slave_mii_bus); + + return err; } static int gswip_pce_table_entry_read(struct gswip_priv *priv, @@ -2086,8 +2091,10 @@ static int gswip_probe(struct platform_device *pdev) gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); dsa_unregister_switch(priv->ds); mdio_bus: - if (mdio_np) + if (mdio_np) { mdiobus_unregister(priv->ds->slave_mii_bus); + mdiobus_free(priv->ds->slave_mii_bus); + } put_mdio_node: of_node_put(mdio_np); for (i = 0; i < priv->num_gphy_fw; i++) @@ -2108,6 +2115,7 @@ static int gswip_remove(struct platform_device *pdev) if (priv->ds->slave_mii_bus) { mdiobus_unregister(priv->ds->slave_mii_bus); of_node_put(priv->ds->slave_mii_bus->dev.of_node); + mdiobus_free(priv->ds->slave_mii_bus); } for (i = 0; i < priv->num_gphy_fw; i++) diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c index 8b00f8e6c02f4f2a2fbc545c026cb2daaa48528c..5639c5c59e255e6f639bad75a722d842f723c51c 100644 --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -86,12 +86,23 @@ static const struct of_device_id ksz8795_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, ksz8795_dt_ids); +static const struct spi_device_id ksz8795_spi_ids[] = { + { "ksz8765" }, + { "ksz8794" }, + { "ksz8795" }, + { "ksz8863" }, + { "ksz8873" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, ksz8795_spi_ids); + static struct spi_driver ksz8795_spi_driver = { .driver = { .name = "ksz8795-switch", .owner = THIS_MODULE, .of_match_table = of_match_ptr(ksz8795_dt_ids), }, + .id_table = ksz8795_spi_ids, .probe = ksz8795_spi_probe, .remove = ksz8795_spi_remove, .shutdown = ksz8795_spi_shutdown, diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index 1142768969c205b9c4f5a25bfc56bd452c4dd283..9bda83d063e8ec652434b02aca2047d165f216de 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -88,12 +88,24 @@ static const struct of_device_id ksz9477_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); +static const struct spi_device_id ksz9477_spi_ids[] = { + { "ksz9477" }, + { "ksz9897" }, + { "ksz9893" }, + { "ksz9563" }, + { "ksz8563" }, + { "ksz9567" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, ksz9477_spi_ids); + static struct spi_driver ksz9477_spi_driver = { .driver = { .name = "ksz9477-switch", .owner = THIS_MODULE, .of_match_table = of_match_ptr(ksz9477_dt_ids), }, + .id_table = ksz9477_spi_ids, .probe = ksz9477_spi_probe, .remove = ksz9477_spi_remove, .shutdown = ksz9477_spi_shutdown, diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 1f642fdbf214c80864cdb46a16f3ab3a674dc684..5ee8809bc27112cfd9da1430531abfc6da73f592 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2342,7 +2342,7 @@ mt753x_phylink_validate(struct dsa_switch *ds, int port, phylink_set_port_modes(mask); - if (state->interface != PHY_INTERFACE_MODE_TRGMII || + if (state->interface != PHY_INTERFACE_MODE_TRGMII && !phy_interface_mode_is_8023z(state->interface)) { phylink_set(mask, 10baseT_Half); phylink_set(mask, 10baseT_Full); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index afc5500ef8ed9227176b7cda75a80d395dcd43e7..e79a808375fc862fd886a1d57c97a92a6d1bc7ac 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3072,7 +3072,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, return err; } - bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus)); + 
bus = mdiobus_alloc_size(sizeof(*mdio_bus)); if (!bus) return -ENOMEM; @@ -3097,14 +3097,14 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, if (!external) { err = mv88e6xxx_g2_irq_mdio_setup(chip, bus); if (err) - return err; + goto out; } err = of_mdiobus_register(bus, np); if (err) { dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); mv88e6xxx_g2_irq_mdio_free(chip, bus); - return err; + goto out; } if (external) @@ -3113,21 +3113,26 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, list_add(&mdio_bus->list, &chip->mdios); return 0; + +out: + mdiobus_free(bus); + return err; } static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip) { - struct mv88e6xxx_mdio_bus *mdio_bus; + struct mv88e6xxx_mdio_bus *mdio_bus, *p; struct mii_bus *bus; - list_for_each_entry(mdio_bus, &chip->mdios, list) { + list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) { bus = mdio_bus->bus; if (!mdio_bus->external) mv88e6xxx_g2_irq_mdio_free(chip, bus); mdiobus_unregister(bus); + mdiobus_free(bus); } } @@ -3292,6 +3297,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_set_link = mv88e6xxx_port_set_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 2e5bbdca5ea4756df5089804bce7970735aba5ff..c96dfc11aa6fc122091436e8e6df0073fd572680 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1050,7 +1050,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) return PTR_ERR(hw); } - bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv)); + bus = mdiobus_alloc_size(sizeof(*mdio_priv)); if (!bus) return -ENOMEM; @@ -1070,6 +1070,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) rc = mdiobus_register(bus); if (rc < 0) { dev_err(dev, "failed to register MDIO bus\n"); + mdiobus_free(bus); return rc; } @@ -1119,6 +1120,7 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot) lynx_pcs_destroy(pcs); } mdiobus_unregister(felix->imdio); + mdiobus_free(felix->imdio); } static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port, @@ -1464,7 +1466,7 @@ static int felix_pci_probe(struct pci_dev *pdev, err = dsa_register_switch(ds); if (err) { - dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); + dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n"); goto err_register_ds; } diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c index 661745932a5392004231fcd06929ac947223a483..c33bdcf7efc587e4d5d20c45801e9b0c21a511c8 100644 --- a/drivers/net/dsa/qca/ar9331.c +++ b/drivers/net/dsa/qca/ar9331.c @@ -289,7 +289,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv) if (!mnp) return -ENODEV; - ret = of_mdiobus_register(mbus, mnp); + ret = devm_of_mdiobus_register(dev, mbus, mnp); of_node_put(mnp); if (ret) return ret; @@ -856,7 +856,6 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev) struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev); irq_domain_remove(priv->irqdomain); - mdiobus_unregister(priv->mbus); dsa_unregister_switch(&priv->ds); reset_control_assert(priv->sw_reset); diff --git a/drivers/net/ethernet/8390/mcf8390.c 
b/drivers/net/ethernet/8390/mcf8390.c index 4ad8031ab6695707bd275a4e0d66b65dfe5c0d95..065fdbe66c425de384dfaeccef0182f7eaae5f46 100644 --- a/drivers/net/ethernet/8390/mcf8390.c +++ b/drivers/net/ethernet/8390/mcf8390.c @@ -406,12 +406,12 @@ static int mcf8390_init(struct net_device *dev) static int mcf8390_probe(struct platform_device *pdev) { struct net_device *dev; - struct resource *mem, *irq; + struct resource *mem; resource_size_t msize; - int ret; + int ret, irq; - irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (irq == NULL) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { dev_err(&pdev->dev, "no IRQ specified?\n"); return -ENXIO; } @@ -434,7 +434,7 @@ static int mcf8390_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); - dev->irq = irq->start; + dev->irq = irq; dev->base_addr = mem->start; ret = mcf8390_init(dev); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 7d7f6a972c289ca329b1353e6469f83a56d97502..6998a8cb3faadeebae2d620a3a47d5996fc146b4 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -160,7 +160,6 @@ source "drivers/net/ethernet/pasemi/Kconfig" source "drivers/net/ethernet/pensando/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" source "drivers/net/ethernet/qualcomm/Kconfig" -source "drivers/net/ethernet/ramaxel/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" source "drivers/net/ethernet/renesas/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 5e375d5a62307b75500d9aa0b51508f28be1b43a..6a7d68ea63ed08ede187bbb3caf8186cdf03dd8f 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -71,7 +71,6 @@ obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ -obj-$(CONFIG_NET_VENDOR_RAMAXEL) += ramaxel/ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/ obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 395eb0b52680211eac42a3baea57e3e21091b539..a816b30bca04c7641bc3142df5a814b02ea27b3b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata) if (!channel->tx_ring) break; + /* Deactivate the Tx timer */ del_timer_sync(&channel->tx_timer); + channel->tx_timer_active = 0; } } @@ -2557,6 +2559,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) buf2_len = xgbe_rx_buf2_len(rdata, packet, len); len += buf2_len; + if (buf2_len > rdata->rx.buf.dma_len) { + /* Hardware inconsistency within the descriptors + * that has resulted in a length underflow. 
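The new buf2_len check above encodes an invariant worth stating on its own: a fragment length reported by the hardware must never exceed the length that was DMA-mapped for the buffer, otherwise the descriptor state is inconsistent and the frame has to be dropped rather than copied. As a hedged one-liner with illustrative names:

/* A fragment length reported by hardware may never exceed what was
 * DMA-mapped for the buffer; anything larger means the descriptor
 * chain is inconsistent and the frame must be dropped, since copying
 * would read past the mapping.
 */
static bool example_rx_len_valid(unsigned int reported_len,
				 unsigned int mapped_dma_len)
{
	return reported_len <= mapped_dma_len;
}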
+ */ + error = 1; + goto skip_data; + } + if (!skb) { skb = xgbe_create_skb(pdata, napi, rdata, buf1_len); @@ -2586,8 +2596,10 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) if (!last || context_next) goto read_again; - if (!skb) + if (!skb || error) { + dev_kfree_skb(skb); goto next_packet; + } /* Be sure we don't exceed the configured MTU */ max_len = netdev->mtu + ETH_HLEN; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 90cb55eb546658456f7ace869a079171d169d041..014513ce00a14b06149f5f82257359b11c2d34f5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -418,6 +418,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev) pci_free_irq_vectors(pdata->pcidev); + /* Disable all interrupts in the hardware */ + XP_IOWRITE(pdata, XP_INT_EN, 0x0); + xgbe_free_pdata(pdata); } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 5f1fc6582d74a2b624ac3351b6e0ec41f400bc11..78c7cbc372b0559c1aaf2a95bb9ac84c9dba860a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -696,6 +696,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, buf_pool->rx_skb[skb_index] = NULL; datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1)); + + /* strip off CRC as HW isn't doing this */ + nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0)); + if (!nv) + datalen -= 4; + skb_put(skb, datalen); prefetch(skb->data - NET_IP_ALIGN); skb->protocol = eth_type_trans(skb, ndev); @@ -717,12 +723,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, } } - nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0)); - if (!nv) { - /* strip off CRC as HW isn't doing this */ - datalen -= 4; + if (!nv) goto skip_jumbo; - } slots = page_pool->slots - 1; head = page_pool->head; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d04994840b87d5cf8f406bdc80556f77c9ebe501..2a61229d3f9762cf0b3d389d1a3916ad0555c1da 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1850,6 +1850,14 @@ struct bnx2x { /* Vxlan/Geneve related information */ u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX]; + +#define FW_CAP_INVALIDATE_VF_FP_HSI BIT(0) + u32 fw_cap; + + u32 fw_major; + u32 fw_minor; + u32 fw_rev; + u32 fw_eng; }; /* Tx queues may be less or equal to Rx queues */ @@ -2526,5 +2534,4 @@ void bnx2x_register_phc(struct bnx2x *bp); * Meant for implicit re-load flows. */ int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp); - #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b5d954cb409ae4de9e6e6d70dede933a2154bbfa..198e041d841091d5d6bcbe76b0a858f6de715de0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2363,26 +2363,30 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err) /* is another pf loaded on this engine? 
*/ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP && load_code != FW_MSG_CODE_DRV_LOAD_COMMON) { - /* build my FW version dword */ - u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) + - (BCM_5710_FW_MINOR_VERSION << 8) + - (BCM_5710_FW_REVISION_VERSION << 16) + - (BCM_5710_FW_ENGINEERING_VERSION << 24); + u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng; + u32 loaded_fw; /* read loaded FW from chip */ - u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM); + loaded_fw = REG_RD(bp, XSEM_REG_PRAM); - DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n", - loaded_fw, my_fw); + loaded_fw_major = loaded_fw & 0xff; + loaded_fw_minor = (loaded_fw >> 8) & 0xff; + loaded_fw_rev = (loaded_fw >> 16) & 0xff; + loaded_fw_eng = (loaded_fw >> 24) & 0xff; + + DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n", + loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng); /* abort nic load if version mismatch */ - if (my_fw != loaded_fw) { + if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION || + loaded_fw_minor != BCM_5710_FW_MINOR_VERSION || + loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION || + loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) { if (print_err) - BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n", - loaded_fw, my_fw); + BNX2X_ERR("loaded FW incompatible. Aborting\n"); else - BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n", - loaded_fw, my_fw); + BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n"); + return -EBUSY; } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 3f8435208bf498171a48eb0b4336295b40aec549..a84d015da5dfa6005ce846349915038d5dcd8826 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -241,6 +241,8 @@ IRO[221].m2)) #define XSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[48].base + ((funcId) * IRO[48].m1)) +#define XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(fid) \ + (IRO[386].base + ((fid) * IRO[386].m1)) #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 /* eth hsi version */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 622fadc50316ee9c37af97a45e266ab7fb187b63..611efee758340bd9dc5338ce68409bab179bf650 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -3024,7 +3024,8 @@ struct afex_stats { #define BCM_5710_FW_MAJOR_VERSION 7 #define BCM_5710_FW_MINOR_VERSION 13 -#define BCM_5710_FW_REVISION_VERSION 15 +#define BCM_5710_FW_REVISION_VERSION 21 +#define BCM_5710_FW_REVISION_VERSION_V15 15 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 28069b29086257e922ef0ebf78cb9f5084ec3759..6333471916be10ff9cb601f1a9ce9404e72a7a12 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -74,9 +74,19 @@ __stringify(BCM_5710_FW_MINOR_VERSION) "." \ __stringify(BCM_5710_FW_REVISION_VERSION) "." \ __stringify(BCM_5710_FW_ENGINEERING_VERSION) + +#define FW_FILE_VERSION_V15 \ + __stringify(BCM_5710_FW_MAJOR_VERSION) "." \ + __stringify(BCM_5710_FW_MINOR_VERSION) "." \ + __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." 
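For reference, the byte layout the read-back above assumes (it matches how the old my_fw word was packed): major in bits 7:0, minor in 15:8, revision in 23:16, engineering in 31:24. A small helper in that spirit, names illustrative:

#include <linux/types.h>

/* Layout of the version word read back from XSEM_REG_PRAM:
 * major 7:0, minor 15:8, revision 23:16, engineering 31:24.
 */
static void example_unpack_fw_ver(u32 word, u8 *major, u8 *minor,
				  u8 *rev, u8 *eng)
{
	*major = word & 0xff;
	*minor = (word >> 8) & 0xff;
	*rev = (word >> 16) & 0xff;
	*eng = (word >> 24) & 0xff;
}

The compatibility rule then reads: major, minor and engineering must match exactly, while the loaded revision merely has to be no older than V15.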
\ + __stringify(BCM_5710_FW_ENGINEERING_VERSION) + #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw" #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" +#define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw" +#define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw" +#define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw" /* Time in jiffies before concluding the transmitter is hung */ #define TX_TIMEOUT (5*HZ) @@ -90,6 +100,9 @@ MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FW_FILE_NAME_E1); MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); +MODULE_FIRMWARE(FW_FILE_NAME_E1_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15); +MODULE_FIRMWARE(FW_FILE_NAME_E2_V15); int bnx2x_num_queues; module_param_named(num_queues, bnx2x_num_queues, int, 0444); @@ -747,9 +760,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp) CHIP_IS_E1(bp) ? "everest1" : CHIP_IS_E1H(bp) ? "everest1h" : CHIP_IS_E2(bp) ? "everest2" : "everest3", - BCM_5710_FW_MAJOR_VERSION, - BCM_5710_FW_MINOR_VERSION, - BCM_5710_FW_REVISION_VERSION); + bp->fw_major, bp->fw_minor, bp->fw_rev); return rc; } @@ -13366,16 +13377,11 @@ static int bnx2x_check_firmware(struct bnx2x *bp) /* Check FW version */ offset = be32_to_cpu(fw_hdr->fw_version.offset); fw_ver = firmware->data + offset; - if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) || - (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || - (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || - (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { + if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor || + fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) { BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n", - fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], - BCM_5710_FW_MAJOR_VERSION, - BCM_5710_FW_MINOR_VERSION, - BCM_5710_FW_REVISION_VERSION, - BCM_5710_FW_ENGINEERING_VERSION); + fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3], + bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng); return -EINVAL; } @@ -13455,32 +13461,49 @@ do { \ static int bnx2x_init_firmware(struct bnx2x *bp) { - const char *fw_file_name; + const char *fw_file_name, *fw_file_name_v15; struct bnx2x_fw_file_hdr *fw_hdr; int rc; if (bp->firmware) return 0; - if (CHIP_IS_E1(bp)) + if (CHIP_IS_E1(bp)) { fw_file_name = FW_FILE_NAME_E1; - else if (CHIP_IS_E1H(bp)) + fw_file_name_v15 = FW_FILE_NAME_E1_V15; + } else if (CHIP_IS_E1H(bp)) { fw_file_name = FW_FILE_NAME_E1H; - else if (!CHIP_IS_E1x(bp)) + fw_file_name_v15 = FW_FILE_NAME_E1H_V15; + } else if (!CHIP_IS_E1x(bp)) { fw_file_name = FW_FILE_NAME_E2; - else { + fw_file_name_v15 = FW_FILE_NAME_E2_V15; + } else { BNX2X_ERR("Unsupported chip revision\n"); return -EINVAL; } + BNX2X_DEV_INFO("Loading %s\n", fw_file_name); rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev); if (rc) { - BNX2X_ERR("Can't load firmware file %s\n", - fw_file_name); - goto request_firmware_exit; + BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15); + + /* try to load prev version */ + rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev); + + if (rc) + goto request_firmware_exit; + + bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15; + } else { + bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI; + bp->fw_rev = BCM_5710_FW_REVISION_VERSION; } + bp->fw_major = BCM_5710_FW_MAJOR_VERSION; + bp->fw_minor = BCM_5710_FW_MINOR_VERSION; + bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION; + rc = bnx2x_check_firmware(bp); if (rc) { 
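The probe path above now degrades gracefully: if the current firmware file is missing, the driver retries the previous revision and leaves the new capability bit clear. Condensed into a sketch with hypothetical file names, flag and state type:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/firmware.h>

#define EXAMPLE_CAP_NEW		BIT(0)	/* hypothetical capability flag */

struct example_fw_state {
	const struct firmware *fw;
	u32 fw_cap;
	u32 fw_rev;
};

static int example_load_fw(struct example_fw_state *st, struct device *dev)
{
	int rc;

	rc = request_firmware(&st->fw, "vendor/example-new.fw", dev);
	if (rc) {
		/* current blob missing: fall back to the previous one */
		rc = request_firmware(&st->fw, "vendor/example-old.fw", dev);
		if (rc)
			return rc;
		st->fw_rev = 15;	/* older interface, capability stays clear */
	} else {
		st->fw_cap |= EXAMPLE_CAP_NEW;
		st->fw_rev = 21;
	}

	return 0;
}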
BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 03eb0179ec0085eb07a2b82b1293664e5419279a..08437eaacbb966cf3909fd21d14ae2fe15e11a5e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -758,9 +758,18 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) { + u16 abs_fid; + + abs_fid = FW_VF_HANDLE(abs_vfid); + /* set the VF-PF association in the FW */ - storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); - storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); + storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp)); + storm_memset_func_en(bp, abs_fid, 1); + + /* Invalidate fp_hsi version for vfs */ + if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI) + REG_WR8(bp, BAR_XSTRORM_INTMEM + + XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(abs_fid), 0); /* clear vf errors*/ bnx2x_vf_semi_clear_err(bp, abs_vfid); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 92f9f7f5240b623b0a83a50bccb09cd4e1a4f76a..34affd1de91daad7c6c18ccacfbe6b57c3efb4a8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -569,7 +569,8 @@ struct nqe_cn { #define BNXT_MAX_MTU 9500 #define BNXT_MAX_PAGE_MODE_MTU \ ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \ - XDP_PACKET_HEADROOM) + XDP_PACKET_HEADROOM - \ + SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))) #define BNXT_MIN_PKT_SIZE 52 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index f12a01ba86cf407ad8bf48595c20f0a22b7d4b2f..d90b7b85c052e94ac30da77393ae61d29256e5a0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1934,6 +1934,9 @@ static int bnxt_get_fecparam(struct net_device *dev, case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: fec->active_fec |= ETHTOOL_FEC_LLRS; break; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: + fec->active_fec |= ETHTOOL_FEC_OFF; + break; } return 0; } @@ -2046,9 +2049,7 @@ static int bnxt_set_pauseparam(struct net_device *dev, } link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; - if (bp->hwrm_spec_code >= 0x10201) - link_info->req_flow_ctrl = - PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; + link_info->req_flow_ctrl = 0; } else { /* when transition from auto pause to force pause, * force a link change diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index aac837aa2c24bd7a76ff5b65222d8767e0e34416..5de37c33a737c62ad7d762416770e7a523310d4d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2243,8 +2243,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, dma_length_status = status->length_status; if (dev->features & NETIF_F_RXCSUM) { rx_csum = (__force __be16)(status->rx_csum & 0xffff); - skb->csum = (__force __wsum)ntohs(rx_csum); - skb->ip_summed = CHECKSUM_COMPLETE; + if (rx_csum) { + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } } /* DMA flags and length are still valid no matter how @@ -3966,10 +3968,12 @@ static int bcmgenet_probe(struct platform_device *pdev) /* Request the WOL interrupt and advertise 
suspend if available */ priv->wol_irq_disabled = true; - err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0, - dev->name, priv); - if (!err) - device_set_wakeup_capable(&pdev->dev, 1); + if (priv->wol_irq > 0) { + err = devm_request_irq(&pdev->dev, priv->wol_irq, + bcmgenet_wol_isr, 0, dev->name, priv); + if (!err) + device_set_wakeup_capable(&pdev->dev, 1); + } /* Set the needed headroom to account for any possible * features enabling/disabling at runtime diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index e84ad587fb2141c72408f413724c432d84717786..2c2a56d5a0a1a87e41eec62dd5b1bbd1abbdec37 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -41,6 +41,13 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) { + wol->supported = 0; + wol->wolopts = 0; + return; + } wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index f2ec92b6123daf465f5a8d3bf2695098793c3c2d..78c6d133f54fad5d759b57adfe30ce256feb2cc7 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1448,7 +1448,14 @@ static int macb_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete_done(napi, work_done); - /* Packets received while interrupts were disabled */ + /* RSR bits only seem to propagate to raise interrupts when + * interrupts are enabled at the time, so if bits are already + * set due to packets received while interrupts were disabled, + * they will not cause another interrupt to be generated when + * interrupts are re-enabled. + * Check for this case here. This has been seen to happen + * around 30% of the time under heavy network load. + */ status = macb_readl(bp, RSR); if (status) { if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) @@ -1456,6 +1463,22 @@ static int macb_poll(struct napi_struct *napi, int budget) napi_reschedule(napi); } else { queue_writel(queue, IER, bp->rx_intr_mask); + + /* In rare cases, packets could have been received in + * the window between the check above and re-enabling + * interrupts. Therefore, a double-check is required + * to avoid losing a wakeup. This can potentially race + * with the interrupt handler doing the same actions + * if an interrupt is raised just after enabling them, + * but this should be harmless. 
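Condensed, the lost-wakeup avoidance the macb comment describes looks as follows; the example_* helpers are placeholders for macb's IER/IDR writes and RSR read:

#include <linux/netdevice.h>

struct example_priv;					/* driver state, opaque here */
void example_enable_rx_irq(struct example_priv *bp);	/* IER write */
void example_disable_rx_irq(struct example_priv *bp);	/* IDR write */
bool example_rx_pending(struct example_priv *bp);	/* RSR read */

static int example_poll_tail(struct example_priv *bp,
			     struct napi_struct *napi,
			     int work_done, int budget)
{
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		example_enable_rx_irq(bp);
		/* Status bits set while the interrupt was masked raise no
		 * new interrupt, so re-read and reschedule by hand.
		 */
		if (unlikely(example_rx_pending(bp))) {
			example_disable_rx_irq(bp);
			napi_schedule(napi);
		}
	}
	return work_done;
}

As the patch comment notes, racing the interrupt handler at this point is harmless: scheduling an already-scheduled NAPI instance is a no-op.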
+ */ + status = macb_readl(bp, RSR); + if (unlikely(status)) { + queue_writel(queue, IDR, bp->rx_intr_mask); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(RCOMP)); + napi_schedule(napi); + } } } @@ -4538,7 +4561,7 @@ static int macb_probe(struct platform_device *pdev) #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { - dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); bp->hw_dma_cap |= HW_DMA_CAP_64B; } #endif diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 7ff31d1026fb27a4edc8af073b36927c675c8fe7..e0d34e64fc6cb72d396a6505c5c4028b2cf81a19 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -3678,6 +3678,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10); adapter->params.pci.vpd_cap_addr = pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD); + if (!adapter->params.pci.vpd_cap_addr) + return -ENODEV; ret = get_vpd_params(adapter, &adapter->params.vpd); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c index d04a6c1634452034217e9dc4ab8771bf02899b86..da8d10475a08e6e23c466b6f62637f7c840e86bd 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c @@ -32,6 +32,7 @@ #include #include +#include #include #include @@ -99,7 +100,7 @@ cxgb_find_route(struct cxgb4_lld_info *lldi, rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, peer_port, local_port, IPPROTO_TCP, - tos, 0); + tos & ~INET_ECN_MASK, 0); if (IS_ERR(rt)) return NULL; n = dst_neigh_lookup(&rt->dst, &peer_ip); diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index b08029245ce89eb9e5be524c1793f789feff88aa..368587864a7aba1042ad9ea5170c364dc1cf8959 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -305,21 +305,21 @@ static void gmac_speed_set(struct net_device *netdev) switch (phydev->speed) { case 1000: status.bits.speed = GMAC_SPEED_1000; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_1000; netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n", phydev_name(phydev)); break; case 100: status.bits.speed = GMAC_SPEED_100; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n", phydev_name(phydev)); break; case 10: status.bits.speed = GMAC_SPEED_10; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_mode_is_rgmii(phydev->interface)) status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n", phydev_name(phydev)); @@ -389,6 +389,9 @@ static int gmac_setup_phy(struct net_device *netdev) status.bits.mii_rmii = GMAC_PHY_GMII; break; case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: netdev_dbg(netdev, "RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n"); status.bits.mii_rmii = GMAC_PHY_RGMII_100_10; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 
b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index f06d88c471d0f0c8f8fd7e526f1a7423fd55f5a6..d89ddc165ec24f757aca4be47e8bfcfdd2803b57 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4225,7 +4225,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) } INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp); - + mutex_init(&priv->onestep_tstamp_lock); skb_queue_head_init(&priv->tx_skbs); /* Obtain a MC portal */ @@ -4405,12 +4405,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) #ifdef CONFIG_DEBUG_FS dpaa2_dbg_remove(priv); #endif + + unregister_netdev(net_dev); rtnl_lock(); dpaa2_eth_disconnect_mac(priv); rtnl_unlock(); - unregister_netdev(net_dev); - dpaa2_eth_dl_port_del(priv); dpaa2_eth_dl_traps_unregister(priv); dpaa2_eth_dl_unregister(priv); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index 32b5faa87bb8dbcef676f744adf0f4f4ece4ba61..208a3459f2e2924b50d50d9efbe04b2f1544d02f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -168,7 +168,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) base = of_iomap(node, 0); if (!base) { err = -ENOMEM; - goto err_close; + goto err_put; } err = fsl_mc_allocate_irqs(mc_dev); @@ -212,6 +212,8 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) fsl_mc_free_irqs(mc_dev); err_unmap: iounmap(base); +err_put: + of_node_put(node); err_close: dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); err_free_mcp: diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 37f9554618e9b04cc49d9fb8b489540d7ece80f6..6571107bf291cd9d15df78790767ee67c787a099 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -657,7 +657,10 @@ static int enetc_get_ts_info(struct net_device *ndev, #ifdef CONFIG_FSL_ENETC_PTP_CLOCK info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; + SOF_TIMESTAMPING_RAW_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 901749a7a318b7e001c7b31c9c6e41707588bb69..6eeccc11b76efc4381aa38b83461e5410ff957c9 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -94,14 +94,17 @@ static void mac_exception(void *handle, enum fman_mac_exceptions ex) __func__, ex); } -static void set_fman_mac_params(struct mac_device *mac_dev, - struct fman_mac_params *params) +static int set_fman_mac_params(struct mac_device *mac_dev, + struct fman_mac_params *params) { struct mac_priv_s *priv = mac_dev->priv; params->base_addr = (typeof(params->base_addr)) devm_ioremap(priv->dev, mac_dev->res->start, resource_size(mac_dev->res)); + if (!params->base_addr) + return -ENOMEM; + memcpy(¶ms->addr, mac_dev->addr, sizeof(mac_dev->addr)); params->max_speed = priv->max_speed; params->phy_if = mac_dev->phy_if; @@ -112,6 +115,8 @@ static void set_fman_mac_params(struct mac_device *mac_dev, params->event_cb = mac_exception; params->dev_id = mac_dev; params->internal_phy_node = priv->internal_phy_node; + + return 0; } static int 
tgec_initialization(struct mac_device *mac_dev) @@ -123,7 +128,9 @@ static int tgec_initialization(struct mac_device *mac_dev) priv = mac_dev->priv; - set_fman_mac_params(mac_dev, ¶ms); + err = set_fman_mac_params(mac_dev, ¶ms); + if (err) + goto _return; mac_dev->fman_mac = tgec_config(¶ms); if (!mac_dev->fman_mac) { @@ -169,7 +176,9 @@ static int dtsec_initialization(struct mac_device *mac_dev) priv = mac_dev->priv; - set_fman_mac_params(mac_dev, ¶ms); + err = set_fman_mac_params(mac_dev, ¶ms); + if (err) + goto _return; mac_dev->fman_mac = dtsec_config(¶ms); if (!mac_dev->fman_mac) { @@ -218,7 +227,9 @@ static int memac_initialization(struct mac_device *mac_dev) priv = mac_dev->priv; - set_fman_mac_params(mac_dev, ¶ms); + err = set_fman_mac_params(mac_dev, ¶ms); + if (err) + goto _return; if (priv->max_speed == SPEED_10000) params.phy_if = PHY_INTERFACE_MODE_XGMII; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index ff756265d58faf2e2d5af0e05e7a41d927ee8745..9a2c16d69e2c1ae7657811cd6060d4187ee719b7 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1464,6 +1464,7 @@ static int gfar_get_ts_info(struct net_device *dev, ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp"); if (ptp_node) { ptp_dev = of_find_device_by_node(ptp_node); + of_node_put(ptp_node); if (ptp_dev) ptp = platform_get_drvdata(ptp_dev); } diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index bfa2826c55454db2586f8dfde2799989db243a46..b7984a772e12d61916d0a8d4a8513631e82a2c5d 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -49,6 +49,7 @@ struct tgec_mdio_controller { struct mdio_fsl_priv { struct tgec_mdio_controller __iomem *mdio_base; bool is_little_endian; + bool has_a009885; bool has_a011043; }; @@ -184,10 +185,10 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) { struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; struct tgec_mdio_controller __iomem *regs = priv->mdio_base; + unsigned long flags; uint16_t dev_addr; uint32_t mdio_stat; uint32_t mdio_ctl; - uint16_t value; int ret; bool endian = priv->is_little_endian; @@ -219,12 +220,18 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) return ret; } + if (priv->has_a009885) + /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we + * must read back the data register within 16 MDC cycles. 
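The A009885 workaround above is a bounded-latency critical section: on affected parts only, local interrupts are masked from just before the read is started until the data register has been read back, and every error path funnels through the restore. In skeleton form, with the register accesses hidden behind placeholder helpers:

#include <linux/irqflags.h>

struct example_mdio {
	bool has_errata;	/* set from a DT erratum property */
};

int example_start_and_wait(struct example_mdio *priv);	/* ctl write + BSY poll */
int example_read_data(struct example_mdio *priv);	/* data register read */

static int example_timed_read(struct example_mdio *priv)
{
	unsigned long flags;
	int ret;

	if (priv->has_errata)
		local_irq_save(flags);	/* bound BSY-clear -> read-back latency */

	ret = example_start_and_wait(priv);
	if (ret)
		goto out;

	ret = example_read_data(priv);	/* must land within 16 MDC cycles */
out:
	if (priv->has_errata)
		local_irq_restore(flags);	/* restored on every exit path */
	return ret;
}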
+ */ + local_irq_save(flags); + /* Initiate the read */ xgmac_write32(mdio_ctl | MDIO_CTL_READ, ®s->mdio_ctl, endian); ret = xgmac_wait_until_done(&bus->dev, regs, endian); if (ret) - return ret; + goto irq_restore; /* Return all Fs if nothing was there */ if ((xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) && @@ -232,13 +239,17 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) dev_dbg(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); - return 0xffff; + ret = 0xffff; + } else { + ret = xgmac_read32(®s->mdio_data, endian) & 0xffff; + dev_dbg(&bus->dev, "read %04x\n", ret); } - value = xgmac_read32(®s->mdio_data, endian) & 0xffff; - dev_dbg(&bus->dev, "read %04x\n", value); +irq_restore: + if (priv->has_a009885) + local_irq_restore(flags); - return value; + return ret; } static int xgmac_mdio_probe(struct platform_device *pdev) @@ -282,6 +293,8 @@ static int xgmac_mdio_probe(struct platform_device *pdev) priv->is_little_endian = device_property_read_bool(&pdev->dev, "little-endian"); + priv->has_a009885 = device_property_read_bool(&pdev->dev, + "fsl,erratum-a009885"); priv->has_a011043 = device_property_read_bool(&pdev->dev, "fsl,erratum-a011043"); @@ -307,9 +320,10 @@ static int xgmac_mdio_probe(struct platform_device *pdev) static int xgmac_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); + struct mdio_fsl_priv *priv = bus->priv; mdiobus_unregister(bus); - iounmap(bus->priv); + iounmap(priv->mdio_base); mdiobus_free(bus); return 0; diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 6009d76e41fc4f26e6b3481aa3d533d76441de0e..67f2b9a61463ad38ef806f406b7bbd9fed6bb596 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -141,7 +141,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status) */ static int gve_adminq_kick_and_wait(struct gve_priv *priv) { - u32 tail, head; + int tail, head; int i; tail = ioread32be(&priv->reg_bar0->adminq_event_counter); diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index b668df6193be4ede325b6baf9101cdea4f95f160..7d4ae467f3ad413c42f39f6b05db3a4ab0d2475a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */ HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */ HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */ + HCLGE_MBX_GET_RING_VECTOR_MAP, /* (VF -> PF) get ring-to-vector map */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ @@ -92,8 +93,8 @@ struct hclge_ring_chain_param { struct hclge_basic_info { u8 hw_tc_map; u8 rsv; - u16 mbx_api_version; - u32 pf_caps; + __le16 mbx_api_version; + __le32 pf_caps; }; struct hclgevf_mbx_resp_status { @@ -134,11 +135,20 @@ struct hclge_vf_to_pf_msg { }; struct hclge_pf_to_vf_msg { - u16 code; - u16 vf_mbx_msg_code; - u16 vf_mbx_msg_subcode; - u16 resp_status; - u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + __le16 code; + union { + /* used for mbx response */ + struct { + __le16 vf_mbx_msg_code; + __le16 vf_mbx_msg_subcode; + __le16 resp_status; + u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; + }; + /* used for general mbx */ + struct { + u8 
msg_data[HCLGE_MBX_MAX_MSG_SIZE]; + }; + }; }; struct hclge_mbx_vf_to_pf_cmd { @@ -148,7 +158,7 @@ struct hclge_mbx_vf_to_pf_cmd { u8 rsv1[1]; u8 msg_len; u8 rsv2; - u16 match_id; + __le16 match_id; struct hclge_vf_to_pf_msg msg; }; @@ -159,7 +169,7 @@ struct hclge_mbx_pf_to_vf_cmd { u8 rsv[3]; u8 msg_len; u8 rsv1; - u16 match_id; + __le16 match_id; struct hclge_pf_to_vf_msg msg; }; @@ -169,6 +179,49 @@ struct hclge_vf_rst_cmd { u8 rsv[22]; }; +#pragma pack(1) +struct hclge_mbx_link_status { + __le16 link_status; + __le32 speed; + __le16 duplex; + u8 flag; +}; + +struct hclge_mbx_link_mode { + __le16 idx; + __le64 link_mode; +}; + +struct hclge_mbx_port_base_vlan { + __le16 state; + __le16 vlan_proto; + __le16 qos; + __le16 vlan_tag; +}; + +struct hclge_mbx_vf_queue_info { + __le16 num_tqps; + __le16 rss_size; + __le16 rx_buf_len; +}; + +struct hclge_mbx_vf_queue_depth { + __le16 num_tx_desc; + __le16 num_rx_desc; +}; + +struct hclge_mbx_vlan_filter { + u8 is_kill; + __le16 vlan_id; + __le16 proto; +}; + +struct hclge_mbx_mtu_info { + __le32 mtu; +}; + +#pragma pack() + /* used by VF to store the received Async responses from PF */ struct hclgevf_mbx_arq_ring { #define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8 @@ -177,7 +230,7 @@ struct hclgevf_mbx_arq_ring { u32 head; u32 tail; atomic_t count; - u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; + __le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; }; #define hclge_mbx_ring_ptr_move_crq(crq) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index cd0ef12b6e85f9b8c2aca8b50270fe38c6dcb3ff..1eaea162d00e4477bd0302f71bf31cf14fdbb914 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -95,6 +95,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, + HNAE3_DEV_SUPPORT_CQ_B, }; #define hnae3_dev_fd_supported(hdev) \ @@ -154,6 +155,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \ test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps) +#define hnae3_ae_dev_cq_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; @@ -536,6 +540,8 @@ struct hnae3_ae_dev { * Get 1588 rx hwstamp * get_ts_info * Get phc info + * clean_vf_config + * Clean residual vf info after disable sriov */ struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); @@ -729,6 +735,7 @@ struct hnae3_ae_ops { struct ethtool_ts_info *info); int (*get_link_diagnosis_info)(struct hnae3_handle *handle, u32 *status_code); + void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs); }; struct hnae3_dcb_ops { @@ -761,6 +768,7 @@ struct hnae3_tc_info { u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ u16 tqp_count[HNAE3_MAX_TC]; u16 tqp_offset[HNAE3_MAX_TC]; + u8 max_tc; /* Total number of TCs */ u8 num_tc; /* Total number of enabled TCs */ bool mqprio_active; }; @@ -840,6 +848,9 @@ struct hnae3_handle { u8 netdev_flags; struct dentry *hnae3_dbgfs; + /* protects concurrent contention between debugfs commands */ + struct mutex dbgfs_lock; + char **dbgfs_buf; /* Network interface message level enabled bits */ u32 msg_enable; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index 
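With the mailbox fields now declared __le16/__le32 (and the wire structs byte-packed via #pragma pack(1)), CPU-side users must go through the byte-order helpers; sparse flags any direct access. An illustrative accessor, assuming the struct declared above:

static u16 example_mbx_match_id(const struct hclge_mbx_vf_to_pf_cmd *req)
{
	/* match_id is little-endian on the wire; convert before use */
	return le16_to_cpu(req->match_id);
}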
c15ca710dabb826c0d3cb2f2d8fdb61812a207d8..c8b151d29f53ba5df2e269937f25a642951db457 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -149,6 +149,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = { {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B}, {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B}, + {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B}, }; static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { @@ -160,6 +161,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B}, {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B}, {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, + {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B}, }; static void diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h index 876650eddac4242872ee66b76933ceeb7e3bbbe8..7a7d4cf9bf35d5c45e8b479633e3b2f040a78a3e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h @@ -338,6 +338,7 @@ enum HCLGE_COMM_CAP_BITS { HCLGE_COMM_CAP_PAUSE_B = 14, HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15, HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17, + HCLGE_COMM_CAP_CQ_B = 18, }; enum HCLGE_COMM_API_CAP_BITS { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h index aa1d7a6ff4ca20030d14b41397f1b1111cc24479..946d166a452db20bde87d504dadbc407296b1475 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h @@ -106,7 +106,7 @@ int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg, void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key, u8 *hfunc); void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg, - u32 *indir, __le16 rss_ind_tbl_size); + u32 *indir, u16 rss_ind_tbl_size); int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, const u8 *key); int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c index 0c60f41fca8a6f25446131feaa47ca04e294c837..f3c9395d8351cb31108973e1867332e00026c6ac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c @@ -75,7 +75,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, ret = hclge_comm_cmd_send(hw, &desc, 1); if (ret) { dev_err(&hw->cmq.csq.pdev->dev, - "failed to get tqp stat, ret = %d, tx = %u.\n", + "failed to get tqp stat, ret = %d, rx = %u.\n", ret, i); return ret; } @@ -89,7 +89,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, ret = hclge_comm_cmd_send(hw, &desc, 1); if (ret) { dev_err(&hw->cmq.csq.pdev->dev, - "failed to get tqp stat, ret = %d, rx = %u.\n", + "failed to get tqp stat, ret = %d, tx = %u.\n", ret, i); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 
817e2e8a7287e9cb793f6ef4f42bb9cf5f242ec5..93aeb615191d9064aee2be7e1f21c0003fb75322 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -562,12 +562,12 @@ static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf, for (i = 0; i < ring_num; i++) { j = 0; - sprintf(result[j++], "%8u", i); - sprintf(result[j++], "%9u", ring->tx_copybreak); - sprintf(result[j++], "%3u", tx_spare->len); - sprintf(result[j++], "%3u", tx_spare->next_to_use); - sprintf(result[j++], "%3u", tx_spare->next_to_clean); - sprintf(result[j++], "%3u", tx_spare->last_to_clean); + sprintf(result[j++], "%u", i); + sprintf(result[j++], "%u", ring->tx_copybreak); + sprintf(result[j++], "%u", tx_spare->len); + sprintf(result[j++], "%u", tx_spare->next_to_use); + sprintf(result[j++], "%u", tx_spare->next_to_clean); + sprintf(result[j++], "%u", tx_spare->last_to_clean); sprintf(result[j++], "%pad", &tx_spare->dma); hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items, @@ -598,35 +598,35 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring, u32 base_add_l, base_add_h; u32 j = 0; - sprintf(result[j++], "%8u", index); + sprintf(result[j++], "%u", index); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BD_NUM_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BD_LEN_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_PKTNUM_RECORD_REG)); - sprintf(result[j++], "%9u", ring->rx_copybreak); + sprintf(result[j++], "%u", ring->rx_copybreak); - sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_EN_REG) ? "on" : "off"); if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) - sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_EN_REG) ? 
"on" : "off"); else - sprintf(result[j++], "%10s", "NA"); + sprintf(result[j++], "%s", "NA"); base_add_h = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BASEADDR_H_REG); @@ -700,36 +700,36 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring, u32 base_add_l, base_add_h; u32 j = 0; - sprintf(result[j++], "%8u", index); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", index); + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_BD_NUM_REG)); - sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG)); - sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_FBDNUM_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_OFFSET_REG)); - sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_PKTNUM_RECORD_REG)); - sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_EN_REG) ? "on" : "off"); if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) - sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base + + sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_EN_REG) ? 
"on" : "off"); else - sprintf(result[j++], "%10s", "NA"); + sprintf(result[j++], "%s", "NA"); base_add_h = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_BASEADDR_H_REG); @@ -848,15 +848,15 @@ static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv, { unsigned int j = 0; - sprintf(result[j++], "%5d", idx); + sprintf(result[j++], "%d", idx); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info)); - sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len)); - sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size)); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id)); - sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag)); - sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb)); - sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb)); + sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag)); sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info)); if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { u32 ol_info = le32_to_cpu(desc->rx.ol_info); @@ -930,19 +930,19 @@ static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv, { unsigned int j = 0; - sprintf(result[j++], "%6d", idx); + sprintf(result[j++], "%d", idx); sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size)); sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.type_cs_vlan_tso_len)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv)); - sprintf(result[j++], "%10u", + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv)); + sprintf(result[j++], "%u", le32_to_cpu(desc->tx.ol_type_vlan_len_msec)); sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs)); sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri)); - sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss_hw_csum)); + sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum)); } static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len) @@ -1226,7 +1226,8 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, if (ret) return ret; - save_buf = &hns3_dbg_cmd[index].buf; + mutex_lock(&handle->dbgfs_lock); + save_buf = &handle->dbgfs_buf[index]; if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) { @@ -1238,15 +1239,15 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, read_buf = *save_buf; } else { read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL); - if (!read_buf) - return -ENOMEM; + if (!read_buf) { + ret = -ENOMEM; + goto out; + } /* save the buffer addr until the last read operation */ *save_buf = read_buf; - } - /* get data ready for the first time to read */ - if (!*ppos) { + /* get data ready for the first time to read */ ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd, read_buf, 
hns3_dbg_cmd[index].buf_len); if (ret) @@ -1255,8 +1256,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, size = simple_read_from_buffer(buffer, count, ppos, read_buf, strlen(read_buf)); - if (size > 0) + if (size > 0) { + mutex_unlock(&handle->dbgfs_lock); return size; + } out: /* free the buffer for the last read operation */ @@ -1265,6 +1268,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, *save_buf = NULL; } + mutex_unlock(&handle->dbgfs_lock); return ret; } @@ -1328,6 +1332,13 @@ int hns3_dbg_init(struct hnae3_handle *handle) int ret; u32 i; + handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev, + ARRAY_SIZE(hns3_dbg_cmd), + sizeof(*handle->dbgfs_buf), + GFP_KERNEL); + if (!handle->dbgfs_buf) + return -ENOMEM; + hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry = debugfs_create_dir(name, hns3_dbgfs_root); handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry; @@ -1337,6 +1348,8 @@ int hns3_dbg_init(struct hnae3_handle *handle) debugfs_create_dir(hns3_dbg_dentry[i].name, handle->hnae3_dbgfs); + mutex_init(&handle->dbgfs_lock); + for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) { if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES && ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) || @@ -1363,6 +1376,7 @@ int hns3_dbg_init(struct hnae3_handle *handle) return 0; out: + mutex_destroy(&handle->dbgfs_lock); debugfs_remove_recursive(handle->hnae3_dbgfs); handle->hnae3_dbgfs = NULL; return ret; @@ -1373,11 +1387,12 @@ void hns3_dbg_uninit(struct hnae3_handle *handle) u32 i; for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) - if (hns3_dbg_cmd[i].buf) { - kvfree(hns3_dbg_cmd[i].buf); - hns3_dbg_cmd[i].buf = NULL; + if (handle->dbgfs_buf[i]) { + kvfree(handle->dbgfs_buf[i]); + handle->dbgfs_buf[i] = NULL; } + mutex_destroy(&handle->dbgfs_lock); debugfs_remove_recursive(handle->hnae3_dbgfs); handle->hnae3_dbgfs = NULL; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h index 83aa1450ab9fe383bd31e979a885131329e4bf6a..97578eabb7d8b7a2defc1ac1422bb77f29b1f6bf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -49,7 +49,6 @@ struct hns3_dbg_cmd_info { enum hnae3_dbg_cmd cmd; enum hns3_dbg_dentry_type dentry; u32 buf_len; - char *buf; int (*init)(struct hnae3_handle *handle, unsigned int cmd); }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index edaf84e7fc855b7a72ec514b4dbb19744b09dc2f..2174b5756b07be316de3fe7d3ea347d6f209ce6e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1027,46 +1027,56 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) { + u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; struct hns3_tx_spare *tx_spare; struct page *page; - u32 alloc_size; dma_addr_t dma; int order; - alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; if (!alloc_size) return; order = get_order(alloc_size); + if (order >= MAX_ORDER) { + if (net_ratelimit()) + dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n"); + return; + } + tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare), GFP_KERNEL); if (!tx_spare) { /* The driver still work without the tx spare buffer */ dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n"); - return; + goto 
devm_kzalloc_error; } page = alloc_pages_node(dev_to_node(ring_to_dev(ring)), GFP_KERNEL, order); if (!page) { dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n"); - devm_kfree(ring_to_dev(ring), tx_spare); - return; + goto alloc_pages_error; } dma = dma_map_page(ring_to_dev(ring), page, 0, PAGE_SIZE << order, DMA_TO_DEVICE); if (dma_mapping_error(ring_to_dev(ring), dma)) { dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n"); - put_page(page); - devm_kfree(ring_to_dev(ring), tx_spare); - return; + goto dma_mapping_error; } tx_spare->dma = dma; tx_spare->buf = page_address(page); tx_spare->len = PAGE_SIZE << order; ring->tx_spare = tx_spare; + return; + +dma_mapping_error: + put_page(page); +alloc_pages_error: + devm_kfree(ring_to_dev(ring), tx_spare); +devm_kzalloc_error: + ring->tqp->handle->kinfo.tx_spare_buf_size = 0; } /* Use hns3_tx_spare_space() to make sure there is enough buffer @@ -3049,6 +3059,21 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; } +/** + * hns3_clean_vf_config + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs allocated + * + * Clean residual vf config after disable sriov + **/ +static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + if (ae_dev->ops->clean_vf_config) + ae_dev->ops->clean_vf_config(ae_dev, num_vfs); +} + /* hns3_remove - Device removal routine * @pdev: PCI device information struct */ @@ -3087,7 +3112,10 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) else return num_vfs; } else if (!pci_vfs_assigned(pdev)) { + int num_vfs_pre = pci_num_vf(pdev); + pci_disable_sriov(pdev); + hns3_clean_vf_config(pdev, num_vfs_pre); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); @@ -5130,10 +5158,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, priv->tqp_vector[i].rx_group.dim.mode = mode; } - /* only device version above V3(include V3), GL can switch CQ/EQ - * period mode. 
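The reworked tx-spare allocator above is the usual goto unwind ladder: each label undoes exactly what the steps before it acquired, in reverse order, and the final label zeroes the requested size so the rest of the driver sees a consistent "no spare buffer" state. Reduced to its shape, with illustrative types and names:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

struct example_spare {
	dma_addr_t dma;
	void *buf;
	u32 len;
};

struct example_ring {
	struct example_spare *spare;
	u32 requested_size;	/* stands in for kinfo.tx_spare_buf_size */
};

static void example_init_spare(struct device *dev, struct example_ring *ring,
			       int node, int order)
{
	struct example_spare *spare;
	struct page *page;
	dma_addr_t dma;

	spare = devm_kzalloc(dev, sizeof(*spare), GFP_KERNEL);
	if (!spare)
		goto err_meta;

	page = alloc_pages_node(node, GFP_KERNEL, order);
	if (!page)
		goto err_pages;

	dma = dma_map_page(dev, page, 0, PAGE_SIZE << order, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto err_map;

	spare->dma = dma;
	spare->buf = page_address(page);
	spare->len = PAGE_SIZE << order;
	ring->spare = spare;
	return;

err_map:
	put_page(page);			/* undo alloc_pages_node() */
err_pages:
	devm_kfree(dev, spare);		/* undo devm_kzalloc() */
err_meta:
	ring->requested_size = 0;	/* consistent "disabled" state */
}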
- */ - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { + if (hnae3_ae_dev_cq_supported(ae_dev)) { u32 new_mode; u64 reg; @@ -5177,6 +5202,13 @@ static void hns3_state_init(struct hnae3_handle *handle) set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); } +static void hns3_state_uninit(struct hnae3_handle *handle) +{ + struct hns3_nic_priv *priv = handle->priv; + + clear_bit(HNS3_NIC_STATE_INITED, &priv->state); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; @@ -5294,7 +5326,9 @@ static int hns3_client_init(struct hnae3_handle *handle) return ret; out_reg_netdev_fail: + hns3_state_uninit(handle); hns3_dbg_uninit(handle); + hns3_client_stop(handle); out_client_start: hns3_free_rx_cpu_rmap(netdev); hns3_nic_uninit_irq(priv); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index a7cf5fee9f4822db829c230f1519f418397e692e..295733e1bbc4b211c5b2d4dd463df01955ada44e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -670,8 +670,8 @@ static void hns3_get_ringparam(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; int rx_queue_index = h->kinfo.num_tqps; - if (hns3_nic_resetting(netdev)) { - netdev_err(netdev, "dev resetting!"); + if (hns3_nic_resetting(netdev) || !priv->ring) { + netdev_err(netdev, "failed to get ringparam value, due to dev resetting or uninited\n"); return; } @@ -1091,8 +1091,14 @@ static int hns3_check_ringparam(struct net_device *ndev, { #define RX_BUF_LEN_2K 2048 #define RX_BUF_LEN_4K 4096 - if (hns3_nic_resetting(ndev)) + + struct hns3_nic_priv *priv = netdev_priv(ndev); + + if (hns3_nic_resetting(ndev) || !priv->ring) { + netdev_err(ndev, "failed to set ringparam value, due to dev resetting or uninited\n"); return -EBUSY; + } + if (param->rx_mini_pending || param->rx_jumbo_pending) return -EINVAL; @@ -1115,6 +1121,36 @@ static int hns3_check_ringparam(struct net_device *ndev, return 0; } +static bool +hns3_is_ringparam_changed(struct net_device *ndev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct hns3_ring_param *old_ringparam, + struct hns3_ring_param *new_ringparam) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + u16 queue_num = h->kinfo.num_tqps; + + new_ringparam->tx_desc_num = ALIGN(param->tx_pending, + HNS3_RING_BD_MULTIPLE); + new_ringparam->rx_desc_num = ALIGN(param->rx_pending, + HNS3_RING_BD_MULTIPLE); + old_ringparam->tx_desc_num = priv->ring[0].desc_num; + old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num; + old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size; + new_ringparam->rx_buf_len = kernel_param->rx_buf_len; + + if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num && + old_ringparam->rx_desc_num == new_ringparam->rx_desc_num && + old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) { + netdev_info(ndev, "descriptor number and rx buffer length not changed\n"); + return false; + } + + return true; +} + static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len) { struct hns3_nic_priv *priv = netdev_priv(ndev); @@ -1136,57 +1172,47 @@ static int hns3_set_ringparam(struct net_device *ndev, struct kernel_ethtool_ringparam *kernel_param, struct netlink_ext_ack *extack) { + struct hns3_ring_param old_ringparam, new_ringparam; struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = 
priv->ae_handle; struct hns3_enet_ring *tmp_rings; bool if_running = netif_running(ndev); - u32 old_tx_desc_num, new_tx_desc_num; - u32 old_rx_desc_num, new_rx_desc_num; - u16 queue_num = h->kinfo.num_tqps; - u32 old_rx_buf_len; int ret, i; ret = hns3_check_ringparam(ndev, param, kernel_param); if (ret) return ret; - /* Hardware requires that its descriptors must be multiple of eight */ - new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); - new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); - old_tx_desc_num = priv->ring[0].desc_num; - old_rx_desc_num = priv->ring[queue_num].desc_num; - old_rx_buf_len = priv->ring[queue_num].buf_size; - if (old_tx_desc_num == new_tx_desc_num && - old_rx_desc_num == new_rx_desc_num && - kernel_param->rx_buf_len == old_rx_buf_len) + if (!hns3_is_ringparam_changed(ndev, param, kernel_param, + &old_ringparam, &new_ringparam)) return 0; tmp_rings = hns3_backup_ringparam(priv); if (!tmp_rings) { - netdev_err(ndev, - "backup ring param failed by allocating memory fail\n"); + netdev_err(ndev, "backup ring param failed by allocating memory fail\n"); return -ENOMEM; } netdev_info(ndev, - "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n", - old_tx_desc_num, old_rx_desc_num, - new_tx_desc_num, new_rx_desc_num, - old_rx_buf_len, kernel_param->rx_buf_len); + "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n", + old_ringparam.tx_desc_num, old_ringparam.rx_desc_num, + new_ringparam.tx_desc_num, new_ringparam.rx_desc_num, + old_ringparam.rx_buf_len, new_ringparam.rx_buf_len); if (if_running) ndev->netdev_ops->ndo_stop(ndev); - hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num); - hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len); + hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num, + new_ringparam.rx_desc_num); + hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len); ret = hns3_init_all_ring(priv); if (ret) { netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n", ret); - hns3_change_rx_buf_len(ndev, old_rx_buf_len); - hns3_change_all_ring_bd_num(priv, old_tx_desc_num, - old_rx_desc_num); + hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len); + hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num, + old_ringparam.rx_desc_num); for (i = 0; i < h->kinfo.num_tqps * 2; i++) memcpy(&priv->ring[i], &tmp_rings[i], sizeof(struct hns3_enet_ring)); @@ -1396,11 +1422,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev, return 0; } -static int hns3_check_coalesce_para(struct net_device *netdev, - struct ethtool_coalesce *cmd) +static int +hns3_check_cqe_coalesce_param(struct net_device *netdev, + struct kernel_ethtool_coalesce *kernel_coal) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + + if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) && + !hnae3_ae_dev_cq_supported(ae_dev)) { + netdev_err(netdev, "coalesced cqe mode is not supported\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int +hns3_check_coalesce_para(struct net_device *netdev, + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal) { int ret; + ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal); + if (ret) + return ret; + ret = hns3_check_gl_coalesce_para(netdev, cmd); if (ret) { netdev_err(netdev, @@ -1475,7 +1523,7 @@ static int hns3_set_coalesce(struct net_device *netdev, if (hns3_nic_resetting(netdev)) return 
-EBUSY; - ret = hns3_check_coalesce_para(netdev, cmd); + ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal); if (ret) return ret; @@ -1783,9 +1831,6 @@ static int hns3_set_tx_spare_buf_size(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; int ret; - if (hns3_nic_resetting(netdev)) - return -EBUSY; - h->kinfo.tx_spare_buf_size = data; ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); @@ -1816,6 +1861,11 @@ static int hns3_set_tunable(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; int i, ret = 0; + if (hns3_nic_resetting(netdev) || !priv->ring) { + netdev_err(netdev, "failed to set tunable value, dev resetting!"); + return -EBUSY; + } + switch (tuna->id) { case ETHTOOL_TX_COPYBREAK: priv->tx_copybreak = *(u32 *)data; @@ -1834,22 +1884,30 @@ static int hns3_set_tunable(struct net_device *netdev, case ETHTOOL_TX_COPYBREAK_BUF_SIZE: old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size; new_tx_spare_buf_size = *(u32 *)data; + netdev_info(netdev, "request to set tx spare buf size from %u to %u\n", + old_tx_spare_buf_size, new_tx_spare_buf_size); ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size); - if (ret) { + if (ret || + (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) { int ret1; - netdev_warn(netdev, - "change tx spare buf size fail, revert to old value\n"); + netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n"); ret1 = hns3_set_tx_spare_buf_size(netdev, old_tx_spare_buf_size); if (ret1) { - netdev_err(netdev, - "revert to old tx spare buf size fail\n"); + netdev_err(netdev, "revert to old tx spare buf size fail\n"); return ret1; } return ret; } + + if (!priv->ring->tx_spare) + netdev_info(netdev, "the active tx spare buf size is 0, disable tx spare buffer\n"); + else + netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n", + priv->ring->tx_spare->len); + break; default: ret = -EOPNOTSUPP; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h index 822d6fcbc73b8f6f871d43d00779e5d95e2ab34d..da207d1d9aa93d1ce1bc95b58bf4aa9fe49d7f20 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h @@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping { u8 link_ext_substate; }; +struct hns3_ring_param { + u32 tx_desc_num; + u32 rx_desc_num; + u32 rx_buf_len; +}; + #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 42a9e73d8588958cfffe0c85c691b4c839449105..6efd768cc07cffee1cac345276bd65a0966823fb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, * @num: number of extended command structures * * This function handles all the PF RAS errors in the - * hw register/s using command. + * hw registers using command. 
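
The ETHTOOL_TX_COPYBREAK_BUF_SIZE hunk above follows an apply-then-rollback shape: program the new spare-buffer size, and if the change fails (or yields no usable buffer) restore the previous size, reporting a failed revert separately. A minimal user-space C sketch of the same pattern, assuming hypothetical apply_size() and set_size_with_rollback() helpers rather than the driver's real API:

#include <stdio.h>

struct device {
        unsigned int buf_size;
};

/* Hypothetical setter: returns 0 on success, -1 on failure. */
static int apply_size(struct device *dev, unsigned int size)
{
        if (size > 4096)        /* pretend sizes above 4 KiB cannot be allocated */
                return -1;
        dev->buf_size = size;
        return 0;
}

static int set_size_with_rollback(struct device *dev, unsigned int new_size)
{
        unsigned int old_size = dev->buf_size;

        if (apply_size(dev, new_size) == 0)
                return 0;

        fprintf(stderr, "applying %u failed, reverting to %u\n",
                new_size, old_size);
        /* The revert itself can fail, which is reported separately. */
        if (apply_size(dev, old_size) != 0) {
                fprintf(stderr, "revert to %u failed\n", old_size);
                return -2;
        }
        return -1;
}

int main(void)
{
        struct device dev = { .buf_size = 1024 };

        set_size_with_rollback(&dev, 8192);     /* fails, reverts to 1024 */
        printf("buf_size = %u\n", dev.buf_size);
        return 0;
}
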
*/ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, struct hclge_desc *desc, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 1e4c89d4b96b68ec89e3496f8275e314149b19d6..e2edf6e3cb2078cd06f7cb8a9136e1030292ba50 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1546,9 +1546,8 @@ static void hclge_init_tc_config(struct hclge_dev *hdev) static int hclge_configure(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - const struct cpumask *cpumask = cpu_online_mask; struct hclge_cfg cfg; - int node, ret; + int ret; ret = hclge_get_cfg(hdev, &cfg); if (ret) @@ -1594,13 +1593,6 @@ static int hclge_configure(struct hclge_dev *hdev) hclge_init_tc_config(hdev); hclge_init_kdump_kernel_config(hdev); - /* Set the affinity based on numa node */ - node = dev_to_node(&hdev->pdev->dev); - if (node != NUMA_NO_NODE) - cpumask = cpumask_of_node(node); - - cpumask_copy(&hdev->affinity_mask, cpumask); - return ret; } @@ -1872,6 +1864,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; vport->mps = HCLGE_MAC_DEFAULT_FRAME; vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; + vport->port_base_vlan_cfg.tbl_sta = true; vport->rxvlan_cfg.rx_vlan_offload_en = true; vport->req_vlan_fltr_en = true; INIT_LIST_HEAD(&vport->vlan_list); @@ -3275,7 +3268,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev) static int hclge_update_port_info(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; - int speed = HCLGE_MAC_SPEED_UNKNOWN; + int speed; int ret; /* get the port info from SFP cmd if not copper port */ @@ -3286,10 +3279,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (!hdev->support_sfp_query) return 0; - if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + speed = mac->speed; ret = hclge_get_sfp_info(hdev, mac); - else + } else { + speed = HCLGE_MAC_SPEED_UNKNOWN; ret = hclge_get_sfp_speed(hdev, &speed); + } if (ret == -EOPNOTSUPP) { hdev->support_sfp_query = false; @@ -3301,6 +3297,8 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { if (mac->speed_type == QUERY_ACTIVE_SPEED) { hclge_update_port_capability(hdev, mac); + if (mac->speed != speed) + (void)hclge_tm_port_shaper_cfg(hdev); return 0; } return hclge_cfg_mac_speed_dup(hdev, mac->speed, @@ -3383,6 +3381,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, link_state_old = vport->vf_info.link_state; vport->vf_info.link_state = link_state; + /* return success directly if the VF is unalive, VF will + * query link state itself when it starts work. 
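
hclge_update_port_info() above now snapshots the previous speed before requerying the SFP and touches the port shaper only when the value really changed, avoiding redundant hardware writes. The cache-and-compare idiom in a plain C sketch; query_speed() and reconfigure_shaper() are hypothetical stand-ins:

#include <stdio.h>

static unsigned int query_speed(void)
{
        return 25000;   /* pretend the firmware now reports 25G */
}

static void reconfigure_shaper(unsigned int speed)
{
        printf("reprogramming shaper for %u Mbit/s\n", speed);
}

static void update_port_info(unsigned int *cached_speed)
{
        unsigned int old = *cached_speed;

        *cached_speed = query_speed();
        if (*cached_speed != old)       /* skip redundant hardware writes */
                reconfigure_shaper(*cached_speed);
}

int main(void)
{
        unsigned int speed = 10000;

        update_port_info(&speed);       /* changed: reconfigures */
        update_port_info(&speed);       /* unchanged: no-op */
        return 0;
}
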
+ */ + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + return 0; + ret = hclge_push_vf_link_status(vport); if (ret) { vport->vf_info.link_state = link_state_old; @@ -3563,17 +3567,6 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev) hdev->num_msi_used += 1; } -static void hclge_misc_affinity_setup(struct hclge_dev *hdev) -{ - irq_set_affinity_hint(hdev->misc_vector.vector_irq, - &hdev->affinity_mask); -} - -static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) -{ - irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); -} - static int hclge_misc_irq_init(struct hclge_dev *hdev) { int ret; @@ -8438,12 +8431,11 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hclge_prepare_mac_addr(&req, addr, false); ret = hclge_remove_mac_vlan_tbl(vport, &req); - if (!ret) { + if (!ret || ret == -ENOENT) { mutex_lock(&hdev->vport_lock); hclge_update_umv_space(vport, true); mutex_unlock(&hdev->vport_lock); - } else if (ret == -ENOENT) { - ret = 0; + return 0; } return ret; @@ -8993,11 +8985,16 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, ether_addr_copy(vport->vf_info.mac, mac_addr); + /* there is a timewindow for PF to know VF unalive, it may + * cause send mailbox fail, but it doesn't matter, VF will + * query it when reinit. + */ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s, and it will be reinitialized!\n", vf, format_mac_addr); - return hclge_inform_reset_assert_to_vf(vport); + (void)hclge_inform_reset_assert_to_vf(vport); + return 0; } dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n", @@ -9818,19 +9815,28 @@ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, bool writen_to_tbl) { struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; - list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) - if (vlan->vlan_id == vlan_id) + mutex_lock(&hdev->vport_lock); + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + mutex_unlock(&hdev->vport_lock); return; + } + } vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); - if (!vlan) + if (!vlan) { + mutex_unlock(&hdev->vport_lock); return; + } vlan->hd_tbl_status = writen_to_tbl; vlan->vlan_id = vlan_id; list_add_tail(&vlan->node, &vport->vlan_list); + mutex_unlock(&hdev->vport_lock); } static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) @@ -9839,6 +9845,8 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) struct hclge_dev *hdev = vport->back; int ret; + mutex_lock(&hdev->vport_lock); + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { if (!vlan->hd_tbl_status) { ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), @@ -9848,12 +9856,16 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) dev_err(&hdev->pdev->dev, "restore vport vlan list failed, ret=%d\n", ret); + + mutex_unlock(&hdev->vport_lock); return ret; } } vlan->hd_tbl_status = true; } + mutex_unlock(&hdev->vport_lock); + return 0; } @@ -9863,6 +9875,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; + mutex_lock(&hdev->vport_lock); + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { if (vlan->vlan_id == vlan_id) { if (is_write_tbl && vlan->hd_tbl_status) @@ -9877,6 +9891,8 @@ static void 
hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, break; } } + + mutex_unlock(&hdev->vport_lock); } void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) @@ -9884,6 +9900,8 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; + mutex_lock(&hdev->vport_lock); + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { if (vlan->hd_tbl_status) hclge_set_vlan_filter_hw(hdev, @@ -9899,6 +9917,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) } } clear_bit(vport->vport_id, hdev->vf_vlan_full); + mutex_unlock(&hdev->vport_lock); } void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) @@ -9907,6 +9926,8 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) struct hclge_vport *vport; int i; + mutex_lock(&hdev->vport_lock); + for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { @@ -9914,37 +9935,61 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) kfree(vlan); } } + + mutex_unlock(&hdev->vport_lock); } -void hclge_restore_vport_vlan_table(struct hclge_vport *vport) +void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) { - struct hclge_vport_vlan_cfg *vlan, *tmp; - struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info *vlan_info; + struct hclge_vport *vport; u16 vlan_proto; u16 vlan_id; u16 state; + int vf_id; int ret; - vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto; - vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - state = vport->port_base_vlan_cfg.state; + /* PF should restore all vfs port base vlan */ + for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { + vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; + vlan_info = vport->port_base_vlan_cfg.tbl_sta ? 
+ &vport->port_base_vlan_cfg.vlan_info : + &vport->port_base_vlan_cfg.old_vlan_info; - if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { - clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); - hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), - vport->vport_id, vlan_id, - false); - return; + vlan_id = vlan_info->vlan_tag; + vlan_proto = vlan_info->vlan_proto; + state = vport->port_base_vlan_cfg.state; + + if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { + clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); + ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), + vport->vport_id, + vlan_id, false); + vport->port_base_vlan_cfg.tbl_sta = ret == 0; + } } +} - list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { - ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), - vport->vport_id, - vlan->vlan_id, false); - if (ret) - break; - vlan->hd_tbl_status = true; +void hclge_restore_vport_vlan_table(struct hclge_vport *vport) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + int ret; + + mutex_lock(&hdev->vport_lock); + + if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, false); + if (ret) + break; + vlan->hd_tbl_status = true; + } } + + mutex_unlock(&hdev->vport_lock); } /* For global reset and imp reset, hardware will clear the mac table, @@ -9984,6 +10029,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev) struct hnae3_handle *handle = &vport->nic; hclge_restore_mac_table_common(vport); + hclge_restore_vport_port_base_vlan_config(hdev); hclge_restore_vport_vlan_table(vport); set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); hclge_restore_fd_entries(handle); @@ -10040,6 +10086,8 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, false); } + vport->port_base_vlan_cfg.tbl_sta = false; + /* force add VLAN 0 */ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); if (ret) @@ -10080,6 +10128,7 @@ static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, if (ret) return ret; + vport->port_base_vlan_cfg.tbl_sta = false; /* remove old VLAN tag */ if (old_info->vlan_tag == 0) ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, @@ -10129,7 +10178,9 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, else nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info; vport->port_base_vlan_cfg.vlan_info = *vlan_info; + vport->port_base_vlan_cfg.tbl_sta = true; hclge_set_vport_vlan_fltr_change(vport); return 0; @@ -10197,14 +10248,17 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, return ret; } - /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based + /* there is a timewindow for PF to know VF unalive, it may + * cause send mailbox fail, but it doesn't matter, VF will + * query it when reinit. + * for DEVICE_VERSION_V3, vf doesn't need to know about the port based * VLAN state. 
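
The tbl_sta flag introduced above records whether the current port-based VLAN entry actually reached hardware; on restore the PF replays either the new or the old VLAN info accordingly, then re-arms the flag from the write result. A compact sketch of that bookkeeping, with hypothetical types (struct vport_cfg, hw_write_vlan) standing in for the driver's:

#include <stdbool.h>
#include <stdio.h>

struct vlan_info { unsigned int tag; };

struct vport_cfg {
        bool committed;                 /* did the last write reach HW? */
        struct vlan_info cur, old;
};

/* Hypothetical HW write: returns 0 on success. */
static int hw_write_vlan(unsigned int tag)
{
        printf("programming VLAN %u\n", tag);
        return 0;
}

static void restore_vlan(struct vport_cfg *cfg)
{
        /* Replay whichever entry is known-good for this vport. */
        const struct vlan_info *info = cfg->committed ? &cfg->cur : &cfg->old;

        cfg->committed = (hw_write_vlan(info->tag) == 0);
}

int main(void)
{
        struct vport_cfg cfg = { .committed = false,
                                 .cur = { 100 }, .old = { 10 } };

        restore_vlan(&cfg);     /* replays the old entry (tag 10) */
        return 0;
}
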
*/ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 && test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) - hclge_push_vf_port_base_vlan_info(&hdev->vport[0], - vport->vport_id, state, - &vlan_info); + (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, + state, &vlan_info); return 0; } @@ -10262,11 +10316,11 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, } if (!ret) { - if (is_kill) - hclge_rm_vport_vlan_table(vport, vlan_id, false); - else + if (!is_kill) hclge_add_vport_vlan_table(vport, vlan_id, writen_to_tbl); + else if (is_kill && vlan_id != 0) + hclge_rm_vport_vlan_table(vport, vlan_id, false); } else if (is_kill) { /* when remove hw vlan filter failed, record the vlan id, * and try to remove it from hw later, to be consistence @@ -10388,6 +10442,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) /* PF's mps must be greater then VF's mps */ for (i = 1; i < hdev->num_alloc_vport; i++) if (max_frm_size < hdev->vport[i].mps) { + dev_err(&hdev->pdev->dev, + "failed to set pf mtu for less than vport %d, mps = %u.\n", + i, hdev->vport[i].mps); mutex_unlock(&hdev->vport_lock); return -EINVAL; } @@ -11393,11 +11450,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); - /* Setup affinity after service timer setup because add_timer_on - * is called in affinity notify. - */ - hclge_misc_affinity_setup(hdev); - hclge_clear_all_event_cause(hdev); hclge_clear_resetting_state(hdev); @@ -11815,7 +11867,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_reset_vf_rate(hdev); hclge_clear_vf_vlan(hdev); - hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); hclge_ptp_uninit(hdev); hclge_uninit_rxd_adv_layout(hdev); @@ -11838,8 +11889,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_misc_irq_uninit(hdev); hclge_devlink_uninit(hdev); hclge_pci_uninit(hdev); - mutex_destroy(&hdev->vport_lock); hclge_uninit_vport_vlan_table(hdev); + mutex_destroy(&hdev->vport_lock); ae_dev->priv = NULL; } @@ -12663,6 +12714,55 @@ static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, return 0; } +/* After disable sriov, VF still has some config and info need clean, + * which configed by PF. 
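
Note the reordering in hclge_uninit_ae_dev() above: hclge_uninit_vport_vlan_table() now takes vport_lock, so mutex_destroy() must run after it, not before. The same teardown-ordering rule in a user-space pthread sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void uninit_table(void)
{
        pthread_mutex_lock(&lock);      /* last user of the lock */
        puts("freeing table entries");
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        uninit_table();
        /* Correct order: destroy strictly after the final lock/unlock.
         * Destroying first would make uninit_table() undefined behaviour.
         */
        pthread_mutex_destroy(&lock);
        return 0;
}
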
+ */ +static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info vlan_info; + int ret; + + /* after disable sriov, clean VF rate configured by PF */ + ret = hclge_tm_qs_shaper_cfg(vport, 0); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d rate config, ret = %d\n", + vfid, ret); + + vlan_info.vlan_tag = 0; + vlan_info.qos = 0; + vlan_info.vlan_proto = ETH_P_8021Q; + ret = hclge_update_port_base_vlan_cfg(vport, + HNAE3_PORT_BASE_VLAN_DISABLE, + &vlan_info); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d port base vlan, ret = %d\n", + vfid, ret); + + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clean vf%d spoof config, ret = %d\n", + vfid, ret); + + memset(&vport->vf_info, 0, sizeof(vport->vf_info)); +} + +static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport; + int i; + + for (i = 0; i < num_vfs; i++) { + vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; + + hclge_clear_vport_vf_info(vport, i); + } +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -12764,6 +12864,7 @@ static const struct hnae3_ae_ops hclge_ops = { .get_rx_hwts = hclge_ptp_get_rx_hwts, .get_ts_info = hclge_ptp_get_ts_info, .get_link_diagnosis_info = hclge_get_link_diagnosis_info, + .clean_vf_config = hclge_clean_vport_config, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index f7f5a4b0906890bf9b24d6d86fe8c67c4d778b10..d2158116398d367fb677f327b356b0e451e94127 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -780,8 +780,8 @@ struct hclge_vf_vlan_cfg { union { struct { u8 is_kill; - u16 vlan; - u16 proto; + __le16 vlan; + __le16 proto; }; u8 enable; }; @@ -938,8 +938,6 @@ struct hclge_dev { DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE); - /* affinity mask and notify for misc interrupt */ - cpumask_t affinity_mask; struct hclge_ptp *ptp; struct devlink *devlink; struct hclge_comm_rss_cfg rss_cfg; @@ -985,7 +983,9 @@ struct hclge_vlan_info { struct hclge_port_base_vlan_config { u16 state; + bool tbl_sta; struct hclge_vlan_info vlan_info; + struct hclge_vlan_info old_vlan_info; }; struct hclge_vf_info { @@ -1031,6 +1031,7 @@ struct hclge_vport { spinlock_t mac_list_lock; /* protect mac address need to add/detele */ struct list_head uc_mac_list; /* Store VF unicast table */ struct list_head mc_mac_list; /* Store VF multicast table */ + struct list_head vlan_list; /* Store VF vlan table */ }; @@ -1105,6 +1106,7 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); void hclge_restore_mac_table_common(struct hclge_vport *vport); +void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev); void hclge_restore_vport_vlan_table(struct hclge_vport *vport); int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, struct hclge_vlan_info *vlan_info); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 6799d16de34b9490f60fe67c94a2619e329e0ca4..e1012f7f9b7349029383cbc13709f34cffd81e37 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -57,17 +57,19 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; resp_pf_to_vf->match_id = vf_to_pf_req->match_id; - resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP; - resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code; - resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode; + resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP); + resp_pf_to_vf->msg.vf_mbx_msg_code = + cpu_to_le16(vf_to_pf_req->msg.code); + resp_pf_to_vf->msg.vf_mbx_msg_subcode = + cpu_to_le16(vf_to_pf_req->msg.subcode); resp = hclge_errno_to_resp(resp_msg->status); if (resp < SHRT_MAX) { - resp_pf_to_vf->msg.resp_status = resp; + resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp); } else { dev_warn(&hdev->pdev->dev, "failed to send response to VF, response status %u is out-of-bound\n", resp); - resp_pf_to_vf->msg.resp_status = EIO; + resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO); } if (resp_msg->len > 0) @@ -94,15 +96,22 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, enum hclge_comm_cmd_status status; struct hclge_desc desc; + if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) { + dev_err(&hdev->pdev->dev, + "msg data length(=%u) exceeds maximum(=%u)\n", + msg_len, HCLGE_MBX_MAX_MSG_SIZE); + return -EMSGSIZE; + } + resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false); resp_pf_to_vf->dest_vfid = dest_vfid; resp_pf_to_vf->msg_len = msg_len; - resp_pf_to_vf->msg.code = mbx_opcode; + resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode); - memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len); + memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len); trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); @@ -118,8 +127,8 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) { struct hclge_dev *hdev = vport->back; + __le16 msg_data; u16 reset_type; - u8 msg_data[2]; u8 dest_vfid; BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX); @@ -133,10 +142,10 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) else reset_type = HNAE3_VF_FUNC_RESET; - memcpy(&msg_data[0], &reset_type, sizeof(u16)); + msg_data = cpu_to_le16(reset_type); /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data), HCLGE_MBX_ASSERTING_RESET, dest_vfid); } @@ -176,7 +185,7 @@ static int hclge_get_ring_chain_from_mbx( ring_num = req->msg.ring_num; if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM) - return -ENOMEM; + return -EINVAL; for (i = 0; i < ring_num; i++) { if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { @@ -242,6 +251,81 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, return ret; } +static int hclge_query_ring_vector_map(struct hclge_vport *vport, + struct hnae3_ring_chain_node *ring_chain, + struct hclge_desc *desc) +{ + struct hclge_ctrl_vector_chain_cmd *req = + (struct hclge_ctrl_vector_chain_cmd *)desc->data; + struct hclge_dev *hdev = vport->back; + u16 tqp_type_and_id; + int status; + + hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, 
true); + + tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]); + hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, + hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B)); + hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, + ring_chain->tqp_index); + req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id); + req->vfid = vport->vport_id; + + status = hclge_cmd_send(&hdev->hw, desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Get VF ring vector map info fail, status is %d.\n", + status); + + return status; +} + +static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *req, + struct hclge_respond_to_vf_msg *resp) +{ +#define HCLGE_LIMIT_RING_NUM 1 +#define HCLGE_RING_TYPE_OFFSET 0 +#define HCLGE_TQP_INDEX_OFFSET 1 +#define HCLGE_INT_GL_INDEX_OFFSET 2 +#define HCLGE_VECTOR_ID_OFFSET 3 +#define HCLGE_RING_VECTOR_MAP_INFO_LEN 4 + struct hnae3_ring_chain_node ring_chain; + struct hclge_desc desc; + struct hclge_ctrl_vector_chain_cmd *data = + (struct hclge_ctrl_vector_chain_cmd *)desc.data; + u16 tqp_type_and_id; + u8 int_gl_index; + int ret; + + req->msg.ring_num = HCLGE_LIMIT_RING_NUM; + + memset(&ring_chain, 0, sizeof(ring_chain)); + ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport); + if (ret) + return ret; + + ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc); + if (ret) { + hclge_free_vector_ring_chain(&ring_chain); + return ret; + } + + tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]); + int_gl_index = hnae3_get_field(tqp_type_and_id, + HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S); + + resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type; + resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index; + resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index; + resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l; + resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN; + + hclge_free_vector_ring_chain(&ring_chain); + + return ret; +} + static void hclge_set_vf_promisc_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *req) { @@ -332,16 +416,14 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, u16 state, struct hclge_vlan_info *vlan_info) { -#define MSG_DATA_SIZE 8 + struct hclge_mbx_port_base_vlan base_vlan; - u8 msg_data[MSG_DATA_SIZE]; + base_vlan.state = cpu_to_le16(state); + base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto); + base_vlan.qos = cpu_to_le16(vlan_info->qos); + base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag); - memcpy(&msg_data[0], &state, sizeof(u16)); - memcpy(&msg_data[2], &vlan_info->vlan_proto, sizeof(u16)); - memcpy(&msg_data[4], &vlan_info->qos, sizeof(u16)); - memcpy(&msg_data[6], &vlan_info->vlan_tag, sizeof(u16)); - - return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan), HCLGE_MBX_PUSH_VLAN_INFO, vfid); } @@ -355,13 +437,16 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, struct hnae3_handle *handle = &vport->nic; struct hclge_dev *hdev = vport->back; struct hclge_vf_vlan_cfg *msg_cmd; + __be16 proto; + u16 vlan_id; msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg; switch (msg_cmd->subcode) { case HCLGE_MBX_VLAN_FILTER: - return hclge_set_vlan_filter(handle, - cpu_to_be16(msg_cmd->proto), - msg_cmd->vlan, msg_cmd->is_kill); + proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto)); + vlan_id = le16_to_cpu(msg_cmd->vlan); + return hclge_set_vlan_filter(handle, proto, vlan_id, + msg_cmd->is_kill); case 
HCLGE_MBX_VLAN_RX_OFF_CFG: return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable); case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE: @@ -404,15 +489,17 @@ static void hclge_get_basic_info(struct hclge_vport *vport, struct hnae3_ae_dev *ae_dev = vport->back->ae_dev; struct hclge_basic_info *basic_info; unsigned int i; + u32 pf_caps; basic_info = (struct hclge_basic_info *)resp_msg->data; for (i = 0; i < kinfo->tc_info.num_tc; i++) basic_info->hw_tc_map |= BIT(i); + pf_caps = le32_to_cpu(basic_info->pf_caps); if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) - hnae3_set_bit(basic_info->pf_caps, - HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1); + hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1); + basic_info->pf_caps = cpu_to_le32(pf_caps); resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE; } @@ -420,19 +507,15 @@ static void hclge_get_vf_queue_info(struct hclge_vport *vport, struct hclge_respond_to_vf_msg *resp_msg) { #define HCLGE_TQPS_RSS_INFO_LEN 6 -#define HCLGE_TQPS_ALLOC_OFFSET 0 -#define HCLGE_TQPS_RSS_SIZE_OFFSET 2 -#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET 4 + struct hclge_mbx_vf_queue_info *queue_info; struct hclge_dev *hdev = vport->back; /* get the queue related info */ - memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET], - &vport->alloc_tqps, sizeof(u16)); - memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET], - &vport->nic.kinfo.rss_size, sizeof(u16)); - memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET], - &hdev->rx_buf_len, sizeof(u16)); + queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data; + queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps); + queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size); + queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len); resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN; } @@ -447,16 +530,15 @@ static void hclge_get_vf_queue_depth(struct hclge_vport *vport, struct hclge_respond_to_vf_msg *resp_msg) { #define HCLGE_TQPS_DEPTH_INFO_LEN 4 -#define HCLGE_TQPS_NUM_TX_DESC_OFFSET 0 -#define HCLGE_TQPS_NUM_RX_DESC_OFFSET 2 + struct hclge_mbx_vf_queue_depth *queue_depth; struct hclge_dev *hdev = vport->back; /* get the queue depth info */ - memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET], - &hdev->num_tx_desc, sizeof(u16)); - memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET], - &hdev->num_rx_desc, sizeof(u16)); + queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data; + queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc); + queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc); + resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN; } @@ -481,10 +563,9 @@ int hclge_push_vf_link_status(struct hclge_vport *vport) #define HCLGE_VF_LINK_STATE_UP 1U #define HCLGE_VF_LINK_STATE_DOWN 0U + struct hclge_mbx_link_status link_info; struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[9]; - u16 duplex; /* mac.link can only be 0 or 1 */ switch (vport->vf_info.link_state) { @@ -500,14 +581,13 @@ int hclge_push_vf_link_status(struct hclge_vport *vport) break; } - duplex = hdev->hw.mac.duplex; - memcpy(&msg_data[0], &link_status, sizeof(u16)); - memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); - memcpy(&msg_data[6], &duplex, sizeof(u16)); - msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN; + link_info.link_status = cpu_to_le16(link_status); + link_info.speed = cpu_to_le32(hdev->hw.mac.speed); + link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex); + link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN; /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + return 
hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info), HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id); } @@ -515,22 +595,22 @@ static void hclge_get_link_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { #define HCLGE_SUPPORTED 1 + struct hclge_mbx_link_mode link_mode; struct hclge_dev *hdev = vport->back; unsigned long advertising; unsigned long supported; unsigned long send_data; - u8 msg_data[10] = {}; u8 dest_vfid; advertising = hdev->hw.mac.advertising[0]; supported = hdev->hw.mac.supported[0]; dest_vfid = mbx_req->mbx_src_vfid; - msg_data[0] = mbx_req->msg.data[0]; - - send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising; + send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported : + advertising; + link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]); + link_mode.link_mode = cpu_to_le64(send_data); - memcpy(&msg_data[2], &send_data, sizeof(unsigned long)); - hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode), HCLGE_MBX_LINK_STAT_MODE, dest_vfid); } @@ -544,7 +624,7 @@ static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport, u16 queue_id; int ret; - memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id)); + queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data); resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE; resp_msg->len = sizeof(u8); @@ -580,36 +660,39 @@ static void hclge_vf_keep_alive(struct hclge_vport *vport) static int hclge_set_vf_mtu(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { + struct hclge_mbx_mtu_info *mtu_info; u32 mtu; - memcpy(&mtu, mbx_req->msg.data, sizeof(mtu)); + mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data; + mtu = le32_to_cpu(mtu_info->mtu); return hclge_set_vport_mtu(vport, mtu); } -static void hclge_get_queue_id_in_pf(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - struct hclge_respond_to_vf_msg *resp_msg) +static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) { struct hnae3_handle *handle = &vport->nic; struct hclge_dev *hdev = vport->back; u16 queue_id, qid_in_pf; - memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id)); + queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data); if (queue_id >= handle->kinfo.num_tqps) { dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n", queue_id, mbx_req->mbx_src_vfid); - return; + return -EINVAL; } qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); - memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf)); + *(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf); resp_msg->len = sizeof(qid_in_pf); + return 0; } -static void hclge_get_rss_key(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - struct hclge_respond_to_vf_msg *resp_msg) +static int hclge_get_rss_key(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + struct hclge_respond_to_vf_msg *resp_msg) { #define HCLGE_RSS_MBX_RESP_LEN 8 struct hclge_dev *hdev = vport->back; @@ -627,13 +710,14 @@ static void hclge_get_rss_key(struct hclge_vport *vport, dev_warn(&hdev->pdev->dev, "failed to get the rss hash key, the index(%u) invalid !\n", index); - return; + return -EINVAL; } memcpy(resp_msg->data, &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], HCLGE_RSS_MBX_RESP_LEN); resp_msg->len = HCLGE_RSS_MBX_RESP_LEN; + return 0; } static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code) @@ -746,6 +830,14 @@ void 
hclge_mbx_handler(struct hclge_dev *hdev) ret = hclge_map_unmap_ring_to_vf_vector(vport, false, req); break; + case HCLGE_MBX_GET_RING_VECTOR_MAP: + ret = hclge_get_vf_ring_vector_map(vport, req, + &resp_msg); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to get VF ring vector map\n", + ret); + break; case HCLGE_MBX_SET_PROMISC_MODE: hclge_set_vf_promisc_mode(vport, req); break; @@ -809,10 +901,10 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "VF fail(%d) to set mtu\n", ret); break; case HCLGE_MBX_GET_QID_IN_PF: - hclge_get_queue_id_in_pf(vport, req, &resp_msg); + ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg); break; case HCLGE_MBX_GET_RSS_KEY: - hclge_get_rss_key(vport, req, &resp_msg); + ret = hclge_get_rss_key(vport, req, &resp_msg); break; case HCLGE_MBX_GET_LINK_MODE: hclge_get_link_mode(vport, req); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 63d2be4349e3e704712a557aef257894c7be7a24..03d63b6a9b2bc025d1f180d6d632da42efa12734 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -48,7 +48,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, int ret; if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) - return 0; + return -EBUSY; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false); @@ -86,7 +86,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) int ret; if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) - return 0; + return -EBUSY; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 089f4444b7e3e3484bb30a883de139926968e367..084e190602d6890077ece10fcf41fd2356f81e7f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -282,8 +282,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, - u16 qs_id, u8 pri) +static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri, + bool link_vld) { struct hclge_qs_to_pri_link_cmd *map; struct hclge_desc desc; @@ -294,7 +294,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, map->qs_id = cpu_to_le16(qs_id); map->priority = pri; - map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK; + map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0; return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -420,7 +420,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) { struct hclge_port_shapping_cmd *shap_cfg_cmd; struct hclge_shaper_ir_para ir_para; @@ -642,11 +642,13 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport) * one tc for VF for simplicity. VF's vport_id is non zero. 
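
Two of the error-code changes above are about picking the right errno: a malformed ring count from the VF now yields -EINVAL rather than -ENOMEM, and MDIO access while the command queue is disabled returns -EBUSY instead of faking success. A small sketch of the distinction; check_request() and struct bus are hypothetical:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct bus { bool cmd_disabled; };

static int check_request(const struct bus *bus, unsigned int ring_num,
                         unsigned int ring_max)
{
        if (ring_num > ring_max)        /* caller sent bad data */
                return -EINVAL;
        if (bus->cmd_disabled)          /* device temporarily unusable */
                return -EBUSY;
        return 0;
}

int main(void)
{
        struct bus bus = { .cmd_disabled = true };

        printf("%d\n", check_request(&bus, 9, 4));      /* -EINVAL */
        printf("%d\n", check_request(&bus, 2, 4));      /* -EBUSY */
        return 0;
}
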
*/ if (vport->vport_id) { + kinfo->tc_info.max_tc = 1; kinfo->tc_info.num_tc = 1; vport->qs_offset = HNAE3_MAX_TC + vport->vport_id - HCLGE_VF_VPORT_START_NUM; vport_max_rss_size = hdev->vf_rss_size_max; } else { + kinfo->tc_info.max_tc = hdev->tc_max; kinfo->tc_info.num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); vport->qs_offset = 0; @@ -679,7 +681,9 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->num_tqps = hclge_vport_get_tqp_num(vport); vport->dwrr = 100; /* 100 percent as init */ vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; - hdev->rss_cfg.rss_size = kinfo->rss_size; + + if (vport->vport_id == PF_VPORT_ID) + hdev->rss_cfg.rss_size = kinfo->rss_size; /* when enable mqprio, the tc_info has been updated. */ if (kinfo->tc_info.mqprio_active) @@ -714,14 +718,22 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev) static void hclge_tm_tc_info_init(struct hclge_dev *hdev) { - u8 i; + u8 i, tc_sch_mode; + u32 bw_limit; + + for (i = 0; i < hdev->tc_max; i++) { + if (i < hdev->tm_info.num_tc) { + tc_sch_mode = HCLGE_SCH_MODE_DWRR; + bw_limit = hdev->tm_info.pg_info[0].bw_limit; + } else { + tc_sch_mode = HCLGE_SCH_MODE_SP; + bw_limit = 0; + } - for (i = 0; i < hdev->tm_info.num_tc; i++) { hdev->tm_info.tc_info[i].tc_id = i; - hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; + hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode; hdev->tm_info.tc_info[i].pgid = 0; - hdev->tm_info.tc_info[i].bw_limit = - hdev->tm_info.pg_info[0].bw_limit; + hdev->tm_info.tc_info[i].bw_limit = bw_limit; } for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) @@ -926,10 +938,13 @@ static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev) for (k = 0; k < hdev->num_alloc_vport; k++) { struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; - for (i = 0; i < kinfo->tc_info.num_tc; i++) { + for (i = 0; i < kinfo->tc_info.max_tc; i++) { + u8 pri = i < kinfo->tc_info.num_tc ? 
i : 0; + bool link_vld = i < kinfo->tc_info.num_tc; + ret = hclge_tm_qs_to_pri_map_cfg(hdev, vport[k].qs_offset + i, - i); + pri, link_vld); if (ret) return ret; } @@ -949,7 +964,7 @@ static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev) for (i = 0; i < HNAE3_MAX_TC; i++) { ret = hclge_tm_qs_to_pri_map_cfg(hdev, vport[k].qs_offset + i, - k); + k, true); if (ret) return ret; } @@ -989,33 +1004,39 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) { u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate; struct hclge_shaper_ir_para ir_para; - u32 shaper_para; + u32 shaper_para_c, shaper_para_p; int ret; u32 i; - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { u32 rate = hdev->tm_info.tc_info[i].bw_limit; - ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, - &ir_para, max_tm_rate); - if (ret) - return ret; + if (rate) { + ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, + &ir_para, max_tm_rate); + if (ret) + return ret; + + shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b, + ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + } else { + shaper_para_c = 0; + shaper_para_p = 0; + } - shaper_para = hclge_tm_get_shapping_para(0, 0, 0, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, - shaper_para, rate); + shaper_para_c, rate); if (ret) return ret; - shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, - ir_para.ir_u, - ir_para.ir_s, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, - shaper_para, rate); + shaper_para_p, rate); if (ret) return ret; } @@ -1125,7 +1146,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) int ret; u32 i, k; - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; dwrr = pg_info->tc_dwrr[i]; @@ -1135,9 +1156,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) return ret; for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; + + if (i >= kinfo->tc_info.max_tc) + continue; + + dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0; ret = hclge_tm_qs_weight_cfg( hdev, vport[k].qs_offset + i, - vport[k].dwrr); + dwrr); if (ret) return ret; } @@ -1303,6 +1330,7 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) { struct hclge_vport *vport = hdev->vport; int ret; + u8 mode; u16 i; ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id); @@ -1310,9 +1338,16 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) return ret; for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo; + + if (pri_id >= kinfo->tc_info.max_tc) + continue; + + mode = pri_id < kinfo->tc_info.num_tc ? 
HCLGE_SCH_MODE_DWRR : + HCLGE_SCH_MODE_SP; ret = hclge_tm_qs_schd_mode_cfg(hdev, vport[i].qs_offset + pri_id, - HCLGE_SCH_MODE_DWRR); + mode); if (ret) return ret; } @@ -1353,7 +1388,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) u8 i; if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 619cc30a2dfcc2804312b6d110818c10599287bd..d943943912f76522ec340f99b32180a07a224c12 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -237,6 +237,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate); +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev); int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num); int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num); int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h index 5b0b71bd61200e47d70e74895d30533a95092bc1..8510b88d49820acf3ef81c34ee263d6eaebd2591 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h @@ -62,7 +62,7 @@ TRACE_EVENT(hclge_pf_mbx_send, TP_fast_assign( __entry->vfid = req->dest_vfid; - __entry->code = req->msg.code; + __entry->code = le16_to_cpu(req->msg.code); __assign_str(pciname, pci_name(hdev->pdev)); __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name); memcpy(__entry->mbx_data, req, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index a2ec2d8c21518dee90ca5048ec5a9f61fd9e6580..32a7b467d79fcd9016148c4f1bf8a6715bd17116 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -189,8 +189,8 @@ static int hclgevf_get_basic_info(struct hclgevf_dev *hdev) basic_info = (struct hclge_basic_info *)resp_msg; hdev->hw_tc_map = basic_info->hw_tc_map; - hdev->mbx_api_version = basic_info->mbx_api_version; - caps = basic_info->pf_caps; + hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version); + caps = le32_to_cpu(basic_info->pf_caps); if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps)) set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); @@ -223,10 +223,8 @@ static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_RSS_INFO_LEN 6 -#define HCLGEVF_TQPS_ALLOC_OFFSET 0 -#define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2 -#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4 + struct hclge_mbx_vf_queue_info *queue_info; u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; struct hclge_vf_to_pf_msg send_msg; int status; @@ -241,12 +239,10 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) return status; } - memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET], - sizeof(u16)); - memcpy(&hdev->rss_size_max, 
&resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET], - sizeof(u16)); - memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET], - sizeof(u16)); + queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg; + hdev->num_tqps = le16_to_cpu(queue_info->num_tqps); + hdev->rss_size_max = le16_to_cpu(queue_info->rss_size); + hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len); return 0; } @@ -254,9 +250,8 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 -#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0 -#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2 + struct hclge_mbx_vf_queue_depth *queue_depth; u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; struct hclge_vf_to_pf_msg send_msg; int ret; @@ -271,10 +266,9 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) return ret; } - memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET], - sizeof(u16)); - memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET], - sizeof(u16)); + queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg; + hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc); + hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc); return 0; } @@ -288,11 +282,11 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) int ret; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); - memcpy(send_msg.data, &queue_id, sizeof(queue_id)); + *(__le16 *)send_msg.data = cpu_to_le16(queue_id); ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, sizeof(resp_data)); if (!ret) - qid_in_pf = *(u16 *)resp_data; + qid_in_pf = le16_to_cpu(*(__le16 *)resp_data); return qid_in_pf; } @@ -1245,11 +1239,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill) { -#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0 -#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1 -#define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3 - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_mbx_vlan_filter *vlan_filter; struct hclge_vf_to_pf_msg send_msg; int ret; @@ -1271,11 +1262,11 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, HCLGE_MBX_VLAN_FILTER); - send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill; - memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id, - sizeof(vlan_id)); - memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto, - sizeof(proto)); + vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data; + vlan_filter->is_kill = is_kill; + vlan_filter->vlan_id = cpu_to_le16(vlan_id); + vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto)); + /* when remove hw vlan filter failed, record the vlan id, * and try to remove it from hw later, to be consistence * with stack. 
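
The mailbox rework above replaces offset-based memcpy()s with packed little-endian structures such as hclge_mbx_vf_queue_info, converting each field exactly once at the boundary. The same technique in user-space C using the <endian.h> helpers available on glibc and musl; the 6-byte layout below is illustrative, not the driver's actual wire format:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical 6-byte message, little-endian on the wire. */
struct queue_info {
        uint16_t num_tqps;
        uint16_t rss_size;
        uint16_t rx_buf_len;
} __attribute__((packed));

int main(void)
{
        uint8_t wire[sizeof(struct queue_info)];
        struct queue_info out, in = {
                .num_tqps   = htole16(8),
                .rss_size   = htole16(4),
                .rx_buf_len = htole16(2048),
        };

        memcpy(wire, &in, sizeof(in));          /* "send" */
        memcpy(&out, wire, sizeof(out));        /* "receive" */

        /* Convert once, at the edge, instead of memcpy()ing at offsets. */
        printf("tqps=%u rss=%u buf=%u\n",
               le16toh(out.num_tqps), le16toh(out.rss_size),
               le16toh(out.rx_buf_len));
        return 0;
}
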
@@ -1347,7 +1338,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle) for (i = 1; i < handle->kinfo.num_tqps; i++) { hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); - memcpy(send_msg.data, &i, sizeof(i)); + *(__le16 *)send_msg.data = cpu_to_le16(i); ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); if (ret) return ret; @@ -1359,10 +1350,13 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle) static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hclge_mbx_mtu_info *mtu_info; struct hclge_vf_to_pf_msg send_msg; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); - memcpy(send_msg.data, &new_mtu, sizeof(new_mtu)); + mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data; + mtu_info->mtu = cpu_to_le32(new_mtu); + return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); } @@ -2052,8 +2046,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) break; } - if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) - hclgevf_enable_vector(&hdev->misc_vector, true); + hclgevf_enable_vector(&hdev->misc_vector, true); return IRQ_HANDLED; } @@ -2863,6 +2856,11 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } + /* get current port based vlan state from PF */ + ret = hclgevf_get_port_base_vlan_filter_state(hdev); + if (ret) + return ret; + set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); hclgevf_init_rxd_adv_layout(hdev); @@ -2959,7 +2957,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - /* ensure vf tbl list as empty before init*/ + /* ensure vf tbl list as empty before init */ ret = hclgevf_clear_vport_list(hdev); if (ret) { dev_err(&pdev->dev, @@ -3311,7 +3309,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, for (i = 0; i < reg_um; i++) *reg++ = hclgevf_read_dev(&hdev->hw, ring_reg_addr_list[i] + - 0x200 * j); + HCLGEVF_TQP_REG_SIZE * j); for (i = 0; i < separator_num; i++) *reg++ = SEPARATOR_VALUE; } @@ -3329,7 +3327,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, } void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, - u8 *port_base_vlan_info, u8 data_size) + struct hclge_mbx_port_base_vlan *port_base_vlan) { struct hnae3_handle *nic = &hdev->nic; struct hclge_vf_to_pf_msg send_msg; @@ -3354,7 +3352,7 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, /* send msg to PF and wait update port based vlan info */ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, HCLGE_MBX_PORT_BASE_VLAN_CFG); - memcpy(send_msg.data, port_base_vlan_info, data_size); + memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan)); ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); if (!ret) { if (state == HNAE3_PORT_BASE_VLAN_DISABLE) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 4b00fd44f118821f559083567549ce104fab7f6b..59ca6c794d6dbef559c4a51eb6e521c2841b2d96 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -293,5 +293,5 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev); void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev); void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, - u8 *port_base_vlan_info, u8 data_size); + 
struct hclge_mbx_port_base_vlan *port_base_vlan); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index d5e0a3f762f7dbe04d729561f8334a3cc88580b6..bbf7b14079de3cf2dc68cdd67a1f288b38903a79 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code) static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) { /* this function should be called with mbx_resp.mbx_mutex held - * to prtect the received_response from race condition + * to protect the received_response from race condition */ hdev->mbx_resp.received_resp = false; hdev->mbx_resp.origin_mbx_msg = 0; @@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) /* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox * message to PF. * @hdev: pointer to struct hclgevf_dev - * @resp_msg: pointer to store the original message type and response status - * @len: the resp_msg data array length. + * @code0: the message opcode VF send to PF. + * @code1: the message sub-opcode VF send to PF. + * @resp_data: pointer to store response data from PF to VF. + * @resp_len: the length of resp_data from PF to VF. */ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, u8 *resp_data, u16 resp_len) @@ -122,7 +124,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, if (need_resp) { mutex_lock(&hdev->mbx_resp.mbx_mutex); hclgevf_reset_mbx_resp_status(hdev); - req->match_id = hdev->mbx_resp.match_id; + req->match_id = cpu_to_le16(hdev->mbx_resp.match_id); status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) { dev_err(&hdev->pdev->dev, @@ -160,27 +162,29 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw) static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev, struct hclge_mbx_pf_to_vf_cmd *req) { + u16 vf_mbx_msg_subcode = le16_to_cpu(req->msg.vf_mbx_msg_subcode); + u16 vf_mbx_msg_code = le16_to_cpu(req->msg.vf_mbx_msg_code); struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp; + u16 resp_status = le16_to_cpu(req->msg.resp_status); + u16 match_id = le16_to_cpu(req->match_id); if (resp->received_resp) dev_warn(&hdev->pdev->dev, - "VF mbx resp flag not clear(%u)\n", - req->msg.vf_mbx_msg_code); - - resp->origin_mbx_msg = - (req->msg.vf_mbx_msg_code << 16); - resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode; - resp->resp_status = - hclgevf_resp_to_errno(req->msg.resp_status); + "VF mbx resp flag not clear(%u)\n", + vf_mbx_msg_code); + + resp->origin_mbx_msg = (vf_mbx_msg_code << 16); + resp->origin_mbx_msg |= vf_mbx_msg_subcode; + resp->resp_status = hclgevf_resp_to_errno(resp_status); memcpy(resp->additional_info, req->msg.resp_data, HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8)); - if (req->match_id) { + if (match_id) { /* If match_id is not zero, it means PF support match_id. * if the match_id is right, VF get the right response, or * ignore the response. and driver will clear hdev->mbx_resp * when send next message which need response. 
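
The match_id logic below pairs each mailbox reply with the request that produced it, so a late reply to an already timed-out request is silently dropped instead of being mistaken for the current response. A sketch of that request/response matching with hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mbx_state {
        uint16_t match_id;      /* id of the request we are waiting on */
        bool     received;
};

static void handle_response(struct mbx_state *st, uint16_t resp_match_id)
{
        /* Accept only the reply that matches the outstanding request;
         * a stale reply from a timed-out exchange is dropped.
         */
        if (resp_match_id == st->match_id)
                st->received = true;
        else
                printf("ignoring stale reply %u (waiting for %u)\n",
                       resp_match_id, st->match_id);
}

int main(void)
{
        struct mbx_state st = { .match_id = 42 };

        handle_response(&st, 41);       /* stale: ignored */
        handle_response(&st, 42);       /* matches: accepted */
        printf("received=%d\n", st.received);
        return 0;
}
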
*/ - if (req->match_id == resp->match_id) + if (match_id == resp->match_id) resp->received_resp = true; } else { resp->received_resp = true; @@ -197,7 +201,7 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev, HCLGE_MBX_MAX_ARQ_MSG_NUM) { dev_warn(&hdev->pdev->dev, "Async Q full, dropping msg(%u)\n", - req->msg.code); + le16_to_cpu(req->msg.code)); return; } @@ -216,6 +220,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) struct hclge_comm_cmq_ring *crq; struct hclge_desc *desc; u16 flag; + u16 code; crq = &hdev->hw.hw.cmq.crq; @@ -230,10 +235,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + code = le16_to_cpu(req->msg.code); if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, "dropped invalid mailbox message, code = %u\n", - req->msg.code); + code); /* dropping/not processing this invalid message */ crq->desc[crq->next_to_use].flag = 0; @@ -249,7 +255,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) * timeout and simultaneously queue the async messages for later * prcessing in context of mailbox task i.e. the slow path. */ - switch (req->msg.code) { + switch (code) { case HCLGE_MBX_PF_VF_RESP: hclgevf_handle_mbx_response(hdev, req); break; @@ -263,7 +269,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) default: dev_err(&hdev->pdev->dev, "VF received unsupported(%u) mbx msg from PF\n", - req->msg.code); + code); break; } crq->desc[crq->next_to_use].flag = 0; @@ -285,14 +291,18 @@ static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev, void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { + struct hclge_mbx_port_base_vlan *vlan_info; + struct hclge_mbx_link_status *link_info; + struct hclge_mbx_link_mode *link_mode; enum hnae3_reset_type reset_type; u16 link_status, state; - u16 *msg_q, *vlan_info; + __le16 *msg_q; + u16 opcode; u8 duplex; u32 speed; u32 tail; u8 flag; - u8 idx; + u16 idx; tail = hdev->arq.tail; @@ -306,13 +316,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) } msg_q = hdev->arq.msg_q[hdev->arq.head]; - - switch (msg_q[0]) { + opcode = le16_to_cpu(msg_q[0]); + switch (opcode) { case HCLGE_MBX_LINK_STAT_CHANGE: - link_status = msg_q[1]; - memcpy(&speed, &msg_q[2], sizeof(speed)); - duplex = (u8)msg_q[4]; - flag = (u8)msg_q[5]; + link_info = (struct hclge_mbx_link_status *)(msg_q + 1); + link_status = le16_to_cpu(link_info->link_status); + speed = le32_to_cpu(link_info->speed); + duplex = (u8)le16_to_cpu(link_info->duplex); + flag = link_info->flag; /* update upper layer with new link link status */ hclgevf_update_speed_duplex(hdev, speed, duplex); @@ -324,13 +335,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) break; case HCLGE_MBX_LINK_STAT_MODE: - idx = (u8)msg_q[1]; + link_mode = (struct hclge_mbx_link_mode *)(msg_q + 1); + idx = le16_to_cpu(link_mode->idx); if (idx) - memcpy(&hdev->hw.mac.supported, &msg_q[2], - sizeof(unsigned long)); + hdev->hw.mac.supported = + le64_to_cpu(link_mode->link_mode); else - memcpy(&hdev->hw.mac.advertising, &msg_q[2], - sizeof(unsigned long)); + hdev->hw.mac.advertising = + le64_to_cpu(link_mode->link_mode); break; case HCLGE_MBX_ASSERTING_RESET: /* PF has asserted reset hence VF should go in pending @@ -338,25 +350,27 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) * has been completely reset. After this stack should * eventually be re-initialized. 
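
Asynchronous PF events are staged in a fixed-depth ARQ and, as the "Async Q full" warning above shows, dropped when it overflows rather than blocking the interrupt path. A minimal ring buffer with the same drop-on-full policy; the depth and names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define ARQ_DEPTH 4

struct arq {
        uint16_t msg[ARQ_DEPTH];
        unsigned int tail, count;
};

static int arq_push(struct arq *q, uint16_t msg)
{
        if (q->count == ARQ_DEPTH) {
                fprintf(stderr, "async queue full, dropping msg %u\n", msg);
                return -1;      /* drop instead of blocking the caller */
        }
        q->msg[q->tail] = msg;
        q->tail = (q->tail + 1) % ARQ_DEPTH;
        q->count++;
        return 0;
}

int main(void)
{
        struct arq q = { { 0 }, 0, 0 };

        for (uint16_t i = 0; i < 6; i++)
                arq_push(&q, i);        /* messages 4 and 5 are dropped */
        printf("queued %u messages\n", q.count);
        return 0;
}
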
*/ - reset_type = (enum hnae3_reset_type)msg_q[1]; + reset_type = + (enum hnae3_reset_type)le16_to_cpu(msg_q[1]); set_bit(reset_type, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); break; case HCLGE_MBX_PUSH_VLAN_INFO: - state = msg_q[1]; - vlan_info = &msg_q[1]; + vlan_info = + (struct hclge_mbx_port_base_vlan *)(msg_q + 1); + state = le16_to_cpu(vlan_info->state); hclgevf_update_port_base_vlan_info(hdev, state, - (u8 *)vlan_info, 8); + vlan_info); break; case HCLGE_MBX_PUSH_PROMISC_INFO: - hclgevf_parse_promisc_info(hdev, msg_q[1]); + hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1])); break; default: dev_err(&hdev->pdev->dev, "fetched unsupported(%u) message from arq\n", - msg_q[0]); + opcode); break; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h index e4bfb6191fef579e8c758062700750e62844941f..5d4895bb57a17d9a01b29545af427ae760c0d578 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h @@ -29,7 +29,7 @@ TRACE_EVENT(hclge_vf_mbx_get, TP_fast_assign( __entry->vfid = req->dest_vfid; - __entry->code = req->msg.code; + __entry->code = le16_to_cpu(req->msg.code); __assign_str(pciname, pci_name(hdev->pdev)); __assign_str(devname, &hdev->nic.kinfo.netdev->name); memcpy(__entry->mbx_data, req, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_lld.c b/drivers/net/ethernet/huawei/hinic/hinic_lld.c index 6c960cecf10115cd8b543f68c9bdb0f651083142..9d39da0c76d4dd386391491faa389348664b009c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_lld.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_lld.c @@ -119,6 +119,7 @@ struct hinic_pcidev { bool nic_des_enable; struct timer_list syncfw_time_timer; + int card_id; }; #define HINIC_EVENT_PROCESS_TIMEOUT 10000 @@ -800,8 +801,7 @@ static bool __is_pcidev_match_chip_name(const char *ifname, if (dev->init_state < HINIC_INIT_STATE_HW_PART_INITED) return false; } else { - if (dev->init_state >= - HINIC_INIT_STATE_HW_PART_INITED && + if (dev->init_state < HINIC_INIT_STATE_HW_PART_INITED || hinic_func_type(dev->hwdev) != type) return false; } @@ -1152,6 +1152,10 @@ void *hinic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev) chip_node = pci_adapter->chip_node; lld_dev_hold(); list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) || + dev->init_state < HINIC_INIT_STATE_HW_IF_INITED) + continue; + if (dev->hwdev && hinic_func_type(dev->hwdev) == TYPE_PPF) { lld_dev_put(); return dev->hwdev; @@ -1364,6 +1368,10 @@ int hinic_get_pf_id(void *hwdev, u32 port_id, u32 *pf_id, u32 *isvalid) lld_dev_hold(); list_for_each_entry(dev, &chip_node->func_list, node) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) || + dev->init_state < HINIC_INIT_STATE_HWDEV_INITED) + continue; + if (hinic_physical_port_id(dev->hwdev) == port_id) { *pf_id = hinic_global_func_id(dev->hwdev); *isvalid = 1; @@ -1851,7 +1859,8 @@ static void send_event_to_all_pf(struct hinic_pcidev *dev, lld_dev_hold(); list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { - if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag)) + if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag) || + des_dev->init_state < HINIC_INIT_STATE_HW_IF_INITED) continue; if (hinic_func_type(des_dev->hwdev) == TYPE_VF) @@ -1869,7 +1878,8 @@ static void send_event_to_dst_pf(struct hinic_pcidev *dev, u16 func_id, lld_dev_hold(); 
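[Reviewer note] The arq rework above replaces raw u16 indexing into msg_q with typed overlays (struct hclge_mbx_link_status and friends) plus explicit le16_to_cpu/le32_to_cpu/le64_to_cpu conversions, making the parsing correct on big-endian hosts. A hedged user-space analogue using glibc's <endian.h>; the field layout is invented for illustration and is not the actual hns3 wire format:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative little-endian wire layout (not the real hns3 one). */
struct wire_link_status {
	uint16_t link_status;	/* stored little-endian */
	uint32_t speed;		/* stored little-endian */
	uint16_t duplex;	/* stored little-endian */
} __attribute__((packed));

int main(void)
{
	/* Pretend this buffer arrived from the PF. */
	uint8_t buf[sizeof(struct wire_link_status)];
	struct wire_link_status msg;

	memset(buf, 0, sizeof(buf));
	buf[0] = 0x01;			/* link up */
	buf[2] = 0x10;			/* speed = 0x2710 = 10000, LE */
	buf[3] = 0x27;

	memcpy(&msg, buf, sizeof(msg));

	/* Convert each field from LE to host order at the boundary. */
	uint16_t link  = le16toh(msg.link_status);
	uint32_t speed = le32toh(msg.speed);

	printf("link=%u speed=%u\n", link, speed);
	return 0;
}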
list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { - if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag)) + if (test_bit(HINIC_FUNC_IN_REMOVE, &des_dev->flag) || + des_dev->init_state < HINIC_INIT_STATE_HW_IF_INITED) continue; if (hinic_func_type(des_dev->hwdev) == TYPE_VF) @@ -2099,6 +2109,9 @@ static void free_chip_node(struct hinic_pcidev *pci_adapter) u32 id; int err; + if (!(card_bit_map & BIT(pci_adapter->card_id))) + return; + if (list_empty(&chip_node->func_list)) { list_del(&chip_node->node); sdk_info(&pci_adapter->pcidev->dev, @@ -2633,8 +2646,11 @@ static void slave_host_init_delay_work(struct work_struct *work) /* Make sure the PPF must be the first one */ lld_dev_hold(); list_for_each_entry(ppf_pcidev, &chip_node->func_list, node) { - if (ppf_pcidev && - hinic_func_type(ppf_pcidev->hwdev) == TYPE_PPF) { + if (test_bit(HINIC_FUNC_IN_REMOVE, &ppf_pcidev->flag) || + ppf_pcidev->init_state < HINIC_INIT_STATE_HW_IF_INITED) + continue; + + if (hinic_func_type(ppf_pcidev->hwdev) == TYPE_PPF) { found = 1; break; } @@ -2701,6 +2717,9 @@ static int hinic_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto alloc_chip_node_fail; } + sscanf(pci_adapter->chip_node->chip_name, HINIC_CHIP_NAME "%d", + &pci_adapter->card_id); + err = nictool_k_init(); if (err) { sdk_warn(&pdev->dev, "Failed to init nictool"); @@ -2865,7 +2884,8 @@ int hinic_register_micro_log(struct hinic_micro_log_info *micro_log_info) lld_dev_hold(); list_for_each_entry(chip_node, &g_hinic_chip_list, node) { list_for_each_entry(dev, &chip_node->func_list, node) { - if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) || + dev->init_state < HINIC_INIT_STATE_HW_IF_INITED) continue; if (hinic_func_type(dev->hwdev) == TYPE_PPF) { @@ -2895,7 +2915,8 @@ void hinic_unregister_micro_log(struct hinic_micro_log_info *micro_log_info) lld_dev_hold(); list_for_each_entry(chip_node, &g_hinic_chip_list, node) { list_for_each_entry(dev, &chip_node->func_list, node) { - if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag)) + if (test_bit(HINIC_FUNC_IN_REMOVE, &dev->flag) || + dev->init_state < HINIC_INIT_STATE_HW_IF_INITED) continue; if (hinic_func_type(dev->hwdev) == TYPE_PPF) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 3f69855409c8315e41a134fcd85aaaf5dbed7467..57d5d792c6ce5b4bda874c42a37b9eb96c9e320a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -67,7 +67,7 @@ static bool rx_alloc_mapped_page(struct hinic_rxq *rxq, return true; /* alloc new page for storage */ - page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC, nic_dev->page_order); + page = dev_alloc_pages(nic_dev->page_order); if (unlikely(!page)) { RXQ_STATS_INC(rxq, alloc_rx_buf_err); return false; diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index 27937c5d795673a53d79aee82a91898732102d52..daec9ce04531be2531a24d5b44bc83ad5dae3a2c 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -117,9 +117,10 @@ static int sni_82596_probe(struct platform_device *dev) netdevice->dev_addr[5] = readb(eth_addr + 0x06); iounmap(eth_addr); - if (!netdevice->irq) { + if (netdevice->irq < 0) { printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", __FILE__, netdevice->base_addr); + retval = netdevice->irq; goto probe_failed; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c 
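[Reviewer note] The recurring hinic change above is one guard applied in many call sites: while walking the chip's function list under lld_dev_hold(), skip any entry that is flagged for removal or has not reached the required init state before dereferencing its hwdev. A small user-space sketch of the guard over a plain linked list, with invented names and states:

#include <stdio.h>

enum init_state { STATE_NONE, STATE_HW_IF_INITED, STATE_HWDEV_INITED };

#define FLAG_IN_REMOVE (1u << 0)

struct func_dev {
	unsigned int	flag;
	enum init_state	init_state;
	const char	*name;
	struct func_dev	*next;
};

static void visit_ready(struct func_dev *head)
{
	for (struct func_dev *dev = head; dev; dev = dev->next) {
		/* Guard: never touch half-torn-down or half-built devices. */
		if ((dev->flag & FLAG_IN_REMOVE) ||
		    dev->init_state < STATE_HW_IF_INITED)
			continue;
		printf("visiting %s\n", dev->name);
	}
}

int main(void)
{
	struct func_dev c = { 0, STATE_HWDEV_INITED, "pf2", NULL };
	struct func_dev b = { FLAG_IN_REMOVE, STATE_HWDEV_INITED, "pf1", &c };
	struct func_dev a = { 0, STATE_NONE, "pf0", &b };

	visit_ready(&a);	/* only "pf2" is visited */
	return 0;
}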
index fadef994320cd6a3387f827a56f66d95c02a24de..85f4d2418d25c062de3c8106a609e14351190c82 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2354,8 +2354,10 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, * flush reset queue and process this reset */ if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { - list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) + list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { list_del(entry); + kfree(list_entry(entry, struct ibmvnic_rwi, list)); + } } rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); @@ -3405,11 +3407,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) struct device *dev = &adapter->vdev->dev; union ibmvnic_crq crq; int max_entries; + int cap_reqs; + + /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on + * the PROMISC flag). Initialize this count upfront. When the tasklet + * receives a response to all of these, it will send the next protocol + * message (QUERY_IP_OFFLOAD). + */ + if (!(adapter->netdev->flags & IFF_PROMISC) || + adapter->promisc_supported) + cap_reqs = 7; + else + cap_reqs = 6; if (!retry) { /* Sub-CRQ entries are 32 byte long */ int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4); + atomic_set(&adapter->running_cap_crqs, cap_reqs); + if (adapter->min_tx_entries_per_subcrq > entries_page || adapter->min_rx_add_entries_per_subcrq > entries_page) { dev_err(dev, "Fatal, invalid entries per sub-crq\n"); @@ -3470,44 +3486,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) adapter->opt_rx_comp_queues; adapter->req_rx_add_queues = adapter->max_rx_add_queues; + } else { + atomic_add(cap_reqs, &adapter->running_cap_crqs); } - memset(&crq, 0, sizeof(crq)); crq.request_capability.first = IBMVNIC_CRQ_CMD; crq.request_capability.cmd = REQUEST_CAPABILITY; crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); crq.request_capability.number = cpu_to_be64(adapter->req_tx_entries_per_subcrq); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); crq.request_capability.capability = cpu_to_be16(REQ_MTU); crq.request_capability.number = cpu_to_be64(adapter->req_mtu); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); if (adapter->netdev->flags & IFF_PROMISC) { @@ -3515,16 +3532,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry) crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); crq.request_capability.number = 
cpu_to_be64(1); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); } } else { crq.request_capability.capability = cpu_to_be16(PROMISC_REQUESTED); crq.request_capability.number = cpu_to_be64(0); - atomic_inc(&adapter->running_cap_crqs); + cap_reqs--; ibmvnic_send_crq(adapter, &crq); } + + /* Keep at end to catch any discrepancy between expected and actual + * CRQs sent. + */ + WARN_ON(cap_reqs != 0); } static int pending_scrq(struct ibmvnic_adapter *adapter, @@ -3957,118 +3979,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter) static void send_query_cap(struct ibmvnic_adapter *adapter) { union ibmvnic_crq crq; + int cap_reqs; + + /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count + * upfront. When the tasklet receives a response to all of these, it + * can send out the next protocol messaage (REQUEST_CAPABILITY). + */ + cap_reqs = 25; + + atomic_set(&adapter->running_cap_crqs, cap_reqs); - atomic_set(&adapter->running_cap_crqs, 0); memset(&crq, 0, sizeof(crq)); crq.query_capability.first = IBMVNIC_CRQ_CMD; crq.query_capability.cmd = QUERY_CAPABILITY; crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MIN_MTU); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_MTU); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION); - 
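[Reviewer note] The ibmvnic rework in this hunk and the next inverts the accounting: instead of atomic_inc() per CRQ as each one is sent (racy against an early response), the expected total is published to running_cap_crqs before the first send, a local cap_reqs counter is decremented per send, and a WARN_ON(cap_reqs != 0) at the end catches drift. A minimal sketch of the idea using C11 atomics, with the wire send stubbed out:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int running_cap_crqs;

static void send_crq(int *budget)
{
	/* The CRQ would go on the wire here. */
	(*budget)--;
}

static void send_query_caps(void)
{
	int cap_reqs = 3;	/* number of CRQs sent below */

	/* Publish the expected count before the first send so a fast
	 * response cannot observe a partially-incremented counter.
	 */
	atomic_store(&running_cap_crqs, cap_reqs);

	send_crq(&cap_reqs);
	send_crq(&cap_reqs);
	send_crq(&cap_reqs);

	/* Catch any mismatch between the preset total and actual sends. */
	assert(cap_reqs == 0);
}

static void handle_response(void)
{
	if (atomic_fetch_sub(&running_cap_crqs, 1) == 1)
		printf("all capability responses received\n");
}

int main(void)
{
	send_query_caps();
	handle_response();
	handle_response();
	handle_response();
	return 0;
}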
atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ); - atomic_inc(&adapter->running_cap_crqs); ibmvnic_send_crq(adapter, &crq); + cap_reqs--; crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ); - atomic_inc(&adapter->running_cap_crqs); + ibmvnic_send_crq(adapter, &crq); + cap_reqs--; + + /* Keep at end to catch any discrepancy between expected and actual + * CRQs sent. + */ + WARN_ON(cap_reqs != 0); } static void send_query_ip_offload(struct ibmvnic_adapter *adapter) @@ -4373,6 +4409,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, char *name; atomic_dec(&adapter->running_cap_crqs); + netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n", + atomic_read(&adapter->running_cap_crqs)); switch (be16_to_cpu(crq->request_capability_rsp.capability)) { case REQ_TX_QUEUES: req_value = &adapter->req_tx_queues; @@ -4889,6 +4927,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, adapter->fw_done_rc = -EIO; complete(&adapter->fw_done); } + + /* if we got here during crq-init, retry crq-init */ + if (!completion_done(&adapter->init_done)) { + adapter->init_done_rc = -EAGAIN; + complete(&adapter->init_done); + } + if (!completion_done(&adapter->stats_done)) complete(&adapter->stats_done); if (test_bit(0, &adapter->resetting)) @@ -5043,12 +5088,6 @@ static void ibmvnic_tasklet(struct tasklet_struct *t) ibmvnic_handle_crq(crq, adapter); crq->generic.first = 0; } - - /* remain in tasklet until all - * capabilities responses are received - */ - if (!adapter->wait_capability) - done = true; } /* if capabilities CRQ's were sent in this tasklet, the following * tasklet must wait until all responses are received @@ -5357,6 +5396,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) goto ibmvnic_dev_file_err; netif_carrier_off(netdev); + + adapter->state = VNIC_PROBED; + + adapter->wait_for_reset = false; + adapter->last_reset_time = jiffies; + rc = register_netdev(netdev); if (rc) { dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); @@ -5364,10 +5409,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) } dev_info(&dev->dev, "ibmvnic registered\n"); - adapter->state = VNIC_PROBED; - - adapter->wait_for_reset = false; - adapter->last_reset_time = jiffies; return 0; ibmvnic_register_fail: diff --git 
a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index b38b914f9ac6cc0bd55e2d7930d739ea709f4f65..15b1503d5b6ca9cd4aa7f8a7f28a2922aa90cca1 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -4134,9 +4134,9 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) return ret_val; if (!(data & valid_csum_mask)) { - e_dbg("NVM Checksum Invalid\n"); + e_dbg("NVM Checksum valid bit not set\n"); - if (hw->mac.type < e1000_pch_cnp) { + if (hw->mac.type < e1000_pch_tgp) { data |= valid_csum_mask; ret_val = e1000_write_nvm(hw, word, 1, &data); if (ret_val) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 5b83d1bc0e74d38ea1c1c7ccd6c2214bda402a39..effdc3361266f11932d50925d35bd0ecd93dd7b4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -172,7 +172,6 @@ enum i40e_interrupt_policy { struct i40e_lump_tracking { u16 num_entries; - u16 search_hint; u16 list[0]; #define I40E_PILE_VALID_BIT 0x8000 #define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2) @@ -755,12 +754,12 @@ struct i40e_vsi { struct rtnl_link_stats64 net_stats_offsets; struct i40e_eth_stats eth_stats; struct i40e_eth_stats eth_stats_offsets; - u32 tx_restart; - u32 tx_busy; + u64 tx_restart; + u64 tx_busy; u64 tx_linearize; u64 tx_force_wb; - u32 rx_buf_failed; - u32 rx_page_failed; + u64 rx_buf_failed; + u64 rx_page_failed; /* These are containers of ring pointers, allocated at run-time */ struct i40e_ring **rx_rings; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 714b578b2b49c5948b476319fb80e7b269d28bd3..989d5c7263d7cc1e32a16d5a3aad2499a54bbf19 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) (unsigned long int)vsi->net_stats_offsets.rx_compressed, (unsigned long int)vsi->net_stats_offsets.tx_compressed); dev_info(&pf->pdev->dev, - " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n", + " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n", vsi->tx_restart, vsi->tx_busy, vsi->rx_buf_failed, vsi->rx_page_failed); rcu_read_lock(); @@ -742,10 +742,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id) vsi = pf->vsi[vf->lan_vsi_idx]; dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n", vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs); - dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n", - vf->num_mdd_events, - vf->num_invalid_msgs, - vf->num_valid_msgs); + dev_info(&pf->pdev->dev, " num MDD=%lld\n", + vf->num_mdd_events); } else { dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f888a443a067bcbaa773f2aef770aca78336b911..bd18a780a0008de16a0f70428654389f2634734c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -195,10 +195,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem) * @id: an owner id to stick on the items assigned * * Returns the base item index of the lump, or negative for error - * - * The search_hint trick and lack of advanced fit-finding only work - * because we're highly likely to have all the same 
size lump requests. - * Linear search time and any fragmentation should be minimal. **/ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, u16 needed, u16 id) @@ -213,8 +209,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, return -EINVAL; } - /* start the linear search with an imperfect hint */ - i = pile->search_hint; + /* Allocate last queue in the pile for FDIR VSI queue + * so it doesn't fragment the qp_pile + */ + if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) { + if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) { + dev_err(&pf->pdev->dev, + "Cannot allocate queue %d for I40E_VSI_FDIR\n", + pile->num_entries - 1); + return -ENOMEM; + } + pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT; + return pile->num_entries - 1; + } + + i = 0; while (i < pile->num_entries) { /* skip already allocated entries */ if (pile->list[i] & I40E_PILE_VALID_BIT) { @@ -233,7 +242,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, for (j = 0; j < needed; j++) pile->list[i+j] = id | I40E_PILE_VALID_BIT; ret = i; - pile->search_hint = i + j; break; } @@ -256,7 +264,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) { int valid_id = (id | I40E_PILE_VALID_BIT); int count = 0; - int i; + u16 i; if (!pile || index >= pile->num_entries) return -EINVAL; @@ -268,8 +276,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) count++; } - if (count && index < pile->search_hint) - pile->search_hint = index; return count; } @@ -771,9 +777,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) struct rtnl_link_stats64 *ns; /* netdev stats */ struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ - u32 tx_restart, tx_busy; + u64 tx_restart, tx_busy; struct i40e_ring *p; - u32 rx_page, rx_buf; + u64 rx_page, rx_buf; u64 bytes, packets; unsigned int start; u64 tx_linearize; @@ -10130,15 +10136,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } i40e_get_oem_version(&pf->hw); - if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && - ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) || - hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) { - /* The following delay is necessary for 4.33 firmware and older - * to recover after EMP reset. 200 ms should suffice but we - * put here 300 ms to be sure that FW is ready to operate - * after reset. - */ - mdelay(300); + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) { + /* The following delay is necessary for firmware update. 
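[Reviewer note] With search_hint removed, i40e_get_lump above becomes a plain first-fit scan, with one special case: the FDIR VSI always takes the last entry of qp_pile so its single-queue allocation can never fragment the middle of the pile. A compact user-space sketch of that allocator over an array of u16 slots, sizes and names invented:

#include <stdint.h>
#include <stdio.h>

#define VALID_BIT   0x8000u
#define NUM_ENTRIES 8

static uint16_t pile[NUM_ENTRIES];

/* First-fit allocation of 'needed' contiguous slots tagged with 'id'. */
static int get_lump(int needed, uint16_t id)
{
	for (int i = 0; i + needed <= NUM_ENTRIES; ) {
		int j;

		for (j = 0; j < needed; j++)
			if (pile[i + j] & VALID_BIT)
				break;

		if (j == needed) {
			for (j = 0; j < needed; j++)
				pile[i + j] = id | VALID_BIT;
			return i;
		}
		i += j + 1;	/* jump past the occupied slot */
	}
	return -1;
}

/* FDIR-style: pin a single slot at the very end of the pile. */
static int get_last_slot(uint16_t id)
{
	if (pile[NUM_ENTRIES - 1] & VALID_BIT)
		return -1;
	pile[NUM_ENTRIES - 1] = id | VALID_BIT;
	return NUM_ENTRIES - 1;
}

int main(void)
{
	printf("fdir at %d\n", get_last_slot(1));	/* 7 */
	printf("lump at %d\n", get_lump(4, 2));		/* 0 */
	return 0;
}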
*/ + mdelay(1000); } /* re-verify the eeprom if we just had an EMP reset */ @@ -11327,7 +11327,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) return -ENOMEM; pf->irq_pile->num_entries = vectors; - pf->irq_pile->search_hint = 0; /* track first vector for misc interrupts, ignore return */ (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); @@ -12130,7 +12129,6 @@ static int i40e_sw_init(struct i40e_pf *pf) goto sw_init_done; } pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; - pf->qp_pile->search_hint = 0; pf->tx_timeout_recovery_level = 1; diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 564df22f3f4639263b94bf7cd0374dbc7961cb85..8335f151ceefc9a0c9ee94d46fcdcfaabf7713a3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -279,6 +279,9 @@ #define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 #define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 65c4c4fd359fa62d636dfc27089d7508b27be79d..9181e007e0392e7150d6d01b39fc808cf74a86f7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1323,6 +1323,32 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, return aq_ret; } +/** + * i40e_sync_vfr_reset + * @hw: pointer to hw struct + * @vf_id: VF identifier + * + * Before trigger hardware reset, we need to know if no other process has + * reserved the hardware for any reset operations. This check is done by + * examining the status of the RSTAT1 register used to signal the reset. + **/ +static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id) +{ + u32 reg; + int i; + + for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) { + reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) & + I40E_VFINT_ICR0_ADMINQ_MASK; + if (reg) + return 0; + + usleep_range(100, 200); + } + + return -EAGAIN; +} + /** * i40e_trigger_vf_reset * @vf: pointer to the VF structure @@ -1337,9 +1363,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; u32 reg, reg_idx, bit_idx; + bool vf_active; + u32 radq; /* warn the VF */ - clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); + vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); /* Disable VF's configuration API during reset. The flag is re-enabled * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI. @@ -1353,7 +1381,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr) * just need to clean up, so don't hit the VFRTRIG register. 
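[Reviewer note] i40e_sync_vfr_reset above is the classic bounded-poll idiom: re-read a status bit up to N times with a short sleep between attempts, and return -EAGAIN if it never appears, so a stuck VF reset cannot wedge the PF path indefinitely. A user-space sketch of the shape of that loop, with the hardware register simulated:

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define WAIT_COUNT 100

static int fake_rstat_reads;

/* Stand-in for rd32(): reports "done" after a few polls. */
static int read_reset_done(void)
{
	return ++fake_rstat_reads >= 5;
}

static int sync_vfr_reset(void)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 150 * 1000 };

	for (int i = 0; i < WAIT_COUNT; i++) {
		if (read_reset_done())
			return 0;
		nanosleep(&delay, NULL);  /* ~usleep_range(100, 200) */
	}
	return -EAGAIN;			  /* caller logs and moves on */
}

int main(void)
{
	printf("sync_vfr_reset() = %d\n", sync_vfr_reset());
	return 0;
}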
*/ if (!flr) { - /* reset VF using VPGEN_VFRTRIG reg */ + /* Sync VFR reset before trigger next one */ + radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) & + I40E_VFINT_ICR0_ADMINQ_MASK; + if (vf_active && !radq) + /* waiting for finish reset by virtual driver */ + if (i40e_sync_vfr_reset(hw, vf->vf_id)) + dev_info(&pf->pdev->dev, + "Reset VF %d never finished\n", + vf->vf_id); + + /* Reset VF using VPGEN_VFRTRIG reg. It is also setting + * in progress state in rstat1 register. + */ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id)); reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); @@ -1824,19 +1864,17 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) /***********************virtual channel routines******************/ /** - * i40e_vc_send_msg_to_vf_ex + * i40e_vc_send_msg_to_vf * @vf: pointer to the VF info * @v_opcode: virtual channel opcode * @v_retval: virtual channel return value * @msg: pointer to the msg buffer * @msglen: msg length - * @is_quiet: true for not printing unsuccessful return values, false otherwise * * send msg to VF **/ -static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen, - bool is_quiet) +static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) { struct i40e_pf *pf; struct i40e_hw *hw; @@ -1851,25 +1889,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, hw = &pf->hw; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; - /* single place to detect unsuccessful return values */ - if (v_retval && !is_quiet) { - vf->num_invalid_msgs++; - dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", - vf->vf_id, v_opcode, v_retval); - if (vf->num_invalid_msgs > - I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { - dev_err(&pf->pdev->dev, - "Number of invalid messages exceeded for VF %d\n", - vf->vf_id); - dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); - set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); - } - } else { - vf->num_valid_msgs++; - /* reset the invalid counter, if a valid message is received. 
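[Reviewer note] The hunk above samples and clears the VF's ACTIVE bit in a single step (vf_active = test_and_clear_bit(...)), so the "was it active?" answer and the state change cannot be separated by a concurrent reset. A user-space analogue of that kernel primitive built on C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* User-space analogue of the kernel's test_and_clear_bit(). */
static bool test_and_clear_bit(int nr, atomic_ulong *addr)
{
	unsigned long mask = 1ul << nr;

	/* Atomically clear the bit and report its previous value. */
	return atomic_fetch_and(addr, ~mask) & mask;
}

int main(void)
{
	atomic_ulong state = 1ul << 3;

	printf("first clear:  %d\n", test_and_clear_bit(3, &state)); /* 1 */
	printf("second clear: %d\n", test_and_clear_bit(3, &state)); /* 0 */
	return 0;
}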
*/ - vf->num_invalid_msgs = 0; - } - aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret) { @@ -1882,23 +1901,6 @@ static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode, return 0; } -/** - * i40e_vc_send_msg_to_vf - * @vf: pointer to the VF info - * @v_opcode: virtual channel opcode - * @v_retval: virtual channel return value - * @msg: pointer to the msg buffer - * @msglen: msg length - * - * send msg to VF - **/ -static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) -{ - return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval, - msg, msglen, false); -} - /** * i40e_vc_send_resp_to_vf * @vf: pointer to the VF info @@ -2563,6 +2565,59 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) aq_ret); } +/** + * i40e_check_enough_queue - find big enough queue number + * @vf: pointer to the VF info + * @needed: the number of items needed + * + * Returns the base item index of the queue, or negative for error + **/ +static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed) +{ + unsigned int i, cur_queues, more, pool_size; + struct i40e_lump_tracking *pile; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi; + + vsi = pf->vsi[vf->lan_vsi_idx]; + cur_queues = vsi->alloc_queue_pairs; + + /* if current allocated queues are enough for need */ + if (cur_queues >= needed) + return vsi->base_queue; + + pile = pf->qp_pile; + if (cur_queues > 0) { + /* if the allocated queues are not zero + * just check if there are enough queues for more + * behind the allocated queues. + */ + more = needed - cur_queues; + for (i = vsi->base_queue + cur_queues; + i < pile->num_entries; i++) { + if (pile->list[i] & I40E_PILE_VALID_BIT) + break; + + if (more-- == 1) + /* there is enough */ + return vsi->base_queue; + } + } + + pool_size = 0; + for (i = 0; i < pile->num_entries; i++) { + if (pile->list[i] & I40E_PILE_VALID_BIT) { + pool_size = 0; + continue; + } + if (needed <= ++pool_size) + /* there is enough */ + return i; + } + + return -ENOMEM; +} + /** * i40e_vc_request_queues_msg * @vf: pointer to the VF info @@ -2597,6 +2652,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg) req_pairs - cur_pairs, pf->queues_left); vfres->num_queue_pairs = pf->queues_left + cur_pairs; + } else if (i40e_check_enough_queue(vf, req_pairs) < 0) { + dev_warn(&pf->pdev->dev, + "VF %d requested %d more queues, but there is not enough for it.\n", + vf->vf_id, + req_pairs - cur_pairs); + vfres->num_queue_pairs = cur_pairs; } else { /* successful request */ vf->num_req_queues = req_pairs; @@ -2660,7 +2721,6 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) * i40e_check_vf_permission * @vf: pointer to the VF info * @al: MAC address list from virtchnl - * @is_quiet: set true for printing msg without opcode info, false otherwise * * Check that the given list of MAC addresses is allowed. Will return -EPERM * if any address in the list is not valid. Checks the following conditions: @@ -2675,15 +2735,13 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) * addresses might not be accurate. 
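[Reviewer note] i40e_check_enough_queue above answers "can this VF grow in place, or is there room elsewhere?" in two passes: first it checks whether enough free slots sit directly behind the VF's current block, then it falls back to scanning the whole pile for any contiguous free run of the needed size. A sketch of the second pass, the run-length scan (this version returns the start of the run, a slight simplification of the driver's return value):

#include <stdint.h>
#include <stdio.h>

#define VALID_BIT 0x8000u

/* Return the start of the first free run of at least 'needed' slots,
 * or -1 if the pile has no such run.
 */
static int find_free_run(const uint16_t *pile, int num, int needed)
{
	int pool_size = 0;

	for (int i = 0; i < num; i++) {
		if (pile[i] & VALID_BIT) {
			pool_size = 0;		/* run broken, restart */
			continue;
		}
		if (++pool_size >= needed)
			return i - needed + 1;	/* start of the run */
	}
	return -1;
}

int main(void)
{
	uint16_t pile[] = { VALID_BIT, 0, 0, VALID_BIT, 0, 0, 0, 0 };

	printf("run of 3 starts at %d\n",
	       find_free_run(pile, 8, 3));	/* prints 4 */
	return 0;
}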
**/ static inline int i40e_check_vf_permission(struct i40e_vf *vf, - struct virtchnl_ether_addr_list *al, - bool *is_quiet) + struct virtchnl_ether_addr_list *al) { struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx]; int mac2add_cnt = 0; int i; - *is_quiet = false; for (i = 0; i < al->num_elements; i++) { struct i40e_mac_filter *f; u8 *addr = al->list[i].addr; @@ -2707,7 +2765,6 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); - *is_quiet = true; return -EPERM; } @@ -2744,7 +2801,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; - bool is_quiet = false; i40e_status ret = 0; int i; @@ -2761,7 +2817,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) */ spin_lock_bh(&vsi->mac_filter_hash_lock); - ret = i40e_check_vf_permission(vf, al, &is_quiet); + ret = i40e_check_vf_permission(vf, al); if (ret) { spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; @@ -2799,8 +2855,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg) error_param: /* send the response to the VF */ - return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR, - ret, NULL, 0, is_quiet); + return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, + ret, NULL, 0); } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 49575a640a84c5e6c3abeaaae05427818b06fd87..a554d0a0b09bd56fb9904defd0a390df877cfa89 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -10,8 +10,6 @@ #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 -#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10 - #define I40E_VLAN_PRIORITY_SHIFT 13 #define I40E_VLAN_MASK 0xFFF #define I40E_PRIORITY_MASK 0xE000 @@ -19,6 +17,7 @@ #define I40E_MAX_VF_PROMISC_FLAGS 3 #define I40E_VF_STATE_WAIT_COUNT 20 +#define I40E_VFR_WAIT_COUNT 100 /* Various queue ctrls */ enum i40e_queue_ctrl { @@ -91,9 +90,6 @@ struct i40e_vf { u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u8 num_req_queues; /* num of requested qps */ u64 num_mdd_events; /* num of mdd events detected */ - /* num of continuous malformed or invalid msgs detected */ - u64 num_invalid_msgs; - u64 num_valid_msgs; /* num of valid msgs detected */ unsigned long vf_caps; /* vf's adv. 
capabilities */ unsigned long vf_states; /* vf's runtime states */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 86c79f71c685a5f41a118685bb792f7e943466a0..75e4a698c3db20b71140cb694cba11e031697f0d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -247,21 +247,25 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) { + unsigned int totalsize = xdp->data_end - xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta; - unsigned int datasize = xdp->data_end - xdp->data; struct sk_buff *skb; + net_prefetch(xdp->data_meta); + /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - xdp->data_end - xdp->data_hard_start, + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, GFP_ATOMIC | __GFP_NOWARN); if (unlikely(!skb)) return NULL; - skb_reserve(skb, xdp->data - xdp->data_hard_start); - memcpy(__skb_put(skb, datasize), xdp->data, datasize); - if (metasize) + memcpy(__skb_put(skb, totalsize), xdp->data_meta, + ALIGN(totalsize, sizeof(long))); + + if (metasize) { skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } xsk_buff_free(xdp); return skb; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 6766446a33f49b13b6385026585c90827b630ab8..ce1e2fb22e09283302ad55e740b145a4f4ce6fa5 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -309,6 +309,7 @@ struct iavf_adapter { struct iavf_hw hw; /* defined in iavf_type.h */ enum iavf_state_t state; + enum iavf_state_t last_state; unsigned long crit_section; struct delayed_work watchdog_task; @@ -378,6 +379,15 @@ struct iavf_device { extern char iavf_driver_name[]; extern struct workqueue_struct *iavf_wq; +static inline void iavf_change_state(struct iavf_adapter *adapter, + enum iavf_state_t state) +{ + if (adapter->state != state) { + adapter->last_state = adapter->state; + adapter->state = state; + } +} + int iavf_up(struct iavf_adapter *adapter); void iavf_down(struct iavf_adapter *adapter); int iavf_process_config(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index de7794ebc7e73e732c7423e47ac99234d6217d1e..bd1fb3774769b0c301d31f2e87a572cffd7c143f 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -963,7 +963,7 @@ static void iavf_configure(struct iavf_adapter *adapter) **/ static void iavf_up_complete(struct iavf_adapter *adapter) { - adapter->state = __IAVF_RUNNING; + iavf_change_state(adapter, __IAVF_RUNNING); clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state); iavf_napi_enable_all(adapter); @@ -1698,7 +1698,7 @@ static int iavf_startup(struct iavf_adapter *adapter) iavf_shutdown_adminq(hw); goto err; } - adapter->state = __IAVF_INIT_VERSION_CHECK; + iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK); err: return err; } @@ -1722,7 +1722,7 @@ static int iavf_init_version_check(struct iavf_adapter *adapter) if (!iavf_asq_done(hw)) { dev_err(&pdev->dev, "Admin queue command never completed\n"); iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); goto err; } @@ -1745,8 +1745,7 @@ static int iavf_init_version_check(struct iavf_adapter *adapter) err); goto err; } - 
adapter->state = __IAVF_INIT_GET_RESOURCES; - + iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES); err: return err; } @@ -1862,7 +1861,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) if (netdev->features & NETIF_F_GRO) dev_info(&pdev->dev, "GRO is enabled\n"); - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); rtnl_unlock(); @@ -1910,7 +1909,7 @@ static void iavf_watchdog_task(struct work_struct *work) goto restart_watchdog; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) - adapter->state = __IAVF_COMM_FAILED; + iavf_change_state(adapter, __IAVF_COMM_FAILED); switch (adapter->state) { case __IAVF_COMM_FAILED: @@ -1921,7 +1920,7 @@ static void iavf_watchdog_task(struct work_struct *work) /* A chance for redemption! */ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n"); - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; queue_delayed_work(iavf_wq, &adapter->init_task, 10); clear_bit(__IAVF_IN_CRITICAL_TASK, @@ -1971,9 +1970,10 @@ static void iavf_watchdog_task(struct work_struct *work) goto restart_watchdog; } - /* check for hw reset */ + /* check for hw reset */ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) { + iavf_change_state(adapter, __IAVF_RESETTING); adapter->flags |= IAVF_FLAG_RESET_PENDING; adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; @@ -2053,7 +2053,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) adapter->netdev->flags &= ~IFF_UP; clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); } @@ -2165,7 +2165,7 @@ static void iavf_reset_task(struct work_struct *work) } iavf_irq_disable(adapter); - adapter->state = __IAVF_RESETTING; + iavf_change_state(adapter, __IAVF_RESETTING); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; /* free the Tx/Rx rings and descriptors, might be better to just @@ -2265,11 +2265,14 @@ static void iavf_reset_task(struct work_struct *work) iavf_configure(adapter); + /* iavf_up_complete() will switch device back + * to __IAVF_RUNNING + */ iavf_up_complete(adapter); iavf_irq_enable(adapter, true); } else { - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); @@ -3277,7 +3280,7 @@ static int iavf_close(struct net_device *netdev) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; iavf_down(adapter); - adapter->state = __IAVF_DOWN_PENDING; + iavf_change_state(adapter, __IAVF_DOWN_PENDING); iavf_free_traffic_irqs(adapter); clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); @@ -3317,8 +3320,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) iavf_notify_client_l2_params(&adapter->vsi); adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED; } - adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + + if (netif_running(netdev)) { + adapter->flags |= IAVF_FLAG_RESET_NEEDED; + queue_work(iavf_wq, &adapter->reset_task); + } return 0; } @@ -3658,7 +3664,7 @@ static void iavf_init_task(struct work_struct *work) "Failed to communicate with PF; waiting before retry\n"); adapter->flags |= 
IAVF_FLAG_PF_COMMS_FAILED; iavf_shutdown_adminq(hw); - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); goto out; } @@ -3684,7 +3690,7 @@ static void iavf_shutdown(struct pci_dev *pdev) if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000)) dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__); /* Prevent the watchdog from running. */ - adapter->state = __IAVF_REMOVE; + iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); @@ -3757,7 +3763,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->back = adapter; adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; - adapter->state = __IAVF_STARTUP; + iavf_change_state(adapter, __IAVF_STARTUP); /* Call save state here because it relies on the adapter struct. */ pci_save_state(pdev); @@ -3925,7 +3931,7 @@ static void iavf_remove(struct pci_dev *pdev) dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __FUNCTION__); /* Shut down all the garbage mashers on the detention level */ - adapter->state = __IAVF_REMOVE; + iavf_change_state(adapter, __IAVF_REMOVE); adapter->aq_required = 0; adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; iavf_free_all_tx_resources(adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 8be3151f2c62b5c4d4ffd03dd1d935a4ee6fb15a..ff479bf7214433755a193951c51cad33afceefa2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1460,7 +1460,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); if (adapter->state == __IAVF_DOWN_PENDING) { - adapter->state = __IAVF_DOWN; + iavf_change_state(adapter, __IAVF_DOWN); wake_up(&adapter->down_waitqueue); } break; diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 6a57b41ddb5450f2145a4fa16cb08c0b4522d17f..7794703c1359391edddbee6bd01a45c776f33c5b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -498,7 +498,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev) static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi) { - return !!vsi->xdp_prog; + return !!READ_ONCE(vsi->xdp_prog); } static inline void ice_set_ring_xdp(struct ice_ring *ring) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index b06fbe99d8e9316e2df848dafd1db1f1487f2af2..b6dd8f81d69979edc78c323985bb99a2a35234e5 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -870,11 +870,11 @@ struct ice_aqc_get_phy_caps { * 01b - Report topology capabilities * 10b - Report SW configured */ -#define ICE_AQC_REPORT_MODE_S 1 -#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) -#define ICE_AQC_REPORT_NVM_CAP 0 -#define ICE_AQC_REPORT_TOPO_CAP BIT(1) -#define ICE_AQC_REPORT_SW_CFG BIT(2) +#define ICE_AQC_REPORT_MODE_S 1 +#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) +#define ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA 0 +#define ICE_AQC_REPORT_TOPO_CAP_MEDIA BIT(1) +#define ICE_AQC_REPORT_ACTIVE_CFG BIT(2) __le32 reserved1; __le32 addr_high; __le32 addr_low; diff --git 
a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 64714757bd4f4f790c9d7ffb47a57086927ea9ce..ecdc467c4f6f5c048bb855efb2d6f4e159979ebd 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -193,7 +193,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n", pcaps->module_type[2]); - if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) { + if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); memcpy(pi->phy.link_info.module_type, &pcaps->module_type, @@ -924,7 +924,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw) /* Initialize port_info struct with PHY capabilities */ status = ice_aq_get_phy_caps(hw->port_info, false, - ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); + ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, + NULL); devm_kfree(ice_hw_to_dev(hw), pcaps); if (status) goto err_unroll_sched; @@ -2682,7 +2683,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); devm_kfree(ice_hw_to_dev(hw), pcaps); @@ -2842,8 +2843,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) return ICE_ERR_NO_MEMORY; /* Get the current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, - NULL); + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, + pcaps, NULL); if (status) { *aq_failures = ICE_SET_FC_AQ_FAIL_GET; goto out; @@ -2989,7 +2990,7 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); if (status) goto out; @@ -3032,7 +3033,8 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) { struct ice_link_default_override_tlv tlv; - if (ice_get_link_default_override(&tlv, pi)) + status = ice_get_link_default_override(&tlv, pi); + if (status) goto out; if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 0c596b67b68996d32076335184c673ebe2bc7719..57fe21c23cb134adddec73884ab9ab4b07ba0834 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1081,7 +1081,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) if (!caps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); if (status) { err = -EAGAIN; @@ -1976,7 +1976,7 @@ ice_get_link_ksettings(struct net_device *netdev, return -ENOMEM; status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_SW_CFG, caps, NULL); + ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); if (status) { err = -EIO; goto done; @@ -2013,7 +2013,7 @@ ice_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); status = 
ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_TOPO_CAP, caps, NULL); + ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL); if (status) { err = -EIO; goto done; @@ -2187,12 +2187,12 @@ ice_set_link_ksettings(struct net_device *netdev, { struct ice_netdev_priv *np = netdev_priv(netdev); struct ethtool_link_ksettings safe_ks, copy_ks; - struct ice_aqc_get_phy_caps_data *abilities; u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT; - u16 adv_link_speed, curr_link_speed, idx; + struct ice_aqc_get_phy_caps_data *phy_caps; struct ice_aqc_set_phy_cfg_data config; + u16 adv_link_speed, curr_link_speed; struct ice_pf *pf = np->vsi->back; - struct ice_port_info *p; + struct ice_port_info *pi; u8 autoneg_changed = 0; enum ice_status status; u64 phy_type_high = 0; @@ -2200,33 +2200,25 @@ ice_set_link_ksettings(struct net_device *netdev, int err = 0; bool linkup; - p = np->vsi->port_info; + pi = np->vsi->port_info; - if (!p) + if (!pi) return -EOPNOTSUPP; - /* Check if this is LAN VSI */ - ice_for_each_vsi(pf, idx) - if (pf->vsi[idx]->type == ICE_VSI_PF) { - if (np->vsi != pf->vsi[idx]) - return -EOPNOTSUPP; - break; - } - - if (p->phy.media_type != ICE_MEDIA_BASET && - p->phy.media_type != ICE_MEDIA_FIBER && - p->phy.media_type != ICE_MEDIA_BACKPLANE && - p->phy.media_type != ICE_MEDIA_DA && - p->phy.link_info.link_info & ICE_AQ_LINK_UP) + if (pi->phy.media_type != ICE_MEDIA_BASET && + pi->phy.media_type != ICE_MEDIA_FIBER && + pi->phy.media_type != ICE_MEDIA_BACKPLANE && + pi->phy.media_type != ICE_MEDIA_DA && + pi->phy.link_info.link_info & ICE_AQ_LINK_UP) return -EOPNOTSUPP; - abilities = kzalloc(sizeof(*abilities), GFP_KERNEL); - if (!abilities) + phy_caps = kzalloc(sizeof(*phy_caps), GFP_KERNEL); + if (!phy_caps) return -ENOMEM; /* Get the PHY capabilities based on media */ - status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP, - abilities, NULL); + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, + phy_caps, NULL); if (status) { err = -EAGAIN; goto done; @@ -2288,26 +2280,26 @@ ice_set_link_ksettings(struct net_device *netdev, * configuration is initialized during probe from PHY capabilities * software mode, and updated on set PHY configuration. */ - memcpy(&config, &p->phy.curr_user_phy_cfg, sizeof(config)); + memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config)); config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; /* Check autoneg */ - err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed, + err = ice_setup_autoneg(pi, &safe_ks, &config, autoneg, &autoneg_changed, netdev); if (err) goto done; /* Call to get the current link speed */ - p->phy.get_link_info = true; - status = ice_get_link_status(p, &linkup); + pi->phy.get_link_info = true; + status = ice_get_link_status(pi, &linkup); if (status) { err = -EAGAIN; goto done; } - curr_link_speed = p->phy.link_info.link_speed; + curr_link_speed = pi->phy.curr_user_speed_req; adv_link_speed = ice_ksettings_find_adv_link_speed(ks); /* If speed didn't get set, set it to what it currently is. 
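[Reviewer note] The phy_type handling in the hunks just below is a capability-negotiation pattern: the requested link modes are AND-ed against what the hardware reports in phy_caps, and an empty intersection is rejected up front rather than being handed to firmware. A small sketch with hypothetical mode bits:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical link-mode bits, one per supported speed/medium. */
#define MODE_10G_SR	(1ull << 0)
#define MODE_25G_SR	(1ull << 1)
#define MODE_100G_SR4	(1ull << 2)

/* Keep only the requested modes the device can actually do. */
static int negotiate(uint64_t requested, uint64_t supported, uint64_t *out)
{
	uint64_t modes = requested & supported;

	if (!modes)
		return -1;	/* no overlap: refuse the request */
	*out = modes;
	return 0;
}

int main(void)
{
	uint64_t cfg;

	if (!negotiate(MODE_25G_SR | MODE_100G_SR4,
		       MODE_10G_SR | MODE_25G_SR, &cfg))
		printf("negotiated modes: 0x%llx\n",
		       (unsigned long long)cfg);	/* 0x2 */
	return 0;
}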
@@ -2326,7 +2318,7 @@ ice_set_link_ksettings(struct net_device *netdev, } /* save the requested speeds */ - p->phy.link_info.req_speeds = adv_link_speed; + pi->phy.link_info.req_speeds = adv_link_speed; /* set link and auto negotiation so changes take effect */ config.caps |= ICE_AQ_PHY_ENA_LINK; @@ -2342,9 +2334,9 @@ ice_set_link_ksettings(struct net_device *netdev, * for set PHY configuration */ config.phy_type_high = cpu_to_le64(phy_type_high) & - abilities->phy_type_high; + phy_caps->phy_type_high; config.phy_type_low = cpu_to_le64(phy_type_low) & - abilities->phy_type_low; + phy_caps->phy_type_low; if (!(config.phy_type_high || config.phy_type_low)) { /* If there is no intersection and lenient mode is enabled, then @@ -2364,7 +2356,7 @@ ice_set_link_ksettings(struct net_device *netdev, } /* If link is up put link down */ - if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) { + if (pi->phy.link_info.link_info & ICE_AQ_LINK_UP) { /* Tell the OS link is going down, the link will go * back up when fw says it is ready asynchronously */ @@ -2374,7 +2366,7 @@ ice_set_link_ksettings(struct net_device *netdev, } /* make the aq call */ - status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL); + status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL); if (status) { netdev_info(netdev, "Set phy config failed,\n"); err = -EAGAIN; @@ -2382,9 +2374,9 @@ ice_set_link_ksettings(struct net_device *netdev, } /* Save speed request */ - p->phy.curr_user_speed_req = adv_link_speed; + pi->phy.curr_user_speed_req = adv_link_speed; done: - kfree(abilities); + kfree(phy_caps); clear_bit(__ICE_CFG_BUSY, pf->state); return err; @@ -2958,7 +2950,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (status) goto out; @@ -3025,7 +3017,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (status) { kfree(pcaps); diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index c0ee0541e53fc14c92ef083b6accea03bc156d49..847e1ef8e10641a13c835eca849a4b7a4f1af679 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -507,6 +507,7 @@ struct ice_tx_ctx_desc { (0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S) #define ICE_TXD_CTX_QW1_MSS_S 50 +#define ICE_TXD_CTX_MIN_MSS 64 enum ice_tx_ctx_desc_cmd_bits { ICE_TX_CTX_DESC_TSO = 0x01, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 52ac6cc08e83e73acb646e5828e3689428ae509f..ea8d868c8f30a16778b91dacd83ff8f3a81f6a56 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1265,6 +1265,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->vsi = vsi; ring->dev = dev; ring->count = vsi->num_tx_desc; + ring->txq_teid = ICE_INVAL_TEID; WRITE_ONCE(vsi->tx_rings[i], ring); } @@ -2667,6 +2668,8 @@ int ice_vsi_release(struct ice_vsi *vsi) } } + if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) + ice_clear_dflt_vsi(pf->first_sw); ice_fltr_remove_all(vsi); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); ice_vsi_delete(vsi); diff --git 
a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 4c7d1720113a0a939061f7a959b05ce7ed845ad6..eb0625b52e4530c149ba109ecf6136fc46cb3b0a 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -726,7 +726,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) } status = ice_aq_get_phy_caps(vsi->port_info, false, - ICE_AQC_REPORT_SW_CFG, caps, NULL); + ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL); if (status) netdev_info(vsi->netdev, "Get phy capability failed.\n"); @@ -1602,7 +1602,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf) * reset, so print the event prior to reset. */ ice_print_vf_rx_mdd_event(vf); + mutex_lock(&pf->vf[i].cfg_lock); ice_reset_vf(&pf->vf[i], false); + mutex_unlock(&pf->vf[i].cfg_lock); } } } @@ -1643,7 +1645,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) if (!pcaps) return -ENOMEM; - retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (retcode) { dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n", @@ -1703,7 +1705,7 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi) if (!pcaps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps, NULL); if (status) { @@ -1819,7 +1821,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) if (!pcaps) return -ENOMEM; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); if (status) { dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); @@ -1898,7 +1900,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) return -ENOMEM; /* Get current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps, NULL); if (status) { dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", @@ -1916,7 +1918,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Use PHY topology as baseline for configuration */ memset(pcaps, 0, sizeof(*pcaps)); - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); if (status) { dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n", @@ -2473,8 +2475,10 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi) for (i = 0; i < vsi->num_xdp_txq; i++) if (vsi->xdp_rings[i]) { - if (vsi->xdp_rings[i]->desc) + if (vsi->xdp_rings[i]->desc) { + synchronize_rcu(); ice_free_tx_ring(vsi->xdp_rings[i]); + } kfree_rcu(vsi->xdp_rings[i], rcu); vsi->xdp_rings[i] = NULL; } @@ -6787,6 +6791,7 @@ ice_features_check(struct sk_buff *skb, struct net_device __always_unused *netdev, netdev_features_t features) { + bool gso = skb_is_gso(skb); size_t len; /* No point in doing any of this if neither checksum nor GSO are @@ -6799,24 +6804,32 @@ ice_features_check(struct sk_buff *skb, /* We cannot support GSO if the MSS is going to be less than * 64 bytes. If it is then we need to drop support for GSO. 
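[Reviewer note] ice_features_check, whose body continues just below, clamps offload feature bits per packet when the header geometry exceeds what the Tx descriptor can encode: an MSS under 64 bytes drops GSO, and the MAC/IP header lengths must be even and within the descriptor's field limits. A user-space sketch of that clamp-the-features pattern, with invented limits and flag bits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_CSUM	(1u << 0)
#define F_GSO	(1u << 1)

#define MACLEN_MAX 127u
#define IPLEN_MAX  127u
#define MIN_MSS	   64u

struct pkt {
	unsigned int mac_hlen;	/* L2 header length */
	unsigned int ip_hlen;	/* L3 header length */
	unsigned int mss;
	bool gso;
};

static uint32_t features_check(const struct pkt *p, uint32_t feat)
{
	if (p->gso && p->mss < MIN_MSS)
		feat &= ~F_GSO;	/* descriptor cannot express tiny MSS */

	/* Header lengths must be even and fit the descriptor fields. */
	if (p->mac_hlen > MACLEN_MAX || (p->mac_hlen & 1) ||
	    p->ip_hlen > IPLEN_MAX || (p->ip_hlen & 1))
		return feat & ~(F_CSUM | F_GSO);

	return feat;
}

int main(void)
{
	struct pkt ok  = { 14, 20, 1400, true };
	struct pkt bad = { 15, 20, 1400, true };	/* odd MAC hlen */

	printf("ok:  0x%x\n", features_check(&ok,  F_CSUM | F_GSO));
	printf("bad: 0x%x\n", features_check(&bad, F_CSUM | F_GSO));
	return 0;
}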
*/ - if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) features &= ~NETIF_F_GSO_MASK; - len = skb_network_header(skb) - skb->data; + len = skb_network_offset(skb); if (len > ICE_TXD_MACLEN_MAX || len & 0x1) goto out_rm_features; - len = skb_transport_header(skb) - skb_network_header(skb); + len = skb_network_header_len(skb); if (len > ICE_TXD_IPLEN_MAX || len & 0x1) goto out_rm_features; if (skb->encapsulation) { - len = skb_inner_network_header(skb) - skb_transport_header(skb); - if (len > ICE_TXD_L4LEN_MAX || len & 0x1) - goto out_rm_features; + /* this must work for VXLAN frames AND IPIP/SIT frames, and in + * the case of IPIP frames, the transport header pointer is + * after the inner header! So check to make sure that this + * is a GRE or UDP_TUNNEL frame before doing that math. + */ + if (gso && (skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) { + len = skb_inner_network_header(skb) - + skb_transport_header(skb); + if (len > ICE_TXD_L4LEN_MAX || len & 0x1) + goto out_rm_features; + } - len = skb_inner_transport_header(skb) - - skb_inner_network_header(skb); + len = skb_inner_network_header_len(skb); if (len > ICE_TXD_IPLEN_MAX || len & 0x1) goto out_rm_features; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 69ce5d60a8570334f0e9f61261618af363a3822e..a980d337861dedc387cc5a263b906c897b94c1c6 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -360,20 +360,26 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - /* Avoid wait time by stopping all VFs at the same time */ - ice_for_each_vf(pf, i) - ice_dis_vf_qs(&pf->vf[i]); - tmp = pf->num_alloc_vfs; pf->num_qps_per_vf = 0; pf->num_alloc_vfs = 0; for (i = 0; i < tmp; i++) { - if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) { + struct ice_vf *vf = &pf->vf[i]; + + mutex_lock(&vf->cfg_lock); + + ice_dis_vf_qs(vf); + + if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { /* disable VF qp mappings and set VF disable state */ - ice_dis_vf_mappings(&pf->vf[i]); - set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states); - ice_free_vf_res(&pf->vf[i]); + ice_dis_vf_mappings(vf); + set_bit(ICE_VF_STATE_DIS, vf->vf_states); + ice_free_vf_res(vf); } + + mutex_unlock(&vf->cfg_lock); + + mutex_destroy(&vf->cfg_lock); } if (ice_sriov_free_msix_res(pf)) @@ -1221,9 +1227,13 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr) ice_for_each_vf(pf, v) { vf = &pf->vf[v]; + mutex_lock(&vf->cfg_lock); + ice_vf_pre_vsi_rebuild(vf); ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); + + mutex_unlock(&vf->cfg_lock); } ice_flush(hw); @@ -1270,6 +1280,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) u32 reg; int i; + lockdep_assert_held(&vf->cfg_lock); + dev = ice_pf_to_dev(pf); if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) { @@ -1518,6 +1530,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf) set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps); vf->spoofchk = true; vf->num_vf_qs = pf->num_qps_per_vf; + + mutex_init(&vf->cfg_lock); } } @@ -1721,9 +1735,12 @@ void ice_process_vflr_event(struct ice_pf *pf) bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32; /* read GLGEN_VFLRSTAT register to find out the flr VFs */ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx)); - if (reg & BIT(bit_idx)) + if (reg & BIT(bit_idx)) { /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */ + 
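+			/* Any path that can trigger a VF reset now takes the
+			 * per-VF cfg_lock first; ice_reset_vf() enforces this
+			 * with lockdep_assert_held() below.
+			 */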
mutex_lock(&vf->cfg_lock); ice_reset_vf(vf, true); + mutex_unlock(&vf->cfg_lock); + } } } @@ -1800,7 +1817,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) if (!vf) return; + mutex_lock(&vf->cfg_lock); ice_vc_reset_vf(vf); + mutex_unlock(&vf->cfg_lock); } /** @@ -1830,24 +1849,6 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, dev = ice_pf_to_dev(pf); - /* single place to detect unsuccessful return values */ - if (v_retval) { - vf->num_inval_msgs++; - dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id, - v_opcode, v_retval); - if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) { - dev_err(dev, "Number of invalid messages exceeded for VF %d\n", - vf->vf_id); - dev_err(dev, "Use PF Control I/F to enable the VF\n"); - set_bit(ICE_VF_STATE_DIS, vf->vf_states); - return -EIO; - } - } else { - vf->num_valid_msgs++; - /* reset the invalid counter, if a valid message is received. */ - vf->num_inval_msgs = 0; - } - aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) { @@ -2722,9 +2723,9 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - /* Skip queue if not enabled */ if (!test_bit(vf_q_id, vf->txq_ena)) - continue; + dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n", + vf_q_id, vsi->vsi_num); ice_fill_txq_meta(vsi, ring, &txq_meta); @@ -3345,6 +3346,8 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, return 0; } + mutex_lock(&vf->cfg_lock); + vf->port_vlan_info = vlanprio; if (vf->port_vlan_info) @@ -3354,6 +3357,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id); ice_vc_reset_vf(vf); + mutex_unlock(&vf->cfg_lock); return 0; } @@ -3719,6 +3723,15 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) return; } + /* VF is being configured in another context that triggers a VFR, so no + * need to process this message + */ + if (!mutex_trylock(&vf->cfg_lock)) { + dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n", + vf->vf_id); + return; + } + switch (v_opcode) { case VIRTCHNL_OP_VERSION: err = ice_vc_get_ver_msg(vf, msg); @@ -3795,6 +3808,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n", vf_id, v_opcode, err); } + + mutex_unlock(&vf->cfg_lock); } /** @@ -3909,6 +3924,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) return -EINVAL; } + mutex_lock(&vf->cfg_lock); + /* VF is notified of its new MAC via the PF's response to the * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset */ @@ -3926,6 +3943,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } ice_vc_reset_vf(vf); + mutex_unlock(&vf->cfg_lock); return 0; } @@ -3955,11 +3973,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) if (trusted == vf->trusted) return 0; + mutex_lock(&vf->cfg_lock); + vf->trusted = trusted; ice_vc_reset_vf(vf); dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", vf_id, trusted ? 
"" : "un"); + mutex_unlock(&vf->cfg_lock); + return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 0f519fba3770d3d3b4f250b69158c53f1170293b..d2e935c678a147e14037ea451a4b5f0e020c0640 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -13,7 +13,6 @@ #define ICE_MAX_MACADDR_PER_VF 18 /* Malicious Driver Detection */ -#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10 #define ICE_MDD_EVENTS_THRESHOLD 30 /* Static VF transaction/status register def */ @@ -68,6 +67,11 @@ struct ice_mdd_vf_events { struct ice_vf { struct ice_pf *pf; + /* Used during virtchnl message handling and NDO ops against the VF + * that will trigger a VFR + */ + struct mutex cfg_lock; + u16 vf_id; /* VF ID in the PF space */ u16 lan_vsi_idx; /* index into PF struct */ /* first vector index of this VF in the PF space */ @@ -92,8 +96,6 @@ struct ice_vf { unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ - u64 num_inval_msgs; /* number of continuous invalid msgs */ - u64 num_valid_msgs; /* number of valid msgs detected */ unsigned long vf_caps; /* VF's adv. capabilities */ u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 9f36f8d7a9854faa97504dd023f77991eb016606..5733526fa245c9ec625180e9f9b30f9f5b4d95bd 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -36,8 +36,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) { ice_clean_tx_ring(vsi->tx_rings[q_idx]); - if (ice_is_xdp_ena_vsi(vsi)) + if (ice_is_xdp_ena_vsi(vsi)) { + synchronize_rcu(); ice_clean_tx_ring(vsi->xdp_rings[q_idx]); + } ice_clean_rx_ring(vsi->rx_rings[q_idx]); } diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 8e1799508edc42438a128c38be2f62cf8eeae083..e380b7a3ea63b19468d976ac56a4ff29116b5bbb 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -748,8 +748,6 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data) if (ret_val) return ret_val; ret_val = igc_write_phy_reg_mdic(hw, offset, data); - if (ret_val) - return ret_val; hw->phy.ops.release(hw); } else { ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr, @@ -781,8 +779,6 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data) if (ret_val) return ret_val; ret_val = igc_read_phy_reg_mdic(hw, offset, data); - if (ret_val) - return ret_val; hw->phy.ops.release(hw); } else { ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index de0fc6ecf491fbfc59bec67b8c24485426e3460f..eaa992e7c591c2079f2399f8738dbf6a922487e7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -177,11 +177,14 @@ struct vf_data_storage { u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ u16 pf_qos; u16 tx_rate; + int link_enable; + int link_state; u8 spoofchk_enabled; bool rss_query_enabled; u8 trusted; int xcast_mode; unsigned int vf_api; + u8 primary_abort_count; }; enum ixgbevf_xcast_modes { @@ -552,6 +555,8 @@ struct ixgbe_mac_addr { #define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) #define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ +#define IXGBE_PRIMARY_ABORT_LIMIT 5 + /* board specific private data structure */ struct ixgbe_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -610,6 +615,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_RX_LEGACY BIT(16) #define IXGBE_FLAG2_IPSEC_ENABLED BIT(17) #define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18) +#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19) /* Tx fast path data */ int num_tx_queues; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 089e1b64b59a0c6a97bb03f95d22a23d3f19643c..294c59ab35c563b1062f1295f1ae511a13cebf01 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -138,6 +138,8 @@ static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { "legacy-rx", #define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1) "vf-ipsec", +#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF BIT(2) + "mdd-disable-vf", }; #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) @@ -3523,6 +3525,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev) if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED) priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN; + if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) + priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF; + return priv_flags; } @@ -3530,6 +3535,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) { struct ixgbe_adapter *adapter = netdev_priv(netdev); unsigned int flags2 = adapter->flags2; + unsigned int i; flags2 &= ~IXGBE_FLAG2_RX_LEGACY; if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) @@ -3539,6 +3545,21 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN) flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED; + flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF; + if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) { + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + /* Reset primary abort counter */ + for (i = 0; i < adapter->num_vfs; i++) + adapter->vfinfo[i].primary_abort_count = 0; + + flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF; + } else { + e_info(probe, + "Cannot set private flags: Operation not supported\n"); + return -EOPNOTSUPP; + } + } + if (flags2 != adapter->flags2) { adapter->flags2 = flags2; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0109b47b2fe66edeb8c6fee12985afab83aaa5b9..74ee496e40319015ff6724e1e676a2052942aca2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5683,6 +5683,9 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + + /* update setting rx tx for all active vfs */ + ixgbe_set_all_vfs(adapter); } void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) @@ -6140,11 +6143,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) for (i = 0 ; i < adapter->num_vfs; i++) adapter->vfinfo[i].clear_to_send = false; - /* ping all the active vfs to let them know we are going down */ - 
ixgbe_ping_all_vfs(adapter); - - /* Disable all VFTE/VFRE TX/RX */ - ixgbe_disable_tx_rx(adapter); + /* update setting rx tx for all active vfs */ + ixgbe_set_all_vfs(adapter); } /* disable transmits in the hardware now that interrupts are off */ @@ -7602,6 +7602,27 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) } #ifdef CONFIG_PCI_IOV +static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + + if (adapter->hw.mac.type == ixgbe_mac_82599EB && + adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) { + adapter->vfinfo[vf].primary_abort_count++; + if (adapter->vfinfo[vf].primary_abort_count == + IXGBE_PRIMARY_ABORT_LIMIT) { + ixgbe_set_vf_link_state(adapter, vf, + IFLA_VF_LINK_STATE_DISABLE); + adapter->vfinfo[vf].primary_abort_count = 0; + + e_info(drv, + "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on", + hw->bus.func, vf, + adapter->vfinfo[vf].vf_mac_addresses); + } + } +} + static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; @@ -7633,8 +7654,10 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) continue; pci_read_config_word(vfdev, PCI_STATUS, &status_reg); if (status_reg != IXGBE_FAILED_READ_CFG_WORD && - status_reg & PCI_STATUS_REC_MASTER_ABORT) + status_reg & PCI_STATUS_REC_MASTER_ABORT) { + ixgbe_bad_vf_abort(adapter, vf); pcie_flr(vfdev); + } } } @@ -10255,6 +10278,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, + .ndo_set_vf_link_state = ixgbe_ndo_set_vf_link_state, .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, @@ -10723,6 +10747,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF; + switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index a148534d7256dbb5dbb5942bc58cfc4ae4412e58..8f4316b19278cefdc1a23c9801c7c7003076fabc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -85,6 +85,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_IPSEC_ADD 0x0d #define IXGBE_VF_IPSEC_DEL 0x0e +#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 214a38de3f415012600bccb0af01a20e6f30c9f1..7f11c0a8e7a91de4d1893bdbc15c027f80d70c44 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -96,6 +96,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, for (i = 0; i < num_vfs; i++) { /* enable spoof checking for all VFs */ adapter->vfinfo[i].spoofchk_enabled = true; + adapter->vfinfo[i].link_enable = true; /* We support VF RSS querying only for 82599 and x540 * devices at the moment. 
These devices share RSS @@ -820,6 +821,57 @@ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, } } +/** + * ixgbe_set_vf_rx_tx - Set VF rx tx + * @adapter: Pointer to adapter struct + * @vf: VF identifier + * + * Set or reset correct transmit and receive for vf + **/ +static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf) +{ + u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_offset, vf_shift; + + vf_shift = vf % 32; + reg_offset = vf / 32; + + reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); + reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); + + if (adapter->vfinfo[vf].link_enable) { + reg_req_tx = reg_cur_tx | 1 << vf_shift; + reg_req_rx = reg_cur_rx | 1 << vf_shift; + } else { + reg_req_tx = reg_cur_tx & ~(1 << vf_shift); + reg_req_rx = reg_cur_rx & ~(1 << vf_shift); + } + + /* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. + * For more info take a look at ixgbe_set_vf_lpe + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + +#if IS_ENABLED(CONFIG_FCOE) + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif /* CONFIG_FCOE */ + + if (pf_max_frame > ETH_FRAME_LEN) + reg_req_rx = reg_cur_rx & ~(1 << vf_shift); + } + + /* Enable/Disable particular VF */ + if (reg_cur_tx != reg_req_tx) + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx); + if (reg_cur_rx != reg_req_rx) + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx); +} + static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; @@ -845,11 +897,6 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) vf_shift = vf % 32; reg_offset = vf / 32; - /* enable transmit for vf */ - reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= BIT(vf_shift); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); - /* force drop enable for all VF Rx queues */ reg = IXGBE_QDE_ENABLE; if (adapter->vfinfo[vf].pf_vlan) @@ -857,27 +904,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) ixgbe_write_qde(adapter, vf, reg); - /* enable receive for vf */ - reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= BIT(vf_shift); - /* - * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. 
- * For more info take a look at ixgbe_set_vf_lpe - */ - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - struct net_device *dev = adapter->netdev; - int pf_max_frame = dev->mtu + ETH_HLEN; - -#ifdef CONFIG_FCOE - if (dev->features & NETIF_F_FCOE_MTU) - pf_max_frame = max_t(int, pf_max_frame, - IXGBE_FCOE_JUMBO_FRAME_SIZE); - -#endif /* CONFIG_FCOE */ - if (pf_max_frame > ETH_FRAME_LEN) - reg &= ~BIT(vf_shift); - } - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + ixgbe_set_vf_rx_tx(adapter, vf); /* enable VF mailbox for further messages */ adapter->vfinfo[vf].clear_to_send = true; @@ -1202,6 +1229,26 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, return 0; } +static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *link_state = &msgbuf[1]; + + /* verify the PF is supporting the correct API */ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_12: + case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: + break; + default: + return -EOPNOTSUPP; + } + + *link_state = adapter->vfinfo[vf].link_enable; + + return 0; +} + static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) { u32 mbx_size = IXGBE_VFMAILBOX_SIZE; @@ -1267,6 +1314,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_UPDATE_XCAST_MODE: retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); break; + case IXGBE_VF_GET_LINK_STATE: + retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf); + break; case IXGBE_VF_IPSEC_ADD: retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf); break; @@ -1322,18 +1372,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter) } } -void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - - /* disable transmit and receive for all vfs */ - IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); - - IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); -} - static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) { struct ixgbe_hw *hw = &adapter->hw; @@ -1359,6 +1397,21 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) } } +/** + * ixgbe_set_all_vfs - update vfs queues + * @adapter: Pointer to adapter struct + * + * Update setting transmit and receive queues for all vfs + **/ +void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0 ; i < adapter->num_vfs; i++) + ixgbe_set_vf_link_state(adapter, i, + adapter->vfinfo[i].link_state); +} + int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -1656,6 +1709,84 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) return 0; } +/** + * ixgbe_set_vf_link_state - Set link state + * @adapter: Pointer to adapter struct + * @vf: VF identifier + * @state: required link state + * + * Set a link force state on/off a single vf + **/ +void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state) +{ + adapter->vfinfo[vf].link_state = state; + + switch (state) { + case IFLA_VF_LINK_STATE_AUTO: + if (test_bit(__IXGBE_DOWN, &adapter->state)) + adapter->vfinfo[vf].link_enable = false; + else + adapter->vfinfo[vf].link_enable = true; + break; + case IFLA_VF_LINK_STATE_ENABLE: + adapter->vfinfo[vf].link_enable = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + adapter->vfinfo[vf].link_enable = false; + break; + } + + ixgbe_set_vf_rx_tx(adapter, vf); + + /* restart the VF */ + 
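+	/* Dropping clear_to_send before the ping makes the mailbox NACK any
+	 * further requests from this VF until it renegotiates, which is what
+	 * actually forces the restart (assumed PF/VF mailbox convention).
+	 */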
adapter->vfinfo[vf].clear_to_send = false; + ixgbe_ping_vf(adapter, vf); +} + +/** + * ixgbe_ndo_set_vf_link_state - Set link state + * @netdev: network interface device structure + * @vf: VF identifier + * @state: required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (vf < 0 || vf >= adapter->num_vfs) { + dev_err(&adapter->pdev->dev, + "NDO set VF link - invalid VF identifier %d\n", vf); + return -EINVAL; + } + + switch (state) { + case IFLA_VF_LINK_STATE_ENABLE: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state %d - not supported\n", + vf, state); + break; + case IFLA_VF_LINK_STATE_DISABLE: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state disable\n", vf); + ixgbe_set_vf_link_state(adapter, vf, state); + break; + case IFLA_VF_LINK_STATE_AUTO: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state auto\n", vf); + ixgbe_set_vf_link_state(adapter, vf, state); + break; + default: + dev_err(&adapter->pdev->dev, + "NDO set VF %d - invalid link state %d\n", vf, state); + ret = -EINVAL; + } + + return ret; +} + int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, bool setting) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 3ec21923c89cd9e38cd39327445a184f68f661c9..0690ecb8dfa348e9493fe9e9003bc3b8225a928e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -17,8 +17,8 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); #endif void ixgbe_msg_task(struct ixgbe_adapter *adapter); int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); -void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); +void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, u8 qos, __be16 vlan_proto); @@ -31,7 +31,9 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); +int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); +void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state); int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index d60da7a89092e82f8195b961ba67938af6739722..ca1a428b278e0df4dcceaf3bd048b79054b3faf5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -391,12 +391,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) u32 cmd_type; while (budget-- > 0) { - if (unlikely(!ixgbe_desc_unused(xdp_ring)) || - !netif_carrier_ok(xdp_ring->netdev)) { + if (unlikely(!ixgbe_desc_unused(xdp_ring))) { work_done = false; break; } + if (!netif_carrier_ok(xdp_ring->netdev)) + break; + if (!xsk_tx_peek_desc(pool, &desc)) break; diff 
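With .ndo_set_vf_link_state wired up, the administrative VF link state can be driven from userspace, e.g. "ip link set <pf> vf <n> state disable" (assuming standard iproute2 semantics). The state-to-enable mapping is the switch in ixgbe_set_vf_link_state() above, condensed here into a hypothetical predicate for reference:

	/* Hypothetical condensation of the switch in ixgbe_set_vf_link_state(). */
	static bool ixgbe_vf_link_enabled(struct ixgbe_adapter *adapter, int state)
	{
		switch (state) {
		case IFLA_VF_LINK_STATE_ENABLE:		/* force link up */
			return true;
		case IFLA_VF_LINK_STATE_DISABLE:	/* force link down */
			return false;
		case IFLA_VF_LINK_STATE_AUTO:		/* follow the PF */
		default:
			return !test_bit(__IXGBE_DOWN, &adapter->state);
		}
	}

Note that the NDO itself only acts on disable and auto; enable is logged as unsupported, matching the dev_info() above.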
--git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index a0e325774819486d2578a1d23fe55e072fc4fee1..89bfe4eb92f8cc0cf1242cf16fcfecf7359593e9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -387,6 +387,8 @@ struct ixgbevf_adapter { u32 *rss_key; u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; u32 flags; + bool link_state; + #define IXGBEVF_FLAGS_LEGACY_RX BIT(1) #ifdef CONFIG_XFRM diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index a70e4601eeb803c1f7b80ac0d43f5a3dfdaf8438..01acfa4244c7e341993748261b8a3eb4f4c3f95e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1987,14 +1987,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter, if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) return; - set_ring_build_skb_enabled(rx_ring); + if (PAGE_SIZE < 8192) + if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB) + set_ring_uses_large_buffer(rx_ring); - if (PAGE_SIZE < 8192) { - if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB) - return; + /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */ + if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring)) + return; - set_ring_uses_large_buffer(rx_ring); - } + set_ring_build_skb_enabled(rx_ring); } /** @@ -2293,7 +2294,9 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; struct ixgbe_hw *hw = &adapter->hw; + bool state; ixgbevf_configure_msix(adapter); @@ -2306,6 +2309,11 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) spin_unlock_bh(&adapter->mbx_lock); + state = adapter->link_state; + hw->mac.ops.get_link_state(hw, &adapter->link_state); + if (state && state != adapter->link_state) + dev_info(&pdev->dev, "VF is administratively disabled\n"); + smp_mb__before_atomic(); clear_bit(__IXGBEVF_DOWN, &adapter->state); ixgbevf_napi_enable_all(adapter); @@ -3074,6 +3082,8 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; + adapter->link_state = true; + set_bit(__IXGBEVF_DOWN, &adapter->state); return 0; @@ -3306,7 +3316,7 @@ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter) ixgbevf_watchdog_update_link(adapter); - if (adapter->link_up) + if (adapter->link_up && adapter->link_state) ixgbevf_watchdog_link_is_up(adapter); else ixgbevf_watchdog_link_is_down(adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 853796c8ef0e21e33575a91acf996296cdb8cc98..403f4d9445b28720f2a9d7d5934f8cbb472240da 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -97,6 +97,8 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_IPSEC_ADD 0x0d #define IXGBE_VF_IPSEC_DEL 0x0e +#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */ + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index bfe6dfcec4abf3e2526b7d64024898fde1ce0c72..86d5521e69f8723ba4c008520a414cad4a0ad153 
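The new opcode keeps the usual two-word IXGBE_VF_* request/reply layout: word 0 carries the opcode and is echoed back with an ACK or NACK bit, word 1 carries the payload. A sketch of the exchange from the VF side (names from the hunks below; the NACK branch covers PFs that predate the opcode):

	u32 msgbuf[2] = { IXGBE_VF_GET_LINK_STATE, 0 };

	if (ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2) ||
	    (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_MBX;	/* old or refusing PF */

	*link_state = msgbuf[1];	/* nonzero: PF has Tx/Rx enabled for us */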
100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -573,6 +573,46 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) return -EOPNOTSUPP; } +/** + * ixgbevf_get_link_state_vf - Get VF link state from PF + * @hw: pointer to the HW structure + * @link_state: link state storage + * + * Returns the state of the operation: error or success. + */ +static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state) +{ + u32 msgbuf[2]; + s32 ret_val; + s32 err; + + msgbuf[0] = IXGBE_VF_GET_LINK_STATE; + msgbuf[1] = 0x0; + + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + + if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) { + ret_val = IXGBE_ERR_MBX; + } else { + ret_val = 0; + *link_state = msgbuf[1]; + } + + return ret_val; +} + +/** + * ixgbevf_hv_get_link_state_vf - Hyper-V variant - just a stub. + * @hw: unused + * @link_state: unused + * + * Hyper-V variant; there is no mailbox communication. + */ +static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state) +{ + return -EOPNOTSUPP; +} + /** * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure @@ -950,6 +990,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .set_rar = ixgbevf_set_rar_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, .update_xcast_mode = ixgbevf_update_xcast_mode, + .get_link_state = ixgbevf_get_link_state_vf, .set_uc_addr = ixgbevf_set_uc_addr_vf, .set_vfta = ixgbevf_set_vfta_vf, .set_rlpml = ixgbevf_set_rlpml_vf, @@ -967,6 +1008,7 @@ static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = { .set_rar = ixgbevf_hv_set_rar_vf, .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf, .update_xcast_mode = ixgbevf_hv_update_xcast_mode, + .get_link_state = ixgbevf_hv_get_link_state_vf, .set_uc_addr = ixgbevf_hv_set_uc_addr_vf, .set_vfta = ixgbevf_hv_set_vfta_vf, .set_rlpml = ixgbevf_hv_set_rlpml_vf, diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index d1e9e306653b87ca9ee6e73543ca918f8a3787af..45d9269218db60bee69b23a32e6fef5eb95e7175 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -42,6 +42,7 @@ struct ixgbe_mac_operations { s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); s32 (*update_xcast_mode)(struct ixgbe_hw *, int); + s32 (*get_link_state)(struct ixgbe_hw *hw, bool *link_state); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 044a5b1196acbc2515b788cedeb4d16dbee79879..161174be51c31932b99af4725ca63fb2b67f9502 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -386,7 +386,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf, dst_mdev->msg_size = mbox_hdr->msg_size; dst_mdev->num_msgs = num_msgs; err = otx2_sync_mbox_msg(dst_mbox); - if (err) { + /* Error code -EIO indicates a communication failure with the AF. + * The rest of the error codes indicate that the AF processed the + * VF messages and set the error codes in the response messages + * (if any), so simply forward the responses to the VF.
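+ * (The -EIO special case rests on the assumption that
+ * otx2_sync_mbox_msg() returns -EIO only when the AF response
+ * times out.)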
+ */ + if (err == -EIO) { dev_warn(pf->dev, "AF not responding to VF%d messages\n", vf); /* restore PF mbase and exit */ diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index a2d3f04a9ff222fe4ebccd4607873e39ae0ca33d..7d7dc0754a3a1335df5458fb50b15344c274d600 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -215,7 +215,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, phylink_config); struct mtk_eth *eth = mac->hw; u32 mcr_cur, mcr_new, sid, i; - int val, ge_mode, err; + int val, ge_mode, err = 0; /* MT76x8 has no hardware settings between for the MAC */ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 2e55e0088871540e1f139ab970660e5f184f8ec1..94426d29025eb4e8a3bf2d6ea3b73d8445768453 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -130,11 +130,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd) static void cmd_free_index(struct mlx5_cmd *cmd, int idx) { - unsigned long flags; - - spin_lock_irqsave(&cmd->alloc_lock, flags); + lockdep_assert_held(&cmd->alloc_lock); set_bit(idx, &cmd->bitmask); - spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) @@ -144,13 +141,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) { + struct mlx5_cmd *cmd = ent->cmd; + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); if (!refcount_dec_and_test(&ent->refcnt)) - return; + goto out; - if (ent->idx >= 0) - cmd_free_index(ent->cmd, ent->idx); + if (ent->idx >= 0) { + cmd_free_index(cmd, ent->idx); + up(ent->page_queue ? &cmd->pages_sem : &cmd->sem); + } cmd_free_ent(ent); +out: + spin_unlock_irqrestore(&cmd->alloc_lock, flags); } static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) @@ -883,25 +888,6 @@ static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode) return cmd->allowed_opcode == opcode; } -static int cmd_alloc_index_retry(struct mlx5_cmd *cmd) -{ - unsigned long alloc_end = jiffies + msecs_to_jiffies(1000); - int idx; - -retry: - idx = cmd_alloc_index(cmd); - if (idx < 0 && time_before(jiffies, alloc_end)) { - /* Index allocation can fail on heavy load of commands. This is a temporary - * situation as the current command already holds the semaphore, meaning that - * another command completion is being handled and it is expected to release - * the entry index soon. - */ - cpu_relax(); - goto retry; - } - return idx; -} - bool mlx5_cmd_is_down(struct mlx5_core_dev *dev) { return pci_channel_offline(dev->pdev) || @@ -926,7 +912,7 @@ static void cmd_work_handler(struct work_struct *work) sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { - alloc_ret = cmd_alloc_index_retry(cmd); + alloc_ret = cmd_alloc_index(cmd); if (alloc_ret < 0) { mlx5_core_err_rl(dev, "failed to allocate command entry\n"); if (ent->callback) { @@ -1582,8 +1568,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force vector = vec & 0xffffffff; for (i = 0; i < (1 << cmd->log_sz); i++) { if (test_bit(i, &vector)) { - struct semaphore *sem; - ent = cmd->ent_arr[i]; /* if we already completed the command, ignore it */ @@ -1606,10 +1590,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) cmd_ent_put(ent); - if (ent->page_queue) - sem = &cmd->pages_sem; - else - sem = &cmd->sem; ent->ts2 = ktime_get_ns(); memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); dump_command(dev, ent, 0); @@ -1663,7 +1643,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force */ complete(&ent->done); } - up(sem); } } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c index 9c076aa20306a7d2bd116b85dfac000bcafaa3f0..b6f5c1bcdbcd493f4ec4ea9bae6eb904454ccce5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c @@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw, static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev) { - struct mlx5e_rep_priv *rpriv; - struct mlx5e_priv *priv; - - /* A given netdev is not a representor or not a slave of LAG configuration */ - if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev)) - return false; - - priv = netdev_priv(netdev); - rpriv = priv->ppriv; - - /* Egress acl forward to vport is supported only non-uplink representor */ - return rpriv->rep->vport != MLX5_VPORT_UPLINK; + return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev); } static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr) @@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt u16 fwd_vport_num; int err; - if (!mlx5e_rep_is_lag_netdev(netdev)) - return; - info = ptr; lag_info = info->lower_state_info; /* This is not an event of a representor becoming active slave */ @@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr) struct net_device *lag_dev; struct mlx5e_priv *priv; - if (!mlx5e_rep_is_lag_netdev(netdev)) - return; - priv = netdev_priv(netdev); rpriv = priv->ppriv; lag_dev = info->upper_dev; @@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct mlx5e_rep_priv *rpriv; + struct mlx5e_rep_bond *bond; + struct mlx5e_priv *priv; + + if (!mlx5e_rep_is_lag_netdev(netdev)) + return NOTIFY_DONE; + + bond = container_of(nb, struct mlx5e_rep_bond, nb); + priv = netdev_priv(netdev); + rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH); + /* Verify VF representor is on the same device of the bond handling the netevent. 
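+	 * One such notifier block is registered per uplink, embedded in its
+	 * mlx5e_rep_bond, so container_of() above recovers the bond this
+	 * event belongs to.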
*/ + if (rpriv->uplink_priv.bond != bond) + return NOTIFY_DONE; switch (event) { case NETDEV_CHANGELOWERSTATE: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c index 71e8d66fa1509434f300c4447036fce13f281666..6692bc8333f73fa1723d0b2deede69a9dcb2889e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c @@ -11,13 +11,13 @@ static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv, { struct device *dev = mlx5_core_dma_dev(priv->mdev); - return xsk_pool_dma_map(pool, dev, 0); + return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC); } static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv, struct xsk_buff_pool *pool) { - return xsk_pool_dma_unmap(pool, 0); + return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC); } static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 584751da20b17a29054ea2faa571007694d9ebff..38b557a6353d6023a2b9059089d301ea931efbd2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1754,7 +1754,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, if (size_read < 0) { netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n", __func__, size_read); - return 0; + return size_read; } i += size_read; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2f6c3a5813ed18820ab3c620959194f41b140f5b..16e98ac47624c7c7496f8418c1db082c6f5d7add 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5024,9 +5024,13 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) } if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) { - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; - netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; } if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 22926c684fa9e4132fe71d607c54bd767d95a803..59dc746b71888010dbc17227c421645100e2b256 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -271,8 +271,8 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, if (unlikely(!dma_info->page)) return -ENOMEM; - dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0, - PAGE_SIZE, rq->buff.map_dir); + dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE, + rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC); if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { page_pool_recycle_direct(rq->page_pool, dma_info->page); dma_info->page = NULL; @@ -293,7 +293,8 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq, void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { - dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir); + 
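+	/* DMA_ATTR_SKIP_CPU_SYNC drops the implicit full-page sync on
+	 * map/unmap; the RX path is assumed to sync just the fragments it
+	 * actually passes up, and a recycled page may still hold live data.
+	 */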
dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir, + DMA_ATTR_SKIP_CPU_SYNC); } void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, @@ -979,7 +980,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, } /* True when explicitly set via priv flag, or XDP prog is loaded */ - if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) || + get_cqe_tls_offload(cqe)) goto csum_unnecessary; /* CQE csum doesn't cover padding octets in short ethernet diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index e06b1ba7d23498bd72cb7e895e2365d4fe2c4b14..ccc7dd3e738a48ede918113f769a0bed7b84300b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2037,10 +2037,6 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) return false; - if (mlx5_core_is_ecpf_esw_manager(esw->dev) || - mlx5_ecpf_vport_exists(esw->dev)) - return false; - return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0ff034b0866e2e9da7334fee35531f7f9a1830a3..55772f0cbbf8fef911b7e233605b0bd9564fd286 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2034,6 +2034,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) fte->node.del_hw_func = NULL; up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); + } else { + up_write_ref_node(&fte->node, false); } kfree(handle); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index ee710ce0079504466f34fd9910c481a0537fadd9..9b472e793ee361acf907286584842d993c8db075 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -131,7 +131,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; - del_timer(&fw_reset->timer); + del_timer_sync(&fw_reset->timer); } static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index 15c3a9058e72876fda3ac796fa78bf98dbc42255..c04413f449c509ac7e387b2a556d2ffa86176dfb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -123,6 +123,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, return; } + /* Handle multipath entry with lower priority value */ + if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority) + return; + /* Handle add/replace event */ nhs = fib_info_num_path(fi); if (nhs == 1) { @@ -132,12 +136,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev); if (i < 0) - i = MLX5_LAG_NORMAL_AFFINITY; - else - ++i; + return; + i++; mlx5_lag_set_port_affinity(ldev, i); } + + mp->mfi = fi; return; } @@ -265,10 +270,8 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, fen_info = container_of(info, struct fib_entry_notifier_info, info); fi = fen_info->fi; - if (fi->nh) { - NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not 
supported"); - return notifier_from_errno(-EINVAL); - } + if (fi->nh) + return NOTIFY_DONE; fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev; if (fib_dev != ldev->pf[MLX5_LAG_P1].netdev && fib_dev != ldev->pf[MLX5_LAG_P2].netdev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index 947f346bdc2d6ef373b6d1fd7ee1a824b84934fc..77c6287c90d55063cb61b49d8c2b298312f27bc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -292,7 +292,7 @@ static int create_chain_restore(struct fs_chain *chain) { struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch; - char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)]; + u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; struct mlx5_fs_chains *chains = chain->chains; enum mlx5e_tc_attr_to_reg chain_to_reg; struct mlx5_modify_hdr *mod_hdr; diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 5ffdfb532cb7f9e4a16f1423283648e8f85bce1a..91f68fb0b420ae5eee729cf3bdb094b20fec4618 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -905,6 +905,18 @@ static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core, */ MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8); +enum mlxsw_cmd_mbox_sw2hw_dq_sdq_lp { + MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE, + MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE, +}; + +/* cmd_mbox_sw2hw_dq_sdq_lp + * SDQ local Processing + * 0: local processing by wqe.lp + * 1: local processing (ignoring wqe.lp) + */ +MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_lp, 0x00, 23, 1); + /* cmd_mbox_sw2hw_dq_sdq_tclass * SDQ: CPU Egress TClass * RDQ: Reserved diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index 939b692ffc335ecad1265a002ae674926cf6a37a..ce843ea9146466d402a97f4e249aec548bcd06ed 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client, return 0; errout: + mutex_destroy(&mlxsw_i2c->cmd.lock); i2c_set_clientdata(client, NULL); return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index ffaeda75eec4226d52f9baf32fe20556e4d5c6e7..dbb16ce25bdf32b92d8466adbd42801e33af17d8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -285,6 +285,7 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { int tclass; + int lp; int i; int err; @@ -292,9 +293,12 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, q->consumer_counter = 0; tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC : MLXSW_PCI_SDQ_CTL_TC; + lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE : + MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE; /* Set CQ of same number of this SDQ. 
*/ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num); + mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp); mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { @@ -1599,7 +1603,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, wqe = elem_info->elem; mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */ - mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad); + mlxsw_pci_wqe_lp_set(wqe, 0); mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET); err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, @@ -1900,6 +1904,7 @@ int mlxsw_pci_driver_register(struct pci_driver *pci_driver) { pci_driver->probe = mlxsw_pci_probe; pci_driver->remove = mlxsw_pci_remove; + pci_driver->shutdown = mlxsw_pci_remove; return pci_register_driver(pci_driver); } EXPORT_SYMBOL(mlxsw_pci_driver_register); diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig index 42bc014136fe30644391e55e565feb1fbc05179d..9ceb7e1fb1696f8ff0f22928090590f17f33a8a1 100644 --- a/drivers/net/ethernet/micrel/Kconfig +++ b/drivers/net/ethernet/micrel/Kconfig @@ -37,6 +37,7 @@ config KS8851 config KS8851_MLL tristate "Micrel KS8851 MLL" depends on HAS_IOMEM + depends on PTP_1588_CLOCK_OPTIONAL select MII select CRC32 select EEPROM_93CX6 diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 52401915828a1287d70edf567f01974117d69907..a06466ecca12a90055016c3e1f6adafbea314f42 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -848,12 +848,11 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) } EXPORT_SYMBOL(ocelot_get_strings); +/* Caller must hold &ocelot->stats_lock */ static void ocelot_update_stats(struct ocelot *ocelot) { int i, j; - mutex_lock(&ocelot->stats_lock); - for (i = 0; i < ocelot->num_phys_ports; i++) { /* Configure the port to read the stats from */ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG); @@ -872,8 +871,6 @@ static void ocelot_update_stats(struct ocelot *ocelot) ~(u64)U32_MAX) + val; } } - - mutex_unlock(&ocelot->stats_lock); } static void ocelot_check_stats_work(struct work_struct *work) @@ -882,7 +879,9 @@ static void ocelot_check_stats_work(struct work_struct *work) struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work); + mutex_lock(&ocelot->stats_lock); ocelot_update_stats(ocelot); + mutex_unlock(&ocelot->stats_lock); queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, OCELOT_STATS_CHECK_DELAY); @@ -892,12 +891,16 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data) { int i; + mutex_lock(&ocelot->stats_lock); + /* check and update now */ ocelot_update_stats(ocelot); /* Copy all counters */ for (i = 0; i < ocelot->num_stats; i++) *data++ = ocelot->stats[port * ocelot->num_stats + i]; + + mutex_unlock(&ocelot->stats_lock); } EXPORT_SYMBOL(ocelot_get_ethtool_stats); diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 3655503352928d9a7d368003ad530df20c3b1015..c4c4649b2088e17653f3ab9267c71316e340a3d3 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -54,6 +54,12 @@ static int ocelot_chain_to_block(int chain, bool ingress) */ static int ocelot_chain_to_lookup(int chain) { + /* Backwards compatibility with older, single-chain tc-flower + * offload support in Ocelot + */ + if (chain == 
0) + return 0; + return (chain / VCAP_LOOKUP) % 10; } @@ -62,7 +68,15 @@ static int ocelot_chain_to_lookup(int chain) */ static int ocelot_chain_to_pag(int chain) { - int lookup = ocelot_chain_to_lookup(chain); + int lookup; + + /* Backwards compatibility with older, single-chain tc-flower + * offload support in Ocelot + */ + if (chain == 0) + return 0; + + lookup = ocelot_chain_to_lookup(chain); /* calculate PAG value as chain index relative to the first PAG */ return chain - VCAP_IS2_CHAIN(lookup, 0); @@ -462,13 +476,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, return -EOPNOTSUPP; } - if (filter->block_id == VCAP_IS1 && - !is_zero_ether_addr(match.mask->dst)) { - NL_SET_ERR_MSG_MOD(extack, - "Key type S1_NORMAL cannot match on destination MAC"); - return -EOPNOTSUPP; - } - /* The hw support mac matches only for MAC_ETYPE key, * therefore if other matches(port, tcp flags, etc) are added * then just bail out @@ -483,6 +490,14 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress, return -EOPNOTSUPP; flow_rule_match_eth_addrs(rule, &match); + + if (filter->block_id == VCAP_IS1 && + !is_zero_ether_addr(match.mask->dst)) { + NL_SET_ERR_MSG_MOD(extack, + "Key type S1_NORMAL cannot match on destination MAC"); + return -EOPNOTSUPP; + } + filter->key_type = OCELOT_VCAP_KEY_ETYPE; ether_addr_copy(filter->key.etype.dmac.value, match.key->dst); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index b5c4a64bc02570695a1e01439a34bf55755f7ae0..30763e9666331db59702ae7d31a99d06366d7e54 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -2901,11 +2901,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, status = myri10ge_xmit(curr, dev); if (status != 0) { dev_kfree_skb_any(curr); - if (segs != NULL) { - curr = segs; - segs = next; + skb_list_walk_safe(next, curr, next) { curr->next = NULL; - dev_kfree_skb_any(segs); + dev_kfree_skb_any(curr); } goto drop; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h index 4268a7e0f344202a044f794a677e1b50231c0891..33f9058ed32e29ab5ef27633d9e89950a9afdd6c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -13,8 +13,8 @@ */ #define NFP_BPF_SCALAR_VALUE 1 #define NFP_BPF_MAP_VALUE 4 -#define NFP_BPF_STACK 6 -#define NFP_BPF_PACKET_DATA 8 +#define NFP_BPF_STACK 5 +#define NFP_BPF_PACKET_DATA 7 enum bpf_cap_tlv_type { NFP_BPF_CAP_TYPE_FUNC = 1, diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index d19c02e991145bed556f3500c6007e253057881e..088ceac07b80556be5b85ea5b1ad6ccb4f05391a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, int port, bool mod) { struct nfp_flower_priv *priv = app->priv; - int ida_idx = NFP_MAX_MAC_INDEX, err; struct nfp_tun_offloaded_mac *entry; + int ida_idx = -1, err; u16 nfp_mac_idx = 0; entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); @@ -997,7 +997,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, err_free_entry: kfree(entry); err_free_ida: - if (ida_idx != NFP_MAX_MAC_INDEX) + if (ida_idx != -1) ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); return 
@@ -1011,6 +1011,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
 	struct nfp_flower_repr_priv *repr_priv;
 	struct nfp_tun_offloaded_mac *entry;
 	struct nfp_repr *repr;
+	u16 nfp_mac_idx;
 	int ida_idx;
 
 	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
@@ -1029,8 +1030,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
 		entry->bridge_count--;
 
 		if (!entry->bridge_count && entry->ref_count) {
-			u16 nfp_mac_idx;
-
 			nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
 			if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
 						     false)) {
@@ -1046,7 +1045,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
 
 	/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
 	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
-		u16 nfp_mac_idx;
 		int port, err;
 
 		repr_priv = list_first_entry(&entry->repr_list,
@@ -1074,8 +1072,14 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
 					    &entry->ht_node,
 					    offloaded_macs_params));
+
+	if (nfp_flower_is_supported_bridge(netdev))
+		nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+	else
+		nfp_mac_idx = entry->index;
+
 	/* If MAC has global ID then extract and free the ida entry. */
-	if (nfp_tunnel_is_mac_idx_global(entry->index)) {
+	if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
 		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
 		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
 	}
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 9e098e40fb1c690220ee8ab0c9daa8ba2bbf6877..a9a9bf2e065a5635880392e0c4099eed982bb381 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1468,6 +1468,7 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct netdata_local *pldat;
+	int ret;
 
 	if (device_may_wakeup(&pdev->dev))
 		disable_irq_wake(ndev->irq);
@@ -1477,7 +1478,9 @@ static int lpc_eth_drv_resume(struct platform_device *pdev)
 			pldat = netdev_priv(ndev);
 
 			/* Enable interface clock */
-			clk_enable(pldat->clk);
+			ret = clk_enable(pldat->clk);
+			if (ret)
+				return ret;
 
 			/* Reset and initialize */
 			__lpc_eth_reset(pldat);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index d355676f6c160d685239d52565cf96075297eae2..e14869a2e24a517d75e5b1e0afba00d4e93d7856 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -311,10 +311,10 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
 
 static void ionic_dev_cmd_clean(struct ionic *ionic)
 {
-	union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+	struct ionic_dev *idev = &ionic->idev;
 
-	iowrite32(0, &regs->doorbell);
-	memset_io(&regs->cmd, 0, sizeof(regs->cmd));
+	iowrite32(0, &idev->dev_cmd_regs->doorbell);
+	memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
 }
 
 int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
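The lpc_eth.c resume hunk above stops discarding the return value of clk_enable(), which can fail; resetting an unclocked MAC would otherwise touch dead hardware. A hedged sketch of the pattern, with a hypothetical private structure:

#include <linux/clk.h>
#include <linux/errno.h>

struct example_priv {
	struct clk *clk;
};

static int example_resume(struct example_priv *priv)
{
	int ret;

	/* The clock must be running before the controller is touched. */
	ret = clk_enable(priv->clk);
	if (ret)
		return ret;	/* propagate instead of resetting unclocked HW */

	/* ... reset and reinitialize the controller here ... */
	return 0;
}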
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index b8dc5c4591ef52e212776aeece736303092c4ff6..d6b79caf9d8e5f48725690ae77d36d10619193b1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -3778,11 +3778,11 @@ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
 	return found;
 }
 
-static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
-			     u16 vfid,
-			     struct qed_mcp_link_params *p_params,
-			     struct qed_mcp_link_state *p_link,
-			     struct qed_mcp_link_capabilities *p_caps)
+static int qed_iov_get_link(struct qed_hwfn *p_hwfn,
+			    u16 vfid,
+			    struct qed_mcp_link_params *p_params,
+			    struct qed_mcp_link_state *p_link,
+			    struct qed_mcp_link_capabilities *p_caps)
 {
 	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, vfid,
@@ -3790,7 +3790,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
 	struct qed_bulletin_content *p_bulletin;
 
 	if (!p_vf)
-		return;
+		return -EINVAL;
 
 	p_bulletin = p_vf->bulletin.p_virt;
 
@@ -3800,6 +3800,7 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
 		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
 	if (p_caps)
 		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+	return 0;
 }
 
 static int
@@ -4658,6 +4659,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
 	struct qed_public_vf_info *vf_info;
 	struct qed_mcp_link_state link;
 	u32 tx_rate;
+	int ret;
 
 	/* Sanitize request */
 	if (IS_VF(cdev))
@@ -4671,7 +4673,9 @@ static int qed_get_vf_config(struct qed_dev *cdev,
 
 	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
 
-	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+	ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+	if (ret)
+		return ret;
 
 	/* Fill information about VF */
 	ivi->vf = vf_id;
@@ -4687,6 +4691,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
 	tx_rate = vf_info->tx_rate;
 	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
 	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+	ivi->trusted = vf_info->is_trusted_request;
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 72a38d53d33f68c497c3f18b14c7874c6da713f4..e2a5a6a373cbe613ad06ef5d94196efc8174f5c7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -513,6 +513,9 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
 						  p_iov->bulletin.size,
 						  &p_iov->bulletin.phys,
 						  GFP_KERNEL);
+	if (!p_iov->bulletin.p_virt)
+		goto free_pf2vf_reply;
+
 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
 		   "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
 		   p_iov->bulletin.p_virt,
@@ -552,6 +555,10 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
 
 	return rc;
 
+free_pf2vf_reply:
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+			  sizeof(union pfvf_tlvs),
+			  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
 free_vf2pf_request:
 	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 			  sizeof(union vfpf_tlvs),
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 21c906200e791a538c6be100d6ecc7a112cce1c1..d210632676d32ff97e8df7326dc2e336adb771ed 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -752,6 +752,9 @@ qede_build_skb(struct qede_rx_queue *rxq,
 	buf = page_address(bd->data) + bd->page_offset;
 	skb = build_skb(buf, rxq->rx_buf_seg_size);
 
+	if (unlikely(!skb))
+		return NULL;
+
 	skb_reserve(skb, pad);
 	skb_put(skb, len);
 
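The qed_vf.c hunk above adds the missing NULL check after dma_alloc_coherent() and a new unwind label so the earlier pf2vf_reply buffer is released on failure. A sketch of that goto-based unwind convention; the function and label names here are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_alloc(struct device *dev, void **req, dma_addr_t *req_phys,
			 void **reply, dma_addr_t *reply_phys)
{
	*req = dma_alloc_coherent(dev, PAGE_SIZE, req_phys, GFP_KERNEL);
	if (!*req)
		return -ENOMEM;

	*reply = dma_alloc_coherent(dev, PAGE_SIZE, reply_phys, GFP_KERNEL);
	if (!*reply)
		goto free_req;	/* later failures free earlier buffers */

	return 0;

free_req:
	dma_free_coherent(dev, PAGE_SIZE, *req, *req_phys);
	return -ENOMEM;
}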
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index 5d79ee4370bcd5b89e0ca7cbc8b2e7b32c8c4d7b..7519773eaca6ee5caa29f40c6f8aa891464760bd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -51,7 +51,7 @@ static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
 	if (dcb && dcb->ops->get_hw_capability)
 		return dcb->ops->get_hw_capability(dcb);
 
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
@@ -65,7 +65,7 @@ static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
 	if (dcb && dcb->ops->attach)
 		return dcb->ops->attach(dcb);
 
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static inline int
@@ -74,7 +74,7 @@ qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
 	if (dcb && dcb->ops->query_hw_capability)
 		return dcb->ops->query_hw_capability(dcb, buf);
 
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
@@ -89,7 +89,7 @@ qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
 	if (dcb && dcb->ops->query_cee_param)
 		return dcb->ops->query_cee_param(dcb, buf, type);
 
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
@@ -97,7 +97,7 @@ static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
 	if (dcb && dcb->ops->get_cee_cfg)
 		return dcb->ops->get_cee_cfg(dcb);
 
-	return 0;
+	return -EOPNOTSUPP;
 }
 
 static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
diff --git a/drivers/net/ethernet/ramaxel/Kconfig b/drivers/net/ethernet/ramaxel/Kconfig
deleted file mode 100644
index 987c7eb4880bf50a7ac373e2a74c121cc0fc5b0f..0000000000000000000000000000000000000000
--- a/drivers/net/ethernet/ramaxel/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Ramaxel driver configuration
-#
-
-config NET_VENDOR_RAMAXEL
-	bool "Ramaxel devices"
-	default y
-	help
-	  If you have a network (Ethernet) card belonging to this class, say Y.
-	  Note that the answer to this question doesn't directly affect the
-	  kernel: saying N will just cause the configurator to skip all
-	  the questions about Ramaxel cards. If you say Y, you will be asked
-	  for your specific card in the following questions.
-
-if NET_VENDOR_RAMAXEL
-
-	source "drivers/net/ethernet/ramaxel/spnic/Kconfig"
-
-endif # NET_VENDOR_RAMAXEL
diff --git a/drivers/net/ethernet/ramaxel/Makefile b/drivers/net/ethernet/ramaxel/Makefile
deleted file mode 100644
index 087f570c2257796554b00724b7dbd911271763f6..0000000000000000000000000000000000000000
--- a/drivers/net/ethernet/ramaxel/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Ramaxel device drivers.
-#
-
-obj-$(CONFIG_SPNIC) += spnic/
\ No newline at end of file
diff --git a/drivers/net/ethernet/ramaxel/spnic/Kconfig b/drivers/net/ethernet/ramaxel/spnic/Kconfig
deleted file mode 100644
index d0cb81041e8c7b2c98a783f1120aa0267d352fe7..0000000000000000000000000000000000000000
--- a/drivers/net/ethernet/ramaxel/spnic/Kconfig
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Ramaxel SPNIC driver configuration
-#
-
-config SPNIC
-	tristate "Ramaxel PCIE Network Interface Card"
-	default n
-	depends on PCI_MSI && NUMA && PCI_IOV && (X86 || ARM64)
-	help
-	  This driver supports Ramaxel PCIE Ethernet cards.
-	  To compile this driver as part of the kernel, choose Y here.
-	  If unsure, choose N.
-	  The default is N.
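The qlcnic_dcb.h hunks above convert the stubbed DCB callbacks from returning 0 to returning -EOPNOTSUPP, so a caller can tell "operation unsupported" apart from "operation succeeded". A minimal sketch of the optional-ops convention, with hypothetical type and function names:

#include <linux/errno.h>

struct example_dcb_ops {
	int (*get_cee_cfg)(void *priv);
};

static inline int example_get_cee_cfg(const struct example_dcb_ops *ops,
				      void *priv)
{
	if (ops && ops->get_cee_cfg)
		return ops->get_cee_cfg(priv);

	return -EOPNOTSUPP;	/* 0 here would silently fake success */
}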
diff --git a/drivers/net/ethernet/ramaxel/spnic/Makefile b/drivers/net/ethernet/ramaxel/spnic/Makefile deleted file mode 100644 index 207e1d9c431ac5ea51a2ab6b60644089ff1b0752..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/Makefile +++ /dev/null @@ -1,39 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_SPNIC) += spnic.o - -subdir-ccflags-y += -I$(srctree)/$(src)/hw - -spnic-objs := hw/sphw_common.o \ - hw/sphw_hwif.o \ - hw/sphw_eqs.o \ - hw/sphw_mbox.o \ - hw/sphw_api_cmd.o \ - hw/sphw_mgmt.o \ - hw/sphw_wq.o \ - hw/sphw_cmdq.o \ - hw/sphw_prof_adap.o \ - hw/sphw_hw_cfg.o \ - hw/sphw_hw_comm.o \ - hw/sphw_hwdev.o \ - spnic_sriov.o \ - spnic_lld.o \ - spnic_dev_mgmt.o \ - spnic_main.o \ - spnic_tx.o \ - spnic_rx.o \ - spnic_rss.o \ - spnic_ntuple.o \ - spnic_dcb.o \ - spnic_ethtool.o \ - spnic_ethtool_stats.o \ - spnic_dbg.o \ - spnic_irq.o \ - spnic_filter.o \ - spnic_netdev_ops.o \ - spnic_nic_cfg.o \ - spnic_mag_cfg.o \ - spnic_nic_cfg_vf.o \ - spnic_rss_cfg.o \ - spnic_nic_event.o \ - spnic_nic_io.o \ - spnic_nic_dbg.o diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c deleted file mode 100644 index b459ca322515971d52c45d05447ebcfdd56b5d7d..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c +++ /dev/null @@ -1,1165 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_hwdev.h" -#include "sphw_csr.h" -#include "sphw_hwif.h" -#include "sphw_api_cmd.h" - -#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U - -#define API_CMD_CELL_DESC_SIZE 8 -#define API_CMD_CELL_DATA_ADDR_SIZE 8 - -#define API_CHAIN_NUM_CELLS 32 -#define API_CHAIN_CELL_SIZE 128 -#define API_CHAIN_RSP_DATA_SIZE 128 - -#define API_CMD_CELL_WB_ADDR_SIZE 8 - -#define API_CHAIN_CELL_ALIGNMENT 8 - -#define API_CMD_TIMEOUT 10000 -#define API_CMD_STATUS_TIMEOUT 100000 - -#define API_CMD_BUF_SIZE 2048ULL - -#define API_CMD_NODE_ALIGN_SIZE 512ULL -#define API_PAYLOAD_ALIGN_SIZE 64ULL - -#define API_CHAIN_RESP_ALIGNMENT 128ULL - -#define COMPLETION_TIMEOUT_DEFAULT 1000UL -#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U - -#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val))) - -#define READ_API_CMD_PRIV_DATA(id, token) ((((u32)(id)) << 16) + (token)) -#define WRITE_API_CMD_PRIV_DATA(id) (((u8)(id)) << 16) - -#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) - -#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2) -#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3) - -enum api_cmd_data_format { - SGL_DATA = 1, -}; - -enum api_cmd_type { - API_CMD_WRITE_TYPE = 0, - API_CMD_READ_TYPE = 1, -}; - -enum api_cmd_bypass { - NOT_BYPASS = 0, - BYPASS = 1, -}; - -enum api_cmd_resp_aeq { - NOT_TRIGGER = 0, - TRIGGER = 1, -}; - -enum api_cmd_chn_code { - APICHN_0 = 0, -}; - -enum api_cmd_chn_rsvd { - APICHN_VALID = 0, - APICHN_INVALID = 1, -}; - -#define API_DESC_LEN 7 - -static u8 xor_chksum_set(void *data) -{ - int idx; - u8 checksum = 0; - u8 *val = data; - - for (idx = 0; idx < API_DESC_LEN; idx++) - checksum ^= val[idx]; - - return checksum; -} - -static void set_prod_idx(struct sphw_api_cmd_chain *chain) -{ - enum sphw_api_cmd_chain_type chain_type = 
chain->chain_type; - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 hw_prod_idx_addr = SPHW_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); - u32 prod_idx = chain->prod_idx; - - sphw_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx); -} - -static u32 get_hw_cons_idx(struct sphw_api_cmd_chain *chain) -{ - u32 addr, val; - - addr = SPHW_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); - val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); - - return SPHW_API_CMD_STATUS_GET(val, CONS_IDX); -} - -static void dump_api_chain_reg(struct sphw_api_cmd_chain *chain) -{ - void *dev = chain->hwdev->dev_hdl; - u32 addr, val; - - addr = SPHW_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); - val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); - - sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", - chain->chain_type, SPHW_API_CMD_STATUS_GET(val, CPLD_ERR), - SPHW_API_CMD_STATUS_GET(val, CHKSUM_ERR), - SPHW_API_CMD_STATUS_GET(val, FSM)); - - sdk_err(dev, "Chain hw current ci: 0x%x\n", - SPHW_API_CMD_STATUS_GET(val, CONS_IDX)); - - addr = SPHW_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type); - val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); - sdk_err(dev, "Chain hw current pi: 0x%x\n", val); -} - -/** - * chain_busy - check if the chain is still processing last requests - * @chain: chain to check - **/ -static int chain_busy(struct sphw_api_cmd_chain *chain) -{ - void *dev = chain->hwdev->dev_hdl; - struct sphw_api_cmd_cell_ctxt *ctxt; - u64 resp_header; - - ctxt = &chain->cell_ctxt[chain->prod_idx]; - - switch (chain->chain_type) { - case SPHW_API_CMD_MULTI_READ: - case SPHW_API_CMD_POLL_READ: - resp_header = be64_to_cpu(ctxt->resp->header); - if (ctxt->status && - !SPHW_API_CMD_RESP_HEADER_VALID(resp_header)) { - sdk_err(dev, "Context(0x%x) busy!, pi: %u, resp_header: 0x%08x%08x\n", - ctxt->status, chain->prod_idx, - upper_32_bits(resp_header), - lower_32_bits(resp_header)); - dump_api_chain_reg(chain); - return -EBUSY; - } - break; - case SPHW_API_CMD_POLL_WRITE: - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - chain->cons_idx = get_hw_cons_idx(chain); - - if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) { - sdk_err(dev, "API CMD chain %d is busy, cons_idx = %u, prod_idx = %u\n", - chain->chain_type, chain->cons_idx, - chain->prod_idx); - dump_api_chain_reg(chain); - return -EBUSY; - } - break; - default: - sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type); - return -EINVAL; - } - - return 0; -} - -/** - * get_cell_data_size - get the data size of specific cell type - * @type: chain type - **/ -static u16 get_cell_data_size(enum sphw_api_cmd_chain_type type) -{ - u16 cell_data_size = 0; - - switch (type) { - case SPHW_API_CMD_POLL_READ: - cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + - API_CMD_CELL_WB_ADDR_SIZE + - API_CMD_CELL_DATA_ADDR_SIZE, - API_CHAIN_CELL_ALIGNMENT); - break; - - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - case SPHW_API_CMD_POLL_WRITE: - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + - API_CMD_CELL_DATA_ADDR_SIZE, - API_CHAIN_CELL_ALIGNMENT); - break; - default: - break; - } - - return cell_data_size; -} - -/** - * prepare_cell_ctrl - prepare the ctrl of the cell for the command - * @cell_ctrl: the control of the cell to set the control into it - * @cell_len: the size of the cell - **/ -static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) -{ - u64 ctrl; - u8 chksum; - - ctrl = SPHW_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) | - 
SPHW_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | - SPHW_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); - - chksum = xor_chksum_set(&ctrl); - - ctrl |= SPHW_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); - - /* The data in the HW should be in Big Endian Format */ - *cell_ctrl = cpu_to_be64(ctrl); -} - -/** - * prepare_api_cmd - prepare API CMD command - * @chain: chain for the command - * @cell: the cell of the command - * @node_id: destination node on the card that will receive the command - * @cmd: command data - * @cmd_size: the command size - **/ -static void prepare_api_cmd(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_cell *cell, u8 node_id, - const void *cmd, u16 cmd_size) -{ - struct sphw_api_cmd_cell_ctxt *cell_ctxt; - u32 priv; - - cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; - - switch (chain->chain_type) { - case SPHW_API_CMD_POLL_READ: - priv = READ_API_CMD_PRIV_DATA(chain->chain_type, - cell_ctxt->saved_prod_idx); - cell->desc = SPHW_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | - SPHW_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) | - SPHW_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | - SPHW_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | - SPHW_API_CMD_DESC_SET(priv, PRIV_DATA); - break; - case SPHW_API_CMD_POLL_WRITE: - priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); - cell->desc = SPHW_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | - SPHW_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | - SPHW_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | - SPHW_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | - SPHW_API_CMD_DESC_SET(priv, PRIV_DATA); - break; - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); - cell->desc = SPHW_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | - SPHW_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | - SPHW_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) | - SPHW_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) | - SPHW_API_CMD_DESC_SET(priv, PRIV_DATA); - break; - default: - sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n", - chain->chain_type); - return; - } - - cell->desc |= SPHW_API_CMD_DESC_SET(APICHN_0, APICHN_CODE) | - SPHW_API_CMD_DESC_SET(APICHN_VALID, APICHN_RSVD); - - cell->desc |= SPHW_API_CMD_DESC_SET(node_id, DEST) | - SPHW_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); - - cell->desc |= SPHW_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), - XOR_CHKSUM); - - /* The data in the HW should be in Big Endian Format */ - cell->desc = cpu_to_be64(cell->desc); - - memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size); -} - -/** - * prepare_cell - prepare cell ctrl and cmd in the current producer cell - * @chain: chain for the command - * @node_id: destination node on the card that will receive the command - * @cmd: command data - * @cmd_size: the command size - * Return: 0 - success, negative - failure - **/ -static void prepare_cell(struct sphw_api_cmd_chain *chain, u8 node_id, - const void *cmd, u16 cmd_size) -{ - struct sphw_api_cmd_cell *curr_node; - u16 cell_size; - - curr_node = chain->curr_node; - - cell_size = get_cell_data_size(chain->chain_type); - - prepare_cell_ctrl(&curr_node->ctrl, cell_size); - prepare_api_cmd(chain, curr_node, node_id, cmd, cmd_size); -} - -static inline void cmd_chain_prod_idx_inc(struct sphw_api_cmd_chain *chain) -{ - chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1); -} - -static void issue_api_cmd(struct sphw_api_cmd_chain *chain) -{ - set_prod_idx(chain); -} - -/** - * api_cmd_status_update - update the status of the chain - * @chain: chain to update - **/ -static void 
api_cmd_status_update(struct sphw_api_cmd_chain *chain) -{ - struct sphw_api_cmd_status *wb_status; - enum sphw_api_cmd_chain_type chain_type; - u64 status_header; - u32 buf_desc; - - wb_status = chain->wb_status; - - buf_desc = be32_to_cpu(wb_status->buf_desc); - if (SPHW_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR)) - return; - - status_header = be64_to_cpu(wb_status->header); - chain_type = SPHW_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); - if (chain_type >= SPHW_API_CMD_MAX) - return; - - if (chain_type != chain->chain_type) - return; - - chain->cons_idx = SPHW_API_CMD_STATUS_GET(buf_desc, CONS_IDX); -} - -static enum sphw_wait_return wait_for_status_poll_handler(void *priv_data) -{ - struct sphw_api_cmd_chain *chain = priv_data; - - if (!chain->hwdev->chip_present_flag) - return WAIT_PROCESS_ERR; - - api_cmd_status_update(chain); - /* SYNC API CMD cmd should start after prev cmd finished */ - if (chain->cons_idx == chain->prod_idx) - return WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -/** - * wait_for_status_poll - wait for write to mgmt command to complete - * @chain: the chain of the command - * Return: 0 - success, negative - failure - **/ -static int wait_for_status_poll(struct sphw_api_cmd_chain *chain) -{ - return sphw_wait_for_timeout(chain, wait_for_status_poll_handler, - API_CMD_STATUS_TIMEOUT, 100); -} - -static void copy_resp_data(struct sphw_api_cmd_cell_ctxt *ctxt, void *ack, - u16 ack_size) -{ - struct sphw_api_cmd_resp_fmt *resp = ctxt->resp; - - memcpy(ack, &resp->resp_data, ack_size); - ctxt->status = 0; -} - -static enum sphw_wait_return check_cmd_resp_handler(void *priv_data) -{ - struct sphw_api_cmd_cell_ctxt *ctxt = priv_data; - u64 resp_header; - u8 resp_status; - - resp_header = be64_to_cpu(ctxt->resp->header); - rmb(); /* read the latest header */ - - if (SPHW_API_CMD_RESP_HEADER_VALID(resp_header)) { - resp_status = SPHW_API_CMD_RESP_HEAD_GET(resp_header, STATUS); - if (resp_status) { - pr_err("Api chain response data err, status: %u\n", - resp_status); - return WAIT_PROCESS_ERR; - } - - return WAIT_PROCESS_CPL; - } - - return WAIT_PROCESS_WAITING; -} - -/** - * prepare_cell - polling for respense data of the read api-command - * @chain: pointer to api cmd chain - * - * Return: 0 - success, negative - failure - **/ -static int wait_for_resp_polling(struct sphw_api_cmd_cell_ctxt *ctxt) -{ - return sphw_wait_for_timeout(ctxt, check_cmd_resp_handler, - POLLING_COMPLETION_TIMEOUT_DEFAULT, - USEC_PER_MSEC); -} - -/** - * wait_for_api_cmd_completion - wait for command to complete - * @chain: chain for the command - * Return: 0 - success, negative - failure - **/ -static int wait_for_api_cmd_completion(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_cell_ctxt *ctxt, - void *ack, u16 ack_size) -{ - void *dev = chain->hwdev->dev_hdl; - int err = 0; - - switch (chain->chain_type) { - case SPHW_API_CMD_POLL_READ: - err = wait_for_resp_polling(ctxt); - if (!err) - copy_resp_data(ctxt, ack, ack_size); - else - sdk_err(dev, "API CMD poll response timeout\n"); - break; - case SPHW_API_CMD_POLL_WRITE: - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - err = wait_for_status_poll(chain); - if (err) { - sdk_err(dev, "API CMD Poll status timeout, chain type: %d\n", - chain->chain_type); - break; - } - break; - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - /* No need to wait */ - break; - default: - sdk_err(dev, "Unknown API CMD Chain type: %d\n", - chain->chain_type); - err = -EINVAL; - break; - } - - if (err) - dump_api_chain_reg(chain); - - return err; -} - -static 
inline void update_api_cmd_ctxt(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_cell_ctxt *ctxt) -{ - ctxt->status = 1; - ctxt->saved_prod_idx = chain->prod_idx; - if (ctxt->resp) { - ctxt->resp->header = 0; - - /* make sure "header" was cleared */ - wmb(); - } -} - -/** - * api_cmd - API CMD command - * @chain: chain for the command - * @node_id: destination node on the card that will receive the command - * @cmd: command data - * @size: the command size - * Return: 0 - success, negative - failure - **/ -static int api_cmd(struct sphw_api_cmd_chain *chain, u8 node_id, - const void *cmd, u16 cmd_size, void *ack, u16 ack_size) -{ - struct sphw_api_cmd_cell_ctxt *ctxt = NULL; - - if (chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) - spin_lock(&chain->async_lock); - else - down(&chain->sem); - ctxt = &chain->cell_ctxt[chain->prod_idx]; - if (chain_busy(chain)) { - if (chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) - spin_unlock(&chain->async_lock); - else - up(&chain->sem); - return -EBUSY; - } - update_api_cmd_ctxt(chain, ctxt); - - prepare_cell(chain, node_id, cmd, cmd_size); - - cmd_chain_prod_idx_inc(chain); - - wmb(); /* issue the command */ - - issue_api_cmd(chain); - - /* incremented prod idx, update ctxt */ - - chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr; - if (chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) - spin_unlock(&chain->async_lock); - else - up(&chain->sem); - - return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size); -} - -/** - * sphw_api_cmd_write - Write API CMD command - * @chain: chain for write command - * @node_id: destination node on the card that will receive the command - * @cmd: command data - * @size: the command size - * Return: 0 - success, negative - failure - **/ -int sphw_api_cmd_write(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size) -{ - /* Verify the chain type */ - return api_cmd(chain, node_id, cmd, size, NULL, 0); -} - -/** - * sphw_api_cmd_read - Read API CMD command - * @chain: chain for read command - * @node_id: destination node on the card that will receive the command - * @cmd: command data - * @size: the command size - * Return: 0 - success, negative - failure - **/ -int sphw_api_cmd_read(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size, - void *ack, u16 ack_size) -{ - return api_cmd(chain, node_id, cmd, size, ack, ack_size); -} - -static enum sphw_wait_return check_chain_restart_handler(void *priv_data) -{ - struct sphw_api_cmd_chain *cmd_chain = priv_data; - u32 reg_addr, val; - - reg_addr = SPHW_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); - val = sphw_hwif_read_reg(cmd_chain->hwdev->hwif, reg_addr); - if (!SPHW_API_CMD_CHAIN_REQ_GET(val, RESTART)) - return WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -/** - * api_cmd_hw_restart - restart the chain in the HW - * @chain: the API CMD specific chain to restart - **/ -static int api_cmd_hw_restart(struct sphw_api_cmd_chain *cmd_chain) -{ - struct sphw_hwif *hwif = cmd_chain->hwdev->hwif; - u32 reg_addr, val; - - /* Read Modify Write */ - reg_addr = SPHW_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); - val = sphw_hwif_read_reg(hwif, reg_addr); - - val = SPHW_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); - val |= SPHW_API_CMD_CHAIN_REQ_SET(1, RESTART); - - sphw_hwif_write_reg(hwif, reg_addr, val); - - return sphw_wait_for_timeout(cmd_chain, check_chain_restart_handler, - API_CMD_TIMEOUT, USEC_PER_MSEC); -} - -/** - * api_cmd_ctrl_init - set the control register of a 
chain - * @chain: the API CMD specific chain to set control register for - **/ -static void api_cmd_ctrl_init(struct sphw_api_cmd_chain *chain) -{ - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 reg_addr, ctrl; - u32 size; - - /* Read Modify Write */ - reg_addr = SPHW_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); - - size = (u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT); - - ctrl = sphw_hwif_read_reg(hwif, reg_addr); - - ctrl = SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & - SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); - - ctrl |= SPHW_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) | - SPHW_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE); - - sphw_hwif_write_reg(hwif, reg_addr, ctrl); -} - -/** - * api_cmd_set_status_addr - set the status address of a chain in the HW - * @chain: the API CMD specific chain to set status address for - **/ -static void api_cmd_set_status_addr(struct sphw_api_cmd_chain *chain) -{ - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 addr, val; - - addr = SPHW_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); - val = upper_32_bits(chain->wb_status_paddr); - sphw_hwif_write_reg(hwif, addr, val); - - addr = SPHW_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); - val = lower_32_bits(chain->wb_status_paddr); - sphw_hwif_write_reg(hwif, addr, val); -} - -/** - * api_cmd_set_num_cells - set the number cells of a chain in the HW - * @chain: the API CMD specific chain to set the number of cells for - **/ -static void api_cmd_set_num_cells(struct sphw_api_cmd_chain *chain) -{ - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 addr, val; - - addr = SPHW_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); - val = chain->num_cells; - sphw_hwif_write_reg(hwif, addr, val); -} - -/** - * api_cmd_head_init - set the head cell of a chain in the HW - * @chain: the API CMD specific chain to set the head for - **/ -static void api_cmd_head_init(struct sphw_api_cmd_chain *chain) -{ - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 addr, val; - - addr = SPHW_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); - val = upper_32_bits(chain->head_cell_paddr); - sphw_hwif_write_reg(hwif, addr, val); - - addr = SPHW_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); - val = lower_32_bits(chain->head_cell_paddr); - sphw_hwif_write_reg(hwif, addr, val); -} - -static enum sphw_wait_return check_chain_ready_handler(void *priv_data) -{ - struct sphw_api_cmd_chain *chain = priv_data; - u32 addr, val; - u32 hw_cons_idx; - - addr = SPHW_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); - val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); - hw_cons_idx = SPHW_API_CMD_STATUS_GET(val, CONS_IDX); - - /* wait for HW cons idx to be updated */ - if (hw_cons_idx == chain->cons_idx) - return WAIT_PROCESS_CPL; - return WAIT_PROCESS_WAITING; -} - -/** - * wait_for_ready_chain - wait for the chain to be ready - * @chain: the API CMD specific chain to wait for - * Return: 0 - success, negative - failure - **/ -static int wait_for_ready_chain(struct sphw_api_cmd_chain *chain) -{ - return sphw_wait_for_timeout(chain, check_chain_ready_handler, - API_CMD_TIMEOUT, USEC_PER_MSEC); -} - -/** - * api_cmd_chain_hw_clean - clean the HW - * @chain: the API CMD specific chain - **/ -static void api_cmd_chain_hw_clean(struct sphw_api_cmd_chain *chain) -{ - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 addr, ctrl; - - addr = SPHW_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); - - ctrl = sphw_hwif_read_reg(hwif, addr); - ctrl = SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) & - SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, 
XOR_ERR) & - SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & - SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & - SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); - - sphw_hwif_write_reg(hwif, addr, ctrl); -} - -/** - * api_cmd_chain_hw_init - initialize the chain in the HW - * @chain: the API CMD specific chain to initialize in HW - * Return: 0 - success, negative - failure - **/ -static int api_cmd_chain_hw_init(struct sphw_api_cmd_chain *chain) -{ - api_cmd_chain_hw_clean(chain); - - api_cmd_set_status_addr(chain); - - if (api_cmd_hw_restart(chain)) { - sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n"); - return -EBUSY; - } - - api_cmd_ctrl_init(chain); - api_cmd_set_num_cells(chain); - api_cmd_head_init(chain); - - return wait_for_ready_chain(chain); -} - -/** - * alloc_cmd_buf - allocate a dma buffer for API CMD command - * @chain: the API CMD specific chain for the cmd - * @cell: the cell in the HW for the cmd - * @cell_idx: the index of the cell - * Return: 0 - success, negative - failure - **/ -static int alloc_cmd_buf(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_cell *cell, u32 cell_idx) -{ - struct sphw_api_cmd_cell_ctxt *cell_ctxt; - void *dev = chain->hwdev->dev_hdl; - void *buf_vaddr; - u64 buf_paddr; - int err = 0; - - buf_vaddr = (u8 *)((u64)chain->buf_vaddr_base + - chain->buf_size_align * cell_idx); - buf_paddr = chain->buf_paddr_base + - chain->buf_size_align * cell_idx; - - cell_ctxt = &chain->cell_ctxt[cell_idx]; - - cell_ctxt->api_cmd_vaddr = buf_vaddr; - - /* set the cmd DMA address in the cell */ - switch (chain->chain_type) { - case SPHW_API_CMD_POLL_READ: - cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr); - break; - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - case SPHW_API_CMD_POLL_WRITE: - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - /* The data in the HW should be in Big Endian Format */ - cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr); - break; - default: - sdk_err(dev, "Unknown API CMD Chain type: %d\n", - chain->chain_type); - err = -EINVAL; - break; - } - - return err; -} - -/** - * alloc_cmd_buf - allocate a resp buffer for API CMD command - * @chain: the API CMD specific chain for the cmd - * @cell: the cell in the HW for the cmd - * @cell_idx: the index of the cell - **/ -static void alloc_resp_buf(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_cell *cell, u32 cell_idx) -{ - struct sphw_api_cmd_cell_ctxt *cell_ctxt; - void *resp_vaddr; - u64 resp_paddr; - - resp_vaddr = (u8 *)((u64)chain->rsp_vaddr_base + - chain->rsp_size_align * cell_idx); - resp_paddr = chain->rsp_paddr_base + - chain->rsp_size_align * cell_idx; - - cell_ctxt = &chain->cell_ctxt[cell_idx]; - - cell_ctxt->resp = resp_vaddr; - cell->read.hw_wb_resp_paddr = cpu_to_be64(resp_paddr); -} - -static int sphw_alloc_api_cmd_cell_buf(struct sphw_api_cmd_chain *chain, u32 cell_idx, - struct sphw_api_cmd_cell *node) -{ - void *dev = chain->hwdev->dev_hdl; - int err; - - /* For read chain, we should allocate buffer for the response data */ - if (chain->chain_type == SPHW_API_CMD_MULTI_READ || - chain->chain_type == SPHW_API_CMD_POLL_READ) - alloc_resp_buf(chain, node, cell_idx); - - switch (chain->chain_type) { - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - case SPHW_API_CMD_POLL_WRITE: - case SPHW_API_CMD_POLL_READ: - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - err = alloc_cmd_buf(chain, node, cell_idx); - if (err) { - sdk_err(dev, "Failed to allocate cmd buffer\n"); - goto alloc_cmd_buf_err; - } - break; - /* For api command write and api command read, the data section - 
* is directly inserted in the cell, so no need to allocate. - */ - case SPHW_API_CMD_MULTI_READ: - chain->cell_ctxt[cell_idx].api_cmd_vaddr = - &node->read.hw_cmd_paddr; /* to do: who int this*/ - break; - default: - sdk_err(dev, "Unsupported API CMD chain type\n"); - err = -EINVAL; - goto alloc_cmd_buf_err; - } - - return 0; - -alloc_cmd_buf_err: - - return err; -} - -/** - * api_cmd_create_cell - create API CMD cell of specific chain - * @chain: the API CMD specific chain to create its cell - * @cell_idx: the cell index to create - * @pre_node: previous cell - * @node_vaddr: the virt addr of the cell - * Return: 0 - success, negative - failure - **/ -static int api_cmd_create_cell(struct sphw_api_cmd_chain *chain, u32 cell_idx, - struct sphw_api_cmd_cell *pre_node, - struct sphw_api_cmd_cell **node_vaddr) -{ - struct sphw_api_cmd_cell_ctxt *cell_ctxt; - struct sphw_api_cmd_cell *node; - void *cell_vaddr; - u64 cell_paddr; - int err; - - cell_vaddr = (void *)((u64)chain->cell_vaddr_base + - chain->cell_size_align * cell_idx); - cell_paddr = chain->cell_paddr_base + - chain->cell_size_align * cell_idx; - - cell_ctxt = &chain->cell_ctxt[cell_idx]; - cell_ctxt->cell_vaddr = cell_vaddr; - node = cell_ctxt->cell_vaddr; - - if (!pre_node) { - chain->head_node = cell_vaddr; - chain->head_cell_paddr = cell_paddr; - } else { - /* The data in the HW should be in Big Endian Format */ - pre_node->next_cell_paddr = cpu_to_be64(cell_paddr); - } - - /* Driver software should make sure that there is an empty API - * command cell at the end the chain - */ - node->next_cell_paddr = 0; - - err = sphw_alloc_api_cmd_cell_buf(chain, cell_idx, node); - if (err) - return err; - - *node_vaddr = node; - - return 0; -} - -/** - * api_cmd_create_cells - create API CMD cells for specific chain - * @chain: the API CMD specific chain - * Return: 0 - success, negative - failure - **/ -static int api_cmd_create_cells(struct sphw_api_cmd_chain *chain) -{ - struct sphw_api_cmd_cell *node = NULL, *pre_node = NULL; - void *dev = chain->hwdev->dev_hdl; - u32 cell_idx; - int err; - - for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { - err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); - if (err) { - sdk_err(dev, "Failed to create API CMD cell\n"); - return err; - } - - pre_node = node; - } - - if (!node) - return -EFAULT; - - /* set the Final node to point on the start */ - node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); - - /* set the current node to be the head */ - chain->curr_node = chain->head_node; - return 0; -} - -/** - * api_chain_init - initialize API CMD specific chain - * @chain: the API CMD specific chain to initialize - * @attr: attributes to set in the chain - * Return: 0 - success, negative - failure - **/ -static int api_chain_init(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_chain_attr *attr) -{ - void *dev = chain->hwdev->dev_hdl; - size_t cell_ctxt_size; - size_t cells_buf_size; - int err; - - chain->chain_type = attr->chain_type; - chain->num_cells = attr->num_cells; - chain->cell_size = attr->cell_size; - chain->rsp_size = attr->rsp_size; - - chain->prod_idx = 0; - chain->cons_idx = 0; - - if (chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) - spin_lock_init(&chain->async_lock); - else - sema_init(&chain->sem, 1); - - cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); - if (!cell_ctxt_size) { - sdk_err(dev, "Api chain cell size cannot be zero\n"); - return -EINVAL; - } - - chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL); - if 
(!chain->cell_ctxt) { - sdk_err(dev, "Failed to allocate cell contexts for a chain\n"); - return -ENOMEM; - } - - chain->wb_status = dma_alloc_coherent(dev, sizeof(*chain->wb_status), - &chain->wb_status_paddr, GFP_KERNEL); - if (!chain->wb_status) { - sdk_err(dev, "Failed to allocate DMA wb status\n"); - err = -ENOMEM; - goto alloc_wb_status_err; - } - - chain->cell_size_align = ALIGN((u64)chain->cell_size, - API_CMD_NODE_ALIGN_SIZE); - chain->rsp_size_align = ALIGN((u64)chain->rsp_size, - API_CHAIN_RESP_ALIGNMENT); - chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE); - - cells_buf_size = (chain->cell_size_align + chain->rsp_size_align + - chain->buf_size_align) * chain->num_cells; - - err = sphw_dma_alloc_coherent_align(dev, cells_buf_size, API_CMD_NODE_ALIGN_SIZE, - GFP_KERNEL, &chain->cells_addr); - if (err) { - sdk_err(dev, "Failed to allocate API CMD cells buffer\n"); - goto alloc_cells_buf_err; - } - - chain->cell_vaddr_base = chain->cells_addr.align_vaddr; - chain->cell_paddr_base = chain->cells_addr.align_paddr; - - chain->rsp_vaddr_base = (u8 *)((u64)chain->cell_vaddr_base + - chain->cell_size_align * chain->num_cells); - chain->rsp_paddr_base = chain->cell_paddr_base + - chain->cell_size_align * chain->num_cells; - - chain->buf_vaddr_base = (u8 *)((u64)chain->rsp_vaddr_base + - chain->rsp_size_align * chain->num_cells); - chain->buf_paddr_base = chain->rsp_paddr_base + - chain->rsp_size_align * chain->num_cells; - - return 0; - -alloc_cells_buf_err: - dma_free_coherent(dev, sizeof(*chain->wb_status), - chain->wb_status, chain->wb_status_paddr); - -alloc_wb_status_err: - kfree(chain->cell_ctxt); - - return err; -} - -/** - * api_chain_free - free API CMD specific chain - * @chain: the API CMD specific chain to free - **/ -static void api_chain_free(struct sphw_api_cmd_chain *chain) -{ - void *dev = chain->hwdev->dev_hdl; - - sphw_dma_free_coherent_align(dev, &chain->cells_addr); - - dma_free_coherent(dev, sizeof(*chain->wb_status), - chain->wb_status, chain->wb_status_paddr); - kfree(chain->cell_ctxt); -} - -/** - * api_cmd_create_chain - create API CMD specific chain - * @chain: the API CMD specific chain to create - * @attr: attributes to set in the chain - * Return: 0 - success, negative - failure - **/ -static int api_cmd_create_chain(struct sphw_api_cmd_chain **cmd_chain, - struct sphw_api_cmd_chain_attr *attr) -{ - struct sphw_hwdev *hwdev = attr->hwdev; - struct sphw_api_cmd_chain *chain = NULL; - int err; - - if (attr->num_cells & (attr->num_cells - 1)) { - sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n"); - return -EINVAL; - } - - chain = kzalloc(sizeof(*chain), GFP_KERNEL); - if (!chain) - return -ENOMEM; - - chain->hwdev = hwdev; - - err = api_chain_init(chain, attr); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n"); - goto chain_init_err; - } - - err = api_cmd_create_cells(chain); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n"); - goto create_cells_err; - } - - err = api_cmd_chain_hw_init(chain); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to initialize chain HW\n"); - goto chain_hw_init_err; - } - - *cmd_chain = chain; - return 0; - -chain_hw_init_err: -create_cells_err: - api_chain_free(chain); - -chain_init_err: - kfree(chain); - return err; -} - -/** - * api_cmd_destroy_chain - destroy API CMD specific chain - * @chain: the API CMD specific chain to destroy - **/ -static void api_cmd_destroy_chain(struct sphw_api_cmd_chain *chain) -{ - api_chain_free(chain); 
- kfree(chain); -} - -/** - * sphw_api_cmd_init - Initialize all the API CMD chains - * @hwif: the hardware interface of a pci function device - * @chain: the API CMD chains that will be initialized - * Return: 0 - success, negative - failure - **/ -int sphw_api_cmd_init(struct sphw_hwdev *hwdev, struct sphw_api_cmd_chain **chain) -{ - void *dev = hwdev->dev_hdl; - struct sphw_api_cmd_chain_attr attr; - enum sphw_api_cmd_chain_type chain_type, i; - int err; - - attr.hwdev = hwdev; - attr.num_cells = API_CHAIN_NUM_CELLS; - attr.cell_size = API_CHAIN_CELL_SIZE; - attr.rsp_size = API_CHAIN_RSP_DATA_SIZE; - - chain_type = SPHW_API_CMD_WRITE_TO_MGMT_CPU; - for (; chain_type < SPHW_API_CMD_MAX; chain_type++) { - attr.chain_type = chain_type; - - err = api_cmd_create_chain(&chain[chain_type], &attr); - if (err) { - sdk_err(dev, "Failed to create chain %d\n", chain_type); - goto create_chain_err; - } - } - - return 0; - -create_chain_err: - i = SPHW_API_CMD_WRITE_TO_MGMT_CPU; - for (; i < chain_type; i++) - api_cmd_destroy_chain(chain[i]); - - return err; -} - -/** - * sphw_api_cmd_free - free the API CMD chains - * @chain: the API CMD chains that will be freed - **/ -void sphw_api_cmd_free(struct sphw_api_cmd_chain **chain) -{ - enum sphw_api_cmd_chain_type chain_type; - - chain_type = SPHW_API_CMD_WRITE_TO_MGMT_CPU; - - for (; chain_type < SPHW_API_CMD_MAX; chain_type++) - api_cmd_destroy_chain(chain[chain_type]); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h deleted file mode 100644 index 14a6c0b50e17d603064b5a3988fd3c85b70cd467..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h +++ /dev/null @@ -1,277 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_API_CMD_H -#define SPHW_API_CMD_H - -#include "sphw_hwif.h" - -/*api_cmd_cell.ctrl structure*/ -#define SPHW_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0 -#define SPHW_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 -#define SPHW_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 -#define SPHW_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 - -#define SPHW_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU -#define SPHW_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU -#define SPHW_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU -#define SPHW_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU - -#define SPHW_API_CMD_CELL_CTRL_SET(val, member) \ - ((((u64)(val)) & SPHW_API_CMD_CELL_CTRL_##member##_MASK) << \ - SPHW_API_CMD_CELL_CTRL_##member##_SHIFT) - -/*api_cmd_cell.desc structure*/ -#define SPHW_API_CMD_DESC_API_TYPE_SHIFT 0 -#define SPHW_API_CMD_DESC_RD_WR_SHIFT 1 -#define SPHW_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 -#define SPHW_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3 -#define SPHW_API_CMD_DESC_APICHN_RSVD_SHIFT 4 -#define SPHW_API_CMD_DESC_APICHN_CODE_SHIFT 6 -#define SPHW_API_CMD_DESC_PRIV_DATA_SHIFT 8 -#define SPHW_API_CMD_DESC_DEST_SHIFT 32 -#define SPHW_API_CMD_DESC_SIZE_SHIFT 40 -#define SPHW_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 - -#define SPHW_API_CMD_DESC_API_TYPE_MASK 0x1U -#define SPHW_API_CMD_DESC_RD_WR_MASK 0x1U -#define SPHW_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U -#define SPHW_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U -#define SPHW_API_CMD_DESC_APICHN_RSVD_MASK 0x3U -#define SPHW_API_CMD_DESC_APICHN_CODE_MASK 0x3U -#define SPHW_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU -#define SPHW_API_CMD_DESC_DEST_MASK 0x1FU -#define SPHW_API_CMD_DESC_SIZE_MASK 0x7FFU -#define SPHW_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU - -#define 
SPHW_API_CMD_DESC_SET(val, member) \ - ((((u64)(val)) & SPHW_API_CMD_DESC_##member##_MASK) << \ - SPHW_API_CMD_DESC_##member##_SHIFT) - -/*api_cmd_status header*/ -#define SPHW_API_CMD_STATUS_HEADER_VALID_SHIFT 0 -#define SPHW_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 - -#define SPHW_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU -#define SPHW_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU - -#define SPHW_API_CMD_STATUS_HEADER_GET(val, member) \ - (((val) >> SPHW_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ - SPHW_API_CMD_STATUS_HEADER_##member##_MASK) - -/*API_CHAIN_REQ CSR: 0x0020+api_idx*0x080*/ -#define SPHW_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 -#define SPHW_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2 - -#define SPHW_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U -#define SPHW_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U - -#define SPHW_API_CMD_CHAIN_REQ_SET(val, member) \ - (((val) & SPHW_API_CMD_CHAIN_REQ_##member##_MASK) << \ - SPHW_API_CMD_CHAIN_REQ_##member##_SHIFT) - -#define SPHW_API_CMD_CHAIN_REQ_GET(val, member) \ - (((val) >> SPHW_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ - SPHW_API_CMD_CHAIN_REQ_##member##_MASK) - -#define SPHW_API_CMD_CHAIN_REQ_CLEAR(val, member) \ - ((val) & (~(SPHW_API_CMD_CHAIN_REQ_##member##_MASK \ - << SPHW_API_CMD_CHAIN_REQ_##member##_SHIFT))) - -/*API_CHAIN_CTL CSR: 0x0014+api_idx*0x080*/ -#define SPHW_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1 -#define SPHW_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 -#define SPHW_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 -#define SPHW_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 -#define SPHW_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 -#define SPHW_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 - -#define SPHW_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U -#define SPHW_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U -#define SPHW_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U -#define SPHW_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U -#define SPHW_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U -#define SPHW_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U - -#define SPHW_API_CMD_CHAIN_CTRL_SET(val, member) \ - (((val) & SPHW_API_CMD_CHAIN_CTRL_##member##_MASK) << \ - SPHW_API_CMD_CHAIN_CTRL_##member##_SHIFT) - -#define SPHW_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ - ((val) & (~(SPHW_API_CMD_CHAIN_CTRL_##member##_MASK \ - << SPHW_API_CMD_CHAIN_CTRL_##member##_SHIFT))) - -/*api_cmd rsp header*/ -#define SPHW_API_CMD_RESP_HEAD_VALID_SHIFT 0 -#define SPHW_API_CMD_RESP_HEAD_STATUS_SHIFT 8 -#define SPHW_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16 -#define SPHW_API_CMD_RESP_HEAD_RESP_LEN_SHIFT 24 -#define SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40 - -#define SPHW_API_CMD_RESP_HEAD_VALID_MASK 0xFF -#define SPHW_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU -#define SPHW_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFFU -#define SPHW_API_CMD_RESP_HEAD_RESP_LEN_MASK 0x1FFU -#define SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU - -#define SPHW_API_CMD_RESP_HEAD_VALID_CODE 0xFF - -#define SPHW_API_CMD_RESP_HEADER_VALID(val) \ - (((val) & SPHW_API_CMD_RESP_HEAD_VALID_MASK) == \ - SPHW_API_CMD_RESP_HEAD_VALID_CODE) - -#define SPHW_API_CMD_RESP_HEAD_GET(val, member) \ - (((val) >> SPHW_API_CMD_RESP_HEAD_##member##_SHIFT) & \ - SPHW_API_CMD_RESP_HEAD_##member##_MASK) - -#define SPHW_API_CMD_RESP_HEAD_CHAIN_ID(val) \ - (((val) >> SPHW_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \ - SPHW_API_CMD_RESP_HEAD_CHAIN_ID_MASK) - -#define SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \ - ((u16)(((val) >> SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \ - SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK)) -/*API_STATUS_0 CSR: 0x0030+api_idx*0x080*/ -#define SPHW_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU -#define 
SPHW_API_CMD_STATUS_CONS_IDX_SHIFT 0 - -#define SPHW_API_CMD_STATUS_FSM_MASK 0xFU -#define SPHW_API_CMD_STATUS_FSM_SHIFT 24 - -#define SPHW_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U -#define SPHW_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 - -#define SPHW_API_CMD_STATUS_CPLD_ERR_MASK 0x1U -#define SPHW_API_CMD_STATUS_CPLD_ERR_SHIFT 30 - -#define SPHW_API_CMD_STATUS_CONS_IDX(val) \ - ((val) & SPHW_API_CMD_STATUS_CONS_IDX_MASK) - -#define SPHW_API_CMD_STATUS_CHKSUM_ERR(val) \ - (((val) >> SPHW_API_CMD_STATUS_CHKSUM_ERR_SHIFT) & \ - SPHW_API_CMD_STATUS_CHKSUM_ERR_MASK) - -#define SPHW_API_CMD_STATUS_GET(val, member) \ - (((val) >> SPHW_API_CMD_STATUS_##member##_SHIFT) & \ - SPHW_API_CMD_STATUS_##member##_MASK) - -enum sphw_api_cmd_chain_type { - /* write to mgmt cpu command with completion */ - SPHW_API_CMD_WRITE_TO_MGMT_CPU = 2, - /* multi read command with completion notification - not used */ - SPHW_API_CMD_MULTI_READ = 3, - /* write command without completion notification */ - SPHW_API_CMD_POLL_WRITE = 4, - /* read command without completion notification */ - SPHW_API_CMD_POLL_READ = 5, - /* read from mgmt cpu command with completion */ - SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6, - SPHW_API_CMD_MAX, -}; - -struct sphw_api_cmd_status { - u64 header; - u32 buf_desc; - u32 cell_addr_hi; - u32 cell_addr_lo; - u32 rsvd0; - u64 rsvd1; -}; - -/* HW struct */ -struct sphw_api_cmd_cell { - u64 ctrl; - - /* address is 64 bit in HW struct */ - u64 next_cell_paddr; - - u64 desc; - - /* HW struct */ - union { - struct { - u64 hw_cmd_paddr; - } write; - - struct { - u64 hw_wb_resp_paddr; - u64 hw_cmd_paddr; - } read; - }; -}; - -struct sphw_api_cmd_resp_fmt { - u64 header; - u64 resp_data; -}; - -struct sphw_api_cmd_cell_ctxt { - struct sphw_api_cmd_cell *cell_vaddr; - - void *api_cmd_vaddr; - - struct sphw_api_cmd_resp_fmt *resp; - - struct completion done; - int status; - - u32 saved_prod_idx; -}; - -struct sphw_api_cmd_chain_attr { - struct sphw_hwdev *hwdev; - enum sphw_api_cmd_chain_type chain_type; - - u32 num_cells; - u16 rsp_size; - u16 cell_size; -}; - -struct sphw_api_cmd_chain { - struct sphw_hwdev *hwdev; - enum sphw_api_cmd_chain_type chain_type; - - u32 num_cells; - u16 cell_size; - u16 rsp_size; - - /* HW members is 24 bit format */ - u32 prod_idx; - u32 cons_idx; - - struct semaphore sem; - /* Async cmd can not be scheduling */ - spinlock_t async_lock; - - dma_addr_t wb_status_paddr; - struct sphw_api_cmd_status *wb_status; - - dma_addr_t head_cell_paddr; - struct sphw_api_cmd_cell *head_node; - - struct sphw_api_cmd_cell_ctxt *cell_ctxt; - struct sphw_api_cmd_cell *curr_node; - - struct sphw_dma_addr_align cells_addr; - - u8 *cell_vaddr_base; - u64 cell_paddr_base; - u8 *rsp_vaddr_base; - u64 rsp_paddr_base; - u8 *buf_vaddr_base; - u64 buf_paddr_base; - u64 cell_size_align; - u64 rsp_size_align; - u64 buf_size_align; -}; - -int sphw_api_cmd_write(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size); - -int sphw_api_cmd_read(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size, - void *ack, u16 ack_size); - -int sphw_api_cmd_init(struct sphw_hwdev *hwdev, struct sphw_api_cmd_chain **chain); - -void sphw_api_cmd_free(struct sphw_api_cmd_chain **chain); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h deleted file mode 100644 index 63b89e71c55280ec505ef7ebc8ed323ac927a462..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h +++ /dev/null 
@@ -1,127 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_CFG_CMD_H -#define SPHW_CFG_CMD_H - -#include "sphw_mgmt_msg_base.h" - -enum cfg_cmd { - CFG_CMD_GET_DEV_CAP = 0, -}; - -struct cfg_cmd_dev_cap { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1; - - /* Public resources */ - u8 host_id; - u8 ep_id; - u8 er_id; - u8 port_id; - - u16 host_total_func; - u8 host_pf_num; - u8 pf_id_start; - u16 host_vf_num; - u16 vf_id_start; - u8 host_oq_id_mask_val; - u8 rsvd_host[3]; - - u16 svc_cap_en; - u16 max_vf; - u8 flexq_en; - u8 valid_cos_bitmap; - /* Reserved for func_valid_cos_bitmap */ - u16 rsvd_func1; - u32 rsvd_func2; - - u8 sf_svc_attr; - u8 func_sf_en; - u8 lb_mode; - u8 smf_pg; - - u32 max_conn_num; - u16 max_stick2cache_num; - u16 max_bfilter_start_addr; - u16 bfilter_len; - u16 hash_bucket_num; - - /* shared resource */ - u8 host_sf_en; - u8 rsvd2_sr[3]; - u32 host_pctx_num; - u32 host_ccxt_num; - u32 host_scq_num; - u32 host_srq_num; - u32 host_mpt_num; - - /* l2nic */ - u16 nic_max_sq_id; - u16 nic_max_rq_id; - u32 rsvd_nic[3]; - - /* RoCE */ - u32 roce_max_qp; - u32 roce_max_cq; - u32 roce_max_srq; - u32 roce_max_mpt; - u32 roce_max_drc_qp; - - u32 roce_cmtt_cl_start; - u32 roce_cmtt_cl_end; - u32 roce_cmtt_cl_size; - - u32 roce_dmtt_cl_start; - u32 roce_dmtt_cl_end; - u32 roce_dmtt_cl_size; - - u32 roce_wqe_cl_start; - u32 roce_wqe_cl_end; - u32 roce_wqe_cl_size; - u8 roce_srq_container_mode; - u8 rsvd_roce1[3]; - u32 rsvd_roce2[5]; - - /* IPsec */ - u32 ipsec_max_sactx; - u32 rsvd_ipsec[3]; - - /* OVS */ - u32 ovs_max_qpc; - u16 fake_vf_start_id; - u8 fake_vf_num; - u8 rsvd_ovs1; - u32 rsvd_ovs2[2]; - - /* ToE */ - u32 toe_max_pctx; - u32 toe_max_cq; - u16 toe_max_srq; - u16 toe_srq_id_start; - u16 toe_max_mpt; - u16 toe_max_cctxt; - u32 rsvd_toe[2]; - - /* FC */ - u32 fc_max_pctx; - u32 fc_max_scq; - u32 fc_max_srq; - - u32 fc_max_cctx; - u32 fc_cctx_id_start; - - u8 fc_vp_id_start; - u8 fc_vp_id_end; - u8 rsvd_fc1[2]; - u32 rsvd_fc2[5]; - - /* VBS */ - u32 rsvd_vbs[4]; - - u32 rsvd_glb[11]; -}; - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c deleted file mode 100644 index 9ebff6f8ac97f06a13364a38a44055ee190d088c..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c +++ /dev/null @@ -1,1573 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_hwdev.h" -#include "sphw_eqs.h" -#include "sphw_common.h" -#include "sphw_wq.h" -#include "sphw_hw_comm.h" -#include "sphw_cmdq.h" - -#define SPHW_CMDQ_BUF_SIZE 2048U -#define SPHW_CMDQ_BUF_HW_RSVD 8 -#define SPHW_CMDQ_MAX_DATA_SIZE \ - (SPHW_CMDQ_BUF_SIZE - SPHW_CMDQ_BUF_HW_RSVD) - -#define CMDQ_CMD_TIMEOUT 5000 /* millisecond */ - -#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) -#define LOWER_8_BITS(data) ((data) & 0xFF) - -#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 -#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU -#define CMDQ_DB_INFO_SET(val, member) \ - ((((u32)(val)) & CMDQ_DB_INFO_##member##_MASK) << \ - CMDQ_DB_INFO_##member##_SHIFT) - -#define CMDQ_DB_HEAD_QUEUE_TYPE_SHIFT 23 -#define CMDQ_DB_HEAD_CMDQ_TYPE_SHIFT 24 -#define CMDQ_DB_HEAD_SRC_TYPE_SHIFT 
27 -#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK 0x1U -#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK 0x7U -#define CMDQ_DB_HEAD_SRC_TYPE_MASK 0x1FU -#define CMDQ_DB_HEAD_SET(val, member) \ - ((((u32)(val)) & CMDQ_DB_HEAD_##member##_MASK) << \ - CMDQ_DB_HEAD_##member##_SHIFT) - -#define CMDQ_CTRL_PI_SHIFT 0 -#define CMDQ_CTRL_CMD_SHIFT 16 -#define CMDQ_CTRL_MOD_SHIFT 24 -#define CMDQ_CTRL_ACK_TYPE_SHIFT 29 -#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 - -#define CMDQ_CTRL_PI_MASK 0xFFFFU -#define CMDQ_CTRL_CMD_MASK 0xFFU -#define CMDQ_CTRL_MOD_MASK 0x1FU -#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U -#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U - -#define CMDQ_CTRL_SET(val, member) \ - ((((u32)(val)) & CMDQ_CTRL_##member##_MASK) << \ - CMDQ_CTRL_##member##_SHIFT) - -#define CMDQ_CTRL_GET(val, member) \ - (((val) >> CMDQ_CTRL_##member##_SHIFT) & \ - CMDQ_CTRL_##member##_MASK) - -#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 -#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 -#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 -#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 -#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 -#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 -#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31 - -#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU -#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U -#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U -#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U -#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U -#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U -#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U - -#define CMDQ_WQE_HEADER_SET(val, member) \ - ((((u32)(val)) & CMDQ_WQE_HEADER_##member##_MASK) << \ - CMDQ_WQE_HEADER_##member##_SHIFT) - -#define CMDQ_WQE_HEADER_GET(val, member) \ - (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) & \ - CMDQ_WQE_HEADER_##member##_MASK) - -#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 -#define CMDQ_CTXT_EQ_ID_SHIFT 53 -#define CMDQ_CTXT_CEQ_ARM_SHIFT 61 -#define CMDQ_CTXT_CEQ_EN_SHIFT 62 -#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63 - -#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF -#define CMDQ_CTXT_EQ_ID_MASK 0xFF -#define CMDQ_CTXT_CEQ_ARM_MASK 0x1 -#define CMDQ_CTXT_CEQ_EN_MASK 0x1 -#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1 - -#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \ - (((u64)(val) & CMDQ_CTXT_##member##_MASK) << \ - CMDQ_CTXT_##member##_SHIFT) - -#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \ - (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) & \ - CMDQ_CTXT_##member##_MASK) - -#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 -#define CMDQ_CTXT_CI_SHIFT 52 - -#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF -#define CMDQ_CTXT_CI_MASK 0xFFF - -#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ - (((u64)(val) & CMDQ_CTXT_##member##_MASK) << \ - CMDQ_CTXT_##member##_SHIFT) - -#define CMDQ_CTXT_BLOCK_INFO_GET(val, member) \ - (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) & \ - CMDQ_CTXT_##member##_MASK) - -#define SAVED_DATA_ARM_SHIFT 31 - -#define SAVED_DATA_ARM_MASK 0x1U - -#define SAVED_DATA_SET(val, member) \ - (((val) & SAVED_DATA_##member##_MASK) << \ - SAVED_DATA_##member##_SHIFT) - -#define SAVED_DATA_CLEAR(val, member) \ - ((val) & (~(SAVED_DATA_##member##_MASK << \ - SAVED_DATA_##member##_SHIFT))) - -#define WQE_ERRCODE_VAL_SHIFT 0 - -#define WQE_ERRCODE_VAL_MASK 0x7FFFFFFF - -#define WQE_ERRCODE_GET(val, member) \ - (((val) >> WQE_ERRCODE_##member##_SHIFT) & \ - WQE_ERRCODE_##member##_MASK) - -#define CEQE_CMDQ_TYPE_SHIFT 0 - -#define CEQE_CMDQ_TYPE_MASK 0x7 - -#define CEQE_CMDQ_GET(val, member) \ - (((val) >> CEQE_CMDQ_##member##_SHIFT) & \ - CEQE_CMDQ_##member##_MASK) - 
-#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) - -#define WQE_HEADER(wqe) ((struct sphw_cmdq_header *)(wqe)) - -#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) - -#define CMDQ_DB_ADDR(db_base, pi) \ - (((u8 *)(db_base)) + CMDQ_DB_PI_OFF(pi)) - -#define CMDQ_PFN_SHIFT 12 -#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT) - -#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) - -#define WQE_LCMD_SIZE 64 -#define WQE_SCMD_SIZE 64 - -#define COMPLETE_LEN 3 - -#define CMDQ_WQEBB_SIZE 64 -#define CMDQ_WQE_SIZE 64 - -#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ - struct sphw_cmdqs, cmdq[0]) - -#define CMDQ_SEND_CMPT_CODE 10 -#define CMDQ_COMPLETE_CMPT_CODE 11 -#define CMDQ_FORCE_STOP_CMPT_CODE 12 - -enum cmdq_scmd_type { - CMDQ_SET_ARM_CMD = 2, -}; - -enum cmdq_wqe_type { - WQE_LCMD_TYPE, - WQE_SCMD_TYPE, -}; - -enum ctrl_sect_len { - CTRL_SECT_LEN = 1, - CTRL_DIRECT_SECT_LEN = 2, -}; - -enum bufdesc_len { - BUFDESC_LCMD_LEN = 2, - BUFDESC_SCMD_LEN = 3, -}; - -enum data_format { - DATA_SGE, - DATA_DIRECT, -}; - -enum completion_format { - COMPLETE_DIRECT, - COMPLETE_SGE, -}; - -enum completion_request { - CEQ_SET = 1, -}; - -enum cmdq_cmd_type { - SYNC_CMD_DIRECT_RESP, - SYNC_CMD_SGE_RESP, - ASYNC_CMD, -}; - -#define NUM_WQEBBS_FOR_CMDQ_WQE 1 - -bool sphw_cmdq_idle(struct sphw_cmdq *cmdq) -{ - return sphw_wq_is_empty(&cmdq->wq); -} - -static void *cmdq_read_wqe(struct sphw_wq *wq, u16 *ci) -{ - if (sphw_wq_is_empty(wq)) - return NULL; - - return sphw_wq_read_one_wqebb(wq, ci); -} - -static void *cmdq_get_wqe(struct sphw_wq *wq, u16 *pi) -{ - if (!sphw_wq_free_wqebbs(wq)) - return NULL; - - return sphw_wq_get_one_wqebb(wq, pi); -} - -struct sphw_cmd_buf *sphw_alloc_cmd_buf(void *hwdev) -{ - struct sphw_cmdqs *cmdqs = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - void *dev = NULL; - - if (!hwdev) { - pr_err("Failed to alloc cmd buf, Invalid hwdev\n"); - return NULL; - } - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - dev = ((struct sphw_hwdev *)hwdev)->dev_hdl; - - cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC); - if (!cmd_buf) { - sdk_err(dev, "Failed to allocate cmd buf\n"); - return NULL; - } - - cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC, - &cmd_buf->dma_addr); - if (!cmd_buf->buf) { - sdk_err(dev, "Failed to allocate cmdq cmd buf from the pool\n"); - goto alloc_pci_buf_err; - } - - atomic_set(&cmd_buf->ref_cnt, 1); - - return cmd_buf; - -alloc_pci_buf_err: - kfree(cmd_buf); - return NULL; -} - -void sphw_free_cmd_buf(void *hwdev, struct sphw_cmd_buf *cmd_buf) -{ - struct sphw_cmdqs *cmdqs = NULL; - - if (!hwdev || !cmd_buf) { - pr_err("Failed to free cmd buf: hwdev: %p, cmd_buf: %p\n", - hwdev, cmd_buf); - return; - } - - if (!atomic_dec_and_test(&cmd_buf->ref_cnt)) - return; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - - pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr); - kfree(cmd_buf); -} - -static void cmdq_set_completion(struct sphw_cmdq_completion *complete, - struct sphw_cmd_buf *buf_out) -{ - struct sphw_sge_resp *sge_resp = &complete->sge_resp; - - sphw_set_sge(&sge_resp->sge, buf_out->dma_addr, SPHW_CMDQ_BUF_SIZE); -} - -static void cmdq_set_lcmd_bufdesc(struct sphw_cmdq_wqe_lcmd *wqe, - struct sphw_cmd_buf *buf_in) -{ - sphw_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size); -} - -static void cmdq_set_inline_wqe_data(struct sphw_cmdq_inline_wqe *wqe, - const void *buf_in, u32 in_size) -{ - struct sphw_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; - - 
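- /* Inline (scmd) WQE: the payload is copied into buf_desc.data, which holds at most SPHW_SCMD_DATA_LEN bytes, instead of being referenced through an SGE as in the lcmd path; callers must ensure in_size fits. */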
wqe_scmd->buf_desc.buf_len = in_size; - memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); -} - -static void cmdq_fill_db(struct sphw_cmdq_db *db, - enum sphw_cmdq_type cmdq_type, u16 prod_idx) -{ - db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX); - - db->db_head = CMDQ_DB_HEAD_SET(SPHW_DB_CMDQ_TYPE, QUEUE_TYPE) | - CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE) | - CMDQ_DB_HEAD_SET(SPHW_DB_SRC_CMDQ_TYPE, SRC_TYPE); -} - -static void cmdq_set_db(struct sphw_cmdq *cmdq, - enum sphw_cmdq_type cmdq_type, u16 prod_idx) -{ - struct sphw_cmdq_db db = {0}; - - cmdq_fill_db(&db, cmdq_type, prod_idx); - - wmb(); /* write all before the doorbell */ - writeq(*((u64 *)&db), CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); -} - -static void cmdq_wqe_fill(void *dst, const void *src) -{ - memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST, - (u8 *)src + FIRST_DATA_TO_WRITE_LAST, - CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); - - wmb(); /* The first 8 bytes should be written last */ - - *(u64 *)dst = *(u64 *)src; -} - -static void cmdq_prepare_wqe_ctrl(struct sphw_cmdq_wqe *wqe, int wrapped, - u8 mod, u8 cmd, u16 prod_idx, - enum completion_format complete_format, - enum data_format data_format, - enum bufdesc_len buf_len) -{ - struct sphw_ctrl *ctrl = NULL; - enum ctrl_sect_len ctrl_len; - struct sphw_cmdq_wqe_lcmd *wqe_lcmd = NULL; - struct sphw_cmdq_wqe_scmd *wqe_scmd = NULL; - u32 saved_data = WQE_HEADER(wqe)->saved_data; - - if (data_format == DATA_SGE) { - wqe_lcmd = &wqe->wqe_lcmd; - - wqe_lcmd->status.status_info = 0; - ctrl = &wqe_lcmd->ctrl; - ctrl_len = CTRL_SECT_LEN; - } else { - wqe_scmd = &wqe->inline_wqe.wqe_scmd; - - wqe_scmd->status.status_info = 0; - ctrl = &wqe_scmd->ctrl; - ctrl_len = CTRL_DIRECT_SECT_LEN; - } - - ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) | - CMDQ_CTRL_SET(cmd, CMD) | - CMDQ_CTRL_SET(mod, MOD); - - WQE_HEADER(wqe)->header_info = - CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | - CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | - CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | - CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | - CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | - CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | - CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); - - if (cmd == CMDQ_SET_ARM_CMD && mod == SPHW_MOD_COMM) { - saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); - WQE_HEADER(wqe)->saved_data = saved_data | - SAVED_DATA_SET(1, ARM); - } else { - saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); - WQE_HEADER(wqe)->saved_data = saved_data; - } -} - -static void cmdq_set_lcmd_wqe(struct sphw_cmdq_wqe *wqe, - enum cmdq_cmd_type cmd_type, - struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, int wrapped, - u8 mod, u8 cmd, u16 prod_idx) -{ - struct sphw_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; - enum completion_format complete_format = COMPLETE_DIRECT; - - switch (cmd_type) { - case SYNC_CMD_DIRECT_RESP: - wqe_lcmd->completion.direct_resp = 0; - break; - case SYNC_CMD_SGE_RESP: - if (buf_out) { - complete_format = COMPLETE_SGE; - cmdq_set_completion(&wqe_lcmd->completion, - buf_out); - } - break; - case ASYNC_CMD: - wqe_lcmd->completion.direct_resp = 0; - wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in); - break; - } - - cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format, - DATA_SGE, BUFDESC_LCMD_LEN); - - cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); -} - -static void cmdq_set_inline_wqe(struct sphw_cmdq_wqe *wqe, - enum cmdq_cmd_type cmd_type, - const void *buf_in, u16 in_size, - struct sphw_cmd_buf *buf_out, int wrapped, - u8 mod, u8 
cmd, u16 prod_idx) -{ - struct sphw_cmdq_wqe_scmd *wqe_scmd = &wqe->inline_wqe.wqe_scmd; - enum completion_format complete_format = COMPLETE_DIRECT; - - switch (cmd_type) { - case SYNC_CMD_SGE_RESP: - complete_format = COMPLETE_SGE; - cmdq_set_completion(&wqe_scmd->completion, buf_out); - break; - case SYNC_CMD_DIRECT_RESP: - complete_format = COMPLETE_DIRECT; - wqe_scmd->completion.direct_resp = 0; - break; - default: - break; - } - - cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, - complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); - - cmdq_set_inline_wqe_data(&wqe->inline_wqe, buf_in, in_size); -} - -static void cmdq_update_cmd_status(struct sphw_cmdq *cmdq, u16 prod_idx, - struct sphw_cmdq_wqe *wqe) -{ - struct sphw_cmdq_cmd_info *cmd_info; - struct sphw_cmdq_wqe_lcmd *wqe_lcmd; - u32 status_info; - - wqe_lcmd = &wqe->wqe_lcmd; - cmd_info = &cmdq->cmd_infos[prod_idx]; - - if (cmd_info->errcode) { - status_info = wqe_lcmd->status.status_info; - *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL); - } - - if (cmd_info->direct_resp) - *cmd_info->direct_resp = wqe_lcmd->completion.direct_resp; -} - -static int sphw_cmdq_sync_timeout_check(struct sphw_cmdq *cmdq, struct sphw_cmdq_wqe *wqe, u16 pi) -{ - struct sphw_cmdq_wqe_lcmd *wqe_lcmd; - u32 ctrl_info; - - wqe_lcmd = &wqe->wqe_lcmd; - ctrl_info = wqe_lcmd->ctrl.ctrl_info; - if (!WQE_COMPLETED(ctrl_info)) { - sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check: busy bit not set\n"); - return -EFAULT; - } - - cmdq_update_cmd_status(cmdq, pi, wqe); - - sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeeded\n"); - return 0; -} - -static void clear_cmd_info(struct sphw_cmdq_cmd_info *cmd_info, - struct sphw_cmdq_cmd_info *saved_cmd_info) -{ - if (cmd_info->errcode == saved_cmd_info->errcode) - cmd_info->errcode = NULL; - - if (cmd_info->done == saved_cmd_info->done) - cmd_info->done = NULL; - - if (cmd_info->direct_resp == saved_cmd_info->direct_resp) - cmd_info->direct_resp = NULL; -} - -static int cmdq_ceq_handler_status(struct sphw_cmdq *cmdq, - struct sphw_cmdq_cmd_info *cmd_info, - struct sphw_cmdq_cmd_info *saved_cmd_info, - u64 curr_msg_id, u16 curr_prod_idx, - struct sphw_cmdq_wqe *curr_wqe, - u32 timeout) -{ - ulong timeo; - int err; - - timeo = msecs_to_jiffies(timeout); - if (wait_for_completion_timeout(saved_cmd_info->done, timeo)) - return 0; - - spin_lock_bh(&cmdq->cmdq_lock); - - if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code) - cmd_info->cmpt_code = NULL; - - if (*saved_cmd_info->cmpt_code == CMDQ_COMPLETE_CMPT_CODE) { - sdk_info(cmdq->hwdev->dev_hdl, "Cmdq direct sync command has been completed\n"); - spin_unlock_bh(&cmdq->cmdq_lock); - return 0; - } - - if (curr_msg_id == cmd_info->cmdq_msg_id) { - err = sphw_cmdq_sync_timeout_check(cmdq, curr_wqe, curr_prod_idx); - if (err) - cmd_info->cmd_type = SPHW_CMD_TYPE_TIMEOUT; - else - cmd_info->cmd_type = SPHW_CMD_TYPE_FAKE_TIMEOUT; - } else { - err = -ETIMEDOUT; - sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command current msg id does not match cmd_info msg id\n"); - } - - clear_cmd_info(cmd_info, saved_cmd_info); - - spin_unlock_bh(&cmdq->cmdq_lock); - - if (!err) - return 0; - - sphw_dump_ceq_info(cmdq->hwdev); - - return -ETIMEDOUT; -} - -static int wait_cmdq_sync_cmd_completion(struct sphw_cmdq *cmdq, - struct sphw_cmdq_cmd_info *cmd_info, - struct sphw_cmdq_cmd_info *saved_cmd_info, - u64 curr_msg_id, u16 curr_prod_idx, - struct sphw_cmdq_wqe *curr_wqe, u32 timeout) -{ - return cmdq_ceq_handler_status(cmdq, cmd_info, saved_cmd_info, - curr_msg_id, 
curr_prod_idx, - curr_wqe, timeout); -} - -static int cmdq_msg_lock(struct sphw_cmdq *cmdq, u16 channel) -{ - struct sphw_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); - - /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */ - spin_lock_bh(&cmdq->cmdq_lock); - - if (cmdqs->lock_channel_en && test_bit(channel, &cmdqs->channel_stop)) { - spin_unlock_bh(&cmdq->cmdq_lock); - return -EAGAIN; - } - - return 0; -} - -static void cmdq_msg_unlock(struct sphw_cmdq *cmdq) -{ - spin_unlock_bh(&cmdq->cmdq_lock); -} - -static void cmdq_clear_cmd_buf(struct sphw_cmdq_cmd_info *cmd_info, - struct sphw_hwdev *hwdev) -{ - if (cmd_info->buf_in) - sphw_free_cmd_buf(hwdev, cmd_info->buf_in); - - if (cmd_info->buf_out) - sphw_free_cmd_buf(hwdev, cmd_info->buf_out); - - cmd_info->buf_in = NULL; - cmd_info->buf_out = NULL; -} - -static void cmdq_set_cmd_buf(struct sphw_cmdq_cmd_info *cmd_info, - struct sphw_hwdev *hwdev, - struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out) -{ - cmd_info->buf_in = buf_in; - cmd_info->buf_out = buf_out; - - if (buf_in) - atomic_inc(&buf_in->ref_cnt); - - if (buf_out) - atomic_inc(&buf_out->ref_cnt); -} - -static int cmdq_sync_cmd_direct_resp(struct sphw_cmdq *cmdq, u8 mod, - u8 cmd, struct sphw_cmd_buf *buf_in, - u64 *out_param, u32 timeout, u16 channel) -{ - struct sphw_wq *wq = &cmdq->wq; - struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; - struct sphw_cmdq_cmd_info *cmd_info = NULL, saved_cmd_info; - struct completion done; - u16 curr_prod_idx, next_prod_idx; - int wrapped, errcode = 0; - int cmpt_code = CMDQ_SEND_CMPT_CODE; - u64 curr_msg_id; - int err; - - err = cmdq_msg_lock(cmdq, channel); - if (err) - return err; - - curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); - if (!curr_wqe) { - cmdq_msg_unlock(cmdq); - return -EBUSY; - } - - memset(&wqe, 0, sizeof(wqe)); - - wrapped = cmdq->wrapped; - - next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= (u16)wq->q_depth; - } - - cmd_info = &cmdq->cmd_infos[curr_prod_idx]; - - init_completion(&done); - - cmd_info->cmd_type = SPHW_CMD_TYPE_DIRECT_RESP; - cmd_info->done = &done; - cmd_info->errcode = &errcode; - cmd_info->direct_resp = out_param; - cmd_info->cmpt_code = &cmpt_code; - cmd_info->channel = channel; - cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, NULL); - - memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); - - cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL, - wrapped, mod, cmd, curr_prod_idx); - - /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ - cmdq_wqe_fill(curr_wqe, &wqe); - - (cmd_info->cmdq_msg_id)++; - curr_msg_id = cmd_info->cmdq_msg_id; - - cmdq_set_db(cmdq, SPHW_CMDQ_SYNC, next_prod_idx); - - cmdq_msg_unlock(cmdq); - - timeout = timeout ? timeout : CMDQ_CMD_TIMEOUT; - err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, - curr_msg_id, curr_prod_idx, - curr_wqe, timeout); - if (err) { - sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command(mod: %u, cmd: %u) timeout, prod idx: 0x%x\n", - mod, cmd, curr_prod_idx); - err = -ETIMEDOUT; - } - - if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { - sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", - mod, cmd); - err = -EAGAIN; - } - - smp_rmb(); /* read error code after completion */ - - return err ? 
err : errcode; -} - -static int cmdq_sync_cmd_detail_resp(struct sphw_cmdq *cmdq, u8 mod, u8 cmd, - struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, - u64 *out_param, u32 timeout, u16 channel) -{ - struct sphw_wq *wq = &cmdq->wq; - struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; - struct sphw_cmdq_cmd_info *cmd_info = NULL, saved_cmd_info; - struct completion done; - u16 curr_prod_idx, next_prod_idx; - int wrapped, errcode = 0; - int cmpt_code = CMDQ_SEND_CMPT_CODE; - u64 curr_msg_id; - int err; - - err = cmdq_msg_lock(cmdq, channel); - if (err) - return err; - - curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); - if (!curr_wqe) { - cmdq_msg_unlock(cmdq); - return -EBUSY; - } - - memset(&wqe, 0, sizeof(wqe)); - - wrapped = cmdq->wrapped; - - next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= (u16)wq->q_depth; - } - - cmd_info = &cmdq->cmd_infos[curr_prod_idx]; - - init_completion(&done); - - cmd_info->cmd_type = SPHW_CMD_TYPE_SGE_RESP; - cmd_info->done = &done; - cmd_info->errcode = &errcode; - cmd_info->direct_resp = out_param; - cmd_info->cmpt_code = &cmpt_code; - cmd_info->channel = channel; - cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, buf_out); - - memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); - - cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out, - wrapped, mod, cmd, curr_prod_idx); - - cmdq_wqe_fill(curr_wqe, &wqe); - - (cmd_info->cmdq_msg_id)++; - curr_msg_id = cmd_info->cmdq_msg_id; - - cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); - - cmdq_msg_unlock(cmdq); - - timeout = timeout ? timeout : CMDQ_CMD_TIMEOUT; - err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, - curr_msg_id, curr_prod_idx, - curr_wqe, timeout); - if (err) { - sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command(mod: %u, cmd: %u) timeout, prod idx: 0x%x\n", - mod, cmd, curr_prod_idx); - err = -ETIMEDOUT; - } - - if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { - sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", - mod, cmd); - err = -EAGAIN; - } - - smp_rmb(); /* read error code after completion */ - - return err ? 
err : errcode; -} - -static int cmdq_async_cmd(struct sphw_cmdq *cmdq, u8 mod, u8 cmd, - struct sphw_cmd_buf *buf_in, u16 channel) -{ - struct sphw_cmdq_cmd_info *cmd_info = NULL; - struct sphw_wq *wq = &cmdq->wq; - u16 curr_prod_idx, next_prod_idx; - struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; - int wrapped, err; - - err = cmdq_msg_lock(cmdq, channel); - if (err) - return err; - - curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); - if (!curr_wqe) { - cmdq_msg_unlock(cmdq); - return -EBUSY; - } - - memset(&wqe, 0, sizeof(wqe)); - - wrapped = cmdq->wrapped; - next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= (u16)wq->q_depth; - } - - cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped, - mod, cmd, curr_prod_idx); - - cmdq_wqe_fill(curr_wqe, &wqe); - - cmd_info = &cmdq->cmd_infos[curr_prod_idx]; - cmd_info->cmd_type = SPHW_CMD_TYPE_ASYNC; - cmd_info->channel = channel; - /* The caller will not free the cmd_buf of the asynchronous command, - * so there is no need to increase the reference count here - */ - cmd_info->buf_in = buf_in; - - /* LB mode 1 compatible, cmdq 0 also for async, which is sync_no_wait */ - cmdq_set_db(cmdq, SPHW_CMDQ_SYNC, next_prod_idx); - - cmdq_msg_unlock(cmdq); - - return 0; -} - -int cmdq_set_arm_bit(struct sphw_cmdq *cmdq, const void *buf_in, - u16 in_size) -{ - struct sphw_wq *wq = &cmdq->wq; - struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; - u16 curr_prod_idx, next_prod_idx; - int wrapped; - - /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */ - spin_lock_bh(&cmdq->cmdq_lock); - - curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); - if (!curr_wqe) { - spin_unlock_bh(&cmdq->cmdq_lock); - return -EBUSY; - } - - memset(&wqe, 0, sizeof(wqe)); - - wrapped = cmdq->wrapped; - - next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= (u16)wq->q_depth; - } - - cmdq_set_inline_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, in_size, NULL, - wrapped, SPHW_MOD_COMM, CMDQ_SET_ARM_CMD, - curr_prod_idx); - - /* cmdq wqe is not shadow, therefore wqe will be written to wq */ - cmdq_wqe_fill(curr_wqe, &wqe); - - cmdq->cmd_infos[curr_prod_idx].cmd_type = SPHW_CMD_TYPE_SET_ARM; - - cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); - - spin_unlock_bh(&cmdq->cmdq_lock); - - return 0; -} - -static int cmdq_params_valid(void *hwdev, struct sphw_cmd_buf *buf_in) -{ - if (!buf_in || !hwdev) { - pr_err("Invalid CMDQ buffer addr: %p or hwdev: %p\n", - buf_in, hwdev); - return -EINVAL; - } - - if (!buf_in->size || buf_in->size > SPHW_CMDQ_MAX_DATA_SIZE) { - pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size); - return -EINVAL; - } - - return 0; -} - -#define WAIT_CMDQ_ENABLE_TIMEOUT 300 -static int wait_cmdqs_enable(struct sphw_cmdqs *cmdqs) -{ - unsigned long end; - - end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT); - do { - if (cmdqs->status & SPHW_CMDQ_ENABLE) - return 0; - } while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag && - !cmdqs->disable_flag); - - cmdqs->disable_flag = 1; - - return -EBUSY; -} - -int sphw_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, - u64 *out_param, u32 timeout, u16 channel) -{ - struct sphw_cmdqs *cmdqs = NULL; - int err = cmdq_params_valid(hwdev, buf_in); - - if (err) { - pr_err("Invalid CMDQ parameters\n"); - return err; - } - - /* to do : support send cmdq only when cmdq init*/ - if (!(((struct sphw_hwdev 
*)hwdev)->chip_present_flag)) - return -EPERM; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - err = wait_cmdqs_enable(cmdqs); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n"); - return err; - } - - err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[SPHW_CMDQ_SYNC], - mod, cmd, buf_in, out_param, - timeout, channel); - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) - return -ETIMEDOUT; - else - return err; -} - -int sphw_cmdq_detail_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel) -{ - struct sphw_cmdqs *cmdqs = NULL; - int err = cmdq_params_valid(hwdev, buf_in); - - if (err) - return err; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) /*to do*/ - return -EPERM; - - err = wait_cmdqs_enable(cmdqs); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n"); - return err; - } - - err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[SPHW_CMDQ_SYNC], - mod, cmd, buf_in, buf_out, out_param, - timeout, channel); - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) - return -ETIMEDOUT; - else - return err; -} - -int sphw_cos_id_detail_resp(void *hwdev, u8 mod, u8 cmd, u8 cos_id, struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel) -{ - struct sphw_cmdqs *cmdqs = NULL; - int err = cmdq_params_valid(hwdev, buf_in); - - if (err) - return err; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) /*to do*/ - return -EPERM; - - err = wait_cmdqs_enable(cmdqs); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n"); - return err; - } - - if (cos_id >= SPHW_MAX_CMDQ_TYPES) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq id is invalid\n"); - return -EINVAL; - } - - err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[cos_id], mod, cmd, - buf_in, buf_out, out_param, - timeout, channel); - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) - return -ETIMEDOUT; - else - return err; -} - -int sphw_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, u16 channel) -{ - struct sphw_cmdqs *cmdqs = NULL; - int err = cmdq_params_valid(hwdev, buf_in); - - if (err) - return err; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) /*to do*/ - return -EPERM; - - err = wait_cmdqs_enable(cmdqs); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disabled\n"); - return err; - } - /* LB mode 1 compatible, cmdq 0 also for async, which is sync_no_wait*/ - return cmdq_async_cmd(&cmdqs->cmdq[SPHW_CMDQ_SYNC], mod, - cmd, buf_in, channel); -} - -int sphw_set_arm_bit(void *hwdev, enum sphw_set_arm_type q_type, u16 q_id) -{ - struct sphw_cmdqs *cmdqs = NULL; - struct sphw_cmdq *cmdq = NULL; - struct sphw_cmdq_arm_bit arm_bit; - enum sphw_cmdq_type cmdq_type = SPHW_CMDQ_SYNC; - u16 in_size; - int err; - - if (!hwdev) - return -EINVAL; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag))/* to do*/ - return -EPERM; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - - if (!(cmdqs->status & SPHW_CMDQ_ENABLE)) - return -EBUSY; - - if (q_type == SPHW_SET_ARM_CMDQ) { - if (q_id >= SPHW_MAX_CMDQ_TYPES) - return -EFAULT; - - cmdq_type = q_id; - } - /* sq is using interrupt now, so we only need to set arm bit for cmdq, - * remove comment below if need to set sq arm bit - * else - * cmdq_type = SPHW_CMDQ_SYNC; - */ - - cmdq = &cmdqs->cmdq[cmdq_type]; - - arm_bit.q_type = q_type;
- arm_bit.q_id = q_id; - in_size = sizeof(arm_bit); - - err = cmdq_set_arm_bit(cmdq, &arm_bit, in_size); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, - "Failed to set arm for q_type: %d, qid %d\n", - q_type, q_id); - return err; - } - - return 0; -} - -static void clear_wqe_complete_bit(struct sphw_cmdq *cmdq, - struct sphw_cmdq_wqe *wqe, u16 ci) -{ - struct sphw_ctrl *ctrl = NULL; - u32 header_info = WQE_HEADER(wqe)->header_info; - enum data_format df = CMDQ_WQE_HEADER_GET(header_info, DATA_FMT); - - if (df == DATA_SGE) - ctrl = &wqe->wqe_lcmd.ctrl; - else - ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; - - /* clear HW busy bit */ - ctrl->ctrl_info = 0; - cmdq->cmd_infos[ci].cmd_type = SPHW_CMD_TYPE_NONE; - - wmb(); /* verify wqe is clear */ - - sphw_wq_put_wqebbs(&cmdq->wq, NUM_WQEBBS_FOR_CMDQ_WQE); -} - -static void cmdq_sync_cmd_handler(struct sphw_cmdq *cmdq, - struct sphw_cmdq_wqe *wqe, u16 ci) -{ - spin_lock(&cmdq->cmdq_lock); - - cmdq_update_cmd_status(cmdq, ci, wqe); - - if (cmdq->cmd_infos[ci].cmpt_code) { - *cmdq->cmd_infos[ci].cmpt_code = CMDQ_COMPLETE_CMPT_CODE; - cmdq->cmd_infos[ci].cmpt_code = NULL; - } - - /* make sure cmpt_code operation before done operation */ - smp_rmb(); - - if (cmdq->cmd_infos[ci].done) { - complete(cmdq->cmd_infos[ci].done); - cmdq->cmd_infos[ci].done = NULL; - } - - spin_unlock(&cmdq->cmdq_lock); - - cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev); - clear_wqe_complete_bit(cmdq, wqe, ci); -} - -static void cmdq_async_cmd_handler(struct sphw_hwdev *hwdev, - struct sphw_cmdq *cmdq, - struct sphw_cmdq_wqe *wqe, u16 ci) -{ - cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], hwdev); - clear_wqe_complete_bit(cmdq, wqe, ci); -} - -static int cmdq_arm_ceq_handler(struct sphw_cmdq *cmdq, - struct sphw_cmdq_wqe *wqe, u16 ci) -{ - struct sphw_ctrl *ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; - u32 ctrl_info = ctrl->ctrl_info; - - if (!WQE_COMPLETED(ctrl_info)) - return -EBUSY; - - clear_wqe_complete_bit(cmdq, wqe, ci); - - return 0; -} - -#define SPHW_CMDQ_WQE_HEAD_LEN 32 -static void sphw_dump_cmdq_wqe_head(struct sphw_hwdev *hwdev, struct sphw_cmdq_wqe *wqe) -{ - u32 i; - u32 *data = (u32 *)wqe; - - for (i = 0; i < (SPHW_CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 4) { - sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", - *(data + i), *(data + i + 1), *(data + i + 2), - *(data + i + 3)); - } -} - -void sphw_cmdq_ceq_handler(void *handle, u32 ceqe_data) -{ - struct sphw_cmdqs *cmdqs = ((struct sphw_hwdev *)handle)->cmdqs; - enum sphw_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE); - struct sphw_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; - struct sphw_hwdev *hwdev = cmdqs->hwdev; - struct sphw_cmdq_wqe *wqe = NULL; - struct sphw_cmdq_wqe_lcmd *wqe_lcmd = NULL; - struct sphw_cmdq_cmd_info *cmd_info = NULL; - u32 ctrl_info; - u16 ci; - - while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) { - cmd_info = &cmdq->cmd_infos[ci]; - - switch (cmd_info->cmd_type) { - case SPHW_CMD_TYPE_NONE: - return; - case SPHW_CMD_TYPE_TIMEOUT: - sdk_warn(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n", - cmdq_type, ci); - sphw_dump_cmdq_wqe_head(hwdev, wqe); - fallthrough; - case SPHW_CMD_TYPE_FAKE_TIMEOUT: - cmdq_clear_cmd_buf(cmd_info, hwdev); - clear_wqe_complete_bit(cmdq, wqe, ci); - break; - case SPHW_CMD_TYPE_SET_ARM: - if (cmdq_arm_ceq_handler(cmdq, wqe, ci)) - return; - - break; - default: - /* only arm bit is using scmd wqe, the wqe is lcmd */ - wqe_lcmd = &wqe->wqe_lcmd; - ctrl_info = wqe_lcmd->ctrl.ctrl_info; - - if (!WQE_COMPLETED(ctrl_info)) - return; - - /* 
For FORCE_STOP cmd_type, we also need to wait for - * the firmware processing to complete to prevent the - * firmware from accessing the released cmd_buf - */ - if (cmd_info->cmd_type == SPHW_CMD_TYPE_FORCE_STOP) { - cmdq_clear_cmd_buf(cmd_info, hwdev); - clear_wqe_complete_bit(cmdq, wqe, ci); - } else if (cmd_info->cmd_type == SPHW_CMD_TYPE_ASYNC) { - cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci); - } else { - cmdq_sync_cmd_handler(cmdq, wqe, ci); - } - - break; - } - } -} - -static void cmdq_init_queue_ctxt(struct sphw_cmdqs *cmdqs, - struct sphw_cmdq *cmdq, - struct cmdq_ctxt_info *ctxt_info) -{ - struct sphw_wq *wq = &cmdq->wq; - u64 cmdq_first_block_paddr, pfn; - u16 start_ci = (u16)wq->cons_idx; - - pfn = CMDQ_PFN(sphw_wq_get_first_wqe_page_addr(wq)); - - ctxt_info->curr_wqe_page_pfn = - CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) | - CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | - CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | - CMDQ_CTXT_PAGE_INFO_SET(SPHW_CEQ_ID_CMDQ, EQ_ID) | - CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN); - - if (!WQ_IS_0_LEVEL_CLA(wq)) { - cmdq_first_block_paddr = cmdqs->wq_block_paddr; - pfn = CMDQ_PFN(cmdq_first_block_paddr); - } - - ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) | - CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN); -} - -static int init_cmdq(struct sphw_cmdq *cmdq, struct sphw_hwdev *hwdev, - enum sphw_cmdq_type q_type) -{ - void __iomem *db_base; - int err; - - cmdq->cmdq_type = q_type; - cmdq->wrapped = 1; - cmdq->hwdev = hwdev; - - spin_lock_init(&cmdq->cmdq_lock); - - cmdq->cmd_infos = kcalloc(cmdq->wq.q_depth, sizeof(*cmdq->cmd_infos), - GFP_KERNEL); - if (!cmdq->cmd_infos) { - sdk_err(hwdev->dev_hdl, "Failed to allocate cmdq infos\n"); - err = -ENOMEM; - goto cmd_infos_err; - } - - err = sphw_alloc_db_addr(hwdev, &db_base, NULL); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to allocate doorbell address\n"); - goto alloc_db_err; - } - - cmdq->db_base = (u8 *)db_base; - return 0; - -alloc_db_err: - kfree(cmdq->cmd_infos); - -cmd_infos_err: - - return err; -} - -static void free_cmdq(struct sphw_hwdev *hwdev, struct sphw_cmdq *cmdq) -{ - sphw_free_db_addr(hwdev, cmdq->db_base, NULL); - kfree(cmdq->cmd_infos); -} - -static int sphw_set_cmdq_ctxts(struct sphw_hwdev *hwdev) -{ - struct sphw_cmdqs *cmdqs = hwdev->cmdqs; - enum sphw_cmdq_type cmdq_type; - int err; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - err = sphw_set_cmdq_ctxt(hwdev, (u8)cmdq_type, &cmdqs->cmdq[cmdq_type].cmdq_ctxt); - if (err) - return err; - } - - cmdqs->status |= SPHW_CMDQ_ENABLE; - cmdqs->disable_flag = 0; - - return 0; -} - -static void cmdq_flush_sync_cmd(struct sphw_cmdq_cmd_info *cmd_info) -{ - if (cmd_info->cmd_type != SPHW_CMD_TYPE_DIRECT_RESP && - cmd_info->cmd_type != SPHW_CMD_TYPE_SGE_RESP) - return; - - cmd_info->cmd_type = SPHW_CMD_TYPE_FORCE_STOP; - - if (cmd_info->cmpt_code && - *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE) - *cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE; - - if (cmd_info->done) { - complete(cmd_info->done); - cmd_info->done = NULL; - cmd_info->cmpt_code = NULL; - cmd_info->direct_resp = NULL; - cmd_info->errcode = NULL; - } -} - -void sphw_cmdq_flush_cmd(struct sphw_hwdev *hwdev, struct sphw_cmdq *cmdq) -{ - struct sphw_cmdq_cmd_info *cmd_info = NULL; - u16 ci = 0; - - spin_lock_bh(&cmdq->cmdq_lock); - - while (cmdq_read_wqe(&cmdq->wq, &ci)) { - sphw_wq_put_wqebbs(&cmdq->wq, NUM_WQEBBS_FOR_CMDQ_WQE); - cmd_info = &cmdq->cmd_infos[ci]; - - if (cmd_info->cmd_type == SPHW_CMD_TYPE_DIRECT_RESP || - 
cmd_info->cmd_type == SPHW_CMD_TYPE_SGE_RESP) - cmdq_flush_sync_cmd(cmd_info); - } - - spin_unlock_bh(&cmdq->cmdq_lock); -} - -void sphw_cmdq_flush_channel_sync_cmd(struct sphw_hwdev *hwdev, u16 channel) -{ - struct sphw_cmdq_cmd_info *cmd_info = NULL; - struct sphw_cmdq *cmdq = NULL; - struct sphw_wq *wq = NULL; - u16 wqe_cnt, ci, i; - - if (channel >= SPHW_CHANNEL_MAX) - return; - - cmdq = &hwdev->cmdqs->cmdq[SPHW_CMDQ_SYNC]; - - spin_lock_bh(&cmdq->cmdq_lock); - - wq = &cmdq->wq; - ci = wq->cons_idx; - wqe_cnt = (u16)WQ_MASK_IDX(wq, wq->prod_idx + - wq->q_depth - wq->cons_idx); - for (i = 0; i < wqe_cnt; i++) { - cmd_info = &cmdq->cmd_infos[WQ_MASK_IDX(wq, ci + i)]; - - if (cmd_info->channel == channel) - cmdq_flush_sync_cmd(cmd_info); - } - - spin_unlock_bh(&cmdq->cmdq_lock); -} - -static void cmdq_reset_all_cmd_buff(struct sphw_cmdq *cmdq) -{ - u16 i; - - for (i = 0; i < cmdq->wq.q_depth; i++) - cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev); -} - -int sphw_cmdq_set_channel_status(struct sphw_hwdev *hwdev, u16 channel, bool enable) -{ - if (channel >= SPHW_CHANNEL_MAX) - return -EINVAL; - - if (enable) { - clear_bit(channel, &hwdev->cmdqs->channel_stop); - } else { - set_bit(channel, &hwdev->cmdqs->channel_stop); - sphw_cmdq_flush_channel_sync_cmd(hwdev, channel); - } - - sdk_info(hwdev->dev_hdl, "%s cmdq channel 0x%x\n", - enable ? "Enable" : "Disable", channel); - - return 0; -} - -void sphw_cmdq_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable) -{ - hwdev->cmdqs->lock_channel_en = enable; - - sdk_info(hwdev->dev_hdl, "%s cmdq channel lock\n", - enable ? "Enable" : "Disable"); -} - -int sphw_reinit_cmdq_ctxts(struct sphw_hwdev *hwdev) -{ - struct sphw_cmdqs *cmdqs = hwdev->cmdqs; - enum sphw_cmdq_type cmdq_type; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - sphw_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); - cmdq_reset_all_cmd_buff(&cmdqs->cmdq[cmdq_type]); - cmdqs->cmdq[cmdq_type].wrapped = 1; - sphw_wq_reset(&cmdqs->cmdq[cmdq_type].wq); - } - - return sphw_set_cmdq_ctxts(hwdev); -} - -static int create_cmdq_wq(struct sphw_cmdqs *cmdqs) -{ - enum sphw_cmdq_type type, cmdq_type; - int err; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - err = sphw_wq_create(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type].wq, SPHW_CMDQ_DEPTH, - CMDQ_WQEBB_SIZE); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, "Failed to create cmdq wq\n"); - goto destroy_wq; - } - } - - /* 1-level CLA must put all cmdq's wq page addr in one wq block */ - if (!WQ_IS_0_LEVEL_CLA(&cmdqs->cmdq[SPHW_CMDQ_SYNC].wq)) { - /* cmdq wq's CLA table is up to 512B */ -#define CMDQ_WQ_CLA_SIZE 512 - if (cmdqs->cmdq[SPHW_CMDQ_SYNC].wq.num_wq_pages > - CMDQ_WQ_CLA_SIZE / sizeof(u64)) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq wq page exceed limit: %lu\n", - CMDQ_WQ_CLA_SIZE / sizeof(u64)); - goto destroy_wq; - } - - cmdqs->wq_block_vaddr = - dma_alloc_coherent(cmdqs->hwdev->dev_hdl, PAGE_SIZE, - &cmdqs->wq_block_paddr, GFP_KERNEL); - if (!cmdqs->wq_block_vaddr) { - sdk_err(cmdqs->hwdev->dev_hdl, "Failed to alloc cmdq wq block\n"); - goto destroy_wq; - } - - type = SPHW_CMDQ_SYNC; - for (; type < SPHW_MAX_CMDQ_TYPES; type++) - memcpy((u8 *)cmdqs->wq_block_vaddr + - CMDQ_WQ_CLA_SIZE * type, - cmdqs->cmdq[type].wq.wq_block_vaddr, - cmdqs->cmdq[type].wq.num_wq_pages * sizeof(u64)); - } - - return 0; - -destroy_wq: - type = SPHW_CMDQ_SYNC; - for (; type < cmdq_type; type++) - sphw_wq_destroy(&cmdqs->cmdq[type].wq); - - return err; -} - -static void 
destroy_cmdq_wq(struct sphw_cmdqs *cmdqs) -{ - enum sphw_cmdq_type cmdq_type; - - if (cmdqs->wq_block_vaddr) - dma_free_coherent(cmdqs->hwdev->dev_hdl, PAGE_SIZE, - cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr); - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) - sphw_wq_destroy(&cmdqs->cmdq[cmdq_type].wq); -} - -int sphw_cmdqs_init(struct sphw_hwdev *hwdev) -{ - struct sphw_cmdqs *cmdqs = NULL; - enum sphw_cmdq_type type, cmdq_type; - int err; - - cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); - if (!cmdqs) - return -ENOMEM; - - hwdev->cmdqs = cmdqs; - cmdqs->hwdev = hwdev; - - cmdqs->cmd_buf_pool = dma_pool_create("sphw_cmdq", hwdev->dev_hdl, - SPHW_CMDQ_BUF_SIZE, - SPHW_CMDQ_BUF_SIZE, 0ULL); - if (!cmdqs->cmd_buf_pool) { - sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n"); - err = -ENOMEM; - goto pool_create_err; - } - - err = create_cmdq_wq(cmdqs); - if (err) - goto create_wq_err; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%d\n", - cmdq_type); - goto init_cmdq_err; - } - - cmdq_init_queue_ctxt(cmdqs, &cmdqs->cmdq[cmdq_type], - &cmdqs->cmdq[cmdq_type].cmdq_ctxt); - } - - err = sphw_set_cmdq_ctxts(hwdev); - if (err) - goto init_cmdq_err; - - return 0; - -init_cmdq_err: - type = SPHW_CMDQ_SYNC; - for (; type < cmdq_type; type++) - free_cmdq(hwdev, &cmdqs->cmdq[type]); - - destroy_cmdq_wq(cmdqs); - -create_wq_err: - dma_pool_destroy(cmdqs->cmd_buf_pool); - -pool_create_err: - kfree(cmdqs); - - return err; -} - -void sphw_cmdqs_free(struct sphw_hwdev *hwdev) -{ - struct sphw_cmdqs *cmdqs = hwdev->cmdqs; - enum sphw_cmdq_type cmdq_type = SPHW_CMDQ_SYNC; - - cmdqs->status &= ~SPHW_CMDQ_ENABLE; - - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - sphw_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); - cmdq_reset_all_cmd_buff(&cmdqs->cmdq[cmdq_type]); - free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]); - } - - destroy_cmdq_wq(cmdqs); - - dma_pool_destroy(cmdqs->cmd_buf_pool); - - kfree(cmdqs); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h deleted file mode 100644 index 2c1f1bbda4ada3c22491fa3e93bf8c371b69b654..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h +++ /dev/null @@ -1,195 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_CMDQ_H -#define SPHW_CMDQ_H - -#include "sphw_comm_msg_intf.h" - -#define SPHW_SCMD_DATA_LEN 16 - -#define SPHW_CMDQ_DEPTH 4096 - -enum sphw_cmdq_type { - SPHW_CMDQ_SYNC, - SPHW_CMDQ_ASYNC, - SPHW_MAX_CMDQ_TYPES, -}; - -enum sphw_db_src_type { - SPHW_DB_SRC_CMDQ_TYPE, - SPHW_DB_SRC_L2NIC_SQ_TYPE, -}; - -enum sphw_cmdq_db_type { - SPHW_DB_SQ_RQ_TYPE, - SPHW_DB_CMDQ_TYPE, -}; - -/*hardware define: cmdq wqe*/ -struct sphw_cmdq_header { - u32 header_info; - u32 saved_data; -}; - -struct sphw_scmd_bufdesc { - u32 buf_len; - u32 rsvd; - u8 data[SPHW_SCMD_DATA_LEN]; -}; - -struct sphw_lcmd_bufdesc { - struct sphw_sge sge; - u32 rsvd1; - u64 saved_async_buf; - u64 rsvd3; -}; - -struct sphw_cmdq_db { - u32 db_head; - u32 db_info; -}; - -struct sphw_status { - u32 status_info; -}; - -struct sphw_ctrl { - u32 ctrl_info; -}; - -struct sphw_sge_resp { - struct sphw_sge sge; - u32 rsvd; -}; - -struct sphw_cmdq_completion { - union { - struct sphw_sge_resp sge_resp; 
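- /* Filled by hardware on completion: either an SGE response written to a separate buffer, or an 8-byte value returned directly in the WQE. */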
- u64 direct_resp; - }; -}; - -struct sphw_cmdq_wqe_scmd { - struct sphw_cmdq_header header; - u64 rsvd; - struct sphw_status status; - struct sphw_ctrl ctrl; - struct sphw_cmdq_completion completion; - struct sphw_scmd_bufdesc buf_desc; -}; - -struct sphw_cmdq_wqe_lcmd { - struct sphw_cmdq_header header; - struct sphw_status status; - struct sphw_ctrl ctrl; - struct sphw_cmdq_completion completion; - struct sphw_lcmd_bufdesc buf_desc; -}; - -struct sphw_cmdq_inline_wqe { - struct sphw_cmdq_wqe_scmd wqe_scmd; -}; - -struct sphw_cmdq_wqe { - union { - struct sphw_cmdq_inline_wqe inline_wqe; - struct sphw_cmdq_wqe_lcmd wqe_lcmd; - }; -}; - -struct sphw_cmdq_arm_bit { - u32 q_type; - u32 q_id; -}; - -enum sphw_cmdq_status { - SPHW_CMDQ_ENABLE = BIT(0), -}; - -enum sphw_cmdq_cmd_type { - SPHW_CMD_TYPE_NONE, - SPHW_CMD_TYPE_SET_ARM, - SPHW_CMD_TYPE_DIRECT_RESP, - SPHW_CMD_TYPE_SGE_RESP, - SPHW_CMD_TYPE_ASYNC, - SPHW_CMD_TYPE_FAKE_TIMEOUT, - SPHW_CMD_TYPE_TIMEOUT, - SPHW_CMD_TYPE_FORCE_STOP, -}; - -struct sphw_cmdq_cmd_info { - enum sphw_cmdq_cmd_type cmd_type; - u16 channel; - - struct completion *done; - int *errcode; - int *cmpt_code; - u64 *direct_resp; - u64 cmdq_msg_id; - - struct sphw_cmd_buf *buf_in; - struct sphw_cmd_buf *buf_out; -}; - -struct sphw_cmdq { - struct sphw_wq wq; - - enum sphw_cmdq_type cmdq_type; - int wrapped; - - /* spinlock for send cmdq commands */ - spinlock_t cmdq_lock; - - /* doorbell area */ - u8 __iomem *db_base; - - struct cmdq_ctxt_info cmdq_ctxt; - - struct sphw_cmdq_cmd_info *cmd_infos; - - struct sphw_hwdev *hwdev; -}; - -struct sphw_cmdqs { - struct sphw_hwdev *hwdev; - - struct pci_pool *cmd_buf_pool; - - /* All cmdq's CLA of a VF occupy a PAGE when cmdq wq is 1-level CLA */ - dma_addr_t wq_block_paddr; - void *wq_block_vaddr; - struct sphw_cmdq cmdq[SPHW_MAX_CMDQ_TYPES]; - - u32 status; - u32 disable_flag; - - bool lock_channel_en; - unsigned long channel_stop; -}; - -enum sphw_set_arm_type { - SPHW_SET_ARM_CMDQ, - SPHW_SET_ARM_SQ, - SPHW_SET_ARM_TYPE_NUM, -}; - -int sphw_set_arm_bit(void *hwdev, enum sphw_set_arm_type q_type, u16 q_id); - -void sphw_cmdq_ceq_handler(void *hwdev, u32 ceqe_data); - -int sphw_reinit_cmdq_ctxts(struct sphw_hwdev *hwdev); - -bool sphw_cmdq_idle(struct sphw_cmdq *cmdq); - -int sphw_cmdqs_init(struct sphw_hwdev *hwdev); - -void sphw_cmdqs_free(struct sphw_hwdev *hwdev); - -void sphw_cmdq_flush_cmd(struct sphw_hwdev *hwdev, struct sphw_cmdq *cmdq); - -int sphw_cmdq_set_channel_status(struct sphw_hwdev *hwdev, u16 channel, bool enable); - -void sphw_cmdq_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h deleted file mode 100644 index d0e4c87942b58f76b7cb707b5807adbea5d593af..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h +++ /dev/null @@ -1,60 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_COMMON_CMD_H -#define SPHW_COMMON_CMD_H - -/* COMM Commands between Driver to MPU */ -enum sphw_mgmt_cmd { - COMM_MGMT_CMD_FUNC_RESET = 0, - COMM_MGMT_CMD_FEATURE_NEGO, - COMM_MGMT_CMD_FLUSH_DOORBELL, - COMM_MGMT_CMD_START_FLUSH, - COMM_MGMT_CMD_SET_FUNC_FLR, - COMM_MGMT_CMD_GET_GLOBAL_ATTR, - - COMM_MGMT_CMD_SET_CMDQ_CTXT = 20, - COMM_MGMT_CMD_SET_VAT, - COMM_MGMT_CMD_CFG_PAGESIZE, - COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, - COMM_MGMT_CMD_SET_CEQ_CTRL_REG, - COMM_MGMT_CMD_SET_DMA_ATTR, 
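- /* Command IDs are assigned in blocks of 20 (0, 20, 40, ...), leaving spare values inside each functional group. */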
- - COMM_MGMT_CMD_GET_MQM_FIX_INFO = 40, - COMM_MGMT_CMD_SET_MQM_CFG_INFO, - COMM_MGMT_CMD_SET_MQM_SRCH_GPA, - COMM_MGMT_CMD_SET_PPF_TMR, - COMM_MGMT_CMD_SET_PPF_HT_GPA, - COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, - - COMM_MGMT_CMD_GET_FW_VERSION = 60, - COMM_MGMT_CMD_GET_BOARD_INFO, - COMM_MGMT_CMD_SYNC_TIME, - COMM_MGMT_CMD_GET_HW_PF_INFOS, - COMM_MGMT_CMD_SEND_BDF_INFO, - COMM_MGMT_CMD_GET_VIRTIO_BDF_INFO, - - COMM_MGMT_CMD_UPDATE_FW = 80, - COMM_MGMT_CMD_ACTIVE_FW, - COMM_MGMT_CMD_HOT_ACTIVE_FW, - COMM_MGMT_CMD_HOT_ACTIVE_DONE_NOTICE, - COMM_MGMT_CMD_SWITCH_CFG, - COMM_MGMT_CMD_CHECK_FLASH, - COMM_MGMT_CMD_CHECK_FLASH_RW, - COMM_MGMT_CMD_RESOURCE_CFG, - - COMM_MGMT_CMD_FAULT_REPORT = 100, - COMM_MGMT_CMD_WATCHDOG_INFO, - COMM_MGMT_CMD_MGMT_RESET, - COMM_MGMT_CMD_FFM_SET, - - COMM_MGMT_CMD_GET_LOG = 120, - COMM_MGMT_CMD_TEMP_OP, - COMM_MGMT_CMD_EN_AUTO_RST_CHIP, - COMM_MGMT_CMD_CFG_REG, - COMM_MGMT_CMD_GET_CHIP_ID, - COMM_MGMT_CMD_SYSINFO_DFX, - COMM_MGMT_CMD_PCIE_DFX_NTC, -}; - -#endif /* SPHW_COMMON_CMD_H */ diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h deleted file mode 100644 index fd12a47e5bb5790327fa1ef1fc86baa6bf388587..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h +++ /dev/null @@ -1,273 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_COMM_MSG_INTF_H -#define SPHW_COMM_MSG_INTF_H - -#include "sphw_mgmt_msg_base.h" - -#define FUNC_RESET_FLAG_MAX_VALUE ((1U << (RES_TYPE_IPSEC + 1)) - 1) -struct comm_cmd_func_reset { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1[3]; - u64 reset_flag; -}; - -enum { - COMM_F_API_CHAIN = 1U << 0, -}; - -#define COMM_MAX_FEATURE_QWORD 4 -struct comm_cmd_feature_nego { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; /* 1: set, 0: get */ - u8 rsvd; - u64 s_feature[COMM_MAX_FEATURE_QWORD]; -}; - -struct comm_cmd_clear_doorbell { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1[3]; -}; - -struct comm_cmd_clear_resource { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1[3]; -}; - -struct comm_global_attr { - u8 max_host_num; - u8 max_pf_num; - u16 vf_id_start; - - u8 mgmt_host_node_id; /* for api cmd to mgmt cpu */ - u8 rsvd1[3]; - - u32 rsvd2[8]; -}; - -struct comm_cmd_get_glb_attr { - struct mgmt_msg_head head; - - struct comm_global_attr attr; -}; - -enum sphw_fw_ver_type { - SPHW_FW_VER_TYPE_BOOT, - SPHW_FW_VER_TYPE_MPU, - SPHW_FW_VER_TYPE_NPU, - SPHW_FW_VER_TYPE_SMU, - SPHW_FW_VER_TYPE_CFG, -}; - -#define SPHW_FW_VERSION_LEN 16 -#define SPHW_FW_COMPILE_TIME_LEN 20 -struct comm_cmd_get_fw_version { - struct mgmt_msg_head head; - - u16 fw_type; - u16 rsvd1; - u8 ver[SPHW_FW_VERSION_LEN]; - u8 time[SPHW_FW_COMPILE_TIME_LEN]; -}; - -/* hardware define: cmdq context */ -struct cmdq_ctxt_info { - u64 curr_wqe_page_pfn; - u64 wq_block_pfn; -}; - -struct comm_cmd_cmdq_ctxt { - struct mgmt_msg_head head; - - u16 func_id; - u8 cmdq_id; - u8 rsvd1[5]; - - struct cmdq_ctxt_info ctxt; -}; - -struct comm_cmd_root_ctxt { - struct mgmt_msg_head head; - - u16 func_id; - u8 set_cmdq_depth; - u8 cmdq_depth; - u16 rx_buf_sz; - u8 lro_en; - u8 rsvd1; - u16 sq_depth; - u16 rq_depth; - u64 rsvd2; -}; - -struct comm_cmd_wq_page_size { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; - /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ - u8 page_size; - - u32 rsvd1; -}; - -struct 
comm_cmd_msix_config { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; - u8 rsvd1; - u16 msix_index; - u8 pending_cnt; - u8 coalesce_timer_cnt; - u8 resend_timer_cnt; - u8 lli_timer_cnt; - u8 lli_credit_cnt; - u8 rsvd2[5]; -}; - -struct comm_cmd_dma_attr_config { - struct mgmt_msg_head head; - - u16 func_id; - u8 entry_idx; - u8 st; - u8 at; - u8 ph; - u8 no_snooping; - u8 tph_en; - u32 resv1; -}; - -struct comm_cmd_ceq_ctrl_reg { - struct mgmt_msg_head head; - - u16 func_id; - u16 q_id; - u32 ctrl0; - u32 ctrl1; - u32 rsvd1; -}; - -struct comm_cmd_func_tmr_bitmap_op { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; /* 1: start, 0: stop */ - u8 rsvd1[5]; -}; - -struct comm_cmd_ppf_tmr_op { - struct mgmt_msg_head head; - - u8 ppf_id; - u8 opcode; /* 1: start, 0: stop */ - u8 rsvd1[6]; -}; - -struct comm_cmd_ht_gpa { - struct mgmt_msg_head head; - - u8 host_id; - u32 rsvd1[7]; - u64 page_pa0; - u64 page_pa1; -}; - -struct comm_cmd_get_eqm_num { - struct mgmt_msg_head head; - - u8 host_id; - u8 rsvd1[3]; - u32 chunk_num; - u32 search_gpa_num; -}; - -struct comm_cmd_eqm_cfg { - struct mgmt_msg_head head; - - u8 host_id; - u8 valid; - u16 rsvd1; - u32 page_size; - u32 rsvd2; -}; - -struct comm_cmd_eqm_search_gpa { - struct mgmt_msg_head head; - - u8 host_id; - u8 rsvd1[3]; - u32 start_idx; - u32 num; - u32 rsvd2; - u64 gpa_hi52[0]; -}; - -struct comm_cmd_ffm_info { - struct mgmt_msg_head head; - - u8 node_id; - /* error level of the interrupt source */ - u8 err_level; - /* Classification by interrupt source properties */ - u16 err_type; - u32 err_csr_addr; - u32 err_csr_value; - u32 rsvd1; -}; - -struct sphw_board_info { - u8 board_type; - u8 port_num; - u8 port_speed; - u8 pcie_width; - u8 host_num; - u8 pf_num; - u16 vf_total_num; - u8 tile_num; - u8 qcm_num; - u8 core_num; - u8 work_mode; - u8 service_mode; - u8 pcie_mode; - u8 boot_sel; - u8 board_id; - u32 cfg_addr; -}; - -struct comm_cmd_board_info { - struct mgmt_msg_head head; - - struct sphw_board_info info; - u32 rsvd[25]; -}; - -struct comm_cmd_sync_time { - struct mgmt_msg_head head; - - u64 mstime; - u64 rsvd1; -}; - -struct comm_cmd_bdf_info { - struct mgmt_msg_head head; - - u16 function_idx; - u8 rsvd1[2]; - u8 bus; - u8 device; - u8 function; - u8 rsvd2[5]; -}; - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c deleted file mode 100644 index aaba9e68ba3160590d28c7934bd6853cc29f8797..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include -#include -#include - -#include "sphw_common.h" - -int sphw_dma_alloc_coherent_align(void *dev_hdl, u64 size, u64 align, unsigned int flag, - struct sphw_dma_addr_align *mem_align) -{ - void *vaddr = NULL, *align_vaddr = NULL; - dma_addr_t paddr, align_paddr; - u64 real_size = size; - - vaddr = dma_alloc_coherent(dev_hdl, real_size, &paddr, flag); - if (!vaddr) - return -ENOMEM; - - align_paddr = ALIGN(paddr, align); - /* align */ - if (align_paddr == paddr) { - align_vaddr = vaddr; - goto out; - } - - dma_free_coherent(dev_hdl, real_size, vaddr, paddr); - - /* realloc memory for align */ - real_size = size + align; - vaddr = dma_alloc_coherent(dev_hdl, real_size, &paddr, flag); - if (!vaddr) - return -ENOMEM; - - align_paddr = ALIGN(paddr, align); - align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr)); - -out: - 
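- /* Keep both address pairs: ori_vaddr/ori_paddr and real_size are what dma_free_coherent() needs on free, while the aligned pair is what the caller uses. */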
mem_align->real_size = (u32)real_size; - mem_align->ori_vaddr = vaddr; - mem_align->ori_paddr = paddr; - mem_align->align_vaddr = align_vaddr; - mem_align->align_paddr = align_paddr; - - return 0; -} - -void sphw_dma_free_coherent_align(void *dev_hdl, struct sphw_dma_addr_align *mem_align) -{ - dma_free_coherent(dev_hdl, mem_align->real_size, - mem_align->ori_vaddr, mem_align->ori_paddr); -} - -int sphw_wait_for_timeout(void *priv_data, wait_cpl_handler handler, - u32 wait_total_ms, u32 wait_once_us) -{ - enum sphw_wait_return ret; - unsigned long end; - /* Take 9/10 * wait_once_us as the minimum sleep time of usleep_range */ - u32 usleep_min = wait_once_us - wait_once_us / 10; - - if (!handler) - return -EINVAL; - - end = jiffies + msecs_to_jiffies(wait_total_ms); - do { - ret = handler(priv_data); - if (ret == WAIT_PROCESS_CPL) - return 0; - else if (ret == WAIT_PROCESS_ERR) - return -EIO; - - /* msleep is accurate enough for sleeps of 20ms or more */ - if (wait_once_us >= 20 * USEC_PER_MSEC) - msleep(wait_once_us / USEC_PER_MSEC); - else - usleep_range(usleep_min, wait_once_us); - } while (time_before(jiffies, end)); - - ret = handler(priv_data); - if (ret == WAIT_PROCESS_CPL) - return 0; - else if (ret == WAIT_PROCESS_ERR) - return -EIO; - - return -ETIMEDOUT; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h deleted file mode 100644 index 05327bd4bcfef8e429c404bf0ded8052ab244216..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h +++ /dev/null @@ -1,106 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_COMMON_H -#define SPHW_COMMON_H - -#include - -struct sphw_dma_addr_align { - u32 real_size; - - void *ori_vaddr; - dma_addr_t ori_paddr; - - void *align_vaddr; - dma_addr_t align_paddr; -}; - -int sphw_dma_alloc_coherent_align(void *dev_hdl, u64 size, u64 align, unsigned int flag, - struct sphw_dma_addr_align *mem_align); - -void sphw_dma_free_coherent_align(void *dev_hdl, struct sphw_dma_addr_align *mem_align); - -enum sphw_wait_return { - WAIT_PROCESS_CPL = 0, - WAIT_PROCESS_WAITING = 1, - WAIT_PROCESS_ERR = 2, -}; - -typedef enum sphw_wait_return (*wait_cpl_handler)(void *priv_data); - -int sphw_wait_for_timeout(void *priv_data, wait_cpl_handler handler, - u32 wait_total_ms, u32 wait_once_us); - -/* * - * sphw_cpu_to_be32 - convert data to big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert, must be a multiple of 4B - */ -static inline void sphw_cpu_to_be32(void *data, int len) -{ - int i, chunk_sz = sizeof(u32); - u32 *mem = data; - - if (!data) - return; - - len = len / chunk_sz; - - for (i = 0; i < len; i++) { - *mem = cpu_to_be32(*mem); - mem++; - } -} - -/* * - * sphw_be32_to_cpu - convert data from big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert - */ -static inline void sphw_be32_to_cpu(void *data, int len) -{ - int i, chunk_sz = sizeof(u32); - u32 *mem = data; - - if (!data) - return; - - len = len / chunk_sz; - - for (i = 0; i < len; i++) { - *mem = be32_to_cpu(*mem); - mem++; - } -} - -struct sphw_sge { - u32 hi_addr; - u32 lo_addr; - u32 len; -}; - -/* * - * sphw_set_sge - set dma area in scatter gather entry - * @sge: scatter gather entry - * @addr: dma address - * @len: length of relevant data in the dma address - */ -static inline void sphw_set_sge(struct sphw_sge *sge, dma_addr_t addr, int len) -{ -
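- /* Split the 64-bit DMA address into the two 32-bit halves the hardware descriptor expects. */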
sge->hi_addr = upper_32_bits(addr); - sge->lo_addr = lower_32_bits(addr); - sge->len = len; -} - -#define sdk_err(dev, format, ...) dev_err(dev, "[COMM]" format, ##__VA_ARGS__) -#define sdk_warn(dev, format, ...) dev_warn(dev, "[COMM]" format, ##__VA_ARGS__) -#define sdk_notice(dev, format, ...) dev_notice(dev, "[COMM]" format, ##__VA_ARGS__) -#define sdk_info(dev, format, ...) dev_info(dev, "[COMM]" format, ##__VA_ARGS__) - -#define nic_err(dev, format, ...) dev_err(dev, "[NIC]" format, ##__VA_ARGS__) -#define nic_warn(dev, format, ...) dev_warn(dev, "[NIC]" format, ##__VA_ARGS__) -#define nic_notice(dev, format, ...) dev_notice(dev, "[NIC]" format, ##__VA_ARGS__) -#define nic_info(dev, format, ...) dev_info(dev, "[NIC]" format, ##__VA_ARGS__) - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h deleted file mode 100644 index 8cce36698e3d153dcaeeaf2c9661161e82b5b49f..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h +++ /dev/null @@ -1,982 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_CRM_H -#define SPHW_CRM_H - -#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof((arr)[0]))) - -#define SPHW_MGMT_VERSION_MAX_LEN 32 - -#define SPHW_FW_VERSION_NAME 16 -#define SPHW_FW_VERSION_SECTION_CNT 4 -#define SPHW_FW_VERSION_SECTION_BORDER 0xFF -struct sphw_fw_version { - u8 mgmt_ver[SPHW_FW_VERSION_NAME]; - u8 microcode_ver[SPHW_FW_VERSION_NAME]; - u8 boot_ver[SPHW_FW_VERSION_NAME]; -}; - -#define SPHW_MGMT_CMD_UNSUPPORTED 0xFF - -/* show each drivers only such as nic_service_cap, - * toe_service_cap structure, but not show service_cap - */ -enum sphw_service_type { - SERVICE_T_NIC = 0, - SERVICE_T_OVS, - SERVICE_T_ROCE, - SERVICE_T_TOE, - SERVICE_T_IOE, - SERVICE_T_FC, - SERVICE_T_VBS, - SERVICE_T_IPSEC, - SERVICE_T_VIRTIO, - SERVICE_T_MIGRATE, - SERVICE_T_MAX, - - /* Only used for interruption resource management, - * mark the request module - */ - SERVICE_T_INTF = (1 << 15), - SERVICE_T_CQM = (1 << 16), -}; - -struct nic_service_cap { - u16 max_sqs; - u16 max_rqs; -}; - -/* PF/VF ToE service resource structure */ -struct dev_toe_svc_cap { - /* PF resources */ - u32 max_pctxs; /* Parent Context: max specifications 1M */ - u32 max_cqs; - u16 max_srqs; - u32 srq_id_start; - u32 max_mpts; -}; - -/* ToE services */ -struct toe_service_cap { - struct dev_toe_svc_cap dev_toe_cap; - - bool alloc_flag; - u32 pctx_sz; /* 1KB */ - u32 scqc_sz; /* 64B */ -}; - -/* PF FC service resource structure defined */ -struct dev_fc_svc_cap { - /* PF Parent QPC */ - u32 max_parent_qpc_num; /* max number is 2048 */ - - /* PF Child QPC */ - u32 max_child_qpc_num; /* max number is 2048 */ - u32 child_qpc_id_start; - - /* PF SCQ */ - u32 scq_num; /* 16 */ - - /* PF supports SRQ */ - u32 srq_num; /* Number of SRQ is 2 */ - - u8 vp_id_start; - u8 vp_id_end; -}; - -/* FC services */ -struct fc_service_cap { - struct dev_fc_svc_cap dev_fc_cap; - - /* Parent QPC */ - u32 parent_qpc_size; /* 256B */ - - /* Child QPC */ - u32 child_qpc_size; /* 256B */ - - /* SQ */ - u32 sqe_size; /* 128B(in linked list mode) */ - - /* SCQ */ - u32 scqc_size; /* Size of the Context 32B */ - u32 scqe_size; /* 64B */ - - /* SRQ */ - u32 srqc_size; /* Size of SRQ Context (64B) */ - u32 srqe_size; /* 32B */ -}; - -struct dev_roce_svc_own_cap { - u32 max_qps; - u32 max_cqs; - u32 max_srqs; - u32 max_mpts; - u32 max_drc_qps; - - u32 cmtt_cl_start; - u32 
cmtt_cl_end; - u32 cmtt_cl_sz; - - u32 dmtt_cl_start; - u32 dmtt_cl_end; - u32 dmtt_cl_sz; - - u32 wqe_cl_start; - u32 wqe_cl_end; - u32 wqe_cl_sz; - - u32 qpc_entry_sz; - u32 max_wqes; - u32 max_rq_sg; - u32 max_sq_inline_data_sz; - u32 max_rq_desc_sz; - - u32 rdmarc_entry_sz; - u32 max_qp_init_rdma; - u32 max_qp_dest_rdma; - - u32 max_srq_wqes; - u32 reserved_srqs; - u32 max_srq_sge; - u32 srqc_entry_sz; - - u32 max_msg_sz; /* Message size 2GB */ -}; - -/* RDMA service capability structure */ -struct dev_rdma_svc_cap { - /* ROCE service unique parameter structure */ - struct dev_roce_svc_own_cap roce_own_cap; -}; - -/* Defines the RDMA service capability flag */ -enum { - RDMA_BMME_FLAG_LOCAL_INV = (1 << 0), - RDMA_BMME_FLAG_REMOTE_INV = (1 << 1), - RDMA_BMME_FLAG_FAST_REG_WR = (1 << 2), - RDMA_BMME_FLAG_RESERVED_LKEY = (1 << 3), - RDMA_BMME_FLAG_TYPE_2_WIN = (1 << 4), - RDMA_BMME_FLAG_WIN_TYPE_2B = (1 << 5), - - RDMA_DEV_CAP_FLAG_XRC = (1 << 6), - RDMA_DEV_CAP_FLAG_MEM_WINDOW = (1 << 7), - RDMA_DEV_CAP_FLAG_ATOMIC = (1 << 8), - RDMA_DEV_CAP_FLAG_APM = (1 << 9), -}; - -/* RDMA services */ -struct rdma_service_cap { - struct dev_rdma_svc_cap dev_rdma_cap; - - u8 log_mtt; /* 1. the number of MTT PA must be an integer power of 2 - * 2. represented by logarithm; each MTT table can - * contain 1, 2, 4, 8, or 16 PAs - */ - /* todo: need to check whether related to max_mtt_seg */ - u32 num_mtts; /* Number of MTT table (4M), - * is actually MTT seg number - */ - u32 log_mtt_seg; - u32 mtt_entry_sz; /* MTT table size 8B, including 1 PA(64bits) */ - u32 mpt_entry_sz; /* MPT table size (64B) */ - - u32 dmtt_cl_start; - u32 dmtt_cl_end; - u32 dmtt_cl_sz; - - u8 log_rdmarc; /* 1. the number of RDMArc PA must be an integer power of 2 - * 2. represented by logarithm; each RDMArc table can - * contain 1, 2, 4, 8, or 16 PAs - */ - - u32 reserved_qps; /* Number of reserved QP */ - u32 max_sq_sg; /* Maximum SGE number of SQ (8) */ - u32 max_sq_desc_sz; /* WQE maximum size of SQ(1024B), inline maximum - * size is 960B(944B aligned to the 960B), - * 960B=>wqebb alignment=>1024B - */ - u32 wqebb_size; /* Currently, the hardware supports 64B and 128B, - * defined as 64Bytes - */ - - u32 max_cqes; /* Size of the depth of the CQ (64K-1) */ - u32 reserved_cqs; /* Number of reserved CQ */ - u32 cqc_entry_sz; /* Size of the CQC (64B/128B) */ - u32 cqe_size; /* Size of CQE (32B) */ - - u32 reserved_mrws; /* Number of reserved MR/MR Window */ - - u32 max_fmr_maps; /* max MAP of FMR, - * (1 << (32-ilog2(num_mpt)))-1; - */ - - /* todo: max value needs to be confirmed */ - /* MTT table number of Each MTT seg(3) */ - - u32 log_rdmarc_seg; /* table number of each RDMArc seg(3) */ - - /* Timeout time. 
- /* Timeout time. Formula: Tr = 4.096us * 2^(local_ca_ack_delay), range [Tr, 4Tr] */ - u32 local_ca_ack_delay; - u32 num_ports; /* Physical port number */ - - u32 db_page_size; /* Size of the DB (4KB) */ - u32 direct_wqe_size; /* Size of the DWQE (256B) */ - - u32 num_pds; /* Maximum number of PDs (128K) */ - u32 reserved_pds; /* Number of reserved PDs */ - u32 max_xrcds; /* Maximum number of xrcds (64K) */ - u32 reserved_xrcds; /* Number of reserved xrcds */ - - u32 max_gid_per_port; /* gid number (16) of each port */ - u32 gid_entry_sz; /* RoCE v2 GID table is 32B, - * compatible with RoCE v1 expansion - */ - - u32 reserved_lkey; /* local_dma_lkey */ - u32 num_comp_vectors; /* Number of completion vectors (32) */ - u32 page_size_cap; /* Supports 4K,8K,64K,256K,1M and 4M page_size */ - - u32 flags; /* RDMA capability flags */ - u32 max_frpl_len; /* Maximum number of pages for frmr registration */ - u32 max_pkeys; /* Number of supported pkey groups */ -}; - -/* PF OVS service resource structure */ -struct dev_ovs_svc_cap { - u32 max_pctxs; /* Parent Context: max specifications 1M */ - u8 dynamic_qp_en; - u8 fake_vf_num; - u16 fake_vf_start_id; -}; - -/* OVS services */ -struct ovs_service_cap { - struct dev_ovs_svc_cap dev_ovs_cap; - - u32 pctx_sz; /* 512B */ -}; - -/* PF IPsec service resource structure */ -struct dev_ipsec_svc_cap { - /* PF resources */ - u32 max_sa_ctxs; /* Parent Context: max specifications 8192 */ -}; - -/* IPsec services */ -struct ipsec_service_cap { - struct dev_ipsec_svc_cap dev_ipsec_cap; - u32 sactx_sz; /* 512B */ -}; - -/* Defines the IRQ information structure */ -struct irq_info { - u16 msix_entry_idx; /* IRQ corresponding index number */ - u32 irq_id; /* the IRQ number from the OS */ -}; - -struct interrupt_info { - u32 lli_set; - u32 interrupt_coalesc_set; - u16 msix_index; - u8 lli_credit_limit; - u8 lli_timer_cfg; - u8 pending_limt; - u8 coalesc_timer_cfg; - u8 resend_timer_cfg; -}; - -enum sphw_msix_state { - SPHW_MSIX_ENABLE, - SPHW_MSIX_DISABLE, -}; - -enum sphw_msix_auto_mask { - SPHW_SET_MSIX_AUTO_MASK, - SPHW_CLR_MSIX_AUTO_MASK, -}; - -enum func_type { - TYPE_PF, - TYPE_VF, - TYPE_PPF, - TYPE_UNKNOWN, -}; - -struct sphw_init_para { - /* Records the spnic_pcidev or NDIS_Adapter pointer address */ - void *adapter_hdl; - /* Records the pcidev or Handler pointer address, - * for example: the ioremap interface input parameter - */ - void *pcidev_hdl; - /* Records the pcidev->dev or Handler pointer address, used for - * DMA address allocation or dev_err printing - */ - void *dev_hdl; - - /* Config register virtual address, PF is bar1, VF is bar0/1 */ - void *cfg_reg_base; - /* interrupt configuration register address, PF is bar2, VF is bar2/3 - */ - void *intr_reg_base; - /* PF bar3 virtual address; must be set to NULL for a VF */ - void *mgmt_reg_base; - - u64 db_dwqe_len; - u64 db_base_phy; - /* the doorbell address, bar4/5 higher 4M space */ - void *db_base; - /* direct wqe 4M, follows the doorbell address space */ - void *dwqe_mapping; - void **hwdev; - void *chip_node; - /* On a bmgw x86 host the driver can't send messages to the mgmt cpu - * directly; they must be transmitted via the PPF mailbox to the bmgw arm host.
- */ - void *ppf_hwdev; -}; - -/* B200 config BAR45 4MB, DB & DWQE both 2MB */ -#define SPHW_DB_DWQE_SIZE 0x00400000 - -/* db/dwqe page size: 4K */ -#define SPHW_DB_PAGE_SIZE 0x00001000ULL -#define SPHW_DWQE_OFFSET 0x00000800ULL - -#define SPHW_DB_MAX_AREAS (SPHW_DB_DWQE_SIZE / SPHW_DB_PAGE_SIZE) - -#ifndef IFNAMSIZ -#define IFNAMSIZ 16 -#endif -#define MAX_FUNCTION_NUM 4096 -#define SPHW_MAX_COS 8 - -struct card_node { - struct list_head node; - struct list_head func_list; - char chip_name[IFNAMSIZ]; - void *log_info; - void *dbgtool_info; - void *func_handle_array[MAX_FUNCTION_NUM]; - unsigned char bus_num; - u8 func_num; - bool up_bitmap_setted; - u8 valid_up_bitmap; -}; - -#define FAULT_SHOW_STR_LEN 16 - -enum sphw_fault_source_type { - /* same as FAULT_TYPE_CHIP */ - SPHW_FAULT_SRC_HW_MGMT_CHIP = 0, - /* same as FAULT_TYPE_UCODE */ - SPHW_FAULT_SRC_HW_MGMT_UCODE, - /* same as FAULT_TYPE_MEM_RD_TIMEOUT */ - SPHW_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT, - /* same as FAULT_TYPE_MEM_WR_TIMEOUT */ - SPHW_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT, - /* same as FAULT_TYPE_REG_RD_TIMEOUT */ - SPHW_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT, - /* same as FAULT_TYPE_REG_WR_TIMEOUT */ - SPHW_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT, - SPHW_FAULT_SRC_SW_MGMT_UCODE, - SPHW_FAULT_SRC_MGMT_WATCHDOG, - SPHW_FAULT_SRC_MGMT_RESET = 8, - SPHW_FAULT_SRC_HW_PHY_FAULT, - SPHW_FAULT_SRC_TX_PAUSE_EXCP, - SPHW_FAULT_SRC_PCIE_LINK_DOWN = 20, - SPHW_FAULT_SRC_HOST_HEARTBEAT_LOST = 21, - SPHW_FAULT_SRC_TX_TIMEOUT, - SPHW_FAULT_SRC_TYPE_MAX, -}; - -union sphw_fault_hw_mgmt { - u32 val[4]; - /* valid only type == FAULT_TYPE_CHIP */ - struct { - u8 node_id; - /* enum sphw_fault_err_level */ - u8 err_level; - u16 err_type; - u32 err_csr_addr; - u32 err_csr_value; - /* func_id valid only if err_level == FAULT_LEVEL_SERIOUS_FLR */ - u16 func_id; - u16 rsvd2; - } chip; - - /* valid only if type == FAULT_TYPE_UCODE */ - struct { - u8 cause_id; - u8 core_id; - u8 c_id; - u8 rsvd3; - u32 epc; - u32 rsvd4; - u32 rsvd5; - } ucode; - - /* valid only if type == FAULT_TYPE_MEM_RD_TIMEOUT || - * FAULT_TYPE_MEM_WR_TIMEOUT - */ - struct { - u32 err_csr_ctrl; - u32 err_csr_data; - u32 ctrl_tab; - u32 mem_index; - } mem_timeout; - - /* valid only if type == FAULT_TYPE_REG_RD_TIMEOUT || - * FAULT_TYPE_REG_WR_TIMEOUT - */ - struct { - u32 err_csr; - u32 rsvd6; - u32 rsvd7; - u32 rsvd8; - } reg_timeout; - - struct { - /* 0: read; 1: write */ - u8 op_type; - u8 port_id; - u8 dev_ad; - u8 rsvd9; - u32 csr_addr; - u32 op_data; - u32 rsvd10; - } phy_fault; -}; - -/* defined by chip */ -struct sphw_fault_event { - /* enum sphw_fault_type */ - u8 type; - u8 fault_level; /* sdk write fault level for uld event */ - u8 rsvd0[2]; - union sphw_fault_hw_mgmt event; -}; - -struct sphw_cmd_fault_event { - u8 status; - u8 version; - u8 rsvd0[6]; - struct sphw_fault_event event; -}; - -enum sphw_event_type { - SPHW_EVENT_LINK_DOWN = 0, - SPHW_EVENT_LINK_UP = 1, - SPHW_EVENT_FAULT = 3, - SPHW_EVENT_DCB_STATE_CHANGE = 5, - SPHW_EVENT_INIT_MIGRATE_PF, - SPHW_EVENT_SRIOV_STATE_CHANGE, - SPHW_EVENT_PORT_MODULE_EVENT, - SPHW_EVENT_PCIE_LINK_DOWN, - SPHW_EVENT_HEART_LOST, -}; - -struct sphw_event_link_info { - u8 valid; - u8 port_type; - u8 autoneg_cap; - u8 autoneg_state; - u8 duplex; - u8 speed; -}; - -struct sphw_dcb_info { - u8 dcb_on; - u8 default_cos; - u8 up_cos[SPHW_MAX_COS]; -}; - -struct sphw_sriov_state_info { - u8 enable; - u16 num_vfs; -}; - -enum link_err_type { - LINK_ERR_MODULE_UNRECOGENIZED, - LINK_ERR_NUM, -}; - -enum port_module_event_type { - 
SPHW_PORT_MODULE_CABLE_PLUGGED, - SPHW_PORT_MODULE_CABLE_UNPLUGGED, - SPHW_PORT_MODULE_LINK_ERR, - SPHW_PORT_MODULE_MAX_EVENT, -}; - -struct sphw_port_module_event { - enum port_module_event_type type; - enum link_err_type err_type; -}; - -struct sphw_event_info { - enum sphw_event_type type; - union { - struct sphw_event_link_info link_info; - struct sphw_fault_event info; - struct sphw_dcb_info dcb_state; - struct sphw_sriov_state_info sriov_state; - struct sphw_port_module_event module_event; - }; -}; - -typedef void (*sphw_event_handler)(void *handle, struct sphw_event_info *event); - -/* * - * @brief sphw_event_register - register hardware event - * @param dev: device pointer to hwdev - * @param pri_handle: private data that will be passed to the callback - * @param callback: callback function - */ -void sphw_event_register(void *dev, void *pri_handle, sphw_event_handler callback); - -/* * - * @brief sphw_event_unregister - unregister hardware event - * @param dev: device pointer to hwdev - */ -void sphw_event_unregister(void *dev); - -/* * - * @brief sphw_set_msix_auto_mask_state - set msix auto mask function - * @param hwdev: device pointer to hwdev - * @param msix_idx: msix id - * @param flag: msix auto_mask flag, 0-set, 1-clear - */ -void sphw_set_msix_auto_mask_state(void *hwdev, u16 msix_idx, enum sphw_msix_auto_mask flag); - -/* * - * @brief sphw_set_msix_state - set msix state - * @param hwdev: device pointer to hwdev - * @param msix_idx: msix id - * @param flag: msix state flag, 0-enable, 1-disable - */ -void sphw_set_msix_state(void *hwdev, u16 msix_idx, enum sphw_msix_state flag); - -/* * - * @brief sphw_misx_intr_clear_resend_bit - clear msix resend bit - * @param hwdev: device pointer to hwdev - * @param msix_idx: msix id - * @param clear_resend_en: 1-clear - */ -void sphw_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, u8 clear_resend_en); - -/* * - * @brief sphw_set_interrupt_cfg_direct - set interrupt cfg - * @param hwdev: device pointer to hwdev - * @param interrupt_para: interrupt info - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_set_interrupt_cfg_direct(void *hwdev, struct interrupt_info *interrupt_para, u16 channel); - -int sphw_set_interrupt_cfg(void *hwdev, struct interrupt_info interrupt_info, u16 channel); - -/* * - * @brief sphw_get_interrupt_cfg - get interrupt cfg - * @param dev: device pointer to hwdev - * @param info: interrupt info - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_get_interrupt_cfg(void *dev, struct interrupt_info *info, u16 channel); - -/* * - * @brief sphw_alloc_irqs - alloc irqs - * @param hwdev: device pointer to hwdev - * @param type: service type - * @param num: number of irqs to alloc - * @param irq_info_array: info of the allocated irqs - * @param act_num: actual number allocated - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_alloc_irqs(void *hwdev, enum sphw_service_type type, u16 num, - struct irq_info *irq_info_array, u16 *act_num); - -/* * - * @brief sphw_free_irq - free irq - * @param hwdev: device pointer to hwdev - * @param type: service type - * @param irq_id: irq id - */ -void sphw_free_irq(void *hwdev, enum sphw_service_type type, u32 irq_id); -
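A minimal usage sketch for the allocation pair above (the caller and the rollback policy are assumptions, not part of this header): request a batch of service interrupts, check how many were actually granted, and release them on the error path.

/* Illustrative sketch only: allocate two NIC irqs, roll back if short. */
static int example_alloc_nic_irqs(void *hwdev)
{
	struct irq_info irqs[2];
	u16 act_num = 0;
	u16 i;
	int err;

	err = sphw_alloc_irqs(hwdev, SERVICE_T_NIC, 2, irqs, &act_num);
	if (err)
		return err;

	if (act_num < 2) {	/* fewer irqs granted than requested */
		for (i = 0; i < act_num; i++)
			sphw_free_irq(hwdev, SERVICE_T_NIC, irqs[i].irq_id);
		return -ENOSPC;
	}

	return 0;
}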
-/* * - * @brief sphw_alloc_ceqs - alloc ceqs - * @param hwdev: device pointer to hwdev - * @param type: service type - * @param num: number of ceqs to alloc - * @param ceq_id_array: ids of the allocated ceqs - * @param act_num: actual number allocated - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_alloc_ceqs(void *hwdev, enum sphw_service_type type, int num, int *ceq_id_array, - int *act_num); - -/* * - * @brief sphw_free_ceq - free ceq - * @param hwdev: device pointer to hwdev - * @param type: service type - * @param ceq_id: ceq id - */ -void sphw_free_ceq(void *hwdev, enum sphw_service_type type, int ceq_id); - -/* * - * @brief sphw_get_pcidev_hdl - get pcidev_hdl - * @param hwdev: device pointer to hwdev - * @retval non-null: success - * @retval null: failure - */ -void *sphw_get_pcidev_hdl(void *hwdev); - -/* * - * @brief sphw_ppf_idx - get ppf id - * @param hwdev: device pointer to hwdev - * @retval ppf id - */ -u8 sphw_ppf_idx(void *hwdev); - -/* * - * @brief sphw_get_chip_present_flag - get chip present flag - * @param hwdev: device pointer to hwdev - * @retval 1: chip is present - * @retval 0: chip is absent - */ -int sphw_get_chip_present_flag(const void *hwdev); - -/* * - * @brief sphw_support_nic - function support nic - * @param hwdev: device pointer to hwdev - * @param cap: nic service capability - * @retval true: function support nic - * @retval false: function not support nic - */ -bool sphw_support_nic(void *hwdev, struct nic_service_cap *cap); - -/* * - * @brief sphw_support_ipsec - function support ipsec - * @param hwdev: device pointer to hwdev - * @param cap: ipsec service capability - * @retval true: function support ipsec - * @retval false: function not support ipsec - */ -bool sphw_support_ipsec(void *hwdev, struct ipsec_service_cap *cap); - -/* * - * @brief sphw_support_roce - function support roce - * @param hwdev: device pointer to hwdev - * @param cap: roce service capability - * @retval true: function support roce - * @retval false: function not support roce - */ -bool sphw_support_roce(void *hwdev, struct rdma_service_cap *cap); - -/* * - * @brief sphw_support_fc - function support fc - * @param hwdev: device pointer to hwdev - * @param cap: fc service capability - * @retval true: function support fc - * @retval false: function not support fc - */ -bool sphw_support_fc(void *hwdev, struct fc_service_cap *cap); - -/* * - * @brief sphw_support_rdma - function support rdma - * @param hwdev: device pointer to hwdev - * @param cap: rdma service capability - * @retval true: function support rdma - * @retval false: function not support rdma - */ -bool sphw_support_rdma(void *hwdev, struct rdma_service_cap *cap); - -/* * - * @brief sphw_support_ovs - function support ovs - * @param hwdev: device pointer to hwdev - * @param cap: ovs service capability - * @retval true: function support ovs - * @retval false: function not support ovs - */ -bool sphw_support_ovs(void *hwdev, struct ovs_service_cap *cap); - -/* * - * @brief sphw_support_toe - function support toe - * @param hwdev: device pointer to hwdev - * @param cap: toe service capability - * @retval true: function support toe - * @retval false: function not support toe - */ -bool sphw_support_toe(void *hwdev, struct toe_service_cap *cap); - -/* * - * @brief sphw_sync_time - sync time to hardware - * @param hwdev: device pointer to hwdev - * @param time: time to sync - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_sync_time(void *hwdev, u64 time); - -/* * - * @brief sphw_disable_mgmt_msg_report - disable mgmt report msg - * @param hwdev: device pointer to hwdev - */ -void sphw_disable_mgmt_msg_report(void *hwdev); - -/* * - * @brief sphw_func_for_mgmt - get function service type - * @param hwdev: device pointer to hwdev - * @retval true: function for mgmt - * @retval false: function is not for mgmt - */ -bool sphw_func_for_mgmt(void *hwdev); -
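The sphw_support_*() helpers above pair a boolean service test with a capability query; assuming they fill *cap on success (the helper below and its device argument are hypothetical), the usual probe-time pattern looks like:

/* Illustrative sketch only: log the NIC queue limits if NIC is supported. */
static void example_log_nic_cap(void *hwdev, struct device *dev)
{
	struct nic_service_cap nic_cap = { 0 };

	if (!sphw_support_nic(hwdev, &nic_cap))
		return;

	sdk_info(dev, "nic cap: max_sqs %u, max_rqs %u\n",
		 nic_cap.max_sqs, nic_cap.max_rqs);
}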
-/* * - * @brief sphw_set_pcie_order_cfg - set pcie order cfg - * @param handle: device pointer to hwdev - */ -void sphw_set_pcie_order_cfg(void *handle); - -/* * - * @brief sphw_init_hwdev - call to init hwdev - * @param para: device pointer to para - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_init_hwdev(struct sphw_init_para *para); - -/* * - * @brief sphw_free_hwdev - free hwdev - * @param hwdev: device pointer to hwdev - */ -void sphw_free_hwdev(void *hwdev); - -/* * - * @brief sphw_detect_hw_present - detect hardware present - * @param hwdev: device pointer to hwdev - */ -void sphw_detect_hw_present(void *hwdev); - -/* * - * @brief sphw_record_pcie_error - record pcie error - * @param hwdev: device pointer to hwdev - */ -void sphw_record_pcie_error(void *hwdev); - -/* * - * @brief sphw_shutdown_hwdev - shutdown hwdev - * @param hwdev: device pointer to hwdev - */ -void sphw_shutdown_hwdev(void *hwdev); - -/* * - * @brief sphw_get_mgmt_version - get management cpu version - * @param hwdev: device pointer to hwdev - * @param mgmt_ver: output management version - * @param version_size: size of the mgmt_ver buffer - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_get_mgmt_version(void *hwdev, u8 *mgmt_ver, u8 version_size, u16 channel); - -/* * - * @brief sphw_get_fw_version - get firmware version - * @param hwdev: device pointer to hwdev - * @param fw_ver: firmware version - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_get_fw_version(void *hwdev, struct sphw_fw_version *fw_ver, u16 channel); - -/* * - * @brief sphw_global_func_id - get global function id - * @param hwdev: device pointer to hwdev - * @retval global function id - */ -u16 sphw_global_func_id(void *hwdev); - -/* * - * @brief sphw_vector_to_eqn - vector to eq id - * @param hwdev: device pointer to hwdev - * @param type: service type - * @param vector: vector index - * @retval eq id - */ -int sphw_vector_to_eqn(void *hwdev, enum sphw_service_type type, int vector); - -/* * - * @brief sphw_glb_pf_vf_offset - get vf offset id of pf - * @param hwdev: device pointer to hwdev - * @retval vf offset id - */ -u16 sphw_glb_pf_vf_offset(void *hwdev); - -/* * - * @brief sphw_pf_id_of_vf - get pf id of vf - * @param hwdev: device pointer to hwdev - * @retval pf id - */ -u8 sphw_pf_id_of_vf(void *hwdev); - -/* * - * @brief sphw_func_type - get function type - * @param hwdev: device pointer to hwdev - * @retval function type - */ -enum func_type sphw_func_type(void *hwdev); - -/* * - * @brief sphw_host_oq_id_mask - get oq id - * @param hwdev: device pointer to hwdev - * @retval oq id - */ -u8 sphw_host_oq_id_mask(void *hwdev); - -/* * - * @brief sphw_host_id - get host id - * @param hwdev: device pointer to hwdev - * @retval host id - */ -u8 sphw_host_id(void *hwdev); - -/* * - * @brief sphw_host_total_func - get host total function number - * @param hwdev: device pointer to hwdev - * @retval non-zero: host total function number - * @retval zero: failure - */ -u16 sphw_host_total_func(void *hwdev); - -/* * - * @brief sphw_func_max_nic_qnum - get max nic queue number - * @param hwdev: device pointer to hwdev - * @retval non-zero: max nic queue number - * @retval zero: failure - */ -u16 sphw_func_max_nic_qnum(void *hwdev); - -/* * - * @brief sphw_func_max_qnum - get max queue number - * @param hwdev: device pointer to hwdev - * @retval non-zero: max queue number - * @retval zero: failure - */ -u16 sphw_func_max_qnum(void *hwdev); -
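These per-function limits are normally combined with the function type when sizing queue resources. A sketch under the assumption that a caller wants to cap VFs below the PF (the cap of 8 is invented for the example):

/* Illustrative sketch only: choose a NIC queue count for this function. */
static u16 example_pick_nic_qnum(void *hwdev)
{
	u16 qnum = sphw_func_max_nic_qnum(hwdev);

	/* example policy: VFs get at most 8 queues */
	if (sphw_func_type(hwdev) == TYPE_VF && qnum > 8)
		qnum = 8;

	return qnum;
}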
-/* * - * @brief sphw_ep_id - get ep id - * @param hwdev: device pointer to hwdev - * @retval ep id - */ -u8 sphw_ep_id(void *hwdev); /* Obtain service_cap.ep_id */ - -/* * - * @brief sphw_er_id - get er id - * @param hwdev: device pointer to hwdev - * @retval er id - */ -u8 sphw_er_id(void *hwdev); /* Obtain service_cap.er_id */ - -/* * - * @brief sphw_physical_port_id - get physical port id - * @param hwdev: device pointer to hwdev - * @retval physical port id - */ -u8 sphw_physical_port_id(void *hwdev); /* Obtain service_cap.port_id */ - -/* * - * @brief sphw_func_max_vf - get vf number - * @param hwdev: device pointer to hwdev - * @retval non-zero: vf number - * @retval zero: failure - */ -u16 sphw_func_max_vf(void *hwdev); /* Obtain service_cap.max_vf */ - -/* @brief sphw_max_pf_num - get global max pf number - */ -u8 sphw_max_pf_num(void *hwdev); - -/* * - * @brief sphw_host_pf_num - get current host pf number - * @param hwdev: device pointer to hwdev - * @retval non-zero: pf number - * @retval zero: failure - */ -u32 sphw_host_pf_num(void *hwdev); /* Obtain service_cap.pf_num */ - -/* * - * @brief sphw_pcie_itf_id - get pcie port id - * @param hwdev: device pointer to hwdev - * @retval pcie port id - */ -u8 sphw_pcie_itf_id(void *hwdev); - -/* * - * @brief sphw_vf_in_pf - get vf offset in pf - * @param hwdev: device pointer to hwdev - * @retval vf offset in pf - */ -u8 sphw_vf_in_pf(void *hwdev); - -/* * - * @brief sphw_cos_valid_bitmap - get cos valid bitmap - * @param hwdev: device pointer to hwdev - * @retval non-zero: valid cos bit map - * @retval zero: failure - */ -u8 sphw_cos_valid_bitmap(void *hwdev); - -/* * - * @brief sphw_get_card_present_state - get card present state - * @param hwdev: device pointer to hwdev - * @param card_present_state: return card present state - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_get_card_present_state(void *hwdev, bool *card_present_state); - -/* * - * @brief sphw_func_rx_tx_flush - function flush - * @param hwdev: device pointer to hwdev - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_func_rx_tx_flush(void *hwdev, u16 channel); - -/* * - * @brief sphw_flush_mgmt_workq - flush the mgmt work queue when removing a function - * @param hwdev: device pointer to hwdev - */ -void sphw_flush_mgmt_workq(void *hwdev); - -/* @brief sphw_ceq_num - get toe ceq num - */ -u8 sphw_ceq_num(void *hwdev); - -/* * - * @brief sphw_intr_num - get intr num - */ -u16 sphw_intr_num(void *hwdev); - -/* @brief sphw_flexq_en - get flexq en - */ -u8 sphw_flexq_en(void *hwdev); - -/** - * @brief sphw_fault_event_report - report fault event - * @param hwdev: device pointer to hwdev - * @param src: fault event source, see enum sphw_fault_source_type - * @param level: fault level, see enum sphw_fault_err_level - */ -void sphw_fault_event_report(void *hwdev, u16 src, u16 level); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h deleted file mode 100644 index d283c14566157104fee852179d7af7e595715e55..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h +++ /dev/null @@ -1,158 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_CSR_H -#define SPHW_CSR_H - -/* bit30/bit31 for bar index flag - * 00: bar0 - * 01: bar1 - * 10: bar2 - * 11: bar3 - */ -#define SPHW_CFG_REGS_FLAG 0x40000000 - -#define
SPHW_MGMT_REGS_FLAG 0xC0000000 - -#define SPHW_REGS_FLAG_MAKS 0x3FFFFFFF - -#define SPHW_VF_CFG_REG_OFFSET 0x2000 - -#define SPHW_HOST_CSR_BASE_ADDR (SPHW_MGMT_REGS_FLAG + 0x6000) -#define SPHW_CSR_GLOBAL_BASE_ADDR (SPHW_MGMT_REGS_FLAG + 0x6400) - -/* HW interface registers */ -#define SPHW_CSR_FUNC_ATTR0_ADDR (SPHW_CFG_REGS_FLAG + 0x0) -#define SPHW_CSR_FUNC_ATTR1_ADDR (SPHW_CFG_REGS_FLAG + 0x4) -#define SPHW_CSR_FUNC_ATTR2_ADDR (SPHW_CFG_REGS_FLAG + 0x8) -#define SPHW_CSR_FUNC_ATTR3_ADDR (SPHW_CFG_REGS_FLAG + 0xC) -#define SPHW_CSR_FUNC_ATTR4_ADDR (SPHW_CFG_REGS_FLAG + 0x10) -#define SPHW_CSR_FUNC_ATTR5_ADDR (SPHW_CFG_REGS_FLAG + 0x14) -#define SPHW_CSR_FUNC_ATTR6_ADDR (SPHW_CFG_REGS_FLAG + 0x18) - -#define SPHW_FUNC_CSR_MAILBOX_DATA_OFF 0x80 -#define SPHW_FUNC_CSR_MAILBOX_CONTROL_OFF \ - (SPHW_CFG_REGS_FLAG + 0x0100) -#define SPHW_FUNC_CSR_MAILBOX_INT_OFFSET_OFF \ - (SPHW_CFG_REGS_FLAG + 0x0104) -#define SPHW_FUNC_CSR_MAILBOX_RESULT_H_OFF \ - (SPHW_CFG_REGS_FLAG + 0x0108) -#define SPHW_FUNC_CSR_MAILBOX_RESULT_L_OFF \ - (SPHW_CFG_REGS_FLAG + 0x010C) - -#define SPHW_PPF_ELECTION_OFFSET 0x0 -#define SPHW_MPF_ELECTION_OFFSET 0x20 - -#define SPHW_CSR_PPF_ELECTION_ADDR \ - (SPHW_HOST_CSR_BASE_ADDR + SPHW_PPF_ELECTION_OFFSET) - -#define SPHW_CSR_GLOBAL_MPF_ELECTION_ADDR \ - (SPHW_HOST_CSR_BASE_ADDR + SPHW_MPF_ELECTION_OFFSET) - -#define SPHW_CSR_FUNC_PPF_ELECT_BASE_ADDR (SPHW_CFG_REGS_FLAG + 0x60) -#define SPHW_CSR_FUNC_PPF_ELECT_PORT_STRIDE 0x4 - -#define SPHW_CSR_FUNC_PPF_ELECT(host_idx) \ - (SPHW_CSR_FUNC_PPF_ELECT_BASE_ADDR + \ - (host_idx) * SPHW_CSR_FUNC_PPF_ELECT_PORT_STRIDE) - -#define SPHW_CSR_DMA_ATTR_TBL_ADDR (SPHW_CFG_REGS_FLAG + 0x380) -#define SPHW_CSR_DMA_ATTR_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x390) - -/* MSI-X registers */ -#define SPHW_CSR_MSIX_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x310) -#define SPHW_CSR_MSIX_CTRL_ADDR (SPHW_CFG_REGS_FLAG + 0x300) -#define SPHW_CSR_MSIX_CNT_ADDR (SPHW_CFG_REGS_FLAG + 0x304) -#define SPHW_CSR_FUNC_MSI_CLR_WR_ADDR (SPHW_CFG_REGS_FLAG + 0x58) - -#define SPHW_MSI_CLR_INDIR_RESEND_TIMER_CLR_SHIFT 0 -#define SPHW_MSI_CLR_INDIR_INT_MSK_SET_SHIFT 1 -#define SPHW_MSI_CLR_INDIR_INT_MSK_CLR_SHIFT 2 -#define SPHW_MSI_CLR_INDIR_AUTO_MSK_SET_SHIFT 3 -#define SPHW_MSI_CLR_INDIR_AUTO_MSK_CLR_SHIFT 4 -#define SPHW_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_SHIFT 22 - -#define SPHW_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK 0x1U -#define SPHW_MSI_CLR_INDIR_INT_MSK_SET_MASK 0x1U -#define SPHW_MSI_CLR_INDIR_INT_MSK_CLR_MASK 0x1U -#define SPHW_MSI_CLR_INDIR_AUTO_MSK_SET_MASK 0x1U -#define SPHW_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK 0x1U -#define SPHW_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_MASK 0x3FFU - -#define SPHW_MSI_CLR_INDIR_SET(val, member) \ - (((val) & SPHW_MSI_CLR_INDIR_##member##_MASK) << \ - SPHW_MSI_CLR_INDIR_##member##_SHIFT) - -/* EQ registers */ -#define SPHW_AEQ_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x210) -#define SPHW_CEQ_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x290) - -#define SPHW_EQ_INDIR_IDX_ADDR(type) \ - ((type == SPHW_AEQ) ? 
SPHW_AEQ_INDIR_IDX_ADDR : SPHW_CEQ_INDIR_IDX_ADDR) - -#define SPHW_AEQ_MTT_OFF_BASE_ADDR (SPHW_CFG_REGS_FLAG + 0x240) -#define SPHW_CEQ_MTT_OFF_BASE_ADDR (SPHW_CFG_REGS_FLAG + 0x2C0) - -#define SPHW_CSR_EQ_PAGE_OFF_STRIDE 8 - -#define SPHW_AEQ_HI_PHYS_ADDR_REG(pg_num) \ - (SPHW_AEQ_MTT_OFF_BASE_ADDR + \ - (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE) - -#define SPHW_AEQ_LO_PHYS_ADDR_REG(pg_num) \ - (SPHW_AEQ_MTT_OFF_BASE_ADDR + \ - (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE + 4) - -#define SPHW_CEQ_HI_PHYS_ADDR_REG(pg_num) \ - (SPHW_CEQ_MTT_OFF_BASE_ADDR + \ - (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE) - -#define SPHW_CEQ_LO_PHYS_ADDR_REG(pg_num) \ - (SPHW_CEQ_MTT_OFF_BASE_ADDR + \ - (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE + 4) - -#define SPHW_CSR_AEQ_CTRL_0_ADDR (SPHW_CFG_REGS_FLAG + 0x200) -#define SPHW_CSR_AEQ_CTRL_1_ADDR (SPHW_CFG_REGS_FLAG + 0x204) -#define SPHW_CSR_AEQ_CONS_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x208) -#define SPHW_CSR_AEQ_PROD_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x20C) -#define SPHW_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (SPHW_CFG_REGS_FLAG + 0x50) - -#define SPHW_CSR_CEQ_CTRL_0_ADDR (SPHW_CFG_REGS_FLAG + 0x280) -#define SPHW_CSR_CEQ_CTRL_1_ADDR (SPHW_CFG_REGS_FLAG + 0x284) -#define SPHW_CSR_CEQ_CONS_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x288) -#define SPHW_CSR_CEQ_PROD_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x28c) -#define SPHW_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (SPHW_CFG_REGS_FLAG + 0x54) - -/* API CMD registers */ -#define SPHW_CSR_API_CMD_BASE (SPHW_MGMT_REGS_FLAG + 0x2000) - -#define SPHW_CSR_API_CMD_STRIDE 0x80 - -#define SPHW_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x0 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x4 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_STATUS_HI_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x8 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_STATUS_LO_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0xC + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x10 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x14 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x1C + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x20 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_STATUS_0_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x30 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c deleted file mode 100644 index 24c55d656f9c45b3e5dfeda81b1d89a1ac60a06f..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c +++ /dev/null @@ -1,1272 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" -#include "sphw_hw.h" -#include "sphw_csr.h" -#include "sphw_hw_comm.h" -#include "sphw_prof_adap.h" -#include "sphw_eqs.h" - -#define SPHW_EQS_WQ_NAME "sphw_eqs" - -#define AEQ_CTRL_0_INTR_IDX_SHIFT 0 -#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12 -#define 
AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 -#define AEQ_CTRL_0_INTR_MODE_SHIFT 31 - -#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU -#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU -#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x7U -#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U - -#define AEQ_CTRL_0_SET(val, member) \ - (((val) & AEQ_CTRL_0_##member##_MASK) << \ - AEQ_CTRL_0_##member##_SHIFT) - -#define AEQ_CTRL_0_CLEAR(val, member) \ - ((val) & (~(AEQ_CTRL_0_##member##_MASK << \ - AEQ_CTRL_0_##member##_SHIFT))) - -#define AEQ_CTRL_1_LEN_SHIFT 0 -#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 -#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 - -#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU -#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U -#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU - -#define AEQ_CTRL_1_SET(val, member) \ - (((val) & AEQ_CTRL_1_##member##_MASK) << \ - AEQ_CTRL_1_##member##_SHIFT) - -#define AEQ_CTRL_1_CLEAR(val, member) \ - ((val) & (~(AEQ_CTRL_1_##member##_MASK << \ - AEQ_CTRL_1_##member##_SHIFT))) - -#define SPHW_EQ_PROD_IDX_MASK 0xFFFFF -#define SPHW_TASK_PROCESS_EQE_LIMIT 1024 -#define SPHW_EQ_UPDATE_CI_STEP 64 - -static uint g_aeq_len = SPHW_DEFAULT_AEQ_LEN; -module_param(g_aeq_len, uint, 0444); -MODULE_PARM_DESC(g_aeq_len, - "aeq depth, valid range is " __stringify(SPHW_MIN_AEQ_LEN) - " - " __stringify(SPHW_MAX_AEQ_LEN)); - -static uint g_ceq_len = SPHW_DEFAULT_CEQ_LEN; -module_param(g_ceq_len, uint, 0444); -MODULE_PARM_DESC(g_ceq_len, - "ceq depth, valid range is " __stringify(SPHW_MIN_CEQ_LEN) - " - " __stringify(SPHW_MAX_CEQ_LEN)); - -static uint g_num_ceqe_in_tasklet = SPHW_TASK_PROCESS_EQE_LIMIT; -module_param(g_num_ceqe_in_tasklet, uint, 0444); -MODULE_PARM_DESC(g_num_ceqe_in_tasklet, - "The max number of ceqe can be processed in tasklet, default = 1024"); - -#define CEQ_CTRL_0_INTR_IDX_SHIFT 0 -#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12 -#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 -#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 -#define CEQ_CTRL_0_PAGE_SIZE_SHIFT 27 -#define CEQ_CTRL_0_INTR_MODE_SHIFT 31 - -#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU -#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU -#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU -#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U -#define CEQ_CTRL_0_PAGE_SIZE_MASK 0xF -#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U - -#define CEQ_CTRL_0_SET(val, member) \ - (((val) & CEQ_CTRL_0_##member##_MASK) << \ - CEQ_CTRL_0_##member##_SHIFT) - -#define CEQ_CTRL_1_LEN_SHIFT 0 -#define CEQ_CTRL_1_GLB_FUNC_ID_SHIFT 20 - -#define CEQ_CTRL_1_LEN_MASK 0xFFFFFU -#define CEQ_CTRL_1_GLB_FUNC_ID_MASK 0xFFFU - -#define CEQ_CTRL_1_SET(val, member) \ - (((val) & CEQ_CTRL_1_##member##_MASK) << \ - CEQ_CTRL_1_##member##_SHIFT) - -#define EQ_ELEM_DESC_TYPE_SHIFT 0 -#define EQ_ELEM_DESC_SRC_SHIFT 7 -#define EQ_ELEM_DESC_SIZE_SHIFT 8 -#define EQ_ELEM_DESC_WRAPPED_SHIFT 31 - -#define EQ_ELEM_DESC_TYPE_MASK 0x7FU -#define EQ_ELEM_DESC_SRC_MASK 0x1U -#define EQ_ELEM_DESC_SIZE_MASK 0xFFU -#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U - -#define EQ_ELEM_DESC_GET(val, member) \ - (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \ - EQ_ELEM_DESC_##member##_MASK) - -#define EQ_CONS_IDX_CONS_IDX_SHIFT 0 -#define EQ_CONS_IDX_INT_ARMED_SHIFT 31 - -#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU -#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U - -#define EQ_CONS_IDX_SET(val, member) \ - (((val) & EQ_CONS_IDX_##member##_MASK) << \ - EQ_CONS_IDX_##member##_SHIFT) - -#define EQ_CONS_IDX_CLEAR(val, member) \ - ((val) & (~(EQ_CONS_IDX_##member##_MASK << \ - EQ_CONS_IDX_##member##_SHIFT))) - -#define EQ_CI_SIMPLE_INDIR_CI_SHIFT 0 -#define EQ_CI_SIMPLE_INDIR_ARMED_SHIFT 21 -#define 
EQ_CI_SIMPLE_INDIR_AEQ_IDX_SHIFT 30 -#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_SHIFT 24 - -#define EQ_CI_SIMPLE_INDIR_CI_MASK 0x1FFFFFU -#define EQ_CI_SIMPLE_INDIR_ARMED_MASK 0x1U -#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK 0x3U -#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK 0xFFU - -#define EQ_CI_SIMPLE_INDIR_SET(val, member) \ - (((val) & EQ_CI_SIMPLE_INDIR_##member##_MASK) << \ - EQ_CI_SIMPLE_INDIR_##member##_SHIFT) - -#define EQ_CI_SIMPLE_INDIR_CLEAR(val, member) \ - ((val) & (~(EQ_CI_SIMPLE_INDIR_##member##_MASK << \ - EQ_CI_SIMPLE_INDIR_##member##_SHIFT))) - -#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT) - -#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \ - ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT)) - -#define EQ_CONS_IDX_REG_ADDR(eq) \ - (((eq)->type == SPHW_AEQ) ? \ - SPHW_CSR_AEQ_CONS_IDX_ADDR : \ - SPHW_CSR_CEQ_CONS_IDX_ADDR) -#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \ - (((eq)->type == SPHW_AEQ) ? \ - SPHW_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \ - SPHW_CSR_CEQ_CI_SIMPLE_INDIR_ADDR) - -#define EQ_PROD_IDX_REG_ADDR(eq) \ - (((eq)->type == SPHW_AEQ) ? \ - SPHW_CSR_AEQ_PROD_IDX_ADDR : \ - SPHW_CSR_CEQ_PROD_IDX_ADDR) - -#define SPHW_EQ_HI_PHYS_ADDR_REG(type, pg_num) \ - ((u32)((type == SPHW_AEQ) ? \ - SPHW_AEQ_HI_PHYS_ADDR_REG(pg_num) : \ - SPHW_CEQ_HI_PHYS_ADDR_REG(pg_num))) - -#define SPHW_EQ_LO_PHYS_ADDR_REG(type, pg_num) \ - ((u32)((type == SPHW_AEQ) ? \ - SPHW_AEQ_LO_PHYS_ADDR_REG(pg_num) : \ - SPHW_CEQ_LO_PHYS_ADDR_REG(pg_num))) - -#define GET_EQ_NUM_PAGES(eq, size) \ - ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \ - (size)) / (size))) - -#define SPHW_EQ_MAX_PAGES(eq) \ - ((eq)->type == SPHW_AEQ ? SPHW_AEQ_MAX_PAGES : \ - SPHW_CEQ_MAX_PAGES) - -#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size) - -#define GET_EQ_ELEMENT(eq, idx) \ - (((u8 *)(eq)->eq_pages[(idx) / (eq)->num_elem_in_pg].align_vaddr) + \ - (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) - -#define GET_AEQ_ELEM(eq, idx) \ - ((struct sphw_aeq_elem *)GET_EQ_ELEMENT((eq), (idx))) - -#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx))) - -#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx) - -#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx) - -#define PAGE_IN_4K(page_size) ((page_size) >> 12) -#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \ - ((u32)ilog2(PAGE_IN_4K((eq)->page_size))) - -#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) -#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq))) - -#define AEQ_DMA_ATTR_DEFAULT 0 -#define CEQ_DMA_ATTR_DEFAULT 0 - -#define CEQ_LMT_KICK_DEFAULT 0 - -#define EQ_MSIX_RESEND_TIMER_CLEAR 1 - -#define EQ_WRAPPED_SHIFT 20 - -#define EQ_VALID_SHIFT 31 - -#define CEQE_TYPE_SHIFT 23 -#define CEQE_TYPE_MASK 0x7 - -#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \ - CEQE_TYPE_MASK) - -#define CEQE_DATA_MASK 0x3FFFFFF -#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK) - -#define aeq_to_aeqs(eq) \ - container_of((eq) - (eq)->q_id, struct sphw_aeqs, aeq[0]) - -#define ceq_to_ceqs(eq) \ - container_of((eq) - (eq)->q_id, struct sphw_ceqs, ceq[0]) - -static irqreturn_t ceq_interrupt(int irq, void *data); -static irqreturn_t aeq_interrupt(int irq, void *data); - -static void ceq_tasklet(ulong eq_tasklet); - -/** - * sphw_aeq_register_hw_cb - register aeq callback for specific event - * @hwdev: the pointer to hw device - * @event: event for the handler - * @hw_cb: callback function - **/ -int sphw_aeq_register_hw_cb(void *hwdev, enum sphw_aeq_type event, sphw_aeq_hwe_cb hwe_cb) -{ - struct sphw_aeqs 
*aeqs = NULL; - - if (!hwdev || !hwe_cb || event >= SPHW_MAX_AEQ_EVENTS) - return -EINVAL; - - aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; - - aeqs->aeq_hwe_cb[event] = hwe_cb; - - set_bit(SPHW_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); - - return 0; -} - -/** - * sphw_aeq_unregister_hw_cb - unregister the aeq callback for specific event - * @hwdev: the pointer to hw device - * @event: event for the handler - **/ -void sphw_aeq_unregister_hw_cb(void *hwdev, enum sphw_aeq_type event) -{ - struct sphw_aeqs *aeqs = NULL; - - if (!hwdev || event >= SPHW_MAX_AEQ_EVENTS) - return; - - aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; - - clear_bit(SPHW_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); - - while (test_bit(SPHW_AEQ_HW_CB_RUNNING, - &aeqs->aeq_hw_cb_state[event])) - usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); - - aeqs->aeq_hwe_cb[event] = NULL; -} - -/** - * sphw_aeq_register_swe_cb - register aeq callback for sw event - * @hwdev: the pointer to hw device - * @event: soft event for the handler - * @aeq_swe_cb: callback function - **/ -int sphw_aeq_register_swe_cb(void *hwdev, enum sphw_aeq_sw_type event, sphw_aeq_swe_cb aeq_swe_cb) -{ - struct sphw_aeqs *aeqs = NULL; - - if (!hwdev || !aeq_swe_cb || event >= SPHW_MAX_AEQ_SW_EVENTS) - return -EINVAL; - - aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; - - aeqs->aeq_swe_cb[event] = aeq_swe_cb; - - set_bit(SPHW_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); - - return 0; -} - -/** - * sphw_aeq_unregister_swe_cb - unregister the aeq callback for sw event - * @hwdev: the pointer to hw device - * @event: soft event for the handler - **/ -void sphw_aeq_unregister_swe_cb(void *hwdev, enum sphw_aeq_sw_type event) -{ - struct sphw_aeqs *aeqs = NULL; - - if (!hwdev || event >= SPHW_MAX_AEQ_SW_EVENTS) - return; - - aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; - - clear_bit(SPHW_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); - - while (test_bit(SPHW_AEQ_SW_CB_RUNNING, - &aeqs->aeq_sw_cb_state[event])) - usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); - - aeqs->aeq_swe_cb[event] = NULL; -} - -/** - * sphw_ceq_register_cb - register ceq callback for specific event - * @hwdev: the pointer to hw device - * @event: event for the handler - * @callback: callback function - **/ -int sphw_ceq_register_cb(void *hwdev, enum sphw_ceq_event event, sphw_ceq_event_cb callback) -{ - struct sphw_ceqs *ceqs = NULL; - - if (!hwdev || event >= SPHW_MAX_CEQ_EVENTS) - return -EINVAL; - - ceqs = ((struct sphw_hwdev *)hwdev)->ceqs; - - ceqs->ceq_cb[event] = callback; - - set_bit(SPHW_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); - - return 0; -} - -/** - * sphw_ceq_unregister_cb - unregister ceq callback for specific event - * @hwdev: the pointer to hw device - * @event: event for the handler - **/ -void sphw_ceq_unregister_cb(void *hwdev, enum sphw_ceq_event event) -{ - struct sphw_ceqs *ceqs = NULL; - - if (!hwdev || event >= SPHW_MAX_CEQ_EVENTS) - return; - - ceqs = ((struct sphw_hwdev *)hwdev)->ceqs; - - clear_bit(SPHW_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); - - while (test_bit(SPHW_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event])) - usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); - - ceqs->ceq_cb[event] = NULL; -} -
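All of the register/unregister pairs above follow the same lifecycle: the callback pointer is published first, a REG bit then makes it eligible to run, and unregister clears REG and spins until the RUNNING bit drops so a handler can never be torn down mid-call. A hedged usage sketch for the CEQ variant (the handler body is hypothetical):

/* Illustrative sketch only: hook the command-queue CEQ event. */
static void example_cmdq_event(void *hwdev, u32 ceqe_data)
{
	/* ceqe_data carries the 26-bit payload extracted by CEQE_DATA() */
}

static int example_hook_cmdq(void *hwdev)
{
	return sphw_ceq_register_cb(hwdev, SPHW_CMDQ, example_cmdq_event);
}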
-/** - * set_eq_cons_idx - write the cons idx to the hw - * @eq: The event queue to update the cons idx for - * @arm_state: arm state value (SPHW_EQ_ARMED or SPHW_EQ_NOT_ARMED) - **/ -static void set_eq_cons_idx(struct sphw_eq *eq, u32 arm_state) -{ - u32 eq_wrap_ci, val; - u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq); - - eq_wrap_ci = EQ_CONS_IDX(eq); - val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED); - if (eq->type == SPHW_AEQ) { - val = val | - EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) | - EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX); - } else { - val = val | - EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) | - EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX); - } - - sphw_hwif_write_reg(eq->hwdev->hwif, addr, val); -} - -/** - * ceq_event_handler - handler for the ceq events - * @ceqs: ceqs part of the chip - * @ceqe: ceq element of the event - **/ -static void ceq_event_handler(struct sphw_ceqs *ceqs, u32 ceqe) -{ - struct sphw_hwdev *hwdev = ceqs->hwdev; - enum sphw_ceq_event event = CEQE_TYPE(ceqe); - u32 ceqe_data = CEQE_DATA(ceqe); - - if (event >= SPHW_MAX_CEQ_EVENTS) { - sdk_err(hwdev->dev_hdl, "Ceq unknown event:%d, ceqe data: 0x%x\n", - event, ceqe_data); - return; - } - - set_bit(SPHW_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]); - - if (ceqs->ceq_cb[event] && - test_bit(SPHW_CEQ_CB_REG, &ceqs->ceq_cb_state[event])) - ceqs->ceq_cb[event](hwdev, ceqe_data); - - clear_bit(SPHW_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]); -} - -static void aeq_elem_handler(struct sphw_eq *eq, u32 aeqe_desc) -{ - struct sphw_aeqs *aeqs = aeq_to_aeqs(eq); - struct sphw_aeq_elem *aeqe_pos; - enum sphw_aeq_type event; - enum sphw_aeq_sw_type sw_type; - u32 sw_event; - u8 data[SPHW_AEQE_DATA_SIZE], size; - - aeqe_pos = GET_CURR_AEQ_ELEM(eq); - - event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE); - if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { - sw_event = event; - sw_type = sw_event >= SPHW_NIC_FATAL_ERROR_MAX ? - SPHW_STATEFULL_EVENT : - SPHW_STATELESS_EVENT; - /* SW event uses only the first 8B */ - memcpy(data, aeqe_pos->aeqe_data, SPHW_AEQE_DATA_SIZE); - sphw_be32_to_cpu(data, SPHW_AEQE_DATA_SIZE); - set_bit(SPHW_AEQ_SW_CB_RUNNING, - &aeqs->aeq_sw_cb_state[sw_type]); - if (aeqs->aeq_swe_cb[sw_type] && - test_bit(SPHW_AEQ_SW_CB_REG, - &aeqs->aeq_sw_cb_state[sw_type])) - aeqs->aeq_swe_cb[sw_type](aeqs->hwdev, event, data); - - clear_bit(SPHW_AEQ_SW_CB_RUNNING, - &aeqs->aeq_sw_cb_state[sw_type]); - return; - } - - if (event < SPHW_MAX_AEQ_EVENTS) { - memcpy(data, aeqe_pos->aeqe_data, SPHW_AEQE_DATA_SIZE); - sphw_be32_to_cpu(data, SPHW_AEQE_DATA_SIZE); - - size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE); - set_bit(SPHW_AEQ_HW_CB_RUNNING, - &aeqs->aeq_hw_cb_state[event]); - if (aeqs->aeq_hwe_cb[event] && - test_bit(SPHW_AEQ_HW_CB_REG, - &aeqs->aeq_hw_cb_state[event])) - aeqs->aeq_hwe_cb[event](aeqs->hwdev, data, size); - clear_bit(SPHW_AEQ_HW_CB_RUNNING, - &aeqs->aeq_hw_cb_state[event]); - return; - } - sdk_warn(eq->hwdev->dev_hdl, "Unknown aeq hw event %d\n", event); -} - -/** - * aeq_irq_handler - handler for the aeq event - * @eq: the async event queue of the event - **/ -static bool aeq_irq_handler(struct sphw_eq *eq) -{ - struct sphw_aeq_elem *aeqe_pos = NULL; - u32 aeqe_desc; - u32 i, eqe_cnt = 0; - - for (i = 0; i < SPHW_TASK_PROCESS_EQE_LIMIT; i++) { - aeqe_pos = GET_CURR_AEQ_ELEM(eq); - - /* Data in HW is in Big endian Format */ - aeqe_desc = be32_to_cpu(aeqe_pos->desc); - - /* HW updates wrapped bit, when it adds eq element event */ - if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) - return false; - - aeq_elem_handler(eq, aeqe_desc); - - eq->cons_idx++; - - if (eq->cons_idx == eq->eq_len) { - eq->cons_idx = 0; - eq->wrapped = !eq->wrapped; - } - - if (++eqe_cnt >= SPHW_EQ_UPDATE_CI_STEP) { - eqe_cnt = 0; - set_eq_cons_idx(eq, SPHW_EQ_NOT_ARMED); - } - } - - return true; -} - -/** - * ceq_irq_handler - handler for the ceq event - * @eq: the completion event queue of the event - **/
-static bool ceq_irq_handler(struct sphw_eq *eq) -{ - struct sphw_ceqs *ceqs = ceq_to_ceqs(eq); - u32 ceqe, eqe_cnt = 0; - u32 i; - - for (i = 0; i < g_num_ceqe_in_tasklet; i++) { - ceqe = *(GET_CURR_CEQ_ELEM(eq)); - ceqe = be32_to_cpu(ceqe); - - /* HW updates wrapped bit, when it adds eq element event */ - if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) - return false; - - ceq_event_handler(ceqs, ceqe); - - eq->cons_idx++; - - if (eq->cons_idx == eq->eq_len) { - eq->cons_idx = 0; - eq->wrapped = !eq->wrapped; - } - - if (++eqe_cnt >= SPHW_EQ_UPDATE_CI_STEP) { - eqe_cnt = 0; - set_eq_cons_idx(eq, SPHW_EQ_NOT_ARMED); - } - } - - return true; -} - -static void reschedule_eq_handler(struct sphw_eq *eq) -{ - if (eq->type == SPHW_AEQ) { - struct sphw_aeqs *aeqs = aeq_to_aeqs(eq); - struct workqueue_struct *workq = aeqs->workq; - struct sphw_eq_work *aeq_work = &eq->aeq_work; - - queue_work_on(sphw_get_work_cpu_affinity(eq->hwdev, WORK_TYPE_AEQ), - workq, &aeq_work->work); - } else { - tasklet_schedule(&eq->ceq_tasklet); - } -} - -/** - * eq_irq_handler - handler for the eq event - * @data: the event queue of the event - **/ -static bool eq_irq_handler(void *data) -{ - struct sphw_eq *eq = (struct sphw_eq *)data; - bool uncompleted = false; - - if (eq->type == SPHW_AEQ) - uncompleted = aeq_irq_handler(eq); - else - uncompleted = ceq_irq_handler(eq); - - set_eq_cons_idx(eq, uncompleted ? SPHW_EQ_NOT_ARMED : - SPHW_EQ_ARMED); - - return uncompleted; -} - -/** - * eq_irq_work - eq work for the event - * @work: the work that is associated with the eq - **/ -static void eq_irq_work(struct work_struct *work) -{ - struct sphw_eq_work *aeq_work = - container_of(work, struct sphw_eq_work, work); - - if (eq_irq_handler(aeq_work->data)) - reschedule_eq_handler(aeq_work->data); -} - -/** - * aeq_interrupt - aeq interrupt handler - * @irq: irq number - * @data: the async event queue of the event - **/ -static irqreturn_t aeq_interrupt(int irq, void *data) -{ - struct sphw_eq *aeq = (struct sphw_eq *)data; - struct sphw_hwdev *hwdev = aeq->hwdev; - struct sphw_aeqs *aeqs = aeq_to_aeqs(aeq); - struct workqueue_struct *workq = aeqs->workq; - struct sphw_eq_work *aeq_work = NULL; - - /* clear resend timer cnt register */ - sphw_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx, - EQ_MSIX_RESEND_TIMER_CLEAR); - - aeq_work = &aeq->aeq_work; - aeq_work->data = aeq; - - queue_work_on(sphw_get_work_cpu_affinity(hwdev, WORK_TYPE_AEQ), - workq, &aeq_work->work); - - return IRQ_HANDLED; -} - -/** - * ceq_tasklet - ceq tasklet for the event - * @ceq_data: data that will be used by the tasklet(ceq) - **/ -static void ceq_tasklet(ulong ceq_data) -{ - struct sphw_ceq_tasklet_data *ceq_tasklet_data = - (struct sphw_ceq_tasklet_data *)ceq_data; - struct sphw_eq *eq = (struct sphw_eq *)ceq_tasklet_data->data; - - eq->soft_intr_jif = jiffies; - - if (eq_irq_handler(ceq_tasklet_data->data)) - reschedule_eq_handler(ceq_tasklet_data->data); -} - -/** - * ceq_interrupt - ceq interrupt handler - * @irq: irq number - * @data: the completion event queue of the event - **/ -static irqreturn_t ceq_interrupt(int irq, void *data) -{ - struct sphw_eq *ceq = (struct sphw_eq *)data; - struct sphw_ceq_tasklet_data *ceq_tasklet_data = NULL; - - ceq->hard_intr_jif = jiffies; - - /* clear resend timer counters */ - sphw_misx_intr_clear_resend_bit(ceq->hwdev, ceq->eq_irq.msix_entry_idx, - EQ_MSIX_RESEND_TIMER_CLEAR); - - ceq_tasklet_data = &ceq->ceq_tasklet_data; - ceq_tasklet_data->data = data; - 
tasklet_schedule(&ceq->ceq_tasklet); - - return IRQ_HANDLED; -} - -/** - * set_eq_ctrls - setting eq's ctrls registers - * @eq: the event queue for setting - **/ -static int set_eq_ctrls(struct sphw_eq *eq) -{ - enum sphw_eq_type type = eq->type; - struct sphw_hwif *hwif = eq->hwdev->hwif; - struct irq_info *eq_irq = &eq->eq_irq; - u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size; - u32 pci_intf_idx = SPHW_PCI_INTF_IDX(hwif); - int err; - - if (type == SPHW_AEQ) { - /* set ctrl0 */ - addr = SPHW_CSR_AEQ_CTRL_0_ADDR; - - val = sphw_hwif_read_reg(hwif, addr); - - val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) & - AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & - AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & - AEQ_CTRL_0_CLEAR(val, INTR_MODE); - - ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | - AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | - AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | - AEQ_CTRL_0_SET(SPHW_INTR_MODE_ARMED, INTR_MODE); - - val |= ctrl0; - - sphw_hwif_write_reg(hwif, addr, val); - - /* set ctrl1 */ - addr = SPHW_CSR_AEQ_CTRL_1_ADDR; - - page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); - elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); - - ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | - AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | - AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); - - sphw_hwif_write_reg(hwif, addr, ctrl1); - - } else { - page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); - ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | - CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | - CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) | - CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | - CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) | - CEQ_CTRL_0_SET(SPHW_INTR_MODE_ARMED, INTR_MODE); - - ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN); - - /* set ceq ctrl reg through mgmt cpu */ - err = sphw_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1); - if (err) - return err; - } - - return 0; -} - -/** - * ceq_elements_init - Initialize all the elements in the ceq - * @eq: the event queue - * @init_val: value to init with it the elements - **/ -static void ceq_elements_init(struct sphw_eq *eq, u32 init_val) -{ - u32 *ceqe = NULL; - u32 i; - - for (i = 0; i < eq->eq_len; i++) { - ceqe = GET_CEQ_ELEM(eq, i); - *(ceqe) = cpu_to_be32(init_val); - } - - wmb(); /* Write the init values */ -} - -/** - * aeq_elements_init - initialize all the elements in the aeq - * @eq: the event queue - * @init_val: value to init with it the elements - **/ -static void aeq_elements_init(struct sphw_eq *eq, u32 init_val) -{ - struct sphw_aeq_elem *aeqe = NULL; - u32 i; - - for (i = 0; i < eq->eq_len; i++) { - aeqe = GET_AEQ_ELEM(eq, i); - aeqe->desc = cpu_to_be32(init_val); - } - - wmb(); /* Write the init values */ -} - -static void eq_elements_init(struct sphw_eq *eq, u32 init_val) -{ - if (eq->type == SPHW_AEQ) - aeq_elements_init(eq, init_val); - else - ceq_elements_init(eq, init_val); -} - -/** - * alloc_eq_pages - allocate the pages for the queue - * @eq: the event queue - **/ -static int alloc_eq_pages(struct sphw_eq *eq) -{ - struct sphw_hwif *hwif = eq->hwdev->hwif; - struct sphw_dma_addr_align *eq_page = NULL; - u32 reg, init_val; - u16 pg_idx, i; - int err; - - eq->eq_pages = kcalloc(eq->num_pages, sizeof(*eq->eq_pages), - GFP_KERNEL); - if (!eq->eq_pages) { - sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq pages description\n"); - return -ENOMEM; - } - - for (pg_idx = 0; pg_idx < eq->num_pages; pg_idx++) { - eq_page = &eq->eq_pages[pg_idx]; - err = sphw_dma_alloc_coherent_align(eq->hwdev->dev_hdl, eq->page_size, - SPHW_MIN_EQ_PAGE_SIZE, 
GFP_KERNEL, eq_page); - if (err) { - sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq page, page index: %hu\n", - pg_idx); - goto dma_alloc_err; - } - - reg = SPHW_EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx); - sphw_hwif_write_reg(hwif, reg, upper_32_bits(eq_page->align_paddr)); - - reg = SPHW_EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx); - sphw_hwif_write_reg(hwif, reg, lower_32_bits(eq_page->align_paddr)); - } - - eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, eq->page_size); - if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { - sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of 2\n"); - err = -EINVAL; - goto dma_alloc_err; - } - init_val = EQ_WRAPPED(eq); - - eq_elements_init(eq, init_val); - - return 0; - -dma_alloc_err: - for (i = 0; i < pg_idx; i++) - sphw_dma_free_coherent_align(eq->hwdev->dev_hdl, &eq->eq_pages[i]); - - kfree(eq->eq_pages); - - return err; -} - -/** - * free_eq_pages - free the pages of the queue - * @eq: the event queue - **/ -static void free_eq_pages(struct sphw_eq *eq) -{ - u16 pg_idx; - - for (pg_idx = 0; pg_idx < eq->num_pages; pg_idx++) - sphw_dma_free_coherent_align(eq->hwdev->dev_hdl, &eq->eq_pages[pg_idx]); - - kfree(eq->eq_pages); -} - -static inline u32 get_page_size(struct sphw_eq *eq) -{ - u32 total_size; - u32 count; - - total_size = ALIGN((eq->eq_len * eq->elem_size), - SPHW_MIN_EQ_PAGE_SIZE); - - if (total_size <= (SPHW_EQ_MAX_PAGES(eq) * SPHW_MIN_EQ_PAGE_SIZE)) - return SPHW_MIN_EQ_PAGE_SIZE; - - count = (u32)(ALIGN((total_size / SPHW_EQ_MAX_PAGES(eq)), - SPHW_MIN_EQ_PAGE_SIZE) / SPHW_MIN_EQ_PAGE_SIZE); - - /* round up to nearest power of two */ - count = 1U << (u8)fls((int)(count - 1)); - - return ((u32)SPHW_MIN_EQ_PAGE_SIZE) * count; -} - -static int request_eq_irq(struct sphw_eq *eq, struct irq_info *entry) -{ - int err = 0; - - if (eq->type == SPHW_AEQ) { - struct sphw_eq_work *aeq_work = &eq->aeq_work; - - INIT_WORK(&aeq_work->work, eq_irq_work); - } else { - tasklet_init(&eq->ceq_tasklet, ceq_tasklet, - (ulong)(&eq->ceq_tasklet_data)); - } - - if (eq->type == SPHW_AEQ) { - snprintf(eq->irq_name, sizeof(eq->irq_name), - "sphw_aeq%u@pci:%s", eq->q_id, - pci_name(eq->hwdev->pcidev_hdl)); - - err = request_irq(entry->irq_id, aeq_interrupt, 0UL, - eq->irq_name, eq); - } else { - snprintf(eq->irq_name, sizeof(eq->irq_name), - "sphw_ceq%u@pci:%s", eq->q_id, - pci_name(eq->hwdev->pcidev_hdl)); - err = request_irq(entry->irq_id, ceq_interrupt, 0UL, - eq->irq_name, eq); - } - - return err; -} - -static void reset_eq(struct sphw_eq *eq) -{ - /* clear eq_len to force eqe drop in hardware */ - if (eq->type == SPHW_AEQ) - sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_CSR_AEQ_CTRL_1_ADDR, 0); - else - sphw_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); - - wmb(); /* clear eq_len before clear prod idx */ - - sphw_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); -} - -/** - * init_eq - initialize eq - * @eq: the event queue - * @hwdev: the pointer to hw device - * @q_id: Queue id number - * @q_len: the number of EQ elements - * @type: the type of the event queue, ceq or aeq - * @entry: msix entry associated with the event queue - * Return: 0 - Success, Negative - failure - **/ -static int init_eq(struct sphw_eq *eq, struct sphw_hwdev *hwdev, u16 q_id, - u32 q_len, enum sphw_eq_type type, struct irq_info *entry) -{ - int err = 0; - - eq->hwdev = hwdev; - eq->q_id = q_id; - eq->type = type; - eq->eq_len = q_len; - - /* Indirect access should set q_id first */ - sphw_hwif_write_reg(hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); - wmb(); /* 
write index before config */ - - reset_eq(eq); - - eq->cons_idx = 0; - eq->wrapped = 0; - - eq->elem_size = (type == SPHW_AEQ) ? SPHW_AEQE_SIZE : SPHW_CEQE_SIZE; - - eq->page_size = get_page_size(eq); - eq->orig_page_size = eq->page_size; - eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size); - if (eq->num_pages > SPHW_EQ_MAX_PAGES(eq)) { - sdk_err(hwdev->dev_hdl, "Number pages: %u too many pages for eq\n", - eq->num_pages); - return -EINVAL; - } - - err = alloc_eq_pages(eq); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n"); - return err; - } - - eq->eq_irq.msix_entry_idx = entry->msix_entry_idx; - eq->eq_irq.irq_id = entry->irq_id; - - err = set_eq_ctrls(eq); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to set ctrls for eq\n"); - goto init_eq_ctrls_err; - } - - set_eq_cons_idx(eq, SPHW_EQ_ARMED); - - err = request_eq_irq(eq, entry); - if (err) { - sdk_err(hwdev->dev_hdl, - "Failed to request irq for the eq, err: %d\n", err); - goto req_irq_err; - } - - sphw_set_msix_state(hwdev, entry->msix_entry_idx, SPHW_MSIX_DISABLE); - - return 0; - -init_eq_ctrls_err: -req_irq_err: - free_eq_pages(eq); - return err; -} - -/** - * remove_eq - remove eq - * @eq: the event queue - **/ -static void remove_eq(struct sphw_eq *eq) -{ - struct irq_info *entry = &eq->eq_irq; - - sphw_set_msix_state(eq->hwdev, entry->msix_entry_idx, SPHW_MSIX_DISABLE); - synchronize_irq(entry->irq_id); - - free_irq(entry->irq_id, eq); - - /* Indirect access should set q_id first */ - sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); - - wmb(); /* write index before config */ - - if (eq->type == SPHW_AEQ) { - struct sphw_eq_work *aeq_work = &eq->aeq_work; - - cancel_work_sync(&aeq_work->work); - - /* clear eq_len to avoid hw access host memory */ - sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_CSR_AEQ_CTRL_1_ADDR, 0); - } else { - tasklet_kill(&eq->ceq_tasklet); - - sphw_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); - } - - /* update cons_idx to avoid invalid interrupt */ - eq->cons_idx = sphw_hwif_read_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq)); - set_eq_cons_idx(eq, SPHW_EQ_NOT_ARMED); - - free_eq_pages(eq); -} - -/** - * sphw_aeqs_init - init all the aeqs - * @hwdev: the pointer to hw device - * @num_aeqs: number of AEQs - * @msix_entries: msix entries associated with the event queues - * Return: 0 - Success, Negative - failure - **/ -int sphw_aeqs_init(struct sphw_hwdev *hwdev, u16 num_aeqs, struct irq_info *msix_entries) -{ - struct sphw_aeqs *aeqs = NULL; - int err; - u16 i, q_id; - - if (!hwdev) - return -EINVAL; - - aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL); - if (!aeqs) - return -ENOMEM; - - hwdev->aeqs = aeqs; - aeqs->hwdev = hwdev; - aeqs->num_aeqs = num_aeqs; - aeqs->workq = alloc_workqueue(SPHW_EQS_WQ_NAME, WQ_MEM_RECLAIM, SPHW_MAX_AEQS); - if (!aeqs->workq) { - sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n"); - err = -ENOMEM; - goto create_work_err; - } - - if (g_aeq_len < SPHW_MIN_AEQ_LEN || g_aeq_len > SPHW_MAX_AEQ_LEN) { - sdk_warn(hwdev->dev_hdl, "Module Parameter g_aeq_len value %u out of range, resetting to %d\n", - g_aeq_len, SPHW_DEFAULT_AEQ_LEN); - g_aeq_len = SPHW_DEFAULT_AEQ_LEN; - } - - for (q_id = 0; q_id < num_aeqs; q_id++) { - err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, g_aeq_len, - SPHW_AEQ, &msix_entries[q_id]); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init aeq %u\n", - q_id); - goto init_aeq_err; - } - } - for (q_id = 0; q_id < num_aeqs; q_id++) - sphw_set_msix_state(hwdev, msix_entries[q_id].msix_entry_idx, 
SPHW_MSIX_ENABLE); - - return 0; - -init_aeq_err: - for (i = 0; i < q_id; i++) - remove_eq(&aeqs->aeq[i]); - - destroy_workqueue(aeqs->workq); - -create_work_err: - kfree(aeqs); - - return err; -} - -/** - * sphw_aeqs_free - free all the aeqs - * @hwdev: the pointer to hw device - **/ -void sphw_aeqs_free(struct sphw_hwdev *hwdev) -{ - struct sphw_aeqs *aeqs = hwdev->aeqs; - enum sphw_aeq_type aeq_event = SPHW_HW_INTER_INT; - enum sphw_aeq_sw_type sw_aeq_event = SPHW_STATELESS_EVENT; - u16 q_id; - - for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) - remove_eq(&aeqs->aeq[q_id]); - - for (; sw_aeq_event < SPHW_MAX_AEQ_SW_EVENTS; sw_aeq_event++) - sphw_aeq_unregister_swe_cb(hwdev, sw_aeq_event); - - for (; aeq_event < SPHW_MAX_AEQ_EVENTS; aeq_event++) - sphw_aeq_unregister_hw_cb(hwdev, aeq_event); - - destroy_workqueue(aeqs->workq); - - kfree(aeqs); -} - -/** - * sphw_ceqs_init - init all the ceqs - * @hwdev: the pointer to hw device - * @num_ceqs: number of CEQs - * @msix_entries: msix entries associated with the event queues - * Return: 0 - Success, Negative - failure - **/ -int sphw_ceqs_init(struct sphw_hwdev *hwdev, u16 num_ceqs, struct irq_info *msix_entries) -{ - struct sphw_ceqs *ceqs; - int err; - u16 i, q_id; - - ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL); - if (!ceqs) - return -ENOMEM; - - hwdev->ceqs = ceqs; - - ceqs->hwdev = hwdev; - ceqs->num_ceqs = num_ceqs; - - if (g_ceq_len < SPHW_MIN_CEQ_LEN || g_ceq_len > SPHW_MAX_CEQ_LEN) { - sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %u out of range, resetting to %d\n", - g_ceq_len, SPHW_DEFAULT_CEQ_LEN); - g_ceq_len = SPHW_DEFAULT_CEQ_LEN; - } - - if (!g_num_ceqe_in_tasklet) { - sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet can not be zero, resetting to %d\n", - SPHW_TASK_PROCESS_EQE_LIMIT); - g_num_ceqe_in_tasklet = SPHW_TASK_PROCESS_EQE_LIMIT; - } - for (q_id = 0; q_id < num_ceqs; q_id++) { - err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, g_ceq_len, - SPHW_CEQ, &msix_entries[q_id]); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init ceq %u\n", - q_id); - goto init_ceq_err; - } - } - for (q_id = 0; q_id < num_ceqs; q_id++) - sphw_set_msix_state(hwdev, msix_entries[q_id].msix_entry_idx, SPHW_MSIX_ENABLE); - - return 0; - -init_ceq_err: - for (i = 0; i < q_id; i++) - remove_eq(&ceqs->ceq[i]); - - kfree(ceqs); - - return err; -} - -/** - * sphw_ceqs_free - free all the ceqs - * @hwdev: the pointer to hw device - **/ -void sphw_ceqs_free(struct sphw_hwdev *hwdev) -{ - struct sphw_ceqs *ceqs = hwdev->ceqs; - enum sphw_ceq_event ceq_event = SPHW_CMDQ; - u16 q_id; - - for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) - remove_eq(&ceqs->ceq[q_id]); - - for (; ceq_event < SPHW_MAX_CEQ_EVENTS; ceq_event++) - sphw_ceq_unregister_cb(hwdev, ceq_event); - - kfree(ceqs); -} - -void sphw_get_ceq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs) -{ - struct sphw_ceqs *ceqs = hwdev->ceqs; - u16 q_id; - - for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { - irqs[q_id].irq_id = ceqs->ceq[q_id].eq_irq.irq_id; - irqs[q_id].msix_entry_idx = - ceqs->ceq[q_id].eq_irq.msix_entry_idx; - } - - *num_irqs = ceqs->num_ceqs; -} - -void sphw_get_aeq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs) -{ - struct sphw_aeqs *aeqs = hwdev->aeqs; - u16 q_id; - - for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { - irqs[q_id].irq_id = aeqs->aeq[q_id].eq_irq.irq_id; - irqs[q_id].msix_entry_idx = - aeqs->aeq[q_id].eq_irq.msix_entry_idx; - } - - *num_irqs = aeqs->num_aeqs; -} - -void sphw_dump_aeq_info(struct 
sphw_hwdev *hwdev) -{ - struct sphw_aeq_elem *aeqe_pos = NULL; - struct sphw_eq *eq = NULL; - u32 addr, ci, pi, ctrl0, idx; - int q_id; - - for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) { - eq = &hwdev->aeqs->aeq[q_id]; - /* Indirect access should set q_id first */ - sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); - wmb(); /* write index before config */ - - addr = SPHW_CSR_AEQ_CTRL_0_ADDR; - - ctrl0 = sphw_hwif_read_reg(hwdev->hwif, addr); - - idx = sphw_hwif_read_reg(hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type)); - - addr = EQ_CONS_IDX_REG_ADDR(eq); - ci = sphw_hwif_read_reg(hwdev->hwif, addr); - addr = EQ_PROD_IDX_REG_ADDR(eq); - pi = sphw_hwif_read_reg(hwdev->hwif, addr); - aeqe_pos = GET_CURR_AEQ_ELEM(eq); - sdk_err(hwdev->dev_hdl, "Aeq id: %d, idx: %u, ctrl0: 0x%08x, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %u, desc: 0x%x\n", - q_id, idx, ctrl0, ci, pi, work_busy(&eq->aeq_work.work), - eq->wrapped, be32_to_cpu(aeqe_pos->desc)); - } -} - -void sphw_dump_ceq_info(struct sphw_hwdev *hwdev) -{ - struct sphw_eq *eq = NULL; - u32 addr, ci, pi; - int q_id; - - for (q_id = 0; q_id < hwdev->ceqs->num_ceqs; q_id++) { - eq = &hwdev->ceqs->ceq[q_id]; - /* Indirect access should set q_id first */ - sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); - wmb(); /* write index before config */ - - addr = EQ_CONS_IDX_REG_ADDR(eq); - ci = sphw_hwif_read_reg(hwdev->hwif, addr); - addr = EQ_PROD_IDX_REG_ADDR(eq); - pi = sphw_hwif_read_reg(hwdev->hwif, addr); - sdk_err(hwdev->dev_hdl, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %u, ceqe: 0x%x\n", - q_id, ci, eq->cons_idx, pi, eq->ceq_tasklet.state, - eq->wrapped, be32_to_cpu(*(GET_CURR_CEQ_ELEM(eq)))); - - sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n", - jiffies_to_msecs(jiffies - eq->hard_intr_jif)); - sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n", - jiffies_to_msecs(jiffies - eq->soft_intr_jif)); - } -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h deleted file mode 100644 index df25b5a5fdcf45518304f3f4c8dc466a4997f9b7..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h +++ /dev/null @@ -1,157 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_EQS_H -#define SPHW_EQS_H - -#define SPHW_MAX_AEQS 4 -#define SPHW_MAX_CEQS 32 - -#define SPHW_AEQ_MAX_PAGES 4 -#define SPHW_CEQ_MAX_PAGES 8 - -#define SPHW_AEQE_SIZE 64 -#define SPHW_CEQE_SIZE 4 - -#define SPHW_AEQE_DESC_SIZE 4 -#define SPHW_AEQE_DATA_SIZE \ - (SPHW_AEQE_SIZE - SPHW_AEQE_DESC_SIZE) - -#define SPHW_DEFAULT_AEQ_LEN 4096 -#define SPHW_DEFAULT_CEQ_LEN 8192 - -#define SPHW_MIN_EQ_PAGE_SIZE 0x1000 /* min eq page size 4K Bytes */ -#define SPHW_MAX_EQ_PAGE_SIZE 0x400000 /* max eq page size 4M Bytes */ - -#define SPHW_MIN_AEQ_LEN 64 -#define SPHW_MAX_AEQ_LEN \ - ((SPHW_MAX_EQ_PAGE_SIZE / SPHW_AEQE_SIZE) * SPHW_AEQ_MAX_PAGES) - -#define SPHW_MIN_CEQ_LEN 64 -#define SPHW_MAX_CEQ_LEN \ - ((SPHW_MAX_EQ_PAGE_SIZE / SPHW_CEQE_SIZE) * SPHW_CEQ_MAX_PAGES) -#define SPHW_CEQ_ID_CMDQ 0 - -#define EQ_IRQ_NAME_LEN 64 - -#define EQ_USLEEP_LOW_BOUND 900 -#define EQ_USLEEP_HIG_BOUND 1000 - -enum sphw_eq_type { - SPHW_AEQ, - SPHW_CEQ -}; - -enum sphw_eq_intr_mode { - SPHW_INTR_MODE_ARMED, - SPHW_INTR_MODE_ALWAYS, -}; - -enum sphw_eq_ci_arm_state { - SPHW_EQ_NOT_ARMED, - SPHW_EQ_ARMED, -}; 
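/*
 * Editorial sketch, not part of the original header: a worked example of
 * the queue-depth limits defined above. With SPHW_MAX_EQ_PAGE_SIZE =
 * 0x400000 (4 MB), a 64-byte AEQE and SPHW_AEQ_MAX_PAGES = 4,
 * SPHW_MAX_AEQ_LEN = (0x400000 / 64) * 4 = 262144 entries; with a 4-byte
 * CEQE and SPHW_CEQ_MAX_PAGES = 8, SPHW_MAX_CEQ_LEN = (0x400000 / 4) * 8 =
 * 8388608 entries. The helper below mirrors the range checks applied to
 * the g_aeq_len/g_ceq_len module parameters in sphw_aeqs_init() and
 * sphw_ceqs_init(); the helper name is illustrative only.
 */
static inline u32 sphw_validate_eq_len(u32 requested, u32 min_len,
				       u32 max_len, u32 def_len)
{
	/* Out-of-range module parameters fall back to the default depth. */
	if (requested < min_len || requested > max_len)
		return def_len;
	return requested;
}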
- -struct sphw_eq_work { - struct work_struct work; - void *data; -}; - -struct sphw_ceq_tasklet_data { - void *data; -}; - -struct sphw_eq { - struct sphw_hwdev *hwdev; - u16 q_id; - enum sphw_eq_type type; - u32 page_size; - u32 orig_page_size; - u32 eq_len; - - u32 cons_idx; - u16 wrapped; - - u16 elem_size; - u16 num_pages; - u32 num_elem_in_pg; - - struct irq_info eq_irq; - char irq_name[EQ_IRQ_NAME_LEN]; - - struct sphw_dma_addr_align *eq_pages; - - struct sphw_eq_work aeq_work; - struct tasklet_struct ceq_tasklet; - struct sphw_ceq_tasklet_data ceq_tasklet_data; - - u64 hard_intr_jif; - u64 soft_intr_jif; -}; - -struct sphw_aeq_elem { - u8 aeqe_data[SPHW_AEQE_DATA_SIZE]; - u32 desc; -}; - -enum sphw_aeq_cb_state { - SPHW_AEQ_HW_CB_REG = 0, - SPHW_AEQ_HW_CB_RUNNING, - SPHW_AEQ_SW_CB_REG, - SPHW_AEQ_SW_CB_RUNNING, -}; - -struct sphw_aeqs { - struct sphw_hwdev *hwdev; - - sphw_aeq_hwe_cb aeq_hwe_cb[SPHW_MAX_AEQ_EVENTS]; - sphw_aeq_swe_cb aeq_swe_cb[SPHW_MAX_AEQ_SW_EVENTS]; - unsigned long aeq_hw_cb_state[SPHW_MAX_AEQ_EVENTS]; - unsigned long aeq_sw_cb_state[SPHW_MAX_AEQ_SW_EVENTS]; - - struct sphw_eq aeq[SPHW_MAX_AEQS]; - u16 num_aeqs; - struct workqueue_struct *workq; -}; - -enum sphw_ceq_cb_state { - SPHW_CEQ_CB_REG = 0, - SPHW_CEQ_CB_RUNNING, -}; - -struct sphw_ceqs { - struct sphw_hwdev *hwdev; - - sphw_ceq_event_cb ceq_cb[SPHW_MAX_CEQ_EVENTS]; - void *ceq_data[SPHW_MAX_CEQ_EVENTS]; - unsigned long ceq_cb_state[SPHW_MAX_CEQ_EVENTS]; - - struct sphw_eq ceq[SPHW_MAX_CEQS]; - u16 num_ceqs; -}; - -struct sphw_ceq_info { - u32 q_len; - u32 page_size; - u16 elem_size; - u16 num_pages; - u32 num_elem_in_pg; -}; - -int sphw_aeqs_init(struct sphw_hwdev *hwdev, u16 num_aeqs, struct irq_info *msix_entries); - -void sphw_aeqs_free(struct sphw_hwdev *hwdev); - -int sphw_ceqs_init(struct sphw_hwdev *hwdev, u16 num_ceqs, struct irq_info *msix_entries); - -void sphw_ceqs_free(struct sphw_hwdev *hwdev); - -void sphw_get_ceq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs); - -void sphw_get_aeq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs); - -void sphw_dump_ceq_info(struct sphw_hwdev *hwdev); - -void sphw_dump_aeq_info(struct sphw_hwdev *hwdev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h deleted file mode 100644 index 41945efe86d8af3c0e8d8b3650ddfae23d3131e3..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h +++ /dev/null @@ -1,643 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_HW_H -#define SPHW_HW_H - -#include "sphw_comm_cmd.h" -#include "sphw_comm_msg_intf.h" -#include "sphw_crm.h" - -enum sphw_mod_type { - SPHW_MOD_COMM = 0, /* HW communication module */ - SPHW_MOD_L2NIC = 1, /* L2NIC module */ - SPHW_MOD_ROCE = 2, - SPHW_MOD_PLOG = 3, - SPHW_MOD_TOE = 4, - SPHW_MOD_FLR = 5, - SPHW_MOD_FC = 6, - SPHW_MOD_CFGM = 7, /* Configuration module */ - SPHW_MOD_CQM = 8, - SPHW_MOD_VSWITCH = 9, - COMM_MOD_FC = 10, - SPHW_MOD_OVS = 11, - SPHW_MOD_DSW = 12, - SPHW_MOD_MIGRATE = 13, - SPHW_MOD_HILINK = 14, - SPHW_MOD_CRYPT = 15, /* secure crypto module */ - SPHW_MOD_VIO = 16, - SPHW_MOD_DFT = 17, /* DFT */ - SPHW_MOD_HW_MAX = 18, /* hardware max module id */ - /* Software module id, for PF/VF and multi-host */ - SPHW_MOD_SW_FUNC = 19, - SPHW_MOD_IOE = 20, - SPHW_MOD_MAX, -}; - -/* to use 0-level CLA, page size must be: SQ 16B(wqe) * 64k(max_q_depth) */ -#define 
SPHW_DEFAULT_WQ_PAGE_SIZE 0x100000 -#define SPHW_HW_WQ_PAGE_SIZE 0x1000 -#define SPHW_MAX_WQ_PAGE_SIZE_ORDER 8 - -enum sphw_channel_id { - SPHW_CHANNEL_DEFAULT, - SPHW_CHANNEL_COMM, - SPHW_CHANNEL_NIC, - SPHW_CHANNEL_ROCE, - SPHW_CHANNEL_TOE, - SPHW_CHANNEL_FC, - SPHW_CHANNEL_OVS, - SPHW_CHANNEL_DSW, - SPHW_CHANNEL_MIG, - SPHW_CHANNEL_CRYPT, - - SPHW_CHANNEL_MAX = 32, -}; - -struct sphw_cmd_buf { - void *buf; - dma_addr_t dma_addr; - u16 size; - /* Usage count, USERS DO NOT USE */ - atomic_t ref_cnt; -}; - -enum sphw_aeq_type { - SPHW_HW_INTER_INT = 0, - SPHW_MBX_FROM_FUNC = 1, - SPHW_MSG_FROM_MGMT_CPU = 2, - SPHW_API_RSP = 3, - SPHW_API_CHAIN_STS = 4, - SPHW_MBX_SEND_RSLT = 5, - SPHW_MAX_AEQ_EVENTS -}; - -#define SPHW_NIC_FATAL_ERROR_MAX 0x8U - -enum sphw_aeq_sw_type { - SPHW_STATELESS_EVENT = 0, - SPHW_STATEFULL_EVENT = 1, - SPHW_MAX_AEQ_SW_EVENTS -}; - -typedef void (*sphw_aeq_hwe_cb)(void *handle, u8 *data, u8 size); -typedef u8 (*sphw_aeq_swe_cb)(void *handle, u8 event, u8 *data); - -/** - * @brief sphw_aeq_register_hw_cb - register aeq hardware callback - * @param hwdev: device pointer to hwdev - * @param event: event type - * @param hwe_cb: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_aeq_register_hw_cb(void *hwdev, enum sphw_aeq_type event, sphw_aeq_hwe_cb hwe_cb); - -/** - * @brief sphw_aeq_unregister_hw_cb - unregister aeq hardware callback - * @param hwdev: device pointer to hwdev - * @param event: event type - **/ -void sphw_aeq_unregister_hw_cb(void *hwdev, enum sphw_aeq_type event); - -/** - * @brief sphw_aeq_register_swe_cb - register aeq soft event callback - * @param hwdev: device pointer to hwdev - * @param event: event type - * @param aeq_swe_cb: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_aeq_register_swe_cb(void *hwdev, enum sphw_aeq_sw_type event, sphw_aeq_swe_cb aeq_swe_cb); - -/** - * @brief sphw_aeq_unregister_swe_cb - unregister aeq soft event callback - * @param hwdev: device pointer to hwdev - * @param event: event type - **/ -void sphw_aeq_unregister_swe_cb(void *hwdev, enum sphw_aeq_sw_type event); - -enum sphw_ceq_event { - SPHW_NON_L2NIC_SCQ, - SPHW_NON_L2NIC_ECQ, - SPHW_NON_L2NIC_NO_CQ_EQ, - SPHW_CMDQ, - SPHW_L2NIC_SQ, - SPHW_L2NIC_RQ, - SPHW_MAX_CEQ_EVENTS, -}; - -typedef void (*sphw_ceq_event_cb)(void *handle, u32 ceqe_data); - -/** - * @brief sphw_ceq_register_cb - register ceq callback - * @param hwdev: device pointer to hwdev - * @param event: event type - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_ceq_register_cb(void *hwdev, enum sphw_ceq_event event, sphw_ceq_event_cb callback); -/** - * @brief sphw_ceq_unregister_cb - unregister ceq callback - * @param hwdev: device pointer to hwdev - * @param event: event type - **/ -void sphw_ceq_unregister_cb(void *hwdev, enum sphw_ceq_event event); - -typedef int (*sphw_vf_mbox_cb)(void *handle, void *pri_handle, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size); - -typedef int (*sphw_pf_mbox_cb)(void *handle, void *pri_handle, u16 vf_id, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size); - -typedef int (*sphw_ppf_mbox_cb)(void *handle, void *pri_handle, - u16 pf_idx, u16 vf_id, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size); - -typedef int (*sphw_pf_recv_from_ppf_mbox_cb)(void *handle, void *pri_handle, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size); - -/** - * @brief 
sphw_register_ppf_mbox_cb - ppf register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param pri_handle: private data will be used by the callback - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_ppf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_ppf_mbox_cb callback); - -/** - * @brief sphw_register_pf_mbox_cb - pf register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param pri_handle: private data will be used by the callback - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_pf_mbox_cb callback); -/** - * @brief sphw_register_vf_mbox_cb - vf register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param pri_handle: private data will be used by the callback - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_vf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_vf_mbox_cb callback); -/** - * @brief sphw_register_ppf_to_pf_mbox_cb - register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param pri_handle: private data will be used by the callback - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_ppf_to_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, - sphw_pf_recv_from_ppf_mbox_cb callback); - -/** - * @brief sphw_unregister_ppf_mbox_cb - ppf unregister mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - **/ -void sphw_unregister_ppf_mbox_cb(void *hwdev, u8 mod); - -/** - * @brief sphw_unregister_pf_mbox_cb - pf register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - **/ -void sphw_unregister_pf_mbox_cb(void *hwdev, u8 mod); - -/** - * @brief sphw_unregister_vf_mbox_cb - pf register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - **/ -void sphw_unregister_vf_mbox_cb(void *hwdev, u8 mod); - -/** - * @brief sphw_unregister_ppf_to_pf_mbox_cb - unregister mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - **/ -void sphw_unregister_ppf_to_pf_mbox_cb(void *hwdev, u8 mod); - -typedef void (*sphw_mgmt_msg_cb)(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); - -/** - * @brief sphw_register_mgmt_msg_cb - register mgmt msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param pri_handle: private data will be used by the callback - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_mgmt_msg_cb(void *hwdev, u8 mod, void *pri_handle, sphw_mgmt_msg_cb callback); - -/** - * @brief sphw_unregister_mgmt_msg_cb - unregister mgmt msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - **/ -void sphw_unregister_mgmt_msg_cb(void *hwdev, u8 mod); - -/** - * @brief sphw_register_service_adapter - register service adapter - * @param hwdev: device pointer to hwdev - * @param service_adapter: service adapter - * @param type: service type - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_service_adapter(void *hwdev, void *service_adapter, - enum 
sphw_service_type type); - -/** - * @brief sphw_unregister_service_adapter - unregister service adapter - * @param hwdev: device pointer to hwdev - * @param type: service type - **/ -void sphw_unregister_service_adapter(void *hwdev, enum sphw_service_type type); - -/** - * @brief sphw_get_service_adapter - get service adapter - * @param hwdev: device pointer to hwdev - * @param type: service type - * @retval non-zero: success - * @retval null: failure - **/ -void *sphw_get_service_adapter(void *hwdev, enum sphw_service_type type); - -/** - * @brief sphw_alloc_db_phy_addr - alloc doorbell & direct wqe pyhsical addr - * @param hwdev: device pointer to hwdev - * @param db_base: pointer to alloc doorbell base address - * @param dwqe_base: pointer to alloc direct base address - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base); - -/** - * @brief sphw_free_db_phy_addr - free doorbell & direct wqe physical address - * @param hwdev: device pointer to hwdev - * @param db_base: pointer to free doorbell base address - * @param dwqe_base: pointer to free direct base address - **/ -void sphw_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base); - -/** - * @brief sphw_alloc_db_addr - alloc doorbell & direct wqe - * @param hwdev: device pointer to hwdev - * @param db_base: pointer to alloc doorbell base address - * @param dwqe_base: pointer to alloc direct base address - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_alloc_db_addr(void *hwdev, void __iomem **db_base, void __iomem **dwqe_base); - -/** - * @brief sphw_free_db_addr - free doorbell & direct wqe - * @param hwdev: device pointer to hwdev - * @param db_base: pointer to free doorbell base address - * @param dwqe_base: pointer to free direct base address - **/ -void sphw_free_db_addr(void *hwdev, const void __iomem *db_base, void __iomem *dwqe_base); - -/** - * @brief sphw_alloc_db_phy_addr - alloc physical doorbell & direct wqe - * @param hwdev: device pointer to hwdev - * @param db_base: pointer to alloc doorbell base address - * @param dwqe_base: pointer to alloc direct base address - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base); - -/** - * @brief sphw_free_db_phy_addr - free physical doorbell & direct wqe - * @param hwdev: device pointer to hwdev - * @param db_base: free doorbell base address - * @param dwqe_base: free direct base address - **/ - -void sphw_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base); - -/** - * @brief sphw_set_root_ctxt - set root context - * @param hwdev: device pointer to hwdev - * @param rq_depth: rq depth - * @param sq_depth: sq depth - * @param rx_buf_sz: rx buffer size - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_set_root_ctxt(void *hwdev, u32 rq_depth, u32 sq_depth, int rx_buf_sz, u16 channel); - -/** - * @brief sphw_clean_root_ctxt - clean root context - * @param hwdev: device pointer to hwdev - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_clean_root_ctxt(void *hwdev, u16 channel); - -/** - * @brief sphw_alloc_cmd_buf - alloc cmd buffer - * @param hwdev: device pointer to hwdev - * @retval non-zero: success - * @retval null: failure - **/ -struct sphw_cmd_buf *sphw_alloc_cmd_buf(void *hwdev); - -/** - * @brief sphw_free_cmd_buf - free cmd buffer - * @param hwdev: device pointer to hwdev - * @param 
cmd_buf: cmd buffer to free - **/ -void sphw_free_cmd_buf(void *hwdev, struct sphw_cmd_buf *cmd_buf); - -/** - * @brief sphw_dbg_get_hw_stats - get hardware stats - * @param hwdev: device pointer to hwdev - * @param hw_stats: pointer to memory caller to alloc - * @param out_size: out size - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size); - -/** - * @brief sphw_dbg_clear_hw_stats - clear hardware stats - * @param hwdev: device pointer to hwdev - * @retval clear hardware size - */ -u16 sphw_dbg_clear_hw_stats(void *hwdev); - -/** - * @brief sphw_get_chip_fault_stats - get chip fault stats - * @param hwdev: device pointer to hwdev - * @param chip_fault_stats: pointer to memory caller to alloc - * @param offset: offset - */ -void sphw_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, u32 offset); - -/** - * @brief sphw_msg_to_mgmt_sync - msg to management cpu - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param in_size: in buffer size - * @param buf_out: message buffer out - * @param out_size: out buffer size - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel); - -/** - * @brief sphw_msg_to_mgmt_async - msg to management cpu async - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param in_size: in buffer size - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - * - * The function does not sleep inside, allowing use in irq context - */ -int sphw_msg_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel); - -/** - * @brief sphw_msg_to_mgmt_no_ack - msg to management cpu don't need no ack - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param in_size: in buffer size - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - * - * The function will sleep inside, and it is not allowed to be used in - * interrupt context - */ -int sphw_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel); - -int sphw_msg_to_mgmt_api_chain_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size); - -int sphw_msg_to_mgmt_api_chain_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u32 timeout); - -/** - * @brief sphw_mbox_to_pf - vf mbox message to pf - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param in_size: in buffer size - * @param buf_out: message buffer out - * @param out_size: out buffer size - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_mbox_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel); - -/** - * @brief sphw_mbox_to_vf - mbox message to vf - * @param hwdev: device pointer to hwdev - * @param vf_id: vf index - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param in_size: in buffer size - * @param buf_out: 
message buffer out - * @param out_size: out buffer size - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_mbox_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); - -/** - * @brief sphw_cmdq_async - cmdq asynchronous message - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, u16 channel); - -/** - * @brief sphw_cmdq_direct_resp - cmdq direct message response - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param out_param: message out - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, - u64 *out_param, u32 timeout, u16 channel); - -/** - * @brief sphw_cmdq_detail_resp - cmdq detail message response - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param buf_out: message buffer out - * @param out_param: inline output data - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_cmdq_detail_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel); - -/** - * @brief sphw_cos_id_detail_resp - cmdq detail message response - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param cos_id: cos id - * @param buf_in: message buffer in - * @param buf_out: message buffer out - * @param out_param: inline output data - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_cos_id_detail_resp(void *hwdev, u8 mod, u8 cmd, u8 cos_id, struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel); - -/** - * @brief sphw_ppf_tmr_start - start ppf timer - * @param hwdev: device pointer to hwdev - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_ppf_tmr_start(void *hwdev); - -/** - * @brief sphw_ppf_tmr_stop - stop ppf timer - * @param hwdev: device pointer to hwdev - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_ppf_tmr_stop(void *hwdev); - -/** - * @brief sphw_func_tmr_bitmap_set - set timer bitmap status - * @param hwdev: device pointer to hwdev - * @param enable: 0-disable, 1-enable - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_func_tmr_bitmap_set(void *hwdev, bool enable); - -/** - * @brief sphw_get_board_info - get board info - * @param hwdev: device pointer to hwdev - * @param info: board info - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_get_board_info(void *hwdev, struct sphw_board_info *info, u16 channel); - -/** - * @brief sphw_set_wq_page_size - set work queue page size - * @param hwdev: device pointer to hwdev - * @param func_idx: function id - * @param page_size: page size - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int 
sphw_set_wq_page_size(void *hwdev, u16 func_idx, u32 page_size, u16 channel); - -/** - * @brief sphw_event_callback - evnet callback to notify service driver - * @param hwdev: device pointer to hwdev - * @param event: event info to service driver - */ -void sphw_event_callback(void *hwdev, struct sphw_event_info *event); - -/** - * @brief sphw_link_event_stats - link event stats - * @param hwdev: device pointer to hwdev - * @param link: link status - */ -void sphw_link_event_stats(void *dev, u8 link); - -enum func_reset_flag { - RES_TYPE_FLUSH_BIT = 0, - RES_TYPE_MQM, - RES_TYPE_SMF, - - RES_TYPE_COMM = 10, - RES_TYPE_COMM_MGMT_CH, - RES_TYPE_COMM_CMD_CH, - RES_TYPE_NIC, - RES_TYPE_OVS, - RES_TYPE_VBS, - RES_TYPE_ROCE, - RES_TYPE_FC, - RES_TYPE_TOE, - RES_TYPE_IPSEC, -}; - -#define SPHW_COMM_RES (BIT(RES_TYPE_COMM) | BIT(RES_TYPE_FLUSH_BIT) | BIT(RES_TYPE_MQM) | \ - BIT(RES_TYPE_SMF) | BIT(RES_TYPE_COMM_CMD_CH)) - -#define SPHW_NIC_RES BIT(RES_TYPE_NIC) -#define SPHW_FC_RES BIT(RES_TYPE_FC) - -/** - * @brief sphw_func_reset - reset func - * @param hwdev: device pointer to hwdev - * @param func_id: global function index - * @param reset_flag: reset flag - * @param channel: channel id - */ -int sphw_func_reset(void *dev, u16 func_id, u64 reset_flag, u16 channel); - -int sphw_get_dev_cap(void *hwdev); - -int sphw_set_bdf_ctxt(void *hwdev, u8 bus, u8 device, u8 function); - -int sphw_init_func_mbox_msg_channel(void *hwdev, u16 num_func); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c deleted file mode 100644 index 4b2674ec66f040f3a9e74295998a921e62cb2a63..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c +++ /dev/null @@ -1,1341 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_common.h" -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" -#include "sphw_cfg_cmd.h" -#include "sphw_hw_cfg.h" - -uint g_rdma_mtts_num; - -uint intr_mode; -uint timer_enable = 1; -uint bloomfilter_enable; -uint g_test_mode; -uint g_test_qpc_num; -uint g_test_qpc_resvd_num; -uint g_test_pagesize_reorder; -uint g_test_xid_alloc_mode = 1; -uint g_test_gpa_check_enable = 1; -uint g_test_qpc_alloc_mode = 2; -uint g_test_scqc_alloc_mode = 2; -uint g_test_max_conn; -uint g_test_max_cache_conn; -uint g_test_scqc_num; -uint g_test_mpt_num; -uint g_test_mpt_resvd; -uint g_test_scq_resvd; -uint g_test_hash_num; -uint g_test_reorder_num; - -static void set_cfg_test_param(struct cfg_mgmt_info *cfg_mgmt) -{ - cfg_mgmt->svc_cap.test_mode = g_test_mode; - if (cfg_mgmt->svc_cap.test_mode == 0) - return; - - cfg_mgmt->svc_cap.timer_en = (u8)timer_enable; - cfg_mgmt->svc_cap.bloomfilter_en = (u8)bloomfilter_enable; - cfg_mgmt->svc_cap.test_qpc_num = g_test_qpc_num; - cfg_mgmt->svc_cap.test_qpc_resvd_num = g_test_qpc_resvd_num; - cfg_mgmt->svc_cap.test_page_size_reorder = g_test_pagesize_reorder; - cfg_mgmt->svc_cap.test_xid_alloc_mode = (bool)g_test_xid_alloc_mode; - cfg_mgmt->svc_cap.test_gpa_check_enable = (bool)g_test_gpa_check_enable; - cfg_mgmt->svc_cap.test_qpc_alloc_mode = (u8)g_test_qpc_alloc_mode; - cfg_mgmt->svc_cap.test_scqc_alloc_mode = (u8)g_test_scqc_alloc_mode; - cfg_mgmt->svc_cap.test_max_conn_num = g_test_max_conn; - cfg_mgmt->svc_cap.test_max_cache_conn_num 
= g_test_max_cache_conn; - cfg_mgmt->svc_cap.test_scqc_num = g_test_scqc_num; - cfg_mgmt->svc_cap.test_mpt_num = g_test_mpt_num; - cfg_mgmt->svc_cap.test_scq_resvd_num = g_test_scq_resvd; - cfg_mgmt->svc_cap.test_mpt_recvd_num = g_test_mpt_resvd; - cfg_mgmt->svc_cap.test_hash_num = g_test_hash_num; - cfg_mgmt->svc_cap.test_reorder_num = g_test_reorder_num; -} - -static void parse_pub_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct dev_sf_svc_attr *attr = &cap->sf_svc_attr; - - cap->host_id = dev_cap->host_id; - cap->ep_id = dev_cap->ep_id; - cap->er_id = dev_cap->er_id; - cap->port_id = dev_cap->port_id; - - cap->svc_type = dev_cap->svc_cap_en; - cap->chip_svc_type = cap->svc_type; - - cap->cos_valid_bitmap = dev_cap->valid_cos_bitmap; - cap->flexq_en = dev_cap->flexq_en; - - cap->host_total_function = dev_cap->host_total_func; - - if (type != TYPE_VF) { - cap->max_vf = dev_cap->max_vf; - cap->pf_num = dev_cap->host_pf_num; - cap->pf_id_start = dev_cap->pf_id_start; - cap->vf_num = dev_cap->host_vf_num; - cap->vf_id_start = dev_cap->vf_id_start; - } else { - cap->max_vf = 0; - } - - if (dev_cap->sf_svc_attr & SF_SVC_FT_BIT) - attr->ft_en = true; - else - attr->ft_en = false; - - if (dev_cap->sf_svc_attr & SF_SVC_RDMA_BIT) - attr->rdma_en = true; - else - attr->rdma_en = false; - - /* PPF will overwrite it when parsing dynamic resources */ - if (dev_cap->func_sf_en) - cap->sf_en = true; - else - cap->sf_en = false; - - cap->lb_mode = dev_cap->lb_mode; - cap->smf_pg = dev_cap->smf_pg; - - cap->timer_en = (u8)timer_enable; /* timer enable */ - cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val; - cap->max_connect_num = dev_cap->max_conn_num; - cap->max_stick2cache_num = dev_cap->max_stick2cache_num; - cap->bfilter_start_addr = dev_cap->max_bfilter_start_addr; - cap->bfilter_len = dev_cap->bfilter_len; - cap->hash_bucket_num = dev_cap->hash_bucket_num; - - sdk_info(hwdev->dev_hdl, "Get public resource capability: svc_cap_en: 0x%x\n", - cap->svc_type); - sdk_info(hwdev->dev_hdl, "Host_id: 0x%x, ep_id: 0x%x, er_id: 0x%x, port_id: 0x%x, cos_bitmap: 0x%x, flexq: 0x%x\n", - cap->host_id, cap->ep_id, cap->er_id, cap->port_id, - cap->cos_valid_bitmap, cap->flexq_en); - sdk_info(hwdev->dev_hdl, "Host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x\n", - cap->host_total_function, cap->host_oq_id_mask_val, - cap->max_vf); - sdk_info(hwdev->dev_hdl, "Host_pf_num: 0x%x, pf_id_start: 0x%x, host_vf_num: 0x%x, vf_id_start: 0x%x\n", - cap->pf_num, cap->pf_id_start, cap->vf_num, cap->vf_id_start); -} - -static void parse_dynamic_share_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct host_shared_resource_cap *shared_cap = &cap->shared_res_cap; - - if (dev_cap->host_sf_en) - cap->sf_en = true; - else - cap->sf_en = false; - - shared_cap->host_pctxs = dev_cap->host_pctx_num; - shared_cap->host_cctxs = dev_cap->host_ccxt_num; - shared_cap->host_scqs = dev_cap->host_scq_num; - shared_cap->host_srqs = dev_cap->host_srq_num; - shared_cap->host_mpts = dev_cap->host_mpt_num; - - sdk_info(hwdev->dev_hdl, "Dynamic share resource capability:\n"); - sdk_info(hwdev->dev_hdl, "host_pctxs: 0x%x, host_cctxs: 0x%x, host_scqs: 0x%x, host_srqs: 0x%x, host_mpts: 0x%x\n", - shared_cap->host_pctxs, shared_cap->host_cctxs, - shared_cap->host_scqs, shared_cap->host_srqs, - shared_cap->host_mpts); -} - -static void parse_l2nic_res_cap(struct sphw_hwdev *hwdev, 
- struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct nic_service_cap *nic_cap = &cap->nic_cap; - - nic_cap->max_sqs = dev_cap->nic_max_sq_id + 1; - nic_cap->max_rqs = dev_cap->nic_max_rq_id + 1; - - sdk_info(hwdev->dev_hdl, "L2nic resource capability, max_sqs: 0x%x, max_rqs: 0x%x\n", - nic_cap->max_sqs, nic_cap->max_rqs); - - /* Check parameters from firmware */ - if (nic_cap->max_sqs > SPHW_CFG_MAX_QP || - nic_cap->max_rqs > SPHW_CFG_MAX_QP) { - sdk_info(hwdev->dev_hdl, "Number of qp exceeds limit [1-%d]: sq: %u, rq: %u\n", - SPHW_CFG_MAX_QP, nic_cap->max_sqs, nic_cap->max_rqs); - nic_cap->max_sqs = SPHW_CFG_MAX_QP; - nic_cap->max_rqs = SPHW_CFG_MAX_QP; - } -} - -static void parse_fc_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap; - - fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx; - fc_cap->scq_num = dev_cap->fc_max_scq; - fc_cap->srq_num = dev_cap->fc_max_srq; - fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx; - fc_cap->child_qpc_id_start = dev_cap->fc_cctx_id_start; - fc_cap->vp_id_start = dev_cap->fc_vp_id_start; - fc_cap->vp_id_end = dev_cap->fc_vp_id_end; - - sdk_info(hwdev->dev_hdl, "Get fc resource capability\n"); - sdk_info(hwdev->dev_hdl, "Max_parent_qpc_num: 0x%x, scq_num: 0x%x, srq_num: 0x%x, max_child_qpc_num: 0x%x, child_qpc_id_start: 0x%x\n", - fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num, - fc_cap->max_child_qpc_num, fc_cap->child_qpc_id_start); - sdk_info(hwdev->dev_hdl, "Vp_id_start: 0x%x, vp_id_end: 0x%x\n", - fc_cap->vp_id_start, fc_cap->vp_id_end); -} - -static void parse_roce_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct dev_roce_svc_own_cap *roce_cap = - &cap->rdma_cap.dev_rdma_cap.roce_own_cap; - - roce_cap->max_qps = dev_cap->roce_max_qp; - roce_cap->max_cqs = dev_cap->roce_max_cq; - roce_cap->max_srqs = dev_cap->roce_max_srq; - roce_cap->max_mpts = dev_cap->roce_max_mpt; - roce_cap->max_drc_qps = dev_cap->roce_max_drc_qp; - - roce_cap->wqe_cl_start = dev_cap->roce_wqe_cl_start; - roce_cap->wqe_cl_end = dev_cap->roce_wqe_cl_end; - roce_cap->wqe_cl_sz = dev_cap->roce_wqe_cl_size; - - sdk_info(hwdev->dev_hdl, "Get roce resource capability, type: 0x%x\n", - type); - sdk_info(hwdev->dev_hdl, "Max_qps: 0x%x, max_cqs: 0x%x, max_srqs: 0x%x, max_mpts: 0x%x, max_drcts: 0x%x\n", - roce_cap->max_qps, roce_cap->max_cqs, roce_cap->max_srqs, - roce_cap->max_mpts, roce_cap->max_drc_qps); - - sdk_info(hwdev->dev_hdl, "Wqe_start: 0x%x, wqe_end: 0x%x, wqe_sz: 0x%x\n", - roce_cap->wqe_cl_start, roce_cap->wqe_cl_end, - roce_cap->wqe_cl_sz); - - if (roce_cap->max_qps == 0) { - if (type == TYPE_PF || type == TYPE_PPF) { - roce_cap->max_qps = 1024; - roce_cap->max_cqs = 2048; - roce_cap->max_srqs = 1024; - roce_cap->max_mpts = 1024; - roce_cap->max_drc_qps = 64; - } else { - roce_cap->max_qps = 512; - roce_cap->max_cqs = 1024; - roce_cap->max_srqs = 512; - roce_cap->max_mpts = 512; - roce_cap->max_drc_qps = 64; - } - } -} - -static void parse_rdma_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct dev_roce_svc_own_cap *roce_cap = - &cap->rdma_cap.dev_rdma_cap.roce_own_cap; - - roce_cap->cmtt_cl_start = dev_cap->roce_cmtt_cl_start; - roce_cap->cmtt_cl_end = dev_cap->roce_cmtt_cl_end; - roce_cap->cmtt_cl_sz = dev_cap->roce_cmtt_cl_size; - 
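	/*
	 * Editorial note, not part of the original source: firmware reports
	 * two RDMA MTT cache-line windows, cmtt (copied above) and dmtt
	 * (copied below); each window is a start/end cache-line range plus
	 * an entry size, logged via sdk_info() for debugging.
	 */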
- roce_cap->dmtt_cl_start = dev_cap->roce_dmtt_cl_start; - roce_cap->dmtt_cl_end = dev_cap->roce_dmtt_cl_end; - roce_cap->dmtt_cl_sz = dev_cap->roce_dmtt_cl_size; - - sdk_info(hwdev->dev_hdl, "Get rdma resource capability, Cmtt_start: 0x%x, cmtt_end: 0x%x, cmtt_sz: 0x%x\n", - roce_cap->cmtt_cl_start, roce_cap->cmtt_cl_end, - roce_cap->cmtt_cl_sz); - - sdk_info(hwdev->dev_hdl, "Dmtt_start: 0x%x, dmtt_end: 0x%x, dmtt_sz: 0x%x\n", - roce_cap->dmtt_cl_start, roce_cap->dmtt_cl_end, - roce_cap->dmtt_cl_sz); -} - -static void parse_ovs_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct ovs_service_cap *ovs_cap = &cap->ovs_cap; - - ovs_cap->dev_ovs_cap.max_pctxs = dev_cap->ovs_max_qpc; - ovs_cap->dev_ovs_cap.fake_vf_start_id = dev_cap->fake_vf_start_id; - ovs_cap->dev_ovs_cap.fake_vf_num = dev_cap->fake_vf_num; - ovs_cap->dev_ovs_cap.dynamic_qp_en = dev_cap->flexq_en; - - sdk_info(hwdev->dev_hdl, "Get ovs resource capability, max_qpc: 0x%x, fake_vf_start_id: 0x%x, fake_vf_num: 0x%x, dynamic_qp_en: 0x%x\n", - ovs_cap->dev_ovs_cap.max_pctxs, - ovs_cap->dev_ovs_cap.fake_vf_start_id, - ovs_cap->dev_ovs_cap.fake_vf_num, - ovs_cap->dev_ovs_cap.dynamic_qp_en); -} - -static void parse_toe_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct dev_toe_svc_cap *toe_cap = &cap->toe_cap.dev_toe_cap; - - toe_cap->max_pctxs = dev_cap->toe_max_pctx; - toe_cap->max_cqs = dev_cap->toe_max_cq; - toe_cap->max_srqs = dev_cap->toe_max_srq; - toe_cap->srq_id_start = dev_cap->toe_srq_id_start; - toe_cap->max_mpts = dev_cap->toe_max_mpt; - - sdk_info(hwdev->dev_hdl, "Get toe resource capability, max_pctxs: 0x%x, max_cqs: 0x%x, max_srqs: 0x%x, srq_id_start: 0x%x, max_mpts: 0x%x\n", - toe_cap->max_pctxs, toe_cap->max_cqs, toe_cap->max_srqs, - toe_cap->srq_id_start, toe_cap->max_mpts); -} - -static void parse_ipsec_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct ipsec_service_cap *ipsec_cap = &cap->ipsec_cap; - - ipsec_cap->dev_ipsec_cap.max_sa_ctxs = dev_cap->ipsec_max_sactx; - - sdk_info(hwdev->dev_hdl, "Get IPsec resource capability, max_sa_ctxs: 0x%x\n", - ipsec_cap->dev_ipsec_cap.max_sa_ctxs); -} - -static void parse_dev_cap(struct sphw_hwdev *dev, - struct cfg_cmd_dev_cap *dev_cap, enum func_type type) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - - /* Public resource */ - parse_pub_res_cap(dev, cap, dev_cap, type); - - /* PPF managed dynamic resource */ - if (type == TYPE_PPF) - parse_dynamic_share_res_cap(dev, cap, dev_cap, type); - - /* L2 NIC resource */ - if (IS_NIC_TYPE(dev)) - parse_l2nic_res_cap(dev, cap, dev_cap, type); - - /* FC without virtualization */ - if (type == TYPE_PF || type == TYPE_PPF) { - if (IS_FC_TYPE(dev)) - parse_fc_res_cap(dev, cap, dev_cap, type); - } - - /* toe resource */ - if (IS_TOE_TYPE(dev)) - parse_toe_res_cap(dev, cap, dev_cap, type); - - /* mtt cache line */ - if (IS_RDMA_ENABLE(dev)) - parse_rdma_res_cap(dev, cap, dev_cap, type); - - /* RoCE resource */ - if (IS_ROCE_TYPE(dev)) - parse_roce_res_cap(dev, cap, dev_cap, type); - - if (IS_OVS_TYPE(dev)) - parse_ovs_res_cap(dev, cap, dev_cap, type); - - if (IS_IPSEC_TYPE(dev)) - parse_ipsec_res_cap(dev, cap, dev_cap, type); -} - -static int get_cap_from_fw(struct sphw_hwdev *dev, enum func_type type) -{ - struct cfg_cmd_dev_cap dev_cap; - u16 out_len = sizeof(dev_cap); - int err; - - 
memset(&dev_cap, 0, sizeof(dev_cap)); - dev_cap.func_id = sphw_global_func_id(dev); - sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %u\n", - dev_cap.func_id); - - err = sphw_msg_to_mgmt_sync(dev, SPHW_MOD_CFGM, CFG_CMD_GET_DEV_CAP, - &dev_cap, sizeof(dev_cap), &dev_cap, &out_len, 0, - SPHW_CHANNEL_COMM); - if (err || dev_cap.head.status || !out_len) { - sdk_err(dev->dev_hdl, - "Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n", - err, dev_cap.head.status, out_len); - return -EIO; - } - - parse_dev_cap(dev, &dev_cap, type); - - return 0; -} - -int sphw_get_dev_cap(void *dev) -{ - struct sphw_hwdev *hwdev = dev; - enum func_type type = SPHW_FUNC_TYPE(hwdev); - int err; - - switch (type) { - case TYPE_PF: - case TYPE_PPF: - case TYPE_VF: - err = get_cap_from_fw(hwdev, type); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to get PF/PPF capability\n"); - return err; - } - break; - default: - sdk_err(hwdev->dev_hdl, "Unsupported PCI Function type: %d\n", - type); - return -EINVAL; - } - - return 0; -} - -static void nic_param_fix(struct sphw_hwdev *dev) -{ -} - -static void rdma_mtt_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct rdma_service_cap *rdma_cap = &cap->rdma_cap; - - rdma_cap->log_mtt = LOG_MTT_SEG; - rdma_cap->log_mtt_seg = LOG_MTT_SEG; - rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ; - rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; - rdma_cap->num_mtts = (g_rdma_mtts_num > 0 ? - g_rdma_mtts_num : RDMA_NUM_MTTS); -} - -static void rdma_param_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct rdma_service_cap *rdma_cap = &cap->rdma_cap; - struct dev_roce_svc_own_cap *roce_cap = - &rdma_cap->dev_rdma_cap.roce_own_cap; - - rdma_cap->log_mtt = LOG_MTT_SEG; - rdma_cap->log_rdmarc = LOG_RDMARC_SEG; - rdma_cap->reserved_qps = RDMA_RSVD_QPS; - rdma_cap->max_sq_sg = RDMA_MAX_SQ_SGE; - - /* RoCE */ - if (IS_ROCE_TYPE(dev)) { - roce_cap->qpc_entry_sz = ROCE_QPC_ENTRY_SZ; - roce_cap->max_wqes = ROCE_MAX_WQES; - roce_cap->max_rq_sg = ROCE_MAX_RQ_SGE; - roce_cap->max_sq_inline_data_sz = ROCE_MAX_SQ_INLINE_DATA_SZ; - roce_cap->max_rq_desc_sz = ROCE_MAX_RQ_DESC_SZ; - roce_cap->rdmarc_entry_sz = ROCE_RDMARC_ENTRY_SZ; - roce_cap->max_qp_init_rdma = ROCE_MAX_QP_INIT_RDMA; - roce_cap->max_qp_dest_rdma = ROCE_MAX_QP_DEST_RDMA; - roce_cap->max_srq_wqes = ROCE_MAX_SRQ_WQES; - roce_cap->reserved_srqs = ROCE_RSVD_SRQS; - roce_cap->max_srq_sge = ROCE_MAX_SRQ_SGE; - roce_cap->srqc_entry_sz = ROCE_SRQC_ENTERY_SZ; - roce_cap->max_msg_sz = ROCE_MAX_MSG_SZ; - } - - rdma_cap->max_sq_desc_sz = RDMA_MAX_SQ_DESC_SZ; - rdma_cap->wqebb_size = WQEBB_SZ; - rdma_cap->max_cqes = RDMA_MAX_CQES; - rdma_cap->reserved_cqs = RDMA_RSVD_CQS; - rdma_cap->cqc_entry_sz = RDMA_CQC_ENTRY_SZ; - rdma_cap->cqe_size = RDMA_CQE_SZ; - rdma_cap->reserved_mrws = RDMA_RSVD_MRWS; - rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; - - /* 2^8 - 1 - * +------------------------+-----------+ - * | 4B | 1M(20b) | Key(8b) | - * +------------------------+-----------+ - * key = 8bit key + 24bit index, - * now Lkey of SGE uses 2bit(bit31 and bit30), so key only have 10bit, - * we use original 8bits directly for simpilification - */ - rdma_cap->max_fmr_maps = 255; - rdma_cap->num_mtts = (g_rdma_mtts_num > 0 ? 
- g_rdma_mtts_num : RDMA_NUM_MTTS); - rdma_cap->log_mtt_seg = LOG_MTT_SEG; - rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ; - rdma_cap->log_rdmarc_seg = LOG_RDMARC_SEG; - rdma_cap->local_ca_ack_delay = LOCAL_ACK_DELAY; - rdma_cap->num_ports = RDMA_NUM_PORTS; - rdma_cap->db_page_size = DB_PAGE_SZ; - rdma_cap->direct_wqe_size = DWQE_SZ; - rdma_cap->num_pds = NUM_PD; - rdma_cap->reserved_pds = RSVD_PD; - rdma_cap->max_xrcds = MAX_XRCDS; - rdma_cap->reserved_xrcds = RSVD_XRCDS; - rdma_cap->max_gid_per_port = MAX_GID_PER_PORT; - rdma_cap->gid_entry_sz = GID_ENTRY_SZ; - rdma_cap->reserved_lkey = RSVD_LKEY; - rdma_cap->num_comp_vectors = (u32)dev->cfg_mgmt->eq_info.num_ceq; - rdma_cap->page_size_cap = PAGE_SZ_CAP; - rdma_cap->flags = (RDMA_BMME_FLAG_LOCAL_INV | - RDMA_BMME_FLAG_REMOTE_INV | - RDMA_BMME_FLAG_FAST_REG_WR | - RDMA_DEV_CAP_FLAG_XRC | - RDMA_DEV_CAP_FLAG_MEM_WINDOW | - RDMA_BMME_FLAG_TYPE_2_WIN | - RDMA_BMME_FLAG_WIN_TYPE_2B | - RDMA_DEV_CAP_FLAG_ATOMIC); - rdma_cap->max_frpl_len = MAX_FRPL_LEN; - rdma_cap->max_pkeys = MAX_PKEYS; -} - -static void toe_param_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct toe_service_cap *toe_cap = &cap->toe_cap; - - toe_cap->pctx_sz = TOE_PCTX_SZ; - toe_cap->scqc_sz = TOE_CQC_SZ; -} - -static void ovs_param_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct ovs_service_cap *ovs_cap = &cap->ovs_cap; - - ovs_cap->pctx_sz = OVS_PCTX_SZ; -} - -static void fc_param_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct fc_service_cap *fc_cap = &cap->fc_cap; - - fc_cap->parent_qpc_size = FC_PCTX_SZ; - fc_cap->child_qpc_size = FC_CCTX_SZ; - fc_cap->sqe_size = FC_SQE_SZ; - - fc_cap->scqc_size = FC_SCQC_SZ; - fc_cap->scqe_size = FC_SCQE_SZ; - - fc_cap->srqc_size = FC_SRQC_SZ; - fc_cap->srqe_size = FC_SRQE_SZ; -} - -static void ipsec_param_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct ipsec_service_cap *ipsec_cap = &cap->ipsec_cap; - - ipsec_cap->sactx_sz = IPSEC_SACTX_SZ; -} - -static void init_service_param(struct sphw_hwdev *dev) -{ - if (IS_NIC_TYPE(dev)) - nic_param_fix(dev); - if (IS_RDMA_ENABLE(dev)) - rdma_mtt_fix(dev); - if (IS_ROCE_TYPE(dev)) - rdma_param_fix(dev); - if (IS_FC_TYPE(dev)) - fc_param_fix(dev); - if (IS_TOE_TYPE(dev)) - toe_param_fix(dev); - if (IS_OVS_TYPE(dev)) - ovs_param_fix(dev); - if (IS_IPSEC_TYPE(dev)) - ipsec_param_fix(dev); -} - -static void cfg_get_eq_num(struct sphw_hwdev *dev) -{ - struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info; - - eq_info->num_ceq = dev->hwif->attr.num_ceqs; - eq_info->num_ceq_remain = eq_info->num_ceq; -} - -static int cfg_init_eq(struct sphw_hwdev *dev) -{ - struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; - struct cfg_eq *eq = NULL; - u8 num_ceq, i = 0; - - cfg_get_eq_num(dev); - num_ceq = cfg_mgmt->eq_info.num_ceq; - - sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n", - cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain); - - if (!num_ceq) { - sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n"); - return -EFAULT; - } - - eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL); - if (!eq) - return -ENOMEM; - - for (i = 0; i < num_ceq; ++i) { - eq[i].eqn = i; - eq[i].free = CFG_FREE; - eq[i].type = SERVICE_T_MAX; - } - - cfg_mgmt->eq_info.eq = eq; - - mutex_init(&cfg_mgmt->eq_info.eq_mutex); - - return 0; -} - -int sphw_vector_to_eqn(void *hwdev, enum sphw_service_type type, int vector) -{ - struct sphw_hwdev *dev = hwdev; - 
struct cfg_mgmt_info *cfg_mgmt = NULL; - struct cfg_eq *eq = NULL; - int eqn = -EINVAL; - - if (!hwdev || vector < 0) - return -EINVAL; - - if (type != SERVICE_T_ROCE) { - sdk_err(dev->dev_hdl, - "Service type: %d, only RDMA service could get eqn by vector.\n", - type); - return -EINVAL; - } - - cfg_mgmt = dev->cfg_mgmt; - vector = (vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE; - - eq = cfg_mgmt->eq_info.eq; - if (eq[vector].type == SERVICE_T_ROCE && eq[vector].free == CFG_BUSY) - eqn = eq[vector].eqn; - - return eqn; -} - -static int cfg_init_interrupt(struct sphw_hwdev *dev) -{ - struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; - struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info; - u16 intr_num = dev->hwif->attr.num_irqs; - - if (!intr_num) { - sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n"); - return -EFAULT; - } - irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info), - GFP_KERNEL); - if (!irq_info->alloc_info) - return -ENOMEM; - - irq_info->num_irq_hw = intr_num; - - /* Production requires that VFs support only MSI-X */ - if (SPHW_FUNC_TYPE(dev) == TYPE_VF) - cfg_mgmt->svc_cap.interrupt_type = INTR_TYPE_MSIX; - else - cfg_mgmt->svc_cap.interrupt_type = intr_mode; - - mutex_init(&irq_info->irq_mutex); - return 0; -} - -static int cfg_enable_interrupt(struct sphw_hwdev *dev) -{ - struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; - u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw; - - void *pcidev = dev->pcidev_hdl; - struct irq_alloc_info_st *irq_info = NULL; - struct msix_entry *entry = NULL; - u16 i = 0; - int actual_irq; - - irq_info = cfg_mgmt->irq_param_info.alloc_info; - - sdk_info(dev->dev_hdl, "Interrupt type: %u, irq num: %u.\n", - cfg_mgmt->svc_cap.interrupt_type, nreq); - - switch (cfg_mgmt->svc_cap.interrupt_type) { - case INTR_TYPE_MSIX: - if (!nreq) { - sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n"); - return -EINVAL; - } - entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - for (i = 0; i < nreq; i++) - entry[i].entry = i; - - actual_irq = pci_enable_msix_range(pcidev, entry, - VECTOR_THRESHOLD, nreq); - if (actual_irq < 0) { - sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed. actual_irq: %d\n", - actual_irq); - kfree(entry); - return -ENOMEM; - } - - nreq = (u16)actual_irq; - cfg_mgmt->irq_param_info.num_total = nreq; - cfg_mgmt->irq_param_info.num_irq_remain = nreq; - sdk_info(dev->dev_hdl, "Requested %u msix vectors successfully.\n", - nreq); - - for (i = 0; i < nreq; ++i) { - /* u16 driver uses to specify entry, OS writes */ - irq_info[i].info.msix_entry_idx = entry[i].entry; - /* u32 kernel uses to write allocated vector */ - irq_info[i].info.irq_id = entry[i].vector; - irq_info[i].type = SERVICE_T_MAX; - irq_info[i].free = CFG_FREE; - } - - kfree(entry); - - break; - - default: - sdk_err(dev->dev_hdl, "Unsupported interrupt type %d\n", - cfg_mgmt->svc_cap.interrupt_type); - break; - } - - return 0; -} - -int sphw_alloc_irqs(void *hwdev, enum sphw_service_type type, u16 num, - struct irq_info *irq_info_array, u16 *act_num) -{ - struct sphw_hwdev *dev = hwdev; - struct cfg_mgmt_info *cfg_mgmt = NULL; - struct cfg_irq_info *irq_info = NULL; - struct irq_alloc_info_st *alloc_info = NULL; - int max_num_irq; - u16 free_num_irq; - int i, j; - - if (!hwdev || !irq_info_array || !act_num) - return -EINVAL; - - cfg_mgmt = dev->cfg_mgmt; - irq_info = &cfg_mgmt->irq_param_info; - alloc_info = irq_info->alloc_info; - max_num_irq = irq_info->num_total; - free_num_irq = irq_info->num_irq_remain; - - mutex_lock(&irq_info->irq_mutex); - - if (num > free_num_irq) { - if (free_num_irq == 0) { - sdk_err(dev->dev_hdl, - "No free irq resource in cfg mgmt.\n"); - mutex_unlock(&irq_info->irq_mutex); - return -ENOMEM; - } - - sdk_warn(dev->dev_hdl, "Only %u irq resources in cfg mgmt.\n", - free_num_irq); - num = free_num_irq; - } - - *act_num = 0; - - for (i = 0; i < num; i++) { - for (j = 0; j < max_num_irq; j++) { - if (alloc_info[j].free == CFG_FREE) { - if (irq_info->num_irq_remain == 0) { - sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n"); - mutex_unlock(&irq_info->irq_mutex); - return -EINVAL; - } - alloc_info[j].type = type; - alloc_info[j].free = CFG_BUSY; - - irq_info_array[i].msix_entry_idx = - alloc_info[j].info.msix_entry_idx; - irq_info_array[i].irq_id = - alloc_info[j].info.irq_id; - (*act_num)++; - irq_info->num_irq_remain--; - - break; - } - } - } - - mutex_unlock(&irq_info->irq_mutex); - return 0; -} - -void sphw_free_irq(void *hwdev, enum sphw_service_type type, u32 irq_id) -{ - struct sphw_hwdev *dev = hwdev; - struct cfg_mgmt_info *cfg_mgmt = NULL; - struct cfg_irq_info *irq_info = NULL; - struct irq_alloc_info_st *alloc_info = NULL; - int max_num_irq; - int i; - - if (!hwdev) - return; - - cfg_mgmt = dev->cfg_mgmt; - irq_info = &cfg_mgmt->irq_param_info; - alloc_info = irq_info->alloc_info; - max_num_irq = irq_info->num_total; - - mutex_lock(&irq_info->irq_mutex); - - for (i = 0; i < max_num_irq; i++) { - if (irq_id == alloc_info[i].info.irq_id && - type == alloc_info[i].type) { - if (alloc_info[i].free == CFG_BUSY) { - alloc_info[i].free = CFG_FREE; - irq_info->num_irq_remain++; - if (irq_info->num_irq_remain > max_num_irq) { - sdk_err(dev->dev_hdl, "Found target, but over range\n"); - mutex_unlock(&irq_info->irq_mutex); - return; - } - break; - } - } - } - - if (i >= max_num_irq) - sdk_warn(dev->dev_hdl, "Irq %u doesn't need to be freed\n", irq_id); - - mutex_unlock(&irq_info->irq_mutex); -} - -int sphw_vector_to_irq(void *hwdev, enum sphw_service_type type, int vector) -{ - struct sphw_hwdev *dev = hwdev; - struct cfg_mgmt_info *cfg_mgmt = NULL; - struct irq_alloc_info_st *irq_info = NULL; - int irq = -EINVAL; - - if (!hwdev) - return -EINVAL; - - cfg_mgmt = 
dev->cfg_mgmt; - if (type != SERVICE_T_ROCE) { - sdk_err(dev->dev_hdl, - "Service type: %u, only RDMA service could get eqn by vector\n", - type); - return -EINVAL; - } - - /* Current RDMA CEQ are 2 - 31, will change in the future */ - vector = ((vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE); - - irq_info = cfg_mgmt->irq_param_info.alloc_info; - if (irq_info[vector].type == SERVICE_T_ROCE) - if (irq_info[vector].free == CFG_BUSY) - irq = (int)irq_info[vector].info.irq_id; - - return irq; -} - -int sphw_alloc_ceqs(void *hwdev, enum sphw_service_type type, int num, int *ceq_id_array, - int *act_num) -{ - struct sphw_hwdev *dev = hwdev; - struct cfg_mgmt_info *cfg_mgmt = NULL; - struct cfg_eq_info *eq = NULL; - int free_ceq; - int i, j; - - if (!hwdev || !ceq_id_array || !act_num) - return -EINVAL; - - cfg_mgmt = dev->cfg_mgmt; - eq = &cfg_mgmt->eq_info; - free_ceq = eq->num_ceq_remain; - - mutex_lock(&eq->eq_mutex); - - if (num > free_ceq) { - if (free_ceq <= 0) { - sdk_err(dev->dev_hdl, "No free ceq resource in cfg mgmt\n"); - mutex_unlock(&eq->eq_mutex); - return -ENOMEM; - } - - sdk_warn(dev->dev_hdl, "Only %d ceq resource in cfg mgmt\n", - free_ceq); - } - - *act_num = 0; - - num = min(num, eq->num_ceq - CFG_RDMA_CEQ_BASE); - for (i = 0; i < num; i++) { - if (eq->num_ceq_remain == 0) { - sdk_warn(dev->dev_hdl, "Alloc %d ceqs, less than required %d ceqs\n", - *act_num, num); - mutex_unlock(&eq->eq_mutex); - return 0; - } - - for (j = CFG_RDMA_CEQ_BASE; j < eq->num_ceq; j++) { - if (eq->eq[j].free == CFG_FREE) { - eq->eq[j].type = type; - eq->eq[j].free = CFG_BUSY; - eq->num_ceq_remain--; - ceq_id_array[i] = eq->eq[j].eqn; - (*act_num)++; - break; - } - } - } - - mutex_unlock(&eq->eq_mutex); - return 0; -} - -void sphw_free_ceq(void *hwdev, enum sphw_service_type type, int ceq_id) -{ - struct sphw_hwdev *dev = hwdev; - struct cfg_mgmt_info *cfg_mgmt = NULL; - struct cfg_eq_info *eq = NULL; - u8 num_ceq; - u8 i = 0; - - if (!hwdev) - return; - - cfg_mgmt = dev->cfg_mgmt; - eq = &cfg_mgmt->eq_info; - num_ceq = eq->num_ceq; - - mutex_lock(&eq->eq_mutex); - - for (i = 0; i < num_ceq; i++) { - if (ceq_id == eq->eq[i].eqn && - type == cfg_mgmt->eq_info.eq[i].type) { - if (eq->eq[i].free == CFG_BUSY) { - eq->eq[i].free = CFG_FREE; - eq->num_ceq_remain++; - if (eq->num_ceq_remain > num_ceq) - eq->num_ceq_remain %= num_ceq; - - mutex_unlock(&eq->eq_mutex); - return; - } - } - } - - if (i >= num_ceq) - sdk_warn(dev->dev_hdl, "ceq %d don`t need to free.\n", ceq_id); - - mutex_unlock(&eq->eq_mutex); -} - -int init_cfg_mgmt(struct sphw_hwdev *dev) -{ - int err; - struct cfg_mgmt_info *cfg_mgmt; - - cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); - if (!cfg_mgmt) - return -ENOMEM; - - dev->cfg_mgmt = cfg_mgmt; - cfg_mgmt->hwdev = dev; - - err = cfg_init_eq(dev); - if (err) { - sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n", - err); - goto free_mgmt_mem; - } - - err = cfg_init_interrupt(dev); - if (err) { - sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n", - err); - goto free_eq_mem; - } - - err = cfg_enable_interrupt(dev); - if (err) { - sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n", - err); - goto free_interrupt_mem; - } - - return 0; - -free_interrupt_mem: - kfree(cfg_mgmt->irq_param_info.alloc_info); - cfg_mgmt->irq_param_info.alloc_info = NULL; - -free_eq_mem: - kfree(cfg_mgmt->eq_info.eq); - cfg_mgmt->eq_info.eq = NULL; - -free_mgmt_mem: - kfree(cfg_mgmt); - return err; -} - -void free_cfg_mgmt(struct sphw_hwdev *dev) -{ - struct 
cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; - - /* if the allocated resource were recycled */ - if (cfg_mgmt->irq_param_info.num_irq_remain != - cfg_mgmt->irq_param_info.num_total || - cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq) - sdk_err(dev->dev_hdl, "Can't reclaim all irq and event queue, please check\n"); - - switch (cfg_mgmt->svc_cap.interrupt_type) { - case INTR_TYPE_MSIX: - pci_disable_msix(dev->pcidev_hdl); - break; - - case INTR_TYPE_MSI: - pci_disable_msi(dev->pcidev_hdl); - break; - - case INTR_TYPE_INT: - default: - break; - } - - kfree(cfg_mgmt->irq_param_info.alloc_info); - cfg_mgmt->irq_param_info.alloc_info = NULL; - - kfree(cfg_mgmt->eq_info.eq); - cfg_mgmt->eq_info.eq = NULL; - - kfree(cfg_mgmt); -} - -int init_capability(struct sphw_hwdev *dev) -{ - int err; - struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; - - set_cfg_test_param(cfg_mgmt); - - cfg_mgmt->svc_cap.sf_svc_attr.ft_pf_en = false; - cfg_mgmt->svc_cap.sf_svc_attr.rdma_pf_en = false; - - err = sphw_get_dev_cap(dev); - if (err) - return err; - - init_service_param(dev); - - sdk_info(dev->dev_hdl, "Init capability success\n"); - return 0; -} - -void free_capability(struct sphw_hwdev *dev) -{ - sdk_info(dev->dev_hdl, "Free capability success"); -} - -bool sphw_support_nic(void *hwdev, struct nic_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_NIC_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap)); - - return true; -} - -bool sphw_support_ipsec(void *hwdev, struct ipsec_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_IPSEC_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.ipsec_cap, sizeof(*cap)); - - return true; -} - -bool sphw_support_roce(void *hwdev, struct rdma_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_ROCE_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); - - return true; -} - -bool sphw_support_fc(void *hwdev, struct fc_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_FC_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap)); - - return true; -} - -bool sphw_support_rdma(void *hwdev, struct rdma_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_RDMA_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); - - return true; -} - -bool sphw_support_ovs(void *hwdev, struct ovs_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_OVS_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.ovs_cap, sizeof(*cap)); - - return true; -} - -/* Only PPF support it, PF is not */ -bool sphw_support_toe(void *hwdev, struct toe_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_TOE_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.toe_cap, sizeof(*cap)); - - return true; -} - -bool sphw_func_for_mgmt(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (dev->cfg_mgmt->svc_cap.chip_svc_type >= CFG_SVC_NIC_BIT0) - return false; - else - return true; -} - -bool sphw_get_stateful_enable(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return 
false; - - return dev->cfg_mgmt->svc_cap.sf_en; -} - -u8 sphw_host_oq_id_mask(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting host oq id mask\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val; -} - -u8 sphw_host_id(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting host id\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.host_id; -} - -u16 sphw_host_total_func(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting host total function number\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.host_total_function; -} - -u16 sphw_func_max_qnum(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting function max queue number\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; -} - -u16 sphw_func_max_nic_qnum(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting function max queue number\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; -} - -u8 sphw_ep_id(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting ep id\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.ep_id; -} - -u8 sphw_er_id(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting er id\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.er_id; -} - -u8 sphw_physical_port_id(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting physical port id\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.port_id; -} - -u16 sphw_func_max_vf(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting max vf number\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.max_vf; -} - -u8 sphw_cos_valid_bitmap(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting cos valid bitmap\n"); - return 0; - } - return (u8)(dev->cfg_mgmt->svc_cap.cos_valid_bitmap); -} - -void sphw_shutdown_hwdev(void *hwdev) -{ - /* to do : if IS_SLAVE_HOST*/ -} - -u32 sphw_host_pf_num(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting pf number capability\n"); - return 0; - } - - return dev->cfg_mgmt->svc_cap.pf_num; -} - -u8 sphw_flexq_en(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return 0; - - return dev->cfg_mgmt->svc_cap.flexq_en; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h deleted file mode 100644 index 1b48e0991563eb09af035c3a55e385ed60cf323a..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h +++ /dev/null @@ -1,329 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_HW_CFG_H -#define SPHW_HW_CFG_H - -#include - -#define CFG_MAX_CMD_TIMEOUT 30000 /* ms */ - -enum { - CFG_FREE = 0, - CFG_BUSY = 1 -}; - -/* start position for CEQs allocation, Max number of CEQs is 32 */ -enum { - CFG_RDMA_CEQ_BASE = 0 -}; - -enum { - SF_SVC_FT_BIT = (1 << 0), - SF_SVC_RDMA_BIT = (1 << 1), -}; - -/* RDMA resource */ -#define K_UNIT BIT(10) -#define M_UNIT BIT(20) -#define G_UNIT BIT(30) - -/* 
L2NIC */ -#define SPHW_CFG_MAX_QP 256 - -/* RDMA */ -#define RDMA_RSVD_QPS 2 -#define ROCE_MAX_WQES (32 * K_UNIT - 1) -#define IWARP_MAX_WQES (8 * K_UNIT) - -#define RDMA_MAX_SQ_SGE 32 - -#define ROCE_MAX_RQ_SGE 16 - -#define RDMA_MAX_SQ_DESC_SZ (1 * K_UNIT) - -/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 64B(max_task_seg_len)) */ -#define ROCE_MAX_SQ_INLINE_DATA_SZ 912 - -#define ROCE_MAX_RQ_DESC_SZ 256 - -#define ROCE_QPC_ENTRY_SZ 512 - -#define WQEBB_SZ 64 - -#define ROCE_RDMARC_ENTRY_SZ 32 -#define ROCE_MAX_QP_INIT_RDMA 128 -#define ROCE_MAX_QP_DEST_RDMA 128 - -#define ROCE_MAX_SRQ_WQES (16 * K_UNIT - 1) -#define ROCE_RSVD_SRQS 0 -#define ROCE_MAX_SRQ_SGE 15 -#define ROCE_SRQC_ENTERY_SZ 64 - -#define RDMA_MAX_CQES (8 * M_UNIT - 1) -#define RDMA_RSVD_CQS 0 - -#define RDMA_CQC_ENTRY_SZ 128 - -#define RDMA_CQE_SZ 64 -#define RDMA_RSVD_MRWS 128 -#define RDMA_MPT_ENTRY_SZ 64 -#define RDMA_NUM_MTTS (1 * G_UNIT) -#define LOG_MTT_SEG 5 -#define MTT_ENTRY_SZ 8 -#define LOG_RDMARC_SEG 3 - -#define LOCAL_ACK_DELAY 15 -#define RDMA_NUM_PORTS 1 -#define ROCE_MAX_MSG_SZ (2 * G_UNIT) - -#define DB_PAGE_SZ (4 * K_UNIT) -#define DWQE_SZ 256 - -#define NUM_PD (128 * K_UNIT) -#define RSVD_PD 0 - -#define MAX_XRCDS (64 * K_UNIT) -#define RSVD_XRCDS 0 - -#define MAX_GID_PER_PORT 128 -#define GID_ENTRY_SZ 32 -#define RSVD_LKEY ((RDMA_RSVD_MRWS - 1) << 8) -#define NUM_COMP_VECTORS 32 -#define PAGE_SZ_CAP ((1UL << 12) | (1UL << 16) | (1UL << 21)) -#define ROCE_MODE 1 - -#define MAX_FRPL_LEN 511 -#define MAX_PKEYS 1 - -/* ToE */ -#define TOE_PCTX_SZ 1024 -#define TOE_CQC_SZ 64 - -/* IoE */ -#define IOE_PCTX_SZ 512 - -/* FC */ -#define FC_PCTX_SZ 256 -#define FC_CCTX_SZ 256 -#define FC_SQE_SZ 128 -#define FC_SCQC_SZ 64 -#define FC_SCQE_SZ 64 -#define FC_SRQC_SZ 64 -#define FC_SRQE_SZ 32 - -/* OVS */ -#define OVS_PCTX_SZ 512 - -/* IPsec */ -#define IPSEC_SACTX_SZ 512 - -struct dev_sf_svc_attr { - bool ft_en; /* business enable flag (not include RDMA) */ - bool ft_pf_en; /* In FPGA Test VF resource is in PF or not, - * 0 - VF, 1 - PF, VF doesn't need this bit. - */ - bool rdma_en; - bool rdma_pf_en;/* In FPGA Test VF RDMA resource is in PF or not, - * 0 - VF, 1 - PF, VF doesn't need this bit. 
- */ -}; - -struct host_shared_resource_cap { - u32 host_pctxs; /* Parent Context max 1M, IOE and FCoE max 8K flows */ - u32 host_cctxs; /* Child Context: max 8K */ - u32 host_scqs; /* shared CQ, chip interface module uses 1 SCQ - * TOE/IOE/FCoE each uses 1 SCQ - * RoCE/IWARP uses multiple SCQs - * So 6 SCQ least - */ - u32 host_srqs; /* SRQ number: 256K */ - u32 host_mpts; /* MR number:1M */ -}; - -enum intr_type { - INTR_TYPE_MSIX, - INTR_TYPE_MSI, - INTR_TYPE_INT, - INTR_TYPE_NONE, - /* PXE,OVS need single thread processing, - * synchronization messages must use poll wait mechanism interface - */ -}; - -/* service type relates define */ -enum cfg_svc_type_en { - CFG_SVC_NIC_BIT0 = (1 << 0), - CFG_SVC_ROCE_BIT1 = (1 << 1), - CFG_SVC_VBS_BIT2 = (1 << 2), - CFG_SVC_TOE_BIT3 = (1 << 3), - CFG_SVC_IPSEC_BIT4 = (1 << 4), - CFG_SVC_FC_BIT5 = (1 << 5), - CFG_SVC_VIRTIO_BIT6 = (1 << 6), - CFG_SVC_OVS_BIT7 = (1 << 7), - CFG_SVC_RSV2_BIT8 = (1 << 8), - CFG_SVC_IOE_BIT9 = (1 << 9), - - CFG_SVC_FT_EN = (CFG_SVC_VBS_BIT2 | CFG_SVC_TOE_BIT3 | - CFG_SVC_IPSEC_BIT4 | CFG_SVC_FC_BIT5 | - CFG_SVC_VIRTIO_BIT6 | CFG_SVC_OVS_BIT7 | - CFG_SVC_IOE_BIT9), - CFG_SVC_RDMA_EN = CFG_SVC_ROCE_BIT1 -}; - -/* device capability */ -struct service_cap { - struct dev_sf_svc_attr sf_svc_attr; - enum cfg_svc_type_en svc_type; /* user input service type */ - enum cfg_svc_type_en chip_svc_type; /* HW supported service type */ - - u8 host_id; - u8 ep_id; - u8 er_id; /* PF/VF's ER */ - u8 port_id; /* PF/VF's physical port */ - - /* Host global resources */ - u16 host_total_function; - u8 pf_num; - u8 pf_id_start; - u16 vf_num; /* max numbers of vf in current host */ - u16 vf_id_start; - u8 host_oq_id_mask_val; - - u8 flexq_en; - u8 cos_valid_bitmap; - u16 max_vf; /* max VF number that PF supported */ - - /* DO NOT get interrupt_type from firmware */ - enum intr_type interrupt_type; - - bool sf_en; /* stateful business status */ - u8 timer_en; /* 0:disable, 1:enable */ - u8 bloomfilter_en; /* 0:disable, 1:enable*/ - - u8 lb_mode; - u8 smf_pg; - - /* For test */ - u32 test_mode; - u32 test_qpc_num; - u32 test_qpc_resvd_num; - u32 test_page_size_reorder; - bool test_xid_alloc_mode; - bool test_gpa_check_enable; - u8 test_qpc_alloc_mode; - u8 test_scqc_alloc_mode; - - u32 test_max_conn_num; - u32 test_max_cache_conn_num; - u32 test_scqc_num; - u32 test_mpt_num; - u32 test_scq_resvd_num; - u32 test_mpt_recvd_num; - u32 test_hash_num; - u32 test_reorder_num; - - u32 max_connect_num; /* PF/VF maximum connection number(1M) */ - /* The maximum connections which can be stick to cache memory, max 1K */ - u16 max_stick2cache_num; - /* Starting address in cache memory for bloom filter, 64Bytes aligned */ - u16 bfilter_start_addr; - /* Length for bloom filter, aligned on 64Bytes. The size is length*64B. - * Bloom filter memory size + 1 must be power of 2. - * The maximum memory size of bloom filter is 4M - */ - u16 bfilter_len; - /* The size of hash bucket tables, align on 64 entries. - * Be used to AND (&) the hash value. Bucket Size +1 must be power of 2. 
- * The maximum number of hash bucket is 4M - */ - u16 hash_bucket_num; - - struct host_shared_resource_cap shared_res_cap; /* shared capability */ - struct nic_service_cap nic_cap; /* NIC capability */ - struct rdma_service_cap rdma_cap; /* RDMA capability */ - struct fc_service_cap fc_cap; /* FC capability */ - struct toe_service_cap toe_cap; /* ToE capability */ - struct ovs_service_cap ovs_cap; /* OVS capability */ - struct ipsec_service_cap ipsec_cap; /* IPsec capability */ -}; - -struct cfg_eq { - enum sphw_service_type type; - int eqn; - int free; /* 1 - alocated, 0- freed */ -}; - -struct cfg_eq_info { - struct cfg_eq *eq; - - u8 num_ceq; - - u8 num_ceq_remain; - - /* mutex used for allocate EQs */ - struct mutex eq_mutex; -}; - -struct irq_alloc_info_st { - enum sphw_service_type type; - int free; /* 1 - alocated, 0- freed */ - struct irq_info info; -}; - -struct cfg_irq_info { - struct irq_alloc_info_st *alloc_info; - u16 num_total; - u16 num_irq_remain; - u16 num_irq_hw; /* device max irq number */ - - /* mutex used for allocate EQs */ - struct mutex irq_mutex; -}; - -#define VECTOR_THRESHOLD 2 - -struct cfg_mgmt_info { - struct sphw_hwdev *hwdev; - struct service_cap svc_cap; - struct cfg_eq_info eq_info; /* EQ */ - struct cfg_irq_info irq_param_info; /* IRQ */ - u32 func_seq_num; /* temporary */ -}; - -#define IS_NIC_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_NIC_BIT0) -#define IS_ROCE_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_ROCE_BIT1) -#define IS_FCOE_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FCOE_BIT2) -#define IS_TOE_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_TOE_BIT3) -#define IS_IPSEC_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_IPSEC_BIT4) -#define IS_FC_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FC_BIT5) -#define IS_FIC_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FIC_BIT6) -#define IS_OVS_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_OVS_BIT7) -#define IS_ACL_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_ACL_BIT8) -#define IS_IOE_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_IOE_BIT9) -#define IS_FT_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FT_EN) -#define IS_RDMA_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_RDMA_EN) -#define IS_RDMA_ENABLE(dev) \ - ((dev)->cfg_mgmt->svc_cap.sf_svc_attr.rdma_en) - -int init_cfg_mgmt(struct sphw_hwdev *dev); - -void free_cfg_mgmt(struct sphw_hwdev *dev); - -int init_capability(struct sphw_hwdev *dev); - -void free_capability(struct sphw_hwdev *dev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c deleted file mode 100644 index b868bf8ed1cb6eecfb23b2158d97018e112d2fb3..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c +++ /dev/null @@ -1,1280 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_csr.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" -#include "sphw_wq.h" -#include "sphw_cmdq.h" -#include "sphw_comm_msg_intf.h" -#include "sphw_hw_comm.h" - -#define 
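/*
 * The IS_*_TYPE() macros at the end of sphw_hw_cfg.h above are single-bit
 * tests on svc_cap.chip_svc_type, so one device can report several
 * services at once. Illustrative values (for the example only):
 *
 *	chip_svc_type = CFG_SVC_NIC_BIT0 | CFG_SVC_ROCE_BIT1;	// 0x3
 *	IS_NIC_TYPE(dev)  -> 0x1 (true)
 *	IS_ROCE_TYPE(dev) -> 0x2 (true)
 *	IS_TOE_TYPE(dev)  -> 0x0 (false)
 */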
SPHW_MSIX_CNT_LLI_TIMER_SHIFT 0 -#define SPHW_MSIX_CNT_LLI_CREDIT_SHIFT 8 -#define SPHW_MSIX_CNT_COALESC_TIMER_SHIFT 8 -#define SPHW_MSIX_CNT_PENDING_SHIFT 8 -#define SPHW_MSIX_CNT_RESEND_TIMER_SHIFT 29 - -#define SPHW_MSIX_CNT_LLI_TIMER_MASK 0xFFU -#define SPHW_MSIX_CNT_LLI_CREDIT_MASK 0xFFU -#define SPHW_MSIX_CNT_COALESC_TIMER_MASK 0xFFU -#define SPHW_MSIX_CNT_PENDING_MASK 0x1FU -#define SPHW_MSIX_CNT_RESEND_TIMER_MASK 0x7U - -#define SPHW_MSIX_CNT_SET(val, member) \ - (((val) & SPHW_MSIX_CNT_##member##_MASK) << \ - SPHW_MSIX_CNT_##member##_SHIFT) - -#define DEFAULT_RX_BUF_SIZE ((u16)0xB) - -enum sphw_rx_buf_size { - SPHW_RX_BUF_SIZE_32B = 0x20, - SPHW_RX_BUF_SIZE_64B = 0x40, - SPHW_RX_BUF_SIZE_96B = 0x60, - SPHW_RX_BUF_SIZE_128B = 0x80, - SPHW_RX_BUF_SIZE_192B = 0xC0, - SPHW_RX_BUF_SIZE_256B = 0x100, - SPHW_RX_BUF_SIZE_384B = 0x180, - SPHW_RX_BUF_SIZE_512B = 0x200, - SPHW_RX_BUF_SIZE_768B = 0x300, - SPHW_RX_BUF_SIZE_1K = 0x400, - SPHW_RX_BUF_SIZE_1_5K = 0x600, - SPHW_RX_BUF_SIZE_2K = 0x800, - SPHW_RX_BUF_SIZE_3K = 0xC00, - SPHW_RX_BUF_SIZE_4K = 0x1000, - SPHW_RX_BUF_SIZE_8K = 0x2000, - SPHW_RX_BUF_SIZE_16K = 0x4000, -}; - -const int sphw_hw_rx_buf_size[] = { - SPHW_RX_BUF_SIZE_32B, - SPHW_RX_BUF_SIZE_64B, - SPHW_RX_BUF_SIZE_96B, - SPHW_RX_BUF_SIZE_128B, - SPHW_RX_BUF_SIZE_192B, - SPHW_RX_BUF_SIZE_256B, - SPHW_RX_BUF_SIZE_384B, - SPHW_RX_BUF_SIZE_512B, - SPHW_RX_BUF_SIZE_768B, - SPHW_RX_BUF_SIZE_1K, - SPHW_RX_BUF_SIZE_1_5K, - SPHW_RX_BUF_SIZE_2K, - SPHW_RX_BUF_SIZE_3K, - SPHW_RX_BUF_SIZE_4K, - SPHW_RX_BUF_SIZE_8K, - SPHW_RX_BUF_SIZE_16K, -}; - -static inline int comm_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_COMM, cmd, buf_in, in_size, buf_out, - out_size, 0, SPHW_CHANNEL_COMM); -} - -static inline int comm_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u16 channel) -{ - return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_COMM, cmd, buf_in, - in_size, buf_out, out_size, 0, channel); -} - -int sphw_get_interrupt_cfg(void *dev, struct interrupt_info *info, u16 channel) -{ - struct sphw_hwdev *hwdev = dev; - struct comm_cmd_msix_config msix_cfg; - u16 out_size = sizeof(msix_cfg); - int err; - - if (!hwdev || !info) - return -EINVAL; - - memset(&msix_cfg, 0, sizeof(msix_cfg)); - msix_cfg.func_id = sphw_global_func_id(hwdev); - msix_cfg.msix_index = info->msix_index; - msix_cfg.opcode = MGMT_MSG_CMD_OP_GET; - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, - &msix_cfg, sizeof(msix_cfg), &msix_cfg, - &out_size, channel); - if (err || !out_size || msix_cfg.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, msix_cfg.head.status, out_size, channel); - return -EINVAL; - } - - info->lli_credit_limit = msix_cfg.lli_credit_cnt; - info->lli_timer_cfg = msix_cfg.lli_timer_cnt; - info->pending_limt = msix_cfg.pending_cnt; - info->coalesc_timer_cfg = msix_cfg.coalesce_timer_cnt; - info->resend_timer_cfg = msix_cfg.resend_timer_cnt; - - return 0; -} - -int sphw_set_interrupt_cfg_direct(void *hwdev, struct interrupt_info *info, u16 channel) -{ - struct comm_cmd_msix_config msix_cfg; - u16 out_size = sizeof(msix_cfg); - int err; - - if (!hwdev) - return -EINVAL; - - memset(&msix_cfg, 0, sizeof(msix_cfg)); - msix_cfg.func_id = sphw_global_func_id(hwdev); - msix_cfg.msix_index = (u16)info->msix_index; - msix_cfg.opcode = MGMT_MSG_CMD_OP_SET; - - 
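/*
 * SPHW_MSIX_CNT_SET() above is the usual mask-then-shift field packer, so
 * out-of-range values are silently truncated to the field width:
 *
 *	SPHW_MSIX_CNT_SET(0x7f, LLI_TIMER)  == (0x7f & 0xFFU) << 0
 *	SPHW_MSIX_CNT_SET(9, RESEND_TIMER)  == (9 & 0x7U) << 29   // 9 -> 1
 */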
msix_cfg.lli_credit_cnt = info->lli_credit_limit; - msix_cfg.lli_timer_cnt = info->lli_timer_cfg; - msix_cfg.pending_cnt = info->pending_limt; - msix_cfg.coalesce_timer_cnt = info->coalesc_timer_cfg; - msix_cfg.resend_timer_cnt = info->resend_timer_cfg; - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, - &msix_cfg, sizeof(msix_cfg), &msix_cfg, - &out_size, channel); - if (err || !out_size || msix_cfg.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, msix_cfg.head.status, out_size, channel); - return -EINVAL; - } - - return 0; -} - -int sphw_set_interrupt_cfg(void *dev, struct interrupt_info info, u16 channel) -{ - struct interrupt_info temp_info; - struct sphw_hwdev *hwdev = dev; - int err; - - if (!hwdev) - return -EINVAL; - - temp_info.msix_index = info.msix_index; - - err = sphw_get_interrupt_cfg(hwdev, &temp_info, channel); - if (err) - return -EINVAL; - - if (!info.lli_set) { - info.lli_credit_limit = temp_info.lli_credit_limit; - info.lli_timer_cfg = temp_info.lli_timer_cfg; - } - - if (!info.interrupt_coalesc_set) { - info.pending_limt = temp_info.pending_limt; - info.coalesc_timer_cfg = temp_info.coalesc_timer_cfg; - info.resend_timer_cfg = temp_info.resend_timer_cfg; - } - - return sphw_set_interrupt_cfg_direct(hwdev, &info, channel); -} - -void sphw_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, u8 clear_resend_en) -{ - struct sphw_hwif *hwif = NULL; - u32 msix_ctrl = 0, addr; - - if (!hwdev) - return; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - msix_ctrl = SPHW_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX) | - SPHW_MSI_CLR_INDIR_SET(clear_resend_en, RESEND_TIMER_CLR); - - addr = SPHW_CSR_FUNC_MSI_CLR_WR_ADDR; - sphw_hwif_write_reg(hwif, addr, msix_ctrl); -} - -int sphw_set_wq_page_size(void *hwdev, u16 func_idx, u32 page_size, u16 channel) -{ - struct comm_cmd_wq_page_size page_size_info; - u16 out_size = sizeof(page_size_info); - int err; - - memset(&page_size_info, 0, sizeof(page_size_info)); - page_size_info.func_id = func_idx; - page_size_info.page_size = SPHW_PAGE_SIZE_HW(page_size); - page_size_info.opcode = MGMT_MSG_CMD_OP_SET; - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_PAGESIZE, - &page_size_info, sizeof(page_size_info), - &page_size_info, &out_size, channel); - if (err || !out_size || page_size_info.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x, channel: 0x%x\n", - err, page_size_info.head.status, out_size, channel); - return -EFAULT; - } - - return 0; -} - -int sphw_func_reset(void *dev, u16 func_id, u64 reset_flag, u16 channel) -{ - struct comm_cmd_func_reset func_reset; - struct sphw_hwdev *hwdev = dev; - u16 out_size = sizeof(func_reset); - int err = 0; - - if (!dev) { - pr_err("Invalid para: dev is null.\n"); - return -EINVAL; - } - - sdk_info(hwdev->dev_hdl, "Function is reset, flag: 0x%llx, channel:0x%x\n", - reset_flag, channel); - - memset(&func_reset, 0, sizeof(func_reset)); - func_reset.func_id = func_id; - func_reset.reset_flag = reset_flag; - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_FUNC_RESET, - &func_reset, sizeof(func_reset), - &func_reset, &out_size, channel); - if (err || !out_size || func_reset.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 0x%x, out_size: 0x%x\n", - reset_flag, err, func_reset.head.status, out_size); - return 
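/*
 * sphw_set_interrupt_cfg() above is a read-modify-write: it fetches the
 * current entry first and keeps the hardware's LLI or coalescing fields
 * whenever the caller left lli_set / interrupt_coalesc_set clear, so one
 * group can be updated without knowing the other. Partial-update sketch
 * (the msix index is illustrative):
 *
 *	struct interrupt_info info = {0};
 *
 *	info.msix_index = eq_msix_idx;		// hypothetical index
 *	info.interrupt_coalesc_set = 1;		// touch coalescing only
 *	info.coalesc_timer_cfg = 0x20;
 *	err = sphw_set_interrupt_cfg(hwdev, info, SPHW_CHANNEL_COMM);
 */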
-EIO; - } - - return 0; -} - -static u16 get_hw_rx_buf_size(int rx_buf_sz) -{ - u16 num_hw_types = - sizeof(sphw_hw_rx_buf_size) / - sizeof(sphw_hw_rx_buf_size[0]); - u16 i; - - for (i = 0; i < num_hw_types; i++) { - if (sphw_hw_rx_buf_size[i] == rx_buf_sz) - return i; - } - - pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz); - - return DEFAULT_RX_BUF_SIZE; /* default 2K */ -} - -int sphw_set_root_ctxt(void *hwdev, u32 rq_depth, u32 sq_depth, int rx_buf_sz, u16 channel) -{ - struct comm_cmd_root_ctxt root_ctxt; - u16 out_size = sizeof(root_ctxt); - int err; - - if (!hwdev) - return -EINVAL; - - memset(&root_ctxt, 0, sizeof(root_ctxt)); - root_ctxt.func_id = sphw_global_func_id(hwdev); - - root_ctxt.set_cmdq_depth = 0; - root_ctxt.cmdq_depth = 0; - - root_ctxt.lro_en = 1; - - root_ctxt.rq_depth = (u16)ilog2(rq_depth); - root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz); - root_ctxt.sq_depth = (u16)ilog2(sq_depth); - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_SET_VAT, - &root_ctxt, sizeof(root_ctxt), - &root_ctxt, &out_size, channel); - if (err || !out_size || root_ctxt.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", - err, root_ctxt.head.status, out_size, channel); - return -EFAULT; - } - - return 0; -} - -int sphw_clean_root_ctxt(void *hwdev, u16 channel) -{ - struct comm_cmd_root_ctxt root_ctxt; - u16 out_size = sizeof(root_ctxt); - int err; - - if (!hwdev) - return -EINVAL; - - memset(&root_ctxt, 0, sizeof(root_ctxt)); - root_ctxt.func_id = sphw_global_func_id(hwdev); - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_SET_VAT, - &root_ctxt, sizeof(root_ctxt), - &root_ctxt, &out_size, channel); - if (err || !out_size || root_ctxt.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", - err, root_ctxt.head.status, out_size, channel); - return -EFAULT; - } - - return 0; -} - -int sphw_set_cmdq_depth(void *hwdev, u16 cmdq_depth) -{ - struct comm_cmd_root_ctxt root_ctxt; - u16 out_size = sizeof(root_ctxt); - int err; - - memset(&root_ctxt, 0, sizeof(root_ctxt)); - root_ctxt.func_id = sphw_global_func_id(hwdev); - - root_ctxt.set_cmdq_depth = 1; - root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth); - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_VAT, &root_ctxt, - sizeof(root_ctxt), &root_ctxt, &out_size); - if (err || !out_size || root_ctxt.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set cmdq depth, err: %d, status: 0x%x, out_size: 0x%x\n", - err, root_ctxt.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int sphw_set_cmdq_ctxt(struct sphw_hwdev *hwdev, u8 cmdq_id, struct cmdq_ctxt_info *ctxt) -{ - struct comm_cmd_cmdq_ctxt cmdq_ctxt; - u16 out_size = sizeof(cmdq_ctxt); - int err; - - memset(&cmdq_ctxt, 0, sizeof(cmdq_ctxt)); - memcpy(&cmdq_ctxt.ctxt, ctxt, sizeof(*ctxt)); - cmdq_ctxt.func_id = sphw_global_func_id(hwdev); - cmdq_ctxt.cmdq_id = cmdq_id; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_CMDQ_CTXT, - &cmdq_ctxt, sizeof(cmdq_ctxt), - &cmdq_ctxt, &out_size); - if (err || !out_size || cmdq_ctxt.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n", - err, cmdq_ctxt.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int sphw_set_ceq_ctrl_reg(struct sphw_hwdev *hwdev, u16 q_id, u32 ctrl0, u32 ctrl1) -{ - struct 
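/*
 * sphw_set_root_ctxt() above encodes queue depths as log2 values and the
 * RX buffer size as an index into sphw_hw_rx_buf_size[]; unsupported sizes
 * fall back to DEFAULT_RX_BUF_SIZE (0xB, the 2K entry). Worked example:
 *
 *	rq_depth = 4096        ->  root_ctxt.rq_depth = ilog2(4096) = 12
 *	rx_buf_sz = 0x800 (2K) ->  get_hw_rx_buf_size() returns 0xB
 */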
comm_cmd_ceq_ctrl_reg ceq_ctrl; - u16 out_size = sizeof(ceq_ctrl); - int err; - - memset(&ceq_ctrl, 0, sizeof(ceq_ctrl)); - ceq_ctrl.func_id = sphw_global_func_id(hwdev); - ceq_ctrl.q_id = q_id; - ceq_ctrl.ctrl0 = ctrl0; - ceq_ctrl.ctrl1 = ctrl1; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_CEQ_CTRL_REG, - &ceq_ctrl, sizeof(ceq_ctrl), - &ceq_ctrl, &out_size); - if (err || !out_size || ceq_ctrl.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n", - q_id, err, ceq_ctrl.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int sphw_set_dma_attr_tbl(struct sphw_hwdev *hwdev, u8 entry_idx, u8 st, u8 at, u8 ph, - u8 no_snooping, u8 tph_en) -{ - struct comm_cmd_dma_attr_config dma_attr; - u16 out_size = sizeof(dma_attr); - int err; - - memset(&dma_attr, 0, sizeof(dma_attr)); - dma_attr.func_id = sphw_global_func_id(hwdev); - dma_attr.entry_idx = entry_idx; - dma_attr.st = st; - dma_attr.at = at; - dma_attr.ph = ph; - dma_attr.no_snooping = no_snooping; - dma_attr.tph_en = tph_en; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_DMA_ATTR, &dma_attr, sizeof(dma_attr), - &dma_attr, &out_size); - if (err || !out_size || dma_attr.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to set dma_attr, err: %d, status: 0x%x, out_size: 0x%x\n", - err, dma_attr.head.status, out_size); - return -EIO; - } - - return 0; -} - -int sphw_set_bdf_ctxt(void *hwdev, u8 bus, u8 device, u8 function) -{ - struct comm_cmd_bdf_info bdf_info; - u16 out_size = sizeof(bdf_info); - int err; - - if (!hwdev) - return -EINVAL; - - memset(&bdf_info, 0, sizeof(bdf_info)); - bdf_info.function_idx = sphw_global_func_id(hwdev); - bdf_info.bus = bus; - bdf_info.device = device; - bdf_info.function = function; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SEND_BDF_INFO, - &bdf_info, sizeof(bdf_info), - &bdf_info, &out_size); - if (err || !out_size || bdf_info.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set bdf info to MPU, err: %d, status: 0x%x, out_size: 0x%x\n", - err, bdf_info.head.status, out_size); - return -EIO; - } - - return 0; -} - -int sphw_sync_time(void *hwdev, u64 time) -{ - struct comm_cmd_sync_time time_info; - u16 out_size = sizeof(time_info); - int err; - - memset(&time_info, 0, sizeof(time_info)); - time_info.mstime = time; - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SYNC_TIME, &time_info, - sizeof(time_info), &time_info, &out_size); - if (err || time_info.head.status || !out_size) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n", - err, time_info.head.status, out_size); - return -EIO; - } - - return 0; -} - -int sphw_get_fw_ver(void *hwdev, enum sphw_fw_ver_type type, u8 *mgmt_ver, - u8 version_size, u16 channel) -{ - struct comm_cmd_get_fw_version fw_ver; - struct sphw_hwdev *dev = hwdev; - u16 out_size = sizeof(fw_ver); - int err; - - if (!hwdev || !mgmt_ver) - return -EINVAL; - - memset(&fw_ver, 0, sizeof(fw_ver)); - fw_ver.fw_type = type; - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_FW_VERSION, - &fw_ver, sizeof(fw_ver), &fw_ver, - &out_size, channel); - if (err || !out_size || fw_ver.head.status) { - sdk_err(dev->dev_hdl, "Failed to get fw version, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, fw_ver.head.status, out_size, channel); - return -EIO; - } - - snprintf(mgmt_ver, version_size, "%s", fw_ver.ver); - - return 0; -} - -int sphw_get_mgmt_version(void *hwdev, u8 
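/*
 * Every synchronous management call in this file checks the same triple on
 * return: transport error (err), truncated reply (!out_size) and firmware
 * status (head.status); the MSG_TO_MGMT_SYNC_RETURN_ERR() macro in
 * sphw_hw_comm.h (deleted later in this patch) wraps exactly this test.
 * The recurring shape:
 *
 *	err = comm_msg_to_mgmt_sync(hwdev, cmd, &req, sizeof(req),
 *				    &req, &out_size);
 *	if (err || !out_size || req.head.status)
 *		return -EIO;	// each caller picks -EIO/-EINVAL/-EFAULT
 */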
*mgmt_ver, u8 version_size, u16 channel) -{ - return sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_MPU, mgmt_ver, version_size, channel); -} - -int sphw_get_fw_version(void *hwdev, struct sphw_fw_version *fw_ver, u16 channel) -{ - int err; - - if (!hwdev || !fw_ver) - return -EINVAL; - - err = sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_MPU, fw_ver->mgmt_ver, - sizeof(fw_ver->mgmt_ver), channel); - if (err) - return err; - - err = sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_NPU, fw_ver->microcode_ver, - sizeof(fw_ver->microcode_ver), channel); - if (err) - return err; - - return sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_BOOT, fw_ver->boot_ver, - sizeof(fw_ver->boot_ver), channel); -} - -static int sphw_comm_features_nego(void *hwdev, u8 opcode, u64 *s_feature, u16 size) -{ - struct comm_cmd_feature_nego feature_nego; - u16 out_size = sizeof(feature_nego); - struct sphw_hwdev *dev = hwdev; - int err; - - if (!hwdev || !s_feature || size > COMM_MAX_FEATURE_QWORD) - return -EINVAL; - - memset(&feature_nego, 0, sizeof(feature_nego)); - feature_nego.func_id = sphw_global_func_id(hwdev); - feature_nego.opcode = opcode; - if (opcode == MGMT_MSG_CMD_OP_SET) - memcpy(feature_nego.s_feature, s_feature, (size * sizeof(u64))); - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_FEATURE_NEGO, - &feature_nego, sizeof(feature_nego), - &feature_nego, &out_size); - if (err || !out_size || feature_nego.head.status) { - sdk_err(dev->dev_hdl, "Failed to negotiate feature, err: %d, status: 0x%x, out size: 0x%x\n", - err, feature_nego.head.status, out_size); - return -EINVAL; - } - - if (opcode == MGMT_MSG_CMD_OP_GET) - memcpy(s_feature, feature_nego.s_feature, (size * sizeof(u64))); - - return 0; -} - -int sphw_get_comm_features(void *hwdev, u64 *s_feature, u16 size) -{ - return sphw_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_GET, s_feature, size); -} - -int sphw_set_comm_features(void *hwdev, u64 *s_feature, u16 size) -{ - return sphw_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_SET, s_feature, size); -} - -int sphw_func_tmr_bitmap_set(void *hwdev, bool en) -{ - struct comm_cmd_func_tmr_bitmap_op bitmap_op; - u16 out_size = sizeof(bitmap_op); - int err; - - if (!hwdev) - return -EINVAL; - - memset(&bitmap_op, 0, sizeof(bitmap_op)); - bitmap_op.func_id = sphw_global_func_id(hwdev); - bitmap_op.opcode = en ? 
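/*
 * sphw_comm_features_nego() above moves up to COMM_MAX_FEATURE_QWORD u64
 * words in either direction: for MGMT_MSG_CMD_OP_SET the caller's words
 * are copied into the request, for MGMT_MSG_CMD_OP_GET the firmware's
 * words are copied back. Driver-side sketch (the bit position is
 * hypothetical; this patch does not define feature-bit meanings here):
 *
 *	u64 features[COMM_MAX_FEATURE_QWORD] = {0};
 *
 *	if (!sphw_get_comm_features(hwdev, features, COMM_MAX_FEATURE_QWORD))
 *		if (features[0] & BIT_ULL(0))	// hypothetical feature bit
 *			enable_optional_path();
 */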
FUNC_TMR_BITMAP_ENABLE : - FUNC_TMR_BITMAP_DISABLE; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, - &bitmap_op, sizeof(bitmap_op), - &bitmap_op, &out_size); - if (err || !out_size || bitmap_op.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n", - err, bitmap_op.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int ppf_ht_gpa_set(struct sphw_hwdev *hwdev, struct sphw_page_addr *pg0, struct sphw_page_addr *pg1) -{ - struct comm_cmd_ht_gpa ht_gpa_set; - u16 out_size = sizeof(ht_gpa_set); - int ret; - - memset(&ht_gpa_set, 0, sizeof(ht_gpa_set)); - pg0->virt_addr = dma_alloc_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, - &pg0->phys_addr, GFP_KERNEL); - if (!pg0->virt_addr) { - sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n"); - return -EFAULT; - } - - pg1->virt_addr = dma_alloc_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, - &pg1->phys_addr, GFP_KERNEL); - if (!pg1->virt_addr) { - sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n"); - return -EFAULT; - } - - ht_gpa_set.host_id = sphw_host_id(hwdev); - ht_gpa_set.page_pa0 = pg0->phys_addr; - ht_gpa_set.page_pa1 = pg1->phys_addr; - sdk_info(hwdev->dev_hdl, "PPF ht gpa set: page_addr0.pa=0x%llx, page_addr1.pa=0x%llx\n", - pg0->phys_addr, pg1->phys_addr); - ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_HT_GPA, - &ht_gpa_set, sizeof(ht_gpa_set), - &ht_gpa_set, &out_size); - if (ret || !out_size || ht_gpa_set.head.status) { - sdk_warn(hwdev->dev_hdl, "PPF ht gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n", - ret, ht_gpa_set.head.status, out_size); - return -EFAULT; - } - - hwdev->page_pa0.phys_addr = pg0->phys_addr; - hwdev->page_pa0.virt_addr = pg0->virt_addr; - - hwdev->page_pa1.phys_addr = pg1->phys_addr; - hwdev->page_pa1.virt_addr = pg1->virt_addr; - - return 0; -} - -int sphw_ppf_ht_gpa_init(struct sphw_hwdev *hwdev) -{ - int ret; - int i; - int j; - int size; - - struct sphw_page_addr page_addr0[SPHW_PPF_HT_GPA_SET_RETRY_TIMES]; - struct sphw_page_addr page_addr1[SPHW_PPF_HT_GPA_SET_RETRY_TIMES]; - - size = SPHW_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]); - memset(page_addr0, 0, size); - memset(page_addr1, 0, size); - - for (i = 0; i < SPHW_PPF_HT_GPA_SET_RETRY_TIMES; i++) { - ret = ppf_ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]); - if (!ret) - break; - } - - for (j = 0; j < i; j++) { - if (page_addr0[j].virt_addr) { - dma_free_coherent(hwdev->dev_hdl, - SPHW_HT_GPA_PAGE_SIZE, - page_addr0[j].virt_addr, - page_addr0[j].phys_addr); - page_addr0[j].virt_addr = NULL; - } - if (page_addr1[j].virt_addr) { - dma_free_coherent(hwdev->dev_hdl, - SPHW_HT_GPA_PAGE_SIZE, - page_addr1[j].virt_addr, - page_addr1[j].phys_addr); - page_addr1[j].virt_addr = NULL; - } - } - - if (i >= SPHW_PPF_HT_GPA_SET_RETRY_TIMES) { - sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n", - i); - return -EFAULT; - } - - return 0; -} - -void sphw_ppf_ht_gpa_deinit(struct sphw_hwdev *hwdev) -{ - if (hwdev->page_pa0.virt_addr) { - dma_free_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, - hwdev->page_pa0.virt_addr, - hwdev->page_pa0.phys_addr); - hwdev->page_pa0.virt_addr = NULL; - } - - if (hwdev->page_pa1.virt_addr) { - dma_free_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, - hwdev->page_pa1.virt_addr, - hwdev->page_pa1.phys_addr); - hwdev->page_pa1.virt_addr = NULL; - } -} - -static int set_ppf_tmr_status(struct sphw_hwdev *hwdev, - enum ppf_tmr_status status) -{ - struct 
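/*
 * sphw_ppf_ht_gpa_init() above retries ppf_ht_gpa_set() up to
 * SPHW_PPF_HT_GPA_SET_RETRY_TIMES (10) times: attempt i breaks the loop on
 * success, and the cleanup loop then frees only the page pairs of the
 * failed attempts 0..i-1; the successful pair was already stashed in
 * hwdev->page_pa0/page_pa1 and is released later by
 * sphw_ppf_ht_gpa_deinit().
 */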
comm_cmd_ppf_tmr_op op; - u16 out_size = sizeof(op); - int err = 0; - - if (!hwdev) - return -EINVAL; - - memset(&op, 0, sizeof(op)); - - if (sphw_func_type(hwdev) != TYPE_PPF) - return -EFAULT; - - if (status == SPHW_PPF_TMR_FLAG_START) { - err = sphw_ppf_ht_gpa_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "PPF ht gpa init fail!\n"); - return -EFAULT; - } - } else { - sphw_ppf_ht_gpa_deinit(hwdev); - } - - op.opcode = status; - op.ppf_id = sphw_ppf_idx(hwdev); - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_TMR, &op, - sizeof(op), &op, &out_size); - if (err || !out_size || op.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x, out_size: 0x%x\n", - err, op.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int sphw_ppf_tmr_start(void *hwdev) -{ - if (!hwdev) { - pr_err("Hwdev pointer is NULL for starting ppf timer\n"); - return -EINVAL; - } - - return set_ppf_tmr_status(hwdev, SPHW_PPF_TMR_FLAG_START); -} - -int sphw_ppf_tmr_stop(void *hwdev) -{ - if (!hwdev) { - pr_err("Hwdev pointer is NULL for stop ppf timer\n"); - return -EINVAL; - } - - return set_ppf_tmr_status(hwdev, SPHW_PPF_TMR_FLAG_STOP); -} - -int mqm_eqm_try_alloc_mem(struct sphw_hwdev *hwdev, u32 page_size, - u32 page_num) -{ - struct sphw_page_addr *page_addr = hwdev->mqm_att.brm_srch_page_addr; - u32 valid_num = 0; - u32 flag = 1; - u32 i = 0; - - for (i = 0; i < page_num; i++) { - page_addr->virt_addr = - dma_alloc_coherent(hwdev->dev_hdl, page_size, - &page_addr->phys_addr, GFP_KERNEL); - if (!page_addr->virt_addr) { - flag = 0; - break; - } - valid_num++; - page_addr++; - } - - if (flag == 1) { - hwdev->mqm_att.page_size = page_size; - hwdev->mqm_att.page_num = page_num; - } else { - page_addr = hwdev->mqm_att.brm_srch_page_addr; - for (i = 0; i < valid_num; i++) { - dma_free_coherent(hwdev->dev_hdl, page_size, - page_addr->virt_addr, - page_addr->phys_addr); - page_addr++; - } - return -EFAULT; - } - - return 0; -} - -int mqm_eqm_alloc_page_mem(struct sphw_hwdev *hwdev) -{ - int ret = 0; - int page_num; - - /* apply for 2M page, page number is chunk_num/1024 */ - page_num = (hwdev->mqm_att.chunk_num + 1023) >> 10; - ret = mqm_eqm_try_alloc_mem(hwdev, 2 * 1024 * 1024, page_num); - if (!ret) { - sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 2M OK\n"); - return 0; - } - - /* apply for 64KB page, page number is chunk_num/32 */ - page_num = (hwdev->mqm_att.chunk_num + 31) >> 5; - ret = mqm_eqm_try_alloc_mem(hwdev, 64 * 1024, page_num); - if (!ret) { - sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 64K OK\n"); - return 0; - } - - /* apply for 4KB page, page number is chunk_num/2 */ - page_num = (hwdev->mqm_att.chunk_num + 1) >> 1; - ret = mqm_eqm_try_alloc_mem(hwdev, 4 * 1024, page_num); - if (!ret) { - sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 4K OK\n"); - return 0; - } - - return ret; -} - -void mqm_eqm_free_page_mem(struct sphw_hwdev *hwdev) -{ - u32 i; - struct sphw_page_addr *page_addr; - u32 page_size; - - page_size = hwdev->mqm_att.page_size; - page_addr = hwdev->mqm_att.brm_srch_page_addr; - - for (i = 0; i < hwdev->mqm_att.page_num; i++) { - dma_free_coherent(hwdev->dev_hdl, page_size, - page_addr->virt_addr, page_addr->phys_addr); - page_addr++; - } -} - -int mqm_eqm_set_cfg_2_hw(struct sphw_hwdev *hwdev, u8 valid) -{ - struct comm_cmd_eqm_cfg info_eqm_cfg; - u16 out_size = sizeof(info_eqm_cfg); - int err; - - memset(&info_eqm_cfg, 0, sizeof(info_eqm_cfg)); - - info_eqm_cfg.host_id = sphw_host_id(hwdev); - 
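/*
 * mqm_eqm_alloc_page_mem() above tries progressively smaller DMA pages:
 * 2M pages hold 1024 chunks each, 64K pages hold 32, 4K pages hold 2, so
 * the page counts are round-up divisions of chunk_num. Worked example for
 * chunk_num = 3000:
 *
 *	2M : (3000 + 1023) >> 10 = 3 pages
 *	64K: (3000 +   31) >>  5 = 94 pages
 *	4K : (3000 +    1) >>  1 = 1500 pages
 */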
info_eqm_cfg.page_size = hwdev->mqm_att.page_size; - info_eqm_cfg.valid = valid; - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_MQM_CFG_INFO, - &info_eqm_cfg, sizeof(info_eqm_cfg), - &info_eqm_cfg, &out_size); - if (err || !out_size || info_eqm_cfg.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to init func table, err: %d, status: 0x%x, out_size: 0x%x\n", - err, info_eqm_cfg.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -#define EQM_DATA_BUF_SIZE 1024 -#define MQM_ATT_PAGE_NUM 128 - -int mqm_eqm_set_page_2_hw(struct sphw_hwdev *hwdev) -{ - struct comm_cmd_eqm_search_gpa *info = NULL; - struct sphw_page_addr *page_addr = NULL; - void *send_buf = NULL; - u16 send_buf_size; - u32 i; - u64 *gpa_hi52 = NULL; - u64 gpa; - u32 num; - u32 start_idx; - int err = 0; - u16 out_size; - u8 cmd; - - send_buf_size = sizeof(struct comm_cmd_eqm_search_gpa) + - EQM_DATA_BUF_SIZE; - send_buf = kzalloc(send_buf_size, GFP_KERNEL); - if (!send_buf) { - sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n"); - return -EFAULT; - } - - page_addr = hwdev->mqm_att.brm_srch_page_addr; - info = (struct comm_cmd_eqm_search_gpa *)send_buf; - - gpa_hi52 = info->gpa_hi52; - num = 0; - start_idx = 0; - cmd = COMM_MGMT_CMD_SET_MQM_SRCH_GPA; - for (i = 0; i < hwdev->mqm_att.page_num; i++) { - /* gpa align to 4K, save gpa[31:12] */ - gpa = page_addr->phys_addr >> 12; - gpa_hi52[num] = gpa; - num++; - if (num == MQM_ATT_PAGE_NUM) { - info->num = num; - info->start_idx = start_idx; - info->host_id = sphw_host_id(hwdev); - out_size = send_buf_size; - err = comm_msg_to_mgmt_sync(hwdev, cmd, info, - (u16)send_buf_size, - info, &out_size); - if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, - info->head.status)) { - sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n", - err, info->head.status, out_size); - err = -EFAULT; - goto set_page_2_hw_end; - } - - gpa_hi52 = info->gpa_hi52; - num = 0; - start_idx = i + 1; - } - page_addr++; - } - - if (num != 0) { - info->num = num; - info->start_idx = start_idx; - info->host_id = sphw_host_id(hwdev); - out_size = send_buf_size; - err = comm_msg_to_mgmt_sync(hwdev, cmd, info, - (u16)send_buf_size, info, - &out_size); - if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, - info->head.status)) { - sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n", - err, info->head.status, out_size); - err = -EFAULT; - goto set_page_2_hw_end; - } - } - -set_page_2_hw_end: - kfree(send_buf); - return err; -} - -int mqm_eqm_init(struct sphw_hwdev *hwdev) -{ - struct comm_cmd_get_eqm_num info_eqm_fix; - u16 len = sizeof(info_eqm_fix); - int ret; - - if (hwdev->hwif->attr.func_type != TYPE_PPF) - return 0; - - memset(&info_eqm_fix, 0, sizeof(info_eqm_fix)); - - ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_MQM_FIX_INFO, - &info_eqm_fix, sizeof(info_eqm_fix), - &info_eqm_fix, &len); - if (ret || !len || info_eqm_fix.head.status) { - sdk_err(hwdev->dev_hdl, "Get mqm fix info fail,err: %d, status: 0x%x, out_size: 0x%x\n", - ret, info_eqm_fix.head.status, len); - return -EFAULT; - } - sdk_info(hwdev->dev_hdl, "get chunk_num: 0x%x, search_gpa_num: 0x%08x\n", - info_eqm_fix.chunk_num, info_eqm_fix.search_gpa_num); - if (!(info_eqm_fix.chunk_num)) - return 0; - - hwdev->mqm_att.chunk_num = info_eqm_fix.chunk_num; - hwdev->mqm_att.search_gpa_num = info_eqm_fix.search_gpa_num; - hwdev->mqm_att.page_size = 0; - hwdev->mqm_att.page_num = 0; - - hwdev->mqm_att.brm_srch_page_addr = - kcalloc(hwdev->mqm_att.chunk_num, - 
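/*
 * mqm_eqm_set_page_2_hw() above reports each page's 4K-aligned physical
 * address with the low 12 bits stripped (the "hi52" encoding,
 * gpa = phys_addr >> 12) and sends them in batches of MQM_ATT_PAGE_NUM
 * (128) entries, flushing a final partial batch after the loop. The number
 * of mailbox messages is therefore a round-up division, e.g.:
 *
 *	page_num = 300  ->  2 full batches of 128 + 1 partial = 3 messages
 */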
sizeof(struct sphw_page_addr), GFP_KERNEL); - if (!(hwdev->mqm_att.brm_srch_page_addr)) { - sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n"); - return -EFAULT; - } - - ret = mqm_eqm_alloc_page_mem(hwdev); - if (ret) { - sdk_err(hwdev->dev_hdl, "Alloc eqm page mem failed\r\n"); - goto err_page; - } - - ret = mqm_eqm_set_page_2_hw(hwdev); - if (ret) { - sdk_err(hwdev->dev_hdl, "Set page to hw failed\r\n"); - goto err_ecmd; - } - - ret = mqm_eqm_set_cfg_2_hw(hwdev, 1); - if (ret) { - sdk_err(hwdev->dev_hdl, "Set page to hw failed\r\n"); - goto err_ecmd; - } - - return 0; - -err_ecmd: - mqm_eqm_free_page_mem(hwdev); - -err_page: - kfree(hwdev->mqm_att.brm_srch_page_addr); - - return ret; -} - -void mqm_eqm_deinit(struct sphw_hwdev *hwdev) -{ - int ret; - - if (hwdev->hwif->attr.func_type != TYPE_PPF) - return; - - if (!(hwdev->mqm_att.chunk_num)) - return; - - mqm_eqm_free_page_mem(hwdev); - kfree(hwdev->mqm_att.brm_srch_page_addr); - - ret = mqm_eqm_set_cfg_2_hw(hwdev, 0); - if (ret) { - sdk_err(hwdev->dev_hdl, "Set mqm eqm cfg to chip fail! err: %d\n", - ret); - return; - } - - hwdev->mqm_att.chunk_num = 0; - hwdev->mqm_att.search_gpa_num = 0; - hwdev->mqm_att.page_num = 0; - hwdev->mqm_att.page_size = 0; -} - -int sphw_ppf_ext_db_init(void *dev) -{ - struct sphw_hwdev *hwdev = dev; - int ret; - - /* IS OVS MODE SURPORT EXT DB NEEDED */ - - ret = mqm_eqm_init(hwdev); - if (ret) { - sdk_err(hwdev->dev_hdl, "MQM eqm init fail!\n"); - return -EFAULT; - } - sdk_info(hwdev->dev_hdl, "ppf_ext_db_init ok\r\n"); - - return 0; -} - -int sphw_ppf_ext_db_deinit(void *dev) -{ - struct sphw_hwdev *hwdev = dev; - - if (!dev) - return -EINVAL; - - if (hwdev->hwif->attr.func_type != TYPE_PPF) - return 0; - - mqm_eqm_deinit(hwdev); - - return 0; -} - -#define SPHW_FLR_TIMEOUT 1000 - -static enum sphw_wait_return check_flr_finish_handler(void *priv_data) -{ - struct sphw_hwif *hwif = priv_data; - enum sphw_pf_status status; - - status = sphw_get_pf_status(hwif); - if (status == SPHW_PF_STATUS_FLR_FINISH_FLAG) { - sphw_set_pf_status(hwif, SPHW_PF_STATUS_ACTIVE_FLAG); - return WAIT_PROCESS_CPL; - } - - return WAIT_PROCESS_WAITING; -} - -static int wait_for_flr_finish(struct sphw_hwif *hwif) -{ - return sphw_wait_for_timeout(hwif, check_flr_finish_handler, - SPHW_FLR_TIMEOUT, 10 * USEC_PER_MSEC); -} - -#define SPHW_WAIT_CMDQ_IDLE_TIMEOUT 5000 - -static enum sphw_wait_return check_cmdq_stop_handler(void *priv_data) -{ - struct sphw_hwdev *hwdev = priv_data; - struct sphw_cmdqs *cmdqs = hwdev->cmdqs; - enum sphw_cmdq_type cmdq_type; - - /* Stop waiting when card unpresent */ - if (!hwdev->chip_present_flag) - return WAIT_PROCESS_CPL; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - if (!sphw_cmdq_idle(&cmdqs->cmdq[cmdq_type])) - return WAIT_PROCESS_WAITING; - } - - return WAIT_PROCESS_CPL; -} - -static int wait_cmdq_stop(struct sphw_hwdev *hwdev) -{ - enum sphw_cmdq_type cmdq_type; - struct sphw_cmdqs *cmdqs = hwdev->cmdqs; - int err; - - if (!(cmdqs->status & SPHW_CMDQ_ENABLE)) - return 0; - - cmdqs->status &= ~SPHW_CMDQ_ENABLE; - - err = sphw_wait_for_timeout(hwdev, check_cmdq_stop_handler, - SPHW_WAIT_CMDQ_IDLE_TIMEOUT, USEC_PER_MSEC); - if (!err) - return 0; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - if (!sphw_cmdq_idle(&cmdqs->cmdq[cmdq_type])) - sdk_err(hwdev->dev_hdl, "Cmdq %d is busy\n", cmdq_type); - } - - cmdqs->status |= SPHW_CMDQ_ENABLE; - - return err; -} - -static int sphw_pf_rx_tx_flush(struct sphw_hwdev 
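/*
 * The check_*_handler() callbacks below follow the sphw_wait_for_timeout()
 * polling contract: return WAIT_PROCESS_CPL to stop waiting (done, or the
 * card vanished) and WAIT_PROCESS_WAITING to poll again until the timeout
 * (SPHW_FLR_TIMEOUT / SPHW_WAIT_CMDQ_IDLE_TIMEOUT ms) expires. A handler
 * in the same shape (the condition is illustrative):
 *
 *	static enum sphw_wait_return check_foo_done(void *priv_data)
 *	{
 *		return foo_is_done(priv_data) ? WAIT_PROCESS_CPL
 *					      : WAIT_PROCESS_WAITING;
 *	}
 */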
*hwdev, u16 channel) -{ - struct sphw_hwif *hwif = hwdev->hwif; - struct comm_cmd_clear_doorbell clear_db; - struct comm_cmd_clear_resource clr_res; - u16 out_size; - int err; - int ret = 0; - - /*wait ucode stop I/O */ - msleep(100); - - err = wait_cmdq_stop(hwdev); - if (err) { - sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check CMDQ timeout value is reasonable\n"); - ret = err; - } - - sphw_disable_doorbell(hwif); - - out_size = sizeof(clear_db); - memset(&clear_db, 0, sizeof(clear_db)); - clear_db.func_id = SPHW_HWIF_GLOBAL_IDX(hwif); - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_FLUSH_DOORBELL, - &clear_db, sizeof(clear_db), - &clear_db, &out_size, channel); - if (err || !out_size || clear_db.head.status) { - sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", - err, clear_db.head.status, out_size, channel); - if (err) - ret = err; - else - ret = -EFAULT; - } - - sphw_set_pf_status(hwif, SPHW_PF_STATUS_FLR_START_FLAG); - - memset(&clr_res, 0, sizeof(clr_res)); - clr_res.func_id = SPHW_HWIF_GLOBAL_IDX(hwif); - - err = sphw_msg_to_mgmt_no_ack(hwdev, SPHW_MOD_COMM, COMM_MGMT_CMD_START_FLUSH, &clr_res, - sizeof(clr_res), channel); - if (err) { - sdk_warn(hwdev->dev_hdl, "Failed to notice flush message, err: %d, channel: 0x%x\n", - err, channel); - ret = err; - } - - err = wait_for_flr_finish(hwif); - if (err) { - sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n"); - ret = err; - } - - sphw_enable_doorbell(hwif); - - err = sphw_reinit_cmdq_ctxts(hwdev); - if (err) { - sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n"); - ret = err; - } - - return ret; -} - -int sphw_func_rx_tx_flush(void *hwdev, u16 channel) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return -EINVAL; - - if (!dev->chip_present_flag) - return 0; - - if (SPHW_FUNC_TYPE(dev) == TYPE_VF) - /* TO DO */ - return 0; - else - return sphw_pf_rx_tx_flush(dev, channel); -} - -int sphw_get_board_info(void *hwdev, struct sphw_board_info *info, u16 channel) -{ - struct comm_cmd_board_info board_info; - u16 out_size = sizeof(board_info); - int err; - - if (!hwdev || !info) - return -EINVAL; - - memset(&board_info, 0, sizeof(board_info)); - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_BOARD_INFO, - &board_info, sizeof(board_info), - &board_info, &out_size, channel); - if (err || board_info.head.status || !out_size) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, board_info.head.status, out_size, channel); - return -EIO; - } - - memcpy(info, &board_info.info, sizeof(*info)); - - return 0; -} - -int sphw_get_global_attr(void *hwdev, struct comm_global_attr *attr) -{ - struct comm_cmd_get_glb_attr get_attr; - u16 out_size = sizeof(get_attr); - int err = 0; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_GLOBAL_ATTR, - &get_attr, sizeof(get_attr), &get_attr, - &out_size); - if (err || !out_size || get_attr.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to get global attribute, err: %d, status: 0x%x, out size: 0x%x\n", - err, get_attr.head.status, out_size); - return -EIO; - } - - memcpy(attr, &get_attr.attr, sizeof(*attr)); - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h deleted file mode 100644 index 4e0cf2dfb21eb82e5c1030e639a74fb8aa3c4c14..0000000000000000000000000000000000000000 --- 
a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h +++ /dev/null @@ -1,45 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_HW_COMM_H -#define SPHW_HW_COMM_H - -#include "sphw_comm_msg_intf.h" - -#define MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, status) \ - ((err) || (status) || !(out_size)) - -#define SPHW_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) - -enum func_tmr_bitmap_status { - FUNC_TMR_BITMAP_DISABLE, - FUNC_TMR_BITMAP_ENABLE, -}; - -enum ppf_tmr_status { - SPHW_PPF_TMR_FLAG_STOP, - SPHW_PPF_TMR_FLAG_START, -}; - -#define SPHW_HT_GPA_PAGE_SIZE 4096UL -#define SPHW_PPF_HT_GPA_SET_RETRY_TIMES 10 - -int sphw_set_cmdq_depth(void *hwdev, u16 cmdq_depth); - -int sphw_set_cmdq_ctxt(struct sphw_hwdev *hwdev, u8 cmdq_id, struct cmdq_ctxt_info *ctxt); - -int sphw_ppf_ext_db_init(void *dev); - -int sphw_ppf_ext_db_deinit(void *dev); - -int sphw_set_ceq_ctrl_reg(struct sphw_hwdev *hwdev, u16 q_id, u32 ctrl0, u32 ctrl1); - -int sphw_set_dma_attr_tbl(struct sphw_hwdev *hwdevm, u8 entry_idx, u8 st, u8 at, u8 ph, - u8 no_snooping, u8 tph_en); - -int sphw_get_comm_features(void *hwdev, u64 *s_feature, u16 size); -int sphw_set_comm_features(void *hwdev, u64 *s_feature, u16 size); - -int sphw_get_global_attr(void *hwdev, struct comm_global_attr *attr); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c deleted file mode 100644 index 783fa46bcfe5b7199a419dd612bd2c2a402c2f18..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c +++ /dev/null @@ -1,1324 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_hwdev.h" -#include "sphw_csr.h" -#include "sphw_hwif.h" -#include "sphw_eqs.h" -#include "sphw_api_cmd.h" -#include "sphw_mgmt.h" -#include "sphw_mbox.h" -#include "sphw_wq.h" -#include "sphw_cmdq.h" -#include "sphw_hw_cfg.h" -#include "sphw_hw_comm.h" -#include "sphw_prof_adap.h" - -static bool disable_stateful_load; -module_param(disable_stateful_load, bool, 0444); -MODULE_PARM_DESC(disable_stateful_load, "Disable stateful load - default is false"); - -static bool disable_cfg_comm; -module_param(disable_cfg_comm, bool, 0444); -MODULE_PARM_DESC(disable_cfg_comm, "disable_cfg_comm or not - default is false"); - -static unsigned int wq_page_order = SPHW_MAX_WQ_PAGE_SIZE_ORDER; -module_param(wq_page_order, uint, 0444); -MODULE_PARM_DESC(wq_page_order, "Set wq page size order, wq page size is 4K * (2 ^ wq_page_order) - default is 8"); - -enum sphw_pcie_nosnoop { - SPHW_PCIE_SNOOP = 0, - SPHW_PCIE_NO_SNOOP = 1, -}; - -enum sphw_pcie_tph { - SPHW_PCIE_TPH_DISABLE = 0, - SPHW_PCIE_TPH_ENABLE = 1, -}; - -#define SPHW_DMA_ATTR_INDIR_IDX_SHIFT 0 - -#define SPHW_DMA_ATTR_INDIR_IDX_MASK 0x3FF - -#define SPHW_DMA_ATTR_INDIR_IDX_SET(val, member) \ - (((u32)(val) & SPHW_DMA_ATTR_INDIR_##member##_MASK) << \ - SPHW_DMA_ATTR_INDIR_##member##_SHIFT) - -#define SPHW_DMA_ATTR_INDIR_IDX_CLEAR(val, member) \ - ((val) & (~(SPHW_DMA_ATTR_INDIR_##member##_MASK \ - << SPHW_DMA_ATTR_INDIR_##member##_SHIFT))) - -#define SPHW_DMA_ATTR_ENTRY_ST_SHIFT 0 -#define SPHW_DMA_ATTR_ENTRY_AT_SHIFT 8 -#define SPHW_DMA_ATTR_ENTRY_PH_SHIFT 
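/*
 * SPHW_PAGE_SIZE_HW() in sphw_hw_comm.h above encodes a wq page size as a
 * power-of-two exponent relative to 4K:
 *
 *	SPHW_PAGE_SIZE_HW(4096)    = ilog2(1)   = 0
 *	SPHW_PAGE_SIZE_HW(65536)   = ilog2(16)  = 4
 *	SPHW_PAGE_SIZE_HW(1 << 20) = ilog2(256) = 8
 *
 * which matches the wq_page_order module parameter's "4K * (2 ^ order)"
 * description and its default of 8.
 */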
10 -#define SPHW_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 -#define SPHW_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 - -#define SPHW_DMA_ATTR_ENTRY_ST_MASK 0xFF -#define SPHW_DMA_ATTR_ENTRY_AT_MASK 0x3 -#define SPHW_DMA_ATTR_ENTRY_PH_MASK 0x3 -#define SPHW_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 -#define SPHW_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 - -#define SPHW_DMA_ATTR_ENTRY_SET(val, member) \ - (((u32)(val) & SPHW_DMA_ATTR_ENTRY_##member##_MASK) << \ - SPHW_DMA_ATTR_ENTRY_##member##_SHIFT) - -#define SPHW_DMA_ATTR_ENTRY_CLEAR(val, member) \ - ((val) & (~(SPHW_DMA_ATTR_ENTRY_##member##_MASK \ - << SPHW_DMA_ATTR_ENTRY_##member##_SHIFT))) - -#define SPHW_PCIE_ST_DISABLE 0 -#define SPHW_PCIE_AT_DISABLE 0 -#define SPHW_PCIE_PH_DISABLE 0 - -#define PCIE_MSIX_ATTR_ENTRY 0 - -#define SPHW_CHIP_PRESENT 1 -#define SPHW_CHIP_ABSENT 0 - -#define SPHW_DEAULT_EQ_MSIX_PENDING_LIMIT 0 -#define SPHW_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF -#define SPHW_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7 - -#define SPHW_HWDEV_WQ_NAME "sphw_hardware" -#define SPHW_WQ_MAX_REQ 10 - -static void sphw_init_heartbeat_detect(struct sphw_hwdev *hwdev); -static void sphw_destroy_heartbeat_detect(struct sphw_hwdev *hwdev); - -typedef void (*mgmt_event_cb)(void *handle, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); - -struct mgmt_event_handle { - u16 cmd; - mgmt_event_cb proc; -}; - -int pf_handle_vf_comm_mbox(void *handle, void *pri_handle, - u16 vf_id, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size) -{ - struct sphw_hwdev *hwdev = handle; - - if (!hwdev) - return -EINVAL; - - sdk_warn(hwdev->dev_hdl, "Unsupported vf mbox event %u to process\n", - cmd); - - return 0; -} - -int vf_handle_pf_comm_mbox(void *handle, void *pri_handle, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size) -{ - struct sphw_hwdev *hwdev = handle; - - if (!hwdev) - return -EINVAL; - - sdk_warn(hwdev->dev_hdl, "Unsupported pf mbox event %u to process\n", - cmd); - return 0; -} - -static void chip_fault_show(struct sphw_hwdev *hwdev, struct sphw_fault_event *event) -{ - char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = { - "fatal", "reset", "host", "flr", "general", "suggestion"}; - char level_str[FAULT_SHOW_STR_LEN + 1]; - u8 level; - - memset(level_str, 0, FAULT_SHOW_STR_LEN + 1); - level = event->event.chip.err_level; - if (level < FAULT_LEVEL_MAX) - strncpy(level_str, fault_level[level], - FAULT_SHOW_STR_LEN); - else - strncpy(level_str, "Unknown", FAULT_SHOW_STR_LEN); - - if (level == FAULT_LEVEL_SERIOUS_FLR) - dev_err(hwdev->dev_hdl, "err_level: %u [%s], flr func_id: %u\n", - level, level_str, event->event.chip.func_id); - - dev_err(hwdev->dev_hdl, "Module_id: 0x%x, err_type: 0x%x, err_level: %u[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", - event->event.chip.node_id, - event->event.chip.err_type, level, level_str, - event->event.chip.err_csr_addr, - event->event.chip.err_csr_value); -} - -static void fault_report_show(struct sphw_hwdev *hwdev, - struct sphw_fault_event *event) -{ - char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { - "chip", "ucode", "mem rd timeout", "mem wr timeout", - "reg rd timeout", "reg wr timeout", "phy fault" - }; - char type_str[FAULT_SHOW_STR_LEN + 1]; - struct fault_event_stats *fault = NULL; - - sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %u\n", - sphw_global_func_id(hwdev)); - - memset(type_str, 0, FAULT_SHOW_STR_LEN + 1); - if (event->type < FAULT_TYPE_MAX) - strncpy(type_str, fault_type[event->type], - strlen(fault_type[event->type])); - else - 
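/*
 * The SPHW_DMA_ATTR_ENTRY_*() packers defined above are consumed by
 * dma_attr_table_init() later in this file: it selects the table entry via
 * the indirect-index CSR, reads the current value, and composes the wanted
 * default as
 *
 *	dst_attr = SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_ST_DISABLE, ST) | ...
 *		 | SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_TPH_DISABLE, TPH_EN);
 *
 * i.e. an all-zero entry (plain snooped DMA, no TPH), and only issues the
 * management command when the two differ.
 */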
strncpy(type_str, "Unknown", strlen("Unknown")); - - sdk_err(hwdev->dev_hdl, "Fault type: %u [%s]\n", event->type, type_str); - /* 0, 1, 2 and 3 word Represents array event->event.val index */ - sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", - event->event.val[0], event->event.val[1], event->event.val[2], - event->event.val[3]); - - fault = &hwdev->hw_stats.fault_event_stats; - - switch (event->type) { - case FAULT_TYPE_CHIP: - chip_fault_show(hwdev, event); - break; - case FAULT_TYPE_UCODE: - atomic_inc(&fault->fault_type_stat[event->type]); - sdk_err(hwdev->dev_hdl, "Cause_id: %u, core_id: %u, c_id: %u, epc: 0x%08x\n", - event->event.ucode.cause_id, event->event.ucode.core_id, - event->event.ucode.c_id, event->event.ucode.epc); - break; - case FAULT_TYPE_MEM_RD_TIMEOUT: - case FAULT_TYPE_MEM_WR_TIMEOUT: - atomic_inc(&fault->fault_type_stat[event->type]); - sdk_err(hwdev->dev_hdl, "Err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_index: 0x%08x\n", - event->event.mem_timeout.err_csr_ctrl, - event->event.mem_timeout.err_csr_data, - event->event.mem_timeout.ctrl_tab, - event->event.mem_timeout.mem_index); - break; - case FAULT_TYPE_REG_RD_TIMEOUT: - case FAULT_TYPE_REG_WR_TIMEOUT: - atomic_inc(&fault->fault_type_stat[event->type]); - sdk_err(hwdev->dev_hdl, "Err_csr: 0x%08x\n", - event->event.reg_timeout.err_csr); - break; - case FAULT_TYPE_PHY_FAULT: - atomic_inc(&fault->fault_type_stat[event->type]); - sdk_err(hwdev->dev_hdl, "Op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n", - event->event.phy_fault.op_type, - event->event.phy_fault.port_id, - event->event.phy_fault.dev_ad, - event->event.phy_fault.csr_addr, - event->event.phy_fault.op_data); - break; - default: - break; - } -} - -static void fault_event_handler(void *dev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct sphw_cmd_fault_event *fault_event = NULL; - struct sphw_event_info event_info; - struct sphw_hwdev *hwdev = dev; - u8 fault_src = SPHW_FAULT_SRC_TYPE_MAX; - u8 fault_level; - - if (in_size != sizeof(*fault_event)) { - sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %u, should be %ld\n", - in_size, sizeof(*fault_event)); - return; - } - - fault_event = buf_in; - fault_report_show(hwdev, &fault_event->event); - - if (fault_event->event.type == FAULT_TYPE_CHIP) - fault_level = fault_event->event.event.chip.err_level; - else - fault_level = FAULT_LEVEL_FATAL; - - if (hwdev->event_callback) { - event_info.type = SPHW_EVENT_FAULT; - memcpy(&event_info.info, &fault_event->event, - sizeof(struct sphw_fault_event)); - event_info.info.fault_level = fault_level; - hwdev->event_callback(hwdev->event_pri_handle, &event_info); - } - - if (fault_event->event.type <= FAULT_TYPE_REG_WR_TIMEOUT) - fault_src = fault_event->event.type; - else if (fault_event->event.type == FAULT_TYPE_PHY_FAULT) - fault_src = SPHW_FAULT_SRC_HW_PHY_FAULT; - - sphw_fault_post_process(hwdev, fault_src, fault_level); -} - -static void ffm_event_msg_handler(void *hwdev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct ffm_intr_info *intr = NULL; - struct sphw_hwdev *dev = hwdev; - - if (in_size != sizeof(*intr)) { - sdk_err(dev->dev_hdl, "Invalid fault event report, length: %u, should be %ld.\n", - in_size, sizeof(*intr)); - return; - } - - intr = buf_in; - - sdk_err(dev->dev_hdl, "node_id: 0x%x, err_type: 0x%x, err_level: %u, err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", - intr->node_id, intr->err_type, 
intr->err_level, - intr->err_csr_addr, intr->err_csr_value); -} - -const struct mgmt_event_handle mgmt_event_proc[] = { - { - .cmd = COMM_MGMT_CMD_FAULT_REPORT, - .proc = fault_event_handler, - }, - - { - .cmd = COMM_MGMT_CMD_FFM_SET, - .proc = ffm_event_msg_handler, - }, -}; - -void pf_handle_mgmt_comm_event(void *handle, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size) -{ - struct sphw_hwdev *hwdev = handle; - u32 i, event_num = ARRAY_LEN(mgmt_event_proc); - - if (!hwdev) - return; - - for (i = 0; i < event_num; i++) { - if (cmd == mgmt_event_proc[i].cmd) { - if (mgmt_event_proc[i].proc) - mgmt_event_proc[i].proc(handle, buf_in, in_size, - buf_out, out_size); - - return; - } - } - - sdk_warn(hwdev->dev_hdl, "Unsupported mgmt cpu event %u to process\n", - cmd); -} - -void sphw_set_chip_present(void *hwdev) -{ - ((struct sphw_hwdev *)hwdev)->chip_present_flag = SPHW_CHIP_PRESENT; -} - -void sphw_set_chip_absent(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - sdk_err(dev->dev_hdl, "Card not present\n"); - dev->chip_present_flag = SPHW_CHIP_ABSENT; -} - -int sphw_get_chip_present_flag(const void *hwdev) -{ - if (!hwdev) - return 0; - - return ((struct sphw_hwdev *)hwdev)->chip_present_flag; -} - -/* TODO */ -void sphw_force_complete_all(void *hwdev) -{ -} - -void sphw_detect_hw_present(void *hwdev) -{ - u32 addr, attr1; - - addr = SPHW_CSR_FUNC_ATTR1_ADDR; - attr1 = sphw_hwif_read_reg(((struct sphw_hwdev *)hwdev)->hwif, addr); - if (attr1 == SPHW_PCIE_LINK_DOWN) { - sphw_set_chip_absent(hwdev); - sphw_force_complete_all(hwdev); - } -} - -/** - * dma_attr_table_init - initialize the default dma attributes - * @hwdev: the pointer to hw device - **/ -static int dma_attr_table_init(struct sphw_hwdev *hwdev) -{ - u32 addr, val, dst_attr; - - /* Use indirect access should set entry_idx first*/ - addr = SPHW_CSR_DMA_ATTR_INDIR_IDX_ADDR; - val = sphw_hwif_read_reg(hwdev->hwif, addr); - val = SPHW_DMA_ATTR_INDIR_IDX_CLEAR(val, IDX); - - val |= SPHW_DMA_ATTR_INDIR_IDX_SET(PCIE_MSIX_ATTR_ENTRY, IDX); - - sphw_hwif_write_reg(hwdev->hwif, addr, val); - - wmb(); /* write index before config */ - - addr = SPHW_CSR_DMA_ATTR_TBL_ADDR; - val = sphw_hwif_read_reg(hwdev->hwif, addr); - dst_attr = SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_ST_DISABLE, ST) | - SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_AT_DISABLE, AT) | - SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_PH_DISABLE, PH) | - SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_SNOOP, NO_SNOOPING) | - SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_TPH_DISABLE, TPH_EN); - if (dst_attr == val) - return 0; - - return sphw_set_dma_attr_tbl(hwdev, PCIE_MSIX_ATTR_ENTRY, SPHW_PCIE_ST_DISABLE, - SPHW_PCIE_AT_DISABLE, SPHW_PCIE_PH_DISABLE, - SPHW_PCIE_SNOOP, SPHW_PCIE_TPH_DISABLE); -} - -static int init_aeqs_msix_attr(struct sphw_hwdev *hwdev) -{ - struct sphw_aeqs *aeqs = hwdev->aeqs; - struct interrupt_info info = {0}; - struct sphw_eq *eq = NULL; - int q_id; - int err; - - info.lli_set = 0; - info.interrupt_coalesc_set = 1; - info.pending_limt = SPHW_DEAULT_EQ_MSIX_PENDING_LIMIT; - info.coalesc_timer_cfg = SPHW_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; - info.resend_timer_cfg = SPHW_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; - - for (q_id = aeqs->num_aeqs - 1; q_id >= 0; q_id--) { - eq = &aeqs->aeq[q_id]; - info.msix_index = eq->eq_irq.msix_entry_idx; - err = sphw_set_interrupt_cfg_direct(hwdev, &info, SPHW_CHANNEL_COMM); - if (err) { - sdk_err(hwdev->dev_hdl, "Set msix attr for aeq %d failed\n", - q_id); - return -EFAULT; - } - } - - return 0; -} - -static int init_ceqs_msix_attr(struct 
sphw_hwdev *hwdev) -{ - struct sphw_ceqs *ceqs = hwdev->ceqs; - struct interrupt_info info = {0}; - struct sphw_eq *eq = NULL; - u16 q_id; - int err; - - info.lli_set = 0; - info.interrupt_coalesc_set = 1; - info.pending_limt = SPHW_DEAULT_EQ_MSIX_PENDING_LIMIT; - info.coalesc_timer_cfg = SPHW_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; - info.resend_timer_cfg = SPHW_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; - - for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { - eq = &ceqs->ceq[q_id]; - info.msix_index = eq->eq_irq.msix_entry_idx; - err = sphw_set_interrupt_cfg(hwdev, info, SPHW_CHANNEL_COMM); - if (err) { - sdk_err(hwdev->dev_hdl, "Set msix attr for ceq %u failed\n", - q_id); - return -EFAULT; - } - } - - return 0; -} - -static int sphw_comm_aeqs_init(struct sphw_hwdev *hwdev) -{ - struct irq_info aeq_irqs[SPHW_MAX_AEQS] = {{0} }; - u16 num_aeqs, resp_num_irq = 0, i; - int err; - - num_aeqs = SPHW_HWIF_NUM_AEQS(hwdev->hwif); - if (num_aeqs > SPHW_MAX_AEQS) { - sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n", - SPHW_MAX_AEQS); - num_aeqs = SPHW_MAX_AEQS; - } - err = sphw_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs, &resp_num_irq); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %u\n", - num_aeqs); - return err; - } - - if (resp_num_irq < num_aeqs) { - sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %u\n", - resp_num_irq); - num_aeqs = resp_num_irq; - } - - err = sphw_aeqs_init(hwdev, num_aeqs, aeq_irqs); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n"); - goto aeqs_init_err; - } - - return 0; - -aeqs_init_err: - for (i = 0; i < num_aeqs; i++) - sphw_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); - - return err; -} - -static void sphw_comm_aeqs_free(struct sphw_hwdev *hwdev) -{ - struct irq_info aeq_irqs[SPHW_MAX_AEQS] = {{0} }; - u16 num_irqs, i; - - sphw_get_aeq_irqs(hwdev, aeq_irqs, &num_irqs); - - sphw_aeqs_free(hwdev); - - for (i = 0; i < num_irqs; i++) - sphw_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); -} - -static int sphw_comm_ceqs_init(struct sphw_hwdev *hwdev) -{ - struct irq_info ceq_irqs[SPHW_MAX_CEQS] = {{0} }; - u16 num_ceqs, resp_num_irq = 0, i; - int err; - - num_ceqs = SPHW_HWIF_NUM_CEQS(hwdev->hwif); - if (num_ceqs > SPHW_MAX_CEQS) { - sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", - SPHW_MAX_CEQS); - num_ceqs = SPHW_MAX_CEQS; - } - - err = sphw_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs, &resp_num_irq); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %u\n", - num_ceqs); - return err; - } - - if (resp_num_irq < num_ceqs) { - sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %u\n", - resp_num_irq); - num_ceqs = resp_num_irq; - } - - err = sphw_ceqs_init(hwdev, num_ceqs, ceq_irqs); - if (err) { - sdk_err(hwdev->dev_hdl, - "Failed to init ceqs, err:%d\n", err); - goto ceqs_init_err; - } - - return 0; - -ceqs_init_err: - for (i = 0; i < num_ceqs; i++) - sphw_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); - - return err; -} - -static void sphw_comm_ceqs_free(struct sphw_hwdev *hwdev) -{ - struct irq_info ceq_irqs[SPHW_MAX_CEQS] = {{0} }; - u16 num_irqs; - int i; - - sphw_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs); - - sphw_ceqs_free(hwdev); - - for (i = 0; i < num_irqs; i++) - sphw_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); -} - -static int sphw_comm_func_to_func_init(struct sphw_hwdev *hwdev) -{ - int err; - - err = sphw_func_to_func_init(hwdev); - if (err) - return err; - - sphw_aeq_register_hw_cb(hwdev, SPHW_MBX_FROM_FUNC, sphw_mbox_func_aeqe_handler); - 
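sphw_comm_aeqs_init() and sphw_comm_ceqs_init() above both clamp the requested queue count to a hardware maximum and then accept a smaller IRQ grant rather than failing outright. A compact sketch of that request-then-adjust flow, with alloc_vectors() standing in for sphw_alloc_irqs():

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES 4

/* Stand-in for sphw_alloc_irqs(): may grant fewer vectors than asked. */
static int alloc_vectors(uint16_t want, uint16_t *granted)
{
	*granted = want > 2 ? 2 : want; /* pretend only 2 are spare */
	return 0;
}

static int queues_init(uint16_t num_req)
{
	uint16_t granted = 0;

	if (num_req > MAX_QUEUES) {
		fprintf(stderr, "Adjust queue num to %d\n", MAX_QUEUES);
		num_req = MAX_QUEUES;
	}

	if (alloc_vectors(num_req, &granted))
		return -1;

	/* Accept a smaller grant instead of failing the whole init. */
	if (granted < num_req) {
		fprintf(stderr, "Adjust queue num to %u\n", granted);
		num_req = granted;
	}

	printf("initializing %u queues\n", num_req);
	return 0;
}

int main(void)
{
	return queues_init(8);
}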
sphw_aeq_register_hw_cb(hwdev, SPHW_MSG_FROM_MGMT_CPU, sphw_mgmt_msg_aeqe_handler); - - if (!SPHW_IS_VF(hwdev)) - sphw_register_pf_mbox_cb(hwdev, SPHW_MOD_COMM, hwdev->func_to_func, - pf_handle_vf_comm_mbox); - else - sphw_register_vf_mbox_cb(hwdev, SPHW_MOD_COMM, hwdev->func_to_func, - vf_handle_pf_comm_mbox); - - return 0; -} - -static void sphw_comm_func_to_func_free(struct sphw_hwdev *hwdev) -{ - sphw_aeq_unregister_hw_cb(hwdev, SPHW_MBX_FROM_FUNC); - - if (!SPHW_IS_VF(hwdev)) { - sphw_unregister_pf_mbox_cb(hwdev, SPHW_MOD_COMM); - } else { - sphw_unregister_vf_mbox_cb(hwdev, SPHW_MOD_COMM); - - sphw_aeq_unregister_hw_cb(hwdev, SPHW_MSG_FROM_MGMT_CPU); - } - - sphw_func_to_func_free(hwdev); -} - -static int sphw_comm_pf_to_mgmt_init(struct sphw_hwdev *hwdev) -{ - int err; - - /* VF do not support api chain */ - if (sphw_func_type(hwdev) == TYPE_VF || - !COMM_SUPPORT_API_CHAIN(hwdev)) - return 0; - - err = sphw_pf_to_mgmt_init(hwdev); - if (err) - return err; - - sphw_register_mgmt_msg_cb(hwdev, SPHW_MOD_COMM, hwdev->pf_to_mgmt, - pf_handle_mgmt_comm_event); - - return 0; -} - -static void sphw_comm_pf_to_mgmt_free(struct sphw_hwdev *hwdev) -{ - /* VF do not support api chain */ - if (sphw_func_type(hwdev) == TYPE_VF || - !COMM_SUPPORT_API_CHAIN(hwdev)) - return; - - sphw_unregister_mgmt_msg_cb(hwdev, SPHW_MOD_COMM); - - sphw_aeq_unregister_hw_cb(hwdev, SPHW_MSG_FROM_MGMT_CPU); - - sphw_pf_to_mgmt_free(hwdev); -} - -static int sphw_comm_cmdqs_init(struct sphw_hwdev *hwdev) -{ - int err; - - err = sphw_cmdqs_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); - return err; - } - - sphw_ceq_register_cb(hwdev, SPHW_CMDQ, sphw_cmdq_ceq_handler); - - err = sphw_set_cmdq_depth(hwdev, SPHW_CMDQ_DEPTH); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n"); - goto set_cmdq_depth_err; - } - - return 0; - -set_cmdq_depth_err: - sphw_cmdqs_free(hwdev); - - return err; -} - -static void sphw_comm_cmdqs_free(struct sphw_hwdev *hwdev) -{ - sphw_ceq_unregister_cb(hwdev, SPHW_CMDQ); - sphw_cmdqs_free(hwdev); -} - -static void sphw_sync_mgmt_func_state(struct sphw_hwdev *hwdev) -{ - sphw_set_pf_status(hwdev->hwif, SPHW_PF_STATUS_ACTIVE_FLAG); -} - -static void sphw_unsync_mgmt_func_state(struct sphw_hwdev *hwdev) -{ - sphw_set_pf_status(hwdev->hwif, SPHW_PF_STATUS_INIT); -} - -static int init_basic_attributes(struct sphw_hwdev *hwdev) -{ - u64 drv_feature[COMM_MAX_FEATURE_QWORD] = {SPHW_DRV_FEATURE_QW0}; - int err, i; - - err = sphw_get_board_info(hwdev, &hwdev->board_info, SPHW_CHANNEL_COMM); - if (err) - return err; - - err = sphw_get_comm_features(hwdev, hwdev->features, COMM_MAX_FEATURE_QWORD); - if (err) { - sdk_err(hwdev->dev_hdl, "Get comm features failed\n"); - return err; - } - - sdk_info(hwdev->dev_hdl, "Comm hw features: 0x%llx, drv features: 0x%llx\n", - hwdev->features[0], drv_feature[0]); - - for (i = 0; i < COMM_MAX_FEATURE_QWORD; i++) - hwdev->features[i] &= drv_feature[i]; - - err = sphw_get_global_attr(hwdev, &hwdev->glb_attr); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to get global attribute\n"); - return err; - } - - sdk_info(hwdev->dev_hdl, "global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt cpu node id: 0x%x\n", - hwdev->glb_attr.max_host_num, hwdev->glb_attr.max_pf_num, - hwdev->glb_attr.vf_id_start, - hwdev->glb_attr.mgmt_host_node_id); - - sphw_init_profile_adapter(hwdev); - - return 0; -} - -static int init_basic_mgmt_channel(struct sphw_hwdev *hwdev) -{ - int err; - - err = sphw_comm_aeqs_init(hwdev); - 
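init_basic_attributes() below negotiates features by AND-ing what the hardware reports with what the driver implements, one 64-bit word at a time. A minimal illustration; only the API chain bit is visible in this code (SPHW_DRV_FEATURE_QW0 is COMM_F_API_CHAIN), so the second bit name here is an assumption:

#include <stdint.h>
#include <stdio.h>

#define MAX_FEATURE_QWORD 2
#define F_API_CHAIN    (1ULL << 0) /* mirrors COMM_F_API_CHAIN */
#define F_CHANNEL_LOCK (1ULL << 1) /* illustrative second feature bit */

int main(void)
{
	/* What the device reports vs. what this driver implements. */
	uint64_t hw_features[MAX_FEATURE_QWORD]  = { F_API_CHAIN | F_CHANNEL_LOCK, 0 };
	uint64_t drv_features[MAX_FEATURE_QWORD] = { F_API_CHAIN, 0 };
	int i;

	/* The negotiated set is the intersection, qword by qword. */
	for (i = 0; i < MAX_FEATURE_QWORD; i++)
		hw_features[i] &= drv_features[i];

	printf("negotiated: 0x%llx\n", (unsigned long long)hw_features[0]);
	return 0;
}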
if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n"); - return err; - } - - err = sphw_comm_func_to_func_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init mailbox\n"); - goto func_to_func_init_err; - } - - err = init_aeqs_msix_attr(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n"); - goto aeqs_msix_attr_init_err; - } - - return 0; - -aeqs_msix_attr_init_err: - sphw_comm_func_to_func_free(hwdev); - -func_to_func_init_err: - sphw_comm_aeqs_free(hwdev); - - return err; -} - -static void free_base_mgmt_channel(struct sphw_hwdev *hwdev) -{ - sphw_comm_func_to_func_free(hwdev); - sphw_comm_aeqs_free(hwdev); -} - -static int init_pf_mgmt_channel(struct sphw_hwdev *hwdev) -{ - int err; - - err = sphw_comm_pf_to_mgmt_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init pf to mgmt\n"); - return err; - } - - return 0; -} - -static void free_pf_mgmt_channel(struct sphw_hwdev *hwdev) -{ - sphw_comm_pf_to_mgmt_free(hwdev); -} - -static int init_mgmt_channel_post(struct sphw_hwdev *hwdev) -{ - int err; - - /* mbox host channel resources will be freed in - * sphw_func_to_func_free - */ - err = sphw_mbox_init_host_msg_channel(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init mbox host channel\n"); - return err; - } - - err = init_pf_mgmt_channel(hwdev); - if (err) - return err; - - return 0; -} - -static void free_mgmt_msg_channel_post(struct sphw_hwdev *hwdev) -{ - free_pf_mgmt_channel(hwdev); -} - -static int init_cmdqs_channel(struct sphw_hwdev *hwdev) -{ - int err; - - err = dma_attr_table_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init dma attr table\n"); - goto dma_attr_init_err; - } - - err = sphw_comm_ceqs_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n"); - goto ceqs_init_err; - } - - err = init_ceqs_msix_attr(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n"); - goto init_ceq_msix_err; - } - - /* set default wq page_size */ - if (wq_page_order > SPHW_MAX_WQ_PAGE_SIZE_ORDER) { - sdk_info(hwdev->dev_hdl, "wq_page_order exceed limit[0, %d], reset to %d\n", - SPHW_MAX_WQ_PAGE_SIZE_ORDER, - SPHW_MAX_WQ_PAGE_SIZE_ORDER); - wq_page_order = SPHW_MAX_WQ_PAGE_SIZE_ORDER; - } - hwdev->wq_page_size = SPHW_HW_WQ_PAGE_SIZE * (1U << wq_page_order); - sdk_info(hwdev->dev_hdl, "WQ page size: 0x%x\n", hwdev->wq_page_size); - err = sphw_set_wq_page_size(hwdev, sphw_global_func_id(hwdev), hwdev->wq_page_size, - SPHW_CHANNEL_COMM); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n"); - goto init_wq_pg_size_err; - } - - err = sphw_comm_cmdqs_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); - goto cmdq_init_err; - } - - return 0; - -cmdq_init_err: - if (SPHW_FUNC_TYPE(hwdev) != TYPE_VF) - sphw_set_wq_page_size(hwdev, sphw_global_func_id(hwdev), SPHW_HW_WQ_PAGE_SIZE, - SPHW_CHANNEL_COMM); -init_wq_pg_size_err: -init_ceq_msix_err: - sphw_comm_ceqs_free(hwdev); - -ceqs_init_err: -dma_attr_init_err: - - return err; -} - -int sphw_init_comm_ch(struct sphw_hwdev *hwdev) -{ - int err; - - err = init_basic_mgmt_channel(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init mgmt channel\n"); - return err; - } - - err = sphw_func_reset(hwdev, sphw_global_func_id(hwdev), SPHW_COMM_RES, SPHW_CHANNEL_COMM); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to reset function\n"); - goto func_reset_err; - } - - err = init_basic_attributes(hwdev); - if (err) { - 
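init_cmdqs_channel() above clamps the wq_page_order module parameter and derives the work-queue page size as base_page << order. The arithmetic in isolation (0x1000 is an assumed base size; the driver's SPHW_HW_WQ_PAGE_SIZE is defined elsewhere):

#include <stdint.h>
#include <stdio.h>

#define HW_WQ_PAGE_SIZE        0x1000u /* assumed 4 KiB base page */
#define MAX_WQ_PAGE_SIZE_ORDER 8

static uint32_t wq_page_size(int order)
{
	/* An out-of-range module parameter is clamped, not rejected. */
	if (order > MAX_WQ_PAGE_SIZE_ORDER) {
		fprintf(stderr, "order exceeds limit [0, %d], reset to %d\n",
			MAX_WQ_PAGE_SIZE_ORDER, MAX_WQ_PAGE_SIZE_ORDER);
		order = MAX_WQ_PAGE_SIZE_ORDER;
	}
	return HW_WQ_PAGE_SIZE * (1U << order);
}

int main(void)
{
	printf("WQ page size: 0x%x\n", wq_page_size(2));  /* 0x4000 */
	printf("WQ page size: 0x%x\n", wq_page_size(42)); /* clamped to order 8 */
	return 0;
}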
sdk_err(hwdev->dev_hdl, "Failed to init basic attributes\n"); - goto init_basic_attr_err; - } - - err = init_mgmt_channel_post(hwdev); - if (err) - goto init_mgmt_channel_post_err; - - err = init_cmdqs_channel(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init cmdq channel\n"); - goto init_cmdqs_channel_err; - } - - sphw_sync_mgmt_func_state(hwdev); - - if (SPHW_F_CHANNEL_LOCK_EN(hwdev)) { - sphw_mbox_enable_channel_lock(hwdev, true); - sphw_cmdq_enable_channel_lock(hwdev, true); - } - - return 0; - -init_cmdqs_channel_err: - free_mgmt_msg_channel_post(hwdev); -init_mgmt_channel_post_err: -init_basic_attr_err: -func_reset_err: - free_base_mgmt_channel(hwdev); - - return err; -} - -void sphw_uninit_comm_ch(struct sphw_hwdev *hwdev) -{ - sphw_unsync_mgmt_func_state(hwdev); - - sphw_comm_cmdqs_free(hwdev); - - if (SPHW_FUNC_TYPE(hwdev) != TYPE_VF) - sphw_set_wq_page_size(hwdev, sphw_global_func_id(hwdev), SPHW_HW_WQ_PAGE_SIZE, - SPHW_CHANNEL_COMM); - - sphw_comm_ceqs_free(hwdev); - - sphw_deinit_profile_adapter(hwdev); - - free_mgmt_msg_channel_post(hwdev); - - free_base_mgmt_channel(hwdev); -} - -int sphw_init_hwdev(struct sphw_init_para *para) -{ - struct sphw_hwdev *hwdev; - int err; - - hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); - if (!hwdev) - return -ENOMEM; - - *para->hwdev = hwdev; - hwdev->adapter_hdl = para->adapter_hdl; - hwdev->pcidev_hdl = para->pcidev_hdl; - hwdev->dev_hdl = para->dev_hdl; - hwdev->chip_node = para->chip_node; - - hwdev->chip_fault_stats = vzalloc(SPHW_CHIP_FAULT_SIZE); - if (!hwdev->chip_fault_stats) - goto alloc_chip_fault_stats_err; - - err = sphw_init_hwif(hwdev, para->cfg_reg_base, para->intr_reg_base, - para->mgmt_reg_base, para->db_base_phy, - para->db_base, para->db_dwqe_len); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init hwif\n"); - goto init_hwif_err; - } - - sphw_set_chip_present(hwdev); - - if (disable_cfg_comm) - return 0; - - hwdev->workq = alloc_workqueue(SPHW_HWDEV_WQ_NAME, WQ_MEM_RECLAIM, - SPHW_WQ_MAX_REQ); - if (!hwdev->workq) { - sdk_err(hwdev->dev_hdl, "Failed to alloc hardware workq\n"); - goto alloc_workq_err; - } - - sphw_init_heartbeat_detect(hwdev); - - err = init_cfg_mgmt(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n"); - goto init_cfg_mgmt_err; - } - - err = sphw_init_comm_ch(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n"); - goto init_comm_ch_err; - } - - err = init_capability(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init capability\n"); - goto init_cap_err; - } - - err = sphw_set_comm_features(hwdev, hwdev->features, COMM_MAX_FEATURE_QWORD); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to set comm features\n"); - goto set_feature_err; - } - - return 0; - -set_feature_err: - free_capability(hwdev); - -init_cap_err: - sphw_uninit_comm_ch(hwdev); - -init_comm_ch_err: - free_cfg_mgmt(hwdev); - -init_cfg_mgmt_err: - sphw_destroy_heartbeat_detect(hwdev); - destroy_workqueue(hwdev->workq); - -alloc_workq_err: - sphw_free_hwif(hwdev); - -init_hwif_err: - vfree(hwdev->chip_fault_stats); - -alloc_chip_fault_stats_err: - kfree(hwdev); - *para->hwdev = NULL; - - return -EFAULT; -} - -void sphw_free_hwdev(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - sphw_func_rx_tx_flush(hwdev, SPHW_CHANNEL_COMM); - - free_capability(dev); - - sphw_uninit_comm_ch(dev); - - free_cfg_mgmt(dev); - sphw_destroy_heartbeat_detect(hwdev); - destroy_workqueue(dev->workq); - sphw_free_hwif(dev); - vfree(dev->chip_fault_stats); - - kfree(dev); -} - 
-void *sphw_get_pcidev_hdl(void *hwdev) -{ - struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; - - if (!hwdev) - return NULL; - - return dev->pcidev_hdl; -} - -int sphw_register_service_adapter(void *hwdev, void *service_adapter, enum sphw_service_type type) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev || !service_adapter || type >= SERVICE_T_MAX) - return -EINVAL; - - if (dev->service_adapter[type]) - return -EINVAL; - - dev->service_adapter[type] = service_adapter; - - return 0; -} - -void sphw_unregister_service_adapter(void *hwdev, enum sphw_service_type type) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev || type >= SERVICE_T_MAX) - return; - - dev->service_adapter[type] = NULL; -} - -void *sphw_get_service_adapter(void *hwdev, enum sphw_service_type type) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev || type >= SERVICE_T_MAX) - return NULL; - - return dev->service_adapter[type]; -} - -int sphw_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size) -{ - if (*out_size != sizeof(struct sphw_hw_stats)) { - pr_err("Unexpect out buf size from user :%u, expect: %lu\n", - *out_size, sizeof(struct sphw_hw_stats)); - return -EFAULT; - } - - memcpy(hw_stats, &((struct sphw_hwdev *)hwdev)->hw_stats, - sizeof(struct sphw_hw_stats)); - return 0; -} - -u16 sphw_dbg_clear_hw_stats(void *hwdev) -{ - memset((void *)&((struct sphw_hwdev *)hwdev)->hw_stats, 0, - sizeof(struct sphw_hw_stats)); - memset((void *)((struct sphw_hwdev *)hwdev)->chip_fault_stats, 0, - SPHW_CHIP_FAULT_SIZE); - return sizeof(struct sphw_hw_stats); -} - -void sphw_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, u32 offset) -{ - u32 copy_len = offset + MAX_DRV_BUF_SIZE - SPHW_CHIP_FAULT_SIZE; - - if (offset + MAX_DRV_BUF_SIZE <= SPHW_CHIP_FAULT_SIZE) - memcpy(chip_fault_stats, - ((struct sphw_hwdev *)hwdev)->chip_fault_stats - + offset, MAX_DRV_BUF_SIZE); - else - memcpy(chip_fault_stats, - ((struct sphw_hwdev *)hwdev)->chip_fault_stats - + offset, copy_len); -} - -void sphw_event_register(void *dev, void *pri_handle, sphw_event_handler callback) -{ - struct sphw_hwdev *hwdev = dev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for register event\n"); - return; - } - - hwdev->event_callback = callback; - hwdev->event_pri_handle = pri_handle; -} - -void sphw_event_unregister(void *dev) -{ - struct sphw_hwdev *hwdev = dev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for register event\n"); - return; - } - - hwdev->event_callback = NULL; - hwdev->event_pri_handle = NULL; -} - -void sphw_event_callback(void *hwdev, struct sphw_event_info *event) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) { - pr_err("Hwdev pointer is NULL for event callback\n"); - return; - } - - if (!dev->event_callback) { - sdk_info(dev->dev_hdl, "Event callback function not register\n"); - return; - } - - dev->event_callback(dev->event_pri_handle, event); -} - -void sphw_set_pcie_order_cfg(void *handle) -{ -} - -void sphw_disable_mgmt_msg_report(void *hwdev) -{ - struct sphw_hwdev *hw_dev = (struct sphw_hwdev *)hwdev; - - sphw_set_pf_status(hw_dev->hwif, SPHW_PF_STATUS_INIT); -} - -void sphw_record_pcie_error(void *hwdev) -{ - struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; - - if (!hwdev) - return; - - atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats); -} - -int sphw_get_card_present_state(void *hwdev, bool *card_present_state) -{ - struct sphw_hwdev *dev = hwdev; - u32 addr, attr1; - - if (!hwdev || !card_present_state) - return -EINVAL; - - addr = SPHW_CSR_FUNC_ATTR1_ADDR; - attr1 = 
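sphw_get_chip_fault_stats() below copies the fault buffer out one fixed-size window at a time and shortens the final chunk. A sketch of a bounded window copy in the same spirit; note this version clamps the tail to the bytes remaining after offset, and the sizes are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOTAL_SIZE 100u
#define CHUNK_SIZE 40u

/* Copy one window of a stats buffer, shortening the last chunk so the
 * read never runs past the end of the source.
 */
static size_t read_window(uint8_t *dst, const uint8_t *src, uint32_t offset)
{
	size_t len;

	if (offset >= TOTAL_SIZE)
		return 0;

	len = (offset + CHUNK_SIZE <= TOTAL_SIZE) ? CHUNK_SIZE
						  : TOTAL_SIZE - offset;
	memcpy(dst, src + offset, len);
	return len;
}

int main(void)
{
	uint8_t stats[TOTAL_SIZE] = { 0 };
	uint8_t buf[CHUNK_SIZE];

	printf("copied %zu bytes\n", read_window(buf, stats, 80)); /* 20 */
	return 0;
}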
sphw_hwif_read_reg(dev->hwif, addr); - if (attr1 == SPHW_PCIE_LINK_DOWN) { - sdk_warn(dev->dev_hdl, "Card is not present\n"); - *card_present_state = (bool)0; - } else { - *card_present_state = (bool)1; - } - - return 0; -} - -void sphw_link_event_stats(void *dev, u8 link) -{ - struct sphw_hwdev *hwdev = dev; - - if (link) - atomic_inc(&hwdev->hw_stats.link_event_stats.link_up_stats); - else - atomic_inc(&hwdev->hw_stats.link_event_stats.link_down_stats); -} - -u8 sphw_max_pf_num(void *hwdev) -{ - if (!hwdev) - return 0; - - return SPHW_MAX_PF_NUM((struct sphw_hwdev *)hwdev); -} - -void sphw_fault_event_report(void *hwdev, u16 src, u16 level) -{ - if (!hwdev) - return; - - sdk_info(((struct sphw_hwdev *)hwdev)->dev_hdl, "Fault event report, src: %u, level: %u\n", - src, level); - - sphw_fault_post_process(hwdev, src, level); -} - -void sphw_heartbeat_lost_handler(struct work_struct *work) -{ - struct sphw_event_info event_info = { 0 }; - struct sphw_hwdev *hwdev = container_of(work, struct sphw_hwdev, - heartbeat_lost_work); - u16 src, level; - - atomic_inc(&hwdev->hw_stats.heart_lost_stats); - - if (hwdev->event_callback) { - event_info.type = - hwdev->pcie_link_down ? SPHW_EVENT_PCIE_LINK_DOWN : - SPHW_EVENT_HEART_LOST; - hwdev->event_callback(hwdev->event_pri_handle, &event_info); - } - - if (hwdev->pcie_link_down) { - src = SPHW_FAULT_SRC_PCIE_LINK_DOWN; - level = FAULT_LEVEL_HOST; - sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n"); - } else { - src = SPHW_FAULT_SRC_HOST_HEARTBEAT_LOST; - level = FAULT_LEVEL_FATAL; - sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n", - sphw_global_func_id(hwdev)); - } - - sphw_fault_post_process(hwdev, src, level); -} - -#define DETECT_PCIE_LINK_DOWN_RETRY 2 -#define SPHW_HEARTBEAT_START_EXPIRE 5000 -#define SPHW_HEARTBEAT_PERIOD 1000 - -static bool sphw_is_hw_abnormal(struct sphw_hwdev *hwdev) -{ - u32 status; - - if (!sphw_get_chip_present_flag(hwdev)) - return false; - - status = sphw_get_heartbeat_status(hwdev); - if (status == SPHW_PCIE_LINK_DOWN) { - sdk_warn(hwdev->dev_hdl, "Detect BAR register read failed\n"); - hwdev->rd_bar_err_cnt++; - if (hwdev->rd_bar_err_cnt >= DETECT_PCIE_LINK_DOWN_RETRY) { - sphw_set_chip_absent(hwdev); - sphw_force_complete_all(hwdev); - hwdev->pcie_link_down = true; - return true; - } - - return false; - } - - if (status) { - hwdev->heartbeat_lost = true; - return true; - } - - hwdev->rd_bar_err_cnt = 0; - - return false; -} - -static void sphw_heartbeat_timer_handler(struct timer_list *t) -{ - struct sphw_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer); - - if (sphw_is_hw_abnormal(hwdev)) - queue_work(hwdev->workq, &hwdev->heartbeat_lost_work); - else - mod_timer(&hwdev->heartbeat_timer, - jiffies + msecs_to_jiffies(SPHW_HEARTBEAT_PERIOD)); -} - -static void sphw_init_heartbeat_detect(struct sphw_hwdev *hwdev) -{ - timer_setup(&hwdev->heartbeat_timer, sphw_heartbeat_timer_handler, 0); - - hwdev->heartbeat_timer.expires = - jiffies + msecs_to_jiffies(SPHW_HEARTBEAT_START_EXPIRE); - - add_timer(&hwdev->heartbeat_timer); - - INIT_WORK(&hwdev->heartbeat_lost_work, sphw_heartbeat_lost_handler); -} - -static void sphw_destroy_heartbeat_detect(struct sphw_hwdev *hwdev) -{ - del_timer_sync(&hwdev->heartbeat_timer); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h deleted file mode 100644 index 10da31bda3d2a15fc928e57af73acc810b134339..0000000000000000000000000000000000000000 --- 
a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h +++ /dev/null @@ -1,93 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_HWDEV_H -#define SPHW_HWDEV_H - -#include "sphw_mt.h" -#include "sphw_crm.h" -#include "sphw_hw.h" - -struct cfg_mgmt_info; - -struct sphw_hwif; -struct sphw_aeqs; -struct sphw_ceqs; -struct sphw_mbox; -struct sphw_msg_pf_to_mgmt; - -struct sphw_page_addr { - void *virt_addr; - u64 phys_addr; -}; - -struct mqm_addr_trans_tbl_info { - u32 chunk_num; - u32 search_gpa_num; - u32 page_size; - u32 page_num; - struct sphw_page_addr *brm_srch_page_addr; -}; - -struct sphw_hwdev { - void *adapter_hdl; /* pointer to spnic_pcidev or NDIS_Adapter */ - void *pcidev_hdl; /* pointer to pcidev or Handler */ - void *dev_hdl; /* pointer to pcidev->dev or Handler, for - * sdk_err() or dma_alloc() - */ - - void *service_adapter[SERVICE_T_MAX]; - void *chip_node; - void *ppf_hwdev; - - u32 wq_page_size; - int chip_present_flag; - - struct sphw_hwif *hwif; /* include void __iomem *bar */ - struct comm_global_attr glb_attr; - u64 features[COMM_MAX_FEATURE_QWORD]; - - struct cfg_mgmt_info *cfg_mgmt; - - struct sphw_cmdqs *cmdqs; - struct sphw_aeqs *aeqs; - struct sphw_ceqs *ceqs; - struct sphw_mbox *func_to_func; - struct sphw_msg_pf_to_mgmt *pf_to_mgmt; - - void *cqm_hdl; - struct mqm_addr_trans_tbl_info mqm_att; - struct sphw_page_addr page_pa0; - struct sphw_page_addr page_pa1; - u32 statufull_ref_cnt; - - struct sphw_hw_stats hw_stats; - u8 *chip_fault_stats; - - sphw_event_handler event_callback; - void *event_pri_handle; - - struct sphw_board_info board_info; - - int prof_adap_type; - struct sphw_prof_attr *prof_attr; - - struct workqueue_struct *workq; - - u32 rd_bar_err_cnt; - bool pcie_link_down; - bool heartbeat_lost; - struct timer_list heartbeat_timer; - struct work_struct heartbeat_lost_work; -}; - -#define SPHW_MAX_HOST_NUM(hwdev) ((hwdev)->glb_attr.max_host_num) -#define SPHW_MAX_PF_NUM(hwdev) ((hwdev)->glb_attr.max_pf_num) -#define SPHW_MGMT_CPU_NODE_ID(hwdev) ((hwdev)->glb_attr.mgmt_host_node_id) - -#define COMM_FEATURE_QW0(hwdev, feature) ((hwdev)->features[0] & COMM_F_##feature) -#define COMM_SUPPORT_API_CHAIN(hwdev) COMM_FEATURE_QW0(hwdev, API_CHAIN) - -#define SPHW_DRV_FEATURE_QW0 COMM_F_API_CHAIN - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c deleted file mode 100644 index fbb1128957f0c6a67e3630e74a49c0ae97e99c61..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c +++ /dev/null @@ -1,886 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include - -#include "sphw_csr.h" -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" - -#define WAIT_HWIF_READY_TIMEOUT 10000 -#define SPHW_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000 - -#define DB_IDX(db, db_base) \ - ((u32)(((ulong)(db) - (ulong)(db_base)) / \ - SPHW_DB_PAGE_SIZE)) - -#define SPHW_AF0_FUNC_GLOBAL_IDX_SHIFT 0 -#define SPHW_AF0_P2P_IDX_SHIFT 12 -#define SPHW_AF0_PCI_INTF_IDX_SHIFT 17 -#define SPHW_AF0_VF_IN_PF_SHIFT 20 -#define SPHW_AF0_FUNC_TYPE_SHIFT 28 - -#define SPHW_AF0_FUNC_GLOBAL_IDX_MASK 0xFFF -#define SPHW_AF0_P2P_IDX_MASK 0x1F -#define SPHW_AF0_PCI_INTF_IDX_MASK 0x7 -#define SPHW_AF0_VF_IN_PF_MASK 0xFF -#define 
SPHW_AF0_FUNC_TYPE_MASK 0x1 - -#define SPHW_AF0_GET(val, member) \ - (((val) >> SPHW_AF0_##member##_SHIFT) & SPHW_AF0_##member##_MASK) - -#define SPHW_AF1_PPF_IDX_SHIFT 0 -#define SPHW_AF1_AEQS_PER_FUNC_SHIFT 8 -#define SPHW_AF1_MGMT_INIT_STATUS_SHIFT 30 -#define SPHW_AF1_PF_INIT_STATUS_SHIFT 31 - -#define SPHW_AF1_PPF_IDX_MASK 0x3F -#define SPHW_AF1_AEQS_PER_FUNC_MASK 0x3 -#define SPHW_AF1_MGMT_INIT_STATUS_MASK 0x1 -#define SPHW_AF1_PF_INIT_STATUS_MASK 0x1 - -#define SPHW_AF1_GET(val, member) \ - (((val) >> SPHW_AF1_##member##_SHIFT) & SPHW_AF1_##member##_MASK) - -#define SPHW_AF2_CEQS_PER_FUNC_SHIFT 0 -#define SPHW_AF2_DMA_ATTR_PER_FUNC_SHIFT 9 -#define SPHW_AF2_IRQS_PER_FUNC_SHIFT 16 - -#define SPHW_AF2_CEQS_PER_FUNC_MASK 0x1FF -#define SPHW_AF2_DMA_ATTR_PER_FUNC_MASK 0x7 -#define SPHW_AF2_IRQS_PER_FUNC_MASK 0x7FF - -#define SPHW_AF2_GET(val, member) \ - (((val) >> SPHW_AF2_##member##_SHIFT) & SPHW_AF2_##member##_MASK) - -#define SPHW_AF3_GLOBAL_VF_ID_OF_NXT_PF_SHIFT 0 -#define SPHW_AF3_GLOBAL_VF_ID_OF_PF_SHIFT 16 - -#define SPHW_AF3_GLOBAL_VF_ID_OF_NXT_PF_MASK 0xFFF -#define SPHW_AF3_GLOBAL_VF_ID_OF_PF_MASK 0xFFF - -#define SPHW_AF3_GET(val, member) \ - (((val) >> SPHW_AF3_##member##_SHIFT) & SPHW_AF3_##member##_MASK) - -#define SPHW_AF4_DOORBELL_CTRL_SHIFT 0 -#define SPHW_AF4_DOORBELL_CTRL_MASK 0x1 - -#define SPHW_AF4_GET(val, member) \ - (((val) >> SPHW_AF4_##member##_SHIFT) & SPHW_AF4_##member##_MASK) - -#define SPHW_AF4_SET(val, member) \ - (((val) & SPHW_AF4_##member##_MASK) << SPHW_AF4_##member##_SHIFT) - -#define SPHW_AF4_CLEAR(val, member) \ - ((val) & (~(SPHW_AF4_##member##_MASK << SPHW_AF4_##member##_SHIFT))) - -#define SPHW_AF5_OUTBOUND_CTRL_SHIFT 0 -#define SPHW_AF5_OUTBOUND_CTRL_MASK 0x1 - -#define SPHW_AF5_GET(val, member) \ - (((val) >> SPHW_AF5_##member##_SHIFT) & SPHW_AF5_##member##_MASK) - -#define SPHW_AF5_SET(val, member) \ - (((val) & SPHW_AF5_##member##_MASK) << SPHW_AF5_##member##_SHIFT) - -#define SPHW_AF5_CLEAR(val, member) \ - ((val) & (~(SPHW_AF5_##member##_MASK << SPHW_AF5_##member##_SHIFT))) - -#define SPHW_AF6_PF_STATUS_SHIFT 0 -#define SPHW_AF6_PF_STATUS_MASK 0xFFFF - -#define SPHW_AF6_SET(val, member) \ - ((((u32)(val)) & SPHW_AF6_##member##_MASK) << \ - SPHW_AF6_##member##_SHIFT) - -#define SPHW_AF6_GET(val, member) \ - (((val) >> SPHW_AF6_##member##_SHIFT) & SPHW_AF6_##member##_MASK) - -#define SPHW_AF6_CLEAR(val, member) \ - ((val) & (~(SPHW_AF6_##member##_MASK << \ - SPHW_AF6_##member##_SHIFT))) - -#define sphw_PPF_ELECT_PORT_IDX_SHIFT 0 - -#define sphw_PPF_ELECT_PORT_IDX_MASK 0x3F - -#define sphw_PPF_ELECT_PORT_GET(val, member) \ - (((val) >> sphw_PPF_ELECT_PORT_##member##_SHIFT) & \ - sphw_PPF_ELECT_PORT_##member##_MASK) - -#define SPHW_PPF_ELECTION_IDX_SHIFT 0 - -#define SPHW_PPF_ELECTION_IDX_MASK 0x3F - -#define SPHW_PPF_ELECTION_SET(val, member) \ - (((val) & SPHW_PPF_ELECTION_##member##_MASK) << \ - SPHW_PPF_ELECTION_##member##_SHIFT) - -#define SPHW_PPF_ELECTION_GET(val, member) \ - (((val) >> SPHW_PPF_ELECTION_##member##_SHIFT) & \ - SPHW_PPF_ELECTION_##member##_MASK) - -#define SPHW_PPF_ELECTION_CLEAR(val, member) \ - ((val) & (~(SPHW_PPF_ELECTION_##member##_MASK << \ - SPHW_PPF_ELECTION_##member##_SHIFT))) - -#define SPHW_MPF_ELECTION_IDX_SHIFT 0 - -#define SPHW_MPF_ELECTION_IDX_MASK 0x1F - -#define SPHW_MPF_ELECTION_SET(val, member) \ - (((val) & SPHW_MPF_ELECTION_##member##_MASK) << \ - SPHW_MPF_ELECTION_##member##_SHIFT) - -#define SPHW_MPF_ELECTION_GET(val, member) \ - (((val) >> SPHW_MPF_ELECTION_##member##_SHIFT) & \ - 
SPHW_MPF_ELECTION_##member##_MASK) - -#define SPHW_MPF_ELECTION_CLEAR(val, member) \ - ((val) & (~(SPHW_MPF_ELECTION_##member##_MASK << \ - SPHW_MPF_ELECTION_##member##_SHIFT))) - -#define SPHW_GET_REG_FLAG(reg) ((reg) & (~(SPHW_REGS_FLAG_MAKS))) - -#define SPHW_GET_REG_ADDR(reg) ((reg) & (SPHW_REGS_FLAG_MAKS)) - -u32 sphw_hwif_read_reg(struct sphw_hwif *hwif, u32 reg) -{ - if (SPHW_GET_REG_FLAG(reg) == SPHW_MGMT_REGS_FLAG) - return be32_to_cpu(readl(hwif->mgmt_regs_base + - SPHW_GET_REG_ADDR(reg))); - else - return be32_to_cpu(readl(hwif->cfg_regs_base + - SPHW_GET_REG_ADDR(reg))); -} - -void sphw_hwif_write_reg(struct sphw_hwif *hwif, u32 reg, u32 val) -{ - if (SPHW_GET_REG_FLAG(reg) == SPHW_MGMT_REGS_FLAG) - writel(cpu_to_be32(val), - hwif->mgmt_regs_base + SPHW_GET_REG_ADDR(reg)); - else - writel(cpu_to_be32(val), - hwif->cfg_regs_base + SPHW_GET_REG_ADDR(reg)); -} - -/** - * sphw_get_heartbeat_status - get heart beat status - * @hwdev: the pointer to hw device - * Return: 0 - normal, 1 - heart lost, 0xFFFFFFFF - Pcie link down - **/ -u32 sphw_get_heartbeat_status(struct sphw_hwdev *hwdev) -{ - u32 attr1; - - attr1 = sphw_hwif_read_reg(hwdev->hwif, SPHW_CSR_FUNC_ATTR1_ADDR); - if (attr1 == SPHW_PCIE_LINK_DOWN) - return attr1; - - return !SPHW_AF1_GET(attr1, MGMT_INIT_STATUS); -} - -/** - * hwif_ready - test if the HW initialization passed - * @hwdev: the pointer to hw device - * Return: 0 - success, negative - failure - **/ -static int hwif_ready(struct sphw_hwdev *hwdev) -{ - if (sphw_get_heartbeat_status(hwdev)) - return -EBUSY; - - return 0; -} - -static enum sphw_wait_return check_hwif_ready_handler(void *priv_data) -{ - if (!hwif_ready(priv_data)) - return WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -static int wait_hwif_ready(struct sphw_hwdev *hwdev) -{ - if (!sphw_wait_for_timeout(hwdev, check_hwif_ready_handler, - WAIT_HWIF_READY_TIMEOUT, USEC_PER_MSEC)) - return 0; - - sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n"); - return -EBUSY; -} - -/** - * set_hwif_attr - set the attributes as members in hwif - * @hwif: the hardware interface of a pci function device - * @attr0: the first attribute that was read from the hw - * @attr1: the second attribute that was read from the hw - * @attr2: the third attribute that was read from the hw - * @attr3: the fourth attribute that was read from the hw - **/ -static void set_hwif_attr(struct sphw_hwif *hwif, u32 attr0, u32 attr1, - u32 attr2, u32 attr3) -{ - hwif->attr.func_global_idx = SPHW_AF0_GET(attr0, FUNC_GLOBAL_IDX); - hwif->attr.port_to_port_idx = SPHW_AF0_GET(attr0, P2P_IDX); - hwif->attr.pci_intf_idx = SPHW_AF0_GET(attr0, PCI_INTF_IDX); - hwif->attr.vf_in_pf = SPHW_AF0_GET(attr0, VF_IN_PF); - hwif->attr.func_type = SPHW_AF0_GET(attr0, FUNC_TYPE); - - hwif->attr.ppf_idx = SPHW_AF1_GET(attr1, PPF_IDX); - hwif->attr.num_aeqs = BIT(SPHW_AF1_GET(attr1, AEQS_PER_FUNC)); - hwif->attr.num_ceqs = (u8)SPHW_AF2_GET(attr2, CEQS_PER_FUNC); - hwif->attr.num_irqs = SPHW_AF2_GET(attr2, IRQS_PER_FUNC); - hwif->attr.num_dma_attr = BIT(SPHW_AF2_GET(attr2, DMA_ATTR_PER_FUNC)); - - hwif->attr.global_vf_id_of_pf = SPHW_AF3_GET(attr3, GLOBAL_VF_ID_OF_PF); - - pr_info("func_global_idx: 0x%x, port_to_port_idx: 0x%x, pci_intf_idx: 0x%x, vf_in_pf: 0x%x, func_type: %d\n", - hwif->attr.func_global_idx, hwif->attr.port_to_port_idx, - hwif->attr.pci_intf_idx, hwif->attr.vf_in_pf, - hwif->attr.func_type); - - pr_info("ppf_idx: 0x%x, num_aeqs: 0x%x, num_ceqs: 0x%x, num_irqs: 0x%x, num_dma_attr: 0x%x, global_vf_id_of_pf: %u\n", - hwif->attr.ppf_idx, 
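sphw_hwif_read_reg()/sphw_hwif_write_reg() above byte-swap every CSR access because the device keeps its registers big-endian. A userspace sketch of the same wrapper pair, using ntohl()/htonl() as stand-ins for be32_to_cpu()/cpu_to_be32() and an array in place of the ioremapped BAR:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h> /* ntohl/htonl as big-endian conversion stand-ins */

static uint32_t regs[16]; /* stand-in for the mapped register window */

/* All accesses funnel through one pair of wrappers, so no caller ever
 * sees a raw big-endian value.
 */
static uint32_t reg_read(unsigned int idx)
{
	return ntohl(regs[idx]);
}

static void reg_write(unsigned int idx, uint32_t val)
{
	regs[idx] = htonl(val);
}

int main(void)
{
	reg_write(3, 0x12345678);
	printf("0x%08x\n", reg_read(3)); /* 0x12345678 on any host endianness */
	return 0;
}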
hwif->attr.num_aeqs, - hwif->attr.num_ceqs, hwif->attr.num_irqs, - hwif->attr.num_dma_attr, hwif->attr.global_vf_id_of_pf); -} - -/** - * get_hwif_attr - read and set the attributes as members in hwif - * @hwif: the hardware interface of a pci function device - **/ -static void get_hwif_attr(struct sphw_hwif *hwif) -{ - u32 addr, attr0, attr1, attr2, attr3; - - addr = SPHW_CSR_FUNC_ATTR0_ADDR; - attr0 = sphw_hwif_read_reg(hwif, addr); - - addr = SPHW_CSR_FUNC_ATTR1_ADDR; - attr1 = sphw_hwif_read_reg(hwif, addr); - - addr = SPHW_CSR_FUNC_ATTR2_ADDR; - attr2 = sphw_hwif_read_reg(hwif, addr); - - addr = SPHW_CSR_FUNC_ATTR3_ADDR; - attr3 = sphw_hwif_read_reg(hwif, addr); - - pr_info("attr0: 0x%08x, attr1: 0x%08x, attr2: 0x%08x, attr3: 0x%08x\n", - attr0, attr1, attr2, attr3); - set_hwif_attr(hwif, attr0, attr1, attr2, attr3); -} - -void sphw_set_pf_status(struct sphw_hwif *hwif, enum sphw_pf_status status) -{ - u32 attr6 = SPHW_AF6_SET(status, PF_STATUS); - u32 addr = SPHW_CSR_FUNC_ATTR6_ADDR; - - if (hwif->attr.func_type == TYPE_VF) - return; - - sphw_hwif_write_reg(hwif, addr, attr6); -} - -enum sphw_pf_status sphw_get_pf_status(struct sphw_hwif *hwif) -{ - u32 attr6 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR6_ADDR); - - return SPHW_AF6_GET(attr6, PF_STATUS); -} - -enum sphw_doorbell_ctrl sphw_get_doorbell_ctrl_status(struct sphw_hwif *hwif) -{ - u32 attr4 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR4_ADDR); - - return SPHW_AF4_GET(attr4, DOORBELL_CTRL); -} - -enum sphw_outbound_ctrl sphw_get_outbound_ctrl_status(struct sphw_hwif *hwif) -{ - u32 attr5 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR5_ADDR); - - return SPHW_AF5_GET(attr5, OUTBOUND_CTRL); -} - -void sphw_enable_doorbell(struct sphw_hwif *hwif) -{ - u32 addr, attr4; - - addr = SPHW_CSR_FUNC_ATTR4_ADDR; - attr4 = sphw_hwif_read_reg(hwif, addr); - - attr4 = SPHW_AF4_CLEAR(attr4, DOORBELL_CTRL); - attr4 |= SPHW_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL); - - sphw_hwif_write_reg(hwif, addr, attr4); -} - -void sphw_disable_doorbell(struct sphw_hwif *hwif) -{ - u32 addr, attr4; - - addr = SPHW_CSR_FUNC_ATTR4_ADDR; - attr4 = sphw_hwif_read_reg(hwif, addr); - - attr4 = SPHW_AF4_CLEAR(attr4, DOORBELL_CTRL); - attr4 |= SPHW_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL); - - sphw_hwif_write_reg(hwif, addr, attr4); -} - -/** - * set_ppf - try to set hwif as ppf and set the type of hwif in this case - * @hwif: the hardware interface of a pci function device - **/ -static void set_ppf(struct sphw_hwif *hwif) -{ - struct sphw_func_attr *attr = &hwif->attr; - u32 addr, val, ppf_election; - - /* Read Modify Write */ - addr = SPHW_CSR_PPF_ELECTION_ADDR; - - val = sphw_hwif_read_reg(hwif, addr); - val = SPHW_PPF_ELECTION_CLEAR(val, IDX); - - ppf_election = SPHW_PPF_ELECTION_SET(attr->func_global_idx, IDX); - val |= ppf_election; - - sphw_hwif_write_reg(hwif, addr, val); - - /* Check PPF */ - val = sphw_hwif_read_reg(hwif, addr); - - attr->ppf_idx = SPHW_PPF_ELECTION_GET(val, IDX); - if (attr->ppf_idx == attr->func_global_idx) - attr->func_type = TYPE_PPF; -} - -/** - * get_mpf - get the mpf index into the hwif - * @hwif: the hardware interface of a pci function device - **/ -static void get_mpf(struct sphw_hwif *hwif) -{ - struct sphw_func_attr *attr = &hwif->attr; - u32 mpf_election, addr; - - addr = SPHW_CSR_GLOBAL_MPF_ELECTION_ADDR; - - mpf_election = sphw_hwif_read_reg(hwif, addr); - attr->mpf_idx = SPHW_MPF_ELECTION_GET(mpf_election, IDX); -} - -/** - * set_mpf - try to set hwif as mpf and set the mpf idx in hwif - * @hwif: the hardware interface 
of a pci function device - **/ -static void set_mpf(struct sphw_hwif *hwif) -{ - struct sphw_func_attr *attr = &hwif->attr; - u32 addr, val, mpf_election; - - /* Read Modify Write */ - addr = SPHW_CSR_GLOBAL_MPF_ELECTION_ADDR; - - val = sphw_hwif_read_reg(hwif, addr); - - val = SPHW_MPF_ELECTION_CLEAR(val, IDX); - mpf_election = SPHW_MPF_ELECTION_SET(attr->func_global_idx, IDX); - - val |= mpf_election; - sphw_hwif_write_reg(hwif, addr, val); -} - -static int init_db_area_idx(struct sphw_free_db_area *free_db_area, u64 db_dwqe_len) -{ - u32 db_max_areas; - - db_max_areas = (db_dwqe_len > SPHW_DB_DWQE_SIZE) ? SPHW_DB_MAX_AREAS : - (u32)(db_dwqe_len / SPHW_DB_PAGE_SIZE); - free_db_area->db_bitmap_array = bitmap_zalloc(db_max_areas, GFP_KERNEL); - if (!free_db_area->db_bitmap_array) { - pr_err("Failed to allocate db area.\n"); - return -ENOMEM; - } - free_db_area->db_max_areas = db_max_areas; - spin_lock_init(&free_db_area->idx_lock); - - return 0; -} - -static void free_db_area(struct sphw_free_db_area *free_db_area) -{ - kfree(free_db_area->db_bitmap_array); -} - -static int get_db_idx(struct sphw_hwif *hwif, u32 *idx) -{ - struct sphw_free_db_area *free_db_area = &hwif->free_db_area; - u32 pg_idx; - - spin_lock(&free_db_area->idx_lock); - pg_idx = (u32)find_first_zero_bit(free_db_area->db_bitmap_array, - free_db_area->db_max_areas); - if (pg_idx == free_db_area->db_max_areas) { - spin_unlock(&free_db_area->idx_lock); - return -ENOMEM; - } - set_bit(pg_idx, free_db_area->db_bitmap_array); - spin_unlock(&free_db_area->idx_lock); - - *idx = pg_idx; - - return 0; -} - -static void free_db_idx(struct sphw_hwif *hwif, u32 idx) -{ - struct sphw_free_db_area *free_db_area = &hwif->free_db_area; - - if (idx >= free_db_area->db_max_areas) - return; - - spin_lock(&free_db_area->idx_lock); - clear_bit((int)idx, free_db_area->db_bitmap_array); - - spin_unlock(&free_db_area->idx_lock); -} - -void sphw_free_db_addr(void *hwdev, const void __iomem *db_base, void __iomem *dwqe_base) -{ - struct sphw_hwif *hwif = NULL; - u32 idx; - - if (!hwdev || !db_base) - return; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - idx = DB_IDX(db_base, hwif->db_base); - - free_db_idx(hwif, idx); -} - -int sphw_alloc_db_addr(void *hwdev, void __iomem **db_base, void __iomem **dwqe_base) -{ - struct sphw_hwif *hwif = NULL; - u32 idx = 0; - int err; - - if (!hwdev || !db_base) - return -EINVAL; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - err = get_db_idx(hwif, &idx); - if (err) - return -EFAULT; - - *db_base = hwif->db_base + idx * SPHW_DB_PAGE_SIZE; - - if (!dwqe_base) - return 0; - - *dwqe_base = (u8 *)*db_base + SPHW_DWQE_OFFSET; - - return 0; -} - -void sphw_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base) -{ - struct sphw_hwif *hwif = NULL; - u32 idx; - - if (!hwdev) - return; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - idx = DB_IDX(db_base, hwif->db_base_phy); - - free_db_idx(hwif, idx); -} - -int sphw_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base) -{ - struct sphw_hwif *hwif = NULL; - u32 idx; - int err; - - if (!hwdev || !db_base || !dwqe_base) - return -EINVAL; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - err = get_db_idx(hwif, &idx); - if (err) - return -EFAULT; - - *db_base = hwif->db_base_phy + idx * SPHW_DB_PAGE_SIZE; - *dwqe_base = *db_base + SPHW_DWQE_OFFSET; - - return 0; -} - -void sphw_set_msix_auto_mask_state(void *hwdev, u16 msix_idx, enum sphw_msix_auto_mask flag) -{ - struct sphw_hwif *hwif = NULL; - u32 mask_bits; - u32 addr; - - if (!hwdev) - return; - - hwif = 
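get_db_idx()/free_db_idx() below hand out doorbell pages from a bitmap: find the first clear bit, set it, and clear it again on free. The same first-fit allocator reduced to a single 64-entry word (the driver additionally serializes callers with a spinlock, omitted here):

#include <stdint.h>
#include <stdio.h>

#define DB_MAX_AREAS 64

static uint64_t db_bitmap; /* one bit per doorbell page, 0 = free */

/* First-fit index allocation, the idea behind get_db_idx(). */
static int db_alloc(unsigned int *idx)
{
	unsigned int i;

	for (i = 0; i < DB_MAX_AREAS; i++) {
		if (!(db_bitmap & (1ULL << i))) {
			db_bitmap |= 1ULL << i;
			*idx = i;
			return 0;
		}
	}
	return -1; /* all doorbell pages in use */
}

static void db_free(unsigned int idx)
{
	if (idx < DB_MAX_AREAS)
		db_bitmap &= ~(1ULL << idx);
}

int main(void)
{
	unsigned int idx;

	if (!db_alloc(&idx))
		printf("got doorbell page %u\n", idx);
	db_free(idx);
	return 0;
}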
((struct sphw_hwdev *)hwdev)->hwif; - - if (flag) - mask_bits = SPHW_MSI_CLR_INDIR_SET(1, AUTO_MSK_SET); - else - mask_bits = SPHW_MSI_CLR_INDIR_SET(1, AUTO_MSK_CLR); - - mask_bits = mask_bits | SPHW_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); - - addr = SPHW_CSR_FUNC_MSI_CLR_WR_ADDR; - sphw_hwif_write_reg(hwif, addr, mask_bits); -} - -void sphw_set_msix_state(void *hwdev, u16 msix_idx, enum sphw_msix_state flag) -{ - struct sphw_hwif *hwif = NULL; - u32 mask_bits; - u32 addr; - u8 int_msk = 1; - - if (!hwdev) - return; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - if (flag) - mask_bits = SPHW_MSI_CLR_INDIR_SET(int_msk, INT_MSK_SET); - else - mask_bits = SPHW_MSI_CLR_INDIR_SET(int_msk, INT_MSK_CLR); - mask_bits = mask_bits | SPHW_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); - - addr = SPHW_CSR_FUNC_MSI_CLR_WR_ADDR; - sphw_hwif_write_reg(hwif, addr, mask_bits); -} - -static void disable_all_msix(struct sphw_hwdev *hwdev) -{ - u16 num_irqs = hwdev->hwif->attr.num_irqs; - u16 i; - - for (i = 0; i < num_irqs; i++) - sphw_set_msix_state(hwdev, i, SPHW_MSIX_DISABLE); -} - -static enum sphw_wait_return check_db_flush_enable_handler(void *priv_data) -{ - struct sphw_hwif *hwif = priv_data; - enum sphw_doorbell_ctrl db_ctrl; - - db_ctrl = sphw_get_doorbell_ctrl_status(hwif); - if (db_ctrl == ENABLE_DOORBELL) - return WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -static enum sphw_wait_return check_db_flush_disable_handler(void *priv_data) -{ - struct sphw_hwif *hwif = priv_data; - enum sphw_doorbell_ctrl db_ctrl; - - db_ctrl = sphw_get_doorbell_ctrl_status(hwif); - if (db_ctrl == DISABLE_DOORBELL) - return WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -int wait_until_doorbell_flush_states(struct sphw_hwif *hwif, - enum sphw_doorbell_ctrl states) -{ - if (!hwif) - return -EFAULT; - - return sphw_wait_for_timeout(hwif, states == ENABLE_DOORBELL ? 
- check_db_flush_enable_handler : check_db_flush_disable_handler, - SPHW_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT, USEC_PER_MSEC); -} - -static enum sphw_wait_return check_db_outbound_enable_handler(void *priv_data) -{ - struct sphw_hwif *hwif = priv_data; - enum sphw_doorbell_ctrl db_ctrl; - enum sphw_outbound_ctrl outbound_ctrl; - - db_ctrl = sphw_get_doorbell_ctrl_status(hwif); - outbound_ctrl = sphw_get_outbound_ctrl_status(hwif); - - if (outbound_ctrl == ENABLE_OUTBOUND && db_ctrl == ENABLE_DOORBELL) - return WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -static int wait_until_doorbell_and_outbound_enabled(struct sphw_hwif *hwif) -{ - return sphw_wait_for_timeout(hwif, check_db_outbound_enable_handler, - SPHW_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT, USEC_PER_MSEC); -} - -/** - * sphw_init_hwif - initialize the hw interface - * @hwif: the hardware interface of a pci function device - * @pdev: the pci device that will be part of the hwif struct - * Return: 0 - success, negative - failure - **/ -int sphw_init_hwif(struct sphw_hwdev *hwdev, void *cfg_reg_base, void *intr_reg_base, - void *mgmt_regs_base, u64 db_base_phy, void *db_base, u64 db_dwqe_len) -{ - struct sphw_hwif *hwif = NULL; - u32 attr4, attr5; - int err; - - hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); - if (!hwif) - return -ENOMEM; - - hwdev->hwif = hwif; - hwif->pdev = hwdev->pcidev_hdl; - - /* if function is VF, mgmt_regs_base will be NULL */ - if (!mgmt_regs_base) - hwif->cfg_regs_base = (u8 *)cfg_reg_base + - SPHW_VF_CFG_REG_OFFSET; - else - hwif->cfg_regs_base = cfg_reg_base; - - hwif->intr_regs_base = intr_reg_base; - hwif->mgmt_regs_base = mgmt_regs_base; - sdk_info(hwdev->dev_hdl, "init intr_regs_base: %p, mgmt_regs_base: %p, db_base: %p, db_dwqe_len: 0x%llx\n", - hwif->intr_regs_base, hwif->mgmt_regs_base, - db_base, db_dwqe_len); - - hwif->db_base_phy = db_base_phy; - hwif->db_base = db_base; - hwif->db_dwqe_len = db_dwqe_len; - err = init_db_area_idx(&hwif->free_db_area, hwif->db_dwqe_len); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init db area.\n"); - goto init_db_area_err; - } - - err = wait_hwif_ready(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Chip status is not ready\n"); - goto hwif_ready_err; - } - - get_hwif_attr(hwif); - - err = wait_until_doorbell_and_outbound_enabled(hwif); - if (err) { - attr4 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR4_ADDR); - attr5 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR5_ADDR); - sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled, attr4 0x%x attr5 0x%x\n", - attr4, attr5); - goto hwif_ready_err; - } - - if (!SPHW_IS_VF(hwdev)) { - set_ppf(hwif); - - if (SPHW_IS_PPF(hwdev)) - set_mpf(hwif); - - get_mpf(hwif); - } - - disable_all_msix(hwdev); - /* disable mgmt cpu report any event */ - sphw_set_pf_status(hwdev->hwif, SPHW_PF_STATUS_INIT); - - sdk_info(hwdev->dev_hdl, "global_func_idx: %u, func_type: %d, host_id: %u, ppf: %u, mpf: %u\n", - hwif->attr.func_global_idx, hwif->attr.func_type, - hwif->attr.pci_intf_idx, hwif->attr.ppf_idx, - hwif->attr.mpf_idx); - - return 0; - -hwif_ready_err: - free_db_area(&hwif->free_db_area); -init_db_area_err: - kfree(hwif); - - return err; -} - -/** - * sphw_free_hwif - free the hw interface - * @hwif: the hardware interface of a pci function device - * @pdev: the pci device that will be part of the hwif struct - **/ -void sphw_free_hwif(struct sphw_hwdev *hwdev) -{ - free_db_area(&hwdev->hwif->free_db_area); - kfree(hwdev->hwif); -} - -u16 sphw_global_func_id(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) 
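The check_db_flush_*/check_db_outbound_* handlers above all plug into sphw_wait_for_timeout(), which polls a callback until it reports completion or the deadline passes. Its implementation is not in this hunk, so the sketch below assumes the obvious shape from the call sites:

#include <stdio.h>
#include <time.h>

enum wait_return { WAIT_PROCESS_CPL, WAIT_PROCESS_WAITING };

typedef enum wait_return (*wait_cpl_handler)(void *priv);

/* Poll 'handler' every interval_us until it completes or timeout_ms
 * expires; assumed shape of sphw_wait_for_timeout().
 */
static int wait_for_timeout(void *priv, wait_cpl_handler handler,
			    unsigned long timeout_ms, unsigned long interval_us)
{
	struct timespec ts = { .tv_nsec = (long)interval_us * 1000 };
	unsigned long waited_us = 0;

	do {
		if (handler(priv) == WAIT_PROCESS_CPL)
			return 0;
		nanosleep(&ts, NULL);
		waited_us += interval_us;
	} while (waited_us < timeout_ms * 1000);

	return -1; /* timed out */
}

static enum wait_return ready_handler(void *priv)
{
	int *countdown = priv;

	return --(*countdown) ? WAIT_PROCESS_WAITING : WAIT_PROCESS_CPL;
}

int main(void)
{
	int countdown = 3;

	return wait_for_timeout(&countdown, ready_handler, 10, 1000);
}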
- return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.func_global_idx; -} - -u16 sphw_intr_num(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.num_irqs; -} - -u8 sphw_pf_id_of_vf(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.port_to_port_idx; -} - -u8 sphw_pcie_itf_id(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.pci_intf_idx; -} - -u8 sphw_vf_in_pf(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.vf_in_pf; -} - -enum func_type sphw_func_type(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.func_type; -} - -u8 sphw_ceq_num(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.num_ceqs; -} - -u8 sphw_dma_attr_entry_num(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.num_dma_attr; -} - -u16 sphw_glb_pf_vf_offset(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.global_vf_id_of_pf; -} - -u8 sphw_mpf_idx(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.mpf_idx; -} - -u8 sphw_ppf_idx(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.ppf_idx; -} - -u8 sphw_host_ppf_idx(void *hwdev, u8 host_id) -{ - struct sphw_hwdev *dev = hwdev; - u32 ppf_elect_port_addr; - u32 val; - - if (!hwdev) - return 0; - - ppf_elect_port_addr = SPHW_CSR_FUNC_PPF_ELECT(host_id); - val = sphw_hwif_read_reg(dev->hwif, ppf_elect_port_addr); - - return sphw_PPF_ELECT_PORT_GET(val, IDX); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h deleted file mode 100644 index 9035baf8a66ee266eac8c75c5c0073d958b818bc..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h +++ /dev/null @@ -1,102 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_HWIF_H -#define SPHW_HWIF_H - -#define SPHW_PCIE_LINK_DOWN 0xFFFFFFFF - -struct sphw_free_db_area { - unsigned long *db_bitmap_array; - u32 db_max_areas; - /* spinlock for allocating doorbell area */ - spinlock_t idx_lock; -}; - -struct sphw_func_attr { - u16 func_global_idx; - u8 port_to_port_idx; - u8 pci_intf_idx; - u8 vf_in_pf; - enum func_type func_type; - - u8 mpf_idx; - - u8 ppf_idx; - - u16 num_irqs; - u8 num_aeqs; - u8 num_ceqs; - - u8 num_dma_attr; - - u16 global_vf_id_of_pf; -}; - -struct sphw_hwif { - u8 __iomem *cfg_regs_base; - u8 __iomem *intr_regs_base; - u8 __iomem *mgmt_regs_base; - u64 db_base_phy; - u64 db_dwqe_len; - u8 __iomem *db_base; - - struct sphw_free_db_area free_db_area; - - struct sphw_func_attr attr; - - void *pdev; -}; - -enum sphw_outbound_ctrl { - ENABLE_OUTBOUND = 0x0, - DISABLE_OUTBOUND = 0x1, -}; - 
-enum sphw_doorbell_ctrl { - ENABLE_DOORBELL = 0x0, - DISABLE_DOORBELL = 0x1, -}; - -enum sphw_pf_status { - SPHW_PF_STATUS_INIT = 0X0, - SPHW_PF_STATUS_ACTIVE_FLAG = 0x11, - SPHW_PF_STATUS_FLR_START_FLAG = 0x12, - SPHW_PF_STATUS_FLR_FINISH_FLAG = 0x13, -}; - -#define SPHW_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) -#define SPHW_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) -#define SPHW_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) -#define SPHW_HWIF_GLOBAL_IDX(hwif) ((hwif)->attr.func_global_idx) -#define SPHW_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_id_of_pf) -#define SPHW_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) -#define SPHW_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx) - -#define SPHW_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type) -#define SPHW_IS_PF(dev) (SPHW_FUNC_TYPE(dev) == TYPE_PF) -#define SPHW_IS_VF(dev) (SPHW_FUNC_TYPE(dev) == TYPE_VF) -#define SPHW_IS_PPF(dev) (SPHW_FUNC_TYPE(dev) == TYPE_PPF) - -u32 sphw_hwif_read_reg(struct sphw_hwif *hwif, u32 reg); - -void sphw_hwif_write_reg(struct sphw_hwif *hwif, u32 reg, u32 val); - -void sphw_set_pf_status(struct sphw_hwif *hwif, enum sphw_pf_status status); - -enum sphw_pf_status sphw_get_pf_status(struct sphw_hwif *hwif); - -void sphw_disable_doorbell(struct sphw_hwif *hwif); - -void sphw_enable_doorbell(struct sphw_hwif *hwif); - -int sphw_init_hwif(struct sphw_hwdev *hwdev, void *cfg_reg_base, void *intr_reg_base, - void *mgmt_regs_base, u64 db_base_phy, void *db_base, u64 db_dwqe_len); - -void sphw_free_hwif(struct sphw_hwdev *hwdev); - -u8 sphw_host_ppf_idx(void *hwdev, u8 host_id); - -u32 sphw_get_heartbeat_status(struct sphw_hwdev *hwdev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c deleted file mode 100644 index 694463ca0a931340571cb15bed112b772a992327..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c +++ /dev/null @@ -1,1792 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include -#include -#include - -#include "sphw_hw.h" -#include "sphw_hwdev.h" -#include "sphw_csr.h" -#include "sphw_hwif.h" -#include "sphw_eqs.h" -#include "sphw_prof_adap.h" -#include "sphw_mbox.h" -#include "sphw_common.h" - -#define SPHW_MBOX_INT_DST_AEQN_SHIFT 10 -#define SPHW_MBOX_INT_SRC_RESP_AEQN_SHIFT 12 -#define SPHW_MBOX_INT_STAT_DMA_SHIFT 14 -/* The size of data to be send (unit of 4 bytes) */ -#define SPHW_MBOX_INT_TX_SIZE_SHIFT 20 -/* SO_RO(strong order, relax order) */ -#define SPHW_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25 -#define SPHW_MBOX_INT_WB_EN_SHIFT 28 - -#define SPHW_MBOX_INT_DST_AEQN_MASK 0x3 -#define SPHW_MBOX_INT_SRC_RESP_AEQN_MASK 0x3 -#define SPHW_MBOX_INT_STAT_DMA_MASK 0x3F -#define SPHW_MBOX_INT_TX_SIZE_MASK 0x1F -#define SPHW_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3 -#define SPHW_MBOX_INT_WB_EN_MASK 0x1 - -#define SPHW_MBOX_INT_SET(val, field) \ - (((val) & SPHW_MBOX_INT_##field##_MASK) << \ - SPHW_MBOX_INT_##field##_SHIFT) - -enum sphw_mbox_tx_status { - TX_NOT_DONE = 1, -}; - -#define SPHW_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0 -/* specifies the issue request for the message data. - * 0 - Tx request is done; - * 1 - Tx request is in process. 
- */ -#define SPHW_MBOX_CTRL_TX_STATUS_SHIFT 1 -#define SPHW_MBOX_CTRL_DST_FUNC_SHIFT 16 - -#define SPHW_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1 -#define SPHW_MBOX_CTRL_TX_STATUS_MASK 0x1 -#define SPHW_MBOX_CTRL_DST_FUNC_MASK 0x1FFF - -#define SPHW_MBOX_CTRL_SET(val, field) \ - (((val) & SPHW_MBOX_CTRL_##field##_MASK) << \ - SPHW_MBOX_CTRL_##field##_SHIFT) - -#define MBOX_SEGLEN_MASK SPHW_MSG_HEADER_SET(SPHW_MSG_HEADER_SEG_LEN_MASK, SEG_LEN) - -#define MBOX_MSG_POLLING_TIMEOUT 8000 -#define SPHW_MBOX_COMP_TIME 25000U - -#define MBOX_MAX_BUF_SZ 2048U -#define MBOX_HEADER_SZ 8 -#define SPHW_MBOX_DATA_SIZE (MBOX_MAX_BUF_SZ - MBOX_HEADER_SZ) - -/* MBOX size is 64B, 8B for mbox_header, 8B reserved */ -#define MBOX_SEG_LEN 48 -#define MBOX_SEG_LEN_ALIGN 4 -#define MBOX_WB_STATUS_LEN 16UL - -#define SEQ_ID_START_VAL 0 -#define SEQ_ID_MAX_VAL 42 -#define MBOX_LAST_SEG_MAX_LEN (MBOX_MAX_BUF_SZ - \ - SEQ_ID_MAX_VAL * MBOX_SEG_LEN) - -/* mbox write back status is 16B, only first 4B is used */ -#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF -#define MBOX_WB_STATUS_MASK 0xFF -#define MBOX_WB_ERROR_CODE_MASK 0xFF00 -#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF -#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE -#define MBOX_WB_STATUS_NOT_FINISHED 0x00 - -#define MBOX_STATUS_FINISHED(wb) \ - (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED) -#define MBOX_STATUS_SUCCESS(wb) \ - (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS) -#define MBOX_STATUS_ERRCODE(wb) \ - ((wb) & MBOX_WB_ERROR_CODE_MASK) - -#define DST_AEQ_IDX_DEFAULT_VAL 0 -#define SRC_AEQ_IDX_DEFAULT_VAL 0 -#define NO_DMA_ATTRIBUTE_VAL 0 - -#define MBOX_MSG_NO_DATA_LEN 1 - -#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ) -#define MBOX_AREA(hwif) \ - ((hwif)->cfg_regs_base + SPHW_FUNC_CSR_MAILBOX_DATA_OFF) - -#define MBOX_DMA_MSG_QUEUE_DEPTH 32 - -#define MBOX_MQ_CI_OFFSET (SPHW_CFG_REGS_FLAG + SPHW_FUNC_CSR_MAILBOX_DATA_OFF + \ - MBOX_HEADER_SZ + MBOX_SEG_LEN) - -#define MBOX_MQ_SYNC_CI_SHIFT 0 -#define MBOX_MQ_ASYNC_CI_SHIFT 8 - -#define MBOX_MQ_SYNC_CI_MASK 0xFF -#define MBOX_MQ_ASYNC_CI_MASK 0xFF - -#define MBOX_MQ_CI_SET(val, field) \ - (((val) & MBOX_MQ_##field##_CI_MASK) << MBOX_MQ_##field##_CI_SHIFT) -#define MBOX_MQ_CI_GET(val, field) \ - (((val) >> MBOX_MQ_##field##_CI_SHIFT) & MBOX_MQ_##field##_CI_MASK) -#define MBOX_MQ_CI_CLEAR(val, field) \ - ((val) & (~(MBOX_MQ_##field##_CI_MASK << MBOX_MQ_##field##_CI_SHIFT))) - -#define IS_PF_OR_PPF_SRC(hwdev, src_func_idx) \ - ((src_func_idx) < SPHW_MAX_PF_NUM(hwdev)) - -#define MBOX_RESPONSE_ERROR 0x1 -#define MBOX_MSG_ID_MASK 0xF -#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id) -#define MBOX_MSG_ID_INC(func_to_func) \ - (MBOX_MSG_ID(func_to_func) = \ - (MBOX_MSG_ID(func_to_func) + 1) & MBOX_MSG_ID_MASK) - -/* max message counter wait to process for one function */ -#define SPHW_MAX_MSG_CNT_TO_PROCESS 10 - -#define MBOX_MSG_CHANNEL_STOP(func_to_func) \ - ((((func_to_func)->lock_channel_en) && \ - test_bit((func_to_func)->cur_msg_channel, \ - &(func_to_func)->channel_stop)) ? 
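The constants above pin down the mailbox segmentation: a message of up to MBOX_MAX_BUF_SZ (2048) bytes travels as MBOX_SEG_LEN (48) byte segments with sequence ids 0..42, so a full message needs 43 segments and the tail carries at most MBOX_LAST_SEG_MAX_LEN = 32 bytes. The arithmetic, checked in a few lines:

#include <stdio.h>

#define MBOX_MAX_BUF_SZ 2048u
#define MBOX_SEG_LEN    48u

int main(void)
{
	unsigned int len = MBOX_MAX_BUF_SZ;
	/* Round up to whole segments, then size the short tail segment. */
	unsigned int segs = (len + MBOX_SEG_LEN - 1) / MBOX_SEG_LEN;
	unsigned int last = len - (segs - 1) * MBOX_SEG_LEN;

	printf("%u bytes -> %u segments, last segment %u bytes\n",
	       len, segs, last); /* 2048 -> 43 segments, last 32 bytes */
	return 0;
}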
true : false) - -enum mbox_ordering_type { - STRONG_ORDER, -}; - -enum mbox_write_back_type { - WRITE_BACK = 1, -}; - -enum mbox_aeq_trig_type { - NOT_TRIGGER, - TRIGGER, -}; - -static int send_mbox_msg(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, - void *msg, u16 msg_len, u16 dst_func, - enum sphw_msg_direction_type direction, - enum sphw_msg_ack_type ack_type, - struct mbox_msg_info *msg_info); - -struct sphw_msg_desc *get_mbox_msg_desc(struct sphw_mbox *func_to_func, - u64 dir, u64 src_func_id); - -/** - * sphw_register_ppf_mbox_cb - register mbox callback for ppf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * @pri_handle specific mod's private data that will be used in callback - * @callback: callback function - * Return: 0 - success, negative - failure - */ -int sphw_register_ppf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_ppf_mbox_cb callback) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return -EFAULT; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - func_to_func->ppf_mbox_cb[mod] = callback; - func_to_func->ppf_mbox_data[mod] = pri_handle; - - set_bit(SPHW_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]); - - return 0; -} - -/** - * sphw_register_pf_mbox_cb - register mbox callback for pf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * @pri_handle specific mod's private data that will be used in callback - * @callback: callback function - * Return: 0 - success, negative - failure - */ -int sphw_register_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_pf_mbox_cb callback) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return -EFAULT; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - func_to_func->pf_mbox_cb[mod] = callback; - func_to_func->pf_mbox_data[mod] = pri_handle; - - set_bit(SPHW_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); - - return 0; -} - -/** - * sphw_register_vf_mbox_cb - register mbox callback for vf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * @pri_handle specific mod's private data that will be used in callback - * @callback: callback function - * Return: 0 - success, negative - failure - */ -int sphw_register_vf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_vf_mbox_cb callback) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return -EFAULT; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - func_to_func->vf_mbox_cb[mod] = callback; - func_to_func->vf_mbox_data[mod] = pri_handle; - - set_bit(SPHW_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); - - return 0; -} - -/** - * sphw_register_ppf_to_pf_mbox_cb - register mbox callback for pf from ppf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * @pri_handle specific mod's private data that will be used in callback - * @callback: callback function - * Return: 0 - success, negative - failure - */ -int sphw_register_ppf_to_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, - sphw_pf_recv_from_ppf_mbox_cb callback) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return -EFAULT; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - func_to_func->pf_recv_ppf_mbox_cb[mod] = callback; - func_to_func->pf_recv_ppf_mbox_data[mod] = pri_handle; - - set_bit(SPHW_PPF_TO_PF_MBOX_CB_REG, - 
&func_to_func->ppf_to_pf_mbox_cb_state[mod]); - - return 0; -} - -/** - * sphw_unregister_ppf_mbox_cb - unregister the mbox callback for ppf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - */ -void sphw_unregister_ppf_mbox_cb(void *hwdev, u8 mod) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - clear_bit(SPHW_PPF_MBOX_CB_REG, - &func_to_func->ppf_mbox_cb_state[mod]); - - while (test_bit(SPHW_PPF_MBOX_CB_RUNNING, - &func_to_func->ppf_mbox_cb_state[mod])) - usleep_range(900, 1000); - - func_to_func->ppf_mbox_data[mod] = NULL; - func_to_func->ppf_mbox_cb[mod] = NULL; -} - -/** - * sphw_unregister_pf_mbox_cb - unregister the mbox callback for pf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - */ -void sphw_unregister_pf_mbox_cb(void *hwdev, u8 mod) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - clear_bit(SPHW_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); - - while (test_bit(SPHW_PF_MBOX_CB_RUNNING, - &func_to_func->pf_mbox_cb_state[mod])) - usleep_range(900, 1000); - - func_to_func->pf_mbox_data[mod] = NULL; - func_to_func->pf_mbox_cb[mod] = NULL; -} - -/** - * sphw_unregister_vf_mbox_cb - unregister the mbox callback for vf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - */ -void sphw_unregister_vf_mbox_cb(void *hwdev, u8 mod) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - clear_bit(SPHW_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); - - while (test_bit(SPHW_VF_MBOX_CB_RUNNING, - &func_to_func->vf_mbox_cb_state[mod])) - usleep_range(900, 1000); - - func_to_func->vf_mbox_data[mod] = NULL; - func_to_func->vf_mbox_cb[mod] = NULL; -} - -/** - * sphw_unregister_ppf_to_pf_mbox_cb - unregister the mbox callback for pf from ppf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - */ -void sphw_unregister_ppf_to_pf_mbox_cb(void *hwdev, u8 mod) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - clear_bit(SPHW_PPF_TO_PF_MBOX_CB_REG, - &func_to_func->ppf_to_pf_mbox_cb_state[mod]); - - while (test_bit(SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, - &func_to_func->ppf_to_pf_mbox_cb_state[mod])) - usleep_range(900, 1000); - - func_to_func->pf_recv_ppf_mbox_data[mod] = NULL; - func_to_func->pf_recv_ppf_mbox_cb[mod] = NULL; -} - -static int recv_vf_mbox_handler(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox, - void *buf_out, u16 *out_size) -{ - sphw_vf_mbox_cb cb; - int ret; - - if (recv_mbox->mod >= SPHW_MOD_MAX) { - sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n", - recv_mbox->mod); - return -EINVAL; - } - - set_bit(SPHW_VF_MBOX_CB_RUNNING, &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); - - cb = func_to_func->vf_mbox_cb[recv_mbox->mod]; - if (cb && test_bit(SPHW_VF_MBOX_CB_REG, - &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) { - ret = cb(func_to_func->hwdev, - func_to_func->vf_mbox_data[recv_mbox->mod], - recv_mbox->cmd, recv_mbox->msg, - recv_mbox->msg_len, buf_out, out_size); - } else { -
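/* no callback registered for this mod: warn and reject the message */ -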
sdk_warn(func_to_func->hwdev->dev_hdl, "VF mbox cb is not registered\n"); - ret = -EINVAL; - } - - clear_bit(SPHW_VF_MBOX_CB_RUNNING, - &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); - - return ret; -} - -static int recv_pf_from_ppf_handler(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox, - void *buf_out, u16 *out_size) -{ - sphw_pf_recv_from_ppf_mbox_cb cb; - enum sphw_mod_type mod = recv_mbox->mod; - int ret; - - if (mod >= SPHW_MOD_MAX) { - sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n", - mod); - return -EINVAL; - } - - set_bit(SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, - &func_to_func->ppf_to_pf_mbox_cb_state[mod]); - - cb = func_to_func->pf_recv_ppf_mbox_cb[mod]; - if (cb && test_bit(SPHW_PPF_TO_PF_MBOX_CB_REG, - &func_to_func->ppf_to_pf_mbox_cb_state[mod])) { - ret = cb(func_to_func->hwdev, - func_to_func->pf_recv_ppf_mbox_data[mod], - recv_mbox->cmd, recv_mbox->msg, recv_mbox->msg_len, - buf_out, out_size); - } else { - sdk_warn(func_to_func->hwdev->dev_hdl, "PF receive ppf mailbox callback is not registered\n"); - ret = -EINVAL; - } - - clear_bit(SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, - &func_to_func->ppf_to_pf_mbox_cb_state[mod]); - - return ret; -} - -static int recv_ppf_mbox_handler(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox, - u8 pf_id, void *buf_out, u16 *out_size) -{ - sphw_ppf_mbox_cb cb; - u16 vf_id = 0; - int ret; - - if (recv_mbox->mod >= SPHW_MOD_MAX) { - sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n", - recv_mbox->mod); - return -EINVAL; - } - - set_bit(SPHW_PPF_MBOX_CB_RUNNING, - &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); - - cb = func_to_func->ppf_mbox_cb[recv_mbox->mod]; - if (cb && test_bit(SPHW_PPF_MBOX_CB_REG, - &func_to_func->ppf_mbox_cb_state[recv_mbox->mod])) { - ret = cb(func_to_func->hwdev, - func_to_func->ppf_mbox_data[recv_mbox->mod], - pf_id, vf_id, recv_mbox->cmd, recv_mbox->msg, - recv_mbox->msg_len, buf_out, out_size); - } else { - sdk_warn(func_to_func->hwdev->dev_hdl, "PPF mbox cb is not registered, mod = %hhu\n", - recv_mbox->mod); - ret = -EINVAL; - } - - clear_bit(SPHW_PPF_MBOX_CB_RUNNING, - &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); - - return ret; -} - -static int recv_pf_from_vf_mbox_handler(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox, - u16 src_func_idx, void *buf_out, - u16 *out_size) -{ - sphw_pf_mbox_cb cb; - u16 vf_id = 0; - int ret; - - if (recv_mbox->mod >= SPHW_MOD_MAX) { - sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n", - recv_mbox->mod); - return -EINVAL; - } - - set_bit(SPHW_PF_MBOX_CB_RUNNING, - &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); - - cb = func_to_func->pf_mbox_cb[recv_mbox->mod]; - if (cb && test_bit(SPHW_PF_MBOX_CB_REG, - &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) { - vf_id = src_func_idx - - sphw_glb_pf_vf_offset(func_to_func->hwdev); - ret = cb(func_to_func->hwdev, - func_to_func->pf_mbox_data[recv_mbox->mod], - vf_id, recv_mbox->cmd, recv_mbox->msg, - recv_mbox->msg_len, buf_out, out_size); - } else { - sdk_warn(func_to_func->hwdev->dev_hdl, "PF mbox mod(0x%x) cb is not registered\n", - recv_mbox->mod); - ret = -EINVAL; - } - - clear_bit(SPHW_PF_MBOX_CB_RUNNING, - &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); - - return ret; -} - -static void response_for_recv_func_mbox(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox, - int err, u16 out_size, u16 src_func_idx) -{ - struct mbox_msg_info msg_info = {0}; - - 
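/* echo the request's msg_id so the sender can match this response to its request */ -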
msg_info.msg_id = recv_mbox->msg_id; - if (err) - msg_info.status = SPHW_MBOX_PF_SEND_ERR; - - /* if not data need to response, set out_size to 1 */ - if (!out_size || err) - out_size = MBOX_MSG_NO_DATA_LEN; - - send_mbox_msg(func_to_func, recv_mbox->mod, recv_mbox->cmd, - recv_mbox->resp_buff, out_size, src_func_idx, - SPHW_MSG_RESPONSE, SPHW_MSG_NO_ACK, &msg_info); -} - -static void recv_func_mbox_handler(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox) -{ - struct sphw_hwdev *dev = func_to_func->hwdev; - void *buf_out = recv_mbox->resp_buff; - u16 src_func_idx = recv_mbox->src_func_idx; - u16 out_size = MBOX_MAX_BUF_SZ; - int err = 0; - - if (SPHW_IS_VF(dev)) { - err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out, - &out_size); - } else { /* pf/ppf process */ - if (IS_PF_OR_PPF_SRC(dev, src_func_idx)) { - if (SPHW_IS_PPF(dev)) { - err = recv_ppf_mbox_handler(func_to_func, - recv_mbox, - (u8)src_func_idx, - buf_out, &out_size); - if (err) - goto out; - } else { - err = recv_pf_from_ppf_handler(func_to_func, - recv_mbox, - buf_out, - &out_size); - if (err) - goto out; - } - /* The source is neither PF nor PPF, so it is from VF */ - } else { - err = recv_pf_from_vf_mbox_handler(func_to_func, - recv_mbox, - src_func_idx, - buf_out, &out_size); - } - } - -out: - if (recv_mbox->ack_type == SPHW_MSG_ACK) - response_for_recv_func_mbox(func_to_func, recv_mbox, err, - out_size, src_func_idx); -} - -static struct sphw_recv_mbox *alloc_recv_mbox(void) -{ - struct sphw_recv_mbox *recv_msg = NULL; - - recv_msg = kzalloc(sizeof(*recv_msg), GFP_KERNEL); - if (!recv_msg) - return NULL; - - recv_msg->msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); - if (!recv_msg->msg) - goto alloc_msg_err; - - recv_msg->resp_buff = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); - if (!recv_msg->resp_buff) - goto alloc_resp_bff_err; - - return recv_msg; - -alloc_resp_bff_err: - kfree(recv_msg->msg); - -alloc_msg_err: - kfree(recv_msg); - - return NULL; -} - -static void free_recv_mbox(struct sphw_recv_mbox *recv_msg) -{ - kfree(recv_msg->resp_buff); - kfree(recv_msg->msg); - kfree(recv_msg); -} - -static void recv_func_mbox_work_handler(struct work_struct *work) -{ - struct sphw_mbox_work *mbox_work = - container_of(work, struct sphw_mbox_work, work); - - recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox); - - atomic_dec(&mbox_work->msg_ch->recv_msg_cnt); - - free_recv_mbox(mbox_work->recv_mbox); - kfree(mbox_work); -} - -static void resp_mbox_handler(struct sphw_mbox *func_to_func, - struct sphw_msg_desc *msg_desc) -{ - spin_lock(&func_to_func->mbox_lock); - if (msg_desc->msg_info.msg_id == func_to_func->send_msg_id && - func_to_func->event_flag == EVENT_START) - func_to_func->event_flag = EVENT_SUCCESS; - else - sdk_err(func_to_func->hwdev->dev_hdl, - "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n", - func_to_func->send_msg_id, msg_desc->msg_info.msg_id, - msg_desc->msg_info.status); - spin_unlock(&func_to_func->mbox_lock); -} - -static void recv_mbox_msg_handler(struct sphw_mbox *func_to_func, - struct sphw_msg_desc *msg_desc, - u64 mbox_header) -{ - struct sphw_hwdev *hwdev = func_to_func->hwdev; - struct sphw_recv_mbox *recv_msg = NULL; - struct sphw_mbox_work *mbox_work = NULL; - struct sphw_msg_channel *msg_ch = - container_of(msg_desc, struct sphw_msg_channel, recv_msg); - u16 src_func_idx = SPHW_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); - - if (atomic_read(&msg_ch->recv_msg_cnt) > - SPHW_MAX_MSG_CNT_TO_PROCESS) { - 
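/* too many unprocessed messages from this source function: drop the new one */ -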
sdk_warn(hwdev->dev_hdl, "This function(%u) have %d message wait to process, can't add to work queue\n", - src_func_idx, atomic_read(&msg_ch->recv_msg_cnt)); - return; - } - - recv_msg = alloc_recv_mbox(); - if (!recv_msg) { - sdk_err(hwdev->dev_hdl, "Failed to alloc receive mbox message buffer\n"); - return; - } - recv_msg->msg_len = msg_desc->msg_len; - memcpy(recv_msg->msg, msg_desc->msg, recv_msg->msg_len); - recv_msg->msg_id = msg_desc->msg_info.msg_id; - recv_msg->mod = SPHW_MSG_HEADER_GET(mbox_header, MODULE); - recv_msg->cmd = SPHW_MSG_HEADER_GET(mbox_header, CMD); - recv_msg->ack_type = SPHW_MSG_HEADER_GET(mbox_header, NO_ACK); - recv_msg->src_func_idx = src_func_idx; - - mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); - if (!mbox_work) { - sdk_err(hwdev->dev_hdl, "Allocate mbox work memory failed.\n"); - free_recv_mbox(recv_msg); - return; - } - - atomic_inc(&msg_ch->recv_msg_cnt); - - mbox_work->func_to_func = func_to_func; - mbox_work->recv_mbox = recv_msg; - mbox_work->msg_ch = msg_ch; - - INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler); - queue_work_on(sphw_get_work_cpu_affinity(hwdev, WORK_TYPE_MBOX), - func_to_func->workq, &mbox_work->work); -} - -static bool check_mbox_segment(struct sphw_mbox *func_to_func, - struct sphw_msg_desc *msg_desc, - u64 mbox_header) -{ - u8 seq_id, seg_len, msg_id, mod; - u16 src_func_idx, cmd; - - seq_id = SPHW_MSG_HEADER_GET(mbox_header, SEQID); - seg_len = SPHW_MSG_HEADER_GET(mbox_header, SEG_LEN); - msg_id = SPHW_MSG_HEADER_GET(mbox_header, MSG_ID); - mod = SPHW_MSG_HEADER_GET(mbox_header, MODULE); - cmd = SPHW_MSG_HEADER_GET(mbox_header, CMD); - src_func_idx = SPHW_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); - - if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN || - (seq_id == SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN)) - goto seg_err; - - if (seq_id == 0) { - msg_desc->seq_id = seq_id; - msg_desc->msg_info.msg_id = msg_id; - msg_desc->mod = mod; - msg_desc->cmd = cmd; - } else { - if (seq_id != msg_desc->seq_id + 1 || - msg_id != msg_desc->msg_info.msg_id || - mod != msg_desc->mod || cmd != msg_desc->cmd) - goto seg_err; - - msg_desc->seq_id = seq_id; - } - - return true; - -seg_err: - sdk_err(func_to_func->hwdev->dev_hdl, - "Mailbox segment check failed, src func id: 0x%x, front seg info: seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", - src_func_idx, msg_desc->seq_id, msg_desc->msg_info.msg_id, - msg_desc->mod, msg_desc->cmd); - sdk_err(func_to_func->hwdev->dev_hdl, - "Current seg info: seg len: 0x%x, seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", - seg_len, seq_id, msg_id, mod, cmd); - - return false; -} - -static void recv_mbox_handler(struct sphw_mbox *func_to_func, - void *header, struct sphw_msg_desc *msg_desc) -{ - u64 mbox_header = *((u64 *)header); - void *mbox_body = MBOX_BODY_FROM_HDR(header); - u8 seq_id, seg_len; - int pos; - - if (!check_mbox_segment(func_to_func, msg_desc, mbox_header)) { - msg_desc->seq_id = SEQ_ID_MAX_VAL; - return; - } - - seq_id = SPHW_MSG_HEADER_GET(mbox_header, SEQID); - seg_len = SPHW_MSG_HEADER_GET(mbox_header, SEG_LEN); - - pos = seq_id * MBOX_SEG_LEN; - memcpy((u8 *)msg_desc->msg + pos, mbox_body, seg_len); - - if (!SPHW_MSG_HEADER_GET(mbox_header, LAST)) - return; - - msg_desc->msg_len = SPHW_MSG_HEADER_GET(mbox_header, MSG_LEN); - msg_desc->msg_info.status = SPHW_MSG_HEADER_GET(mbox_header, STATUS); - - if (SPHW_MSG_HEADER_GET(mbox_header, DIRECTION) == - SPHW_MSG_RESPONSE) { - resp_mbox_handler(func_to_func, msg_desc); - return; - } - - 
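/* direct-send message: hand it off to the mbox workqueue for processing */ -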
recv_mbox_msg_handler(func_to_func, msg_desc, mbox_header); -} - -void sphw_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size) -{ - struct sphw_mbox *func_to_func = NULL; - struct sphw_msg_desc *msg_desc = NULL; - u64 mbox_header = *((u64 *)header); - u64 src, dir; - - func_to_func = ((struct sphw_hwdev *)handle)->func_to_func; - - dir = SPHW_MSG_HEADER_GET(mbox_header, DIRECTION); - src = SPHW_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); - - msg_desc = get_mbox_msg_desc(func_to_func, dir, src); - if (!msg_desc) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Mailbox source function id: %u is invalid for current function\n", - (u32)src); - return; - } - - recv_mbox_handler(func_to_func, (u64 *)header, msg_desc); -} - -static int init_mbox_dma_queue(struct sphw_hwdev *hwdev, struct mbox_dma_queue *mq) -{ - u32 size; - - mq->depth = MBOX_DMA_MSG_QUEUE_DEPTH; - mq->prod_idx = 0; - mq->cons_idx = 0; - - size = mq->depth * MBOX_MAX_BUF_SZ; - mq->dma_buff_vaddr = dma_alloc_coherent(hwdev->dev_hdl, size, &mq->dma_buff_paddr, - GFP_KERNEL); - if (!mq->dma_buff_vaddr) { - sdk_err(hwdev->dev_hdl, "Failed to alloc dma_buffer\n"); - return -ENOMEM; - } - - return 0; -} - -static void deinit_mbox_dma_queue(struct sphw_hwdev *hwdev, struct mbox_dma_queue *mq) -{ - dma_free_coherent(hwdev->dev_hdl, mq->depth * MBOX_MAX_BUF_SZ, - mq->dma_buff_vaddr, mq->dma_buff_paddr); -} - -static int sphw_init_mbox_dma_queue(struct sphw_mbox *func_to_func) -{ - u32 val; - int err; - - err = init_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); - if (err) - return err; - - err = init_mbox_dma_queue(func_to_func->hwdev, &func_to_func->async_msg_queue); - if (err) { - deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); - return err; - } - - val = sphw_hwif_read_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET); - val = MBOX_MQ_CI_CLEAR(val, SYNC); - val = MBOX_MQ_CI_CLEAR(val, ASYNC); - sphw_hwif_write_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET, val); - - return 0; -} - -static void sphw_deinit_mbox_dma_queue(struct sphw_mbox *func_to_func) -{ - deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); - deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->async_msg_queue); -} - -#define MBOX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a -static u32 mbox_dma_msg_xor(u32 *data, u16 msg_len) -{ - u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL; - u16 dw_len = msg_len / sizeof(u32); - u16 i; - - for (i = 0; i < dw_len; i++) - xor ^= data[i]; - - return xor; -} - -#define MQ_ID_MASK(mq, idx) ((idx) & ((mq)->depth - 1)) -#define IS_MSG_QUEUE_FULL(mq) (MQ_ID_MASK(mq, (mq)->prod_idx + 1) == \ - MQ_ID_MASK(mq, (mq)->cons_idx)) - -static int mbox_prepare_dma_entry(struct sphw_mbox *func_to_func, struct mbox_dma_queue *mq, - struct mbox_dma_msg *dma_msg, void *msg, u16 msg_len) -{ - u64 dma_addr, offset; - void *dma_vaddr; - - if (IS_MSG_QUEUE_FULL(mq)) { - sdk_err(func_to_func->hwdev->dev_hdl, "Mbox sync message queue is busy, pi: %u, ci: %u\n", - mq->prod_idx, MQ_ID_MASK(mq, mq->cons_idx)); - return -EBUSY; - } - - /* copy data to DMA buffer */ - offset = mq->prod_idx * MBOX_MAX_BUF_SZ; - dma_vaddr = (u8 *)mq->dma_buff_vaddr + offset; - memcpy(dma_vaddr, msg, msg_len); - dma_addr = mq->dma_buff_paddr + offset; - dma_msg->dma_addr_high = upper_32_bits(dma_addr); - dma_msg->dma_addr_low = lower_32_bits(dma_addr); - dma_msg->msg_len = msg_len; - dma_msg->xor = mbox_dma_msg_xor(dma_vaddr, ALIGN(msg_len, sizeof(u32))); - - mq->prod_idx++; - mq->prod_idx = MQ_ID_MASK(mq, mq->prod_idx); 
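- /* prod_idx wraps within the power-of-two queue depth via MQ_ID_MASK */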
- - return 0; -} - -static int mbox_prepare_dma_msg(struct sphw_mbox *func_to_func, enum sphw_msg_ack_type ack_type, - struct mbox_dma_msg *dma_msg, void *msg, u16 msg_len) -{ - struct mbox_dma_queue *mq = NULL; - u32 val; - - val = sphw_hwif_read_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET); - if (ack_type == SPHW_MSG_ACK) { - mq = &func_to_func->sync_msg_queue; - mq->cons_idx = MBOX_MQ_CI_GET(val, SYNC); - } else { - mq = &func_to_func->async_msg_queue; - mq->cons_idx = MBOX_MQ_CI_GET(val, ASYNC); - } - - return mbox_prepare_dma_entry(func_to_func, mq, dma_msg, msg, msg_len); -} - -static void clear_mbox_status(struct sphw_send_mbox *mbox) -{ - *mbox->wb_status = 0; - - /* clear mailbox write back status */ - wmb(); -} - -static void mbox_copy_header(struct sphw_hwdev *hwdev, - struct sphw_send_mbox *mbox, u64 *header) -{ - u32 *data = (u32 *)header; - u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32); - - for (i = 0; i < idx_max; i++) - __raw_writel(cpu_to_be32(*(data + i)), mbox->data + i * sizeof(u32)); -} - -static void mbox_copy_send_data(struct sphw_hwdev *hwdev, struct sphw_send_mbox *mbox, void *seg, - u16 seg_len) -{ - u32 *data = seg; - u32 data_len, chk_sz = sizeof(u32); - u32 i, idx_max; - - data_len = seg_len; - idx_max = ALIGN(data_len, chk_sz) / chk_sz; - - for (i = 0; i < idx_max; i++) - __raw_writel(cpu_to_be32(*(data + i)), - mbox->data + MBOX_HEADER_SZ + i * sizeof(u32)); -} - -static void write_mbox_msg_attr(struct sphw_mbox *func_to_func, - u16 dst_func, u16 dst_aeqn, u16 seg_len) -{ - u32 mbox_int, mbox_ctrl; - - /* for VF to PF's message, dest func id will self-learning by HW */ - if (SPHW_IS_VF(func_to_func->hwdev) && - dst_func != SPHW_MGMT_SRC_ID) - dst_func = 0; /* the destination is the VF's PF */ - - mbox_int = SPHW_MBOX_INT_SET(dst_aeqn, DST_AEQN) | - SPHW_MBOX_INT_SET(0, SRC_RESP_AEQN) | - SPHW_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) | - SPHW_MBOX_INT_SET(ALIGN(seg_len + MBOX_HEADER_SZ, - MBOX_SEG_LEN_ALIGN) >> 2, TX_SIZE) | - SPHW_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) | - SPHW_MBOX_INT_SET(WRITE_BACK, WB_EN); - - sphw_hwif_write_reg(func_to_func->hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, - mbox_int); - - wmb(); /* writing the mbox int attributes */ - mbox_ctrl = SPHW_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS); - - mbox_ctrl |= SPHW_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE); - - mbox_ctrl |= SPHW_MBOX_CTRL_SET(dst_func, DST_FUNC); - - sphw_hwif_write_reg(func_to_func->hwdev->hwif, - SPHW_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl); -} - -void dump_mbox_reg(struct sphw_hwdev *hwdev) -{ - u32 val; - - val = sphw_hwif_read_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_CONTROL_OFF); - sdk_err(hwdev->dev_hdl, "Mailbox control reg: 0x%x\n", val); - val = sphw_hwif_read_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_INT_OFFSET_OFF); - sdk_err(hwdev->dev_hdl, "Mailbox interrupt offset: 0x%x\n", val); -} - -static u16 get_mbox_status(struct sphw_send_mbox *mbox) -{ - /* write back is 16B, but only use first 4B */ - u64 wb_val = be64_to_cpu(*mbox->wb_status); - - rmb(); /* verify reading before check */ - - return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK); -} - -enum sphw_wait_return check_mbox_wb_status(void *priv_data) -{ - struct sphw_mbox *func_to_func = priv_data; - u16 wb_status; - - if (MBOX_MSG_CHANNEL_STOP(func_to_func)) - return WAIT_PROCESS_ERR; - - wb_status = get_mbox_status(&func_to_func->send_mbox); - - return MBOX_STATUS_FINISHED(wb_status) ? 
- WAIT_PROCESS_CPL : WAIT_PROCESS_WAITING; -} - -static int send_mbox_seg(struct sphw_mbox *func_to_func, u64 header, - u16 dst_func, void *seg, u16 seg_len, void *msg_info) -{ - struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; - struct sphw_hwdev *hwdev = func_to_func->hwdev; - u8 num_aeqs = hwdev->hwif->attr.num_aeqs; - u16 dst_aeqn, wb_status = 0, errcode; - u16 seq_dir = SPHW_MSG_HEADER_GET(header, DIRECTION); - int err; - - /* mbox to mgmt cpu, hardware don't care dst aeq id*/ - if (num_aeqs > SPHW_MBOX_RSP_MSG_AEQ) - dst_aeqn = (seq_dir == SPHW_MSG_DIRECT_SEND) ? - SPHW_ASYNC_MSG_AEQ : SPHW_MBOX_RSP_MSG_AEQ; - else - dst_aeqn = 0; - - clear_mbox_status(send_mbox); - - mbox_copy_header(hwdev, send_mbox, &header); - - mbox_copy_send_data(hwdev, send_mbox, seg, seg_len); - - write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len); - - wmb(); /* writing the mbox msg attributes */ - - err = sphw_wait_for_timeout(func_to_func, check_mbox_wb_status, - MBOX_MSG_POLLING_TIMEOUT, USEC_PER_MSEC); - wb_status = get_mbox_status(send_mbox); - if (err) { - sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout, wb status: 0x%x\n", - wb_status); - dump_mbox_reg(hwdev); - return -ETIMEDOUT; - } - - if (!MBOX_STATUS_SUCCESS(wb_status)) { - sdk_err(hwdev->dev_hdl, "Send mailbox segment to function %u error, wb status: 0x%x\n", - dst_func, wb_status); - errcode = MBOX_STATUS_ERRCODE(wb_status); - return errcode ? errcode : -EFAULT; - } - - return 0; -} - -static int send_mbox_msg(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, - void *msg, u16 msg_len, u16 dst_func, - enum sphw_msg_direction_type direction, - enum sphw_msg_ack_type ack_type, - struct mbox_msg_info *msg_info) -{ - struct sphw_hwdev *hwdev = func_to_func->hwdev; - struct mbox_dma_msg dma_msg = {0}; - enum sphw_data_type data_type = SPHW_DATA_INLINE; - int err = 0; - u32 seq_id = 0; - u16 seg_len = MBOX_SEG_LEN; - u16 rsp_aeq_id, left; - u8 *msg_seg = NULL; - u64 header = 0; - - if (hwdev->hwif->attr.num_aeqs >= 2) - rsp_aeq_id = SPHW_MBOX_RSP_MSG_AEQ; - else - rsp_aeq_id = 0; - - mutex_lock(&func_to_func->msg_send_lock); - - if (IS_DMA_MBX_MSG(dst_func)) { - err = mbox_prepare_dma_msg(func_to_func, ack_type, &dma_msg, msg, msg_len); - if (err) - goto send_err; - - msg = &dma_msg; - msg_len = sizeof(dma_msg); - data_type = SPHW_DATA_DMA; - } - - msg_seg = (u8 *)msg; - left = msg_len; - - header = SPHW_MSG_HEADER_SET(msg_len, MSG_LEN) | - SPHW_MSG_HEADER_SET(mod, MODULE) | - SPHW_MSG_HEADER_SET(seg_len, SEG_LEN) | - SPHW_MSG_HEADER_SET(ack_type, NO_ACK) | - SPHW_MSG_HEADER_SET(data_type, DATA_TYPE) | - SPHW_MSG_HEADER_SET(SEQ_ID_START_VAL, SEQID) | - SPHW_MSG_HEADER_SET(NOT_LAST_SEGMENT, LAST) | - SPHW_MSG_HEADER_SET(direction, DIRECTION) | - SPHW_MSG_HEADER_SET(cmd, CMD) | - /* The vf's offset to it's associated pf */ - SPHW_MSG_HEADER_SET(msg_info->msg_id, MSG_ID) | - SPHW_MSG_HEADER_SET(rsp_aeq_id, AEQ_ID) | - SPHW_MSG_HEADER_SET(SPHW_MSG_FROM_MBOX, SOURCE) | - SPHW_MSG_HEADER_SET(!!msg_info->status, STATUS) | - SPHW_MSG_HEADER_SET(sphw_global_func_id(hwdev), SRC_GLB_FUNC_IDX); - - while (!(SPHW_MSG_HEADER_GET(header, LAST))) { - if (left <= MBOX_SEG_LEN) { - header &= ~MBOX_SEGLEN_MASK; - header |= SPHW_MSG_HEADER_SET(left, SEG_LEN); - header |= SPHW_MSG_HEADER_SET(LAST_SEGMENT, LAST); - - seg_len = left; - } - - err = send_mbox_seg(func_to_func, header, dst_func, msg_seg, - seg_len, msg_info); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to send mbox seg, seq_id=0x%llx\n", - SPHW_MSG_HEADER_GET(header, SEQID)); - 
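/* a failed segment aborts the remaining segments of this message */ -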
goto send_err; - } - - left -= MBOX_SEG_LEN; - msg_seg += MBOX_SEG_LEN; - - seq_id++; - header &= ~(SPHW_MSG_HEADER_SET(SPHW_MSG_HEADER_SEQID_MASK, SEQID)); - header |= SPHW_MSG_HEADER_SET(seq_id, SEQID); - } - -send_err: - mutex_unlock(&func_to_func->msg_send_lock); - - return err; -} - -static void set_mbox_to_func_event(struct sphw_mbox *func_to_func, - enum mbox_event_state event_flag) -{ - spin_lock(&func_to_func->mbox_lock); - func_to_func->event_flag = event_flag; - spin_unlock(&func_to_func->mbox_lock); -} - -static enum sphw_wait_return check_mbox_msg_finish(void *priv_data) -{ - struct sphw_mbox *func_to_func = priv_data; - - if (MBOX_MSG_CHANNEL_STOP(func_to_func)) - return WAIT_PROCESS_ERR; - - return (func_to_func->event_flag == EVENT_SUCCESS) ? - WAIT_PROCESS_CPL : WAIT_PROCESS_WAITING; -} - -static int wait_mbox_msg_completion(struct sphw_mbox *func_to_func, - u32 timeout) -{ - int err; - - timeout = timeout ? timeout : SPHW_MBOX_COMP_TIME; - err = sphw_wait_for_timeout(func_to_func, check_mbox_msg_finish, - timeout, USEC_PER_MSEC); - if (err) { - set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); - return -ETIMEDOUT; - } - - set_mbox_to_func_event(func_to_func, EVENT_END); - - return 0; -} - -#define TRY_MBOX_LOCK_SLEPP 1000 -static int send_mbox_msg_lock(struct sphw_mbox *func_to_func, u16 channel) -{ - if (!func_to_func->lock_channel_en) { - mutex_lock(&func_to_func->mbox_send_lock); - return 0; - } - - while (!test_bit(channel, &func_to_func->channel_stop)) { - if (mutex_trylock(&func_to_func->mbox_send_lock)) - return 0; - - usleep_range(TRY_MBOX_LOCK_SLEPP - 1, TRY_MBOX_LOCK_SLEPP); - } - - return -EAGAIN; -} - -static void send_mbox_msg_unlock(struct sphw_mbox *func_to_func) -{ - mutex_unlock(&func_to_func->mbox_send_lock); -} - -int sphw_mbox_to_func(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, u16 dst_func, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) -{ - /* use mbox_resp to hold the data responded by the other function */ - struct sphw_msg_desc *msg_desc = NULL; - struct mbox_msg_info msg_info = {0}; - int err; - - if (!func_to_func->hwdev->chip_present_flag) - return -EPERM; - - /* expect response message */ - msg_desc = get_mbox_msg_desc(func_to_func, SPHW_MSG_RESPONSE, - dst_func); - if (!msg_desc) - return -EFAULT; - - err = send_mbox_msg_lock(func_to_func, channel); - if (err) - return err; - - func_to_func->cur_msg_channel = channel; - msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func); - - set_mbox_to_func_event(func_to_func, EVENT_START); - - err = send_mbox_msg(func_to_func, mod, cmd, buf_in, in_size, dst_func, - SPHW_MSG_DIRECT_SEND, SPHW_MSG_ACK, &msg_info); - if (err) { - sdk_err(func_to_func->hwdev->dev_hdl, "Send mailbox mod %u, cmd %u failed, msg_id: %u, err: %d\n", - mod, cmd, msg_info.msg_id, err); - set_mbox_to_func_event(func_to_func, EVENT_FAIL); - goto send_err; - } - - if (wait_mbox_msg_completion(func_to_func, timeout)) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id); - sphw_dump_aeq_info(func_to_func->hwdev); - err = -ETIMEDOUT; - goto send_err; - } - - if (mod != msg_desc->mod || cmd != msg_desc->cmd) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Invalid response mbox message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n", - msg_desc->mod, msg_desc->cmd, mod, cmd); - err = -EFAULT; - goto send_err; - } - - if (msg_desc->msg_info.status) { - err = msg_desc->msg_info.status; - goto send_err; - } - - if (buf_out && out_size) { - if (*out_size
< msg_desc->msg_len) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Invalid response mbox message length: %u for mod %d cmd %u, should less than: %u\n", - msg_desc->msg_len, mod, cmd, *out_size); - err = -EFAULT; - goto send_err; - } - - if (msg_desc->msg_len) - memcpy(buf_out, msg_desc->msg, msg_desc->msg_len); - - *out_size = msg_desc->msg_len; - } - -send_err: - send_mbox_msg_unlock(func_to_func); - - return err; -} - -static int mbox_func_params_valid(struct sphw_mbox *func_to_func, - void *buf_in, u16 in_size, u16 channel) -{ - if (!buf_in || !in_size) - return -EINVAL; - - if (in_size > SPHW_MBOX_DATA_SIZE) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Mbox msg len %u exceed limit: [1, %u]\n", - in_size, SPHW_MBOX_DATA_SIZE); - return -EINVAL; - } - - if (channel >= SPHW_CHANNEL_MAX) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Invalid channel id: 0x%x\n", channel); - return -EINVAL; - } - - return 0; -} - -int sphw_mbox_to_func_no_ack(struct sphw_hwdev *hwdev, u16 func_idx, u8 mod, u16 cmd, - void *buf_in, u16 in_size, u16 channel) -{ - struct mbox_msg_info msg_info = {0}; - int err = mbox_func_params_valid(hwdev->func_to_func, buf_in, in_size, - channel); - - if (err) - return err; - - err = send_mbox_msg_lock(hwdev->func_to_func, channel); - if (err) - return err; - - err = send_mbox_msg(hwdev->func_to_func, mod, cmd, buf_in, in_size, - func_idx, SPHW_MSG_DIRECT_SEND, - SPHW_MSG_NO_ACK, &msg_info); - if (err) - sdk_err(hwdev->dev_hdl, "Send mailbox no ack failed\n"); - - send_mbox_msg_unlock(hwdev->func_to_func); - - return err; -} - -int sphw_send_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u32 timeout, u16 channel) -{ - struct sphw_mbox *func_to_func = hwdev->func_to_func; - int err = mbox_func_params_valid(func_to_func, buf_in, in_size, channel); - - if (err) - return err; - - return sphw_mbox_to_func(func_to_func, mod, cmd, SPHW_MGMT_SRC_ID, - buf_in, in_size, buf_out, out_size, timeout, channel); -} - -void sphw_response_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, - void *buf_in, u16 in_size, u16 msg_id) -{ - struct mbox_msg_info msg_info; - - msg_info.msg_id = (u8)msg_id; - msg_info.status = 0; - - send_mbox_msg(hwdev->func_to_func, mod, cmd, buf_in, in_size, - SPHW_MGMT_SRC_ID, SPHW_MSG_RESPONSE, - SPHW_MSG_NO_ACK, &msg_info); -} - -int sphw_send_mbox_to_mgmt_no_ack(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, - void *buf_in, u16 in_size, u16 channel) -{ - struct sphw_mbox *func_to_func = hwdev->func_to_func; - int err = mbox_func_params_valid(func_to_func, buf_in, in_size, - channel); - - if (err) - return err; - - return sphw_mbox_to_func_no_ack(hwdev, SPHW_MGMT_SRC_ID, mod, cmd, - buf_in, in_size, channel); -} - -int sphw_mbox_ppf_to_host(void *hwdev, u8 mod, u16 cmd, u8 host_id, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel) -{ - struct sphw_hwdev *dev = hwdev; - u16 dst_ppf_func; - int err; - - if (!hwdev) - return -EINVAL; - - if (!(dev->chip_present_flag)) - return -EPERM; - - err = mbox_func_params_valid(dev->func_to_func, buf_in, in_size, - channel); - if (err) - return err; - - if (!SPHW_IS_PPF(dev)) { - sdk_err(dev->dev_hdl, "Params error, only ppf support send mbox to ppf. 
func_type: %d\n", - sphw_func_type(dev)); - return -EINVAL; - } - - if (host_id >= SPHW_MAX_HOST_NUM(dev) || - host_id == SPHW_PCI_INTF_IDX(dev->hwif)) { - sdk_err(dev->dev_hdl, "Params error, host id: %u\n", host_id); - return -EINVAL; - } - - dst_ppf_func = sphw_host_ppf_idx(dev, host_id); - if (dst_ppf_func >= SPHW_MAX_PF_NUM(dev)) { - sdk_err(dev->dev_hdl, "Dest host(%u) have not elect ppf(0x%x).\n", - host_id, dst_ppf_func); - return -EINVAL; - } - - return sphw_mbox_to_func(dev->func_to_func, mod, cmd, dst_ppf_func, buf_in, in_size, - buf_out, out_size, timeout, channel); -} - -int sphw_mbox_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel) -{ - struct sphw_hwdev *dev = hwdev; - int err; - - if (!hwdev) - return -EINVAL; - - if (!(dev->chip_present_flag)) - return -EPERM; - - err = mbox_func_params_valid(dev->func_to_func, buf_in, in_size, - channel); - if (err) - return err; - - if (!SPHW_IS_VF(dev)) { - sdk_err(dev->dev_hdl, "Params error, func_type: %d\n", - sphw_func_type(dev)); - return -EINVAL; - } - - return sphw_mbox_to_func(dev->func_to_func, mod, cmd, sphw_pf_id_of_vf(dev), buf_in, - in_size, buf_out, out_size, timeout, channel); -} - -int sphw_mbox_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u32 timeout, u16 channel) -{ - struct sphw_mbox *func_to_func = NULL; - int err = 0; - u16 dst_func_idx; - - if (!hwdev) - return -EINVAL; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - err = mbox_func_params_valid(func_to_func, buf_in, in_size, channel); - if (err) - return err; - - if (SPHW_IS_VF((struct sphw_hwdev *)hwdev)) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n", - sphw_func_type(hwdev)); - return -EINVAL; - } - - if (!vf_id) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "VF id(%u) error!\n", vf_id); - return -EINVAL; - } - - /* vf_offset_to_pf + vf_id is the vf's global function id of vf in - * this pf - */ - dst_func_idx = sphw_glb_pf_vf_offset(hwdev) + vf_id; - - return sphw_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in, - in_size, buf_out, out_size, timeout, channel); -} - -void sphw_mbox_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable) -{ - hwdev->func_to_func->lock_channel_en = enable; - - sdk_info(hwdev->dev_hdl, "%s mbox channel lock\n", - enable ? 
"Enable" : "Disable"); -} - -static int alloc_mbox_msg_channel(struct sphw_msg_channel *msg_ch) -{ - msg_ch->resp_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); - if (!msg_ch->resp_msg.msg) - return -ENOMEM; - - msg_ch->recv_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); - if (!msg_ch->recv_msg.msg) { - kfree(msg_ch->resp_msg.msg); - return -ENOMEM; - } - - msg_ch->resp_msg.seq_id = SEQ_ID_MAX_VAL; - msg_ch->recv_msg.seq_id = SEQ_ID_MAX_VAL; - atomic_set(&msg_ch->recv_msg_cnt, 0); - - return 0; -} - -static void free_mbox_msg_channel(struct sphw_msg_channel *msg_ch) -{ - kfree(msg_ch->recv_msg.msg); - kfree(msg_ch->resp_msg.msg); -} - -static int init_mgmt_msg_channel(struct sphw_mbox *func_to_func) -{ - int err; - - err = alloc_mbox_msg_channel(&func_to_func->mgmt_msg); - if (err) { - sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc mgmt message channel\n"); - return err; - } - - err = sphw_init_mbox_dma_queue(func_to_func); - if (err) { - sdk_err(func_to_func->hwdev->dev_hdl, "Failed to init mbox dma queue\n"); - free_mbox_msg_channel(&func_to_func->mgmt_msg); - } - - return err; -} - -static void deinit_mgmt_msg_channel(struct sphw_mbox *func_to_func) -{ - sphw_deinit_mbox_dma_queue(func_to_func); - free_mbox_msg_channel(&func_to_func->mgmt_msg); -} - -int sphw_mbox_init_host_msg_channel(struct sphw_hwdev *hwdev) -{ - struct sphw_mbox *func_to_func = hwdev->func_to_func; - u8 host_num = SPHW_MAX_HOST_NUM(hwdev); - int i, host_id, err; - - if (host_num == 0) - return 0; - - func_to_func->host_msg = kcalloc(host_num, - sizeof(*func_to_func->host_msg), - GFP_KERNEL); - if (!func_to_func->host_msg) { - sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc host message array\n"); - return -ENOMEM; - } - - for (host_id = 0; host_id < host_num; host_id++) { - err = alloc_mbox_msg_channel(&func_to_func->host_msg[host_id]); - if (err) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Failed to alloc host %d message channel\n", - host_id); - goto alloc_msg_ch_err; - } - } - - func_to_func->support_h2h_msg = true; - - return 0; - -alloc_msg_ch_err: - for (i = 0; i < host_id; i++) - free_mbox_msg_channel(&func_to_func->host_msg[i]); - - kfree(func_to_func->host_msg); - func_to_func->host_msg = NULL; - - return -ENOMEM; -} - -static void deinit_host_msg_channel(struct sphw_mbox *func_to_func) -{ - int i; - - if (!func_to_func->host_msg) - return; - - for (i = 0; i < SPHW_MAX_HOST_NUM(func_to_func->hwdev); i++) - free_mbox_msg_channel(&func_to_func->host_msg[i]); - - kfree(func_to_func->host_msg); - func_to_func->host_msg = NULL; -} - -int sphw_init_func_mbox_msg_channel(void *hwdev, u16 num_func) -{ - struct sphw_hwdev *dev = hwdev; - struct sphw_mbox *func_to_func = NULL; - u16 func_id, i; - int err; - - if (!hwdev || !num_func || num_func > SPHW_MAX_FUNCTIONS) - return -EINVAL; - - func_to_func = dev->func_to_func; - if (func_to_func->func_msg) - return (func_to_func->num_func_msg == num_func) ? 
0 : -EFAULT; - - func_to_func->func_msg = - kcalloc(num_func, sizeof(*func_to_func->func_msg), GFP_KERNEL); - if (!func_to_func->func_msg) { - sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc func message array\n"); - return -ENOMEM; - } - - for (func_id = 0; func_id < num_func; func_id++) { - err = alloc_mbox_msg_channel(&func_to_func->func_msg[func_id]); - if (err) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Failed to alloc func %hu message channel\n", - func_id); - goto alloc_msg_ch_err; - } - } - - func_to_func->num_func_msg = num_func; - - return 0; - -alloc_msg_ch_err: - for (i = 0; i < func_id; i++) - free_mbox_msg_channel(&func_to_func->func_msg[i]); - - kfree(func_to_func->func_msg); - func_to_func->func_msg = NULL; - - return -ENOMEM; -} - -void sphw_deinit_func_mbox_msg_channel(struct sphw_hwdev *hwdev) -{ - struct sphw_mbox *func_to_func = hwdev->func_to_func; - u16 i; - - if (!func_to_func->func_msg) - return; - - for (i = 0; i < func_to_func->num_func_msg; i++) - free_mbox_msg_channel(&func_to_func->func_msg[i]); - - kfree(func_to_func->func_msg); - func_to_func->func_msg = NULL; -} - -struct sphw_msg_desc *get_mbox_msg_desc(struct sphw_mbox *func_to_func, u64 dir, u64 src_func_id) -{ - struct sphw_hwdev *hwdev = func_to_func->hwdev; - struct sphw_msg_channel *msg_ch = NULL; - u16 id; - - if (src_func_id == SPHW_MGMT_SRC_ID) { - msg_ch = &func_to_func->mgmt_msg; - } else if (SPHW_IS_VF(hwdev)) { - /* message from pf */ - msg_ch = func_to_func->func_msg; - if (src_func_id != sphw_pf_id_of_vf(hwdev) || !msg_ch) - return NULL; - } else if (src_func_id > sphw_glb_pf_vf_offset(hwdev)) { - /* message from vf */ - id = (u16)(src_func_id - 1U) - sphw_glb_pf_vf_offset(hwdev); - if (id >= func_to_func->num_func_msg) - return NULL; - - msg_ch = &func_to_func->func_msg[id]; - } else { - /* message from other host's ppf */ - if (!func_to_func->support_h2h_msg) - return NULL; - - for (id = 0; id < SPHW_MAX_HOST_NUM(hwdev); id++) { - if (src_func_id == sphw_host_ppf_idx(hwdev, (u8)id)) - break; - } - - if (id == SPHW_MAX_HOST_NUM(hwdev) || !func_to_func->host_msg) - return NULL; - - msg_ch = &func_to_func->host_msg[id]; - } - - return (dir == SPHW_MSG_DIRECT_SEND) ? 
- &msg_ch->recv_msg : &msg_ch->resp_msg; -} - -static void prepare_send_mbox(struct sphw_mbox *func_to_func) -{ - struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; - - send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif); -} - -static int alloc_mbox_wb_status(struct sphw_mbox *func_to_func) -{ - struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; - struct sphw_hwdev *hwdev = func_to_func->hwdev; - u32 addr_h, addr_l; - - send_mbox->wb_vaddr = dma_alloc_coherent(hwdev->dev_hdl, MBOX_WB_STATUS_LEN, - &send_mbox->wb_paddr, GFP_KERNEL); - if (!send_mbox->wb_vaddr) - return -ENOMEM; - - send_mbox->wb_status = send_mbox->wb_vaddr; - - addr_h = upper_32_bits(send_mbox->wb_paddr); - addr_l = lower_32_bits(send_mbox->wb_paddr); - - sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_H_OFF, addr_h); - sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_L_OFF, addr_l); - - return 0; -} - -static void free_mbox_wb_status(struct sphw_mbox *func_to_func) -{ - struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; - struct sphw_hwdev *hwdev = func_to_func->hwdev; - - sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_H_OFF, 0); - sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_L_OFF, 0); - - dma_free_coherent(hwdev->dev_hdl, MBOX_WB_STATUS_LEN, - send_mbox->wb_vaddr, send_mbox->wb_paddr); -} - -int sphw_func_to_func_init(struct sphw_hwdev *hwdev) -{ - struct sphw_mbox *func_to_func; - int err; - - func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL); - if (!func_to_func) - return -ENOMEM; - - hwdev->func_to_func = func_to_func; - func_to_func->hwdev = hwdev; - mutex_init(&func_to_func->mbox_send_lock); - mutex_init(&func_to_func->msg_send_lock); - spin_lock_init(&func_to_func->mbox_lock); - func_to_func->workq = - create_singlethread_workqueue(SPHW_MBOX_WQ_NAME); - if (!func_to_func->workq) { - sdk_err(hwdev->dev_hdl, "Failed to initialize MBOX workqueue\n"); - err = -ENOMEM; - goto create_mbox_workq_err; - } - - err = init_mgmt_msg_channel(func_to_func); - if (err) - goto init_mgmt_msg_ch_err; - - if (SPHW_IS_VF(hwdev)) { - /* VF to PF mbox message channel */ - err = sphw_init_func_mbox_msg_channel(hwdev, 1); - if (err) - goto init_func_msg_ch_err; - } - - err = alloc_mbox_wb_status(func_to_func); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to alloc mbox write back status\n"); - goto alloc_wb_status_err; - } - - prepare_send_mbox(func_to_func); - - return 0; - -alloc_wb_status_err: - if (SPHW_IS_VF(hwdev)) - sphw_deinit_func_mbox_msg_channel(hwdev); - -init_func_msg_ch_err: - deinit_mgmt_msg_channel(func_to_func); - -init_mgmt_msg_ch_err: - destroy_workqueue(func_to_func->workq); - -create_mbox_workq_err: - kfree(func_to_func); - - return err; -} - -void sphw_func_to_func_free(struct sphw_hwdev *hwdev) -{ - struct sphw_mbox *func_to_func = hwdev->func_to_func; - - /* destroy workqueue before free related mbox resources in case of - * illegal resource access - */ - destroy_workqueue(func_to_func->workq); - - free_mbox_wb_status(func_to_func); - if (SPHW_IS_PPF(hwdev)) - deinit_host_msg_channel(func_to_func); - sphw_deinit_func_mbox_msg_channel(hwdev); - deinit_mgmt_msg_channel(func_to_func); - - kfree(func_to_func); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h deleted file mode 100644 index 9aebee1c088a34590f9846b3b617f2f30ee4e262..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h +++ /dev/null @@ -1,271 
+0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_MBOX_H -#define SPHW_MBOX_H - -#include "sphw_crm.h" - -#define SPHW_MBOX_PF_SEND_ERR 0x1 - -#define SPHW_MGMT_SRC_ID 0x1FFF -#define SPHW_MAX_FUNCTIONS 4096 - -/* message header define */ -#define SPHW_MSG_HEADER_SRC_GLB_FUNC_IDX_SHIFT 0 -#define SPHW_MSG_HEADER_STATUS_SHIFT 13 -#define SPHW_MSG_HEADER_SOURCE_SHIFT 15 -#define SPHW_MSG_HEADER_AEQ_ID_SHIFT 16 -#define SPHW_MSG_HEADER_MSG_ID_SHIFT 18 -#define SPHW_MSG_HEADER_CMD_SHIFT 22 - -#define SPHW_MSG_HEADER_MSG_LEN_SHIFT 32 -#define SPHW_MSG_HEADER_MODULE_SHIFT 43 -#define SPHW_MSG_HEADER_SEG_LEN_SHIFT 48 -#define SPHW_MSG_HEADER_NO_ACK_SHIFT 54 -#define SPHW_MSG_HEADER_DATA_TYPE_SHIFT 55 -#define SPHW_MSG_HEADER_SEQID_SHIFT 56 -#define SPHW_MSG_HEADER_LAST_SHIFT 62 -#define SPHW_MSG_HEADER_DIRECTION_SHIFT 63 - -#define SPHW_MSG_HEADER_SRC_GLB_FUNC_IDX_MASK 0x1FFF -#define SPHW_MSG_HEADER_STATUS_MASK 0x1 -#define SPHW_MSG_HEADER_SOURCE_MASK 0x1 -#define SPHW_MSG_HEADER_AEQ_ID_MASK 0x3 -#define SPHW_MSG_HEADER_MSG_ID_MASK 0xF -#define SPHW_MSG_HEADER_CMD_MASK 0x3FF - -#define SPHW_MSG_HEADER_MSG_LEN_MASK 0x7FF -#define SPHW_MSG_HEADER_MODULE_MASK 0x1F -#define SPHW_MSG_HEADER_SEG_LEN_MASK 0x3F -#define SPHW_MSG_HEADER_NO_ACK_MASK 0x1 -#define SPHW_MSG_HEADER_DATA_TYPE_MASK 0x1 -#define SPHW_MSG_HEADER_SEQID_MASK 0x3F -#define SPHW_MSG_HEADER_LAST_MASK 0x1 -#define SPHW_MSG_HEADER_DIRECTION_MASK 0x1 - -#define SPHW_MSG_HEADER_GET(val, field) \ - (((val) >> SPHW_MSG_HEADER_##field##_SHIFT) & \ - SPHW_MSG_HEADER_##field##_MASK) -#define SPHW_MSG_HEADER_SET(val, field) \ - ((u64)(((u64)(val)) & SPHW_MSG_HEADER_##field##_MASK) << \ - SPHW_MSG_HEADER_##field##_SHIFT) - -#define IS_DMA_MBX_MSG(dst_func) ((dst_func) == SPHW_MGMT_SRC_ID) - -enum sphw_msg_direction_type { - SPHW_MSG_DIRECT_SEND = 0, - SPHW_MSG_RESPONSE = 1, -}; - -enum sphw_msg_segment_type { - NOT_LAST_SEGMENT = 0, - LAST_SEGMENT = 1, -}; - -enum sphw_msg_ack_type { - SPHW_MSG_ACK, - SPHW_MSG_NO_ACK, -}; - -enum sphw_data_type { - SPHW_DATA_INLINE = 0, - SPHW_DATA_DMA = 1, -}; - -enum sphw_msg_src_type { - SPHW_MSG_FROM_MGMT = 0, - SPHW_MSG_FROM_MBOX = 1, -}; - -enum sphw_msg_aeq_type { - SPHW_ASYNC_MSG_AEQ = 0, - /* indicate dest func or mgmt cpu which aeq to response mbox message */ - SPHW_MBOX_RSP_MSG_AEQ = 1, - /* indicate mgmt cpu which aeq to response api cmd message */ - SPHW_MGMT_RSP_MSG_AEQ = 2, -}; - -#define SPHW_MBOX_WQ_NAME "sphw_mbox" - -enum sphw_mbox_seg_errcode { - MBOX_ERRCODE_NO_ERRORS = 0, - /* VF send the mailbox data to the wrong destination functions */ - MBOX_ERRCODE_VF_TO_WRONG_FUNC = 0x100, - /* PPF send the mailbox data to the wrong destination functions */ - MBOX_ERRCODE_PPF_TO_WRONG_FUNC = 0x200, - /* PF send the mailbox data to the wrong destination functions */ - MBOX_ERRCODE_PF_TO_WRONG_FUNC = 0x300, - /* The mailbox data size is set to all zero */ - MBOX_ERRCODE_ZERO_DATA_SIZE = 0x400, - /* The sender function attribute has not been learned by hardware */ - MBOX_ERRCODE_UNKNOWN_SRC_FUNC = 0x500, - /* The receiver function attr has not been learned by hardware */ - MBOX_ERRCODE_UNKNOWN_DES_FUNC = 0x600, -}; - -struct mbox_msg_info { - u8 msg_id; - u8 status; /* can only use 1 bit */ -}; - -struct sphw_msg_desc { - void *msg; - u16 msg_len; - u8 seq_id; - u8 mod; - u16 cmd; - struct mbox_msg_info msg_info; -}; - -struct sphw_msg_channel { - struct sphw_msg_desc resp_msg; - struct sphw_msg_desc recv_msg; - - atomic_t 
recv_msg_cnt; -}; - -/* Receive other functions mbox message */ -struct sphw_recv_mbox { - void *msg; - u16 msg_len; - u8 msg_id; - u8 mod; - u16 cmd; - u16 src_func_idx; - enum sphw_msg_ack_type ack_type; - void *resp_buff; -}; - -struct sphw_send_mbox { - u8 *data; - - u64 *wb_status; /* write back status */ - void *wb_vaddr; - dma_addr_t wb_paddr; -}; - -enum mbox_event_state { - EVENT_START = 0, - EVENT_FAIL, - EVENT_SUCCESS, - EVENT_TIMEOUT, - EVENT_END, -}; - -enum sphw_mbox_cb_state { - SPHW_VF_MBOX_CB_REG = 0, - SPHW_VF_MBOX_CB_RUNNING, - SPHW_PF_MBOX_CB_REG, - SPHW_PF_MBOX_CB_RUNNING, - SPHW_PPF_MBOX_CB_REG, - SPHW_PPF_MBOX_CB_RUNNING, - SPHW_PPF_TO_PF_MBOX_CB_REG, - SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, -}; - -struct mbox_dma_msg { - u32 xor; - u32 dma_addr_high; - u32 dma_addr_low; - u32 msg_len; - u64 rsvd; -}; - -struct mbox_dma_queue { - void *dma_buff_vaddr; - dma_addr_t dma_buff_paddr; - - u16 depth; - u16 prod_idx; - u16 cons_idx; -}; - -struct sphw_mbox { - struct sphw_hwdev *hwdev; - - bool lock_channel_en; - unsigned long channel_stop; - u16 cur_msg_channel; - - /* lock for send mbox message and ack message */ - struct mutex mbox_send_lock; - /* lock for send mbox message */ - struct mutex msg_send_lock; - struct sphw_send_mbox send_mbox; - - struct mbox_dma_queue sync_msg_queue; - struct mbox_dma_queue async_msg_queue; - - struct workqueue_struct *workq; - - struct sphw_msg_channel mgmt_msg; /* driver and MGMT CPU */ - struct sphw_msg_channel *host_msg; /* PPF message between hosts */ - struct sphw_msg_channel *func_msg; /* PF to VF or VF to PF */ - u16 num_func_msg; - bool support_h2h_msg; /* host to host */ - - /* vf receive pf/ppf callback */ - sphw_vf_mbox_cb vf_mbox_cb[SPHW_MOD_MAX]; - void *vf_mbox_data[SPHW_MOD_MAX]; - /* pf/ppf receive vf callback */ - sphw_pf_mbox_cb pf_mbox_cb[SPHW_MOD_MAX]; - void *pf_mbox_data[SPHW_MOD_MAX]; - /* ppf receive pf/ppf callback */ - sphw_ppf_mbox_cb ppf_mbox_cb[SPHW_MOD_MAX]; - void *ppf_mbox_data[SPHW_MOD_MAX]; - /* pf receive ppf callback */ - sphw_pf_recv_from_ppf_mbox_cb pf_recv_ppf_mbox_cb[SPHW_MOD_MAX]; - void *pf_recv_ppf_mbox_data[SPHW_MOD_MAX]; - unsigned long ppf_to_pf_mbox_cb_state[SPHW_MOD_MAX]; - unsigned long ppf_mbox_cb_state[SPHW_MOD_MAX]; - unsigned long pf_mbox_cb_state[SPHW_MOD_MAX]; - unsigned long vf_mbox_cb_state[SPHW_MOD_MAX]; - - u8 send_msg_id; - enum mbox_event_state event_flag; - /* lock for mbox event flag */ - spinlock_t mbox_lock; -}; - -struct sphw_mbox_work { - struct work_struct work; - struct sphw_mbox *func_to_func; - struct sphw_recv_mbox *recv_mbox; - struct sphw_msg_channel *msg_ch; -}; - -struct vf_cmd_check_handle { - u16 cmd; - bool (*check_cmd)(struct sphw_hwdev *hwdev, u16 src_func_idx, - void *buf_in, u16 in_size); -}; - -void sphw_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size); - -int sphw_func_to_func_init(struct sphw_hwdev *hwdev); - -void sphw_func_to_func_free(struct sphw_hwdev *hwdev); - -int sphw_send_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel); - -void sphw_response_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, - void *buf_in, u16 in_size, u16 msg_id); - -int sphw_send_mbox_to_mgmt_no_ack(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, - void *buf_in, u16 in_size, u16 channel); -int sphw_mbox_to_func(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, - u16 dst_func, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u32 timeout, u16 channel); - -int 
sphw_mbox_ppf_to_host(void *hwdev, u8 mod, u16 cmd, u8 host_id, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel); - -int sphw_mbox_init_host_msg_channel(struct sphw_hwdev *hwdev); - -void sphw_mbox_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c deleted file mode 100644 index a66f40635963060ed742e5ed571ee0a018e4551d..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c +++ /dev/null @@ -1,895 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_comm_cmd.h" -#include "sphw_hwdev.h" -#include "sphw_eqs.h" -#include "sphw_mbox.h" -#include "sphw_api_cmd.h" -#include "sphw_prof_adap.h" -#include "sphw_mgmt.h" -#include "sphw_csr.h" - -#define SPHW_MSG_TO_MGMT_MAX_LEN 2016 - -#define SPHW_API_CHAIN_AEQ_ID 2 -#define MAX_PF_MGMT_BUF_SIZE 2048UL -#define SEGMENT_LEN 48 -#define ASYNC_MSG_FLAG 0x8 -#define MGMT_MSG_MAX_SEQ_ID (ALIGN(SPHW_MSG_TO_MGMT_MAX_LEN, \ - SEGMENT_LEN) / SEGMENT_LEN) - -#define BUF_OUT_DEFAULT_SIZE 1 - -#define MGMT_MSG_SIZE_MIN 20 -#define MGMT_MSG_SIZE_STEP 16 -#define MGMT_MSG_RSVD_FOR_DEV 8 - -#define SYNC_MSG_ID_MASK 0x7 -#define ASYNC_MSG_ID_MASK 0x7 - -#define SYNC_FLAG 0 -#define ASYNC_FLAG 1 - -#define MSG_NO_RESP 0xFFFF - -#define MGMT_MSG_TIMEOUT 20000 /* millisecond */ - -#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) - -#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ - (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK) -#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id) - -#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \ - ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) | ASYNC_MSG_FLAG) - -static void pf_to_mgmt_send_event_set(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - int event_flag) -{ - spin_lock(&pf_to_mgmt->sync_event_lock); - pf_to_mgmt->event_flag = event_flag; - spin_unlock(&pf_to_mgmt->sync_event_lock); -} - -/** - * sphw_register_mgmt_msg_cb - register sync msg handler for a module - * @hwdev: the pointer to hw device - * @mod: module in the chip that this handler will handle its sync messages - * @pri_handle: specific mod's private data that will be used in callback - * @callback: the handler for a sync message that will handle messages - **/ -int sphw_register_mgmt_msg_cb(void *hwdev, u8 mod, void *pri_handle, sphw_mgmt_msg_cb callback) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; - - if (mod >= SPHW_MOD_HW_MAX || !hwdev) - return -EFAULT; - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - if (!pf_to_mgmt) - return -EINVAL; - - pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback; - pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle; - - set_bit(SPHW_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); - - return 0; -} - -/** - * sphw_unregister_mgmt_msg_cb - unregister sync msg handler for a module - * @hwdev: the pointer to hw device - * @mod: module in the chip that this handler will handle its sync messages - **/ -void sphw_unregister_mgmt_msg_cb(void *hwdev, u8 mod) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; - - if (!hwdev || mod >= SPHW_MOD_HW_MAX) - 
return; - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - if (!pf_to_mgmt) - return; - - clear_bit(SPHW_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); - - while (test_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[mod])) - usleep_range(900, 1000); - - pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL; - pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL; -} - -/** - * mgmt_msg_len - calculate the total message length - * @msg_data_len: the length of the message data - * Return: the total message length - **/ -static u16 mgmt_msg_len(u16 msg_data_len) -{ - /* u64 - the size of the header */ - u16 msg_size; - - msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); - - if (msg_size > MGMT_MSG_SIZE_MIN) - msg_size = MGMT_MSG_SIZE_MIN + - ALIGN((msg_size - MGMT_MSG_SIZE_MIN), - MGMT_MSG_SIZE_STEP); - else - msg_size = MGMT_MSG_SIZE_MIN; - - return msg_size; -} - -/** - * prepare_header - prepare the header of the message - * @pf_to_mgmt: PF to MGMT channel - * @header: pointer of the header to prepare - * @msg_len: the length of the message - * @mod: module in the chip that will get the message - * @ack_type: ack type of the message - * @direction: the direction of the original message - * @cmd: command of the message - * @msg_id: message id - **/ -static void prepare_header(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u64 *header, u16 msg_len, u8 mod, - enum sphw_msg_ack_type ack_type, - enum sphw_msg_direction_type direction, - enum sphw_mgmt_cmd cmd, u32 msg_id) -{ - struct sphw_hwif *hwif = pf_to_mgmt->hwdev->hwif; - - *header = SPHW_MSG_HEADER_SET(msg_len, MSG_LEN) | - SPHW_MSG_HEADER_SET(mod, MODULE) | - SPHW_MSG_HEADER_SET(msg_len, SEG_LEN) | - SPHW_MSG_HEADER_SET(ack_type, NO_ACK) | - SPHW_MSG_HEADER_SET(SPHW_DATA_INLINE, DATA_TYPE) | - SPHW_MSG_HEADER_SET(0, SEQID) | - SPHW_MSG_HEADER_SET(SPHW_API_CHAIN_AEQ_ID, AEQ_ID) | - SPHW_MSG_HEADER_SET(LAST_SEGMENT, LAST) | - SPHW_MSG_HEADER_SET(direction, DIRECTION) | - SPHW_MSG_HEADER_SET(cmd, CMD) | - SPHW_MSG_HEADER_SET(SPHW_MSG_FROM_MGMT, SOURCE) | - SPHW_MSG_HEADER_SET(hwif->attr.func_global_idx, SRC_GLB_FUNC_IDX) | - SPHW_MSG_HEADER_SET(msg_id, MSG_ID); -} - -/** - * prepare_mgmt_cmd - prepare the mgmt command - * @mgmt_cmd: pointer to the command to prepare - * @header: pointer of the header to prepare - * @msg: the data of the message - * @msg_len: the length of the message - **/ -static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg, - int msg_len) -{ - memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); - - mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; - memcpy(mgmt_cmd, header, sizeof(*header)); - - mgmt_cmd += sizeof(*header); - memcpy(mgmt_cmd, msg, msg_len); -} - -/** - * send_msg_to_mgmt_sync - send sync message - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @msg: the msg data - * @msg_len: the msg data length - * @ack_type: ack type of the message - * @direction: the direction of the original message - * @resp_msg_id: msg id to response for - * Return: 0 - success, negative - failure - **/ -static int send_msg_to_mgmt_sync(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u8 mod, u16 cmd, const void *msg, u16 msg_len, - enum sphw_msg_ack_type ack_type, - enum sphw_msg_direction_type direction, - u16 resp_msg_id) -{ - void *mgmt_cmd = pf_to_mgmt->sync_msg_buf; - struct sphw_api_cmd_chain *chain = NULL; - u8 node_id = SPHW_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev); - u64 header; - u16 cmd_size = mgmt_msg_len(msg_len); - - if (!sphw_get_chip_present_flag(pf_to_mgmt->hwdev)) - return -EFAULT; - - if (direction == SPHW_MSG_RESPONSE) -
prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, - direction, cmd, resp_msg_id); - else - prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, - direction, cmd, SYNC_MSG_ID_INC(pf_to_mgmt)); - chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_WRITE_TO_MGMT_CPU]; - - if (ack_type == SPHW_MSG_ACK) - pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START); - - prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); - - return sphw_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); -} - -/** - * send_msg_to_mgmt_async - send async message - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @msg: the data of the message - * @msg_len: the length of the message - * @direction: the direction of the original message - * Return: 0 - success, negative - failure - **/ -static int send_msg_to_mgmt_async(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u8 mod, u16 cmd, const void *msg, u16 msg_len, - enum sphw_msg_direction_type direction) -{ - void *mgmt_cmd = pf_to_mgmt->async_msg_buf; - struct sphw_api_cmd_chain *chain = NULL; - u8 node_id = SPHW_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev); - u64 header; - u16 cmd_size = mgmt_msg_len(msg_len); - - if (!sphw_get_chip_present_flag(pf_to_mgmt->hwdev)) - return -EFAULT; - - prepare_header(pf_to_mgmt, &header, msg_len, mod, SPHW_MSG_NO_ACK, - direction, cmd, ASYNC_MSG_ID(pf_to_mgmt)); - - prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); - - chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU]; - - return sphw_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); -} - -static inline void msg_to_mgmt_pre(u8 mod, void *buf_in) -{ - struct sphw_msg_head *msg_head = NULL; - - /* set aeq fix num to 3, need to ensure response aeq id < 3*/ - if (mod == SPHW_MOD_COMM || mod == SPHW_MOD_L2NIC) { - msg_head = buf_in; - - if (msg_head->resp_aeq_num >= SPHW_MAX_AEQS) - msg_head->resp_aeq_num = 0; - } -} - -int sphw_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; - void *dev = ((struct sphw_hwdev *)hwdev)->dev_hdl; - struct sphw_recv_msg *recv_msg = NULL; - struct completion *recv_done = NULL; - ulong timeo; - int err; - ulong ret; - - msg_to_mgmt_pre(mod, buf_in); - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - - /* Lock the sync_msg_buf */ - down(&pf_to_mgmt->sync_msg_lock); - recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; - recv_done = &recv_msg->recv_done; - - init_completion(recv_done); - - err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, - SPHW_MSG_ACK, SPHW_MSG_DIRECT_SEND, - MSG_NO_RESP); - if (err) { - sdk_err(dev, "Failed to send sync msg to mgmt, sync_msg_id: %u\n", - pf_to_mgmt->sync_msg_id); - pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL); - goto unlock_sync_msg; - } - - timeo = msecs_to_jiffies(timeout ? 
timeout : MGMT_MSG_TIMEOUT); - - ret = wait_for_completion_timeout(recv_done, timeo); - if (!ret) { - sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %u\n", - pf_to_mgmt->sync_msg_id); - sphw_dump_aeq_info((struct sphw_hwdev *)hwdev); - err = -ETIMEDOUT; - pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT); - goto unlock_sync_msg; - } - pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END); - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) { - up(&pf_to_mgmt->sync_msg_lock); - return -ETIMEDOUT; - } - - if (buf_out && out_size) { - if (*out_size < recv_msg->msg_len) { - sdk_err(dev, "Invalid response message length: %u for mod %d cmd %u from mgmt, should less than: %u\n", - recv_msg->msg_len, mod, cmd, *out_size); - err = -EFAULT; - goto unlock_sync_msg; - } - - if (recv_msg->msg_len) - memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); - - *out_size = recv_msg->msg_len; - } - -unlock_sync_msg: - up(&pf_to_mgmt->sync_msg_lock); - - return err; -} - -int sphw_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt; - void *dev = ((struct sphw_hwdev *)hwdev)->dev_hdl; - int err; - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - - /* Lock the async_msg_buf */ - spin_lock_bh(&pf_to_mgmt->async_msg_lock); - ASYNC_MSG_ID_INC(pf_to_mgmt); - - err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size, - SPHW_MSG_DIRECT_SEND); - spin_unlock_bh(&pf_to_mgmt->async_msg_lock); - - if (err) { - sdk_err(dev, "Failed to send async mgmt msg\n"); - return err; - } - - return 0; -} - -int sphw_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout) -{ - if (!hwdev) - return -EINVAL; - - if (!sphw_get_chip_present_flag(hwdev)) - return -EPERM; - - if (in_size > SPHW_MSG_TO_MGMT_MAX_LEN) - return -EINVAL; - - return sphw_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, buf_out, out_size, timeout); -} - -int sphw_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel) -{ - if (!hwdev) - return -EINVAL; - - if (!sphw_get_chip_present_flag(hwdev)) - return -EPERM; - - return sphw_send_mbox_to_mgmt(hwdev, mod, cmd, buf_in, in_size, - buf_out, out_size, timeout, channel); -} - -int sphw_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel) -{ - if (!hwdev) - return -EINVAL; - - if (!sphw_get_chip_present_flag(hwdev)) - return -EPERM; - - return sphw_send_mbox_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size, channel); -} - -int sphw_msg_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel) -{ - return sphw_msg_to_mgmt_api_chain_async(hwdev, mod, cmd, buf_in, in_size); -} - -int sphw_msg_to_mgmt_api_chain_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u32 timeout) -{ - if (!hwdev) - return -EINVAL; - - if (!sphw_get_chip_present_flag(hwdev)) - return -EPERM; - - return sphw_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, - buf_out, out_size, timeout); -} - -int sphw_msg_to_mgmt_api_chain_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size) -{ - int err; - - if (!hwdev) - return -EINVAL; - - if (sphw_func_type(hwdev) == TYPE_VF) { - err = -EFAULT; - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "VF don't support async cmd\n"); - } else { - err = sphw_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size); - } - - return err; -} - 
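
[Editor's note — illustrative sketch, not part of the original driver or patch: sphw_pf_to_mgmt_sync() above uses the standard kernel completion-based request/response pattern: the sender arms a struct completion before posting the request, sleeps in wait_for_completion_timeout(), and the interrupt-side response handler calls complete() when the matching msg_id arrives. A minimal standalone version of that pattern, with all demo_* names hypothetical, looks like this:]

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct demo_sync_channel {
	struct completion recv_done;	/* signalled from the response path */
	u16 sync_msg_id;		/* id of the request in flight */
};

static int demo_send_sync(struct demo_sync_channel *ch, u32 timeout_ms)
{
	unsigned long timeo = msecs_to_jiffies(timeout_ms);

	/* Arm the completion before the device can possibly answer */
	init_completion(&ch->recv_done);

	/* ... post the request to the management CPU here ... */

	/* wait_for_completion_timeout() returns 0 on timeout */
	if (!wait_for_completion_timeout(&ch->recv_done, timeo))
		return -ETIMEDOUT;

	return 0;	/* response buffer is now valid to read */
}

/* Called from the response path, e.g. an AEQ event handler */
static void demo_recv_response(struct demo_sync_channel *ch, u16 msg_id)
{
	if (msg_id == ch->sync_msg_id)
		complete(&ch->recv_done);	/* wake the waiting sender */
}

[A single completion object per channel suffices here only because the driver serializes senders — note the down(&pf_to_mgmt->sync_msg_lock) taken before sending above — so at most one synchronous request is ever in flight.]
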
-static void send_mgmt_ack(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u8 mod, u16 cmd, void *buf_in, u16 in_size, - u16 msg_id) -{ - u16 buf_size; - - if (!in_size) - buf_size = BUF_OUT_DEFAULT_SIZE; - else - buf_size = in_size; - - sphw_response_mbox_to_mgmt(pf_to_mgmt->hwdev, mod, cmd, buf_in, buf_size, msg_id); -} - -static void mgmt_recv_msg_handler(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u8 mod, u16 cmd, void *buf_in, u16 in_size, - u16 msg_id, int need_resp) -{ - void *dev = pf_to_mgmt->hwdev->dev_hdl; - void *buf_out = pf_to_mgmt->mgmt_ack_buf; - enum sphw_mod_type tmp_mod = mod; - bool ack_first = false; - u16 out_size = 0; - - memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); - - if (mod >= SPHW_MOD_HW_MAX) { - sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %d\n", - mod); - goto resp; - } - - set_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); - - if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] || - !test_bit(SPHW_MGMT_MSG_CB_REG, - &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) { - sdk_warn(dev, "Receive mgmt callback is null, mod = %d\n", - mod); - clear_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); - goto resp; - } - - pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->hwdev, - pf_to_mgmt->recv_mgmt_msg_data[tmp_mod], - cmd, buf_in, in_size, buf_out, &out_size); - - clear_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); - -resp: - if (!ack_first && need_resp) - send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size, msg_id); -} - -/** - * mgmt_resp_msg_handler - handler for response message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @recv_msg: received message details - **/ -static void mgmt_resp_msg_handler(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - struct sphw_recv_msg *recv_msg) -{ - void *dev = pf_to_mgmt->hwdev->dev_hdl; - - /* delete async msg */ - if (recv_msg->msg_id & ASYNC_MSG_FLAG) - return; - - spin_lock(&pf_to_mgmt->sync_event_lock); - if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id && - pf_to_mgmt->event_flag == SEND_EVENT_START) { - pf_to_mgmt->event_flag = SEND_EVENT_SUCCESS; - complete(&recv_msg->recv_done); - } else if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) { - sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) dismatch, event state=%d\n", - pf_to_mgmt->sync_msg_id, recv_msg->msg_id, - pf_to_mgmt->event_flag); - } else { - sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event state=%d!\n", - pf_to_mgmt->sync_msg_id, recv_msg->msg_id, - pf_to_mgmt->event_flag); - } - spin_unlock(&pf_to_mgmt->sync_event_lock); -} - -static void recv_mgmt_msg_work_handler(struct work_struct *work) -{ - struct sphw_mgmt_msg_handle_work *mgmt_work = - container_of(work, struct sphw_mgmt_msg_handle_work, work); - - mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod, - mgmt_work->cmd, mgmt_work->msg, - mgmt_work->msg_len, mgmt_work->msg_id, - !mgmt_work->async_mgmt_to_pf); - - kfree(mgmt_work->msg); - kfree(mgmt_work); -} - -static bool check_mgmt_seq_id_and_seg_len(struct sphw_recv_msg *recv_msg, - u8 seq_id, u8 seg_len) -{ - if (seq_id > MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN) - return false; - - if (seq_id == 0) { - recv_msg->seq_id = seq_id; - } else { - if (seq_id != recv_msg->seq_id + 1) - return false; - - recv_msg->seq_id = seq_id; - } - - return true; -} - -/** - * recv_mgmt_msg_handler - handler a message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @header: the header of the message - * @recv_msg: received message details - **/ -static void 
recv_mgmt_msg_handler(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u8 *header, struct sphw_recv_msg *recv_msg) -{ - struct sphw_hwdev *hwdev = pf_to_mgmt->hwdev; - struct sphw_mgmt_msg_handle_work *mgmt_work = NULL; - u64 mbox_header = *((u64 *)header); - void *msg_body = header + sizeof(mbox_header); - u8 seq_id, seq_len; - u32 offset; - u64 dir; - - /* Don't need to get anything from hw when cmd is async */ - dir = SPHW_MSG_HEADER_GET(mbox_header, DIRECTION); - if (dir == SPHW_MSG_RESPONSE && SPHW_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG) - return; - - seq_len = SPHW_MSG_HEADER_GET(mbox_header, SEG_LEN); - seq_id = SPHW_MSG_HEADER_GET(mbox_header, SEQID); - - if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) { - sdk_err(hwdev->dev_hdl, - "Mgmt msg sequence id and segment length check fail, front seq_id: 0x%x,current seq_id: 0x%x, seg len: 0x%x\n", - recv_msg->seq_id, seq_id, seq_len); - /* set seq_id to invalid seq_id */ - recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; - return; - } - - offset = seq_id * SEGMENT_LEN; - memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len); - - if (!SPHW_MSG_HEADER_GET(mbox_header, LAST)) - return; - - recv_msg->cmd = SPHW_MSG_HEADER_GET(mbox_header, CMD); - recv_msg->mod = SPHW_MSG_HEADER_GET(mbox_header, MODULE); - recv_msg->async_mgmt_to_pf = SPHW_MSG_HEADER_GET(mbox_header, NO_ACK); - recv_msg->msg_len = SPHW_MSG_HEADER_GET(mbox_header, MSG_LEN); - recv_msg->msg_id = SPHW_MSG_HEADER_GET(mbox_header, MSG_ID); - recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; - - if (SPHW_MSG_HEADER_GET(mbox_header, DIRECTION) == SPHW_MSG_RESPONSE) { - mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); - return; - } - - mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); - if (!mgmt_work) { - sdk_err(hwdev->dev_hdl, "Allocate mgmt work memory failed\n"); - return; - } - - if (recv_msg->msg_len) { - mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); - if (!mgmt_work->msg) { - sdk_err(hwdev->dev_hdl, "Allocate mgmt msg memory failed\n"); - kfree(mgmt_work); - return; - } - } - - mgmt_work->pf_to_mgmt = pf_to_mgmt; - mgmt_work->msg_len = recv_msg->msg_len; - memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len); - mgmt_work->msg_id = recv_msg->msg_id; - mgmt_work->mod = recv_msg->mod; - mgmt_work->cmd = recv_msg->cmd; - mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf; - - INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler); - queue_work_on(sphw_get_work_cpu_affinity(hwdev, WORK_TYPE_MGMT_MSG), - pf_to_mgmt->workq, &mgmt_work->work); -} - -/** - * sphw_mgmt_msg_aeqe_handler - handler for a mgmt message event - * @handle: PF to MGMT channel - * @header: the header of the message - * @size: unused - **/ -void sphw_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size) -{ - struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; - struct sphw_recv_msg *recv_msg = NULL; - bool is_send_dir = false; - - if ((SPHW_MSG_HEADER_GET(*(u64 *)header, SOURCE) == - SPHW_MSG_FROM_MBOX)) { - sphw_mbox_func_aeqe_handler(hwdev, header, size); - return; - } - - pf_to_mgmt = dev->pf_to_mgmt; - if (!pf_to_mgmt) - return; - - is_send_dir = (SPHW_MSG_HEADER_GET(*(u64 *)header, DIRECTION) == - SPHW_MSG_DIRECT_SEND) ? true : false; - - recv_msg = is_send_dir ? 
&pf_to_mgmt->recv_msg_from_mgmt : - &pf_to_mgmt->recv_resp_msg_from_mgmt; - - recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); -} - -/** - * alloc_recv_msg - allocate received message memory - * @recv_msg: pointer that will hold the allocated data - * Return: 0 - success, negative - failure - **/ -static int alloc_recv_msg(struct sphw_recv_msg *recv_msg) -{ - recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; - - recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); - if (!recv_msg->msg) - return -ENOMEM; - - return 0; -} - -/** - * free_recv_msg - free received message memory - * @recv_msg: pointer that holds the allocated data - **/ -static void free_recv_msg(struct sphw_recv_msg *recv_msg) -{ - kfree(recv_msg->msg); -} - -/** - * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel - * @pf_to_mgmt: PF to MGMT channel - * Return: 0 - success, negative - failure - **/ -static int alloc_msg_buf(struct sphw_msg_pf_to_mgmt *pf_to_mgmt) -{ - int err; - void *dev = pf_to_mgmt->hwdev->dev_hdl; - - err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); - if (err) { - sdk_err(dev, "Failed to allocate recv msg\n"); - return err; - } - - err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); - if (err) { - sdk_err(dev, "Failed to allocate resp recv msg\n"); - goto alloc_msg_for_resp_err; - } - - pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); - if (!pf_to_mgmt->async_msg_buf) { - err = -ENOMEM; - goto async_msg_buf_err; - } - - pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); - if (!pf_to_mgmt->sync_msg_buf) { - err = -ENOMEM; - goto sync_msg_buf_err; - } - - pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); - if (!pf_to_mgmt->mgmt_ack_buf) { - err = -ENOMEM; - goto ack_msg_buf_err; - } - - return 0; - -ack_msg_buf_err: - kfree(pf_to_mgmt->sync_msg_buf); - -sync_msg_buf_err: - kfree(pf_to_mgmt->async_msg_buf); - -async_msg_buf_err: - free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); - -alloc_msg_for_resp_err: - free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); - return err; -} - -/** - * free_msg_buf - free all the message buffers of PF to MGMT channel - * @pf_to_mgmt: PF to MGMT channel - * Return: 0 - success, negative - failure - **/ -static void free_msg_buf(struct sphw_msg_pf_to_mgmt *pf_to_mgmt) -{ - kfree(pf_to_mgmt->mgmt_ack_buf); - kfree(pf_to_mgmt->sync_msg_buf); - kfree(pf_to_mgmt->async_msg_buf); - - free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); - free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); -} - -/** - * sphw_pf_to_mgmt_init - initialize PF to MGMT channel - * @hwdev: the pointer to hw device - * Return: 0 - success, negative - failure - **/ -int sphw_pf_to_mgmt_init(struct sphw_hwdev *hwdev) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt; - void *dev = hwdev->dev_hdl; - int err; - - pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL); - if (!pf_to_mgmt) - return -ENOMEM; - - hwdev->pf_to_mgmt = pf_to_mgmt; - pf_to_mgmt->hwdev = hwdev; - spin_lock_init(&pf_to_mgmt->async_msg_lock); - spin_lock_init(&pf_to_mgmt->sync_event_lock); - sema_init(&pf_to_mgmt->sync_msg_lock, 1); - pf_to_mgmt->workq = create_singlethread_workqueue(SPHW_MGMT_WQ_NAME); - if (!pf_to_mgmt->workq) { - sdk_err(dev, "Failed to initialize MGMT workqueue\n"); - err = -ENOMEM; - goto create_mgmt_workq_err; - } - - err = alloc_msg_buf(pf_to_mgmt); - if (err) { - sdk_err(dev, "Failed to allocate msg buffers\n"); - goto alloc_msg_buf_err; - } - - err = sphw_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain); - if (err) { - sdk_err(dev, 
"Failed to init the api cmd chains\n"); - goto api_cmd_init_err; - } - - return 0; - -api_cmd_init_err: - free_msg_buf(pf_to_mgmt); - -alloc_msg_buf_err: - destroy_workqueue(pf_to_mgmt->workq); - -create_mgmt_workq_err: - kfree(pf_to_mgmt); - - return err; -} - -/** - * sphw_pf_to_mgmt_free - free PF to MGMT channel - * @hwdev: the pointer to hw device - **/ -void sphw_pf_to_mgmt_free(struct sphw_hwdev *hwdev) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; - - /* destroy workqueue before free related pf_to_mgmt resources in case of - * illegal resource access - */ - destroy_workqueue(pf_to_mgmt->workq); - sphw_api_cmd_free(pf_to_mgmt->cmd_chain); - - free_msg_buf(pf_to_mgmt); - kfree(pf_to_mgmt); -} - -void sphw_flush_mgmt_workq(void *hwdev) -{ - struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; - - flush_workqueue(dev->aeqs->workq); - - if (sphw_func_type(dev) != TYPE_VF) - flush_workqueue(dev->pf_to_mgmt->workq); -} - -int sphw_api_cmd_read_ack(void *hwdev, u8 dest, const void *cmd, u16 size, void *ack, u16 ack_size) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; - struct sphw_api_cmd_chain *chain = NULL; - - if (!hwdev || !cmd || (ack_size && !ack)) - return -EINVAL; - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_POLL_READ]; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) - return -EPERM; - - return sphw_api_cmd_read(chain, dest, cmd, size, ack, ack_size); -} - -/** - * api cmd write or read bypass default use poll, if want to use aeq interrupt, - * please set wb_trigger_aeqe to 1 - **/ -int sphw_api_cmd_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; - struct sphw_api_cmd_chain *chain = NULL; - - if (!hwdev || !size || !cmd) - return -EINVAL; - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_POLL_WRITE]; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) - return -EPERM; - - return sphw_api_cmd_write(chain, dest, cmd, size); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h deleted file mode 100644 index 802336bd5cb1856988d4a99c31467f952389f403..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h +++ /dev/null @@ -1,106 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_MGMT_H -#define SPHW_MGMT_H - -#define SPHW_MGMT_WQ_NAME "sphw_mgmt" - -struct sphw_recv_msg { - void *msg; - - u16 msg_len; - enum sphw_mod_type mod; - u16 cmd; - u8 seq_id; - u16 msg_id; - int async_mgmt_to_pf; - - struct completion recv_done; -}; - -struct sphw_msg_head { - u8 status; - u8 version; - u8 resp_aeq_num; - u8 rsvd0[5]; -}; - -enum comm_pf_to_mgmt_event_state { - SEND_EVENT_UNINIT = 0, - SEND_EVENT_START, - SEND_EVENT_SUCCESS, - SEND_EVENT_FAIL, - SEND_EVENT_TIMEOUT, - SEND_EVENT_END, -}; - -enum sphw_mgmt_msg_cb_state { - SPHW_MGMT_MSG_CB_REG = 0, - SPHW_MGMT_MSG_CB_RUNNING, -}; - -struct sphw_msg_pf_to_mgmt { - struct sphw_hwdev *hwdev; - - /* Async cmd can not be scheduling */ - spinlock_t async_msg_lock; - struct semaphore sync_msg_lock; - - struct workqueue_struct *workq; - - void *async_msg_buf; - void *sync_msg_buf; - void *mgmt_ack_buf; - - struct sphw_recv_msg recv_msg_from_mgmt; - struct sphw_recv_msg recv_resp_msg_from_mgmt; - - u16 async_msg_id; - u16 sync_msg_id; - struct sphw_api_cmd_chain 
*cmd_chain[SPHW_API_CMD_MAX]; - - sphw_mgmt_msg_cb recv_mgmt_msg_cb[SPHW_MOD_HW_MAX]; - void *recv_mgmt_msg_data[SPHW_MOD_HW_MAX]; - unsigned long mgmt_msg_cb_state[SPHW_MOD_HW_MAX]; - - void *async_msg_cb_data[SPHW_MOD_HW_MAX]; - - /* lock when sending msg */ - spinlock_t sync_event_lock; - enum comm_pf_to_mgmt_event_state event_flag; -}; - -struct sphw_mgmt_msg_handle_work { - struct work_struct work; - struct sphw_msg_pf_to_mgmt *pf_to_mgmt; - - void *msg; - u16 msg_len; - - enum sphw_mod_type mod; - u16 cmd; - u16 msg_id; - - int async_mgmt_to_pf; -}; - -void sphw_mgmt_msg_aeqe_handler(void *handle, u8 *header, u8 size); - -int sphw_pf_to_mgmt_init(struct sphw_hwdev *hwdev); - -void sphw_pf_to_mgmt_free(struct sphw_hwdev *hwdev); - -int sphw_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout); -int sphw_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size); - -int sphw_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout); - -int sphw_api_cmd_read_ack(void *hwdev, u8 dest, const void *cmd, u16 size, - void *ack, u16 ack_size); - -int sphw_api_cmd_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h deleted file mode 100644 index 13f726895f587881320ec71168918374c6d3577a..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_MGMT_MSG_BASE_H -#define SPHW_MGMT_MSG_BASE_H - -#define MGMT_MSG_CMD_OP_SET 1 -#define MGMT_MSG_CMD_OP_GET 0 - -#define MGMT_MSG_CMD_OP_START 1 -#define MGMT_MSG_CMD_OP_STOP 0 - -struct mgmt_msg_head { - u8 status; - u8 version; - u8 rsvd0[6]; -}; - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h deleted file mode 100644 index d7fb58054202b0484def08992f8af61e73ad9b61..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h +++ /dev/null @@ -1,533 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_MT_H -#define SPHW_MT_H - -#define NICTOOL_CMD_TYPE 0x18 - -struct api_cmd_rd { - u32 pf_id; - u8 dest; - u8 *cmd; - u16 size; - void *ack; - u16 ack_size; -}; - -struct api_cmd_wr { - u32 pf_id; - u8 dest; - u8 *cmd; - u16 size; -}; - -struct pf_dev_info { - u64 bar0_size; - u8 bus; - u8 slot; - u8 func; - u64 phy_addr; -}; - -/* Indicates the maximum number of interrupts that can be recorded. - * Subsequent interrupts are not recorded in FFM. 
- */ -#define FFM_RECORD_NUM_MAX 64 - -struct ffm_intr_info { - u8 node_id; - /* error level of the interrupt source */ - u8 err_level; - /* Classification by interrupt source properties */ - u16 err_type; - u32 err_csr_addr; - u32 err_csr_value; -}; - -struct ffm_intr_tm_info { - struct ffm_intr_info intr_info; - u8 times; - u8 sec; - u8 min; - u8 hour; - u8 mday; - u8 mon; - u16 year; -}; - -struct ffm_record_info { - u32 ffm_num; - u32 last_err_csr_addr; - u32 last_err_csr_value; - struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX]; -}; - -struct dbgtool_k_glb_info { - struct semaphore dbgtool_sem; - struct ffm_record_info *ffm; -}; - -struct msg_2_up { - u8 pf_id; - u8 mod; - u8 cmd; - void *buf_in; - u16 in_size; - void *buf_out; - u16 *out_size; -}; - -struct dbgtool_param { - union { - struct api_cmd_rd api_rd; - struct api_cmd_wr api_wr; - struct pf_dev_info *dev_info; - struct ffm_record_info *ffm_rd; - struct msg_2_up msg2up; - } param; - char chip_name[16]; -}; - -/* dbgtool command type */ -/* You can add commands as required. The dbgtool command can be - * used to invoke all interfaces of the kernel-mode x86 driver. - */ -enum dbgtool_cmd { - DBGTOOL_CMD_API_RD = 0, - DBGTOOL_CMD_API_WR, - DBGTOOL_CMD_FFM_RD, - DBGTOOL_CMD_FFM_CLR, - DBGTOOL_CMD_PF_DEV_INFO_GET, - DBGTOOL_CMD_MSG_2_UP, - DBGTOOL_CMD_FREE_MEM, - DBGTOOL_CMD_NUM -}; - -#define PF_MAX_SIZE 16 -#define BUSINFO_LEN 32 -#define SELF_TEST_BAR_ADDR_OFFSET 0x883c - -enum module_name { - SEND_TO_NPU = 1, - SEND_TO_MPU, - SEND_TO_SM, - - SEND_TO_HW_DRIVER, - SEND_TO_NIC_DRIVER, - SEND_TO_OVS_DRIVER, - SEND_TO_ROCE_DRIVER, - SEND_TO_TOE_DRIVER, - SEND_TO_IWAP_DRIVER, - SEND_TO_FC_DRIVER, - SEND_FCOE_DRIVER, -}; - -enum driver_cmd_type { - TX_INFO = 1, - Q_NUM, - TX_WQE_INFO, - TX_MAPPING, - RX_INFO, - RX_WQE_INFO, - RX_CQE_INFO, - UPRINT_FUNC_EN, - UPRINT_FUNC_RESET, - UPRINT_SET_PATH, - UPRINT_GET_STATISTICS, - FUNC_TYPE, - GET_FUNC_IDX, - GET_INTER_NUM, - CLOSE_TX_STREAM, - GET_DRV_VERSION, - CLEAR_FUNC_STASTIC, - GET_HW_STATS, - CLEAR_HW_STATS, - GET_SELF_TEST_RES, - GET_CHIP_FAULT_STATS, - NIC_RSVD1, - NIC_RSVD2, - NIC_RSVD3, - GET_CHIP_ID, - GET_SINGLE_CARD_INFO, - GET_FIRMWARE_ACTIVE_STATUS, - ROCE_DFX_FUNC, - GET_DEVICE_ID, - GET_PF_DEV_INFO, - CMD_FREE_MEM, - GET_LOOPBACK_MODE = 32, - SET_LOOPBACK_MODE, - SET_LINK_MODE, - SET_PF_BW_LIMIT, - GET_PF_BW_LIMIT, - ROCE_CMD, - GET_POLL_WEIGHT, - SET_POLL_WEIGHT, - GET_HOMOLOGUE, - SET_HOMOLOGUE, - GET_SSET_COUNT, - GET_SSET_ITEMS, - IS_DRV_IN_VM, - LRO_ADPT_MGMT, - SET_INTER_COAL_PARAM, - GET_INTER_COAL_PARAM, - GET_CHIP_INFO, - GET_NIC_STATS_LEN, - GET_NIC_STATS_STRING, - GET_NIC_STATS_INFO, - GET_PF_ID, - NIC_RSVD4, - NIC_RSVD5, - DCB_QOS_INFO, - DCB_PFC_STATE, - DCB_ETS_STATE, - DCB_STATE, - NIC_RSVD6, - NIC_RSVD7, - GET_ULD_DEV_NAME, - - RSS_CFG = 0x40, - RSS_INDIR, - PORT_ID, - - GET_FUNC_CAP = 0x50, - - GET_WIN_STAT = 0x60, - WIN_CSR_READ = 0x61, - WIN_CSR_WRITE = 0x62, - WIN_API_CMD_RD = 0x63, - - VM_COMPAT_TEST = 0xFF -}; - -enum api_chain_cmd_type { - API_CSR_READ, - API_CSR_WRITE -}; - -enum sm_cmd_type { - SM_CTR_RD32 = 1, - SM_CTR_RD64_PAIR, - SM_CTR_RD64, - SM_CTR_RD32_CLEAR, - SM_CTR_RD64_PAIR_CLEAR, - SM_CTR_RD64_CLEAR -}; - -struct cqm_stats { - atomic_t cqm_cmd_alloc_cnt; - atomic_t cqm_cmd_free_cnt; - atomic_t cqm_send_cmd_box_cnt; - atomic_t cqm_send_cmd_imm_cnt; - atomic_t cqm_db_addr_alloc_cnt; - atomic_t cqm_db_addr_free_cnt; - atomic_t cqm_fc_srq_create_cnt; - atomic_t cqm_srq_create_cnt; - atomic_t cqm_rq_create_cnt; - atomic_t 
cqm_qpc_mpt_create_cnt; - atomic_t cqm_nonrdma_queue_create_cnt; - atomic_t cqm_rdma_queue_create_cnt; - atomic_t cqm_rdma_table_create_cnt; - atomic_t cqm_qpc_mpt_delete_cnt; - atomic_t cqm_nonrdma_queue_delete_cnt; - atomic_t cqm_rdma_queue_delete_cnt; - atomic_t cqm_rdma_table_delete_cnt; - atomic_t cqm_func_timer_clear_cnt; - atomic_t cqm_func_hash_buf_clear_cnt; - atomic_t cqm_scq_callback_cnt; - atomic_t cqm_ecq_callback_cnt; - atomic_t cqm_nocq_callback_cnt; - atomic_t cqm_aeq_callback_cnt[112]; -}; - -enum sphw_fault_err_level { - FAULT_LEVEL_FATAL, - FAULT_LEVEL_SERIOUS_RESET, - FAULT_LEVEL_HOST, - FAULT_LEVEL_SERIOUS_FLR, - FAULT_LEVEL_GENERAL, - FAULT_LEVEL_SUGGESTION, - FAULT_LEVEL_MAX, -}; - -struct link_event_stats { - atomic_t link_down_stats; - atomic_t link_up_stats; -}; - -enum sphw_fault_type { - FAULT_TYPE_CHIP, - FAULT_TYPE_UCODE, - FAULT_TYPE_MEM_RD_TIMEOUT, - FAULT_TYPE_MEM_WR_TIMEOUT, - FAULT_TYPE_REG_RD_TIMEOUT, - FAULT_TYPE_REG_WR_TIMEOUT, - FAULT_TYPE_PHY_FAULT, - FAULT_TYPE_MAX, -}; - -struct fault_event_stats { - atomic_t chip_fault_stats[22][FAULT_LEVEL_MAX]; - atomic_t fault_type_stat[FAULT_TYPE_MAX]; - atomic_t pcie_fault_stats; -}; - -struct sphw_hw_stats { - atomic_t heart_lost_stats; - struct cqm_stats cqm_stats; - struct link_event_stats link_event_stats; - struct fault_event_stats fault_event_stats; -}; - -#ifndef IFNAMSIZ -#define IFNAMSIZ 16 -#endif - -struct pf_info { - char name[IFNAMSIZ]; - char bus_info[BUSINFO_LEN]; - u32 pf_type; -}; - -struct card_info { - struct pf_info pf[PF_MAX_SIZE]; - u32 pf_num; -}; - -struct spnic_nic_loop_mode { - u32 loop_mode; - u32 loop_ctrl; -}; - -enum spnic_show_set { - SHOW_SSET_IO_STATS = 1, -}; - -#define SPNIC_SHOW_ITEM_LEN 32 -struct spnic_show_item { - char name[SPNIC_SHOW_ITEM_LEN]; - u8 hexadecimal; /* 0: decimal , 1: Hexadecimal */ - u8 rsvd[7]; - u64 value; -}; - -#define SPHW_CHIP_FAULT_SIZE (110 * 1024) -#define MAX_DRV_BUF_SIZE 4096 - -struct nic_cmd_chip_fault_stats { - u32 offset; - u8 chip_fault_stats[MAX_DRV_BUF_SIZE]; -}; - -#define NIC_TOOL_MAGIC 'x' - -#define CARD_MAX_SIZE 16 -struct nic_card_id { - u32 id[CARD_MAX_SIZE]; - u32 num; -}; - -struct func_pdev_info { - u64 bar0_phy_addr; - u64 bar0_size; - u64 bar1_phy_addr; - u64 bar1_size; - u64 bar3_phy_addr; - u64 bar3_size; - u64 rsvd1[4]; -}; - -struct sphw_card_func_info { - u32 num_pf; - u32 rsvd0; - u64 usr_api_phy_addr; - struct func_pdev_info pdev_info[CARD_MAX_SIZE]; -}; - -struct wqe_info { - int q_id; - void *slq_handle; - unsigned int wqe_id; -}; - -#define MAX_VER_INFO_LEN 128 -struct drv_version_info { - char ver[MAX_VER_INFO_LEN]; -}; - -struct spnic_tx_hw_page { - u64 phy_addr; - u64 *map_addr; -}; - -struct nic_sq_info { - u16 q_id; - u16 pi; - u16 ci; /* sw_ci */ - u16 fi; /* hw_ci */ - u32 q_depth; - u16 pi_reverse; - u16 wqebb_size; - u8 priority; - u16 *ci_addr; - u64 cla_addr; - void *slq_handle; - struct spnic_tx_hw_page direct_wqe; - struct spnic_tx_hw_page doorbell; - u32 page_idx; - u32 glb_sq_id; -}; - -struct nic_rq_info { - u16 q_id; - u16 glb_rq_id; - u16 hw_pi; - u16 ci; /* sw_ci */ - u16 sw_pi; - u16 wqebb_size; - u16 q_depth; - u16 buf_len; - - void *slq_handle; - u64 ci_wqe_page_addr; - u64 ci_cla_tbl_addr; - - u8 coalesc_timer_cfg; - u8 pending_limt; - u16 msix_idx; - u32 msix_vector; -}; - -#define MT_EPERM 1 /* Operation not permitted */ -#define MT_EIO 2 /* I/O error */ -#define MT_EINVAL 3 /* Invalid argument */ -#define MT_EBUSY 4 /* Device or resource busy */ -#define MT_EOPNOTSUPP 0xFF /* Operation not 
supported */ - -struct mt_msg_head { - u8 status; - u8 rsvd1[3]; -}; - -#define MT_DCB_OPCODE_WR BIT(0) /* 1 - write, 0 - read */ -struct spnic_mt_qos_info { - struct mt_msg_head head; - - u16 op_code; - u8 valid_cos_bitmap; - u8 valid_up_bitmap; - u32 rsvd1; -}; - -struct spnic_mt_dcb_state { - struct mt_msg_head head; - - u16 op_code; - u8 state; - u8 rsvd; -}; - -#define MT_DCB_ETS_UP_TC BIT(1) -#define MT_DCB_ETS_UP_BW BIT(2) -#define MT_DCB_ETS_UP_PRIO BIT(3) -#define MT_DCB_ETS_TC_BW BIT(4) -#define MT_DCB_ETS_TC_PRIO BIT(5) - -#define DCB_UP_TC_NUM 0x8 -struct spnic_mt_ets_state { - struct mt_msg_head head; - - u16 op_code; - u8 up_tc[DCB_UP_TC_NUM]; - u8 up_bw[DCB_UP_TC_NUM]; - u8 tc_bw[DCB_UP_TC_NUM]; - u8 up_prio_bitmap; - u8 tc_prio_bitmap; - u32 rsvd; -}; - -#define MT_DCB_PFC_PFC_STATE BIT(1) -#define MT_DCB_PFC_PFC_PRI_EN BIT(2) -struct spnic_mt_pfc_state { - struct mt_msg_head head; - - u16 op_code; - u8 state; - u8 pfc_en_bitpamp; - u32 rsvd; -}; - -enum mt_api_type { - API_TYPE_MBOX = 1, - API_TYPE_API_CHAIN_BYPASS, - API_TYPE_API_CHAIN_TO_MPU, -}; - -struct npu_cmd_st { - u32 mod : 8; - u32 cmd : 8; - u32 ack_type : 3; - u32 direct_resp : 1; - u32 len : 12; -}; - -struct mpu_cmd_st { - u32 api_type : 8; - u32 mod : 8; - u32 cmd : 16; -}; - -struct msg_module { - char device_name[IFNAMSIZ]; - u32 module; - union { - u32 msg_formate; /* for driver */ - struct npu_cmd_st npu_cmd; - struct mpu_cmd_st mpu_cmd; - }; - u32 timeout; /* for mpu/npu cmd */ - u32 func_idx; - u32 buf_in_size; - u32 buf_out_size; - void *in_buf; - void *out_buf; - int bus_num; - u32 rsvd2[5]; -}; - -int alloc_buff_in(void *hwdev, struct msg_module *nt_msg, u32 in_size, void **buf_in); - -int alloc_buff_out(void *hwdev, struct msg_module *nt_msg, u32 out_size, void **buf_out); - -void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in); - -void free_buff_out(void *hwdev, struct msg_module *nt_msg, void *buf_out); - -int copy_buf_out_to_user(struct msg_module *nt_msg, u32 out_size, void *buf_out); - -int get_func_type(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); - -int get_func_id(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); - -int get_drv_version(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); - -int get_hw_driver_stats(const void *hwdev, const void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); - -int clear_hw_driver_stats(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); - -int get_chip_faults_stats(const void *hwdev, const void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); - -int get_chip_id_test(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); - -int send_to_mpu(void *hwdev, struct msg_module *nt_msg, void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); -int send_to_npu(void *hwdev, struct msg_module *nt_msg, void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); - -int send_to_sm(void *hwdev, struct msg_module *nt_msg, void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); - -#endif /* SPHW_MT_H_ */ diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c deleted file mode 100644 index 20ebda15cda20254ad6c5ec2aba59ca7eb2d22fa..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c +++ /dev/null @@ -1,94 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory 
Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include - -#include "sphw_common.h" -#include "sphw_hwdev.h" -#include "sphw_profile.h" -#include "sphw_prof_adap.h" - -typedef bool (*sphw_is_match_prof)(struct sphw_hwdev *hwdev); - -static bool is_match_prof_default_adapter(struct sphw_hwdev *hwdev) -{ - /* always match default profile adapter in standard scene */ - return true; -} - -enum prof_adapter_type { - PROF_ADAP_TYPE_PANGEA = 1, - - /* Add prof adapter type before default */ - PROF_ADAP_TYPE_DEFAULT, -}; - -/** - * struct sphw_prof_adapter - custom scene's profile adapter - * @type: adapter type - * @match: Check whether the current function is used in the custom scene. - * Implemented in the current source file - * @init: When @match return true, the initialization function called in probe. - * Implemented in the source file of the custom scene - * @deinit: When @match return true, the deinitialization function called when - * remove. Implemented in the source file of the custom scene - */ -struct sphw_prof_adapter { - enum prof_adapter_type type; - sphw_is_match_prof match; - sphw_init_prof_attr init; - sphw_deinit_prof_attr deinit; -}; - -struct sphw_prof_adapter prof_adap_objs[] = { - /* Add prof adapter before default profile */ - { - .type = PROF_ADAP_TYPE_DEFAULT, - .match = is_match_prof_default_adapter, - .init = NULL, - .deinit = NULL, - }, -}; - -void sphw_init_profile_adapter(struct sphw_hwdev *hwdev) -{ - struct sphw_prof_adapter *prof_obj = NULL; - u16 num_adap = ARRAY_SIZE(prof_adap_objs); - u16 i; - - for (i = 0; i < num_adap; i++) { - prof_obj = &prof_adap_objs[i]; - if (!(prof_obj->match && prof_obj->match(hwdev))) - continue; - - hwdev->prof_adap_type = prof_obj->type; - hwdev->prof_attr = prof_obj->init ? - prof_obj->init(hwdev) : NULL; - sdk_info(hwdev->dev_hdl, "Find profile adapter, type: %d\n", - hwdev->prof_adap_type); - - break; - } -} - -void sphw_deinit_profile_adapter(struct sphw_hwdev *hwdev) -{ - struct sphw_prof_adapter *prof_obj = NULL; - u16 num_adap = ARRAY_SIZE(prof_adap_objs); - u16 i; - - for (i = 0; i < num_adap; i++) { - prof_obj = &prof_adap_objs[i]; - if (hwdev->prof_adap_type != prof_obj->type) - continue; - - if (prof_obj->deinit) - prof_obj->deinit(hwdev->prof_attr); - break; - } -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h deleted file mode 100644 index f83d3a28c834dfa4c1d72b1f0db239314714bc51..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_PROF_ADAP_H -#define SPHW_PROF_ADAP_H - -#include - -#include "sphw_profile.h" - -#define GET_PROF_ATTR_OPS(hwdev) \ - ((hwdev)->prof_attr ? 
(hwdev)->prof_attr->ops : NULL) - -static inline int sphw_get_work_cpu_affinity(struct sphw_hwdev *hwdev, - enum cpu_affinity_work_type type) -{ - struct sphw_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev); - - if (ops && ops->get_work_cpu_affinity) - return ops->get_work_cpu_affinity(hwdev->prof_attr->priv_data, type); - - return WORK_CPU_UNBOUND; -} - -static inline void sphw_fault_post_process(struct sphw_hwdev *hwdev, u16 src, u16 level) -{ - struct sphw_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev); - - if (ops && ops->fault_recover) - ops->fault_recover(hwdev->prof_attr->priv_data, src, level); -} - -static inline bool sphw_sw_feature_en(struct sphw_hwdev *hwdev, u64 feature_bit) -{ - if (!hwdev->prof_attr) - return false; - - return (hwdev->prof_attr->sw_feature_cap & feature_bit) && - (hwdev->prof_attr->dft_sw_feature & feature_bit); -} - -#define SW_FEATURE_EN(hwdev, f_bit) \ - sphw_sw_feature_en(hwdev, SPHW_SW_F_##f_bit) -#define SPHW_F_CHANNEL_LOCK_EN(hwdev) SW_FEATURE_EN(hwdev, CHANNEL_LOCK) - -void sphw_init_profile_adapter(struct sphw_hwdev *hwdev); -void sphw_deinit_profile_adapter(struct sphw_hwdev *hwdev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h deleted file mode 100644 index 0e1c6c91ba31c53320ace49ffe871fb326385fee..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_PROFILE_H -#define SPHW_PROFILE_H - -enum cpu_affinity_work_type { - WORK_TYPE_AEQ, - WORK_TYPE_MBOX, - WORK_TYPE_MGMT_MSG, - WORK_TYPE_COMM, -}; - -enum sphw_sw_features { - SPHW_SW_F_CHANNEL_LOCK = BIT(0), -}; - -struct sphw_prof_ops { - void (*fault_recover)(void *data, u16 src, u16 level); - int (*get_work_cpu_affinity)(void *data, u32 work_type); -}; - -struct sphw_prof_attr { - void *priv_data; - u64 hw_feature_cap; - u64 sw_feature_cap; - u64 dft_hw_feature; - u64 dft_sw_feature; - - struct sphw_prof_ops *ops; -}; - -typedef struct sphw_prof_attr *(*sphw_init_prof_attr)(void *hwdev); -typedef void (*sphw_deinit_prof_attr)(struct sphw_prof_attr *porf_attr); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c deleted file mode 100644 index 0ec202dfc4d7e6bfdaec9a753f2bb016dec05042..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c +++ /dev/null @@ -1,152 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_common.h" -#include "sphw_wq.h" -#include "sphw_hwdev.h" - -#define WQ_MIN_DEPTH 64 -#define WQ_MAX_DEPTH 65536 -#define WQ_MAX_NUM_PAGES (PAGE_SIZE / sizeof(u64)) - -static int wq_init_wq_block(struct sphw_wq *wq) -{ - int i; - - if (WQ_IS_0_LEVEL_CLA(wq)) { - wq->wq_block_paddr = wq->wq_pages[0].align_paddr; - wq->wq_block_vaddr = wq->wq_pages[0].align_vaddr; - - return 0; - } - - if (wq->num_wq_pages > WQ_MAX_NUM_PAGES) { - sdk_err(wq->dev_hdl, "num_wq_pages exceed limit: %lu\n", - WQ_MAX_NUM_PAGES); - return -EFAULT; - } - - wq->wq_block_vaddr = dma_alloc_coherent(wq->dev_hdl, PAGE_SIZE, &wq->wq_block_paddr, - GFP_KERNEL); - if (!wq->wq_block_vaddr) { - sdk_err(wq->dev_hdl, "Failed to alloc wq block\n"); - return -ENOMEM; - 
} - - for (i = 0; i < wq->num_wq_pages; i++) - wq->wq_block_vaddr[i] = - cpu_to_be64(wq->wq_pages[i].align_paddr); - - return 0; -} - -static int wq_alloc_pages(struct sphw_wq *wq) -{ - int i, page_idx, err; - - wq->wq_pages = kcalloc(wq->num_wq_pages, sizeof(*wq->wq_pages), - GFP_KERNEL); - if (!wq->wq_pages) { - sdk_err(wq->dev_hdl, "Failed to alloc wq pages handle\n"); - return -ENOMEM; - } - - for (page_idx = 0; page_idx < wq->num_wq_pages; page_idx++) { - err = sphw_dma_alloc_coherent_align(wq->dev_hdl, wq->wq_page_size, - wq->wq_page_size, GFP_KERNEL, - &wq->wq_pages[page_idx]); - if (err) { - sdk_err(wq->dev_hdl, "Failed to alloc wq page\n"); - goto free_wq_pages; - } - } - - err = wq_init_wq_block(wq); - if (err) - goto free_wq_pages; - - return 0; - -free_wq_pages: - for (i = 0; i < page_idx; i++) - sphw_dma_free_coherent_align(wq->dev_hdl, &wq->wq_pages[i]); - - kfree(wq->wq_pages); - wq->wq_pages = NULL; - - return -ENOMEM; -} - -static void wq_free_pages(struct sphw_wq *wq) -{ - int i; - - if (!WQ_IS_0_LEVEL_CLA(wq)) - dma_free_coherent(wq->dev_hdl, PAGE_SIZE, wq->wq_block_vaddr, - wq->wq_block_paddr); - - for (i = 0; i < wq->num_wq_pages; i++) - sphw_dma_free_coherent_align(wq->dev_hdl, &wq->wq_pages[i]); - - kfree(wq->wq_pages); - wq->wq_pages = NULL; -} - -int sphw_wq_create(void *hwdev, struct sphw_wq *wq, u32 q_depth, u16 wqebb_size) -{ - struct sphw_hwdev *dev = hwdev; - u32 wq_page_size; - - if (!wq || !dev) { - pr_err("Invalid wq or dev_hdl\n"); - return -EINVAL; - } - - if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH || - (q_depth & (q_depth - 1)) || !wqebb_size || - (wqebb_size & (wqebb_size - 1))) { - sdk_err(dev->dev_hdl, "Wq q_depth(%u) or wqebb_size(%u) is invalid\n", - q_depth, wqebb_size); - return -EINVAL; - } - - wq_page_size = ALIGN(dev->wq_page_size, PAGE_SIZE); - - memset(wq, 0, sizeof(*wq)); - wq->dev_hdl = dev->dev_hdl; - wq->q_depth = q_depth; - wq->idx_mask = (u16)(q_depth - 1); - wq->wqebb_size = wqebb_size; - wq->wqebb_size_shift = (u16)ilog2(wq->wqebb_size); - wq->wq_page_size = wq_page_size; - - wq->wqebbs_per_page = wq_page_size / wqebb_size; - /* In case of wq_page_size is larger than q_depth * wqebb_size */ - if (wq->wqebbs_per_page > q_depth) - wq->wqebbs_per_page = q_depth; - wq->wqebbs_per_page_shift = (u16)ilog2(wq->wqebbs_per_page); - wq->wqebbs_per_page_mask = (u16)(wq->wqebbs_per_page - 1); - wq->num_wq_pages = (u16)(ALIGN(((u32)q_depth * wqebb_size), - wq_page_size) / wq_page_size); - - return wq_alloc_pages(wq); -} - -void sphw_wq_destroy(struct sphw_wq *wq) -{ - if (!wq) - return; - - wq_free_pages(wq); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h deleted file mode 100644 index 01d564ca527a7e41c9d273dce9199223124349d9..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h +++ /dev/null @@ -1,119 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_WQ_H -#define SPHW_WQ_H - -struct sphw_wq { - u16 cons_idx; - u16 prod_idx; - - u32 q_depth; - u16 idx_mask; - u16 wqebb_size_shift; - u16 num_wq_pages; - u32 wqebbs_per_page; - u16 wqebbs_per_page_shift; - u16 wqebbs_per_page_mask; - - struct sphw_dma_addr_align *wq_pages; - - dma_addr_t wq_block_paddr; - u64 *wq_block_vaddr; - - void *dev_hdl; - u32 wq_page_size; - u16 wqebb_size; -} ____cacheline_aligned; - -int sphw_wq_create(void *hwdev, struct sphw_wq *wq, u32 q_depth, u16 wqebb_size); -void 
sphw_wq_destroy(struct sphw_wq *wq); - -#define WQ_MASK_IDX(wq, idx) ((idx) & (wq)->idx_mask) -#define WQ_MASK_PAGE(wq, pg_idx) ((pg_idx) < (wq)->num_wq_pages ? (pg_idx) : 0) -#define WQ_PAGE_IDX(wq, idx) ((idx) >> (wq)->wqebbs_per_page_shift) -#define WQ_OFFSET_IN_PAGE(wq, idx) ((idx) & (wq)->wqebbs_per_page_mask) -#define WQ_GET_WQEBB_ADDR(wq, pg_idx, idx_in_pg) \ - ((u8 *)(wq)->wq_pages[pg_idx].align_vaddr + ((idx_in_pg) << (wq)->wqebb_size_shift)) -#define WQ_IS_0_LEVEL_CLA(wq) ((wq)->num_wq_pages == 1) - -static inline u16 sphw_wq_free_wqebbs(struct sphw_wq *wq) -{ - return wq->q_depth - ((wq->q_depth + wq->prod_idx - wq->cons_idx) & - wq->idx_mask) - 1; -} - -static inline bool sphw_wq_is_empty(struct sphw_wq *wq) -{ - return WQ_MASK_IDX(wq, wq->prod_idx) == WQ_MASK_IDX(wq, wq->cons_idx); -} - -static inline void *sphw_wq_get_one_wqebb(struct sphw_wq *wq, u16 *pi) -{ - *pi = WQ_MASK_IDX(wq, wq->prod_idx); - wq->prod_idx++; - - return WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, *pi), - WQ_OFFSET_IN_PAGE(wq, *pi)); -} - -static inline void *sphw_wq_get_multi_wqebbs(struct sphw_wq *wq, u16 num_wqebbs, u16 *prod_idx, - void **second_part_wqebbs_addr, - u16 *first_part_wqebbs_num) -{ - u32 pg_idx, off_in_page; - - *prod_idx = WQ_MASK_IDX(wq, wq->prod_idx); - wq->prod_idx += num_wqebbs; - - pg_idx = WQ_PAGE_IDX(wq, *prod_idx); - off_in_page = WQ_OFFSET_IN_PAGE(wq, *prod_idx); - - if (off_in_page + num_wqebbs > wq->wqebbs_per_page) { - /* wqe across wq page boundary */ - *second_part_wqebbs_addr = - WQ_GET_WQEBB_ADDR(wq, WQ_MASK_PAGE(wq, pg_idx + 1), 0); - *first_part_wqebbs_num = wq->wqebbs_per_page - off_in_page; - } else { - *second_part_wqebbs_addr = NULL; - *first_part_wqebbs_num = num_wqebbs; - } - - return WQ_GET_WQEBB_ADDR(wq, pg_idx, off_in_page); -} - -static inline void sphw_wq_put_wqebbs(struct sphw_wq *wq, u16 num_wqebbs) -{ - wq->cons_idx += num_wqebbs; -} - -static inline void *sphw_wq_wqebb_addr(struct sphw_wq *wq, u16 idx) -{ - return WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, idx), - WQ_OFFSET_IN_PAGE(wq, idx)); -} - -static inline void *sphw_wq_read_one_wqebb(struct sphw_wq *wq, u16 *cons_idx) -{ - *cons_idx = WQ_MASK_IDX(wq, wq->cons_idx); - - return sphw_wq_wqebb_addr(wq, *cons_idx); -} - -static inline u64 sphw_wq_get_first_wqe_page_addr(struct sphw_wq *wq) -{ - return wq->wq_pages[0].align_paddr; -} - -static inline void sphw_wq_reset(struct sphw_wq *wq) -{ - u16 pg_idx; - - wq->cons_idx = 0; - wq->prod_idx = 0; - - for (pg_idx = 0; pg_idx < wq->num_wq_pages; pg_idx++) - memset(wq->wq_pages[pg_idx].align_vaddr, 0, wq->wq_page_size); -} - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c deleted file mode 100644 index 910baed023a5aa3a1a9f420141beaa37840bffd2..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c +++ /dev/null @@ -1,752 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include - -#include "sphw_mt.h" -#include "sphw_crm.h" -#include "spnic_nic_dev.h" -#include "spnic_nic_dbg.h" -#include "spnic_nic_qp.h" -#include "spnic_rx.h" -#include "spnic_tx.h" -#include "spnic_dcb.h" - -typedef int (*nic_driv_module)(struct spnic_nic_dev *nic_dev, const void *buf_in, u32 in_size, - void *buf_out, u32 *out_size); - -struct nic_drv_module_handle { - enum driver_cmd_type driv_cmd_name; - nic_driv_module driv_func; -}; - -int 
get_nic_drv_version(void *buf_out, u32 *out_size) -{ - struct drv_version_info *ver_info = buf_out; - - if (!buf_out) { - pr_err("Buf_out is NULL.\n"); - return -EINVAL; - } - - if (*out_size != sizeof(*ver_info)) { - pr_err("Unexpect out buf size from user :%u, expect: %lu\n", - *out_size, sizeof(*ver_info)); - return -EINVAL; - } - - snprintf(ver_info->ver, sizeof(ver_info->ver), "%s [compiled with the kernel]", - SPNIC_DRV_VERSION); - - return 0; -} - -static int get_tx_info(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - u16 q_id; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't get tx info\n"); - return -EFAULT; - } - - if (!buf_in || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Buf_in or buf_out is NULL.\n"); - return -EINVAL; - } - - q_id = *((u16 *)buf_in); - - return spnic_dbg_get_sq_info(nic_dev->hwdev, q_id, buf_out, *out_size); -} - -static int get_q_num(struct spnic_nic_dev *nic_dev, - const void *buf_in, u32 in_size, - void *buf_out, u32 *out_size) -{ - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't get queue number\n"); - return -EFAULT; - } - - if (!buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Get queue number para buf_out is NULL.\n"); - return -EINVAL; - } - - if (*out_size != sizeof(u16)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user: %u, expect: %lu\n", - *out_size, sizeof(u16)); - return -EINVAL; - } - - *((u16 *)buf_out) = nic_dev->q_params.num_qps; - - return 0; -} - -static int get_tx_wqe_info(struct spnic_nic_dev *nic_dev, - const void *buf_in, u32 in_size, - void *buf_out, u32 *out_size) -{ - const struct wqe_info *info = buf_in; - u16 wqebb_cnt = 1; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't get tx wqe info\n"); - return -EFAULT; - } - - if (!info || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Buf_in or buf_out is NULL.\n"); - return -EINVAL; - } - - return spnic_dbg_get_wqe_info(nic_dev->hwdev, (u16)info->q_id, - (u16)info->wqe_id, wqebb_cnt, - buf_out, (u16 *)out_size, SPNIC_SQ); -} - -static int get_rx_info(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - struct nic_rq_info *rq_info = buf_out; - u16 q_id; - int err; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't get rx info\n"); - return -EFAULT; - } - - if (!buf_in || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Buf_in or buf_out is NULL.\n"); - return -EINVAL; - } - - q_id = *((u16 *)buf_in); - - err = spnic_dbg_get_rq_info(nic_dev->hwdev, q_id, buf_out, *out_size); - - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Get rq info failed, ret is %d.\n", err); - return err; - } - - rq_info->ci = (u16)nic_dev->rxqs[q_id].cons_idx & - nic_dev->rxqs[q_id].q_mask; - - rq_info->sw_pi = nic_dev->rxqs[q_id].next_to_update; - rq_info->msix_vector = nic_dev->rxqs[q_id].irq_id; - - rq_info->coalesc_timer_cfg = nic_dev->rxqs[q_id].last_coalesc_timer_cfg; - rq_info->pending_limt = nic_dev->rxqs[q_id].last_pending_limt; - - return 0; -} - -static int get_rx_wqe_info(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct wqe_info *info = buf_in; - u16 wqebb_cnt = 1; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - 
nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't get rx wqe info\n"); - return -EFAULT; - } - - if (!info || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Buf_in or buf_out is NULL.\n"); - return -EINVAL; - } - - return spnic_dbg_get_wqe_info(nic_dev->hwdev, (u16)info->q_id, - (u16)info->wqe_id, wqebb_cnt, - buf_out, (u16 *)out_size, SPNIC_RQ); -} - -static int get_rx_cqe_info(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct wqe_info *info = buf_in; - u16 q_id = 0; - u16 idx = 0; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't get rx cqe info\n"); - return -EFAULT; - } - - if (!info || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Buf_in or buf_out is NULL.\n"); - return -EINVAL; - } - - if (*out_size != sizeof(struct spnic_rq_cqe)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user :%u, expect: %lu\n", - *out_size, sizeof(struct spnic_rq_cqe)); - return -EINVAL; - } - q_id = (u16)info->q_id; - idx = (u16)info->wqe_id; - - if (q_id >= nic_dev->q_params.num_qps || - idx >= nic_dev->rxqs[q_id].q_depth) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Invalid q_id[%u] >= %u, or wqe idx[%u] >= %u.\n", - q_id, nic_dev->q_params.num_qps, idx, - nic_dev->rxqs[q_id].q_depth); - return -EFAULT; - } - - memcpy(buf_out, nic_dev->rxqs[q_id].rx_info[idx].cqe, - sizeof(struct spnic_rq_cqe)); - - return 0; -} - -static void clean_nicdev_stats(struct spnic_nic_dev *nic_dev) -{ - u64_stats_update_begin(&nic_dev->stats.syncp); - nic_dev->stats.netdev_tx_timeout = 0; - nic_dev->stats.tx_carrier_off_drop = 0; - nic_dev->stats.tx_invalid_qid = 0; - u64_stats_update_end(&nic_dev->stats.syncp); -} - -static int clear_func_static(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - int i; - - *out_size = 0; - clean_nicdev_stats(nic_dev); - for (i = 0; i < nic_dev->max_qps; i++) { - spnic_rxq_clean_stats(&nic_dev->rxqs[i].rxq_stats); - spnic_txq_clean_stats(&nic_dev->txqs[i].txq_stats); - } - - return 0; -} - -static int get_loopback_mode(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - struct spnic_nic_loop_mode *mode = buf_out; - - if (!out_size || !mode) - return -EINVAL; - - if (*out_size != sizeof(*mode)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user: %u, expect: %lu\n", - *out_size, sizeof(*mode)); - return -EINVAL; - } - - return spnic_get_loopback_mode(nic_dev->hwdev, (u8 *)&mode->loop_mode, - (u8 *)&mode->loop_ctrl); -} - -static int set_loopback_mode(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct spnic_nic_loop_mode *mode = buf_in; - int err; - - if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't set loopback mode\n"); - return -EFAULT; - } - - if (!mode || !out_size || in_size != sizeof(*mode)) - return -EINVAL; - - if (*out_size != sizeof(*mode)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user: %u, expect: %lu\n", - *out_size, sizeof(*mode)); - return -EINVAL; - } - - err = spnic_set_loopback_mode(nic_dev->hwdev, (u8)mode->loop_mode, (u8)mode->loop_ctrl); - if (err == 0) - nicif_info(nic_dev, drv, nic_dev->netdev, "Set loopback mode %u en %u succeed\n", - mode->loop_mode, mode->loop_ctrl); - - return err; -} - 
-enum spnic_nic_link_mode { - SPNIC_LINK_MODE_AUTO = 0, - SPNIC_LINK_MODE_UP, - SPNIC_LINK_MODE_DOWN, - SPNIC_LINK_MODE_MAX, -}; - -static int set_link_mode_param_valid(struct spnic_nic_dev *nic_dev, - const void *buf_in, u32 in_size, - u32 *out_size) -{ - if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't set link mode\n"); - return -EFAULT; - } - - if (!buf_in || !out_size || - in_size != sizeof(enum spnic_nic_link_mode)) - return -EINVAL; - - if (*out_size != sizeof(enum spnic_nic_link_mode)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user: %u, expect: %lu\n", - *out_size, sizeof(enum spnic_nic_link_mode)); - return -EINVAL; - } - - return 0; -} - -static int set_link_mode(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const enum spnic_nic_link_mode *link = buf_in; - u8 link_status; - - if (set_link_mode_param_valid(nic_dev, buf_in, in_size, out_size)) - return -EFAULT; - - switch (*link) { - case SPNIC_LINK_MODE_AUTO: - if (spnic_get_link_state(nic_dev->hwdev, &link_status)) - link_status = false; - spnic_link_status_change(nic_dev, (bool)link_status); - nicif_info(nic_dev, drv, nic_dev->netdev, - "Set link mode: auto succeed, now is link %s\n", - (link_status ? "up" : "down")); - break; - case SPNIC_LINK_MODE_UP: - spnic_link_status_change(nic_dev, true); - nicif_info(nic_dev, drv, nic_dev->netdev, - "Set link mode: up succeed\n"); - break; - case SPNIC_LINK_MODE_DOWN: - spnic_link_status_change(nic_dev, false); - nicif_info(nic_dev, drv, nic_dev->netdev, - "Set link mode: down succeed\n"); - break; - default: - nicif_err(nic_dev, drv, nic_dev->netdev, - "Invalid link mode %d to set\n", *link); - return -EINVAL; - } - - return 0; -} - -static int get_sset_count(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - u32 count; - - if (!buf_in || in_size != sizeof(u32) || !out_size || - *out_size != sizeof(u32) || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid parameters, in_size: %u\n", - in_size); - return -EINVAL; - } - - switch (*((u32 *)buf_in)) { - case SHOW_SSET_IO_STATS: - count = spnic_get_io_stats_size(nic_dev); - break; - default: - count = 0; - break; - } - - *((u32 *)buf_out) = count; - - return 0; -} - -static int get_sset_stats(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - struct spnic_show_item *items = buf_out; - u32 sset, count, size; - int err; - - if (!buf_in || in_size != sizeof(u32) || !out_size || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid parameters, in_size: %u\n", - in_size); - return -EINVAL; - } - - size = sizeof(u32); - err = get_sset_count(nic_dev, buf_in, in_size, &count, &size); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Get sset count failed, ret=%d\n", - err); - return -EINVAL; - } - if (count * sizeof(*items) != *out_size) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user :%u, expect: %lu\n", - *out_size, count * sizeof(*items)); - return -EINVAL; - } - - sset = *((u32 *)buf_in); - - switch (sset) { - case SHOW_SSET_IO_STATS: - spnic_get_io_stats(nic_dev, items); - break; - - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "Unknown %u to get stats\n", - sset); - err = -EINVAL; - break; - } - - return err; -} - -static int dcb_mt_qos_map(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 
*out_size) -{ - const struct spnic_mt_qos_info *qos = buf_in; - struct spnic_mt_qos_info *qos_out = buf_out; - u8 up_cnt, up; - int err; - - if (!buf_out || !out_size || !buf_in) - return -EINVAL; - - if (*out_size != sizeof(*qos_out) || in_size != sizeof(*qos)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpected buf size from user, in_size: %u, out_size: %u, expect: %lu\n", - in_size, *out_size, sizeof(*qos)); - return -EINVAL; - } - - memcpy(qos_out, qos, sizeof(*qos)); - qos_out->head.status = 0; - if (qos->op_code & MT_DCB_OPCODE_WR) { - up_cnt = 0; - for (up = 0; up < SPNIC_DCB_UP_MAX; up++) { - if (qos->valid_up_bitmap & BIT(up)) - up_cnt++; - } - - if (up_cnt != nic_dev->wanted_dcb_cfg.max_cos) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Invalid up bitmap: 0x%x", - qos->valid_up_bitmap); - qos_out->head.status = MT_EINVAL; - return 0; - } - - err = spnic_dcbcfg_set_up_bitmap(nic_dev, qos->valid_up_bitmap); - if (err) - qos_out->head.status = MT_EIO; - } else { - qos_out->valid_up_bitmap = - spnic_get_valid_up_bitmap(&nic_dev->wanted_dcb_cfg); - qos_out->valid_cos_bitmap = - nic_dev->wanted_dcb_cfg.valid_cos_bitmap; - } - - return 0; -} - -static int dcb_mt_dcb_state(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct spnic_mt_dcb_state *dcb = buf_in; - struct spnic_mt_dcb_state *dcb_out = buf_out; - int err; - - if (!buf_in || !buf_out || !out_size) - return -EINVAL; - - if (*out_size != sizeof(*dcb_out) || in_size != sizeof(*dcb)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpected buf size from user, in_size: %u, out_size: %u, expect: %lu\n", - in_size, *out_size, sizeof(*dcb)); - return -EINVAL; - } - - memcpy(dcb_out, dcb, sizeof(*dcb)); - dcb_out->head.status = 0; - if (dcb->op_code & MT_DCB_OPCODE_WR) { - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags) == dcb->state) - return 0; - - /* nic_mutex has been acquired by send_to_nic_driver and will - * also be acquired inside spnic_setup_tc - */ - mutex_unlock(&nic_dev->nic_mutex); - rtnl_lock(); - err = spnic_setup_tc(nic_dev->netdev, - dcb->state ? 
nic_dev->wanted_dcb_cfg.max_cos : 0); - rtnl_unlock(); - mutex_lock(&nic_dev->nic_mutex); - if (err) - dcb_out->head.status = MT_EIO; - } else { - dcb_out->state = !!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags); - } - - return 0; -} - -static int dcb_mt_pfc_state(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct spnic_mt_pfc_state *pfc = buf_in; - struct spnic_mt_pfc_state *pfc_out = buf_out; - u8 cur_pfc_state, cur_pfc_en_bitmap; - int err; - - if (!buf_in || !buf_out || !out_size) - return -EINVAL; - - if (*out_size != sizeof(*pfc_out) || in_size != sizeof(*pfc)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpected buf size from user, in_size: %u, out_size: %u, expect: %lu\n", - in_size, *out_size, sizeof(*pfc)); - return -EINVAL; - } - - cur_pfc_state = spnic_dcbcfg_get_pfc_state(nic_dev); - cur_pfc_en_bitmap = spnic_dcbcfg_get_pfc_pri_en(nic_dev); - - memcpy(pfc_out, pfc, sizeof(*pfc)); - pfc_out->head.status = 0; - if (pfc->op_code & MT_DCB_OPCODE_WR) { - if (pfc->op_code & MT_DCB_PFC_PFC_STATE) - spnic_dcbcfg_set_pfc_state(nic_dev, pfc->state); - - if (pfc->op_code & MT_DCB_PFC_PFC_PRI_EN) - spnic_dcbcfg_set_pfc_pri_en(nic_dev, pfc->pfc_en_bitpamp); - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - err = spnic_configure_dcb(nic_dev->netdev); - if (err) { - pfc_out->head.status = MT_EIO; - goto set_err; - } - } - } else { - pfc_out->state = cur_pfc_state; - pfc_out->pfc_en_bitpamp = cur_pfc_en_bitmap; - } - - return 0; - -set_err: - spnic_dcbcfg_set_pfc_state(nic_dev, cur_pfc_state); - spnic_dcbcfg_set_pfc_pri_en(nic_dev, cur_pfc_en_bitmap); - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - err = spnic_configure_dcb(nic_dev->netdev); - if (err) - nicif_warn(nic_dev, drv, nic_dev->netdev, - "Failed to rollback pfc config\n"); - } - return 0; -} - -static int dcb_mt_ets_state(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct spnic_mt_ets_state *ets = buf_in; - struct spnic_mt_ets_state *ets_out = buf_out; - struct spnic_dcb_config dcb_cfg_backup; - int err; - - if (!buf_in || !buf_out || !out_size) - return -EINVAL; - - if (*out_size != sizeof(*ets_out) || in_size != sizeof(*ets)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpected buf size from user, in_size: %u, out_size: %u, expect: %lu\n", - in_size, *out_size, sizeof(*ets)); - return -EINVAL; - } - - memcpy(ets_out, ets, sizeof(*ets)); - ets_out->head.status = 0; - if (ets->op_code & MT_DCB_OPCODE_WR) { - if (ets->op_code & (MT_DCB_ETS_UP_BW | MT_DCB_ETS_UP_PRIO)) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Setting up bw and up prio is not supported\n"); - ets_out->head.status = MT_EOPNOTSUPP; - return 0; - } - - dcb_cfg_backup = nic_dev->wanted_dcb_cfg; - - if (ets->op_code & MT_DCB_ETS_UP_TC) { - err = spnic_dcbcfg_set_ets_up_tc_map(nic_dev, ets->up_tc); - if (err) { - ets_out->head.status = MT_EIO; - return 0; - } - } - if (ets->op_code & MT_DCB_ETS_TC_BW) { - err = spnic_dcbcfg_set_ets_tc_bw(nic_dev, ets->tc_bw); - if (err) { - ets_out->head.status = MT_EIO; - goto set_err; - } - } - if (ets->op_code & MT_DCB_ETS_TC_PRIO) - spnic_dcbcfg_set_ets_tc_prio_type(nic_dev, ets->tc_prio_bitmap); - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - err = spnic_configure_dcb(nic_dev->netdev); - if (err) { - ets_out->head.status = MT_EIO; - goto set_err; - } - } - } else { - spnic_dcbcfg_get_ets_up_tc_map(nic_dev, ets_out->up_tc); - spnic_dcbcfg_get_ets_tc_bw(nic_dev, ets_out->tc_bw); - 
spnic_dcbcfg_get_ets_tc_prio_type(nic_dev, &ets_out->tc_prio_bitmap); - } - - return 0; - -set_err: - nic_dev->wanted_dcb_cfg = dcb_cfg_backup; - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - err = spnic_configure_dcb(nic_dev->netdev); - if (err) - nicif_warn(nic_dev, drv, nic_dev->netdev, - "Failed to rollback ets config\n"); - } - - return 0; -} - -static int get_inter_num(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - u16 intr_num; - - intr_num = sphw_intr_num(nic_dev->hwdev); - - if (*out_size != sizeof(u16)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpected out buf size from user: %u, expect: %lu\n", - *out_size, sizeof(u16)); - return -EFAULT; - } - *(u16 *)buf_out = intr_num; - - *out_size = sizeof(u16); - - return 0; -} - -static int get_netdev_name(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - if (*out_size != IFNAMSIZ) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpected out buf size from user: %u, expect: %u\n", - *out_size, IFNAMSIZ); - return -EFAULT; - } - - strlcpy(buf_out, nic_dev->netdev->name, IFNAMSIZ); - - return 0; -} - -struct nic_drv_module_handle nic_driv_module_cmd_handle[] = { - {TX_INFO, get_tx_info}, - {Q_NUM, get_q_num}, - {TX_WQE_INFO, get_tx_wqe_info}, - {RX_INFO, get_rx_info}, - {RX_WQE_INFO, get_rx_wqe_info}, - {RX_CQE_INFO, get_rx_cqe_info}, - {GET_INTER_NUM, get_inter_num}, - {CLEAR_FUNC_STASTIC, clear_func_static}, - {GET_LOOPBACK_MODE, get_loopback_mode}, - {SET_LOOPBACK_MODE, set_loopback_mode}, - {SET_LINK_MODE, set_link_mode}, - {GET_SSET_COUNT, get_sset_count}, - {GET_SSET_ITEMS, get_sset_stats}, - {DCB_QOS_INFO, dcb_mt_qos_map}, - {DCB_STATE, dcb_mt_dcb_state}, - {DCB_PFC_STATE, dcb_mt_pfc_state}, - {DCB_ETS_STATE, dcb_mt_ets_state}, - {GET_ULD_DEV_NAME, get_netdev_name}, -}; - -static int send_to_nic_driver(struct spnic_nic_dev *nic_dev, - u32 cmd, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - int index, num_cmds = sizeof(nic_driv_module_cmd_handle) / - sizeof(nic_driv_module_cmd_handle[0]); - enum driver_cmd_type cmd_type = (enum driver_cmd_type)cmd; - int err = 0; - - mutex_lock(&nic_dev->nic_mutex); - for (index = 0; index < num_cmds; index++) { - if (cmd_type == - nic_driv_module_cmd_handle[index].driv_cmd_name) { - err = nic_driv_module_cmd_handle[index].driv_func - (nic_dev, buf_in, - in_size, buf_out, out_size); - break; - } - } - mutex_unlock(&nic_dev->nic_mutex); - - if (index == num_cmds) - pr_err("Can't find callback for %d\n", cmd_type); - - return err; -} - -int nic_ioctl(void *uld_dev, u32 cmd, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - if (cmd == GET_DRV_VERSION) - return get_nic_drv_version(buf_out, out_size); - else if (!uld_dev) - return -EINVAL; - - return send_to_nic_driver(uld_dev, cmd, buf_in, - in_size, buf_out, out_size); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c b/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c deleted file mode 100644 index 7108430e061858d263d97b62f3a965ac522db1f6..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c +++ /dev/null @@ -1,965 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "spnic_lld.h" -#include "spnic_nic_cfg.h" -#include 
"spnic_nic_dev.h" -#include "spnic_dcb.h" - -#define DCB_CFG_CHG_ETS BIT(0) -#define DCB_CFG_CHG_PFC BIT(1) -#define DCB_CFG_CHG_UP_COS BIT(2) - -#define MAX_BW_PERCENT 100 - -void spnic_set_prio_tc_map(struct spnic_nic_dev *nic_dev) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; - u8 valid_up_bitmap = spnic_get_valid_up_bitmap(dcb_cfg); - u8 default_tc = dcb_cfg->max_cos - 1; - u8 i, tc_id; - - /* use 0~max_cos-1 as tc for netdev */ - for (tc_id = 0, i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (dcb_cfg->valid_cos_bitmap & BIT(i)) { - netdev_set_prio_tc_map(nic_dev->netdev, - dcb_cfg->cos_cfg[i].up, tc_id); - tc_id++; - } - } - - /* set invalid up mapping to the default tc */ - for (i = 0; i < SPNIC_DCB_UP_MAX; i++) { - if (!(valid_up_bitmap & BIT(i))) - netdev_set_prio_tc_map(nic_dev->netdev, i, default_tc); - } -} - -void spnic_update_tx_db_cos(struct spnic_nic_dev *nic_dev) -{ - u8 i, valid_cos_bitmap, cos; - u16 num_rss; - - if (!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - spnic_set_txq_cos(nic_dev, 0, nic_dev->q_params.num_qps, - nic_dev->hw_dcb_cfg.default_cos); - return; - } - - num_rss = nic_dev->q_params.num_rss; - valid_cos_bitmap = nic_dev->hw_dcb_cfg.valid_cos_bitmap; - for (i = 0; i < nic_dev->q_params.num_tc; i++) { - cos = (u8)(ffs(valid_cos_bitmap) - 1); - spnic_set_txq_cos(nic_dev, (u16)(i * num_rss), num_rss, cos); - valid_cos_bitmap &= (~BIT(cos)); - } -} - -int spnic_set_tx_cos_state(struct spnic_nic_dev *nic_dev) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; - struct spnic_dcb_state dcb_state = {0}; - u8 default_cos, i; - int err; - - if (SPNIC_FUNC_IS_VF(nic_dev->hwdev)) { - err = spnic_get_pf_dcb_state(nic_dev->hwdev, &dcb_state); - if (err) { - spnic_err(nic_dev, drv, "Failed to get vf default cos\n"); - return err; - } - /* VF does not support DCB, use the default cos */ - dcb_cfg->default_cos = dcb_state.default_cos; - - return 0; - } - - default_cos = dcb_cfg->default_cos; - dcb_state.dcb_on = !!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags); - dcb_state.default_cos = default_cos; - memset(dcb_state.up_cos, default_cos, sizeof(dcb_state.up_cos)); - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (dcb_cfg->valid_cos_bitmap & BIT(i)) - dcb_state.up_cos[dcb_cfg->cos_cfg[i].up] = i; - } - } - - err = spnic_set_dcb_state(nic_dev->hwdev, &dcb_state); - if (err) - spnic_err(nic_dev, drv, "Failed to set dcb state\n"); - - return err; -} - -static void setup_tc_reopen_handler(struct spnic_nic_dev *nic_dev, - const void *priv_data) -{ - u8 tc = *((u8 *)priv_data); - - if (tc) { - netdev_set_num_tc(nic_dev->netdev, tc); - spnic_set_prio_tc_map(nic_dev); - - set_bit(SPNIC_DCB_ENABLE, &nic_dev->flags); - } else { - netdev_reset_tc(nic_dev->netdev); - - clear_bit(SPNIC_DCB_ENABLE, &nic_dev->flags); - } - - spnic_set_tx_cos_state(nic_dev); -} - -int spnic_setup_tc(struct net_device *netdev, u8 tc) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_dyna_txrxq_params q_params = {0}; - u8 cur_tc; - int err; - - if (tc && test_bit(SPNIC_SAME_RXTX, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, "Failed to enable DCB while Symmetric RSS is enabled\n"); - return -EOPNOTSUPP; - } - - if (tc > nic_dev->hw_dcb_cfg.max_cos) { - nicif_err(nic_dev, drv, netdev, "Invalid num_tc: %u, max tc: %u\n", - tc, nic_dev->hw_dcb_cfg.max_cos); - return -EINVAL; - } - - if (tc & (tc - 1)) { - nicif_err(nic_dev, drv, netdev, - "Invalid num_tc: %u, must be power of 2\n", tc); - return -EINVAL; - 
} - - if (netif_running(netdev)) { - cur_tc = nic_dev->q_params.num_tc; - q_params = nic_dev->q_params; - q_params.num_tc = tc; - q_params.txqs_res = NULL; - q_params.rxqs_res = NULL; - q_params.irq_cfg = NULL; - - nicif_info(nic_dev, drv, netdev, "Change num_tc to %u, restarting channel\n", - tc); - err = spnic_change_channel_settings(nic_dev, &q_params, setup_tc_reopen_handler, - &tc); - if (err) { - if (cur_tc != nic_dev->q_params.num_tc) { - nicif_err(nic_dev, drv, netdev, - "Restore num_tc to %u\n", cur_tc); - /* In this case, the channel resource is - * invalid, so we can safely modify the number - * of tc in netdev. - */ - nic_dev->q_params.num_tc = cur_tc; - setup_tc_reopen_handler(nic_dev, &cur_tc); - } - nicif_err(nic_dev, drv, netdev, "Failed to change channel settings\n"); - return err; - } - } else { - setup_tc_reopen_handler(nic_dev, &tc); - spnic_update_num_qps(netdev); - } - - spnic_configure_dcb(netdev); - - return 0; -} - -/* Ucode thread timeout is 210ms, so the wait must be larger than 210ms */ -#define SPNIC_WAIT_PORT_IO_STOP 250 - -static int spnic_stop_port_traffic_flow(struct spnic_nic_dev *nic_dev, bool wait) -{ - int err = 0; - - down(&nic_dev->dcb_sem); - - if (nic_dev->disable_port_cnt++ != 0) - goto out; - - err = spnic_force_port_disable(nic_dev); - if (err) { - spnic_err(nic_dev, drv, "Failed to disable port\n"); - goto set_port_err; - } - - err = spnic_set_port_funcs_state(nic_dev->hwdev, false); - if (err) { - spnic_err(nic_dev, drv, "Failed to disable all functions in port\n"); - goto set_port_funcs_err; - } - - spnic_info(nic_dev, drv, "Stop port traffic flow\n"); - - goto out; - -set_port_funcs_err: - spnic_force_set_port_state(nic_dev, !!netif_running(nic_dev->netdev)); - -set_port_err: -out: - if (err) - nic_dev->disable_port_cnt--; - - up(&nic_dev->dcb_sem); - if (!err && wait && nic_dev->netdev->reg_state == NETREG_REGISTERED) - msleep(SPNIC_WAIT_PORT_IO_STOP); - - return err; -} - -static int spnic_start_port_traffic_flow(struct spnic_nic_dev *nic_dev) -{ - int err; - - down(&nic_dev->dcb_sem); - - nic_dev->disable_port_cnt--; - if (nic_dev->disable_port_cnt > 0) { - up(&nic_dev->dcb_sem); - return 0; - } - - nic_dev->disable_port_cnt = 0; - up(&nic_dev->dcb_sem); - - err = spnic_force_set_port_state(nic_dev, !!netif_running(nic_dev->netdev)); - if (err) - spnic_err(nic_dev, drv, "Failed to enable port\n"); - - err = spnic_set_port_funcs_state(nic_dev->hwdev, true); - if (err) - spnic_err(nic_dev, drv, "Failed to enable all functions in port\n"); - - spnic_info(nic_dev, drv, "Start port traffic flow\n"); - - return err; -} - -static u8 get_cos_settings(u8 hw_valid_cos_bitmap, u8 *dst_valid_cos_bitmap) -{ - u8 support_cos = 0; - u8 num_cos, overflow; - u8 i; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (hw_valid_cos_bitmap & BIT(i)) - support_cos++; - } - - num_cos = (u8)(1U << (u8)ilog2(support_cos)); - if (num_cos != support_cos) { - /* Remove unused cos id */ - overflow = support_cos - num_cos; - i = SPNIC_DCB_COS_MAX - 1; - while (overflow) { - if (hw_valid_cos_bitmap & BIT(i)) { - hw_valid_cos_bitmap &= (~BIT(i)); - overflow--; - } - - i--; - } - } - - *dst_valid_cos_bitmap = hw_valid_cos_bitmap; - - return num_cos; -} - -static int get_dft_valid_up_bitmap(struct spnic_nic_dev *nic_dev, u8 num_pri, - u8 *valid_up_bitmap) -{ - bool setted = false; - u8 up_bitmap = 0; - u8 up; - int err; - - err = spnic_get_chip_up_bitmap(nic_dev->pdev, &setted, &up_bitmap); - if (err) { - spnic_err(nic_dev, drv, "Get chip cos_up map failed\n"); - return -EFAULT; - } 
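/* A minimal sketch of the fallback below, assuming num_pri <= 8 (which
 * SPNIC_DCB_UP_MAX implies): the loop marks user priorities 0..num_pri-1
 * as valid, which is equivalent to
 *
 *	u8 up_bitmap = (u8)((1U << num_pri) - 1);
 *
 * e.g. num_pri == 4 yields up_bitmap == 0x0f.
 */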
- - if (!setted) { - /* Use priorities 0..(num_pri - 1) as the default user priorities */ - for (up = 0; up < num_pri; up++) - up_bitmap |= (u8)BIT(up); - } - - err = spnic_set_chip_up_bitmap(nic_dev->pdev, up_bitmap); - if (err) { - spnic_err(nic_dev, drv, "Set chip cos_up map failed\n"); - return -EFAULT; - } - - *valid_up_bitmap = up_bitmap; - - return 0; -} - -u8 spnic_get_valid_up_bitmap(struct spnic_dcb_config *dcb_cfg) -{ - u8 valid_up_bitmap = 0; - u8 i; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (dcb_cfg->valid_cos_bitmap & BIT(i)) - valid_up_bitmap |= (u8)BIT(dcb_cfg->cos_cfg[i].up); - } - - return valid_up_bitmap; -} - -static void update_valid_up_bitmap(struct spnic_dcb_config *dcb_cfg, - u8 valid_up_bitmap) -{ - u8 i, up; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) { - dcb_cfg->cos_cfg[i].up = 0; - continue; - } - - /* get the highest priority */ - up = (u8)fls(valid_up_bitmap) - 1; - valid_up_bitmap &= (~BIT(up)); - - dcb_cfg->cos_cfg[i].up = up; - } -} - -static int init_default_dcb_cfg(struct spnic_nic_dev *nic_dev, - struct spnic_dcb_config *dcb_cfg) -{ - struct spnic_cos_cfg *cos_cfg = dcb_cfg->cos_cfg; - struct spnic_tc_cfg *tc_cfg = dcb_cfg->tc_cfg; - u8 valid_cos_bitmap, i; - u8 valid_up_bitmap = 0; - int err; - - valid_cos_bitmap = sphw_cos_valid_bitmap(nic_dev->hwdev); - if (!valid_cos_bitmap) { - spnic_err(nic_dev, drv, "No cos supported\n"); - return -EFAULT; - } - - dcb_cfg->max_cos = get_cos_settings(valid_cos_bitmap, - &dcb_cfg->valid_cos_bitmap); - dcb_cfg->default_cos = (u8)fls(dcb_cfg->valid_cos_bitmap) - 1; - - err = get_dft_valid_up_bitmap(nic_dev, dcb_cfg->max_cos, - &valid_up_bitmap); - if (err) - return err; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - /* set all cos to 100 percent bw by default */ - cos_cfg[i].bw_pct = MAX_BW_PERCENT; - cos_cfg[i].prio_sp = 0; /* DWRR */ - cos_cfg[i].tc_id = 0; /* map all cos to tc0 */ - cos_cfg[i].up = 0; - } - - update_valid_up_bitmap(dcb_cfg, valid_up_bitmap); - - for (i = 0; i < SPNIC_DCB_TC_MAX; i++) { - /* tc0 gets 100 percent bw by default */ - tc_cfg[i].bw_pct = (i == 0) ? 
MAX_BW_PERCENT : 0; - tc_cfg[i].prio_sp = 0; /* DWRR */ - } - - /* disable pfc */ - dcb_cfg->pfc_state = 0; - dcb_cfg->pfc_en_bitmap = 0; - - return 0; -} - -int spnic_dcb_init(struct spnic_nic_dev *nic_dev) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - int err; - - if (SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - return spnic_set_tx_cos_state(nic_dev); - - err = init_default_dcb_cfg(nic_dev, dcb_cfg); - if (err) { - spnic_err(nic_dev, drv, "Initialize dcb configuration failed\n"); - return err; - } - - spnic_info(nic_dev, drv, "Support num cos %u, default cos %u\n", - dcb_cfg->max_cos, dcb_cfg->default_cos); - - nic_dev->dcb_changes = DCB_CFG_CHG_ETS | DCB_CFG_CHG_PFC | - DCB_CFG_CHG_UP_COS; - - memcpy(&nic_dev->hw_dcb_cfg, &nic_dev->wanted_dcb_cfg, - sizeof(nic_dev->hw_dcb_cfg)); - - err = spnic_set_tx_cos_state(nic_dev); - if (err) { - spnic_err(nic_dev, drv, "Set tx cos state failed\n"); - return err; - } - - sema_init(&nic_dev->dcb_sem, 1); - - return 0; -} - -u32 spnic_sync_dcb_cfg(struct spnic_nic_dev *nic_dev, struct spnic_dcb_config *src_dcb_cfg) -{ - struct spnic_dcb_config *wanted_cfg = src_dcb_cfg; - struct spnic_dcb_config *hw_cfg = &nic_dev->hw_dcb_cfg; - u32 changes = 0; - - if (memcmp(hw_cfg->cos_cfg, wanted_cfg->cos_cfg, - sizeof(hw_cfg->cos_cfg))) { - memcpy(hw_cfg->cos_cfg, wanted_cfg->cos_cfg, - sizeof(hw_cfg->cos_cfg)); - changes |= DCB_CFG_CHG_ETS; - } - - if (memcmp(hw_cfg->tc_cfg, wanted_cfg->tc_cfg, - sizeof(hw_cfg->tc_cfg))) { - memcpy(hw_cfg->tc_cfg, wanted_cfg->tc_cfg, - sizeof(hw_cfg->tc_cfg)); - changes |= DCB_CFG_CHG_ETS; - } - - if (hw_cfg->pfc_state != wanted_cfg->pfc_state || - (wanted_cfg->pfc_state && - hw_cfg->pfc_en_bitmap != wanted_cfg->pfc_en_bitmap)) { - hw_cfg->pfc_state = wanted_cfg->pfc_state; - hw_cfg->pfc_en_bitmap = wanted_cfg->pfc_en_bitmap; - changes |= DCB_CFG_CHG_PFC; - } - - return changes; -} - -static int dcbcfg_set_hw_cos_up_map(struct spnic_nic_dev *nic_dev, - struct spnic_dcb_config *dcb_cfg) -{ - u8 cos_up_map[SPNIC_DCB_COS_MAX] = {0}; - int err; - u8 i; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) - continue; - - cos_up_map[i] = dcb_cfg->cos_cfg[i].up; - } - - err = spnic_dcb_set_cos_up_map(nic_dev->hwdev, dcb_cfg->valid_cos_bitmap, - cos_up_map, SPNIC_DCB_COS_MAX); - if (err) - spnic_err(nic_dev, drv, "Set cos_up map failed\n"); - - return err; -} - -/* The sum of the cos bandwidth mapped to the same TC is 100 */ -static void adjust_cos_bw(u8 valid_cos_bitmap, u8 *cos_tc, u8 *cos_bw) -{ - u8 tc, cos, cos_num; - u16 bw_all, bw_remain; - - for (tc = 0; tc < SPNIC_DCB_TC_MAX; tc++) { - bw_all = 0; - cos_num = 0; - for (cos = 0; cos < SPNIC_DCB_COS_MAX; cos++) { - if (!(valid_cos_bitmap & BIT(cos)) || cos_tc[cos] != tc) - continue; - bw_all += cos_bw[cos]; - cos_num++; - } - - if (!bw_all || !cos_num) - continue; - - bw_remain = MAX_BW_PERCENT % cos_num; - for (cos = 0; cos < SPNIC_DCB_COS_MAX; cos++) { - if (!(valid_cos_bitmap & BIT(cos)) || cos_tc[cos] != tc) - continue; - - cos_bw[cos] = - (u8)(MAX_BW_PERCENT * cos_bw[cos] / bw_all); - - if (bw_remain) { - cos_bw[cos]++; - bw_remain--; - } - } - } -} - -static void dcbcfg_dump_configuration(struct spnic_nic_dev *nic_dev, - u8 *cos_tc, u8 *cos_bw, u8 *cos_prio, - u8 *tc_bw, u8 *tc_prio) -{ - u8 i; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (!(nic_dev->hw_dcb_cfg.valid_cos_bitmap & BIT(i))) - continue; - - spnic_info(nic_dev, drv, "cos: %u, up: %u, tc: %u, bw: %u, prio: %u\n", - i, nic_dev->hw_dcb_cfg.cos_cfg[i].up, 
cos_tc[i], - cos_bw[i], cos_prio[i]); - } - - for (i = 0; i < nic_dev->hw_dcb_cfg.max_cos; i++) - spnic_info(nic_dev, drv, "tc: %u, bw: %u, prio: %u\n", - i, tc_bw[i], tc_prio[i]); -} - -static int dcbcfg_set_hw_ets(struct spnic_nic_dev *nic_dev, - struct spnic_dcb_config *dcb_cfg) -{ - u8 cos_tc[SPNIC_DCB_COS_MAX] = {0}; - u8 cos_bw[SPNIC_DCB_COS_MAX] = {0}; - u8 cos_prio[SPNIC_DCB_COS_MAX] = {0}; - u8 tc_bw[SPNIC_DCB_TC_MAX] = {0}; - u8 tc_prio[SPNIC_DCB_TC_MAX] = {0}; - int err; - u8 i; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) - continue; - - cos_tc[i] = dcb_cfg->cos_cfg[i].tc_id; - cos_bw[i] = dcb_cfg->cos_cfg[i].bw_pct; - cos_prio[i] = dcb_cfg->cos_cfg[i].prio_sp; - } - - for (i = 0; i < SPNIC_DCB_TC_MAX; i++) { - tc_bw[i] = dcb_cfg->tc_cfg[i].bw_pct; - tc_prio[i] = dcb_cfg->tc_cfg[i].prio_sp; - } - - adjust_cos_bw(dcb_cfg->valid_cos_bitmap, cos_tc, cos_bw); - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) - dcbcfg_dump_configuration(nic_dev, cos_tc, cos_bw, cos_prio, tc_bw, tc_prio); - - err = spnic_dcb_set_ets(nic_dev->hwdev, cos_tc, cos_bw, cos_prio, tc_bw, tc_prio); - if (err) { - spnic_err(nic_dev, drv, "Failed to set ets\n"); - return err; - } - - return 0; -} - -static int dcbcfg_set_hw_pfc(struct spnic_nic_dev *nic_dev, - struct spnic_dcb_config *dcb_cfg) -{ - u8 valid_up_bitmap = spnic_get_valid_up_bitmap(dcb_cfg); - u8 outof_range_pfc = (~valid_up_bitmap) & dcb_cfg->pfc_en_bitmap; - int err; - - if (dcb_cfg->pfc_state && outof_range_pfc) - spnic_info(nic_dev, drv, "PFC setting out of range, 0x%x will be ignored\n", - outof_range_pfc); - - err = spnic_dcb_set_pfc(nic_dev->hwdev, dcb_cfg->pfc_state, dcb_cfg->pfc_en_bitmap); - if (err) { - spnic_err(nic_dev, drv, "Failed to %s PFC\n", - dcb_cfg->pfc_state ? 
"enable" : "disable"); - return err; - } - - if (dcb_cfg->pfc_state) - spnic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n", - dcb_cfg->pfc_en_bitmap & valid_up_bitmap); - else - spnic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n"); - - return 0; -} - -int spnic_dcbcfg_setall_to_hw(struct spnic_nic_dev *nic_dev, struct spnic_dcb_config *src_dcb_cfg) -{ - bool stop_traffic = false; - int err = 0; - - nic_dev->dcb_changes |= spnic_sync_dcb_cfg(nic_dev, src_dcb_cfg); - if (!nic_dev->dcb_changes) - return 0; - - /* hw does not support to change up cos mapping and cos tc mapping with - * traffic flow - */ - stop_traffic = !!(nic_dev->dcb_changes & - (DCB_CFG_CHG_ETS | DCB_CFG_CHG_UP_COS)); - if (stop_traffic) { - err = spnic_stop_port_traffic_flow(nic_dev, true); - if (err) - return err; - } - - if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) { - err = dcbcfg_set_hw_cos_up_map(nic_dev, &nic_dev->hw_dcb_cfg); - if (err) - goto out; - - nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS); - } - - if (nic_dev->dcb_changes & DCB_CFG_CHG_ETS) { - err = dcbcfg_set_hw_ets(nic_dev, &nic_dev->hw_dcb_cfg); - if (err) - goto out; - - nic_dev->dcb_changes &= (~DCB_CFG_CHG_ETS); - } - - if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) { - err = dcbcfg_set_hw_pfc(nic_dev, &nic_dev->hw_dcb_cfg); - if (err) - goto out; - - nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC); - } - -out: - if (stop_traffic) - spnic_start_port_traffic_flow(nic_dev); - - return err; -} - -int spnic_dcb_reset_hw_config(struct spnic_nic_dev *nic_dev) -{ - struct spnic_dcb_config dft_cfg = {0}; - int err; - - init_default_dcb_cfg(nic_dev, &dft_cfg); - err = spnic_dcbcfg_setall_to_hw(nic_dev, &dft_cfg); - if (err) { - spnic_err(nic_dev, drv, "Failed to reset hw dcb configuration\n"); - return err; - } - - spnic_info(nic_dev, drv, "Reset hardware DCB configuration done\n"); - - return 0; -} - -int spnic_configure_dcb(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) - return spnic_dcbcfg_setall_to_hw(nic_dev, &nic_dev->wanted_dcb_cfg); - else - return spnic_dcb_reset_hw_config(nic_dev); -} - -void spnic_dcbcfg_set_pfc_state(struct spnic_nic_dev *nic_dev, u8 pfc_state) -{ - nic_dev->wanted_dcb_cfg.pfc_state = pfc_state; -} - -u8 spnic_dcbcfg_get_pfc_state(struct spnic_nic_dev *nic_dev) -{ - return nic_dev->wanted_dcb_cfg.pfc_state; -} - -void spnic_dcbcfg_set_pfc_pri_en(struct spnic_nic_dev *nic_dev, u8 pfc_en_bitmap) -{ - nic_dev->wanted_dcb_cfg.pfc_en_bitmap = pfc_en_bitmap; -} - -u8 spnic_dcbcfg_get_pfc_pri_en(struct spnic_nic_dev *nic_dev) -{ - return nic_dev->wanted_dcb_cfg.pfc_en_bitmap; -} - -int spnic_dcbcfg_set_ets_up_tc_map(struct spnic_nic_dev *nic_dev, const u8 *up_tc_map) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - u8 i; - - for (i = 0; i < SPNIC_DCB_UP_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) - continue; - - /* TC id can't exceed max cos */ - if (up_tc_map[dcb_cfg->cos_cfg[i].up] >= dcb_cfg->max_cos) - return -EINVAL; - } - - for (i = 0; i < SPNIC_DCB_UP_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) - continue; - - dcb_cfg->cos_cfg[i].tc_id = up_tc_map[dcb_cfg->cos_cfg[i].up]; - } - - return 0; -} - -void spnic_dcbcfg_get_ets_up_tc_map(struct spnic_nic_dev *nic_dev, u8 *up_tc_map) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - struct spnic_cos_cfg *cos_cfg = dcb_cfg->cos_cfg; - u8 i; - - /* set unused up mapping to default tc */ - memset(up_tc_map, 
cos_cfg[dcb_cfg->default_cos].tc_id, - SPNIC_DCB_UP_MAX); - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) - continue; - - up_tc_map[cos_cfg[i].up] = cos_cfg[i].tc_id; - } -} - -int spnic_dcbcfg_set_ets_tc_bw(struct spnic_nic_dev *nic_dev, const u8 *tc_bw) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - u8 bw_sum = 0; - u8 i; - - for (i = 0; i < SPNIC_DCB_TC_MAX; i++) { - /* cannot set bandwidth for unused tc */ - if (i >= dcb_cfg->max_cos && tc_bw[i] > 0) - return -EINVAL; - - bw_sum += tc_bw[i]; - } - - if (bw_sum != MAX_BW_PERCENT && bw_sum != 0) { - spnic_err(nic_dev, drv, "Invalid total bw %u\n", bw_sum); - return -EINVAL; - } - - for (i = 0; i < dcb_cfg->max_cos; i++) - dcb_cfg->tc_cfg[i].bw_pct = tc_bw[i]; - - return 0; -} - -void spnic_dcbcfg_get_ets_tc_bw(struct spnic_nic_dev *nic_dev, u8 *tc_bw) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - u8 i; - - for (i = 0; i < dcb_cfg->max_cos; i++) - tc_bw[i] = dcb_cfg->tc_cfg[i].bw_pct; -} - -void spnic_dcbcfg_set_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 tc_prio_bitmap) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - u8 i; - - for (i = 0; i < dcb_cfg->max_cos; i++) - dcb_cfg->tc_cfg[i].prio_sp = !!(tc_prio_bitmap & BIT(i)); -} - -void spnic_dcbcfg_get_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 *tc_prio_bitmap) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - u8 i; - - *tc_prio_bitmap = 0; - for (i = 0; i < dcb_cfg->max_cos; i++) { - if (dcb_cfg->tc_cfg[i].prio_sp) - *tc_prio_bitmap |= (u8)BIT(i); - } -} - -/* TODO: send a command to the MPU so that the MPU closes all port traffic */ -static int stop_all_ports_flow(void *uld_array[], u32 num_dev) -{ - struct spnic_nic_dev *tmp_dev = NULL; - u32 i, idx; - int err; - - for (idx = 0; idx < num_dev; idx++) { - tmp_dev = (struct spnic_nic_dev *)uld_array[idx]; - err = spnic_stop_port_traffic_flow(tmp_dev, false); - if (err) { - nicif_err(tmp_dev, drv, tmp_dev->netdev, "Stop port traffic flow failed\n"); - goto stop_port_err; - } - } - - /* wait for all traffic flows to stop */ - msleep(SPNIC_WAIT_PORT_IO_STOP); - - return 0; - -stop_port_err: - for (i = 0; i < idx; i++) { - tmp_dev = (struct spnic_nic_dev *)uld_array[i]; - spnic_start_port_traffic_flow(tmp_dev); - } - - return err; -} - -static void start_all_ports_flow(void *uld_array[], u32 num_dev) -{ - struct spnic_nic_dev *tmp_dev = NULL; - u32 idx; - - for (idx = 0; idx < num_dev; idx++) { - tmp_dev = (struct spnic_nic_dev *)uld_array[idx]; - spnic_start_port_traffic_flow(tmp_dev); - } -} - -int change_dev_cos_up_map(struct spnic_nic_dev *nic_dev, u8 valid_up_bitmap) -{ - struct net_device *netdev = nic_dev->netdev; - int err = 0; - - if (test_and_set_bit(SPNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) { - nicif_warn(nic_dev, drv, netdev, - "Cos_up map setting is in progress, please try again later\n"); - return -EFAULT; - } - - if (spnic_get_valid_up_bitmap(&nic_dev->wanted_dcb_cfg) == - valid_up_bitmap) { - nicif_err(nic_dev, drv, netdev, "Same up bitmap, don't need to change anything\n"); - err = 0; - goto out; - } - - nicif_info(nic_dev, drv, netdev, "Set valid_up_bitmap: 0x%x\n", - valid_up_bitmap); - - update_valid_up_bitmap(&nic_dev->wanted_dcb_cfg, valid_up_bitmap); - - nic_dev->dcb_changes = DCB_CFG_CHG_ETS | DCB_CFG_CHG_PFC | DCB_CFG_CHG_UP_COS; - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - err = spnic_dcbcfg_setall_to_hw(nic_dev, &nic_dev->wanted_dcb_cfg); - if (err) { - nicif_err(nic_dev, drv, 
netdev, "Reconfig dcb to hw failed\n"); - goto out; - } - - /* Change up/tc map for netdev */ - spnic_set_prio_tc_map(nic_dev); - spnic_update_tx_db_cos(nic_dev); - } - - err = spnic_set_tx_cos_state(nic_dev); - -out: - clear_bit(SPNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags); - - return err; -} - -int spnic_dcbcfg_set_up_bitmap(struct spnic_nic_dev *nic_dev, u8 valid_up_bitmap) -{ - struct spnic_nic_dev *tmp_dev = NULL; - void **uld_array = NULL; - u32 i, idx, num_dev = 0; - int err, rollback_err; - bool up_setted = false; - u8 old_valid_up_bitmap = 0; - u8 max_pf; - - /* Save old valid up bitmap, in case of set failed */ - err = spnic_get_chip_up_bitmap(nic_dev->pdev, &up_setted, &old_valid_up_bitmap); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Get old chip valid up bitmap failed\n"); - return -EFAULT; - } - - if (valid_up_bitmap == old_valid_up_bitmap) { - nicif_info(nic_dev, drv, nic_dev->netdev, "Same valid up bitmap, don't need to change anything\n"); - return 0; - } - - max_pf = sphw_max_pf_num(nic_dev->hwdev); - if (!max_pf) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid max pf number\n"); - return -EFAULT; - } - - uld_array = kcalloc(max_pf, sizeof(void *), GFP_KERNEL); - if (!uld_array) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc uld_array\n"); - return -ENOMEM; - } - - /* Get all pf of this chip */ - err = spnic_get_pf_nic_uld_array(nic_dev->pdev, &num_dev, uld_array); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Get all pf private handle failed\n"); - err = -EFAULT; - goto out; - } - - err = stop_all_ports_flow(uld_array, num_dev); - if (err) - goto out; - - for (idx = 0; idx < num_dev; idx++) { - tmp_dev = (struct spnic_nic_dev *)uld_array[idx]; - err = change_dev_cos_up_map(tmp_dev, valid_up_bitmap); - if (err) { - nicif_err(tmp_dev, drv, tmp_dev->netdev, "Set cos_up map to hw failed\n"); - goto set_err; - } - } - - start_all_ports_flow(uld_array, num_dev); - - spnic_set_chip_up_bitmap(nic_dev->pdev, valid_up_bitmap); - kfree(uld_array); - - return 0; - -set_err: - /* undo all settings */ - for (i = 0; i <= idx; i++) { - tmp_dev = (struct spnic_nic_dev *)uld_array[i]; - rollback_err = change_dev_cos_up_map(tmp_dev, old_valid_up_bitmap); - if (rollback_err) - nicif_err(tmp_dev, drv, tmp_dev->netdev, "Failed to rollback cos_up map to hw\n"); - } - - start_all_ports_flow(uld_array, num_dev); - -out: - kfree(uld_array); - - return err; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h b/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h deleted file mode 100644 index 48ef471237e2795a02e1188356eaf4f8367210aa..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h +++ /dev/null @@ -1,56 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_DCB_H -#define SPNIC_DCB_H - -enum SPNIC_DCB_FLAGS { - SPNIC_DCB_UP_COS_SETTING, - SPNIC_DCB_TRAFFIC_STOPPED, -}; - -struct spnic_cos_cfg { - u8 up; - u8 bw_pct; - u8 tc_id; - u8 prio_sp; /* 0 - DWRR, 1 - SP */ -}; - -struct spnic_tc_cfg { - u8 bw_pct; - u8 prio_sp; /* 0 - DWRR, 1 - SP */ - u16 rsvd; -}; - -struct spnic_dcb_config { - /* The num_tc of the protocol stack is also the same */ - u8 max_cos; - u8 default_cos; - u8 valid_cos_bitmap; - u8 rsvd1; - struct spnic_cos_cfg cos_cfg[SPNIC_DCB_COS_MAX]; - struct spnic_tc_cfg tc_cfg[SPNIC_DCB_TC_MAX]; - - u8 pfc_state; - u8 pfc_en_bitmap; - u16 rsvd2; -}; - -int spnic_dcb_init(struct spnic_nic_dev *nic_dev); -int 
spnic_dcb_reset_hw_config(struct spnic_nic_dev *nic_dev); -int spnic_configure_dcb(struct net_device *netdev); -int spnic_setup_tc(struct net_device *netdev, u8 tc); -u8 spnic_get_valid_up_bitmap(struct spnic_dcb_config *dcb_cfg); -void spnic_dcbcfg_set_pfc_state(struct spnic_nic_dev *nic_dev, u8 pfc_state); -u8 spnic_dcbcfg_get_pfc_state(struct spnic_nic_dev *nic_dev); -void spnic_dcbcfg_set_pfc_pri_en(struct spnic_nic_dev *nic_dev, u8 pfc_en_bitmap); -u8 spnic_dcbcfg_get_pfc_pri_en(struct spnic_nic_dev *nic_dev); -int spnic_dcbcfg_set_ets_up_tc_map(struct spnic_nic_dev *nic_dev, const u8 *up_tc_map); -void spnic_dcbcfg_get_ets_up_tc_map(struct spnic_nic_dev *nic_dev, u8 *up_tc_map); -int spnic_dcbcfg_set_ets_tc_bw(struct spnic_nic_dev *nic_dev, const u8 *tc_bw); -void spnic_dcbcfg_get_ets_tc_bw(struct spnic_nic_dev *nic_dev, u8 *tc_bw); -void spnic_dcbcfg_set_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 tc_prio_bitmap); -void spnic_dcbcfg_get_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 *tc_prio_bitmap); -int spnic_dcbcfg_set_up_bitmap(struct spnic_nic_dev *nic_dev, u8 valid_up_bitmap); -void spnic_update_tx_db_cos(struct spnic_nic_dev *nic_dev); -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c b/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c deleted file mode 100644 index 6037645c0e8b6c15407fcbed2e0abc19f3edda11..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c +++ /dev/null @@ -1,811 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_common.h" -#include "sphw_mt.h" -#include "sphw_crm.h" -#include "spnic_lld.h" -#include "spnic_sriov.h" -#include "spnic_pci_id_tbl.h" -#include "spnic_dev_mgmt.h" - -#define SPNIC_WAIT_TOOL_CNT_TIMEOUT 10000 -#define SPNIC_WAIT_TOOL_MIN_USLEEP_TIME 9900 -#define SPNIC_WAIT_TOOL_MAX_USLEEP_TIME 10000 - -#define MAX_CARD_ID 64 -static unsigned long card_bit_map; - -LIST_HEAD(g_spnic_chip_list); - -void lld_dev_cnt_init(struct spnic_pcidev *pci_adapter) -{ - atomic_set(&pci_adapter->ref_cnt, 0); -} - -void lld_dev_hold(struct spnic_lld_dev *dev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); - - atomic_inc(&pci_adapter->ref_cnt); -} - -void lld_dev_put(struct spnic_lld_dev *dev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); - - atomic_dec(&pci_adapter->ref_cnt); -} - -void wait_lld_dev_unused(struct spnic_pcidev *pci_adapter) -{ - unsigned long end; - - end = jiffies + msecs_to_jiffies(SPNIC_WAIT_TOOL_CNT_TIMEOUT); - do { - if (!atomic_read(&pci_adapter->ref_cnt)) - return; - - /* if sleep 10ms, use usleep_range to be more precise */ - usleep_range(SPNIC_WAIT_TOOL_MIN_USLEEP_TIME, - SPNIC_WAIT_TOOL_MAX_USLEEP_TIME); - } while (time_before(jiffies, end)); -} - -enum spnic_lld_status { - SPNIC_NODE_CHANGE = BIT(0), -}; - -struct spnic_lld_lock { - /* lock for chip list */ - struct mutex lld_mutex; - unsigned long status; - atomic_t dev_ref_cnt; -}; - -struct spnic_lld_lock g_lld_lock; - -#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10minutes */ -#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10minutes */ -#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2minutes */ -#define PRINT_TIMEOUT_INTERVAL 10000 -#define MS_PER_SEC 1000 -#define LLD_LOCK_MIN_USLEEP_TIME 900 -#define 
LLD_LOCK_MAX_USLEEP_TIME 1000 - -/* nodes in chip_node are about to change; tools and drivers can't get a node - * while this is happening - */ -void lld_lock_chip_node(void) -{ - unsigned long end; - bool timeout = true; - u32 loop_cnt; - - mutex_lock(&g_lld_lock.lld_mutex); - - loop_cnt = 0; - end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_NODE_CHANGED); - do { - if (!test_and_set_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status)) { - timeout = false; - break; - } - - loop_cnt++; - if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) - pr_warn("Wait for lld node change complete for %us\n", - loop_cnt / MS_PER_SEC); - - /* when sleeping ~1ms, use usleep_range to be more precise */ - usleep_range(LLD_LOCK_MIN_USLEEP_TIME, - LLD_LOCK_MAX_USLEEP_TIME); - } while (time_before(jiffies, end)); - - if (timeout && test_and_set_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status)) - pr_warn("Wait for lld node change complete timeout when trying to get lld lock\n"); - - loop_cnt = 0; - timeout = true; - end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_NODE_CHANGED); - do { - if (!atomic_read(&g_lld_lock.dev_ref_cnt)) { - timeout = false; - break; - } - - loop_cnt++; - if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) - pr_warn("Wait for lld dev unused for %us, reference count: %d\n", - loop_cnt / MS_PER_SEC, - atomic_read(&g_lld_lock.dev_ref_cnt)); - - /* when sleeping ~1ms, use usleep_range to be more precise */ - usleep_range(LLD_LOCK_MIN_USLEEP_TIME, - LLD_LOCK_MAX_USLEEP_TIME); - } while (time_before(jiffies, end)); - - if (timeout && atomic_read(&g_lld_lock.dev_ref_cnt)) - pr_warn("Wait for lld dev unused timeout\n"); - - mutex_unlock(&g_lld_lock.lld_mutex); -} - -void lld_unlock_chip_node(void) -{ - clear_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status); -} - -/* When tools or other drivers want to get a node of chip_node, use this function - * to prevent the node from being freed - */ -void lld_hold(void) -{ - unsigned long end; - u32 loop_cnt = 0; - - /* ensure no chip node is being changed */ - mutex_lock(&g_lld_lock.lld_mutex); - - end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_HOLD_TIMEOUT); - do { - if (!test_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status)) - break; - - loop_cnt++; - - if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) - pr_warn("Wait lld node change complete for %us\n", - loop_cnt / MS_PER_SEC); - /* when sleeping ~1ms, use usleep_range to be more precise */ - usleep_range(LLD_LOCK_MIN_USLEEP_TIME, - LLD_LOCK_MAX_USLEEP_TIME); - } while (time_before(jiffies, end)); - - if (test_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status)) - pr_warn("Wait lld node change complete timeout when trying to hold lld dev\n"); - - atomic_inc(&g_lld_lock.dev_ref_cnt); - mutex_unlock(&g_lld_lock.lld_mutex); -} - -void lld_put(void) -{ - atomic_dec(&g_lld_lock.dev_ref_cnt); -} - -void spnic_lld_lock_init(void) -{ - mutex_init(&g_lld_lock.lld_mutex); - atomic_set(&g_lld_lock.dev_ref_cnt, 0); -} - -void spnic_get_all_chip_id(void *id_info) -{ - struct nic_card_id *card_id = (struct nic_card_id *)id_info; - struct card_node *chip_node = NULL; - int i = 0; - int id, err; - - lld_hold(); - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - err = sscanf(chip_node->chip_name, SPHW_CHIP_NAME "%d", &id); - if (err < 0) - pr_err("Failed to get spnic id\n"); - card_id->id[i] = id; - i++; - } - lld_put(); - card_id->num = i; -} - -void spnic_get_card_func_info_by_card_name(const char *chip_name, - struct sphw_card_func_info *card_func) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - struct func_pdev_info *pdev_info = NULL; - - card_func->num_pf = 0; - 
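/* Every walk of g_spnic_chip_list is bracketed by lld_hold()/lld_put():
 * lld_hold() waits for any in-flight node change and then raises dev_ref_cnt,
 * which lld_lock_chip_node() drains before mutating the list. A minimal
 * sketch of the pattern, where visit() stands for a hypothetical per-node
 * callback:
 *
 *	lld_hold();
 *	list_for_each_entry(chip_node, &g_spnic_chip_list, node)
 *		visit(chip_node);
 *	lld_put();
 */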
lld_hold(); - - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) - continue; - - list_for_each_entry(dev, &chip_node->func_list, node) { - if (sphw_func_type(dev->hwdev) == TYPE_VF) - continue; - - pdev_info = &card_func->pdev_info[card_func->num_pf]; - pdev_info->bar1_size = - pci_resource_len(dev->pcidev, SPNIC_PF_PCI_CFG_REG_BAR); - pdev_info->bar1_phy_addr = - pci_resource_start(dev->pcidev, SPNIC_PF_PCI_CFG_REG_BAR); - - pdev_info->bar3_size = - pci_resource_len(dev->pcidev, SPNIC_PCI_MGMT_REG_BAR); - pdev_info->bar3_phy_addr = - pci_resource_start(dev->pcidev, SPNIC_PCI_MGMT_REG_BAR); - - card_func->num_pf++; - if (card_func->num_pf >= CARD_MAX_SIZE) { - lld_put(); - return; - } - } - } - - lld_put(); -} - -static bool is_pcidev_match_chip_name(const char *ifname, struct spnic_pcidev *dev, - struct card_node *chip_node, enum func_type type) -{ - if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) { - if (sphw_func_type(dev->hwdev) != type) - return false; - return true; - } - - return false; -} - -static struct spnic_lld_dev *_get_lld_dev_by_chip_name(const char *ifname, enum func_type type) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - lld_hold(); - - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - list_for_each_entry(dev, &chip_node->func_list, node) { - if (is_pcidev_match_chip_name(ifname, dev, chip_node, type)) { - lld_put(); - return &dev->lld_dev; - } - } - } - - lld_put(); - return NULL; -} - -static struct spnic_lld_dev *spnic_get_lld_dev_by_chip_name(const char *ifname) -{ - struct spnic_lld_dev *dev_hw_init = NULL; - struct spnic_lld_dev *dev = NULL; - - /*find hw init device first*/ - dev_hw_init = _get_lld_dev_by_chip_name(ifname, TYPE_UNKNOWN); - if (dev_hw_init) { - if (sphw_func_type(dev_hw_init->hwdev) == TYPE_PPF) - return dev_hw_init; - } - - dev = _get_lld_dev_by_chip_name(ifname, TYPE_PPF); - if (dev) { - if (dev_hw_init) - return dev_hw_init; - - return dev; - } - - dev = _get_lld_dev_by_chip_name(ifname, TYPE_PF); - if (dev) { - if (dev_hw_init) - return dev_hw_init; - - return dev; - } - - dev = _get_lld_dev_by_chip_name(ifname, TYPE_VF); - if (dev) - return dev; - - return NULL; -} - -static bool is_pcidev_match_dev_name(const char *ifname, struct spnic_pcidev *dev, - enum sphw_service_type type) -{ - enum sphw_service_type i; - char nic_uld_name[IFNAMSIZ] = {0}; - int err; - - if (type == SERVICE_T_MAX) { - for (i = SERVICE_T_OVS; i < SERVICE_T_MAX; i++) { - if (!strncmp(dev->uld_dev_name[i], ifname, IFNAMSIZ)) - return true; - } - } else { - if (!strncmp(dev->uld_dev_name[type], ifname, IFNAMSIZ)) - return true; - } - - err = spnic_get_uld_dev_name(dev, SERVICE_T_NIC, (char *)nic_uld_name); - if (!err) { - if (!strncmp(nic_uld_name, ifname, IFNAMSIZ)) - return true; - } - - return false; -} - -static struct spnic_lld_dev *spnic_get_lld_dev_by_dev_name(const char *ifname, - enum sphw_service_type type) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - lld_hold(); - - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - list_for_each_entry(dev, &chip_node->func_list, node) { - if (is_pcidev_match_dev_name(ifname, dev, type)) { - lld_put(); - return &dev->lld_dev; - } - } - } - - lld_put(); - - return NULL; -} - -struct spnic_lld_dev *spnic_get_lld_dev_by_ifname(const char *ifname) -{ - struct spnic_lld_dev *dev = NULL; - - lld_hold(); - /* support search hwdev by chip name, net device name, - * or fc device 
name - */ - /* Find pcidev by chip_name first */ - dev = spnic_get_lld_dev_by_chip_name(ifname); - if (dev) - goto find_dev; - - /* If ifname is not a chip name, - * find pcidev by FC name or netdevice name - */ - dev = spnic_get_lld_dev_by_dev_name(ifname, SERVICE_T_MAX); - if (!dev) { - lld_put(); - return NULL; - } - -find_dev: - lld_dev_hold(dev); - lld_put(); - return dev; -} - -void *spnic_get_hwdev_by_ifname(const char *ifname) -{ - struct spnic_lld_dev *dev = NULL; - - dev = spnic_get_lld_dev_by_ifname(ifname); - if (dev) - return dev->hwdev; - - return NULL; -} - -void *spnic_get_uld_dev_by_ifname(const char *ifname, enum sphw_service_type type) -{ - struct spnic_pcidev *dev = NULL; - struct spnic_lld_dev *lld_dev = NULL; - - if (type >= SERVICE_T_MAX) { - pr_err("Invalid service type: %d\n", type); - return NULL; - } - - lld_dev = spnic_get_lld_dev_by_dev_name(ifname, type); - if (!lld_dev) - return NULL; - - dev = pci_get_drvdata(lld_dev->pdev); - if (dev) - return dev->uld_dev[type]; - - return NULL; -} - -static struct card_node *spnic_get_chip_node_by_hwdev(const void *hwdev) -{ - struct card_node *chip_node = NULL; - struct card_node *node_tmp = NULL; - struct spnic_pcidev *dev = NULL; - - if (!hwdev) - return NULL; - - lld_hold(); - - list_for_each_entry(node_tmp, &g_spnic_chip_list, node) { - if (!chip_node) { - list_for_each_entry(dev, &node_tmp->func_list, node) { - if (dev->hwdev == hwdev) { - chip_node = node_tmp; - break; - } - } - } - } - - lld_put(); - - return chip_node; -} - -int spnic_get_chip_name_by_hwdev(const void *hwdev, char *ifname) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - if (!hwdev || !ifname) - return -EINVAL; - - lld_hold(); - - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - list_for_each_entry(dev, &chip_node->func_list, node) { - if (dev->hwdev == hwdev) { - strncpy(ifname, chip_node->chip_name, IFNAMSIZ - 1); - ifname[IFNAMSIZ - 1] = 0; - lld_put(); - return 0; - } - } - } - - lld_put(); - - return -ENXIO; -} - -void *spnic_get_uld_dev_by_pdev(struct pci_dev *pdev, enum sphw_service_type type) -{ - struct spnic_pcidev *pci_adapter = NULL; - - if (type >= SERVICE_T_MAX) { - pr_err("Invalid service type: %d\n", type); - return NULL; - } - - pci_adapter = pci_get_drvdata(pdev); - if (pci_adapter) - return pci_adapter->uld_dev[type]; - - return NULL; -} - -void *spnic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = NULL; - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - if (!pdev) - return NULL; - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) - return NULL; - - chip_node = pci_adapter->chip_node; - lld_hold(); - list_for_each_entry(dev, &chip_node->func_list, node) { - if (dev->hwdev && sphw_func_type(dev->hwdev) == TYPE_PPF) { - lld_put(); - return dev->hwdev; - } - } - lld_put(); - - return NULL; -} - -/* NOTICE: nictool can't use this function, because this function can't keep the - * tool context mutually exclusive with the remove context - */ -void *spnic_get_ppf_uld_by_pdev(struct pci_dev *pdev, enum sphw_service_type type) -{ - struct spnic_pcidev *pci_adapter = NULL; - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - if (!pdev) - return NULL; - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) - return NULL; - - chip_node = pci_adapter->chip_node; - lld_hold(); - list_for_each_entry(dev, &chip_node->func_list, node) { - if (sphw_func_type(dev->hwdev) == TYPE_PPF) { - lld_put(); - return 
dev->uld_dev[type]; - } - } - lld_put(); - - return NULL; -} - -int spnic_get_pf_nic_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]) -{ - struct spnic_pcidev *dev = pci_get_drvdata(pdev); - struct card_node *chip_node = NULL; - u32 cnt; - - if (!dev || !sphw_support_nic(dev->hwdev, NULL)) - return -EINVAL; - - lld_hold(); - - cnt = 0; - chip_node = dev->chip_node; - list_for_each_entry(dev, &chip_node->func_list, node) { - if (sphw_func_type(dev->hwdev) == TYPE_VF) - continue; - - array[cnt] = dev->uld_dev[SERVICE_T_NIC]; - cnt++; - } - lld_put(); - - *dev_cnt = cnt; - - return 0; -} - -static bool is_func_valid(struct spnic_pcidev *dev) -{ - if (sphw_func_type(dev->hwdev) == TYPE_VF) - return false; - - return true; -} - -int spnic_get_uld_dev_name(struct spnic_pcidev *dev, enum sphw_service_type type, char *ifname) -{ - u32 out_size = IFNAMSIZ; - - if (!g_uld_info[type].ioctl) - return -EFAULT; - - return g_uld_info[type].ioctl(dev->uld_dev[type], GET_ULD_DEV_NAME, - NULL, 0, ifname, &out_size); -} - -void spnic_get_card_info(const void *hwdev, void *bufin) -{ - struct card_node *chip_node = NULL; - struct card_info *info = (struct card_info *)bufin; - struct spnic_pcidev *dev = NULL; - void *fun_hwdev = NULL; - u32 i = 0; - - info->pf_num = 0; - - chip_node = spnic_get_chip_node_by_hwdev(hwdev); - if (!chip_node) - return; - - lld_hold(); - - list_for_each_entry(dev, &chip_node->func_list, node) { - if (!is_func_valid(dev)) - continue; - - fun_hwdev = dev->hwdev; - - if (sphw_support_nic(fun_hwdev, NULL)) { - if (dev->uld_dev[SERVICE_T_NIC]) { - info->pf[i].pf_type |= (u32)BIT(SERVICE_T_NIC); - spnic_get_uld_dev_name(dev, SERVICE_T_NIC, info->pf[i].name); - } - } - - /* to do : get other service info*/ - - if (sphw_func_for_mgmt(fun_hwdev)) - strlcpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ); - - strlcpy(info->pf[i].bus_info, pci_name(dev->pcidev), - sizeof(info->pf[i].bus_info)); - info->pf_num++; - i = info->pf_num; - } - - lld_put(); -} - -struct spnic_sriov_info *spnic_get_sriov_info_by_pcidev(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = NULL; - - if (!pdev) - return NULL; - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) - return NULL; - - return &pci_adapter->sriov_info; -} - -void *spnic_get_hwdev_by_pcidev(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = NULL; - - if (!pdev) - return NULL; - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) - return NULL; - - return pci_adapter->hwdev; -} - -bool spnic_is_in_host(void) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - lld_hold(); - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - list_for_each_entry(dev, &chip_node->func_list, node) { - if (sphw_func_type(dev->hwdev) != TYPE_VF) { - lld_put(); - return true; - } - } - } - - lld_put(); - - return false; -} - -int spnic_get_chip_up_bitmap(struct pci_dev *pdev, bool *is_setted, u8 *valid_up_bitmap) -{ - struct spnic_pcidev *dev = pci_get_drvdata(pdev); - struct card_node *chip_node = NULL; - - if (!dev || !is_setted || !valid_up_bitmap) - return -EINVAL; - - chip_node = dev->chip_node; - *is_setted = chip_node->up_bitmap_setted; - if (chip_node->up_bitmap_setted) - *valid_up_bitmap = chip_node->valid_up_bitmap; - - return 0; -} - -int spnic_set_chip_up_bitmap(struct pci_dev *pdev, u8 valid_up_bitmap) -{ - struct spnic_pcidev *dev = pci_get_drvdata(pdev); - struct card_node *chip_node = NULL; - - if (!dev) - return -EINVAL; - - chip_node = dev->chip_node; - 
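/* The valid-up bitmap written below is cached on the card_node, i.e. per
 * physical card, so every PF on the same chip observes one setting. A minimal
 * sketch of the usual get/set pairing, as done in spnic_dcbcfg_set_up_bitmap(),
 * where new_map is a hypothetical caller-supplied bitmap:
 *
 *	bool set = false;
 *	u8 old_map = 0;
 *
 *	spnic_get_chip_up_bitmap(pdev, &set, &old_map);
 *	if (!set || old_map != new_map)
 *		spnic_set_chip_up_bitmap(pdev, new_map);
 */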
chip_node->up_bitmap_setted = true; - chip_node->valid_up_bitmap = valid_up_bitmap; - - return 0; -} - -static bool chip_node_is_exist(struct spnic_pcidev *pci_adapter, unsigned char *bus_number) -{ - struct card_node *chip_node = NULL; - - if (!pci_is_root_bus(pci_adapter->pcidev->bus)) - *bus_number = pci_adapter->pcidev->bus->number; - - if (*bus_number != 0) { - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - if (chip_node->bus_num == *bus_number) { - pci_adapter->chip_node = chip_node; - return true; - } - } - } else if (pci_adapter->pcidev->device == SPNIC_DEV_ID_VF || - pci_adapter->pcidev->device == SPNIC_DEV_ID_VF_HV) { - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - if (chip_node) { - pci_adapter->chip_node = chip_node; - return true; - } - } - } - - return false; -} - -int alloc_chip_node(struct spnic_pcidev *pci_adapter) -{ - struct card_node *chip_node = NULL; - unsigned char i; - unsigned char bus_number = 0; - - if (chip_node_is_exist(pci_adapter, &bus_number)) - return 0; - - for (i = 0; i < MAX_CARD_ID; i++) { - if (!test_and_set_bit(i, &card_bit_map)) - break; - } - - if (i == MAX_CARD_ID) { - sdk_err(&pci_adapter->pcidev->dev, "Failed to alloc card id\n"); - return -EFAULT; - } - - chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); - if (!chip_node) { - clear_bit(i, &card_bit_map); - sdk_err(&pci_adapter->pcidev->dev, - "Failed to alloc chip node\n"); - return -ENOMEM; - } - - /* bus number */ - chip_node->bus_num = bus_number; - - snprintf(chip_node->chip_name, IFNAMSIZ, "%s%u", SPHW_CHIP_NAME, i); - - sdk_info(&pci_adapter->pcidev->dev, "Add new chip %s to global list succeeded\n", - chip_node->chip_name); - - list_add_tail(&chip_node->node, &g_spnic_chip_list); - - INIT_LIST_HEAD(&chip_node->func_list); - pci_adapter->chip_node = chip_node; - - return 0; -} - -void free_chip_node(struct spnic_pcidev *pci_adapter) -{ - struct card_node *chip_node = pci_adapter->chip_node; - int id, err; - - if (list_empty(&chip_node->func_list)) { - list_del(&chip_node->node); - sdk_info(&pci_adapter->pcidev->dev, "Delete chip %s from global list succeeded\n", - chip_node->chip_name); - err = sscanf(chip_node->chip_name, SPHW_CHIP_NAME "%d", &id); - if (err < 0) - sdk_err(&pci_adapter->pcidev->dev, "Failed to get spnic id\n"); - - clear_bit(id, &card_bit_map); - - kfree(chip_node); - } -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h b/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h deleted file mode 100644 index 8f345769bec52308508cbd11728a23afd8966e7d..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h +++ /dev/null @@ -1,78 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_DEV_MGMT_H -#define SPNIC_DEV_MGMT_H -#include -#include - -#define SPHW_CHIP_NAME "spnic" - -#define SPNIC_VF_PCI_CFG_REG_BAR 0 -#define SPNIC_PF_PCI_CFG_REG_BAR 1 - -#define SPNIC_PCI_INTR_REG_BAR 2 -#define SPNIC_PCI_MGMT_REG_BAR 3 /* Only PF has mgmt bar */ -#define SPNIC_PCI_DB_BAR 4 - -/* Private structure for pcidev */ -struct spnic_pcidev { - struct pci_dev *pcidev; - void *hwdev; - struct card_node *chip_node; - struct spnic_lld_dev lld_dev; - /* Record the service object address, - * such as spnic_dev and toe_dev, fc_dev - */ - void *uld_dev[SERVICE_T_MAX]; - /* Record the service object name */ - char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ]; - /* It is the global variable for the driver to manage - * the linked list of all function devices - */ - struct 
list_head node; - - bool disable_vf_load; - bool disable_srv_load[SERVICE_T_MAX]; - - void __iomem *cfg_reg_base; - void __iomem *intr_reg_base; - void __iomem *mgmt_reg_base; - u64 db_dwqe_len; - u64 db_base_phy; - void __iomem *db_base; - - /* lock for attach/detach uld */ - struct mutex pdev_mutex; - - struct spnic_sriov_info sriov_info; - - /* set when the uld driver is processing an event */ - unsigned long state; - struct pci_device_id id; - - atomic_t ref_cnt; -}; - -extern struct list_head g_spnic_chip_list; - -extern struct spnic_uld_info g_uld_info[SERVICE_T_MAX]; - -int alloc_chip_node(struct spnic_pcidev *pci_adapter); - -void free_chip_node(struct spnic_pcidev *pci_adapter); - -void lld_lock_chip_node(void); - -void lld_unlock_chip_node(void); - -void spnic_lld_lock_init(void); - -void lld_dev_cnt_init(struct spnic_pcidev *pci_adapter); -void wait_lld_dev_unused(struct spnic_pcidev *pci_adapter); - -int spnic_get_uld_dev_name(struct spnic_pcidev *dev, enum sphw_service_type type, char *ifname); - -void *spnic_get_hwdev_by_pcidev(struct pci_dev *pdev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c b/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c deleted file mode 100644 index e3a7e81a601b5d8da58ed30e8839350337f2f7b1..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c +++ /dev/null @@ -1,994 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" -#include "spnic_rx.h" -#include "spnic_rss.h" - -#define COALESCE_ALL_QUEUE 0xFFFF -#define COALESCE_PENDING_LIMIT_UNIT 8 -#define COALESCE_TIMER_CFG_UNIT 5 -#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT) -#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT) - -static void spnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct pci_dev *pdev = nic_dev->pdev; - u8 mgmt_ver[SPHW_MGMT_VERSION_MAX_LEN] = {0}; - int err; - - strlcpy(info->driver, SPNIC_NIC_DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, SPNIC_DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); - - err = sphw_get_mgmt_version(nic_dev->hwdev, mgmt_ver, SPHW_MGMT_VERSION_MAX_LEN, - SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to get fw version\n"); - return; - } - - snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver); -} - -static u32 spnic_get_msglevel(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - return nic_dev->msg_enable; -} - -static void spnic_set_msglevel(struct net_device *netdev, u32 data) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - nic_dev->msg_enable = data; - - nicif_info(nic_dev, drv, netdev, "Set message level: 0x%x\n", data); -} - -int spnic_nway_reset(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct nic_port_info port_info = {0}; - int err; - - err = spnic_get_port_info(nic_dev->hwdev, &port_info, SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, netdev, - "Get port info failed\n"); - return -EFAULT; - } - - if (!port_info.autoneg_state) { - 
nicif_err(nic_dev, drv, netdev, "Autonegotiation is off, don't support to restart it\n"); - return -EINVAL; - } - - err = spnic_set_autoneg(nic_dev->hwdev, true); - if (err) { - nicif_err(nic_dev, drv, netdev, "Restart autonegotiation failed\n"); - return -EFAULT; - } - - nicif_info(nic_dev, drv, netdev, "Restart autonegotiation successfully\n"); - - return 0; -} - -static void spnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, - struct kernel_ethtool_ringparam *kernel_ering, - struct netlink_ext_ack *extack) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - ring->rx_max_pending = SPNIC_MAX_RX_QUEUE_DEPTH; - ring->tx_max_pending = SPNIC_MAX_TX_QUEUE_DEPTH; - ring->rx_pending = nic_dev->rxqs[0].q_depth; - ring->tx_pending = nic_dev->txqs[0].q_depth; -} - -static void spnic_update_qp_depth(struct spnic_nic_dev *nic_dev, u32 sq_depth, u32 rq_depth) -{ - u16 i; - - nic_dev->q_params.sq_depth = sq_depth; - nic_dev->q_params.rq_depth = rq_depth; - for (i = 0; i < nic_dev->max_qps; i++) { - nic_dev->txqs[i].q_depth = sq_depth; - nic_dev->txqs[i].q_mask = sq_depth - 1; - nic_dev->rxqs[i].q_depth = rq_depth; - nic_dev->rxqs[i].q_mask = rq_depth - 1; - } -} - -static int check_ringparam_valid(struct net_device *netdev, struct ethtool_ringparam *ring) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (ring->rx_jumbo_pending || ring->rx_mini_pending) { - nicif_err(nic_dev, drv, netdev, - "Unsupported rx_jumbo_pending/rx_mini_pending\n"); - return -EINVAL; - } - - if (ring->tx_pending > SPNIC_MAX_TX_QUEUE_DEPTH || - ring->tx_pending < SPNIC_MIN_QUEUE_DEPTH || - ring->rx_pending > SPNIC_MAX_RX_QUEUE_DEPTH || - ring->rx_pending < SPNIC_MIN_QUEUE_DEPTH) { - nicif_err(nic_dev, drv, netdev, "Queue depth out of rang tx[%d-%d] rx[%d-%d]\n", - SPNIC_MIN_QUEUE_DEPTH, SPNIC_MAX_TX_QUEUE_DEPTH, - SPNIC_MIN_QUEUE_DEPTH, SPNIC_MAX_RX_QUEUE_DEPTH); - return -EINVAL; - } - - return 0; -} - -static int spnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, - struct kernel_ethtool_ringparam *kernel_ering, - struct netlink_ext_ack *extack) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_dyna_txrxq_params q_params = {0}; - u32 new_sq_depth, new_rq_depth; - int err; - - err = check_ringparam_valid(netdev, ring); - if (err) - return err; - - new_sq_depth = (u32)(1U << (u16)ilog2(ring->tx_pending)); - new_rq_depth = (u32)(1U << (u16)ilog2(ring->rx_pending)); - - if (new_sq_depth == nic_dev->q_params.sq_depth && - new_rq_depth == nic_dev->q_params.rq_depth) - return 0; /* nothing to do */ - - nicif_info(nic_dev, drv, netdev, "Change Tx/Rx ring depth from %u/%u to %u/%u\n", - nic_dev->q_params.sq_depth, nic_dev->q_params.rq_depth, - new_sq_depth, new_rq_depth); - - if (!netif_running(netdev)) { - spnic_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth); - } else { - q_params = nic_dev->q_params; - q_params.sq_depth = new_sq_depth; - q_params.rq_depth = new_rq_depth; - q_params.txqs_res = NULL; - q_params.rxqs_res = NULL; - q_params.irq_cfg = NULL; - - nicif_info(nic_dev, drv, netdev, "Restarting channel\n"); - err = spnic_change_channel_settings(nic_dev, &q_params, NULL, NULL); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to change channel settings\n"); - return -EFAULT; - } - } - - return 0; -} - -static int get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, u16 queue) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_intr_coal_info *interrupt_info = NULL; - - if 
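- /* COALESCE_ALL_QUEUE (0xFFFF): caller did not target a specific queue */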
(queue == COALESCE_ALL_QUEUE) { - /* get tx/rx irq0 as default parameters */ - interrupt_info = &nic_dev->intr_coalesce[0]; - } else { - if (queue >= nic_dev->q_params.num_qps) { - nicif_err(nic_dev, drv, netdev, "Invalid queue_id: %u\n", queue); - return -EINVAL; - } - interrupt_info = &nic_dev->intr_coalesce[queue]; - } - - /* coalescs_timer is in unit of 5us */ - coal->rx_coalesce_usecs = interrupt_info->coalesce_timer_cfg * COALESCE_TIMER_CFG_UNIT; - /* coalescs_frams is in unit of 8 */ - coal->rx_max_coalesced_frames = interrupt_info->pending_limt * COALESCE_PENDING_LIMIT_UNIT; - - /* tx/rx use the same interrupt */ - coal->tx_coalesce_usecs = coal->rx_coalesce_usecs; - coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames; - coal->use_adaptive_rx_coalesce = nic_dev->adaptive_rx_coal; - - coal->pkt_rate_high = (u32)interrupt_info->pkt_rate_high; - coal->rx_coalesce_usecs_high = interrupt_info->rx_usecs_high * COALESCE_TIMER_CFG_UNIT; - coal->rx_max_coalesced_frames_high = interrupt_info->rx_pending_limt_high * - COALESCE_PENDING_LIMIT_UNIT; - - coal->pkt_rate_low = (u32)interrupt_info->pkt_rate_low; - coal->rx_coalesce_usecs_low = interrupt_info->rx_usecs_low * - COALESCE_TIMER_CFG_UNIT; - coal->rx_max_coalesced_frames_low = interrupt_info->rx_pending_limt_low * - COALESCE_PENDING_LIMIT_UNIT; - - return 0; -} - -static int set_queue_coalesce(struct spnic_nic_dev *nic_dev, u16 q_id, - struct spnic_intr_coal_info *coal) -{ - struct spnic_intr_coal_info *intr_coal; - struct interrupt_info info = {0}; - struct net_device *netdev = nic_dev->netdev; - int err; - - intr_coal = &nic_dev->intr_coalesce[q_id]; - if (intr_coal->coalesce_timer_cfg != coal->coalesce_timer_cfg || - intr_coal->pending_limt != coal->pending_limt) - intr_coal->user_set_intr_coal_flag = 1; - - intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg; - intr_coal->pending_limt = coal->pending_limt; - intr_coal->pkt_rate_low = coal->pkt_rate_low; - intr_coal->rx_usecs_low = coal->rx_usecs_low; - intr_coal->rx_pending_limt_low = coal->rx_pending_limt_low; - intr_coal->pkt_rate_high = coal->pkt_rate_high; - intr_coal->rx_usecs_high = coal->rx_usecs_high; - intr_coal->rx_pending_limt_high = coal->rx_pending_limt_high; - - /* netdev not running or qp not in using, - * don't need to set coalesce to hw - */ - if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags) || - q_id >= nic_dev->q_params.num_qps || nic_dev->adaptive_rx_coal) - return 0; - - info.msix_index = nic_dev->q_params.irq_cfg[q_id].msix_entry_idx; - info.lli_set = 0; - info.interrupt_coalesc_set = 1; - info.coalesc_timer_cfg = intr_coal->coalesce_timer_cfg; - info.pending_limt = intr_coal->pending_limt; - info.resend_timer_cfg = intr_coal->resend_timer_cfg; - nic_dev->rxqs[q_id].last_coalesc_timer_cfg = intr_coal->coalesce_timer_cfg; - nic_dev->rxqs[q_id].last_pending_limt = intr_coal->pending_limt; - err = sphw_set_interrupt_cfg(nic_dev->hwdev, info, SPHW_CHANNEL_NIC); - if (err) - nicif_warn(nic_dev, drv, netdev, "Failed to set queue%u coalesce", q_id); - - return err; -} - -static int is_coalesce_exceed_limit(struct net_device *netdev, - const struct ethtool_coalesce *coal) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG) { - nicif_err(nic_dev, drv, netdev, "rx_coalesce_usecs out of range[%d-%d]\n", 0, - COALESCE_MAX_TIMER_CFG); - return -EOPNOTSUPP; - } - - if (coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT) { - nicif_err(nic_dev, drv, netdev, "rx_max_coalesced_frames out of 
range[%d-%d]\n", 0, - COALESCE_MAX_PENDING_LIMIT); - return -EOPNOTSUPP; - } - - if (coal->rx_coalesce_usecs_low > COALESCE_MAX_TIMER_CFG) { - nicif_err(nic_dev, drv, netdev, "rx_coalesce_usecs_low out of range[%d-%d]\n", 0, - COALESCE_MAX_TIMER_CFG); - return -EOPNOTSUPP; - } - - if (coal->rx_max_coalesced_frames_low > COALESCE_MAX_PENDING_LIMIT) { - nicif_err(nic_dev, drv, netdev, "rx_max_coalesced_frames_low out of range[%d-%d]\n", - 0, COALESCE_MAX_PENDING_LIMIT); - return -EOPNOTSUPP; - } - - if (coal->rx_coalesce_usecs_high > COALESCE_MAX_TIMER_CFG) { - nicif_err(nic_dev, drv, netdev, "rx_coalesce_usecs_high out of range[%d-%d]\n", 0, - COALESCE_MAX_TIMER_CFG); - return -EOPNOTSUPP; - } - - if (coal->rx_max_coalesced_frames_high > COALESCE_MAX_PENDING_LIMIT) { - nicif_err(nic_dev, drv, netdev, "rx_max_coalesced_frames_high out of range[%d-%d]\n", - 0, COALESCE_MAX_PENDING_LIMIT); - return -EOPNOTSUPP; - } - - return 0; -} - -static int is_coalesce_legal(struct net_device *netdev, - const struct ethtool_coalesce *coal) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct ethtool_coalesce tmp_coal = {0}; - int err; - - if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) { - nicif_err(nic_dev, drv, netdev, "tx-usecs must be equal to rx-usecs\n"); - return -EINVAL; - } - - if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) { - nicif_err(nic_dev, drv, netdev, "tx-frames must be equal to rx-frames\n"); - return -EINVAL; - } - - tmp_coal.cmd = coal->cmd; - tmp_coal.rx_coalesce_usecs = coal->rx_coalesce_usecs; - tmp_coal.rx_max_coalesced_frames = coal->rx_max_coalesced_frames; - tmp_coal.tx_coalesce_usecs = coal->tx_coalesce_usecs; - tmp_coal.tx_max_coalesced_frames = coal->tx_max_coalesced_frames; - tmp_coal.use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce; - - tmp_coal.pkt_rate_low = coal->pkt_rate_low; - tmp_coal.rx_coalesce_usecs_low = coal->rx_coalesce_usecs_low; - tmp_coal.rx_max_coalesced_frames_low = coal->rx_max_coalesced_frames_low; - - tmp_coal.pkt_rate_high = coal->pkt_rate_high; - tmp_coal.rx_coalesce_usecs_high = coal->rx_coalesce_usecs_high; - tmp_coal.rx_max_coalesced_frames_high = coal->rx_max_coalesced_frames_high; - - if (memcmp(coal, &tmp_coal, sizeof(struct ethtool_coalesce))) { - nicif_err(nic_dev, drv, netdev, "Only support to change rx/tx-usecs and rx/tx-frames\n"); - return -EOPNOTSUPP; - } - - err = is_coalesce_exceed_limit(netdev, coal); - if (err) - return err; - - if (coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT >= - coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT) { - nicif_err(nic_dev, drv, netdev, - "coalesce_usecs_high(%u) must more than coalesce_usecs_low(%u), after dividing %d usecs unit\n", - coal->rx_coalesce_usecs_high, - coal->rx_coalesce_usecs_low, - COALESCE_TIMER_CFG_UNIT); - return -EOPNOTSUPP; - } - - if (coal->rx_max_coalesced_frames_low / COALESCE_PENDING_LIMIT_UNIT >= - coal->rx_max_coalesced_frames_high / COALESCE_PENDING_LIMIT_UNIT) { - nicif_err(nic_dev, drv, netdev, - "coalesced_frames_high(%u) must more than coalesced_frames_low(%u),after dividing %d frames unit\n", - coal->rx_max_coalesced_frames_high, - coal->rx_max_coalesced_frames_low, - COALESCE_PENDING_LIMIT_UNIT); - return -EOPNOTSUPP; - } - - if (coal->pkt_rate_low >= coal->pkt_rate_high) { - nicif_err(nic_dev, drv, netdev, "pkt_rate_high(%u) must more than pkt_rate_low(%u)\n", - coal->pkt_rate_high, - coal->pkt_rate_low); - return -EOPNOTSUPP; - } - - return 0; -} - -#define CHECK_COALESCE_ALIGN(coal, item, unit) \ -do { \ 
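- /* values are programmed in hardware units; warn and report the rounded-down value */ \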
- if ((coal)->item % (unit)) \ - nicif_warn(nic_dev, drv, netdev, \ - "%s in %d units, change to %u\n", \ - #item, (unit), ((coal)->item - \ - (coal)->item % (unit))); \ -} while (0) - -#define CHECK_COALESCE_CHANGED(coal, item, unit, ori_val, obj_str) \ -do { \ - if (((coal)->item / (unit)) != (ori_val)) \ - nicif_info(nic_dev, drv, netdev, \ - "Change %s from %d to %u %s\n", \ - #item, (ori_val) * (unit), \ - ((coal)->item - (coal)->item % (unit)), \ - (obj_str)); \ -} while (0) - -#define CHECK_PKT_RATE_CHANGED(coal, item, ori_val, obj_str) \ -do { \ - if ((coal)->item != (ori_val)) \ - nicif_info(nic_dev, drv, netdev, \ - "Change %s from %llu to %u %s\n", \ - #item, (ori_val), (coal)->item, (obj_str)); \ -} while (0) - -static int set_hw_coal_param(struct spnic_nic_dev *nic_dev, struct spnic_intr_coal_info *intr_coal, - u16 queue) -{ - u16 i; - - if (queue == COALESCE_ALL_QUEUE) { - for (i = 0; i < nic_dev->max_qps; i++) - set_queue_coalesce(nic_dev, i, intr_coal); - } else { - if (queue >= nic_dev->q_params.num_qps) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid queue_id: %u\n", queue); - return -EINVAL; - } - set_queue_coalesce(nic_dev, queue, intr_coal); - } - - return 0; -} - -static int set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, u16 queue) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_intr_coal_info intr_coal = {0}; - struct spnic_intr_coal_info *ori_intr_coal = NULL; - u32 last_adaptive_rx; - char obj_str[32] = {0}; - int err = 0; - - err = is_coalesce_legal(netdev, coal); - if (err) - return err; - - CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT); - CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames, COALESCE_PENDING_LIMIT_UNIT); - CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_high, COALESCE_TIMER_CFG_UNIT); - CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_high, COALESCE_PENDING_LIMIT_UNIT); - CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_low, COALESCE_TIMER_CFG_UNIT); - CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low, COALESCE_PENDING_LIMIT_UNIT); - - if (queue == COALESCE_ALL_QUEUE) { - ori_intr_coal = &nic_dev->intr_coalesce[0]; - snprintf(obj_str, sizeof(obj_str), "for netdev"); - } else { - ori_intr_coal = &nic_dev->intr_coalesce[queue]; - snprintf(obj_str, sizeof(obj_str), "for queue %u", queue); - } - CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT, - ori_intr_coal->coalesce_timer_cfg, obj_str); - CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames, COALESCE_PENDING_LIMIT_UNIT, - ori_intr_coal->pending_limt, obj_str); - CHECK_PKT_RATE_CHANGED(coal, pkt_rate_high, ori_intr_coal->pkt_rate_high, obj_str); - CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_high, COALESCE_TIMER_CFG_UNIT, - ori_intr_coal->rx_usecs_high, obj_str); - CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_high, COALESCE_PENDING_LIMIT_UNIT, - ori_intr_coal->rx_pending_limt_high, obj_str); - CHECK_PKT_RATE_CHANGED(coal, pkt_rate_low, ori_intr_coal->pkt_rate_low, obj_str); - CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_low, COALESCE_TIMER_CFG_UNIT, - ori_intr_coal->rx_usecs_low, obj_str); - CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low, COALESCE_PENDING_LIMIT_UNIT, - ori_intr_coal->rx_pending_limt_low, obj_str); - - intr_coal.coalesce_timer_cfg = (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); - intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames / COALESCE_PENDING_LIMIT_UNIT); - - last_adaptive_rx = nic_dev->adaptive_rx_coal; - nic_dev->adaptive_rx_coal = 
coal->use_adaptive_rx_coalesce; - - intr_coal.pkt_rate_high = coal->pkt_rate_high; - intr_coal.rx_usecs_high = (u8)(coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT); - intr_coal.rx_pending_limt_high = (u8)(coal->rx_max_coalesced_frames_high / - COALESCE_PENDING_LIMIT_UNIT); - - intr_coal.pkt_rate_low = coal->pkt_rate_low; - intr_coal.rx_usecs_low = (u8)(coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT); - intr_coal.rx_pending_limt_low = (u8)(coal->rx_max_coalesced_frames_low / - COALESCE_PENDING_LIMIT_UNIT); - - /* coalesce timer or pending set to zero will disable coalesce */ - if (!nic_dev->adaptive_rx_coal && - (!intr_coal.coalesce_timer_cfg || !intr_coal.pending_limt)) - nicif_warn(nic_dev, drv, netdev, "Coalesce will be disabled\n"); - - /* ensure coalesce paramester will not be changed in auto - * moderation work - */ - if (SPHW_CHANNEL_RES_VALID(nic_dev)) { - if (!nic_dev->adaptive_rx_coal) - cancel_delayed_work_sync(&nic_dev->moderation_task); - else if (!last_adaptive_rx) - queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, - SPNIC_MODERATONE_DELAY); - } - - return set_hw_coal_param(nic_dev, &intr_coal, queue); -} - -static int spnic_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal, - struct kernel_ethtool_coalesce *kernel_coal, - struct netlink_ext_ack *ext_ack) -{ - return get_coalesce(netdev, coal, COALESCE_ALL_QUEUE); -} - -static int spnic_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal, - struct kernel_ethtool_coalesce *kernel_coal, - struct netlink_ext_ack *ext_ack) -{ - return set_coalesce(netdev, coal, COALESCE_ALL_QUEUE); -} - -static int spnic_get_per_queue_coalesce(struct net_device *netdev, u32 queue, - struct ethtool_coalesce *coal) -{ - return get_coalesce(netdev, coal, queue); -} - -static int spnic_set_per_queue_coalesce(struct net_device *netdev, u32 queue, - struct ethtool_coalesce *coal) -{ - return set_coalesce(netdev, coal, queue); -} - -static int spnic_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - err = spnic_set_led_status(nic_dev->hwdev, MAG_CMD_LED_TYPE_ALARM, - MAG_CMD_LED_MODE_FORCE_BLINK_2HZ); - if (err) - nicif_err(nic_dev, drv, netdev, "Set LED blinking in 2HZ failed\n"); - else - nicif_info(nic_dev, drv, netdev, "Set LED blinking in 2HZ success\n"); - break; - - case ETHTOOL_ID_INACTIVE: - err = spnic_set_led_status(nic_dev->hwdev, MAG_CMD_LED_TYPE_ALARM, - MAG_CMD_LED_MODE_DEFAULT); - if (err) - nicif_err(nic_dev, drv, netdev, "Reset LED to original status failed\n"); - else - nicif_info(nic_dev, drv, netdev, "Reset LED to original status success\n"); - break; - - default: - return -EOPNOTSUPP; - } - - return err; -} - -static void spnic_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct nic_pause_config nic_pause = {0}; - int err; - - err = spnic_get_pause_info(nic_dev->hwdev, &nic_pause); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to get pauseparam from hw\n"); - } else { - pause->autoneg = nic_pause.auto_neg; - pause->rx_pause = nic_pause.rx_pause; - pause->tx_pause = nic_pause.tx_pause; - } -} - -static int spnic_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct nic_pause_config nic_pause = {0}; - struct nic_port_info port_info = {0}; - 
int err; - - err = spnic_get_port_info(nic_dev->hwdev, &port_info, SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to get auto-negotiation state\n"); - return -EFAULT; - } - - if (pause->autoneg != port_info.autoneg_state) { - nicif_err(nic_dev, drv, netdev, "To change autoneg please use: ethtool -s autoneg \n"); - return -EOPNOTSUPP; - } - - if (nic_dev->hw_dcb_cfg.pfc_state) { - nicif_err(nic_dev, drv, netdev, "Can not set pause when pfc is enable\n"); - return -EPERM; - } - - nic_pause.auto_neg = (u8)pause->autoneg; - nic_pause.rx_pause = (u8)pause->rx_pause; - nic_pause.tx_pause = (u8)pause->tx_pause; - - err = spnic_set_pause_info(nic_dev->hwdev, nic_pause); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to set pauseparam\n"); - return -EFAULT; - } - - nicif_info(nic_dev, drv, netdev, "Set pause options, tx: %s, rx: %s\n", - pause->tx_pause ? "on" : "off", - pause->rx_pause ? "on" : "off"); - - return 0; -} - -static int spnic_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u8 sfp_type = 0; - u8 sfp_type_ext = 0; - int err; - - err = spnic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext); - if (err) - return err; - - switch (sfp_type) { - case MODULE_TYPE_SFP: - modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; - break; - case MODULE_TYPE_QSFP: - modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; - break; - case MODULE_TYPE_QSFP_PLUS: - if (sfp_type_ext >= 0x3) { - modinfo->type = ETH_MODULE_SFF_8636; - modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; - - } else { - modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; - } - break; - case MODULE_TYPE_QSFP28: - modinfo->type = ETH_MODULE_SFF_8636; - modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; - break; - default: - nicif_warn(nic_dev, drv, netdev, "Optical module unknown: 0x%x\n", sfp_type); - return -EINVAL; - } - - return 0; -} - -static int spnic_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; - int err; - - if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE)) - return -EINVAL; - - memset(data, 0, ee->len); - - err = spnic_get_sfp_eeprom(nic_dev->hwdev, (u8 *)sfp_data, ee->len); - if (err) - return err; - - memcpy(data, sfp_data + ee->offset, ee->len); - - return 0; -} - -#define SPNIC_PRIV_FLAGS_SYMM_RSS BIT(0) - -static u32 spnic_get_priv_flags(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u32 priv_flags = 0; - - if (test_bit(SPNIC_SAME_RXTX, &nic_dev->flags)) - priv_flags |= SPNIC_PRIV_FLAGS_SYMM_RSS; - - return priv_flags; -} - -static int spnic_set_priv_flags(struct net_device *netdev, u32 priv_flags) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (priv_flags & SPNIC_PRIV_FLAGS_SYMM_RSS) { - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, "Failed to open Symmetric RSS while DCB is enabled\n"); - return -EOPNOTSUPP; - } - set_bit(SPNIC_SAME_RXTX, &nic_dev->flags); - } else { - clear_bit(SPNIC_SAME_RXTX, &nic_dev->flags); - } - - return 0; -} - -#define BROADCAST_PACKET_SIM 0xFF -#define UNICAST_PACKET_SIM 0xFE -#define IP_PROTOCOL_TYPE 0x08 -#define IP_PROTOCOL_COMPLEMENT 0x00 - -#define PORT_DOWN_ERR_IDX 0 -#define LP_DEFAULT_TIME 5 /* seconds */ 
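- /* 1514 bytes = 1500-byte MTU payload plus 14-byte Ethernet header */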
-#define LP_PKT_LEN 1514 - -#define TEST_TIME_MULTIPLE 5 -static int spnic_run_lp_test(struct spnic_nic_dev *nic_dev, u32 test_time) -{ - u32 cnt = test_time * TEST_TIME_MULTIPLE; - struct sk_buff *skb = NULL; - struct sk_buff *skb_tmp = NULL; - u8 *test_data = NULL; - u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf; - struct net_device *netdev = nic_dev->netdev; - u32 i; - u8 j; - - skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC); - if (!skb_tmp) { - nicif_err(nic_dev, drv, netdev, "Alloc xmit skb template failed for loopback test\n"); - return -ENOMEM; - } - - test_data = __skb_put(skb_tmp, LP_PKT_LEN); - - memset(test_data, BROADCAST_PACKET_SIM, ETH_ALEN + ETH_ALEN); - - test_data[ETH_ALEN] = UNICAST_PACKET_SIM; - test_data[ETH_ALEN + ETH_ALEN] = IP_PROTOCOL_TYPE; - test_data[ETH_ALEN + ETH_ALEN + 1] = IP_PROTOCOL_COMPLEMENT; - - for (i = ETH_HLEN; i < LP_PKT_LEN; i++) - test_data[i] = i & 0xFF; - - skb_tmp->queue_mapping = 0; - skb_tmp->ip_summed = CHECKSUM_COMPLETE; - skb_tmp->dev = netdev; - - for (i = 0; i < cnt; i++) { - nic_dev->lb_test_rx_idx = 0; - memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN); - - for (j = 0; j < LP_PKT_CNT; j++) { - skb = pskb_copy(skb_tmp, GFP_ATOMIC); - if (!skb) { - dev_kfree_skb_any(skb_tmp); - nicif_err(nic_dev, drv, netdev, "Copy skb failed for loopback test\n"); - return -ENOMEM; - } - - /* mark index for every pkt */ - skb->data[LP_PKT_LEN - 1] = j; - - if (spnic_lb_xmit_frame(skb, netdev)) { - dev_kfree_skb_any(skb); - dev_kfree_skb_any(skb_tmp); - nicif_err(nic_dev, drv, netdev, "Xmit pkt failed for loopback test\n"); - return -EBUSY; - } - } - - /* wait till all pkts received to RX buffer */ - msleep(200); - - for (j = 0; j < LP_PKT_CNT; j++) { - if (memcmp((lb_test_rx_buf + (j * LP_PKT_LEN)), - skb_tmp->data, (LP_PKT_LEN - 1)) || - (*(lb_test_rx_buf + ((j * LP_PKT_LEN) + - (LP_PKT_LEN - 1))) != j)) { - dev_kfree_skb_any(skb_tmp); - nicif_err(nic_dev, drv, netdev, - "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n", - (j + (i * LP_PKT_CNT)), (LP_PKT_LEN - 1), - *(lb_test_rx_buf + - ((j * LP_PKT_LEN) + (LP_PKT_LEN - 1)))); - return -EIO; - } - } - } - - dev_kfree_skb_any(skb_tmp); - nicif_info(nic_dev, drv, netdev, "Loopback test succeed.\n"); - return 0; -} - -enum diag_test_index { - INTERNAL_LP_TEST = 0, - EXTERNAL_LP_TEST = 1, - DIAG_TEST_MAX = 2, -}; - -#define SPNIC_INTERNAL_LP_MODE 5 -static int do_lp_test(struct spnic_nic_dev *nic_dev, u32 *flags, u32 test_time, - enum diag_test_index *test_index) -{ - struct net_device *netdev = nic_dev->netdev; - u8 *lb_test_rx_buf = NULL; - int err = 0; - - if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) { - *test_index = INTERNAL_LP_TEST; - if (spnic_set_loopback_mode(nic_dev->hwdev, SPNIC_INTERNAL_LP_MODE, true)) { - nicif_err(nic_dev, drv, netdev, - "Failed to set port loopback mode before loopback test\n"); - return -EFAULT; - } - } else { - *test_index = EXTERNAL_LP_TEST; - } - - lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN); - if (!lb_test_rx_buf) { - nicif_err(nic_dev, drv, netdev, "Failed to alloc RX buffer for loopback test\n"); - err = -ENOMEM; - } else { - nic_dev->lb_test_rx_buf = lb_test_rx_buf; - nic_dev->lb_pkt_len = LP_PKT_LEN; - set_bit(SPNIC_LP_TEST, &nic_dev->flags); - - if (spnic_run_lp_test(nic_dev, test_time)) - err = -EFAULT; - - clear_bit(SPNIC_LP_TEST, &nic_dev->flags); - msleep(100); - vfree(lb_test_rx_buf); - nic_dev->lb_test_rx_buf = NULL; - } - - if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) { - if (spnic_set_loopback_mode(nic_dev->hwdev, SPNIC_INTERNAL_LP_MODE, 
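- /* false: take the port back out of internal loopback after the test */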
false)) { - nicif_err(nic_dev, drv, netdev, - "Failed to cancel port loopback mode after loopback test\n"); - err = -EFAULT; - } - } else { - *flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; - } - - return err; -} - -void spnic_lp_test(struct net_device *netdev, struct ethtool_test *eth_test, - u64 *data, u32 test_time) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - enum diag_test_index test_index = 0; - u8 link_status = 0; - int err; - - /* don't support loopback test when netdev is closed. */ - if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, - "Do not support loopback test when netdev is closed\n"); - eth_test->flags |= ETH_TEST_FL_FAILED; - data[PORT_DOWN_ERR_IDX] = 1; - return; - } - - if (test_time == 0) - test_time = LP_DEFAULT_TIME; - - netif_carrier_off(netdev); - netif_tx_disable(netdev); - - err = do_lp_test(nic_dev, ð_test->flags, test_time, &test_index); - if (err) { - eth_test->flags |= ETH_TEST_FL_FAILED; - data[test_index] = 1; - } - - netif_tx_wake_all_queues(netdev); - - err = spnic_get_link_state(nic_dev->hwdev, &link_status); - if (!err && link_status) - netif_carrier_on(netdev); -} - -static void spnic_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) -{ - memset(data, 0, DIAG_TEST_MAX * sizeof(u64)); - - spnic_lp_test(netdev, eth_test, data, 0); -} - -static const struct ethtool_ops spnic_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_PKT_RATE_RX_USECS, - .get_link_ksettings = spnic_get_link_ksettings, - .set_link_ksettings = spnic_set_link_ksettings, - - .get_drvinfo = spnic_get_drvinfo, - .get_msglevel = spnic_get_msglevel, - .set_msglevel = spnic_set_msglevel, - .nway_reset = spnic_nway_reset, - .get_link = ethtool_op_get_link, - .get_ringparam = spnic_get_ringparam, - .set_ringparam = spnic_set_ringparam, - .get_pauseparam = spnic_get_pauseparam, - .set_pauseparam = spnic_set_pauseparam, - .get_sset_count = spnic_get_sset_count, - .get_ethtool_stats = spnic_get_ethtool_stats, - .get_strings = spnic_get_strings, - - .self_test = spnic_diag_test, - .set_phys_id = spnic_set_phys_id, - - .get_coalesce = spnic_get_coalesce, - .set_coalesce = spnic_set_coalesce, - .get_per_queue_coalesce = spnic_get_per_queue_coalesce, - .set_per_queue_coalesce = spnic_set_per_queue_coalesce, - - .get_rxnfc = spnic_get_rxnfc, - .set_rxnfc = spnic_set_rxnfc, - .get_priv_flags = spnic_get_priv_flags, - .set_priv_flags = spnic_set_priv_flags, - - .get_channels = spnic_get_channels, - .set_channels = spnic_set_channels, - - .get_module_info = spnic_get_module_info, - .get_module_eeprom = spnic_get_module_eeprom, - - .get_rxfh_indir_size = spnic_get_rxfh_indir_size, - .get_rxfh_key_size = spnic_get_rxfh_key_size, - .get_rxfh = spnic_get_rxfh, - .set_rxfh = spnic_set_rxfh, -}; - -static const struct ethtool_ops spnicvf_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_PKT_RATE_RX_USECS, - .get_link_ksettings = spnic_get_link_ksettings, - .get_drvinfo = spnic_get_drvinfo, - .get_msglevel = spnic_get_msglevel, - .set_msglevel = spnic_set_msglevel, - .get_link = ethtool_op_get_link, - .get_ringparam = spnic_get_ringparam, - - .set_ringparam = spnic_set_ringparam, - .get_sset_count = spnic_get_sset_count, - .get_ethtool_stats = spnic_get_ethtool_stats, - .get_strings = spnic_get_strings, - - .get_coalesce = spnic_get_coalesce, - .set_coalesce = spnic_set_coalesce, - .get_per_queue_coalesce = spnic_get_per_queue_coalesce, - .set_per_queue_coalesce 
= spnic_set_per_queue_coalesce, - - .get_rxnfc = spnic_get_rxnfc, - .set_rxnfc = spnic_set_rxnfc, - .get_priv_flags = spnic_get_priv_flags, - .set_priv_flags = spnic_set_priv_flags, - - .get_channels = spnic_get_channels, - .set_channels = spnic_set_channels, - - .get_rxfh_indir_size = spnic_get_rxfh_indir_size, - .get_rxfh_key_size = spnic_get_rxfh_key_size, - .get_rxfh = spnic_get_rxfh, - .set_rxfh = spnic_set_rxfh, -}; - -void spnic_set_ethtool_ops(struct net_device *netdev) -{ - netdev->ethtool_ops = &spnic_ethtool_ops; -} - -void spnicvf_set_ethtool_ops(struct net_device *netdev) -{ - netdev->ethtool_ops = &spnicvf_ethtool_ops; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c b/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c deleted file mode 100644 index 5a830e3454d498cc0402b48eda42f8c785bde3ed..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c +++ /dev/null @@ -1,1035 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "sphw_mt.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" -#include "spnic_rx.h" - -struct spnic_stats { - char name[ETH_GSTRING_LEN]; - u32 size; - int offset; -}; - -#define SPNIC_NETDEV_STAT(_stat_item) { \ - .name = #_stat_item, \ - .size = sizeof_field(struct rtnl_link_stats64, _stat_item), \ - .offset = offsetof(struct rtnl_link_stats64, _stat_item) \ -} - -static struct spnic_stats spnic_netdev_stats[] = { - SPNIC_NETDEV_STAT(rx_packets), - SPNIC_NETDEV_STAT(tx_packets), - SPNIC_NETDEV_STAT(rx_bytes), - SPNIC_NETDEV_STAT(tx_bytes), - SPNIC_NETDEV_STAT(rx_errors), - SPNIC_NETDEV_STAT(tx_errors), - SPNIC_NETDEV_STAT(rx_dropped), - SPNIC_NETDEV_STAT(tx_dropped), - SPNIC_NETDEV_STAT(multicast), - SPNIC_NETDEV_STAT(collisions), - SPNIC_NETDEV_STAT(rx_length_errors), - SPNIC_NETDEV_STAT(rx_over_errors), - SPNIC_NETDEV_STAT(rx_crc_errors), - SPNIC_NETDEV_STAT(rx_frame_errors), - SPNIC_NETDEV_STAT(rx_fifo_errors), - SPNIC_NETDEV_STAT(rx_missed_errors), - SPNIC_NETDEV_STAT(tx_aborted_errors), - SPNIC_NETDEV_STAT(tx_carrier_errors), - SPNIC_NETDEV_STAT(tx_fifo_errors), - SPNIC_NETDEV_STAT(tx_heartbeat_errors), -}; - -#define SPNIC_NIC_STAT(_stat_item) { \ - .name = #_stat_item, \ - .size = sizeof_field(struct spnic_nic_stats, _stat_item), \ - .offset = offsetof(struct spnic_nic_stats, _stat_item) \ -} - -static struct spnic_stats spnic_nic_dev_stats[] = { - SPNIC_NIC_STAT(netdev_tx_timeout), -}; - -static struct spnic_stats spnic_nic_dev_stats_extern[] = { - SPNIC_NIC_STAT(tx_carrier_off_drop), - SPNIC_NIC_STAT(tx_invalid_qid), -}; - -#define SPNIC_RXQ_STAT(_stat_item) { \ - .name = "rxq%d_"#_stat_item, \ - .size = sizeof_field(struct spnic_rxq_stats, _stat_item), \ - .offset = offsetof(struct spnic_rxq_stats, _stat_item) \ -} - -#define SPNIC_TXQ_STAT(_stat_item) { \ - .name = "txq%d_"#_stat_item, \ - .size = sizeof_field(struct spnic_txq_stats, _stat_item), \ - .offset = offsetof(struct spnic_txq_stats, _stat_item) \ -} - -static struct spnic_stats spnic_rx_queue_stats[] = { - SPNIC_RXQ_STAT(packets), - SPNIC_RXQ_STAT(bytes), - SPNIC_RXQ_STAT(errors), - SPNIC_RXQ_STAT(csum_errors), - SPNIC_RXQ_STAT(other_errors), - SPNIC_RXQ_STAT(dropped), - SPNIC_RXQ_STAT(xdp_dropped), - 
SPNIC_RXQ_STAT(rx_buf_empty), -}; - -static struct spnic_stats spnic_rx_queue_stats_extern[] = { - SPNIC_RXQ_STAT(alloc_skb_err), - SPNIC_RXQ_STAT(alloc_rx_buf_err), - SPNIC_RXQ_STAT(xdp_large_pkt), -}; - -static struct spnic_stats spnic_tx_queue_stats[] = { - SPNIC_TXQ_STAT(packets), - SPNIC_TXQ_STAT(bytes), - SPNIC_TXQ_STAT(busy), - SPNIC_TXQ_STAT(wake), - SPNIC_TXQ_STAT(dropped), -}; - -static struct spnic_stats spnic_tx_queue_stats_extern[] = { - SPNIC_TXQ_STAT(skb_pad_err), - SPNIC_TXQ_STAT(frag_len_overflow), - SPNIC_TXQ_STAT(offload_cow_skb_err), - SPNIC_TXQ_STAT(map_frag_err), - SPNIC_TXQ_STAT(unknown_tunnel_pkt), - SPNIC_TXQ_STAT(frag_size_err), -}; - -#define SPNIC_FUNC_STAT(_stat_item) { \ - .name = #_stat_item, \ - .size = sizeof_field(struct spnic_vport_stats, _stat_item), \ - .offset = offsetof(struct spnic_vport_stats, _stat_item) \ -} - -static struct spnic_stats spnic_function_stats[] = { - SPNIC_FUNC_STAT(tx_unicast_pkts_vport), - SPNIC_FUNC_STAT(tx_unicast_bytes_vport), - SPNIC_FUNC_STAT(tx_multicast_pkts_vport), - SPNIC_FUNC_STAT(tx_multicast_bytes_vport), - SPNIC_FUNC_STAT(tx_broadcast_pkts_vport), - SPNIC_FUNC_STAT(tx_broadcast_bytes_vport), - - SPNIC_FUNC_STAT(rx_unicast_pkts_vport), - SPNIC_FUNC_STAT(rx_unicast_bytes_vport), - SPNIC_FUNC_STAT(rx_multicast_pkts_vport), - SPNIC_FUNC_STAT(rx_multicast_bytes_vport), - SPNIC_FUNC_STAT(rx_broadcast_pkts_vport), - SPNIC_FUNC_STAT(rx_broadcast_bytes_vport), - - SPNIC_FUNC_STAT(tx_discard_vport), - SPNIC_FUNC_STAT(rx_discard_vport), - SPNIC_FUNC_STAT(tx_err_vport), - SPNIC_FUNC_STAT(rx_err_vport), -}; - -#define SPNIC_PORT_STAT(_stat_item) { \ - .name = #_stat_item, \ - .size = sizeof_field(struct mag_cmd_port_stats, _stat_item), \ - .offset = offsetof(struct mag_cmd_port_stats, _stat_item) \ -} - -static struct spnic_stats spnic_port_stats[] = { - SPNIC_PORT_STAT(mac_rx_total_pkt_num), - SPNIC_PORT_STAT(mac_rx_total_oct_num), - SPNIC_PORT_STAT(mac_rx_bad_pkt_num), - SPNIC_PORT_STAT(mac_rx_bad_oct_num), - SPNIC_PORT_STAT(mac_rx_good_pkt_num), - SPNIC_PORT_STAT(mac_rx_good_oct_num), - SPNIC_PORT_STAT(mac_rx_uni_pkt_num), - SPNIC_PORT_STAT(mac_rx_multi_pkt_num), - SPNIC_PORT_STAT(mac_rx_broad_pkt_num), - SPNIC_PORT_STAT(mac_tx_total_pkt_num), - SPNIC_PORT_STAT(mac_tx_total_oct_num), - SPNIC_PORT_STAT(mac_tx_bad_pkt_num), - SPNIC_PORT_STAT(mac_tx_bad_oct_num), - SPNIC_PORT_STAT(mac_tx_good_pkt_num), - SPNIC_PORT_STAT(mac_tx_good_oct_num), - SPNIC_PORT_STAT(mac_tx_uni_pkt_num), - SPNIC_PORT_STAT(mac_tx_multi_pkt_num), - SPNIC_PORT_STAT(mac_tx_broad_pkt_num), - SPNIC_PORT_STAT(mac_rx_fragment_pkt_num), - SPNIC_PORT_STAT(mac_rx_undersize_pkt_num), - SPNIC_PORT_STAT(mac_rx_undermin_pkt_num), - SPNIC_PORT_STAT(mac_rx_64_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_65_127_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_128_255_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_256_511_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_1519_max_good_pkt_num), - SPNIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num), - SPNIC_PORT_STAT(mac_rx_oversize_pkt_num), - SPNIC_PORT_STAT(mac_rx_jabber_pkt_num), - SPNIC_PORT_STAT(mac_rx_pause_num), - SPNIC_PORT_STAT(mac_rx_pfc_pkt_num), - 
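- /* per-priority (0-7) PFC frame counters follow */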
SPNIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num), - SPNIC_PORT_STAT(mac_rx_control_pkt_num), - SPNIC_PORT_STAT(mac_rx_sym_err_pkt_num), - SPNIC_PORT_STAT(mac_rx_fcs_err_pkt_num), - SPNIC_PORT_STAT(mac_rx_send_app_good_pkt_num), - SPNIC_PORT_STAT(mac_rx_send_app_bad_pkt_num), - SPNIC_PORT_STAT(mac_tx_fragment_pkt_num), - SPNIC_PORT_STAT(mac_tx_undersize_pkt_num), - SPNIC_PORT_STAT(mac_tx_undermin_pkt_num), - SPNIC_PORT_STAT(mac_tx_64_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_65_127_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_128_255_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_256_511_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_1519_max_good_pkt_num), - SPNIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num), - SPNIC_PORT_STAT(mac_tx_oversize_pkt_num), - SPNIC_PORT_STAT(mac_tx_jabber_pkt_num), - SPNIC_PORT_STAT(mac_tx_pause_num), - SPNIC_PORT_STAT(mac_tx_pfc_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num), - SPNIC_PORT_STAT(mac_tx_control_pkt_num), - SPNIC_PORT_STAT(mac_tx_err_all_pkt_num), - SPNIC_PORT_STAT(mac_tx_from_app_good_pkt_num), - SPNIC_PORT_STAT(mac_tx_from_app_bad_pkt_num), -}; - -static char g_spnic_priv_flags_strings[][ETH_GSTRING_LEN] = { - "Symmetric-RSS", -}; - -u32 spnic_get_io_stats_size(struct spnic_nic_dev *nic_dev) -{ - u32 count; - - count = ARRAY_LEN(spnic_nic_dev_stats) + - ARRAY_LEN(spnic_nic_dev_stats_extern) + - (ARRAY_LEN(spnic_tx_queue_stats) + - ARRAY_LEN(spnic_tx_queue_stats_extern) + - ARRAY_LEN(spnic_rx_queue_stats) + - ARRAY_LEN(spnic_rx_queue_stats_extern)) * nic_dev->max_qps; - - return count; -} - -#define GET_VALUE_OF_PTR(size, ptr) ( \ - (size) == sizeof(u64) ? *(u64 *)(ptr) : \ - (size) == sizeof(u32) ? *(u32 *)(ptr) : \ - (size) == sizeof(u16) ? 
*(u16 *)(ptr) : *(u8 *)(ptr) \ -) - -#define DEV_STATS_PACK(items, item_idx, array, stats_ptr) do { \ - int j; \ - for (j = 0; j < ARRAY_LEN(array); j++) { \ - memcpy((items)[item_idx].name, (array)[j].name, \ - SPNIC_SHOW_ITEM_LEN); \ - (items)[item_idx].hexadecimal = 0; \ - (items)[item_idx].value = \ - GET_VALUE_OF_PTR((array)[j].size, \ - (char *)(stats_ptr) + (array)[j].offset); \ - (item_idx)++; \ - } \ -} while (0) - -#define QUEUE_STATS_PACK(items, item_idx, array, stats_ptr, qid) do { \ - int j; \ - for (j = 0; j < ARRAY_LEN(array); j++) { \ - memcpy((items)[item_idx].name, (array)[j].name, \ - SPNIC_SHOW_ITEM_LEN); \ - snprintf((items)[item_idx].name, SPNIC_SHOW_ITEM_LEN, \ - (array)[j].name, (qid)); \ - (items)[item_idx].hexadecimal = 0; \ - (items)[item_idx].value = \ - GET_VALUE_OF_PTR((array)[j].size, \ - (char *)(stats_ptr) + (array)[j].offset); \ - (item_idx)++; \ - } \ -} while (0) - -void spnic_get_io_stats(struct spnic_nic_dev *nic_dev, void *stats) -{ - struct spnic_show_item *items = stats; - int item_idx = 0; - u16 qid; - - DEV_STATS_PACK(items, item_idx, spnic_nic_dev_stats, &nic_dev->stats); - DEV_STATS_PACK(items, item_idx, spnic_nic_dev_stats_extern, - &nic_dev->stats); - - for (qid = 0; qid < nic_dev->max_qps; qid++) { - QUEUE_STATS_PACK(items, item_idx, spnic_tx_queue_stats, - &nic_dev->txqs[qid].txq_stats, qid); - QUEUE_STATS_PACK(items, item_idx, spnic_tx_queue_stats_extern, - &nic_dev->txqs[qid].txq_stats, qid); - } - - for (qid = 0; qid < nic_dev->max_qps; qid++) { - QUEUE_STATS_PACK(items, item_idx, spnic_rx_queue_stats, - &nic_dev->rxqs[qid].rxq_stats, qid); - QUEUE_STATS_PACK(items, item_idx, spnic_rx_queue_stats_extern, - &nic_dev->rxqs[qid].rxq_stats, qid); - } -} - -static char spnic_test_strings[][ETH_GSTRING_LEN] = { - "Internal lb test (on/offline)", - "External lb test (external_lb)", -}; - -int spnic_get_sset_count(struct net_device *netdev, int sset) -{ - int count = 0, q_num = 0; - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - switch (sset) { - case ETH_SS_TEST: - return ARRAY_LEN(spnic_test_strings); - case ETH_SS_STATS: - q_num = nic_dev->q_params.num_qps; - count = ARRAY_LEN(spnic_netdev_stats) + - ARRAY_LEN(spnic_nic_dev_stats) + - ARRAY_LEN(spnic_function_stats) + - (ARRAY_LEN(spnic_tx_queue_stats) + - ARRAY_LEN(spnic_rx_queue_stats)) * q_num; - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - count += ARRAY_LEN(spnic_port_stats); - - return count; - case ETH_SS_PRIV_FLAGS: - return ARRAY_LEN(g_spnic_priv_flags_strings); - default: - return -EOPNOTSUPP; - } -} - -static void get_drv_queue_stats(struct spnic_nic_dev *nic_dev, u64 *data) -{ - struct spnic_txq_stats txq_stats; - struct spnic_rxq_stats rxq_stats; - u16 i = 0, j = 0, qid = 0; - char *p = NULL; - - for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { - if (!nic_dev->txqs) - break; - - spnic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats); - for (j = 0; j < ARRAY_LEN(spnic_tx_queue_stats); j++, i++) { - p = (char *)(&txq_stats) + - spnic_tx_queue_stats[j].offset; - data[i] = (spnic_tx_queue_stats[j].size == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } - } - - for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { - if (!nic_dev->rxqs) - break; - - spnic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats); - for (j = 0; j < ARRAY_LEN(spnic_rx_queue_stats); j++, i++) { - p = (char *)(&rxq_stats) + - spnic_rx_queue_stats[j].offset; - data[i] = (spnic_rx_queue_stats[j].size == - sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; - } - } -} - -static u16 get_ethtool_port_stats(struct spnic_nic_dev *nic_dev, u64 *data) -{ - struct mag_cmd_port_stats *port_stats; - char *p = NULL; - u16 i = 0, j = 0; - int err; - - port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); - if (!port_stats) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to malloc port stats\n"); - memset(&data[i], 0, - ARRAY_LEN(spnic_port_stats) * sizeof(*data)); - i += ARRAY_LEN(spnic_port_stats); - return i; - } - - err = spnic_get_phy_port_stats(nic_dev->hwdev, port_stats); - if (err) - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to get port stats from fw\n"); - - for (j = 0; j < ARRAY_LEN(spnic_port_stats); j++, i++) { - p = (char *)(port_stats) + spnic_port_stats[j].offset; - data[i] = (spnic_port_stats[j].size == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } - - kfree(port_stats); - - return i; -} - -void spnic_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct rtnl_link_stats64 temp; - const struct rtnl_link_stats64 *net_stats = NULL; - struct spnic_nic_stats *nic_stats = NULL; - - struct spnic_vport_stats vport_stats = {0}; - u16 i = 0, j = 0; - char *p = NULL; - int err; - - net_stats = dev_get_stats(netdev, &temp); - for (j = 0; j < ARRAY_LEN(spnic_netdev_stats); j++, i++) { - p = (char *)(net_stats) + spnic_netdev_stats[j].offset; - data[i] = GET_VALUE_OF_PTR(spnic_netdev_stats[j].size, p); - } - - nic_stats = &nic_dev->stats; - for (j = 0; j < ARRAY_LEN(spnic_nic_dev_stats); j++, i++) { - p = (char *)(nic_stats) + spnic_nic_dev_stats[j].offset; - data[i] = GET_VALUE_OF_PTR(spnic_nic_dev_stats[j].size, p); - } - - err = spnic_get_vport_stats(nic_dev->hwdev, &vport_stats); - if (err) - nicif_err(nic_dev, drv, netdev, - "Failed to get function stats from fw\n"); - - for (j = 0; j < ARRAY_LEN(spnic_function_stats); j++, i++) { - p = (char *)(&vport_stats) + spnic_function_stats[j].offset; - data[i] = GET_VALUE_OF_PTR(spnic_function_stats[j].size, p); - } - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - i += get_ethtool_port_stats(nic_dev, data + i); - - get_drv_queue_stats(nic_dev, data + i); -} - -static u16 get_drv_dev_strings(struct spnic_nic_dev *nic_dev, char *p) -{ - u16 i, cnt = 0; - - for (i = 0; i < ARRAY_LEN(spnic_netdev_stats); i++) { - memcpy(p, spnic_netdev_stats[i].name, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - cnt++; - } - - for (i = 0; i < ARRAY_LEN(spnic_nic_dev_stats); i++) { - memcpy(p, spnic_nic_dev_stats[i].name, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - cnt++; - } - - return cnt; -} - -static u16 get_hw_stats_strings(struct spnic_nic_dev *nic_dev, char *p) -{ - u16 i, cnt = 0; - - for (i = 0; i < ARRAY_LEN(spnic_function_stats); i++) { - memcpy(p, spnic_function_stats[i].name, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - cnt++; - } - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) { - for (i = 0; i < ARRAY_LEN(spnic_port_stats); i++) { - memcpy(p, spnic_port_stats[i].name, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - cnt++; - } - } - - return cnt; -} - -static u16 get_qp_stats_strings(struct spnic_nic_dev *nic_dev, char *p) -{ - u16 i = 0, j = 0, cnt = 0; - - for (i = 0; i < nic_dev->q_params.num_qps; i++) { - for (j = 0; j < ARRAY_LEN(spnic_tx_queue_stats); j++) { - sprintf(p, spnic_tx_queue_stats[j].name, i); - p += ETH_GSTRING_LEN; - cnt++; - } - } - - for (i = 0; i < nic_dev->q_params.num_qps; i++) { - for (j = 0; j < ARRAY_LEN(spnic_rx_queue_stats); j++) { - sprintf(p, 
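- /* each stat name is a format string carrying %d for the queue index */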
spnic_rx_queue_stats[j].name, i); - p += ETH_GSTRING_LEN; - cnt++; - } - } - - return cnt; -} - -void spnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - char *p = (char *)data; - u16 offset = 0; - - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *spnic_test_strings, sizeof(spnic_test_strings)); - return; - case ETH_SS_STATS: - offset = get_drv_dev_strings(nic_dev, p); - offset += get_hw_stats_strings(nic_dev, - p + offset * ETH_GSTRING_LEN); - get_qp_stats_strings(nic_dev, p + offset * ETH_GSTRING_LEN); - - return; - case ETH_SS_PRIV_FLAGS: - memcpy(data, g_spnic_priv_flags_strings, - sizeof(g_spnic_priv_flags_strings)); - return; - default: - nicif_err(nic_dev, drv, netdev, - "Invalid string set %u.", stringset); - return; - } -} - -static const u32 spnic_mag_link_mode_ge[] = { - ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - ETHTOOL_LINK_MODE_1000baseX_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_10ge_base_r[] = { - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, - ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_25ge_base_r[] = { - ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_40ge_base_r4[] = { - ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_50ge_base_r[] = { - ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, - ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, - ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_50ge_base_r2[] = { - ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, - ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, - ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_100ge_base_r[] = { - ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, - ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, - ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_100ge_base_r2[] = { - ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, - ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, - ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_100ge_base_r4[] = { - ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_200ge_base_r2[] = { - ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, - ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, - ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_200ge_base_r4[] = { - ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, - ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, - ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, -}; - -struct hw2ethtool_link_mode { - const u32 *link_mode_bit_arr; - u32 arr_size; - u32 speed; -}; - -static const struct hw2ethtool_link_mode - hw2ethtool_link_mode_table[LINK_MODE_MAX_NUMBERS] = { - [LINK_MODE_GE] = { - .link_mode_bit_arr = spnic_mag_link_mode_ge, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_ge), - .speed = SPEED_1000, - }, - [LINK_MODE_10GE_BASE_R] = { - .link_mode_bit_arr = spnic_mag_link_mode_10ge_base_r, - .arr_size = 
ARRAY_LEN(spnic_mag_link_mode_10ge_base_r), - .speed = SPEED_10000, - }, - [LINK_MODE_25GE_BASE_R] = { - .link_mode_bit_arr = spnic_mag_link_mode_25ge_base_r, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_25ge_base_r), - .speed = SPEED_25000, - }, - [LINK_MODE_40GE_BASE_R4] = { - .link_mode_bit_arr = spnic_mag_link_mode_40ge_base_r4, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_40ge_base_r4), - .speed = SPEED_40000, - }, - [LINK_MODE_50GE_BASE_R] = { - .link_mode_bit_arr = spnic_mag_link_mode_50ge_base_r, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_50ge_base_r), - .speed = SPEED_50000, - }, - [LINK_MODE_50GE_BASE_R2] = { - .link_mode_bit_arr = spnic_mag_link_mode_50ge_base_r2, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_50ge_base_r2), - .speed = SPEED_50000, - }, - [LINK_MODE_100GE_BASE_R] = { - .link_mode_bit_arr = spnic_mag_link_mode_100ge_base_r, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_100ge_base_r), - .speed = SPEED_100000, - }, - [LINK_MODE_100GE_BASE_R2] = { - .link_mode_bit_arr = spnic_mag_link_mode_100ge_base_r2, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_100ge_base_r2), - .speed = SPEED_100000, - }, - [LINK_MODE_100GE_BASE_R4] = { - .link_mode_bit_arr = spnic_mag_link_mode_100ge_base_r4, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_100ge_base_r4), - .speed = SPEED_100000, - }, - [LINK_MODE_200GE_BASE_R2] = { - .link_mode_bit_arr = spnic_mag_link_mode_200ge_base_r2, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_200ge_base_r2), - .speed = SPEED_200000, - }, - [LINK_MODE_200GE_BASE_R4] = { - .link_mode_bit_arr = spnic_mag_link_mode_200ge_base_r4, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_200ge_base_r4), - .speed = SPEED_200000, - }, -}; - -#define GET_SUPPORTED_MODE 0 -#define GET_ADVERTISED_MODE 1 - -struct cmd_link_settings { - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); - __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); - - u32 speed; - u8 duplex; - u8 port; - u8 autoneg; -}; - -#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \ - set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->supported) -#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \ - set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->advertising) - -#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \ -do { \ - u32 i; \ - for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) \ - set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], \ - (ecmd)->supported); \ -} while (0) - -#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \ -do { \ - u32 i; \ - for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) \ - set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], \ - (ecmd)->advertising); \ -} while (0) - -/* Related to enum mag_cmd_port_speed */ -static u32 hw_to_ethtool_speed[] = { - (u32)SPEED_UNKNOWN, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, - SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, SPEED_200000 -}; - -static int spnic_ethtool_to_hw_speed_level(u32 speed) -{ - int i; - - for (i = 0; i < ARRAY_LEN(hw_to_ethtool_speed); i++) { - if (hw_to_ethtool_speed[i] == speed) - break; - } - - return i; -} - -static void -spnic_add_ethtool_link_mode(struct cmd_link_settings *link_settings, u32 hw_link_mode, u32 name) -{ - u32 link_mode; - - for (link_mode = 0; link_mode < LINK_MODE_MAX_NUMBERS; link_mode++) { - if (hw_link_mode & BIT(link_mode)) { - if (name == GET_SUPPORTED_MODE) - ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(link_settings, link_mode); - else - ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(link_settings, link_mode); - } - } -} - -static int 
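- /* derive ethtool speed/duplex from port capabilities and current link state */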
spnic_link_speed_set(struct spnic_nic_dev *nic_dev, - struct cmd_link_settings *link_settings, - struct nic_port_info *port_info) -{ - u8 link_state = 0; - int err; - - if (port_info->supported_mode != LINK_MODE_UNKNOWN) - spnic_add_ethtool_link_mode(link_settings, port_info->supported_mode, - GET_SUPPORTED_MODE); - if (port_info->advertised_mode != LINK_MODE_UNKNOWN) - spnic_add_ethtool_link_mode(link_settings, port_info->advertised_mode, - GET_ADVERTISED_MODE); - - err = spnic_get_link_state(nic_dev->hwdev, &link_state); - if (!err && link_state) { - link_settings->speed = - port_info->speed < ARRAY_LEN(hw_to_ethtool_speed) ? - hw_to_ethtool_speed[port_info->speed] : - (u32)SPEED_UNKNOWN; - - link_settings->duplex = port_info->duplex; - } else { - link_settings->speed = (u32)SPEED_UNKNOWN; - link_settings->duplex = DUPLEX_UNKNOWN; - } - - return 0; -} - -static void spnic_link_port_type(struct cmd_link_settings *link_settings, u8 port_type) -{ - switch (port_type) { - case MAG_CMD_WIRE_TYPE_ELECTRIC: - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP); - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP); - link_settings->port = PORT_TP; - break; - - case MAG_CMD_WIRE_TYPE_AOC: - case MAG_CMD_WIRE_TYPE_MM: - case MAG_CMD_WIRE_TYPE_SM: - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); - link_settings->port = PORT_FIBRE; - break; - - case MAG_CMD_WIRE_TYPE_COPPER: - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); - link_settings->port = PORT_DA; - break; - - case MAG_CMD_WIRE_TYPE_BACKPLANE: - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane); - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane); - link_settings->port = PORT_NONE; - break; - - default: - link_settings->port = PORT_OTHER; - break; - } -} - -static int get_link_pause_settings(struct spnic_nic_dev *nic_dev, - struct cmd_link_settings *link_settings) -{ - struct nic_pause_config nic_pause = {0}; - int err; - - err = spnic_get_pause_info(nic_dev->hwdev, &nic_pause); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to get pauseparam from hw\n"); - return err; - } - - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Pause); - if (nic_pause.rx_pause && nic_pause.tx_pause) { - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); - } else if (nic_pause.tx_pause) { - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Asym_Pause); - } else if (nic_pause.rx_pause) { - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Asym_Pause); - } - - return 0; -} - -int get_link_settings(struct net_device *netdev, struct cmd_link_settings *link_settings) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct nic_port_info port_info = {0}; - int err; - - err = spnic_get_port_info(nic_dev->hwdev, &port_info, SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to get port info\n"); - return err; - } - - err = spnic_link_speed_set(nic_dev, link_settings, &port_info); - if (err) - return err; - - spnic_link_port_type(link_settings, port_info.port_type); - - link_settings->autoneg = port_info.autoneg_state == PORT_CFG_AN_ON ? 
- AUTONEG_ENABLE : AUTONEG_DISABLE; - if (port_info.autoneg_cap) - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Autoneg); - if (port_info.autoneg_state == PORT_CFG_AN_ON) - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Autoneg); - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - err = get_link_pause_settings(nic_dev, link_settings); - - return err; -} - -int spnic_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *link_settings) -{ - struct cmd_link_settings settings = { { 0 } }; - struct ethtool_link_settings *base = &link_settings->base; - int err; - - ethtool_link_ksettings_zero_link_mode(link_settings, supported); - ethtool_link_ksettings_zero_link_mode(link_settings, advertising); - - err = get_link_settings(netdev, &settings); - if (err) - return err; - - bitmap_copy(link_settings->link_modes.supported, settings.supported, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_copy(link_settings->link_modes.advertising, settings.advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS); - - base->autoneg = settings.autoneg; - base->speed = settings.speed; - base->duplex = settings.duplex; - base->port = settings.port; - - return 0; -} - -static bool spnic_is_support_speed(u32 supported_link, u32 speed) -{ - u32 link_mode; - - for (link_mode = 0; link_mode < LINK_MODE_MAX_NUMBERS; link_mode++) { - if (!(supported_link & BIT(link_mode))) - continue; - - if (hw2ethtool_link_mode_table[link_mode].speed == speed) - return true; - } - - return false; -} - -static int spnic_is_speed_legal(struct spnic_nic_dev *nic_dev, - struct nic_port_info *port_info, u32 speed) -{ - struct net_device *netdev = nic_dev->netdev; - int speed_level = 0; - - if (port_info->supported_mode == LINK_MODE_UNKNOWN || - port_info->advertised_mode == LINK_MODE_UNKNOWN) { - nicif_err(nic_dev, drv, netdev, "Unknown supported link modes\n"); - return -EAGAIN; - } - - speed_level = spnic_ethtool_to_hw_speed_level(speed); - if (speed_level >= PORT_SPEED_UNKNOWN || - !spnic_is_support_speed(port_info->supported_mode, speed)) { - nicif_err(nic_dev, drv, netdev, - "Unsupported speed: %u\n", speed); - return -EINVAL; - } - - return 0; -} - -static int get_link_settings_type(struct spnic_nic_dev *nic_dev, - u8 autoneg, u32 speed, u32 *set_settings) -{ - struct nic_port_info port_info = {0}; - int err; - - err = spnic_get_port_info(nic_dev->hwdev, &port_info, SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get current settings\n"); - return -EAGAIN; - } - - /* Always set the autonegotiation bit when the port supports it */ - if (port_info.autoneg_cap) - *set_settings |= HILINK_LINK_SET_AUTONEG; - - if (autoneg == AUTONEG_ENABLE) { - if (!port_info.autoneg_cap) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Autoneg not supported\n"); - return -EOPNOTSUPP; - } - } else if (speed != (u32)SPEED_UNKNOWN) { - /* Set speed only when autoneg is disabled */ - err = spnic_is_speed_legal(nic_dev, &port_info, speed); - if (err) - return err; - - *set_settings |= HILINK_LINK_SET_SPEED; - } else { - nicif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n"); - return -EOPNOTSUPP; - } - - return 0; -} - -static int spnic_set_settings_to_hw(struct spnic_nic_dev *nic_dev, - u32 set_settings, u8 autoneg, u32 speed) -{ - struct net_device *netdev = nic_dev->netdev; - struct spnic_link_ksettings settings = {0}; - int speed_level = 0; - char set_link_str[128] = {0}; - int err = 0; - - snprintf(set_link_str, sizeof(set_link_str), "%s", - (set_settings & HILINK_LINK_SET_AUTONEG) ? - (autoneg ? "autoneg enable " : "autoneg disable ") : ""); - if (set_settings & HILINK_LINK_SET_SPEED) { - speed_level = spnic_ethtool_to_hw_speed_level(speed); - /* append in place: passing set_link_str as both destination and source argument of snprintf() is undefined behavior */ - snprintf(set_link_str + strlen(set_link_str), - sizeof(set_link_str) - strlen(set_link_str), "speed %u ", speed); - } - - settings.valid_bitmap = set_settings; - settings.autoneg = autoneg ? PORT_CFG_AN_ON : PORT_CFG_AN_OFF; - settings.speed = (u8)speed_level; - - err = spnic_set_link_settings(nic_dev->hwdev, &settings); - if (err) - nicif_err(nic_dev, drv, netdev, "Set %sfailed\n", - set_link_str); - else - nicif_info(nic_dev, drv, netdev, "Set %ssuccess\n", - set_link_str); - - return err; -} - -int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u32 set_settings = 0; - int err = 0; - - err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings); - if (err) - return err; - - if (set_settings) - err = spnic_set_settings_to_hw(nic_dev, set_settings, autoneg, speed); - else - nicif_info(nic_dev, drv, netdev, "Nothing changed, exiting without setting anything\n"); - - return err; -} - -int spnic_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *link_settings) -{ - /* Only autoneg and speed can be set */ - return set_link_settings(netdev, link_settings->base.autoneg, - link_settings->base.speed); -}
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_filter.c b/drivers/net/ethernet/ramaxel/spnic/spnic_filter.c deleted file mode 100644 index d7ca2bed454b9e933eedf0e04b7110885bce1610..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_filter.c +++ /dev/null @@ -1,411 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "spnic_nic_dev.h" - -enum spnic_rx_mod { - SPNIC_RX_MODE_UC = 1 << 0, - SPNIC_RX_MODE_MC = 1 << 1, - SPNIC_RX_MODE_BC = 1 << 2, - SPNIC_RX_MODE_MC_ALL = 1 << 3, - SPNIC_RX_MODE_PROMISC = 1 << 4, -}; - -static int spnic_uc_sync(struct net_device *netdev, u8 *addr) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - return spnic_set_mac(nic_dev->hwdev, addr, 0, sphw_global_func_id(nic_dev->hwdev), - SPHW_CHANNEL_NIC); -} - -static int spnic_uc_unsync(struct net_device *netdev, u8 *addr) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - /* The addr is in use */ - if (ether_addr_equal(addr, netdev->dev_addr)) - return 0; - - return spnic_del_mac(nic_dev->hwdev, addr, 0, sphw_global_func_id(nic_dev->hwdev), - SPHW_CHANNEL_NIC); -} - -void spnic_clean_mac_list_filter(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - - list_for_each_entry_safe(f, ftmp, &nic_dev->uc_filter_list, list) { - if (f->state == SPNIC_MAC_HW_SYNCED) - spnic_uc_unsync(netdev, f->addr); - list_del(&f->list); - kfree(f); - } - - list_for_each_entry_safe(f, ftmp, &nic_dev->mc_filter_list, list) { - if (f->state == SPNIC_MAC_HW_SYNCED) - spnic_uc_unsync(netdev, f->addr); - list_del(&f->list); - kfree(f); - } -} - -static struct spnic_mac_filter *spnic_find_mac(struct list_head *filter_list, u8 *addr) -{ - struct spnic_mac_filter *f = NULL; - - list_for_each_entry(f, filter_list, list) { - if (ether_addr_equal(addr, f->addr)) - return f; - } - return NULL;
-} - -static struct spnic_mac_filter *spnic_add_filter(struct spnic_nic_dev *nic_dev, - struct list_head *mac_filter_list, u8 *addr) -{ - struct spnic_mac_filter *f; - - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - goto out; - - ether_addr_copy(f->addr, addr); - - INIT_LIST_HEAD(&f->list); - list_add_tail(&f->list, mac_filter_list); - - f->state = SPNIC_MAC_WAIT_HW_SYNC; - set_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags); - -out: - return f; -} - -static void spnic_del_filter(struct spnic_nic_dev *nic_dev, struct spnic_mac_filter *f) -{ - set_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags); - - if (f->state == SPNIC_MAC_WAIT_HW_SYNC) { - /* have not added to hw, delete it directly */ - list_del(&f->list); - kfree(f); - return; - } - - f->state = SPNIC_MAC_WAIT_HW_UNSYNC; -} - -static struct spnic_mac_filter *spnic_mac_filter_entry_clone(struct spnic_mac_filter *src) -{ - struct spnic_mac_filter *f; - - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return NULL; - - *f = *src; - INIT_LIST_HEAD(&f->list); - - return f; -} - -static void spnic_undo_del_filter_entries(struct list_head *filter_list, struct list_head *from) -{ - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - - list_for_each_entry_safe(f, ftmp, from, list) { - if (spnic_find_mac(filter_list, f->addr)) - continue; - - if (f->state == SPNIC_MAC_HW_SYNCED) - f->state = SPNIC_MAC_WAIT_HW_UNSYNC; - - list_move_tail(&f->list, filter_list); - } -} - -static void spnic_undo_add_filter_entries(struct list_head *filter_list, struct list_head *from) -{ - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *tmp = NULL; - struct spnic_mac_filter *f = NULL; - - list_for_each_entry_safe(f, ftmp, from, list) { - tmp = spnic_find_mac(filter_list, f->addr); - if (tmp && tmp->state == SPNIC_MAC_HW_SYNCED) - tmp->state = SPNIC_MAC_WAIT_HW_SYNC; - } -} - -static void spnic_cleanup_filter_list(struct list_head *head) -{ - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - - list_for_each_entry_safe(f, ftmp, head, list) { - list_del(&f->list); - kfree(f); - } -} - -static int spnic_mac_filter_sync_hw(struct spnic_nic_dev *nic_dev, struct list_head *del_list, - struct list_head *add_list) -{ - struct net_device *netdev = nic_dev->netdev; - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - int err = 0, add_count = 0; - - if (!list_empty(del_list)) { - list_for_each_entry_safe(f, ftmp, del_list, list) { - err = spnic_uc_unsync(netdev, f->addr); - if (err) { /* ignore errors when delete mac */ - nic_err(&nic_dev->pdev->dev, "Failed to delete mac\n"); - } - - list_del(&f->list); - kfree(f); - } - } - - if (!list_empty(add_list)) { - list_for_each_entry_safe(f, ftmp, add_list, list) { - err = spnic_uc_sync(netdev, f->addr); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to add mac\n"); - return err; - } - - add_count++; - list_del(&f->list); - kfree(f); - } - } - - return add_count; -} - -static int spnic_mac_filter_sync(struct spnic_nic_dev *nic_dev, - struct list_head *mac_filter_list, bool uc) -{ - struct net_device *netdev = nic_dev->netdev; - struct list_head tmp_del_list, tmp_add_list; - struct spnic_mac_filter *fclone = NULL; - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - int err = 0, add_count = 0; - - INIT_LIST_HEAD(&tmp_del_list); - INIT_LIST_HEAD(&tmp_add_list); - - list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { - if (f->state != SPNIC_MAC_WAIT_HW_UNSYNC) - continue; - - f->state = 
SPNIC_MAC_HW_UNSYNCED; - list_move_tail(&f->list, &tmp_del_list); - } - - list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { - if (f->state != SPNIC_MAC_WAIT_HW_SYNC) - continue; - - fclone = spnic_mac_filter_entry_clone(f); - if (!fclone) { - err = -ENOMEM; - break; - } - - f->state = SPNIC_MAC_HW_SYNCED; - list_add_tail(&fclone->list, &tmp_add_list); - } - - if (err) { - spnic_undo_del_filter_entries(mac_filter_list, &tmp_del_list); - spnic_undo_add_filter_entries(mac_filter_list, &tmp_add_list); - nicif_err(nic_dev, drv, netdev, "Failed to clone mac_filter_entry\n"); - - spnic_cleanup_filter_list(&tmp_del_list); - spnic_cleanup_filter_list(&tmp_add_list); - return -ENOMEM; - } - - add_count = spnic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list); - if (list_empty(&tmp_add_list)) - return add_count; - - /* there are errors when add mac to hw, delete all mac in hw */ - spnic_undo_add_filter_entries(mac_filter_list, &tmp_add_list); - /* VF don't support to enter promisc mode, - * so we can't delete any other uc mac - */ - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev) || !uc) { - list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { - if (f->state != SPNIC_MAC_HW_SYNCED) - continue; - - fclone = spnic_mac_filter_entry_clone(f); - if (!fclone) - break; - - f->state = SPNIC_MAC_WAIT_HW_SYNC; - list_add_tail(&fclone->list, &tmp_del_list); - } - } - - spnic_cleanup_filter_list(&tmp_add_list); - spnic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list); - - /* need to enter promisc/allmulti mode */ - return -ENOMEM; -} - -static void spnic_mac_filter_sync_all(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - int add_count; - - if (test_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags)) { - clear_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags); - add_count = spnic_mac_filter_sync(nic_dev, &nic_dev->uc_filter_list, true); - if (add_count < 0 && SPNIC_SUPPORT_PROMISC(nic_dev->hwdev)) { - set_bit(SPNIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state); - nicif_info(nic_dev, drv, netdev, "Promisc mode forced on\n"); - } else if (add_count) { - clear_bit(SPNIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state); - } - - add_count = spnic_mac_filter_sync(nic_dev, &nic_dev->mc_filter_list, false); - if (add_count < 0 && SPNIC_SUPPORT_ALLMULTI(nic_dev->hwdev)) { - set_bit(SPNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state); - nicif_info(nic_dev, drv, netdev, "All multicast mode forced on\n"); - } else if (add_count) { - clear_bit(SPNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state); - } - } -} - -#define SPNIC_DEFAULT_RX_MODE (SPNIC_RX_MODE_UC | SPNIC_RX_MODE_MC | \ - SPNIC_RX_MODE_BC) - -static void spnic_update_mac_filter(struct spnic_nic_dev *nic_dev, - struct netdev_hw_addr_list *src_list, - struct list_head *filter_list) -{ - struct spnic_mac_filter *filter = NULL; - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - struct netdev_hw_addr *ha = NULL; - - /* add addr if not already in the filter list */ - netif_addr_lock_bh(nic_dev->netdev); - netdev_hw_addr_list_for_each(ha, src_list) { - filter = spnic_find_mac(filter_list, ha->addr); - if (!filter) - spnic_add_filter(nic_dev, filter_list, ha->addr); - else if (filter->state == SPNIC_MAC_WAIT_HW_UNSYNC) - filter->state = SPNIC_MAC_HW_SYNCED; - } - netif_addr_unlock_bh(nic_dev->netdev); - - /* delete addr if not in netdev list */ - list_for_each_entry_safe(f, ftmp, filter_list, list) { - bool found = false; - - netif_addr_lock_bh(nic_dev->netdev); - netdev_hw_addr_list_for_each(ha, src_list) - if 
(ether_addr_equal(ha->addr, f->addr)) { - found = true; - break; - } - netif_addr_unlock_bh(nic_dev->netdev); - - if (found) - continue; - - spnic_del_filter(nic_dev, f); - } -} - -static void update_mac_filter(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - - if (test_and_clear_bit(SPNIC_UPDATE_MAC_FILTER, &nic_dev->flags)) { - spnic_update_mac_filter(nic_dev, &netdev->uc, &nic_dev->uc_filter_list); - spnic_update_mac_filter(nic_dev, &netdev->mc, &nic_dev->mc_filter_list); - } -} - -static void sync_rx_mode_to_hw(struct spnic_nic_dev *nic_dev, int promisc_en, - int allmulti_en) -{ - struct net_device *netdev = nic_dev->netdev; - u32 rx_mod = SPNIC_DEFAULT_RX_MODE; - int err; - - rx_mod |= (promisc_en ? SPNIC_RX_MODE_PROMISC : 0); - rx_mod |= (allmulti_en ? SPNIC_RX_MODE_MC_ALL : 0); - - if (promisc_en != test_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state)) - nicif_info(nic_dev, drv, netdev, "%s promisc mode\n", - promisc_en ? "Enter" : "Left"); - if (allmulti_en != - test_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) - nicif_info(nic_dev, drv, netdev, "%s all_multi mode\n", - allmulti_en ? "Enter" : "Left"); - - err = spnic_set_rx_mode(nic_dev->hwdev, rx_mod); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to set rx_mode\n"); - return; - } - - promisc_en ? set_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) : - clear_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state); - - allmulti_en ? set_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state) : - clear_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state); -} - -void spnic_set_rx_mode_work(struct work_struct *work) -{ - struct spnic_nic_dev *nic_dev = - container_of(work, struct spnic_nic_dev, rx_mode_work); - struct net_device *netdev = nic_dev->netdev; - int promisc_en = 0, allmulti_en = 0; - - update_mac_filter(nic_dev); - - spnic_mac_filter_sync_all(nic_dev); - - if (SPNIC_SUPPORT_PROMISC(nic_dev->hwdev)) - promisc_en = !!(netdev->flags & IFF_PROMISC) || - test_bit(SPNIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state); - - if (SPNIC_SUPPORT_ALLMULTI(nic_dev->hwdev)) - allmulti_en = !!(netdev->flags & IFF_ALLMULTI) || - test_bit(SPNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state); - - if (promisc_en != test_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) || - allmulti_en != test_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) - sync_rx_mode_to_hw(nic_dev, promisc_en, allmulti_en); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_irq.c b/drivers/net/ethernet/ramaxel/spnic/spnic_irq.c deleted file mode 100644 index 872a94a735905b1b12a51573aa15ec2929caedd0..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_irq.c +++ /dev/null @@ -1,178 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "spnic_nic_io.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" -#include "spnic_rx.h" - -int spnic_poll(struct napi_struct *napi, int budget) -{ - struct spnic_irq *irq_cfg = container_of(napi, struct spnic_irq, napi); - struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); - int tx_pkts, rx_pkts; - - rx_pkts = spnic_rx_poll(irq_cfg->rxq, budget); - - tx_pkts = spnic_tx_poll(irq_cfg->txq, budget); - - if (tx_pkts >= budget || rx_pkts >= budget) - return budget; - - napi_complete(napi); - - 
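	/*
	 * NAPI contract: if either the TX or the RX ring consumed the whole
	 * budget, the budget is returned so the core keeps polling;
	 * otherwise NAPI is completed and the queue's MSI-X vector is
	 * re-armed below so the next interrupt can fire.
	 */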
sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_ENABLE); - - return max(tx_pkts, rx_pkts); -} - -static void qp_add_napi(struct spnic_irq *irq_cfg) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); - - netif_napi_add(nic_dev->netdev, &irq_cfg->napi, spnic_poll, nic_dev->poll_weight); - napi_enable(&irq_cfg->napi); -} - -static void qp_del_napi(struct spnic_irq *irq_cfg) -{ - napi_disable(&irq_cfg->napi); - netif_napi_del(&irq_cfg->napi); -} - -static irqreturn_t qp_irq(int irq, void *data) -{ - struct spnic_irq *irq_cfg = (struct spnic_irq *)data; - struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); - - /* 1 is resend_timer */ - sphw_misx_intr_clear_resend_bit(nic_dev->hwdev, irq_cfg->msix_entry_idx, 1); - - napi_schedule(&irq_cfg->napi); - return IRQ_HANDLED; -} - -static int spnic_request_irq(struct spnic_irq *irq_cfg, u16 q_id) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); - struct interrupt_info info = {0}; - int err; - - qp_add_napi(irq_cfg); - - info.msix_index = irq_cfg->msix_entry_idx; - info.lli_set = 0; - info.interrupt_coalesc_set = 1; - info.pending_limt = nic_dev->intr_coalesce[q_id].pending_limt; - info.coalesc_timer_cfg = nic_dev->intr_coalesce[q_id].coalesce_timer_cfg; - info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg; - nic_dev->rxqs[q_id].last_coalesc_timer_cfg = - nic_dev->intr_coalesce[q_id].coalesce_timer_cfg; - nic_dev->rxqs[q_id].last_pending_limt = nic_dev->intr_coalesce[q_id].pending_limt; - err = sphw_set_interrupt_cfg(nic_dev->hwdev, info, SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, irq_cfg->netdev, - "Failed to set RX interrupt coalescing attribute.\n"); - qp_del_napi(irq_cfg); - return err; - } - - err = request_irq(irq_cfg->irq_id, &qp_irq, 0, irq_cfg->irq_name, irq_cfg); - if (err) { - nicif_err(nic_dev, drv, irq_cfg->netdev, "Failed to request Rx irq\n"); - qp_del_napi(irq_cfg); - return err; - } - - irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask); - - return 0; -} - -static void spnic_release_irq(struct spnic_irq *irq_cfg) -{ - irq_set_affinity_hint(irq_cfg->irq_id, NULL); - synchronize_irq(irq_cfg->irq_id); - free_irq(irq_cfg->irq_id, irq_cfg); - qp_del_napi(irq_cfg); -} - -int spnic_qps_irq_init(struct spnic_nic_dev *nic_dev) -{ - struct pci_dev *pdev = nic_dev->pdev; - struct irq_info *qp_irq_info = NULL; - struct spnic_irq *irq_cfg = NULL; - u16 q_id, i; - u32 local_cpu; - int err; - - for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { - qp_irq_info = &nic_dev->qps_irq_info[q_id]; - irq_cfg = &nic_dev->q_params.irq_cfg[q_id]; - - irq_cfg->irq_id = qp_irq_info->irq_id; - irq_cfg->msix_entry_idx = qp_irq_info->msix_entry_idx; - irq_cfg->netdev = nic_dev->netdev; - irq_cfg->txq = &nic_dev->txqs[q_id]; - irq_cfg->rxq = &nic_dev->rxqs[q_id]; - nic_dev->rxqs[q_id].irq_cfg = irq_cfg; - - local_cpu = cpumask_local_spread(q_id, dev_to_node(&pdev->dev)); - cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask); - - snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name), - "%s_qp%u", nic_dev->netdev->name, q_id); - - err = spnic_request_irq(irq_cfg, q_id); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to request Rx irq\n"); - goto req_tx_irq_err; - } - - sphw_set_msix_auto_mask_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, - SPHW_SET_MSIX_AUTO_MASK); - sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_ENABLE); - } - - INIT_DELAYED_WORK(&nic_dev->moderation_task, spnic_auto_moderation_work); - - 
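	/*
	 * Per-queue bring-up order above: enable NAPI, program interrupt
	 * coalescing, request_irq(), set the affinity hint toward the CPU
	 * that cpumask_local_spread() picked near the device's NUMA node,
	 * then unmask the MSI-X entry. The error path below unwinds the
	 * queues that were already brought up.
	 */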
return 0; - -req_tx_irq_err: - for (i = 0; i < q_id; i++) { - irq_cfg = &nic_dev->q_params.irq_cfg[i]; - sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_DISABLE); - sphw_set_msix_auto_mask_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, - SPHW_CLR_MSIX_AUTO_MASK); - spnic_release_irq(irq_cfg); - } - - return err; -} - -void spnic_qps_irq_deinit(struct spnic_nic_dev *nic_dev) -{ - struct spnic_irq *irq_cfg = NULL; - u16 q_id; - - for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { - irq_cfg = &nic_dev->q_params.irq_cfg[q_id]; - sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_DISABLE); - sphw_set_msix_auto_mask_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, - SPHW_CLR_MSIX_AUTO_MASK); - spnic_release_irq(irq_cfg); - } -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_lld.c b/drivers/net/ethernet/ramaxel/spnic/spnic_lld.c deleted file mode 100644 index f09a4c186aaea10a0603bf9eecfc90f9bf3a8bfc..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_lld.c +++ /dev/null @@ -1,937 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_common.h" -#include "sphw_mt.h" -#include "sphw_crm.h" -#include "spnic_lld.h" -#include "spnic_pci_id_tbl.h" -#include "spnic_sriov.h" -#include "spnic_dev_mgmt.h" -#include "sphw_hw.h" -#include "spnic_nic_dev.h" - -static bool disable_vf_load; -module_param(disable_vf_load, bool, 0444); -MODULE_PARM_DESC(disable_vf_load, "Disable virtual functions probe or not - default is false"); - -static bool disable_attach; -module_param(disable_attach, bool, 0444); -MODULE_PARM_DESC(disable_attach, "disable_attach or not - default is false"); - -#define SPNIC_WAIT_SRIOV_CFG_TIMEOUT 15000 -#define SPNIC_SYNC_YEAR_OFFSET 1900 - -MODULE_AUTHOR("Ramaxel Technologies CO., Ltd"); -MODULE_DESCRIPTION(SPNIC_DRV_DESC); -MODULE_VERSION(SPNIC_DRV_VERSION); -MODULE_LICENSE("GPL"); - -struct spnic_uld_info g_uld_info[SERVICE_T_MAX] = { {0} }; - -#define SPHW_EVENT_PROCESS_TIMEOUT 10000 - -static const char *s_uld_name[SERVICE_T_MAX] = { - "nic", "ovs", "roce", "toe", "ioe", - "fc", "vbs", "ipsec", "virtio", "migrate"}; - -static int attach_uld(struct spnic_pcidev *dev, enum sphw_service_type type, - struct spnic_uld_info *uld_info) -{ - void *uld_dev = NULL; - int err; - - mutex_lock(&dev->pdev_mutex); - - if (dev->uld_dev[type]) { - sdk_err(&dev->pcidev->dev, "%s driver has attached to pcie device\n", - s_uld_name[type]); - err = 0; - goto out_unlock; - } - - err = uld_info->probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name[type]); - if (err || !uld_dev) { - sdk_err(&dev->pcidev->dev, "Failed to add object for %s driver to pcie device\n", - s_uld_name[type]); - goto probe_failed; - } - - dev->uld_dev[type] = uld_dev; - mutex_unlock(&dev->pdev_mutex); - - sdk_info(&dev->pcidev->dev, "Attach %s driver to pcie device succeed\n", s_uld_name[type]); - return 0; - -probe_failed: -out_unlock: - mutex_unlock(&dev->pdev_mutex); - - return err; -} - -static void detach_uld(struct spnic_pcidev *dev, enum sphw_service_type type) -{ - struct spnic_uld_info *uld_info = &g_uld_info[type]; - unsigned long end; - bool timeout = true; - - mutex_lock(&dev->pdev_mutex); - if (!dev->uld_dev[type]) { - mutex_unlock(&dev->pdev_mutex); - return; - } - - end = jiffies + 
msecs_to_jiffies(SPHW_EVENT_PROCESS_TIMEOUT); - do { - if (!test_and_set_bit(type, &dev->state)) { - timeout = false; - break; - } - usleep_range(900, 1000); - } while (time_before(jiffies, end)); - - if (timeout && !test_and_set_bit(type, &dev->state)) - timeout = false; - - uld_info->remove(&dev->lld_dev, dev->uld_dev[type]); - dev->uld_dev[type] = NULL; - if (!timeout) - clear_bit(type, &dev->state); - - sdk_info(&dev->pcidev->dev, "Detach %s driver from pcie device succeed\n", - s_uld_name[type]); - mutex_unlock(&dev->pdev_mutex); -} - -static void attach_ulds(struct spnic_pcidev *dev) -{ - enum sphw_service_type type; - struct pci_dev *pdev = dev->pcidev; - - for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) { - if (g_uld_info[type].probe) { - if (pdev->is_virtfn && (!spnic_get_vf_service_load(pdev, (u16)type))) { - sdk_info(&pdev->dev, "VF device disable service_type = %d load in host\n", - type); - continue; - } - attach_uld(dev, type, &g_uld_info[type]); - } - } -} - -static void detach_ulds(struct spnic_pcidev *dev) -{ - enum sphw_service_type type; - - for (type = SERVICE_T_MAX - 1; type > SERVICE_T_NIC; type--) { - if (g_uld_info[type].probe) - detach_uld(dev, type); - } - - if (g_uld_info[SERVICE_T_NIC].probe) - detach_uld(dev, SERVICE_T_NIC); -} - -int spnic_register_uld(enum sphw_service_type type, struct spnic_uld_info *uld_info) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - if (type >= SERVICE_T_MAX) { - pr_err("Unknown type %d of up layer driver to register\n", type); - return -EINVAL; - } - - if (!uld_info || !uld_info->probe || !uld_info->remove) { - pr_err("Invalid information of %s driver to register\n", s_uld_name[type]); - return -EINVAL; - } - - lld_hold(); - - if (g_uld_info[type].probe) { - pr_err("%s driver has registered\n", s_uld_name[type]); - lld_put(); - return -EINVAL; - } - - memcpy(&g_uld_info[type], uld_info, sizeof(*uld_info)); - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - list_for_each_entry(dev, &chip_node->func_list, node) { - if (attach_uld(dev, type, uld_info)) { - sdk_err(&dev->pcidev->dev, "Attach %s driver to pcie device failed\n", - s_uld_name[type]); - continue; - } - } - } - - lld_put(); - - pr_info("Register %s driver succeed\n", s_uld_name[type]); - return 0; -} - -void spnic_unregister_uld(enum sphw_service_type type) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - struct spnic_uld_info *uld_info = NULL; - - if (type >= SERVICE_T_MAX) { - pr_err("Unknown type %d of up layer driver to unregister\n", type); - return; - } - - lld_hold(); - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - /* detach vf first */ - list_for_each_entry(dev, &chip_node->func_list, node) { - if (sphw_func_type(dev->hwdev) != TYPE_VF) - continue; - - detach_uld(dev, type); - } - - list_for_each_entry(dev, &chip_node->func_list, node) { - if (sphw_func_type(dev->hwdev) == TYPE_VF) - continue; - - detach_uld(dev, type); - } - } - - uld_info = &g_uld_info[type]; - memset(uld_info, 0, sizeof(*uld_info)); - lld_put(); -} - -int spnic_attach_nic(struct spnic_lld_dev *lld_dev) -{ - struct spnic_pcidev *dev = NULL; - - if (!lld_dev) - return -EINVAL; - - dev = container_of(lld_dev, struct spnic_pcidev, lld_dev); - return attach_uld(dev, SERVICE_T_NIC, &g_uld_info[SERVICE_T_NIC]); -} - -void spnic_detach_nic(struct spnic_lld_dev *lld_dev) -{ - struct spnic_pcidev *dev = NULL; - - if (!lld_dev) - return; - - dev = container_of(lld_dev, struct spnic_pcidev, lld_dev); - 
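	/*
	 * The lld_dev handed out to upper-layer drivers is embedded in
	 * struct spnic_pcidev, so container_of() recovers the owning PCI
	 * adapter without a separate lookup table.
	 */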
detach_uld(dev, SERVICE_T_NIC); -} - -static void sphw_sync_time_to_fmw(struct spnic_pcidev *pdev_pri) -{ - struct tm tm = {0}; - u64 tv_msec; - int err; - - tv_msec = ktime_to_ms(ktime_get_real()); - err = sphw_sync_time(pdev_pri->hwdev, tv_msec); - if (err) { - sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n", - err); - } else { - time64_to_tm(tv_msec / MSEC_PER_SEC, 0, &tm); - sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeeded. UTC time %ld-%02d-%02d %02d:%02d:%02d.\n", - tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, - tm.tm_min, tm.tm_sec); - } -} - -static void send_uld_dev_event(struct spnic_pcidev *dev, - struct sphw_event_info *event) -{ - enum sphw_service_type type; - - for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) { - if (test_and_set_bit(type, &dev->state)) { - sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't be handled, %s is detaching\n", - event->type, s_uld_name[type]); - continue; - } - - if (g_uld_info[type].event) - g_uld_info[type].event(&dev->lld_dev, dev->uld_dev[type], event); - clear_bit(type, &dev->state); - } -} - -static void send_event_to_dst_pf(struct spnic_pcidev *dev, u16 func_id, - struct sphw_event_info *event) -{ - struct spnic_pcidev *des_dev = NULL; - - lld_hold(); - list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { - if (sphw_func_type(des_dev->hwdev) == TYPE_VF) - continue; - - if (sphw_global_func_id(des_dev->hwdev) == func_id) { - send_uld_dev_event(des_dev, event); - break; - } - } - lld_put(); -} - -void spnic_event_process(void *adapter, struct sphw_event_info *event) -{ - struct spnic_pcidev *dev = adapter; - u16 func_id; - - if (event->type == SPHW_EVENT_FAULT && - event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR && - event->info.event.chip.func_id < sphw_max_pf_num(dev->hwdev)) { - func_id = event->info.event.chip.func_id; - return send_event_to_dst_pf(adapter, func_id, event); - } - - send_uld_dev_event(adapter, event); -} - -#define SPNIC_IS_VF_DEV(pdev) ((pdev)->device == SPNIC_DEV_ID_VF) - -static int mapping_bar(struct pci_dev *pdev, struct spnic_pcidev *pci_adapter) -{ - int cfg_bar; - - cfg_bar = SPNIC_IS_VF_DEV(pdev) ?
SPNIC_VF_PCI_CFG_REG_BAR : SPNIC_PF_PCI_CFG_REG_BAR; - - pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, cfg_bar); - if (!pci_adapter->cfg_reg_base) { - sdk_err(&pdev->dev, "Failed to map configuration regs\n"); - return -ENOMEM; - } - - pci_adapter->intr_reg_base = pci_ioremap_bar(pdev, SPNIC_PCI_INTR_REG_BAR); - if (!pci_adapter->intr_reg_base) { - sdk_err(&pdev->dev, - "Failed to map interrupt regs\n"); - goto map_intr_bar_err; - } - - if (!SPNIC_IS_VF_DEV(pdev)) { - pci_adapter->mgmt_reg_base = pci_ioremap_bar(pdev, SPNIC_PCI_MGMT_REG_BAR); - if (!pci_adapter->mgmt_reg_base) { - sdk_err(&pdev->dev, "Failed to map mgmt regs\n"); - goto map_mgmt_bar_err; - } - } - - pci_adapter->db_base_phy = pci_resource_start(pdev, SPNIC_PCI_DB_BAR); - pci_adapter->db_dwqe_len = pci_resource_len(pdev, SPNIC_PCI_DB_BAR); - pci_adapter->db_base = pci_ioremap_bar(pdev, SPNIC_PCI_DB_BAR); - if (!pci_adapter->db_base) { - sdk_err(&pdev->dev, "Failed to map doorbell regs\n"); - goto map_db_err; - } - - return 0; - -map_db_err: - if (!SPNIC_IS_VF_DEV(pdev)) - iounmap(pci_adapter->mgmt_reg_base); - -map_mgmt_bar_err: - iounmap(pci_adapter->intr_reg_base); - -map_intr_bar_err: - iounmap(pci_adapter->cfg_reg_base); - - return -ENOMEM; -} - -static void unmapping_bar(struct spnic_pcidev *pci_adapter) -{ - iounmap(pci_adapter->db_base); - - if (!SPNIC_IS_VF_DEV(pci_adapter->pcidev)) - iounmap(pci_adapter->mgmt_reg_base); - - iounmap(pci_adapter->intr_reg_base); - iounmap(pci_adapter->cfg_reg_base); -} - -static int spnic_pci_init(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = NULL; - int err; - - pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL); - if (!pci_adapter) { - sdk_err(&pdev->dev, "Failed to alloc pci device adapter\n"); - return -ENOMEM; - } - pci_adapter->pcidev = pdev; - mutex_init(&pci_adapter->pdev_mutex); - - pci_set_drvdata(pdev, pci_adapter); - - /* to do CONFIG_PCI_IOV */ - - err = pci_enable_device(pdev); - if (err) { - sdk_err(&pdev->dev, "Failed to enable PCI device\n"); - goto pci_enable_err; - } - - err = pci_request_regions(pdev, SPNIC_NIC_DRV_NAME); - if (err) { - sdk_err(&pdev->dev, "Failed to request regions\n"); - goto pci_regions_err; - } - - pci_enable_pcie_error_reporting(pdev); - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - sdk_err(&pdev->dev, "Failed to set DMA mask\n"); - goto dma_mask_err; - } - } - - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - sdk_warn(&pdev->dev, "Couldn't set 64-bit coherent DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - sdk_err(&pdev->dev, "Failed to set coherent DMA mask\n"); - goto dma_consistnet_mask_err; - } - } - - return 0; - -dma_consistnet_mask_err: -dma_mask_err: - pci_clear_master(pdev); - pci_release_regions(pdev); - -pci_regions_err: - pci_disable_device(pdev); - -pci_enable_err: - pci_set_drvdata(pdev, NULL); - kfree(pci_adapter); - - return err; -} - -static void spnic_pci_deinit(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev); - - pci_clear_master(pdev); - pci_release_regions(pdev); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - kfree(pci_adapter); -} - -#ifdef CONFIG_X86 -/** - * cfg_order_reg - when cpu model is haswell or broadwell, should configure dma - * order register to zero - * 
@pci_adapter: pci_adapter - **/ -static void cfg_order_reg(struct spnic_pcidev *pci_adapter) -{ - u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56}; - struct cpuinfo_x86 *cpuinfo = NULL; - u32 i; - - if (sphw_func_type(pci_adapter->hwdev) == TYPE_VF) - return; - - cpuinfo = &cpu_data(0); - for (i = 0; i < sizeof(cpu_model); i++) { - if (cpu_model[i] == cpuinfo->x86_model) - sphw_set_pcie_order_cfg(pci_adapter->hwdev); - } -} -#endif - -static int spnic_func_init(struct pci_dev *pdev, struct spnic_pcidev *pci_adapter) -{ - struct sphw_init_para init_para = {0}; - int err; - - init_para.adapter_hdl = pci_adapter; - init_para.pcidev_hdl = pdev; - init_para.dev_hdl = &pdev->dev; - init_para.cfg_reg_base = pci_adapter->cfg_reg_base; - init_para.intr_reg_base = pci_adapter->intr_reg_base; - init_para.mgmt_reg_base = pci_adapter->mgmt_reg_base; - init_para.db_base = pci_adapter->db_base; - init_para.db_base_phy = pci_adapter->db_base_phy; - init_para.db_dwqe_len = pci_adapter->db_dwqe_len; - init_para.hwdev = &pci_adapter->hwdev; - init_para.chip_node = pci_adapter->chip_node; - err = sphw_init_hwdev(&init_para); - if (err) { - pci_adapter->hwdev = NULL; - sdk_err(&pdev->dev, "Failed to initialize hardware device\n"); - return -EFAULT; - } - - pci_adapter->lld_dev.pdev = pdev; - pci_adapter->lld_dev.hwdev = pci_adapter->hwdev; - if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) - set_bit(SPNIC_FUNC_PERSENT, &pci_adapter->sriov_info.state); - - sphw_event_register(pci_adapter->hwdev, pci_adapter, spnic_event_process); - - if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) - sphw_sync_time_to_fmw(pci_adapter); - - lld_lock_chip_node(); - list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list); - lld_unlock_chip_node(); - - if (!disable_attach) { - attach_ulds(pci_adapter); -#ifdef CONFIG_X86 - cfg_order_reg(pci_adapter); -#endif - } - - sdk_info(&pdev->dev, "Pcie device probed\n"); - - return 0; -} - -static void spnic_func_deinit(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev); - - /* When function deinit, disable mgmt initiative report events firstly, - * then flush mgmt work-queue. 
- */ - sphw_disable_mgmt_msg_report(pci_adapter->hwdev); - - sphw_flush_mgmt_workq(pci_adapter->hwdev); - - lld_lock_chip_node(); - list_del(&pci_adapter->node); - lld_unlock_chip_node(); - - wait_lld_dev_unused(pci_adapter); - - detach_ulds(pci_adapter); - - sphw_event_unregister(pci_adapter->hwdev); - - sphw_free_hwdev(pci_adapter->hwdev); -} - -static inline void wait_sriov_cfg_complete(struct spnic_pcidev *pci_adapter) -{ - struct spnic_sriov_info *sriov_info; - unsigned long end; - - sriov_info = &pci_adapter->sriov_info; - clear_bit(SPNIC_FUNC_PERSENT, &sriov_info->state); - usleep_range(9900, 10000); - - end = jiffies + msecs_to_jiffies(SPNIC_WAIT_SRIOV_CFG_TIMEOUT); - do { - if (!test_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state) && - !test_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state)) - return; - - usleep_range(9900, 10000); - } while (time_before(jiffies, end)); -} - -bool spnic_get_vf_load_state(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = NULL; - struct pci_dev *pf_pdev = NULL; - - if (!pdev) { - pr_err("pdev is null.\n"); - return false; - } - - /* vf used in vm */ - if (pci_is_root_bus(pdev->bus)) - return false; - - if (pdev->is_virtfn) - pf_pdev = pdev->physfn; - else - pf_pdev = pdev; - - pci_adapter = pci_get_drvdata(pf_pdev); - if (!pci_adapter) { - sdk_err(&pdev->dev, "pci_adapter is null.\n"); - return false; - } - - return !pci_adapter->disable_vf_load; -} - -int spnic_set_vf_load_state(struct pci_dev *pdev, bool vf_load_state) -{ - struct spnic_pcidev *pci_adapter = NULL; - - if (!pdev) { - pr_err("pdev is null.\n"); - return -EINVAL; - } - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) { - sdk_err(&pdev->dev, "pci_adapter is null.\n"); - return -EINVAL; - } - - if (sphw_func_type(pci_adapter->hwdev) == TYPE_VF) - return 0; - - pci_adapter->disable_vf_load = !vf_load_state; - sdk_info(&pci_adapter->pcidev->dev, "Current function %s vf load in host\n", - vf_load_state ? "enable" : "disable"); - - return 0; -} - -bool spnic_get_vf_service_load(struct pci_dev *pdev, u16 service) -{ - struct spnic_pcidev *pci_adapter = NULL; - struct pci_dev *pf_pdev = NULL; - - if (!pdev) { - pr_err("pdev is null.\n"); - return false; - } - - if (pdev->is_virtfn) - pf_pdev = pdev->physfn; - else - pf_pdev = pdev; - - pci_adapter = pci_get_drvdata(pf_pdev); - if (!pci_adapter) { - sdk_err(&pdev->dev, "pci_adapter is null.\n"); - return false; - } - - if (service >= SERVICE_T_MAX) { - sdk_err(&pdev->dev, "service_type = %u state is error\n", - service); - return false; - } - - return !pci_adapter->disable_srv_load[service]; -} - -int spnic_set_vf_service_load(struct pci_dev *pdev, u16 service, bool vf_srv_load) -{ - struct spnic_pcidev *pci_adapter = NULL; - - if (!pdev) { - pr_err("pdev is null.\n"); - return -EINVAL; - } - - if (service >= SERVICE_T_MAX) { - sdk_err(&pdev->dev, "service_type = %u state is error\n", - service); - return -EFAULT; - } - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) { - sdk_err(&pdev->dev, "pci_adapter is null.\n"); - return -EINVAL; - } - - if (sphw_func_type(pci_adapter->hwdev) == TYPE_VF) - return 0; - - pci_adapter->disable_srv_load[service] = !vf_srv_load; - sdk_info(&pci_adapter->pcidev->dev, "Current function %s vf load in host\n", - vf_srv_load ? 
"enable" : "disable"); - - return 0; -} - -static int enable_vf_service_state(struct spnic_pcidev *dst_dev, u16 service) -{ - int err; - - err = sphw_get_dev_cap(dst_dev->hwdev); - if (err) { - sdk_err(&dst_dev->pcidev->dev, "Failed to get current device capabilities\n"); - return -EFAULT; - } - return attach_uld(dst_dev, service, &g_uld_info[service]); -} - -int spnic_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, u16 service, bool en) -{ - struct spnic_pcidev *dev = NULL; - struct spnic_pcidev *dst_dev = NULL; - int err = -EFAULT; - - if (!pdev) - return -EINVAL; - - dev = pci_get_drvdata(pdev); - if (!dev) - return -EFAULT; - - if (service >= SERVICE_T_MAX) { - sdk_err(&pdev->dev, "Current vf do not supports set service_type = %u state in host\n", - service); - return -EFAULT; - } - - /* find func_idx pci_adapter and disable or enable service */ - lld_hold(); - list_for_each_entry(dst_dev, &dev->chip_node->func_list, node) { - if (sphw_global_func_id(dst_dev->hwdev) != vf_func_id) - continue; - if (en) { - err = enable_vf_service_state(dst_dev, service); - if (err) - sdk_err(&dev->pcidev->dev, "Failed to set functio_id = %u service_type = %u\n", - vf_func_id, service); - } else { - detach_uld(dst_dev, service); - err = 0; - } - break; - } - lld_put(); - - return err; -} - -static void spnic_remove(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev); - - if (!pci_adapter) - return; - - sdk_info(&pdev->dev, "Pcie device remove begin\n"); - - sphw_detect_hw_present(pci_adapter->hwdev); - - if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) { - wait_sriov_cfg_complete(pci_adapter); - spnic_pci_sriov_disable(pdev); - } - - spnic_func_deinit(pdev); - - lld_lock_chip_node(); - free_chip_node(pci_adapter); - lld_unlock_chip_node(); - - unmapping_bar(pci_adapter); - spnic_pci_deinit(pdev); - - sdk_info(&pdev->dev, "Pcie device removed\n"); -} - -static int spnic_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct spnic_pcidev *pci_adapter = NULL; - int err; - - sdk_info(&pdev->dev, "Pcie device probe begin\n"); - - if (pdev->is_virtfn && (!spnic_get_vf_load_state(pdev))) { - sdk_info(&pdev->dev, "VF device disable load in host\n"); - return 0; - } - - err = spnic_pci_init(pdev); - if (err) - return err; - - pci_adapter = pci_get_drvdata(pdev); - err = mapping_bar(pdev, pci_adapter); - if (err) { - sdk_err(&pdev->dev, "Failed to map bar\n"); - goto map_bar_failed; - } - - pci_adapter->disable_vf_load = disable_vf_load; - pci_adapter->id = *id; - lld_dev_cnt_init(pci_adapter); - - /* if chip information of pcie function exist, add the function into chip */ - lld_lock_chip_node(); - err = alloc_chip_node(pci_adapter); - if (err) { - lld_unlock_chip_node(); - sdk_err(&pdev->dev, - "Failed to add new chip node to global list\n"); - goto alloc_chip_node_fail; - } - - lld_unlock_chip_node(); - - err = spnic_func_init(pdev, pci_adapter); - if (err) - goto func_init_err; - - if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) { - err = sphw_set_bdf_ctxt(pci_adapter->hwdev, pdev->bus->number, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); - if (err) { - sdk_err(&pdev->dev, "Failed to set BDF info to MPU\n"); - goto set_bdf_err; - } - } - - return 0; - -set_bdf_err: - spnic_func_deinit(pdev); - -func_init_err: - lld_lock_chip_node(); - free_chip_node(pci_adapter); - lld_unlock_chip_node(); - -alloc_chip_node_fail: - unmapping_bar(pci_adapter); - -map_bar_failed: - spnic_pci_deinit(pdev); - - sdk_err(&pdev->dev, "Pcie device probe failed\n"); 
- return err; -} - -static const struct pci_device_id spnic_pci_table[] = { - {PCI_VDEVICE(RAMAXEL, SPNIC_DEV_ID_PF_STD), 0}, - {PCI_VDEVICE(RAMAXEL, SPNIC_DEV_ID_VF), 0}, - {PCI_VDEVICE(RAMAXEL, SPNIC_DEV_ID_VF_HV), 0}, - {0, 0} -}; - -MODULE_DEVICE_TABLE(pci, spnic_pci_table); - -/** - * spnic_io_error_detected - called when PCI error is detected - * @pdev: Pointer to PCI device - * @state: The current pci connection state - * - * This function is called after a PCI bus error affecting - * this device has been detected. - * - * Since we only need error detecting not error handling, so we - * always return PCI_ERS_RESULT_CAN_RECOVER to tell the AER - * driver that we don't need reset(error handling). - */ -static pci_ers_result_t spnic_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) -{ - struct spnic_pcidev *pci_adapter = NULL; - - sdk_err(&pdev->dev, - "Uncorrectable error detected, log and cleanup error status: 0x%08x\n", - state); - - pci_aer_clear_nonfatal_status(pdev); - pci_adapter = pci_get_drvdata(pdev); - - if (pci_adapter) - sphw_record_pcie_error(pci_adapter->hwdev); - - return PCI_ERS_RESULT_CAN_RECOVER; -} - -static void spnic_shutdown(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev); - - sdk_err(&pdev->dev, "Shutdown device\n"); - - if (pci_adapter) - sphw_shutdown_hwdev(pci_adapter->hwdev); - - pci_disable_device(pdev); -} - -/* Cause we only need error detecting not error handling, so only error_detected - * callback is enough. - */ -static struct pci_error_handlers spnic_err_handler = { - .error_detected = spnic_io_error_detected, -}; - -static struct pci_driver spnic_driver = { - .name = SPNIC_NIC_DRV_NAME, - .id_table = spnic_pci_table, - .probe = spnic_probe, - .remove = spnic_remove, - .shutdown = spnic_shutdown, - .sriov_configure = spnic_pci_sriov_configure, - .err_handler = &spnic_err_handler -}; - -static __init int spnic_lld_init(void) -{ - int err; - - pr_info("%s - version %s\n", SPNIC_DRV_DESC, SPNIC_DRV_VERSION); - memset(g_uld_info, 0, sizeof(g_uld_info)); - - spnic_lld_lock_init(); - - err = spnic_register_uld(SERVICE_T_NIC, &nic_uld_info); - if (err) { - pr_err("Register spnic uld failed\n"); - return err; - } - - return pci_register_driver(&spnic_driver); -} - -static __exit void spnic_lld_exit(void) -{ - pci_unregister_driver(&spnic_driver); - spnic_unregister_uld(SERVICE_T_NIC); -} - -module_init(spnic_lld_init); -module_exit(spnic_lld_exit); diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_lld.h b/drivers/net/ethernet/ramaxel/spnic/spnic_lld.h deleted file mode 100644 index e1864f1b6c5b1b1b794019dfd1b0ddbc314cffbe..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_lld.h +++ /dev/null @@ -1,75 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_LLD_H -#define SPNIC_LLD_H - -#include "sphw_crm.h" - -struct spnic_lld_dev { - struct pci_dev *pdev; - void *hwdev; -}; - -struct spnic_uld_info { - /* uld_dev: should not return null even the function capability - * is not support the up layer driver - * uld_dev_name: NIC driver should copy net device name. - * FC driver could copy fc device name. 
- * other up layer driver don`t need copy anything - */ - int (*probe)(struct spnic_lld_dev *lld_dev, void **uld_dev, - char *uld_dev_name); - void (*remove)(struct spnic_lld_dev *lld_dev, void *uld_dev); - int (*suspend)(struct spnic_lld_dev *lld_dev, void *uld_dev, - pm_message_t state); - int (*resume)(struct spnic_lld_dev *lld_dev, void *uld_dev); - void (*event)(struct spnic_lld_dev *lld_dev, void *uld_dev, - struct sphw_event_info *event); - int (*ioctl)(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size, - void *buf_out, u32 *out_size); -}; - -int spnic_register_uld(enum sphw_service_type type, struct spnic_uld_info *uld_info); - -void spnic_unregister_uld(enum sphw_service_type type); - -void *spnic_get_uld_dev_by_pdev(struct pci_dev *pdev, enum sphw_service_type type); - -void *spnic_get_ppf_uld_by_pdev(struct pci_dev *pdev, enum sphw_service_type type); - -int spnic_get_chip_name_by_hwdev(const void *hwdev, char *ifname); - -void *spnic_get_uld_dev_by_ifname(const char *ifname, enum sphw_service_type type); - -int spnic_get_pf_nic_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]); - -int spnic_get_chip_up_bitmap(struct pci_dev *pdev, bool *is_setted, u8 *valid_up_bitmap); - -int spnic_set_chip_up_bitmap(struct pci_dev *pdev, u8 valid_up_bitmap); - -bool spnic_get_vf_service_load(struct pci_dev *pdev, u16 service); - -int spnic_set_vf_service_load(struct pci_dev *pdev, u16 service, bool vf_srv_load); - -int spnic_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, u16 service, bool en); - -bool spnic_get_vf_load_state(struct pci_dev *pdev); - -int spnic_set_vf_load_state(struct pci_dev *pdev, bool vf_load_state); - -int spnic_attach_nic(struct spnic_lld_dev *lld_dev); - -void spnic_detach_nic(struct spnic_lld_dev *lld_dev); - -void lld_hold(void); -void lld_put(void); -void lld_dev_hold(struct spnic_lld_dev *dev); -void lld_dev_put(struct spnic_lld_dev *dev); -struct spnic_lld_dev *spnic_get_lld_dev_by_ifname(const char *ifname); - -void *spnic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev); - -void spnic_send_event_to_uld(struct pci_dev *pdev, enum sphw_service_type type, - struct sphw_event_info *event); -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c deleted file mode 100644 index d0448d1a6bd38c34495786098d276e9effb1bc80..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c +++ /dev/null @@ -1,778 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "spnic_mag_cmd.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic.h" -#include "sphw_common.h" - -static int mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); -static int mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, - u16 channel); - -int spnic_set_port_enable(void *hwdev, bool enable, u16 channel) -{ - struct mag_cmd_set_port_enable en_state; - u16 out_size = sizeof(en_state); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - if (sphw_func_type(hwdev) == TYPE_VF) - return 0; - - memset(&en_state, 0, sizeof(en_state)); - - nic_cfg = 
sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - en_state.function_id = sphw_global_func_id(hwdev); - en_state.state = enable ? MAG_CMD_TX_ENABLE | MAG_CMD_RX_ENABLE : - MAG_CMD_PORT_DISABLE; - - err = mag_msg_to_mgmt_sync_ch(hwdev, MAG_CMD_SET_PORT_ENABLE, &en_state, - sizeof(en_state), &en_state, &out_size, channel); - if (err || !out_size || en_state.head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, en_state.head.status, out_size, channel); - return -EIO; - } - - return 0; -} - -int spnic_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats) -{ - struct mag_cmd_get_port_stat *port_stats = NULL; - struct mag_cmd_port_stats_info stats_info; - u16 out_size = sizeof(*port_stats); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); - if (!port_stats) - return -ENOMEM; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&stats_info, 0, sizeof(stats_info)); - stats_info.port_id = sphw_physical_port_id(hwdev); - - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_PORT_STAT, - &stats_info, sizeof(stats_info), - port_stats, &out_size); - if (err || !out_size || port_stats->head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n", - err, port_stats->head.status, out_size); - err = -EIO; - goto out; - } - - memcpy(stats, &port_stats->counter, sizeof(*stats)); - -out: - kfree(port_stats); - - return err; -} - -int spnic_set_port_funcs_state(void *hwdev, bool enable) -{ - return 0; -} - -int spnic_reset_port_link_cfg(void *hwdev) -{ - return 0; -} - -int spnic_force_port_relink(void *hwdev) -{ - return 0; -} - -int spnic_set_autoneg(void *hwdev, bool enable) -{ - /* TODO */ - - return 0; -} - -static int spnic_cfg_loopback_mode(struct spnic_nic_cfg *nic_cfg, u8 opcode, u8 *mode, u8 *enable) -{ - struct mag_cmd_cfg_loopback_mode lp; - u16 out_size = sizeof(lp); - int err; - - memset(&lp, 0, sizeof(lp)); - lp.port_id = sphw_physical_port_id(nic_cfg->hwdev); - lp.opcode = opcode; - if (opcode == MGMT_MSG_CMD_OP_SET) { - lp.lp_mode = *mode; - lp.lp_en = *enable; - } - - err = mag_msg_to_mgmt_sync(nic_cfg->hwdev, MAG_CMD_CFG_LOOPBACK_MODE, - &lp, sizeof(lp), &lp, &out_size); - if (err || !out_size || lp.head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to %s loopback mode, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == MGMT_MSG_CMD_OP_SET ? 
"set" : "get", - err, lp.head.status, out_size); - return -EIO; - } - - if (opcode == MGMT_MSG_CMD_OP_GET) { - *mode = lp.lp_mode; - *enable = lp.lp_en; - } - - return 0; -} - -int spnic_get_loopback_mode(void *hwdev, u8 *mode, u8 *enable) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !mode || !enable) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - return spnic_cfg_loopback_mode(nic_cfg, MGMT_MSG_CMD_OP_GET, mode, enable); -} - -#define LOOP_MODE_MIN 1 -#define LOOP_MODE_MAX 6 -int spnic_set_loopback_mode(void *hwdev, u8 mode, u8 enable) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) { - nic_err(nic_cfg->dev_hdl, "Invalid loopback mode %u to set\n", - mode); - return -EINVAL; - } - - return spnic_cfg_loopback_mode(nic_cfg, MGMT_MSG_CMD_OP_SET, &mode, &enable); -} - -int spnic_set_led_status(void *hwdev, enum mag_led_type type, enum mag_led_mode mode) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct mag_cmd_set_led_cfg led_info; - u16 out_size = sizeof(led_info); - int err; - - if (!hwdev) - return -EFAULT; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&led_info, 0, sizeof(led_info)); - - led_info.function_id = sphw_global_func_id(hwdev); - led_info.type = type; - led_info.mode = mode; - - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_SET_LED_CFG, &led_info, - sizeof(led_info), &led_info, &out_size); - if (err || led_info.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n", - err, led_info.head.status, out_size); - return -EIO; - } - - return 0; -} - -int spnic_get_port_info(void *hwdev, struct nic_port_info *port_info, u16 channel) -{ - struct mag_cmd_get_port_info port_msg; - u16 out_size = sizeof(port_msg); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !port_info) - return -EINVAL; - - memset(&port_msg, 0, sizeof(port_msg)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - port_msg.port_id = sphw_physical_port_id(hwdev); - - err = mag_msg_to_mgmt_sync_ch(hwdev, MAG_CMD_GET_PORT_INFO, &port_msg, - sizeof(port_msg), &port_msg, &out_size, - channel); - if (err || !out_size || port_msg.head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to get port info, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, port_msg.head.status, out_size, channel); - return -EIO; - } - - port_info->autoneg_cap = port_msg.an_support; - port_info->autoneg_state = port_msg.an_en; - port_info->duplex = port_msg.duplex; - port_info->port_type = port_msg.wire_type; - port_info->speed = port_msg.speed; - port_info->fec = port_msg.fec; - port_info->supported_mode = port_msg.supported_mode; - port_info->advertised_mode = port_msg.advertised_mode; - - return 0; -} - -int spnic_get_speed(void *hwdev, enum mag_cmd_port_speed *speed, u16 channel) -{ - struct nic_port_info port_info = {0}; - int err; - - if (!hwdev || !speed) - return -EINVAL; - - err = spnic_get_port_info(hwdev, &port_info, channel); - if (err) - return err; - - *speed = port_info.speed; - - return 0; -} - -int spnic_set_link_settings(void *hwdev, struct spnic_link_ksettings *settings) -{ - struct mag_cmd_set_port_cfg info; - u16 out_size = sizeof(info); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !settings) - return -EINVAL; - - memset(&info, 0, sizeof(info)); - - nic_cfg = 
sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - info.port_id = sphw_physical_port_id(hwdev); - info.config_bitmap = settings->valid_bitmap; - info.autoneg = settings->autoneg; - info.speed = settings->speed; - info.fec = settings->fec; - - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_SET_PORT_CFG, &info, - sizeof(info), &info, &out_size); - if (err || !out_size || info.head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n", - err, info.head.status, out_size); - return -EIO; - } - - return info.head.status; -} - -int spnic_get_link_state(void *hwdev, u8 *link_state) -{ - struct mag_cmd_get_link_status get_link; - u16 out_size = sizeof(get_link); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !link_state) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&get_link, 0, sizeof(get_link)); - get_link.port_id = sphw_physical_port_id(hwdev); - - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_LINK_STATUS, &get_link, - sizeof(get_link), &get_link, &out_size); - if (err || !out_size || get_link.head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n", - err, get_link.head.status, out_size); - return -EIO; - } - - *link_state = get_link.status; - - return 0; -} - -void spnic_notify_vf_link_status(struct spnic_nic_cfg *nic_cfg, u16 vf_id, u8 link_status) -{ - struct mag_cmd_get_link_status link; - struct vf_data_storage *vf_infos = nic_cfg->vf_infos; - u16 out_size = sizeof(link); - int err; - - memset(&link, 0, sizeof(link)); - if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) { - link.status = link_status; - link.port_id = sphw_physical_port_id(nic_cfg->hwdev); - err = sphw_mbox_to_vf(nic_cfg->hwdev, vf_id, SPHW_MOD_HILINK, - MAG_CMD_GET_LINK_STATUS, &link, sizeof(link), &link, - &out_size, 0, SPHW_CHANNEL_NIC); - if (err || !out_size || link.head.status) - nic_err(nic_cfg->dev_hdl, - "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n", - HW_VF_ID_TO_OS(vf_id), err, - link.head.status, out_size); - } -} - -void spnic_notify_all_vfs_link_changed(void *hwdev, u8 link_status) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u16 i; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - nic_cfg->link_status = link_status; - for (i = 1; i <= nic_cfg->max_vfs; i++) { - if (!nic_cfg->vf_infos[HW_VF_ID_TO_OS(i)].link_forced) - spnic_notify_vf_link_status(nic_cfg, i, link_status); - } -} - -static int spnic_get_vf_link_status_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size) -{ - struct vf_data_storage *vf_infos = nic_cfg->vf_infos; - struct mag_cmd_get_link_status *get_link = buf_out; - bool link_forced, link_up; - - link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced; - link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up; - - if (link_forced) - get_link->status = link_up ? 
SPNIC_LINK_UP : SPNIC_LINK_DOWN; - else - get_link->status = nic_cfg->link_status; - - get_link->head.status = 0; - *out_size = sizeof(*get_link); - - return 0; -} - -int spnic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info) -{ - /*TO DO */ - return 0; -} - -static void get_port_info(void *hwdev, struct mag_cmd_get_link_status *link_status, - struct sphw_event_link_info *link_info) -{ - struct nic_port_info port_info = {0}; - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (sphw_func_type(hwdev) != TYPE_VF && - link_status->status == SPHW_EVENT_LINK_UP) { - err = spnic_get_port_info(hwdev, &port_info, SPHW_CHANNEL_NIC); - if (err) { - nic_warn(nic_cfg->dev_hdl, "Failed to get port info\n"); - } else { - link_info->valid = 1; - link_info->port_type = port_info.port_type; - link_info->autoneg_cap = port_info.autoneg_cap; - link_info->autoneg_state = port_info.autoneg_state; - link_info->duplex = port_info.duplex; - link_info->speed = port_info.speed; - spnic_refresh_nic_cfg(hwdev, &port_info); - } - } -} - -static void link_status_event_handler(void *hwdev, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size) -{ - struct mag_cmd_get_link_status *link_status = NULL; - struct mag_cmd_get_link_status *ret_link_status = NULL; - struct sphw_event_info event_info = {0}; - struct sphw_event_link_info *link_info = &event_info.link_info; - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - link_status = buf_in; - sdk_info(nic_cfg->dev_hdl, "Link status report received, func_id: %u, status: %u\n", - sphw_global_func_id(hwdev), link_status->status); - - sphw_link_event_stats(hwdev, link_status->status); - - /* link event reported only after set vport enable */ - get_port_info(hwdev, link_status, link_info); - - event_info.type = link_status->status ? SPHW_EVENT_LINK_UP : SPHW_EVENT_LINK_DOWN; - - sphw_event_callback(hwdev, &event_info); - - if (sphw_func_type(hwdev) != TYPE_VF) { - spnic_notify_all_vfs_link_changed(hwdev, link_status->status); - ret_link_status = buf_out; - ret_link_status->head.status = 0; - *out_size = sizeof(*ret_link_status); - } -} - -static void cable_plug_event(void *hwdev, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - struct mag_cmd_wire_event *plug_event = buf_in; - struct spnic_port_routine_cmd *rt_cmd = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - struct sphw_event_info event_info; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - rt_cmd = &nic_cfg->rt_cmd; - - mutex_lock(&nic_cfg->sfp_mutex); - rt_cmd->mpu_send_sfp_abs = false; - rt_cmd->mpu_send_sfp_info = false; - mutex_unlock(&nic_cfg->sfp_mutex); - - memset(&event_info, 0, sizeof(event_info)); - event_info.type = SPHW_EVENT_PORT_MODULE_EVENT; - event_info.module_event.type = plug_event->status ? 
- SPHW_PORT_MODULE_CABLE_PLUGGED : - SPHW_PORT_MODULE_CABLE_UNPLUGGED; - - *out_size = sizeof(*plug_event); - plug_event = buf_out; - plug_event->head.status = 0; - - sphw_event_callback(hwdev, &event_info); -} - -static void port_sfp_info_event(void *hwdev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct mag_cmd_get_xsfp_info *sfp_info = buf_in; - struct spnic_port_routine_cmd *rt_cmd = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (in_size != sizeof(*sfp_info)) { - sdk_err(nic_cfg->dev_hdl, "Invalid sfp info cmd, length: %u, should be %zu\n", - in_size, sizeof(*sfp_info)); - return; - } - - rt_cmd = &nic_cfg->rt_cmd; - mutex_lock(&nic_cfg->sfp_mutex); - memcpy(&rt_cmd->std_sfp_info, sfp_info, - sizeof(struct mag_cmd_get_xsfp_info)); - rt_cmd->mpu_send_sfp_info = true; - mutex_unlock(&nic_cfg->sfp_mutex); -} - -static void port_sfp_abs_event(void *hwdev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct mag_cmd_get_xsfp_present *sfp_abs = buf_in; - struct spnic_port_routine_cmd *rt_cmd = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (in_size != sizeof(*sfp_abs)) { - sdk_err(nic_cfg->dev_hdl, "Invalid sfp absent cmd, length: %u, should be %zu\n", - in_size, sizeof(*sfp_abs)); - return; - } - - rt_cmd = &nic_cfg->rt_cmd; - mutex_lock(&nic_cfg->sfp_mutex); - memcpy(&rt_cmd->abs, sfp_abs, sizeof(struct mag_cmd_get_xsfp_present)); - rt_cmd->mpu_send_sfp_abs = true; - mutex_unlock(&nic_cfg->sfp_mutex); -} - -static bool spnic_if_sfp_absent(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_port_routine_cmd *rt_cmd = NULL; - struct mag_cmd_get_xsfp_present sfp_abs; - u8 port_id = sphw_physical_port_id(hwdev); - u16 out_size = sizeof(sfp_abs); - int err; - bool sfp_abs_status; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&sfp_abs, 0, sizeof(sfp_abs)); - - rt_cmd = &nic_cfg->rt_cmd; - mutex_lock(&nic_cfg->sfp_mutex); - if (rt_cmd->mpu_send_sfp_abs) { - if (rt_cmd->abs.head.status) { - mutex_unlock(&nic_cfg->sfp_mutex); - return true; - } - - sfp_abs_status = (bool)rt_cmd->abs.abs_status; - mutex_unlock(&nic_cfg->sfp_mutex); - return sfp_abs_status; - } - mutex_unlock(&nic_cfg->sfp_mutex); - - sfp_abs.port_id = port_id; - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_XSFP_PRESENT, - &sfp_abs, sizeof(sfp_abs), &sfp_abs, - &out_size); - if (sfp_abs.head.status || err || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Failed to get port%u sfp absent status, err: %d, status: 0x%x, out size: 0x%x\n", - port_id, err, sfp_abs.head.status, out_size); - return true; - } - - return (sfp_abs.abs_status == 0 ? 
false : true); -} - -int spnic_get_sfp_eeprom(void *hwdev, u8 *data, u32 len) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_port_routine_cmd *rt_cmd = NULL; - struct mag_cmd_get_xsfp_info sfp_info; - u16 out_size = sizeof(sfp_info); - int err; - - if (!hwdev || !data) - return -EINVAL; - - if (spnic_if_sfp_absent(hwdev)) - return -ENXIO; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&sfp_info, 0, sizeof(sfp_info)); - - rt_cmd = &nic_cfg->rt_cmd; - mutex_lock(&nic_cfg->sfp_mutex); - if (rt_cmd->mpu_send_sfp_info) { - if (rt_cmd->std_sfp_info.head.status) { - mutex_unlock(&nic_cfg->sfp_mutex); - return -EIO; - } - - memcpy(data, rt_cmd->std_sfp_info.sfp_info, len); - mutex_unlock(&nic_cfg->sfp_mutex); - return 0; - } - mutex_unlock(&nic_cfg->sfp_mutex); - - sfp_info.port_id = sphw_physical_port_id(hwdev); - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_XSFP_INFO, &sfp_info, - sizeof(sfp_info), &sfp_info, &out_size); - if (sfp_info.head.status || err || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Failed to get port%u sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n", - sphw_physical_port_id(hwdev), err, - sfp_info.head.status, out_size); - return -EIO; - } - - memcpy(data, sfp_info.sfp_info, len); - - return 0; -} - -int spnic_get_sfp_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_port_routine_cmd *rt_cmd = NULL; - u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; - int err; - - if (!hwdev || !sfp_type || !sfp_type_ext) - return -EINVAL; - - if (spnic_if_sfp_absent(hwdev)) - return -ENXIO; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - rt_cmd = &nic_cfg->rt_cmd; - - mutex_lock(&nic_cfg->sfp_mutex); - if (rt_cmd->mpu_send_sfp_info) { - if (rt_cmd->std_sfp_info.head.status) { - mutex_unlock(&nic_cfg->sfp_mutex); - return -EIO; - } - - *sfp_type = rt_cmd->std_sfp_info.sfp_info[0]; - *sfp_type_ext = rt_cmd->std_sfp_info.sfp_info[1]; - mutex_unlock(&nic_cfg->sfp_mutex); - return 0; - } - mutex_unlock(&nic_cfg->sfp_mutex); - - err = spnic_get_sfp_eeprom(hwdev, (u8 *)sfp_data, STD_SFP_INFO_MAX_SIZE); - if (err) - return err; - - *sfp_type = sfp_data[0]; - *sfp_type_ext = sfp_data[1]; - - return 0; -} - -static const struct vf_msg_handler vf_mag_cmd_handler[] = { - { - .cmd = MAG_CMD_GET_LINK_STATUS, - .handler = spnic_get_vf_link_status_msg_handler, - }, -}; - -/* pf/ppf handler mbox msg from vf */ -int spnic_pf_mag_mbox_handler(void *hwdev, void *pri_handle, u16 vf_id, - u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - u32 index, cmd_size = ARRAY_LEN(vf_mag_cmd_handler); - struct spnic_nic_cfg *nic_cfg = NULL; - const struct vf_msg_handler *handler = NULL; - - if (!hwdev) - return -EFAULT; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - for (index = 0; index < cmd_size; index++) { - handler = &vf_mag_cmd_handler[index]; - if (cmd == handler->cmd) - return handler->handler(nic_cfg, vf_id, buf_in, in_size, - buf_out, out_size); - } - - nic_warn(nic_cfg->dev_hdl, "NO handler for mag cmd: %u received from vf id: %u\n", - cmd, vf_id); - - return -EINVAL; -} - -static struct nic_event_handler mag_cmd_handler[] = { - { - .cmd = MAG_CMD_GET_LINK_STATUS, - .handler = link_status_event_handler, - }, - - { - .cmd = MAG_CMD_WIRE_EVENT, - .handler = cable_plug_event, - }, - - { - .cmd = MAG_CMD_GET_XSFP_INFO, - .handler = port_sfp_info_event, - }, - - { - .cmd = MAG_CMD_GET_XSFP_PRESENT, - .handler = port_sfp_abs_event, - }, -}; - -int 
spnic_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u32 size = ARRAY_LEN(mag_cmd_handler); - u32 i; - - if (!hwdev) - return -EINVAL; - - *out_size = 0; - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - for (i = 0; i < size; i++) { - if (cmd == mag_cmd_handler[i].cmd) { - mag_cmd_handler[i].handler(hwdev, buf_in, in_size, - buf_out, out_size); - break; - } - } - - /* can't find this event cmd */ - if (i == size) - sdk_warn(nic_cfg->dev_hdl, "Unsupported mag event, cmd: %u\n", - cmd); - - return 0; -} - -int spnic_vf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - return spnic_mag_event_handler(hwdev, pri_handle, cmd, buf_in, in_size, buf_out, out_size); -} - -/* pf/ppf handler mgmt cpu report hilink event */ -void spnic_pf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - spnic_mag_event_handler(hwdev, pri_handle, cmd, buf_in, in_size, buf_out, out_size); -} - -static int _mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u16 channel) -{ - u32 i, cmd_cnt = ARRAY_LEN(vf_mag_cmd_handler); - bool cmd_to_pf = false; - - if (sphw_func_type(hwdev) == TYPE_VF) { - for (i = 0; i < cmd_cnt; i++) { - if (cmd == vf_mag_cmd_handler[i].cmd) { - cmd_to_pf = true; - break; - } - } - } - - if (cmd_to_pf) - return sphw_mbox_to_pf(hwdev, SPHW_MOD_HILINK, cmd, buf_in, in_size, buf_out, - out_size, 0, channel); - - return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_HILINK, cmd, buf_in, - in_size, buf_out, out_size, 0, channel); -} - -static int mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - return _mag_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, - out_size, SPHW_CHANNEL_NIC); -} - -static int mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, - u16 channel) -{ - return _mag_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, - out_size, channel); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h b/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h deleted file mode 100644 index 4e65b7af115b6ebdbfc90cde4292d5575d891702..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h +++ /dev/null @@ -1,643 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_MAG_CMD_H -#define SPNIC_MAG_CMD_H - -#include "sphw_mgmt_msg_base.h" - -enum mag_cmd { - SERDES_CMD_PROCESS = 0, - - MAG_CMD_SET_PORT_CFG = 1, - MAG_CMD_SET_PORT_ADAPT = 2, - MAG_CMD_CFG_LOOPBACK_MODE = 3, - - MAG_CMD_GET_PORT_ENABLE = 5, - MAG_CMD_SET_PORT_ENABLE = 6, - MAG_CMD_GET_LINK_STATUS = 7, - MAG_CMD_SET_LINK_FOLLOW = 8, - MAG_CMD_SET_PMA_ENABLE = 9, - MAG_CMD_CFG_FEC_MODE = 10, - - MAG_CMD_CFG_AN_TYPE = 12, /* reserved for future use */ - - MAG_CMD_SET_PANGEA_ADAPT = 15, - - MAG_CMD_CFG_BIOS_LINK_CFG = 31, - MAG_CMD_RESTORE_LINK_CFG = 32, - MAG_CMD_ACTIVATE_BIOS_LINK_CFG = 33, - - /* LED */ - MAG_CMD_SET_LED_CFG = 50, - - /* PHY */ - MAG_CMD_GET_PHY_INIT_STATUS = 55, /* reserved for future use */ - - MAG_CMD_GET_XSFP_INFO = 60, - MAG_CMD_SET_XSFP_ENABLE = 61, - MAG_CMD_GET_XSFP_PRESENT = 62, - MAG_CMD_SET_XSFP_RW = 63, - MAG_CMD_CFG_XSFP_TEMPERATURE = 64, - - MAG_CMD_WIRE_EVENT = 100, - 
MAG_CMD_LINK_ERR_EVENT = 101, - - MAG_CMD_EVENT_PORT_INFO = 150, - MAG_CMD_GET_PORT_STAT = 151, - MAG_CMD_CLR_PORT_STAT = 152, - MAG_CMD_GET_PORT_INFO = 153, - MAG_CMD_GET_PCS_ERR_CNT = 154, - MAG_CMD_GET_MAG_CNT = 155, - MAG_CMD_DUMP_ANTRAIN_INFO = 156, - - MAG_CMD_MAX = 0xFF, -}; - -enum mag_cmd_port_speed { - PORT_SPEED_NOT_SET = 0, - PORT_SPEED_10MB = 1, - PORT_SPEED_100MB = 2, - PORT_SPEED_1GB = 3, - PORT_SPEED_10GB = 4, - PORT_SPEED_25GB = 5, - PORT_SPEED_40GB = 6, - PORT_SPEED_50GB = 7, - PORT_SPEED_100GB = 8, - PORT_SPEED_200GB = 9, - PORT_SPEED_UNKNOWN -}; - -enum mag_cmd_port_an { - PORT_AN_NOT_SET = 0, - PORT_CFG_AN_ON = 1, - PORT_CFG_AN_OFF = 2 -}; - -enum mag_cmd_port_fec { - PORT_FEC_NOT_SET = 0, - PORT_FEC_RSFEC = 1, - PORT_FEC_BASEFEC = 2, - PORT_FEC_NOFEC = 3, - PORT_FEC_LLRSFEC = 4 -}; - -enum mag_cmd_port_lanes { - PORT_LANES_NOT_SET = 0, - PORT_LANES_X1 = 1, - PORT_LANES_X2 = 2, - PORT_LANES_X4 = 4, - PORT_LANES_X8 = 8, /* reserved for future use */ -}; - -enum mag_cmd_port_duplex { - PORT_DUPLEX_HALF = 0, - PORT_DUPLEX_FULL = 1 -}; - -/* mag_cmd_set_port_cfg config bitmap */ -#define MAG_CMD_SET_SPEED 0x1 -#define MAG_CMD_SET_AUTONEG 0x2 -#define MAG_CMD_SET_FEC 0x4 -#define MAG_CMD_SET_LANES 0x8 -struct mag_cmd_set_port_cfg { - struct mgmt_msg_head head; - - u8 port_id; - u8 rsvd0[3]; - - u32 config_bitmap; - u8 speed; - u8 autoneg; - u8 fec; - u8 lanes; - u8 rsvd1[20]; -}; - -/* mag supported/advertised link mode bitmap */ -enum mag_cmd_link_mode { - LINK_MODE_GE = 0, - LINK_MODE_10GE_BASE_R = 1, - LINK_MODE_25GE_BASE_R = 2, - LINK_MODE_40GE_BASE_R4 = 3, - LINK_MODE_50GE_BASE_R = 4, - LINK_MODE_50GE_BASE_R2 = 5, - LINK_MODE_100GE_BASE_R = 6, - LINK_MODE_100GE_BASE_R2 = 7, - LINK_MODE_100GE_BASE_R4 = 8, - LINK_MODE_200GE_BASE_R2 = 9, - LINK_MODE_200GE_BASE_R4 = 10, - LINK_MODE_MAX_NUMBERS, - - LINK_MODE_UNKNOWN = 0xFFFF -}; - -struct mag_cmd_get_port_info { - struct mgmt_msg_head head; - - u8 port_id; - u8 rsvd0[3]; - - u8 wire_type; - u8 an_support; - u8 an_en; - u8 duplex; - - u8 speed; - u8 fec; - u8 lanes; - u8 rsvd1; - - u32 supported_mode; - u32 advertised_mode; - u8 rsvd2[8]; -}; - -#define MAG_CMD_OPCODE_GET 0 -#define MAG_CMD_OPCODE_SET 1 -struct mag_cmd_set_port_adapt { - struct mgmt_msg_head head; - - u8 port_id; - u8 opcode; /* 0:get adapt info 1:set adapt */ - u8 enable; - u8 rsvd0; - u32 speed_mode; - u32 rsvd1[3]; -}; - -#define MAG_CMD_LP_MODE_SDS_S_TX2RX 1 -#define MAG_CMD_LP_MODE_SDS_P_RX2TX 2 -#define MAG_CMD_LP_MODE_SDS_P_TX2RX 3 -#define MAG_CMD_LP_MODE_MAC_RX2TX 4 -#define MAG_CMD_LP_MODE_MAC_TX2RX 5 -#define MAG_CMD_LP_MODE_TXDP2RXDP 6 -struct mag_cmd_cfg_loopback_mode { - struct mgmt_msg_head head; - - u8 port_id; - u8 opcode; /* 0:get loopback mode 1:set loopback mode */ - u8 lp_mode; - u8 lp_en; /* 0:disable 1:enable */ - - u32 rsvd0[2]; -}; - -#define MAG_CMD_PORT_DISABLE 0x0 -#define MAG_CMD_TX_ENABLE 0x1 -#define MAG_CMD_RX_ENABLE 0x2 - -struct mag_cmd_set_port_enable { - struct mgmt_msg_head head; - - u16 function_id; /* function_id must not exceed the max supported pf_id (32) */ - u16 rsvd0; - - u8 state; /* bitmap bit0:tx_en bit1:rx_en */ - u8 rsvd1[3]; -}; - -struct mag_cmd_get_port_enable { - struct mgmt_msg_head head; - - u8 port; - u8 state; /* bitmap bit0:tx_en bit1:rx_en */ - u8 rsvd0[2]; -}; - -#define PMA_FOLLOW_DEFAULT 0x0 -#define PMA_FOLLOW_ENABLE 0x1 -#define PMA_FOLLOW_DISABLE 0x2 -/* the physical port disables link follow only when all PFs of the port are set to follow disable */ -struct mag_cmd_set_link_follow { - struct 
mgmt_msg_head head; - - u16 function_id; /* function_id must not exceed the max supported pf_id (32) */ - u16 rsvd0; - - u8 follow; - u8 rsvd1[3]; -}; - -/* firmware also uses this cmd to report link events to the driver */ -struct mag_cmd_get_link_status { - struct mgmt_msg_head head; - - u8 port_id; - u8 status; /* 0:link down 1:link up */ - u8 rsvd0[2]; -}; - -struct mag_cmd_set_pma_enable { - struct mgmt_msg_head head; - - u16 function_id; /* function_id must not exceed the max supported pf_id (32) */ - u16 enable; -}; - -struct mag_cmd_cfg_an_type { - struct mgmt_msg_head head; - - u8 port_id; - u8 opcode; /* 0:get an type 1:set an type */ - u8 rsvd0[2]; - - u32 an_type; /* 0:ieee 1:25G/50 eth consortium */ -}; - -struct mag_cmd_cfg_fec_mode { - struct mgmt_msg_head head; - - u8 port_id; - u8 opcode; /* 0:get fec mode 1:set fec mode */ - u8 fec; - u8 rsvd0; -}; - -struct mag_cmd_cfg_bios_link_cfg { - struct mgmt_msg_head head; - - u8 port_id; - u8 opcode; /* 0:get bios link info 1:set bios link cfg */ - u8 clear; - u8 rsvd0; - - u32 wire_type; - u8 an_en; - u8 speed; - u8 fec; - u8 rsvd1; - u32 speed_mode; - u32 rsvd2[3]; -}; - -struct mag_cmd_restore_link_cfg { - struct mgmt_msg_head head; - - u8 port_id; - u8 rsvd[7]; -}; - -struct mag_cmd_activate_bios_link_cfg { - struct mgmt_msg_head head; - - u32 rsvd[8]; -}; - -/* led type */ -enum mag_led_type { - MAG_CMD_LED_TYPE_ALARM = 0x0, - MAG_CMD_LED_TYPE_LOW_SPEED = 0x1, - MAG_CMD_LED_TYPE_HIGH_SPEED = 0x2 -}; - -/* led mode */ -enum mag_led_mode { - MAG_CMD_LED_MODE_DEFAULT = 0x0, - MAG_CMD_LED_MODE_FORCE_ON = 0x1, - MAG_CMD_LED_MODE_FORCE_OFF = 0x2, - MAG_CMD_LED_MODE_FORCE_BLINK_1HZ = 0x3, - MAG_CMD_LED_MODE_FORCE_BLINK_2HZ = 0x4, - MAG_CMD_LED_MODE_FORCE_BLINK_4HZ = 0x5, - MAG_CMD_LED_MODE_1HZ = 0x6, - MAG_CMD_LED_MODE_2HZ = 0x7, - MAG_CMD_LED_MODE_4HZ = 0x8, -}; - -/* the led reports an alarm when any pf of the port is in the alarm state */ -struct mag_cmd_set_led_cfg { - struct mgmt_msg_head head; - - u16 function_id; - u8 type; - u8 mode; -}; - -#define XSFP_INFO_MAX_SIZE 640 -/* xsfp wire type, refer to cmis protocol definition */ -enum mag_wire_type { - MAG_CMD_WIRE_TYPE_UNKNOWN = 0x0, - MAG_CMD_WIRE_TYPE_MM = 0x1, - MAG_CMD_WIRE_TYPE_SM = 0x2, - MAG_CMD_WIRE_TYPE_COPPER = 0x3, - MAG_CMD_WIRE_TYPE_ACC = 0x4, - MAG_CMD_WIRE_TYPE_BASET = 0x5, - MAG_CMD_WIRE_TYPE_AOC = 0x40, - MAG_CMD_WIRE_TYPE_ELECTRIC = 0x41, - MAG_CMD_WIRE_TYPE_BACKPLANE = 0x42 -}; - -struct mag_cmd_get_xsfp_info { - struct mgmt_msg_head head; - - u8 port_id; - u8 wire_type; - u16 out_len; - u32 rsvd; - u8 sfp_info[XSFP_INFO_MAX_SIZE]; -}; - -#define MAG_CMD_XSFP_DISABLE 0x0 -#define MAG_CMD_XSFP_ENABLE 0x1 - -struct mag_cmd_set_xsfp_enable { - struct mgmt_msg_head head; - - u16 function_id; /* function_id must not exceed the max supported pf_id (32) */ - u16 rsvd0; - - u8 status; - u8 rsvd1[3]; -}; - -#define MAG_CMD_XSFP_PRESENT 0x0 -#define MAG_CMD_XSFP_ABSENT 0x1 -struct mag_cmd_get_xsfp_present { - struct mgmt_msg_head head; - - u8 port_id; - u8 abs_status; /* 0:present, 1:absent */ - u8 rsvd[2]; -}; - -#define MAG_CMD_XSFP_READ 0x0 -#define MAG_CMD_XSFP_WRITE 0x1 -struct mag_cmd_set_xsfp_rw { - struct mgmt_msg_head head; - - u8 port_id; - u8 operation; /* 0: read; 1: write */ - u8 value; - u8 rsvd0; - u32 devaddr; - u32 offset; - u32 rsvd1; -}; - -struct mag_cmd_cfg_xsfp_temperature { - struct mgmt_msg_head head; - - u8 opcode; /* 0:read 1:write */ - u8 rsvd0[3]; - s32 max_temp; - s32 min_temp; -}; - -struct mag_cmd_get_xsfp_temperature { - struct mgmt_msg_head head; - - 
s16 sfp_temp[8]; - u8 rsvd[32]; - s32 max_temp; - s32 min_temp; -}; - -/* xsfp plug event */ -struct mag_cmd_wire_event { - struct mgmt_msg_head head; - - u8 port_id; - u8 status; /* 0:present, 1:absent */ - u8 rsvd[2]; -}; - -/* link err type definition */ -#define MAG_CMD_ERR_XSFP_UNKNOWN 0x0 -struct mag_cmd_link_err_event { - struct mgmt_msg_head head; - - u8 port_id; - u8 link_err_type; - u8 rsvd[2]; -}; - -#define MAG_PARAM_TYPE_DEFAULT_CFG 0x0 -#define MAG_PARAM_TYPE_BIOS_CFG 0x1 -#define MAG_PARAM_TYPE_TOOL_CFG 0x2 -#define MAG_PARAM_TYPE_FINAL_CFG 0x3 -#define MAG_PARAM_TYPE_WIRE_INFO 0x4 -#define MAG_PARAM_TYPE_ADAPT_INFO 0x5 -#define MAG_PARAM_TYPE_MAX_CNT 0x6 -struct param_head { - u8 valid_len; - u8 info_type; - u8 rsvd[2]; -}; - -struct mag_port_link_param { - struct param_head head; - - u8 an; - u8 fec; - u8 speed; - u8 rsvd0; - - u32 used; - u32 an_fec_ability; - u32 an_speed_ability; - u32 an_pause_ability; -}; - -struct mag_port_wire_info { - struct param_head head; - - u8 status; - u8 rsvd0[3]; - - u8 wire_type; - u8 default_fec; - u8 speed; - u8 rsvd1; - u32 speed_ability; -}; - -struct mag_port_adapt_info { - struct param_head head; - - u32 adapt_en; - u32 flash_adapt; - u32 rsvd0[2]; - - u32 wire_node; - u32 an_en; - u32 speed; - u32 fec; -}; - -struct mag_port_param_info { - u8 parameter_cnt; - u8 lane_id; - u8 lane_num; - u8 rsvd0; - - struct mag_port_link_param default_cfg; - struct mag_port_link_param bios_cfg; - struct mag_port_link_param tool_cfg; - struct mag_port_link_param final_cfg; - - struct mag_port_wire_info wire_info; - struct mag_port_adapt_info adapt_info; -}; - -struct mag_cmd_event_port_info { - struct mgmt_msg_head head; - - u8 port_id; - u8 event_type; - u8 rsvd0[2]; - - u8 vendor_name[16]; - u32 port_type; - u32 port_sub_type; - u32 cable_length; - u8 cable_temp; - u8 max_speed; - u8 sfp_type; - u8 rsvd1; - u32 power[4]; - - u8 an_state; - u8 fec; - u16 speed; - - u8 gpio_insert; /* 0:present 1:absent */ - u8 alos; - u8 rx_los; - u8 pma_ctrl; - - u32 pma_fifo_reg; - u32 pma_signal_ok_reg; - u32 pcs_64_66b_reg; - u32 rf_lf; - u8 pcs_link; - u8 pcs_mac_link; - u8 tx_enable; - u8 rx_enable; - u32 pcs_err_cnt; - - u8 eq_data[38]; - u8 rsvd2[2]; - - u32 his_link_machine_state; - u32 cur_link_machine_state; - u8 his_machine_state_data[128]; - u8 cur_machine_state_data[128]; - u8 his_machine_state_length; - u8 cur_machine_state_length; - - struct mag_port_param_info param_info; - u8 rsvd3[360]; -}; - -struct mag_cmd_port_stats { - u64 mac_tx_fragment_pkt_num; - u64 mac_tx_undersize_pkt_num; - u64 mac_tx_undermin_pkt_num; - u64 mac_tx_64_oct_pkt_num; - u64 mac_tx_65_127_oct_pkt_num; - u64 mac_tx_128_255_oct_pkt_num; - u64 mac_tx_256_511_oct_pkt_num; - u64 mac_tx_512_1023_oct_pkt_num; - u64 mac_tx_1024_1518_oct_pkt_num; - u64 mac_tx_1519_2047_oct_pkt_num; - u64 mac_tx_2048_4095_oct_pkt_num; - u64 mac_tx_4096_8191_oct_pkt_num; - u64 mac_tx_8192_9216_oct_pkt_num; - u64 mac_tx_9217_12287_oct_pkt_num; - u64 mac_tx_12288_16383_oct_pkt_num; - u64 mac_tx_1519_max_bad_pkt_num; - u64 mac_tx_1519_max_good_pkt_num; - u64 mac_tx_oversize_pkt_num; - u64 mac_tx_jabber_pkt_num; - u64 mac_tx_bad_pkt_num; - u64 mac_tx_bad_oct_num; - u64 mac_tx_good_pkt_num; - u64 mac_tx_good_oct_num; - u64 mac_tx_total_pkt_num; - u64 mac_tx_total_oct_num; - u64 mac_tx_uni_pkt_num; - u64 mac_tx_multi_pkt_num; - u64 mac_tx_broad_pkt_num; - u64 mac_tx_pause_num; - u64 mac_tx_pfc_pkt_num; - u64 mac_tx_pfc_pri0_pkt_num; - u64 mac_tx_pfc_pri1_pkt_num; - u64 mac_tx_pfc_pri2_pkt_num; - u64 
mac_tx_pfc_pri3_pkt_num; - u64 mac_tx_pfc_pri4_pkt_num; - u64 mac_tx_pfc_pri5_pkt_num; - u64 mac_tx_pfc_pri6_pkt_num; - u64 mac_tx_pfc_pri7_pkt_num; - u64 mac_tx_control_pkt_num; - u64 mac_tx_err_all_pkt_num; - u64 mac_tx_from_app_good_pkt_num; - u64 mac_tx_from_app_bad_pkt_num; - - u64 mac_rx_fragment_pkt_num; - u64 mac_rx_undersize_pkt_num; - u64 mac_rx_undermin_pkt_num; - u64 mac_rx_64_oct_pkt_num; - u64 mac_rx_65_127_oct_pkt_num; - u64 mac_rx_128_255_oct_pkt_num; - u64 mac_rx_256_511_oct_pkt_num; - u64 mac_rx_512_1023_oct_pkt_num; - u64 mac_rx_1024_1518_oct_pkt_num; - u64 mac_rx_1519_2047_oct_pkt_num; - u64 mac_rx_2048_4095_oct_pkt_num; - u64 mac_rx_4096_8191_oct_pkt_num; - u64 mac_rx_8192_9216_oct_pkt_num; - u64 mac_rx_9217_12287_oct_pkt_num; - u64 mac_rx_12288_16383_oct_pkt_num; - u64 mac_rx_1519_max_bad_pkt_num; - u64 mac_rx_1519_max_good_pkt_num; - u64 mac_rx_oversize_pkt_num; - u64 mac_rx_jabber_pkt_num; - u64 mac_rx_bad_pkt_num; - u64 mac_rx_bad_oct_num; - u64 mac_rx_good_pkt_num; - u64 mac_rx_good_oct_num; - u64 mac_rx_total_pkt_num; - u64 mac_rx_total_oct_num; - u64 mac_rx_uni_pkt_num; - u64 mac_rx_multi_pkt_num; - u64 mac_rx_broad_pkt_num; - u64 mac_rx_pause_num; - u64 mac_rx_pfc_pkt_num; - u64 mac_rx_pfc_pri0_pkt_num; - u64 mac_rx_pfc_pri1_pkt_num; - u64 mac_rx_pfc_pri2_pkt_num; - u64 mac_rx_pfc_pri3_pkt_num; - u64 mac_rx_pfc_pri4_pkt_num; - u64 mac_rx_pfc_pri5_pkt_num; - u64 mac_rx_pfc_pri6_pkt_num; - u64 mac_rx_pfc_pri7_pkt_num; - u64 mac_rx_control_pkt_num; - u64 mac_rx_sym_err_pkt_num; - u64 mac_rx_fcs_err_pkt_num; - u64 mac_rx_send_app_good_pkt_num; - u64 mac_rx_send_app_bad_pkt_num; - u64 mac_rx_unfilter_pkt_num; -}; - -struct mag_cmd_port_stats_info { - struct mgmt_msg_head head; - - u8 port_id; - u8 rsvd0[3]; -}; - -struct mag_cmd_get_port_stat { - struct mgmt_msg_head head; - - struct mag_cmd_port_stats counter; - u64 rsvd1[15]; -}; - -struct mag_cmd_clr_port_stat { - struct mgmt_msg_head head; - - u8 port_id; - u8 rsvd0[3]; -}; - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_main.c b/drivers/net/ethernet/ramaxel/spnic/spnic_main.c deleted file mode 100644 index 5b9822ebd806fb10468e9a3332362cd265e51f56..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_main.c +++ /dev/null @@ -1,924 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_common.h" -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "sphw_mt.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic_io.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" -#include "spnic_rx.h" -#include "spnic_lld.h" -#include "spnic_rss.h" -#include "spnic_dcb.h" - -#define DEFAULT_POLL_WEIGHT 64 -static unsigned int poll_weight = DEFAULT_POLL_WEIGHT; -module_param(poll_weight, uint, 0444); -MODULE_PARM_DESC(poll_weight, "Number of packets for NAPI budget (default=64)"); - -#define SPNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 0 -#define SPNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 0 -#define SPNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 - -static unsigned char qp_pending_limit = SPNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; -module_param(qp_pending_limit, byte, 0444); -MODULE_PARM_DESC(qp_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit (default=0)"); - -static unsigned char qp_coalesc_timer_cfg = - 
SPNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; -module_param(qp_coalesc_timer_cfg, byte, 0444); -MODULE_PARM_DESC(qp_coalesc_timer_cfg, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg (default=0)"); - -#define DEFAULT_RX_BUFF_LEN 2 -u16 rx_buff = DEFAULT_RX_BUFF_LEN; -module_param(rx_buff, ushort, 0444); -MODULE_PARM_DESC(rx_buff, "Set rx_buff size, buffer len must be 2^n. 2 - 16, default is 2KB"); - -static unsigned int lro_replenish_thld = 256; -module_param(lro_replenish_thld, uint, 0444); -MODULE_PARM_DESC(lro_replenish_thld, "Number of wqe for lro replenish buffer (default=256)"); - -#define SPNIC_NIC_DEV_WQ_NAME "spnic_nic_dev_wq" - -#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK) - -#define QID_MASKED(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1)) -#define WATCHDOG_TIMEOUT 5 - -#define SPNIC_SQ_DEPTH 1024 -#define SPNIC_RQ_DEPTH 1024 - -enum spnic_rx_buff_len { - RX_BUFF_VALID_2KB = 2, - RX_BUFF_VALID_4KB = 4, - RX_BUFF_VALID_8KB = 8, - RX_BUFF_VALID_16KB = 16, -}; - -#define CONVERT_UNIT 1024 - -static int spnic_netdev_event(struct notifier_block *notifier, unsigned long event, void *ptr); - -/* used for netdev notifier register/unregister */ -DEFINE_MUTEX(spnic_netdev_notifiers_mutex); -static int spnic_netdev_notifiers_ref_cnt; -static struct notifier_block spnic_netdev_notifier = { - .notifier_call = spnic_netdev_event, -}; - -static void spnic_register_notifier(struct spnic_nic_dev *nic_dev) -{ - int err; - - mutex_lock(&spnic_netdev_notifiers_mutex); - spnic_netdev_notifiers_ref_cnt++; - if (spnic_netdev_notifiers_ref_cnt == 1) { - err = register_netdevice_notifier(&spnic_netdev_notifier); - if (err) { - nic_info(&nic_dev->pdev->dev, "Register netdevice notifier failed, err: %d\n", - err); - spnic_netdev_notifiers_ref_cnt--; - } - } - mutex_unlock(&spnic_netdev_notifiers_mutex); -} - -static void spnic_unregister_notifier(struct spnic_nic_dev *nic_dev) -{ - mutex_lock(&spnic_netdev_notifiers_mutex); - if (spnic_netdev_notifiers_ref_cnt == 1) - unregister_netdevice_notifier(&spnic_netdev_notifier); - - if (spnic_netdev_notifiers_ref_cnt) - spnic_netdev_notifiers_ref_cnt--; - mutex_unlock(&spnic_netdev_notifiers_mutex); -} - -#define SPNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 1 -#define SPNIC_VLAN_CLEAR_OFFLOAD (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ - NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \ - NETIF_F_ALL_TSO) - -int spnic_netdev_event(struct notifier_block *notifier, unsigned long event, void *ptr) -{ - struct net_device *ndev = netdev_notifier_info_to_dev(ptr); - struct net_device *real_dev = NULL; - struct net_device *ret = NULL; - struct spnic_nic_dev *nic_dev = NULL; - u16 vlan_depth; - - if (!is_vlan_dev(ndev)) - return NOTIFY_DONE; - - dev_hold(ndev); - - switch (event) { - case NETDEV_REGISTER: - real_dev = vlan_dev_real_dev(ndev); - nic_dev = spnic_get_uld_dev_by_ifname(real_dev->name, SERVICE_T_NIC); - if (!nic_dev) - goto out; - - vlan_depth = 1; - ret = vlan_dev_priv(ndev)->real_dev; - while (is_vlan_dev(ret)) { - ret = vlan_dev_priv(ret)->real_dev; - vlan_depth++; - } - - if (vlan_depth == SPNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { - ndev->vlan_features &= (~SPNIC_VLAN_CLEAR_OFFLOAD); - } else if (vlan_depth > SPNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { - ndev->hw_features &= (~SPNIC_VLAN_CLEAR_OFFLOAD); - ndev->features &= (~SPNIC_VLAN_CLEAR_OFFLOAD); - } - - break; - - default: - break; - } - -out: - dev_put(ndev); - - return NOTIFY_DONE; -} - -void spnic_link_status_change(struct spnic_nic_dev *nic_dev, bool status) -{ - struct net_device *netdev = 
nic_dev->netdev; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev) || test_bit(SPNIC_LP_TEST, &nic_dev->flags)) - return; - - if (status) { - if (netif_carrier_ok(netdev)) - return; - - nic_dev->link_status = status; - netif_carrier_on(netdev); - nicif_info(nic_dev, link, netdev, "Link is up\n"); - } else { - if (!netif_carrier_ok(netdev)) - return; - - nic_dev->link_status = status; - netif_carrier_off(netdev); - nicif_info(nic_dev, link, netdev, "Link is down\n"); - } -} - -static void netdev_feature_init(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - netdev_features_t dft_fts = 0; - netdev_features_t cso_fts = 0; - netdev_features_t vlan_fts = 0; - netdev_features_t tso_fts = 0; - netdev_features_t hw_features = 0; - - dft_fts |= NETIF_F_SG | NETIF_F_HIGHDMA; - - if (SPNIC_SUPPORT_CSUM(nic_dev->hwdev)) - cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; - if (SPNIC_SUPPORT_SCTP_CRC(nic_dev->hwdev)) - cso_fts |= NETIF_F_SCTP_CRC; - - if (SPNIC_SUPPORT_TSO(nic_dev->hwdev)) - tso_fts |= NETIF_F_TSO | NETIF_F_TSO6; - - if (SPNIC_SUPPORT_VLAN_OFFLOAD(nic_dev->hwdev)) { - vlan_fts |= NETIF_F_HW_VLAN_CTAG_TX; - vlan_fts |= NETIF_F_HW_VLAN_CTAG_RX; - } - - if (SPNIC_SUPPORT_RXVLAN_FILTER(nic_dev->hwdev)) - vlan_fts |= NETIF_F_HW_VLAN_CTAG_FILTER; - - if (SPNIC_SUPPORT_VXLAN_OFFLOAD(nic_dev->hwdev)) - tso_fts |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; - - /* LRO is disabled by default, only set hw features */ - if (SPNIC_SUPPORT_LRO(nic_dev->hwdev)) - hw_features |= NETIF_F_LRO; - - netdev->features |= dft_fts | cso_fts | tso_fts | vlan_fts; - netdev->vlan_features |= dft_fts | cso_fts | tso_fts; - - hw_features |= netdev->hw_features; - - hw_features |= netdev->features; - - netdev->hw_features = hw_features; - - netdev->priv_flags |= IFF_UNICAST_FLT; - - netdev->hw_enc_features |= dft_fts; - if (SPNIC_SUPPORT_VXLAN_OFFLOAD(nic_dev->hwdev)) { - netdev->hw_enc_features |= cso_fts; - netdev->hw_enc_features |= tso_fts | NETIF_F_TSO_ECN; - } -} - -static void init_intr_coal_param(struct spnic_nic_dev *nic_dev) -{ - struct spnic_intr_coal_info *info = NULL; - u16 i; - - for (i = 0; i < nic_dev->max_qps; i++) { - info = &nic_dev->intr_coalesce[i]; - - info->pending_limt = qp_pending_limit; - info->coalesce_timer_cfg = qp_coalesc_timer_cfg; - - info->resend_timer_cfg = SPNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; - - info->pkt_rate_high = SPNIC_RX_RATE_HIGH; - info->rx_usecs_high = SPNIC_RX_COAL_TIME_HIGH; - info->rx_pending_limt_high = SPNIC_RX_PENDING_LIMIT_HIGH; - info->pkt_rate_low = SPNIC_RX_RATE_LOW; - info->rx_usecs_low = SPNIC_RX_COAL_TIME_LOW; - info->rx_pending_limt_low = SPNIC_RX_PENDING_LIMIT_LOW; - } -} - -static int spnic_init_intr_coalesce(struct spnic_nic_dev *nic_dev) -{ - u64 size; - - if (qp_pending_limit != SPNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT || - qp_coalesc_timer_cfg != SPNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG) - nic_dev->intr_coal_set_flag = 1; - else - nic_dev->intr_coal_set_flag = 0; - - size = sizeof(*nic_dev->intr_coalesce) * nic_dev->max_qps; - if (!size) { - nic_err(&nic_dev->pdev->dev, "Cannot allocate zero size intr coalesce\n"); - return -EINVAL; - } - nic_dev->intr_coalesce = kzalloc(size, GFP_KERNEL); - if (!nic_dev->intr_coalesce) { - nic_err(&nic_dev->pdev->dev, "Failed to alloc intr coalesce\n"); - return -ENOMEM; - } - - init_intr_coal_param(nic_dev); - - if (test_bit(SPNIC_INTR_ADAPT, &nic_dev->flags)) - nic_dev->adaptive_rx_coal = 1; - else - nic_dev->adaptive_rx_coal = 0; - - return 0; -} - -static void 
spnic_free_intr_coalesce(struct spnic_nic_dev *nic_dev) -{ - kfree(nic_dev->intr_coalesce); -} - -static int spnic_alloc_txrxqs(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - int err; - - err = spnic_alloc_txqs(netdev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to alloc txqs\n"); - return err; - } - - err = spnic_alloc_rxqs(netdev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to alloc rxqs\n"); - goto alloc_rxqs_err; - } - - err = spnic_init_intr_coalesce(nic_dev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to init_intr_coalesce\n"); - goto init_intr_err; - } - - return 0; - -init_intr_err: - spnic_free_rxqs(netdev); - -alloc_rxqs_err: - spnic_free_txqs(netdev); - - return err; -} - -static void spnic_free_txrxqs(struct spnic_nic_dev *nic_dev) -{ - spnic_free_intr_coalesce(nic_dev); - spnic_free_rxqs(nic_dev->netdev); - spnic_free_txqs(nic_dev->netdev); -} - -static void spnic_sw_deinit(struct spnic_nic_dev *nic_dev) -{ - spnic_free_txrxqs(nic_dev); - - spnic_clean_mac_list_filter(nic_dev); - - spnic_del_mac(nic_dev->hwdev, nic_dev->netdev->dev_addr, 0, - sphw_global_func_id(nic_dev->hwdev), SPHW_CHANNEL_NIC); - - spnic_clear_rss_config(nic_dev); -} - -static int spnic_sw_init(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - u64 nic_feature; - int err = 0; - - nic_feature = spnic_get_feature_cap(nic_dev->hwdev); - nic_feature &= SPNIC_DRV_FEATURE; - spnic_update_nic_feature(nic_dev->hwdev, nic_feature); - - sema_init(&nic_dev->port_state_sem, 1); - - err = spnic_dcb_init(nic_dev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to init dcb\n"); - return -EFAULT; - } - - nic_dev->q_params.sq_depth = SPNIC_SQ_DEPTH; - nic_dev->q_params.rq_depth = SPNIC_RQ_DEPTH; - - spnic_try_to_enable_rss(nic_dev); - - err = spnic_get_default_mac(nic_dev->hwdev, netdev->dev_addr); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to get MAC address\n"); - goto get_mac_err; - } - - if (!is_valid_ether_addr(netdev->dev_addr)) { - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) { - nic_err(&nic_dev->pdev->dev, "Invalid MAC address %pM\n", netdev->dev_addr); - err = -EIO; - goto err_mac; - } - - nic_info(&nic_dev->pdev->dev, "Invalid MAC address %pM, using random\n", - netdev->dev_addr); - eth_hw_addr_random(netdev); - } - - err = spnic_set_mac(nic_dev->hwdev, netdev->dev_addr, 0, - sphw_global_func_id(nic_dev->hwdev), SPHW_CHANNEL_NIC); - /* When this is the VF driver, the PF may have already set the VF - * MAC, so this condition must not be treated as an error during - * the driver probe procedure. 
- */ - if (err && err != SPNIC_PF_SET_VF_ALREADY) { - nic_err(&nic_dev->pdev->dev, "Failed to set default MAC\n"); - goto set_mac_err; - } - - /* MTU range: 384 - 9600 */ - netdev->min_mtu = SPNIC_MIN_MTU_SIZE; - netdev->max_mtu = SPNIC_MAX_JUMBO_FRAME_SIZE; - - err = spnic_alloc_txrxqs(nic_dev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to alloc qps\n"); - goto alloc_qps_err; - } - - return 0; - -alloc_qps_err: - spnic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, sphw_global_func_id(nic_dev->hwdev), - SPHW_CHANNEL_NIC); - -set_mac_err: -err_mac: -get_mac_err: - spnic_clear_rss_config(nic_dev); - - return err; -} - -static void spnic_assign_netdev_ops(struct spnic_nic_dev *adapter) -{ - spnic_set_netdev_ops(adapter); - if (!SPNIC_FUNC_IS_VF(adapter->hwdev)) - spnic_set_ethtool_ops(adapter->netdev); - else - spnicvf_set_ethtool_ops(adapter->netdev); - - adapter->netdev->watchdog_timeo = WATCHDOG_TIMEOUT * HZ; -} - -static int spnic_validate_parameters(struct spnic_lld_dev *lld_dev) -{ - struct pci_dev *pdev = lld_dev->pdev; - - /* If weight exceeds the queue depth, the queue resources will be - * exhausted, and increasing it has no effect. - */ - if (!poll_weight || poll_weight > SPNIC_MAX_RX_QUEUE_DEPTH) { - nic_warn(&pdev->dev, "Module Parameter poll_weight is out of range: [1, %d], resetting to %d\n", - SPNIC_MAX_RX_QUEUE_DEPTH, DEFAULT_POLL_WEIGHT); - poll_weight = DEFAULT_POLL_WEIGHT; - } - - /* check rx_buff value, default rx_buff is 2KB. - * Valid rx_buff include 2KB/4KB/8KB/16KB. - */ - if (rx_buff != RX_BUFF_VALID_2KB && rx_buff != RX_BUFF_VALID_4KB && - rx_buff != RX_BUFF_VALID_8KB && rx_buff != RX_BUFF_VALID_16KB) { - nic_warn(&pdev->dev, "Module Parameter rx_buff value %u is out of range, must be 2^n. Valid range is 2 - 16, resetting to %dKB", - rx_buff, DEFAULT_RX_BUFF_LEN); - rx_buff = DEFAULT_RX_BUFF_LEN; - } - - return 0; -} - -static void adaptive_configuration_init(struct spnic_nic_dev *nic_dev) -{ - /* TODO: */ -} - -static int set_interrupt_moder(struct spnic_nic_dev *nic_dev, u16 q_id, - u8 coalesc_timer_cfg, u8 pending_limt) -{ - struct interrupt_info info; - int err; - - memset(&info, 0, sizeof(info)); - - if (coalesc_timer_cfg == nic_dev->rxqs[q_id].last_coalesc_timer_cfg && - pending_limt == nic_dev->rxqs[q_id].last_pending_limt) - return 0; - - /* netdev not running or qp not in use, - * don't need to set coalesce to hw - */ - if (!SPHW_CHANNEL_RES_VALID(nic_dev) || q_id >= nic_dev->q_params.num_qps) - return 0; - - info.lli_set = 0; - info.interrupt_coalesc_set = 1; - info.coalesc_timer_cfg = coalesc_timer_cfg; - info.pending_limt = pending_limt; - info.msix_index = nic_dev->q_params.irq_cfg[q_id].msix_entry_idx; - info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg; - - err = sphw_set_interrupt_cfg(nic_dev->hwdev, info, SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to modify moderation for Queue: %u\n", q_id); - } else { - nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg; - nic_dev->rxqs[q_id].last_pending_limt = pending_limt; - } - - return err; -} - -static void calc_coal_para(struct spnic_nic_dev *nic_dev, struct spnic_intr_coal_info *q_coal, - u64 rx_rate, u8 *coalesc_timer_cfg, u8 *pending_limt) -{ - if (rx_rate < q_coal->pkt_rate_low) { - *coalesc_timer_cfg = q_coal->rx_usecs_low; - *pending_limt = q_coal->rx_pending_limt_low; - } else if (rx_rate > q_coal->pkt_rate_high) { - *coalesc_timer_cfg = q_coal->rx_usecs_high; - *pending_limt = q_coal->rx_pending_limt_high; - } else 
{ - *coalesc_timer_cfg = - (u8)((rx_rate - q_coal->pkt_rate_low) * - (q_coal->rx_usecs_high - q_coal->rx_usecs_low) / - (q_coal->pkt_rate_high - q_coal->pkt_rate_low) + q_coal->rx_usecs_low); - - *pending_limt = q_coal->rx_pending_limt_low; - } -} - -static void update_queue_coal(struct spnic_nic_dev *nic_dev, u16 qid, - u64 rx_rate, u64 avg_pkt_size, u64 tx_rate) -{ - struct spnic_intr_coal_info *q_coal = NULL; - u8 coalesc_timer_cfg, pending_limt; - - q_coal = &nic_dev->intr_coalesce[qid]; - - if (rx_rate > SPNIC_RX_RATE_THRESH && - avg_pkt_size > SPNIC_AVG_PKT_SMALL) { - calc_coal_para(nic_dev, q_coal, rx_rate, &coalesc_timer_cfg, &pending_limt); - } else { - coalesc_timer_cfg = SPNIC_LOWEST_LATENCY; - pending_limt = q_coal->rx_pending_limt_low; - } - - set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg, pending_limt); -} - -void spnic_auto_moderation_work(struct work_struct *work) -{ - struct delayed_work *delay = to_delayed_work(work); - struct spnic_nic_dev *nic_dev = container_of(delay, struct spnic_nic_dev, moderation_task); - unsigned long period = (unsigned long)(jiffies - nic_dev->last_moder_jiffies); - u64 rx_packets, rx_bytes, rx_pkt_diff, rx_rate, avg_pkt_size; - u64 tx_packets, tx_bytes, tx_pkt_diff, tx_rate; - u16 qid; - - if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) - return; - - queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, SPNIC_MODERATONE_DELAY); - - if (!nic_dev->adaptive_rx_coal || !period) - return; - - for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { - rx_packets = nic_dev->rxqs[qid].rxq_stats.packets; - rx_bytes = nic_dev->rxqs[qid].rxq_stats.bytes; - tx_packets = nic_dev->txqs[qid].txq_stats.packets; - tx_bytes = nic_dev->txqs[qid].txq_stats.bytes; - - rx_pkt_diff = rx_packets - nic_dev->rxqs[qid].last_moder_packets; - avg_pkt_size = rx_pkt_diff ? 
- ((unsigned long)(rx_bytes - nic_dev->rxqs[qid].last_moder_bytes)) / - rx_pkt_diff : 0; - - rx_rate = rx_pkt_diff * HZ / period; - tx_pkt_diff = tx_packets - nic_dev->txqs[qid].last_moder_packets; - tx_rate = tx_pkt_diff * HZ / period; - - update_queue_coal(nic_dev, qid, rx_rate, avg_pkt_size, tx_rate); - - nic_dev->rxqs[qid].last_moder_packets = rx_packets; - nic_dev->rxqs[qid].last_moder_bytes = rx_bytes; - nic_dev->txqs[qid].last_moder_packets = tx_packets; - nic_dev->txqs[qid].last_moder_bytes = tx_bytes; - } - - nic_dev->last_moder_jiffies = jiffies; -} - -void spnic_periodic_work_handler(struct work_struct *work) -{ - struct delayed_work *delay = to_delayed_work(work); - struct spnic_nic_dev *nic_dev = container_of(delay, struct spnic_nic_dev, periodic_work); - - if (test_and_clear_bit(EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag)) - sphw_fault_event_report(nic_dev->hwdev, SPHW_FAULT_SRC_TX_TIMEOUT, - FAULT_LEVEL_SERIOUS_FLR); - - queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ); -} - -static void free_nic_dev(struct spnic_nic_dev *nic_dev) -{ - destroy_workqueue(nic_dev->workq); - kfree(nic_dev->vlan_bitmap); -} - -static int setup_nic_dev(struct net_device *netdev, struct spnic_lld_dev *lld_dev) -{ - struct pci_dev *pdev = lld_dev->pdev; - struct spnic_nic_dev *nic_dev; - u32 page_num; - - nic_dev = (struct spnic_nic_dev *)netdev_priv(netdev); - nic_dev->netdev = netdev; - SET_NETDEV_DEV(netdev, &pdev->dev); - nic_dev->hwdev = lld_dev->hwdev; - nic_dev->pdev = pdev; - nic_dev->poll_weight = (int)poll_weight; - nic_dev->msg_enable = DEFAULT_MSG_ENABLE; - nic_dev->lro_replenish_thld = lro_replenish_thld; - nic_dev->rx_buff_len = (u16)(rx_buff * CONVERT_UNIT); - nic_dev->dma_rx_buff_size = RX_BUFF_NUM_PER_PAGE * nic_dev->rx_buff_len; - page_num = nic_dev->dma_rx_buff_size / PAGE_SIZE; - nic_dev->page_order = page_num > 0 ? 
ilog2(page_num) : 0; - - mutex_init(&nic_dev->nic_mutex); - - nic_dev->vlan_bitmap = kzalloc(VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL); - if (!nic_dev->vlan_bitmap) { - nic_err(&pdev->dev, "Failed to allocate vlan bitmap\n"); - return -ENOMEM; - } - - nic_dev->workq = create_singlethread_workqueue(SPNIC_NIC_DEV_WQ_NAME); - if (!nic_dev->workq) { - nic_err(&pdev->dev, "Failed to initialize nic workqueue\n"); - kfree(nic_dev->vlan_bitmap); - return -ENOMEM; - } - - INIT_DELAYED_WORK(&nic_dev->periodic_work, spnic_periodic_work_handler); - - INIT_LIST_HEAD(&nic_dev->uc_filter_list); - INIT_LIST_HEAD(&nic_dev->mc_filter_list); - INIT_WORK(&nic_dev->rx_mode_work, spnic_set_rx_mode_work); - - INIT_LIST_HEAD(&nic_dev->rx_flow_rule.rules); - INIT_LIST_HEAD(&nic_dev->tcam.tcam_list); - INIT_LIST_HEAD(&nic_dev->tcam.tcam_dynamic_info.tcam_dynamic_list); - - return 0; -} - -static int spnic_set_default_hw_feature(struct spnic_nic_dev *nic_dev) -{ - int err; - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) { - err = spnic_dcb_reset_hw_config(nic_dev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to reset hw dcb configuration\n"); - return err; - } - } - - err = spnic_set_nic_feature_to_hw(nic_dev->hwdev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to set nic features\n"); - return err; - } - - /* enable all hw features in netdev->features */ - return spnic_set_hw_features(nic_dev); -} - -static int nic_probe(struct spnic_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name) -{ - struct pci_dev *pdev = lld_dev->pdev; - struct spnic_nic_dev *nic_dev = NULL; - struct net_device *netdev = NULL; - u16 max_qps, glb_func_id; - int err; - - /* *uld_dev should never be NULL */ - *uld_dev = lld_dev; - - if (!sphw_support_nic(lld_dev->hwdev, NULL)) { - nic_info(&pdev->dev, "Hw doesn't support nic\n"); - return 0; - } - - nic_info(&pdev->dev, "NIC service probe begin\n"); - - err = spnic_validate_parameters(lld_dev); - if (err) { - err = -EINVAL; - goto err_out; - } - - glb_func_id = sphw_global_func_id(lld_dev->hwdev); - err = sphw_func_reset(lld_dev->hwdev, glb_func_id, SPHW_NIC_RES, SPHW_CHANNEL_NIC); - if (err) { - nic_err(&pdev->dev, "Failed to reset function\n"); - goto err_out; - } - - max_qps = sphw_func_max_nic_qnum(lld_dev->hwdev); - netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps); - if (!netdev) { - nic_err(&pdev->dev, "Failed to allocate ETH device\n"); - err = -ENOMEM; - goto err_out; - } - - nic_dev = (struct spnic_nic_dev *)netdev_priv(netdev); - err = setup_nic_dev(netdev, lld_dev); - if (err) - goto setup_dev_err; - - adaptive_configuration_init(nic_dev); - - /* get nic cap from hw */ - sphw_support_nic(lld_dev->hwdev, &nic_dev->nic_cap); - - err = spnic_init_nic_hwdev(nic_dev->hwdev, pdev, &pdev->dev, nic_dev->rx_buff_len); - if (err) { - nic_err(&pdev->dev, "Failed to init nic hwdev\n"); - goto init_nic_hwdev_err; - } - - err = spnic_sw_init(nic_dev); - if (err) - goto sw_init_err; - - spnic_assign_netdev_ops(nic_dev); - netdev_feature_init(netdev); - - err = spnic_set_default_hw_feature(nic_dev); - if (err) - goto set_features_err; - - spnic_register_notifier(nic_dev); - - err = register_netdev(netdev); - if (err) { - nic_err(&pdev->dev, "Failed to register netdev\n"); - err = -ENOMEM; - goto netdev_err; - } - - queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ); - netif_carrier_off(netdev); - - *uld_dev = nic_dev; - nicif_info(nic_dev, probe, netdev, "Register netdev succeeded\n"); - nic_info(&pdev->dev, "NIC service probed\n"); - - return 0; - -netdev_err: - 
spnic_unregister_notifier(nic_dev); - -set_features_err: - spnic_sw_deinit(nic_dev); - -sw_init_err: - spnic_free_nic_hwdev(nic_dev->hwdev); - -init_nic_hwdev_err: - free_nic_dev(nic_dev); -setup_dev_err: - free_netdev(netdev); - -err_out: - nic_err(&pdev->dev, "NIC service probe failed\n"); - - return err; -} - -static void nic_remove(struct spnic_lld_dev *lld_dev, void *adapter) -{ - struct spnic_nic_dev *nic_dev = adapter; - struct net_device *netdev = NULL; - - if (!nic_dev || !sphw_support_nic(lld_dev->hwdev, NULL)) - return; - - nic_info(&lld_dev->pdev->dev, "NIC service remove begin\n"); - - netdev = nic_dev->netdev; - - unregister_netdev(netdev); - spnic_unregister_notifier(nic_dev); - - cancel_delayed_work_sync(&nic_dev->periodic_work); - cancel_work_sync(&nic_dev->rx_mode_work); - destroy_workqueue(nic_dev->workq); - - spnic_sw_deinit(nic_dev); - - spnic_flush_rx_flow_rule(nic_dev); - spnic_free_nic_hwdev(nic_dev->hwdev); - - kfree(nic_dev->vlan_bitmap); - - free_netdev(netdev); - - nic_info(&lld_dev->pdev->dev, "NIC service removed\n"); -} - -static void sriov_state_change(struct spnic_nic_dev *nic_dev, - const struct sphw_sriov_state_info *info) -{ - if (!info->enable) - spnic_clear_vfs_info(nic_dev->hwdev); -} - -const char *g_spnic_module_link_err[LINK_ERR_NUM] = { - "Unrecognized module", -}; - -void sphw_port_module_event_handler(struct spnic_nic_dev *nic_dev, struct sphw_event_info *event) -{ - enum port_module_event_type type = event->module_event.type; - enum link_err_type err_type = event->module_event.err_type; - - switch (type) { - case SPHW_PORT_MODULE_CABLE_PLUGGED: - case SPHW_PORT_MODULE_CABLE_UNPLUGGED: - nicif_info(nic_dev, link, nic_dev->netdev, - "Port module event: Cable %s\n", - type == SPHW_PORT_MODULE_CABLE_PLUGGED ? 
- "plugged" : "unplugged"); - break; - case SPHW_PORT_MODULE_LINK_ERR: - if (err_type >= LINK_ERR_NUM) { - nicif_info(nic_dev, link, nic_dev->netdev, - "Link failed, Unknown error type: 0x%x\n", err_type); - } else { - nicif_info(nic_dev, link, nic_dev->netdev, "Link failed, error type: 0x%x: %s\n", - err_type, g_spnic_module_link_err[err_type]); - } - break; - default: - nicif_err(nic_dev, link, nic_dev->netdev, "Unknown port module type %d\n", type); - break; - } -} - -void nic_event(struct spnic_lld_dev *lld_dev, void *adapter, struct sphw_event_info *event) -{ - struct spnic_nic_dev *nic_dev = adapter; - enum sphw_event_type type; - - if (!nic_dev || !event || !sphw_support_nic(lld_dev->hwdev, NULL)) - return; - - type = event->type; - - switch (type) { - case SPHW_EVENT_LINK_DOWN: - spnic_link_status_change(nic_dev, false); - break; - case SPHW_EVENT_LINK_UP: - spnic_link_status_change(nic_dev, true); - break; - case SPHW_EVENT_SRIOV_STATE_CHANGE: - sriov_state_change(nic_dev, &event->sriov_state); - break; - case SPHW_EVENT_PORT_MODULE_EVENT: - sphw_port_module_event_handler(nic_dev, event); - break; - case SPHW_EVENT_FAULT: - if (event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR && - event->info.event.chip.func_id == sphw_global_func_id(lld_dev->hwdev)) - spnic_link_status_change(nic_dev, false); - break; - case SPHW_EVENT_PCIE_LINK_DOWN: - case SPHW_EVENT_HEART_LOST: - spnic_link_status_change(nic_dev, false); - break; - default: - break; - } -} - -struct net_device *spnic_get_netdev_by_lld(struct spnic_lld_dev *lld_dev) -{ - struct spnic_nic_dev *nic_dev = NULL; - - if (!lld_dev || !sphw_support_nic(lld_dev->hwdev, NULL)) - return NULL; - - nic_dev = spnic_get_uld_dev_by_pdev(lld_dev->pdev, SERVICE_T_NIC); - if (!nic_dev) { - sdk_err(&lld_dev->pdev->dev, - "There's no net device attached on the pci device"); - return NULL; - } - - return nic_dev->netdev; -} - -void *spnic_get_hwdev_by_netdev(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (!nic_dev || !netdev) - return NULL; - - return nic_dev->hwdev; -} - -struct spnic_uld_info nic_uld_info = { - .probe = nic_probe, - .remove = nic_remove, - .suspend = NULL, - .resume = NULL, - .event = nic_event, - .ioctl = nic_ioctl, -}; diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h b/drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h deleted file mode 100644 index 720f6fcf5f0ad216cd28196e81d3bafd1dc8461f..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h +++ /dev/null @@ -1,605 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_MGMT_INTERFACE_H -#define SPNIC_MGMT_INTERFACE_H - -#include - -#include "sphw_mgmt_msg_base.h" - -#define SPNIC_CMD_OP_SET MGMT_MSG_CMD_OP_SET -#define SPNIC_CMD_OP_GET MGMT_MSG_CMD_OP_GET - -#define SPNIC_CMD_OP_ADD 1 -#define SPNIC_CMD_OP_DEL 0 - -enum nic_feature_cap { - NIC_F_CSUM = BIT(0), - NIC_F_SCTP_CRC = BIT(1), - NIC_F_TSO = BIT(2), - NIC_F_LRO = BIT(3), - NIC_F_UFO = BIT(4), - NIC_F_RSS = BIT(5), - NIC_F_RX_VLAN_FILTER = BIT(6), - NIC_F_RX_VLAN_STRIP = BIT(7), - NIC_F_TX_VLAN_INSERT = BIT(8), - NIC_F_VXLAN_OFFLOAD = BIT(9), - NIC_F_IPSEC_OFFLOAD = BIT(10), - NIC_F_FDIR = BIT(11), - NIC_F_PROMISC = BIT(12), - NIC_F_ALLMULTI = BIT(13), -}; - -#define NIC_F_ALL_MASK 0x3FFF - -#define NIC_MAX_FEATURE_QWORD 4 -struct spnic_cmd_feature_nego { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; /* 1: set, 
0: get */ - u8 rsvd; - u64 s_feature[NIC_MAX_FEATURE_QWORD]; -}; - -struct spnic_port_mac_set { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 vlan_id; - u16 rsvd1; - u8 mac[ETH_ALEN]; -}; - -struct spnic_port_mac_update { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 vlan_id; - u16 rsvd1; - u8 old_mac[ETH_ALEN]; - u16 rsvd2; - u8 new_mac[ETH_ALEN]; -}; - -struct spnic_vport_state { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u8 state; /* 0--disable, 1--enable */ - u8 rsvd2[3]; -}; - -struct spnic_port_state { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u8 state; /* 0--disable, 1--enable */ - u8 rsvd2[3]; -}; - -struct spnic_cmd_clear_qp_resource { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; -}; - -struct spnic_port_stats_info { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; -}; - -struct spnic_vport_stats { - u64 tx_unicast_pkts_vport; - u64 tx_unicast_bytes_vport; - u64 tx_multicast_pkts_vport; - u64 tx_multicast_bytes_vport; - u64 tx_broadcast_pkts_vport; - u64 tx_broadcast_bytes_vport; - - u64 rx_unicast_pkts_vport; - u64 rx_unicast_bytes_vport; - u64 rx_multicast_pkts_vport; - u64 rx_multicast_bytes_vport; - u64 rx_broadcast_pkts_vport; - u64 rx_broadcast_bytes_vport; - - u64 tx_discard_vport; - u64 rx_discard_vport; - u64 tx_err_vport; - u64 rx_err_vport; -}; - -struct spnic_cmd_vport_stats { - struct mgmt_msg_head msg_head; - - u32 stats_size; - u32 rsvd1; - struct spnic_vport_stats stats; - u64 rsvd2[6]; -}; - -struct spnic_cmd_qpn { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 base_qpn; -}; - -enum spnic_func_tbl_cfg_bitmap { - FUNC_CFG_INIT, - FUNC_CFG_RX_BUF_SIZE, - FUNC_CFG_MTU, -}; - -struct spnic_func_tbl_cfg { - u16 rx_wqe_buf_size; - u16 mtu; - u32 rsvd[9]; -}; - -struct spnic_cmd_set_func_tbl { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd; - - u32 cfg_bitmap; - struct spnic_func_tbl_cfg tbl_cfg; -}; - -struct spnic_cmd_cons_idx_attr { - struct mgmt_msg_head msg_head; - - u16 func_idx; - u8 dma_attr_off; - u8 pending_limit; - u8 coalescing_time; - u8 intr_en; - u16 intr_idx; - u32 l2nic_sqn; - u32 rsvd; - u64 ci_addr; -}; - -struct spnic_cmd_vlan_offload { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 vlan_offload; - u8 rsvd1[5]; -}; - -struct spnic_cmd_lro_config { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; - u8 rsvd1; - u8 lro_ipv4_en; - u8 lro_ipv6_en; - u8 lro_max_pkt_len; /* unit is 1K */ - u8 resv2[13]; -}; - -struct spnic_cmd_lro_timer { - struct mgmt_msg_head msg_head; - - u8 opcode; /* 1: set timer value, 0: get timer value */ - u8 rsvd1; - u16 rsvd2; - u32 timer; -}; - -struct spnic_cmd_vf_vlan_config { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; - u8 rsvd1; - u16 vlan_id; - u8 qos; - u8 rsvd2[5]; -}; - -struct spnic_cmd_spoofchk_set { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 state; - u8 rsvd1; -}; - -struct spnic_cmd_tx_rate_cfg { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u32 min_rate; - u32 max_rate; - u8 rsvd2[8]; -}; - -struct spnic_cmd_port_info { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 rsvd1[3]; - u8 port_type; - u8 autoneg_cap; - u8 autoneg_state; - u8 duplex; - u8 speed; - u8 fec; - u16 rsvd2; - u32 rsvd3[4]; -}; - -struct spnic_cmd_register_vf { - struct mgmt_msg_head msg_head; - - u8 op_register; /* 0 - unregister, 1 - register */ - u8 rsvd[39]; -}; - -struct spnic_cmd_link_state { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 
state; - u16 rsvd1; -}; - -struct spnic_cmd_vlan_config { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; - u8 rsvd1; - u16 vlan_id; - u16 rsvd2; -}; - -/* set vlan filter */ -struct spnic_cmd_set_vlan_filter { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 resvd[2]; - u32 vlan_filter_ctrl; /* bit0:vlan filter en; bit1:broadcast_filter_en */ -}; - -struct spnic_cmd_link_ksettings_info { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 rsvd1[3]; - - u32 valid_bitmap; - u8 speed; /* enum nic_speed_level */ - u8 autoneg; /* 0 - off, 1 - on */ - u8 fec; /* 0 - RSFEC, 1 - BASEFEC, 2 - NOFEC */ - u8 rsvd2[21]; /* reserved for duplex, port, etc. */ -}; - -struct mpu_lt_info { - u8 node; - u8 inst; - u8 entry_size; - u8 rsvd; - u32 lt_index; - u32 offset; - u32 len; -}; - -struct nic_mpu_lt_opera { - struct mgmt_msg_head msg_head; - struct mpu_lt_info net_lt_cmd; - u8 data[100]; -}; - -struct spnic_rx_mode_config { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u32 rx_mode; -}; - -/* rss */ -struct spnic_rss_context_table { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u32 context; -}; - -struct spnic_cmd_rss_engine_type { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; - u8 hash_engine; - u8 rsvd1[4]; -}; - -#define SPNIC_RSS_INDIR_SIZE 256 -#define SPNIC_RSS_KEY_SIZE 40 - -struct spnic_cmd_rss_hash_key { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; - u8 rsvd1; - u8 key[SPNIC_RSS_KEY_SIZE]; -}; - -struct spnic_rss_indir_table { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u8 indir[SPNIC_RSS_INDIR_SIZE]; -}; - -#define SPNIC_DCB_UP_MAX 0x8 -#define SPNIC_DCB_COS_MAX 0x8 -#define SPNIC_DCB_TC_MAX 0x8 - -struct spnic_cmd_rss_config { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 rss_en; - u8 rq_priority_number; - u8 prio_tc[SPNIC_DCB_COS_MAX]; - u16 num_qps; - u16 rsvd1; -}; - -struct spnic_dcb_state { - u8 dcb_on; - u8 default_cos; - u16 rsvd1; - u8 up_cos[SPNIC_DCB_UP_MAX]; - u32 rsvd2[7]; -}; - -struct spnic_cmd_vf_dcb_state { - struct mgmt_msg_head msg_head; - - struct spnic_dcb_state state; -}; - -struct spnic_up_ets_cfg { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 rsvd1[3]; - - u8 cos_tc[SPNIC_DCB_COS_MAX]; - u8 tc_bw[SPNIC_DCB_TC_MAX]; - u8 cos_prio[SPNIC_DCB_COS_MAX]; - u8 cos_bw[SPNIC_DCB_COS_MAX]; - u8 tc_prio[SPNIC_DCB_TC_MAX]; -}; - -struct spnic_cmd_set_pfc { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 rsvd1; - u8 pfc_en; - u8 pfc_bitmap; - u8 rsvd2[4]; -}; - -struct spnic_cos_up_map { - struct mgmt_msg_head msg_head; - - u8 port_id; - /* every bit indicates whether the map entry at that index is valid (1) or not (0) */ - u8 cos_valid_mask; - u16 rsvd1; - - /* user priority in cos (index: cos, value: up pri) */ - u8 map[SPNIC_DCB_UP_MAX]; -}; - -struct spnic_cmd_pause_config { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 opcode; - u16 rsvd1; - u8 auto_neg; - u8 rx_pause; - u8 tx_pause; - u8 rsvd2[5]; -}; - -struct nic_cmd_tx_pause_notice { - struct mgmt_msg_head head; - - u32 tx_pause_except; - u32 except_level; - u32 rsvd; -}; - -#define SPNIC_CMD_OP_FREE 0 -#define SPNIC_CMD_OP_ALLOC 1 - -struct spnic_cmd_cfg_qps { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; /* 1: alloc qp, 0: free qp */ - u8 rsvd1; - u16 num_qps; - u16 rsvd2; -}; - -struct spnic_cmd_led_config { - struct mgmt_msg_head msg_head; - - u8 port; - u8 type; - u8 mode; - u8 rsvd1; -}; - -struct spnic_cmd_port_loopback { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 opcode; 
- u8 mode; - u8 en; - u32 rsvd1[2]; -}; - -struct spnic_cmd_get_light_module_abs { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 abs_status; /* 0:present, 1:absent */ - u8 rsv[2]; -}; - -#define STD_SFP_INFO_MAX_SIZE 640 -struct spnic_cmd_get_std_sfp_info { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 wire_type; - u16 eeprom_len; - u32 rsvd; - u8 sfp_info[STD_SFP_INFO_MAX_SIZE]; -}; - -struct spnic_cable_plug_event { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 plugged; /* 0: unplugged, 1: plugged */ - u8 port_id; -}; - -struct nic_cmd_mac_info { - struct mgmt_msg_head head; - - u32 valid_bitmap; - u16 rsvd; - - u8 host_id[32]; - u8 port_id[32]; - u8 mac_addr[192]; -}; - -#define SPNIC_TCAM_BLOCK_ENABLE 1 -#define SPNIC_TCAM_BLOCK_DISABLE 0 -#define SPNIC_TCAM_BLOCK_NORMAL_TYPE 0 -#define SPNIC_MAX_TCAM_RULES_NUM 4096 - -struct nic_cmd_set_tcam_enable { - struct mgmt_msg_head head; - - u16 func_id; - u8 tcam_enable; - u8 rsvd1; - u32 rsvd2; -}; - -/* alloc tcam block input struct */ -struct nic_cmd_ctrl_tcam_block_in { - struct mgmt_msg_head head; - - u16 func_id; /* func_id */ - u8 alloc_en; /* 0: free tcam block, 1: alloc tcam block */ - u8 tcam_type; /* 0: alloc 16 size tcam block, 1: alloc 0 size tcam block */ - u16 tcam_block_index; - u16 alloc_block_num; -}; - -/* alloc tcam block output struct */ -struct nic_cmd_ctrl_tcam_block_out { - struct mgmt_msg_head head; - - u16 func_id; - u8 alloc_en; - u8 tcam_type; - u16 tcam_block_index; - u16 mpu_alloc_block_size; -}; - -struct nic_cmd_flush_tcam_rules { - struct mgmt_msg_head head; - - u16 func_id; /* func_id */ - u16 rsvd; -}; - -struct nic_cmd_dfx_fdir_tcam_block_table { - struct mgmt_msg_head head; - u8 tcam_type; - u8 valid; - u16 tcam_block_index; - u16 use_function_id; - u16 rsvd; -}; - -struct tcam_result { - u32 qid; - u32 rsvd; -}; - -#define TCAM_FLOW_KEY_SIZE 44 - -struct tcam_key_x_y { - u8 x[TCAM_FLOW_KEY_SIZE]; - u8 y[TCAM_FLOW_KEY_SIZE]; -}; - -struct nic_tcam_cfg_rule { - u32 index; - struct tcam_result data; - struct tcam_key_x_y key; -}; - -struct nic_cmd_fdir_add_rule { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd; - struct nic_tcam_cfg_rule rule; -}; - -struct nic_cmd_fdir_del_rules { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd; - u32 index_start; - u32 index_num; -}; - -struct nic_cmd_fdir_get_rule { - struct mgmt_msg_head head; - - u32 index; - u32 valid; - struct tcam_key_x_y key; - struct tcam_result data; - u64 packet_count; - u64 byte_count; -}; - -#endif /* SPNIC_MGMT_INTERFACE_H */ diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c b/drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c deleted file mode 100644 index a4f668682f37cf7318f481a3db6fa22069c291e9..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c +++ /dev/null @@ -1,1526 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "spnic_nic_io.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" -#include "spnic_rx.h" -#include "spnic_dcb.h" - -#define SPNIC_DEFAULT_RX_CSUM_OFFLOAD 0xFFF - -#define SPNIC_LRO_DEFAULT_COAL_PKT_SIZE 32 -#define SPNIC_LRO_DEFAULT_TIME_LIMIT 16 -#define SPNIC_WAIT_FLUSH_QP_RESOURCE_TIMEOUT 2000 -static void spnic_nic_set_rx_mode(struct net_device 
*netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt || - netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) { - set_bit(SPNIC_UPDATE_MAC_FILTER, &nic_dev->flags); - nic_dev->netdev_uc_cnt = netdev_uc_count(netdev); - nic_dev->netdev_mc_cnt = netdev_mc_count(netdev); - } - - queue_work(nic_dev->workq, &nic_dev->rx_mode_work); -} - -int spnic_alloc_txrxq_resources(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *q_params) -{ - u32 size; - int err; - - size = sizeof(*q_params->txqs_res) * q_params->num_qps; - q_params->txqs_res = kzalloc(size, GFP_KERNEL); - if (!q_params->txqs_res) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc txqs resources array\n"); - return -ENOMEM; - } - - size = sizeof(*q_params->rxqs_res) * q_params->num_qps; - q_params->rxqs_res = kzalloc(size, GFP_KERNEL); - if (!q_params->rxqs_res) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc rxqs resource array\n"); - err = -ENOMEM; - goto alloc_rxqs_res_arr_err; - } - - size = sizeof(*q_params->irq_cfg) * q_params->num_qps; - q_params->irq_cfg = kzalloc(size, GFP_KERNEL); - if (!q_params->irq_cfg) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc irq resource array\n"); - err = -ENOMEM; - goto alloc_irq_cfg_err; - } - - err = spnic_alloc_txqs_res(nic_dev, q_params->num_qps, - q_params->sq_depth, q_params->txqs_res); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc txqs resource\n"); - goto alloc_txqs_res_err; - } - - err = spnic_alloc_rxqs_res(nic_dev, q_params->num_qps, - q_params->rq_depth, q_params->rxqs_res); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc rxqs resource\n"); - goto alloc_rxqs_res_err; - } - - return 0; - -alloc_rxqs_res_err: - spnic_free_txqs_res(nic_dev, q_params->num_qps, q_params->sq_depth, - q_params->txqs_res); - -alloc_txqs_res_err: - kfree(q_params->irq_cfg); - q_params->irq_cfg = NULL; - -alloc_irq_cfg_err: - kfree(q_params->rxqs_res); - q_params->rxqs_res = NULL; - -alloc_rxqs_res_arr_err: - kfree(q_params->txqs_res); - q_params->txqs_res = NULL; - - return err; -} - -void spnic_free_txrxq_resources(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *q_params) -{ - spnic_free_rxqs_res(nic_dev, q_params->num_qps, q_params->rq_depth, q_params->rxqs_res); - spnic_free_txqs_res(nic_dev, q_params->num_qps, q_params->sq_depth, q_params->txqs_res); - - kfree(q_params->irq_cfg); - q_params->irq_cfg = NULL; - - kfree(q_params->rxqs_res); - q_params->rxqs_res = NULL; - - kfree(q_params->txqs_res); - q_params->txqs_res = NULL; -} - -int spnic_configure_txrxqs(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *q_params) -{ - int err; - - err = spnic_configure_txqs(nic_dev, q_params->num_qps, - q_params->sq_depth, q_params->txqs_res); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to configure txqs\n"); - return err; - } - - err = spnic_configure_rxqs(nic_dev, q_params->num_qps, - q_params->rq_depth, q_params->rxqs_res); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to configure rxqs\n"); - return err; - } - - return 0; -} - -static void config_dcb_qps_map(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - u8 i, num_tcs; - u16 num_rss; - - if (!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - spnic_update_tx_db_cos(nic_dev); - return; - } - - num_tcs = (u8)netdev_get_num_tc(netdev); - /* For now, we don't support changing num_tcs */ - 
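When DCB is enabled, config_dcb_qps_map() carves the queues into one contiguous block per traffic class via netdev_set_tc_queue(), as the code that follows shows. A rough worked example with illustrative values (not driver defaults): with num_tcs = 4 and num_rss = 4, TC0 owns queues 0-3, TC1 owns 4-7, TC2 owns 8-11 and TC3 owns 12-15, so a packet classified to TC t with RSS hash h lands on queue t * num_rss + (h % num_rss). The helper below is hypothetical, not driver code:

#include <linux/types.h>

/* Hypothetical helper: resolve (tc, hash) to a queue id under the
 * contiguous per-TC carving set up by netdev_set_tc_queue().
 */
static u16 tc_rss_queue(u16 tc, u32 hash, u16 num_rss)
{
	/* base of the TC's block, plus the RSS offset inside it */
	return (u16)(tc * num_rss + hash % num_rss);
}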
if (num_tcs != nic_dev->hw_dcb_cfg.max_cos || - nic_dev->q_params.num_qps < num_tcs || - !test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, "Invalid num_tcs: %u or num_qps: %u, disable DCB\n", - num_tcs, nic_dev->q_params.num_qps); - netdev_reset_tc(netdev); - nic_dev->q_params.num_tc = 0; - clear_bit(SPNIC_DCB_ENABLE, &nic_dev->flags); - /* if we can't enable rss or get enough num_qps, - * we need to sync the default configuration to hw - */ - spnic_configure_dcb(netdev); - } else { - /* use 0~max_cos-1 as tc for netdev */ - num_rss = nic_dev->q_params.num_rss; - for (i = 0; i < num_tcs; i++) - netdev_set_tc_queue(netdev, i, num_rss, (u16)(num_rss * i)); - } - - spnic_update_tx_db_cos(nic_dev); -} - -static int spnic_configure(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - int err; - - err = spnic_set_port_mtu(nic_dev->hwdev, (u16)netdev->mtu); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to set mtu\n"); - return err; - } - - config_dcb_qps_map(nic_dev); - - /* rx rss init */ - err = spnic_rx_configure(netdev); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to configure rx\n"); - return err; - } - - return 0; -} - -static void spnic_remove_configure(struct spnic_nic_dev *nic_dev) -{ - spnic_rx_remove_configure(nic_dev->netdev); -} - -/* try to modify the number of irqs to the target number, - * and return the actual number of irqs. - */ -static u16 spnic_qp_irq_change(struct spnic_nic_dev *nic_dev, u16 dst_num_qp_irq) -{ - struct irq_info *qps_irq_info = nic_dev->qps_irq_info; - u16 resp_irq_num, irq_num_gap, i; - u16 idx; - int err; - - if (dst_num_qp_irq > nic_dev->num_qp_irq) { - irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq; - err = sphw_alloc_irqs(nic_dev->hwdev, SERVICE_T_NIC, irq_num_gap, - &qps_irq_info[nic_dev->num_qp_irq], &resp_irq_num); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc irqs\n"); - return nic_dev->num_qp_irq; - } - - nic_dev->num_qp_irq += resp_irq_num; - } else if (dst_num_qp_irq < nic_dev->num_qp_irq) { - irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq; - for (i = 0; i < irq_num_gap; i++) { - idx = (nic_dev->num_qp_irq - i) - 1; - sphw_free_irq(nic_dev->hwdev, SERVICE_T_NIC, qps_irq_info[idx].irq_id); - qps_irq_info[idx].irq_id = 0; - qps_irq_info[idx].msix_entry_idx = 0; - } - nic_dev->num_qp_irq = dst_num_qp_irq; - } - - return nic_dev->num_qp_irq; -} - -static void config_dcb_num_qps(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *q_params, - u16 max_qps) -{ - u8 num_tcs = q_params->num_tc; - u16 num_rss; - - if (!num_tcs || !test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) - return; - - if (num_tcs == nic_dev->hw_dcb_cfg.max_cos && max_qps >= num_tcs) { - num_rss = max_qps / num_tcs; - num_rss = min_t(u16, num_rss, q_params->rss_limit); - q_params->num_rss = num_rss; - q_params->num_qps = (u16)(num_tcs * num_rss); - } /* else will disable DCB in config_dcb_qps_map() */ -} - -static void spnic_config_num_qps(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *q_params) -{ - u16 alloc_num_irq, cur_num_irq; - u16 dst_num_irq; - - if (test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - q_params->num_rss = q_params->rss_limit; - q_params->num_qps = q_params->rss_limit; - } else { - q_params->num_rss = 0; - q_params->num_qps = 1; - } - - config_dcb_num_qps(nic_dev, q_params, nic_dev->max_qps); - - if (nic_dev->num_qp_irq >= q_params->num_qps) - goto out; - - cur_num_irq = nic_dev->num_qp_irq; - - alloc_num_irq = 
spnic_qp_irq_change(nic_dev, q_params->num_qps); - if (alloc_num_irq < q_params->num_qps) { - q_params->num_qps = alloc_num_irq; - q_params->num_rss = q_params->num_qps; - config_dcb_num_qps(nic_dev, q_params, q_params->num_qps); - nicif_warn(nic_dev, drv, nic_dev->netdev, - "Cannot get enough irqs, adjust num_qps to %u\n", - q_params->num_qps); - - /* The current irq may be in use, we must keep it */ - dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps); - spnic_qp_irq_change(nic_dev, dst_num_irq); - } - -out: - nicif_info(nic_dev, drv, nic_dev->netdev, "Final num_qps: %u, num_rss: %u\n", - q_params->num_qps, q_params->num_rss); -} - -/* determine num_qps from rss_tmpl_id/irq_num/dcb_en */ -int spnic_setup_num_qps(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - u32 irq_size; - - nic_dev->num_qp_irq = 0; - - irq_size = sizeof(*nic_dev->qps_irq_info) * nic_dev->max_qps; - if (!irq_size) { - nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size entries\n"); - return -EINVAL; - } - nic_dev->qps_irq_info = kzalloc(irq_size, GFP_KERNEL); - if (!nic_dev->qps_irq_info) { - nicif_err(nic_dev, drv, netdev, "Failed to alloc qps_irq_info\n"); - return -ENOMEM; - } - - spnic_config_num_qps(nic_dev, &nic_dev->q_params); - - return 0; -} - -static void spnic_destroy_num_qps(struct spnic_nic_dev *nic_dev) -{ - u16 i; - - for (i = 0; i < nic_dev->num_qp_irq; i++) - sphw_free_irq(nic_dev->hwdev, SERVICE_T_NIC, nic_dev->qps_irq_info[i].irq_id); - - kfree(nic_dev->qps_irq_info); -} - -int spnic_force_port_disable(struct spnic_nic_dev *nic_dev) -{ - int err; - - down(&nic_dev->port_state_sem); - - err = spnic_set_port_enable(nic_dev->hwdev, false, SPHW_CHANNEL_NIC); - if (!err) - nic_dev->force_port_disable = true; - - up(&nic_dev->port_state_sem); - - return err; -} - -int spnic_force_set_port_state(struct spnic_nic_dev *nic_dev, bool enable) -{ - int err = 0; - - down(&nic_dev->port_state_sem); - - nic_dev->force_port_disable = false; - err = spnic_set_port_enable(nic_dev->hwdev, enable, SPHW_CHANNEL_NIC); - - up(&nic_dev->port_state_sem); - - return err; -} - -int spnic_maybe_set_port_state(struct spnic_nic_dev *nic_dev, bool enable) -{ - int err; - - down(&nic_dev->port_state_sem); - - /* Do nothing when force disabled: the port was disabled by - * spnic_force_port_disable() and must not be re-enabled - * while in force mode - */ - if (nic_dev->force_port_disable) { - up(&nic_dev->port_state_sem); - return 0; - } - - err = spnic_set_port_enable(nic_dev->hwdev, enable, SPHW_CHANNEL_NIC); - - up(&nic_dev->port_state_sem); - - return err; -} - -static void spnic_print_link_message(struct spnic_nic_dev *nic_dev, u8 link_status) -{ - if (nic_dev->link_status == link_status) - return; - - nic_dev->link_status = link_status; - - nicif_info(nic_dev, link, nic_dev->netdev, "Link is %s\n", - (link_status ? "up" : "down")); -} - -int spnic_alloc_channel_resources(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_qp_params *qp_params, - struct spnic_dyna_txrxq_params *trxq_params) -{ - int err; - - qp_params->num_qps = trxq_params->num_qps; - qp_params->sq_depth = trxq_params->sq_depth; - qp_params->rq_depth = trxq_params->rq_depth; - - err = spnic_alloc_qps(nic_dev->hwdev, nic_dev->qps_irq_info, qp_params); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc qps\n"); - return err; - } - - err = spnic_alloc_txrxq_resources(nic_dev, trxq_params); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc txrxq resources\n"); - spnic_free_qps(nic_dev->hwdev, qp_params); - return err; - } - - return 0; -} - -void spnic_free_channel_resources(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_qp_params *qp_params, - struct spnic_dyna_txrxq_params *trxq_params) -{ - mutex_lock(&nic_dev->nic_mutex); - spnic_free_txrxq_resources(nic_dev, trxq_params); - spnic_free_qps(nic_dev->hwdev, qp_params); - mutex_unlock(&nic_dev->nic_mutex); -} - -int spnic_open_channel(struct spnic_nic_dev *nic_dev, struct spnic_dyna_qp_params *qp_params, - struct spnic_dyna_txrxq_params *trxq_params) -{ - int err; - - err = spnic_init_qps(nic_dev->hwdev, qp_params); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to init qps\n"); - return err; - } - - err = spnic_configure_txrxqs(nic_dev, trxq_params); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to configure txrxqs\n"); - goto cfg_txrxqs_err; - } - - err = spnic_qps_irq_init(nic_dev); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to init txrxq irq\n"); - goto init_qp_irq_err; - } - - err = spnic_configure(nic_dev); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to configure nic\n"); - goto configure_err; - } - - return 0; - -configure_err: - spnic_qps_irq_deinit(nic_dev); - -init_qp_irq_err: -cfg_txrxqs_err: - spnic_deinit_qps(nic_dev->hwdev, qp_params); - - return err; -} - -void spnic_close_channel(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_qp_params *qp_params) -{ - spnic_remove_configure(nic_dev); - spnic_qps_irq_deinit(nic_dev); - spnic_deinit_qps(nic_dev->hwdev, qp_params); -} - -int spnic_vport_up(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - u8 link_status = 0; - u16 glb_func_id; - int err; - - glb_func_id = sphw_global_func_id(nic_dev->hwdev); - err = spnic_set_vport_enable(nic_dev->hwdev, glb_func_id, true, - SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to enable vport\n"); - goto vport_enable_err; - } - - err = spnic_maybe_set_port_state(nic_dev, true); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to enable port\n"); - goto port_enable_err; - } - - netif_set_real_num_tx_queues(netdev, nic_dev->q_params.num_qps); - netif_set_real_num_rx_queues(netdev, nic_dev->q_params.num_qps); - netif_tx_wake_all_queues(netdev); - - err = spnic_get_link_state(nic_dev->hwdev, &link_status); - if (!err && link_status) - netif_carrier_on(netdev); - - queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, SPNIC_MODERATONE_DELAY); - - spnic_print_link_message(nic_dev, link_status); - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - spnic_notify_all_vfs_link_changed(nic_dev->hwdev, link_status); - - return 0; - -port_enable_err: - spnic_set_vport_enable(nic_dev->hwdev, glb_func_id, false, SPHW_CHANNEL_NIC); - -vport_enable_err: - spnic_flush_qps_res(nic_dev->hwdev); - /* After the vport is disabled for 
100ms, no packets will be sent to the host */ - msleep(100); - - return err; -} - -void spnic_vport_down(struct spnic_nic_dev *nic_dev) -{ - u16 glb_func_id; - - netif_carrier_off(nic_dev->netdev); - netif_tx_disable(nic_dev->netdev); - - cancel_delayed_work_sync(&nic_dev->moderation_task); - - if (sphw_get_chip_present_flag(nic_dev->hwdev)) { - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - spnic_notify_all_vfs_link_changed(nic_dev->hwdev, 0); - - spnic_maybe_set_port_state(nic_dev, false); - - glb_func_id = sphw_global_func_id(nic_dev->hwdev); - spnic_set_vport_enable(nic_dev->hwdev, glb_func_id, false, SPHW_CHANNEL_NIC); - - spnic_flush_txqs(nic_dev->netdev); - spnic_flush_qps_res(nic_dev->hwdev); - /* After the vport is disabled for 100ms, - * no packets will be sent to the host. - * The FPGA needs 2000ms. - */ - msleep(SPNIC_WAIT_FLUSH_QP_RESOURCE_TIMEOUT); - } -} - -int spnic_change_channel_settings(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *trxq_params, - spnic_reopen_handler reopen_handler, const void *priv_data) -{ - struct spnic_dyna_qp_params new_qp_params = {0}; - struct spnic_dyna_qp_params cur_qp_params = {0}; - int err; - - spnic_config_num_qps(nic_dev, trxq_params); - - err = spnic_alloc_channel_resources(nic_dev, &new_qp_params, trxq_params); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc channel resources\n"); - return err; - } - - if (!test_and_set_bit(SPNIC_CHANGE_RES_INVALID, &nic_dev->flags)) { - spnic_vport_down(nic_dev); - spnic_close_channel(nic_dev, &cur_qp_params); - spnic_free_channel_resources(nic_dev, &cur_qp_params, &nic_dev->q_params); - } - - if (nic_dev->num_qp_irq > trxq_params->num_qps) - spnic_qp_irq_change(nic_dev, trxq_params->num_qps); - nic_dev->q_params = *trxq_params; - - if (reopen_handler) - reopen_handler(nic_dev, priv_data); - - err = spnic_open_channel(nic_dev, &new_qp_params, trxq_params); - if (err) - goto open_channel_err; - - err = spnic_vport_up(nic_dev); - if (err) - goto vport_up_err; - - clear_bit(SPNIC_CHANGE_RES_INVALID, &nic_dev->flags); - nicif_info(nic_dev, drv, nic_dev->netdev, "Changed channel settings successfully\n"); - - return 0; - -vport_up_err: - spnic_close_channel(nic_dev, &new_qp_params); - -open_channel_err: - spnic_free_channel_resources(nic_dev, &new_qp_params, trxq_params); - - return err; -} - -int spnic_open(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_dyna_qp_params qp_params = {0}; - int err; - - if (test_bit(SPNIC_INTF_UP, &nic_dev->flags)) { - nicif_info(nic_dev, drv, netdev, "Netdev already open, do nothing\n"); - return 0; - } - - err = spnic_init_nicio_res(nic_dev->hwdev); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to init nicio resources\n"); - return err; - } - - err = spnic_setup_num_qps(nic_dev); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to setup num_qps\n"); - goto setup_qps_err; - } - - err = spnic_alloc_channel_resources(nic_dev, &qp_params, &nic_dev->q_params); - if (err) - goto alloc_channel_res_err; - - err = spnic_open_channel(nic_dev, &qp_params, &nic_dev->q_params); - if (err) - goto open_channel_err; - - err = spnic_vport_up(nic_dev); - if (err) - goto vport_up_err; - - set_bit(SPNIC_INTF_UP, &nic_dev->flags); - nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is up\n"); - - return 0; - -vport_up_err: - spnic_close_channel(nic_dev, &qp_params); - -open_channel_err: - spnic_free_channel_resources(nic_dev, &qp_params, &nic_dev->q_params); - -alloc_channel_res_err: - spnic_destroy_num_qps(nic_dev); - 
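spnic_open() here follows the usual kernel goto-unwind idiom: each setup step jumps to a label on failure, and the labels release earlier acquisitions in reverse order, so the label reached encodes exactly how much needs to be undone. A minimal self-contained sketch of the pattern, with stand-in functions that are not driver code:

#include <errno.h>

static int grab_a(void)  { return 0; }	/* stand-in: pretend this can fail */
static int grab_b(void)  { return 0; }	/* stand-in: pretend this can fail */
static void undo_a(void) { }		/* stand-in release */

static int example_open(void)
{
	int err;

	err = grab_a();
	if (err)
		return err;		/* nothing acquired yet */

	err = grab_b();
	if (err)
		goto undo_a_step;	/* only step a needs unwinding */

	return 0;

undo_a_step:
	undo_a();			/* release in reverse order */
	return err;
}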
-setup_qps_err: - spnic_deinit_nicio_res(nic_dev->hwdev); - - return err; -} - -int spnic_close(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_dyna_qp_params qp_params = {0}; - - if (!test_and_clear_bit(SPNIC_INTF_UP, &nic_dev->flags)) { - nicif_info(nic_dev, drv, netdev, "Netdev already closed, do nothing\n"); - return 0; - } - - if (test_and_clear_bit(SPNIC_CHANGE_RES_INVALID, &nic_dev->flags)) - goto out; - - spnic_vport_down(nic_dev); - spnic_close_channel(nic_dev, &qp_params); - spnic_free_channel_resources(nic_dev, &qp_params, &nic_dev->q_params); - -out: - spnic_deinit_nicio_res(nic_dev->hwdev); - spnic_destroy_num_qps(nic_dev); - - nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is down\n"); - - return 0; -} - -#define IPV6_ADDR_LEN 4 -#define PKT_INFO_LEN 9 -#define BITS_PER_TUPLE 32 -static u32 calc_xor_rss(u8 *rss_tunple, u32 len) -{ - u32 hash_value; - u32 i; - - hash_value = rss_tunple[0]; - for (i = 1; i < len; i++) - hash_value = hash_value ^ rss_tunple[i]; - - return hash_value; -} - -static u32 calc_toep_rss(u32 *rss_tunple, u32 len, const u32 *rss_key) -{ - u32 rss = 0; - u32 i, j; - - for (i = 1; i <= len; i++) { - for (j = 0; j < BITS_PER_TUPLE; j++) - if (rss_tunple[i - 1] & ((u32)1 << - (u32)((BITS_PER_TUPLE - 1) - j))) - rss ^= (rss_key[i - 1] << j) | - (u32)((u64)rss_key[i] >> (BITS_PER_TUPLE - j)); - } - - return rss; -} - -#define RSS_VAL(val, type) \ - (((type) == SPNIC_RSS_HASH_ENGINE_TYPE_TOEP) ? ntohl(val) : (val)) - -static u8 parse_ipv6_info(struct sk_buff *skb, u32 *rss_tunple, u8 hash_engine, u32 *len) -{ - struct ipv6hdr *ipv6hdr = ipv6_hdr(skb); - u32 *saddr = (u32 *)&ipv6hdr->saddr; - u32 *daddr = (u32 *)&ipv6hdr->daddr; - u8 i; - - for (i = 0; i < IPV6_ADDR_LEN; i++) { - rss_tunple[i] = RSS_VAL(daddr[i], hash_engine); - /* The offset of the sport relative to the dport is 4 */ - rss_tunple[(u32)(i + IPV6_ADDR_LEN)] = RSS_VAL(saddr[i], hash_engine); - } - *len = IPV6_ADDR_LEN + IPV6_ADDR_LEN; - - if (skb_network_header(skb) + sizeof(*ipv6hdr) == skb_transport_header(skb)) - return ipv6hdr->nexthdr; - return 0; -} - -u16 select_queue_by_hash_func(struct net_device *dev, struct sk_buff *skb, - unsigned int num_tx_queues) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(dev); - struct nic_rss_type rss_type = nic_dev->rss_type; - struct iphdr *iphdr = NULL; - u32 rss_tunple[PKT_INFO_LEN] = {0}; - u32 len = 0; - u32 hash = 0; - u8 hash_engine = nic_dev->rss_hash_engine; - u8 l4_proto; - unsigned char *l4_hdr = NULL; - - if (skb_rx_queue_recorded(skb)) { - hash = skb_get_rx_queue(skb); - - if (unlikely(hash >= num_tx_queues)) - hash %= num_tx_queues; - return (u16)hash; - } - - iphdr = ip_hdr(skb); - if (iphdr->version == IPV4_VERSION) { - rss_tunple[len++] = RSS_VAL(iphdr->daddr, hash_engine); - rss_tunple[len++] = RSS_VAL(iphdr->saddr, hash_engine); - l4_proto = iphdr->protocol; - } else if (iphdr->version == IPV6_VERSION) { - l4_proto = parse_ipv6_info(skb, (u32 *)rss_tunple, hash_engine, &len); - } else { - return (u16)hash; - } - - if ((iphdr->version == IPV4_VERSION && - ((l4_proto == IPPROTO_UDP && rss_type.udp_ipv4) || - (l4_proto == IPPROTO_TCP && rss_type.tcp_ipv4))) || - (iphdr->version == IPV6_VERSION && - ((l4_proto == IPPROTO_UDP && rss_type.udp_ipv6) || - (l4_proto == IPPROTO_TCP && rss_type.tcp_ipv6)))) { - l4_hdr = skb_transport_header(skb); - /* High 16 bits are dport, low 16 bits are sport. 
*/ - rss_tunple[len++] = ((u32)ntohs(*((u16 *)l4_hdr + 1U)) << 16) | - ntohs(*(u16 *)l4_hdr); - } /* rss_type.ipv4 and rss_type.ipv6 default to on. */ - - if (hash_engine == SPNIC_RSS_HASH_ENGINE_TYPE_TOEP) - hash = calc_toep_rss((u32 *)rss_tunple, len, nic_dev->rss_hkey_be); - else - hash = calc_xor_rss((u8 *)rss_tunple, len * (u32)sizeof(u32)); - - return (u16)nic_dev->rss_indir[hash & 0xFF]; -} - -static u16 spnic_select_queue(struct net_device *netdev, struct sk_buff *skb, - struct net_device *sb_dev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (skb->vlan_tci) - skb->priority = skb->vlan_tci >> VLAN_PRIO_SHIFT; - - if (netdev_get_num_tc(netdev)) - goto fall_back; - - if (test_bit(SPNIC_SAME_RXTX, &nic_dev->flags)) - return select_queue_by_hash_func(netdev, skb, netdev->real_num_tx_queues); - -fall_back: - return netdev_pick_tx(netdev, skb, NULL); -} - -static void spnic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_txq_stats *txq_stats = NULL; - struct spnic_rxq_stats *rxq_stats = NULL; - struct spnic_txq *txq = NULL; - struct spnic_rxq *rxq = NULL; - u64 bytes, packets, dropped, errors; - unsigned int start; - int i; - - bytes = 0; - packets = 0; - dropped = 0; - for (i = 0; i < nic_dev->max_qps; i++) { - if (!nic_dev->txqs) - break; - - txq = &nic_dev->txqs[i]; - txq_stats = &txq->txq_stats; - do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - bytes += txq_stats->bytes; - packets += txq_stats->packets; - dropped += txq_stats->dropped; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); - } - stats->tx_packets = packets; - stats->tx_bytes = bytes; - stats->tx_dropped = dropped; - - bytes = 0; - packets = 0; - errors = 0; - dropped = 0; - for (i = 0; i < nic_dev->max_qps; i++) { - if (!nic_dev->rxqs) - break; - - rxq = &nic_dev->rxqs[i]; - rxq_stats = &rxq->rxq_stats; - do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - bytes += rxq_stats->bytes; - packets += rxq_stats->packets; - errors += rxq_stats->csum_errors + rxq_stats->other_errors; - dropped += rxq_stats->dropped; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); - } - stats->rx_packets = packets; - stats->rx_bytes = bytes; - stats->rx_errors = errors; - stats->rx_dropped = dropped; -} - -static void spnic_tx_timeout(struct net_device *netdev, unsigned int txqueue) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_io_queue *sq = NULL; - bool hw_err = false; - u32 sw_pi, hw_ci; - u8 q_id; - - SPNIC_NIC_STATS_INC(nic_dev, netdev_tx_timeout); - nicif_err(nic_dev, drv, netdev, "Tx timeout\n"); - - for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { - if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id))) - continue; - - sq = nic_dev->txqs[q_id].sq; - sw_pi = spnic_get_sq_local_pi(sq); - hw_ci = spnic_get_sq_hw_ci(sq); - nicif_info(nic_dev, drv, netdev, "txq%u: sw_pi: %u, hw_ci: %u, sw_ci: %u, napi->state: 0x%lx\n", - q_id, sw_pi, hw_ci, spnic_get_sq_local_ci(sq), - nic_dev->q_params.irq_cfg[q_id].napi.state); - - if (sw_pi != hw_ci) - hw_err = true; - } - - if (hw_err) - set_bit(EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag); -} - -static int spnic_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u32 mtu = (u32)new_mtu; - int err = 0; - - u32 xdp_max_mtu; - - if (spnic_is_xdp_enable(nic_dev)) { - xdp_max_mtu = spnic_xdp_max_mtu(nic_dev); - if (mtu > xdp_max_mtu) { - nicif_err(nic_dev, 
drv, netdev, "Max MTU for xdp usage is %d\n", - xdp_max_mtu); - return -EINVAL; - } - } - - err = spnic_set_port_mtu(nic_dev->hwdev, (u16)mtu); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to change port mtu to %d\n", - new_mtu); - } else { - nicif_info(nic_dev, drv, nic_dev->netdev, "Change mtu from %u to %d\n", - netdev->mtu, new_mtu); - netdev->mtu = mtu; - } - - return err; -} - -static int spnic_set_mac_addr(struct net_device *netdev, void *addr) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct sockaddr *saddr = addr; - int err; - - if (!is_valid_ether_addr(saddr->sa_data)) - return -EADDRNOTAVAIL; - - if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) { - nicif_info(nic_dev, drv, netdev, "Already using mac address %pM\n", - saddr->sa_data); - return 0; - } - - err = spnic_update_mac(nic_dev->hwdev, netdev->dev_addr, saddr->sa_data, 0, - sphw_global_func_id(nic_dev->hwdev)); - if (err) - return err; - - ether_addr_copy(netdev->dev_addr, saddr->sa_data); - - nicif_info(nic_dev, drv, netdev, "Set new mac address %pM\n", saddr->sa_data); - - return 0; -} - -static int spnic_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - unsigned long *vlan_bitmap = nic_dev->vlan_bitmap; - u16 func_id; - u32 col, line; - int err = 0; - - /* VLAN 0 is never added, which is the same as VLAN 0 being deleted. */ - if (vid == 0) - goto end; - - col = VID_COL(nic_dev, vid); - line = VID_LINE(nic_dev, vid); - - func_id = sphw_global_func_id(nic_dev->hwdev); - - err = spnic_add_vlan(nic_dev->hwdev, vid, func_id); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to add vlan %u\n", vid); - goto end; - } - - set_bit(col, &vlan_bitmap[line]); - - nicif_info(nic_dev, drv, netdev, "Add vlan %u\n", vid); - -end: - return err; -} - -static int spnic_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - unsigned long *vlan_bitmap = nic_dev->vlan_bitmap; - u16 func_id; - int col, line; - int err = 0; - - col = VID_COL(nic_dev, vid); - line = VID_LINE(nic_dev, vid); - - /* In the broadcast scenario, ucode finds the corresponding function - * based on VLAN 0 of vlan table. If we delete VLAN 0, the VLAN function - * is affected. - */ - if (vid == 0) - goto end; - - func_id = sphw_global_func_id(nic_dev->hwdev); - err = spnic_del_vlan(nic_dev->hwdev, vid, func_id); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to delete vlan\n"); - goto end; - } - - clear_bit(col, &vlan_bitmap[line]); - - nicif_info(nic_dev, drv, netdev, "Remove vlan %u\n", vid); - -end: - return err; -} - -#define SET_FEATURES_OP_STR(op) ((op) ? "Enable" : "Disable") - -static int set_feature_rx_csum(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, - netdev_features_t features, netdev_features_t *failed_features) -{ - netdev_features_t changed = wanted_features ^ features; - - if (changed & NETIF_F_RXCSUM) - spnic_info(nic_dev, drv, "%s rx csum success\n", - SET_FEATURES_OP_STR(wanted_features & NETIF_F_RXCSUM)); - - return 0; -} - -static int set_feature_tso(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, - netdev_features_t features, netdev_features_t *failed_features) -{ - netdev_features_t changed = wanted_features ^ features; - - if (changed & NETIF_F_TSO) - spnic_info(nic_dev, drv, "%s tso success\n", - SET_FEATURES_OP_STR(wanted_features & NETIF_F_TSO)); - - return 0; -} - -static int set_feature_lro(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, - netdev_features_t features, netdev_features_t *failed_features) -{ - netdev_features_t changed = wanted_features ^ features; - bool en = !!(wanted_features & NETIF_F_LRO); - int err; - - if (!(changed & NETIF_F_LRO)) - return 0; - - if (en && spnic_is_xdp_enable(nic_dev)) { - spnic_err(nic_dev, drv, "Cannot enable LRO when XDP is enabled\n"); - *failed_features |= NETIF_F_LRO; - return -EINVAL; - } - - err = spnic_set_rx_lro_state(nic_dev->hwdev, en, SPNIC_LRO_DEFAULT_TIME_LIMIT, - SPNIC_LRO_DEFAULT_COAL_PKT_SIZE); - if (err) { - spnic_err(nic_dev, drv, "%s lro failed\n", SET_FEATURES_OP_STR(en)); - *failed_features |= NETIF_F_LRO; - } else { - spnic_info(nic_dev, drv, "%s lro success\n", SET_FEATURES_OP_STR(en)); - } - - return err; -} - -static int set_feature_rx_cvlan(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, - netdev_features_t features, netdev_features_t *failed_features) -{ - netdev_features_t changed = wanted_features ^ features; - netdev_features_t vlan_feature = NETIF_F_HW_VLAN_CTAG_RX; - bool en = !!(wanted_features & vlan_feature); - int err; - - if (!(changed & vlan_feature)) - return 0; - - err = spnic_set_rx_vlan_offload(nic_dev->hwdev, en); - if (err) { - spnic_err(nic_dev, drv, "%s rxvlan failed\n", SET_FEATURES_OP_STR(en)); - *failed_features |= vlan_feature; - } else { - spnic_info(nic_dev, drv, "%s rxvlan success\n", SET_FEATURES_OP_STR(en)); - } - - return err; -} - -static int set_feature_vlan_filter(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, - netdev_features_t features, netdev_features_t *failed_features) -{ - netdev_features_t changed = wanted_features ^ features; - netdev_features_t vlan_filter_feature = NETIF_F_HW_VLAN_CTAG_FILTER; - bool en = !!(wanted_features & vlan_filter_feature); - int err; - - if (!(changed & vlan_filter_feature)) - return 0; - - err = spnic_set_vlan_fliter(nic_dev->hwdev, en); - if (err) { - spnic_err(nic_dev, drv, "%s rx vlan filter failed\n", SET_FEATURES_OP_STR(en)); - *failed_features |= vlan_filter_feature; - } else { - spnic_info(nic_dev, drv, "%s rx vlan filter success\n", SET_FEATURES_OP_STR(en)); - } - - return err; -} - -static int set_features(struct spnic_nic_dev *nic_dev, netdev_features_t pre_features, - netdev_features_t features) -{ - netdev_features_t failed_features = 0; - u32 err = 0; - - err |= (u32)set_feature_rx_csum(nic_dev, features, pre_features, &failed_features); - err |= (u32)set_feature_tso(nic_dev, features, pre_features, &failed_features); - err |= (u32)set_feature_lro(nic_dev, features, pre_features, &failed_features); - err |= (u32)set_feature_rx_cvlan(nic_dev, features, 
pre_features, &failed_features); - err |= (u32)set_feature_vlan_filter(nic_dev, features, pre_features, &failed_features); - if (err) { - nic_dev->netdev->features = features ^ failed_features; - return -EIO; - } - - return 0; -} - -static int spnic_set_features(struct net_device *netdev, netdev_features_t features) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - return set_features(nic_dev, nic_dev->netdev->features, features); -} - -int spnic_set_hw_features(struct spnic_nic_dev *nic_dev) -{ - /* enable all hw features in netdev->features */ - return set_features(nic_dev, ~nic_dev->netdev->features, nic_dev->netdev->features); -} - -static netdev_features_t spnic_fix_features(struct net_device *netdev, netdev_features_t features) -{ - /* If Rx checksum is disabled, then LRO should also be disabled */ - if (!(features & NETIF_F_RXCSUM)) - features &= ~NETIF_F_LRO; - - return features; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void spnic_netpoll(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u16 i; - - for (i = 0; i < nic_dev->q_params.num_qps; i++) - napi_schedule(&nic_dev->q_params.irq_cfg[i].napi); -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - -static int spnic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - int err; - - if (is_multicast_ether_addr(mac) || vf >= pci_num_vf(adapter->pdev)) - return -EINVAL; - - err = spnic_set_vf_mac(adapter->hwdev, OS_VF_ID_TO_HW(vf), mac); - if (err) - return err; - - if (!is_zero_ether_addr(mac)) - nic_info(&adapter->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf); - else - nic_info(&adapter->pdev->dev, "Deleting MAC on VF %d\n", vf); - - nic_info(&adapter->pdev->dev, "Please reload the VF driver to make this change effective."); - - return 0; -} - -static int set_hw_vf_vlan(void *hwdev, u16 cur_vlanprio, int vf, u16 vlan, u8 qos) -{ - int err = 0; - u16 old_vlan = cur_vlanprio & VLAN_VID_MASK; - - if (vlan || qos) { - if (cur_vlanprio) { - err = spnic_kill_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf)); - if (err) - return err; - } - err = spnic_add_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf), vlan, qos); - } else { - err = spnic_kill_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf)); - } - - if (err) - return err; - - return spnic_update_mac_vlan(hwdev, old_vlan, vlan, OS_VF_ID_TO_HW(vf)); -} - -#define SPNIC_MAX_VLAN_ID 4094 -#define SPNIC_MAX_QOS_NUM 7 - -static int spnic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, - u8 qos, __be16 vlan_proto) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - u16 vlanprio, cur_vlanprio; - - if (vf >= pci_num_vf(adapter->pdev) || vlan > SPNIC_MAX_VLAN_ID || qos > SPNIC_MAX_QOS_NUM) - return -EINVAL; - if (vlan_proto != htons(ETH_P_8021Q)) - return -EPROTONOSUPPORT; - vlanprio = vlan | qos << SPNIC_VLAN_PRIORITY_SHIFT; - cur_vlanprio = spnic_vf_info_vlanprio(adapter->hwdev, OS_VF_ID_TO_HW(vf)); - /* duplicate request, so just return success */ - if (vlanprio == cur_vlanprio) - return 0; - - return set_hw_vf_vlan(adapter->hwdev, cur_vlanprio, vf, vlan, qos); -} - -static int spnic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - int err = 0; - bool cur_spoofchk = false; - - if (vf >= pci_num_vf(adapter->pdev)) - return -EINVAL; - - cur_spoofchk = spnic_vf_info_spoofchk(adapter->hwdev, OS_VF_ID_TO_HW(vf)); - /* same request, so just return success */ - if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk)) - 
return 0; - - err = spnic_set_vf_spoofchk(adapter->hwdev, OS_VF_ID_TO_HW(vf), setting); - if (!err) - nicif_info(adapter, drv, netdev, "Set VF %d spoofchk %s\n", - vf, setting ? "on" : "off"); - - return err; -} - -int spnic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - int err; - bool cur_trust; - - if (vf >= pci_num_vf(adapter->pdev)) - return -EINVAL; - - cur_trust = spnic_get_vf_trust(adapter->hwdev, OS_VF_ID_TO_HW(vf)); - /* same request, so just return success */ - if ((setting && cur_trust) || (!setting && !cur_trust)) - return 0; - - err = spnic_set_vf_trust(adapter->hwdev, OS_VF_ID_TO_HW(vf), setting); - if (!err) - nicif_info(adapter, drv, netdev, "Set VF %d trusted %s successfully\n", - vf, setting ? "on" : "off"); - else - nicif_err(adapter, drv, netdev, "Failed to set VF %d trusted %s\n", - vf, setting ? "on" : "off"); - - return err; -} - -static int spnic_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - - if (vf >= pci_num_vf(adapter->pdev)) - return -EINVAL; - - spnic_get_vf_config(adapter->hwdev, OS_VF_ID_TO_HW(vf), ivi); - - return 0; -} - -/** - * spnic_ndo_set_vf_link_state - * @netdev: network interface device structure - * @vf_id: VF identifier - * @link: required link state - * - * Set the link state of a specified VF, regardless of physical link state - **/ -int spnic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) -{ - static const char * const vf_link[] = {"auto", "enable", "disable"}; - struct spnic_nic_dev *adapter = netdev_priv(netdev); - int err; - - /* validate the request */ - if (vf_id >= pci_num_vf(adapter->pdev)) { - nicif_err(adapter, drv, netdev, "Invalid VF Identifier %d\n", vf_id); - return -EINVAL; - } - - err = spnic_set_vf_link_state(adapter->hwdev, OS_VF_ID_TO_HW(vf_id), link); - if (!err) - nicif_info(adapter, drv, netdev, "Set VF %d link state: %s\n", - vf_id, vf_link[link]); - - return err; -} - -static int is_set_vf_bw_param_valid(const struct spnic_nic_dev *adapter, - int vf, int min_tx_rate, int max_tx_rate) -{ - /* verify VF is active */ - if (vf >= pci_num_vf(adapter->pdev)) { - nicif_err(adapter, drv, adapter->netdev, "VF number must be less than %d\n", - pci_num_vf(adapter->pdev)); - return -EINVAL; - } - - if (max_tx_rate < min_tx_rate) { - nicif_err(adapter, drv, adapter->netdev, "Invalid rate, max rate %d must be greater than min rate %d\n", - max_tx_rate, min_tx_rate); - return -EINVAL; - } - - return 0; -} - -#define SPNIC_TX_RATE_TABLE_FULL 12 - -static int spnic_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - struct nic_port_info port_info = {0}; - u8 link_status = 0; - u32 speeds[] = {0, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, - SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, - SPEED_200000}; - int err = 0; - - err = is_set_vf_bw_param_valid(adapter, vf, min_tx_rate, max_tx_rate); - if (err) - return err; - - err = spnic_get_link_state(adapter->hwdev, &link_status); - if (err) { - nicif_err(adapter, drv, netdev, "Failed to get link status when setting VF tx rate\n"); - return -EIO; - } - - if (!link_status) { - nicif_err(adapter, drv, netdev, "Link status must be up when setting VF tx rate\n"); - return -EINVAL; - } - - err = spnic_get_port_info(adapter->hwdev, &port_info, SPHW_CHANNEL_NIC); - if (err || port_info.speed >= PORT_SPEED_UNKNOWN) - return 
-EIO; - - /* rate limit cannot be less than 0 or greater than link speed */ - if (max_tx_rate < 0 || max_tx_rate > speeds[port_info.speed]) { - nicif_err(adapter, drv, netdev, "Set vf max tx rate must be in [0 - %u]\n", - speeds[port_info.speed]); - return -EINVAL; - } - - err = spnic_set_vf_tx_rate(adapter->hwdev, OS_VF_ID_TO_HW(vf), max_tx_rate, min_tx_rate); - if (err) { - nicif_err(adapter, drv, netdev, "Unable to set VF %d max rate %d min rate %d%s\n", - vf, max_tx_rate, min_tx_rate, - err == SPNIC_TX_RATE_TABLE_FULL ? ", tx rate profile is full" : ""); - return -EIO; - } - - nicif_info(adapter, drv, netdev, "Set VF %d max tx rate %d min tx rate %d successfully\n", - vf, max_tx_rate, min_tx_rate); - - return 0; -} - -bool spnic_is_xdp_enable(struct spnic_nic_dev *nic_dev) -{ - return !!nic_dev->xdp_prog; -} - -int spnic_xdp_max_mtu(struct spnic_nic_dev *nic_dev) -{ - return nic_dev->rx_buff_len - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); -} - -static int spnic_xdp_setup(struct spnic_nic_dev *nic_dev, struct bpf_prog *prog, - struct netlink_ext_ack *extack) -{ - struct bpf_prog *old_prog = NULL; - int max_mtu = spnic_xdp_max_mtu(nic_dev); - int q_id; - - if (nic_dev->netdev->mtu > max_mtu) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to setup xdp program, the current MTU %d is larger than max allowed MTU %d\n", - nic_dev->netdev->mtu, max_mtu); - NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading xdp program"); - return -EINVAL; - } - - if (nic_dev->netdev->features & NETIF_F_LRO) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to setup xdp program while LRO is on\n"); - NL_SET_ERR_MSG_MOD(extack, "Failed to setup xdp program while LRO is on"); - return -EINVAL; - } - - old_prog = xchg(&nic_dev->xdp_prog, prog); - for (q_id = 0; q_id < nic_dev->max_qps; q_id++) - xchg(&nic_dev->rxqs[q_id].xdp_prog, nic_dev->xdp_prog); - - if (old_prog) - bpf_prog_put(old_prog); - - return 0; -} - -static int spnic_xdp(struct net_device *netdev, struct netdev_bpf *xdp) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - switch (xdp->command) { - case XDP_SETUP_PROG: - return spnic_xdp_setup(nic_dev, xdp->prog, xdp->extack); - default: - return -EINVAL; - } -} - -static const struct net_device_ops spnic_netdev_ops = { - .ndo_open = spnic_open, - .ndo_stop = spnic_close, - .ndo_start_xmit = spnic_xmit_frame, - - .ndo_get_stats64 = spnic_get_stats64, - - .ndo_tx_timeout = spnic_tx_timeout, - .ndo_select_queue = spnic_select_queue, - .ndo_change_mtu = spnic_change_mtu, - .ndo_set_mac_address = spnic_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - - .ndo_vlan_rx_add_vid = spnic_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = spnic_vlan_rx_kill_vid, - - .ndo_set_vf_mac = spnic_ndo_set_vf_mac, - .ndo_set_vf_vlan = spnic_ndo_set_vf_vlan, - .ndo_set_vf_rate = spnic_ndo_set_vf_bw, - .ndo_set_vf_spoofchk = spnic_ndo_set_vf_spoofchk, - - .ndo_set_vf_trust = spnic_ndo_set_vf_trust, - - .ndo_get_vf_config = spnic_ndo_get_vf_config, - -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = spnic_netpoll, -#endif /* CONFIG_NET_POLL_CONTROLLER */ - - .ndo_set_rx_mode = spnic_nic_set_rx_mode, - - .ndo_bpf = spnic_xdp, - - .ndo_set_vf_link_state = spnic_ndo_set_vf_link_state, - - .ndo_fix_features = spnic_fix_features, - .ndo_set_features = spnic_set_features, -}; - -static const struct net_device_ops spnicvf_netdev_ops = { - .ndo_open = spnic_open, - .ndo_stop = spnic_close, - .ndo_start_xmit = spnic_xmit_frame, - - .ndo_get_stats64 = spnic_get_stats64, - - .ndo_tx_timeout = 
spnic_tx_timeout, - .ndo_select_queue = spnic_select_queue, - - .ndo_change_mtu = spnic_change_mtu, - .ndo_set_mac_address = spnic_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - - .ndo_vlan_rx_add_vid = spnic_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = spnic_vlan_rx_kill_vid, - -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = spnic_netpoll, -#endif /* CONFIG_NET_POLL_CONTROLLER */ - - .ndo_set_rx_mode = spnic_nic_set_rx_mode, - - .ndo_bpf = spnic_xdp, - - .ndo_fix_features = spnic_fix_features, - .ndo_set_features = spnic_set_features, -}; - -void spnic_set_netdev_ops(struct spnic_nic_dev *nic_dev) -{ - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - nic_dev->netdev->netdev_ops = &spnic_netdev_ops; - else - nic_dev->netdev->netdev_ops = &spnicvf_netdev_ops; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic.h deleted file mode 100644 index 83c904bc0f72334dd5086aade758c58fbc96b3a7..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic.h +++ /dev/null @@ -1,148 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_H -#define SPNIC_NIC_H -#include -#include "sphw_common.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_mag_cmd.h" - -#define MSG_TO_MGMT_SYNC_RETURN_ERR(err, status, out_size) \ - ((err) || (status) || !(out_size)) - -struct spnic_sq_attr { - u8 dma_attr_off; - u8 pending_limit; - u8 coalescing_time; - u8 intr_en; - u16 intr_idx; - u32 l2nic_sqn; - u64 ci_dma_base; -}; - -struct vf_data_storage { - u8 drv_mac_addr[ETH_ALEN]; - u8 user_mac_addr[ETH_ALEN]; - bool registered; - bool use_specified_mac; - u16 pf_vlan; - u8 pf_qos; - u32 max_rate; - u32 min_rate; - - bool link_forced; - bool link_up; /* only valid if VF link is forced */ - bool spoofchk; - bool trust; - u16 num_qps; -}; - -struct spnic_port_routine_cmd { - bool mpu_send_sfp_info; - bool mpu_send_sfp_abs; - - struct mag_cmd_get_xsfp_info std_sfp_info; - struct mag_cmd_get_xsfp_present abs; -}; - -struct spnic_nic_cfg { - void *hwdev; - void *pcidev_hdl; - void *dev_hdl; - - struct spnic_io_queue *sq; - struct spnic_io_queue *rq; - - u16 rx_buff_len; - - u16 num_qps; - u16 max_qps; - - void *ci_vaddr_base; - dma_addr_t ci_dma_base; - - /* including rq and rx doorbell */ - u16 allocated_num_db; - u8 __iomem **db_addr; - - u8 link_status; - - u16 max_vfs; - struct vf_data_storage *vf_infos; - struct spnic_dcb_state dcb_state; - - u64 feature_cap; - - struct semaphore cfg_lock; - - /* Valid when pfc is disable */ - bool pause_set; - struct nic_pause_config nic_pause; - - u8 pfc_en; - u8 pfc_bitmap; - - struct nic_port_info port_info; - - /* percentage of pf link bandwidth */ - u32 pf_bw_limit; - - struct spnic_port_routine_cmd rt_cmd; - /* mutex used for copy sfp info */ - struct mutex sfp_mutex; -}; - -struct vf_msg_handler { - u16 cmd; - int (*handler)(struct spnic_nic_cfg *nic_cfg, u16 vf, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); -}; - -struct nic_event_handler { - u16 cmd; - void (*handler)(void *hwdev, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); -}; - -int spnic_set_ci_table(void *hwdev, struct spnic_sq_attr *attr); - -int l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); - -int l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u16 channel); - -int spnic_cfg_vf_vlan(struct 
spnic_nic_cfg *nic_cfg, u8 opcode, u16 vid, u8 qos, int vf_id); - -int spnic_vf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); - -void spnic_pf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); - -int spnic_pf_mbox_handler(void *hwdev, void *pri_handle, u16 vf_id, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size); - -u8 spnic_nic_sw_aeqe_handler(void *hwdev, u8 event, u8 *data); - -int spnic_vf_func_init(struct spnic_nic_cfg *nic_cfg); - -void spnic_vf_func_free(struct spnic_nic_cfg *nic_cfg); - -void spnic_notify_dcb_state_event(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state); - -int spnic_save_dcb_state(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state); - -void spnic_notify_vf_link_status(struct spnic_nic_cfg *nic_cfg, u16 vf_id, u8 link_status); - -int spnic_vf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size); - -void spnic_pf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size); - -int spnic_pf_mag_mbox_handler(void *hwdev, void *pri_handle, u16 vf_id, - u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c deleted file mode 100644 index d241f6a7947d5aa0159aa480d7cb5fc3fb24a45a..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c +++ /dev/null @@ -1,1334 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic.h" -#include "spnic_nic_cmd.h" -#include "sphw_common.h" - -int spnic_set_ci_table(void *hwdev, struct spnic_sq_attr *attr) -{ - struct spnic_cmd_cons_idx_attr cons_idx_attr; - u16 out_size = sizeof(cons_idx_attr); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !attr) - return -EINVAL; - - memset(&cons_idx_attr, 0, sizeof(cons_idx_attr)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - cons_idx_attr.func_idx = sphw_global_func_id(hwdev); - - cons_idx_attr.dma_attr_off = attr->dma_attr_off; - cons_idx_attr.pending_limit = attr->pending_limit; - cons_idx_attr.coalescing_time = attr->coalescing_time; - - if (attr->intr_en) { - cons_idx_attr.intr_en = attr->intr_en; - cons_idx_attr.intr_idx = attr->intr_idx; - } - - cons_idx_attr.l2nic_sqn = attr->l2nic_sqn; - cons_idx_attr.ci_addr = attr->ci_dma_base; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SQ_CI_ATTR_SET, - &cons_idx_attr, sizeof(cons_idx_attr), - &cons_idx_attr, &out_size); - if (err || !out_size || cons_idx_attr.msg_head.status) { - sdk_err(nic_cfg->dev_hdl, - "Failed to set ci attribute table, err: %d, status: 0x%x, out_size: 0x%x\n", - err, cons_idx_attr.msg_head.status, out_size); - return -EFAULT; - } - - return 0; -} - -static int spnic_check_mac_info(u8 status, u16 vlan_id) -{ - if (status && status != SPNIC_MGMT_STATUS_EXIST && status != SPNIC_PF_SET_VF_ALREADY) - return -EINVAL; - - return 0; -} - -#define SPNIC_VLAN_ID_MASK 0x7FFF - -int spnic_set_mac(void 
*hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel) -{ - struct spnic_port_mac_set mac_info; - u16 out_size = sizeof(mac_info); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !mac_addr) - return -EINVAL; - - memset(&mac_info, 0, sizeof(mac_info)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if ((vlan_id & SPNIC_VLAN_ID_MASK) >= VLAN_N_VID) { - nic_err(nic_cfg->dev_hdl, "Invalid VLAN number: %d\n", - vlan_id & SPNIC_VLAN_ID_MASK); - return -EINVAL; - } - - mac_info.func_id = func_id; - mac_info.vlan_id = vlan_id; - ether_addr_copy(mac_info.mac, mac_addr); - - err = l2nic_msg_to_mgmt_sync_ch(hwdev, SPNIC_NIC_CMD_SET_MAC, - &mac_info, sizeof(mac_info), - &mac_info, &out_size, channel); - if (err || !out_size || spnic_check_mac_info(mac_info.msg_head.status, mac_info.vlan_id)) { - nic_err(nic_cfg->dev_hdl, - "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, mac_info.msg_head.status, out_size, channel); - return -EINVAL; - } - - if (mac_info.msg_head.status == SPNIC_PF_SET_VF_ALREADY) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF mac, Ignore set operation\n"); - return SPNIC_PF_SET_VF_ALREADY; - } - - if (mac_info.msg_head.status == SPNIC_MGMT_STATUS_EXIST) { - nic_warn(nic_cfg->dev_hdl, "MAC is repeated. Ignore update operation\n"); - return 0; - } - - return 0; -} - -int spnic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel) -{ - struct spnic_port_mac_set mac_info; - u16 out_size = sizeof(mac_info); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !mac_addr) - return -EINVAL; - - memset(&mac_info, 0, sizeof(mac_info)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if ((vlan_id & SPNIC_VLAN_ID_MASK) >= VLAN_N_VID) { - nic_err(nic_cfg->dev_hdl, "Invalid VLAN number: %d\n", - (vlan_id & SPNIC_VLAN_ID_MASK)); - return -EINVAL; - } - - mac_info.func_id = func_id; - mac_info.vlan_id = vlan_id; - ether_addr_copy(mac_info.mac, mac_addr); - - err = l2nic_msg_to_mgmt_sync_ch(hwdev, SPNIC_NIC_CMD_DEL_MAC, - &mac_info, sizeof(mac_info), &mac_info, - &out_size, channel); - if (err || !out_size || - (mac_info.msg_head.status && mac_info.msg_head.status != - SPNIC_PF_SET_VF_ALREADY)) { - nic_err(nic_cfg->dev_hdl, - "Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, mac_info.msg_head.status, out_size, channel); - return -EINVAL; - } - - if (mac_info.msg_head.status == SPNIC_PF_SET_VF_ALREADY) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF mac, Ignore delete operation.\n"); - return SPNIC_PF_SET_VF_ALREADY; - } - - return 0; -} - -int spnic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, u16 func_id) -{ - struct spnic_port_mac_update mac_info; - u16 out_size = sizeof(mac_info); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !old_mac || !new_mac) - return -EINVAL; - - memset(&mac_info, 0, sizeof(mac_info)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if ((vlan_id & SPNIC_VLAN_ID_MASK) >= VLAN_N_VID) { - nic_err(nic_cfg->dev_hdl, "Invalid VLAN number: %d\n", - vlan_id & SPNIC_VLAN_ID_MASK); - return -EINVAL; - } - - mac_info.func_id = func_id; - mac_info.vlan_id = vlan_id; - ether_addr_copy(mac_info.old_mac, old_mac); - ether_addr_copy(mac_info.new_mac, new_mac); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_UPDATE_MAC, - &mac_info, sizeof(mac_info), - &mac_info, &out_size); - if (err || !out_size || 
spnic_check_mac_info(mac_info.msg_head.status, mac_info.vlan_id)) { - nic_err(nic_cfg->dev_hdl, - "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n", - err, mac_info.msg_head.status, out_size); - return -EINVAL; - } - - if (mac_info.msg_head.status == SPNIC_PF_SET_VF_ALREADY) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF MAC. Ignore update operation\n"); - return SPNIC_PF_SET_VF_ALREADY; - } - - if (mac_info.msg_head.status == SPNIC_MGMT_STATUS_EXIST) { - nic_warn(nic_cfg->dev_hdl, "MAC is repeated. Ignore update operation\n"); - return 0; - } - - return 0; -} - -int spnic_get_default_mac(void *hwdev, u8 *mac_addr) -{ - struct spnic_port_mac_set mac_info; - u16 out_size = sizeof(mac_info); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !mac_addr) - return -EINVAL; - - memset(&mac_info, 0, sizeof(mac_info)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - mac_info.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_GET_MAC, &mac_info, sizeof(mac_info), - &mac_info, &out_size); - if (err || !out_size || mac_info.msg_head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n", - err, mac_info.msg_head.status, out_size); - return -EINVAL; - } - - ether_addr_copy(mac_addr, mac_info.mac); - - return 0; -} - -static int spnic_config_vlan(struct spnic_nic_cfg *nic_cfg, u8 opcode, u16 vlan_id, u16 func_id) -{ - struct spnic_cmd_vlan_config vlan_info; - u16 out_size = sizeof(vlan_info); - int err; - - memset(&vlan_info, 0, sizeof(vlan_info)); - vlan_info.opcode = opcode; - vlan_info.func_id = func_id; - vlan_info.vlan_id = vlan_id; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_FUNC_VLAN, - &vlan_info, sizeof(vlan_info), - &vlan_info, &out_size); - if (err || !out_size || vlan_info.msg_head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to %s vlan, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == SPNIC_CMD_OP_ADD ? "add" : "delete", - err, vlan_info.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - return spnic_config_vlan(nic_cfg, SPNIC_CMD_OP_ADD, vlan_id, func_id); -} - -int spnic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - return spnic_config_vlan(nic_cfg, SPNIC_CMD_OP_DEL, vlan_id, func_id); -} - -int spnic_set_vport_enable(void *hwdev, u16 func_id, bool enable, u16 channel) -{ - struct spnic_vport_state en_state; - u16 out_size = sizeof(en_state); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - memset(&en_state, 0, sizeof(en_state)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - en_state.func_id = func_id; - en_state.state = enable ? 
1 : 0; - - err = l2nic_msg_to_mgmt_sync_ch(hwdev, SPNIC_NIC_CMD_SET_VPORT_ENABLE, - &en_state, sizeof(en_state), - &en_state, &out_size, channel); - if (err || !out_size || en_state.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, en_state.msg_head.status, out_size, channel); - return -EINVAL; - } - - return 0; -} - -int spnic_set_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state) -{ - struct vf_data_storage *vf_infos = NULL; - struct spnic_cmd_vf_dcb_state vf_dcb; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 vf_id, out_size = 0; - int err; - - if (!hwdev || !dcb_state) - return -EINVAL; - - memset(&vf_dcb, 0, sizeof(vf_dcb)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (!memcmp(&nic_cfg->dcb_state, dcb_state, sizeof(nic_cfg->dcb_state))) - return 0; - - memcpy(&vf_dcb.state, dcb_state, sizeof(vf_dcb.state)); - /* save in the sdk; vfs will get the dcb state when probing */ - spnic_save_dcb_state(nic_cfg, dcb_state); - - /* notify stateful services in the pf, then notify all vfs */ - spnic_notify_dcb_state_event(nic_cfg, dcb_state); - - /* no vfs supported, so there is no need to notify them */ - if (!nic_cfg->vf_infos) - return 0; - - vf_infos = nic_cfg->vf_infos; - for (vf_id = 0; vf_id < nic_cfg->max_vfs; vf_id++) { - if (vf_infos[vf_id].registered) { - vf_dcb.msg_head.status = 0; - out_size = sizeof(vf_dcb); - err = sphw_mbox_to_vf(hwdev, OS_VF_ID_TO_HW(vf_id), SPHW_MOD_L2NIC, - SPNIC_NIC_CMD_VF_COS, &vf_dcb, sizeof(vf_dcb), - &vf_dcb, &out_size, 0, SPHW_CHANNEL_NIC); - if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, vf_dcb.msg_head.status, out_size)) - nic_err(nic_cfg->dev_hdl, - "Failed to notify dcb state to VF %u, err: %d, status: 0x%x, out size: 0x%x\n", - vf_id, err, vf_dcb.msg_head.status, out_size); - } - } - - return 0; -} - -int spnic_get_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !dcb_state) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memcpy(dcb_state, &nic_cfg->dcb_state, sizeof(*dcb_state)); - - return 0; -} - -int spnic_save_dcb_state(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state) -{ - memcpy(&nic_cfg->dcb_state, dcb_state, sizeof(*dcb_state)); - - return 0; -} - -int spnic_get_pf_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state) -{ - struct spnic_cmd_vf_dcb_state vf_dcb; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 out_size = sizeof(vf_dcb); - int err; - - if (!hwdev || !dcb_state) - return -EINVAL; - - memset(&vf_dcb, 0, sizeof(vf_dcb)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (sphw_func_type(hwdev) != TYPE_VF) { - nic_err(nic_cfg->dev_hdl, "Only a vf needs to get the pf dcb state\n"); - return -EINVAL; - } - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_VF_COS, &vf_dcb, - sizeof(vf_dcb), &vf_dcb, &out_size); - if (err || !out_size || vf_dcb.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to get vf default cos, err: %d, status: 0x%x, out size: 0x%x\n", - err, vf_dcb.msg_head.status, out_size); - return -EFAULT; - } - - memcpy(dcb_state, &vf_dcb.state, sizeof(*dcb_state)); - /* Save dcb_state in hw for the stateful module */ - spnic_save_dcb_state(nic_cfg, dcb_state); - - return 0; -} - -static int spnic_cfg_hw_pause(struct spnic_nic_cfg *nic_cfg, u8 opcode, - struct nic_pause_config *nic_pause) -{ - struct spnic_cmd_pause_config pause_info; - u16 out_size = sizeof(pause_info); - int err; - - memset(&pause_info, 0, 
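/*
 * Usage sketch (hypothetical consumer, not from this driver): a VF never
 * calls spnic_set_dcb_state() itself; during probe it pulls the PF's
 * state with spnic_get_pf_dcb_state(), which also caches the result via
 * spnic_save_dcb_state() as shown above. configure_local_cos() below is
 * a made-up consumer:
 *
 *   struct spnic_dcb_state dcb;
 *
 *   if (sphw_func_type(hwdev) == TYPE_VF &&
 *       !spnic_get_pf_dcb_state(hwdev, &dcb))
 *       configure_local_cos(&dcb);
 */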
sizeof(pause_info)); - - pause_info.port_id = sphw_physical_port_id(nic_cfg->hwdev); - pause_info.opcode = opcode; - if (opcode == SPNIC_CMD_OP_SET) { - pause_info.auto_neg = nic_pause->auto_neg; - pause_info.rx_pause = nic_pause->rx_pause; - pause_info.tx_pause = nic_pause->tx_pause; - } - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_PAUSE_INFO, - &pause_info, sizeof(pause_info), - &pause_info, &out_size); - if (err || !out_size || pause_info.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to %s pause info, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == SPNIC_CMD_OP_SET ? "set" : "get", - err, pause_info.msg_head.status, out_size); - return -EINVAL; - } - - if (opcode == SPNIC_CMD_OP_GET) { - nic_pause->auto_neg = pause_info.auto_neg; - nic_pause->rx_pause = pause_info.rx_pause; - nic_pause->tx_pause = pause_info.tx_pause; - } - - return 0; -} - -int spnic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - down(&nic_cfg->cfg_lock); - - err = spnic_cfg_hw_pause(nic_cfg, SPNIC_CMD_OP_SET, &nic_pause); - if (err) { - up(&nic_cfg->cfg_lock); - return err; - } - - nic_cfg->pfc_en = 0; - nic_cfg->pfc_bitmap = 0; - nic_cfg->pause_set = true; - nic_cfg->nic_pause.auto_neg = nic_pause.auto_neg; - nic_cfg->nic_pause.rx_pause = nic_pause.rx_pause; - nic_cfg->nic_pause.tx_pause = nic_pause.tx_pause; - - up(&nic_cfg->cfg_lock); - - return 0; -} - -int spnic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - int err = 0; - - if (!hwdev || !nic_pause) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - err = spnic_cfg_hw_pause(nic_cfg, SPNIC_CMD_OP_GET, nic_pause); - if (err) - return err; - - if (nic_cfg->pause_set || !nic_pause->auto_neg) { - nic_pause->rx_pause = nic_cfg->nic_pause.rx_pause; - nic_pause->tx_pause = nic_cfg->nic_pause.tx_pause; - } - - return 0; -} - -static int spnic_dcb_set_hw_pfc(struct spnic_nic_cfg *nic_cfg, u8 pfc_en, u8 pfc_bitmap) -{ - struct spnic_cmd_set_pfc pfc; - u16 out_size = sizeof(pfc); - int err; - - memset(&pfc, 0, sizeof(pfc)); - - pfc.port_id = sphw_physical_port_id(nic_cfg->hwdev); - pfc.pfc_bitmap = pfc_bitmap; - pfc.pfc_en = pfc_en; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_PFC, - &pfc, sizeof(pfc), &pfc, &out_size); - if (err || pfc.msg_head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, "Failed to set pfc, err: %d, status: 0x%x, out size: 0x%x\n", - err, pfc.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - down(&nic_cfg->cfg_lock); - - err = spnic_dcb_set_hw_pfc(nic_cfg, pfc_en, pfc_bitmap); - if (err) { - up(&nic_cfg->cfg_lock); - return err; - } - - nic_cfg->pfc_en = pfc_en; - nic_cfg->pfc_bitmap = pfc_bitmap; - - /* pause settings is opposite from pfc */ - nic_cfg->nic_pause.rx_pause = pfc_en ? 0 : 1; - nic_cfg->nic_pause.tx_pause = pfc_en ? 
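/*
 * Behavioural sketch (inferred from the code above, not authoritative):
 * global pause and per-priority PFC are treated as mutually exclusive;
 * setting one clears or inverts the cached state of the other under
 * cfg_lock:
 *
 *   struct nic_pause_config pause = {
 *       .auto_neg = 0, .rx_pause = 1, .tx_pause = 1,
 *   };
 *
 *   spnic_set_pause_info(hwdev, pause);  // also zeroes pfc_en/pfc_bitmap
 *   spnic_dcb_set_pfc(hwdev, 1, 0x0f);   // re-enables PFC, flips pause off
 */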
0 : 1; - - up(&nic_cfg->cfg_lock); - - return 0; -} - -int spnic_dcb_set_ets(void *hwdev, u8 *cos_tc, u8 *cos_bw, u8 *cos_prio, - u8 *tc_bw, u8 *tc_prio) -{ - struct spnic_up_ets_cfg ets; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 out_size = sizeof(ets); - u16 cos_bw_t = 0; - u8 tc_bw_t = 0; - int i, err; - - memset(&ets, 0, sizeof(ets)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - cos_bw_t += *(cos_bw + i); - tc_bw_t += *(tc_bw + i); - - if (*(cos_tc + i) > SPNIC_DCB_TC_MAX) { - nic_err(nic_cfg->dev_hdl, "Invalid cos %d mapping tc: %u\n", - i, *(cos_tc + i)); - return -EINVAL; - } - } - - /* The sum of all TCs must be 100%, and the same for cos */ - if ((tc_bw_t != 100 && tc_bw_t != 0) || (cos_bw_t % 100) != 0) { - nic_err(nic_cfg->dev_hdl, - "Invalid pg_bw: %u or up_bw: %u\n", tc_bw_t, cos_bw_t); - return -EINVAL; - } - - ets.port_id = sphw_physical_port_id(hwdev); - memcpy(ets.cos_tc, cos_tc, SPNIC_DCB_COS_MAX); - memcpy(ets.cos_bw, cos_bw, SPNIC_DCB_COS_MAX); - memcpy(ets.cos_prio, cos_prio, SPNIC_DCB_COS_MAX); - memcpy(ets.tc_bw, tc_bw, SPNIC_DCB_TC_MAX); - memcpy(ets.tc_prio, tc_prio, SPNIC_DCB_TC_MAX); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_ETS, - &ets, sizeof(ets), &ets, &out_size); - if (err || ets.msg_head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Failed to set ets, err: %d, status: 0x%x, out size: 0x%x\n", - err, ets.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up, u8 max_cos_num) -{ - struct spnic_cos_up_map map; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 out_size = sizeof(map); - int err; - - if (!hwdev || !cos_up) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&map, 0, sizeof(map)); - - map.port_id = sphw_physical_port_id(hwdev); - map.cos_valid_mask = cos_valid_bitmap; - memcpy(map.map, cos_up, sizeof(map.map)); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SETUP_COS_MAPPING, - &map, sizeof(map), &map, &out_size); - if (err || map.msg_head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Failed to set cos2up map, err: %d, status: 0x%x, out size: 0x%x\n", - err, map.msg_head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int spnic_flush_qps_res(void *hwdev) -{ - struct spnic_cmd_clear_qp_resource sq_res; - u16 out_size = sizeof(sq_res); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&sq_res, 0, sizeof(sq_res)); - - sq_res.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CLEAR_QP_RESOURCE, - &sq_res, sizeof(sq_res), &sq_res, - &out_size); - if (err || !out_size || sq_res.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to clear sq resources, err: %d, status: 0x%x, out size: 0x%x\n", - err, sq_res.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_get_vport_stats(void *hwdev, struct spnic_vport_stats *stats) -{ - struct spnic_port_stats_info stats_info; - struct spnic_cmd_vport_stats vport_stats; - u16 out_size = sizeof(vport_stats); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !stats) - return -EINVAL; - - memset(&stats_info, 0, sizeof(stats_info)); - memset(&vport_stats, 0, sizeof(vport_stats)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - stats_info.func_id = 
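/*
 * Usage sketch (illustrative values only; assumes SPNIC_DCB_COS_MAX and
 * SPNIC_DCB_TC_MAX are both 8): spnic_dcb_set_ets() requires tc_bw to sum
 * to exactly 100 (or all zeroes) and cos_bw to sum to a multiple of 100:
 *
 *   u8 cos_tc[]   = { 0, 0, 1, 1, 2, 2, 3, 3 };
 *   u8 cos_bw[]   = { 10, 10, 10, 10, 15, 15, 15, 15 };  // sums to 100
 *   u8 cos_prio[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
 *   u8 tc_bw[]    = { 25, 25, 25, 25, 0, 0, 0, 0 };      // sums to 100
 *   u8 tc_prio[]  = { 0, 1, 2, 3, 0, 0, 0, 0 };
 *
 *   err = spnic_dcb_set_ets(hwdev, cos_tc, cos_bw, cos_prio, tc_bw, tc_prio);
 */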
sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_GET_VPORT_STAT, - &stats_info, sizeof(stats_info), - &vport_stats, &out_size); - if (err || !out_size || vport_stats.msg_head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n", - err, vport_stats.msg_head.status, out_size); - return -EFAULT; - } - - memcpy(stats, &vport_stats.stats, sizeof(*stats)); - - return 0; -} - -int spnic_set_function_table(struct spnic_nic_cfg *nic_cfg, u32 cfg_bitmap, - struct spnic_func_tbl_cfg *cfg) -{ - struct spnic_cmd_set_func_tbl cmd_func_tbl; - u16 out_size = sizeof(cmd_func_tbl); - int err; - - memset(&cmd_func_tbl, 0, sizeof(cmd_func_tbl)); - cmd_func_tbl.func_id = sphw_global_func_id(nic_cfg->hwdev); - cmd_func_tbl.cfg_bitmap = cfg_bitmap; - cmd_func_tbl.tbl_cfg = *cfg; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_FUNC_TBL, - &cmd_func_tbl, sizeof(cmd_func_tbl), - &cmd_func_tbl, &out_size); - if (err || cmd_func_tbl.msg_head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x, out size: 0x%x\n", - cfg_bitmap, err, cmd_func_tbl.msg_head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int spnic_init_function_table(struct spnic_nic_cfg *nic_cfg) -{ - struct spnic_func_tbl_cfg func_tbl_cfg = {0}; - u32 cfg_bitmap = BIT(FUNC_CFG_INIT) | BIT(FUNC_CFG_MTU) | - BIT(FUNC_CFG_RX_BUF_SIZE); - - func_tbl_cfg.mtu = 0x3FFF; /* default, max mtu */ - func_tbl_cfg.rx_wqe_buf_size = nic_cfg->rx_buff_len; - - return spnic_set_function_table(nic_cfg, cfg_bitmap, &func_tbl_cfg); -} - -int spnic_set_port_mtu(void *hwdev, u16 new_mtu) -{ - struct spnic_func_tbl_cfg func_tbl_cfg = {0}; - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (new_mtu < SPNIC_MIN_MTU_SIZE) { - nic_err(nic_cfg->dev_hdl, "Invalid mtu size: %ubytes, mtu size < %ubytes", - new_mtu, SPNIC_MIN_MTU_SIZE); - return -EINVAL; - } - - if (new_mtu > SPNIC_MAX_JUMBO_FRAME_SIZE) { - nic_err(nic_cfg->dev_hdl, "Invalid mtu size: %ubytes, mtu size > %ubytes", - new_mtu, SPNIC_MAX_JUMBO_FRAME_SIZE); - return -EINVAL; - } - - func_tbl_cfg.mtu = new_mtu; - return spnic_set_function_table(nic_cfg, BIT(FUNC_CFG_MTU), &func_tbl_cfg); -} - -static int nic_feature_nego(void *hwdev, u8 opcode, u64 *s_feature, u16 size) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_cmd_feature_nego feature_nego; - u16 out_size = sizeof(feature_nego); - int err; - - if (!hwdev || !s_feature || size > NIC_MAX_FEATURE_QWORD) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&feature_nego, 0, sizeof(feature_nego)); - feature_nego.func_id = sphw_global_func_id(hwdev); - feature_nego.opcode = opcode; - if (opcode == SPNIC_CMD_OP_SET) - memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64)); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_FEATURE_NEGO, - &feature_nego, sizeof(feature_nego), - &feature_nego, &out_size); - if (err || !out_size || feature_nego.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to negotiate nic feature, err:%d, status: 0x%x, out_size: 0x%x\n", - err, feature_nego.msg_head.status, out_size); - return -EIO; - } - - if (opcode == SPNIC_CMD_OP_GET) - memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64)); - - return 0; -} - -static int spnic_get_nic_feature_from_hw(void *hwdev, u64 *s_feature, u16 size) -{ - return 
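/*
 * Behavioural sketch (based on the cfg_bitmap handling above): the
 * function table only applies fields whose bits are set in cfg_bitmap,
 * which is why spnic_set_port_mtu() can change the MTU without touching
 * rx_wqe_buf_size:
 *
 *   struct spnic_func_tbl_cfg cfg = { .mtu = 1500 };
 *
 *   err = spnic_set_function_table(nic_cfg, BIT(FUNC_CFG_MTU), &cfg);
 */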
nic_feature_nego(hwdev, SPNIC_CMD_OP_GET, s_feature, size); -} - -int spnic_set_nic_feature_to_hw(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - return nic_feature_nego(hwdev, SPNIC_CMD_OP_SET, &nic_cfg->feature_cap, 1); -} - -u64 spnic_get_feature_cap(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - return nic_cfg->feature_cap; -} - -void spnic_update_nic_feature(void *hwdev, u64 feature) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - nic_cfg->feature_cap = feature; - - nic_info(nic_cfg->dev_hdl, "Update nic feature to 0x%llx\n", nic_cfg->feature_cap); -} - -static inline int init_nic_hwdev_param_valid(void *hwdev, void *pcidev_hdl, void *dev_hdl) -{ - if (!hwdev || !pcidev_hdl || !dev_hdl) - return -EINVAL; - - return 0; -} - -/* spnic_init_nic_hwdev - init nic hwdev - * @hwdev: pointer to hwdev - * @pcidev_hdl: pointer to pcidev or handler - * @dev_hdl: pointer to pcidev->dev or handler, for sdk_err() or dma_alloc() - * @rx_buff_len: rx_buff_len is receive buffer length - */ -int spnic_init_nic_hwdev(void *hwdev, void *pcidev_hdl, void *dev_hdl, u16 rx_buff_len) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (init_nic_hwdev_param_valid(hwdev, pcidev_hdl, dev_hdl)) - return -EINVAL; - - nic_cfg = kzalloc(sizeof(*nic_cfg), GFP_KERNEL); - if (!nic_cfg) - return -ENOMEM; - - nic_cfg->dev_hdl = dev_hdl; - nic_cfg->pcidev_hdl = pcidev_hdl; - nic_cfg->hwdev = hwdev; - - sema_init(&nic_cfg->cfg_lock, 1); - mutex_init(&nic_cfg->sfp_mutex); - - err = sphw_register_service_adapter(hwdev, nic_cfg, SERVICE_T_NIC); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to register service adapter\n"); - goto register_sa_err; - } - - err = spnic_init_function_table(nic_cfg); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to init function table\n"); - goto init_func_tbl_err; - } - - err = spnic_get_nic_feature_from_hw(hwdev, &nic_cfg->feature_cap, 1); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to get nic features\n"); - goto get_feature_err; - } - - sdk_info(dev_hdl, "nic features: 0x%llx\n", nic_cfg->feature_cap); - - err = sphw_aeq_register_swe_cb(hwdev, SPHW_STATELESS_EVENT, spnic_nic_sw_aeqe_handler); - if (err) { - nic_err(nic_cfg->dev_hdl, - "Failed to register sw aeqe handler\n"); - goto register_sw_aeqe_err; - } - - err = spnic_vf_func_init(nic_cfg); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to init vf info\n"); - goto vf_init_err; - } - - nic_cfg->rx_buff_len = rx_buff_len; - - return 0; - -vf_init_err: - sphw_aeq_unregister_swe_cb(hwdev, SPHW_STATELESS_EVENT); - -register_sw_aeqe_err: -get_feature_err: -init_func_tbl_err: - sphw_unregister_service_adapter(hwdev, SERVICE_T_NIC); - -register_sa_err: - kfree(nic_cfg); - - return err; -} - -void spnic_free_nic_hwdev(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) - return; - - spnic_vf_func_free(nic_cfg); - - sphw_aeq_unregister_swe_cb(hwdev, SPHW_STATELESS_EVENT); - - sphw_unregister_service_adapter(hwdev, SERVICE_T_NIC); - - kfree(nic_cfg); -} - -/* to do : send cmd to MPU to drop nic tx pkt*/ -int spnic_force_drop_tx_pkt(void *hwdev) -{ - return 0; -} - -int spnic_set_rx_mode(void *hwdev, u32 enable) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_rx_mode_config rx_mode_cfg; - u16 out_size = 
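/*
 * Usage sketch (DRIVER_SUPPORTED_FEATURES is a hypothetical mask, not a
 * symbol from this driver): feature negotiation reuses one message for
 * both directions; opcode GET pulls the firmware capability words and
 * opcode SET pushes back what the driver intends to use:
 *
 *   u64 cap = 0;
 *
 *   if (!nic_feature_nego(hwdev, SPNIC_CMD_OP_GET, &cap, 1)) {
 *       spnic_update_nic_feature(hwdev, cap & DRIVER_SUPPORTED_FEATURES);
 *       spnic_set_nic_feature_to_hw(hwdev);
 *   }
 */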
sizeof(rx_mode_cfg); - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&rx_mode_cfg, 0, sizeof(rx_mode_cfg)); - rx_mode_cfg.func_id = sphw_global_func_id(hwdev); - rx_mode_cfg.rx_mode = enable; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_RX_MODE, - &rx_mode_cfg, sizeof(rx_mode_cfg), - &rx_mode_cfg, &out_size); - if (err || !out_size || rx_mode_cfg.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x\n", - err, rx_mode_cfg.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_set_rx_vlan_offload(void *hwdev, u8 en) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_cmd_vlan_offload vlan_cfg; - u16 out_size = sizeof(vlan_cfg); - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&vlan_cfg, 0, sizeof(vlan_cfg)); - vlan_cfg.func_id = sphw_global_func_id(hwdev); - vlan_cfg.vlan_offload = en; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_RX_VLAN_OFFLOAD, - &vlan_cfg, sizeof(vlan_cfg), - &vlan_cfg, &out_size); - if (err || !out_size || vlan_cfg.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n", - err, vlan_cfg.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id) -{ - struct vf_data_storage *vf_info = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 func_id; - int err; - - if (!hwdev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); - - if (!nic_cfg->vf_infos || is_zero_ether_addr(vf_info->drv_mac_addr)) - return 0; - - func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; - - err = spnic_del_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, - old_vlan, func_id, SPHW_CHANNEL_NIC); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to delete VF %d MAC %pM vlan %u\n", - HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac_addr, old_vlan); - return err; - } - - err = spnic_set_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, - new_vlan, func_id, SPHW_CHANNEL_NIC); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to add VF %d MAC %pM vlan %u\n", - HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac_addr, new_vlan); - spnic_set_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, - old_vlan, func_id, SPHW_CHANNEL_NIC); - return err; - } - - return 0; -} - -static int spnic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 lro_max_pkt_len) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_cmd_lro_config lro_cfg; - u16 out_size = sizeof(lro_cfg); - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&lro_cfg, 0, sizeof(lro_cfg)); - lro_cfg.func_id = sphw_global_func_id(hwdev); - lro_cfg.opcode = SPNIC_CMD_OP_SET; - lro_cfg.lro_ipv4_en = ipv4_en; - lro_cfg.lro_ipv6_en = ipv6_en; - lro_cfg.lro_max_pkt_len = lro_max_pkt_len; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CFG_RX_LRO, - &lro_cfg, sizeof(lro_cfg), - &lro_cfg, &out_size); - if (err || !out_size || lro_cfg.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n", - err, lro_cfg.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -static int spnic_set_rx_lro_timer(void *hwdev, u32 
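/*
 * Usage sketch (hypothetical netdev glue; the RX_MODE_* bit names and
 * get_hwdev() are assumptions, not part of this patch):
 * spnic_set_rx_mode() is the kind of thin wrapper an ndo_set_rx_mode
 * hook would call:
 *
 *   static void example_set_rx_mode(struct net_device *netdev)
 *   {
 *       u32 mode = RX_MODE_UC | RX_MODE_MC | RX_MODE_BC;
 *
 *       if (netdev->flags & IFF_PROMISC)
 *           mode |= RX_MODE_PROMISC;
 *       spnic_set_rx_mode(get_hwdev(netdev), mode);
 *   }
 */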
timer_value) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_cmd_lro_timer lro_timer; - u16 out_size = sizeof(lro_timer); - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&lro_timer, 0, sizeof(lro_timer)); - lro_timer.opcode = SPNIC_CMD_OP_SET; - lro_timer.timer = timer_value; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CFG_LRO_TIMER, - &lro_timer, sizeof(lro_timer), - &lro_timer, &out_size); - if (err || !out_size || lro_timer.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n", - err, lro_timer.msg_head.status, out_size); - - return -EINVAL; - } - - return 0; -} - -int spnic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 lro_max_pkt_len) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u8 ipv4_en = 0, ipv6_en = 0; - int err; - - if (!hwdev) - return -EINVAL; - - ipv4_en = lro_en ? 1 : 0; - ipv6_en = lro_en ? 1 : 0; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - nic_info(nic_cfg->dev_hdl, "Set LRO max coalesce packet size to %uK\n", - lro_max_pkt_len); - - err = spnic_set_rx_lro(hwdev, ipv4_en, ipv6_en, (u8)lro_max_pkt_len); - if (err) - return err; - - /* we don't set LRO timer for VF */ - if (sphw_func_type(hwdev) == TYPE_VF) - return 0; - - nic_info(nic_cfg->dev_hdl, "Set LRO timer to %u\n", lro_timer); - - return spnic_set_rx_lro_timer(hwdev, lro_timer); -} - -int spnic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_cmd_set_vlan_filter vlan_filter; - u16 out_size = sizeof(vlan_filter); - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&vlan_filter, 0, sizeof(vlan_filter)); - vlan_filter.func_id = sphw_global_func_id(hwdev); - vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_VLAN_FILTER_EN, - &vlan_filter, sizeof(vlan_filter), - &vlan_filter, &out_size); - if (err || !out_size || vlan_filter.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n", - err, vlan_filter.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_add_tcam_rule(void *hwdev, struct nic_tcam_cfg_rule *tcam_rule) -{ - u16 out_size = sizeof(struct nic_cmd_fdir_add_rule); - struct nic_cmd_fdir_add_rule tcam_cmd; - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !tcam_rule) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (tcam_rule->index >= SPNIC_MAX_TCAM_RULES_NUM) { - nic_err(nic_cfg->dev_hdl, "Tcam rules num to add is invalid\n"); - return -EINVAL; - } - - memset(&tcam_cmd, 0, sizeof(struct nic_cmd_fdir_add_rule)); - memcpy((void *)&tcam_cmd.rule, (void *)tcam_rule, - sizeof(struct nic_tcam_cfg_rule)); - tcam_cmd.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_ADD_TC_FLOW, - &tcam_cmd, sizeof(tcam_cmd), - &tcam_cmd, &out_size); - if (err || tcam_cmd.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Add tcam rule failed, err: %d, status: 0x%x, out size: 0x%x\n", - err, tcam_cmd.head.status, out_size); - return -EIO; - } - - return 0; -} - -int spnic_del_tcam_rule(void *hwdev, u32 index) -{ - u16 out_size = sizeof(struct nic_cmd_fdir_del_rules); - struct nic_cmd_fdir_del_rules tcam_cmd; - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return 
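/*
 * Usage sketch (the timer value is illustrative; its unit is not
 * documented in this file): lro_max_pkt_len is expressed in 1 KB units
 * and the LRO timer is only programmed on the PF, so enabling LRO with a
 * 32 KB aggregation limit looks like:
 *
 *   err = spnic_set_rx_lro_state(hwdev, 1, 16, 32);  // en, timer, 32 KB
 */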
-EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (index >= SPNIC_MAX_TCAM_RULES_NUM) { - nic_err(nic_cfg->dev_hdl, "Tcam rules num to del is invalid\n"); - return -EINVAL; - } - - memset(&tcam_cmd, 0, sizeof(struct nic_cmd_fdir_del_rules)); - tcam_cmd.index_start = index; - tcam_cmd.index_num = 1; - tcam_cmd.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_DEL_TC_FLOW, - &tcam_cmd, sizeof(tcam_cmd), - &tcam_cmd, &out_size); - if (err || tcam_cmd.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, "Del tcam rule failed, err: %d, status: 0x%x, out size: 0x%x\n", - err, tcam_cmd.head.status, out_size); - return -EIO; - } - - return 0; -} - -/** - * spnic_mgmt_tcam_block - alloc or free tcam block for IO packet. - * - * @param hwdev - * The hardware interface of a nic device. - * @param alloc_en - * 1 alloc block. - * 0 free block. - * @param index - * block index from firmware. - * @return - * 0 on success, - * negative error value otherwise. - */ -static int spnic_mgmt_tcam_block(void *hwdev, u8 alloc_en, u16 *index) -{ - struct nic_cmd_ctrl_tcam_block_out tcam_block_info; - u16 out_size = sizeof(struct nic_cmd_ctrl_tcam_block_out); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !index) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&tcam_block_info, 0, sizeof(struct nic_cmd_ctrl_tcam_block_out)); - - tcam_block_info.func_id = sphw_global_func_id(hwdev); - tcam_block_info.alloc_en = alloc_en; - tcam_block_info.tcam_type = SPNIC_TCAM_BLOCK_NORMAL_TYPE; - tcam_block_info.tcam_block_index = *index; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CFG_TCAM_BLOCK, - &tcam_block_info, sizeof(tcam_block_info), - &tcam_block_info, &out_size); - if (err || tcam_block_info.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Set tcam block failed, err: %d, status: 0x%x, out size: 0x%x\n", - err, tcam_block_info.head.status, out_size); - return -EIO; - } - - if (alloc_en) - *index = tcam_block_info.tcam_block_index; - - return 0; -} - -int spnic_alloc_tcam_block(void *hwdev, u16 *index) -{ - return spnic_mgmt_tcam_block(hwdev, SPNIC_TCAM_BLOCK_ENABLE, index); -} - -int spnic_free_tcam_block(void *hwdev, u16 *index) -{ - return spnic_mgmt_tcam_block(hwdev, SPNIC_TCAM_BLOCK_DISABLE, index); -} - -int spnic_set_fdir_tcam_rule_filter(void *hwdev, bool enable) -{ - struct nic_cmd_set_tcam_enable port_tcam_cmd; - u16 out_size = sizeof(port_tcam_cmd); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&port_tcam_cmd, 0, sizeof(port_tcam_cmd)); - port_tcam_cmd.func_id = sphw_global_func_id(hwdev); - port_tcam_cmd.tcam_enable = (u8)enable; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_ENABLE_TCAM, - &port_tcam_cmd, sizeof(port_tcam_cmd), - &port_tcam_cmd, &out_size); - if (err || port_tcam_cmd.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, "Set fdir tcam filter failed, err: %d, status: 0x%x, out size: 0x%x, enable: 0x%x\n", - err, port_tcam_cmd.head.status, out_size, - enable); - return -EIO; - } - - return 0; -} - -int spnic_flush_tcam_rule(void *hwdev) -{ - struct nic_cmd_flush_tcam_rules tcam_flush; - u16 out_size = sizeof(struct nic_cmd_flush_tcam_rules); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&tcam_flush, 0, sizeof(struct 
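/*
 * Usage sketch (ordering inferred from the helpers above;
 * SPNIC_TCAM_BLOCK_SIZE and the rule variable are hypothetical): a
 * flow-director user allocates a TCAM block, installs a rule into it,
 * then enables TCAM filtering:
 *
 *   struct nic_tcam_cfg_rule rule = { 0 };
 *   u16 blk = 0;
 *
 *   if (spnic_alloc_tcam_block(hwdev, &blk))
 *       return -EIO;
 *   rule.index = blk * SPNIC_TCAM_BLOCK_SIZE;
 *   if (!spnic_add_tcam_rule(hwdev, &rule))
 *       err = spnic_set_fdir_tcam_rule_filter(hwdev, true);
 */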
nic_cmd_flush_tcam_rules)); - tcam_flush.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_FLUSH_TCAM, - &tcam_flush, - sizeof(struct nic_cmd_flush_tcam_rules), - &tcam_flush, &out_size); - if (err || tcam_flush.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Flush tcam fdir rules failed, err: %d, status: 0x%x, out size: 0x%x\n", - err, tcam_flush.head.status, out_size); - return -EIO; - } - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h deleted file mode 100644 index f280b41fe362a2716ca1783aca91018349a723ff..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h +++ /dev/null @@ -1,709 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_CFG_H -#define SPNIC_NIC_CFG_H - -#include - -#include "spnic_nic_cmd.h" -#include "spnic_mgmt_interface.h" -#include "spnic_mag_cmd.h" - -#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1) -#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1) - -#define SPNIC_VLAN_PRIORITY_SHIFT 13 - -#define SPNIC_RSS_KEY_RSV_NUM 2 - -#define SPNIC_MIN_MTU_SIZE 256 -#define SPNIC_MAX_JUMBO_FRAME_SIZE 9600 - -#define SPNIC_PF_SET_VF_ALREADY 0x4 -#define SPNIC_MGMT_STATUS_EXIST 0x6 - -#define SPNIC_LOWEST_LATENCY 1 -#define SPNIC_MULTI_VM_LATENCY 32 -#define SPNIC_MULTI_VM_PENDING_LIMIT 4 -#define SPNIC_RX_RATE_LOW 400000 -#define SPNIC_RX_COAL_TIME_LOW 16 -#define SPNIC_RX_PENDING_LIMIT_LOW 2 -#define SPNIC_RX_RATE_HIGH 1000000 -#define SPNIC_RX_COAL_TIME_HIGH 225 -#define SPNIC_RX_PENDING_LIMIT_HIGH 8 -#define SPNIC_RX_RATE_THRESH 50000 -#define SPNIC_TX_RATE_THRESH 50000 -#define SPNIC_RX_RATE_LOW_VM 100000 -#define SPNIC_RX_PENDING_LIMIT_HIGH_VM 87 - -enum spnic_valid_link_settings { - HILINK_LINK_SET_SPEED = 0x1, - HILINK_LINK_SET_AUTONEG = 0x2, - HILINK_LINK_SET_FEC = 0x4, -}; - -struct spnic_link_ksettings { - u32 valid_bitmap; - u8 speed; /* enum nic_speed_level */ - u8 autoneg; /* 0 - off; 1 - on */ - u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ -}; - -u64 spnic_get_feature_cap(void *hwdev); - -#define SPNIC_SUPPORT_FEATURE(hwdev, feature) (spnic_get_feature_cap(hwdev) & NIC_F_##feature) -#define SPNIC_SUPPORT_CSUM(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, CSUM) -#define SPNIC_SUPPORT_SCTP_CRC(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, SCTP_CRC) -#define SPNIC_SUPPORT_TSO(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, TSO) -#define SPNIC_SUPPORT_UFO(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, UFO) -#define SPNIC_SUPPORT_LRO(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, LRO) -#define SPNIC_SUPPORT_RSS(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, RSS) -#define SPNIC_SUPPORT_RXVLAN_FILTER(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, RX_VLAN_FILTER) -#define SPNIC_SUPPORT_VLAN_OFFLOAD(hwdev) (SPNIC_SUPPORT_FEATURE(hwdev, RX_VLAN_STRIP) && \ - SPNIC_SUPPORT_FEATURE(hwdev, TX_VLAN_INSERT)) -#define SPNIC_SUPPORT_VXLAN_OFFLOAD(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, VXLAN_OFFLOAD) -#define SPNIC_SUPPORT_IPSEC_OFFLOAD(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, IPSEC_OFFLOAD) -#define SPNIC_SUPPORT_FDIR(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, FDIR) -#define SPNIC_SUPPORT_PROMISC(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, PROMISC) -#define SPNIC_SUPPORT_ALLMULTI(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, ALLMULTI) - -struct nic_rss_type { - u8 tcp_ipv6_ext; - u8 ipv6_ext; - u8 tcp_ipv6; - u8 ipv6; - u8 tcp_ipv4; - u8 ipv4; - u8 udp_ipv6; - u8 udp_ipv4; -}; - -enum spnic_rss_hash_type { - 
SPNIC_RSS_HASH_ENGINE_TYPE_XOR = 0, - SPNIC_RSS_HASH_ENGINE_TYPE_TOEP, - SPNIC_RSS_HASH_ENGINE_TYPE_MAX, -}; - -/* rss */ -struct nic_rss_indirect_tbl { - u32 rsvd[4]; /* Make sure that 16B beyond entry[] */ - u16 entry[SPNIC_RSS_INDIR_SIZE]; -}; - -struct nic_rss_context_tbl { - u32 rsvd[4]; - u32 ctx; -}; - -#define NIC_CONFIG_ALL_QUEUE_VLAN_CTX 0xFFFF -struct nic_vlan_ctx { - u32 func_id; - u32 qid; /* if qid = 0xFFFF, config current function all queue */ - u32 vlan_tag; - u32 vlan_mode; - u32 vlan_sel; -}; - -enum spnic_link_status { - SPNIC_LINK_DOWN = 0, - SPNIC_LINK_UP -}; - -struct nic_port_info { - u8 port_type; - u8 autoneg_cap; - u8 autoneg_state; - u8 duplex; - u8 speed; - u8 fec; - u32 supported_mode; - u32 advertised_mode; -}; - -struct nic_pause_config { - u8 auto_neg; - u8 rx_pause; - u8 tx_pause; -}; - -#define MODULE_TYPE_SFP 0x3 -#define MODULE_TYPE_QSFP28 0x11 -#define MODULE_TYPE_QSFP 0x0C -#define MODULE_TYPE_QSFP_PLUS 0x0D - -#define TCAM_IP_TYPE_MASK 0x1 -#define TCAM_TUNNEL_TYPE_MASK 0xF -#define TCAM_FUNC_ID_MASK 0x7FFF - -struct spnic_tcam_key_ipv4_mem { - u32 rsvd1:4; - u32 tunnel_type:4; - u32 ip_proto:8; - u32 rsvd0:16; - u32 sipv4_h:16; - u32 ip_type:1; - u32 function_id:15; - u32 dipv4_h:16; - u32 sipv4_l:16; - u32 rsvd2:16; - u32 dipv4_l:16; - u32 rsvd3; - u32 dport:16; - u32 rsvd4:16; - u32 rsvd5:16; - u32 sport:16; - u32 outer_sipv4_h:16; - u32 rsvd6:16; - u32 outer_dipv4_h:16; - u32 outer_sipv4_l:16; - u32 vni_h:16; - u32 outer_dipv4_l:16; - u32 rsvd7:16; - u32 vni_l:16; -}; - -struct spnic_tcam_key_ipv6_mem { - u32 rsvd1:4; - u32 tunnel_type:4; - u32 ip_proto:8; - u32 rsvd0:16; - u32 sipv6_key0:16; - u32 ip_type:1; - u32 function_id:15; - u32 sipv6_key2:16; - u32 sipv6_key1:16; - u32 sipv6_key4:16; - u32 sipv6_key3:16; - u32 sipv6_key6:16; - u32 sipv6_key5:16; - u32 dport:16; - u32 sipv6_key7:16; - u32 dipv6_key0:16; - u32 sport:16; - u32 dipv6_key2:16; - u32 dipv6_key1:16; - u32 dipv6_key4:16; - u32 dipv6_key3:16; - u32 dipv6_key6:16; - u32 dipv6_key5:16; - u32 rsvd2:16; - u32 dipv6_key7:16; -}; - -struct tag_tcam_key { - union { - struct spnic_tcam_key_ipv4_mem key_info; - struct spnic_tcam_key_ipv6_mem key_info_ipv6; - }; - - union { - struct spnic_tcam_key_ipv4_mem key_mask; - struct spnic_tcam_key_ipv6_mem key_mask_ipv6; - }; -}; - -int spnic_add_tcam_rule(void *hwdev, struct nic_tcam_cfg_rule *tcam_rule); -int spnic_del_tcam_rule(void *hwdev, u32 index); - -int spnic_alloc_tcam_block(void *hwdev, u16 *index); -int spnic_free_tcam_block(void *hwdev, u16 *index); - -int spnic_set_fdir_tcam_rule_filter(void *hwdev, bool enable); - -int spnic_flush_tcam_rule(void *hwdev); - -/* * - * @brief spnic_update_mac - update mac address to hardware - * @param hwdev: device pointer to hwdev - * @param old_mac: old mac to delete - * @param new_mac: new mac to update - * @param vlan_id: vlan id - * @param func_id: function index - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, u16 func_id); - -/* * - * @brief spnic_get_default_mac - get default mac address - * @param hwdev: device pointer to hwdev - * @param mac_addr: mac address from hardware - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_default_mac(void *hwdev, u8 *mac_addr); - -/* * - * @brief spnic_set_port_mtu - set function mtu - * @param hwdev: device pointer to hwdev - * @param new_mtu: mtu - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_port_mtu(void *hwdev, u16 
new_mtu); - -/* * - * @brief spnic_get_link_state - get link state - * @param hwdev: device pointer to hwdev - * @param link_state: link state, 0-link down, 1-link up - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_link_state(void *hwdev, u8 *link_state); - -/* * - * @brief spnic_get_vport_stats - get function stats - * @param hwdev: device pointer to hwdev - * @param stats: function stats - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_vport_stats(void *hwdev, struct spnic_vport_stats *stats); - -/* * - * @brief spnic_notify_all_vfs_link_changed - notify all vfs of a link change - * @param hwdev: device pointer to hwdev - * @param link_status: link state, 0-link down, 1-link up - */ -void spnic_notify_all_vfs_link_changed(void *hwdev, u8 link_status); - -/* * - * @brief spnic_force_drop_tx_pkt - force drop tx packet - * @param hwdev: device pointer to hwdev - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_force_drop_tx_pkt(void *hwdev); - -/* * - * @brief spnic_set_rx_mode - set function rx mode - * @param hwdev: device pointer to hwdev - * @param enable: rx mode state - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_rx_mode(void *hwdev, u32 enable); - -/* * - * @brief spnic_set_rx_vlan_offload - set function vlan offload valid state - * @param hwdev: device pointer to hwdev - * @param en: 0-disable, 1-enable - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_rx_vlan_offload(void *hwdev, u8 en); - -/* * - * @brief spnic_set_rx_lro_state - set rx LRO configuration - * @param hwdev: device pointer to hwdev - * @param lro_en: 0-disable, 1-enable - * @param lro_timer: LRO aggregation timeout - * @param lro_max_pkt_len: LRO coalesce packet size (unit is 1K) - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 lro_max_pkt_len); - -/* * - * @brief spnic_set_vf_spoofchk - set vf spoofchk - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param spoofchk: spoofchk state - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk); - -/* * - * @brief spnic_vf_info_spoofchk - get vf spoofchk info - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @retval spoofchk state - */ -bool spnic_vf_info_spoofchk(void *hwdev, int vf_id); - -/* * - * @brief spnic_add_vf_vlan - add vf vlan id - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param vlan: vlan id - * @param qos: qos - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos); - -/* * - * @brief spnic_kill_vf_vlan - kill vf vlan - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_kill_vf_vlan(void *hwdev, int vf_id); - -/* * - * @brief spnic_set_vf_mac - set vf mac - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param mac_addr: vf mac address - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_vf_mac(void *hwdev, int vf_id, unsigned char *mac_addr); - -/* * - * @brief spnic_vf_info_vlanprio - get vf vlan priority - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @retval vlan priority - */ -u16 spnic_vf_info_vlanprio(void *hwdev, int vf_id); - -/* * - * @brief spnic_set_vf_tx_rate - 
set vf tx rate - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param max_rate: max rate - * @param min_rate: min rate - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate); - -/* * - * @brief spnic_get_vf_config - get vf configuration - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param ivi: vf info - */ -void spnic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi); - -/* * - * @brief spnic_set_vf_link_state - set vf link state - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param link: link state - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_vf_link_state(void *hwdev, u16 vf_id, int link); - -/* * - * @brief spnic_get_port_info - get port info - * @param hwdev: device pointer to hwdev - * @param port_info: port info - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_port_info(void *hwdev, struct nic_port_info *port_info, u16 channel); - -/* * - * @brief spnic_set_rss_type - set rss type - * @param hwdev: device pointer to hwdev - * @param rss_type: rss type - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_rss_type(void *hwdev, struct nic_rss_type rss_type); - -/* * - * @brief spnic_get_rss_type - get rss type - * @param hwdev: device pointer to hwdev - * @param rss_type: rss type - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_rss_type(void *hwdev, struct nic_rss_type *rss_type); - -/* * - * @brief spnic_rss_get_hash_engine - get rss hash engine - * @param hwdev: device pointer to hwdev - * @param type: hash engine - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_get_hash_engine(void *hwdev, u8 *type); - -/* * - * @brief spnic_rss_set_hash_engine - set rss hash engine - * @param hwdev: device pointer to hwdev - * @param type: hash engine - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_set_hash_engine(void *hwdev, u8 type); - -/* * - * @brief spnic_rss_cfg - set rss configuration - * @param hwdev: device pointer to hwdev - * @param rss_en: enable rss flag - * @param tc_num: number of TC - * @param prio_tc: priority of TC - * @param num_qps: number of queues - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_cfg(void *hwdev, u8 rss_en, u8 tc_num, u8 *prio_tc, u16 num_qps); - -/* * - * @brief spnic_rss_set_hash_key - set rss hash key - * @param hwdev: device pointer to hwdev - * @param key: rss key - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_set_hash_key(void *hwdev, const u8 *key); - -/* * - * @brief spnic_rss_get_hash_key - get rss hash key - * @param hwdev: device pointer to hwdev - * @param key: rss key - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_get_hash_key(void *hwdev, u8 *key); - -/* * - * @brief spnic_refresh_nic_cfg - refresh port cfg - * @param hwdev: device pointer to hwdev - * @param port_info: port information - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info); - -/* * - * @brief spnic_add_vlan - add vlan - * @param hwdev: device pointer to hwdev - * @param vlan_id: vlan id - * @param func_id: function id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_add_vlan(void
*hwdev, u16 vlan_id, u16 func_id); - -/* * - * @brief spnic_del_vlan - delete vlan - * @param hwdev: device pointer to hwdev - * @param vlan_id: vlan id - * @param func_id: function id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id); - -/* * - * @brief spnic_set_mac - set mac address - * @param hwdev: device pointer to hwdev - * @param mac_addr: mac address to set - * @param vlan_id: vlan id - * @param func_id: function index - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel); - -/* * - * @brief spnic_del_mac - delete mac address - * @param hwdev: device pointer to hwdev - * @param mac_addr: mac address to delete - * @param vlan_id: vlan id - * @param func_id: function index - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel); - -/* * - * @brief spnic_set_vport_enable - set function valid status - * @param hwdev: device pointer to hwdev - * @param func_id: global function index - * @param enable: 0-disable, 1-enable - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_vport_enable(void *hwdev, u16 func_id, bool enable, u16 channel); - -/* * - * @brief spnic_set_port_enable - set port status - * @param hwdev: device pointer to hwdev - * @param enable: 0-disable, 1-enable - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_port_enable(void *hwdev, bool enable, u16 channel); - -/* * - * @brief spnic_flush_qps_res - flush queue pair resources in hardware - * @param hwdev: device pointer to hwdev - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_flush_qps_res(void *hwdev); - -/* * - * @brief spnic_init_nic_hwdev - init nic hwdev - * @param hwdev: device pointer to hwdev - * @param pcidev_hdl: pointer to pcidev or handler - * @param dev_hdl: pointer to pcidev->dev or handler, for sdk_err() or - * dma_alloc() - * @param rx_buff_len: receive buffer length - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_init_nic_hwdev(void *hwdev, void *pcidev_hdl, void *dev_hdl, u16 rx_buff_len); - -/* * - * @brief spnic_free_nic_hwdev - free nic hwdev - * @param hwdev: device pointer to hwdev - */ -void spnic_free_nic_hwdev(void *hwdev); - -/* * - * @brief spnic_get_speed - get link speed - * @param hwdev: device pointer to hwdev - * @param speed: link speed - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_speed(void *hwdev, enum mag_cmd_port_speed *speed, u16 channel); - -int spnic_get_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state); - -int spnic_get_pf_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state); - -/* * - * @brief spnic_create_qps - create queue pairs - * @param hwdev: device pointer to hwdev - * @param num_qp: number of queue pairs - * @param sq_depth: sq depth - * @param rq_depth: rq depth - * @param qps_msix_arry: msix info - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_create_qps(void *hwdev, u16 num_qp, u32 sq_depth, u32 rq_depth, - struct irq_info *qps_msix_arry); - -/* * - * @brief spnic_destroy_qps - destroy queue pairs - * @param 
hwdev: device pointer to hwdev - */ -void spnic_destroy_qps(void *hwdev); - -enum spnic_queue_type { - SPNIC_SQ, - SPNIC_RQ, - SPNIC_MAX_QUEUE_TYPE -}; - -/* * - * @brief spnic_get_nic_queue - get nic queue - * @param hwdev: device pointer to hwdev - * @param q_id: queue index - * @param q_type: queue type - * @retval queue address - */ -void *spnic_get_nic_queue(void *hwdev, u16 q_id, enum spnic_queue_type q_type); - -/* * - * @brief spnic_init_qp_ctxts - init queue pair context - * @param hwdev: device pointer to hwdev - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_init_qp_ctxts(void *hwdev); - -/* * - * @brief spnic_free_qp_ctxts - free queue pair contexts - * @param hwdev: device pointer to hwdev - */ -void spnic_free_qp_ctxts(void *hwdev); - -/* * - * @brief spnic_rss_set_indir_tbl - set rss indirect table - * @param hwdev: device pointer to hwdev - * @param indir_table: rss indirect table - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_set_indir_tbl(void *hwdev, const u32 *indir_table); - -/* * - * @brief spnic_rss_get_indir_tbl - get rss indirect table - * @param hwdev: device pointer to hwdev - * @param indir_table: rss indirect table - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_get_indir_tbl(void *hwdev, u32 *indir_table); - -/* * - * @brief spnic_get_phy_port_stats - get port stats - * @param hwdev: device pointer to hwdev - * @param stats: port stats - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats); - -int spnic_set_port_funcs_state(void *hwdev, bool enable); - -int spnic_reset_port_link_cfg(void *hwdev); - -int spnic_force_port_relink(void *hwdev); - -int spnic_set_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state); - -int spnic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap); - -int spnic_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap); - -int spnic_dcb_set_ets(void *hwdev, u8 *cos_tc, u8 *cos_bw, u8 *cos_prio, u8 *tc_bw, u8 *tc_prio); - -int spnic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up, u8 max_cos_num); - -int spnic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause); - -int spnic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause); - -int spnic_set_link_settings(void *hwdev, struct spnic_link_ksettings *settings); - -int spnic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl); - -void spnic_clear_vfs_info(void *hwdev); - -int spnic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id); - -int spnic_set_led_status(void *hwdev, enum mag_led_type type, enum mag_led_mode mode); - -int spnic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en); - -int spnic_set_loopback_mode(void *hwdev, u8 mode, u8 enable); -int spnic_get_loopback_mode(void *hwdev, u8 *mode, u8 *enable); - -bool spnic_get_vf_trust(void *hwdev, int vf_id); -int spnic_set_vf_trust(void *hwdev, u16 vf_id, bool trust); - -int spnic_set_autoneg(void *hwdev, bool enable); - -int spnic_get_sfp_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext); -int spnic_get_sfp_eeprom(void *hwdev, u8 *data, u32 len); - -int spnic_set_nic_feature_to_hw(void *hwdev); -void spnic_update_nic_feature(void *hwdev, u64 feature); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c deleted file mode 100644 index 634f82153dff5c26a51e7e07528bf9d60d58b473..0000000000000000000000000000000000000000 --- 
a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c +++ /dev/null @@ -1,658 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic.h" -#include "spnic_nic_cmd.h" - -static unsigned char set_vf_link_state; -module_param(set_vf_link_state, byte, 0444); -MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto, 1 represents link always up, 2 represents link always down. - default is 0."); - -/* In order to adapt different linux version */ -enum { - SPNIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ - SPNIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */ - SPNIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */ -}; - -#define NIC_CVLAN_INSERT_ENABLE 0x1 -#define NIC_QINQ_INSERT_ENABLE 0X3 -static int spnic_set_vlan_ctx(struct spnic_nic_cfg *nic_cfg, u16 func_id, - u16 vlan_tag, u16 q_id, bool add) -{ - struct nic_vlan_ctx *vlan_ctx = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - u64 out_param = 0; - int err; - - cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - cmd_buf->size = sizeof(struct nic_vlan_ctx); - vlan_ctx = (struct nic_vlan_ctx *)cmd_buf->buf; - - vlan_ctx->func_id = func_id; - vlan_ctx->qid = q_id; - vlan_ctx->vlan_tag = vlan_tag; - vlan_ctx->vlan_sel = 0; /* TPID0 in IPSU */ - vlan_ctx->vlan_mode = add ? - NIC_QINQ_INSERT_ENABLE : NIC_CVLAN_INSERT_ENABLE; - - sphw_cpu_to_be32(vlan_ctx, sizeof(struct nic_vlan_ctx)); - - err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_MODIFY_VLAN_CTX, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - - sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); - - if (err || out_param != 0) { - nic_err(nic_cfg->dev_hdl, "Failed to set vlan context, err: %d, out_param: 0x%llx\n", - err, out_param); - return -EFAULT; - } - - return err; -} - -int spnic_cfg_vf_vlan(struct spnic_nic_cfg *nic_cfg, u8 opcode, u16 vid, u8 qos, int vf_id) -{ - struct spnic_cmd_vf_vlan_config vf_vlan; - u16 out_size = sizeof(vf_vlan); - u16 glb_func_id; - int err; - u16 vlan_tag; - - /* VLAN 0 is a special case, don't allow it to be removed */ - if (!vid && opcode == SPNIC_CMD_OP_DEL) - return 0; - - memset(&vf_vlan, 0, sizeof(vf_vlan)); - - vf_vlan.opcode = opcode; - vf_vlan.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; - vf_vlan.vlan_id = vid; - vf_vlan.qos = qos; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_VF_VLAN, - &vf_vlan, sizeof(vf_vlan), &vf_vlan, &out_size); - if (err || !out_size || vf_vlan.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d vlan, err: %d, status: 0x%x,out size: 0x%x\n", - HW_VF_ID_TO_OS(vf_id), err, vf_vlan.msg_head.status, out_size); - return -EFAULT; - } - - vlan_tag = vid + (u16)(qos << VLAN_PRIO_SHIFT); - - glb_func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; - err = spnic_set_vlan_ctx(nic_cfg, glb_func_id, vlan_tag, NIC_CONFIG_ALL_QUEUE_VLAN_CTX, - opcode == SPNIC_CMD_OP_ADD); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d vlan ctx, err: %d\n", - HW_VF_ID_TO_OS(vf_id), err); - - /* rollback vlan config */ - if (opcode == SPNIC_CMD_OP_DEL) - vf_vlan.opcode = SPNIC_CMD_OP_ADD; - else - 
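/*
 * Usage sketch (constants taken from spnic_nic_cfg.h above; the tag value
 * is illustrative): passing NIC_CONFIG_ALL_QUEUE_VLAN_CTX (0xFFFF) as the
 * queue id applies the vlan context to every queue of the function, which
 * is what the VF vlan path relies on:
 *
 *   u16 tag = 100 | (3 << VLAN_PRIO_SHIFT);  // VID 100, priority 3
 *
 *   err = spnic_set_vlan_ctx(nic_cfg, func_id, tag,
 *                            NIC_CONFIG_ALL_QUEUE_VLAN_CTX, true);
 */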
vf_vlan.opcode = SPNIC_CMD_OP_DEL; - l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_VF_VLAN, &vf_vlan, - sizeof(vf_vlan), &vf_vlan, &out_size); - return err; - } - - return 0; -} - -/* this function just be called by spnic_ndo_set_vf_mac, - * others are not permitted. - */ -int spnic_set_vf_mac(void *hwdev, int vf, unsigned char *mac_addr) -{ - struct vf_data_storage *vf_info; - struct spnic_nic_cfg *nic_cfg; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); - - /* duplicate request, so just return success */ - if (ether_addr_equal(vf_info->user_mac_addr, mac_addr)) - return 0; - - ether_addr_copy(vf_info->user_mac_addr, mac_addr); - - return 0; -} - -int spnic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos) -{ - struct spnic_nic_cfg *nic_cfg; - int err; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - err = spnic_cfg_vf_vlan(nic_cfg, SPNIC_CMD_OP_ADD, vlan, qos, vf_id); - if (err) - return err; - - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan; - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos; - - nic_info(nic_cfg->dev_hdl, "Setting VLAN %u, QOS 0x%x on VF %d\n", - vlan, qos, HW_VF_ID_TO_OS(vf_id)); - - return 0; -} - -int spnic_kill_vf_vlan(void *hwdev, int vf_id) -{ - struct vf_data_storage *vf_infos; - struct spnic_nic_cfg *nic_cfg; - int err; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - vf_infos = nic_cfg->vf_infos; - - err = spnic_cfg_vf_vlan(nic_cfg, SPNIC_CMD_OP_DEL, vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, - vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos, vf_id); - if (err) - return err; - - nic_info(nic_cfg->dev_hdl, "Remove VLAN %u on VF %d\n", - vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, HW_VF_ID_TO_OS(vf_id)); - - vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0; - vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0; - - return 0; -} - -u16 spnic_vf_info_vlanprio(void *hwdev, int vf_id) -{ - struct spnic_nic_cfg *nic_cfg; - u16 pf_vlan, vlanprio; - u8 pf_qos; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - pf_vlan = nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan; - pf_qos = nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos; - vlanprio = (u16)(pf_vlan | pf_qos << SPNIC_VLAN_PRIORITY_SHIFT); - - return vlanprio; -} - -int spnic_set_vf_link_state(void *hwdev, u16 vf_id, int link) -{ - struct spnic_nic_cfg *nic_cfg = - sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - struct vf_data_storage *vf_infos = nic_cfg->vf_infos; - u8 link_status = 0; - - switch (link) { - case SPNIC_IFLA_VF_LINK_STATE_AUTO: - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false; - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_cfg->link_status ? 
true : false; - link_status = nic_cfg->link_status; - break; - case SPNIC_IFLA_VF_LINK_STATE_ENABLE: - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true; - link_status = SPNIC_LINK_UP; - break; - case SPNIC_IFLA_VF_LINK_STATE_DISABLE: - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false; - link_status = SPNIC_LINK_DOWN; - break; - default: - return -EINVAL; - } - - /* Notify the VF of its new link state */ - spnic_notify_vf_link_status(nic_cfg, vf_id, link_status); - - return 0; -} - -int spnic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk) -{ - struct spnic_cmd_spoofchk_set spoofchk_cfg; - struct vf_data_storage *vf_infos = NULL; - u16 out_size = sizeof(spoofchk_cfg); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - vf_infos = nic_cfg->vf_infos; - - memset(&spoofchk_cfg, 0, sizeof(spoofchk_cfg)); - - spoofchk_cfg.func_id = sphw_glb_pf_vf_offset(hwdev) + vf_id; - spoofchk_cfg.state = spoofchk ? 1 : 0; - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_SPOOPCHK_STATE, - &spoofchk_cfg, sizeof(spoofchk_cfg), &spoofchk_cfg, &out_size); - if (err || !out_size || spoofchk_cfg.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.msg_head.status, out_size); - err = -EINVAL; - } - - vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk; - - return err; -} - -bool spnic_vf_info_spoofchk(void *hwdev, int vf_id) -{ - struct spnic_nic_cfg *nic_cfg; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - return nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk; -} - -int spnic_set_vf_trust(void *hwdev, u16 vf_id, bool trust) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (vf_id > nic_cfg->max_vfs) - return -EINVAL; - - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust = trust; - - return 0; -} - -bool spnic_get_vf_trust(void *hwdev, int vf_id) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (vf_id > nic_cfg->max_vfs) - return -EINVAL; - - return nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust; -} - -static int spnic_cfg_vf_qps(struct spnic_nic_cfg *nic_cfg, u8 opcode, u16 vf_id, u16 num_qps) -{ - struct spnic_cmd_cfg_qps qps_info; - u16 out_size = sizeof(qps_info); - int err; - - memset(&qps_info, 0, sizeof(qps_info)); - - qps_info.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + vf_id; - qps_info.opcode = opcode; - qps_info.num_qps = num_qps; - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_FLEX_QUEUE, &qps_info, - sizeof(qps_info), &qps_info, &out_size); - if (err || !out_size || qps_info.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to %s VF(%d) qps, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == SPNIC_CMD_OP_ALLOC ? 
"alloc" : "free", - HW_VF_ID_TO_OS(vf_id), err, qps_info.msg_head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int spnic_alloc_vf_qps(void *hwdev, u16 vf_id, u16 num_qps) -{ - struct vf_data_storage *vf_infos = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (vf_id > nic_cfg->max_vfs) - return -EINVAL; - - err = spnic_cfg_vf_qps(nic_cfg, SPNIC_CMD_OP_ALLOC, vf_id, num_qps); - if (err) - return err; - - vf_infos = nic_cfg->vf_infos; - vf_infos[HW_VF_ID_TO_OS(vf_id)].num_qps = num_qps; - - return 0; -} - -int spnic_free_vf_qps(void *hwdev, u16 vf_id) -{ - struct vf_data_storage *vf_infos = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (vf_id > nic_cfg->max_vfs) - return -EINVAL; - - vf_infos = nic_cfg->vf_infos; - err = spnic_cfg_vf_qps(nic_cfg, SPNIC_CMD_OP_FREE, vf_id, - vf_infos[HW_VF_ID_TO_OS(vf_id)].num_qps); - if (err) - return err; - - vf_infos[HW_VF_ID_TO_OS(vf_id)].num_qps = 0; - - return 0; -} - -static int spnic_set_vf_tx_rate_max_min(struct spnic_nic_cfg *nic_cfg, u16 vf_id, - u32 max_rate, u32 min_rate) -{ - struct spnic_cmd_tx_rate_cfg rate_cfg; - u16 out_size = sizeof(rate_cfg); - int err; - - memset(&rate_cfg, 0, sizeof(rate_cfg)); - - rate_cfg.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + vf_id; - rate_cfg.max_rate = max_rate; - rate_cfg.min_rate = min_rate; - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_MAX_MIN_RATE, - &rate_cfg, sizeof(rate_cfg), &rate_cfg, &out_size); - if (rate_cfg.msg_head.status || err || !out_size) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d max rate %u, min rate %u, err: %d, status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err, - rate_cfg.msg_head.status, out_size); - return -EIO; - } - - return 0; -} - -int spnic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - err = spnic_set_vf_tx_rate_max_min(nic_cfg, vf_id, max_rate, min_rate); - if (err) - return err; - - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate; - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate; - - return 0; -} - -void spnic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi) -{ - struct vf_data_storage *vfinfo; - struct spnic_nic_cfg *nic_cfg; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - vfinfo = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); - - ivi->vf = HW_VF_ID_TO_OS(vf_id); - ether_addr_copy(ivi->mac, vfinfo->user_mac_addr); - ivi->vlan = vfinfo->pf_vlan; - ivi->qos = vfinfo->pf_qos; - - ivi->spoofchk = vfinfo->spoofchk; - - ivi->trusted = vfinfo->trust; - - ivi->max_tx_rate = vfinfo->max_rate; - ivi->min_tx_rate = vfinfo->min_rate; - - if (!vfinfo->link_forced) - ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; - else if (vfinfo->link_up) - ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; - else - ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; -} - -static int spnic_init_vf_infos(struct spnic_nic_cfg *nic_cfg, u16 vf_id) -{ - struct vf_data_storage *vf_infos = nic_cfg->vf_infos; - u8 vf_link_state; - - if (set_vf_link_state > SPNIC_IFLA_VF_LINK_STATE_DISABLE) { - nic_warn(nic_cfg->dev_hdl, "Module Parameter set_vf_link_state value %u is out of range, resetting to %d\n", - set_vf_link_state, 
SPNIC_IFLA_VF_LINK_STATE_AUTO); - set_vf_link_state = SPNIC_IFLA_VF_LINK_STATE_AUTO; - } - - vf_link_state = set_vf_link_state; - - switch (vf_link_state) { - case SPNIC_IFLA_VF_LINK_STATE_AUTO: - vf_infos[vf_id].link_forced = false; - break; - case SPNIC_IFLA_VF_LINK_STATE_ENABLE: - vf_infos[vf_id].link_forced = true; - vf_infos[vf_id].link_up = true; - break; - case SPNIC_IFLA_VF_LINK_STATE_DISABLE: - vf_infos[vf_id].link_forced = true; - vf_infos[vf_id].link_up = false; - break; - default: - nic_err(nic_cfg->dev_hdl, "Input parameter set_vf_link_state error: %u\n", - vf_link_state); - return -EINVAL; - } - - return 0; -} - -static int vf_func_register(struct spnic_nic_cfg *nic_cfg) -{ - struct spnic_cmd_register_vf register_info; - u16 out_size = sizeof(register_info); - int err; - - err = sphw_register_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC, nic_cfg, - spnic_vf_event_handler); - if (err) - return err; - - err = sphw_register_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK, nic_cfg, - spnic_vf_mag_event_handler); - if (err) - goto reg_hilink_err; - - memset(®ister_info, 0, sizeof(register_info)); - register_info.op_register = 1; - err = sphw_mbox_to_pf(nic_cfg->hwdev, SPHW_MOD_L2NIC, SPNIC_NIC_CMD_VF_REGISTER, - ®ister_info, sizeof(register_info), ®ister_info, &out_size, 0, - SPHW_CHANNEL_NIC); - if (err || !out_size || register_info.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n", - err, register_info.msg_head.status, out_size); - err = -EIO; - goto register_err; - } - - return 0; - -register_err: - sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); - -reg_hilink_err: - sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - - return err; -} - -static int pf_init_vf_infos(struct spnic_nic_cfg *nic_cfg) -{ - u32 size; - int err; - u16 i; - - err = sphw_register_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC, nic_cfg, - spnic_pf_event_handler); - if (err) - return err; - - err = sphw_register_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_HILINK, nic_cfg, - spnic_pf_mag_event_handler); - if (err) - goto register_mag_mgmt_cb_err; - - nic_cfg->max_vfs = sphw_func_max_vf(nic_cfg->hwdev); - size = sizeof(*nic_cfg->vf_infos) * nic_cfg->max_vfs; - if (!size) - return 0; - - nic_cfg->vf_infos = kzalloc(size, GFP_KERNEL); - if (!nic_cfg->vf_infos) { - err = -ENOMEM; - goto alloc_vf_infos_err; - } - - for (i = 0; i < nic_cfg->max_vfs; i++) { - err = spnic_init_vf_infos(nic_cfg, i); - if (err) - goto init_vf_infos_err; - } - - err = sphw_register_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC, nic_cfg, - spnic_pf_mbox_handler); - if (err) - goto register_nic_mbox_cb_err; - - err = sphw_register_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK, nic_cfg, - spnic_pf_mag_mbox_handler); - if (err) - goto register_mag_mbox_cb_err; - - return 0; - -register_mag_mbox_cb_err: - sphw_unregister_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - -register_nic_mbox_cb_err: -init_vf_infos_err: - kfree(nic_cfg->vf_infos); - -alloc_vf_infos_err: - sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); - -register_mag_mgmt_cb_err: - sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - - return err; -} - -int spnic_vf_func_init(struct spnic_nic_cfg *nic_cfg) -{ - if (sphw_func_type(nic_cfg->hwdev) == TYPE_VF) - return vf_func_register(nic_cfg); - - return pf_init_vf_infos(nic_cfg); -} - -void spnic_vf_func_free(struct spnic_nic_cfg *nic_cfg) -{ - struct spnic_cmd_register_vf unregister; - u16 out_size = sizeof(unregister); - int err; - - 
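	/*
	 * Teardown mirrors spnic_vf_func_init(): a VF sends an unregister
	 * mailbox message to its PF and then drops the L2NIC/HILINK mailbox
	 * callbacks it registered, while a PF clears the per-VF state and
	 * unregisters its management and PF-mailbox callbacks instead.
	 */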
memset(&unregister, 0, sizeof(unregister)); - unregister.op_register = 0; - if (sphw_func_type(nic_cfg->hwdev) == TYPE_VF) { - err = sphw_mbox_to_pf(nic_cfg->hwdev, SPHW_MOD_L2NIC, SPNIC_NIC_CMD_VF_REGISTER, - &unregister, sizeof(unregister), &unregister, &out_size, 0, - SPHW_CHANNEL_NIC); - if (err || !out_size || unregister.msg_head.status) - nic_err(nic_cfg->dev_hdl, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n", - err, unregister.msg_head.status, out_size); - - sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); - } else { - if (nic_cfg->vf_infos) { - sphw_unregister_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - sphw_unregister_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); - spnic_clear_vfs_info(nic_cfg->hwdev); - kfree(nic_cfg->vf_infos); - } - - sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); - } -} - -static void clear_vf_infos(void *hwdev, u16 vf_id) -{ - struct vf_data_storage *vf_infos; - struct spnic_nic_cfg *nic_cfg; - u16 func_id; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - func_id = sphw_glb_pf_vf_offset(hwdev) + vf_id; - vf_infos = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); - if (vf_infos->use_specified_mac) - spnic_del_mac(hwdev, vf_infos->drv_mac_addr, vf_infos->pf_vlan, - func_id, SPHW_CHANNEL_NIC); - - if (spnic_vf_info_vlanprio(hwdev, vf_id)) - spnic_kill_vf_vlan(hwdev, vf_id); - - if (vf_infos->max_rate) - spnic_set_vf_tx_rate(hwdev, vf_id, 0, 0); - - if (vf_infos->spoofchk) - spnic_set_vf_spoofchk(hwdev, vf_id, false); - - if (vf_infos->trust) - spnic_set_vf_trust(hwdev, vf_id, false); - - memset(vf_infos, 0, sizeof(*vf_infos)); - /* set vf_infos to default */ - spnic_init_vf_infos(nic_cfg, HW_VF_ID_TO_OS(vf_id)); -} - -void spnic_clear_vfs_info(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - u16 i; - - for (i = 0; i < nic_cfg->max_vfs; i++) - clear_vf_infos(hwdev, OS_VF_ID_TO_HW(i)); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h deleted file mode 100644 index 689e84d90e971a7baacaad01fac1281e51fd8b0f..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h +++ /dev/null @@ -1,105 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_CMD_H -#define SPNIC_NIC_CMD_H - -/* Commands between NIC to MPU - */ -enum spnic_nic_cmd { - SPNIC_NIC_CMD_VF_REGISTER = 0, /* only for PFD and VFD */ - - /* FUNC CFG */ - SPNIC_NIC_CMD_SET_FUNC_TBL = 5, - SPNIC_NIC_CMD_SET_VPORT_ENABLE, - SPNIC_NIC_CMD_SET_RX_MODE, - SPNIC_NIC_CMD_SQ_CI_ATTR_SET, - SPNIC_NIC_CMD_GET_VPORT_STAT, - SPNIC_NIC_CMD_CLEAN_VPORT_STAT, - SPNIC_NIC_CMD_CLEAR_QP_RESOURCE, - SPNIC_NIC_CMD_CFG_FLEX_QUEUE, - /* LRO CFG */ - SPNIC_NIC_CMD_CFG_RX_LRO, - SPNIC_NIC_CMD_CFG_LRO_TIMER, - SPNIC_NIC_CMD_FEATURE_NEGO, - - /* MAC & VLAN CFG */ - SPNIC_NIC_CMD_GET_MAC = 20, - SPNIC_NIC_CMD_SET_MAC, - SPNIC_NIC_CMD_DEL_MAC, - SPNIC_NIC_CMD_UPDATE_MAC, - SPNIC_NIC_CMD_GET_ALL_DEFAULT_MAC, - - SPNIC_NIC_CMD_CFG_FUNC_VLAN, - SPNIC_NIC_CMD_SET_VLAN_FILTER_EN, - SPNIC_NIC_CMD_SET_RX_VLAN_OFFLOAD, - - /* SR-IOV */ - SPNIC_NIC_CMD_CFG_VF_VLAN = 40, - SPNIC_NIC_CMD_SET_SPOOPCHK_STATE, - /* RATE LIMIT */ - SPNIC_NIC_CMD_SET_MAX_MIN_RATE, - - /* RSS CFG */ - SPNIC_NIC_CMD_RSS_CFG = 60, - SPNIC_NIC_CMD_RSS_TEMP_MGR, - 
SPNIC_NIC_CMD_GET_RSS_CTX_TBL, - SPNIC_NIC_CMD_CFG_RSS_HASH_KEY, - SPNIC_NIC_CMD_CFG_RSS_HASH_ENGINE, - SPNIC_NIC_CMD_GET_INDIR_TBL, - - /* DPI/FDIR */ - SPNIC_NIC_CMD_ADD_TC_FLOW = 80, - SPNIC_NIC_CMD_DEL_TC_FLOW, - SPNIC_NIC_CMD_GET_TC_FLOW, - SPNIC_NIC_CMD_FLUSH_TCAM, - SPNIC_NIC_CMD_CFG_TCAM_BLOCK, - SPNIC_NIC_CMD_ENABLE_TCAM, - SPNIC_NIC_CMD_GET_TCAM_BLOCK, - SPNIC_NIC_CMD_CFG_DPI_TABLE_ID, - - /* PORT CFG */ - SPNIC_NIC_CMD_SET_PORT_ENABLE = 100, - SPNIC_NIC_CMD_CFG_PAUSE_INFO, - - SPNIC_NIC_CMD_SET_PORT_CAR, - SPNIC_NIC_CMD_SET_ER_DROP_PKT, - - SPNIC_NIC_CMD_VF_COS, - SPNIC_NIC_CMD_SETUP_COS_MAPPING, - SPNIC_NIC_CMD_SET_ETS, - SPNIC_NIC_CMD_SET_PFC, - - SPNIC_NIC_CMD_TX_PAUSE_EXCP_NOTICE = 118, - SPNIC_NIC_CMD_INQUIRT_PAUSE_CFG = 119, - - /* MISC */ - SPNIC_NIC_CMD_BIOS_CFG = 120, - SPNIC_NIC_CMD_SET_FIRMWARE_CUSTOM_PACKETS_MSG, - - /* DFX */ - SPNIC_NIC_CMD_GET_SM_TABLE = 140, - SPNIC_NIC_CMD_RD_LINE_TBL, - - SPNIC_NIC_CMD_SET_VHD_CFG = 161, - - SPNIC_NIC_CMD_MAX = 256, -}; - -/* NIC CMDQ MODE */ -enum spnic_ucode_cmd { - SPNIC_UCODE_CMD_MODIFY_QUEUE_CTX = 0, - SPNIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, - SPNIC_UCODE_CMD_ARM_SQ, - SPNIC_UCODE_CMD_ARM_RQ, - SPNIC_UCODE_CMD_SET_RSS_INDIR_TABLE, - SPNIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, - SPNIC_UCODE_CMD_GET_RSS_INDIR_TABLE, - SPNIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE, - SPNIC_UCODE_CMD_SET_IQ_ENABLE, - SPNIC_UCODE_CMD_SET_RQ_FLUSH = 10, - SPNIC_UCODE_CMD_MODIFY_VLAN_CTX, - SPNIC_UCODE_CMD_DPI_HASH_TABLE, -}; - -#endif /* SPNIC_NIC_CMD_H */ diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c deleted file mode 100644 index 08fe958a6d00eede069a30efde555ed2bd41514d..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_mt.h" -#include "spnic_nic_qp.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic.h" - -int spnic_dbg_get_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, - u8 *wqe, u16 *wqe_size, enum spnic_queue_type q_type) -{ - struct spnic_io_queue *queue = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - void *src_wqebb = NULL; - u32 i, offset; - - if (!hwdev) { - pr_err("hwdev is NULL.\n"); - return -EINVAL; - } - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (q_id >= nic_cfg->num_qps) { - pr_err("q_id[%u] > num_qps_cfg[%u].\n", q_id, nic_cfg->num_qps); - return -EINVAL; - } - - queue = (q_type == SPNIC_SQ) ? 
&nic_cfg->sq[q_id] : &nic_cfg->rq[q_id]; - - if ((idx + wqebb_cnt) > queue->wq.q_depth) { - pr_err("(idx[%u] + idx[%u]) > q_depth[%u].\n", - idx, wqebb_cnt, queue->wq.q_depth); - return -EINVAL; - } - - if (*wqe_size != (queue->wq.wqebb_size * wqebb_cnt)) { - pr_err("Unexpect out buf size from user :%u, expect: %d\n", - *wqe_size, (queue->wq.wqebb_size * wqebb_cnt)); - return -EINVAL; - } - - for (i = 0; i < wqebb_cnt; i++) { - src_wqebb = sphw_wq_wqebb_addr(&queue->wq, WQ_MASK_IDX(&queue->wq, idx + i)); - offset = queue->wq.wqebb_size * i; - memcpy(wqe + offset, src_wqebb, queue->wq.wqebb_size); - } - - return 0; -} - -int spnic_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, u32 msg_size) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_io_queue *sq = NULL; - - if (!hwdev || !sq_info) { - pr_err("hwdev or sq_info is NULL.\n"); - return -EINVAL; - } - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (q_id >= nic_cfg->num_qps) { - nic_err(nic_cfg->dev_hdl, "Input queue id(%u) is larger than the actual queue number\n", - q_id); - return -EINVAL; - } - - if (msg_size != sizeof(*sq_info)) { - nic_err(nic_cfg->dev_hdl, "Unexpect out buf size from user :%u, expect: %lu\n", - msg_size, sizeof(*sq_info)); - return -EINVAL; - } - - sq = &nic_cfg->sq[q_id]; - - sq_info->q_id = q_id; - sq_info->pi = spnic_get_sq_local_pi(sq); - sq_info->ci = spnic_get_sq_local_ci(sq); - sq_info->fi = spnic_get_sq_hw_ci(sq); - sq_info->q_depth = sq->wq.q_depth; - sq_info->wqebb_size = sq->wq.wqebb_size; - - sq_info->ci_addr = sq->tx.cons_idx_addr; - - sq_info->cla_addr = sq->wq.wq_block_paddr; - sq_info->slq_handle = sq; - - sq_info->doorbell.map_addr = (u64 *)sq->db_addr; - - return 0; -} - -int spnic_dbg_get_rq_info(void *hwdev, u16 q_id, struct nic_rq_info *rq_info, u32 msg_size) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_io_queue *rq = NULL; - - if (!hwdev || !rq_info) { - pr_err("hwdev or rq_info is NULL.\n"); - return -EINVAL; - } - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (q_id >= nic_cfg->num_qps) { - nic_err(nic_cfg->dev_hdl, "Input queue id(%u) is larger than the actual queue number\n", - q_id); - return -EINVAL; - } - - if (msg_size != sizeof(*rq_info)) { - nic_err(nic_cfg->dev_hdl, "Unexpect out buf size from user: %u, expect: %lu\n", - msg_size, sizeof(*rq_info)); - return -EINVAL; - } - - rq = &nic_cfg->rq[q_id]; - - rq_info->q_id = q_id; - - rq_info->hw_pi = cpu_to_be16(*rq->rx.pi_virt_addr); - rq_info->ci = spnic_get_rq_local_ci(rq); - - rq_info->sw_pi = 0; - - rq_info->wqebb_size = rq->wq.wqebb_size; - rq_info->q_depth = (u16)rq->wq.q_depth; - - rq_info->buf_len = nic_cfg->rx_buff_len; - - rq_info->slq_handle = rq; - - rq_info->ci_wqe_page_addr = sphw_wq_get_first_wqe_page_addr(&rq->wq); - rq_info->ci_cla_tbl_addr = rq->wq.wq_block_paddr; - - rq_info->msix_idx = rq->msix_entry_idx; - rq_info->msix_vector = 0; - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h deleted file mode 100644 index d86c65ed5f4f50c0ac183dd3e3af855b62fee069..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_DBG_H -#define SPNIC_NIC_DBG_H - -#include "spnic_nic_io.h" - -int spnic_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, u32 msg_size); - -int 
spnic_dbg_get_rq_info(void *hwdev, u16 q_id, struct nic_rq_info *rq_info, u32 msg_size); - -int spnic_dbg_get_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, - u8 *wqe, u16 *wqe_size, enum spnic_queue_type q_type); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h deleted file mode 100644 index 8a0708fda19a68f7f9e300f3e4dcbf01b2fb24a6..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h +++ /dev/null @@ -1,354 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_DEV_H -#define SPNIC_NIC_DEV_H - -#include -#include -#include -#include - -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_tx.h" -#include "spnic_rx.h" -#include "spnic_dcb.h" - -#define SPNIC_NIC_DRV_NAME "spnic" -#define SPNIC_DRV_VERSION "B090" -#define SPNIC_DRV_DESC "Ramaxel(R) Network Interface Card Driver" - -#define SPNIC_FUNC_IS_VF(hwdev) (sphw_func_type(hwdev) == TYPE_VF) - -#define SPNIC_AVG_PKT_SMALL 256U -#define SPNIC_MODERATONE_DELAY HZ - -#define LP_PKT_CNT 64 - -enum spnic_flags { - SPNIC_INTF_UP, - SPNIC_MAC_FILTER_CHANGED, - SPNIC_LP_TEST, - SPNIC_RSS_ENABLE, - SPNIC_DCB_ENABLE, - SPNIC_SAME_RXTX, - SPNIC_INTR_ADAPT, - SPNIC_UPDATE_MAC_FILTER, - SPNIC_CHANGE_RES_INVALID, - SPNIC_RSS_DEFAULT_INDIR, -}; - -#define SPHW_CHANNEL_RES_VALID(nic_dev) \ - (test_bit(SPNIC_INTF_UP, &(nic_dev)->flags) && \ - !test_bit(SPNIC_CHANGE_RES_INVALID, &(nic_dev)->flags)) - -#define RX_BUFF_NUM_PER_PAGE 2 - -#define VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap)) -#define VLAN_BITMAP_BITS_SIZE(nic_dev) (VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8) -#define VLAN_NUM_BITMAPS(nic_dev) (VLAN_N_VID / \ - VLAN_BITMAP_BITS_SIZE(nic_dev)) -#define VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \ - VLAN_BITMAP_BYTE_SIZE(nic_dev)) -#define VID_LINE(nic_dev, vid) ((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev)) -#define VID_COL(nic_dev, vid) ((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1)) - -#define SPNIC_DRV_FEATURE NIC_F_ALL_MASK - -enum spnic_event_work_flags { - EVENT_WORK_TX_TIMEOUT, -}; - -enum spnic_rx_mode_state { - SPNIC_HW_PROMISC_ON, - SPNIC_HW_ALLMULTI_ON, - SPNIC_PROMISC_FORCE_ON, - SPNIC_ALLMULTI_FORCE_ON, -}; - -enum mac_filter_state { - SPNIC_MAC_WAIT_HW_SYNC, - SPNIC_MAC_HW_SYNCED, - SPNIC_MAC_WAIT_HW_UNSYNC, - SPNIC_MAC_HW_UNSYNCED, -}; - -struct spnic_mac_filter { - struct list_head list; - u8 addr[ETH_ALEN]; - unsigned long state; -}; - -struct spnic_irq { - struct net_device *netdev; - /* IRQ corresponding index number */ - u16 msix_entry_idx; - u32 irq_id; /* The IRQ number from OS */ - char irq_name[IFNAMSIZ + 16]; - struct napi_struct napi; - cpumask_t affinity_mask; - struct spnic_txq *txq; - struct spnic_rxq *rxq; -}; - -struct spnic_intr_coal_info { - u8 pending_limt; - u8 coalesce_timer_cfg; - u8 resend_timer_cfg; - - u64 pkt_rate_low; - u8 rx_usecs_low; - u8 rx_pending_limt_low; - u64 pkt_rate_high; - u8 rx_usecs_high; - u8 rx_pending_limt_high; - - u8 user_set_intr_coal_flag; -}; - -struct spnic_dyna_txrxq_params { - u16 num_qps; - u16 num_rss; - u16 rss_limit; - u8 num_tc; - u8 rsvd1; - u32 sq_depth; - u32 rq_depth; - - struct spnic_dyna_txq_res *txqs_res; - struct spnic_dyna_rxq_res *rxqs_res; - struct spnic_irq *irq_cfg; -}; - -#define SPNIC_NIC_STATS_INC(nic_dev, field) \ -do { \ - u64_stats_update_begin(&(nic_dev)->stats.syncp);\ - (nic_dev)->stats.field++; \ - 
u64_stats_update_end(&(nic_dev)->stats.syncp); \ -} while (0) - -struct spnic_nic_stats { - u64 netdev_tx_timeout; - - /* Subdivision statistics show in private tool */ - u64 tx_carrier_off_drop; - u64 tx_invalid_qid; - - struct u64_stats_sync syncp; -}; - -#define SPNIC_TCAM_DYNAMIC_BLOCK_SIZE 16 -#define SPNIC_MAX_TCAM_FILTERS 512 - -#define SPNIC_PKT_TCAM_DYNAMIC_INDEX_START(block_index) \ - (SPNIC_TCAM_DYNAMIC_BLOCK_SIZE * (block_index)) - -struct spnic_rx_flow_rule { - struct list_head rules; - int tot_num_rules; -}; - -struct spnic_tcam_dynamic_block { - struct list_head block_list; - u16 dynamic_block_id; - u16 dynamic_index_cnt; - u8 dynamic_index_used[SPNIC_TCAM_DYNAMIC_BLOCK_SIZE]; -}; - -struct spnic_tcam_dynamic_block_info { - struct list_head tcam_dynamic_list; - u16 dynamic_block_cnt; -}; - -struct spnic_tcam_filter { - struct list_head tcam_filter_list; - u16 dynamic_block_id; - u16 index; - struct tag_tcam_key tcam_key; - u16 queue; -}; - -/* function level struct info */ -struct spnic_tcam_info { - u16 tcam_rule_nums; - struct list_head tcam_list; - struct spnic_tcam_dynamic_block_info tcam_dynamic_info; -}; - -struct spnic_nic_dev { - struct pci_dev *pdev; - struct net_device *netdev; - void *hwdev; - - int poll_weight; - - unsigned long *vlan_bitmap; - - u16 max_qps; - - u32 msg_enable; - unsigned long flags; - - u32 lro_replenish_thld; - u32 dma_rx_buff_size; - u16 rx_buff_len; - u32 page_order; - - /* Rss related varibles */ - u8 rss_hash_engine; - struct nic_rss_type rss_type; - u8 *rss_hkey; - /* hkey in big endian */ - u32 *rss_hkey_be; - u32 *rss_indir; - - u32 dcb_changes; - struct spnic_dcb_config hw_dcb_cfg; - struct spnic_dcb_config wanted_dcb_cfg; - unsigned long dcb_flags; - int disable_port_cnt; - /* lock for disable or enable traffic flow */ - struct semaphore dcb_sem; - - struct spnic_intr_coal_info *intr_coalesce; - unsigned long last_moder_jiffies; - u32 adaptive_rx_coal; - u8 intr_coal_set_flag; - - struct spnic_nic_stats stats; - - /* lock for nic resource */ - struct mutex nic_mutex; - bool force_port_disable; - struct semaphore port_state_sem; - u8 link_status; - - struct nic_service_cap nic_cap; - - struct spnic_txq *txqs; - struct spnic_rxq *rxqs; - struct spnic_dyna_txrxq_params q_params; - - u16 num_qp_irq; - struct irq_info *qps_irq_info; - - struct workqueue_struct *workq; - - struct work_struct rx_mode_work; - struct delayed_work moderation_task; - - struct list_head uc_filter_list; - struct list_head mc_filter_list; - unsigned long rx_mod_state; - int netdev_uc_cnt; - int netdev_mc_cnt; - - int lb_test_rx_idx; - int lb_pkt_len; - u8 *lb_test_rx_buf; - - struct spnic_tcam_info tcam; - struct spnic_rx_flow_rule rx_flow_rule; - - struct bpf_prog *xdp_prog; - - struct delayed_work periodic_work; - /* reference to enum spnic_event_work_flags */ - unsigned long event_flag; -}; - -#define IPSEC_CAP_IS_SUPPORT(nic_dev) ((nic_dev)->ipsec) - -#define spnic_msg(level, nic_dev, msglvl, format, arg...) \ -do { \ - if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \ - == NETREG_REGISTERED) \ - nicif_##level((nic_dev), msglvl, (nic_dev)->netdev, \ - format, ## arg); \ - else \ - nic_##level(&(nic_dev)->pdev->dev, \ - format, ## arg); \ -} while (0) - -#define spnic_info(nic_dev, msglvl, format, arg...) \ - spnic_msg(info, nic_dev, msglvl, format, ## arg) - -#define spnic_warn(nic_dev, msglvl, format, arg...) \ - spnic_msg(warn, nic_dev, msglvl, format, ## arg) - -#define spnic_err(nic_dev, msglvl, format, arg...) 
\ - spnic_msg(err, nic_dev, msglvl, format, ## arg) - -#define nicif_err(priv, type, dev, fmt, args...) \ - netif_level(err, priv, type, dev, "[NIC]" fmt, ##args) -#define nicif_warn(priv, type, dev, fmt, args...) \ - netif_level(warn, priv, type, dev, "[NIC]" fmt, ##args) -#define nicif_notice(priv, type, dev, fmt, args...) \ - netif_level(notice, priv, type, dev, "[NIC]" fmt, ##args) -#define nicif_info(priv, type, dev, fmt, args...) \ - netif_level(info, priv, type, dev, "[NIC]" fmt, ##args) -#define nicif_dbg(priv, type, dev, fmt, args...) \ - netif_level(dbg, priv, type, dev, "[NIC]" fmt, ##args) - -extern struct spnic_uld_info nic_uld_info; - -u32 spnic_get_io_stats_size(struct spnic_nic_dev *nic_dev); - -void spnic_get_io_stats(struct spnic_nic_dev *nic_dev, void *stats); - -int spnic_open(struct net_device *netdev); - -int spnic_close(struct net_device *netdev); - -void spnic_set_ethtool_ops(struct net_device *netdev); - -void spnicvf_set_ethtool_ops(struct net_device *netdev); - -int nic_ioctl(void *uld_dev, u32 cmd, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size); - -void spnic_update_num_qps(struct net_device *netdev); - -int spnic_qps_irq_init(struct spnic_nic_dev *nic_dev); - -void spnic_qps_irq_deinit(struct spnic_nic_dev *nic_dev); - -void spnic_set_netdev_ops(struct spnic_nic_dev *nic_dev); - -int spnic_set_hw_features(struct spnic_nic_dev *nic_dev); - -void spnic_set_rx_mode_work(struct work_struct *work); - -void spnic_clean_mac_list_filter(struct spnic_nic_dev *nic_dev); - -void spnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data); - -void spnic_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data); - -int spnic_get_sset_count(struct net_device *netdev, int sset); - -int spnic_force_port_disable(struct spnic_nic_dev *nic_dev); - -int spnic_force_set_port_state(struct spnic_nic_dev *nic_dev, bool enable); - -int spnic_maybe_set_port_state(struct spnic_nic_dev *nic_dev, bool enable); - -int spnic_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *link_settings); -int spnic_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *link_settings); - -void spnic_auto_moderation_work(struct work_struct *work); - -typedef void (*spnic_reopen_handler)(struct spnic_nic_dev *nic_dev, const void *priv_data); -int spnic_change_channel_settings(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *trxq_params, - spnic_reopen_handler reopen_handler, const void *priv_data); - -void spnic_link_status_change(struct spnic_nic_dev *nic_dev, bool status); - -bool spnic_is_xdp_enable(struct spnic_nic_dev *nic_dev); -int spnic_xdp_max_mtu(struct spnic_nic_dev *nic_dev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c deleted file mode 100644 index 0e8a2c4a3961a7e62ec055a4f6f741a3c53593de..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c +++ /dev/null @@ -1,506 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic.h" -#include "spnic_nic_cmd.h" - -static int spnic_init_vf_config(struct spnic_nic_cfg *nic_cfg, u16 
vf_id) -{ - struct vf_data_storage *vf_info; - u16 func_id; - int err = 0; - - vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); - ether_addr_copy(vf_info->drv_mac_addr, vf_info->user_mac_addr); - if (!is_zero_ether_addr(vf_info->drv_mac_addr)) { - vf_info->use_specified_mac = true; - func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + vf_id; - - err = spnic_set_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, - vf_info->pf_vlan, func_id, SPHW_CHANNEL_NIC); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d MAC\n", - HW_VF_ID_TO_OS(vf_id)); - return err; - } - } else { - vf_info->use_specified_mac = false; - } - - if (spnic_vf_info_vlanprio(nic_cfg->hwdev, vf_id)) { - err = spnic_cfg_vf_vlan(nic_cfg, SPNIC_CMD_OP_ADD, - vf_info->pf_vlan, vf_info->pf_qos, vf_id); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to add VF %d VLAN_QOS\n", - HW_VF_ID_TO_OS(vf_id)); - return err; - } - } - - if (vf_info->max_rate) { - err = spnic_set_vf_tx_rate(nic_cfg->hwdev, vf_id, vf_info->max_rate, - vf_info->min_rate); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d max rate %u, min rate %u\n", - HW_VF_ID_TO_OS(vf_id), vf_info->max_rate, - vf_info->min_rate); - return err; - } - } - - return 0; -} - -static int register_vf_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id) -{ - int err; - - if (vf_id > nic_cfg->max_vfs) { - nic_err(nic_cfg->dev_hdl, "Register VF id %d exceed limit[0-%d]\n", - HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_cfg->max_vfs)); - return -EFAULT; - } - - err = spnic_init_vf_config(nic_cfg, vf_id); - if (err) - return err; - - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true; - - return 0; -} - -static int unregister_vf_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id) -{ - struct vf_data_storage *vf_info = - nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); - struct spnic_port_mac_set mac_info; - u16 out_size = sizeof(mac_info); - int err; - - if (vf_id > nic_cfg->max_vfs) - return -EFAULT; - - vf_info->registered = false; - - memset(&mac_info, 0, sizeof(mac_info)); - mac_info.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; - mac_info.vlan_id = vf_info->pf_vlan; - ether_addr_copy(mac_info.mac, vf_info->drv_mac_addr); - - if (vf_info->use_specified_mac || vf_info->pf_vlan) { - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_DEL_MAC, - &mac_info, sizeof(mac_info), &mac_info, &out_size); - if (err || mac_info.msg_head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf_id), err, - mac_info.msg_head.status, out_size); - return -EFAULT; - } - } - - memset(vf_info->drv_mac_addr, 0, ETH_ALEN); - - return 0; -} - -static int spnic_register_vf_msg_handler(struct spnic_nic_cfg *nic_cfg, - u16 vf_id, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct spnic_cmd_register_vf *register_vf = buf_in; - struct spnic_cmd_register_vf *register_info = buf_out; - int err; - - if (register_vf->op_register) - err = register_vf_msg_handler(nic_cfg, vf_id); - else - err = unregister_vf_msg_handler(nic_cfg, vf_id); - - if (err) - register_info->msg_head.status = EFAULT; - - *out_size = sizeof(*register_info); - - return 0; -} - -static int spnic_get_vf_cos_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size) -{ - struct spnic_cmd_vf_dcb_state *dcb_state = buf_out; - - memcpy(&dcb_state->state, &nic_cfg->dcb_state, - sizeof(nic_cfg->dcb_state)); - - dcb_state->msg_head.status = 0; - *out_size 
= sizeof(*dcb_state); - return 0; -} - -static int spnic_get_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); - struct spnic_port_mac_set *mac_info = buf_out; - - int err; - - if (sphw_support_ovs(nic_cfg->hwdev, NULL)) { - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_GET_MAC, buf_in, - in_size, buf_out, out_size); - if (!err) { - if (is_zero_ether_addr(mac_info->mac)) - ether_addr_copy(mac_info->mac, vf_info->drv_mac_addr); - } - return err; - } - - ether_addr_copy(mac_info->mac, vf_info->drv_mac_addr); - mac_info->msg_head.status = 0; - *out_size = sizeof(*mac_info); - - return 0; -} - -static int spnic_set_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); - struct spnic_port_mac_set *mac_in = buf_in; - struct spnic_port_mac_set *mac_out = buf_out; - int err; - - if (vf_info->use_specified_mac && !vf_info->trust && - is_valid_ether_addr(mac_in->mac)) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", - HW_VF_ID_TO_OS(vf)); - mac_out->msg_head.status = SPNIC_PF_SET_VF_ALREADY; - *out_size = sizeof(*mac_out); - return 0; - } - - if (is_valid_ether_addr(mac_in->mac)) - mac_in->vlan_id = vf_info->pf_vlan; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_MAC, - buf_in, in_size, buf_out, out_size); - if (err || !(*out_size)) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d MAC address, err: %d,status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf), err, mac_out->msg_head.status, - *out_size); - return -EFAULT; - } - - if (is_valid_ether_addr(mac_in->mac) && !mac_out->msg_head.status) - ether_addr_copy(vf_info->drv_mac_addr, mac_in->mac); - - return err; -} - -static int spnic_del_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); - struct spnic_port_mac_set *mac_in = buf_in; - struct spnic_port_mac_set *mac_out = buf_out; - int err; - - if (vf_info->use_specified_mac && !vf_info->trust && - is_valid_ether_addr(mac_in->mac)) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", - HW_VF_ID_TO_OS(vf)); - mac_out->msg_head.status = SPNIC_PF_SET_VF_ALREADY; - *out_size = sizeof(*mac_out); - return 0; - } - - if (is_valid_ether_addr(mac_in->mac)) - mac_in->vlan_id = vf_info->pf_vlan; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_DEL_MAC, - buf_in, in_size, buf_out, out_size); - if (err || !(*out_size)) { - nic_err(nic_cfg->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf), err, mac_out->msg_head.status, - *out_size); - return -EFAULT; - } - - if (is_valid_ether_addr(mac_in->mac) && !mac_out->msg_head.status) - eth_zero_addr(vf_info->drv_mac_addr); - - return err; -} - -static int spnic_update_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, - u16 vf, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); - struct spnic_port_mac_update *mac_in = buf_in; - struct spnic_port_mac_update *mac_out = buf_out; - int err; - - if (!is_valid_ether_addr(mac_in->new_mac)) { - nic_err(nic_cfg->dev_hdl, "Update VF MAC 
is invalid.\n"); - return -EINVAL; - } - - if (vf_info->use_specified_mac && !vf_info->trust) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", - HW_VF_ID_TO_OS(vf)); - mac_out->msg_head.status = SPNIC_PF_SET_VF_ALREADY; - *out_size = sizeof(*mac_out); - return 0; - } - - mac_in->vlan_id = vf_info->pf_vlan; - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_UPDATE_MAC, - buf_in, in_size, buf_out, out_size); - if (err || !(*out_size)) { - nic_warn(nic_cfg->dev_hdl, "Failed to update VF %d MAC, err: %d,status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf), err, mac_out->msg_head.status, - *out_size); - return -EFAULT; - } - - if (!mac_out->msg_head.status) - ether_addr_copy(vf_info->drv_mac_addr, mac_in->new_mac); - - return err; -} - -const struct vf_msg_handler vf_cmd_handler[] = { - { - .cmd = SPNIC_NIC_CMD_VF_REGISTER, - .handler = spnic_register_vf_msg_handler, - }, - - { - .cmd = SPNIC_NIC_CMD_GET_MAC, - .handler = spnic_get_vf_mac_msg_handler, - }, - - { - .cmd = SPNIC_NIC_CMD_SET_MAC, - .handler = spnic_set_vf_mac_msg_handler, - }, - - { - .cmd = SPNIC_NIC_CMD_DEL_MAC, - .handler = spnic_del_vf_mac_msg_handler, - }, - - { - .cmd = SPNIC_NIC_CMD_UPDATE_MAC, - .handler = spnic_update_vf_mac_msg_handler, - }, - - { - .cmd = SPNIC_NIC_CMD_VF_COS, - .handler = spnic_get_vf_cos_msg_handler - }, -}; - -static int _l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u16 channel) -{ - u32 i, cmd_cnt = ARRAY_LEN(vf_cmd_handler); - bool cmd_to_pf = false; - - if (sphw_func_type(hwdev) == TYPE_VF) { - for (i = 0; i < cmd_cnt; i++) { - if (cmd == vf_cmd_handler[i].cmd) - cmd_to_pf = true; - } - } - - if (cmd_to_pf) - return sphw_mbox_to_pf(hwdev, SPHW_MOD_L2NIC, cmd, buf_in, in_size, buf_out, - out_size, 0, channel); - - return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_L2NIC, cmd, buf_in, in_size, buf_out, - out_size, 0, channel); -} - -int l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - return _l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, - out_size, SPHW_CHANNEL_NIC); -} - -int l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u16 channel) -{ - return _l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size, channel); -} - -/* pf/ppf handler mbox msg from vf */ -int spnic_pf_mbox_handler(void *hwdev, void *pri_handle, - u16 vf_id, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - u32 index, cmd_size = ARRAY_LEN(vf_cmd_handler); - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EFAULT; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - for (index = 0; index < cmd_size; index++) { - if (cmd == vf_cmd_handler[index].cmd) - return vf_cmd_handler[index].handler(nic_cfg, vf_id, buf_in, in_size, - buf_out, out_size); - } - - nic_warn(nic_cfg->dev_hdl, "NO handler for nic cmd(%u) received from vf id: %u\n", - cmd, vf_id); - - return -EINVAL; -} - -void spnic_notify_dcb_state_event(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state) -{ - struct sphw_event_info event_info = {0}; - - /* This is 8 user priority to cos mapping relationships */ - sdk_info(nic_cfg->dev_hdl, "DCB %s, default cos %u, up2cos %u%u%u%u%u%u%u%u\n", - dcb_state->dcb_on ? 
"on" : "off", dcb_state->default_cos, - dcb_state->up_cos[0], dcb_state->up_cos[1], - dcb_state->up_cos[2], dcb_state->up_cos[3], - dcb_state->up_cos[4], dcb_state->up_cos[5], - dcb_state->up_cos[6], dcb_state->up_cos[7]); - - /* Saved in sdk for statefull module */ - spnic_save_dcb_state(nic_cfg, dcb_state); - - event_info.type = SPHW_EVENT_DCB_STATE_CHANGE; - memcpy(&event_info.dcb_state, dcb_state, sizeof(event_info.dcb_state)); - - sphw_event_callback(nic_cfg->hwdev, &event_info); -} - -void dcb_state_event(void *hwdev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct spnic_cmd_vf_dcb_state *vf_dcb; - struct spnic_nic_cfg *nic_cfg; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - vf_dcb = buf_in; - if (!vf_dcb) - return; - - spnic_notify_dcb_state_event(nic_cfg, &vf_dcb->state); -} - -void tx_pause_excp_event_handler(void *hwdev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct nic_cmd_tx_pause_notice *excp_info = buf_in; - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (in_size != sizeof(*excp_info)) { - nic_err(nic_cfg->dev_hdl, "Invalid in_size: %u, should be %ld\n", - in_size, sizeof(*excp_info)); - return; - } - - nic_warn(nic_cfg->dev_hdl, "Receive tx pause exception event, excp: %u, level: %u\n", - excp_info->tx_pause_except, excp_info->except_level); - - sphw_fault_event_report(hwdev, SPHW_FAULT_SRC_TX_PAUSE_EXCP, (u16)excp_info->except_level); -} - -struct nic_event_handler nic_cmd_handler[] = { - { - .cmd = SPNIC_NIC_CMD_VF_COS, - .handler = dcb_state_event, - }, - { - .cmd = SPNIC_NIC_CMD_TX_PAUSE_EXCP_NOTICE, - .handler = tx_pause_excp_event_handler, - }, -}; - -static void _event_handler(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u32 size = sizeof(nic_cmd_handler) / sizeof(struct nic_event_handler); - u32 i; - - if (!hwdev) - return; - - *out_size = 0; - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - for (i = 0; i < size; i++) { - if (cmd == nic_cmd_handler[i].cmd) { - nic_cmd_handler[i].handler(hwdev, buf_in, in_size, buf_out, out_size); - break; - } - } - - /* can't find this event cmd */ - if (i == size) - sdk_warn(nic_cfg->dev_hdl, "Unsupported event cmd(%u) to process\n", - cmd); -} - -/* vf handler mbox msg from ppf/pf */ -/* vf link change event - * vf fault report event, TBD - */ -int spnic_vf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - _event_handler(hwdev, cmd, buf_in, in_size, buf_out, out_size); - return 0; -} - -/* pf/ppf handler mgmt cpu report nic event*/ -void spnic_pf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - _event_handler(hwdev, cmd, buf_in, in_size, buf_out, out_size); -} - -u8 spnic_nic_sw_aeqe_handler(void *hwdev, u8 event, u8 *data) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return 0; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - nic_err(nic_cfg->dev_hdl, "Received nic ucode aeq event type: 0x%x, data: 0x%llx\n", - event, *((u64 *)data)); - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c deleted file mode 100644 index 3f1fb1381844ca73bc37b21c3ee3d7d68232a355..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c +++ /dev/null @@ 
-1,1123 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "spnic_nic_qp.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic.h" -#include "spnic_nic_cmd.h" - -#define SPNIC_DEAULT_TX_CI_PENDING_LIMIT 0 -#define SPNIC_DEAULT_TX_CI_COALESCING_TIME 0 -#define SPNIC_DEAULT_DROP_THD_ON 0xFFFF -#define SPNIC_DEAULT_DROP_THD_OFF 0 - -static unsigned char tx_pending_limit = SPNIC_DEAULT_TX_CI_PENDING_LIMIT; -module_param(tx_pending_limit, byte, 0444); -MODULE_PARM_DESC(tx_pending_limit, "TX CI coalescing parameter pending_limit (default=0)"); - -static unsigned char tx_coalescing_time = SPNIC_DEAULT_TX_CI_COALESCING_TIME; -module_param(tx_coalescing_time, byte, 0444); -MODULE_PARM_DESC(tx_coalescing_time, "TX CI coalescing parameter coalescing_time (default=0)"); - -static unsigned char rq_wqe_type = SPNIC_NORMAL_RQ_WQE; -module_param(rq_wqe_type, byte, 0444); -MODULE_PARM_DESC(rq_wqe_type, "RQ WQE type 0-8Bytes, 1-16Bytes, 2-32Bytes (default=2)"); - -static u32 tx_drop_thd_on = SPNIC_DEAULT_DROP_THD_ON; -module_param(tx_drop_thd_on, uint, 0644); -MODULE_PARM_DESC(tx_drop_thd_on, "TX parameter drop_thd_on (default=0xffff)"); - -static u32 tx_drop_thd_off = SPNIC_DEAULT_DROP_THD_OFF; -module_param(tx_drop_thd_off, uint, 0644); -MODULE_PARM_DESC(tx_drop_thd_off, "TX parameter drop_thd_off (default=0)"); -/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ -#define SPNIC_CI_Q_ADDR_SIZE 64 - -#define CI_TABLE_SIZE(num_qps, pg_sz) (ALIGN((num_qps) * SPNIC_CI_Q_ADDR_SIZE, pg_sz)) - -#define SPNIC_CI_VADDR(base_addr, q_id) ((u8 *)(base_addr) + (q_id) * SPNIC_CI_Q_ADDR_SIZE) - -#define SPNIC_CI_PADDR(base_paddr, q_id) ((base_paddr) + (q_id) * SPNIC_CI_Q_ADDR_SIZE) - -#define WQ_PREFETCH_MAX 4 -#define WQ_PREFETCH_MIN 1 -#define WQ_PREFETCH_THRESHOLD 256 - -#define SPNIC_Q_CTXT_MAX 31 /* (2048 - 8) / 64 */ - -enum spnic_qp_ctxt_type { - SPNIC_QP_CTXT_TYPE_SQ, - SPNIC_QP_CTXT_TYPE_RQ, -}; - -struct spnic_qp_ctxt_header { - u16 num_queues; - u16 queue_type; - u16 start_qid; - u16 rsvd; -}; - -struct spnic_sq_ctxt { - u32 ci_pi; - u32 drop_mode_sp; - u32 wq_pfn_hi_owner; - u32 wq_pfn_lo; - - u32 rsvd0; - u32 pkt_drop_thd; - u32 global_sq_id; - u32 vlan_ceq_attr; - - u32 pref_cache; - u32 pref_ci_owner; - u32 pref_wq_pfn_hi_ci; - u32 pref_wq_pfn_lo; - - u32 rsvd8; - u32 rsvd9; - u32 wq_block_pfn_hi; - u32 wq_block_pfn_lo; -}; - -struct spnic_rq_ctxt { - u32 ci_pi; - u32 ceq_attr; - u32 wq_pfn_hi_type_owner; - u32 wq_pfn_lo; - - u32 rsvd[3]; - u32 cqe_sge_len; - - u32 pref_cache; - u32 pref_ci_owner; - u32 pref_wq_pfn_hi_ci; - u32 pref_wq_pfn_lo; - - u32 pi_paddr_hi; - u32 pi_paddr_lo; - u32 wq_block_pfn_hi; - u32 wq_block_pfn_lo; -}; - -struct spnic_sq_ctxt_block { - struct spnic_qp_ctxt_header cmdq_hdr; - struct spnic_sq_ctxt sq_ctxt[SPNIC_Q_CTXT_MAX]; -}; - -struct spnic_rq_ctxt_block { - struct spnic_qp_ctxt_header cmdq_hdr; - struct spnic_rq_ctxt rq_ctxt[SPNIC_Q_CTXT_MAX]; -}; - -struct spnic_clean_queue_ctxt { - struct spnic_qp_ctxt_header cmdq_hdr; - u32 rsvd; -}; - -#define SQ_CTXT_SIZE(num_sqs) ((u16)(sizeof(struct spnic_qp_ctxt_header) + \ - (num_sqs) * sizeof(struct spnic_sq_ctxt))) - -#define RQ_CTXT_SIZE(num_rqs) ((u16)(sizeof(struct spnic_qp_ctxt_header) + \ - (num_rqs) * sizeof(struct spnic_rq_ctxt))) - -#define CI_IDX_HIGH_SHIFH 12 - 
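/*
 * Sizing sketch for SPNIC_Q_CTXT_MAX above (illustrative, not part of the
 * removed driver): one cmdq buffer carries an 8-byte spnic_qp_ctxt_header
 * followed by 64-byte per-queue contexts. The "(2048 - 8) / 64" comment
 * suggests a 2048-byte buffer; that size and the SKETCH_CMDQ_BUF_SIZE name
 * are assumptions here, not taken from the driver.
 */
#include <linux/build_bug.h>

#define SKETCH_CMDQ_BUF_SIZE 2048 /* assumed cmdq buffer size */

static_assert(sizeof(struct spnic_qp_ctxt_header) == 8);
static_assert(sizeof(struct spnic_sq_ctxt) == 64);
static_assert(sizeof(struct spnic_rq_ctxt) == 64);
/* (2048 - 8) / 64 == 31 queue contexts per cmdq request */
static_assert((SKETCH_CMDQ_BUF_SIZE - sizeof(struct spnic_qp_ctxt_header)) /
	      sizeof(struct spnic_sq_ctxt) == SPNIC_Q_CTXT_MAX);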
-#define CI_HIGN_IDX(val) ((val) >> CI_IDX_HIGH_SHIFH) - -#define SQ_CTXT_PI_IDX_SHIFT 0 -#define SQ_CTXT_CI_IDX_SHIFT 16 - -#define SQ_CTXT_PI_IDX_MASK 0xFFFFU -#define SQ_CTXT_CI_IDX_MASK 0xFFFFU - -#define SQ_CTXT_CI_PI_SET(val, member) (((val) & SQ_CTXT_##member##_MASK) << \ - SQ_CTXT_##member##_SHIFT) - -#define SQ_CTXT_MODE_SP_FLAG_SHIFT 0 -#define SQ_CTXT_MODE_PKT_DROP_SHIFT 1 - -#define SQ_CTXT_MODE_SP_FLAG_MASK 0x1U -#define SQ_CTXT_MODE_PKT_DROP_MASK 0x1U - -#define SQ_CTXT_MODE_SET(val, member) (((val) & SQ_CTXT_MODE_##member##_MASK) << \ - SQ_CTXT_MODE_##member##_SHIFT) - -#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 -#define SQ_CTXT_WQ_PAGE_OWNER_SHIFT 23 - -#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU -#define SQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U - -#define SQ_CTXT_WQ_PAGE_SET(val, member) (((val) & SQ_CTXT_WQ_PAGE_##member##_MASK) << \ - SQ_CTXT_WQ_PAGE_##member##_SHIFT) - -#define SQ_CTXT_PKT_DROP_THD_ON_SHIFT 0 -#define SQ_CTXT_PKT_DROP_THD_OFF_SHIFT 16 - -#define SQ_CTXT_PKT_DROP_THD_ON_MASK 0xFFFFU -#define SQ_CTXT_PKT_DROP_THD_OFF_MASK 0xFFFFU - -#define SQ_CTXT_PKT_DROP_THD_SET(val, member) (((val) & SQ_CTXT_PKT_DROP_##member##_MASK) << \ - SQ_CTXT_PKT_DROP_##member##_SHIFT) - -#define SQ_CTXT_GLOBAL_SQ_ID_SHIFT 0 - -#define SQ_CTXT_GLOBAL_SQ_ID_MASK 0x1FFFU - -#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) (((val) & SQ_CTXT_##member##_MASK) << \ - SQ_CTXT_##member##_SHIFT) - -#define SQ_CTXT_VLAN_TAG_SHIFT 0 -#define SQ_CTXT_VLAN_TYPE_SEL_SHIFT 16 -#define SQ_CTXT_VLAN_INSERT_MODE_SHIFT 19 -#define SQ_CTXT_VLAN_CEQ_EN_SHIFT 23 - -#define SQ_CTXT_VLAN_TAG_MASK 0xFFFFU -#define SQ_CTXT_VLAN_TYPE_SEL_MASK 0x7U -#define SQ_CTXT_VLAN_INSERT_MODE_MASK 0x3U -#define SQ_CTXT_VLAN_CEQ_EN_MASK 0x1U - -#define SQ_CTXT_VLAN_CEQ_SET(val, member) (((val) & SQ_CTXT_VLAN_##member##_MASK) << \ - SQ_CTXT_VLAN_##member##_SHIFT) - -#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 -#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14 -#define SQ_CTXT_PREF_CACHE_MIN_SHIFT 25 - -#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU -#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU -#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU - -#define SQ_CTXT_PREF_CI_HI_SHIFT 0 -#define SQ_CTXT_PREF_OWNER_SHIFT 4 - -#define SQ_CTXT_PREF_CI_HI_MASK 0xFU -#define SQ_CTXT_PREF_OWNER_MASK 0x1U - -#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 -#define SQ_CTXT_PREF_CI_LOW_SHIFT 20 - -#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU -#define SQ_CTXT_PREF_CI_LOW_MASK 0xFFFU - -#define SQ_CTXT_PREF_SET(val, member) (((val) & SQ_CTXT_PREF_##member##_MASK) << \ - SQ_CTXT_PREF_##member##_SHIFT) - -#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 - -#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU - -#define SQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & SQ_CTXT_WQ_BLOCK_##member##_MASK) << \ - SQ_CTXT_WQ_BLOCK_##member##_SHIFT) - -#define RQ_CTXT_PI_IDX_SHIFT 0 -#define RQ_CTXT_CI_IDX_SHIFT 16 - -#define RQ_CTXT_PI_IDX_MASK 0xFFFFU -#define RQ_CTXT_CI_IDX_MASK 0xFFFFU - -#define RQ_CTXT_CI_PI_SET(val, member) (((val) & RQ_CTXT_##member##_MASK) << \ - RQ_CTXT_##member##_SHIFT) - -#define RQ_CTXT_CEQ_ATTR_INTR_SHIFT 21 -#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 31 - -#define RQ_CTXT_CEQ_ATTR_INTR_MASK 0x3FFU -#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U - -#define RQ_CTXT_CEQ_ATTR_SET(val, member) (((val) & RQ_CTXT_CEQ_ATTR_##member##_MASK) << \ - RQ_CTXT_CEQ_ATTR_##member##_SHIFT) - -#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 -#define RQ_CTXT_WQ_PAGE_WQE_TYPE_SHIFT 28 -#define RQ_CTXT_WQ_PAGE_OWNER_SHIFT 31 - -#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU -#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK 0x3U 
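/*
 * Worked example of the mask-and-shift *_SET() helpers defined above: each
 * helper masks its argument to the field width and shifts it to the field
 * offset, and a context word is the bitwise OR of its fields, as
 * spnic_sq_prepare_ctxt() later does for ci_pi. The values and the
 * sketch_ci_pi name are made up for illustration.
 */
static const u32 sketch_ci_pi =
	SQ_CTXT_CI_PI_SET(0x12, CI_IDX) |	/* (0x12 & 0xFFFF) << 16 */
	SQ_CTXT_CI_PI_SET(0x34, PI_IDX);	/* (0x34 & 0xFFFF) << 0  */
/* sketch_ci_pi == 0x00120034 */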
-#define RQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U - -#define RQ_CTXT_WQ_PAGE_SET(val, member) (((val) & RQ_CTXT_WQ_PAGE_##member##_MASK) << \ - RQ_CTXT_WQ_PAGE_##member##_SHIFT) - -#define RQ_CTXT_CQE_LEN_SHIFT 28 - -#define RQ_CTXT_CQE_LEN_MASK 0x3U - -#define RQ_CTXT_CQE_LEN_SET(val, member) (((val) & RQ_CTXT_##member##_MASK) << \ - RQ_CTXT_##member##_SHIFT) - -#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 -#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14 -#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25 - -#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU -#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU -#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU - -#define RQ_CTXT_PREF_CI_HI_SHIFT 0 -#define RQ_CTXT_PREF_OWNER_SHIFT 4 - -#define RQ_CTXT_PREF_CI_HI_MASK 0xFU -#define RQ_CTXT_PREF_OWNER_MASK 0x1U - -#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 -#define RQ_CTXT_PREF_CI_LOW_SHIFT 20 - -#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU -#define RQ_CTXT_PREF_CI_LOW_MASK 0xFFFU - -#define RQ_CTXT_PREF_SET(val, member) (((val) & RQ_CTXT_PREF_##member##_MASK) << \ - RQ_CTXT_PREF_##member##_SHIFT) - -#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 - -#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU - -#define RQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & RQ_CTXT_WQ_BLOCK_##member##_MASK) << \ - RQ_CTXT_WQ_BLOCK_##member##_SHIFT) - -#define SIZE_16BYTES(size) (ALIGN((size), 16) >> 4) - -#define WQ_PAGE_PFN_SHIFT 12 -#define WQ_BLOCK_PFN_SHIFT 9 - -#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT) -#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) - -/* sq and rq */ -#define TOTAL_DB_NUM(num_qps) ((u16)(2 * (num_qps))) - -int spnic_create_sq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq, - u16 q_id, u32 sq_depth, u16 sq_msix_idx) -{ - int err; - - /* sq used & hardware request init 1*/ - sq->owner = 1; - - sq->q_id = q_id; - sq->msix_entry_idx = sq_msix_idx; - - err = sphw_wq_create(nic_cfg->hwdev, &sq->wq, sq_depth, (u16)BIT(SPNIC_SQ_WQEBB_SHIFT)); - if (err) { - sdk_err(nic_cfg->dev_hdl, "Failed to create tx queue(%u) wq\n", - q_id); - return err; - } - - return 0; -} - -void spnic_destroy_sq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq) -{ - sphw_wq_destroy(&sq->wq); -} - -int spnic_create_rq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *rq, - u16 q_id, u32 rq_depth, u16 rq_msix_idx) -{ - int err; - - rq->wqe_type = rq_wqe_type; - rq->q_id = q_id; - rq->msix_entry_idx = rq_msix_idx; - - err = sphw_wq_create(nic_cfg->hwdev, &rq->wq, rq_depth, - (u16)BIT(SPNIC_RQ_WQEBB_SHIFT + rq_wqe_type)); - if (err) { - sdk_err(nic_cfg->dev_hdl, "Failed to create rx queue(%u) wq\n", - q_id); - return err; - } - - rq->rx.pi_virt_addr = dma_alloc_coherent(nic_cfg->dev_hdl, PAGE_SIZE, - &rq->rx.pi_dma_addr, GFP_KERNEL); - if (!rq->rx.pi_virt_addr) { - sphw_wq_destroy(&rq->wq); - nic_err(nic_cfg->dev_hdl, "Failed to allocate rq pi virt addr\n"); - return -ENOMEM; - } - - return 0; -} - -void spnic_destroy_rq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *rq) -{ - dma_free_coherent(nic_cfg->dev_hdl, PAGE_SIZE, rq->rx.pi_virt_addr, - rq->rx.pi_dma_addr); - - sphw_wq_destroy(&rq->wq); -} - -static int create_qp(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq, - struct spnic_io_queue *rq, u16 q_id, u32 sq_depth, - u32 rq_depth, u16 qp_msix_idx) -{ - int err; - - err = spnic_create_sq(nic_cfg, sq, q_id, sq_depth, qp_msix_idx); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to create sq, qid: %u\n", - q_id); - return err; - } - - err = spnic_create_rq(nic_cfg, rq, q_id, rq_depth, qp_msix_idx); 
- if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to create rq, qid: %u\n", - q_id); - goto create_rq_err; - } - - return 0; - -create_rq_err: - spnic_destroy_sq(nic_cfg->hwdev, sq); - - return err; -} - -void destroy_qp(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq, - struct spnic_io_queue *rq) -{ - spnic_destroy_sq(nic_cfg, sq); - spnic_destroy_rq(nic_cfg, rq); -} - -/* try to alloc the expect number of doorbell, and return the actual number - * of doorbell. - */ -static int spnic_doorbell_change(struct spnic_nic_cfg *nic_cfg, u16 dst_num_db) -{ - void __iomem *db_addr = NULL; - u16 cur_db_num = nic_cfg->allocated_num_db; - u16 db_num_gap, idx, i, cur_db_alloc; - int err; - - if (dst_num_db > nic_cfg->allocated_num_db) { - db_num_gap = dst_num_db - nic_cfg->allocated_num_db; - for (idx = 0; idx < db_num_gap; idx++) { - /* we don't use direct wqe for sq */ - err = sphw_alloc_db_addr(nic_cfg->hwdev, &db_addr, NULL); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to alloc sq doorbell addr\n"); - goto alloc_db_err; - } - nic_cfg->db_addr[cur_db_num + idx] = db_addr; - } - - nic_cfg->allocated_num_db = dst_num_db; - } else if (dst_num_db < nic_cfg->allocated_num_db) { - db_num_gap = nic_cfg->allocated_num_db - dst_num_db; - for (idx = 0; idx < db_num_gap; idx++) { - cur_db_alloc = (cur_db_num - idx) - 1; - sphw_free_db_addr(nic_cfg->hwdev, nic_cfg->db_addr[cur_db_alloc], NULL); - nic_cfg->db_addr[cur_db_alloc] = NULL; - } - - nic_cfg->allocated_num_db = dst_num_db; - } - - return 0; - -alloc_db_err: - for (i = 0; i < idx; i++) { - sphw_free_db_addr(nic_cfg->hwdev, nic_cfg->db_addr[cur_db_num + i], NULL); - nic_cfg->db_addr[cur_db_num + i] = NULL; - } - - return -EFAULT; -} - -int spnic_init_nicio_res(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return -EFAULT; - } - - nic_cfg->max_qps = sphw_func_max_qnum(hwdev); - - nic_cfg->allocated_num_db = 0; - nic_cfg->db_addr = kcalloc(TOTAL_DB_NUM(nic_cfg->max_qps), - sizeof(*nic_cfg->db_addr), GFP_KERNEL); - if (!nic_cfg->db_addr) { - nic_err(nic_cfg->dev_hdl, "Failed to alloc db addr array\n"); - return -ENOMEM; - } - - nic_cfg->ci_vaddr_base = - dma_alloc_coherent(nic_cfg->dev_hdl, CI_TABLE_SIZE(nic_cfg->max_qps, PAGE_SIZE), - &nic_cfg->ci_dma_base, GFP_KERNEL); - if (!nic_cfg->ci_vaddr_base) { - kfree(nic_cfg->db_addr); - nic_err(nic_cfg->dev_hdl, "Failed to allocate ci area\n"); - return -ENOMEM; - } - - return 0; -} - -void spnic_deinit_nicio_res(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return; - } - - dma_free_coherent(nic_cfg->dev_hdl, CI_TABLE_SIZE(nic_cfg->max_qps, PAGE_SIZE), - nic_cfg->ci_vaddr_base, nic_cfg->ci_dma_base); - /* free all doorbell */ - spnic_doorbell_change(nic_cfg, 0); - kfree(nic_cfg->db_addr); -} - -int spnic_alloc_qps(void *hwdev, struct irq_info *qps_msix_arry, - struct spnic_dyna_qp_params *qp_params) -{ - struct spnic_io_queue *sqs = NULL; - struct spnic_io_queue *rqs = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 q_id, i, cur_allocated_db, num_qps; - int err; - - if (!hwdev || !qps_msix_arry || !qp_params) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - 
return -EFAULT; - } - - if (qp_params->num_qps > nic_cfg->max_qps) - return -EINVAL; - - num_qps = qp_params->num_qps; - - cur_allocated_db = nic_cfg->allocated_num_db; - if (cur_allocated_db < TOTAL_DB_NUM(num_qps)) { - err = spnic_doorbell_change(nic_cfg, TOTAL_DB_NUM(num_qps)); - if (err) - return err; - } - - sqs = kcalloc(num_qps, sizeof(*sqs), GFP_KERNEL); - if (!sqs) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate sq\n"); - err = -ENOMEM; - goto alloc_sqs_err; - } - - rqs = kcalloc(num_qps, sizeof(*rqs), GFP_KERNEL); - if (!rqs) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate rq\n"); - err = -ENOMEM; - goto alloc_rqs_err; - } - - for (q_id = 0; q_id < num_qps; q_id++) { - err = create_qp(nic_cfg, &sqs[q_id], &rqs[q_id], q_id, - qp_params->sq_depth, qp_params->rq_depth, - qps_msix_arry[q_id].msix_entry_idx); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate qp %u, err: %d\n", - q_id, err); - goto create_qp_err; - } - } - - qp_params->sqs = sqs; - qp_params->rqs = rqs; - - return 0; - -create_qp_err: - for (i = 0; i < q_id; i++) - destroy_qp(nic_cfg, &sqs[i], &rqs[i]); - - kfree(rqs); - -alloc_rqs_err: - kfree(sqs); - -alloc_sqs_err: - /* Only release the newly added doorbell resource, - * the old resource is still in use - */ - spnic_doorbell_change(nic_cfg, cur_allocated_db); - - return err; -} - -void spnic_free_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u16 q_id; - - if (!hwdev || !qp_params) - return; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return; - } - - for (q_id = 0; q_id < qp_params->num_qps; q_id++) - destroy_qp(nic_cfg, &qp_params->sqs[q_id], - &qp_params->rqs[q_id]); - - kfree(qp_params->sqs); - kfree(qp_params->rqs); -} - -void init_qps_info(struct spnic_nic_cfg *nic_cfg, struct spnic_dyna_qp_params *qp_params) -{ - struct spnic_io_queue *sqs = qp_params->sqs; - struct spnic_io_queue *rqs = qp_params->rqs; - u16 q_id; - - nic_cfg->num_qps = qp_params->num_qps; - nic_cfg->sq = qp_params->sqs; - nic_cfg->rq = qp_params->rqs; - for (q_id = 0; q_id < nic_cfg->num_qps; q_id++) { - sqs[q_id].tx.cons_idx_addr = SPNIC_CI_VADDR(nic_cfg->ci_vaddr_base, q_id); - /* clear ci value */ - *(u16 *)sqs[q_id].tx.cons_idx_addr = 0; - sqs[q_id].db_addr = nic_cfg->db_addr[q_id]; - - /* The first num_qps doorbell is used by sq */ - rqs[q_id].db_addr = nic_cfg->db_addr[nic_cfg->num_qps + q_id]; - } -} - -int spnic_init_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !qp_params) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return -EFAULT; - } - - if (nic_cfg->allocated_num_db > TOTAL_DB_NUM(qp_params->num_qps)) - spnic_doorbell_change(nic_cfg, TOTAL_DB_NUM(qp_params->num_qps)); - - init_qps_info(nic_cfg, qp_params); - - return spnic_init_qp_ctxts(hwdev); -} - -void spnic_deinit_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !qp_params) - return; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return; - } - - qp_params->sqs = nic_cfg->sq; - qp_params->rqs = nic_cfg->rq; - qp_params->num_qps = nic_cfg->num_qps; - - spnic_free_qp_ctxts(hwdev); -} - -int spnic_create_qps(void *hwdev, u16 num_qp, u32 sq_depth, u32 
rq_depth, - struct irq_info *qps_msix_arry) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_dyna_qp_params qp_params = {0}; - int err; - - if (!hwdev || !qps_msix_arry) - return -EFAULT; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return -EFAULT; - } - - err = spnic_init_nicio_res(hwdev); - if (err) - return err; - - qp_params.num_qps = num_qp; - qp_params.sq_depth = sq_depth; - qp_params.rq_depth = rq_depth; - err = spnic_alloc_qps(hwdev, qps_msix_arry, &qp_params); - if (err) { - spnic_deinit_nicio_res(hwdev); - nic_err(nic_cfg->dev_hdl, "Failed to allocate qps, err: %d\n", err); - return err; - } - - init_qps_info(nic_cfg, &qp_params); - - return 0; -} - -void spnic_destroy_qps(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_dyna_qp_params qp_params = {0}; - - if (!hwdev) - return; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) - return; - - spnic_deinit_qps(hwdev, &qp_params); - spnic_free_qps(hwdev, &qp_params); - spnic_deinit_nicio_res(hwdev); -} - -void *spnic_get_nic_queue(void *hwdev, u16 q_id, enum spnic_queue_type q_type) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || q_type >= SPNIC_MAX_QUEUE_TYPE) - return NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) - return NULL; - - return ((q_type == SPNIC_SQ) ? &nic_cfg->sq[q_id] : &nic_cfg->rq[q_id]); -} - -void spnic_qp_prepare_cmdq_header(struct spnic_qp_ctxt_header *qp_ctxt_hdr, - enum spnic_qp_ctxt_type ctxt_type, - u16 num_queues, u16 q_id) -{ - qp_ctxt_hdr->queue_type = ctxt_type; - qp_ctxt_hdr->num_queues = num_queues; - qp_ctxt_hdr->start_qid = q_id; - qp_ctxt_hdr->rsvd = 0; - - sphw_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); -} - -void spnic_sq_prepare_ctxt(struct spnic_io_queue *sq, u16 sq_id, struct spnic_sq_ctxt *sq_ctxt) -{ - u64 wq_page_addr; - u64 wq_page_pfn, wq_block_pfn; - u32 wq_page_pfn_hi, wq_page_pfn_lo; - u32 wq_block_pfn_hi, wq_block_pfn_lo; - u16 pi_start, ci_start; - - ci_start = spnic_get_sq_local_ci(sq); - pi_start = spnic_get_sq_local_pi(sq); - - wq_page_addr = sphw_wq_get_first_wqe_page_addr(&sq->wq); - - wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); - wq_page_pfn_hi = upper_32_bits(wq_page_pfn); - wq_page_pfn_lo = lower_32_bits(wq_page_pfn); - - wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr); - wq_block_pfn_hi = upper_32_bits(wq_block_pfn); - wq_block_pfn_lo = lower_32_bits(wq_block_pfn); - - sq_ctxt->ci_pi = - SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | - SQ_CTXT_CI_PI_SET(pi_start, PI_IDX); - - sq_ctxt->drop_mode_sp = - SQ_CTXT_MODE_SET(0, SP_FLAG) | - SQ_CTXT_MODE_SET(0, PKT_DROP); - - sq_ctxt->wq_pfn_hi_owner = - SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | - SQ_CTXT_WQ_PAGE_SET(1, OWNER); - - sq_ctxt->wq_pfn_lo = wq_page_pfn_lo; - - /* TO DO */ - sq_ctxt->pkt_drop_thd = - SQ_CTXT_PKT_DROP_THD_SET(tx_drop_thd_on, THD_ON) | - SQ_CTXT_PKT_DROP_THD_SET(tx_drop_thd_off, THD_OFF); - - sq_ctxt->global_sq_id = - SQ_CTXT_GLOBAL_QUEUE_ID_SET(sq_id, GLOBAL_SQ_ID); - - /* enable insert c-vlan in default */ - sq_ctxt->vlan_ceq_attr = - SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) | - SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE); - - sq_ctxt->rsvd0 = 0; - - sq_ctxt->pref_cache = - SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | - SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | - SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); - - sq_ctxt->pref_ci_owner = - SQ_CTXT_PREF_SET(CI_HIGN_IDX(ci_start), CI_HI) | - SQ_CTXT_PREF_SET(1, 
OWNER); - - sq_ctxt->pref_wq_pfn_hi_ci = - SQ_CTXT_PREF_SET(ci_start, CI_LOW) | - SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI); - - sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; - - sq_ctxt->wq_block_pfn_hi = SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); - - sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; - - sphw_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); -} - -void spnic_rq_prepare_ctxt(struct spnic_io_queue *rq, struct spnic_rq_ctxt *rq_ctxt) -{ - u64 wq_page_addr; - u64 wq_page_pfn, wq_block_pfn; - u32 wq_page_pfn_hi, wq_page_pfn_lo; - u32 wq_block_pfn_hi, wq_block_pfn_lo; - u16 pi_start, ci_start; - u16 wqe_type = rq->wqe_type; - - /* RQ depth is in unit of 8Bytes */ - ci_start = (u16)((u32)spnic_get_rq_local_ci(rq) << wqe_type); - pi_start = (u16)((u32)spnic_get_rq_local_pi(rq) << wqe_type); - - wq_page_addr = sphw_wq_get_first_wqe_page_addr(&rq->wq); - - wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); - wq_page_pfn_hi = upper_32_bits(wq_page_pfn); - wq_page_pfn_lo = lower_32_bits(wq_page_pfn); - - wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr); - wq_block_pfn_hi = upper_32_bits(wq_block_pfn); - wq_block_pfn_lo = lower_32_bits(wq_block_pfn); - - rq_ctxt->ci_pi = - RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | - RQ_CTXT_CI_PI_SET(pi_start, PI_IDX); - - rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) | - RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR); - - rq_ctxt->wq_pfn_hi_type_owner = - RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | - RQ_CTXT_WQ_PAGE_SET(1, OWNER); - - switch (wqe_type) { - case SPNIC_EXTEND_RQ_WQE: - /* use 32Byte WQE with SGE for CQE */ - rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(0, WQE_TYPE); - break; - case SPNIC_NORMAL_RQ_WQE: - /* use 16Byte WQE with 32Bytes SGE for CQE */ - rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE); - rq_ctxt->cqe_sge_len = RQ_CTXT_CQE_LEN_SET(1, CQE_LEN); - break; - default: - pr_err("Invalid rq wqe type: %u", wqe_type); - } - - rq_ctxt->wq_pfn_lo = wq_page_pfn_lo; - - rq_ctxt->pref_cache = - RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | - RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | - RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); - - rq_ctxt->pref_ci_owner = - RQ_CTXT_PREF_SET(CI_HIGN_IDX(ci_start), CI_HI) | - RQ_CTXT_PREF_SET(1, OWNER); - - rq_ctxt->pref_wq_pfn_hi_ci = - RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) | - RQ_CTXT_PREF_SET(ci_start, CI_LOW); - - rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; - - rq_ctxt->pi_paddr_hi = upper_32_bits(rq->rx.pi_dma_addr); - rq_ctxt->pi_paddr_lo = lower_32_bits(rq->rx.pi_dma_addr); - - rq_ctxt->wq_block_pfn_hi = RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); - - rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; - - sphw_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); -} - -static int init_sq_ctxts(struct spnic_nic_cfg *nic_cfg) -{ - struct spnic_sq_ctxt_block *sq_ctxt_block = NULL; - struct spnic_sq_ctxt *sq_ctxt = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - struct spnic_io_queue *sq = NULL; - u64 out_param = 0; - u16 q_id, curr_id, max_ctxts, i; - int err = 0; - - cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - q_id = 0; - while (q_id < nic_cfg->num_qps) { - sq_ctxt_block = cmd_buf->buf; - sq_ctxt = sq_ctxt_block->sq_ctxt; - - max_ctxts = (nic_cfg->num_qps - q_id) > SPNIC_Q_CTXT_MAX ? 
- SPNIC_Q_CTXT_MAX : (nic_cfg->num_qps - q_id); - - spnic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr, SPNIC_QP_CTXT_TYPE_SQ, - max_ctxts, q_id); - - for (i = 0; i < max_ctxts; i++) { - curr_id = q_id + i; - sq = &nic_cfg->sq[curr_id]; - - spnic_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]); - } - - cmd_buf->size = SQ_CTXT_SIZE(max_ctxts); - - err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, - SPNIC_UCODE_CMD_MODIFY_QUEUE_CTX, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - if (err || out_param != 0) { - nic_err(nic_cfg->dev_hdl, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n", - err, out_param); - - err = -EFAULT; - break; - } - - q_id += max_ctxts; - } - - sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); - - return err; -} - -static int init_rq_ctxts(struct spnic_nic_cfg *nic_cfg) -{ - struct spnic_rq_ctxt_block *rq_ctxt_block = NULL; - struct spnic_rq_ctxt *rq_ctxt = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - struct spnic_io_queue *rq = NULL; - u64 out_param = 0; - u16 q_id, curr_id, max_ctxts, i; - int err = 0; - - cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - q_id = 0; - while (q_id < nic_cfg->num_qps) { - rq_ctxt_block = cmd_buf->buf; - rq_ctxt = rq_ctxt_block->rq_ctxt; - - max_ctxts = (nic_cfg->num_qps - q_id) > SPNIC_Q_CTXT_MAX ? - SPNIC_Q_CTXT_MAX : (nic_cfg->num_qps - q_id); - - spnic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr, SPNIC_QP_CTXT_TYPE_RQ, - max_ctxts, q_id); - - for (i = 0; i < max_ctxts; i++) { - curr_id = q_id + i; - rq = &nic_cfg->rq[curr_id]; - - spnic_rq_prepare_ctxt(rq, &rq_ctxt[i]); - } - - cmd_buf->size = RQ_CTXT_SIZE(max_ctxts); - - err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, - SPNIC_UCODE_CMD_MODIFY_QUEUE_CTX, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - - if (err || out_param != 0) { - nic_err(nic_cfg->dev_hdl, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n", - err, out_param); - - err = -EFAULT; - break; - } - - q_id += max_ctxts; - } - - sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); - - return err; -} - -static int init_qp_ctxts(struct spnic_nic_cfg *nic_cfg) -{ - int err; - - err = init_sq_ctxts(nic_cfg); - if (err) - return err; - - err = init_rq_ctxts(nic_cfg); - if (err) - return err; - - return 0; -} - -static int clean_queue_offload_ctxt(struct spnic_nic_cfg *nic_cfg, - enum spnic_qp_ctxt_type ctxt_type) -{ - struct spnic_clean_queue_ctxt *ctxt_block = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - u64 out_param = 0; - int err; - - cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - ctxt_block = cmd_buf->buf; - ctxt_block->cmdq_hdr.num_queues = nic_cfg->max_qps; - ctxt_block->cmdq_hdr.queue_type = ctxt_type; - ctxt_block->cmdq_hdr.start_qid = 0; - - sphw_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); - - cmd_buf->size = sizeof(*ctxt_block); - - err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, - SPNIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - - if ((err) || (out_param)) { - nic_err(nic_cfg->dev_hdl, "Failed to clean queue offload ctxts, err: %d,out_param: 0x%llx\n", - err, out_param); - - err = -EFAULT; - } - - sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); - - return err; -} - -static int clean_qp_offload_ctxt(struct spnic_nic_cfg *nic_cfg) -{ - /* clean LRO/TSO context space */ - return (clean_queue_offload_ctxt(nic_cfg, SPNIC_QP_CTXT_TYPE_SQ) || - 
clean_queue_offload_ctxt(nic_cfg, SPNIC_QP_CTXT_TYPE_RQ));
-}
-
-/* init qp ctxts, set sq ci attr and arm all sqs */
-int spnic_init_qp_ctxts(void *hwdev)
-{
-	struct spnic_nic_cfg *nic_cfg = NULL;
-	struct spnic_sq_attr sq_attr;
-	u32 rq_depth;
-	u16 q_id;
-	int err;
-
-	if (!hwdev)
-		return -EINVAL;
-
-	nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC);
-	if (!nic_cfg)
-		return -EFAULT;
-
-	err = init_qp_ctxts(nic_cfg);
-	if (err) {
-		nic_err(nic_cfg->dev_hdl, "Failed to init QP ctxts\n");
-		return err;
-	}
-
-	/* clean LRO/TSO context space */
-	err = clean_qp_offload_ctxt(nic_cfg);
-	if (err) {
-		nic_err(nic_cfg->dev_hdl, "Failed to clean qp offload ctxts\n");
-		return err;
-	}
-
-	rq_depth = nic_cfg->rq[0].wq.q_depth << nic_cfg->rq[0].wqe_type;
-
-	err = sphw_set_root_ctxt(hwdev, rq_depth, nic_cfg->sq[0].wq.q_depth,
-				 nic_cfg->rx_buff_len, SPHW_CHANNEL_NIC);
-	if (err) {
-		nic_err(nic_cfg->dev_hdl, "Failed to set root context\n");
-		return err;
-	}
-
-	for (q_id = 0; q_id < nic_cfg->num_qps; q_id++) {
-		sq_attr.ci_dma_base = SPNIC_CI_PADDR(nic_cfg->ci_dma_base, q_id) >> 2;
-		sq_attr.pending_limit = tx_pending_limit;
-		sq_attr.coalescing_time = tx_coalescing_time;
-		sq_attr.intr_en = 1;
-		sq_attr.intr_idx = nic_cfg->sq[q_id].msix_entry_idx;
-		sq_attr.l2nic_sqn = q_id;
-		sq_attr.dma_attr_off = 0;
-		err = spnic_set_ci_table(hwdev, &sq_attr);
-		if (err) {
-			nic_err(nic_cfg->dev_hdl, "Failed to set ci table\n");
-			goto set_cons_idx_table_err;
-		}
-	}
-
-	return 0;
-
-set_cons_idx_table_err:
-	sphw_clean_root_ctxt(hwdev, SPHW_CHANNEL_NIC);
-
-	return err;
-}
-
-void spnic_free_qp_ctxts(void *hwdev)
-{
-	if (!hwdev)
-		return;
-
-	sphw_clean_root_ctxt(hwdev, SPHW_CHANNEL_NIC);
-}
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h
deleted file mode 100644
index e237ba33d82d1cd26c9ac9eee38c7583338b18c4..0000000000000000000000000000000000000000
--- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h
+++ /dev/null
@@ -1,305 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
-
-#ifndef SPNIC_NIC_IO_H
-#define SPNIC_NIC_IO_H
-
-#include "sphw_crm.h"
-#include "sphw_common.h"
-#include "sphw_wq.h"
-
-#define SPNIC_MAX_TX_QUEUE_DEPTH	65536
-#define SPNIC_MAX_RX_QUEUE_DEPTH	16384
-
-#define SPNIC_MIN_QUEUE_DEPTH		128
-
-#define SPNIC_SQ_WQEBB_SHIFT		4
-#define SPNIC_RQ_WQEBB_SHIFT		3
-
-#define SPNIC_SQ_WQEBB_SIZE		BIT(SPNIC_SQ_WQEBB_SHIFT)
-#define SPNIC_CQE_SIZE_SHIFT		4
-
-enum spnic_rq_wqe_type {
-	SPNIC_COMPACT_RQ_WQE,
-	SPNIC_NORMAL_RQ_WQE,
-	SPNIC_EXTEND_RQ_WQE,
-};
-
-struct spnic_io_queue {
-	struct sphw_wq wq;
-	union {
-		u8 wqe_type;	/* for rq */
-		u8 owner;	/* for sq */
-	};
-
-	u16 q_id;
-	u16 msix_entry_idx;
-
-	u8 __iomem *db_addr;
-
-	union {
-		struct {
-			void *cons_idx_addr;
-		} tx;
-
-		struct {
-			u16 *pi_virt_addr;
-			dma_addr_t pi_dma_addr;
-		} rx;
-	};
-} ____cacheline_aligned;
-
-struct spnic_nic_db {
-	u32 db_info;
-	u32 pi_hi;
-};
-
-/* *
- * @brief spnic_get_sq_free_wqebbs - get send queue free wqebbs
- * @param sq: send queue
- * @retval : number of free wqebbs
- */
-static inline u16 spnic_get_sq_free_wqebbs(struct spnic_io_queue *sq)
-{
-	return sphw_wq_free_wqebbs(&sq->wq);
-}
-
-/* *
- * @brief spnic_update_sq_local_ci - update send queue local consumer index
- * @param sq: send queue
- * @param wqebb_cnt: number of wqebbs
- */
-static inline void spnic_update_sq_local_ci(struct spnic_io_queue *sq, u16 wqebb_cnt)
-{
-	sphw_wq_put_wqebbs(&sq->wq, wqebb_cnt);
-}
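For context, a minimal sketch of how a transmit path might combine the two helpers above: check the free WQEBB count before building a descriptor, and give the same number of WQEBBs back once the hardware consumer-index snapshot has advanced. spnic_tx_try_post() and spnic_tx_reclaim() are hypothetical names invented for illustration; only struct spnic_io_queue and the helpers from this header are assumed.

/* Illustrative sketch only; not part of the removed driver. */
static int spnic_tx_try_post(struct spnic_io_queue *sq, u16 nr_wqebbs)
{
	/* Each SQ WQEBB is 16 bytes (SPNIC_SQ_WQEBB_SIZE), so a WQE with
	 * several buffer descriptors spans several WQEBBs.
	 */
	if (spnic_get_sq_free_wqebbs(sq) < nr_wqebbs)
		return -EBUSY;	/* ring full: caller would stop the txq */

	/* ... reserve the WQEBBs, build the WQE, ring the doorbell ... */

	return 0;
}

/* On tx completion, hardware has advanced the CI snapshot behind
 * sq->tx.cons_idx_addr; the driver reclaims the WQEBBs it consumed.
 */
static void spnic_tx_reclaim(struct spnic_io_queue *sq, u16 nr_wqebbs)
{
	spnic_update_sq_local_ci(sq, nr_wqebbs);
}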
- -/* * - * @brief spnic_get_sq_local_ci - get send queue local consumer index - * @param sq: send queue - * @retval : local consumer index - */ -static inline u16 spnic_get_sq_local_ci(struct spnic_io_queue *sq) -{ - return WQ_MASK_IDX(&sq->wq, sq->wq.cons_idx); -} - -/* * - * @brief spnic_get_sq_local_pi - get send queue local producer index - * @param sq: send queue - * @retval : local producer index - */ -static inline u16 spnic_get_sq_local_pi(struct spnic_io_queue *sq) -{ - return WQ_MASK_IDX(&sq->wq, sq->wq.prod_idx); -} - -/* * - * @brief spnic_get_sq_hw_ci - get send queue hardware consumer index - * @param sq: send queue - * @retval : hardware consumer index - */ -static inline u16 spnic_get_sq_hw_ci(struct spnic_io_queue *sq) -{ - return WQ_MASK_IDX(&sq->wq, *(u16 *)sq->tx.cons_idx_addr); -} - -/* * - * @brief spnic_get_sq_one_wqebb - get send queue wqe with single wqebb - * @param sq: send queue - * @param pi: return current pi - * @retval : wqe base address - */ -static inline void *spnic_get_sq_one_wqebb(struct spnic_io_queue *sq, u16 *pi) -{ - return sphw_wq_get_one_wqebb(&sq->wq, pi); -} - -/* * - * @brief spnic_get_sq_multi_wqebb - get send queue wqe with multiple wqebbs - * @param sq: send queue - * @param wqebb_cnt: wqebb counter - * @param pi: return current pi - * @param second_part_wqebbs_addr: second part wqebbs base address - * @param first_part_wqebbs_num: number wqebbs of first part - * @retval : first part wqebbs base address - */ -static inline void *spnic_get_sq_multi_wqebbs(struct spnic_io_queue *sq, u16 wqebb_cnt, u16 *pi, - void **second_part_wqebbs_addr, - u16 *first_part_wqebbs_num) -{ - return sphw_wq_get_multi_wqebbs(&sq->wq, wqebb_cnt, pi, second_part_wqebbs_addr, - first_part_wqebbs_num); -} - -/* * - * @brief spnic_get_and_update_sq_owner - get and update send queue owner bit - * @param sq: send queue - * @param curr_pi: current pi - * @param wqebb_cnt: wqebb counter - * @retval : owner bit - */ -static inline u16 spnic_get_and_update_sq_owner(struct spnic_io_queue *sq, - u16 curr_pi, u16 wqebb_cnt) -{ - u16 owner = sq->owner; - - if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth)) - sq->owner = !sq->owner; - - return owner; -} - -/* * - * @brief spnic_get_sq_wqe_with_owner - get send queue wqe with owner - * @param sq: send queue - * @param wqebb_cnt: wqebb counter - * @param pi: return current pi - * @param owner: return owner bit - * @param second_part_wqebbs_addr: second part wqebbs base address - * @param first_part_wqebbs_num: number wqebbs of first part - * @retval : first part wqebbs base address - */ -static inline void *spnic_get_sq_wqe_with_owner(struct spnic_io_queue *sq, - u16 wqebb_cnt, u16 *pi, u16 *owner, - void **second_part_wqebbs_addr, - u16 *first_part_wqebbs_num) -{ - void *wqe = sphw_wq_get_multi_wqebbs(&sq->wq, wqebb_cnt, pi, second_part_wqebbs_addr, - first_part_wqebbs_num); - - *owner = sq->owner; - if (unlikely(*pi + wqebb_cnt >= sq->wq.q_depth)) - sq->owner = !sq->owner; - - return wqe; -} - -/* * - * @brief spnic_rollback_sq_wqebbs - rollback send queue wqe - * @param sq: send queue - * @param wqebb_cnt: wqebb counter - * @param owner: owner bit - */ -static inline void spnic_rollback_sq_wqebbs(struct spnic_io_queue *sq, u16 wqebb_cnt, u16 owner) -{ - if (owner != sq->owner) - sq->owner = owner; - sq->wq.prod_idx -= wqebb_cnt; -} - -/* * - * @brief spnic_rq_wqe_addr - get receive queue wqe address by queue index - * @param rq: receive queue - * @param idx: wq index - * @retval: wqe base address - */ -static inline void 
*spnic_rq_wqe_addr(struct spnic_io_queue *rq, u16 idx)
-{
-	return sphw_wq_wqebb_addr(&rq->wq, idx);
-}
-
-/* *
- * @brief spnic_update_rq_hw_pi - update receive queue hardware pi
- * @param rq: receive queue
- * @param pi: pi
- */
-static inline void spnic_update_rq_hw_pi(struct spnic_io_queue *rq, u16 pi)
-{
-	*rq->rx.pi_virt_addr = cpu_to_be16((pi & rq->wq.idx_mask) << rq->wqe_type);
-}
-
-/* *
- * @brief spnic_update_rq_local_ci - update receive queue local consumer index
- * @param rq: receive queue
- * @param wqebb_cnt: number of wqebbs
- */
-static inline void spnic_update_rq_local_ci(struct spnic_io_queue *rq, u16 wqebb_cnt)
-{
-	sphw_wq_put_wqebbs(&rq->wq, wqebb_cnt);
-}
-
-/* *
- * @brief spnic_get_rq_local_ci - get receive queue local ci
- * @param rq: receive queue
- * @retval: receive queue local ci
- */
-static inline u16 spnic_get_rq_local_ci(struct spnic_io_queue *rq)
-{
-	return WQ_MASK_IDX(&rq->wq, rq->wq.cons_idx);
-}
-
-/* *
- * @brief spnic_get_rq_local_pi - get receive queue local pi
- * @param rq: receive queue
- * @retval: receive queue local pi
- */
-static inline u16 spnic_get_rq_local_pi(struct spnic_io_queue *rq)
-{
-	return WQ_MASK_IDX(&rq->wq, rq->wq.prod_idx);
-}
-
-/* ******************** DB INFO ******************** */
-#define DB_INFO_QID_SHIFT		0
-#define DB_INFO_NON_FILTER_SHIFT	22
-#define DB_INFO_CFLAG_SHIFT		23
-#define DB_INFO_COS_SHIFT		24
-#define DB_INFO_TYPE_SHIFT		27
-
-#define DB_INFO_QID_MASK		0x1FFFU
-#define DB_INFO_NON_FILTER_MASK		0x1U
-#define DB_INFO_CFLAG_MASK		0x1U
-#define DB_INFO_COS_MASK		0x7U
-#define DB_INFO_TYPE_MASK		0x1FU
-#define DB_INFO_SET(val, member)	\
-	(((u32)(val) & DB_INFO_##member##_MASK) << \
-	DB_INFO_##member##_SHIFT)
-
-#define DB_PI_LOW_MASK		0xFFU
-#define DB_PI_HIGH_MASK		0xFFU
-#define DB_PI_LOW(pi)		((pi) & DB_PI_LOW_MASK)
-#define DB_PI_HI_SHIFT		8
-#define DB_PI_HIGH(pi)		(((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK)
-#define DB_ADDR(queue, pi)	((u64 *)((queue)->db_addr) + DB_PI_LOW(pi))
-#define SRC_TYPE		1
-
-/* CFLAG_DATA_PATH */
-#define SQ_CFLAG_DP		0
-#define RQ_CFLAG_DP		1
-/* *
- * @brief spnic_write_db - write doorbell
- * @param queue: nic io queue
- * @param cos: cos index
- * @param cflag: 0--sq, 1--rq
- * @param pi: producer index
- */
-static inline void spnic_write_db(struct spnic_io_queue *queue, int cos, u8 cflag, u16 pi)
-{
-	struct spnic_nic_db db;
-
-	db.db_info = DB_INFO_SET(SRC_TYPE, TYPE) | DB_INFO_SET(cflag, CFLAG) |
-		     DB_INFO_SET(cos, COS) | DB_INFO_SET(queue->q_id, QID);
-	db.pi_hi = DB_PI_HIGH(pi);
-
-	wmb();	/* Write all before the doorbell */
-
-	writeq(*((u64 *)&db), DB_ADDR(queue, pi));
-}
-
-struct spnic_dyna_qp_params {
-	u16 num_qps;
-	u32 sq_depth;
-	u32 rq_depth;
-
-	struct spnic_io_queue *sqs;
-	struct spnic_io_queue *rqs;
-};
-
-int spnic_alloc_qps(void *hwdev, struct irq_info *qps_msix_arry,
-		    struct spnic_dyna_qp_params *qp_params);
-void spnic_free_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params);
-int spnic_init_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params);
-void spnic_deinit_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params);
-int spnic_init_nicio_res(void *hwdev);
-void spnic_deinit_nicio_res(void *hwdev);
-#endif
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h
deleted file mode 100644
index a8abdc1734d30cf5f715c71cda4f35d005d46a0a..0000000000000000000000000000000000000000
--- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h
+++ /dev/null
@@ -1,416 +0,0 @@
-/* SPDX-License-Identifier: 
GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_QP_H -#define SPNIC_NIC_QP_H - -#include "sphw_common.h" - -#define TX_MSS_DEFAULT 0x3E00 -#define TX_MSS_MIN 0x50 - -#define SPNIC_MAX_SQ_SGE 18 - -#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0 -#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_SHIFT 5 -#define RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_SHIFT 7 -#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_SHIFT 8 -#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19 -#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21 -#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24 - -#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0x1FU -#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK 0x3U -#define RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_MASK 0x1U -#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK 0xFU -#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U -#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U -#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU - -#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \ - (((val) >> RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \ - RQ_CQE_OFFOLAD_TYPE_##member##_MASK) - -#define SPNIC_GET_RX_PKT_TYPE(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) -#define SPNIC_GET_RX_IP_TYPE(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE) -#define SPNIC_GET_RX_ENC_L3_TYPE(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, ENC_L3_TYPE) -#define SPNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT) - -#define SPNIC_GET_RX_PKT_UMBCAST(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST) - -#define SPNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN) - -#define SPNIC_GET_RSS_TYPES(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE) - -#define RQ_CQE_SGE_VLAN_SHIFT 0 -#define RQ_CQE_SGE_LEN_SHIFT 16 - -#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU -#define RQ_CQE_SGE_LEN_MASK 0xFFFFU - -#define RQ_CQE_SGE_GET(val, member) \ - (((val) >> RQ_CQE_SGE_##member##_SHIFT) & RQ_CQE_SGE_##member##_MASK) - -#define SPNIC_GET_RX_VLAN_TAG(vlan_len) RQ_CQE_SGE_GET(vlan_len, VLAN) - -#define SPNIC_GET_RX_PKT_LEN(vlan_len) RQ_CQE_SGE_GET(vlan_len, LEN) - -#define RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 -#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16 -#define RQ_CQE_STATUS_LRO_PUSH_SHIFT 25 -#define RQ_CQE_STATUS_LRO_ENTER_SHIFT 26 -#define RQ_CQE_STATUS_LRO_INTR_SHIFT 27 - -#define RQ_CQE_STATUS_BP_EN_SHIFT 30 -#define RQ_CQE_STATUS_RXDONE_SHIFT 31 -#define RQ_CQE_STATUS_DECRY_PKT_SHIFT 29 -#define RQ_CQE_STATUS_FLUSH_SHIFT 28 - -#define RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU -#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU -#define RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U -#define RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U -#define RQ_CQE_STATUS_LRO_INTR_MASK 0X1U -#define RQ_CQE_STATUS_BP_EN_MASK 0X1U -#define RQ_CQE_STATUS_RXDONE_MASK 0x1U -#define RQ_CQE_STATUS_FLUSH_MASK 0x1U -#define RQ_CQE_STATUS_DECRY_PKT_MASK 0x1U - -#define RQ_CQE_STATUS_GET(val, member) \ - (((val) >> RQ_CQE_STATUS_##member##_SHIFT) & \ - RQ_CQE_STATUS_##member##_MASK) - -#define SPNIC_GET_RX_CSUM_ERR(status) RQ_CQE_STATUS_GET(status, CSUM_ERR) - -#define SPNIC_GET_RX_DONE(status) RQ_CQE_STATUS_GET(status, RXDONE) - -#define SPNIC_GET_RX_FLUSH(status) RQ_CQE_STATUS_GET(status, FLUSH) - -#define SPNIC_GET_RX_BP_EN(status) RQ_CQE_STATUS_GET(status, BP_EN) - -#define SPNIC_GET_RX_NUM_LRO(status) RQ_CQE_STATUS_GET(status, NUM_LRO) - -#define SPNIC_RX_IS_DECRY_PKT(status) RQ_CQE_STATUS_GET(status, DECRY_PKT) - -#define RQ_CQE_SUPER_CQE_EN_SHIFT 0 -#define 
RQ_CQE_PKT_NUM_SHIFT 1 -#define RQ_CQE_PKT_LAST_LEN_SHIFT 6 -#define RQ_CQE_PKT_FIRST_LEN_SHIFT 19 - -#define RQ_CQE_SUPER_CQE_EN_MASK 0x1 -#define RQ_CQE_PKT_NUM_MASK 0x1FU -#define RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU -#define RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU - -#define RQ_CQE_PKT_NUM_GET(val, member) \ - (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK) -#define SPNIC_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM) - -#define RQ_CQE_SUPER_CQE_EN_GET(val, member) \ - (((val) >> RQ_CQE_##member##_SHIFT) & RQ_CQE_##member##_MASK) -#define SPNIC_GET_SUPER_CQE_EN(pkt_info) \ - RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN) - -#define RQ_CQE_PKT_LEN_GET(val, member) \ - (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK) - -#define RQ_CQE_DECRY_INFO_DECRY_STATUS_SHIFT 8 -#define RQ_CQE_DECRY_INFO_ESP_NEXT_HEAD_SHIFT 0 - -#define RQ_CQE_DECRY_INFO_DECRY_STATUS_MASK 0xFFU -#define RQ_CQE_DECRY_INFO_ESP_NEXT_HEAD_MASK 0xFFU - -#define RQ_CQE_DECRY_INFO_GET(val, member) \ - (((val) >> RQ_CQE_DECRY_INFO_##member##_SHIFT) & \ - RQ_CQE_DECRY_INFO_##member##_MASK) - -#define SPNIC_GET_DECRYPT_STATUS(decry_info) \ - RQ_CQE_DECRY_INFO_GET(decry_info, DECRY_STATUS) - -#define SPNIC_GET_ESP_NEXT_HEAD(decry_info) \ - RQ_CQE_DECRY_INFO_GET(decry_info, ESP_NEXT_HEAD) - -struct spnic_rq_cqe { - u32 status; - u32 vlan_len; - - u32 offload_type; - u32 hash_val; - u32 xid; - u32 decrypt_info; - u32 rsvd6; - u32 pkt_info; -}; - -struct spnic_sge_sect { - struct sphw_sge sge; - u32 rsvd; -}; - -struct spnic_rq_extend_wqe { - struct spnic_sge_sect buf_desc; - struct spnic_sge_sect cqe_sect; -}; - -struct spnic_rq_normal_wqe { - u32 buf_hi_addr; - u32 buf_lo_addr; - u32 cqe_hi_addr; - u32 cqe_lo_addr; -}; - -struct spnic_rq_wqe { - union { - struct spnic_rq_normal_wqe normal_wqe; - struct spnic_rq_extend_wqe extend_wqe; - }; -}; - -struct spnic_sq_wqe_desc { - u32 ctrl_len; - u32 queue_info; - u32 hi_addr; - u32 lo_addr; -}; - -/* Engine only pass first 12B TS field directly to uCode through metadata - * vlan_offoad is used for hardware when vlan insert in tx - */ -struct spnic_sq_task { - u32 pkt_info0; - u32 ip_identify; - u32 pkt_info2; /* ipsec used as spi */ - u32 vlan_offload; -}; - -struct spnic_sq_bufdesc { - u32 len; /* 31-bits Length, L2NIC only use length[17:0] */ - u32 rsvd; - u32 hi_addr; - u32 lo_addr; -}; - -struct spnic_sq_compact_wqe { - struct spnic_sq_wqe_desc wqe_desc; -}; - -struct spnic_sq_extend_wqe { - struct spnic_sq_wqe_desc wqe_desc; - struct spnic_sq_task task; - struct spnic_sq_bufdesc buf_desc[0]; -}; - -struct spnic_sq_wqe { - union { - struct spnic_sq_compact_wqe compact_wqe; - struct spnic_sq_extend_wqe extend_wqe; - }; -}; - -/* use section pointer for support non continuous wqe */ -struct spnic_sq_wqe_combo { - struct spnic_sq_wqe_desc *ctrl_bd0; - struct spnic_sq_task *task; - struct spnic_sq_bufdesc *bds_head; - struct spnic_sq_bufdesc *bds_sec2; - u16 first_bds_num; - u32 wqe_type; - u32 task_type; -}; - -/* ************* SQ_CTRL ************** */ -enum sq_wqe_data_format { - SQ_NORMAL_WQE = 0, -}; - -enum sq_wqe_ec_type { - SQ_WQE_COMPACT_TYPE = 0, - SQ_WQE_EXTENDED_TYPE = 1, -}; - -enum sq_wqe_tasksect_len_type { - SQ_WQE_TASKSECT_46BITS = 0, - SQ_WQE_TASKSECT_16BYTES = 1, -}; - -#define SQ_CTRL_BD0_LEN_SHIFT 0 -#define SQ_CTRL_RSVD_SHIFT 18 -#define SQ_CTRL_BUFDESC_NUM_SHIFT 19 -#define SQ_CTRL_TASKSECT_LEN_SHIFT 27 -#define SQ_CTRL_DATA_FORMAT_SHIFT 28 -#define SQ_CTRL_DIRECT_SHIFT 29 -#define SQ_CTRL_EXTENDED_SHIFT 30 
-#define SQ_CTRL_OWNER_SHIFT 31 - -#define SQ_CTRL_BD0_LEN_MASK 0x3FFFFU -#define SQ_CTRL_RSVD_MASK 0x1U -#define SQ_CTRL_BUFDESC_NUM_MASK 0xFFU -#define SQ_CTRL_TASKSECT_LEN_MASK 0x1U -#define SQ_CTRL_DATA_FORMAT_MASK 0x1U -#define SQ_CTRL_DIRECT_MASK 0x1U -#define SQ_CTRL_EXTENDED_MASK 0x1U -#define SQ_CTRL_OWNER_MASK 0x1U - -#define SQ_CTRL_SET(val, member) \ - (((u32)(val) & SQ_CTRL_##member##_MASK) << SQ_CTRL_##member##_SHIFT) - -#define SQ_CTRL_GET(val, member) \ - (((val) >> SQ_CTRL_##member##_SHIFT) & SQ_CTRL_##member##_MASK) - -#define SQ_CTRL_CLEAR(val, member) \ - ((val) & (~(SQ_CTRL_##member##_MASK << SQ_CTRL_##member##_SHIFT))) - -#define SQ_CTRL_QUEUE_INFO_PKT_TYPE_SHIFT 0 -#define SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2 -#define SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10 -#define SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11 -#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12 -#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 -#define SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27 -#define SQ_CTRL_QUEUE_INFO_UC_SHIFT 28 -#define SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29 - -#define SQ_CTRL_QUEUE_INFO_PKT_TYPE_MASK 0x3U -#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFFU -#define SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1U -#define SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1U -#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1U -#define SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFFU -#define SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1U -#define SQ_CTRL_QUEUE_INFO_UC_MASK 0x1U -#define SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7U - -#define SQ_CTRL_QUEUE_INFO_SET(val, member) \ - (((u32)(val) & SQ_CTRL_QUEUE_INFO_##member##_MASK) << \ - SQ_CTRL_QUEUE_INFO_##member##_SHIFT) - -#define SQ_CTRL_QUEUE_INFO_GET(val, member) \ - (((val) >> SQ_CTRL_QUEUE_INFO_##member##_SHIFT) & \ - SQ_CTRL_QUEUE_INFO_##member##_MASK) - -#define SQ_CTRL_QUEUE_INFO_CLEAR(val, member) \ - ((val) & (~(SQ_CTRL_QUEUE_INFO_##member##_MASK << \ - SQ_CTRL_QUEUE_INFO_##member##_SHIFT))) - -#define SQ_TASK_INFO0_TUNNEL_FLAG_SHIFT 19 -#define SQ_TASK_INFO0_ESP_NEXT_PROTO_SHIFT 22 -#define SQ_TASK_INFO0_INNER_L4_EN_SHIFT 24 -#define SQ_TASK_INFO0_INNER_L3_EN_SHIFT 25 -#define SQ_TASK_INFO0_INNER_L4_PSEUDO_SHIFT 26 -#define SQ_TASK_INFO0_OUT_L4_EN_SHIFT 27 -#define SQ_TASK_INFO0_OUT_L3_EN_SHIFT 28 -#define SQ_TASK_INFO0_OUT_L4_PSEUDO_SHIFT 29 -#define SQ_TASK_INFO0_ESP_OFFLOAD_SHIFT 30 -#define SQ_TASK_INFO0_IPSEC_PROTO_SHIFT 31 - -#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK 0x1U -#define SQ_TASK_INFO0_ESP_NEXT_PROTO_MASK 0x3U -#define SQ_TASK_INFO0_INNER_L4_EN_MASK 0x1U -#define SQ_TASK_INFO0_INNER_L3_EN_MASK 0x1U -#define SQ_TASK_INFO0_INNER_L4_PSEUDO_MASK 0x1U -#define SQ_TASK_INFO0_OUT_L4_EN_MASK 0x1U -#define SQ_TASK_INFO0_OUT_L3_EN_MASK 0x1U -#define SQ_TASK_INFO0_OUT_L4_PSEUDO_MASK 0x1U -#define SQ_TASK_INFO0_ESP_OFFLOAD_MASK 0x1U -#define SQ_TASK_INFO0_IPSEC_PROTO_MASK 0x1U - -#define SQ_TASK_INFO0_SET(val, member) \ - (((u32)(val) & SQ_TASK_INFO0_##member##_MASK) << \ - SQ_TASK_INFO0_##member##_SHIFT) -#define SQ_TASK_INFO0_GET(val, member) \ - (((val) >> SQ_TASK_INFO0_##member##_SHIFT) & \ - SQ_TASK_INFO0_##member##_MASK) - -#define SQ_TASK_INFO1_SET(val, member) \ - (((val) & SQ_TASK_INFO1_##member##_MASK) << \ - SQ_TASK_INFO1_##member##_SHIFT) -#define SQ_TASK_INFO1_GET(val, member) \ - (((val) >> SQ_TASK_INFO1_##member##_SHIFT) & \ - SQ_TASK_INFO1_##member##_MASK) - -#define SQ_TASK_INFO3_VLAN_TAG_SHIFT 0 -#define SQ_TASK_INFO3_VLAN_TYPE_SHIFT 16 -#define SQ_TASK_INFO3_VLAN_TAG_VALID_SHIFT 19 - -#define SQ_TASK_INFO3_VLAN_TAG_MASK 0xFFFFU -#define SQ_TASK_INFO3_VLAN_TYPE_MASK 0x7U -#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK 0x1U - -#define 
SQ_TASK_INFO3_SET(val, member) \
-	(((val) & SQ_TASK_INFO3_##member##_MASK) << \
-	SQ_TASK_INFO3_##member##_SHIFT)
-#define SQ_TASK_INFO3_GET(val, member) \
-	(((val) >> SQ_TASK_INFO3_##member##_SHIFT) & \
-	SQ_TASK_INFO3_##member##_MASK)
-
-static inline u32 spnic_get_pkt_len_for_super_cqe(struct spnic_rq_cqe *cqe, bool last)
-{
-	u32 pkt_len = cqe->pkt_info;
-
-	if (!last)
-		return RQ_CQE_PKT_LEN_GET(pkt_len, FIRST_LEN);
-	else
-		return RQ_CQE_PKT_LEN_GET(pkt_len, LAST_LEN);
-}
-
-/* *
- * spnic_prepare_sq_ctrl - init sq wqe control section
- * @nr_descs: total sge number, including bd0 in the control section
- * TODO: check with zhangxingguo to confirm WQE init
- */
-static inline void spnic_prepare_sq_ctrl(struct spnic_sq_wqe_combo *wqe_combo,
-					 u32 queue_info, int nr_descs, u16 owner)
-{
-	struct spnic_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0;
-
-	if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) {
-		wqe_desc->ctrl_len |=
-			SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
-			SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
-			SQ_CTRL_SET(owner, OWNER);
-
-		/* compact wqe queue_info will be transferred to ucode */
-		wqe_desc->queue_info = 0;
-		return;
-	}
-
-	wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) |
-			      SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) |
-			      SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
-			      SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) |
-			      SQ_CTRL_SET(owner, OWNER);
-
-	wqe_desc->queue_info = queue_info;
-	wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC);
-
-	if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) {
-		wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS);
-	} else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) < TX_MSS_MIN) {
-		/* mss should not be less than 80 */
-		wqe_desc->queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(wqe_desc->queue_info, MSS);
-		wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS);
-	}
-}
-
-/* *
- * spnic_set_vlan_tx_offload - set vlan offload info
- * @task: wqe task section
- * @vlan_tag: vlan tag
- * @vlan_type: 0--select TPID0 in IPSU, 1--select TPID1 in IPSU,
- *	       2--select TPID2 in IPSU, 3--select TPID3 in IPSU, 4--select TPID4 in IPSU
- */
-static inline void spnic_set_vlan_tx_offload(struct spnic_sq_task *task, u16 vlan_tag, u8 vlan_type)
-{
-	task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) |
-			     SQ_TASK_INFO3_SET(vlan_type, VLAN_TYPE) |
-			     SQ_TASK_INFO3_SET(1U, VLAN_TAG_VALID);
-}
-
-#endif
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c b/drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c
deleted file mode 100644
index cd92de93a57efe9e20ed6e5da75636c993d95e4e..0000000000000000000000000000000000000000
--- a/drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c
+++ /dev/null
@@ -1,841 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "sphw_common.h"
-#include "sphw_crm.h"
-#include "spnic_nic_cfg.h"
-#include "spnic_nic_dev.h"
-
-#define MAX_NUM_OF_ETHTOOL_NTUPLE_RULES	BIT(9)
-struct spnic_ethtool_rx_flow_rule {
-	struct list_head list;
-	struct ethtool_rx_flow_spec flow_spec;
-};
-
-static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
-{
-	u8 idx;
-
-	for (idx = 0; idx < len; idx++)
-		key_y[idx] = src_input[idx] & mask[idx];
-}
-
-static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
-{
-	u8 idx;
-
-	for (idx = 0; idx < len; idx++)
-		key_x[idx] = key_y[idx] ^ mask[idx];
-}
-
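The two translate helpers above implement the usual ternary-CAM key encoding: key_y keeps the cared-for value bits (value AND mask) and key_x flags where a 0 must match (key_y XOR mask), so per bit (x, y) = (0, 0) is don't-care, (0, 1) matches a 1, and (1, 0) matches a 0. A self-contained user-space sketch of the same arithmetic on one byte; tcam_encode_byte() is a hypothetical name for illustration, not driver code:

#include <stdio.h>

static void tcam_encode_byte(unsigned char val, unsigned char mask)
{
	unsigned char y = val & mask;	/* what tcam_translate_key_y() computes */
	unsigned char x = y ^ mask;	/* what tcam_translate_key_x() computes */

	printf("val=0x%02x mask=0x%02x -> x=0x%02x y=0x%02x\n",
	       val, mask, x, y);
}

int main(void)
{
	tcam_encode_byte(0xA5, 0xFF);	/* fully specified byte: x=0x5A y=0xA5 */
	tcam_encode_byte(0xA5, 0xF0);	/* low nibble don't-care: x=0x50 y=0xA0 */
	return 0;
}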
-static void tcam_key_calculate(struct tag_tcam_key *tcam_key, - struct nic_tcam_cfg_rule *fdir_tcam_rule) -{ - tcam_translate_key_y(fdir_tcam_rule->key.y, (u8 *)(&tcam_key->key_info), - (u8 *)(&tcam_key->key_mask), TCAM_FLOW_KEY_SIZE); - tcam_translate_key_x(fdir_tcam_rule->key.x, fdir_tcam_rule->key.y, - (u8 *)(&tcam_key->key_mask), TCAM_FLOW_KEY_SIZE); -} - -#define TCAM_IPV4_TYPE 0 -#define TCAM_IPV6_TYPE 1 - -static int spnic_base_ipv4_parse(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec; - struct ethtool_tcpip4_spec *val = &fs->h_u.tcp_ip4_spec; - u32 temp; - - switch (mask->ip4src) { - case U32_MAX: - temp = ntohl(val->ip4src); - tcam_key->key_info.sipv4_h = high_16_bits(temp); - tcam_key->key_info.sipv4_l = low_16_bits(temp); - - tcam_key->key_mask.sipv4_h = U16_MAX; - tcam_key->key_mask.sipv4_l = U16_MAX; - break; - case 0: - break; - - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ip mask\n"); - return -EINVAL; - } - - switch (mask->ip4dst) { - case U32_MAX: - temp = ntohl(val->ip4dst); - tcam_key->key_info.dipv4_h = high_16_bits(temp); - tcam_key->key_info.dipv4_l = low_16_bits(temp); - - tcam_key->key_mask.dipv4_h = U16_MAX; - tcam_key->key_mask.dipv4_l = U16_MAX; - break; - case 0: - break; - - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ip mask\n"); - return -EINVAL; - } - - tcam_key->key_info.ip_type = TCAM_IPV4_TYPE; - tcam_key->key_mask.ip_type = TCAM_IP_TYPE_MASK; - - tcam_key->key_info.function_id = sphw_global_func_id(nic_dev->hwdev); - tcam_key->key_mask.function_id = TCAM_FUNC_ID_MASK; - - return 0; -} - -static int spnic_fdir_tcam_ipv4_l4_init(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec; - struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec; - int err; - - err = spnic_base_ipv4_parse(nic_dev, fs, tcam_key); - if (err) - return err; - - tcam_key->key_info.dport = ntohs(l4_val->pdst); - tcam_key->key_mask.dport = l4_mask->pdst; - - tcam_key->key_info.sport = ntohs(l4_val->psrc); - tcam_key->key_mask.sport = l4_mask->psrc; - - if (fs->flow_type == TCP_V4_FLOW) - tcam_key->key_info.ip_proto = IPPROTO_TCP; - else - tcam_key->key_info.ip_proto = IPPROTO_UDP; - tcam_key->key_mask.ip_proto = U8_MAX; - - return 0; -} - -static int spnic_fdir_tcam_ipv4_init(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec; - struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec; - int err; - - err = spnic_base_ipv4_parse(nic_dev, fs, tcam_key); - if (err) - return err; - - tcam_key->key_info.ip_proto = l3_val->proto; - tcam_key->key_mask.ip_proto = l3_mask->proto; - - return 0; -} - -#ifndef UNSUPPORT_NTUPLE_IPV6 -enum ipv6_parse_res { - IPV6_MASK_INVALID, - IPV6_MASK_ALL_MASK, - IPV6_MASK_ALL_ZERO, -}; - -enum ipv6_index { - IPV6_IDX0, - IPV6_IDX1, - IPV6_IDX2, - IPV6_IDX3, -}; - -static int ipv6_mask_parse(u32 *ipv6_mask) -{ - if (ipv6_mask[IPV6_IDX0] == 0 && ipv6_mask[IPV6_IDX1] == 0 && - ipv6_mask[IPV6_IDX2] == 0 && ipv6_mask[IPV6_IDX3] == 0) - return IPV6_MASK_ALL_ZERO; - - if (ipv6_mask[IPV6_IDX0] == U32_MAX && - ipv6_mask[IPV6_IDX1] == U32_MAX && - ipv6_mask[IPV6_IDX2] == U32_MAX && ipv6_mask[IPV6_IDX3] == U32_MAX) - return IPV6_MASK_ALL_MASK; - - return IPV6_MASK_INVALID; -} - -static int 
spnic_base_ipv6_parse(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_tcpip6_spec *mask = &fs->m_u.tcp_ip6_spec; - struct ethtool_tcpip6_spec *val = &fs->h_u.tcp_ip6_spec; - int parse_res; - u32 temp; - - parse_res = ipv6_mask_parse((u32 *)mask->ip6src); - if (parse_res == IPV6_MASK_ALL_MASK) { - temp = ntohl(val->ip6src[IPV6_IDX0]); - tcam_key->key_info_ipv6.sipv6_key0 = high_16_bits(temp); - tcam_key->key_info_ipv6.sipv6_key1 = low_16_bits(temp); - temp = ntohl(val->ip6src[IPV6_IDX1]); - tcam_key->key_info_ipv6.sipv6_key2 = high_16_bits(temp); - tcam_key->key_info_ipv6.sipv6_key3 = low_16_bits(temp); - temp = ntohl(val->ip6src[IPV6_IDX2]); - tcam_key->key_info_ipv6.sipv6_key4 = high_16_bits(temp); - tcam_key->key_info_ipv6.sipv6_key5 = low_16_bits(temp); - temp = ntohl(val->ip6src[IPV6_IDX3]); - tcam_key->key_info_ipv6.sipv6_key6 = high_16_bits(temp); - tcam_key->key_info_ipv6.sipv6_key7 = low_16_bits(temp); - - tcam_key->key_mask_ipv6.sipv6_key0 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key1 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key2 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key3 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key4 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key5 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key6 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key7 = U16_MAX; - } else if (parse_res == IPV6_MASK_INVALID) { - nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ipv6 mask\n"); - return -EINVAL; - } - - parse_res = ipv6_mask_parse((u32 *)mask->ip6dst); - if (parse_res == IPV6_MASK_ALL_MASK) { - temp = ntohl(val->ip6dst[IPV6_IDX0]); - tcam_key->key_info_ipv6.dipv6_key0 = high_16_bits(temp); - tcam_key->key_info_ipv6.dipv6_key1 = low_16_bits(temp); - temp = ntohl(val->ip6dst[IPV6_IDX1]); - tcam_key->key_info_ipv6.dipv6_key2 = high_16_bits(temp); - tcam_key->key_info_ipv6.dipv6_key3 = low_16_bits(temp); - temp = ntohl(val->ip6dst[IPV6_IDX2]); - tcam_key->key_info_ipv6.dipv6_key4 = high_16_bits(temp); - tcam_key->key_info_ipv6.dipv6_key5 = low_16_bits(temp); - temp = ntohl(val->ip6dst[IPV6_IDX3]); - tcam_key->key_info_ipv6.dipv6_key6 = high_16_bits(temp); - tcam_key->key_info_ipv6.dipv6_key7 = low_16_bits(temp); - - tcam_key->key_mask_ipv6.dipv6_key0 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key1 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key2 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key3 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key4 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key5 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key6 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key7 = U16_MAX; - } else if (parse_res == IPV6_MASK_INVALID) { - nicif_err(nic_dev, drv, nic_dev->netdev, "invalid dst_ipv6 mask\n"); - return -EINVAL; - } - - tcam_key->key_info_ipv6.ip_type = TCAM_IPV6_TYPE; - tcam_key->key_mask_ipv6.ip_type = TCAM_IP_TYPE_MASK; - - tcam_key->key_info_ipv6.function_id = sphw_global_func_id(nic_dev->hwdev); - tcam_key->key_mask_ipv6.function_id = TCAM_FUNC_ID_MASK; - - return 0; -} - -static int spnic_fdir_tcam_ipv6_l4_init(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec; - struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec; - int err; - - err = spnic_base_ipv6_parse(nic_dev, fs, tcam_key); - if (err) - return err; - - tcam_key->key_info_ipv6.dport = ntohs(l4_val->pdst); - tcam_key->key_mask_ipv6.dport = l4_mask->pdst; - - tcam_key->key_info_ipv6.sport = 
ntohs(l4_val->psrc); - tcam_key->key_mask_ipv6.sport = l4_mask->psrc; - - if (fs->flow_type == TCP_V6_FLOW) - tcam_key->key_info_ipv6.ip_proto = NEXTHDR_TCP; - else - tcam_key->key_info_ipv6.ip_proto = NEXTHDR_UDP; - tcam_key->key_mask_ipv6.ip_proto = U8_MAX; - - return 0; -} - -static int spnic_fdir_tcam_ipv6_init(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec; - struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec; - int err; - - err = spnic_base_ipv6_parse(nic_dev, fs, tcam_key); - if (err) - return err; - - tcam_key->key_info_ipv6.ip_proto = l3_val->l4_proto; - tcam_key->key_mask_ipv6.ip_proto = l3_mask->l4_proto; - - return 0; -} -#endif - -static int spnic_fdir_tcam_info_init(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key, - struct nic_tcam_cfg_rule *fdir_tcam_rule) -{ - int err; - - switch (fs->flow_type) { - case TCP_V4_FLOW: - case UDP_V4_FLOW: - err = spnic_fdir_tcam_ipv4_l4_init(nic_dev, fs, tcam_key); - if (err) - return err; - break; - case IP_USER_FLOW: - err = spnic_fdir_tcam_ipv4_init(nic_dev, fs, tcam_key); - if (err) - return err; - break; -#ifndef UNSUPPORT_NTUPLE_IPV6 - case TCP_V6_FLOW: - case UDP_V6_FLOW: - err = spnic_fdir_tcam_ipv6_l4_init(nic_dev, fs, tcam_key); - if (err) - return err; - break; - case IPV6_USER_FLOW: - err = spnic_fdir_tcam_ipv6_init(nic_dev, fs, tcam_key); - if (err) - return err; - break; -#endif - default: - return -EOPNOTSUPP; - } - - tcam_key->key_info.tunnel_type = 0; - tcam_key->key_mask.tunnel_type = TCAM_TUNNEL_TYPE_MASK; - - fdir_tcam_rule->data.qid = (u32)fs->ring_cookie; - tcam_key_calculate(tcam_key, fdir_tcam_rule); - - return 0; -} - -void spnic_flush_rx_flow_rule(struct spnic_nic_dev *nic_dev) -{ - struct spnic_tcam_info *tcam_info = &nic_dev->tcam; - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - struct spnic_ethtool_rx_flow_rule *eth_rule_tmp = NULL; - struct spnic_tcam_filter *tcam_iter = NULL; - struct spnic_tcam_filter *tcam_iter_tmp = NULL; - struct spnic_tcam_dynamic_block *block = NULL; - struct spnic_tcam_dynamic_block *block_tmp = NULL; - struct list_head *dynamic_list = &tcam_info->tcam_dynamic_info.tcam_dynamic_list; - - if (!list_empty(&tcam_info->tcam_list)) { - list_for_each_entry_safe(tcam_iter, tcam_iter_tmp, &tcam_info->tcam_list, - tcam_filter_list) { - list_del(&tcam_iter->tcam_filter_list); - kfree(tcam_iter); - } - } - if (!list_empty(dynamic_list)) { - list_for_each_entry_safe(block, block_tmp, dynamic_list, block_list) { - list_del(&block->block_list); - kfree(block); - } - } - - if (!list_empty(&nic_dev->rx_flow_rule.rules)) { - list_for_each_entry_safe(eth_rule, eth_rule_tmp, - &nic_dev->rx_flow_rule.rules, list) { - list_del(ð_rule->list); - kfree(eth_rule); - } - } - -#ifndef FPGA_SUPPORT - spnic_flush_tcam_rule(nic_dev->hwdev); - spnic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false); -#endif -} - -static struct spnic_tcam_dynamic_block * -spnic_alloc_dynamic_block_resource(struct spnic_nic_dev *nic_dev, - struct spnic_tcam_info *tcam_info, u16 dynamic_block_id) -{ - struct spnic_tcam_dynamic_block *dynamic_block_ptr = NULL; - - dynamic_block_ptr = kzalloc(sizeof(*dynamic_block_ptr), GFP_KERNEL); - if (!dynamic_block_ptr) { - nicif_err(nic_dev, drv, nic_dev->netdev, "fdir filter dynamic alloc block index %d memory failed\n", - dynamic_block_id); - return NULL; - } - - dynamic_block_ptr->dynamic_block_id = dynamic_block_id; - 
list_add_tail(&dynamic_block_ptr->block_list, - &tcam_info->tcam_dynamic_info.tcam_dynamic_list); - - tcam_info->tcam_dynamic_info.dynamic_block_cnt++; - - return dynamic_block_ptr; -} - -static void -spnic_free_dynamic_block_resource(struct spnic_tcam_info *tcam_info, - struct spnic_tcam_dynamic_block *block_ptr) -{ - if (!block_ptr) - return; - - list_del(&block_ptr->block_list); - kfree(block_ptr); - - tcam_info->tcam_dynamic_info.dynamic_block_cnt--; -} - -static struct spnic_tcam_dynamic_block * -spnic_dynamic_lookup_tcam_filter(struct spnic_nic_dev *nic_dev, - struct nic_tcam_cfg_rule *fdir_tcam_rule, - struct spnic_tcam_info *tcam_info, - struct spnic_tcam_filter *tcam_filter, u16 *tcam_index) -{ - struct spnic_tcam_dynamic_block *tmp = NULL; - u16 index; - - list_for_each_entry(tmp, &tcam_info->tcam_dynamic_info.tcam_dynamic_list, block_list) - if (tmp->dynamic_index_cnt < SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) - break; - - if (!tmp || tmp->dynamic_index_cnt >= SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter dynamic lookup for index failed\n"); - return NULL; - } - - for (index = 0; index < SPNIC_TCAM_DYNAMIC_BLOCK_SIZE; index++) - if (tmp->dynamic_index_used[index] == 0) - break; - - if (index == SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) { - nicif_err(nic_dev, drv, nic_dev->netdev, "tcam block 0x%x supports filter rules is full\n", - tmp->dynamic_block_id); - return NULL; - } - - tcam_filter->dynamic_block_id = tmp->dynamic_block_id; - tcam_filter->index = index; - *tcam_index = index; - - fdir_tcam_rule->index = index + - SPNIC_PKT_TCAM_DYNAMIC_INDEX_START(tmp->dynamic_block_id); - - return tmp; -} - -static int spnic_add_tcam_filter(struct spnic_nic_dev *nic_dev, - struct spnic_tcam_filter *tcam_filter, - struct nic_tcam_cfg_rule *fdir_tcam_rule) -{ - struct spnic_tcam_info *tcam_info = &nic_dev->tcam; - struct spnic_tcam_dynamic_block *dynamic_block_ptr = NULL; - struct spnic_tcam_dynamic_block *tmp = NULL; - u16 block_cnt = tcam_info->tcam_dynamic_info.dynamic_block_cnt; - u16 tcam_block_index = 0; - int block_alloc_flag = 0; - u16 index = 0; - int err; - - if (tcam_info->tcam_rule_nums >= - block_cnt * SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) { - if (block_cnt >= (SPNIC_MAX_TCAM_FILTERS / SPNIC_TCAM_DYNAMIC_BLOCK_SIZE)) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Dynamic tcam block is full, alloc failed\n"); - goto failed; - } - - err = spnic_alloc_tcam_block(nic_dev->hwdev, &tcam_block_index); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter dynamic tcam alloc block failed\n"); - goto failed; - } - - block_alloc_flag = 1; - - dynamic_block_ptr = - spnic_alloc_dynamic_block_resource(nic_dev, tcam_info, tcam_block_index); - if (!dynamic_block_ptr) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter dynamic alloc block memory failed\n"); - goto block_alloc_failed; - } - } - - tmp = spnic_dynamic_lookup_tcam_filter(nic_dev, fdir_tcam_rule, tcam_info, - tcam_filter, &index); - if (!tmp) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Dynamic lookup tcam filter failed\n"); - goto lookup_tcam_index_failed; - } - - err = spnic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir_tcam_rule add failed\n"); - goto add_tcam_rules_failed; - } - - nicif_info(nic_dev, drv, nic_dev->netdev, - "Add fdir tcam rule, function_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, queue: %d, tcam_rule_nums: %d succeed\n", - sphw_global_func_id(nic_dev->hwdev), - tcam_filter->dynamic_block_id, 
index, fdir_tcam_rule->index, - fdir_tcam_rule->data.qid, tcam_info->tcam_rule_nums + 1); - - if (tcam_info->tcam_rule_nums == 0) { - err = spnic_set_fdir_tcam_rule_filter(nic_dev->hwdev, true); - if (err) - goto enable_failed; - } - - list_add_tail(&tcam_filter->tcam_filter_list, &tcam_info->tcam_list); - - tmp->dynamic_index_used[index] = 1; - tmp->dynamic_index_cnt++; - - tcam_info->tcam_rule_nums++; - - return 0; - -enable_failed: - spnic_del_tcam_rule(nic_dev->hwdev, fdir_tcam_rule->index); - -add_tcam_rules_failed: -lookup_tcam_index_failed: - if (block_alloc_flag == 1) - spnic_free_dynamic_block_resource(tcam_info, dynamic_block_ptr); - -block_alloc_failed: - if (block_alloc_flag == 1) - spnic_free_tcam_block(nic_dev->hwdev, &tcam_block_index); - -failed: - return -EFAULT; -} - -static int spnic_del_tcam_filter(struct spnic_nic_dev *nic_dev, - struct spnic_tcam_filter *tcam_filter) -{ - struct spnic_tcam_info *tcam_info = &nic_dev->tcam; - u16 dynamic_block_id = tcam_filter->dynamic_block_id; - struct spnic_tcam_dynamic_block *tmp = NULL; - u32 index = 0; - int err; - - list_for_each_entry(tmp, &tcam_info->tcam_dynamic_info.tcam_dynamic_list, block_list) { - if (tmp->dynamic_block_id == dynamic_block_id) - break; - } - if (!tmp || tmp->dynamic_block_id != dynamic_block_id) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter del dynamic lookup for block failed\n"); - return -EFAULT; - } - - index = SPNIC_PKT_TCAM_DYNAMIC_INDEX_START(tmp->dynamic_block_id) + tcam_filter->index; - - err = spnic_del_tcam_rule(nic_dev->hwdev, index); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "fdir_tcam_rule del failed\n"); - return -EFAULT; - } - - nicif_info(nic_dev, drv, nic_dev->netdev, - "Del fdir_tcam_dynamic_rule function_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, local_rules_nums: %d, global_rule_nums: %d succeed\n", - sphw_global_func_id(nic_dev->hwdev), dynamic_block_id, - tcam_filter->index, index, tmp->dynamic_index_cnt - 1, - tcam_info->tcam_rule_nums - 1); - - tmp->dynamic_index_used[tcam_filter->index] = 0; - tmp->dynamic_index_cnt--; - tcam_info->tcam_rule_nums--; - if (tmp->dynamic_index_cnt == 0) { - spnic_free_tcam_block(nic_dev->hwdev, &dynamic_block_id); - spnic_free_dynamic_block_resource(tcam_info, tmp); - } - - if (tcam_info->tcam_rule_nums == 0) - spnic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false); - - list_del(&tcam_filter->tcam_filter_list); - kfree(tcam_filter); - - return 0; -} - -static inline struct spnic_tcam_filter * -spnic_tcam_filter_lookup(struct list_head *filter_list, struct tag_tcam_key *key) -{ - struct spnic_tcam_filter *iter; - - list_for_each_entry(iter, filter_list, tcam_filter_list) { - if (memcmp(key, &iter->tcam_key, sizeof(struct tag_tcam_key)) == 0) - return iter; - } - - return NULL; -} - -static void del_ethtool_rule(struct spnic_nic_dev *nic_dev, - struct spnic_ethtool_rx_flow_rule *eth_rule) -{ - list_del(ð_rule->list); - nic_dev->rx_flow_rule.tot_num_rules--; - - kfree(eth_rule); -} - -static int spnic_remove_one_rule(struct spnic_nic_dev *nic_dev, - struct spnic_ethtool_rx_flow_rule *eth_rule) -{ - struct spnic_tcam_info *tcam_info = &nic_dev->tcam; - struct spnic_tcam_filter *tcam_filter; - struct nic_tcam_cfg_rule fdir_tcam_rule; - struct tag_tcam_key tcam_key; - int err; - - memset(&fdir_tcam_rule, 0, sizeof(fdir_tcam_rule)); - memset(&tcam_key, 0, sizeof(tcam_key)); - - err = spnic_fdir_tcam_info_init(nic_dev, ð_rule->flow_spec, &tcam_key, &fdir_tcam_rule); - if (err) { - nicif_err(nic_dev, drv, 
nic_dev->netdev, "Init fdir info failed\n"); - return err; - } - - tcam_filter = spnic_tcam_filter_lookup(&tcam_info->tcam_list, &tcam_key); - if (!tcam_filter) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Filter does not exists\n"); - return -EEXIST; - } - - err = spnic_del_tcam_filter(nic_dev, tcam_filter); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Delete tcam filter failed\n"); - return err; - } - - del_ethtool_rule(nic_dev, eth_rule); - - return 0; -} - -static void add_rule_to_list(struct spnic_nic_dev *nic_dev, - struct spnic_ethtool_rx_flow_rule *rule) -{ - struct spnic_ethtool_rx_flow_rule *iter = NULL; - struct list_head *head = &nic_dev->rx_flow_rule.rules; - - list_for_each_entry(iter, &nic_dev->rx_flow_rule.rules, list) { - if (iter->flow_spec.location > rule->flow_spec.location) - break; - head = &iter->list; - } - nic_dev->rx_flow_rule.tot_num_rules++; - list_add(&rule->list, head); -} - -static int spnic_add_one_rule(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs) -{ - struct nic_tcam_cfg_rule fdir_tcam_rule; - struct tag_tcam_key tcam_key; - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - struct spnic_tcam_filter *tcam_filter = NULL; - struct spnic_tcam_info *tcam_info = &nic_dev->tcam; - int err; - - memset(&fdir_tcam_rule, 0, sizeof(fdir_tcam_rule)); - memset(&tcam_key, 0, sizeof(tcam_key)); - err = spnic_fdir_tcam_info_init(nic_dev, fs, &tcam_key, &fdir_tcam_rule); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Init fdir info failed\n"); - return err; - } - - tcam_filter = spnic_tcam_filter_lookup(&tcam_info->tcam_list, &tcam_key); - if (tcam_filter) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Filter exists\n"); - return -EEXIST; - } - - tcam_filter = kzalloc(sizeof(*tcam_filter), GFP_KERNEL); - if (!tcam_filter) - return -ENOMEM; - memcpy(&tcam_filter->tcam_key, &tcam_key, sizeof(struct tag_tcam_key)); - tcam_filter->queue = (u16)fdir_tcam_rule.data.qid; - - err = spnic_add_tcam_filter(nic_dev, tcam_filter, &fdir_tcam_rule); - if (err) - goto add_tcam_filter_fail; - - /* driver save new rule filter */ - eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL); - if (!eth_rule) { - err = -ENOMEM; - goto alloc_eth_rule_fail; - } - - eth_rule->flow_spec = *fs; - add_rule_to_list(nic_dev, eth_rule); - - return 0; - -alloc_eth_rule_fail: - spnic_del_tcam_filter(nic_dev, tcam_filter); -add_tcam_filter_fail: - kfree(tcam_filter); - return err; -} - -static struct spnic_ethtool_rx_flow_rule * -find_ethtool_rule(struct spnic_nic_dev *nic_dev, u32 location) -{ - struct spnic_ethtool_rx_flow_rule *iter = NULL; - - list_for_each_entry(iter, &nic_dev->rx_flow_rule.rules, list) { - if (iter->flow_spec.location == location) - return iter; - } - return NULL; -} - -static int validate_flow(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs) -{ - if (fs->location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) { - nicif_err(nic_dev, drv, nic_dev->netdev, "loc exceed limit[0,%lu]\n", - MAX_NUM_OF_ETHTOOL_NTUPLE_RULES); - return -EINVAL; - } - - if (fs->ring_cookie >= nic_dev->q_params.num_qps) { - nicif_err(nic_dev, drv, nic_dev->netdev, "action is larger than queue number %u\n", - nic_dev->q_params.num_qps); - return -EINVAL; - } - - switch (fs->flow_type) { - case TCP_V4_FLOW: - case UDP_V4_FLOW: - case IP_USER_FLOW: -#ifndef UNSUPPORT_NTUPLE_IPV6 - case TCP_V6_FLOW: - case UDP_V6_FLOW: - case IPV6_USER_FLOW: -#endif - break; - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "flow type is not supported\n"); - return -EOPNOTSUPP; - } - - 
return 0; -} - -int spnic_ethtool_flow_replace(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs) -{ - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - struct ethtool_rx_flow_spec flow_spec_temp; - int loc_exit_flag = 0; - int err; - - err = validate_flow(nic_dev, fs); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "flow is not valid %d\n", err); - return err; - } - - eth_rule = find_ethtool_rule(nic_dev, fs->location); - /* when location is same, delete old location rule. */ - if (eth_rule) { - memcpy(&flow_spec_temp, ð_rule->flow_spec, sizeof(struct ethtool_rx_flow_spec)); - err = spnic_remove_one_rule(nic_dev, eth_rule); - if (err) - return err; - - loc_exit_flag = 1; - } - - /* add new rule filter */ - err = spnic_add_one_rule(nic_dev, fs); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Add new rule filter failed\n"); - if (loc_exit_flag) - spnic_add_one_rule(nic_dev, &flow_spec_temp); - - return -ENOENT; - } - - return 0; -} - -int spnic_ethtool_flow_remove(struct spnic_nic_dev *nic_dev, u32 location) -{ - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - int err; - - if (location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) - return -ENOSPC; - - eth_rule = find_ethtool_rule(nic_dev, location); - if (!eth_rule) - return -ENOENT; - - err = spnic_remove_one_rule(nic_dev, eth_rule); - - return err; -} - -int spnic_ethtool_get_flow(struct spnic_nic_dev *nic_dev, - struct ethtool_rxnfc *info, u32 location) -{ - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - - if (location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) - return -EINVAL; - - list_for_each_entry(eth_rule, &nic_dev->rx_flow_rule.rules, list) { - if (eth_rule->flow_spec.location == location) { - info->fs = eth_rule->flow_spec; - return 0; - } - } - - return -ENOENT; -} - -int spnic_ethtool_get_all_flows(struct spnic_nic_dev *nic_dev, - struct ethtool_rxnfc *info, u32 *rule_locs) -{ - int idx = 0; - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - - info->data = MAX_NUM_OF_ETHTOOL_NTUPLE_RULES; - list_for_each_entry(eth_rule, &nic_dev->rx_flow_rule.rules, list) - rule_locs[idx++] = eth_rule->flow_spec.location; - - return info->rule_cnt == idx ? 
0 : -ENOENT; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h b/drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h deleted file mode 100644 index 9d32608e6bb70ae3e55a5714f4f6c8d739806032..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_PCI_ID_TBL_H -#define SPNIC_PCI_ID_TBL_H - -#define PCI_VENDOR_ID_RAMAXEL 0x1E81 -#define SPNIC_DEV_ID_PF_STD 0x9020 -#define SPNIC_DEV_ID_VF 0x9001 -#define SPNIC_DEV_ID_VF_HV 0x9002 - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rss.c b/drivers/net/ethernet/ramaxel/spnic/spnic_rss.c deleted file mode 100644 index 956d868df5b5b61e35cf0d4bf0e6255fdcff3b0d..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_rss.c +++ /dev/null @@ -1,741 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic_dev.h" -#include "sphw_hw.h" -#include "spnic_rss.h" - -static u16 num_qps; -module_param(num_qps, ushort, 0444); -MODULE_PARM_DESC(num_qps, "Number of Queue Pairs (default=0)"); - -#define MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, out_qps) do { \ - if ((num_qps) > (nic_dev)->max_qps) \ - nic_warn(&(nic_dev)->pdev->dev, \ - "Module Parameter %s value %u is out of range, " \ - "Maximum value for the device: %u, using %u\n", \ - #num_qps, num_qps, (nic_dev)->max_qps, \ - (nic_dev)->max_qps); \ - if (!(num_qps) || (num_qps) > (nic_dev)->max_qps) \ - (out_qps) = (nic_dev)->max_qps; \ - else \ - (out_qps) = (num_qps); \ -} while (0) - -static void spnic_fillout_indir_tbl(struct spnic_nic_dev *nic_dev, u8 num_tcs, u32 *indir) -{ - u16 num_rss, tc_group_size; - int i; - - if (num_tcs) - tc_group_size = SPNIC_RSS_INDIR_SIZE / num_tcs; - else - tc_group_size = SPNIC_RSS_INDIR_SIZE; - - num_rss = nic_dev->q_params.num_rss; - for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) - indir[i] = (i / tc_group_size) * num_rss + i % num_rss; -} - -int spnic_rss_init(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - u8 cos, num_tc = 0; - u8 prio_tc[SPNIC_DCB_UP_MAX] = {0}; - u8 max_cos = nic_dev->hw_dcb_cfg.max_cos; - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - num_tc = max_cos; - for (cos = 0; cos < SPNIC_DCB_COS_MAX; cos++) { - if (cos < SPNIC_DCB_COS_MAX - max_cos) - prio_tc[cos] = max_cos - 1; - else - prio_tc[cos] = (SPNIC_DCB_COS_MAX - 1) - cos; - } - } else { - num_tc = 0; - } - - return spnic_set_hw_rss_parameters(netdev, 1, num_tc, prio_tc); -} - -void spnic_rss_deinit(struct spnic_nic_dev *nic_dev) -{ - u8 prio_tc[SPNIC_DCB_UP_MAX] = {0}; - - spnic_rss_cfg(nic_dev->hwdev, 0, 0, prio_tc, 1); -} - -void spnic_init_rss_parameters(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - nic_dev->rss_hash_engine = SPNIC_RSS_HASH_ENGINE_TYPE_XOR; - nic_dev->rss_type.tcp_ipv6_ext = 1; - nic_dev->rss_type.ipv6_ext = 1; - nic_dev->rss_type.tcp_ipv6 = 1; - nic_dev->rss_type.ipv6 = 1; - nic_dev->rss_type.tcp_ipv4 = 1; - nic_dev->rss_type.ipv4 = 1; - nic_dev->rss_type.udp_ipv6 = 1; - nic_dev->rss_type.udp_ipv4 = 1; -} - -void spnic_clear_rss_config(struct spnic_nic_dev 
*nic_dev) -{ - kfree(nic_dev->rss_hkey); - nic_dev->rss_hkey = NULL; - - kfree(nic_dev->rss_indir); - nic_dev->rss_indir = NULL; -} - -void spnic_set_default_rss_indir(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - set_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); -} - -static void spnic_maybe_reconfig_rss_indir(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int i; - - /* if dcb is enabled, user can not config rss indir table */ - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - nicif_info(nic_dev, drv, netdev, "DCB is enabled, set default rss indir\n"); - goto discard_user_rss_indir; - } - - for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) { - if (nic_dev->rss_indir[i] >= nic_dev->q_params.num_qps) - goto discard_user_rss_indir; - } - - return; - -discard_user_rss_indir: - spnic_set_default_rss_indir(netdev); -} - -static void decide_num_qps(struct spnic_nic_dev *nic_dev) -{ - u16 tmp_num_qps = nic_dev->q_params.num_qps; - u16 num_cpus = 0; - int i, node; - - MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, tmp_num_qps); - - /* To reduce memory footprint in ovs mode. - * VF can't get board info correctly with early pf driver. - */ - /* if ((spnic_get_func_mode(nic_dev->hwdev) == FUNC_MOD_NORMAL_HOST) && - * service_mode == SPNIC_WORK_MODE_OVS && - * sphw_func_type(nic_dev->hwdev) != TYPE_VF) - * MOD_PARA_VALIDATE_NUM_QPS(nic_dev, ovs_num_qps, - * tmp_num_qps); - */ - - for (i = 0; i < (int)num_online_cpus(); i++) { - node = (int)cpu_to_node(i); - if (node == dev_to_node(&nic_dev->pdev->dev)) - num_cpus++; - } - - if (!num_cpus) - num_cpus = (u16)num_online_cpus(); - - nic_dev->q_params.num_qps = min_t(u16, tmp_num_qps, num_cpus); -} - -static void copy_value_to_rss_hkey(struct spnic_nic_dev *nic_dev, const u8 *hkey) -{ - u32 i; - u32 *rss_hkey = (u32 *)nic_dev->rss_hkey; - - memcpy(nic_dev->rss_hkey, hkey, SPNIC_RSS_KEY_SIZE); - - /* make a copy of the key, and convert it to Big Endian */ - for (i = 0; i < SPNIC_RSS_KEY_SIZE / sizeof(u32); i++) - nic_dev->rss_hkey_be[i] = cpu_to_be32(rss_hkey[i]); -} - -int alloc_rss_resource(struct spnic_nic_dev *nic_dev) -{ - u8 default_rss_key[SPNIC_RSS_KEY_SIZE] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, - 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, - 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, - 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, - 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa}; - - /* We request double spaces for the hash key, - * the second one holds the key of Big Edian - * format. 
- */ - nic_dev->rss_hkey = - kzalloc(SPNIC_RSS_KEY_SIZE * SPNIC_RSS_KEY_RSV_NUM, GFP_KERNEL); - if (!nic_dev->rss_hkey) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc memory for rss_hkey\n"); - return -ENOMEM; - } - - /* The second space is for big edian hash key */ - nic_dev->rss_hkey_be = (u32 *)(nic_dev->rss_hkey + SPNIC_RSS_KEY_SIZE); - copy_value_to_rss_hkey(nic_dev, (u8 *)default_rss_key); - - nic_dev->rss_indir = kzalloc(sizeof(u32) * SPNIC_RSS_INDIR_SIZE, GFP_KERNEL); - if (!nic_dev->rss_indir) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc memory for rss_indir\n"); - kfree(nic_dev->rss_hkey); - nic_dev->rss_hkey = NULL; - return -ENOMEM; - } - - set_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); - - return 0; -} - -void spnic_try_to_enable_rss(struct spnic_nic_dev *nic_dev) -{ - u8 prio_tc[SPNIC_DCB_UP_MAX] = {0}; - int err = 0; - - if (!nic_dev) - return; - - nic_dev->max_qps = sphw_func_max_nic_qnum(nic_dev->hwdev); - if (nic_dev->max_qps <= 1 || !SPNIC_SUPPORT_RSS(nic_dev->hwdev)) - goto set_q_params; - - err = alloc_rss_resource(nic_dev); - if (err) { - nic_dev->max_qps = 1; - goto set_q_params; - } - - set_bit(SPNIC_RSS_ENABLE, &nic_dev->flags); - nic_dev->max_qps = sphw_func_max_nic_qnum(nic_dev->hwdev); - - decide_num_qps(nic_dev); - - nic_dev->q_params.rss_limit = nic_dev->q_params.num_qps; - nic_dev->q_params.num_rss = nic_dev->q_params.num_qps; - - spnic_init_rss_parameters(nic_dev->netdev); - err = spnic_set_hw_rss_parameters(nic_dev->netdev, 0, 0, prio_tc); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to set hardware rss parameters\n"); - - spnic_clear_rss_config(nic_dev); - nic_dev->max_qps = 1; - goto set_q_params; - } - return; - -set_q_params: - clear_bit(SPNIC_RSS_ENABLE, &nic_dev->flags); - nic_dev->q_params.rss_limit = nic_dev->max_qps; - nic_dev->q_params.num_qps = nic_dev->max_qps; - nic_dev->q_params.num_rss = nic_dev->max_qps; -} - -static int spnic_config_rss_hw_resource(struct spnic_nic_dev *nic_dev, u32 *indir_tbl) -{ - int err; - - err = spnic_rss_set_indir_tbl(nic_dev->hwdev, indir_tbl); - if (err) - return err; - - err = spnic_set_rss_type(nic_dev->hwdev, nic_dev->rss_type); - if (err) - return err; - - return spnic_rss_set_hash_engine(nic_dev->hwdev, nic_dev->rss_hash_engine); -} - -int spnic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc, u8 *prio_tc) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err; - - /* RSS key */ - err = spnic_rss_set_hash_key(nic_dev->hwdev, nic_dev->rss_hkey); - if (err) - return err; - - spnic_maybe_reconfig_rss_indir(netdev); - - if (test_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags)) - spnic_fillout_indir_tbl(nic_dev, num_tc, nic_dev->rss_indir); - - err = spnic_config_rss_hw_resource(nic_dev, nic_dev->rss_indir); - if (err) - return err; - - err = spnic_rss_cfg(nic_dev->hwdev, rss_en, num_tc, prio_tc, nic_dev->q_params.num_qps); - if (err) - return err; - - return 0; -} - -/* for ethtool */ -static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd, struct nic_rss_type *rss_type) -{ - u8 rss_l4_en = 0; - - switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - rss_l4_en = 0; - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - rss_l4_en = 1; - break; - default: - return -EINVAL; - } - - switch (cmd->flow_type) { - case TCP_V4_FLOW: - rss_type->tcp_ipv4 = rss_l4_en; - break; - case TCP_V6_FLOW: - rss_type->tcp_ipv6 = rss_l4_en; - break; - case UDP_V4_FLOW: - rss_type->udp_ipv4 = rss_l4_en; - break; - case UDP_V6_FLOW: - rss_type->udp_ipv6 = 
rss_l4_en; - break; - default: - return -EINVAL; - } - - return 0; -} - -static int update_rss_hash_opts(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *cmd, - struct nic_rss_type *rss_type) -{ - int err; - - switch (cmd->flow_type) { - case TCP_V4_FLOW: - case TCP_V6_FLOW: - case UDP_V4_FLOW: - case UDP_V6_FLOW: - err = set_l4_rss_hash_ops(cmd, rss_type); - if (err) - return err; - - break; - case IPV4_FLOW: - rss_type->ipv4 = 1; - break; - case IPV6_FLOW: - rss_type->ipv6 = 1; - break; - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported flow type\n"); - return -EINVAL; - } - - return 0; -} - -static int spnic_set_rss_hash_opts(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) -{ - struct nic_rss_type *rss_type = &nic_dev->rss_type; - int err; - - if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - cmd->data = 0; - nicif_err(nic_dev, drv, nic_dev->netdev, - "RSS is disabled, setting flow-hash is not supported\n"); - return -EOPNOTSUPP; - } - - /* RSS does not support anything other than hashing - * to queues on src and dst IPs and ports - */ - if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | - RXH_L4_B_2_3)) - return -EINVAL; - - /* We need at least the IP SRC and DEST fields for hashing */ - if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST)) - return -EINVAL; - - err = spnic_get_rss_type(nic_dev->hwdev, rss_type); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rss type\n"); - return -EFAULT; - } - - err = update_rss_hash_opts(nic_dev, cmd, rss_type); - if (err) - return err; - - err = spnic_set_rss_type(nic_dev->hwdev, *rss_type); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to set rss type\n"); - return -EFAULT; - } - - nicif_info(nic_dev, drv, nic_dev->netdev, "Set rss hash options successfully\n"); - - return 0; -} - -static void convert_rss_type(u8 rss_opt, struct ethtool_rxnfc *cmd) -{ - if (rss_opt) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; -} - -static int spnic_convert_rss_type(struct spnic_nic_dev *nic_dev, struct nic_rss_type *rss_type, - struct ethtool_rxnfc *cmd) -{ - cmd->data = RXH_IP_SRC | RXH_IP_DST; - switch (cmd->flow_type) { - case TCP_V4_FLOW: - convert_rss_type(rss_type->tcp_ipv4, cmd); - break; - case TCP_V6_FLOW: - convert_rss_type(rss_type->tcp_ipv6, cmd); - break; - case UDP_V4_FLOW: - convert_rss_type(rss_type->udp_ipv4, cmd); - break; - case UDP_V6_FLOW: - convert_rss_type(rss_type->udp_ipv6, cmd); - break; - case IPV4_FLOW: - case IPV6_FLOW: - break; - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported flow type\n"); - cmd->data = 0; - return -EINVAL; - } - - return 0; -} - -static int spnic_get_rss_hash_opts(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) -{ - struct nic_rss_type rss_type = {0}; - int err; - - cmd->data = 0; - - if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) - return 0; - - err = spnic_get_rss_type(nic_dev->hwdev, &rss_type); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rss type\n"); - return err; - } - - return spnic_convert_rss_type(nic_dev, &rss_type, cmd); -} - -int spnic_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err = 0; - - switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = nic_dev->q_params.num_qps; - break; - case ETHTOOL_GRXCLSRLCNT: - cmd->rule_cnt = nic_dev->rx_flow_rule.tot_num_rules; - break; - case ETHTOOL_GRXCLSRULE: - err = spnic_ethtool_get_flow(nic_dev, cmd, cmd->fs.location); - 
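As an aside, the flow-hash handlers above collapse the ethtool RXH_* bitmask into one boolean per flow type. A condensed, standalone restatement of that rule (a sketch mirroring set_l4_rss_hash_ops() and the checks in spnic_set_rss_hash_opts(), not driver code):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ethtool.h>	/* RXH_* field flags */

/* Returns 1 for a 4-tuple hash (IPs plus both L4 port halves), 0 for a
 * 2-tuple hash (IPs only), or -EINVAL: src/dst IP participation is
 * mandatory, and the two L4 port halves must be toggled together.
 */
static int rxh_to_l4_en(u64 data)
{
	if (!(data & RXH_IP_SRC) || !(data & RXH_IP_DST))
		return -EINVAL;

	switch (data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		return 0;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		return 1;
	default:
		return -EINVAL;	/* hashing on half a port is rejected */
	}
}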
break; - case ETHTOOL_GRXCLSRLALL: - err = spnic_ethtool_get_all_flows(nic_dev, cmd, rule_locs); - break; - case ETHTOOL_GRXFH: - err = spnic_get_rss_hash_opts(nic_dev, cmd); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -int spnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err = 0; - - switch (cmd->cmd) { - case ETHTOOL_SRXFH: - err = spnic_set_rss_hash_opts(nic_dev, cmd); - break; - case ETHTOOL_SRXCLSRLINS: - err = spnic_ethtool_flow_replace(nic_dev, &cmd->fs); - break; - case ETHTOOL_SRXCLSRLDEL: - err = spnic_ethtool_flow_remove(nic_dev, cmd->fs.location); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static u16 spnic_max_channels(struct spnic_nic_dev *nic_dev) -{ - u8 tcs = (u8)netdev_get_num_tc(nic_dev->netdev); - - return tcs ? nic_dev->max_qps / tcs : nic_dev->max_qps; -} - -static u16 spnic_curr_channels(struct spnic_nic_dev *nic_dev) -{ - if (netif_running(nic_dev->netdev)) - return nic_dev->q_params.num_rss ? nic_dev->q_params.num_rss : 1; - else - return min_t(u16, spnic_max_channels(nic_dev), - nic_dev->q_params.rss_limit); -} - -void spnic_get_channels(struct net_device *netdev, struct ethtool_channels *channels) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - channels->max_rx = 0; - channels->max_tx = 0; - channels->max_other = 0; - /* report maximum channels */ - channels->max_combined = spnic_max_channels(nic_dev); - channels->rx_count = 0; - channels->tx_count = 0; - channels->other_count = 0; - /* report flow director queues as maximum channels */ - channels->combined_count = spnic_curr_channels(nic_dev); -} - -void spnic_update_num_qps(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u16 num_qps; - u8 tcs; - - /* change num_qps to change counter in ethtool -S */ - tcs = (u8)netdev_get_num_tc(nic_dev->netdev); - nic_dev->q_params.num_tc = tcs; - num_qps = (u16)(nic_dev->q_params.rss_limit * (tcs ? 
tcs : 1)); - nic_dev->q_params.num_qps = min_t(u16, nic_dev->max_qps, num_qps); -} - -static int spnic_validate_channel_parameter(struct net_device *netdev, - struct ethtool_channels *channels) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u16 max_channel = spnic_max_channels(nic_dev); - unsigned int count = channels->combined_count; - - if (!count) { - nicif_err(nic_dev, drv, netdev, "Unsupported combined_count=0\n"); - return -EINVAL; - } - - if (channels->tx_count || channels->rx_count || channels->other_count) { - nicif_err(nic_dev, drv, netdev, "Setting rx/tx/other count not supported\n"); - return -EINVAL; - } - - if (count > max_channel) { - nicif_err(nic_dev, drv, netdev, "Combined count %u exceeds limit %u\n", - count, max_channel); - return -EINVAL; - } - - return 0; -} - -static void change_num_channel_reopen_handler(struct spnic_nic_dev *nic_dev, const void *priv_data) -{ - spnic_set_default_rss_indir(nic_dev->netdev); -} - -int spnic_set_channels(struct net_device *netdev, struct ethtool_channels *channels) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_dyna_txrxq_params q_params = {0}; - unsigned int count = channels->combined_count; - int err; - - if (spnic_validate_channel_parameter(netdev, channels)) - return -EINVAL; - - if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, - "This function doesn't support RSS, only 1 queue pair is supported\n"); - return -EOPNOTSUPP; - } - - nicif_info(nic_dev, drv, netdev, "Set max combined queue number from %u to %u\n", - nic_dev->q_params.rss_limit, count); - - if (netif_running(netdev)) { - q_params = nic_dev->q_params; - q_params.rss_limit = (u16)count; - q_params.txqs_res = NULL; - q_params.rxqs_res = NULL; - q_params.irq_cfg = NULL; - - nicif_info(nic_dev, drv, netdev, "Restarting channel\n"); - err = spnic_change_channel_settings(nic_dev, &q_params, - change_num_channel_reopen_handler, NULL); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to change channel settings\n"); - return -EFAULT; - } - } else { - /* Discard user configured rss */ - spnic_set_default_rss_indir(netdev); - nic_dev->q_params.rss_limit = (u16)count; - spnic_update_num_qps(netdev); - } - - return 0; -} - -static int set_rss_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err; - - if (indir) { - err = spnic_rss_set_indir_tbl(nic_dev->hwdev, indir); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to set rss indir table\n"); - return -EFAULT; - } - clear_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); - - memcpy(nic_dev->rss_indir, indir, - sizeof(u32) * SPNIC_RSS_INDIR_SIZE); - nicif_info(nic_dev, drv, netdev, "Changed rss indir successfully\n"); - } - - if (key) { - err = spnic_rss_set_hash_key(nic_dev->hwdev, key); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to set rss key\n"); - return -EFAULT; - } - - copy_value_to_rss_hkey(nic_dev, key); - nicif_info(nic_dev, drv, netdev, "Changed rss key successfully\n"); - } - - return 0; -} - -u32 spnic_get_rxfh_indir_size(struct net_device *netdev) -{ - return SPNIC_RSS_INDIR_SIZE; -} - -u32 spnic_get_rxfh_key_size(struct net_device *netdev) -{ - return SPNIC_RSS_KEY_SIZE; -} - -int spnic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err = 0; - - if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, nic_dev->netdev, "RSS is disabled\n"); - return -EOPNOTSUPP; 
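For context, the indirection table written through set_rss_rxfh() above is what turns a packet hash into a queue id: the hardware reduces the hash modulo the table size, and the entry value selects the receive queue, which is why every entry must stay below num_qps. A minimal sketch of that lookup, assuming a 256-entry table standing in for SPNIC_RSS_INDIR_SIZE:

#define INDIR_SIZE 256	/* stands in for SPNIC_RSS_INDIR_SIZE */

static u16 rss_pick_queue(u32 pkt_hash, const u32 indir[INDIR_SIZE])
{
	/* entries were validated to be < num_qps before being loaded */
	return (u16)indir[pkt_hash % INDIR_SIZE];
}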
- } - - if (hfunc) - *hfunc = nic_dev->rss_hash_engine ? - ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR; - - if (indir) { - err = spnic_rss_get_indir_tbl(nic_dev->hwdev, indir); - if (err) - return -EFAULT; - } - - if (key) - memcpy(key, nic_dev->rss_hkey, SPNIC_RSS_KEY_SIZE); - - return err; -} - -int spnic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err = 0; - - if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Setting rss parameters is not supported when RSS is disabled\n"); - return -EOPNOTSUPP; - } - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags) && indir) { - nicif_err(nic_dev, drv, netdev, "Setting indir is not supported when DCB is enabled\n"); - return -EOPNOTSUPP; - } - - if (hfunc != ETH_RSS_HASH_NO_CHANGE) { - if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) { - nicif_err(nic_dev, drv, netdev, "Only TOP and XOR hfunc types are supported\n"); - return -EOPNOTSUPP; - } - - nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ? - SPNIC_RSS_HASH_ENGINE_TYPE_XOR : - SPNIC_RSS_HASH_ENGINE_TYPE_TOEP; - err = spnic_rss_set_hash_engine(nic_dev->hwdev, nic_dev->rss_hash_engine); - if (err) - return -EFAULT; - - nicif_info(nic_dev, drv, netdev, "Changed hfunc to RSS_HASH_%s successfully\n", - (hfunc == ETH_RSS_HASH_XOR) ? "XOR" : "TOP"); - } - err = set_rss_rxfh(netdev, indir, key); - - return err; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rss.h b/drivers/net/ethernet/ramaxel/spnic/spnic_rss.h deleted file mode 100644 index e64a4dcf39ddff8cfd498e456c4e0f1f95146367..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_rss.h +++ /dev/null @@ -1,50 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_RSS_H -#define SPNIC_RSS_H - -#include "spnic_nic_dev.h" - -int spnic_rss_init(struct spnic_nic_dev *nic_dev); - -void spnic_rss_deinit(struct spnic_nic_dev *nic_dev); - -int spnic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc, u8 *prio_tc); - -void spnic_init_rss_parameters(struct net_device *netdev); - -void spnic_set_default_rss_indir(struct net_device *netdev); - -void spnic_try_to_enable_rss(struct spnic_nic_dev *nic_dev); - -void spnic_clear_rss_config(struct spnic_nic_dev *nic_dev); - -void spnic_flush_rx_flow_rule(struct spnic_nic_dev *nic_dev); -int spnic_ethtool_get_flow(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *info, u32 location); - -int spnic_ethtool_get_all_flows(struct spnic_nic_dev *nic_dev, - struct ethtool_rxnfc *info, u32 *rule_locs); - -int spnic_ethtool_flow_remove(struct spnic_nic_dev *nic_dev, u32 location); - -int spnic_ethtool_flow_replace(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs); - -/* for ethtool */ -int spnic_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs); - -int spnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd); - -void spnic_get_channels(struct net_device *netdev, struct ethtool_channels *channels); - -int spnic_set_channels(struct net_device *netdev, struct ethtool_channels *channels); - -u32 spnic_get_rxfh_indir_size(struct net_device *netdev); - -u32 spnic_get_rxfh_key_size(struct net_device *netdev); - -int spnic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); - -int spnic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc); - -#endif diff 
--git a/drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c deleted file mode 100644 index 12da3aa5940091538ddec11711cb2ae79167e836..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c +++ /dev/null @@ -1,333 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "spnic_nic_cfg.h" -#include "sphw_hw.h" -#include "spnic_nic.h" -#include "sphw_common.h" - -static int spnic_rss_cfg_hash_key(struct spnic_nic_cfg *nic_cfg, u8 opcode, u8 *key) -{ - struct spnic_cmd_rss_hash_key hash_key; - u16 out_size = sizeof(hash_key); - int err; - - memset(&hash_key, 0, sizeof(struct spnic_cmd_rss_hash_key)); - hash_key.func_id = sphw_global_func_id(nic_cfg->hwdev); - hash_key.opcode = opcode; - - if (opcode == SPNIC_CMD_OP_SET) - memcpy(hash_key.key, key, SPNIC_RSS_KEY_SIZE); - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, - SPNIC_NIC_CMD_CFG_RSS_HASH_KEY, - &hash_key, sizeof(hash_key), - &hash_key, &out_size); - if (err || !out_size || hash_key.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to %s hash key, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == SPNIC_CMD_OP_SET ? "set" : "get", - err, hash_key.msg_head.status, out_size); - return -EINVAL; - } - - if (opcode == SPNIC_CMD_OP_GET) - memcpy(key, hash_key.key, SPNIC_RSS_KEY_SIZE); - - return 0; -} - -int spnic_rss_set_hash_key(void *hwdev, const u8 *key) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u8 hash_key[SPNIC_RSS_KEY_SIZE]; - - if (!hwdev || !key) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memcpy(hash_key, key, SPNIC_RSS_KEY_SIZE); - return spnic_rss_cfg_hash_key(nic_cfg, SPNIC_CMD_OP_SET, hash_key); -} - -int spnic_rss_get_hash_key(void *hwdev, u8 *key) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !key) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - return spnic_rss_cfg_hash_key(nic_cfg, SPNIC_CMD_OP_GET, key); -} - -int spnic_rss_get_indir_tbl(void *hwdev, u32 *indir_table) -{ - struct sphw_cmd_buf *cmd_buf = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 *indir_tbl = NULL; - int err, i; - - if (!hwdev || !indir_table) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - cmd_buf = sphw_alloc_cmd_buf(hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd_buf.\n"); - return -ENOMEM; - } - - cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); - err = sphw_cmdq_detail_resp(hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_GET_RSS_INDIR_TABLE, - cmd_buf, cmd_buf, NULL, 0, SPHW_CHANNEL_NIC); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to get rss indir table\n"); - goto get_indir_tbl_failed; - } - - indir_tbl = (u16 *)cmd_buf->buf; - for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) - indir_table[i] = *(indir_tbl + i); - -get_indir_tbl_failed: - sphw_free_cmd_buf(hwdev, cmd_buf); - - return err; -} - -int spnic_rss_set_indir_tbl(void *hwdev, const u32 *indir_table) -{ - struct nic_rss_indirect_tbl *indir_tbl = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - u32 *temp = NULL; - u32 i, size; - u64 out_param = 0; - int err; - - if (!hwdev || !indir_table) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - cmd_buf = 
sphw_alloc_cmd_buf(hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); - indir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf; - memset(indir_tbl, 0, sizeof(*indir_tbl)); - - for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) - indir_tbl->entry[i] = (u16)(*(indir_table + i)); - - size = sizeof(indir_tbl->entry) / sizeof(u32); - temp = (u32 *)indir_tbl->entry; - for (i = 0; i < size; i++) - temp[i] = cpu_to_be32(temp[i]); - - err = sphw_cmdq_direct_resp(hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_SET_RSS_INDIR_TABLE, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - if (err || out_param != 0) { - nic_err(nic_cfg->dev_hdl, "Failed to set rss indir table\n"); - err = -EFAULT; - } - - sphw_free_cmd_buf(hwdev, cmd_buf); - return err; -} - -#define SPNIC_RSS_TYPE_VALID_SHIFT 23 -#define SPNIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 -#define SPNIC_RSS_TYPE_IPV6_EXT_SHIFT 25 -#define SPNIC_RSS_TYPE_TCP_IPV6_SHIFT 26 -#define SPNIC_RSS_TYPE_IPV6_SHIFT 27 -#define SPNIC_RSS_TYPE_TCP_IPV4_SHIFT 28 -#define SPNIC_RSS_TYPE_IPV4_SHIFT 29 -#define SPNIC_RSS_TYPE_UDP_IPV6_SHIFT 30 -#define SPNIC_RSS_TYPE_UDP_IPV4_SHIFT 31 -#define SPNIC_RSS_TYPE_SET(val, member) (((u32)(val) & 0x1) << SPNIC_RSS_TYPE_##member##_SHIFT) - -#define SPNIC_RSS_TYPE_GET(val, member) (((u32)(val) >> SPNIC_RSS_TYPE_##member##_SHIFT) & 0x1) - -int spnic_set_rss_type(void *hwdev, struct nic_rss_type rss_type) -{ - struct nic_rss_context_tbl *ctx_tbl = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - u32 ctx = 0; - u64 out_param = 0; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - cmd_buf = sphw_alloc_cmd_buf(hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - ctx |= SPNIC_RSS_TYPE_SET(1, VALID) | - SPNIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | - SPNIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | - SPNIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | - SPNIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | - SPNIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | - SPNIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | - SPNIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | - SPNIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); - - cmd_buf->size = sizeof(struct nic_rss_context_tbl); - ctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf; - memset(ctx_tbl, 0, sizeof(*ctx_tbl)); - ctx_tbl->ctx = cpu_to_be32(ctx); - - /* cfg the rss context table by command queue */ - err = sphw_cmdq_direct_resp(hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - - sphw_free_cmd_buf(hwdev, cmd_buf); - - if (err || out_param != 0) { - nic_err(nic_cfg->dev_hdl, "Failed to set rss context table, err: %d\n", - err); - return -EFAULT; - } - - return 0; -} - -int spnic_get_rss_type(void *hwdev, struct nic_rss_type *rss_type) -{ - struct spnic_rss_context_table ctx_tbl; - u16 out_size = sizeof(ctx_tbl); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !rss_type) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&ctx_tbl, 0, sizeof(struct spnic_rss_context_table)); - ctx_tbl.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_GET_RSS_CTX_TBL, - &ctx_tbl, sizeof(ctx_tbl), - &ctx_tbl, &out_size); - if (err || !out_size || ctx_tbl.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to get 
hash type, err: %d, status: 0x%x, out size: 0x%x\n", - err, ctx_tbl.msg_head.status, out_size); - return -EINVAL; - } - - rss_type->ipv4 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, IPV4); - rss_type->ipv6 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, IPV6); - rss_type->ipv6_ext = SPNIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT); - rss_type->tcp_ipv4 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4); - rss_type->tcp_ipv6 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6); - rss_type->tcp_ipv6_ext = SPNIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6_EXT); - rss_type->udp_ipv4 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4); - rss_type->udp_ipv6 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6); - - return 0; -} - -static int spnic_rss_cfg_hash_engine(struct spnic_nic_cfg *nic_cfg, u8 opcode, u8 *type) -{ - struct spnic_cmd_rss_engine_type hash_type; - u16 out_size = sizeof(hash_type); - int err; - - memset(&hash_type, 0, sizeof(struct spnic_cmd_rss_engine_type)); - - hash_type.func_id = sphw_global_func_id(nic_cfg->hwdev); - hash_type.opcode = opcode; - - if (opcode == SPNIC_CMD_OP_SET) - hash_type.hash_engine = *type; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_RSS_HASH_ENGINE, - &hash_type, sizeof(hash_type), - &hash_type, &out_size); - if (err || !out_size || hash_type.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to %s hash engine, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == SPNIC_CMD_OP_SET ? "set" : "get", - err, hash_type.msg_head.status, out_size); - return -EIO; - } - - if (opcode == SPNIC_CMD_OP_GET) - *type = hash_type.hash_engine; - - return 0; -} - -int spnic_rss_set_hash_engine(void *hwdev, u8 type) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - return spnic_rss_cfg_hash_engine(nic_cfg, SPNIC_CMD_OP_SET, &type); -} - -int spnic_rss_get_hash_engine(void *hwdev, u8 *type) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !type) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - return spnic_rss_cfg_hash_engine(nic_cfg, SPNIC_CMD_OP_GET, type); -} - -int spnic_rss_cfg(void *hwdev, u8 rss_en, u8 tc_num, u8 *prio_tc, u16 num_qps) -{ - struct spnic_cmd_rss_config rss_cfg; - u16 out_size = sizeof(rss_cfg); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - /* micro code required: number of TC should be power of 2 */ - if (!hwdev || !prio_tc || (tc_num & (tc_num - 1))) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&rss_cfg, 0, sizeof(struct spnic_cmd_rss_config)); - rss_cfg.func_id = sphw_global_func_id(hwdev); - rss_cfg.rss_en = rss_en; - rss_cfg.rq_priority_number = tc_num ? 
(u8)ilog2(tc_num) : 0; - rss_cfg.num_qps = num_qps; - - memcpy(rss_cfg.prio_tc, prio_tc, SPNIC_DCB_UP_MAX); - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_RSS_CFG, - &rss_cfg, sizeof(rss_cfg), - &rss_cfg, &out_size); - if (err || !out_size || rss_cfg.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n", - err, rss_cfg.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rx.c b/drivers/net/ethernet/ramaxel/spnic/spnic_rx.c deleted file mode 100644 index 3ae2f15c727b01ac00dd7ff7269828196a98d0fd..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_rx.c +++ /dev/null @@ -1,1238 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_common.h" -#include "spnic_nic_qp.h" -#include "spnic_nic_io.h" -#include "spnic_nic_dev.h" -#include "spnic_rx.h" -#include "spnic_rss.h" - -static u32 rq_pi_rd_en; -module_param(rq_pi_rd_en, uint, 0644); -MODULE_PARM_DESC(rq_pi_rd_en, "Enable rq read pi from host, defaut update pi by doorbell (default=0)"); - -/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ -#define SPNIC_RX_HDR_SIZE 256 -#define SPNIC_RX_BUFFER_WRITE 16 - -#define SPNIC_RX_TCP_PKT 0x3 -#define SPNIC_RX_UDP_PKT 0x4 -#define SPNIC_RX_SCTP_PKT 0x7 - -#define SPNIC_RX_IPV4_PKT 0 -#define SPNIC_RX_IPV6_PKT 1 -#define SPNIC_RX_INVALID_IP_TYPE 2 - -#define SPNIC_RX_PKT_FORMAT_NON_TUNNEL 0 -#define SPNIC_RX_PKT_FORMAT_VXLAN 1 - -#define RXQ_STATS_INC(rxq, field) \ -do { \ - u64_stats_update_begin(&(rxq)->rxq_stats.syncp); \ - (rxq)->rxq_stats.field++; \ - u64_stats_update_end(&(rxq)->rxq_stats.syncp); \ -} while (0) - -static bool rx_alloc_mapped_page(struct spnic_nic_dev *nic_dev, - struct spnic_rx_info *rx_info) -{ - struct pci_dev *pdev = nic_dev->pdev; - struct page *page = rx_info->page; - dma_addr_t dma = rx_info->buf_dma_addr; - - if (likely(dma)) - return true; - - /* alloc new page for storage */ - page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC | __GFP_COMP, nic_dev->page_order); - if (unlikely(!page)) - return false; - - /* map page for use */ - dma = dma_map_page(&pdev->dev, page, 0, nic_dev->dma_rx_buff_size, DMA_FROM_DEVICE); - - /* if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (unlikely(dma_mapping_error(&pdev->dev, dma))) { - __free_pages(page, nic_dev->page_order); - return false; - } - - rx_info->page = page; - rx_info->buf_dma_addr = dma; - rx_info->page_offset = 0; - - return true; -} - -static u32 spnic_rx_fill_wqe(struct spnic_rxq *rxq) -{ - struct net_device *netdev = rxq->netdev; - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_rq_wqe *rq_wqe = NULL; - struct spnic_rx_info *rx_info = NULL; - u32 i; - - for (i = 0; i < rxq->q_depth; i++) { - rx_info = &rxq->rx_info[i]; - rq_wqe = spnic_rq_wqe_addr(rxq->rq, (u16)i); - - if (rxq->rq->wqe_type == SPNIC_EXTEND_RQ_WQE) { - /* unit of cqe length is 16B */ - sphw_set_sge(&rq_wqe->extend_wqe.cqe_sect.sge, rx_info->cqe_dma, - (sizeof(struct spnic_rq_cqe) >> SPNIC_CQE_SIZE_SHIFT)); - /* use fixed len */ - rq_wqe->extend_wqe.buf_desc.sge.len = - 
nic_dev->rx_buff_len; - } else { - rq_wqe->normal_wqe.cqe_hi_addr = upper_32_bits(rx_info->cqe_dma); - rq_wqe->normal_wqe.cqe_lo_addr = lower_32_bits(rx_info->cqe_dma); - } - - rx_info->rq_wqe = rq_wqe; - } - - return i; -} - -static struct sk_buff *stub_rx_alloc_skb(struct spnic_rxq *rxq, struct spnic_rx_info *rx_info) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); - struct sk_buff *skb = NULL; - u16 random_id; - dma_addr_t addr; - int err; - - get_random_bytes(&random_id, sizeof(u16)); - - rx_info->skb_len = SPNIC_RX_HDR_SIZE + (random_id & 0x3EFF); - skb = netdev_alloc_skb_ip_align(rxq->netdev, rx_info->skb_len); - if (!skb) { - nicif_err(nic_dev, drv, rxq->netdev, "Failed to allocate Rx SKB\n"); - return NULL; - } - - addr = dma_map_single(&nic_dev->pdev->dev, skb->data, rx_info->skb_len, - DMA_FROM_DEVICE); - - err = dma_mapping_error(&nic_dev->pdev->dev, addr); - if (err) { - nicif_err(nic_dev, drv, rxq->netdev, "Failed to map Rx DMA, err = %d\n", err); - goto err_rx_map; - } - - rx_info->buf_dma_addr = addr; - - return skb; - -err_rx_map: - dev_kfree_skb_any(skb); - return NULL; -} - -static u32 stub_spnic_rx_fill_buffers(struct spnic_rxq *rxq) -{ - struct net_device *netdev = rxq->netdev; - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u32 i, free_wqebbs = rxq->delta - 1; - struct spnic_rq_wqe *rq_wqe = NULL; - struct spnic_rx_info *rx_info = NULL; - struct sk_buff *skb = NULL; - dma_addr_t dma_addr; - - for (i = 0; i < free_wqebbs; i++) { - rx_info = &rxq->rx_info[rxq->next_to_update]; - - skb = stub_rx_alloc_skb(rxq, rx_info); - if (!skb) { - nicif_err(nic_dev, drv, rxq->netdev, "Failed to alloc Rx skb\n"); - break; - } - - rq_wqe = rx_info->rq_wqe; - rx_info->saved_skb = skb; - dma_addr = rx_info->buf_dma_addr; - - if (rxq->rq->wqe_type == SPNIC_EXTEND_RQ_WQE) { - rq_wqe->extend_wqe.buf_desc.sge.hi_addr = upper_32_bits(dma_addr); - rq_wqe->extend_wqe.buf_desc.sge.lo_addr = lower_32_bits(dma_addr); - rq_wqe->extend_wqe.buf_desc.sge.len = rx_info->skb_len; - } else { - rq_wqe->normal_wqe.buf_hi_addr = upper_32_bits(dma_addr); - rq_wqe->normal_wqe.buf_lo_addr = lower_32_bits(dma_addr); - } - rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask; - } - - if (likely(i)) { - if (!rq_pi_rd_en) { - spnic_write_db(rxq->rq, rxq->q_id & (SPNIC_DCB_COS_MAX - 1), RQ_CFLAG_DP, - (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type)); - } else { - /* Write all the wqes before pi update */ - wmb(); - - spnic_update_rq_hw_pi(rxq->rq, rxq->next_to_update); - } - rxq->delta -= i; - rxq->next_to_alloc = rxq->next_to_update; - } else { - nicif_err(nic_dev, drv, netdev, "Failed to allocate rx buffers, rxq id: %u\n", - rxq->q_id); - } - - return i; -} - -static u32 spnic_rx_fill_buffers(struct spnic_rxq *rxq) -{ - struct net_device *netdev = rxq->netdev; - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_rq_wqe *rq_wqe = NULL; - struct spnic_rx_info *rx_info = NULL; - dma_addr_t dma_addr; - u32 i, free_wqebbs = rxq->delta - 1; - - for (i = 0; i < free_wqebbs; i++) { - rx_info = &rxq->rx_info[rxq->next_to_update]; - - if (unlikely(!rx_alloc_mapped_page(nic_dev, rx_info))) { - RXQ_STATS_INC(rxq, alloc_rx_buf_err); - break; - } - - dma_addr = rx_info->buf_dma_addr + rx_info->page_offset; - - rq_wqe = rx_info->rq_wqe; - - if (rxq->rq->wqe_type == SPNIC_EXTEND_RQ_WQE) { - rq_wqe->extend_wqe.buf_desc.sge.hi_addr = upper_32_bits(dma_addr); - rq_wqe->extend_wqe.buf_desc.sge.lo_addr = lower_32_bits(dma_addr); - } else { - rq_wqe->normal_wqe.buf_hi_addr = 
upper_32_bits(dma_addr); - rq_wqe->normal_wqe.buf_lo_addr = lower_32_bits(dma_addr); - } - rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask; - } - - if (likely(i)) { - if (!rq_pi_rd_en) { - spnic_write_db(rxq->rq, rxq->q_id & (SPNIC_DCB_COS_MAX - 1), RQ_CFLAG_DP, - (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type)); - } else { - /* Write all the wqes before pi update */ - wmb(); - - spnic_update_rq_hw_pi(rxq->rq, rxq->next_to_update); - } - rxq->delta -= i; - rxq->next_to_alloc = rxq->next_to_update; - } else if (free_wqebbs == rxq->q_depth - 1) { - RXQ_STATS_INC(rxq, rx_buf_empty); - } - - return i; -} - -static u32 spnic_rx_alloc_buffers(struct spnic_nic_dev *nic_dev, u32 rq_depth, - struct spnic_rx_info *rx_info_arr) -{ - u32 free_wqebbs = rq_depth - 1; - u32 idx; - - for (idx = 0; idx < free_wqebbs; idx++) { - if (!rx_alloc_mapped_page(nic_dev, &rx_info_arr[idx])) - break; - } - - return idx; -} - -void spnic_rx_free_buffers(struct spnic_nic_dev *nic_dev, u32 q_depth, - struct spnic_rx_info *rx_info_arr) -{ - struct spnic_rx_info *rx_info = NULL; - u32 i; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < q_depth; i++) { - rx_info = &rx_info_arr[i]; - - if (rx_info->buf_dma_addr) { - dma_unmap_page(&nic_dev->pdev->dev, rx_info->buf_dma_addr, - nic_dev->dma_rx_buff_size, DMA_FROM_DEVICE); - rx_info->buf_dma_addr = 0; - } - - if (rx_info->page) { - __free_pages(rx_info->page, nic_dev->page_order); - rx_info->page = NULL; - } - } -} - -void stub_spnic_rx_free_buffers(struct spnic_rxq *rxq) -{ - struct spnic_rx_info *rx_info = NULL; - u32 i; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rxq->q_depth; i++) { - rx_info = &rxq->rx_info[i]; - - if (rx_info->buf_dma_addr) { - dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, rx_info->skb_len, - DMA_FROM_DEVICE); - rx_info->buf_dma_addr = 0; - } - - if (rx_info->saved_skb) { - dev_kfree_skb_any(rx_info->saved_skb); - rx_info->saved_skb = NULL; - } - } -} - -static void spnic_reuse_rx_page(struct spnic_rxq *rxq, struct spnic_rx_info *old_rx_info) -{ - struct spnic_rx_info *new_rx_info; - u16 nta = rxq->next_to_alloc; - - new_rx_info = &rxq->rx_info[nta]; - - /* update, and store next to alloc */ - nta++; - rxq->next_to_alloc = (nta < rxq->q_depth) ? 
nta : 0; - - new_rx_info->page = old_rx_info->page; - new_rx_info->page_offset = old_rx_info->page_offset; - new_rx_info->buf_dma_addr = old_rx_info->buf_dma_addr; - - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rxq->dev, new_rx_info->buf_dma_addr, - new_rx_info->page_offset, rxq->buf_len, - DMA_FROM_DEVICE); -} - -static bool spnic_add_rx_frag(struct spnic_rxq *rxq, struct spnic_rx_info *rx_info, - struct sk_buff *skb, u32 size) -{ - struct page *page; - u8 *va; - - page = rx_info->page; - va = (u8 *)page_address(page) + rx_info->page_offset; - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif - - dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr, - rx_info->page_offset, - rxq->buf_len, DMA_FROM_DEVICE); - - if (size <= SPNIC_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* page is not reserved, we can reuse buffer as-is */ - if (likely(page_to_nid(page) == numa_node_id())) - return true; - - /* this page cannot be reused so discard it */ - put_page(page); - return false; - } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - (int)rx_info->page_offset, (int)size, rxq->buf_len); - - /* avoid re-using remote pages */ - if (unlikely(page_to_nid(page) != numa_node_id())) - return false; - - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; - - /* flip page offset to other buffer */ - rx_info->page_offset ^= rxq->buf_len; - get_page(page); - - return true; -} - -static void packaging_skb(struct spnic_rxq *rxq, struct sk_buff *head_skb, u8 sge_num, u32 pkt_len) -{ - struct spnic_rx_info *rx_info = NULL; - struct sk_buff *skb = NULL; - u8 frag_num = 0; - u32 size; - u32 sw_ci; - - sw_ci = rxq->cons_idx & rxq->q_mask; - skb = head_skb; - while (sge_num) { - rx_info = &rxq->rx_info[sw_ci]; - sw_ci = (sw_ci + 1) & rxq->q_mask; - if (unlikely(pkt_len > rxq->buf_len)) { - size = rxq->buf_len; - pkt_len -= rxq->buf_len; - } else { - size = pkt_len; - } - - if (unlikely(frag_num == MAX_SKB_FRAGS)) { - frag_num = 0; - if (skb == head_skb) - skb = skb_shinfo(skb)->frag_list; - else - skb = skb->next; - } - - if (unlikely(skb != head_skb)) { - head_skb->len += size; - head_skb->data_len += size; - head_skb->truesize += rxq->buf_len; - } - - if (likely(spnic_add_rx_frag(rxq, rx_info, skb, size))) { - spnic_reuse_rx_page(rxq, rx_info); - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, - rxq->dma_rx_buff_size, DMA_FROM_DEVICE); - } - /* clear contents of buffer_info */ - rx_info->buf_dma_addr = 0; - rx_info->page = NULL; - sge_num--; - frag_num++; - } -} - -#define SPNIC_GET_SGE_NUM(pkt_len, rxq) \ - ((u8)(((pkt_len) >> (rxq)->rx_buff_shift) + \ - (((pkt_len) & ((rxq)->buf_len - 1)) ? 1 : 0))) - -static struct sk_buff *spnic_fetch_rx_buffer(struct spnic_rxq *rxq, u32 pkt_len) -{ - struct sk_buff *head_skb = NULL; - struct sk_buff *cur_skb = NULL; - struct sk_buff *skb = NULL; - struct net_device *netdev = rxq->netdev; - u8 sge_num, skb_num; - u16 wqebb_cnt = 0; - - head_skb = netdev_alloc_skb_ip_align(netdev, SPNIC_RX_HDR_SIZE); - if (unlikely(!head_skb)) - return NULL; - - sge_num = SPNIC_GET_SGE_NUM(pkt_len, rxq); - if (likely(sge_num <= MAX_SKB_FRAGS)) - skb_num = 1; - else - skb_num = (sge_num / MAX_SKB_FRAGS) + ((sge_num % MAX_SKB_FRAGS) ? 
1 : 0); - - while (unlikely(skb_num > 1)) { - cur_skb = netdev_alloc_skb_ip_align(netdev, SPNIC_RX_HDR_SIZE); - if (unlikely(!cur_skb)) - goto alloc_skb_fail; - - if (!skb) { - skb_shinfo(head_skb)->frag_list = cur_skb; - skb = cur_skb; - } else { - skb->next = cur_skb; - skb = cur_skb; - } - - skb_num--; - } - - prefetchw(head_skb->data); - wqebb_cnt = sge_num; - - packaging_skb(rxq, head_skb, sge_num, pkt_len); - - rxq->cons_idx += wqebb_cnt; - rxq->delta += wqebb_cnt; - - return head_skb; - -alloc_skb_fail: - dev_kfree_skb_any(head_skb); - return NULL; -} - -void spnic_rxq_get_stats(struct spnic_rxq *rxq, struct spnic_rxq_stats *stats) -{ - struct spnic_rxq_stats *rxq_stats = &rxq->rxq_stats; - unsigned int start; - - u64_stats_update_begin(&stats->syncp); - do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - stats->bytes = rxq_stats->bytes; - stats->packets = rxq_stats->packets; - stats->errors = rxq_stats->csum_errors + - rxq_stats->other_errors; - stats->csum_errors = rxq_stats->csum_errors; - stats->other_errors = rxq_stats->other_errors; - stats->dropped = rxq_stats->dropped; - stats->xdp_dropped = rxq_stats->xdp_dropped; - stats->rx_buf_empty = rxq_stats->rx_buf_empty; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); - u64_stats_update_end(&stats->syncp); -} - -void spnic_rxq_clean_stats(struct spnic_rxq_stats *rxq_stats) -{ - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->bytes = 0; - rxq_stats->packets = 0; - rxq_stats->errors = 0; - rxq_stats->csum_errors = 0; - rxq_stats->other_errors = 0; - rxq_stats->dropped = 0; - rxq_stats->xdp_dropped = 0; - rxq_stats->rx_buf_empty = 0; - - rxq_stats->alloc_skb_err = 0; - rxq_stats->alloc_rx_buf_err = 0; - rxq_stats->xdp_large_pkt = 0; - u64_stats_update_end(&rxq_stats->syncp); -} - -static void rxq_stats_init(struct spnic_rxq *rxq) -{ - struct spnic_rxq_stats *rxq_stats = &rxq->rxq_stats; - - u64_stats_init(&rxq_stats->syncp); - spnic_rxq_clean_stats(rxq_stats); -} - -static void spnic_pull_tail(struct sk_buff *skb) -{ - skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va = NULL; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. 
- */ - pull_len = eth_get_headlen(skb->dev, va, SPNIC_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, (int)pull_len); - skb_frag_off_add(frag, (int)pull_len); - - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -static void spnic_rx_csum(struct spnic_rxq *rxq, u32 offload_type, u32 status, struct sk_buff *skb) -{ - struct net_device *netdev = rxq->netdev; - u32 pkt_type = SPNIC_GET_RX_PKT_TYPE(offload_type); - u32 ip_type = SPNIC_GET_RX_IP_TYPE(offload_type); - u32 pkt_fmt = SPNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type); - - u32 csum_err; - - csum_err = SPNIC_GET_RX_CSUM_ERR(status); - - if (unlikely(csum_err == SPNIC_RX_CSUM_IPSU_OTHER_ERR)) - rxq->rxq_stats.other_errors++; - - if (!(netdev->features & NETIF_F_RXCSUM)) - return; - - if (unlikely(csum_err)) { - /* pkt type is recognized by HW, and csum is wrong */ - if (!(csum_err & (SPNIC_RX_CSUM_HW_CHECK_NONE | SPNIC_RX_CSUM_IPSU_OTHER_ERR))) - rxq->rxq_stats.csum_errors++; - skb->ip_summed = CHECKSUM_NONE; - return; - } - - if (ip_type == SPNIC_RX_INVALID_IP_TYPE || - !(pkt_fmt == SPNIC_RX_PKT_FORMAT_NON_TUNNEL || - pkt_fmt == SPNIC_RX_PKT_FORMAT_VXLAN)) { - skb->ip_summed = CHECKSUM_NONE; - return; - } - - switch (pkt_type) { - case SPNIC_RX_TCP_PKT: - case SPNIC_RX_UDP_PKT: - case SPNIC_RX_SCTP_PKT: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - skb->ip_summed = CHECKSUM_NONE; - break; - } -} - -static void spnic_rx_gro(struct spnic_rxq *rxq, u32 offload_type, struct sk_buff *skb) -{ - struct net_device *netdev = rxq->netdev; - bool l2_tunnel = false; - - if (!(netdev->features & NETIF_F_GRO)) - return; - - l2_tunnel = - SPNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) == SPNIC_RX_PKT_FORMAT_VXLAN ? 1 : 0; - - if (l2_tunnel && skb->ip_summed == CHECKSUM_UNNECESSARY) - /* If we checked the outer header let the stack know */ - skb->csum_level = 1; -} - -static void spnic_copy_lp_data(struct spnic_nic_dev *nic_dev, struct sk_buff *skb) -{ - struct net_device *netdev = nic_dev->netdev; - u8 *lb_buf = nic_dev->lb_test_rx_buf; - void *frag_data = NULL; - int lb_len = nic_dev->lb_pkt_len; - int pkt_offset, frag_len, i; - - if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) { - nic_dev->lb_test_rx_idx = 0; - nicif_warn(nic_dev, rx_err, netdev, "Loopback test warning, receive too many test pkts\n"); - } - - if (skb->len != nic_dev->lb_pkt_len) { - nicif_warn(nic_dev, rx_err, netdev, "Wrong packet length\n"); - nic_dev->lb_test_rx_idx++; - return; - } - - pkt_offset = nic_dev->lb_test_rx_idx * lb_len; - frag_len = (int)skb_headlen(skb); - memcpy(lb_buf + pkt_offset, skb->data, frag_len); - - pkt_offset += frag_len; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]); - frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]); - memcpy(lb_buf + pkt_offset, frag_data, frag_len); - - pkt_offset += frag_len; - } - nic_dev->lb_test_rx_idx++; -} - -static inline void spnic_lro_set_gso_params(struct sk_buff *skb, u16 num_lro) -{ - struct ethhdr *eth = (struct ethhdr *)(skb->data); - __be16 proto; - - proto = __vlan_get_protocol(skb, eth->h_proto, NULL); - - skb_shinfo(skb)->gso_size = (u16)DIV_ROUND_UP((skb->len - skb_headlen(skb)), num_lro); - skb_shinfo(skb)->gso_type = (proto == htons(ETH_P_IP)) ? 
SKB_GSO_TCPV4 : SKB_GSO_TCPV6; -} - -enum spnic_xdp_pkt { - SPNIC_XDP_PKT_PASS, - SPNIC_XDP_PKT_DROP, -}; - -static inline void update_drop_rx_info(struct spnic_rxq *rxq, u16 weqbb_num) -{ - struct spnic_rx_info *rx_info = NULL; - - while (weqbb_num) { - rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; - if (likely(page_to_nid(rx_info->page) == numa_node_id())) - spnic_reuse_rx_page(rxq, rx_info); - - rx_info->buf_dma_addr = 0; - rx_info->page = NULL; - rxq->cons_idx++; - rxq->delta++; - - weqbb_num--; - } -} - -int spnic_run_xdp(struct spnic_rxq *rxq, u32 pkt_len) -{ - struct bpf_prog *xdp_prog = NULL; - struct spnic_rx_info *rx_info = NULL; - struct xdp_buff xdp; - int result = SPNIC_XDP_PKT_PASS; - u16 weqbb_num = 1; /* xdp can only use one rx_buff */ - u8 *va = NULL; - u32 act; - - rcu_read_lock(); - xdp_prog = READ_ONCE(rxq->xdp_prog); - if (!xdp_prog) - goto unlock_rcu; - - if (unlikely(pkt_len > rxq->buf_len)) { - RXQ_STATS_INC(rxq, xdp_large_pkt); - weqbb_num = (u16)(pkt_len >> rxq->rx_buff_shift) + - ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); - result = SPNIC_XDP_PKT_DROP; - goto xdp_out; - } - - rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; - va = (u8 *)page_address(rx_info->page) + rx_info->page_offset; - prefetch(va); - dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr, rx_info->page_offset, - rxq->buf_len, DMA_FROM_DEVICE); - xdp.data = va; - xdp.data_hard_start = xdp.data; - xdp.data_end = xdp.data + pkt_len; - xdp.frame_sz = rxq->buf_len; - xdp_set_data_meta_invalid(&xdp); - prefetchw(xdp.data_hard_start); - act = bpf_prog_run_xdp(xdp_prog, &xdp); - switch (act) { - case XDP_PASS: - break; - case XDP_DROP: - result = SPNIC_XDP_PKT_DROP; - break; - default: - result = SPNIC_XDP_PKT_DROP; - bpf_warn_invalid_xdp_action(act); - } - -xdp_out: - if (result == SPNIC_XDP_PKT_DROP) { - RXQ_STATS_INC(rxq, xdp_dropped); - update_drop_rx_info(rxq, weqbb_num); - } - -unlock_rcu: - rcu_read_unlock(); - - return result; -} - -int recv_one_pkt(struct spnic_rxq *rxq, struct spnic_rq_cqe *rx_cqe, - u32 pkt_len, u32 vlan_len, u32 status) -{ - struct sk_buff *skb; - struct net_device *netdev = rxq->netdev; - u32 offload_type; - u16 num_lro; - struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); - - u32 xdp_status; - - xdp_status = spnic_run_xdp(rxq, pkt_len); - if (xdp_status == SPNIC_XDP_PKT_DROP) - return 0; - - skb = spnic_fetch_rx_buffer(rxq, pkt_len); - if (unlikely(!skb)) { - RXQ_STATS_INC(rxq, alloc_skb_err); - return -ENOMEM; - } - - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - spnic_pull_tail(skb); - - offload_type = rx_cqe->offload_type; - spnic_rx_csum(rxq, offload_type, status, skb); - - spnic_rx_gro(rxq, offload_type, skb); - - if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - SPNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) { - u16 vid = SPNIC_GET_RX_VLAN_TAG(vlan_len); - - /* if the packet is a vlan pkt, the vid may be 0 */ - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); - } - - if (unlikely(test_bit(SPNIC_LP_TEST, &nic_dev->flags))) - spnic_copy_lp_data(nic_dev, skb); - - num_lro = SPNIC_GET_RX_NUM_LRO(status); - if (num_lro) - spnic_lro_set_gso_params(skb, num_lro); - - skb_record_rx_queue(skb, rxq->q_id); - skb->protocol = eth_type_trans(skb, netdev); - - if (skb_has_frag_list(skb)) { - napi_gro_flush(&rxq->irq_cfg->napi, false); - netif_receive_skb(skb); - } else { - napi_gro_receive(&rxq->irq_cfg->napi, skb); - } - - return 0; -} - -void rx_pass_super_cqe(struct spnic_rxq *rxq, u32 index, u32 pkt_num, 
struct spnic_rq_cqe *cqe) -{ - u8 sge_num = 0; - u32 pkt_len; - - while (index < pkt_num) { - pkt_len = spnic_get_pkt_len_for_super_cqe(cqe, index == (pkt_num - 1)); - sge_num += (u8)(pkt_len >> rxq->rx_buff_shift) + - ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); - index++; - } - - rxq->cons_idx += sge_num; - rxq->delta += sge_num; -} - -static inline int recv_supper_cqe(struct spnic_rxq *rxq, struct spnic_rq_cqe *rx_cqe, u32 pkt_info, - u32 vlan_len, u32 status, int *pkts, u64 *rx_bytes, u32 *dropped) -{ - u32 pkt_len; - int i, pkt_num = 0; - - pkt_num = SPNIC_GET_RQ_CQE_PKT_NUM(pkt_info); - i = 0; - while (i < pkt_num) { - pkt_len = ((i == (pkt_num - 1)) ? - RQ_CQE_PKT_LEN_GET(pkt_info, LAST_LEN) : - RQ_CQE_PKT_LEN_GET(pkt_info, FIRST_LEN)); - if (unlikely(recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))) { - if (i) { - rx_pass_super_cqe(rxq, i, pkt_num, rx_cqe); - *dropped += (pkt_num - i); - } - break; - } - - *rx_bytes += pkt_len; - (*pkts)++; - i++; - } - - if (!i) - return -EFAULT; - - return 0; -} - -#define LRO_PKT_HDR_LEN_IPV4 66 -#define LRO_PKT_HDR_LEN_IPV6 86 -#define LRO_PKT_HDR_LEN(cqe) \ - (SPNIC_GET_RX_IP_TYPE((cqe)->offload_type) == \ - SPNIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4) - -static void stub_rx_recv_jumbo_pkt(struct spnic_rxq *rxq, struct sk_buff *head_skb, - unsigned int left_pkt_len) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); - struct sk_buff *skb = NULL; - struct sk_buff *curr_skb = head_skb; - struct spnic_rx_info *rx_info = NULL; - unsigned int curr_len; - - while (left_pkt_len > 0) { - rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; - skb = rx_info->saved_skb; - - dma_unmap_single(&nic_dev->pdev->dev, rx_info->buf_dma_addr, - rx_info->skb_len, DMA_FROM_DEVICE); - - rx_info->buf_dma_addr = 0; - rx_info->saved_skb = NULL; - - prefetch(skb->data); - - curr_len = (left_pkt_len > rx_info->skb_len) ? 
rx_info->skb_len : left_pkt_len; - - left_pkt_len -= curr_len; - - __skb_put(skb, curr_len); - - if (curr_skb == head_skb) - skb_shinfo(head_skb)->frag_list = skb; - else - curr_skb->next = skb; - - head_skb->len += skb->len; - head_skb->data_len += skb->len; - head_skb->truesize += skb->truesize; - - curr_skb = skb; - rxq->cons_idx++; - rxq->delta++; - } -} - -int stub_spnic_rx_poll(struct spnic_rxq *rxq, int budget) -{ - u32 sw_ci, status, pkt_len, vlan_len = 0; - struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); - struct spnic_rq_cqe *rx_cqe = NULL; - u64 rx_bytes = 0; - int pkts = 0; - struct sk_buff *skb = NULL; - struct spnic_rx_info *rx_info = NULL; - u32 offload_type; - - while (likely(pkts < budget)) { - sw_ci = rxq->cons_idx & rxq->q_mask; - rx_info = &rxq->rx_info[sw_ci]; - rx_cqe = rxq->rx_info[sw_ci].cqe; - status = be32_to_cpu(rx_cqe->status); - - if (!SPNIC_GET_RX_DONE(status)) - break; - - /* make sure we read rx_done before packet length */ - rmb(); - - vlan_len = be32_to_cpu(rx_cqe->vlan_len); - pkt_len = SPNIC_GET_RX_PKT_LEN(vlan_len); - skb = rx_info->saved_skb; - - dma_unmap_single(&nic_dev->pdev->dev, rx_info->buf_dma_addr, - rx_info->skb_len, DMA_FROM_DEVICE); - - rx_info->buf_dma_addr = 0; - rx_info->saved_skb = NULL; - - rxq->cons_idx++; - rxq->delta++; - - if (pkt_len <= rx_info->skb_len) { - __skb_put(skb, pkt_len); - } else { - __skb_put(skb, rx_info->skb_len); - stub_rx_recv_jumbo_pkt(rxq, skb, pkt_len - rx_info->skb_len); - } - - offload_type = be32_to_cpu(rx_cqe->offload_type); - spnic_rx_csum(rxq, offload_type, status, skb); - - spnic_rx_gro(rxq, offload_type, skb); - - skb_record_rx_queue(skb, rxq->q_id); - skb->protocol = eth_type_trans(skb, rxq->netdev); - - if (skb_has_frag_list(skb)) { - napi_gro_flush(&rxq->irq_cfg->napi, false); - netif_receive_skb(skb); - } else { - napi_gro_receive(&rxq->irq_cfg->napi, skb); - } - rx_bytes += pkt_len; - pkts++; - rx_cqe->status = 0; - } - - if (rxq->delta >= SPNIC_RX_BUFFER_WRITE) - stub_spnic_rx_fill_buffers(rxq); - - u64_stats_update_begin(&rxq->rxq_stats.syncp); - rxq->rxq_stats.packets += pkts; - rxq->rxq_stats.bytes += rx_bytes; - u64_stats_update_end(&rxq->rxq_stats.syncp); - return pkts; -} - -int spnic_rx_poll(struct spnic_rxq *rxq, int budget) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); - u32 sw_ci, status, pkt_len, vlan_len, pkt_info, dropped = 0; - struct spnic_rq_cqe *rx_cqe = NULL; - u64 rx_bytes = 0; - u16 num_lro; - int pkts = 0, nr_pkts = 0; - u16 num_wqe = 0; - - while (likely(pkts < budget)) { - sw_ci = rxq->cons_idx & rxq->q_mask; - rx_cqe = rxq->rx_info[sw_ci].cqe; - status = rx_cqe->status; - - if (!SPNIC_GET_RX_DONE(status)) - break; - - /* make sure we read rx_done before packet length */ - rmb(); - - vlan_len = rx_cqe->vlan_len; - pkt_info = rx_cqe->pkt_info; - pkt_len = SPNIC_GET_RX_PKT_LEN(vlan_len); - - if (unlikely(SPNIC_GET_SUPER_CQE_EN(pkt_info))) { - if (unlikely(recv_supper_cqe(rxq, rx_cqe, pkt_info, vlan_len, status, &pkts, - &rx_bytes, &dropped))) - break; - nr_pkts += (int)SPNIC_GET_RQ_CQE_PKT_NUM(pkt_info); - } else { - if (recv_one_pkt(rxq, rx_cqe, pkt_len, - vlan_len, status)) - break; - rx_bytes += pkt_len; - pkts++; - nr_pkts++; - - num_lro = SPNIC_GET_RX_NUM_LRO(status); - if (num_lro) { - rx_bytes += ((num_lro - 1) * LRO_PKT_HDR_LEN(rx_cqe)); - num_wqe += SPNIC_GET_SGE_NUM(pkt_len, rxq); - } - } - - rx_cqe->status = 0; - - if (num_wqe >= nic_dev->lro_replenish_thld) - break; - } - - if (rxq->delta >= SPNIC_RX_BUFFER_WRITE) - 
spnic_rx_fill_buffers(rxq); - - u64_stats_update_begin(&rxq->rxq_stats.syncp); - rxq->rxq_stats.packets += nr_pkts; - rxq->rxq_stats.bytes += rx_bytes; - rxq->rxq_stats.dropped += dropped; - u64_stats_update_end(&rxq->rxq_stats.syncp); - return pkts; -} - -int spnic_alloc_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res) -{ - struct spnic_dyna_rxq_res *rqres = NULL; - u64 cqe_mem_size = sizeof(struct spnic_rq_cqe) * rq_depth; - int idx, i; - u32 pkts; - u64 size; - - for (idx = 0; idx < num_rq; idx++) { - rqres = &rxqs_res[idx]; - size = sizeof(*rqres->rx_info) * rq_depth; - rqres->rx_info = kzalloc(size, GFP_KERNEL); - if (!rqres->rx_info) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc rxq%d rx info\n", idx); - goto err_out; - } - - rqres->cqe_start_vaddr = - dma_alloc_coherent(&nic_dev->pdev->dev, cqe_mem_size, - &rqres->cqe_start_paddr, GFP_KERNEL); - if (!rqres->cqe_start_vaddr) { - kfree(rqres->rx_info); - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc rxq%d cqe\n", idx); - goto err_out; - } - - pkts = spnic_rx_alloc_buffers(nic_dev, rq_depth, rqres->rx_info); - if (!pkts) { - dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, - rqres->cqe_start_vaddr, - rqres->cqe_start_paddr); - kfree(rqres->rx_info); - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc rxq%d rx buffers\n", idx); - goto err_out; - } - rqres->next_to_alloc = (u16)pkts; - } - return 0; - -err_out: - for (i = 0; i < idx; i++) { - rqres = &rxqs_res[i]; - - spnic_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info); - dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, - rqres->cqe_start_vaddr, rqres->cqe_start_paddr); - kfree(rqres->rx_info); - } - - return -ENOMEM; -} - -void spnic_free_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res) -{ - struct spnic_dyna_rxq_res *rqres = NULL; - u64 cqe_mem_size = sizeof(struct spnic_rq_cqe) * rq_depth; - int idx; - - for (idx = 0; idx < num_rq; idx++) { - rqres = &rxqs_res[idx]; - - spnic_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info); - dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, rqres->cqe_start_vaddr, - rqres->cqe_start_paddr); - kfree(rqres->rx_info); - } -} - -int spnic_configure_rxqs(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res) -{ - struct spnic_dyna_rxq_res *rqres = NULL; - struct irq_info *msix_entry = NULL; - struct spnic_rxq *rxq = NULL; - struct spnic_rq_cqe *cqe_va = NULL; - dma_addr_t cqe_pa; - u16 q_id; - u32 idx; - u32 pkts; - - for (q_id = 0; q_id < num_rq; q_id++) { - rxq = &nic_dev->rxqs[q_id]; - rqres = &rxqs_res[q_id]; - msix_entry = &nic_dev->qps_irq_info[q_id]; - - rxq->irq_id = msix_entry->irq_id; - rxq->msix_entry_idx = msix_entry->msix_entry_idx; - rxq->next_to_update = 0; - rxq->next_to_alloc = rqres->next_to_alloc; - rxq->q_depth = rq_depth; - rxq->delta = rxq->q_depth; - rxq->q_mask = rxq->q_depth - 1; - rxq->cons_idx = 0; - - rxq->rx_info = rqres->rx_info; - - /* fill cqe */ - cqe_va = (struct spnic_rq_cqe *)rqres->cqe_start_vaddr; - cqe_pa = rqres->cqe_start_paddr; - for (idx = 0; idx < rq_depth; idx++) { - rxq->rx_info[idx].cqe = cqe_va; - rxq->rx_info[idx].cqe_dma = cqe_pa; - cqe_va++; - cqe_pa += sizeof(*rxq->rx_info->cqe); - } - - rxq->rq = spnic_get_nic_queue(nic_dev->hwdev, rxq->q_id, SPNIC_RQ); - if (!rxq->rq) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rq\n"); - return -EINVAL; - } - - pkts = spnic_rx_fill_wqe(rxq); - 
if (pkts != rxq->q_depth) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to fill rx wqe\n"); - return -EFAULT; - } - - pkts = spnic_rx_fill_buffers(rxq); - if (!pkts) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to fill Rx buffer\n"); - return -ENOMEM; - } - } - - return 0; -} - -void spnic_free_rxqs(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - kfree(nic_dev->rxqs); -} - -int spnic_alloc_rxqs(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct pci_dev *pdev = nic_dev->pdev; - struct spnic_rxq *rxq = NULL; - u16 num_rxqs = nic_dev->max_qps; - u16 q_id; - u64 rxq_size; - - rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); - if (!rxq_size) { - nic_err(&pdev->dev, "Cannot allocate zero size rxqs\n"); - return -EINVAL; - } - - nic_dev->rxqs = kzalloc(rxq_size, GFP_KERNEL); - if (!nic_dev->rxqs) { - nic_err(&pdev->dev, "Failed to allocate rxqs\n"); - return -ENOMEM; - } - - for (q_id = 0; q_id < num_rxqs; q_id++) { - rxq = &nic_dev->rxqs[q_id]; - rxq->netdev = netdev; - rxq->dev = &pdev->dev; - rxq->q_id = q_id; - rxq->buf_len = nic_dev->rx_buff_len; - rxq->rx_buff_shift = ilog2(nic_dev->rx_buff_len); - rxq->dma_rx_buff_size = nic_dev->dma_rx_buff_size; - rxq->q_depth = nic_dev->q_params.rq_depth; - rxq->q_mask = nic_dev->q_params.rq_depth - 1; - - rxq_stats_init(rxq); - } - - return 0; -} - -int spnic_rx_configure(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err; - - if (test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - err = spnic_rss_init(nic_dev); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to init rss\n"); - return -EFAULT; - } - } - - return 0; -} - -void spnic_rx_remove_configure(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) - spnic_rss_deinit(nic_dev); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rx.h b/drivers/net/ethernet/ramaxel/spnic/spnic_rx.h deleted file mode 100644 index 564b5765a41b3fc28ece2c351ede194e14dcbfa2..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_rx.h +++ /dev/null @@ -1,118 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_RX_H -#define SPNIC_RX_H - -#include - -/*rx cqe checksum err*/ -#define SPNIC_RX_CSUM_IP_CSUM_ERR BIT(0) -#define SPNIC_RX_CSUM_TCP_CSUM_ERR BIT(1) -#define SPNIC_RX_CSUM_UDP_CSUM_ERR BIT(2) -#define SPNIC_RX_CSUM_IGMP_CSUM_ERR BIT(3) -#define SPNIC_RX_CSUM_ICMPV4_CSUM_ERR BIT(4) -#define SPNIC_RX_CSUM_ICMPV6_CSUM_ERR BIT(5) -#define SPNIC_RX_CSUM_SCTP_CRC_ERR BIT(6) -#define SPNIC_RX_CSUM_HW_CHECK_NONE BIT(7) -#define SPNIC_RX_CSUM_IPSU_OTHER_ERR BIT(8) - -#define SPNIC_HEADER_DATA_UNIT 2 - -struct spnic_rxq_stats { - u64 packets; - u64 bytes; - u64 errors; - u64 csum_errors; - u64 other_errors; - u64 dropped; - u64 xdp_dropped; - u64 rx_buf_empty; - - u64 alloc_skb_err; - u64 alloc_rx_buf_err; - u64 xdp_large_pkt; - struct u64_stats_sync syncp; -}; - -struct spnic_rx_info { - dma_addr_t buf_dma_addr; - - struct spnic_rq_cqe *cqe; - dma_addr_t cqe_dma; - struct page *page; - u32 page_offset; - struct spnic_rq_wqe *rq_wqe; - struct sk_buff *saved_skb; - u32 skb_len; -}; - -struct spnic_rxq { - struct net_device *netdev; - - u16 q_id; - u32 q_depth; - u32 q_mask; - - u16 buf_len; - u32 rx_buff_shift; - u32 dma_rx_buff_size; - - struct spnic_rxq_stats rxq_stats; - u32 cons_idx; - u32 
delta; - - u32 irq_id; - u16 msix_entry_idx; - - struct spnic_rx_info *rx_info; - struct spnic_io_queue *rq; - struct bpf_prog *xdp_prog; - - struct spnic_irq *irq_cfg; - u16 next_to_alloc; - u16 next_to_update; - struct device *dev; /* device for DMA mapping */ - - unsigned long status; - dma_addr_t cqe_start_paddr; - void *cqe_start_vaddr; - - u64 last_moder_packets; - u64 last_moder_bytes; - u8 last_coalesc_timer_cfg; - u8 last_pending_limt; -} ____cacheline_aligned; - -struct spnic_dyna_rxq_res { - u16 next_to_alloc; - struct spnic_rx_info *rx_info; - dma_addr_t cqe_start_paddr; - void *cqe_start_vaddr; -}; - -int spnic_alloc_rxqs(struct net_device *netdev); - -void spnic_free_rxqs(struct net_device *netdev); - -int spnic_alloc_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res); - -void spnic_free_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res); - -int spnic_configure_rxqs(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res); - -int spnic_rx_configure(struct net_device *netdev); - -void spnic_rx_remove_configure(struct net_device *netdev); - -int spnic_rx_poll(struct spnic_rxq *rxq, int budget); -int stub_spnic_rx_poll(struct spnic_rxq *rxq, int budget); - -void spnic_rxq_get_stats(struct spnic_rxq *rxq, struct spnic_rxq_stats *stats); - -void spnic_rxq_clean_stats(struct spnic_rxq_stats *rxq_stats); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c b/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c deleted file mode 100644 index aac22cb302eacb25675454ac6c654620880108e8..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c +++ /dev/null @@ -1,200 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include - -#include "sphw_common.h" -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "spnic_lld.h" -#include "spnic_sriov.h" -#include "spnic_dev_mgmt.h" - -int spnic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) -{ - u16 i, func_idx; - int err; - - /* mbox msg channel resources will be freed during remove process */ - err = sphw_init_func_mbox_msg_channel(hwdev, sphw_func_max_vf(hwdev)); - if (err) - return err; - - /* vf use 256K as default wq page size, and can't change it */ - for (i = start_vf_id; i <= end_vf_id; i++) { - func_idx = sphw_glb_pf_vf_offset(hwdev) + i; - err = sphw_set_wq_page_size(hwdev, func_idx, SPHW_DEFAULT_WQ_PAGE_SIZE, - SPHW_CHANNEL_COMM); - if (err) - return err; - } - - return 0; -} - -int spnic_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) -{ - u16 func_idx, idx; - - for (idx = start_vf_id; idx <= end_vf_id; idx++) { - func_idx = sphw_glb_pf_vf_offset(hwdev) + idx; - sphw_set_wq_page_size(hwdev, func_idx, SPHW_HW_WQ_PAGE_SIZE, SPHW_CHANNEL_COMM); - } - - return 0; -} - -int spnic_pci_sriov_disable(struct pci_dev *dev) -{ -#ifdef CONFIG_PCI_IOV - struct spnic_sriov_info *sriov_info = NULL; - struct sphw_event_info event = {0}; - void *hwdev = NULL; - u16 tmp_vfs; - - sriov_info = spnic_get_sriov_info_by_pcidev(dev); - hwdev = spnic_get_hwdev_by_pcidev(dev); - if (!hwdev) { - sdk_err(&dev->dev, "SR-IOV disable is not permitted, please wait...\n"); - return -EPERM; - } - - /* if SR-IOV is already disabled then there is nothing to do */ - if (!sriov_info->sriov_enabled) - return 0; - - if 
(test_and_set_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state)) { - sdk_err(&dev->dev, "SR-IOV disable in process, please wait"); - return -EPERM; - } - - /* If our VFs are assigned we cannot shut down SR-IOV - * without causing issues, so just leave the hardware - * available but disabled - */ - if (pci_vfs_assigned(dev)) { - clear_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state); - sdk_warn(&dev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); - return -EPERM; - } - - event.type = SPHW_EVENT_SRIOV_STATE_CHANGE; - event.sriov_state.enable = 0; - sphw_event_callback(hwdev, &event); - - sriov_info->sriov_enabled = false; - - /* disable iov and allow time for transactions to clear */ - pci_disable_sriov(dev); - - tmp_vfs = (u16)sriov_info->num_vfs; - sriov_info->num_vfs = 0; - spnic_deinit_vf_hw(hwdev, 1, tmp_vfs); - - clear_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state); - -#endif - - return 0; -} - -int spnic_pci_sriov_enable(struct pci_dev *dev, int num_vfs) -{ -#ifdef CONFIG_PCI_IOV - struct spnic_sriov_info *sriov_info = NULL; - struct sphw_event_info event = {0}; - void *hwdev = NULL; - int pre_existing_vfs = 0; - int err = 0; - - sriov_info = spnic_get_sriov_info_by_pcidev(dev); - hwdev = spnic_get_hwdev_by_pcidev(dev); - if (!hwdev) { - sdk_err(&dev->dev, "SR-IOV enable is not permitted, please wait...\n"); - return -EPERM; - } - - if (test_and_set_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state)) { - sdk_err(&dev->dev, "SR-IOV enable in process, please wait, num_vfs %d\n", - num_vfs); - return -EPERM; - } - - pre_existing_vfs = pci_num_vf(dev); - - if (num_vfs > pci_sriov_get_totalvfs(dev)) { - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - return -ERANGE; - } - if (pre_existing_vfs && pre_existing_vfs != num_vfs) { - err = spnic_pci_sriov_disable(dev); - if (err) { - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - return err; - } - } else if (pre_existing_vfs == num_vfs) { - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - return num_vfs; - } - - err = spnic_init_vf_hw(hwdev, 1, (u16)num_vfs); - if (err) { - sdk_err(&dev->dev, "Failed to init vf in hardware before enable sriov, error %d\n", - err); - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - return err; - } - - err = pci_enable_sriov(dev, num_vfs); - if (err) { - sdk_err(&dev->dev, "Failed to enable SR-IOV, error %d\n", err); - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - return err; - } - - sriov_info->sriov_enabled = true; - sriov_info->num_vfs = num_vfs; - - event.type = SPHW_EVENT_SRIOV_STATE_CHANGE; - event.sriov_state.enable = 1; - event.sriov_state.num_vfs = (u16)num_vfs; - sphw_event_callback(hwdev, &event); - - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - - return num_vfs; -#else - - return 0; -#endif -} - -static bool spnic_is_support_sriov_configure(struct pci_dev *pdev) -{ - /* TODO: get cap from firmware */ - - return true; -} - -int spnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs) -{ - struct spnic_sriov_info *sriov_info = NULL; - - if (!spnic_is_support_sriov_configure(dev)) - return -EFAULT; - - sriov_info = spnic_get_sriov_info_by_pcidev(dev); - if (!sriov_info) - return -EFAULT; - - if (!test_bit(SPNIC_FUNC_PERSENT, &sriov_info->state)) - return -EFAULT; - - if (!num_vfs) - return spnic_pci_sriov_disable(dev); - else - return spnic_pci_sriov_enable(dev, num_vfs); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h b/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h deleted file mode 100644 index 
622845e30427249b9e024fff3fb15a02bf1c8594..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_SRIOV_H -#define SPNIC_SRIOV_H -#include - -enum spnic_sriov_state { - SPNIC_SRIOV_DISABLE, - SPNIC_SRIOV_ENABLE, - SPNIC_FUNC_PERSENT, -}; - -struct spnic_sriov_info { - bool sriov_enabled; - unsigned int num_vfs; - unsigned long state; -}; - -struct spnic_sriov_info *spnic_get_sriov_info_by_pcidev(struct pci_dev *pdev); -int spnic_pci_sriov_disable(struct pci_dev *dev); -int spnic_pci_sriov_enable(struct pci_dev *dev, int num_vfs); -int spnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs); -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_tx.c b/drivers/net/ethernet/ramaxel/spnic/spnic_tx.c deleted file mode 100644 index 7478e76aa72943fe1007045c5d67dc76d8b6313f..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_tx.c +++ /dev/null @@ -1,877 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "spnic_nic_qp.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" - -#define MIN_SKB_LEN 32 - -#define MAX_PAYLOAD_OFFSET 221 - -#define NIC_QID(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1)) - -#define SPNIC_TX_TASK_WRAPPED 1 -#define SPNIC_TX_BD_DESC_WRAPPED 2 - -#define TXQ_STATS_INC(txq, field) \ -do { \ - u64_stats_update_begin(&(txq)->txq_stats.syncp); \ - (txq)->txq_stats.field++; \ - u64_stats_update_end(&(txq)->txq_stats.syncp); \ -} while (0) - -void spnic_txq_get_stats(struct spnic_txq *txq, struct spnic_txq_stats *stats) -{ - struct spnic_txq_stats *txq_stats = &txq->txq_stats; - unsigned int start; - - u64_stats_update_begin(&stats->syncp); - do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - stats->bytes = txq_stats->bytes; - stats->packets = txq_stats->packets; - stats->busy = txq_stats->busy; - stats->wake = txq_stats->wake; - stats->dropped = txq_stats->dropped; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); - u64_stats_update_end(&stats->syncp); -} - -void spnic_txq_clean_stats(struct spnic_txq_stats *txq_stats) -{ - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->bytes = 0; - txq_stats->packets = 0; - txq_stats->busy = 0; - txq_stats->wake = 0; - txq_stats->dropped = 0; - - txq_stats->skb_pad_err = 0; - txq_stats->frag_len_overflow = 0; - txq_stats->offload_cow_skb_err = 0; - txq_stats->map_frag_err = 0; - txq_stats->unknown_tunnel_pkt = 0; - txq_stats->frag_size_err = 0; - u64_stats_update_end(&txq_stats->syncp); -} - -static void txq_stats_init(struct spnic_txq *txq) -{ - struct spnic_txq_stats *txq_stats = &txq->txq_stats; - - u64_stats_init(&txq_stats->syncp); - spnic_txq_clean_stats(txq_stats); -} - -static inline void spnic_set_buf_desc(struct spnic_sq_bufdesc *buf_descs, dma_addr_t addr, u32 len) -{ - buf_descs->hi_addr = upper_32_bits(addr); - buf_descs->lo_addr = lower_32_bits(addr); - buf_descs->len = len; -} - -static int tx_map_skb(struct spnic_nic_dev *nic_dev, struct sk_buff *skb, - u16 valid_nr_frags, struct spnic_txq *txq, - struct spnic_tx_info *tx_info, - struct 
spnic_sq_wqe_combo *wqe_combo) -{ - struct spnic_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; - struct spnic_sq_bufdesc *buf_desc = wqe_combo->bds_head; - struct spnic_dma_info *dma_info = tx_info->dma_info; - struct pci_dev *pdev = nic_dev->pdev; - skb_frag_t *frag = NULL; - u32 j, i; - int err; - - dma_info[0].dma = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, dma_info[0].dma)) { - TXQ_STATS_INC(txq, map_frag_err); - return -EFAULT; - } - - dma_info[0].len = skb_headlen(skb); - - wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma); - wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma); - - wqe_desc->ctrl_len = dma_info[0].len; - - for (i = 0; i < valid_nr_frags;) { - frag = &(skb_shinfo(skb)->frags[i]); - if (unlikely(i == wqe_combo->first_bds_num)) - buf_desc = wqe_combo->bds_sec2; - - i++; - dma_info[i].dma = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), - DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, dma_info[i].dma)) { - TXQ_STATS_INC(txq, map_frag_err); - i--; - err = -EFAULT; - goto frag_map_err; - } - dma_info[i].len = skb_frag_size(frag); - - spnic_set_buf_desc(buf_desc, dma_info[i].dma, dma_info[i].len); - buf_desc++; - } - - return 0; - -frag_map_err: - for (j = 0; j < i;) { - j++; - dma_unmap_page(&pdev->dev, dma_info[j].dma, - dma_info[j].len, DMA_TO_DEVICE); - } - dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len, DMA_TO_DEVICE); - return err; -} - -static inline void tx_unmap_skb(struct spnic_nic_dev *nic_dev, - struct sk_buff *skb, u16 valid_nr_frags, - struct spnic_dma_info *dma_info) -{ - struct pci_dev *pdev = nic_dev->pdev; - int i; - - for (i = 0; i < valid_nr_frags; ) { - i++; - dma_unmap_page(&pdev->dev, dma_info[i].dma, dma_info[i].len, DMA_TO_DEVICE); - } - - dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len, DMA_TO_DEVICE); -} - -union spnic_l4 { - struct tcphdr *tcp; - struct udphdr *udp; - unsigned char *hdr; -}; - -enum sq_l3_type { - UNKNOWN_L3TYPE = 0, - IPV6_PKT = 1, - IPV4_PKT_NO_CHKSUM_OFFLOAD = 2, - IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3, -}; - -enum sq_l4offload_type { - OFFLOAD_DISABLE = 0, - TCP_OFFLOAD_ENABLE = 1, - SCTP_OFFLOAD_ENABLE = 2, - UDP_OFFLOAD_ENABLE = 3, -}; - -/* Initialize l4_len and offset */ -static inline void get_inner_l4_info(struct sk_buff *skb, union spnic_l4 *l4, - u8 l4_proto, u32 *offset, - enum sq_l4offload_type *l4_offload) -{ - switch (l4_proto) { - case IPPROTO_TCP: - *l4_offload = TCP_OFFLOAD_ENABLE; - /* To keep the same behavior as TSO, the payload offset begins from the payload */ - *offset = (l4->tcp->doff << 2) + TRANSPORT_OFFSET(l4->hdr, skb); - break; - - case IPPROTO_UDP: - *l4_offload = UDP_OFFLOAD_ENABLE; - *offset = TRANSPORT_OFFSET(l4->hdr, skb); - break; - default: - break; - } -} - -static int spnic_tx_csum(struct spnic_txq *txq, struct spnic_sq_task *task, struct sk_buff *skb) -{ - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - if (skb->encapsulation) { - union spnic_ip ip; - u8 l4_proto; - - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, TUNNEL_FLAG); - - ip.hdr = skb_network_header(skb); - if (ip.v4->version == IPV4_VERSION) { - l4_proto = ip.v4->protocol; - } else if (ip.v4->version == IPV6_VERSION) { - union spnic_l4 l4; - unsigned char *exthdr; - __be16 frag_off; - - exthdr = ip.hdr + sizeof(*ip.v6); - l4_proto = ip.v6->nexthdr; - l4.hdr = skb_transport_header(skb); - if (l4.hdr != exthdr) - ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); - } else { - l4_proto = IPPROTO_RAW; - } - - if (l4_proto !=
IPPROTO_UDP || - ((struct udphdr *)skb_transport_header(skb))->dest != VXLAN_OFFLOAD_PORT_BE) { - TXQ_STATS_INC(txq, unknown_tunnel_pkt); - /* Unsupported tunnel packet, disable csum offload */ - skb_checksum_help(skb); - return 0; - } - } - - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN); - - return 1; -} - -static void get_inner_l3_l4_type(struct sk_buff *skb, union spnic_ip *ip, - union spnic_l4 *l4, enum sq_l3_type *l3_type, u8 *l4_proto) -{ - unsigned char *exthdr = NULL; - - if (ip->v4->version == 4) { - *l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD; - *l4_proto = ip->v4->protocol; - - } else if (ip->v4->version == 6) { - *l3_type = IPV6_PKT; - exthdr = ip->hdr + sizeof(*ip->v6); - *l4_proto = ip->v6->nexthdr; - if (exthdr != l4->hdr) { - __be16 frag_off = 0; - - ipv6_skip_exthdr(skb, (int)(exthdr - skb->data), l4_proto, &frag_off); - } - } else { - *l3_type = UNKNOWN_L3TYPE; - *l4_proto = 0; - } -} - -static inline void spnic_set_tso_info(struct spnic_sq_task *task, u32 *queue_info, - enum sq_l4offload_type l4_offload, u32 offset, u32 mss) -{ - if (l4_offload == TCP_OFFLOAD_ENABLE) { - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, TSO); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN); - } else if (l4_offload == UDP_OFFLOAD_ENABLE) { - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UFO); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN); - } - - /* Enable L3 checksum calculation by default */ - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L3_EN); - - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF); - - /* set MSS value */ - *queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS); - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS); -} - -static int spnic_tso(struct spnic_sq_task *task, u32 *queue_info, struct sk_buff *skb) -{ - enum sq_l4offload_type l4_offload = OFFLOAD_DISABLE; - enum sq_l3_type l3_type; - union spnic_ip ip; - union spnic_l4 l4; - u32 offset = 0; - u8 l4_proto; - int err; - - if (!skb_is_gso(skb)) - return 0; - - err = skb_cow_head(skb, 0); - if (err < 0) - return err; - - if (skb->encapsulation) { - u32 gso_type = skb_shinfo(skb)->gso_type; - /* L3 checksum is always enabled */ - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, OUT_L3_EN); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, TUNNEL_FLAG); - - l4.hdr = skb_transport_header(skb); - ip.hdr = skb_network_header(skb); - - if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { - l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, OUT_L4_EN); - } else if (gso_type & SKB_GSO_UDP_TUNNEL) { - } - - ip.hdr = skb_inner_network_header(skb); - l4.hdr = skb_inner_transport_header(skb); - } else { - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - } - - get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto); - - if (l4_proto == IPPROTO_TCP) - l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP); - - get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload); - - spnic_set_tso_info(task, queue_info, l4_offload, offset, skb_shinfo(skb)->gso_size); - - return 1; -} - -static u32 spnic_tx_offload(struct sk_buff *skb, struct spnic_sq_task *task, - u32 *queue_info, struct spnic_txq *txq) -{ - u32 offload = 0; - int tso_cs_en; - - task->pkt_info0 = 0; - task->ip_identify = 0; - task->pkt_info2 = 0; - task->vlan_offload = 0; - - tso_cs_en = spnic_tso(task, queue_info, skb); - if (tso_cs_en < 0) { - offload = TX_OFFLOAD_INVALID; - return offload; - } else if (tso_cs_en) { - offload |= TX_OFFLOAD_TSO; - } else { - tso_cs_en = spnic_tx_csum(txq, task, skb); - if (tso_cs_en) - offload |=
TX_OFFLOAD_CSUM; - } - -#define VLAN_INSERT_MODE_MAX 5 - if (unlikely(skb_vlan_tag_present(skb))) { - /* select vlan insert mode by qid, default 802.1Q Tag type */ - spnic_set_vlan_tx_offload(task, skb_vlan_tag_get(skb), - txq->q_id % VLAN_INSERT_MODE_MAX); - offload |= TX_OFFLOAD_VLAN; - } - - if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) > MAX_PAYLOAD_OFFSET)) { - offload = TX_OFFLOAD_INVALID; - return offload; - } - - return offload; -} - -static inline void get_pkt_stats(struct spnic_tx_info *tx_info, struct sk_buff *skb) -{ - u32 ihs, hdr_len; - - if (skb_is_gso(skb)) { - if (skb->encapsulation) - ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); - else - ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); - - hdr_len = (skb_shinfo(skb)->gso_segs - 1) * ihs; - tx_info->num_bytes = skb->len + (u64)hdr_len; - - } else { - tx_info->num_bytes = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN; - } - - tx_info->num_pkts = 1; -} - -static inline int spnic_maybe_stop_tx(struct spnic_txq *txq, u16 wqebb_cnt) -{ - if (likely(spnic_get_sq_free_wqebbs(txq->sq) >= wqebb_cnt)) - return 0; - - /* We need to check again in case another CPU has just - * made room available. - */ - netif_stop_subqueue(txq->netdev, txq->q_id); - - if (likely(spnic_get_sq_free_wqebbs(txq->sq) < wqebb_cnt)) - return -EBUSY; - - /* there are enough wqebbs after the queue is woken up */ - netif_start_subqueue(txq->netdev, txq->q_id); - - return 0; -} - -static inline u16 spnic_set_wqe_combo(struct spnic_txq *txq, struct spnic_sq_wqe_combo *wqe_combo, - u32 offload, u16 num_sge, u16 *curr_pi) -{ - void *second_part_wqebbs_addr = NULL; - void *wqe = NULL; - u16 first_part_wqebbs_num, tmp_pi; - - wqe_combo->ctrl_bd0 = spnic_get_sq_one_wqebb(txq->sq, curr_pi); - if (!offload && num_sge == 1) { - wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE; - return spnic_get_and_update_sq_owner(txq->sq, *curr_pi, 1); - } - - wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE; - - if (offload) { - wqe_combo->task = spnic_get_sq_one_wqebb(txq->sq, &tmp_pi); - wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES; - } else { - wqe_combo->task_type = SQ_WQE_TASKSECT_46BITS; - } - - if (num_sge > 1) { - /* the first wqebb contains bd0, and the bd size is equal to the sq - * wqebb size, so we use (num_sge - 1) as the wanted wqebb_cnt - */ - wqe = spnic_get_sq_multi_wqebbs(txq->sq, num_sge - 1, &tmp_pi, - &second_part_wqebbs_addr, - &first_part_wqebbs_num); - wqe_combo->bds_head = wqe; - wqe_combo->bds_sec2 = second_part_wqebbs_addr; - wqe_combo->first_bds_num = first_part_wqebbs_num; - } - - return spnic_get_and_update_sq_owner(txq->sq, *curr_pi, num_sge + (u16)!!offload); -} - -inline u8 spnic_get_vlan_pri(struct sk_buff *skb) -{ - u16 vlan_tci = 0; - int err; - - err = vlan_get_tag(skb, &vlan_tci); - if (err) - return 0; - - return (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; -} - -static netdev_tx_t spnic_send_one_skb(struct sk_buff *skb, struct net_device *netdev, - struct spnic_txq *txq) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_sq_wqe_combo wqe_combo = {0}; - struct spnic_tx_info *tx_info = NULL; - struct spnic_sq_task task; - u32 offload, queue_info = 0; - u16 owner = 0, pi = 0; - u16 wqebb_cnt, num_sge, valid_nr_frags; - bool find_zero_sge_len = false; - int err, i; - - if (unlikely(skb->len < MIN_SKB_LEN)) { - if (skb_pad(skb, (int)(MIN_SKB_LEN - skb->len))) { - TXQ_STATS_INC(txq, skb_pad_err); - goto tx_skb_pad_err; - } - - skb->len = MIN_SKB_LEN; - } - - valid_nr_frags = 0; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
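/*
 * A worked sketch of the frag scan below, using this driver's own names:
 * a zero-length frag only sets find_zero_sge_len; the packet is dropped
 * as frag_size_err only when a non-zero frag follows it. For frag sizes
 * [1500, 0, 64] the third frag trips the check, while [1500, 0] is
 * accepted with valid_nr_frags = 1 (trailing zero-length frags are
 * harmless, presumably because the SGL is simply truncated).
 */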
- if (!skb_frag_size(&skb_shinfo(skb)->frags[i])) { - find_zero_sge_len = true; - continue; - } else if (find_zero_sge_len) { - TXQ_STATS_INC(txq, frag_size_err); - goto tx_drop_pkts; - } - - valid_nr_frags++; - } - - num_sge = valid_nr_frags + 1; - - /* assume a normal TS format wqe is needed; the task info takes 1 wqebb */ - wqebb_cnt = num_sge + 1; - if (unlikely(spnic_maybe_stop_tx(txq, wqebb_cnt))) { - TXQ_STATS_INC(txq, busy); - return NETDEV_TX_BUSY; - } - - offload = spnic_tx_offload(skb, &task, &queue_info, txq); - if (unlikely(offload == TX_OFFLOAD_INVALID)) { - TXQ_STATS_INC(txq, offload_cow_skb_err); - goto tx_drop_pkts; - } else if (!offload) { - /* no TS in the current wqe */ - wqebb_cnt -= 1; - } - - owner = spnic_set_wqe_combo(txq, &wqe_combo, offload, num_sge, &pi); - if (offload) { - wqe_combo.task->ip_identify = task.ip_identify; - wqe_combo.task->pkt_info0 = task.pkt_info0; - wqe_combo.task->pkt_info2 = task.pkt_info2; - wqe_combo.task->vlan_offload = task.vlan_offload; - } - - tx_info = &txq->tx_info[pi]; - tx_info->skb = skb; - tx_info->wqebb_cnt = wqebb_cnt; - tx_info->valid_nr_frags = valid_nr_frags; - - err = tx_map_skb(nic_dev, skb, valid_nr_frags, txq, tx_info, &wqe_combo); - if (err) { - spnic_rollback_sq_wqebbs(txq->sq, wqebb_cnt, owner); - goto tx_drop_pkts; - } - - get_pkt_stats(tx_info, skb); - - spnic_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner); - - spnic_write_db(txq->sq, txq->cos, SQ_CFLAG_DP, spnic_get_sq_local_pi(txq->sq)); - - return NETDEV_TX_OK; - -tx_drop_pkts: - dev_kfree_skb_any(skb); - -tx_skb_pad_err: - TXQ_STATS_INC(txq, dropped); - - return NETDEV_TX_OK; -} - -netdev_tx_t spnic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u16 q_id = skb_get_queue_mapping(skb); - struct spnic_txq *txq = &nic_dev->txqs[q_id]; - - return spnic_send_one_skb(skb, netdev, txq); -} - -netdev_tx_t spnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_txq *txq = NULL; - u16 q_id = skb_get_queue_mapping(skb); - - if (unlikely(!netif_carrier_ok(netdev))) { - dev_kfree_skb_any(skb); - SPNIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop); - return NETDEV_TX_OK; - } - - if (unlikely(q_id >= nic_dev->q_params.num_qps)) { - txq = &nic_dev->txqs[0]; - SPNIC_NIC_STATS_INC(nic_dev, tx_invalid_qid); - goto tx_drop_pkts; - } - txq = &nic_dev->txqs[q_id]; - - return spnic_send_one_skb(skb, netdev, txq); - -tx_drop_pkts: - dev_kfree_skb_any(skb); - u64_stats_update_begin(&txq->txq_stats.syncp); - txq->txq_stats.dropped++; - u64_stats_update_end(&txq->txq_stats.syncp); - - return NETDEV_TX_OK; -} - -static inline void tx_free_skb(struct spnic_nic_dev *nic_dev, struct spnic_tx_info *tx_info) -{ - tx_unmap_skb(nic_dev, tx_info->skb, tx_info->valid_nr_frags, - tx_info->dma_info); - dev_kfree_skb_any(tx_info->skb); - tx_info->skb = NULL; -} - -static void free_all_tx_skbs(struct spnic_nic_dev *nic_dev, u32 sq_depth, - struct spnic_tx_info *tx_info_arr) -{ - struct spnic_tx_info *tx_info = NULL; - u32 idx; - - for (idx = 0; idx < sq_depth; idx++) { - tx_info = &tx_info_arr[idx]; - if (tx_info->skb) - tx_free_skb(nic_dev, tx_info); - } -} - -int spnic_tx_poll(struct spnic_txq *txq, int budget) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(txq->netdev); - struct spnic_tx_info *tx_info = NULL; - u64 tx_bytes = 0, wake = 0; - int pkts = 0, nr_pkts = 0; - u16 wqebb_cnt = 0; - u16 hw_ci, sw_ci = 0, q_id = txq->sq->q_id; - - hw_ci =
spnic_get_sq_hw_ci(txq->sq); - dma_rmb(); - sw_ci = spnic_get_sq_local_ci(txq->sq); - - do { - tx_info = &txq->tx_info[sw_ci]; - - /* check whether all wqebbs of this wqe have completed */ - if (hw_ci == sw_ci || - ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt) - break; - - sw_ci = (u16)(sw_ci + tx_info->wqebb_cnt) & txq->q_mask; - prefetch(&txq->tx_info[sw_ci]); - - wqebb_cnt += tx_info->wqebb_cnt; - - tx_bytes += tx_info->num_bytes; - nr_pkts += tx_info->num_pkts; - pkts++; - - tx_free_skb(nic_dev, tx_info); - - } while (likely(pkts < budget)); - - spnic_update_sq_local_ci(txq->sq, wqebb_cnt); - - if (unlikely(__netif_subqueue_stopped(nic_dev->netdev, q_id) && - spnic_get_sq_free_wqebbs(txq->sq) >= 1 && - test_bit(SPNIC_INTF_UP, &nic_dev->flags))) { - struct netdev_queue *netdev_txq = netdev_get_tx_queue(txq->netdev, q_id); - - __netif_tx_lock(netdev_txq, smp_processor_id()); - /* To avoid re-waking the subqueue with xmit_frame */ - if (__netif_subqueue_stopped(nic_dev->netdev, q_id)) { - netif_wake_subqueue(nic_dev->netdev, q_id); - wake++; - } - __netif_tx_unlock(netdev_txq); - } - - u64_stats_update_begin(&txq->txq_stats.syncp); - txq->txq_stats.bytes += tx_bytes; - txq->txq_stats.packets += nr_pkts; - txq->txq_stats.wake += wake; - u64_stats_update_end(&txq->txq_stats.syncp); - - return pkts; -} - -void spnic_set_txq_cos(struct spnic_nic_dev *nic_dev, u16 start_qid, u16 q_num, u8 cos) -{ - u16 idx; - - for (idx = 0; idx < q_num; idx++) - nic_dev->txqs[idx + start_qid].cos = cos; -} - -#define SPNIC_BDS_PER_SQ_WQEBB \ - (SPNIC_SQ_WQEBB_SIZE / sizeof(struct spnic_sq_bufdesc)) - -int spnic_alloc_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res) -{ - struct spnic_dyna_txq_res *tqres = NULL; - int idx, i; - u64 size; - - for (idx = 0; idx < num_sq; idx++) { - tqres = &txqs_res[idx]; - - size = sizeof(*tqres->tx_info) * sq_depth; - tqres->tx_info = kzalloc(size, GFP_KERNEL); - if (!tqres->tx_info) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc txq%d tx info\n", idx); - goto err_out; - } - - size = sizeof(*tqres->bds) * (sq_depth * SPNIC_BDS_PER_SQ_WQEBB + SPNIC_MAX_SQ_SGE); - tqres->bds = kzalloc(size, GFP_KERNEL); - if (!tqres->bds) { - kfree(tqres->tx_info); - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc txq%d bds info\n", idx); - goto err_out; - } - } - - return 0; - -err_out: - for (i = 0; i < idx; i++) { - tqres = &txqs_res[i]; - - kfree(tqres->bds); - kfree(tqres->tx_info); - } - - return -ENOMEM; -} - -void spnic_free_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res) -{ - struct spnic_dyna_txq_res *tqres = NULL; - int idx; - - for (idx = 0; idx < num_sq; idx++) { - tqres = &txqs_res[idx]; - - free_all_tx_skbs(nic_dev, sq_depth, tqres->tx_info); - kfree(tqres->bds); - kfree(tqres->tx_info); - } -} - -int spnic_configure_txqs(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res) -{ - struct spnic_dyna_txq_res *tqres = NULL; - struct spnic_txq *txq = NULL; - u16 q_id; - u32 idx; - - for (q_id = 0; q_id < num_sq; q_id++) { - txq = &nic_dev->txqs[q_id]; - tqres = &txqs_res[q_id]; - - txq->q_depth = sq_depth; - txq->q_mask = sq_depth - 1; - - txq->tx_info = tqres->tx_info; - for (idx = 0; idx < sq_depth; idx++) - txq->tx_info[idx].dma_info = &tqres->bds[idx * SPNIC_BDS_PER_SQ_WQEBB]; - - txq->sq = spnic_get_nic_queue(nic_dev->hwdev, q_id, SPNIC_SQ); - if (!txq->sq) { - nicif_err(nic_dev, drv,
nic_dev->netdev, "Failed to get %u sq\n", q_id); - return -EFAULT; - } - } - - return 0; -} - -int spnic_alloc_txqs(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct pci_dev *pdev = nic_dev->pdev; - struct spnic_txq *txq = NULL; - u16 q_id, num_txqs = nic_dev->max_qps; - u64 txq_size; - - txq_size = num_txqs * sizeof(*nic_dev->txqs); - if (!txq_size) { - nic_err(&pdev->dev, "Cannot allocate zero size txqs\n"); - return -EINVAL; - } - - nic_dev->txqs = kzalloc(txq_size, GFP_KERNEL); - if (!nic_dev->txqs) { - nic_err(&pdev->dev, "Failed to allocate txqs\n"); - return -ENOMEM; - } - - for (q_id = 0; q_id < num_txqs; q_id++) { - txq = &nic_dev->txqs[q_id]; - txq->netdev = netdev; - txq->q_id = q_id; - txq->q_depth = nic_dev->q_params.sq_depth; - txq->q_mask = nic_dev->q_params.sq_depth - 1; - txq->dev = &pdev->dev; - - txq_stats_init(txq); - } - - return 0; -} - -void spnic_free_txqs(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - kfree(nic_dev->txqs); -} - -static bool is_hw_complete_sq_process(struct spnic_io_queue *sq) -{ - u16 sw_pi, hw_ci; - - sw_pi = spnic_get_sq_local_pi(sq); - hw_ci = spnic_get_sq_hw_ci(sq); - - return sw_pi == hw_ci; -} - -#define SPNIC_FLUSH_QUEUE_TIMEOUT 1000 -static int spnic_stop_sq(struct spnic_txq *txq) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(txq->netdev); - unsigned long timeout; - int err; - - timeout = msecs_to_jiffies(SPNIC_FLUSH_QUEUE_TIMEOUT) + jiffies; - do { - if (is_hw_complete_sq_process(txq->sq)) - return 0; - - usleep_range(900, 1000); - } while (time_before(jiffies, timeout)); - - /* force hardware to drop packets */ - timeout = msecs_to_jiffies(SPNIC_FLUSH_QUEUE_TIMEOUT) + jiffies; - do { - if (is_hw_complete_sq_process(txq->sq)) - return 0; - - err = spnic_force_drop_tx_pkt(nic_dev->hwdev); - if (err) - break; - - usleep_range(9900, 10000); - } while (time_before(jiffies, timeout)); - - /* the sleep may overshoot; check once more to avoid a false timeout */ - if (is_hw_complete_sq_process(txq->sq)) - return 0; - - return -EFAULT; -} - -/* the caller must stop transmitting packets before calling this function */ -int spnic_flush_txqs(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u16 qid; - int err; - - for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { - err = spnic_stop_sq(&nic_dev->txqs[qid]); - if (err) - nicif_err(nic_dev, drv, netdev, "Failed to stop sq%u\n", qid); - } - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_tx.h b/drivers/net/ethernet/ramaxel/spnic/spnic_tx.h deleted file mode 100644 index c3109db83299ffeb96cf3d27ed46bd0a1200134e..0000000000000000000000000000000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_tx.h +++ /dev/null @@ -1,129 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_TX_H -#define SPNIC_TX_H - -#include -#include -#include -#include -#include - -#include "spnic_nic_qp.h" - -#define VXLAN_OFFLOAD_PORT_BE htons(4789) - -enum tx_offload_type { - TX_OFFLOAD_TSO = BIT(0), - TX_OFFLOAD_CSUM = BIT(1), - TX_OFFLOAD_VLAN = BIT(2), - TX_OFFLOAD_INVALID = BIT(3), - TX_OFFLOAD_ESP = BIT(4), -}; - -struct spnic_txq_stats { - u64 packets; - u64 bytes; - u64 busy; - u64 wake; - u64 dropped; - - /* Subdivision statistics shown in the private tool */ - u64 skb_pad_err; - u64 frag_len_overflow; - u64 offload_cow_skb_err; - u64 map_frag_err; - u64 unknown_tunnel_pkt; - u64 frag_size_err; - - struct u64_stats_sync syncp; -}; -
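/*
 * A minimal sketch (the helper below is hypothetical, not part of the
 * original header): the u64 counters above are written under syncp, so a
 * torn-read-safe snapshot on 32-bit hosts uses the same u64_stats retry
 * loop that spnic_txq_get_stats() uses in spnic_tx.c.
 */
static inline u64 spnic_txq_stats_read_packets(struct spnic_txq_stats *txq_stats)
{
	unsigned int start;
	u64 packets;

	do {
		/* retry until no writer raced with the read */
		start = u64_stats_fetch_begin(&txq_stats->syncp);
		packets = txq_stats->packets;
	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));

	return packets;
}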
-struct spnic_dma_info { - dma_addr_t dma; - u32 len; -}; - -#define IPV4_VERSION 4 -#define IPV6_VERSION 6 -#define TCP_HDR_DOFF_UNIT 2 -#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) - -union spnic_ip { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; -}; - -struct spnic_tx_info { - struct sk_buff *skb; - - u16 wqebb_cnt; - u16 valid_nr_frags; - - int num_sge; - u16 num_pkts; - u64 num_bytes; - struct spnic_dma_info *dma_info; -}; - -struct spnic_txq { - struct net_device *netdev; - struct device *dev; - - struct spnic_txq_stats txq_stats; - - u8 cos; - u16 q_id; - u32 q_mask; - u32 q_depth; - - struct spnic_tx_info *tx_info; - struct spnic_io_queue *sq; - - u64 last_moder_packets; - u64 last_moder_bytes; -} ____cacheline_aligned; - -netdev_tx_t spnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); - -netdev_tx_t spnic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev); - -struct spnic_dyna_txq_res { - struct spnic_tx_info *tx_info; - struct spnic_dma_info *bds; -}; - -void spnic_txq_get_stats(struct spnic_txq *txq, struct spnic_txq_stats *stats); - -void spnic_txq_clean_stats(struct spnic_txq_stats *txq_stats); - -struct spnic_nic_dev; -int spnic_alloc_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res); - -void spnic_free_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res); - -int spnic_configure_txqs(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res); - -int spnic_alloc_txqs(struct net_device *netdev); - -void spnic_free_txqs(struct net_device *netdev); - -int spnic_tx_poll(struct spnic_txq *txq, int budget); - -int spnic_flush_txqs(struct net_device *netdev); - -void spnic_set_txq_cos(struct spnic_nic_dev *nic_dev, u16 start_qid, u16 q_num, u8 cos); - -static inline __sum16 csum_magic(union spnic_ip *ip, unsigned short proto) -{ - return (ip->v4->version == IPV4_VERSION) ? 
- csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : - csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); -} - -#endif diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 7072b249c8bd60ebb302db31df468635d841acc6..8157666209798ffc73a1c97d2905d7be16c5584f 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2795,7 +2795,8 @@ static void ofdpa_fib4_abort(struct rocker *rocker) if (!ofdpa_port) continue; nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; - ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE, + ofdpa_flow_tbl_del(ofdpa_port, + OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT, flow_entry); } spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 971f1e54b6526e36223059f684292bbd95bc539f..b1dd6189638b3ee173366f0c0ab274f11ae5c420 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -2282,18 +2282,18 @@ static int __init sxgbe_cmdline_opt(char *str) char *opt; if (!str || !*str) - return -EINVAL; + return 1; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "eee_timer:", 10)) { if (kstrtoint(opt + 10, 0, &eee_timer)) goto err; } } - return 0; + return 1; err: pr_err("%s: ERROR broken module parameter conversion\n", __func__); - return -EINVAL; + return 1; } __setup("sxgbeeth=", sxgbe_cmdline_opt); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index be6bfd6b7ec7576407f85ebb786eaed238f7d8be..50baf62b2cbc6808bad368e45d6e2efce7573f8c 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -163,9 +163,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd, /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ spin_lock_bh(&mcdi->iface_lock); ++mcdi->seqno; + seqno = mcdi->seqno & SEQ_MASK; spin_unlock_bh(&mcdi->iface_lock); - seqno = mcdi->seqno & SEQ_MASK; xflags = 0; if (mcdi->mode == MCDI_MODE_EVENTS) xflags |= MCDI_HEADER_XFLAGS_EVREQ; diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index e423b17e2a148ac8f7f2e1f2956b67d328e120f4..2c09afac5beb45e8f15475e536c3b03180786930 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -166,6 +166,9 @@ static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue) struct efx_nic *efx = rx_queue->efx; int i; + if (unlikely(!rx_queue->page_ring)) + return; + /* Unmap and release the pages in the recycle ring. Remove the ring. 
*/ for (i = 0; i <= rx_queue->page_ptr_mask; i++) { struct page *page = rx_queue->page_ring[i]; diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c index cd478d2cd871ae46640bff2b0dd06eba384de777..00f6d347eaf75b33ec43c60aea8aca1fcaebdc7a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c @@ -57,10 +57,6 @@ #define TSE_PCS_USE_SGMII_ENA BIT(0) #define TSE_PCS_IF_USE_SGMII 0x03 -#define SGMII_ADAPTER_CTRL_REG 0x00 -#define SGMII_ADAPTER_DISABLE 0x0001 -#define SGMII_ADAPTER_ENABLE 0x0000 - #define AUTONEGO_LINK_TIMER 20 static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs) @@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, unsigned int speed) { void __iomem *tse_pcs_base = pcs->tse_pcs_base; - void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; u32 val; - writew(SGMII_ADAPTER_ENABLE, - sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); - pcs->autoneg = phy_dev->autoneg; if (phy_dev->autoneg == AUTONEG_ENABLE) { diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h index 442812c0a4bdccb6d93eb6e754b7800df8fcad7e..694ac25ef426ba2e6a9848b6881b9e3386d32ef6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h @@ -10,6 +10,10 @@ #include #include +#define SGMII_ADAPTER_CTRL_REG 0x00 +#define SGMII_ADAPTER_ENABLE 0x0000 +#define SGMII_ADAPTER_DISABLE 0x0001 + struct tse_pcs { struct device *dev; void __iomem *tse_pcs_base; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index f37b6d57b2fe22c0febda05c88213e484874ec9d..8bb0106cb7ea3b5d701290dad576df78b7de3144 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -18,9 +18,6 @@ #include "altr_tse_pcs.h" -#define SGMII_ADAPTER_CTRL_REG 0x00 -#define SGMII_ADAPTER_DISABLE 0x0001 - #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 @@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) { struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; void __iomem *splitter_base = dwmac->splitter_base; - void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base; void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base; struct device *dev = dwmac->dev; struct net_device *ndev = dev_get_drvdata(dev); struct phy_device *phy_dev = ndev->phydev; u32 val; - if ((tse_pcs_base) && (sgmii_adapter_base)) - writew(SGMII_ADAPTER_DISABLE, - sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + writew(SGMII_ADAPTER_DISABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); if (splitter_base) { val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG); @@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); } - if (tse_pcs_base && sgmii_adapter_base) + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + if (phy_dev) tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 9f5ccf1a0a54015c09687297ec6b269cad68b37e..cad6588840d8b1678697330af987ef96944f577f 100644 
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -734,7 +734,7 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv) if (err) { dev_err(priv->device, "EMAC reset timeout\n"); - return -EFAULT; + return err; } return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index e5dbd0bc257e7a1d8136bca183f972be4f75740b..82889c363c7773cec228762ee688eada68c7850a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -130,6 +130,7 @@ #define NUM_DWMAC100_DMA_REGS 9 #define NUM_DWMAC1000_DMA_REGS 23 +#define NUM_DWMAC4_DMA_REGS 27 void dwmac_enable_dma_transmission(void __iomem *ioaddr); void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 2d11fcaad76f5bd30418b9412caf90014489610f..83b687dcf5f852abe530422c0a130a8bc45c4694 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -21,10 +21,18 @@ #include "dwxgmac2.h" #define REG_SPACE_SIZE 0x1060 +#define GMAC4_REG_SPACE_SIZE 0x116C #define MAC100_ETHTOOL_NAME "st_mac100" #define GMAC_ETHTOOL_NAME "st_gmac" #define XGMAC_ETHTOOL_NAME "st_xgmac" +/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h + * + * It is here because dwmac_dma.h and dwmac4_dma.h cannot be included at the + * same time due to the conflicting macro names. + */ +#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100 + #define ETHTOOL_DMA_OFFSET 55 struct stmmac_stats { @@ -413,6 +421,8 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev) if (priv->plat->has_xgmac) return XGMAC_REGSIZE * 4; + else if (priv->plat->has_gmac4) + return GMAC4_REG_SPACE_SIZE; return REG_SPACE_SIZE; } @@ -425,8 +435,13 @@ static void stmmac_ethtool_gregs(struct net_device *dev, stmmac_dump_mac_regs(priv, priv->hw, reg_space); stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); - if (!priv->plat->has_xgmac) { - /* Copy DMA registers to where ethtool expects them */ + /* Copy DMA registers to where ethtool expects them */ + if (priv->plat->has_gmac4) { + /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */ + memcpy(&reg_space[ETHTOOL_DMA_OFFSET], + &reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4], + NUM_DWMAC4_DMA_REGS * 4); + } else if (!priv->plat->has_xgmac) { memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4], NUM_DWMAC1000_DMA_REGS * 4); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index d291612eeafb96b753da3a6e6b42fc2b16bc0d1e..07b1b8374cd26b8d9f2a52aa1a2d80d39b0b0c7f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -142,15 +142,20 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, static void get_systime(void __iomem *ioaddr, u64 *systime) { - u64 ns; - - /* Get the TSSS value */ - ns = readl(ioaddr + PTP_STNSR); - /* Get the TSS and convert sec time value to nanosecond */ - ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; + u64 ns, sec0, sec1; + + /* Get the TSS value */ + sec1 = readl_relaxed(ioaddr + PTP_STSR); + do { + sec0 = sec1; + /* Get the TSSS value */ + ns = readl_relaxed(ioaddr + PTP_STNSR); + /* Get the TSS value */ + sec1 = readl_relaxed(ioaddr + PTP_STSR); + } while (sec0 != sec1); if
(systime) - *systime = ns; + *systime = ns + (sec1 * 1000000000ULL); } const struct stmmac_hwtimestamp stmmac_ptp = { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a8c5492cb39bef3e7f818b4a33f489c72a392bb6..a46c32257de42ba1e18c693a09999437a51d9c1c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -816,8 +816,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) priv->hwts_tx_en = 0; priv->hwts_rx_en = 0; - stmmac_ptp_register(priv); - return 0; } @@ -2691,7 +2689,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) /** * stmmac_hw_setup - setup mac in a usable state. * @dev : pointer to the device structure. - * @init_ptp: initialize PTP if set + * @ptp_register: register PTP if set * Description: * this is the main function to setup the HW in a usable state because the * dma engine is reset, the core registers are configured (e.g. AXI, @@ -2701,7 +2699,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) * 0 on success and an appropriate (-)ve integer as defined in errno.h * file on failure. */ -static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) +static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) { struct stmmac_priv *priv = netdev_priv(dev); u32 rx_cnt = priv->plat->rx_queues_to_use; @@ -2757,13 +2755,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) stmmac_mmc_setup(priv); - if (init_ptp) { - ret = stmmac_init_ptp(priv); - if (ret == -EOPNOTSUPP) - netdev_warn(priv->dev, "PTP not supported by HW\n"); - else if (ret) - netdev_warn(priv->dev, "PTP init failed\n"); - } + ret = stmmac_init_ptp(priv); + if (ret == -EOPNOTSUPP) + netdev_warn(priv->dev, "PTP not supported by HW\n"); + else if (ret) + netdev_warn(priv->dev, "PTP init failed\n"); + else if (ptp_register) + stmmac_ptp_register(priv); priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; @@ -5430,7 +5428,7 @@ static int __init stmmac_cmdline_opt(char *str) char *opt; if (!str || !*str) - return -EINVAL; + return 1; while ((opt = strsep(&str, ",")) != NULL) { if (!strncmp(opt, "debug:", 6)) { if (kstrtoint(opt + 6, 0, &debug)) @@ -5461,11 +5459,11 @@ static int __init stmmac_cmdline_opt(char *str) goto err; } } - return 0; + return 1; err: pr_err("%s: ERROR broken module parameter conversion", __func__); - return -EINVAL; + return 1; } __setup("stmmaceth=", stmmac_cmdline_opt); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 3183d8826981e14cc54289be667a3d5c036d544b..b40b962055fa5b3be835e0c6b5910ff06c98881f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -432,8 +432,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->phylink_node = np; /* Get max speed of operation from device tree */ - if (of_property_read_u32(np, "max-speed", &plat->max_speed)) - plat->max_speed = -1; + of_property_read_u32(np, "max-speed", &plat->max_speed); plat->bus_id = of_alias_get_id(np, "ethernet"); if (plat->bus_id < 0) diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 54b53dbdb33cdbfa11e8f51b3358dc756de36871..69fc47089e625b4ab9077e4f6433f452f4b53dd2 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -3163,7 +3163,7 @@ static int 
happy_meal_pci_probe(struct pci_dev *pdev, if (err) { printk(KERN_ERR "happymeal(PCI): Cannot register net device, " "aborting.\n"); - goto err_out_iounmap; + goto err_out_free_coherent; } pci_set_drvdata(pdev, hp); @@ -3196,6 +3196,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, return 0; +err_out_free_coherent: + dma_free_coherent(hp->dma_dev, PAGE_SIZE, + hp->happy_block, hp->hblock_dvma); + err_out_iounmap: iounmap(hp->gregs); diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 424e644724e4608acfe6f2f631ac71e5b7d22f57..e74f2e95a46eb8ab8f01384f0718ebbc4b7a6504 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1144,7 +1144,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv) static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw, int size) { - struct page_pool_params pp_params; + struct page_pool_params pp_params = {}; struct page_pool *pool; pp_params.order = 0; diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index 43222a34cba069b9bc10750cbd9a4bcdfc8b6228..f9514518700ebb7f70009b46583f66af968ec417 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -568,7 +568,9 @@ int cpts_register(struct cpts *cpts) for (i = 0; i < CPTS_MAX_EVENTS; i++) list_add(&cpts->pool_data[i].list, &cpts->pool); - clk_enable(cpts->refclk); + err = clk_enable(cpts->refclk); + if (err) + return err; cpts_write32(cpts, CPTS_EN, control); cpts_write32(cpts, TS_PEND_EN, int_enable); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 52f184500dd6f9a51a43b9aabf30d78e459e6b0e..f3d1814818c720e24c3ee269f02209b1c0a7c89b 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1433,6 +1433,8 @@ static int temac_probe(struct platform_device *pdev) lp->indirect_lock = devm_kmalloc(&pdev->dev, sizeof(*lp->indirect_lock), GFP_KERNEL); + if (!lp->indirect_lock) + return -ENOMEM; spin_lock_init(lp->indirect_lock); } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 89ef84402187ed6130876b6284a22e497a9181df..ef20184277ff7b1001379de51d60d9a83e882471 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -41,8 +41,9 @@ #include "xilinx_axienet.h" /* Descriptors defines for Tx and Rx DMA */ -#define TX_BD_NUM_DEFAULT 64 +#define TX_BD_NUM_DEFAULT 128 #define RX_BD_NUM_DEFAULT 1024 +#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1) #define TX_BD_NUM_MAX 4096 #define RX_BD_NUM_MAX 4096 @@ -496,7 +497,8 @@ static void axienet_setoptions(struct net_device *ndev, u32 options) static int __axienet_device_reset(struct axienet_local *lp) { - u32 timeout; + u32 value; + int ret; /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset * process of Axi DMA takes a while to complete as all pending @@ -506,15 +508,23 @@ static int __axienet_device_reset(struct axienet_local *lp) * they both reset the entire DMA core, so only one needs to be used. 
*/ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK); - timeout = DELAY_OF_ONE_MILLISEC; - while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) & - XAXIDMA_CR_RESET_MASK) { - udelay(1); - if (--timeout == 0) { - netdev_err(lp->ndev, "%s: DMA reset timeout!\n", - __func__); - return -ETIMEDOUT; - } + ret = read_poll_timeout(axienet_dma_in32, value, + !(value & XAXIDMA_CR_RESET_MASK), + DELAY_OF_ONE_MILLISEC, 50000, false, lp, + XAXIDMA_TX_CR_OFFSET); + if (ret) { + dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); + return ret; + } + + /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */ + ret = read_poll_timeout(axienet_ior, value, + value & XAE_INT_PHYRSTCMPLT_MASK, + DELAY_OF_ONE_MILLISEC, 50000, false, lp, + XAE_IS_OFFSET); + if (ret) { + dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); + return ret; } return 0; @@ -623,6 +633,8 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK)) break; + /* Ensure we see complete descriptor update */ + dma_rmb(); phys = desc_get_phys_addr(lp, cur_p); dma_unmap_single(ndev->dev.parent, phys, (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), @@ -631,13 +643,15 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) dev_consume_skb_irq(cur_p->skb); - cur_p->cntrl = 0; cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; cur_p->app4 = 0; - cur_p->status = 0; cur_p->skb = NULL; + /* ensure our transmit path and device don't prematurely see status cleared */ + wmb(); + cur_p->cntrl = 0; + cur_p->status = 0; if (sizep) *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; @@ -646,6 +660,32 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd, return i; } +/** + * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy + * @lp: Pointer to the axienet_local structure + * @num_frag: The number of BDs to check for + * + * Return: 0, on success + * NETDEV_TX_BUSY, if any of the descriptors are not free + * + * This function is invoked before BDs are allocated and transmission starts. + * This function returns 0 if a BD or group of BDs can be allocated for + * transmission. If the BD or any of the BDs are not free the function + * returns a busy status. This is invoked from axienet_start_xmit. + */ +static inline int axienet_check_tx_bd_space(struct axienet_local *lp, + int num_frag) +{ + struct axidma_bd *cur_p; + + /* Ensure we see all descriptor updates from device or TX IRQ path */ + rmb(); + cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num]; + if (cur_p->cntrl) + return NETDEV_TX_BUSY; + return 0; +} + /** * axienet_start_xmit_done - Invoked once a transmit is completed by the * Axi DMA Tx channel. @@ -675,30 +715,8 @@ static void axienet_start_xmit_done(struct net_device *ndev) /* Matches barrier in axienet_start_xmit */ smp_mb(); - netif_wake_queue(ndev); -} - -/** - * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy - * @lp: Pointer to the axienet_local structure - * @num_frag: The number of BDs to check for - * - * Return: 0, on success - * NETDEV_TX_BUSY, if any of the descriptors are not free - * - * This function is invoked before BDs are allocated and transmission starts. - * This function returns 0 if a BD or group of BDs can be allocated for - * transmission. If the BD or any of the BDs are not free the function - * returns a busy status. 
This is invoked from axienet_start_xmit. - */ -static inline int axienet_check_tx_bd_space(struct axienet_local *lp, - int num_frag) -{ - struct axidma_bd *cur_p; - cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num]; - if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK) - return NETDEV_TX_BUSY; - return 0; + if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) + netif_wake_queue(ndev); } /** @@ -730,20 +748,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) num_frag = skb_shinfo(skb)->nr_frags; cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; - if (axienet_check_tx_bd_space(lp, num_frag)) { - if (netif_queue_stopped(ndev)) - return NETDEV_TX_BUSY; - + if (axienet_check_tx_bd_space(lp, num_frag + 1)) { + /* Should not happen as last start_xmit call should have + * checked for sufficient space and queue should only be + * woken when sufficient space is available. + */ netif_stop_queue(ndev); - - /* Matches barrier in axienet_start_xmit_done */ - smp_mb(); - - /* Space might have just been freed - check again */ - if (axienet_check_tx_bd_space(lp, num_frag)) - return NETDEV_TX_BUSY; - - netif_wake_queue(ndev); + if (net_ratelimit()) + netdev_warn(ndev, "TX ring unexpectedly full\n"); + return NETDEV_TX_BUSY; } if (skb->ip_summed == CHECKSUM_PARTIAL) { @@ -804,6 +817,18 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (++lp->tx_bd_tail >= lp->tx_bd_num) lp->tx_bd_tail = 0; + /* Stop queue if next transmit may not have space */ + if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { + netif_stop_queue(ndev); + + /* Matches barrier in axienet_start_xmit_done */ + smp_mb(); + + /* Space might have just been freed - check again */ + if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) + netif_wake_queue(ndev); + } + return NETDEV_TX_OK; } @@ -832,44 +857,53 @@ static void axienet_recv(struct net_device *ndev) while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { dma_addr_t phys; - tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; - - phys = desc_get_phys_addr(lp, cur_p); - dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size, - DMA_FROM_DEVICE); + /* Ensure we see complete descriptor update */ + dma_rmb(); skb = cur_p->skb; cur_p->skb = NULL; - length = cur_p->app4 & 0x0000FFFF; - - skb_put(skb, length); - skb->protocol = eth_type_trans(skb, ndev); - /*skb_checksum_none_assert(skb);*/ - skb->ip_summed = CHECKSUM_NONE; - - /* if we're doing Rx csum offload, set it up */ - if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { - csumstatus = (cur_p->app2 & - XAE_FULL_CSUM_STATUS_MASK) >> 3; - if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) || - (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* skb could be NULL if a previous pass already received the + * packet for this slot in the ring, but failed to refill it + * with a newly allocated buffer. In this case, don't try to + * receive it again. 
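+ * A failed refill leaves cur_p->skb == NULL while the descriptor's
+ * completion bit stays set, so the slot is revisited on the next pass.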
+ */ + if (likely(skb)) { + length = cur_p->app4 & 0x0000FFFF; + + phys = desc_get_phys_addr(lp, cur_p); + dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size, + DMA_FROM_DEVICE); + + skb_put(skb, length); + skb->protocol = eth_type_trans(skb, ndev); + /*skb_checksum_none_assert(skb);*/ + skb->ip_summed = CHECKSUM_NONE; + + /* if we're doing Rx csum offload, set it up */ + if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { + csumstatus = (cur_p->app2 & + XAE_FULL_CSUM_STATUS_MASK) >> 3; + if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED || + csumstatus == XAE_IP_UDP_CSUM_VALIDATED) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && + skb->protocol == htons(ETH_P_IP) && + skb->len > 64) { + skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); + skb->ip_summed = CHECKSUM_COMPLETE; } - } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && - skb->protocol == htons(ETH_P_IP) && - skb->len > 64) { - skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); - skb->ip_summed = CHECKSUM_COMPLETE; - } - netif_rx(skb); + netif_rx(skb); - size += length; - packets++; + size += length; + packets++; + } new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); if (!new_skb) - return; + break; phys = dma_map_single(ndev->dev.parent, new_skb->data, lp->max_frm_size, @@ -878,7 +912,7 @@ static void axienet_recv(struct net_device *ndev) if (net_ratelimit()) netdev_err(ndev, "RX DMA mapping error\n"); dev_kfree_skb(new_skb); - return; + break; } desc_set_phys_addr(lp, phys, cur_p); @@ -886,6 +920,11 @@ static void axienet_recv(struct net_device *ndev) cur_p->status = 0; cur_p->skb = new_skb; + /* Only update tail_p to mark this slot as usable after it has + * been successfully refilled. + */ + tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; + if (++lp->rx_bd_ci >= lp->rx_bd_num) lp->rx_bd_ci = 0; cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; @@ -1361,7 +1400,8 @@ axienet_ethtools_set_ringparam(struct net_device *ndev, if (ering->rx_pending > RX_BD_NUM_MAX || ering->rx_mini_pending || ering->rx_jumbo_pending || - ering->rx_pending > TX_BD_NUM_MAX) + ering->tx_pending < TX_BD_NUM_MIN || + ering->tx_pending > TX_BD_NUM_MAX) return -EINVAL; if (netif_running(ndev)) @@ -2031,15 +2071,19 @@ static int axienet_probe(struct platform_device *pdev) lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; - lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - if (lp->phy_node) { - ret = axienet_mdio_setup(lp); - if (ret) - dev_warn(&pdev->dev, - "error registering MDIO bus: %d\n", ret); - } + /* Reset core now that clocks are enabled, prior to accessing MDIO */ + ret = __axienet_device_reset(lp); + if (ret) + goto cleanup_clk; + + ret = axienet_mdio_setup(lp); + if (ret) + dev_warn(&pdev->dev, + "error registering MDIO bus: %d\n", ret); + if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { + lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!lp->phy_node) { dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); ret = -EINVAL; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 962831cdde4db0860673e24d51f2305fda0cf333..4bd44fbc6ecfa92030021750b0637555d255e74b 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1187,7 +1187,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) if (rc) { 
dev_err(dev, "Cannot register network device, aborting\n"); - goto error; + goto put_node; } dev_info(dev, @@ -1195,6 +1195,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) (unsigned int __force)ndev->mem_start, lp->base_addr, ndev->irq); return 0; +put_node: + of_node_put(lp->phy_node); error: free_netdev(ndev); return rc; diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index bd0beb16d68a9f0a1644715616a522a976ee5de5..83dc1c2c3b84bf5fd739795fef0d35f171380953 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -311,7 +311,6 @@ static void sp_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. */ dev->netdev_ops = &sp_netdev_ops; - dev->needs_free_netdev = true; dev->mtu = SIXP_MTU; dev->hard_header_len = AX25_MAX_HEADER_LEN; dev->header_ops = &ax25_header_ops; @@ -674,14 +673,16 @@ static void sixpack_close(struct tty_struct *tty) */ netif_stop_queue(sp->dev); + unregister_netdev(sp->dev); + del_timer_sync(&sp->tx_t); del_timer_sync(&sp->resync_t); - /* Free all 6pack frame buffers. */ + /* Free all 6pack frame buffers after unreg. */ kfree(sp->rbuff); kfree(sp->xbuff); - unregister_netdev(sp->dev); + free_netdev(sp->dev); } /* Perform I/O control on an active 6pack channel. */ diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 63502a85a97514b3d64598e66dee4947bf6dc0e4..049264a7d9611b5697741b67e8ea5f10dd6c535e 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -31,6 +31,8 @@ #define AX_MTU 236 +/* some arch define END as assembly function ending, just undef it */ +#undef END /* SLIP/KISS protocol characters. */ #define END 0300 /* indicates end of frame */ #define ESC 0333 /* indicates byte stuffing */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c042e22aa7049e98c41cc1a70dd05fbf82eced63..554e1863aab9945562d22aa8afe38f94c8cee33d 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1562,6 +1562,9 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, pcpu_sum = kvmalloc_array(num_possible_cpus(), sizeof(struct netvsc_ethtool_pcpu_stats), GFP_KERNEL); + if (!pcpu_sum) + return; + netvsc_get_pcpu_stats(dev, pcpu_sum); for_each_present_cpu(cpu) { struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu]; diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 7d67f41387f5577959839f902128b1777b4b51ee..4f5ef8a9a9a8796db21f270bbb707eb5204e10f1 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -100,6 +100,7 @@ struct at86rf230_local { unsigned long cal_timeout; bool is_tx; bool is_tx_from_off; + bool was_tx; u8 tx_retry; struct sk_buff *tx_skb; struct at86rf230_state_change tx; @@ -343,7 +344,11 @@ at86rf230_async_error_recover_complete(void *context) if (ctx->free) kfree(ctx); - ieee802154_wake_queue(lp->hw); + if (lp->was_tx) { + lp->was_tx = 0; + dev_kfree_skb_any(lp->tx_skb); + ieee802154_wake_queue(lp->hw); + } } static void @@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - lp->is_tx = 0; + if (lp->is_tx) { + lp->was_tx = 1; + lp->is_tx = 0; + } + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, at86rf230_async_error_recover_complete); } diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 
4eb64709d44cb09865d23d9e34ea5d53a9cfe6b4..fd9f33c833fa367afd5a5a69e8c165f37ab3d70f 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -1771,6 +1771,7 @@ static int ca8210_async_xmit_complete( status ); if (status != MAC_TRANSACTION_OVERFLOW) { + dev_kfree_skb_any(priv->tx_skb); ieee802154_wake_queue(priv->hw); return 0; } @@ -2976,8 +2977,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw) ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND; ca8210_hw->phy->cca_ed_level = -9800; ca8210_hw->phy->symbol_duration = 16; - ca8210_hw->phy->lifs_period = 40; - ca8210_hw->phy->sifs_period = 12; + ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration; + ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration; ca8210_hw->flags = IEEE802154_HW_AFILT | IEEE802154_HW_OMIT_CKSUM | diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 080b15fc00601ef832c5c1090c8872c211037e4c..97981cf7661ad759d901ff667174fe63bfea0a1f 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -786,6 +786,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, goto err_pib; } + pib->channel = 13; rcu_assign_pointer(phy->pib, pib); phy->idx = idx; INIT_LIST_HEAD(&phy->edges); diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 8dc04e2590b18b1b41239c4ebf8ac1382942a8d5..383231b85464252a9dfc6a20f938247e5b546959 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -976,8 +976,8 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp) dev_dbg(printdev(lp), "%s\n", __func__); phy->symbol_duration = 16; - phy->lifs_period = 40; - phy->sifs_period = 12; + phy->lifs_period = 40 * phy->symbol_duration; + phy->sifs_period = 12 * phy->symbol_duration; hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index a37aae00e128fed63dc334232a253c7b2668ad41..621648ce750b7efd60c328152d27cef3f90c0f5c 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -901,27 +901,35 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count) struct gsi *gsi; u32 backlog; - if (!endpoint->replenish_enabled) { + if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) { if (count) atomic_add(count, &endpoint->replenish_saved); return; } + /* If already active, just update the backlog */ + if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) { + if (count) + atomic_add(count, &endpoint->replenish_backlog); + return; + } while (atomic_dec_not_zero(&endpoint->replenish_backlog)) if (ipa_endpoint_replenish_one(endpoint)) goto try_again_later; + + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); + if (count) atomic_add(count, &endpoint->replenish_backlog); return; try_again_later: - /* The last one didn't succeed, so fix the backlog */ - backlog = atomic_inc_return(&endpoint->replenish_backlog); + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); - if (count) - atomic_add(count, &endpoint->replenish_backlog); + /* The last one didn't succeed, so fix the backlog */ + backlog = atomic_add_return(count + 1, &endpoint->replenish_backlog); /* Whenever a receive buffer transaction completes we'll try to * replenish again. 
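 * The IPA_REPLENISH_ACTIVE flag was cleared above, so whichever context
 * handles that completion can win the test-and-set and resume the work.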
It's unlikely, but if we fail to supply even @@ -941,7 +949,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint) u32 max_backlog; u32 saved; - endpoint->replenish_enabled = true; + set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) atomic_add(saved, &endpoint->replenish_backlog); @@ -955,7 +963,7 @@ static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint) { u32 backlog; - endpoint->replenish_enabled = false; + clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) atomic_add(backlog, &endpoint->replenish_saved); } @@ -1472,7 +1480,8 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) /* RX transactions require a single TRE, so the maximum * backlog is the same as the maximum outstanding TREs. */ - endpoint->replenish_enabled = false; + clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); atomic_set(&endpoint->replenish_saved, gsi_channel_tre_max(gsi, endpoint->channel_id)); atomic_set(&endpoint->replenish_backlog, 0); diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 58a245de488e86298f2bf5eb515032cf3f00a6e9..823c4a1296587c225f65239e624fe1911b707a52 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -39,6 +39,19 @@ enum ipa_endpoint_name { #define IPA_ENDPOINT_MAX 32 /* Max supported by driver */ +/** + * enum ipa_replenish_flag: RX buffer replenish flags + * + * @IPA_REPLENISH_ENABLED: Whether receive buffer replenishing is enabled + * @IPA_REPLENISH_ACTIVE: Whether replenishing is underway + * @IPA_REPLENISH_COUNT: Number of defined replenish flags + */ +enum ipa_replenish_flag { + IPA_REPLENISH_ENABLED, + IPA_REPLENISH_ACTIVE, + IPA_REPLENISH_COUNT, /* Number of flags (must be last) */ +}; + /** * struct ipa_endpoint - IPA endpoint information * @channel_id: EP's GSI channel @@ -60,7 +73,7 @@ struct ipa_endpoint { struct net_device *netdev; /* Receive buffer replenishing for RX endpoints */ - bool replenish_enabled; + DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT); u32 replenish_ready; atomic_t replenish_saved; atomic_t replenish_backlog; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 779a84f70914e21245a1c7f74bf70eaef098e439..4e0fa42c18d035cbfa7f56061845a50d8abec0da 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -14,7 +14,7 @@ int sysctl_ipvlan_loop_qlen = 131072; int sysctl_ipvlan_loop_delay = 10; static int ipvlan_default_mode = IPVLAN_MODE_L3; module_param(ipvlan_default_mode, int, 0400); -MODULE_PARM_DESC(ipvlan_default_mode, "set ipvlan default mode: 0 for l2, 1 for l3, 2 for l3s, 3 for l2e, others invalid now"); +MODULE_PARM_DESC(ipvlan_default_mode, "set ipvlan default mode: 0 for l2, 1 for l3, 2 for l2e, 3 for l3s, others invalid now"); static struct ctl_table_header *ipvlan_table_hrd; static struct ctl_table ipvlan_table[] = { diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index c601d3df272202bd52d23888c59312cbf2e87b4e..789a124809e3c2b118bb376b9b011eabfabf64b1 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3869,6 +3869,18 @@ static void macsec_common_dellink(struct net_device *dev, struct list_head *head struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; + /* If h/w offloading is available, 
propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (ops) { + ctx.secy = &macsec->secy; + macsec_offload(ops->mdo_del_secy, &ctx); + } + } + unregister_netdevice_queue(dev, head); list_del_rcu(&macsec->secys); macsec_del_dev(macsec); @@ -3883,18 +3895,6 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head) struct net_device *real_dev = macsec->real_dev; struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); - /* If h/w offloading is available, propagate to the device */ - if (macsec_is_offloaded(macsec)) { - const struct macsec_ops *ops; - struct macsec_context ctx; - - ops = macsec_get_ops(netdev_priv(dev), &ctx); - if (ops) { - ctx.secy = &macsec->secy; - macsec_offload(ops->mdo_del_secy, &ctx); - } - } - macsec_common_dellink(dev, head); if (list_empty(&rxd->secys)) { @@ -4017,6 +4017,15 @@ static int macsec_newlink(struct net *net, struct net_device *dev, !macsec_check_offload(macsec->offload, macsec)) return -EOPNOTSUPP; + /* send_sci must be set to true when transmit sci explicitly is set */ + if ((data && data[IFLA_MACSEC_SCI]) && + (data && data[IFLA_MACSEC_INC_SCI])) { + u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); + + if (!send_sci) + return -EINVAL; + } + if (data && data[IFLA_MACSEC_ICV_LEN]) icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); mtu = real_dev->mtu - icv_len - macsec_extra_len(true); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 694e2f5dbbe591c45c80913750231c0986c16854..39801c31e5071f8860075ef14ebfd5022ea66f2f 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -133,11 +133,17 @@ static void macvtap_setup(struct net_device *dev) dev->tx_queue_len = TUN_READQ_SIZE; } +static struct net *macvtap_link_net(const struct net_device *dev) +{ + return dev_net(macvlan_dev_real_dev(dev)); +} + static struct rtnl_link_ops macvtap_link_ops __read_mostly = { .kind = "macvtap", .setup = macvtap_setup, .newlink = macvtap_newlink, .dellink = macvtap_dellink, + .get_link_net = macvtap_link_net, .priv_size = sizeof(struct macvtap_dev), }; diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c index 966c3b4ad59d14e74bc113680e0fbff231d296d2..e2273588c75b6f3dd212269611d7f41a8769ed16 100644 --- a/drivers/net/mdio/mdio-aspeed.c +++ b/drivers/net/mdio/mdio-aspeed.c @@ -148,6 +148,7 @@ static const struct of_device_id aspeed_mdio_of_match[] = { { .compatible = "aspeed,ast2600-mdio", }, { }, }; +MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match); static struct platform_driver aspeed_mdio_driver = { .driver = { diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c index fbd36891ee643cd0ae8cc53c44f2e940e6365c7c..5d171e7f118df40c4b08303f5c18deedc6e623dd 100644 --- a/drivers/net/mdio/mdio-bcm-unimac.c +++ b/drivers/net/mdio/mdio-bcm-unimac.c @@ -5,20 +5,18 @@ * Copyright (C) 2014-2017 Broadcom */ +#include +#include +#include #include -#include -#include -#include #include -#include -#include -#include - #include -#include #include - +#include +#include #include +#include +#include #define MDIO_CMD 0x00 #define MDIO_START_BUSY (1 << 29) diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c index 5136275c8e7399fbbd74d048abb89f50929e5af3..99588192cc78f3d1f068b936157ef03f8dcd0656 100644 --- a/drivers/net/mdio/mdio-bitbang.c +++ b/drivers/net/mdio/mdio-bitbang.c @@ -14,10 +14,10 @@ * Vitaly Bordug */ -#include +#include #include 
+#include #include -#include #define MDIO_READ 2 #define MDIO_WRITE 1 diff --git a/drivers/net/mdio/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c index 1afd6fc1a351708fb18c8a0991ab64d5d687b47c..95ce274c1be143c30100040d523b389ab0b18122 100644 --- a/drivers/net/mdio/mdio-cavium.c +++ b/drivers/net/mdio/mdio-cavium.c @@ -4,9 +4,9 @@ */ #include +#include #include #include -#include #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c index 1b00235d7dc5b56c76408edfeb8fba54cf948d15..56c8f914f89304b5426b7d73c6369c4b93a634b6 100644 --- a/drivers/net/mdio/mdio-gpio.c +++ b/drivers/net/mdio/mdio-gpio.c @@ -17,15 +17,15 @@ * Vitaly Bordug */ -#include -#include +#include #include -#include -#include #include #include -#include +#include #include +#include +#include +#include struct mdio_gpio_info { struct mdiobb_ctrl ctrl; diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 25c25ea6da66fb01ac9c2093b5ec26c0ad3c3984..9cd71d896963d7b48320c940231868ef44ada34b 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -3,10 +3,10 @@ /* Copyright (c) 2020 Sartura Ltd. */ #include -#include -#include #include #include +#include +#include #include #include #include diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c index f0a6bfa61645e1f98d2a9ecddfb430c6f9ed79a0..49d4e9aa30bbf069ee1ae76908df05f80d56aa41 100644 --- a/drivers/net/mdio/mdio-ipq8064.c +++ b/drivers/net/mdio/mdio-ipq8064.c @@ -7,12 +7,12 @@ #include #include +#include #include -#include #include #include #include -#include +#include /* MII address register definitions */ #define MII_ADDR_REG_ADDR 0x10 diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 11f583fd4611fe314042b8f638bf763bf6374b2a..037649bef92ea5b296b8be49823198ea88fb3345 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -6,14 +6,14 @@ * Copyright (c) 2017 Microsemi Corporation */ -#include -#include -#include -#include #include #include #include +#include +#include #include +#include +#include #define MSCC_MIIM_REG_STATUS 0x0 #define MSCC_MIIM_STATUS_STAT_PENDING BIT(2) @@ -76,6 +76,9 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) u32 val; int ret; + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + ret = mscc_miim_wait_pending(bus); if (ret) goto out; @@ -105,6 +108,9 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id, struct mscc_miim_dev *miim = bus->priv; int ret; + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + ret = mscc_miim_wait_pending(bus); if (ret < 0) goto out; diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c index 42fb5f166136bd03909511ca2219334d4ffd6e47..641cfa41f492a4645c0c228ebdd320e6ee250afc 100644 --- a/drivers/net/mdio/mdio-mux-bcm-iproc.c +++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c @@ -3,14 +3,14 @@ * Copyright 2016 Broadcom */ #include -#include +#include #include -#include +#include +#include #include +#include #include -#include -#include -#include +#include #define MDIO_RATE_ADJ_EXT_OFFSET 0x000 #define MDIO_RATE_ADJ_INT_OFFSET 0x004 diff --git a/drivers/net/mdio/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c index 10a758fdc9e63de28b0542fc95a71a30bc9f10ac..3c7f16f06b452b4143ece4f92df99f682783fe93 100644 --- a/drivers/net/mdio/mdio-mux-gpio.c +++ b/drivers/net/mdio/mdio-mux-gpio.c @@ -3,13 +3,13 @@ * Copyright (C) 2011, 2012 Cavium, Inc. 
*/ -#include #include -#include +#include +#include #include +#include #include -#include -#include +#include #define DRV_VERSION "1.1" #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" diff --git a/drivers/net/mdio/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c index d1a8780e24d881139bb62bca049c6b9b06e82b27..c02fb2a067eef22dc34cee77fa372aef72382d86 100644 --- a/drivers/net/mdio/mdio-mux-mmioreg.c +++ b/drivers/net/mdio/mdio-mux-mmioreg.c @@ -7,13 +7,13 @@ * Copyright 2012 Freescale Semiconductor, Inc. */ -#include #include +#include +#include #include #include -#include #include -#include +#include struct mdio_mux_mmioreg_state { void *mux_handle; diff --git a/drivers/net/mdio/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c index d6564381aa3e4f619d4c7bac8820a464f7bc31fa..527acfc3c045a94b613a97b3d4e727f76c6b4a82 100644 --- a/drivers/net/mdio/mdio-mux-multiplexer.c +++ b/drivers/net/mdio/mdio-mux-multiplexer.c @@ -4,10 +4,10 @@ * Copyright 2019 NXP */ -#include #include #include #include +#include struct mdio_mux_multiplexer_state { struct mux_control *muxc; diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c index ccb3ee704eb1c76b2189b5f59d041755456c1a5d..3dde0c2b3e0977ce81ab1abe6cef60d49ee47b4a 100644 --- a/drivers/net/mdio/mdio-mux.c +++ b/drivers/net/mdio/mdio-mux.c @@ -3,12 +3,12 @@ * Copyright (C) 2011, 2012 Cavium, Inc. */ -#include -#include -#include #include +#include #include +#include #include +#include #define DRV_DESCRIPTION "MDIO bus multiplexer driver" diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c index 6faf39314ac9366f919d49bb0e8854f481edd898..e096e68ac667b5a01e418913a9d09c4fb4e9f608 100644 --- a/drivers/net/mdio/mdio-octeon.c +++ b/drivers/net/mdio/mdio-octeon.c @@ -3,13 +3,13 @@ * Copyright (C) 2009-2015 Cavium, Inc. */ -#include +#include +#include +#include #include #include -#include -#include #include -#include +#include #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c index dd7430c998a2a5db4ec9ee9730e693891d7fd50b..822d2cdd2f3599025f3e79d4243337c18114c951 100644 --- a/drivers/net/mdio/mdio-thunder.c +++ b/drivers/net/mdio/mdio-thunder.c @@ -3,14 +3,14 @@ * Copyright (C) 2009-2016 Cavium, Inc. */ -#include -#include -#include +#include #include -#include #include -#include +#include +#include +#include #include +#include #include "mdio-cavium.h" diff --git a/drivers/net/mdio/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c index 461207cdf5d6e81d4c9edfbe1899ad3c0a07dde2..7ab4e26db08c265f9a1a2170a6405b15a31d051b 100644 --- a/drivers/net/mdio/mdio-xgene.c +++ b/drivers/net/mdio/mdio-xgene.c @@ -13,11 +13,11 @@ #include #include #include -#include -#include #include -#include +#include +#include #include +#include #include static bool xgene_mdio_status; diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c index 4daf94bb56a53d222ecadbd9e2c7028d7073b4a5..ea0bf13e8ac3fb8ccc88162e186d51fd1cc2e8e5 100644 --- a/drivers/net/mdio/of_mdio.c +++ b/drivers/net/mdio/of_mdio.c @@ -8,17 +8,17 @@ * out of the OpenFirmware device tree and using it to populate an mii_bus. 
*/ -#include #include -#include #include -#include -#include +#include +#include +#include #include #include #include #include -#include +#include +#include #define DEFAULT_GPIO_RESET_DELAY 10 /* in microseconds */ diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index dbed15dc0fe777468c8e0a92514d53c6df9a2484..0cde17bd743f3cb3798f5c282cbd3e7fcae630e9 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -11,6 +11,7 @@ */ #include "bcm-phy-lib.h" +#include #include #include #include @@ -622,6 +623,26 @@ static int brcm_fet_config_init(struct phy_device *phydev) if (err < 0) return err; + /* The datasheet indicates the PHY needs up to 1us to complete a reset, + * build some slack here. + */ + usleep_range(1000, 2000); + + /* The PHY requires 65 MDC clock cycles to complete a write operation + * and turnaround the line properly. + * + * We ignore -EIO here as the MDIO controller (e.g.: mdio-bcm-unimac) + * may flag the lack of turn-around as a read failure. This is + * particularly true with this combination since the MDIO controller + * only used 64 MDC cycles. This is not a critical failure in this + * specific case and it has no functional impact otherwise, so we let + * that one go through. If there is a genuine bus error, the next read + * of MII_BRCM_FET_INTREG will error out. + */ + err = phy_read(phydev, MII_BMCR); + if (err < 0 && err != -EIO) + return err; + reg = phy_read(phydev, MII_BRCM_FET_INTREG); if (reg < 0) return reg; @@ -789,6 +810,7 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54616S", /* PHY_GBIT_FEATURES */ + .soft_reset = genphy_soft_reset, .config_init = bcm54xx_config_init, .config_aneg = bcm54616s_config_aneg, .ack_interrupt = bcm_phy_ack_intr, diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 7bf43031cea8c6c533b05e68a8ecaf6fb0029500..3d75b98f3051d9b5a1c49db8f2aa7e791220aa32 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -289,7 +289,7 @@ static int dp83822_config_intr(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_DP83822_MISR1, 0); + err = phy_write(phydev, MII_DP83822_MISR2, 0); if (err < 0) return err; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 91616182c311f9733dc63bf2ff86f48caad4706f..54786712a99130583bf0546c2f9a500b9bb021b3 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -515,9 +515,9 @@ static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev) else mscr = 0; - return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE, - MII_88E1121_PHY_MSCR_REG, - MII_88E1121_PHY_MSCR_DELAY_MASK, mscr); + return phy_modify_paged_changed(phydev, MII_MARVELL_MSCR_PAGE, + MII_88E1121_PHY_MSCR_REG, + MII_88E1121_PHY_MSCR_DELAY_MASK, mscr); } static int m88e1121_config_aneg(struct phy_device *phydev) @@ -531,11 +531,13 @@ static int m88e1121_config_aneg(struct phy_device *phydev) return err; } + changed = err; + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; - changed = err; + changed |= err; err = genphy_config_aneg(phydev); if (err < 0) @@ -1059,16 +1061,15 @@ static int m88e1118_config_aneg(struct phy_device *phydev) { int err; - err = genphy_soft_reset(phydev); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; - err = marvell_set_polarity(phydev, phydev->mdix_ctrl); + err = genphy_config_aneg(phydev); if (err < 0) return err; - err = genphy_config_aneg(phydev); - return 
0; + return genphy_soft_reset(phydev); } static int m88e1118_config_init(struct phy_device *phydev) @@ -1090,6 +1091,12 @@ static int m88e1118_config_init(struct phy_device *phydev) if (err < 0) return err; + if (phy_interface_is_rgmii(phydev)) { + err = m88e1121_config_aneg_rgmii_delays(phydev); + if (err < 0) + return err; + } + /* Adjust LED Control */ if (phydev->dev_flags & MARVELL_PHY_M1118_DNS323_LEDS) err = phy_write(phydev, 0x10, 0x1100); @@ -1529,8 +1536,8 @@ static int marvell_suspend(struct phy_device *phydev) int err; /* Suspend the fiber mode first */ - if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, - phydev->supported)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; @@ -1564,8 +1571,8 @@ static int marvell_resume(struct phy_device *phydev) int err; /* Resume the fiber mode first */ - if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, - phydev->supported)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 2645ca35103c9c56058ab21ab4a16a775a95158d..c416ab1d2b008899d85955435298acb31d14ca4f 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -588,7 +588,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device); bus->state = MDIOBUS_REGISTERED; - pr_info("%s: probed\n", bus->name); + dev_dbg(&bus->dev, "probed\n"); return 0; error: diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 41a410124437d105b9657b9174d8b56f52606148..e14fa72791b0e47f1a5a1c22e1538b1196611fcd 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -2584,3 +2584,6 @@ MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl); MODULE_DESCRIPTION("Microsemi VSC85xx PHY driver"); MODULE_AUTHOR("Nagaraju Lakkaraju"); MODULE_LICENSE("Dual MIT/GPL"); + +MODULE_FIRMWARE(MSCC_VSC8584_REVB_INT8051_FW); +MODULE_FIRMWARE(MSCC_VSC8574_REVB_INT8051_FW); diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 8d333d3084ed32a7c345656555b2b59376bb8f98..cccb83dae673ba1db8db6ea226fb51df49dd9c9a 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -161,11 +161,11 @@ static const struct phy_setting settings[] = { PHY_SETTING( 2500, FULL, 2500baseT_Full ), PHY_SETTING( 2500, FULL, 2500baseX_Full ), /* 1G */ - PHY_SETTING( 1000, FULL, 1000baseKX_Full ), PHY_SETTING( 1000, FULL, 1000baseT_Full ), PHY_SETTING( 1000, HALF, 1000baseT_Half ), PHY_SETTING( 1000, FULL, 1000baseT1_Full ), PHY_SETTING( 1000, FULL, 1000baseX_Full ), + PHY_SETTING( 1000, FULL, 1000baseKX_Full ), /* 100M */ PHY_SETTING( 100, FULL, 100baseT_Full ), PHY_SETTING( 100, FULL, 100baseT1_Full ), diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 85f3cde5ffd09d9bc05956b47f23339b9c1f891c..d2f6d8107595a673d11b17e42ed98dcf2c27bfdc 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1682,6 +1682,9 @@ void phy_detach(struct phy_device *phydev) phy_driver_is_genphy_10g(phydev)) device_release_driver(&phydev->mdio.dev); + /* Assert the reset signal */ + phy_device_reset(phydev, 1); + /* * The phydev might go away on the put_device() below, so avoid * a use-after-free bug by reading the underlying bus first. 
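 * The reset line was asserted above, while the phydev was still
 * guaranteed to be alive, so the GPIO access cannot touch freed memory.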
@@ -1693,9 +1696,6 @@ void phy_detach(struct phy_device *phydev) ndev_owner = dev->dev.parent->driver->owner; if (ndev_owner != bus->owner) module_put(bus->owner); - - /* Assert the reset signal */ - phy_device_reset(phydev, 1); } EXPORT_SYMBOL(phy_detach); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 4cf874fb5c5b414bc76b190d1d355a75df5aff76..850915a37f4c2abbcef07be33bb8eccc917e7cc1 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -74,6 +74,12 @@ static const struct sfp_quirk sfp_quirks[] = { .vendor = "HUAWEI", .part = "MA5671A", .modes = sfp_quirk_2500basex, + }, { + // Lantech 8330-262D-E can operate at 2500base-X, but + // incorrectly report 2500MBd NRZ in their EEPROM + .vendor = "Lantech", + .part = "8330-262D-E", + .modes = sfp_quirk_2500basex, }, { .vendor = "UBNT", .part = "UF-INSTANT", @@ -609,6 +615,11 @@ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode) else if (ret < 0) return ERR_PTR(ret); + if (!fwnode_device_is_available(ref.fwnode)) { + fwnode_handle_put(ref.fwnode); + return NULL; + } + bus = sfp_bus_get(ref.fwnode); fwnode_handle_put(ref.fwnode); if (!bus) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 32c34c728c7a1a7076dc0c0b00476259fc292172..efffa65f8214313efde976e20ed95d85f191359e 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -1589,17 +1589,20 @@ static int sfp_sm_probe_for_phy(struct sfp *sfp) static int sfp_module_parse_power(struct sfp *sfp) { u32 power_mW = 1000; + bool supports_a2; if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL)) power_mW = 1500; if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL)) power_mW = 2000; + supports_a2 = sfp->id.ext.sff8472_compliance != + SFP_SFF8472_COMPLIANCE_NONE || + sfp->id.ext.diagmon & SFP_DIAGMON_DDM; + if (power_mW > sfp->max_power_mW) { /* Module power specification exceeds the allowed maximum. */ - if (sfp->id.ext.sff8472_compliance == - SFP_SFF8472_COMPLIANCE_NONE && - !(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) { + if (!supports_a2) { /* The module appears not to implement bus address * 0xa2, so assume that the module powers up in the * indicated mode. @@ -1616,11 +1619,25 @@ static int sfp_module_parse_power(struct sfp *sfp) } } + if (power_mW <= 1000) { + /* Modules below 1W do not require a power change sequence */ + sfp->module_power_mW = power_mW; + return 0; + } + + if (!supports_a2) { + /* The module power level is below the host maximum and the + * module appears not to implement bus address 0xa2, so assume + * that the module powers up in the indicated mode. + */ + return 0; + } + /* If the module requires a higher power mode, but also requires * an address change sequence, warn the user that the module may * not be functional. 
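 * Reaching this point implies power_mW > 1000 and that A2h is
 * implemented; every other combination has already returned above.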
*/ - if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE && power_mW > 1000) { + if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE) { dev_warn(sfp->dev, "Address Change Sequence not supported but module requires %u.%uW, module may not be functional\n", power_mW / 1000, (power_mW / 100) % 10); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 33b2e0fb68bbbae1f2c27564e425764672a14a3c..2b9815ec4a622ff9b03805ae06f5daec40fa151e 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -69,6 +69,8 @@ #define MPHDRLEN 6 /* multilink protocol header length */ #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ +#define PPP_PROTO_LEN 2 + /* * An instance of /dev/ppp can be associated with either a ppp * interface unit or a ppp channel. In both cases, file->private_data @@ -496,6 +498,9 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, if (!pf) return -ENXIO; + /* All PPP packets should start with the 2-byte protocol */ + if (count < PPP_PROTO_LEN) + return -EINVAL; ret = -ENOMEM; skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); if (!skb) @@ -1632,7 +1637,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) } ++ppp->stats64.tx_packets; - ppp->stats64.tx_bytes += skb->len - 2; + ppp->stats64.tx_bytes += skb->len - PPP_PROTO_LEN; switch (proto) { case PPP_IP: diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index f81fb0b13a944f6a51d2b5be6135b14055b9f9db..369bd30fed35f624032066db954301d377e247a1 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -468,7 +468,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue) spin_lock(&sl->lock); if (netif_queue_stopped(dev)) { - if (!netif_running(dev)) + if (!netif_running(dev) || !sl->tty) goto out; /* May be we must check transmitter timeout here ? 
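The ppp_write() guard above is one instance of a pattern that recurs through this series: validate the minimum length an on-wire header implies before reading any field from the buffer. A minimal sketch of that check, with hypothetical names rather than the kernel's own helpers:

#include <stddef.h>
#include <stdint.h>

#define PPP_PROTO_LEN 2	/* every PPP frame starts with a 2-byte protocol field */

/* Return the protocol field, or -1 if the buffer is too short to hold one. */
static int ppp_peek_proto(const uint8_t *buf, size_t len)
{
	if (len < PPP_PROTO_LEN)	/* reject runt writes before parsing */
		return -1;
	return (buf[0] << 8) | buf[1];	/* the protocol field is big-endian */
}

The same rule reappears below: tap/tun only trust ctl->type once msg_controllen matches sizeof(struct tun_msg_ctl), and sr9700 and cdc_ncm reject lengths that fall outside the received skb.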
diff --git a/drivers/net/tap.c b/drivers/net/tap.c index f549d3a8e59c0380c7f2d375d88900bf422b4675..8f7bb15206e9f3dd9f06e62d85b5e794b27f77a6 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1202,7 +1202,8 @@ static int tap_sendmsg(struct socket *sock, struct msghdr *m, struct xdp_buff *xdp; int i; - if (ctl && (ctl->type == TUN_MSG_PTR)) { + if (m->msg_controllen == sizeof(struct tun_msg_ctl) && + ctl && ctl->type == TUN_MSG_PTR) { for (i = 0; i < ctl->num; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; tap_get_user_xdp(q, xdp); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ab64f3d007c1320187a61df1c0daf9eb5a3110e8..aef966a9dae27e6576333fce4d2b95e23137f30c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2499,7 +2499,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) if (!tun) return -EBADFD; - if (ctl && (ctl->type == TUN_MSG_PTR)) { + if (m->msg_controllen == sizeof(struct tun_msg_ctl) && + ctl && ctl->type == TUN_MSG_PTR) { struct tun_page tpage; int n = ctl->num; int flush = 0; diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 0717c18015c9c5a93c2dc8dc29d864addf6ea8fa..c9c40951817441db891db06350fec65a728f7816 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if (start_of_descs != desc_offset) goto err; - /* self check desc_offset from header*/ - if (desc_offset >= skb_len) + /* self check desc_offset from header and make sure that the + * bounds of the metadata array are inside the SKB + */ + if (pkt_count * 2 + desc_offset >= skb_len) goto err; + /* Packets must not overlap the metadata array */ + skb_trim(skb, desc_offset); + if (pkt_count == 0) goto err; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index b77b0a33d697dd3295b44a5543707db21649d627..0b0cbcee1920bdabe037345450c89e22e3853fa2 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1467,58 +1467,68 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u16 hdr_off; u32 *pkt_hdr; - /* This check is no longer done by usbnet */ - if (skb->len < dev->net->hard_header_len) + /* At the end of the SKB, there's a header telling us how many packets + * are bundled into this buffer and where we can find an array of + * per-packet metadata (which contains elements encoded into u16). + */ + if (skb->len < 4) return 0; - skb_trim(skb, skb->len - 4); rx_hdr = get_unaligned_le32(skb_tail_pointer(skb)); - pkt_cnt = (u16)rx_hdr; hdr_off = (u16)(rx_hdr >> 16); + + if (pkt_cnt == 0) + return 0; + + /* Make sure that the bounds of the metadata array are inside the SKB + * (and in front of the counter at the end). 
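+ * Trimming the skb to hdr_off below additionally guarantees that no
+ * bundled packet can extend into the metadata that describes it.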
+ */ + if (pkt_cnt * 2 + hdr_off > skb->len) + return 0; pkt_hdr = (u32 *)(skb->data + hdr_off); - while (pkt_cnt--) { + /* Packets must not overlap the metadata array */ + skb_trim(skb, hdr_off); + + for (; ; pkt_cnt--, pkt_hdr++) { u16 pkt_len; le32_to_cpus(pkt_hdr); pkt_len = (*pkt_hdr >> 16) & 0x1fff; - /* Check CRC or runt packet */ - if ((*pkt_hdr & AX_RXHDR_CRC_ERR) || - (*pkt_hdr & AX_RXHDR_DROP_ERR)) { - skb_pull(skb, (pkt_len + 7) & 0xFFF8); - pkt_hdr++; - continue; - } - - if (pkt_cnt == 0) { - skb->len = pkt_len; - /* Skip IP alignment pseudo header */ - skb_pull(skb, 2); - skb_set_tail_pointer(skb, skb->len); - skb->truesize = pkt_len + sizeof(struct sk_buff); - ax88179_rx_checksum(skb, pkt_hdr); - return 1; - } + if (pkt_len > skb->len) + return 0; - ax_skb = skb_clone(skb, GFP_ATOMIC); - if (ax_skb) { + /* Check CRC or runt packet */ + if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) && + pkt_len >= 2 + ETH_HLEN) { + bool last = (pkt_cnt == 0); + + if (last) { + ax_skb = skb; + } else { + ax_skb = skb_clone(skb, GFP_ATOMIC); + if (!ax_skb) + return 0; + } ax_skb->len = pkt_len; /* Skip IP alignment pseudo header */ skb_pull(ax_skb, 2); skb_set_tail_pointer(ax_skb, ax_skb->len); ax_skb->truesize = pkt_len + sizeof(struct sk_buff); ax88179_rx_checksum(ax_skb, pkt_hdr); + + if (last) + return 1; + usbnet_skb_return(dev, ax_skb); - } else { - return 0; } - skb_pull(skb, (pkt_len + 7) & 0xFFF8); - pkt_hdr++; + /* Trim this packet away from the SKB */ + if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8)) + return 0; } - return 1; } static struct sk_buff * diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 6aaa0675c28a397c30991375f22256e9209bb2a3..43ddbe61dc58e140015e43317efcd7893fdb1693 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -570,6 +570,11 @@ static const struct usb_device_id products[] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible; * wire-incompatible with true CDC Ethernet implementations. * (And, it seems, needlessly so...) 
@@ -623,6 +628,13 @@ static const struct usb_device_id products[] = { .idProduct = 0x9032, /* SL-6000 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 77ac5a721e7b6e6d8e1b31b3fc11547bb793a2d3..414341c9cf5ae17dc4f0ff14f02f37468d57c78a 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -658,6 +658,11 @@ static const struct usb_device_id mbim_devs[] = { .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, }, + /* Telit FN990 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, + }, + /* default entry */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_zlp, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index eaaa5aee58251e5469dde979bddef20edf5cac85..ab91fa5b0194db57041c8f92ec9ba4ea62632a25 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1702,10 +1702,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) { struct sk_buff *skb; struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; - int len; + unsigned int len; int nframes; int x; - int offset; + unsigned int offset; union { struct usb_cdc_ncm_ndp16 *ndp16; struct usb_cdc_ncm_ndp32 *ndp32; @@ -1777,8 +1777,8 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) break; } - /* sanity checking */ - if (((offset + len) > skb_in->len) || + /* sanity checking - watch out for integer wrap*/ + if ((offset > skb_in->len) || (len > skb_in->len - offset) || (len > ctx->rx_max) || (len < ETH_HLEN)) { netif_dbg(dev, rx_err, dev->net, "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n", diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 207e59e74935aaf4b23add136b611d3138a6349b..06d9f19ca142a69a7c70d4200cd0d7455cbb7d4e 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone) if (tx_buf == NULL) goto free_rx_urb; - rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE, + rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, GFP_KERNEL, &rx_urb->transfer_dma); if (rx_buf == NULL) goto free_tx_buf; @@ -146,7 +146,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone) static void ipheth_free_urbs(struct ipheth_device *iphone) { - usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf, + usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf, iphone->rx_urb->transfer_dma); usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf, iphone->tx_urb->transfer_dma); @@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags) usb_fill_bulk_urb(dev->rx_urb, udev, usb_rcvbulkpipe(udev, dev->bulk_in), - dev->rx_buf, IPHETH_BUF_SIZE, + dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, ipheth_rcvbulk_callback, dev); dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 
09bfa6a4dfbc17ecd40bc1fd37f23a8e88b75afe..7e40e2e2f37230c43ac875997485e23741abb420 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -108,8 +108,16 @@ static const char driver_name[] = "MOSCHIP usb-ethernet driver"; static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data) { - return usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ, - 0x0000, index, data, size); + int ret; + + ret = usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ, + 0x0000, index, data, size); + if (ret < 0) + return ret; + else if (ret < size) + return -ENODATA; + + return ret; } static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 6e033ba717030e9c26a366b21f0bb74e2c078bc3..597766d14563e07242d76a5ae19b2321d0f5f3c6 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1333,6 +1333,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/ + {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/ + {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 465e11dcdf12938d938a966b560a2fbbe20d6bd5..e5b74485114675fae3cc291f1501a1cd06088c81 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -84,9 +84,10 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) { - netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", - index, ret); + if (ret < 0) { + if (ret != -ENODEV) + netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n", + index, ret); return ret; } @@ -116,7 +117,7 @@ static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index, ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, &buf, 4); - if (unlikely(ret < 0)) + if (ret < 0 && ret != -ENODEV) netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n", index, ret); @@ -159,6 +160,9 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev, do { ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm); if (ret < 0) { + /* Ignore -ENODEV error during disconnect() */ + if (ret == -ENODEV) + return 0; netdev_warn(dev->net, "Error reading MII_ACCESS\n"); return ret; } @@ -194,7 +198,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_); ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_ADDR\n"); goto done; } @@ -206,7 +211,8 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error reading MII_DATA\n"); + if (ret != -ENODEV) + 
netdev_warn(dev->net, "Error reading MII_DATA\n"); goto done; } @@ -214,6 +220,10 @@ static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx, done: mutex_unlock(&dev->phy_mutex); + + /* Ignore -ENODEV error during disconnect() */ + if (ret == -ENODEV) + return 0; return ret; } @@ -235,7 +245,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id, val = regval; ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_DATA\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_DATA\n"); goto done; } @@ -243,7 +254,8 @@ static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id, addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_); ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm); if (ret < 0) { - netdev_warn(dev->net, "Error writing MII_ADDR\n"); + if (ret != -ENODEV) + netdev_warn(dev->net, "Error writing MII_ADDR\n"); goto done; } @@ -1049,6 +1061,14 @@ static const struct net_device_ops smsc95xx_netdev_ops = { .ndo_set_features = smsc95xx_set_features, }; +static void smsc95xx_handle_link_change(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + + phy_print_status(net->phydev); + usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); +} + static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata; @@ -1153,6 +1173,17 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->min_mtu = ETH_MIN_MTU; dev->net->max_mtu = ETH_DATA_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + + ret = phy_connect_direct(dev->net, pdata->phydev, + &smsc95xx_handle_link_change, + PHY_INTERFACE_MODE_MII); + if (ret) { + netdev_err(dev->net, "can't attach PHY to %s\n", pdata->mdiobus->id); + goto unregister_mdio; + } + + phy_attached_info(dev->net->phydev); + return 0; unregister_mdio: @@ -1170,47 +1201,25 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata = dev->driver_priv; + phy_disconnect(dev->net->phydev); mdiobus_unregister(pdata->mdiobus); mdiobus_free(pdata->mdiobus); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); } -static void smsc95xx_handle_link_change(struct net_device *net) -{ - struct usbnet *dev = netdev_priv(net); - - phy_print_status(net->phydev); - usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); -} - static int smsc95xx_start_phy(struct usbnet *dev) { - struct smsc95xx_priv *pdata = dev->driver_priv; - struct net_device *net = dev->net; - int ret; - - ret = smsc95xx_reset(dev); - if (ret < 0) - return ret; + phy_start(dev->net->phydev); - ret = phy_connect_direct(net, pdata->phydev, - &smsc95xx_handle_link_change, - PHY_INTERFACE_MODE_MII); - if (ret) { - netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id); - return ret; - } - - phy_attached_info(net->phydev); - phy_start(net->phydev); return 0; } -static int smsc95xx_disconnect_phy(struct usbnet *dev) +static int smsc95xx_stop(struct usbnet *dev) { - phy_stop(dev->net->phydev); - phy_disconnect(dev->net->phydev); + if (dev->net->phydev) + phy_stop(dev->net->phydev); + return 0; } @@ -1964,8 +1973,9 @@ static const struct driver_info smsc95xx_info = { .bind = smsc95xx_bind, .unbind = smsc95xx_unbind, .link_reset = smsc95xx_link_reset, - .reset = smsc95xx_start_phy, - .stop = smsc95xx_disconnect_phy, + .reset = smsc95xx_reset, + .check_connect = smsc95xx_start_phy, + .stop = smsc95xx_stop, .rx_fixup = smsc95xx_rx_fixup, .tx_fixup = 
smsc95xx_tx_fixup, .status = smsc95xx_status, diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index e04c8054c2cf3c7c3f5c3a065b48e99d299b5501..fce6713e970badda0337a49a518c07d88ecd5fa9 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; - if (len > ETH_FRAME_LEN) + if (len > ETH_FRAME_LEN || len > skb->len) return 0; /* the last packet of current skb */ diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 8e717a0b559b3aac42e94c0ea1c776737c79527b..7984f2157d222dbe2971702221dac46b2a370721 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -256,6 +256,11 @@ static const struct usb_device_id products [] = { .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE +#define ZAURUS_FAKE_INTERFACE \ + .bInterfaceClass = USB_CLASS_COMM, \ + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ + .bInterfaceProtocol = USB_CDC_PROTO_NONE + /* SA-1100 based Sharp Zaurus ("collie"), or compatible. */ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO @@ -313,6 +318,13 @@ static const struct usb_device_id products [] = { .idProduct = 0x9032, /* SL-6000 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x9032, /* SL-6000 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, diff --git a/drivers/net/veth.c b/drivers/net/veth.c index be18b243642f0f1c4ac4f296a59f2ec9f07a410f..5be8ed910553532bafaa95820fcd1d26a24d3052 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -256,9 +256,10 @@ static void __veth_xdp_flush(struct veth_rq *rq) { /* Write ptr_ring before reading rx_notify_masked */ smp_mb(); - if (!rq->rx_notify_masked) { - rq->rx_notify_masked = true; - napi_schedule(&rq->xdp_napi); + if (!READ_ONCE(rq->rx_notify_masked) && + napi_schedule_prep(&rq->xdp_napi)) { + WRITE_ONCE(rq->rx_notify_masked, true); + __napi_schedule(&rq->xdp_napi); } } @@ -291,7 +292,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) rcu_read_lock(); rcv = rcu_dereference(priv->peer); - if (unlikely(!rcv)) { + if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) { kfree_skb(skb); goto drop; } @@ -301,7 +302,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) if (rxq < rcv->real_num_rx_queues) { rq = &rcv_priv->rq[rxq]; rcv_xdp = rcu_access_pointer(rq->xdp_prog); - skb_record_rx_queue(skb, rxq); } skb_tx_timestamp(skb); @@ -853,8 +853,10 @@ static int veth_poll(struct napi_struct *napi, int budget) /* Write rx_notify_masked before reading ptr_ring */ smp_store_mb(rq->rx_notify_masked, false); if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) { - rq->rx_notify_masked = true; - napi_schedule(&rq->xdp_napi); + if (napi_schedule_prep(&rq->xdp_napi)) { + WRITE_ONCE(rq->rx_notify_masked, true); + __napi_schedule(&rq->xdp_napi); + } } } diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c index 1de413b19e3424a2ace2edcbcf0d0d49c4be6167..8084e7408c0ae9065f57bc463921cb985fd68c5e 100644 --- a/drivers/net/wireguard/queueing.c +++ b/drivers/net/wireguard/queueing.c @@ -4,6 +4,7 @@ */ #include "queueing.h" +#include <linux/skb_array.h> struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) @@ -42,7 +43,7 @@ void wg_packet_queue_free(struct crypt_queue *queue, bool purge) { free_percpu(queue->worker); WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); - ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL); + ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL); } #define NEXT(skb) ((skb)->prev) diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c index 52b9bc83abcbcca9007795fee738f1ad564e84e3..473221aa2236813d6188a078a7e38307cd0b59eb 100644 --- a/drivers/net/wireguard/socket.c +++ b/drivers/net/wireguard/socket.c @@ -160,6 +160,7 @@ static int send6(struct wg_device *wg, struct sk_buff *skb, rcu_read_unlock_bh(); return ret; #else + kfree_skb(skb); return -EAFNOSUPPORT; #endif } @@ -241,7 +242,7 @@ int wg_socket_endpoint_from_skb(struct endpoint *endpoint, endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr; endpoint->src4.s_addr = ip_hdr(skb)->daddr; endpoint->src_if4 = skb->skb_iif; - } else if (skb->protocol == htons(ETH_P_IPV6)) { + } else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) { endpoint->addr6.sin6_family = AF_INET6; endpoint->addr6.sin6_port = udp_hdr(skb)->source; endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr; @@ -284,7 +285,7 @@ void wg_socket_set_peer_endpoint(struct wg_peer *peer, peer->endpoint.addr4 = endpoint->addr4; peer->endpoint.src4 = endpoint->src4; peer->endpoint.src_if4 = endpoint->src_if4; - } else if (endpoint->addr.sa_family == AF_INET6) { + } else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) { peer->endpoint.addr6 = endpoint->addr6; peer->endpoint.src6 = endpoint->src6; } else { diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 49cc4b7ed5163a2de35cd84c6987e813573aae90..1baec4b412c8dc7f0c3b1edbe4c1f445e5074704 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -153,6 +153,10 @@ static void ar5523_cmd_rx_cb(struct urb *urb) ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START"); return; } + if (!cmd->odata) { + ar5523_err(ar, "Unexpected WDCMSG_TARGET_START reply"); + return; + } memcpy(cmd->odata, hdr + 1, sizeof(u32)); cmd->olen = sizeof(u32); cmd->res = 0; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index d73ad60b571c2c41653cf8723f998a010bcebec8..d0967bb1f38718a9efbc2280fdec3e0f3c92e2f6 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -89,6 +89,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = true, }, { @@ -123,6 +124,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = true, }, { @@ -158,6 +160,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -187,6 +190,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .uart_pin_workaround = true, .tx_stats_over_pktlog = false, + .credit_size_workaround = false, 
.bmi_large_size_download = true, .supports_peer_stats_info = true, }, @@ -222,6 +226,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -256,6 +261,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -290,6 +296,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -327,6 +334,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = true, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, .supports_peer_stats_info = true, }, @@ -368,6 +376,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -415,6 +424,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -459,6 +469,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -493,6 +504,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -529,6 +541,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = true, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -557,6 +570,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .ast_skid_limit = 0x10, .num_wds_entries = 0x20, .uart_pin_workaround = true, + .credit_size_workaround = true, }, { .id = QCA4019_HW_1_0_DEV_VERSION, @@ -597,6 +611,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = false, .hw_filter_reset_required = true, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, { @@ -624,6 +639,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .rri_on_ddr = true, .hw_filter_reset_required = false, .fw_diag_ce_download = false, + .credit_size_workaround = false, .tx_stats_over_pktlog = false, }, }; @@ -697,6 +713,7 @@ static void ath10k_send_suspend_complete(struct ath10k *ar) static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode) { + bool mtu_workaround = ar->hw_params.credit_size_workaround; int ret; u32 param = 0; @@ -714,7 +731,7 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode) param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET; - if (mode == ATH10K_FIRMWARE_MODE_NORMAL) + if (mode == ATH10K_FIRMWARE_MODE_NORMAL && !mtu_workaround) param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; else param &= 
~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 1fc0a312ab587337c54ecf3eb7d9f11b648fd653..5f67da47036cf1be24ec705c8d56ef03f796cd4e 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -147,6 +147,9 @@ void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) htt->num_pending_tx--; if (htt->num_pending_tx == htt->max_num_pending_tx - 1) ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL); + + if (htt->num_pending_tx == 0) + wake_up(&htt->empty_tx_wq); } int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index c6ded21f5ed69bbb5a846115a12593a14291dfe4..d3ef83ad577dada610f557a757389518383d11a7 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -618,6 +618,9 @@ struct ath10k_hw_params { */ bool uart_pin_workaround; + /* Workaround for the credit size calculation */ + bool credit_size_workaround; + /* tx stats support over pktlog */ bool tx_stats_over_pktlog; diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index daae470ecf5aa3f3495a623435d34a09111fa91d..e5a296039f714a1d4a284402291d00a3318ecf42 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1477,11 +1477,11 @@ static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size) node = of_parse_phandle(dev->of_node, "memory-region", 0); if (node) { ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) { dev_err(dev, "failed to resolve msa fixed region\n"); return ret; } - of_node_put(node); ar->msa.paddr = r.start; ar->msa.mem_size = resource_size(&r); diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index aefe1f7f906c07d5eacaf918e069f187e5bbb59f..f51f1cf2c6a40eec433e5d415aec40ab165a3206 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -82,8 +82,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, flags = skb_cb->flags; ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); ath10k_htt_tx_dec_pending(htt); - if (htt->num_pending_tx == 0) - wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); rcu_read_lock(); diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 7d65c115669fe2a5f5d813884015b824552f4ad5..20b9aa8ddf7d52e2574daf8d50a197ce6bc1f8b6 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -337,14 +337,15 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif, if (patterns[i].mask[j / 8] & BIT(j % 8)) bitmask[j] = 0xff; old_pattern.mask = bitmask; - new_pattern = old_pattern; if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) { - if (patterns[i].pkt_offset < ETH_HLEN) + if (patterns[i].pkt_offset < ETH_HLEN) { ath10k_wow_convert_8023_to_80211(&new_pattern, &old_pattern); - else + } else { + new_pattern = old_pattern; new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN; + } } if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE)) diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 430723c64adce6d03db67ca2196348d5801affef..190bc5712e965b364db3e8d3a05509fe784bbbe2 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -175,8 +175,11 @@ static void 
__ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) ath11k_ahb_ext_grp_disable(irq_grp); - napi_synchronize(&irq_grp->napi); - napi_disable(&irq_grp->napi); + if (irq_grp->napi_enabled) { + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + irq_grp->napi_enabled = false; + } } } @@ -206,13 +209,13 @@ static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset) static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) { - const struct ce_pipe_config *ce_config; + const struct ce_attr *ce_attr; - ce_config = &ab->hw_params.target_ce_config[ce_id]; - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT) + ce_attr = &ab->hw_params.host_ce_config[ce_id]; + if (ce_attr->src_nentries) ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS); - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) { + if (ce_attr->dest_nentries) { ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS); ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, CE_HOST_IE_3_ADDRESS); @@ -221,13 +224,13 @@ static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) { - const struct ce_pipe_config *ce_config; + const struct ce_attr *ce_attr; - ce_config = &ab->hw_params.target_ce_config[ce_id]; - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT) + ce_attr = &ab->hw_params.host_ce_config[ce_id]; + if (ce_attr->src_nentries) ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS); - if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_IN) { + if (ce_attr->dest_nentries) { ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS); ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, CE_HOST_IE_3_ADDRESS); @@ -300,7 +303,10 @@ static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab) for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; - napi_enable(&irq_grp->napi); + if (!irq_grp->napi_enabled) { + napi_enable(&irq_grp->napi); + irq_grp->napi_enabled = true; + } ath11k_ahb_ext_grp_enable(irq_grp); } } @@ -360,6 +366,8 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab) for (j = 0; j < irq_grp->num_irq; j++) free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); + + netif_napi_del(&irq_grp->napi); } } diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index c8e36251068c9861bb75521be380f0dabfd2bed4..d2f2898d17b498967e646289bdd61d772d62f528 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -124,6 +124,7 @@ struct ath11k_ext_irq_grp { u32 num_irq; u32 grp_id; u64 timestamp; + bool napi_enabled; struct napi_struct napi; struct net_device napi_ndev; }; @@ -687,7 +688,6 @@ struct ath11k_base { u32 wlan_init_status; int irq_num[ATH11K_IRQ_NUM_MAX]; struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX]; - struct napi_struct *napi; struct ath11k_targ_cap target_caps; u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE]; bool pdevs_macaddr_valid; diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index ee8db812589b3cdecdfda17ba796e36b75faa0a9..c4972233149f40aaac7a4377852e95f3bdecd892 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -514,7 +514,8 @@ struct htt_ppdu_stats_cfg_cmd { } __packed; #define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0) -#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8) +#define HTT_PPDU_STATS_CFG_SOC_STATS BIT(8) +#define HTT_PPDU_STATS_CFG_PDEV_ID 
GENMASK(15, 9) #define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16) enum htt_ppdu_stats_tag_type { diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 21dfd08d3debb6b489e3647aeb2d02ec0d47dc0a..092eee735da29d2bf5a465a264d13fafb9bd90fa 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -894,7 +894,7 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask) cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG); - pdev_mask = 1 << (i + 1); + pdev_mask = 1 << (ar->pdev_idx + i); cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask); cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask); diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index 9904c0eb758754ebbe076ff116e4a973bd75443a..f3b9108ab6bd073e695c40851f3fff4520e64695 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -991,6 +991,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, srng->msi_data = params->msi_data; srng->initialized = 1; spin_lock_init(&srng->lock); + lockdep_set_class(&srng->lock, hal->srng_key + ring_id); for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) { srng->hwreg_base[i] = srng_config->reg_start[i] + @@ -1237,6 +1238,24 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab) return 0; } +static void ath11k_hal_register_srng_key(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + u32 ring_id; + + for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) + lockdep_register_key(hal->srng_key + ring_id); +} + +static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + u32 ring_id; + + for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++) + lockdep_unregister_key(hal->srng_key + ring_id); +} + int ath11k_hal_srng_init(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; @@ -1256,6 +1275,8 @@ int ath11k_hal_srng_init(struct ath11k_base *ab) if (ret) goto err_free_cont_rdp; + ath11k_hal_register_srng_key(ab); + return 0; err_free_cont_rdp: @@ -1270,6 +1291,7 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab) { struct ath11k_hal *hal = &ab->hal; + ath11k_hal_unregister_srng_key(ab); ath11k_hal_free_cont_rdp(ab); ath11k_hal_free_cont_wrp(ab); kfree(hal->srng_config); diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index 1f1b29cd0aa399f078a8c9a020ff81e7461736d6..5fbfded8d546c4193daa9d8dbb7a10963a1f2d9d 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -888,6 +888,8 @@ struct ath11k_hal { /* shadow register configuration */ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS]; int num_shadow_reg_configured; + + struct lock_class_key srng_key[HAL_SRNG_RING_ID_MAX]; }; u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid); diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index 66331da3501291f08039073604089c32e10bfa86..f6282e8702923f27e7ff3a1c12a514886517d332 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ b/drivers/net/wireless/ath/ath11k/hw.c @@ -246,8 +246,6 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = { const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = { .tx = { ATH11K_TX_RING_MASK_0, - ATH11K_TX_RING_MASK_1, - ATH11K_TX_RING_MASK_2, }, .rx_mon_status = { 0, 0, 0, 0, diff --git 
a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 0924bc8b35205f1619ff4bf3de3d8240a82f4365..cc9122f420243069939f08cd12276fdb7e091a0c 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <net/mac80211.h> @@ -792,11 +793,15 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies))) arvif->rsnie_present = true; + else + arvif->rsnie_present = false; if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, ies, (skb_tail_pointer(bcn) - ies))) arvif->wpaie_present = true; + else + arvif->wpaie_present = false; ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn); @@ -2316,9 +2321,12 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, arg.scan_id = ATH11K_SCAN_ID; if (req->ie_len) { + arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); + if (!arg.extraie.ptr) { + ret = -ENOMEM; + goto exit; + } arg.extraie.len = req->ie_len; - arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL); - memcpy(arg.extraie.ptr, req->ie, req->ie_len); } if (req->n_ssids) { @@ -2395,9 +2403,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif, return 0; if (cmd == DISABLE_KEY) { - /* TODO: Check if FW expects value other than NONE for del */ - /* arg.key_cipher = WMI_CIPHER_NONE; */ - arg.key_len = 0; + arg.key_cipher = WMI_CIPHER_NONE; arg.key_data = NULL; goto install; } @@ -2529,7 +2535,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, /* flush the fragments cache during key (re)install to * ensure all frags in the new frag list belong to the same key.
*/ - if (peer && cmd == SET_KEY) + if (peer && sta && cmd == SET_KEY) ath11k_peer_frags_flush(ar, peer); spin_unlock_bh(&ab->base_lock); @@ -3878,23 +3884,32 @@ static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant) return 0; } -int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) +static void ath11k_mac_tx_mgmt_free(struct ath11k *ar, int buf_id) { - struct sk_buff *msdu = skb; + struct sk_buff *msdu; struct ieee80211_tx_info *info; - struct ath11k *ar = ctx; - struct ath11k_base *ab = ar->ab; spin_lock_bh(&ar->txmgmt_idr_lock); - idr_remove(&ar->txmgmt_idr, buf_id); + msdu = idr_remove(&ar->txmgmt_idr, buf_id); spin_unlock_bh(&ar->txmgmt_idr_lock); - dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len, + + if (!msdu) + return; + + dma_unmap_single(ar->ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); ieee80211_free_txskb(ar->hw, msdu); +} + +int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) +{ + struct ath11k *ar = ctx; + + ath11k_mac_tx_mgmt_free(ar, buf_id); return 0; } @@ -3903,17 +3918,10 @@ static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) { struct ieee80211_vif *vif = ctx; struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb); - struct sk_buff *msdu = skb; struct ath11k *ar = skb_cb->ar; - struct ath11k_base *ab = ar->ab; - if (skb_cb->vif == vif) { - spin_lock_bh(&ar->txmgmt_idr_lock); - idr_remove(&ar->txmgmt_idr, buf_id); - spin_unlock_bh(&ar->txmgmt_idr_lock); - dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, - DMA_TO_DEVICE); - } + if (skb_cb->vif == vif) + ath11k_mac_tx_mgmt_free(ar, buf_id); return 0; } @@ -3928,6 +3936,8 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif, int buf_id; int ret; + ATH11K_SKB_CB(skb)->ar = ar; + spin_lock_bh(&ar->txmgmt_idr_lock); buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0, ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC); diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index aded9a719d51e9e128b37de350a9dd4247a0455b..84db9e55c3e72b0d56f5baf80e5317fc22fad548 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -402,7 +402,7 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci, ret = 0; break; case ATH11K_MHI_POWER_ON: - ret = mhi_async_power_up(ab_pci->mhi_ctrl); + ret = mhi_sync_power_up(ab_pci->mhi_ctrl); break; case ATH11K_MHI_POWER_OFF: mhi_power_down(ab_pci->mhi_ctrl, true); diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index d7eb6b7160bb4ff204c4a2c29819f73b90b92cd5..105e344240c100f1d9e31de57e27b467750cd485 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -416,8 +416,11 @@ static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc) ath11k_pci_ext_grp_disable(irq_grp); - napi_synchronize(&irq_grp->napi); - napi_disable(&irq_grp->napi); + if (irq_grp->napi_enabled) { + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + irq_grp->napi_enabled = false; + } } } @@ -436,7 +439,10 @@ static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab) for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; - napi_enable(&irq_grp->napi); + if (!irq_grp->napi_enabled) { + napi_enable(&irq_grp->napi); + irq_grp->napi_enabled = true; + } 
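/*
 * The ath11k ahb.c and pci.c hunks above apply the same fix: the ext-IRQ
 * enable/disable paths could call napi_enable()/napi_disable() twice in a
 * row across start/stop cycles, and a repeated napi_disable() can block
 * forever waiting for a schedule bit that never clears. Tracking the state
 * in a napi_enabled flag makes both transitions idempotent. A minimal
 * sketch of the pattern, assuming a made-up demo_irq_grp wrapper; only
 * napi_enable/napi_disable/napi_synchronize and the napi_enabled flag
 * mirror the patch, everything else is illustrative:
 */
#include <linux/netdevice.h>

struct demo_irq_grp {
	struct napi_struct napi;
	bool napi_enabled;
};

static void demo_ext_irq_enable(struct demo_irq_grp *grp)
{
	/* Transition disabled -> enabled exactly once. */
	if (!grp->napi_enabled) {
		napi_enable(&grp->napi);
		grp->napi_enabled = true;
	}
}

static void demo_ext_irq_disable(struct demo_irq_grp *grp)
{
	/* Let any in-flight poll finish, then disable exactly once. */
	if (grp->napi_enabled) {
		napi_synchronize(&grp->napi);
		napi_disable(&grp->napi);
		grp->napi_enabled = false;
	}
}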
ath11k_pci_ext_grp_enable(irq_grp); } } diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index b8f9f3440887932d9e092850d4b866487bdad83e..e34311516b958a121464f36716f642a6c85d52ba 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -456,6 +456,9 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw) { u16 bw; + if (end_freq <= start_freq) + return 0; + bw = end_freq - start_freq; bw = min_t(u16, bw, max_bw); @@ -463,8 +466,10 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw) bw = 80; else if (bw >= 40 && bw < 80) bw = 40; - else if (bw < 40) + else if (bw >= 20 && bw < 40) bw = 20; + else + bw = 0; return bw; } @@ -488,73 +493,77 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab, struct cur_reg_rule *reg_rule, u8 *rule_idx, u32 flags, u16 max_bw) { + u32 start_freq; u32 end_freq; u16 bw; u8 i; i = *rule_idx; + /* there might be situations when even the input rule must be dropped */ + i--; + + /* frequencies below weather radar */ bw = ath11k_reg_adjust_bw(reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW, max_bw); + if (bw > 0) { + i++; - ath11k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq, - ETSI_WEATHER_RADAR_BAND_LOW, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + i, + reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); - - if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH) - end_freq = ETSI_WEATHER_RADAR_BAND_HIGH; - else - end_freq = reg_rule->end_freq; + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW, bw, reg_rule->ant_gain, + reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms, + flags); + } - bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq, - max_bw); + /* weather radar frequencies */ + start_freq = max_t(u32, reg_rule->start_freq, + ETSI_WEATHER_RADAR_BAND_LOW); + end_freq = min_t(u32, reg_rule->end_freq, ETSI_WEATHER_RADAR_BAND_HIGH); - i++; + bw = ath11k_reg_adjust_bw(start_freq, end_freq, max_bw); + if (bw > 0) { + i++; - ath11k_reg_update_rule(regd->reg_rules + i, - ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + i, start_freq, + end_freq, bw, reg_rule->ant_gain, + reg_rule->reg_power, flags); - regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT; + regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT; - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); - - if (end_freq == reg_rule->end_freq) { - regd->n_reg_rules--; - *rule_idx = i; - return; + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. 
(%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, start_freq, end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + regd->reg_rules[i].dfs_cac_ms, flags); } + /* frequencies above weather radar */ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq, max_bw); + if (bw > 0) { + i++; - i++; - - ath11k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH, - reg_rule->end_freq, bw, - reg_rule->ant_gain, reg_rule->reg_power, - flags); + ath11k_reg_update_rule(regd->reg_rules + i, + ETSI_WEATHER_RADAR_BAND_HIGH, + reg_rule->end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); - ath11k_dbg(ab, ATH11K_DBG_REG, - "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", - i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq, - bw, reg_rule->ant_gain, reg_rule->reg_power, - regd->reg_rules[i].dfs_cac_ms, - flags); + ath11k_dbg(ab, ATH11K_DBG_REG, + "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n", + i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, + reg_rule->end_freq, bw, reg_rule->ant_gain, + reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms, + flags); + } *rule_idx = i; } diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 74ebe8e7d1d81ef79d753771ac299c82f5600cd4..53846dc9a5c5abe92de8f591eed7ed0a48593eb3 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -1665,7 +1665,8 @@ int ath11k_wmi_vdev_install_key(struct ath11k *ar, tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | FIELD_PREP(WMI_TLV_LEN, key_len_aligned); - memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); + if (arg->key_data) + memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID); if (ret) { @@ -2036,7 +2037,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, void *ptr; int i, ret, len; u32 *tmp_ptr; - u8 extraie_len_with_pad = 0; + u16 extraie_len_with_pad = 0; struct hint_short_ssid *s_ssid = NULL; struct hint_bssid *hint_bssid = NULL; @@ -2055,7 +2056,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, len += sizeof(*bssid) * params->num_bssid; len += TLV_HDR_SIZE; - if (params->extraie.len) + if (params->extraie.len && params->extraie.len <= 0xFFFF) extraie_len_with_pad = roundup(params->extraie.len, sizeof(u32)); len += extraie_len_with_pad; @@ -2162,7 +2163,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, FIELD_PREP(WMI_TLV_LEN, len); ptr += TLV_HDR_SIZE; - if (params->extraie.len) + if (extraie_len_with_pad) memcpy(ptr, params->extraie.ptr, params->extraie.len); @@ -5421,7 +5422,7 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk ar = ab->pdevs[pdev_idx].ar; kfree(ab->new_regd[pdev_idx]); ab->new_regd[pdev_idx] = regd; - ieee80211_queue_work(ar->hw, &ar->regd_update_work); + queue_work(ab->workqueue, &ar->regd_update_work); } else { /* This regd would be applied during mac registration and is * held constant throughout for regd intersection purpose diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c index 1fbc2c19848f2161662d05dc0bd0a25841117daf..d444b3d70ba2e05fa812b62f468816b3179492b4 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/drivers/net/wireless/ath/ath5k/eeprom.c @@ -746,6 +746,9 @@ ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode, } } + if (idx == AR5K_EEPROM_N_PD_CURVES) + goto err_out; + ee->ee_pd_gains[mode] = 1; 
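/*
 * The ath5k eeprom.c hunk just above is the whole fix: a search loop can
 * run off the end of its table, after which chinfo[pier].pd_curves[idx]
 * would be indexed out of bounds. Checking the index against the loop
 * bound before using it is the general idiom. A self-contained sketch
 * under made-up names; only the "idx == limit -> bail out" check mirrors
 * the patch:
 */
#include <stddef.h>

#define DEMO_N_PD_CURVES 4

/* Return the index of the first matching curve, or -1 if none matched. */
static int demo_find_curve(const unsigned int *curves, unsigned int wanted)
{
	size_t idx;

	for (idx = 0; idx < DEMO_N_PD_CURVES; idx++)
		if (curves[idx] == wanted)
			break;

	/*
	 * The loop fell off the end: report failure instead of using
	 * curves[DEMO_N_PD_CURVES], which is one past the last element.
	 */
	if (idx == DEMO_N_PD_CURVES)
		return -1;

	return (int)idx;
}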
pd = &chinfo[pier].pd_curves[idx]; diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 860da13bfb6ac91c55925babbd8150458d4b97a5..f06eec99de688c87e42fb8379d339b7a7aa1d4d8 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -590,6 +590,13 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, return; } + if (pkt_len > 2 * MAX_RX_BUF_SIZE) { + dev_err(&hif_dev->udev->dev, + "ath9k_htc: invalid pkt_len (%x)\n", pkt_len); + RX_STAT_INC(skb_dropped); + return; + } + pad_len = 4 - (pkt_len & 0x3); if (pad_len == 4) pad_len = 0; diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 510e61e97dbcbd1dd2d4c1847d00f0afdbfd6385..994ec48b2f669588dbfe46c69611aad47aaebfbe 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -30,6 +30,7 @@ static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, hdr->endpoint_id = epid; hdr->flags = flags; hdr->payload_len = cpu_to_be16(len); + memset(hdr->control, 0, sizeof(hdr->control)); status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb); @@ -272,6 +273,10 @@ int htc_connect_service(struct htc_target *target, conn_msg->dl_pipeid = endpoint->dl_pipeid; conn_msg->ul_pipeid = endpoint->ul_pipeid; + /* To prevent infoleak */ + conn_msg->svc_meta_len = 0; + conn_msg->pad = 0; + ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0); if (ret) goto err; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index af367696fd92ff97d5748f710a666c5576d49139..ac354dfc5055960b5e8bdf511237731c0b9bace1 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix) continue; txinfo = IEEE80211_SKB_CB(bf->bf_mpdu); - fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0]; + fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0]; if (fi->keyix == keyix) return true; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 5691bd6eb82c2fc5d251f37855cb1ea0b694ba2e..6555abf02f18bd9c06443e2b1e1791a1d8cdaac8 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); BUILD_BUG_ON(sizeof(struct ath_frame_info) > - sizeof(tx_info->rate_driver_data)); - return (struct ath_frame_info *) &tx_info->rate_driver_data[0]; + sizeof(tx_info->status.status_driver_data)); + return (struct ath_frame_info *) &tx_info->status.status_driver_data[0]; } static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno) @@ -2501,6 +2501,16 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, spin_unlock_irqrestore(&sc->tx.txbuflock, flags); } +static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info) +{ + void *ptr = &tx_info->status; + + memset(ptr + sizeof(tx_info->status.rates), 0, + sizeof(tx_info->status) - + sizeof(tx_info->status.rates) - + sizeof(tx_info->status.status_driver_data)); +} + static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, int nframes, int nbad, int txok) @@ -2512,6 +2522,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_hw *ah = 
sc->sc_ah; u8 i, tx_rateindex; + ath_clear_tx_status(tx_info); + if (txok) tx_info->status.ack_signal = ts->ts_rssi; @@ -2526,6 +2538,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_info->status.ampdu_len = nframes; tx_info->status.ampdu_ack_len = nframes - nbad; + tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; + + for (i = tx_rateindex + 1; i < hw->max_rates; i++) { + tx_info->status.rates[i].count = 0; + tx_info->status.rates[i].idx = -1; + } + if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) { /* @@ -2547,16 +2566,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_info->status.rates[tx_rateindex].count = hw->max_rate_tries; } - - for (i = tx_rateindex + 1; i < hw->max_rates; i++) { - tx_info->status.rates[i].count = 0; - tx_info->status.rates[i].idx = -1; - } - - tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; - - /* we report airtime in ath_tx_count_airtime(), don't report twice */ - tx_info->status.tx_time = 0; } static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index dbef9d8fc893b7350bccfd7b5b34fbdbf99ac9f5..b903b856bcf7b5547ed0d2771bcd3aefc3aa9a04 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1916,7 +1916,7 @@ static int carl9170_parse_eeprom(struct ar9170 *ar) WARN_ON(!(tx_streams >= 1 && tx_streams <= IEEE80211_HT_MCS_TX_MAX_STREAMS)); - tx_params = (tx_streams - 1) << + tx_params |= (tx_streams - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params; diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index bee9110b91f38620ef4b8f8c0e43f1c87698f808..20f4f8ea9f894c3ba1f21fce5f6d7505c1b44a06 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -666,14 +666,14 @@ ath_regd_init_wiphy(struct ath_regulatory *reg, /* * Some users have reported their EEPROM programmed with - * 0x8000 or 0x0 set, this is not a supported regulatory - * domain but since we have more than one user with it we - * need a solution for them. We default to 0x64, which is - * the default Atheros world regulatory domain. + * 0x8000 set, this is not a supported regulatory domain + * but since we have more than one user with it we need + * a solution for them. We default to 0x64, which is the + * default Atheros world regulatory domain. 
*/ static void ath_regd_sanitize(struct ath_regulatory *reg) { - if (reg->current_rd != COUNTRY_ERD_FLAG && reg->current_rd != 0) + if (reg->current_rd != COUNTRY_ERD_FLAG) return; printk(KERN_DEBUG "ath: EEPROM regdomain sanitized\n"); reg->current_rd = 0x64; diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index cf4eb0fb2815182b42a3ded5f269c72776597aa8..6c62ffc799a2b5e07d5632461888cf2887664177 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -272,6 +272,21 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch) return 0; } +static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch) +{ + int reg_data = 0; + + wcn36xx_dxe_read_register(wcn, + WCN36XX_DXE_INT_MASK_REG, + ®_data); + + reg_data &= ~wcn_ch; + + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_INT_MASK_REG, + (int)reg_data); +} + static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl, gfp_t gfp) @@ -869,7 +884,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_WQ_TX_L); wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, ®_data); - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); /***************************************/ /* Init descriptors for TX HIGH channel */ @@ -893,9 +907,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, ®_data); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); - /***************************************/ /* Init descriptors for RX LOW channel */ /***************************************/ @@ -905,7 +916,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) goto out_err_rxl_ch; } - /* For RX we need to preallocated buffers */ wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch); @@ -928,9 +938,6 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_REG_CTL_RX_L, WCN36XX_DXE_CH_DEFAULT_CTL_RX_L); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); - /***************************************/ /* Init descriptors for RX HIGH channel */ /***************************************/ @@ -962,15 +969,18 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) WCN36XX_DXE_REG_CTL_RX_H, WCN36XX_DXE_CH_DEFAULT_CTL_RX_H); - /* Enable channel interrupts */ - wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); - ret = wcn36xx_dxe_request_irqs(wcn); if (ret < 0) goto out_err_irq; timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0); + /* Enable channel interrupts */ + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); + wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); + return 0; out_err_irq: @@ -987,6 +997,14 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) void wcn36xx_dxe_deinit(struct wcn36xx *wcn) { + int reg_data = 0; + + /* Disable channel interrupts */ + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H); + wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L); + free_irq(wcn->tx_irq, wcn); free_irq(wcn->rx_irq, wcn); del_timer(&wcn->tx_ack_timer); @@ -996,6 +1014,15 @@ void wcn36xx_dxe_deinit(struct wcn36xx *wcn) wcn->tx_ack_skb = NULL; } + /* Put the DXE block into reset before freeing memory */ + reg_data = WCN36XX_DXE_REG_RESET; + 
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data); + wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch); wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch); + + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch); + wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch); } diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 629ddfd74da1a06fe3761ebcdd1bb68a0af46d0f..37e6e49de3366b8a7176d1aa4fd2be9aabc4c549 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -397,6 +397,7 @@ static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch) static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) { struct wcn36xx *wcn = hw->priv; + int ret; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed); @@ -412,17 +413,31 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) * want to receive/transmit regular data packets, then * simply stop the scan session and exit PS mode. */ - wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, - wcn->sw_scan_vif); - wcn->sw_scan_channel = 0; + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (wcn->sw_scan_init) { + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + } } else if (wcn->sw_scan) { /* A scan is ongoing, do not change the operating * channel, but start a scan session on the channel. */ - wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN, - wcn->sw_scan_vif); + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (!wcn->sw_scan_init) { + /* This can fail if we are unable to notify the + * operating channel. 
+ */ + ret = wcn36xx_smd_init_scan(wcn, + HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + if (ret) { + mutex_unlock(&wcn->conf_mutex); + return -EIO; + } + } wcn36xx_smd_start_scan(wcn, ch); - wcn->sw_scan_channel = ch; } else { wcn36xx_change_opchannel(wcn, ch); } @@ -709,7 +724,12 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw, struct wcn36xx *wcn = hw->priv; /* ensure that any scan session is finished */ - wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif); + if (wcn->sw_scan_channel) + wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel); + if (wcn->sw_scan_init) { + wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, + wcn->sw_scan_vif); + } wcn->sw_scan = false; wcn->sw_scan_opchannel = 0; } @@ -1342,6 +1362,9 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, if (iris_node) { if (of_device_is_compatible(iris_node, "qcom,wcn3620")) wcn->rf_id = RF_IRIS_WCN3620; + if (of_device_is_compatible(iris_node, "qcom,wcn3660") || + of_device_is_compatible(iris_node, "qcom,wcn3660b")) + wcn->rf_id = RF_IRIS_WCN3660; if (of_device_is_compatible(iris_node, "qcom,wcn3680")) wcn->rf_id = RF_IRIS_WCN3680; of_node_put(iris_node); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 3793907ace92e0e1e73cc45184cfd6dfe9b11bc3..7f00cb6f5e16b3dbb0aa281587a1d6a8756c053b 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -730,6 +730,7 @@ int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode, wcn36xx_err("hal_init_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_init = true; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -760,6 +761,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel) wcn36xx_err("hal_start_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_channel = scan_channel; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -790,6 +792,7 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel) wcn36xx_err("hal_end_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_channel = 0; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -831,6 +834,7 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, wcn36xx_err("hal_finish_scan response failed err=%d\n", ret); goto out; } + wcn->sw_scan_init = false; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -2603,7 +2607,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn, wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n", tmp->bss_index); vif = wcn36xx_priv_to_vif(tmp); - ieee80211_connection_loss(vif); + ieee80211_beacon_loss(vif); } return 0; } @@ -2618,7 +2622,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn, wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n", rsp->bss_index); vif = wcn36xx_priv_to_vif(tmp); - ieee80211_connection_loss(vif); + ieee80211_beacon_loss(vif); return 0; } } diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index bbd7194c82e27ef6982cbb7d548ab55a2edce1f1..f33e7228a1010f4e81b3211f7b33fd319960718f 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -237,7 +237,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) const struct wcn36xx_rate *rate; struct ieee80211_hdr *hdr; struct wcn36xx_rx_bd *bd; - struct ieee80211_supported_band *sband; u16 fc, sn; /* @@ -259,8 +258,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) fc = 
__le16_to_cpu(hdr->frame_control); sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)); - status.freq = WCN36XX_CENTER_FREQ(wcn); - status.band = WCN36XX_BAND(wcn); status.mactime = 10; status.signal = -get_rssi0(bd); status.antenna = 1; @@ -272,18 +269,36 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag); + if (bd->scan_learn) { + /* If packet originates from hardware scanning, extract the + * band/channel from bd descriptor. + */ + u8 hwch = (bd->reserved0 << 4) + bd->rx_ch; + + if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) { + status.band = NL80211_BAND_5GHZ; + status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1], + status.band); + } else { + status.band = NL80211_BAND_2GHZ; + status.freq = ieee80211_channel_to_frequency(hwch, status.band); + } + } else { + status.band = WCN36XX_BAND(wcn); + status.freq = WCN36XX_CENTER_FREQ(wcn); + } + if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) { rate = &wcn36xx_rate_table[bd->rate_id]; status.encoding = rate->encoding; status.enc_flags = rate->encoding_flags; status.bw = rate->bw; status.rate_idx = rate->mcs_or_legacy_index; - sband = wcn->hw->wiphy->bands[status.band]; status.nss = 1; if (status.band == NL80211_BAND_5GHZ && status.encoding == RX_ENC_LEGACY && - status.rate_idx >= sband->n_bitrates) { + status.rate_idx >= 4) { /* no dsss rates in 5Ghz rates table */ status.rate_idx -= 4; } @@ -298,22 +313,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb) ieee80211_is_probe_resp(hdr->frame_control)) status.boottime_ns = ktime_get_boottime_ns(); - if (bd->scan_learn) { - /* If packet originates from hardware scanning, extract the - * band/channel from bd descriptor. - */ - u8 hwch = (bd->reserved0 << 4) + bd->rx_ch; - - if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) { - status.band = NL80211_BAND_5GHZ; - status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1], - status.band); - } else { - status.band = NL80211_BAND_2GHZ; - status.freq = ieee80211_channel_to_frequency(hwch, status.band); - } - } - memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); if (ieee80211_is_beacon(hdr->frame_control)) { diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 9b4dee2fc6483435a3a9ae17661a50d7f2f395d6..82be08265c06ca56efa586c0ce847258a3431084 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -96,6 +96,7 @@ enum wcn36xx_ampdu_state { #define RF_UNKNOWN 0x0000 #define RF_IRIS_WCN3620 0x3620 +#define RF_IRIS_WCN3660 0x3660 #define RF_IRIS_WCN3680 0x3680 static inline void buff_to_be(u32 *buf, size_t len) @@ -231,6 +232,7 @@ struct wcn36xx { struct cfg80211_scan_request *scan_req; bool sw_scan; u8 sw_scan_opchannel; + bool sw_scan_init; u8 sw_scan_channel; struct ieee80211_vif *sw_scan_vif; struct mutex scan_lock; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index d821a4758f8cf0d3df5f7092d7cda8478499d7ed..a2b8d9171af2abe41290eea365041be88076c062 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -207,6 +207,8 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp, size = BRCMF_FW_MAX_NVRAM_SIZE; else size = data_len; + /* Add space for properties we may add */ + size += strlen(BRCMF_FW_DEFAULT_BOARDREV) +
1; /* Alloc for extra 0 byte + roundup by 4 + length field */ size += 1 + 3 + sizeof(u32); nvp->nvram = kzalloc(size, GFP_KERNEL); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 1f12dfb33938a9f64b9c2bdebbc729549dd39c40..61febc9bfa14ab344329e0255c1480079e2a691b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -446,47 +447,6 @@ brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset, } -static void -brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset, - void *srcaddr, u32 len) -{ - void __iomem *address = devinfo->tcm + mem_offset; - __le32 *src32; - __le16 *src16; - u8 *src8; - - if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) { - if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) { - src8 = (u8 *)srcaddr; - while (len) { - iowrite8(*src8, address); - address++; - src8++; - len--; - } - } else { - len = len / 2; - src16 = (__le16 *)srcaddr; - while (len) { - iowrite16(le16_to_cpu(*src16), address); - address += 2; - src16++; - len--; - } - } - } else { - len = len / 4; - src32 = (__le32 *)srcaddr; - while (len) { - iowrite32(le32_to_cpu(*src32), address); - address += 4; - src32++; - len--; - } - } -} - - static void brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset, void *dstaddr, u32 len) @@ -1346,6 +1306,18 @@ static void brcmf_pcie_down(struct device *dev) { } +static int brcmf_pcie_preinit(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie; + + brcmf_dbg(PCIE, "Enter\n"); + + brcmf_pcie_intr_enable(buspub->devinfo); + brcmf_pcie_hostready(buspub->devinfo); + + return 0; +} static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb) { @@ -1454,6 +1426,7 @@ static int brcmf_pcie_reset(struct device *dev) } static const struct brcmf_bus_ops brcmf_pcie_bus_ops = { + .preinit = brcmf_pcie_preinit, .txdata = brcmf_pcie_tx, .stop = brcmf_pcie_down, .txctl = brcmf_pcie_tx_ctlpkt, @@ -1561,8 +1534,8 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, return err; brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name); - brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase, - (void *)fw->data, fw->size); + memcpy_toio(devinfo->tcm + devinfo->ci->rambase, + (void *)fw->data, fw->size); resetintr = get_unaligned_le32(fw->data); release_firmware(fw); @@ -1576,7 +1549,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name); address = devinfo->ci->rambase + devinfo->ci->ramsize - nvram_len; - brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len); + memcpy_toio(devinfo->tcm + address, nvram, nvram_len); brcmf_fw_nvram_free(nvram); } else { brcmf_dbg(PCIE, "No matching NVRAM file found %s\n", @@ -1775,6 +1748,8 @@ static void brcmf_pcie_setup(struct device *dev, int ret, ret = brcmf_chip_get_raminfo(devinfo->ci); if (ret) { brcmf_err(bus, "Failed to get RAM info\n"); + release_firmware(fw); + brcmf_fw_nvram_free(nvram); goto fail; } @@ -1824,9 +1799,6 @@ static void brcmf_pcie_setup(struct device *dev, int ret, init_waitqueue_head(&devinfo->mbdata_resp_wait); - brcmf_pcie_intr_enable(devinfo); - brcmf_pcie_hostready(devinfo); - ret = 
brcmf_attach(&devinfo->pdev->dev); if (ret) goto fail; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 423d3c396b2d3f1047e4247b94ae7963ac730f22..1e21cdbb7313b788a758ba1144c546f4895f6ae4 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -304,7 +304,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw) priv->is_open = 1; IWL_DEBUG_MAC80211(priv, "leave\n"); - return 0; + return ret; } static void iwlagn_mac_stop(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index be214f39f52bec3207f3d9dacbab714341c6e2fa..ab84ac3f8f03f382a3fa60edf344e4a6d1a60891 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -185,6 +185,9 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); + + /* clear the data for the aborted load case */ + memset(&drv->fw, 0, sizeof(drv->fw)); } static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc, @@ -1365,6 +1368,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) int i; bool load_module = false; bool usniffer_images = false; + bool failure = true; fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH; fw->ucode_capa.standard_phy_calibration_size = @@ -1625,15 +1629,9 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) * else from proceeding if the module fails to load * or hangs loading. */ - if (load_module) { + if (load_module) request_module("%s", op->name); -#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR - if (err) - IWL_ERR(drv, - "failed to load module %s (error %d), is dynamic loading enabled?\n", - op->name, err); -#endif - } + failure = false; goto free; try_again: @@ -1648,7 +1646,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) out_unbind: complete(&drv->request_firmware_complete); device_release_driver(drv->trans->dev); + /* drv has just been freed by the release */ + failure = false; free: + if (failure) + iwl_dealloc_ucode(drv); + if (pieces) { for (i = 0; i < ARRAY_SIZE(pieces->img); i++) kfree(pieces->img[i].sec); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index cbde21e772b17a328e3b170d20364862cbf8ceb4..b862cfbcd6e79ccf1c0e0e1992521c836135b950 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -587,8 +587,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE | - IEEE80211_HE_MAC_CAP0_TWT_REQ, + IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index a0ce761d0c59b423ec150878615aa79a81eadfa4..b1335fe3b01a29ced68eba4c6f9ca4522ce8c180 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -967,7 +967,7 @@ static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm, overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT; alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA; - 
rtt_avg = (alpha * rtt + (100 - alpha) * resp->rtt_avg) / 100; + rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100); IWL_DEBUG_INFO(mvm, "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 6348dfa61724a6110923b9d06b0a65a36754b32c..54b28f0932e25709d32f649412ee12405ef5b0a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1495,8 +1495,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) while (!sband && i < NUM_NL80211_BANDS) sband = mvm->hw->wiphy->bands[i++]; - if (WARN_ON_ONCE(!sband)) + if (WARN_ON_ONCE(!sband)) { + ret = -ENODEV; goto error; + } chan = &sband->channels[0]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 81cc85a97eb205d88cc0d1ec4a9b2f39ce72ea99..d2c6fdb7027320ac7bbeff2a638ad62d735d47f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -350,7 +350,6 @@ static const u8 he_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, - [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { @@ -1739,6 +1738,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) struct iwl_mvm_mc_iter_data iter_data = { .mvm = mvm, }; + int ret; lockdep_assert_held(&mvm->mutex); @@ -1748,6 +1748,22 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); + + /* + * Send a (synchronous) echo command so that we wait for the + * multiple asynchronous MCAST_FILTER_CMD commands sent by + * the interface iterator. Otherwise, we might get here over + * and over again (by userspace just sending a lot of these) + * and the CPU can send them faster than the firmware can + * process them. + * Note that the CPU is still faster - but with this we'll + * actually send fewer commands overall because the CPU will + * not schedule the work in mac80211 as frequently if it's + * still running when rescheduled (possibly multiple times). + */ + ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); + if (ret) + IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); } static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 838734fec50234ed253e752e86b9b39ea82c6804..86b3fb321dfdd82b0871bfb7920eac724c78e09e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -177,12 +177,39 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; unsigned int headlen, fraglen, pad_len = 0; unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + u8 mic_crc_len = u8_get_bits(desc->mac_flags1, + IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1; if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { len -= 2; pad_len = 2; } + /* + * For a non-monitor interface, strip the bytes the RADA might not have + * removed. As a monitor interface cannot coexist with other interfaces, + * this removal is safe.
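/*
 * A minimal sketch of why the ftm-initiator hunk above switches to
 * div_s64(): a plain '/' on an s64 dividend emits a libgcc __divdi3
 * call that 32-bit kernels do not provide. The function and parameter
 * names here are illustrative, not the driver's.
 */
#include <linux/math64.h>

/* Exponentially weighted RTT average: alpha percent of the new sample,
 * the remainder from history; div_s64() does the signed 64-by-32
 * division portably on both 32-bit and 64-bit builds.
 */
static s64 demo_smooth_rtt(s64 rtt, s64 prev_avg, s32 alpha)
{
	return div_s64(alpha * rtt + (100 - alpha) * prev_avg, 100);
}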
+ */ + if (mic_crc_len && !ieee80211_hw_check(mvm->hw, RX_INCLUDES_FCS)) { + u32 pkt_flags = le32_to_cpu(pkt->len_n_flags); + + /* + * If RADA was not enabled then decryption was not performed so + * the MIC cannot be removed. + */ + if (!(pkt_flags & FH_RSCSR_RADA_EN)) { + if (WARN_ON(crypt_len > mic_crc_len)) + return -EINVAL; + + mic_crc_len -= crypt_len; + } + + if (WARN_ON(mic_crc_len > len)) + return -EINVAL; + + len -= mic_crc_len; + } + /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and * an additional 8 bytes for SNAP/ethertype, see below) so that diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index a5d90e028833c7e420a2c85e2ffddce7980b903a..17b9925266947416fc3d27e33cc235fa0316b371 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1706,7 +1706,10 @@ static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm, IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; /* set fragmented ebs for fragmented scan on HB channels */ - if (iwl_mvm_is_scan_fragmented(params->hb_type)) + if ((!iwl_mvm_is_cdb_supported(mvm) && + iwl_mvm_is_scan_fragmented(params->type)) || + (iwl_mvm_is_cdb_supported(mvm) && + iwl_mvm_is_scan_fragmented(params->hb_type))) flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG; return flags; @@ -2157,7 +2160,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) return -EIO; } -#define SCAN_TIMEOUT 20000 +#define SCAN_TIMEOUT 30000 void iwl_mvm_scan_timeout_wk(struct work_struct *work) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 394598b14a17335ca0cae9c1057edb8cdde73cc0..3f081cdea09caf0012fbecdd460e3be5f8121f38 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -98,14 +98,13 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk); /* - * Clear the ROC_RUNNING /ROC_AUX_RUNNING status bit. + * Clear the ROC_RUNNING status bit. * This will cause the TX path to drop offchannel transmissions. * That would also be done by mac80211, but it is racy, in particular * in the case that the time event actually completed in the firmware * (which is handled in iwl_mvm_te_handle_notif). */ clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); - clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); synchronize_net(); @@ -131,9 +130,19 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif); iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true); } - } else { + } + + /* + * Clear the ROC_AUX_RUNNING status bit. + * This will cause the TX path to drop offchannel transmissions. + * That would also be done by mac80211, but it is racy, in particular + * in the case that the time event actually completed in the firmware + * (which is handled in iwl_mvm_te_handle_notif). 
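/*
 * Hedged sketch of the pattern the ROC_AUX_RUNNING hunk below relies
 * on: test_and_clear_bit() makes the check and the clear one atomic
 * step, so only the path that actually consumed the flag performs the
 * flush. The bit index and the flush stub are hypothetical stand-ins.
 */
#include <linux/bitops.h>

#define DEMO_STATUS_ROC_AUX_RUNNING	1

static void demo_flush_aux_sta(void)
{
	/* stand-in for the driver's station flush */
}

static void demo_roc_done(unsigned long *status)
{
	/* Atomically consume the flag; only the winner flushes. */
	if (test_and_clear_bit(DEMO_STATUS_ROC_AUX_RUNNING, status))
		demo_flush_aux_sta();
}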
+ */ + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { /* do the same in case of hot spot 2.0 */ iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true); + /* In newer versions of this command an aux station is added only * in the case of a dedicated tx queue and needs to be removed at * the end of use */ @@ -1157,15 +1166,10 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)), .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC), .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), }; - /* The time_event_data.id field is reused to save session - * protection's configuration. - */ - mvmvif->time_event_data.id = SESSION_PROTECT_CONF_ASSOC; - cmd.conf_id = cpu_to_le32(mvmvif->time_event_data.id); - lockdep_assert_held(&mvm->mutex); spin_lock_bh(&mvm->time_event_lock); @@ -1179,6 +1183,11 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, } iwl_mvm_te_clear_data(mvm, te_data); + /* + * The time_event_data.id field is reused to save session + * protection's configuration. + */ + te_data->id = le32_to_cpu(cmd.conf_id); te_data->duration = le32_to_cpu(cmd.duration_tu); spin_unlock_bh(&mvm->time_event_lock); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 2c13fa8f282009a74ee5ec9d502fc8d4f5ab5396..6aedf5762571df54ab615a66ce944e1efc580aca 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -2260,7 +2260,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) } } - if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { + /* + * In some rare cases when the HW is in a bad state, we may + * get this interrupt too early, when prph_info is still NULL. + * So make sure that it's not NULL to prevent crashing.
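/*
 * Sketch of the early-interrupt guard described above (hedged; the
 * struct and cause bit are illustrative, not the driver's): an
 * interrupt cause that can assert before init has mapped prph_info
 * must not dereference it.
 */
#include <linux/interrupt.h>

struct demo_trans {
	u32 inta_hw;
	void *prph_info;	/* mapped late during init */
};

#define DEMO_CAUSE_WAKEUP	0x1

static irqreturn_t demo_msix_handler(int irq, void *dev_id)
{
	struct demo_trans *trans = dev_id;

	if ((trans->inta_hw & DEMO_CAUSE_WAKEUP) && trans->prph_info) {
		/* only now is it safe to read the sleep notification */
	}

	return IRQ_HANDLED;
}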
+ */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) { u32 sleep_notif = le32_to_cpu(trans_pcie->prph_info->sleep_notif); if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND || diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index b031e9304983c40ed32abf549576923e99150304..b2991582189c20f1b1c5c60e072d3561d40ef2ae 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -320,8 +320,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 082768ec8aa808b46c41e5df5a8bdd8c6f77cd00..daec61a60fec5f39f5fc4d375568058f7e315593 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1313,8 +1313,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, /* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); - ret = -EIO; - goto out; + return -EIO; } iwl_enable_rfkill_int(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c index 9181221a2434d89f318d58b87d9c4dc011dca146..0136df00ff6a65454e0b4aecb9f7575c569780a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -1148,6 +1148,7 @@ int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, return 0; err_free_tfds: dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); + txq->tfds = NULL; error: if (txq->entries && cmd_queue) for (i = 0; i < slots_num; i++) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index b793d61d15d272664b3e3aa83a2e44b398005223..cc550ba0c9dfefe2513cf2361fc2e97c64c60090 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2264,6 +2264,15 @@ static void hw_scan_work(struct work_struct *work) if (req->ie_len) skb_put_data(probe, req->ie, req->ie_len); + if (!ieee80211_tx_prepare_skb(hwsim->hw, + hwsim->hw_scan_vif, + probe, + hwsim->tmp_chan->band, + NULL)) { + kfree_skb(probe); + continue; + } + local_bh_disable(); mac80211_hwsim_tx_frame(hwsim->hw, probe, hwsim->tmp_chan); @@ -3567,6 +3576,10 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, } txi->flags |= IEEE80211_TX_STAT_ACK; } + + if (hwsim_flags & HWSIM_TX_CTL_NO_ACK) + txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + ieee80211_tx_status_irqsafe(data2->hw, skb); return 0; out: diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index bc79ca4cb803cc987cc0b9d33316950cd7f07520..753458628f86a18707b0b20acd9923bacd8706a2 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -364,10 +364,12 @@ static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv, sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac); if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) { sta_ptr->tx_pause = tp->tx_pause; + spin_unlock_bh(&priv->sta_list_spinlock); 
mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause); + } else { + spin_unlock_bh(&priv->sta_list_spinlock); } - spin_unlock_bh(&priv->sta_list_spinlock); } } @@ -399,11 +401,13 @@ static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv, sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac); if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) { sta_ptr->tx_pause = tp->tx_pause; + spin_unlock_bh(&priv->sta_list_spinlock); mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause); + } else { + spin_unlock_bh(&priv->sta_list_spinlock); } - spin_unlock_bh(&priv->sta_list_spinlock); } } } diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 0fdfead45c77c775ff16427c815e78d03976b93d..f01b455783b2340d97b314a453c48cfa0d101faa 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -455,6 +455,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) qbuf.addr = addr + offset; qbuf.len = len - offset; + qbuf.skip_unmap = false; mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL); frames++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index c9226dceb510c19eede46b476edd16d8ab39d7b0..bdff89cc3105e6b51bbb27a04e5061b0062ac214 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -618,6 +618,9 @@ mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates); int i; + if (!sta_rates) + return; + spin_lock_bh(&dev->mt76.lock); for (i = 0; i < ARRAY_SIZE(msta->rates); i++) { msta->rates[i].idx = sta_rates->rate[i].idx; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 424be103093c6a61473c8971fa676e5f99c733cc..1465a92ea3fc9008bf67efff11a016455f5f7fe4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1626,7 +1626,7 @@ mt7615_mac_adjust_sensitivity(struct mt7615_phy *phy, struct mt7615_dev *dev = phy->dev; int false_cca = ofdm ? phy->false_cca_ofdm : phy->false_cca_cck; bool ext_phy = phy != &dev->phy; - u16 def_th = ofdm ? -98 : -110; + s16 def_th = ofdm ? 
-98 : -110; bool update = false; s8 *sensitivity; int signal; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 88cdc2badeae7129d00bf301c7e1ab04abb620d6..defa207f53d6f5dbed3ce42c1e9305941d5182b4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -673,6 +673,9 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates); int i; + if (!sta_rates) + return; + spin_lock_bh(&dev->mt76.lock); for (i = 0; i < ARRAY_SIZE(msta->rates); i++) { msta->rates[i].idx = sta_rates->rate[i].idx; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 9a7f317a098fc805ba661803d8eb456f051b807c..41054ee43dbfad4aa007164ac694bad6178218be 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -1259,8 +1259,11 @@ mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, generic = (struct wtbl_generic *)tlv; if (sta) { + if (vif->type == NL80211_IFTYPE_STATION) + generic->partial_aid = cpu_to_le16(vif->bss_conf.aid); + else + generic->partial_aid = cpu_to_le16(sta->aid); memcpy(generic->peer_addr, sta->addr, ETH_ALEN); - generic->partial_aid = cpu_to_le16(sta->aid); generic->muar_idx = mvif->omac_idx; generic->qos = sta->wme; } else { @@ -1314,12 +1317,15 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA); + basic->aid = cpu_to_le16(sta->aid); break; case NL80211_IFTYPE_STATION: basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP); + basic->aid = cpu_to_le16(vif->bss_conf.aid); break; case NL80211_IFTYPE_ADHOC: basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC); + basic->aid = cpu_to_le16(sta->aid); break; default: WARN_ON(1); @@ -1327,7 +1333,6 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, } memcpy(basic->peer_addr, sta->addr, ETH_ALEN); - basic->aid = cpu_to_le16(sta->aid); basic->qos = sta->wme; } diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index bf3fbd14eda3ccd8926f475f7b6f913ae59a124d..091eea0d958d1be13b632837f077fe75f7699fce 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -382,6 +382,8 @@ static int ray_config(struct pcmcia_device *link) goto failed; local->sram = ioremap(link->resource[2]->start, resource_size(link->resource[2])); + if (!local->sram) + goto failed; /*** Set up 16k window for shared memory (receive buffer) ***************/ link->resource[3]->flags |= @@ -396,6 +398,8 @@ static int ray_config(struct pcmcia_device *link) goto failed; local->rmem = ioremap(link->resource[3]->start, resource_size(link->resource[3])); + if (!local->rmem) + goto failed; /*** Set up window for attribute memory ***********************************/ link->resource[4]->flags |= @@ -410,6 +414,8 @@ static int ray_config(struct pcmcia_device *link) goto failed; local->amem = ioremap(link->resource[4]->start, resource_size(link->resource[4])); + if (!local->amem) + goto failed; dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram); dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 
6312fddd9c00a2b37dee0951810098472e72c053..eaba661133280272cb370fb647b04fefcc7d1c96 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1000,6 +1000,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) _initpabias(hw); rtl92c_dm_init(hw); exit: + local_irq_disable(); local_irq_restore(flags); return err; } diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 565efd8806247edbdf94b1fed4f134afe6cd290a..2ef1416899f03858b38ea838052c999cb65352d2 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -1652,7 +1652,7 @@ int rtw_core_init(struct rtw_dev *rtwdev) /* default rx filter setting */ rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV | - BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS | + BIT_PKTCTL_DLEN | BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS | BIT_AB | BIT_AM | BIT_APM; ret = rtw_load_firmware(rtwdev, RTW_NORMAL_FW); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index bd01e82b6bcd0bf6c2c3bd9d6c0b38f091dfbd5a..8d1e8ff71d7efc3d0fe13e6fb4346b26565e39fe 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -131,7 +131,7 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define WLAN_TX_FUNC_CFG2 0x30 #define WLAN_MAC_OPT_NORM_FUNC1 0x98 #define WLAN_MAC_OPT_LB_FUNC1 0x80 -#define WLAN_MAC_OPT_FUNC2 0x30810041 +#define WLAN_MAC_OPT_FUNC2 0xb0810041 #define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \ (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \ diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 22d0dd640ac94fa8a06848ba23b33056748c5505..dbfd67c3f598cac92bbbcb5a2f49218e14a93621 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -204,7 +204,7 @@ static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev) #define WLAN_TX_FUNC_CFG2 0x30 #define WLAN_MAC_OPT_NORM_FUNC1 0x98 #define WLAN_MAC_OPT_LB_FUNC1 0x80 -#define WLAN_MAC_OPT_FUNC2 0x30810041 +#define WLAN_MAC_OPT_FUNC2 0xb0810041 #define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \ (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \ diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index 79ad6232dce8303353548ce52c5b4f2dde9018c8..cee586335552d82621c54fcb080cc9a2cc522b92 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -1248,7 +1248,7 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev) #define WLAN_TX_FUNC_CFG2 0x30 #define WLAN_MAC_OPT_NORM_FUNC1 0x98 #define WLAN_MAC_OPT_LB_FUNC1 0x80 -#define WLAN_MAC_OPT_FUNC2 0x30810041 +#define WLAN_MAC_OPT_FUNC2 0xb0810041 #define WLAN_MAC_INT_MIG_CFG 0x33330000 #define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \ diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index 8c638cfeac52f0524b0b31aa8ff792b5e59e7b7a..fe8aed58ac0888d088820da525347dfcd6648194 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -23,6 +23,7 @@ #include "rsi_common.h" #include "rsi_coex.h" #include "rsi_hal.h" +#include "rsi_usb.h" u32 rsi_zone_enabled = /* INFO_ZONE | INIT_ZONE | @@ -168,6 +169,9 @@ int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len) frame_desc 
= &rx_pkt[index]; actual_length = *(u16 *)&frame_desc[0]; offset = *(u16 *)&frame_desc[2]; + if (!rcv_pkt_len && offset > + RSI_MAX_RX_USB_PKT_SIZE - FRAME_DESC_SZ) + goto fail; queueno = rsi_get_queueno(frame_desc, offset); length = rsi_get_length(frame_desc, offset); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index d881df9ebd0c315ce662fe665dc3670798741513..11388a14696211b3bcc112eb0af0b7ffa70ac6e7 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -269,8 +269,12 @@ static void rsi_rx_done_handler(struct urb *urb) struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)rx_cb->data; int status = -EINVAL; + if (!rx_cb->rx_skb) + return; + if (urb->status) { dev_kfree_skb(rx_cb->rx_skb); + rx_cb->rx_skb = NULL; return; } @@ -294,8 +298,10 @@ static void rsi_rx_done_handler(struct urb *urb) if (rsi_rx_urb_submit(dev->priv, rx_cb->ep_num, GFP_ATOMIC)) rsi_dbg(ERR_ZONE, "%s: Failed in urb submission", __func__); - if (status) + if (status) { dev_kfree_skb(rx_cb->rx_skb); + rx_cb->rx_skb = NULL; + } } static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num) @@ -322,7 +328,6 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t mem_flags) struct sk_buff *skb; u8 dword_align_bytes = 0; -#define RSI_MAX_RX_USB_PKT_SIZE 3000 skb = dev_alloc_skb(RSI_MAX_RX_USB_PKT_SIZE); if (!skb) return -ENOMEM; diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 8702f434b5699978549144b03203dacf5e016790..ad88f8c70a35174ea969f37464df307e0794508d 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -44,6 +44,8 @@ #define RSI_USB_BUF_SIZE 4096 #define RSI_USB_CTRL_BUF_SIZE 0x04 +#define RSI_MAX_RX_USB_PKT_SIZE 3000 + struct rx_usb_ctrl_block { u8 *data; struct urb *rx_urb; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 94d19158efc188acc0d963b491343f52319f873b..ca261e0fc9c9b4b1ecd082db124b45a9452d7e73 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -256,6 +256,7 @@ static void backend_disconnect(struct backend_info *be) unsigned int queue_index; xen_unregister_watchers(vif); + xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(vif); #endif /* CONFIG_DEBUG_FS */ @@ -675,7 +676,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch, /* Not interested in this watch anymore. 
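/*
 * Hedged sketch of the xen-netback change around here: the
 * "hotplug-status" node is now removed once per disconnect rather than
 * as a side effect of the watch callback, so a re-connecting backend
 * can recreate and re-watch it. demo_backend_disconnect() is a
 * simplified stand-in; xenbus_rm() with XBT_NIL runs outside any
 * transaction.
 */
#include <xen/xenbus.h>

static void demo_backend_disconnect(struct xenbus_device *dev)
{
	xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
}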
*/ unregister_hotplug_status_watch(be); - xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status"); } kfree(str); } @@ -824,15 +824,11 @@ static void connect(struct backend_info *be) xenvif_carrier_on(be->vif); unregister_hotplug_status_watch(be); - if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) { - err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, - NULL, hotplug_status_changed, - "%s/%s", dev->nodename, - "hotplug-status"); - if (err) - goto err; + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL, + hotplug_status_changed, + "%s/%s", dev->nodename, "hotplug-status"); + if (!err) be->have_hotplug_status_watch = 1; - } netif_tx_wake_all_queues(be->vif->dev); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index fce3a90a335cb31593a0a7d11ddf994b5db3afce..569f3c8e7b756f41ad42cae94e305b2b0c8a78c2 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -66,6 +66,10 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644); MODULE_PARM_DESC(max_queues, "Maximum number of queues per virtual interface"); +static bool __read_mostly xennet_trusted = true; +module_param_named(trusted, xennet_trusted, bool, 0644); +MODULE_PARM_DESC(trusted, "Is the backend trusted"); + #define XENNET_TIMEOUT (5 * HZ) static const struct ethtool_ops xennet_ethtool_ops; @@ -175,6 +179,9 @@ struct netfront_info { /* Is device behaving sane? */ bool broken; + /* Should skbs be bounced into a zeroed buffer? */ + bool bounce; + atomic_t rx_gso_checksum_fixup; }; @@ -273,7 +280,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) if (unlikely(!skb)) return NULL; - page = page_pool_dev_alloc_pages(queue->page_pool); + page = page_pool_alloc_pages(queue->page_pool, + GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO); if (unlikely(!page)) { kfree_skb(skb); return NULL; @@ -424,14 +432,12 @@ static bool xennet_tx_buf_gc(struct netfront_queue *queue) queue->tx_link[id] = TX_LINK_NONE; skb = queue->tx_skbs[id]; queue->tx_skbs[id] = NULL; - if (unlikely(gnttab_query_foreign_access( - queue->grant_tx_ref[id]) != 0)) { + if (unlikely(!gnttab_end_foreign_access_ref( + queue->grant_tx_ref[id], GNTMAP_readonly))) { dev_alert(dev, "Grant still in use by backend domain\n"); goto err; } - gnttab_end_foreign_access_ref( - queue->grant_tx_ref[id], GNTMAP_readonly); gnttab_release_grant_reference( &queue->gref_tx_head, queue->grant_tx_ref[id]); queue->grant_tx_ref[id] = GRANT_INVALID_REF; @@ -671,6 +677,33 @@ static int xennet_xdp_xmit(struct net_device *dev, int n, return n - drops; } +struct sk_buff *bounce_skb(const struct sk_buff *skb) +{ + unsigned int headerlen = skb_headroom(skb); + /* Align size to allocate full pages and avoid contiguous data leaks */ + unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len, + XEN_PAGE_SIZE); + struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO); + + if (!n) + return NULL; + + if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) { + WARN_ONCE(1, "misaligned skb allocated\n"); + kfree_skb(n); + return NULL; + } + + /* Set the data pointer */ + skb_reserve(n, headerlen); + /* Set the tail pointer and length */ + skb_put(n, skb->len); + + BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); + + skb_copy_header(n, skb); + return n; +} #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) @@ -724,9 +757,13 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev /* The first req should be at least ETH_HLEN size or the packet will be * 
dropped by netback. + * + * If the backend is not trusted bounce all data to zeroed pages to + * avoid exposing contiguous data on the granted page not belonging to + * the skb. */ - if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) { - nskb = skb_copy(skb, GFP_ATOMIC); + if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) { + nskb = bounce_skb(skb); if (!nskb) goto drop; dev_consume_skb_any(skb); @@ -844,6 +881,28 @@ static int xennet_close(struct net_device *dev) return 0; } +static void xennet_destroy_queues(struct netfront_info *info) +{ + unsigned int i; + + for (i = 0; i < info->netdev->real_num_tx_queues; i++) { + struct netfront_queue *queue = &info->queues[i]; + + if (netif_running(info->netdev)) + napi_disable(&queue->napi); + netif_napi_del(&queue->napi); + } + + kfree(info->queues); + info->queues = NULL; +} + +static void xennet_uninit(struct net_device *dev) +{ + struct netfront_info *np = netdev_priv(dev); + xennet_destroy_queues(np); +} + static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) { unsigned long flags; @@ -970,7 +1029,6 @@ static int xennet_get_responses(struct netfront_queue *queue, struct device *dev = &queue->info->netdev->dev; struct bpf_prog *xdp_prog; struct xdp_buff xdp; - unsigned long ret; int slots = 1; int err = 0; u32 verdict; @@ -1012,8 +1070,13 @@ static int xennet_get_responses(struct netfront_queue *queue, goto next; } - ret = gnttab_end_foreign_access_ref(ref, 0); - BUG_ON(!ret); + if (!gnttab_end_foreign_access_ref(ref, 0)) { + dev_alert(dev, + "Grant still in use by backend domain\n"); + queue->info->broken = true; + dev_alert(dev, "Disabled for further use\n"); + return -EINVAL; + } gnttab_release_grant_reference(&queue->gref_rx_head, ref); @@ -1033,8 +1096,10 @@ static int xennet_get_responses(struct netfront_queue *queue, } } rcu_read_unlock(); -next: + __skb_queue_tail(list, skb); + +next: if (!(rx->flags & XEN_NETRXF_more_data)) break; @@ -1234,6 +1299,10 @@ static int xennet_poll(struct napi_struct *napi, int budget) &need_xdp_flush); if (unlikely(err)) { + if (queue->info->broken) { + spin_unlock(&queue->rx_lock); + return 0; + } err: while ((skb = __skb_dequeue(&tmpq))) __skb_queue_tail(&errq, skb); @@ -1613,6 +1682,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp) } static const struct net_device_ops xennet_netdev_ops = { + .ndo_uninit = xennet_uninit, .ndo_open = xennet_open, .ndo_stop = xennet_close, .ndo_start_xmit = xennet_start_xmit, @@ -1897,7 +1967,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_queue *queue, unsigned int feature_split_evtchn) { struct xen_netif_tx_sring *txs; - struct xen_netif_rx_sring *rxs; + struct xen_netif_rx_sring *rxs = NULL; grant_ref_t gref; int err; @@ -1917,21 +1987,21 @@ static int setup_netfront(struct xenbus_device *dev, err = xenbus_grant_ring(dev, txs, 1, &gref); if (err < 0) - goto grant_tx_ring_fail; + goto fail; queue->tx_ring_ref = gref; rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); if (!rxs) { err = -ENOMEM; xenbus_dev_fatal(dev, err, "allocating rx ring page"); - goto alloc_rx_ring_fail; + goto fail; } SHARED_RING_INIT(rxs); FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); err = xenbus_grant_ring(dev, rxs, 1, &gref); if (err < 0) - goto grant_rx_ring_fail; + goto fail; queue->rx_ring_ref = gref; if (feature_split_evtchn) @@ -1944,22 +2014,28 @@ static int setup_netfront(struct xenbus_device *dev, err = setup_netfront_single(queue); if (err) - goto alloc_evtchn_fail; + goto fail; 
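/*
 * Sketch of the bounce idea xen-netfront adopts above: when the backend
 * is untrusted, copy each skb into freshly zeroed, page-rounded memory
 * so the grant never exposes adjacent kernel data. Assumptions: this
 * handles only linear skbs and is a simplified illustration, not the
 * driver's bounce_skb().
 */
#include <linux/skbuff.h>

static struct sk_buff *demo_bounce_linear_skb(const struct sk_buff *skb)
{
	/* Round up to whole pages so no stale bytes trail the payload. */
	unsigned int size = ALIGN(skb->len, PAGE_SIZE);
	struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);

	if (!n)
		return NULL;

	skb_put_data(n, skb->data, skb->len);
	return n;
}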
return 0; /* If we fail to setup netfront, it is safe to just revoke access to * granted pages because backend is not accessing it at this point. */ -alloc_evtchn_fail: - gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); -grant_rx_ring_fail: - free_page((unsigned long)rxs); -alloc_rx_ring_fail: - gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); -grant_tx_ring_fail: - free_page((unsigned long)txs); -fail: + fail: + if (queue->rx_ring_ref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(queue->rx_ring_ref, 0, + (unsigned long)rxs); + queue->rx_ring_ref = GRANT_INVALID_REF; + } else { + free_page((unsigned long)rxs); + } + if (queue->tx_ring_ref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(queue->tx_ring_ref, 0, + (unsigned long)txs); + queue->tx_ring_ref = GRANT_INVALID_REF; + } else { + free_page((unsigned long)txs); + } return err; } @@ -2105,22 +2181,6 @@ static int write_queue_xenstore_keys(struct netfront_queue *queue, return err; } -static void xennet_destroy_queues(struct netfront_info *info) -{ - unsigned int i; - - for (i = 0; i < info->netdev->real_num_tx_queues; i++) { - struct netfront_queue *queue = &info->queues[i]; - - if (netif_running(info->netdev)) - napi_disable(&queue->napi); - netif_napi_del(&queue->napi); - } - - kfree(info->queues); - info->queues = NULL; -} - static int xennet_create_page_pool(struct netfront_queue *queue) @@ -2229,6 +2289,10 @@ static int talk_to_netback(struct xenbus_device *dev, info->netdev->irq = 0; + /* Check if backend is trusted. */ + info->bounce = !xennet_trusted || + !xenbus_read_unsigned(dev->nodename, "trusted", 1); + /* Check if backend supports multiple queues */ max_queues = xenbus_read_unsigned(info->xbdev->otherend, "multi-queue-max-queues", 1); @@ -2395,6 +2459,9 @@ static int xennet_connect(struct net_device *dev) return err; if (np->netback_has_xdp_headroom) pr_info("backend supports XDP headroom\n"); + if (np->bounce) + dev_info(&np->xbdev->dev, + "bouncing transmitted data to zeroed pages\n"); /* talk_to_netback() sets the correct number of queues */ num_queues = dev->real_num_tx_queues; diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c index 529be35ac1782a62e9f18b71905fa597ea52a222..54d228acc0f5d9bac7b617cb2c36376abe1a0a42 100644 --- a/drivers/nfc/nfcmrvl/main.c +++ b/drivers/nfc/nfcmrvl/main.c @@ -194,6 +194,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) { struct nci_dev *ndev = priv->ndev; + nci_unregister_device(ndev); if (priv->ndev->nfc_dev->fw_download_in_progress) nfcmrvl_fw_dnld_abort(priv); @@ -202,7 +203,6 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) if (gpio_is_valid(priv->config.reset_n_io)) gpio_free(priv->config.reset_n_io); - nci_unregister_device(ndev); nci_free_device(ndev); kfree(priv); } diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c index 1caebefb25ff1d43abe9adf71e933fe2732ad497..2ae1474faede99aea3e9fea55d80da245f7e164b 100644 --- a/drivers/nfc/port100.c +++ b/drivers/nfc/port100.c @@ -1609,7 +1609,9 @@ static int port100_probe(struct usb_interface *interface, nfc_digital_free_device(dev->nfc_digital_dev); error: + usb_kill_urb(dev->in_urb); usb_free_urb(dev->in_urb); + usb_kill_urb(dev->out_urb); usb_free_urb(dev->out_urb); usb_put_dev(dev->udev); diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index c8bdf078d111522f4e7b5f0cc66ac57bbffc3647..0841e0e370a03bf496b067c861829f21f19b0d84 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -320,6 +320,11 @@ int 
st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, return -ENOMEM; transaction->aid_len = skb->data[1]; + + /* Checking if the length of the AID is valid */ + if (transaction->aid_len > sizeof(transaction->aid)) + return -EINVAL; + memcpy(transaction->aid, &skb->data[2], transaction->aid_len); @@ -329,6 +334,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, return -EPROTO; transaction->params_len = skb->data[transaction->aid_len + 3]; + + /* Total size is allocated (skb->len - 2) minus fixed array members */ + if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction))) + return -EINVAL; + memcpy(transaction->params, skb->data + transaction->aid_len + 4, transaction->params_len); diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c index bc4541cbf8c6e174011851c09627971613a17057..99a5fc1ab0aafb2f0d1f0f969ea8a229cf70a10b 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen4.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c @@ -168,6 +168,18 @@ static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) return NTB_TOPO_NONE; } +static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) +{ + switch (ppd & SPR_PPD_TOPO_MASK) { + case SPR_PPD_TOPO_B2B_USD: + return NTB_TOPO_B2B_USD; + case SPR_PPD_TOPO_B2B_DSD: + return NTB_TOPO_B2B_DSD; + } + + return NTB_TOPO_NONE; +} + int gen4_init_dev(struct intel_ntb_dev *ndev) { struct pci_dev *pdev = ndev->ntb.pdev; @@ -181,7 +193,10 @@ int gen4_init_dev(struct intel_ntb_dev *ndev) ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN; ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET); - ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + if (pdev_is_ICX(pdev)) + ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1); + else if (pdev_is_SPR(pdev)) + ndev->ntb.topo = spr_ppd_topo(ndev, ppd1); dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1, ntb_topo_string(ndev->ntb.topo)); if (ndev->ntb.topo == NTB_TOPO_NONE) diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h index a868c788de02f3abf37a1ac862814344e1d22f52..ec293953d665f7f7d98ca8134e69387f7fa8a9a3 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen4.h +++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h @@ -46,10 +46,14 @@ #define GEN4_PPD_CLEAR_TRN 0x0001 #define GEN4_PPD_LINKTRN 0x0008 #define GEN4_PPD_CONN_MASK 0x0300 +#define SPR_PPD_CONN_MASK 0x0700 #define GEN4_PPD_CONN_B2B 0x0200 #define GEN4_PPD_DEV_MASK 0x1000 #define GEN4_PPD_DEV_DSD 0x1000 #define GEN4_PPD_DEV_USD 0x0000 +#define SPR_PPD_DEV_MASK 0x4000 +#define SPR_PPD_DEV_DSD 0x4000 +#define SPR_PPD_DEV_USD 0x0000 #define GEN4_LINK_CTRL_LINK_DISABLE 0x0010 #define GEN4_SLOTSTS 0xb05a @@ -59,6 +63,10 @@ #define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD) #define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD) +#define SPR_PPD_TOPO_MASK (SPR_PPD_CONN_MASK | SPR_PPD_DEV_MASK) +#define SPR_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_USD) +#define SPR_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_DSD) + #define GEN4_DB_COUNT 32 #define GEN4_DB_LINK 32 #define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK) @@ -97,4 +105,12 @@ static inline int pdev_is_ICX(struct pci_dev *pdev) return 0; } +static inline int pdev_is_SPR(struct pci_dev *pdev) +{ + if (pdev_is_gen4(pdev) && + pdev->revision > PCI_DEVICE_REVISION_ICX_MAX) + return 1; + return 0; +} + #endif diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index e05cc9f8a9fd1bfe95e9965400e4fb72b7605cfe..1d72653b5c8d172b56f55fe16741886c283a9e7a 
100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -1018,6 +1018,9 @@ static unsigned long default_align(struct nd_region *nd_region) } } + if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX) + align = PAGE_SIZE; + mappings = max_t(u16, 1, nd_region->ndr_mappings); div_u64_rem(align, mappings, &remainder); if (remainder) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 9ccf44592fe42ee75ac9645263b95201dc0a044b..dcc047f01a0761fdbae4ae887fedd83bdb87157e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3661,16 +3661,15 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, return NULL; } -static int __nvme_check_ids(struct nvme_subsystem *subsys, - struct nvme_ns_head *new) +static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, + struct nvme_ns_ids *ids) { struct nvme_ns_head *h; lockdep_assert_held(&subsys->lock); list_for_each_entry(h, &subsys->nsheads, entry) { - if (nvme_ns_ids_valid(&new->ids) && - nvme_ns_ids_equal(&new->ids, &h->ids)) + if (nvme_ns_ids_valid(ids) && nvme_ns_ids_equal(ids, &h->ids)) return -EINVAL; } @@ -3704,7 +3703,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, head->ids = *ids; kref_init(&head->ref); - ret = __nvme_check_ids(ctrl->subsys, head); + ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &head->ids); if (ret) { dev_err(ctrl->device, "duplicate IDs for nsid %d\n", nsid); @@ -4239,7 +4238,14 @@ static void nvme_async_event_work(struct work_struct *work) container_of(work, struct nvme_ctrl, async_event_work); nvme_aen_uevent(ctrl); - ctrl->ops->submit_async_event(ctrl); + + /* + * The transport drivers must guarantee AER submission here is safe by + * flushing ctrl async_event_work after changing the controller state + * from LIVE and before freeing the admin queue. 
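/*
 * Hedged sketch of the two halves of the AER contract stated above,
 * with simplified names as they might appear inside drivers/nvme/host:
 * the work item submits an AER only while the controller is LIVE, and
 * teardown flushes the work after leaving LIVE so no submission can
 * race with freeing the admin queue.
 */
static void demo_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	if (ctrl->state == NVME_CTRL_LIVE)
		ctrl->ops->submit_async_event(ctrl);
}

static void demo_teardown(struct nvme_ctrl *ctrl)
{
	/* the controller state has already left LIVE by this point */
	flush_work(&ctrl->async_event_work);
	/* ...the admin queue can now be torn down safely... */
}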
+ */ + if (ctrl->state == NVME_CTRL_LIVE) + ctrl->ops->submit_async_event(ctrl); } static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index a9c1e3b4585ec3f9721c7e76fd031e8250fbd265..78467cb3f343e7dad0321e059264ca6e1bfb3cfe 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -153,6 +153,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl, struct nvmf_ctrl_options *opts) { if (ctrl->state == NVME_CTRL_DELETING || + ctrl->state == NVME_CTRL_DELETING_NOIO || ctrl->state == NVME_CTRL_DEAD || strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) || strcmp(opts->host->nqn, ctrl->opts->host->nqn) || diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 224907d8d5dc34ce933541cfa56af5912119d428..f435ab0809fbccb8e9fac959a651f1d6fce741c2 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3198,7 +3198,8 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_DEALLOCATE_ZEROES, }, { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ .driver_data = NVME_QUIRK_STRIPE_SIZE | - NVME_QUIRK_DEALLOCATE_ZEROES, }, + NVME_QUIRK_DEALLOCATE_ZEROES | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ .driver_data = NVME_QUIRK_STRIPE_SIZE | NVME_QUIRK_DEALLOCATE_ZEROES, }, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 2721200810a1724600bf13124559418544dcb600..579aed0c447f2b146d93ec435aae8b5ddbf56415 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1200,6 +1200,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) struct nvme_rdma_ctrl, err_work); nvme_stop_keep_alive(&ctrl->ctrl); + flush_work(&ctrl->ctrl.async_event_work); nvme_rdma_teardown_io_queues(ctrl, false); nvme_start_queues(&ctrl->ctrl); nvme_rdma_teardown_admin_queue(ctrl, false); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index c014c5adbac5ef0b4e57daeeb2e201b15f245135..559e7fda7cc7ba40e112db0c33175bda3611c3fa 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -30,6 +30,44 @@ static int so_priority; module_param(so_priority, int, 0644); MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority"); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +/* lockdep can detect a circular dependency of the form + * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock + * because dependencies are tracked for both nvme-tcp and user contexts. Using + * a separate class prevents lockdep from conflating nvme-tcp socket use with + * user-space socket API use. 
+ */ +static struct lock_class_key nvme_tcp_sk_key[2]; +static struct lock_class_key nvme_tcp_slock_key[2]; + +static void nvme_tcp_reclassify_socket(struct socket *sock) +{ + struct sock *sk = sock->sk; + + if (WARN_ON_ONCE(!sock_allow_reclassification(sk))) + return; + + switch (sk->sk_family) { + case AF_INET: + sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME", + &nvme_tcp_slock_key[0], + "sk_lock-AF_INET-NVME", + &nvme_tcp_sk_key[0]); + break; + case AF_INET6: + sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME", + &nvme_tcp_slock_key[1], + "sk_lock-AF_INET6-NVME", + &nvme_tcp_sk_key[1]); + break; + default: + WARN_ON_ONCE(1); + } +} +#else +static void nvme_tcp_reclassify_socket(struct socket *sock) { } +#endif + enum nvme_tcp_send_state { NVME_TCP_SEND_CMD_PDU = 0, NVME_TCP_SEND_H2C_PDU, @@ -904,7 +942,15 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) static void nvme_tcp_fail_request(struct nvme_tcp_request *req) { - nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR); + if (nvme_tcp_async_req(req)) { + union nvme_result res = {}; + + nvme_complete_async_event(&req->queue->ctrl->ctrl, + cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res); + } else { + nvme_tcp_end_request(blk_mq_rq_from_pdu(req), + NVME_SC_HOST_PATH_ERROR); + } } static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) @@ -1414,6 +1460,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, goto err_destroy_mutex; } + nvme_tcp_reclassify_socket(queue->sock); + /* Single syn retry */ tcp_sock_set_syncnt(queue->sock->sk, 1); @@ -2069,6 +2117,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work) struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; nvme_stop_keep_alive(ctrl); + flush_work(&ctrl->async_event_work); nvme_tcp_teardown_io_queues(ctrl, false); /* unquiesce to fail fast pending requests */ nvme_start_queues(ctrl); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 6b170083cd248e5f61ef69ed69ef5fd328a6ddf2..21d89d80d083834ad0b6c7d0189fc68f236118cc 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -222,6 +222,8 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct nvmem_device *nvmem = to_nvmem_device(dev); + attr->size = nvmem->size; + return nvmem_bin_attr_get_umode(nvmem); } diff --git a/drivers/of/base.c b/drivers/of/base.c index 161a23631472d94d1a01b6b7752303153561f329..a44a0e7ba2510767b5cfd3a8c484c991da024f6e 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1328,9 +1328,14 @@ int of_phandle_iterator_next(struct of_phandle_iterator *it) * property data length */ if (it->cur + count > it->list_end) { - pr_err("%pOF: %s = %d found %d\n", - it->parent, it->cells_name, - count, it->cell_count); + if (it->cells_name) + pr_err("%pOF: %s = %d found %td\n", + it->parent, it->cells_name, + count, it->list_end - it->cur); + else + pr_err("%pOF: phandle %s needs %d, found %td\n", + it->parent, of_node_full_name(it->node), + count, it->list_end - it->cur); goto err; } } diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c index f335d941a716e84157e42aab2caf09da772d2aa7..d8231c34e873d75fcf52ac740b45a7bd82a69dc8 100644 --- a/drivers/of/kexec.c +++ b/drivers/of/kexec.c @@ -396,6 +396,15 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, crashk_res.end - crashk_res.start + 1); if (ret) goto out; + + if (crashk_low_res.end) { + ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, + FDT_PROP_MEM_RANGE, + crashk_low_res.start, + crashk_low_res.end - 
crashk_low_res.start + 1); + if (ret) + goto out; + } } /* add bootargs */ diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 1d4b0b7d0cc10c71c5071439f54eaf61ce1afdb1..5407bbdb6439519e9a2521a7aff31c2789fa2df4 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -910,11 +910,18 @@ static void __init of_unittest_dma_ranges_one(const char *path, if (!rc) { phys_addr_t paddr; dma_addr_t dma_addr; - struct device dev_bogus; + struct device *dev_bogus; - dev_bogus.dma_range_map = map; - paddr = dma_to_phys(&dev_bogus, expect_dma_addr); - dma_addr = phys_to_dma(&dev_bogus, expect_paddr); + dev_bogus = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!dev_bogus) { + unittest(0, "kzalloc() failed\n"); + kfree(map); + return; + } + + dev_bogus->dma_range_map = map; + paddr = dma_to_phys(dev_bogus, expect_dma_addr); + dma_addr = phys_to_dma(dev_bogus, expect_paddr); unittest(paddr == expect_paddr, "of_dma_get_range: wrong phys addr %pap (expecting %llx) on node %pOF\n", @@ -924,6 +931,7 @@ static void __init of_unittest_dma_ranges_one(const char *path, &dma_addr, expect_dma_addr, np); kfree(map); + kfree(dev_bogus); } of_node_put(np); #endif @@ -933,8 +941,9 @@ static void __init of_unittest_parse_dma_ranges(void) { of_unittest_dma_ranges_one("/testcase-data/address-tests/device@70000000", 0x0, 0x20000000); - of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000", - 0x100000000, 0x20000000); + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000", + 0x100000000, 0x20000000); of_unittest_dma_ranges_one("/testcase-data/address-tests/pci@90000000", 0x80000000, 0x20000000); } diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index b5f9ee81a46c1e2753d5f9150bf696279e882fff..b916fab9b161816bf032ba5b47ab58dc07cc4692 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usg_calls++; #endif - while(sg_dma_len(sglist) && nents--) { + while (nents && sg_dma_len(sglist)) { #ifdef CCIO_COLLECT_STATS ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; @@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ccio_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); ++sglist; + nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 952a92504df69aa5c627ec8b38854c48c5c5a999..e33036281327d484c9470772839ede70e7b5e85d 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -142,9 +142,8 @@ struct dino_device { struct pci_hba_data hba; /* 'C' inheritance - must be first */ spinlock_t dinosaur_pen; - unsigned long txn_addr; /* EIR addr to generate interrupt */ - u32 txn_data; /* EIR data assign to each dino */ u32 imr; /* IRQ's which are enabled */ + struct gsc_irq gsc_irq; int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */ #ifdef DINO_DEBUG unsigned int dino_irr0; /* save most recent IRQ line stat */ @@ -339,14 +338,43 @@ static void dino_unmask_irq(struct irq_data *d) if (tmp & DINO_MASK_IRQ(local_irq)) { DBG(KERN_WARNING "%s(): IRQ asserted! 
(ILR 0x%x)\n", __func__, tmp); - gsc_writel(dino_dev->txn_data, dino_dev->txn_addr); + gsc_writel(dino_dev->gsc_irq.txn_data, dino_dev->gsc_irq.txn_addr); } } +#ifdef CONFIG_SMP +static int dino_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, + bool force) +{ + struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); + struct cpumask tmask; + int cpu_irq; + u32 eim; + + if (!cpumask_and(&tmask, dest, cpu_online_mask)) + return -EINVAL; + + cpu_irq = cpu_check_affinity(d, &tmask); + if (cpu_irq < 0) + return cpu_irq; + + dino_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq); + eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data; + __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0); + + irq_data_update_effective_affinity(d, &tmask); + + return IRQ_SET_MASK_OK; +} +#endif + static struct irq_chip dino_interrupt_type = { .name = "GSC-PCI", .irq_unmask = dino_unmask_irq, .irq_mask = dino_mask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = dino_set_affinity_irq, +#endif }; @@ -806,7 +834,6 @@ static int __init dino_common_init(struct parisc_device *dev, { int status; u32 eim; - struct gsc_irq gsc_irq; struct resource *res; pcibios_register_hba(&dino_dev->hba); @@ -821,10 +848,8 @@ static int __init dino_common_init(struct parisc_device *dev, ** still only has 11 IRQ input lines - just map some of them ** to a different processor. */ - dev->irq = gsc_alloc_irq(&gsc_irq); - dino_dev->txn_addr = gsc_irq.txn_addr; - dino_dev->txn_data = gsc_irq.txn_data; - eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; + dev->irq = gsc_alloc_irq(&dino_dev->gsc_irq); + eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data; /* ** Dino needs a PA "IRQ" to get a processor's attention. diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c index ed9371acf37eb6b40b649b4eec1607531cd2e17f..ec175ae998733bb69f90217eb2794e25d66e9ab5 100644 --- a/drivers/parisc/gsc.c +++ b/drivers/parisc/gsc.c @@ -135,10 +135,41 @@ static void gsc_asic_unmask_irq(struct irq_data *d) */ } +#ifdef CONFIG_SMP +static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest, + bool force) +{ + struct gsc_asic *gsc_dev = irq_data_get_irq_chip_data(d); + struct cpumask tmask; + int cpu_irq; + + if (!cpumask_and(&tmask, dest, cpu_online_mask)) + return -EINVAL; + + cpu_irq = cpu_check_affinity(d, &tmask); + if (cpu_irq < 0) + return cpu_irq; + + gsc_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq); + gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data; + + /* switch IRQ's for devices below LASI/WAX to other CPU */ + gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR); + + irq_data_update_effective_affinity(d, &tmask); + + return IRQ_SET_MASK_OK; +} +#endif + + static struct irq_chip gsc_asic_interrupt_type = { .name = "GSC-ASIC", .irq_unmask = gsc_asic_unmask_irq, .irq_mask = gsc_asic_mask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = gsc_set_affinity_irq, +#endif }; int gsc_assign_irq(struct irq_chip *type, void *data) diff --git a/drivers/parisc/gsc.h b/drivers/parisc/gsc.h index 86abad3fa2150b8bc61c1e66fc79354855026482..73cbd0bb1975a071063ff3cc70165d721e1fb96b 100644 --- a/drivers/parisc/gsc.h +++ b/drivers/parisc/gsc.h @@ -31,6 +31,7 @@ struct gsc_asic { int version; int type; int eim; + struct gsc_irq gsc_irq; int global_irq[32]; }; diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c index 4e4fd12c2112ea805783313567a4dacddc1c52e1..6ef621adb63a850a47a4d37d05461a4ec2330150 100644 --- a/drivers/parisc/lasi.c +++ 
b/drivers/parisc/lasi.c @@ -163,7 +163,6 @@ static int __init lasi_init_chip(struct parisc_device *dev) { extern void (*chassis_power_off)(void); struct gsc_asic *lasi; - struct gsc_irq gsc_irq; int ret; lasi = kzalloc(sizeof(*lasi), GFP_KERNEL); @@ -185,7 +184,7 @@ static int __init lasi_init_chip(struct parisc_device *dev) lasi_init_irq(lasi); /* the IRQ lasi should use */ - dev->irq = gsc_alloc_irq(&gsc_irq); + dev->irq = gsc_alloc_irq(&lasi->gsc_irq); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); @@ -193,9 +192,9 @@ static int __init lasi_init_chip(struct parisc_device *dev) return -EBUSY; } - lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; + lasi->eim = ((u32) lasi->gsc_irq.txn_addr) | lasi->gsc_irq.txn_data; - ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi); + ret = request_irq(lasi->gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi); if (ret < 0) { kfree(lasi); return ret; diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index e090978518f1a651195cad3e654fc6bde4900094..4760f82def6ec5738f1aada43b4323a200065098 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c @@ -979,8 +979,10 @@ pdcs_register_pathentries(void) entry->kobj.kset = paths_kset; err = kobject_init_and_add(&entry->kobj, &ktype_pdcspath, NULL, "%s", entry->name); - if (err) + if (err) { + kobject_put(&entry->kobj); return err; + } /* kobject is now registered */ write_lock(&entry->rw_lock); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index dce4cdf786cdb19a607a79dc35a804067577d41d..228c58060e9b3810f7ceb9dc1939628ec6eb804e 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, spin_unlock_irqrestore(&ioc->res_lock, flags); #endif - while (sg_dma_len(sglist) && nents--) { + while (nents && sg_dma_len(sglist)) { sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction, 0); @@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usingle_calls--; /* kluge since call is unmap_sg() */ #endif ++sglist; + nents--; } DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c index 5b6df1516235470167198138e0cdbcfdd5feef95..73a2b01f8d9ca7b33077e19637f92e04a7ac22c1 100644 --- a/drivers/parisc/wax.c +++ b/drivers/parisc/wax.c @@ -68,7 +68,6 @@ static int __init wax_init_chip(struct parisc_device *dev) { struct gsc_asic *wax; struct parisc_device *parent; - struct gsc_irq gsc_irq; int ret; wax = kzalloc(sizeof(*wax), GFP_KERNEL); @@ -85,7 +84,7 @@ static int __init wax_init_chip(struct parisc_device *dev) wax_init_irq(wax); /* the IRQ wax should use */ - dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ); + dev->irq = gsc_claim_irq(&wax->gsc_irq, WAX_GSC_IRQ); if (dev->irq < 0) { printk(KERN_ERR "%s(): cannot get GSC irq\n", __func__); @@ -93,9 +92,9 @@ static int __init wax_init_chip(struct parisc_device *dev) return -EBUSY; } - wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data; + wax->eim = ((u32) wax->gsc_irq.txn_addr) | wax->gsc_irq.txn_data; - ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax); + ret = request_irq(wax->gsc_irq.irq, gsc_asic_intr, 0, "wax", wax); if (ret < 0) { kfree(wax); return ret; diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 0f6a6685ab5b5400b2cba8acd26cd6c97b375ce5..af051fb886998506a3b72fdcbe0eb00021b0bef3 100644 
--- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -851,7 +851,9 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, case PCI_EXP_RTSTA: { u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG); u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG); - *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16); + *value = msglog >> 16; + if (isr0 & PCIE_MSG_PM_PME_MASK) + *value |= PCI_EXP_RTSTA_PME; return PCI_BRIDGE_EMUL_HANDLED; } @@ -879,7 +881,6 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, return PCI_BRIDGE_EMUL_HANDLED; } - case PCI_CAP_LIST_ID: case PCI_EXP_DEVCAP: case PCI_EXP_DEVCTL: *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); @@ -960,6 +961,9 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie) /* Support interrupt A for MSI feature */ bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE; + /* Aardvark HW provides PCIe Capability structure in version 2 */ + bridge->pcie_conf.cap = cpu_to_le16(2); + /* Indicates supports for Completion Retry Status */ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS); @@ -1182,7 +1186,7 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data, msg->address_lo = lower_32_bits(msi_msg); msg->address_hi = upper_32_bits(msi_msg); - msg->data = data->irq; + msg->data = data->hwirq; } static int advk_msi_set_affinity(struct irq_data *irq_data, @@ -1199,15 +1203,11 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain, int hwirq, i; mutex_lock(&pcie->msi_used_lock); - hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM, - 0, nr_irqs, 0); - if (hwirq >= MSI_IRQ_NUM) { - mutex_unlock(&pcie->msi_used_lock); - return -ENOSPC; - } - - bitmap_set(pcie->msi_used, hwirq, nr_irqs); + hwirq = bitmap_find_free_region(pcie->msi_used, MSI_IRQ_NUM, + order_base_2(nr_irqs)); mutex_unlock(&pcie->msi_used_lock); + if (hwirq < 0) + return -ENOSPC; for (i = 0; i < nr_irqs; i++) irq_domain_set_info(domain, virq + i, hwirq + i, @@ -1225,7 +1225,7 @@ static void advk_msi_irq_domain_free(struct irq_domain *domain, struct advk_pcie *pcie = domain->host_data; mutex_lock(&pcie->msi_used_lock); - bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs); + bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs)); mutex_unlock(&pcie->msi_used_lock); } diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index ad3e3cde1c20d2c9b6787b346fe43612738f6ec4..a070e69bb49cd40da45ddb46b857a7290a2cde76 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1841,8 +1841,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus) if (!hv_dev) continue; - if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY) - set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node); + if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY && + hv_dev->desc.virtual_numa_node < num_possible_nodes()) + /* + * The kernel may boot with some NUMA nodes offline + * (e.g. in a KDUMP kernel) or with NUMA disabled via + * "numa=off". In those cases, adjust the host provided + * NUMA node to a valid NUMA node used by the kernel. 
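/*
 * Minimal sketch of the node sanitization used below (hedged;
 * demo_pick_node() is illustrative only): reject hints beyond the
 * possible range, then let numa_map_to_online_node() fold an offline
 * node onto one the running kernel actually uses.
 */
#include <linux/numa.h>
#include <linux/nodemask.h>

static int demo_pick_node(unsigned int hint)
{
	if (hint >= num_possible_nodes())
		return NUMA_NO_NODE;

	return numa_map_to_online_node(hint);
}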
+ */ + set_dev_node(&dev->dev, + numa_map_to_online_node( + hv_dev->desc.virtual_numa_node)); put_pcichild(hv_dev); } diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index ed13e81cd691d461708f2d7b798037fee15f8449..2dc6890dbcaa263471714d7989b4185b90e6b738 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -573,6 +573,8 @@ static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) { struct pci_bridge_emul *bridge = &port->bridge; + u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP); + u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS); bridge->conf.vendor = PCI_VENDOR_ID_MARVELL; bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; @@ -585,6 +587,12 @@ static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32; } + /* + * Older mvebu hardware provides PCIe Capability structure only in + * version 1. New hardware provides it in version 2. + */ + bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver); + bridge->has_pcie = true; bridge->data = port; bridge->ops = &mvebu_pci_bridge_emul_ops; diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index c33b385ac918e4eeb7805aaf9007b42be4f58398..e1c2daa50b4987d5668cb65fb15d5e6330022445 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -481,28 +481,27 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) } static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, - struct resource_entry *entry, - u8 *ib_reg_mask) + struct of_pci_range *range, u8 *ib_reg_mask) { void __iomem *cfg_base = port->cfg_base; struct device *dev = port->dev; void *bar_addr; u32 pim_reg; - u64 cpu_addr = entry->res->start; - u64 pci_addr = cpu_addr - entry->offset; - u64 size = resource_size(entry->res); + u64 cpu_addr = range->cpu_addr; + u64 pci_addr = range->pci_addr; + u64 size = range->size; u64 mask = ~(size - 1) | EN_REG; u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64; u32 bar_low; int region; - region = xgene_pcie_select_ib_reg(ib_reg_mask, size); + region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); if (region < 0) { dev_warn(dev, "invalid pcie dma-range config\n"); return; } - if (entry->res->flags & IORESOURCE_PREFETCH) + if (range->flags & IORESOURCE_PREFETCH) flags |= PCI_BASE_ADDRESS_MEM_PREFETCH; bar_low = pcie_bar_low_val((u32)cpu_addr, flags); @@ -533,13 +532,25 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port) { - struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port); - struct resource_entry *entry; + struct device_node *np = port->node; + struct of_pci_range range; + struct of_pci_range_parser parser; + struct device *dev = port->dev; u8 ib_reg_mask = 0; - resource_list_for_each_entry(entry, &bridge->dma_ranges) - xgene_pcie_setup_ib_reg(port, entry, &ib_reg_mask); + if (of_pci_dma_range_parser_init(&parser, np)) { + dev_err(dev, "missing dma-ranges property\n"); + return -EINVAL; + } + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.cpu_addr + range.size - 1; + dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); + } return 0; } diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c 
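The pci-hyperv hunk above encodes a rule that applies to any driver consuming firmware-provided NUMA hints: the host's node number is only meaningful if the kernel actually brought that node up, so it must be range-checked against num_possible_nodes() and mapped with numa_map_to_online_node() before set_dev_node(). A condensed sketch (assign_fw_node() is a hypothetical helper):

#include <linux/device.h>
#include <linux/nodemask.h>
#include <linux/numa.h>

static void assign_fw_node(struct device *dev, u32 fw_node)
{
	/* e.g. a kdump kernel booted with numa=off has only node 0 online */
	if (fw_node < num_possible_nodes())
		set_dev_node(dev, numa_map_to_online_node(fw_node));
	/* out-of-range hints are ignored, keeping the default node */
}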
b/drivers/pci/endpoint/functions/pci-epf-test.c index d41570715dc7f52a4812af0d15208fd3b6bb48a6..262b2c4c70c9f0befe25043a633c02318fbb5941 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -285,7 +285,17 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test) if (ret) dev_err(dev, "Data transfer failed\n"); } else { - memcpy(dst_addr, src_addr, reg->size); + void *buf; + + buf = kzalloc(reg->size, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_map_addr; + } + + memcpy_fromio(buf, src_addr, reg->size); + memcpy_toio(dst_addr, buf, reg->size); + kfree(buf); } ktime_get_ts64(&end); pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma); @@ -441,7 +451,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test) if (!epf_test->dma_supported) { dev_err(dev, "Cannot transfer data using DMA\n"); ret = -EINVAL; - goto err_map_addr; + goto err_dma_map; } src_phys_addr = dma_map_single(dma_dev, buf, reg->size, diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 5b17becc17c87a85ec896109605a1e31209d0d6b..6209e1887da6a99313f980a16e0d87bf822f2dc2 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -72,6 +72,8 @@ extern int pciehp_poll_time; * @reset_lock: prevents access to the Data Link Layer Link Active bit in the * Link Status register and to the Presence Detect State bit in the Slot * Status register during a slot reset which may cause them to flap + * @depth: Number of additional hotplug ports in the path to the root bus, + * used as lock subclass for @reset_lock * @ist_running: flag to keep user request waiting while IRQ thread is running * @request_result: result of last user request submitted to the IRQ thread * @requester: wait queue to wake up on completion of user request, @@ -103,6 +105,7 @@ struct controller { struct hotplug_slot hotplug_slot; /* hotplug core interface */ struct rw_semaphore reset_lock; + unsigned int depth; unsigned int ist_running; int request_result; wait_queue_head_t requester; diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index ad3393930ecb4d91dc21ef7a5abf575662c76e7c..e7fe4b42f0394e360b20dbefd886a1509541bc29 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -166,7 +166,7 @@ static void pciehp_check_presence(struct controller *ctrl) { int occupied; - down_read(&ctrl->reset_lock); + down_read_nested(&ctrl->reset_lock, ctrl->depth); mutex_lock(&ctrl->state_lock); occupied = pciehp_card_present_or_link_active(ctrl); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index b0a132265a3919e1beec88bfb34f20a3eee99f60..a2639135f5d31376c14fc6d940f243def374d825 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -93,6 +93,8 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout) if (slot_status & PCI_EXP_SLTSTA_CC) { pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_CC); + ctrl->cmd_busy = 0; + smp_mb(); return 1; } msleep(10); @@ -578,7 +580,7 @@ static void pciehp_ignore_dpc_link_change(struct controller *ctrl, * the corresponding link change may have been ignored above. * Synthesize it to ensure that it is acted on. 
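The pci-epf-test change above replaces a plain memcpy() between two PCI address-space mappings, which ordinary loads and stores cannot legally perform on every architecture, with a bounce buffer and the __iomem-safe helpers. A self-contained sketch of the same pattern (copy_window() is hypothetical):

#include <linux/io.h>
#include <linux/slab.h>

static int copy_window(void __iomem *dst, const void __iomem *src, size_t size)
{
	void *buf = kzalloc(size, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	memcpy_fromio(buf, src, size);	/* read side: mapped BAR -> RAM */
	memcpy_toio(dst, buf, size);	/* write side: RAM -> mapped BAR */
	kfree(buf);
	return 0;
}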
*/ - down_read(&ctrl->reset_lock); + down_read_nested(&ctrl->reset_lock, ctrl->depth); if (!pciehp_check_link_active(ctrl)) pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC); up_read(&ctrl->reset_lock); @@ -637,6 +639,8 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) */ if (ctrl->power_fault_detected) status &= ~PCI_EXP_SLTSTA_PFD; + else if (status & PCI_EXP_SLTSTA_PFD) + ctrl->power_fault_detected = true; events |= status; if (!events) { @@ -646,7 +650,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id) } if (status) { - pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events); + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status); /* * In MSI mode, all event bits must be zero before the port @@ -732,8 +736,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) } /* Check Power Fault Detected */ - if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) { - ctrl->power_fault_detected = 1; + if (events & PCI_EXP_SLTSTA_PFD) { ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl)); pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_ATTN_IND_ON); @@ -753,7 +756,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) * Disable requests have higher priority than Presence Detect Changed * or Data Link Layer State Changed events. */ - down_read(&ctrl->reset_lock); + down_read_nested(&ctrl->reset_lock, ctrl->depth); if (events & DISABLE_SLOT) { if (!rpdev || (rpdev && !test_and_set_bit(0, &rpdev->slot_being_removed_rescanned))) @@ -936,7 +939,7 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe) if (probe) return 0; - down_write(&ctrl->reset_lock); + down_write_nested(&ctrl->reset_lock, ctrl->depth); if (!ATTN_BUTTN(ctrl)) { ctrl_mask |= PCI_EXP_SLTCTL_PDCE; @@ -992,6 +995,20 @@ static inline void dbg_ctrl(struct controller *ctrl) #define FLAG(x, y) (((x) & (y)) ? '+' : '-') +static inline int pcie_hotplug_depth(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + int depth = 0; + + while (bus->parent) { + bus = bus->parent; + if (bus->self && bus->self->is_hotplug_bridge) + depth++; + } + + return depth; +} + struct controller *pcie_init(struct pcie_device *dev) { struct controller *ctrl; @@ -1005,6 +1022,7 @@ struct controller *pcie_init(struct pcie_device *dev) return NULL; ctrl->pcie = dev; + ctrl->depth = pcie_hotplug_depth(dev->port); pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap); if (pdev->hotplug_user_indicators) @@ -1098,6 +1116,8 @@ static void quirk_cmd_compl(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110, + PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401, diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index fcec39d6924e614532ca372860ab8395d64a1812..033c134e0bbd7c4265ca5e06facba9638cb84853 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1302,19 +1302,24 @@ EXPORT_SYMBOL(pci_free_irq_vectors); /** * pci_irq_vector - return Linux IRQ number of a device vector - * @dev: PCI device to operate on - * @nr: device-relative interrupt vector index (0-based). 
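The new @depth field in pciehp exists for lockdep's benefit: every controller's reset_lock belongs to the same lock class, so when hotplug ports are nested (a hotplug bridge below another hotplug bridge), taking both locks looks like recursive locking even though the hierarchy makes it safe. Using the number of hotplug bridges above the port as the subclass for down_read_nested()/down_write_nested() gives each level its own lockdep identity. An illustrative fragment, assuming a parent controller at depth 0 and its child at depth 1 (not a literal call sequence from the driver):

	/* lockdep sees two subclasses, not one lock taken recursively */
	down_write_nested(&parent_ctrl->reset_lock, parent_ctrl->depth); /* 0 */
	down_write_nested(&child_ctrl->reset_lock, child_ctrl->depth);   /* 1 */
	/* ... reset/drain the slots ... */
	up_write(&child_ctrl->reset_lock);
	up_write(&parent_ctrl->reset_lock);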
+ * @dev: PCI device to operate on + * @nr: Interrupt vector index (0-based) + * + * @nr has the following meanings depending on the interrupt mode: + * MSI-X: The index in the MSI-X vector table + * MSI: The index of the enabled MSI vectors + * INTx: Must be 0 + * + * Return: The Linux interrupt number or -EINVAL if @nr is out of range. */ int pci_irq_vector(struct pci_dev *dev, unsigned int nr) { if (dev->msix_enabled) { struct msi_desc *entry; - int i = 0; for_each_pci_msi_entry(entry, dev) { - if (i == nr) + if (entry->msi_attrib.entry_nr == nr) return entry->irq; - i++; } WARN_ON_ONCE(1); return -EINVAL; @@ -1338,17 +1343,22 @@ EXPORT_SYMBOL(pci_irq_vector); * pci_irq_get_affinity - return the affinity of a particular MSI vector * @dev: PCI device to operate on * @nr: device-relative interrupt vector index (0-based). + * + * @nr has the following meanings depending on the interrupt mode: + * MSI-X: The index in the MSI-X vector table + * MSI: The index of the enabled MSI vectors + * INTx: Must be 0 + * + * Return: A cpumask pointer or NULL if @nr is out of range */ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) { if (dev->msix_enabled) { struct msi_desc *entry; - int i = 0; for_each_pci_msi_entry(entry, dev) { - if (i == nr) + if (entry->msi_attrib.entry_nr == nr) return &entry->affinity->mask; - i++; } WARN_ON_ONCE(1); return NULL; } diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c index db97cddfc85e1c7e731bbb0f667dc2427b323f6c..37504c2cce9b89165da60b384b895e19ab89aa56 100644 --- a/drivers/pci/pci-bridge-emul.c +++ b/drivers/pci/pci-bridge-emul.c @@ -139,8 +139,13 @@ struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = { .ro = GENMASK(7, 0), }, + /* + * If expansion ROM is unsupported then ROM Base Address register must + * be implemented as a read-only register that returns 0 when read, same + * as for unused Base Address registers. + */ [PCI_ROM_ADDRESS1 / 4] = { - .rw = GENMASK(31, 11) | BIT(0), + .ro = ~0, }, /* @@ -171,41 +176,55 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = [PCI_CAP_LIST_ID / 4] = { /* * Capability ID, Next Capability Pointer and - * Capabilities register are all read-only. + * bits [14:0] of Capabilities register are all read-only. + * Bit 15 of Capabilities register is reserved. */ - .ro = ~0, + .ro = GENMASK(30, 0), }, [PCI_EXP_DEVCAP / 4] = { - .ro = ~0, + /* + * Bits [31:29] and [17:16] are reserved. + * Bits [27:18] are reserved for non-upstream ports. + * Bits 28 and [14:6] are reserved for non-endpoint devices. + * Other bits are read-only. + */ + .ro = BIT(15) | GENMASK(5, 0), }, [PCI_EXP_DEVCTL / 4] = { - /* Device control register is RW */ - .rw = GENMASK(15, 0), + /* + * Device control register is RW, except bit 15 which is + * reserved for non-endpoints or non-PCIe-to-PCI/X bridges. + */ + .rw = GENMASK(14, 0), /* * Device status register has bits 6 and [3:0] W1C, [5:4] RO, - * the rest is reserved + * the rest is reserved. Also bit 6 is reserved for non-upstream + * ports. */ - .w1c = (BIT(6) | GENMASK(3, 0)) << 16, + .w1c = GENMASK(3, 0) << 16, .ro = GENMASK(5, 4) << 16, }, [PCI_EXP_LNKCAP / 4] = { - /* All bits are RO, except bit 23 which is reserved */ - .ro = lower_32_bits(~BIT(23)), + /* + * All bits are RO, except bit 23 which is reserved and + * bit 18 which is reserved for non-upstream ports.
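Note what the pci_irq_vector()/pci_irq_get_affinity() rework changes for callers: with MSI-X, @nr now names the MSI-X table entry rather than the position in the allocation order, which only differs once entries are allocated sparsely. Typical use stays the same; a sketch with hypothetical my_handler/priv:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_request_queue_irq(struct pci_dev *pdev, void *priv)
{
	int irq = pci_irq_vector(pdev, 3);	/* MSI-X table entry 3 */

	if (irq < 0)
		return irq;	/* -EINVAL: no vector at that entry */

	return request_irq(irq, my_handler, 0, "my-device", priv);
}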
+ */ + .ro = lower_32_bits(~(BIT(23) | PCI_EXP_LNKCAP_CLKPM)), }, [PCI_EXP_LNKCTL / 4] = { /* * Link control has bits [15:14], [11:3] and [1:0] RW, the - * rest is reserved. + * rest is reserved. Bit 8 is reserved for non-upstream ports. * * Link status has bits [13:0] RO, and bits [15:14] * W1C. */ - .rw = GENMASK(15, 14) | GENMASK(11, 3) | GENMASK(1, 0), + .rw = GENMASK(15, 14) | GENMASK(11, 9) | GENMASK(7, 3) | GENMASK(1, 0), .ro = GENMASK(13, 0) << 16, .w1c = GENMASK(15, 14) << 16, }, @@ -277,11 +296,9 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge, if (bridge->has_pcie) { bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START; + bridge->conf.status |= cpu_to_le16(PCI_STATUS_CAP_LIST); bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP; - /* Set PCIe v2, root port, slot support */ - bridge->pcie_conf.cap = - cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4 | 2 | - PCI_EXP_FLAGS_SLOT); + bridge->pcie_conf.cap |= cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4); bridge->pcie_cap_regs_behavior = kmemdup(pcie_cap_regs_behavior, sizeof(pcie_cap_regs_behavior), @@ -290,6 +307,27 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge, kfree(bridge->pci_regs_behavior); return -ENOMEM; } + /* These bits are applicable only for PCI and reserved on PCIe */ + bridge->pci_regs_behavior[PCI_CACHE_LINE_SIZE / 4].ro &= + ~GENMASK(15, 8); + bridge->pci_regs_behavior[PCI_COMMAND / 4].ro &= + ~((PCI_COMMAND_SPECIAL | PCI_COMMAND_INVALIDATE | + PCI_COMMAND_VGA_PALETTE | PCI_COMMAND_WAIT | + PCI_COMMAND_FAST_BACK) | + (PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK | + PCI_STATUS_DEVSEL_MASK) << 16); + bridge->pci_regs_behavior[PCI_PRIMARY_BUS / 4].ro &= + ~GENMASK(31, 24); + bridge->pci_regs_behavior[PCI_IO_BASE / 4].ro &= + ~((PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK | + PCI_STATUS_DEVSEL_MASK) << 16); + bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].rw &= + ~((PCI_BRIDGE_CTL_MASTER_ABORT | + BIT(8) | BIT(9) | BIT(11)) << 16); + bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].ro &= + ~((PCI_BRIDGE_CTL_FAST_BACK) << 16); + bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].w1c &= + ~(BIT(10) << 16); } if (flags & PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR) { diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 9a4e34814005ad4762c35d565756f9e247dc889f..b85bb3dc421680f6b7710d770feaa1cb8dabedc0 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -459,6 +459,15 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info); void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); #endif /* CONFIG_PCIEAER */ +#ifdef CONFIG_PCIEPORTBUS +/* Cached RCEC Endpoint Association */ +struct rcec_ea { + u8 nextbusn; + u8 lastbusn; + u32 bitmap; +}; +#endif + #ifdef CONFIG_PCIE_DPC void pci_save_dpc_state(struct pci_dev *dev); void pci_restore_dpc_state(struct pci_dev *dev); @@ -473,6 +482,22 @@ static inline void pci_dpc_init(struct pci_dev *pdev) {} static inline bool pci_dpc_recovered(struct pci_dev *pdev) { return false; } #endif +#ifdef CONFIG_PCIEPORTBUS +void pci_rcec_init(struct pci_dev *dev); +void pci_rcec_exit(struct pci_dev *dev); +void pcie_link_rcec(struct pci_dev *rcec); +void pcie_walk_rcec(struct pci_dev *rcec, + int (*cb)(struct pci_dev *, void *), + void *userdata); +#else +static inline void pci_rcec_init(struct pci_dev *dev) {} +static inline void pci_rcec_exit(struct pci_dev *dev) {} +static inline void pcie_link_rcec(struct pci_dev *rcec) {} +static inline void pcie_walk_rcec(struct pci_dev *rcec, + int (*cb)(struct pci_dev *, void *), + void *userdata) {} +#endif + #ifdef 
CONFIG_PCI_ATS /* Address Translation Service */ void pci_ats_init(struct pci_dev *dev); @@ -568,8 +593,8 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) /* PCI error reporting and recovery */ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, - pci_channel_state_t state, - pci_ers_result_t (*reset_link)(struct pci_dev *pdev)); + pci_channel_state_t state, + pci_ers_result_t (*reset_subordinates)(struct pci_dev *pdev)); bool pcie_wait_for_link(struct pci_dev *pdev, bool active); #ifdef CONFIG_PCIEASPM diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index 9a7085668466fafffbb3ae7bc84084c0e212d6be..b2980db88cc0926648eec25e82063280b1575953 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile @@ -2,7 +2,7 @@ # # Makefile for PCI Express features and port driver -pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o +pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o rcec.o obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 65dff5f3457ac0d302945c775408f9e0114c6f8b..77b0f2c45bc0e06004984b84f529b261e076ab27 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -300,7 +300,8 @@ int pci_aer_raw_clear_status(struct pci_dev *dev) return -EIO; port_type = pci_pcie_type(dev); - if (port_type == PCI_EXP_TYPE_ROOT_PORT) { + if (port_type == PCI_EXP_TYPE_ROOT_PORT || + port_type == PCI_EXP_TYPE_RC_EC) { pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, &status); pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, status); } @@ -595,7 +596,8 @@ static umode_t aer_stats_attrs_are_visible(struct kobject *kobj, if ((a == &dev_attr_aer_rootport_total_err_cor.attr || a == &dev_attr_aer_rootport_total_err_fatal.attr || a == &dev_attr_aer_rootport_total_err_nonfatal.attr) && - pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) + ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) && + (pci_pcie_type(pdev) != PCI_EXP_TYPE_RC_EC))) return 0; return a->mode; @@ -916,7 +918,10 @@ static bool find_source_device(struct pci_dev *parent, if (result) return true; - pci_walk_bus(parent->subordinate, find_device_iter, e_info); + if (pci_pcie_type(parent) == PCI_EXP_TYPE_RC_EC) + pcie_walk_rcec(parent, find_device_iter, e_info); + else + pci_walk_bus(parent->subordinate, find_device_iter, e_info); if (!e_info->error_dev_num) { pci_info(parent, "can't find device of ID%04x\n", e_info->id); @@ -1034,6 +1039,7 @@ EXPORT_SYMBOL_GPL(aer_recover_queue); */ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) { + int type = pci_pcie_type(dev); int aer = dev->aer_cap; int temp; @@ -1052,8 +1058,9 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) &info->mask); if (!(info->status & ~info->mask)) return 0; - } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || - pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM || + } else if (type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC || + type == PCI_EXP_TYPE_DOWNSTREAM || info->severity == AER_NONFATAL) { /* Link is still healthy for IO reads */ @@ -1205,6 +1212,7 @@ static int set_device_error_reporting(struct pci_dev *dev, void *data) int type = pci_pcie_type(dev); if ((type == PCI_EXP_TYPE_ROOT_PORT) || + (type == PCI_EXP_TYPE_RC_EC) || (type == PCI_EXP_TYPE_UPSTREAM) || (type == PCI_EXP_TYPE_DOWNSTREAM)) { if (enable) @@ -1229,9 +1237,12 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev, { set_device_error_reporting(dev, &enable); - if (!dev->subordinate) - 
return; - pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable); + if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC) + pcie_walk_rcec(dev, set_device_error_reporting, &enable); + else if (dev->subordinate) + pci_walk_bus(dev->subordinate, set_device_error_reporting, + &enable); + } /** @@ -1329,6 +1340,11 @@ static int aer_probe(struct pcie_device *dev) struct device *device = &dev->device; struct pci_dev *port = dev->port; + /* Limit to Root Ports or Root Complex Event Collectors */ + if ((pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC) && + (pci_pcie_type(port) != PCI_EXP_TYPE_ROOT_PORT)) + return -ENODEV; + rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL); if (!rpc) return -ENOMEM; @@ -1350,41 +1366,74 @@ static int aer_probe(struct pcie_device *dev) } /** - * aer_root_reset - reset link on Root Port - * @dev: pointer to Root Port's pci_dev data structure + * aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP + * @dev: pointer to Root Port, RCEC, or RCiEP * - * Invoked by Port Bus driver when performing link reset at Root Port. + * Invoked by Port Bus driver when performing reset. */ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) { - int aer = dev->aer_cap; + int type = pci_pcie_type(dev); + struct pci_dev *root; + int aer; + struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); u32 reg32; int rc; + /* + * Only Root Ports and RCECs have AER Root Command and Root Status + * registers. If "dev" is an RCiEP, the relevant registers are in + * the RCEC. + */ + if (type == PCI_EXP_TYPE_RC_END) + root = dev->rcec; + else + root = dev; + + /* + * If the platform retained control of AER, an RCiEP may not have + * an RCEC visible to us, so dev->rcec ("root") may be NULL. In + * that case, firmware is responsible for these registers. + */ + aer = root ? 
root->aer_cap : 0; - /* Disable Root's interrupt in response to error messages */ - pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, ®32); - reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; - pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32); + if ((host->native_aer || pcie_ports_native) && aer) { + /* Disable Root's interrupt in response to error messages */ + pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, ®32); + reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32); + } - rc = pci_bus_error_reset(dev); - pci_info(dev, "Root Port link has been reset\n"); + if (type == PCI_EXP_TYPE_RC_EC || type == PCI_EXP_TYPE_RC_END) { + if (pcie_has_flr(dev)) { + rc = pcie_flr(dev); + pci_info(dev, "has been reset (%d)\n", rc); + } else { + pci_info(dev, "not reset (no FLR support)\n"); + rc = -ENOTTY; + } + } else { + rc = pci_bus_error_reset(dev); + pci_info(dev, "Root Port link has been reset (%d)\n", rc); + } - /* Clear Root Error Status */ - pci_read_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, ®32); - pci_write_config_dword(dev, aer + PCI_ERR_ROOT_STATUS, reg32); + if ((host->native_aer || pcie_ports_native) && aer) { + /* Clear Root Error Status */ + pci_read_config_dword(root, aer + PCI_ERR_ROOT_STATUS, ®32); + pci_write_config_dword(root, aer + PCI_ERR_ROOT_STATUS, reg32); - /* Enable Root Port's interrupt in response to error messages */ - pci_read_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, ®32); - reg32 |= ROOT_PORT_INTR_ON_MESG_MASK; - pci_write_config_dword(dev, aer + PCI_ERR_ROOT_COMMAND, reg32); + /* Enable Root Port's interrupt in response to error messages */ + pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, ®32); + reg32 |= ROOT_PORT_INTR_ON_MESG_MASK; + pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32); + } return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } static struct pcie_port_service_driver aerdriver = { .name = "aer", - .port_type = PCI_EXP_TYPE_ROOT_PORT, + .port_type = PCIE_ANY_PORT, .service = PCIE_PORT_SERVICE_AER, .probe = aer_probe, diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c index 4dc1d95f085bc892edb5d1f769f7ce0394782276..ba11a0eed2be06501cdd4a0a802220b6ebac9b5d 100644 --- a/drivers/pci/pcie/aer_inject.c +++ b/drivers/pci/pcie/aer_inject.c @@ -341,8 +341,11 @@ static int aer_inject(struct aer_error_inj *einj) if (!dev) return -ENODEV; rpdev = pcie_find_root_port(dev); + /* If Root Port not found, try to find an RCEC */ + if (!rpdev) + rpdev = dev->rcec; if (!rpdev) { - pci_err(dev, "Root port not found\n"); + pci_err(dev, "Neither Root Port nor RCEC found\n"); ret = -ENODEV; goto out_put; } diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index c543f419d8f9e8b457ece026dec5f9fafb887cff..510f31f0ef6d04c7dc18fe8fd2374a9af820e745 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -146,38 +146,71 @@ static int report_resume(struct pci_dev *dev, void *data) return 0; } +/** + * pci_walk_bridge - walk bridges potentially AER affected + * @bridge: bridge which may be a Port, an RCEC, or an RCiEP + * @cb: callback to be called for each device found + * @userdata: arbitrary pointer to be passed to callback + * + * If the device provided is a bridge, walk the subordinate bus, including + * any bridged devices on buses under this bus. Call the provided callback + * on each device found. + * + * If the device provided has no subordinate bus, e.g., an RCEC or RCiEP, + * call the callback on the device itself. 
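Restating the register-location logic of aer_root_reset() outside the reset flow (this mirrors the code above; it is not a new helper in the patch): only Root Ports and RCECs carry the AER Root Command/Status registers, so for an RCiEP the lookup has to go through dev->rcec, which may legitimately be NULL when firmware retained AER ownership:

static struct pci_dev *aer_root_device(struct pci_dev *dev)	/* sketch */
{
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
		return dev->rcec;	/* NULL if no RCEC is visible to the OS */

	return dev;		/* Root Port or RCEC: registers are local */
}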
+ */ +static void pci_walk_bridge(struct pci_dev *bridge, + int (*cb)(struct pci_dev *, void *), + void *userdata) +{ + if (bridge->subordinate) + pci_walk_bus(bridge->subordinate, cb, userdata); + else + cb(bridge, userdata); +} + pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, - pci_channel_state_t state, - pci_ers_result_t (*reset_link)(struct pci_dev *pdev)) + pci_channel_state_t state, + pci_ers_result_t (*reset_subordinates)(struct pci_dev *pdev)) { + int type = pci_pcie_type(dev); + struct pci_dev *bridge; pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER; - struct pci_bus *bus; + struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); /* - * Error recovery runs on all subordinates of the first downstream port. - * If the downstream port detected the error, it is cleared at the end. + * If the error was detected by a Root Port, Downstream Port, RCEC, + * or RCiEP, recovery runs on the device itself. For Ports, that + * also includes any subordinate devices. + * + * If it was detected by another device (Endpoint, etc), recovery + * runs on the device and anything else under the same Port, i.e., + * everything under "bridge". */ - if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || - pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)) - dev = dev->bus->self; - bus = dev->subordinate; - - pci_dbg(dev, "broadcast error_detected message\n"); + if (type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_DOWNSTREAM || + type == PCI_EXP_TYPE_RC_EC || + type == PCI_EXP_TYPE_RC_END) + bridge = dev; + else + bridge = pci_upstream_bridge(dev); + + pci_dbg(bridge, "broadcast error_detected message\n"); if (state == pci_channel_io_frozen) { - pci_walk_bus(bus, report_frozen_detected, &status); - status = reset_link(dev); + pci_walk_bridge(bridge, report_frozen_detected, &status); + status = reset_subordinates(bridge); if (status != PCI_ERS_RESULT_RECOVERED) { - pci_warn(dev, "link reset failed\n"); + pci_warn(bridge, "subordinate device reset failed\n"); goto failed; } } else { - pci_walk_bus(bus, report_normal_detected, &status); + pci_walk_bridge(bridge, report_normal_detected, &status); } if (status == PCI_ERS_RESULT_CAN_RECOVER) { status = PCI_ERS_RESULT_RECOVERED; - pci_dbg(dev, "broadcast mmio_enabled message\n"); - pci_walk_bus(bus, report_mmio_enabled, &status); + pci_dbg(bridge, "broadcast mmio_enabled message\n"); + pci_walk_bridge(bridge, report_mmio_enabled, &status); } if (status == PCI_ERS_RESULT_NEED_RESET) { @@ -187,27 +220,35 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, * drivers' slot_reset callbacks? */ status = PCI_ERS_RESULT_RECOVERED; - pci_dbg(dev, "broadcast slot_reset message\n"); - pci_walk_bus(bus, report_slot_reset, &status); + pci_dbg(bridge, "broadcast slot_reset message\n"); + pci_walk_bridge(bridge, report_slot_reset, &status); } if (status != PCI_ERS_RESULT_RECOVERED) goto failed; - pci_dbg(dev, "broadcast resume message\n"); - pci_walk_bus(bus, report_resume, &status); + pci_dbg(bridge, "broadcast resume message\n"); + pci_walk_bridge(bridge, report_resume, &status); - if (pcie_aer_is_native(dev)) - pcie_clear_device_status(dev); - pci_aer_clear_nonfatal_status(dev); - pci_info(dev, "device recovery successful\n"); + /* + * If we have native control of AER, clear error status in the Root + * Port or Downstream Port that signaled the error. If the + * platform retained control of AER, it is responsible for clearing + * this status. In that case, the signaling device may not even be + * visible to the OS. 
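pci_walk_bridge() deliberately keeps the pci_walk_bus() callback contract, so the existing report_*() helpers work unchanged whether the error source is a Port with a subordinate bus or a bus-less RCEC/RCiEP. A minimal callback sketch (count_fn is hypothetical):

static int count_fn(struct pci_dev *dev, void *data)
{
	(*(unsigned int *)data)++;
	pci_dbg(dev, "visited during error recovery\n");
	return 0;	/* returning non-zero stops a pci_walk_bus() walk */
}

	/* usage: */
	unsigned int n = 0;

	pci_walk_bridge(bridge, count_fn, &n);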
+ */ + if (host->native_aer || pcie_ports_native) { + pcie_clear_device_status(bridge); + pci_aer_clear_nonfatal_status(bridge); + } + pci_info(bridge, "device recovery successful\n"); return status; failed: - pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT); + pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT); /* TODO: Should kernel panic here? */ - pci_info(dev, "device recovery failed\n"); + pci_info(bridge, "device recovery failed\n"); return status; } diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 6a32970bb7311dd4c83363778bc112ac84a2eae4..3fc08488d65f1f154692d06881b232451bc12f85 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -310,7 +310,10 @@ static int pcie_pme_can_wakeup(struct pci_dev *dev, void *ign) static void pcie_pme_mark_devices(struct pci_dev *port) { pcie_pme_can_wakeup(port, NULL); - if (port->subordinate) + + if (pci_pcie_type(port) == PCI_EXP_TYPE_RC_EC) + pcie_walk_rcec(port, pcie_pme_can_wakeup, NULL); + else if (port->subordinate) pci_walk_bus(port->subordinate, pcie_pme_can_wakeup, NULL); } @@ -320,10 +323,16 @@ static void pcie_pme_mark_devices(struct pci_dev *port) */ static int pcie_pme_probe(struct pcie_device *srv) { - struct pci_dev *port; + struct pci_dev *port = srv->port; struct pcie_pme_service_data *data; + int type = pci_pcie_type(port); int ret; + /* Limit to Root Ports or Root Complex Event Collectors */ + if (type != PCI_EXP_TYPE_RC_EC && + type != PCI_EXP_TYPE_ROOT_PORT) + return -ENODEV; + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; @@ -333,7 +342,6 @@ static int pcie_pme_probe(struct pcie_device *srv) data->srv = srv; set_service_data(srv, data); - port = srv->port; pcie_pme_interrupt_enable(port, false); pcie_clear_root_pme_status(port); @@ -445,7 +453,7 @@ static void pcie_pme_remove(struct pcie_device *srv) static struct pcie_port_service_driver pcie_pme_driver = { .name = "pcie_pme", - .port_type = PCI_EXP_TYPE_ROOT_PORT, + .port_type = PCIE_ANY_PORT, .service = PCIE_PORT_SERVICE_PME, .probe = pcie_pme_probe, diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 3779b264dbec39e322e54b96e4525120eb951334..3ee63968deaa5880dc700feb3d17b338a8557e68 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -233,12 +233,9 @@ static int get_port_device_capability(struct pci_dev *dev) } #endif - /* - * Root ports are capable of generating PME too. Root Complex - * Event Collectors can also generate PMEs, but we don't handle - * those yet. 
- */ - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT && + /* Root Ports and Root Complex Event Collectors may generate PMEs */ + if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC) && (pcie_ports_native || host->native_pme)) { services |= PCIE_PORT_SERVICE_PME; diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index d4559cf88f79de4e5168c745d8173e3feaef88a7..8bd4992a4f3286a19b8cc0e20bcf45ab9839ae89 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -101,14 +101,19 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { static int pcie_portdrv_probe(struct pci_dev *dev, const struct pci_device_id *id) { + int type = pci_pcie_type(dev); int status; if (!pci_is_pcie(dev) || - ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) && - (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) && - (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) + ((type != PCI_EXP_TYPE_ROOT_PORT) && + (type != PCI_EXP_TYPE_UPSTREAM) && + (type != PCI_EXP_TYPE_DOWNSTREAM) && + (type != PCI_EXP_TYPE_RC_EC))) return -ENODEV; + if (type == PCI_EXP_TYPE_RC_EC) + pcie_link_rcec(dev); + status = pcie_port_device_register(dev); if (status) return status; @@ -195,6 +200,8 @@ static const struct pci_device_id port_pci_ids[] = { { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0) }, /* subtractive decode PCI-to-PCI bridge, class type is 060401h */ { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x01), ~0) }, + /* handle any Root Complex Event Collector */ + { PCI_DEVICE_CLASS(((PCI_CLASS_SYSTEM_RCEC << 8) | 0x00), ~0) }, { }, }; diff --git a/drivers/pci/pcie/rcec.c b/drivers/pci/pcie/rcec.c new file mode 100644 index 0000000000000000000000000000000000000000..d0bcd141ac9c657431f11ab54414df186ea6f388 --- /dev/null +++ b/drivers/pci/pcie/rcec.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Root Complex Event Collector Support + * + * Authors: + * Sean V Kelley + * Qiuxu Zhuo + * + * Copyright (C) 2020 Intel Corp. 
+ */ + +#include +#include +#include + +#include "../pci.h" + +struct walk_rcec_data { + struct pci_dev *rcec; + int (*user_callback)(struct pci_dev *dev, void *data); + void *user_data; +}; + +static bool rcec_assoc_rciep(struct pci_dev *rcec, struct pci_dev *rciep) +{ + unsigned long bitmap = rcec->rcec_ea->bitmap; + unsigned int devn; + + /* An RCiEP found on a different bus in range */ + if (rcec->bus->number != rciep->bus->number) + return true; + + /* Same bus, so check bitmap */ + for_each_set_bit(devn, &bitmap, 32) + if (devn == PCI_SLOT(rciep->devfn)) + return true; + + return false; +} + +static int link_rcec_helper(struct pci_dev *dev, void *data) +{ + struct walk_rcec_data *rcec_data = data; + struct pci_dev *rcec = rcec_data->rcec; + + if ((pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) && + rcec_assoc_rciep(rcec, dev)) { + dev->rcec = rcec; + pci_dbg(dev, "PME & error events signaled via %s\n", + pci_name(rcec)); + } + + return 0; +} + +static int walk_rcec_helper(struct pci_dev *dev, void *data) +{ + struct walk_rcec_data *rcec_data = data; + struct pci_dev *rcec = rcec_data->rcec; + + if ((pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) && + rcec_assoc_rciep(rcec, dev)) + rcec_data->user_callback(dev, rcec_data->user_data); + + return 0; +} + +static void walk_rcec(int (*cb)(struct pci_dev *dev, void *data), + void *userdata) +{ + struct walk_rcec_data *rcec_data = userdata; + struct pci_dev *rcec = rcec_data->rcec; + u8 nextbusn, lastbusn; + struct pci_bus *bus; + unsigned int bnr; + + if (!rcec->rcec_ea) + return; + + /* Walk own bus for bitmap based association */ + pci_walk_bus(rcec->bus, cb, rcec_data); + + nextbusn = rcec->rcec_ea->nextbusn; + lastbusn = rcec->rcec_ea->lastbusn; + + /* All RCiEP devices are on the same bus as the RCEC */ + if (nextbusn == 0xff && lastbusn == 0x00) + return; + + for (bnr = nextbusn; bnr <= lastbusn; bnr++) { + /* No association indicated (PCIe 5.0-1, 7.9.10.3) */ + if (bnr == rcec->bus->number) + continue; + + bus = pci_find_bus(pci_domain_nr(rcec->bus), bnr); + if (!bus) + continue; + + /* Find RCiEP devices on the given bus ranges */ + pci_walk_bus(bus, cb, rcec_data); + } +} + +/** + * pcie_link_rcec - Link RCiEP devices associated with RCEC. + * @rcec: RCEC whose RCiEP devices should be linked. + * + * Link the given RCEC to each RCiEP device found. + */ +void pcie_link_rcec(struct pci_dev *rcec) +{ + struct walk_rcec_data rcec_data; + + if (!rcec->rcec_ea) + return; + + rcec_data.rcec = rcec; + rcec_data.user_callback = NULL; + rcec_data.user_data = NULL; + + walk_rcec(link_rcec_helper, &rcec_data); +} + +/** + * pcie_walk_rcec - Walk RCiEP devices associating with RCEC and call callback. + * @rcec: RCEC whose RCiEP devices should be walked + * @cb: Callback to be called for each RCiEP device found + * @userdata: Arbitrary pointer to be passed to callback + * + * Walk the given RCEC. Call the callback on each RCiEP found. + * + * If @cb returns anything other than 0, break out. 
+ */ +void pcie_walk_rcec(struct pci_dev *rcec, int (*cb)(struct pci_dev *, void *), + void *userdata) +{ + struct walk_rcec_data rcec_data; + + if (!rcec->rcec_ea) + return; + + rcec_data.rcec = rcec; + rcec_data.user_callback = cb; + rcec_data.user_data = userdata; + + walk_rcec(walk_rcec_helper, &rcec_data); +} + +void pci_rcec_init(struct pci_dev *dev) +{ + struct rcec_ea *rcec_ea; + u32 rcec, hdr, busn; + u8 ver; + + /* Only for Root Complex Event Collectors */ + if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_EC) + return; + + rcec = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_RCEC); + if (!rcec) + return; + + rcec_ea = kzalloc(sizeof(*rcec_ea), GFP_KERNEL); + if (!rcec_ea) + return; + + pci_read_config_dword(dev, rcec + PCI_RCEC_RCIEP_BITMAP, + &rcec_ea->bitmap); + + /* Check whether RCEC BUSN register is present */ + pci_read_config_dword(dev, rcec, &hdr); + ver = PCI_EXT_CAP_VER(hdr); + if (ver >= PCI_RCEC_BUSN_REG_VER) { + pci_read_config_dword(dev, rcec + PCI_RCEC_BUSN, &busn); + rcec_ea->nextbusn = PCI_RCEC_BUSN_NEXT(busn); + rcec_ea->lastbusn = PCI_RCEC_BUSN_LAST(busn); + } else { + /* Avoid later ver check by setting nextbusn */ + rcec_ea->nextbusn = 0xff; + rcec_ea->lastbusn = 0x00; + } + + dev->rcec_ea = rcec_ea; +} + +void pci_rcec_exit(struct pci_dev *dev) +{ + kfree(dev->rcec_ea); + dev->rcec_ea = NULL; +} diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 6a54bdcd5631800d30ffb88430d3c50dd164be98..5a926c883a8918c4135cbb4cb1a7f9eadf72fea2 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -2222,6 +2222,7 @@ static void pci_configure_device(struct pci_dev *dev) static void pci_release_capabilities(struct pci_dev *dev) { pci_aer_exit(dev); + pci_rcec_exit(dev); pci_vpd_release(dev); pci_iov_release(dev); pci_free_cap_save_buffers(dev); @@ -2422,6 +2423,7 @@ static void pci_init_capabilities(struct pci_dev *dev) pci_ptm_init(dev); /* Precision Time Measurement */ pci_aer_init(dev); /* Advanced Error Reporting */ pci_dpc_init(dev); /* Downstream Port Containment */ + pci_rcec_init(dev); /* Root Complex Event Collector */ pcie_report_downtraining(dev); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ae8782e3fae5ec76a2833bfa962308d35c904a7b..3b5d896af2331a1ade17d7de171dc2a516f7c01f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1816,6 +1816,18 @@ static void quirk_alder_ioapic(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); #endif +static void quirk_no_msi(struct pci_dev *dev) +{ + pci_info(dev, "avoiding MSI to work around a hardware defect\n"); + dev->no_msi = 1; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4386, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4387, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4388, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4389, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438a, quirk_no_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438b, quirk_no_msi); + static void quirk_pcie_mch(struct pci_dev *pdev) { pdev->no_msi = 1; @@ -4104,6 +4116,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, quirk_dma_func1_alias); +/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c136 */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9125, + quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, quirk_dma_func1_alias); /* 
https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index e211e2619680ce7a1e841878503c532d3b63e7ea..f70197154a36256ce59dcaf31d22faa09cb5fcaf 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -666,18 +666,16 @@ static int pccardd(void *__skt) if (events || sysfs_events) continue; + set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) break; - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - /* make sure we are running */ - __set_current_state(TASK_RUNNING); - try_to_freeze(); } + /* make sure we are running before we exit */ + __set_current_state(TASK_RUNNING); /* shut down socket, if a device is still present */ if (skt->state & SOCKET_PRESENT) { diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 3b05760e69d6213ecb48ae659d27ce6ebdc5a00a..69a6e9a5d6d269be87c84ea1611443c8e3ea657f 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -690,6 +690,9 @@ static struct resource *__nonstatic_find_io_region(struct pcmcia_socket *s, unsigned long min = base; int ret; + if (!res) + return NULL; + data.mask = align - 1; data.offset = base & data.mask; data.map = &s_data->io_db; @@ -809,6 +812,9 @@ static struct resource *nonstatic_find_mem_region(u_long base, u_long num, unsigned long min, max; int ret, i, j; + if (!res) + return NULL; + low = low || !(s->features & SS_CAP_PAGE_REGS); data.mask = align - 1; diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 7f7bc0993670fbe78184c426d02b2ff4c43afa83..e09bbf3890c49a4ce66fda581f730c3f28b9faae 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -29,7 +29,7 @@ #define CNTL_OVER_MASK 0xFFFFFFFE #define CNTL_CSV_SHIFT 24 -#define CNTL_CSV_MASK (0xFF << CNTL_CSV_SHIFT) +#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT) #define EVENT_CYCLES_ID 0 #define EVENT_CYCLES_COUNTER 0 diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile index 7643c9f93e367a697b17df3efd7aeef9f5f5643f..22e384cdfd53eeb965bb4774e8a959bc3ac4dd70 100644 --- a/drivers/perf/hisilicon/Makefile +++ b/drivers/perf/hisilicon/Makefile @@ -1,4 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \ hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \ - hisi_uncore_pa_pmu.o + hisi_uncore_pa_pmu.o \ + hisi_uncore_l3t_pmu.o \ + hisi_uncore_lpddrc_pmu.o diff --git a/drivers/perf/hisilicon/hisi_uncore_l3t_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3t_pmu.c new file mode 100644 index 0000000000000000000000000000000000000000..f414dc1736aa64c8d92f821aa2c1f34c60581cda --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_l3t_pmu.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC L3T uncore Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Anurup M + * Shaokun Zhang + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. 
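The pccardd() reordering above is the canonical lost-wakeup fix for kernel threads: kthread_stop() sets the should-stop flag and then wakes the thread, so the thread must mark itself TASK_INTERRUPTIBLE before testing kthread_should_stop(); with the old order, a stop request arriving between the test and schedule() left the thread asleep and kthread_stop() blocked. The pattern in isolation, with my_work_pending()/my_do_work() as hypothetical stand-ins for the socket event checks:

#include <linux/kthread.h>
#include <linux/sched.h>

static bool my_work_pending(void *arg);	/* hypothetical */
static void my_do_work(void *arg);	/* hypothetical */

static int my_daemon(void *arg)
{
	for (;;) {
		/* declare intent to sleep before checking any condition */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		if (!my_work_pending(arg)) {
			schedule();
			continue;
		}
		__set_current_state(TASK_RUNNING);
		my_do_work(arg);
	}
	__set_current_state(TASK_RUNNING);	/* never exit while sleepy */
	return 0;
}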
+ */ +#include +#include +#include +#include +#include +#include +#include + +#include "hisi_uncore_pmu.h" + +/* L3T register definition */ +#define L3T_PERF_CTRL 0x0408 +#define L3T_INT_MASK 0x0800 +#define L3T_INT_STATUS 0x0808 +#define L3T_INT_CLEAR 0x080c +#define L3T_EVENT_CTRL 0x1c00 +#define L3T_VERSION 0x1cf0 +#define L3T_EVENT_TYPE0 0x1d00 +/* + * If the HW version only supports a 48-bit counter, then + * bits [63:48] are reserved, which are Read-As-Zero and + * Writes-Ignored. + */ +#define L3T_CNTR0_LOWER 0x1e00 + +/* L3T has 8-counters */ +#define L3T_NR_COUNTERS 0x8 + +#define L3T_PERF_CTRL_EN 0x20000 +#define L3T_EVTYPE_NONE 0xff +#define L3T_NR_EVENTS 0x59 + +/* + * Select the counter register offset using the counter index + */ +static u32 hisi_l3t_pmu_get_counter_offset(int cntr_idx) +{ + return (L3T_CNTR0_LOWER + (cntr_idx * 8)); +} + +static u64 hisi_l3t_pmu_read_counter(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc) +{ + return readq(l3t_pmu->base + hisi_l3t_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_l3t_pmu_write_counter(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc, u64 val) +{ + writeq(val, l3t_pmu->base + hisi_l3t_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_l3t_pmu_write_evtype(struct hisi_pmu *l3t_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(L3T_EVENT_TYPE0/1). + * There are 2 event select registers for the 8 hardware counters. + * Event code is 8-bits and for the former 4 hardware counters, + * L3T_EVENT_TYPE0 is chosen. For the latter 4 hardware counters, + * L3T_EVENT_TYPE1 is chosen. + */ + reg = L3T_EVENT_TYPE0 + (idx / 4) * 4; + reg_idx = idx % 4; + shift = 8 * reg_idx; + + /* Write event code to L3T_EVENT_TYPEx Register */ + val = readl(l3t_pmu->base + reg); + val &= ~(L3T_EVTYPE_NONE << shift); + val |= (type << shift); + writel(val, l3t_pmu->base + reg); +} + +static void hisi_l3t_pmu_start_counters(struct hisi_pmu *l3t_pmu) +{ + u32 val; + + /* + * Set perf_enable bit in L3T_PERF_CTRL register to start counting + * for all enabled counters. + */ + val = readl(l3t_pmu->base + L3T_PERF_CTRL); + val |= L3T_PERF_CTRL_EN; + writel(val, l3t_pmu->base + L3T_PERF_CTRL); +} + +static void hisi_l3t_pmu_stop_counters(struct hisi_pmu *l3t_pmu) +{ + u32 val; + + /* + * Clear perf_enable bit in L3T_PERF_CTRL register to stop counting + * for all enabled counters. 
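Working one case through hisi_l3t_pmu_write_evtype() makes the layout concrete: the eight counters share two 32-bit event-select registers, four 8-bit event codes per register. For counter index 6, the values follow directly from the code above:

	u32 reg   = L3T_EVENT_TYPE0 + (6 / 4) * 4; /* 0x1d00 + 4 = 0x1d04, i.e. L3T_EVENT_TYPE1 */
	u32 shift = 8 * (6 % 4);                   /* 16: event code occupies bits [23:16] */
	u32 clear = L3T_EVTYPE_NONE << shift;      /* 0x00ff0000, cleared before the new code is ORed in */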
+ */ + val = readl(l3t_pmu->base + L3T_PERF_CTRL); + val &= ~(L3T_PERF_CTRL_EN); + writel(val, l3t_pmu->base + L3T_PERF_CTRL); +} + +static void hisi_l3t_pmu_enable_counter(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in L3T_EVENT_CTRL register */ + val = readl(l3t_pmu->base + L3T_EVENT_CTRL); + val |= (1 << hwc->idx); + writel(val, l3t_pmu->base + L3T_EVENT_CTRL); +} + +static void hisi_l3t_pmu_disable_counter(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in L3T_EVENT_CTRL register */ + val = readl(l3t_pmu->base + L3T_EVENT_CTRL); + val &= ~(1 << hwc->idx); + writel(val, l3t_pmu->base + L3T_EVENT_CTRL); +} + +static void hisi_l3t_pmu_enable_counter_int(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(l3t_pmu->base + L3T_INT_MASK); + /* Write 0 to enable interrupt */ + val &= ~(1 << hwc->idx); + writel(val, l3t_pmu->base + L3T_INT_MASK); +} + +static void hisi_l3t_pmu_disable_counter_int(struct hisi_pmu *l3t_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(l3t_pmu->base + L3T_INT_MASK); + /* Write 1 to mask interrupt */ + val |= (1 << hwc->idx); + writel(val, l3t_pmu->base + L3T_INT_MASK); +} + +static u32 hisi_l3t_pmu_get_int_status(struct hisi_pmu *l3t_pmu) +{ + return readl(l3t_pmu->base + L3T_INT_STATUS); +} + +static void hisi_l3t_pmu_clear_int_status(struct hisi_pmu *l3t_pmu, int idx) +{ + writel(1 << idx, l3t_pmu->base + L3T_INT_CLEAR); +} + +static const struct acpi_device_id hisi_l3t_pmu_acpi_match[] = { + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_l3t_pmu_acpi_match); + +static const struct of_device_id l3t_of_match[] = { + { .compatible = "hisilicon,l3t-pmu", }, + {}, +}; + +static int hisi_l3t_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *l3t_pmu) +{ + /* + * Use the SCCL_ID and CCL_ID to identify the L3T PMU, while + * SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1]. 
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &l3t_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read l3t sccl-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id", + &l3t_pmu->ccl_id)) { + dev_err(&pdev->dev, "Can not read l3t ccl-id!\n"); + return -EINVAL; + } + + l3t_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(l3t_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for l3t_pmu resource\n"); + return PTR_ERR(l3t_pmu->base); + } + + l3t_pmu->identifier = readl(l3t_pmu->base + L3T_VERSION); + + return 0; +} + +static struct attribute *hisi_l3t_pmu_v1_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + NULL, +}; + +static const struct attribute_group hisi_l3t_pmu_v1_format_group = { + .name = "format", + .attrs = hisi_l3t_pmu_v1_format_attr, +}; + +static struct attribute *hisi_l3t_pmu_v1_events_attr[] = { + HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00), + HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01), + HISI_PMU_EVENT_ATTR(rd_hit_cpipe, 0x02), + HISI_PMU_EVENT_ATTR(wr_hit_cpipe, 0x03), + HISI_PMU_EVENT_ATTR(victim_num, 0x04), + HISI_PMU_EVENT_ATTR(rd_spipe, 0x20), + HISI_PMU_EVENT_ATTR(wr_spipe, 0x21), + HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x22), + HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x23), + HISI_PMU_EVENT_ATTR(back_invalid, 0x29), + HISI_PMU_EVENT_ATTR(retry_cpu, 0x40), + HISI_PMU_EVENT_ATTR(retry_ring, 0x41), + HISI_PMU_EVENT_ATTR(prefetch_drop, 0x42), + NULL, +}; + +static const struct attribute_group hisi_l3t_pmu_v1_events_group = { + .name = "events", + .attrs = hisi_l3t_pmu_v1_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_l3t_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_l3t_pmu_cpumask_attr_group = { + .attrs = hisi_l3t_pmu_cpumask_attrs, +}; + +static struct device_attribute hisi_l3t_pmu_identifier_attr = + __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL); + +static struct attribute *hisi_l3t_pmu_identifier_attrs[] = { + &hisi_l3t_pmu_identifier_attr.attr, + NULL +}; + +static struct attribute_group hisi_l3t_pmu_identifier_group = { + .attrs = hisi_l3t_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_l3t_pmu_v1_attr_groups[] = { + &hisi_l3t_pmu_v1_format_group, + &hisi_l3t_pmu_v1_events_group, + &hisi_l3t_pmu_cpumask_attr_group, + &hisi_l3t_pmu_identifier_group, + NULL, +}; + +static const struct hisi_uncore_ops hisi_uncore_l3t_ops = { + .write_evtype = hisi_l3t_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_l3t_pmu_start_counters, + .stop_counters = hisi_l3t_pmu_stop_counters, + .enable_counter = hisi_l3t_pmu_enable_counter, + .disable_counter = hisi_l3t_pmu_disable_counter, + .enable_counter_int = hisi_l3t_pmu_enable_counter_int, + .disable_counter_int = hisi_l3t_pmu_disable_counter_int, + .write_counter = hisi_l3t_pmu_write_counter, + .read_counter = hisi_l3t_pmu_read_counter, + .get_int_status = hisi_l3t_pmu_get_int_status, + .clear_int_status = hisi_l3t_pmu_clear_int_status, +}; + +static int hisi_l3t_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *l3t_pmu) +{ + int ret; + + ret = hisi_l3t_pmu_init_data(pdev, l3t_pmu); + if (ret) + return ret; + + ret = hisi_uncore_pmu_init_irq(l3t_pmu, pdev); + if (ret) + return ret; + + l3t_pmu->counter_bits = 48; + l3t_pmu->check_event = L3T_NR_EVENTS; + l3t_pmu->pmu_events.attr_groups = hisi_l3t_pmu_v1_attr_groups; + + 
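Setting counter_bits = 48 below tells the shared hisi_uncore core how to reduce counter deltas, since bits [63:48] read as zero on this hardware version. The core's update arithmetic amounts to the following sketch (a restatement of the common pattern, not code from this file):

#include <linux/bits.h>

static u64 counter_delta(u64 prev, u64 now, unsigned int counter_bits)
{
	/* unsigned subtraction plus masking absorbs 48-bit wraparound */
	return (now - prev) & GENMASK_ULL(counter_bits - 1, 0);
}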
l3t_pmu->num_counters = L3T_NR_COUNTERS; + l3t_pmu->ops = &hisi_uncore_l3t_ops; + l3t_pmu->dev = &pdev->dev; + l3t_pmu->on_cpu = -1; + + return 0; +} + +static int hisi_l3t_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *l3t_pmu; + char *name; + int ret; + + l3t_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3t_pmu), GFP_KERNEL); + if (!l3t_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, l3t_pmu); + + ret = hisi_l3t_pmu_dev_probe(pdev, l3t_pmu); + if (ret) + return ret; + + if (device_property_read_u32(&pdev->dev, "hisilicon,index-id", &l3t_pmu->index_id)) { + dev_err(&pdev->dev, "Can not read l3t index-id!\n"); + return -EINVAL; + } + + /* + * CCL_ID is used to identify the L3T in the same SCCL which was + * used _UID by mistake. + */ + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3t%u", + l3t_pmu->sccl_id, l3t_pmu->index_id); + l3t_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = hisi_uncore_pmu_event_init, + .pmu_enable = hisi_uncore_pmu_enable, + .pmu_disable = hisi_uncore_pmu_disable, + .add = hisi_uncore_pmu_add, + .del = hisi_uncore_pmu_del, + .start = hisi_uncore_pmu_start, + .stop = hisi_uncore_pmu_stop, + .read = hisi_uncore_pmu_read, + .attr_groups = l3t_pmu->pmu_events.attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + /* Pick one core to use for cpumask attributes */ + cpumask_set_cpu(smp_processor_id(), &l3t_pmu->associated_cpus); + + l3t_pmu->on_cpu = cpumask_first(&l3t_pmu->associated_cpus); + if (l3t_pmu->on_cpu >= nr_cpu_ids) + return -EINVAL; + + ret = perf_pmu_register(&l3t_pmu->pmu, name, -1); + + return ret; +} + +static int hisi_l3t_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *l3t_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&l3t_pmu->pmu); + + return 0; +} + +static struct platform_driver hisi_l3t_pmu_driver = { + .driver = { + .name = "hisi_l3t_pmu", + .acpi_match_table = ACPI_PTR(hisi_l3t_pmu_acpi_match), + .of_match_table = l3t_of_match, + .suppress_bind_attrs = true, + }, + .probe = hisi_l3t_pmu_probe, + .remove = hisi_l3t_pmu_remove, +}; + +static int __init hisi_l3t_pmu_module_init(void) +{ + int ret; + + ret = platform_driver_register(&hisi_l3t_pmu_driver); + + return ret; +} +module_init(hisi_l3t_pmu_module_init); + +static void __exit hisi_l3t_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_l3t_pmu_driver); +} +module_exit(hisi_l3t_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC L3T uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Anurup M "); +MODULE_AUTHOR("Shaokun Zhang "); diff --git a/drivers/perf/hisilicon/hisi_uncore_lpddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_lpddrc_pmu.c new file mode 100644 index 0000000000000000000000000000000000000000..03a4bb1a9948022f8716817c4b86ae9010ca6881 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_lpddrc_pmu.c @@ -0,0 +1,408 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC LPDDRC uncore Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Shaokun Zhang + * Anurup M + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. 
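Once perf_pmu_register() in the l3t probe above succeeds, the PMU appears under /sys/bus/event_source/devices/ using the name assembled from the SCCL and index ids, and the event attributes translate directly into perf syntax. For example, on a system where an instance registered as hisi_sccl1_l3t0 (assuming SCCL 1, index 0):

	perf stat -a -e hisi_sccl1_l3t0/rd_hit_cpipe/ sleep 1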
+ */ +#include +#include +#include +#include +#include +#include +#include + +#include "hisi_uncore_pmu.h" + +/* LPDDRC register definition in v1 */ +#define LPDDRC_PERF_CTRL 0x4930 +#define LPDDRC_FLUX_WR 0x4948 +#define LPDDRC_FLUX_RD 0x494c +#define LPDDRC_FLUX_WCMD 0x4950 +#define LPDDRC_FLUX_RCMD 0x4954 +#define LPDDRC_PRE_CMD 0x4984 +#define LPDDRC_ACT_CMD 0x4988 +#define LPDDRC_RNK_CHG 0x4990 +#define LPDDRC_RW_CHG 0x4994 +#define LPDDRC_EVENT_CTRL 0x4d60 +#define LPDDRC_INT_MASK 0x6c8 +#define LPDDRC_INT_STATUS 0x6cc +#define LPDDRC_INT_CLEAR 0x6d0 +#define LPDDRC_VERSION 0x710 + +#define LPDDRC_NR_COUNTERS 0x8 +#define LPDDRC_V1_PERF_CTRL_EN 0x1 +#define LPDDRC_V1_NR_EVENTS 0x7 + +/* + * For PMU v1, there are eight events, and every event has been mapped + * to a fixed-purpose counter whose register offset is not consistent. + * Therefore there is no write_evtype operation, and the driver assumes + * that the event code (0 to 7) is equal to the counter index. + */ +#define GET_LPDDRC_EVENTID(hwc) (hwc->config_base & 0x7) + +static const u32 lpddrc_reg_off[] = { + LPDDRC_FLUX_WR, LPDDRC_FLUX_RD, LPDDRC_FLUX_WCMD, LPDDRC_FLUX_RCMD, + LPDDRC_PRE_CMD, LPDDRC_ACT_CMD, LPDDRC_RNK_CHG, LPDDRC_RW_CHG +}; + +/* + * Select the counter register offset using the counter index. + * In PMU v1, there are no programmable counters; the count + * is read from the statistics counter register itself. + */ +static u32 hisi_lpddrc_pmu_v1_get_counter_offset(int cntr_idx) +{ + return lpddrc_reg_off[cntr_idx]; +} + +static u64 hisi_lpddrc_pmu_v1_read_counter(struct hisi_pmu *lpddrc_pmu, + struct hw_perf_event *hwc) +{ + return readl(lpddrc_pmu->base + + hisi_lpddrc_pmu_v1_get_counter_offset(hwc->idx)); +} + +/* + * For the LPDDRC PMU, the event counter should be reset when counting + * starts; since the counter register is RO, the driver resets the + * software prev_count instead. + */ +static void hisi_lpddrc_pmu_v1_write_counter(struct hisi_pmu *lpddrc_pmu, + struct hw_perf_event *hwc, u64 val) +{ + local64_set(&hwc->prev_count, 0); +} + +/* + * For LPDDRC PMU v1, events are mapped to fixed-purpose counters by hardware, + * so there is no need to write an event type; the counters are programmable + * in PMU v2.
+ */
+static void hisi_lpddrc_pmu_write_evtype(struct hisi_pmu *lpddrc_pmu, int idx,
+					 u32 type)
+{
+}
+
+static void hisi_lpddrc_pmu_v1_start_counters(struct hisi_pmu *lpddrc_pmu)
+{
+	u32 val;
+
+	/* Set perf_enable in LPDDRC_PERF_CTRL to start event counting */
+	val = readl(lpddrc_pmu->base + LPDDRC_PERF_CTRL);
+	val |= LPDDRC_V1_PERF_CTRL_EN;
+	writel(val, lpddrc_pmu->base + LPDDRC_PERF_CTRL);
+}
+
+static void hisi_lpddrc_pmu_v1_stop_counters(struct hisi_pmu *lpddrc_pmu)
+{
+	u32 val;
+
+	/* Clear perf_enable in LPDDRC_PERF_CTRL to stop event counting */
+	val = readl(lpddrc_pmu->base + LPDDRC_PERF_CTRL);
+	val &= ~LPDDRC_V1_PERF_CTRL_EN;
+	writel(val, lpddrc_pmu->base + LPDDRC_PERF_CTRL);
+}
+
+static void hisi_lpddrc_pmu_v1_enable_counter(struct hisi_pmu *lpddrc_pmu,
+					      struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Set counter index (event code) in LPDDRC_EVENT_CTRL register */
+	val = readl(lpddrc_pmu->base + LPDDRC_EVENT_CTRL);
+	val |= (1 << GET_LPDDRC_EVENTID(hwc));
+	writel(val, lpddrc_pmu->base + LPDDRC_EVENT_CTRL);
+}
+
+static void hisi_lpddrc_pmu_v1_disable_counter(struct hisi_pmu *lpddrc_pmu,
+					       struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Clear counter index (event code) in LPDDRC_EVENT_CTRL register */
+	val = readl(lpddrc_pmu->base + LPDDRC_EVENT_CTRL);
+	val &= ~(1 << GET_LPDDRC_EVENTID(hwc));
+	writel(val, lpddrc_pmu->base + LPDDRC_EVENT_CTRL);
+}
+
+static int hisi_lpddrc_pmu_v1_get_event_idx(struct perf_event *event)
+{
+	struct hisi_pmu *lpddrc_pmu = to_hisi_pmu(event->pmu);
+	unsigned long *used_mask = lpddrc_pmu->pmu_events.used_mask;
+	struct hw_perf_event *hwc = &event->hw;
+	/* For LPDDRC PMU, we use event code as counter index */
+	int idx = GET_LPDDRC_EVENTID(hwc);
+
+	if (test_bit(idx, used_mask))
+		return -EAGAIN;
+
+	set_bit(idx, used_mask);
+
+	return idx;
+}
+
+static void hisi_lpddrc_pmu_v1_enable_counter_int(struct hisi_pmu *lpddrc_pmu,
+						  struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Write 0 to enable interrupt */
+	val = readl(lpddrc_pmu->base + LPDDRC_INT_MASK);
+	val &= ~(1 << hwc->idx);
+	writel(val, lpddrc_pmu->base + LPDDRC_INT_MASK);
+}
+
+static void hisi_lpddrc_pmu_v1_disable_counter_int(struct hisi_pmu *lpddrc_pmu,
+						   struct hw_perf_event *hwc)
+{
+	u32 val;
+
+	/* Write 1 to mask interrupt */
+	val = readl(lpddrc_pmu->base + LPDDRC_INT_MASK);
+	val |= 1 << hwc->idx;
+	writel(val, lpddrc_pmu->base + LPDDRC_INT_MASK);
+}
+
+static u32 hisi_lpddrc_pmu_v1_get_int_status(struct hisi_pmu *lpddrc_pmu)
+{
+	return readl(lpddrc_pmu->base + LPDDRC_INT_STATUS);
+}
+
+static void hisi_lpddrc_pmu_v1_clear_int_status(struct hisi_pmu *lpddrc_pmu,
+						int idx)
+{
+	writel(1 << idx, lpddrc_pmu->base + LPDDRC_INT_CLEAR);
+}
+
+static const struct acpi_device_id hisi_lpddrc_pmu_acpi_match[] = {
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_lpddrc_pmu_acpi_match);
+
+static const struct of_device_id lpddrc_of_match[] = {
+	{ .compatible = "hisilicon,lpddrc-pmu", },
+	{},
+};
+
+static int hisi_lpddrc_pmu_init_data(struct platform_device *pdev,
+				     struct hisi_pmu *lpddrc_pmu)
+{
+	/*
+	 * Use the SCCL_ID and LPDDRC channel ID to identify the
+	 * LPDDRC PMU, while SCCL_ID is in MPIDR[aff2].
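+	 *
+	 * Worked example (editor's addition, values illustrative): for
+	 * TSV110-class parts the common hisi_read_sccl_and_ccl_id()
+	 * decode below splits aff2 as sccl = aff2 >> 3 and ccl = aff2 & 0x7,
+	 * so aff2 == 0x2b decodes to SCCL 5, CCL 3.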
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id", + &lpddrc_pmu->index_id)) { + dev_err(&pdev->dev, "Can not read lpddrc channel-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &lpddrc_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read lpddrc sccl-id!\n"); + return -EINVAL; + } + /* LPDDRC PMUs only share the same SCCL */ + lpddrc_pmu->ccl_id = -1; + + lpddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(lpddrc_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for lpddrc_pmu resource\n"); + return PTR_ERR(lpddrc_pmu->base); + } + + lpddrc_pmu->identifier = readl(lpddrc_pmu->base + LPDDRC_VERSION); + + return 0; +} + +static struct attribute *hisi_lpddrc_pmu_v1_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-4"), + NULL, +}; + +static const struct attribute_group hisi_lpddrc_pmu_v1_format_group = { + .name = "format", + .attrs = hisi_lpddrc_pmu_v1_format_attr, +}; + +static struct attribute *hisi_lpddrc_pmu_v1_events_attr[] = { + HISI_PMU_EVENT_ATTR(flux_wr, 0x00), + HISI_PMU_EVENT_ATTR(flux_rd, 0x01), + HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02), + HISI_PMU_EVENT_ATTR(flux_rcmd, 0x03), + HISI_PMU_EVENT_ATTR(pre_cmd, 0x04), + HISI_PMU_EVENT_ATTR(act_cmd, 0x05), + HISI_PMU_EVENT_ATTR(rnk_chg, 0x06), + HISI_PMU_EVENT_ATTR(rw_chg, 0x07), + NULL, +}; + +static const struct attribute_group hisi_lpddrc_pmu_v1_events_group = { + .name = "events", + .attrs = hisi_lpddrc_pmu_v1_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_lpddrc_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_lpddrc_pmu_cpumask_attr_group = { + .attrs = hisi_lpddrc_pmu_cpumask_attrs, +}; + +static struct device_attribute hisi_lpddrc_pmu_identifier_attr = + __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL); + +static struct attribute *hisi_lpddrc_pmu_identifier_attrs[] = { + &hisi_lpddrc_pmu_identifier_attr.attr, + NULL +}; + +static struct attribute_group hisi_lpddrc_pmu_identifier_group = { + .attrs = hisi_lpddrc_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_lpddrc_pmu_v1_attr_groups[] = { + &hisi_lpddrc_pmu_v1_format_group, + &hisi_lpddrc_pmu_v1_events_group, + &hisi_lpddrc_pmu_cpumask_attr_group, + &hisi_lpddrc_pmu_identifier_group, + NULL, +}; + +static const struct hisi_uncore_ops hisi_uncore_lpddrc_v1_ops = { + .write_evtype = hisi_lpddrc_pmu_write_evtype, + .get_event_idx = hisi_lpddrc_pmu_v1_get_event_idx, + .start_counters = hisi_lpddrc_pmu_v1_start_counters, + .stop_counters = hisi_lpddrc_pmu_v1_stop_counters, + .enable_counter = hisi_lpddrc_pmu_v1_enable_counter, + .disable_counter = hisi_lpddrc_pmu_v1_disable_counter, + .enable_counter_int = hisi_lpddrc_pmu_v1_enable_counter_int, + .disable_counter_int = hisi_lpddrc_pmu_v1_disable_counter_int, + .write_counter = hisi_lpddrc_pmu_v1_write_counter, + .read_counter = hisi_lpddrc_pmu_v1_read_counter, + .get_int_status = hisi_lpddrc_pmu_v1_get_int_status, + .clear_int_status = hisi_lpddrc_pmu_v1_clear_int_status, +}; + +static int hisi_lpddrc_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *lpddrc_pmu) +{ + int ret; + + ret = hisi_lpddrc_pmu_init_data(pdev, lpddrc_pmu); + if (ret) + return ret; + + ret = hisi_uncore_pmu_init_irq(lpddrc_pmu, pdev); + if (ret) + return ret; + + lpddrc_pmu->counter_bits = 32; + lpddrc_pmu->check_event = LPDDRC_V1_NR_EVENTS; + lpddrc_pmu->pmu_events.attr_groups = 
hisi_lpddrc_pmu_v1_attr_groups; + lpddrc_pmu->ops = &hisi_uncore_lpddrc_v1_ops; + + lpddrc_pmu->num_counters = LPDDRC_NR_COUNTERS; + lpddrc_pmu->dev = &pdev->dev; + lpddrc_pmu->on_cpu = -1; + + return 0; +} + +static int hisi_lpddrc_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *lpddrc_pmu; + char *name; + int ret; + + lpddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*lpddrc_pmu), GFP_KERNEL); + if (!lpddrc_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, lpddrc_pmu); + + ret = hisi_lpddrc_pmu_dev_probe(pdev, lpddrc_pmu); + if (ret) + return ret; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "hisi_sccl%u_lpddrc%u", lpddrc_pmu->sccl_id, + lpddrc_pmu->index_id); + + lpddrc_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = hisi_uncore_pmu_event_init, + .pmu_enable = hisi_uncore_pmu_enable, + .pmu_disable = hisi_uncore_pmu_disable, + .add = hisi_uncore_pmu_add, + .del = hisi_uncore_pmu_del, + .start = hisi_uncore_pmu_start, + .stop = hisi_uncore_pmu_stop, + .read = hisi_uncore_pmu_read, + .attr_groups = lpddrc_pmu->pmu_events.attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + /* Pick one core to use for cpumask attributes */ + cpumask_set_cpu(smp_processor_id(), &lpddrc_pmu->associated_cpus); + + lpddrc_pmu->on_cpu = cpumask_first(&lpddrc_pmu->associated_cpus); + if (lpddrc_pmu->on_cpu >= nr_cpu_ids) + return -EINVAL; + + ret = perf_pmu_register(&lpddrc_pmu->pmu, name, -1); + + return ret; +} + +static int hisi_lpddrc_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *lpddrc_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&lpddrc_pmu->pmu); + return 0; +} + +static struct platform_driver hisi_lpddrc_pmu_driver = { + .driver = { + .name = "hisi_lpddrc_pmu", + .acpi_match_table = ACPI_PTR(hisi_lpddrc_pmu_acpi_match), + .of_match_table = lpddrc_of_match, + .suppress_bind_attrs = true, + }, + .probe = hisi_lpddrc_pmu_probe, + .remove = hisi_lpddrc_pmu_remove, +}; + +static int __init hisi_lpddrc_pmu_module_init(void) +{ + int ret; + + ret = platform_driver_register(&hisi_lpddrc_pmu_driver); + + return ret; +} +module_init(hisi_lpddrc_pmu_module_init); + +static void __exit hisi_lpddrc_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_lpddrc_pmu_driver); +} +module_exit(hisi_lpddrc_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC LPDDRC uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Shaokun Zhang "); +MODULE_AUTHOR("Anurup M "); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 07f0c7015181037c67c6826a04bbecbeb0c90108..868fec1f933d420596a62987949774101c24328f 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -168,7 +168,7 @@ int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu, return irq; ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr, - IRQF_NOBALANCING | IRQF_NO_THREAD, + IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_SHARED, dev_name(&pdev->dev), hisi_pmu); if (ret < 0) { dev_err(&pdev->dev, @@ -434,12 +434,19 @@ static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp) bool mt = mpidr & MPIDR_MT_BITMASK; int sccl, ccl; - if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) { - sccl = aff2 >> 3; - ccl = aff2 & 0x7; - } else if (mt) { - sccl = aff3; - ccl = aff2; + if (mt) { + switch (read_cpuid_part_number()) { + case HISI_CPU_PART_TSV110: + case HISI_CPU_PART_TSV200: + case ARM_CPU_PART_CORTEX_A55: + sccl = 
aff2 >> 3;
+			ccl = aff2 & 0x7;
+			break;
+		default:
+			sccl = aff3;
+			ccl = aff2;
+			break;
+		}
 	} else {
 		sccl = aff2;
 		ccl = aff1;
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 23a0e008dafa25fd9dd18cf9ccb49fe8cfb6f3ad..d58810f53727c90d7c6ad1f31a71433f086e69d4 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -739,7 +739,7 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
 {
 	u64 mpidr;
 	int cpu_cluster_id;
-	struct cluster_pmu *cluster = NULL;
+	struct cluster_pmu *cluster;
 
 	/*
 	 * This assumes that the cluster_id is in MPIDR[aff1] for
@@ -761,10 +761,10 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
 			cluster->cluster_id);
 		cpumask_set_cpu(cpu, &cluster->cluster_cpus);
 		*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
-		break;
+		return cluster;
 	}
 
-	return cluster;
+	return NULL;
 }
 
 static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
diff --git a/drivers/phy/amlogic/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c
index 03c061dd5f0ded625717041c8593196dad5617e8..8f40b9342a971745c1c5d6ae33e4c4d3dcee17da 100644
--- a/drivers/phy/amlogic/phy-meson8b-usb2.c
+++ b/drivers/phy/amlogic/phy-meson8b-usb2.c
@@ -261,8 +261,9 @@ static int phy_meson8b_usb2_probe(struct platform_device *pdev)
 		return PTR_ERR(priv->clk_usb);
 
 	priv->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
-	if (PTR_ERR(priv->reset) == -EPROBE_DEFER)
-		return PTR_ERR(priv->reset);
+	if (IS_ERR(priv->reset))
+		return dev_err_probe(&pdev->dev, PTR_ERR(priv->reset),
+				     "Failed to get the reset line");
 
 	priv->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
 	if (priv->dr_mode == USB_DR_MODE_UNKNOWN) {
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index 99fbc7e4138be7e1262b6e413fbaeda208365623..b901a0d4e2a80af9e3a4b2aef4cadb072066b74e 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <linux/suspend.h>
 
 #include "phy-brcm-usb-init.h"
@@ -69,12 +70,35 @@ struct brcm_usb_phy_data {
 	int			init_count;
 	int			wake_irq;
 	struct brcm_usb_phy	phys[BRCM_USB_PHY_ID_MAX];
+	struct notifier_block	pm_notifier;
+	bool			pm_active;
 };
 
 static s8 *node_reg_names[BRCM_REGS_MAX] = {
 	"crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
 };
 
+static int brcm_pm_notifier(struct notifier_block *notifier,
+			    unsigned long pm_event,
+			    void *unused)
+{
+	struct brcm_usb_phy_data *priv =
+		container_of(notifier, struct brcm_usb_phy_data, pm_notifier);
+
+	switch (pm_event) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		priv->pm_active = true;
+		break;
+	case PM_POST_RESTORE:
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		priv->pm_active = false;
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
 static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
 {
 	struct phy *gphy = dev_id;
@@ -90,6 +114,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
 	struct brcm_usb_phy_data *priv =
 		container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
 
+	if (priv->pm_active)
+		return 0;
+
 	/*
 	 * Use a lock to make sure a second caller waits until
 	 * the base phy is inited before using it.
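The hunks above and below short-circuit brcm_usb_phy_init()/brcm_usb_phy_exit() while a system-wide power transition is in flight, using a pm_active flag flipped from a PM notifier. A minimal, self-contained sketch of that pattern follows (editor's addition; struct my_phy_priv and the function names are hypothetical, only the notifier API from <linux/suspend.h> is real):

	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <linux/suspend.h>

	struct my_phy_priv {
		struct notifier_block pm_notifier;
		bool pm_active;		/* set while a system PM transition runs */
	};

	static int my_pm_notifier(struct notifier_block *nb, unsigned long pm_event,
				  void *unused)
	{
		struct my_phy_priv *priv =
			container_of(nb, struct my_phy_priv, pm_notifier);

		switch (pm_event) {
		case PM_HIBERNATION_PREPARE:
		case PM_SUSPEND_PREPARE:
			priv->pm_active = true;	/* make init/exit no-ops */
			break;
		case PM_POST_RESTORE:
		case PM_POST_HIBERNATION:
		case PM_POST_SUSPEND:
			priv->pm_active = false;
			break;
		}
		return NOTIFY_DONE;
	}

	static int my_probe_fragment(struct my_phy_priv *priv)
	{
		/* At probe: hook the notifier into the global PM chain. */
		priv->pm_notifier.notifier_call = my_pm_notifier;
		return register_pm_notifier(&priv->pm_notifier);
	}

The matching unregister_pm_notifier() call belongs in the remove path, as this patch adds in brcm_usb_phy_remove().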
@@ -119,6 +146,9 @@ static int brcm_usb_phy_exit(struct phy *gphy) struct brcm_usb_phy_data *priv = container_of(phy, struct brcm_usb_phy_data, phys[phy->id]); + if (priv->pm_active) + return 0; + dev_dbg(&gphy->dev, "EXIT\n"); if (phy->id == BRCM_USB_PHY_2_0) brcm_usb_uninit_eohci(&priv->ini); @@ -484,6 +514,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) if (err) return err; + priv->pm_notifier.notifier_call = brcm_pm_notifier; + register_pm_notifier(&priv->pm_notifier); + mutex_init(&priv->mutex); /* make sure invert settings are correct */ @@ -524,7 +557,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) static int brcm_usb_phy_remove(struct platform_device *pdev) { + struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev); + sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group); + unregister_pm_notifier(&priv->pm_notifier); return 0; } @@ -535,6 +571,7 @@ static int brcm_usb_phy_suspend(struct device *dev) struct brcm_usb_phy_data *priv = dev_get_drvdata(dev); if (priv->init_count) { + dev_dbg(dev, "SUSPEND\n"); priv->ini.wake_enabled = device_may_wakeup(dev); if (priv->phys[BRCM_USB_PHY_3_0].inited) brcm_usb_uninit_xhci(&priv->ini); @@ -574,6 +611,7 @@ static int brcm_usb_phy_resume(struct device *dev) * Uninitialize anything that wasn't previously initialized. */ if (priv->init_count) { + dev_dbg(dev, "RESUME\n"); if (priv->wake_irq >= 0) disable_irq_wake(priv->wake_irq); brcm_usb_init_common(&priv->ini); diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c index 14e0551cd3190f71aeb85f0318c3d71c455836de..0aa740b73d0db06adc659a4ef519d6f0a564e890 100644 --- a/drivers/phy/phy-core-mipi-dphy.c +++ b/drivers/phy/phy-core-mipi-dphy.c @@ -66,10 +66,10 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock, cfg->hs_trail = max(4 * 8 * ui, 60000 + 4 * 4 * ui); cfg->init = 100; - cfg->lpx = 60000; + cfg->lpx = 50000; cfg->ta_get = 5 * cfg->lpx; cfg->ta_go = 4 * cfg->lpx; - cfg->ta_sure = 2 * cfg->lpx; + cfg->ta_sure = cfg->lpx; cfg->wakeup = 1000; cfg->hs_clk_rate = hs_clk_rate; diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c index 6700645bcbe6bfd6abeeb4432618b5a6c2596055..3b5ffc16a6947c8a32e4a8e341f395ebe1e5fe5d 100644 --- a/drivers/phy/socionext/phy-uniphier-usb3ss.c +++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c @@ -22,11 +22,13 @@ #include #define SSPHY_TESTI 0x0 -#define SSPHY_TESTO 0x4 #define TESTI_DAT_MASK GENMASK(13, 6) #define TESTI_ADR_MASK GENMASK(5, 1) #define TESTI_WR_EN BIT(0) +#define SSPHY_TESTO 0x4 +#define TESTO_DAT_MASK GENMASK(7, 0) + #define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) } #define CDR_CPD_TRIM PHY_F(7, 3, 0) /* RxPLL charge pump current */ @@ -84,12 +86,12 @@ static void uniphier_u3ssphy_set_param(struct uniphier_u3ssphy_priv *priv, val = FIELD_PREP(TESTI_DAT_MASK, 1); val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no); uniphier_u3ssphy_testio_write(priv, val); - val = readl(priv->base + SSPHY_TESTO); + val = readl(priv->base + SSPHY_TESTO) & TESTO_DAT_MASK; /* update value */ - val &= ~FIELD_PREP(TESTI_DAT_MASK, field_mask); + val &= ~field_mask; data = field_mask & (p->value << p->field.lsb); - val = FIELD_PREP(TESTI_DAT_MASK, data); + val = FIELD_PREP(TESTI_DAT_MASK, data | val); val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no); uniphier_u3ssphy_testio_write(priv, val); uniphier_u3ssphy_testio_write(priv, val | TESTI_WR_EN); diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index 
dceac7714872195e73f73cd04eb7d3dda117c42f..5536b8f4bfd13daaf109bc7138da32d19bac11fb 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -177,6 +177,7 @@ static const struct clk_div_table clk_div_table[] = { { .val = 1, .div = 2, }, { .val = 2, .div = 4, }, { .val = 3, .div = 8, }, + { /* sentinel */ }, }; static struct wiz_clk_div_sel clk_div_sel[] = { diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c index 2b0f921b6ee3de1c93fb1e96a7c3b13909668283..b8ccac6f314672857b1128b05434778f3aa75631 100644 --- a/drivers/phy/xilinx/phy-zynqmp.c +++ b/drivers/phy/xilinx/phy-zynqmp.c @@ -134,7 +134,8 @@ #define PROT_BUS_WIDTH_10 0x0 #define PROT_BUS_WIDTH_20 0x1 #define PROT_BUS_WIDTH_40 0x2 -#define PROT_BUS_WIDTH_SHIFT 2 +#define PROT_BUS_WIDTH_SHIFT(n) ((n) * 2) +#define PROT_BUS_WIDTH_MASK(n) GENMASK((n) * 2 + 1, (n) * 2) /* Number of GT lanes */ #define NUM_LANES 4 @@ -443,12 +444,12 @@ static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy) static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy) { struct xpsgtr_dev *gtr_dev = gtr_phy->dev; + u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane); + u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane); /* Set SGMII protocol TX and RX bus width to 10 bits. */ - xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH, - PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT)); - xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH, - PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT)); + xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val); + xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val); xpsgtr_bypass_scrambler_8b10b(gtr_phy); } diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 40ce18a0d0190086a83512f1953a166311592b50..6768b2f03d6859601a6cb16d85deb2bada87e117 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -1264,16 +1264,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) { - pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); - return -ENOMEM; + err = -ENOMEM; + goto out_remove; } if (is_7211) { pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS, sizeof(*pc->wake_irq), GFP_KERNEL); - if (!pc->wake_irq) - return -ENOMEM; + if (!pc->wake_irq) { + err = -ENOMEM; + goto out_remove; + } } /* @@ -1297,8 +1299,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) len = strlen(dev_name(pc->dev)) + 16; name = devm_kzalloc(pc->dev, len, GFP_KERNEL); - if (!name) - return -ENOMEM; + if (!name) { + err = -ENOMEM; + goto out_remove; + } snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i); @@ -1317,11 +1321,14 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) err = gpiochip_add_data(&pc->gpio_chip, pc); if (err) { dev_err(dev, "could not add GPIO chip\n"); - pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); - return err; + goto out_remove; } return 0; + +out_remove: + pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); + return err; } static struct platform_driver bcm2835_pinctrl_driver = { diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index b6ef1911c1dd16a3060a1e95d08e21f41d3fe8d3..348c670a7b07d331acdcfd3ca58f5167379bdf28 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -441,8 +441,8 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) value &= ~PADCFG0_PMODE_MASK; value 
|= PADCFG0_PMODE_GPIO; - /* Disable input and output buffers */ - value |= PADCFG0_GPIORXDIS; + /* Disable TX buffer and enable RX (this will be input) */ + value &= ~PADCFG0_GPIORXDIS; value |= PADCFG0_GPIOTXDIS; /* Disable SCI/SMI/NMI generation */ @@ -487,9 +487,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, intel_gpio_set_gpio_mode(padcfg0); - /* Disable TX buffer and enable RX (this will be input) */ - __intel_gpio_set_direction(padcfg0, true); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; @@ -1105,9 +1102,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type) intel_gpio_set_gpio_mode(reg); - /* Disable TX buffer and enable RX (this will be input) */ - __intel_gpio_set_direction(reg, true); - value = readl(reg); value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV); @@ -1207,6 +1201,39 @@ static irqreturn_t intel_gpio_irq(int irq, void *data) return IRQ_RETVAL(ret); } +static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) +{ + int i; + + for (i = 0; i < pctrl->ncommunities; i++) { + const struct intel_community *community; + void __iomem *base; + unsigned int gpp; + + community = &pctrl->communities[i]; + base = community->regs; + + for (gpp = 0; gpp < community->ngpps; gpp++) { + /* Mask and clear all interrupts */ + writel(0, base + community->ie_offset + gpp * 4); + writel(0xffff, base + community->is_offset + gpp * 4); + } + } +} + +static int intel_gpio_irq_init_hw(struct gpio_chip *gc) +{ + struct intel_pinctrl *pctrl = gpiochip_get_data(gc); + + /* + * Make sure the interrupt lines are in a proper state before + * further configuration. + */ + intel_gpio_irq_init(pctrl); + + return 0; +} + static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl, const struct intel_community *community) { @@ -1311,6 +1338,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq) girq->num_parents = 0; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; + girq->init_hw = intel_gpio_irq_init_hw; ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl); if (ret) { @@ -1640,26 +1668,6 @@ int intel_pinctrl_suspend_noirq(struct device *dev) } EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq); -static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) -{ - size_t i; - - for (i = 0; i < pctrl->ncommunities; i++) { - const struct intel_community *community; - void __iomem *base; - unsigned int gpp; - - community = &pctrl->communities[i]; - base = community->regs; - - for (gpp = 0; gpp < community->ngpps; gpp++) { - /* Mask and clear all interrupts */ - writel(0, base + community->ie_offset + gpp * 4); - writel(0xffff, base + community->is_offset + gpp * 4); - } - } -} - static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value) { u32 curr, updated; diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index a02ad10ec6fada549a1f03d2903d6c11da1aa4aa..730581d130649bcf5878b7682b3125e666376f57 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1039,6 +1039,7 @@ int mtk_pctrl_init(struct platform_device *pdev, node = of_parse_phandle(np, "mediatek,pctl-regmap", 0); if (node) { pctl->regmap1 = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(pctl->regmap1)) return PTR_ERR(pctl->regmap1); } else if (regmap) { @@ -1052,6 +1053,7 @@ int mtk_pctrl_init(struct platform_device *pdev, node = of_parse_phandle(np, "mediatek,pctl-regmap", 1); if (node) { pctl->regmap2 = 
syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(pctl->regmap2)) return PTR_ERR(pctl->regmap2); } diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c index 623af4410b07c70d6d011c775cb692d7f8fbd5bd..d0a4ebbe1e7e65f6e04532bc02f8101afbb3fbd1 100644 --- a/drivers/pinctrl/mediatek/pinctrl-paris.c +++ b/drivers/pinctrl/mediatek/pinctrl-paris.c @@ -96,20 +96,16 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev, err = hw->soc->bias_get_combo(hw, desc, &pullup, &ret); if (err) goto out; + if (ret == MTK_PUPD_SET_R1R0_00) + ret = MTK_DISABLE; if (param == PIN_CONFIG_BIAS_DISABLE) { - if (ret == MTK_PUPD_SET_R1R0_00) - ret = MTK_DISABLE; + if (ret != MTK_DISABLE) + err = -EINVAL; } else if (param == PIN_CONFIG_BIAS_PULL_UP) { - /* When desire to get pull-up value, return - * error if current setting is pull-down - */ - if (!pullup) + if (!pullup || ret == MTK_DISABLE) err = -EINVAL; } else if (param == PIN_CONFIG_BIAS_PULL_DOWN) { - /* When desire to get pull-down value, return - * error if current setting is pull-up - */ - if (pullup) + if (pullup || ret == MTK_DISABLE) err = -EINVAL; } } else { @@ -188,8 +184,7 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev, } static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, - enum pin_config_param param, - enum pin_config_param arg) + enum pin_config_param param, u32 arg) { struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev); const struct mtk_pin_desc *desc; @@ -585,6 +580,9 @@ ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw, if (gpio >= hw->soc->npins) return -EINVAL; + if (mtk_is_virt_gpio(hw, gpio)) + return -EINVAL; + desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio]; pinmux = mtk_pctrl_get_pinmux(hw, gpio); if (pinmux >= hw->soc->nfuncs) @@ -719,10 +717,10 @@ static int mtk_pconf_group_get(struct pinctrl_dev *pctldev, unsigned group, unsigned long *config) { struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev); + struct mtk_pinctrl_group *grp = &hw->groups[group]; - *config = hw->groups[group].config; - - return 0; + /* One pin per group only */ + return mtk_pinconf_get(pctldev, grp->pin, config); } static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group, @@ -738,8 +736,6 @@ static int mtk_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group, pinconf_to_config_argument(configs[i])); if (ret < 0) return ret; - - grp->config = configs[i]; } return 0; diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 657e35a75d84aad961606193d5bb089401127872..6d77feda9090a9ec6af22ef545ef785bd53a5dbf 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -1883,8 +1883,10 @@ static int nmk_pinctrl_probe(struct platform_device *pdev) } prcm_np = of_parse_phandle(np, "prcm", 0); - if (prcm_np) + if (prcm_np) { npct->prcm_base = of_iomap(prcm_np, 0); + of_node_put(prcm_np); + } if (!npct->prcm_base) { if (version == PINCTRL_NMK_STN8815) { dev_info(&pdev->dev, diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c index 6de31b5ee358c62adfd1672c0120d015bee3e129..ce36b6ff7b95e4668b7aa8fb77aca0a32cda402a 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c +++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c @@ -78,7 +78,6 @@ struct npcm7xx_gpio { struct gpio_chip gc; int irqbase; int irq; - void *priv; struct irq_chip irq_chip; u32 pinctrl_id; int (*direction_input)(struct gpio_chip *chip, 
unsigned offset); @@ -226,7 +225,7 @@ static void npcmgpio_irq_handler(struct irq_desc *desc) chained_irq_enter(chip, desc); sts = ioread32(bank->base + NPCM7XX_GP_N_EVST); en = ioread32(bank->base + NPCM7XX_GP_N_EVEN); - dev_dbg(chip->parent_device, "==> got irq sts %.8x %.8x\n", sts, + dev_dbg(bank->gc.parent, "==> got irq sts %.8x %.8x\n", sts, en); sts &= en; @@ -241,33 +240,33 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type) gpiochip_get_data(irq_data_get_irq_chip_data(d)); unsigned int gpio = BIT(d->hwirq); - dev_dbg(d->chip->parent_device, "setirqtype: %u.%u = %u\n", gpio, + dev_dbg(bank->gc.parent, "setirqtype: %u.%u = %u\n", gpio, d->irq, type); switch (type) { case IRQ_TYPE_EDGE_RISING: - dev_dbg(d->chip->parent_device, "edge.rising\n"); + dev_dbg(bank->gc.parent, "edge.rising\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_EDGE_FALLING: - dev_dbg(d->chip->parent_device, "edge.falling\n"); + dev_dbg(bank->gc.parent, "edge.falling\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_EDGE_BOTH: - dev_dbg(d->chip->parent_device, "edge.both\n"); + dev_dbg(bank->gc.parent, "edge.both\n"); npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio); break; case IRQ_TYPE_LEVEL_LOW: - dev_dbg(d->chip->parent_device, "level.low\n"); + dev_dbg(bank->gc.parent, "level.low\n"); npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; case IRQ_TYPE_LEVEL_HIGH: - dev_dbg(d->chip->parent_device, "level.high\n"); + dev_dbg(bank->gc.parent, "level.high\n"); npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio); break; default: - dev_dbg(d->chip->parent_device, "invalid irq type\n"); + dev_dbg(bank->gc.parent, "invalid irq type\n"); return -EINVAL; } @@ -289,7 +288,7 @@ static void npcmgpio_irq_ack(struct irq_data *d) gpiochip_get_data(irq_data_get_irq_chip_data(d)); unsigned int gpio = d->hwirq; - dev_dbg(d->chip->parent_device, "irq_ack: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_ack: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVST); } @@ -301,7 +300,7 @@ static void npcmgpio_irq_mask(struct irq_data *d) unsigned int gpio = d->hwirq; /* Clear events */ - dev_dbg(d->chip->parent_device, "irq_mask: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_mask: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC); } @@ -313,7 +312,7 @@ static void npcmgpio_irq_unmask(struct irq_data *d) unsigned int gpio = d->hwirq; /* Enable events */ - dev_dbg(d->chip->parent_device, "irq_unmask: %u.%u\n", gpio, d->irq); + dev_dbg(bank->gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq); iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS); } @@ -323,7 +322,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d) unsigned int gpio = d->hwirq; /* active-high, input, clear interrupt, enable interrupt */ - dev_dbg(d->chip->parent_device, "startup: %u.%u\n", gpio, d->irq); + dev_dbg(gc->parent, "startup: %u.%u\n", gpio, d->irq); npcmgpio_direction_input(gc, gpio); npcmgpio_irq_ack(d); npcmgpio_irq_unmask(d); @@ -905,7 +904,7 @@ static struct npcm7xx_func npcm7xx_funcs[] = { #define DRIVE_STRENGTH_HI_SHIFT 12 #define DRIVE_STRENGTH_MASK 0x0000FF00 -#define DS(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \ +#define DSTR(lo, hi) (((lo) << DRIVE_STRENGTH_LO_SHIFT) | \ ((hi) << 
DRIVE_STRENGTH_HI_SHIFT)) #define DSLO(x) (((x) >> DRIVE_STRENGTH_LO_SHIFT) & 0xF) #define DSHI(x) (((x) >> DRIVE_STRENGTH_HI_SHIFT) & 0xF) @@ -925,31 +924,31 @@ struct npcm7xx_pincfg { static const struct npcm7xx_pincfg pincfg[] = { /* PIN FUNCTION 1 FUNCTION 2 FUNCTION 3 FLAGS */ NPCM7XX_PINCFG(0, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(1, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(2, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(3, iox1, MFSEL1, 30, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(4, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW), NPCM7XX_PINCFG(5, iox2, MFSEL3, 14, smb1d, I2CSEGSEL, 7, none, NONE, 0, SLEW), NPCM7XX_PINCFG(6, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW), NPCM7XX_PINCFG(7, iox2, MFSEL3, 14, smb2d, I2CSEGSEL, 10, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(8, lkgpo1, FLOCKR1, 4, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(9, lkgpo2, FLOCKR1, 8, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(10, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(11, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(8, lkgpo1, FLOCKR1, 4, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(9, lkgpo2, FLOCKR1, 8, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(10, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(11, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(12, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW), NPCM7XX_PINCFG(13, gspi, MFSEL1, 24, smb5b, I2CSEGSEL, 19, none, NONE, 0, SLEW), NPCM7XX_PINCFG(14, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW), NPCM7XX_PINCFG(15, gspi, MFSEL1, 24, smb5c, I2CSEGSEL, 20, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(16, lkgpo0, FLOCKR1, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(17, pspi2, MFSEL3, 13, smb4den, I2CSEGSEL, 23, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(18, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(19, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(16, lkgpo0, FLOCKR1, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(17, pspi2, MFSEL3, 13, smb4den, I2CSEGSEL, 23, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(18, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(19, pspi2, MFSEL3, 13, smb4b, I2CSEGSEL, 14, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(20, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0), NPCM7XX_PINCFG(21, smb4c, I2CSEGSEL, 15, smb15, MFSEL3, 8, none, NONE, 0, 0), NPCM7XX_PINCFG(22, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0), NPCM7XX_PINCFG(23, smb4d, I2CSEGSEL, 16, smb14, MFSEL3, 7, none, NONE, 0, 0), - NPCM7XX_PINCFG(24, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(25, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(24, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(25, ioxh, MFSEL3, 18, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(26, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(27, smb5, MFSEL1, 2, none, NONE, 0, none, NONE, 0, 0), 
NPCM7XX_PINCFG(28, smb4, MFSEL1, 1, none, NONE, 0, none, NONE, 0, 0), @@ -965,12 +964,12 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(39, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW), NPCM7XX_PINCFG(40, smb3b, I2CSEGSEL, 11, none, NONE, 0, none, NONE, 0, SLEW), NPCM7XX_PINCFG(41, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(42, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, DS(2, 4) | GPO), + NPCM7XX_PINCFG(42, bmcuart0a, MFSEL1, 9, none, NONE, 0, none, NONE, 0, DSTR(2, 4) | GPO), NPCM7XX_PINCFG(43, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0), NPCM7XX_PINCFG(44, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, bmcuart1, MFSEL3, 24, 0), NPCM7XX_PINCFG(45, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(46, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)), - NPCM7XX_PINCFG(47, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DS(2, 8)), + NPCM7XX_PINCFG(46, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DSTR(2, 8)), + NPCM7XX_PINCFG(47, uart1, MFSEL1, 10, jtag2, MFSEL4, 0, none, NONE, 0, DSTR(2, 8)), NPCM7XX_PINCFG(48, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, GPO), NPCM7XX_PINCFG(49, uart2, MFSEL1, 11, bmcuart0b, MFSEL4, 1, none, NONE, 0, 0), NPCM7XX_PINCFG(50, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0), @@ -980,8 +979,8 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(54, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(55, uart2, MFSEL1, 11, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(56, r1err, MFSEL1, 12, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(57, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)), - NPCM7XX_PINCFG(58, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DS(2, 4)), + NPCM7XX_PINCFG(57, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), + NPCM7XX_PINCFG(58, r1md, MFSEL1, 13, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), NPCM7XX_PINCFG(59, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(60, smb3d, I2CSEGSEL, 13, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(61, uart1, MFSEL1, 10, none, NONE, 0, none, NONE, 0, GPO), @@ -1004,19 +1003,19 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(77, fanin13, MFSEL2, 13, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(78, fanin14, MFSEL2, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(79, fanin15, MFSEL2, 15, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(80, pwm0, MFSEL2, 16, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(81, pwm1, MFSEL2, 17, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(82, pwm2, MFSEL2, 18, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(83, pwm3, MFSEL2, 19, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(84, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(85, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 
DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(86, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(87, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(88, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(89, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(90, r2err, MFSEL1, 15, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(91, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)), - NPCM7XX_PINCFG(92, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DS(2, 4)), + NPCM7XX_PINCFG(91, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), + NPCM7XX_PINCFG(92, r2md, MFSEL1, 16, none, NONE, 0, none, NONE, 0, DSTR(2, 4)), NPCM7XX_PINCFG(93, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0), NPCM7XX_PINCFG(94, ga20kbc, MFSEL1, 17, smb5d, I2CSEGSEL, 21, none, NONE, 0, 0), NPCM7XX_PINCFG(95, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0), @@ -1062,34 +1061,34 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(133, smb10, MFSEL4, 13, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(134, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(135, smb11, MFSEL4, 14, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(136, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(137, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(138, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(139, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(140, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(136, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(137, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(138, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(139, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(140, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(141, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(142, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(142, sd1, MFSEL3, 12, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(143, sd1, MFSEL3, 12, sd1pwr, MFSEL4, 5, none, NONE, 0, 0), - NPCM7XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(144, pwm4, MFSEL2, 20, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(145, pwm5, MFSEL2, 21, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(146, pwm6, MFSEL2, 22, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(147, pwm7, MFSEL2, 23, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), + 
NPCM7XX_PINCFG(148, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(149, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(150, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(151, mmc8, MFSEL3, 11, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(152, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(153, mmcwp, FLOCKR1, 24, none, NONE, 0, none, NONE, 0, 0), /* Z1/A1 */ - NPCM7XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(154, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(155, mmccd, MFSEL3, 25, mmcrst, MFSEL4, 6, none, NONE, 0, 0), /* Z1/A1 */ - NPCM7XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - - NPCM7XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(161, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, DS(8, 12)), - NPCM7XX_PINCFG(162, serirq, NONE, 0, gpio, MFSEL1, 31, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(156, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(157, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(158, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(159, mmc, MFSEL3, 10, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + + NPCM7XX_PINCFG(160, clkout, MFSEL1, 21, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(161, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, DSTR(8, 12)), + NPCM7XX_PINCFG(162, serirq, NONE, 0, gpio, MFSEL1, 31, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(163, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, 0), NPCM7XX_PINCFG(164, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC), NPCM7XX_PINCFG(165, lpc, NONE, 0, espi, MFSEL4, 8, gpio, MFSEL1, 26, SLEWLPC), @@ -1102,25 +1101,25 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(172, smb6, MFSEL3, 1, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(173, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(174, smb7, MFSEL3, 2, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(175, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(176, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(177, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(180, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), + NPCM7XX_PINCFG(175, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(176, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(177, pspi1, MFSEL3, 4, faninx, MFSEL3, 3, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(178, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(179, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(180, r1, MFSEL3, 9, 
none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), NPCM7XX_PINCFG(181, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(182, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(183, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(184, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(185, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(186, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(187, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, DS(8, 12)), - NPCM7XX_PINCFG(188, spi3quad, MFSEL4, 20, spi3cs2, MFSEL4, 18, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(189, spi3quad, MFSEL4, 20, spi3cs3, MFSEL4, 19, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(190, gpio, FLOCKR1, 20, nprd_smi, NONE, 0, none, NONE, 0, DS(2, 4)), - NPCM7XX_PINCFG(191, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */ - - NPCM7XX_PINCFG(192, none, NONE, 0, none, NONE, 0, none, NONE, 0, DS(8, 12)), /* XX */ + NPCM7XX_PINCFG(183, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(184, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(185, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(186, spi3, MFSEL4, 16, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(187, spi3cs1, MFSEL4, 17, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), + NPCM7XX_PINCFG(188, spi3quad, MFSEL4, 20, spi3cs2, MFSEL4, 18, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(189, spi3quad, MFSEL4, 20, spi3cs3, MFSEL4, 19, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(190, gpio, FLOCKR1, 20, nprd_smi, NONE, 0, none, NONE, 0, DSTR(2, 4)), + NPCM7XX_PINCFG(191, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), /* XX */ + + NPCM7XX_PINCFG(192, none, NONE, 0, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), /* XX */ NPCM7XX_PINCFG(193, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(194, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(195, smb0b, I2CSEGSEL, 0, none, NONE, 0, none, NONE, 0, 0), @@ -1131,11 +1130,11 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(200, r2, MFSEL1, 14, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(201, r1, MFSEL3, 9, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(202, smb0c, I2CSEGSEL, 1, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(203, faninx, MFSEL3, 3, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(203, faninx, MFSEL3, 3, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(204, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW), NPCM7XX_PINCFG(205, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(206, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)), - NPCM7XX_PINCFG(207, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DS(4, 8)), + NPCM7XX_PINCFG(206, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DSTR(4, 8)), + NPCM7XX_PINCFG(207, ddc, NONE, 0, gpio, MFSEL3, 22, none, NONE, 0, DSTR(4, 8)), NPCM7XX_PINCFG(208, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0), NPCM7XX_PINCFG(209, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0), NPCM7XX_PINCFG(210, rg2, MFSEL4, 24, ddr, MFSEL3, 26, none, NONE, 0, 0), @@ -1147,20 +1146,20 @@ static const struct npcm7xx_pincfg pincfg[] = { NPCM7XX_PINCFG(216, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0), 
NPCM7XX_PINCFG(217, rg2mdio, MFSEL4, 23, ddr, MFSEL3, 26, none, NONE, 0, 0), NPCM7XX_PINCFG(218, wdog1, MFSEL3, 19, none, NONE, 0, none, NONE, 0, 0), - NPCM7XX_PINCFG(219, wdog2, MFSEL3, 20, none, NONE, 0, none, NONE, 0, DS(4, 8)), + NPCM7XX_PINCFG(219, wdog2, MFSEL3, 20, none, NONE, 0, none, NONE, 0, DSTR(4, 8)), NPCM7XX_PINCFG(220, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(221, smb12, MFSEL3, 5, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(222, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(223, smb13, MFSEL3, 6, none, NONE, 0, none, NONE, 0, 0), NPCM7XX_PINCFG(224, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, SLEW), - NPCM7XX_PINCFG(225, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(226, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW | GPO), - NPCM7XX_PINCFG(227, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(228, spixcs1, MFSEL4, 28, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(229, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(230, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DS(8, 12) | SLEW), - NPCM7XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, DS(8, 12)), + NPCM7XX_PINCFG(225, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(226, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW | GPO), + NPCM7XX_PINCFG(227, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(228, spixcs1, MFSEL4, 28, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(229, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(230, spix, MFSEL4, 27, none, NONE, 0, none, NONE, 0, DSTR(8, 12) | SLEW), + NPCM7XX_PINCFG(231, clkreq, MFSEL4, 9, none, NONE, 0, none, NONE, 0, DSTR(8, 12)), NPCM7XX_PINCFG(253, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC1 power */ NPCM7XX_PINCFG(254, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* SDHC2 power */ NPCM7XX_PINCFG(255, none, NONE, 0, none, NONE, 0, none, NONE, 0, GPI), /* DACOSEL */ @@ -1561,7 +1560,7 @@ static int npcm7xx_get_groups_count(struct pinctrl_dev *pctldev) { struct npcm7xx_pinctrl *npcm = pinctrl_dev_get_drvdata(pctldev); - dev_dbg(npcm->dev, "group size: %d\n", ARRAY_SIZE(npcm7xx_groups)); + dev_dbg(npcm->dev, "group size: %zu\n", ARRAY_SIZE(npcm7xx_groups)); return ARRAY_SIZE(npcm7xx_groups); } diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index 1e225d513988857391264d4155a9bf014f5b0c09..42e27dba62e26292554cb9b5c6b7f3414c48b6f3 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -30,10 +30,10 @@ static const struct pin_config_item conf_items[] = { PCONFDUMP(PIN_CONFIG_BIAS_BUS_HOLD, "input bias bus hold", NULL, false), PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL, false), PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL, false), - PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL, false), + PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", "ohms", true), PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, - "input bias pull to pin specific state", NULL, false), - PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false), + "input bias pull to pin specific state", "ohms", true), + PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull 
up", "ohms", true), PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false), PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false), PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false), diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 53a0badc6b035e4afd24e2d285f6e23755b65be4..9df48e0cf4cb4c028ac66b20177396a8068e1f15 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -3774,6 +3774,7 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,grf", 0); if (node) { info->regmap_base = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(info->regmap_base)) return PTR_ERR(info->regmap_base); } else { @@ -3810,6 +3811,7 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev) node = of_parse_phandle(np, "rockchip,pmu", 0); if (node) { info->regmap_pmu = syscon_node_to_regmap(node); + of_node_put(node); if (IS_ERR(info->regmap_pmu)) return PTR_ERR(info->regmap_pmu); } diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c index 9d168b90cd2810e7ae5eb3813ae0dfe5e58e88b7..258972672eda1077871df9e3db2deb015397e076 100644 --- a/drivers/pinctrl/renesas/core.c +++ b/drivers/pinctrl/renesas/core.c @@ -739,7 +739,7 @@ static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; } #ifdef DEBUG #define SH_PFC_MAX_REGS 300 -#define SH_PFC_MAX_ENUMS 3000 +#define SH_PFC_MAX_ENUMS 5000 static unsigned int sh_pfc_errors __initdata = 0; static unsigned int sh_pfc_warnings __initdata = 0; @@ -851,7 +851,8 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname, sh_pfc_check_reg(drvname, cfg_reg->reg); if (cfg_reg->field_width) { - n = cfg_reg->reg_width / cfg_reg->field_width; + fw = cfg_reg->field_width; + n = (cfg_reg->reg_width / fw) << fw; /* Skip field checks (done at build time) */ goto check_enum_ids; } diff --git a/drivers/pinctrl/renesas/pfc-r8a77470.c b/drivers/pinctrl/renesas/pfc-r8a77470.c index b3b116da1bb0dd3521fa2a619133e776079d6d2e..14005725a726b1c7fa9d46bb27da2bbde68de6a2 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77470.c +++ b/drivers/pinctrl/renesas/pfc-r8a77470.c @@ -2121,7 +2121,7 @@ static const unsigned int vin0_clk_mux[] = { VI0_CLK_MARK, }; /* - VIN1 ------------------------------------------------------------------- */ -static const union vin_data vin1_data_pins = { +static const union vin_data12 vin1_data_pins = { .data12 = { RCAR_GP_PIN(3, 1), RCAR_GP_PIN(3, 2), RCAR_GP_PIN(3, 3), RCAR_GP_PIN(3, 4), @@ -2131,7 +2131,7 @@ static const union vin_data vin1_data_pins = { RCAR_GP_PIN(3, 15), RCAR_GP_PIN(3, 16), }, }; -static const union vin_data vin1_data_mux = { +static const union vin_data12 vin1_data_mux = { .data12 = { VI1_DATA0_MARK, VI1_DATA1_MARK, VI1_DATA2_MARK, VI1_DATA3_MARK, diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 7f809a57bee50db67cab65e22fef2654220d7f8a..56fff83a143bda97c4379e82b99b9b888e0ce84f 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -1002,6 +1002,16 @@ samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev) return &(of_data->ctrl[id]); } +static void samsung_banks_of_node_put(struct samsung_pinctrl_drv_data *d) +{ + struct samsung_pin_bank *bank; + unsigned int i; + + bank = d->pin_banks; + for (i = 0; i < d->nr_banks; ++i, ++bank) + of_node_put(bank->of_node); +} + /* retrieve the soc 
specific data */ static const struct samsung_pin_ctrl * samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, @@ -1116,19 +1126,19 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) if (ctrl->retention_data) { drvdata->retention_ctrl = ctrl->retention_data->init(drvdata, ctrl->retention_data); - if (IS_ERR(drvdata->retention_ctrl)) - return PTR_ERR(drvdata->retention_ctrl); + if (IS_ERR(drvdata->retention_ctrl)) { + ret = PTR_ERR(drvdata->retention_ctrl); + goto err_put_banks; + } } ret = samsung_pinctrl_register(pdev, drvdata); if (ret) - return ret; + goto err_put_banks; ret = samsung_gpiolib_register(pdev, drvdata); - if (ret) { - samsung_pinctrl_unregister(pdev, drvdata); - return ret; - } + if (ret) + goto err_unregister; if (ctrl->eint_gpio_init) ctrl->eint_gpio_init(drvdata); @@ -1138,6 +1148,12 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) platform_set_drvdata(pdev, drvdata); return 0; + +err_unregister: + samsung_pinctrl_unregister(pdev, drvdata); +err_put_banks: + samsung_banks_of_node_put(drvdata); + return ret; } /* diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index e42a3a0005a72c562346e484b7b3ee307310ed06..be7f4f95f455daeb819746a5b64d4da1a8f7544b 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -36,6 +36,13 @@ #include "../core.h" #include "pinctrl-sunxi.h" +/* + * These lock classes tell lockdep that GPIO IRQs are in a different + * category than their parents, so it won't report false recursion. + */ +static struct lock_class_key sunxi_pinctrl_irq_lock_class; +static struct lock_class_key sunxi_pinctrl_irq_request_class; + static struct irq_chip sunxi_pinctrl_edge_irq_chip; static struct irq_chip sunxi_pinctrl_level_irq_chip; @@ -1552,6 +1559,8 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev, for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) { int irqno = irq_create_mapping(pctl->domain, i); + irq_set_lockdep_class(irqno, &sunxi_pinctrl_irq_lock_class, + &sunxi_pinctrl_irq_request_class); irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip, handle_edge_irq); irq_set_chip_data(irqno, pctl); diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile index f901d2e43166c339971aed719f8d749026dd4ad1..88cbc434c06b224b07c3b5d1cdc3fac7d2a8b9f9 100644 --- a/drivers/platform/chrome/Makefile +++ b/drivers/platform/chrome/Makefile @@ -2,6 +2,7 @@ # tell define_trace.h where to find the cros ec trace header CFLAGS_cros_ec_trace.o:= -I$(src) +CFLAGS_cros_ec_sensorhub_ring.o:= -I$(src) obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o @@ -20,7 +21,7 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o -cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o cros_ec_trace.o +cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c index 98e37080f760913ecdbeb53f611ce71cfbecbc7f..71948dade0e2aedd0a1f1b4ea09dc954c6fb5b5e 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c +++ 
b/drivers/platform/chrome/cros_ec_sensorhub_ring.c @@ -17,7 +17,8 @@ #include #include -#include "cros_ec_trace.h" +#define CREATE_TRACE_POINTS +#include "cros_ec_sensorhub_trace.h" /* Precision of fixed point for the m values from the filter */ #define M_PRECISION BIT(23) diff --git a/drivers/platform/chrome/cros_ec_sensorhub_trace.h b/drivers/platform/chrome/cros_ec_sensorhub_trace.h new file mode 100644 index 0000000000000000000000000000000000000000..57d9b47859692710b2048007524a01457a2bca48 --- /dev/null +++ b/drivers/platform/chrome/cros_ec_sensorhub_trace.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Trace events for the ChromeOS Sensorhub kernel module + * + * Copyright 2021 Google LLC. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM cros_ec + +#if !defined(_CROS_EC_SENSORHUB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _CROS_EC_SENSORHUB_TRACE_H_ + +#include +#include + +#include + +TRACE_EVENT(cros_ec_sensorhub_timestamp, + TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp, + s64 current_timestamp, s64 current_time), + TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp, + current_time), + TP_STRUCT__entry( + __field(u32, ec_sample_timestamp) + __field(u32, ec_fifo_timestamp) + __field(s64, fifo_timestamp) + __field(s64, current_timestamp) + __field(s64, current_time) + __field(s64, delta) + ), + TP_fast_assign( + __entry->ec_sample_timestamp = ec_sample_timestamp; + __entry->ec_fifo_timestamp = ec_fifo_timestamp; + __entry->fifo_timestamp = fifo_timestamp; + __entry->current_timestamp = current_timestamp; + __entry->current_time = current_time; + __entry->delta = current_timestamp - current_time; + ), + TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", + __entry->ec_sample_timestamp, + __entry->ec_fifo_timestamp, + __entry->fifo_timestamp, + __entry->current_timestamp, + __entry->current_time, + __entry->delta + ) +); + +TRACE_EVENT(cros_ec_sensorhub_data, + TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp, + s64 current_timestamp, s64 current_time), + TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time), + TP_STRUCT__entry( + __field(u32, ec_sensor_num) + __field(u32, ec_fifo_timestamp) + __field(s64, fifo_timestamp) + __field(s64, current_timestamp) + __field(s64, current_time) + __field(s64, delta) + ), + TP_fast_assign( + __entry->ec_sensor_num = ec_sensor_num; + __entry->ec_fifo_timestamp = ec_fifo_timestamp; + __entry->fifo_timestamp = fifo_timestamp; + __entry->current_timestamp = current_timestamp; + __entry->current_time = current_time; + __entry->delta = current_timestamp - current_time; + ), + TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", + __entry->ec_sensor_num, + __entry->ec_fifo_timestamp, + __entry->fifo_timestamp, + __entry->current_timestamp, + __entry->current_time, + __entry->delta + ) +); + +TRACE_EVENT(cros_ec_sensorhub_filter, + TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy), + TP_ARGS(state, dx, dy), + TP_STRUCT__entry( + __field(s64, dx) + __field(s64, dy) + __field(s64, median_m) + __field(s64, median_error) + __field(s64, history_len) + __field(s64, x) + __field(s64, y) + ), + TP_fast_assign( + __entry->dx = dx; + __entry->dy = dy; + __entry->median_m = state->median_m; + __entry->median_error = state->median_error; + __entry->history_len = state->history_len; + 
__entry->x = state->x_offset; + __entry->y = state->y_offset; + ), + TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld", + __entry->dx, + __entry->dy, + __entry->median_m, + __entry->median_error, + __entry->history_len, + __entry->x, + __entry->y + ) +); + + +#endif /* _CROS_EC_SENSORHUB_TRACE_H_ */ + +/* this part must be outside header guard */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE cros_ec_sensorhub_trace + +#include diff --git a/drivers/platform/chrome/cros_ec_trace.h b/drivers/platform/chrome/cros_ec_trace.h index 7e7cfc98657a4aea599e93efb75094f6df4c4144..9bb5cd2c98b8b4690f4633883103c21a801ba04d 100644 --- a/drivers/platform/chrome/cros_ec_trace.h +++ b/drivers/platform/chrome/cros_ec_trace.h @@ -15,7 +15,6 @@ #include #include #include -#include #include @@ -71,100 +70,6 @@ TRACE_EVENT(cros_ec_request_done, __entry->retval) ); -TRACE_EVENT(cros_ec_sensorhub_timestamp, - TP_PROTO(u32 ec_sample_timestamp, u32 ec_fifo_timestamp, s64 fifo_timestamp, - s64 current_timestamp, s64 current_time), - TP_ARGS(ec_sample_timestamp, ec_fifo_timestamp, fifo_timestamp, current_timestamp, - current_time), - TP_STRUCT__entry( - __field(u32, ec_sample_timestamp) - __field(u32, ec_fifo_timestamp) - __field(s64, fifo_timestamp) - __field(s64, current_timestamp) - __field(s64, current_time) - __field(s64, delta) - ), - TP_fast_assign( - __entry->ec_sample_timestamp = ec_sample_timestamp; - __entry->ec_fifo_timestamp = ec_fifo_timestamp; - __entry->fifo_timestamp = fifo_timestamp; - __entry->current_timestamp = current_timestamp; - __entry->current_time = current_time; - __entry->delta = current_timestamp - current_time; - ), - TP_printk("ec_ts: %9u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", - __entry->ec_sample_timestamp, - __entry->ec_fifo_timestamp, - __entry->fifo_timestamp, - __entry->current_timestamp, - __entry->current_time, - __entry->delta - ) -); - -TRACE_EVENT(cros_ec_sensorhub_data, - TP_PROTO(u32 ec_sensor_num, u32 ec_fifo_timestamp, s64 fifo_timestamp, - s64 current_timestamp, s64 current_time), - TP_ARGS(ec_sensor_num, ec_fifo_timestamp, fifo_timestamp, current_timestamp, current_time), - TP_STRUCT__entry( - __field(u32, ec_sensor_num) - __field(u32, ec_fifo_timestamp) - __field(s64, fifo_timestamp) - __field(s64, current_timestamp) - __field(s64, current_time) - __field(s64, delta) - ), - TP_fast_assign( - __entry->ec_sensor_num = ec_sensor_num; - __entry->ec_fifo_timestamp = ec_fifo_timestamp; - __entry->fifo_timestamp = fifo_timestamp; - __entry->current_timestamp = current_timestamp; - __entry->current_time = current_time; - __entry->delta = current_timestamp - current_time; - ), - TP_printk("ec_num: %4u, ec_fifo_ts: %9u, fifo_ts: %12lld, curr_ts: %12lld, curr_time: %12lld, delta %12lld", - __entry->ec_sensor_num, - __entry->ec_fifo_timestamp, - __entry->fifo_timestamp, - __entry->current_timestamp, - __entry->current_time, - __entry->delta - ) -); - -TRACE_EVENT(cros_ec_sensorhub_filter, - TP_PROTO(struct cros_ec_sensors_ts_filter_state *state, s64 dx, s64 dy), - TP_ARGS(state, dx, dy), - TP_STRUCT__entry( - __field(s64, dx) - __field(s64, dy) - __field(s64, median_m) - __field(s64, median_error) - __field(s64, history_len) - __field(s64, x) - __field(s64, y) - ), - TP_fast_assign( - __entry->dx = dx; - __entry->dy = dy; - __entry->median_m = state->median_m; - __entry->median_error = state->median_error; - 
__entry->history_len = state->history_len; - __entry->x = state->x_offset; - __entry->y = state->y_offset; - ), - TP_printk("dx: %12lld. dy: %12lld median_m: %12lld median_error: %12lld len: %lld x: %12lld y: %12lld", - __entry->dx, - __entry->dy, - __entry->median_m, - __entry->median_error, - __entry->history_len, - __entry->x, - __entry->y - ) -); - - #endif /* _CROS_EC_TRACE_H_ */ /* this part must be outside header guard */ diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c index 036d54dc52e24d810d0b228603ecfb54981f18c3..cc336457ca808cdbfc6944485b2e805c63639233 100644 --- a/drivers/platform/chrome/cros_ec_typec.c +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -712,7 +712,13 @@ static int cros_typec_probe(struct platform_device *pdev) return -ENOMEM; typec->dev = dev; + typec->ec = dev_get_drvdata(pdev->dev.parent); + if (!typec->ec) { + dev_err(dev, "couldn't find parent EC device\n"); + return -ENODEV; + } + platform_set_drvdata(pdev, typec); ret = cros_typec_get_cmd_version(typec); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index a1858689d6e10b499ddbae04cc4f92b49a9999f9..a24783aa52eae475a78ffc768b93c95eca457c9e 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -1362,6 +1362,42 @@ config INTEL_PMC_CORE - LTR Ignore - MPHY/PLL gating status (Sunrisepoint PCH only) +config INTEL_PMT_CLASS + tristate + help + The Intel Platform Monitoring Technology (PMT) class driver provides + the basic sysfs interface and file hierarchy used by PMT devices. + + For more information, see: + + + To compile this driver as a module, choose M here: the module + will be called intel_pmt_class. + +config INTEL_PMT_TELEMETRY + tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver" + depends on MFD_INTEL_PMT + select INTEL_PMT_CLASS + help + The Intel Platform Monitoring Technology (PMT) Telemetry driver provides + access to hardware telemetry metrics on devices that support the + feature. + + To compile this driver as a module, choose M here: the module + will be called intel_pmt_telemetry. + +config INTEL_PMT_CRASHLOG + tristate "Intel Platform Monitoring Technology (PMT) Crashlog driver" + depends on MFD_INTEL_PMT + select INTEL_PMT_CLASS + help + The Intel Platform Monitoring Technology (PMT) crashlog driver provides + access to hardware crashlog capabilities on devices that support the + feature. + + To compile this driver as a module, choose M here: the module + will be called intel_pmt_crashlog.
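Both feature drivers above hand their regions to the INTEL_PMT_CLASS code, which exposes each one as a small sysfs directory (guid, size, offset, plus a binary file named after the namespace). As a rough sketch of the consumer side, the userspace program below reads one telemetry region back through those files. It is a minimal illustration only: the "telem0" device name, and using read() rather than mmap(), are assumptions, not something this patch mandates.

/*
 * Minimal userspace sketch: dump the start of a PMT telemetry region
 * via the sysfs files created by intel_pmt_class.c. The "telem0"
 * name is hypothetical; enumeration differs per system.
 * Build: cc -o pmt_dump pmt_dump.c
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f;
	size_t size = 0, n, i;
	uint64_t *buf;

	/* "size" reports the length of the binary "telem" file. */
	f = fopen("/sys/class/intel_pmt/telem0/size", "r");
	if (!f || fscanf(f, "%zu", &size) != 1 || !size)
		return 1;
	fclose(f);

	buf = calloc(1, size);
	if (!buf)
		return 1;

	f = fopen("/sys/class/intel_pmt/telem0/telem", "rb");
	if (!f)
		return 1;
	n = fread(buf, 1, size, f);	/* raw register dump */
	fclose(f);

	for (i = 0; i < n / sizeof(buf[0]) && i < 4; i++)
		printf("qword %zu: 0x%016llx\n", i,
		       (unsigned long long)buf[i]);

	free(buf);
	return 0;
}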
+ config INTEL_PUNIT_IPC tristate "Intel P-Unit IPC Driver" help diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 5f823f7eff45280248f01197cd086fc81d3de66b..ca82c1344977caf10a56f6602315a5c54c3d8716 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -140,6 +140,9 @@ obj-$(CONFIG_INTEL_MFLD_THERMAL) += intel_mid_thermal.o obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o intel_pmc_core_pltdrv.o +obj-$(CONFIG_INTEL_PMT_CLASS) += intel_pmt_class.o +obj-$(CONFIG_INTEL_PMT_TELEMETRY) += intel_pmt_telemetry.o +obj-$(CONFIG_INTEL_PMT_CRASHLOG) += intel_pmt_crashlog.o obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o obj-$(CONFIG_INTEL_SCU_IPC) += intel_scu_ipc.o obj-$(CONFIG_INTEL_SCU_PCI) += intel_scu_pcidrv.o diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c index a2d846c4a7eef53c05745e7ef9514662876563e9..eac3e6b4ea113064aa47f41bf587ab22a85ac68f 100644 --- a/drivers/platform/x86/huawei-wmi.c +++ b/drivers/platform/x86/huawei-wmi.c @@ -470,10 +470,17 @@ static DEVICE_ATTR_RW(charge_control_thresholds); static int huawei_wmi_battery_add(struct power_supply *battery) { - device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold); - device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold); + int err = 0; - return 0; + err = device_create_file(&battery->dev, &dev_attr_charge_control_start_threshold); + if (err) + return err; + + err = device_create_file(&battery->dev, &dev_attr_charge_control_end_threshold); + if (err) + device_remove_file(&battery->dev, &dev_attr_charge_control_start_threshold); + + return err; } static int huawei_wmi_battery_remove(struct power_supply *battery) diff --git a/drivers/platform/x86/intel_pmt_class.c b/drivers/platform/x86/intel_pmt_class.c new file mode 100644 index 0000000000000000000000000000000000000000..c86ff15b1ed522be06f2c670a2f62c3bef70aa7d --- /dev/null +++ b/drivers/platform/x86/intel_pmt_class.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Platform Monitoring Technology Telemetry driver + * + * Copyright (c) 2020, Intel Corporation. + * All Rights Reserved. + * + * Author: "Alexander Duyck" + */ + +#include +#include +#include +#include + +#include "intel_pmt_class.h" + +#define PMT_XA_START 0 +#define PMT_XA_MAX INT_MAX +#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) + +/* + * Early implementations of PMT on client platforms have some + * differences from the server platforms (which use the Out Of Band + * Management Services Module OOBMSM). This list tracks those + * platforms as needed to handle those differences. Newer client + * platforms are expected to be fully compatible with server.
+ */ +static const struct pci_device_id pmt_telem_early_client_pci_ids[] = { + { PCI_VDEVICE(INTEL, 0x467d) }, /* ADL */ + { PCI_VDEVICE(INTEL, 0x490e) }, /* DG1 */ + { PCI_VDEVICE(INTEL, 0x9a0d) }, /* TGL */ + { } +}; + +bool intel_pmt_is_early_client_hw(struct device *dev) +{ + struct pci_dev *parent = to_pci_dev(dev->parent); + + return !!pci_match_id(pmt_telem_early_client_pci_ids, parent); +} +EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw); + +/* + * sysfs + */ +static ssize_t +intel_pmt_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, + size_t count) +{ + struct intel_pmt_entry *entry = container_of(attr, + struct intel_pmt_entry, + pmt_bin_attr); + + if (off < 0) + return -EINVAL; + + if (off >= entry->size) + return 0; + + if (count > entry->size - off) + count = entry->size - off; + + memcpy_fromio(buf, entry->base + off, count); + + return count; +} + +static int +intel_pmt_mmap(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, struct vm_area_struct *vma) +{ + struct intel_pmt_entry *entry = container_of(attr, + struct intel_pmt_entry, + pmt_bin_attr); + unsigned long vsize = vma->vm_end - vma->vm_start; + struct device *dev = kobj_to_dev(kobj); + unsigned long phys = entry->base_addr; + unsigned long pfn = PFN_DOWN(phys); + unsigned long psize; + + if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) + return -EROFS; + + psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE; + if (vsize > psize) { + dev_err(dev, "Requested mmap size is too large\n"); + return -EINVAL; + } + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (io_remap_pfn_range(vma, vma->vm_start, pfn, + vsize, vma->vm_page_prot)) + return -EAGAIN; + + return 0; +} + +static ssize_t +guid_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct intel_pmt_entry *entry = dev_get_drvdata(dev); + + return sprintf(buf, "0x%x\n", entry->guid); +} +static DEVICE_ATTR_RO(guid); + +static ssize_t size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct intel_pmt_entry *entry = dev_get_drvdata(dev); + + return sprintf(buf, "%zu\n", entry->size); +} +static DEVICE_ATTR_RO(size); + +static ssize_t +offset_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct intel_pmt_entry *entry = dev_get_drvdata(dev); + + return sprintf(buf, "%lu\n", offset_in_page(entry->base_addr)); +} +static DEVICE_ATTR_RO(offset); + +static struct attribute *intel_pmt_attrs[] = { + &dev_attr_guid.attr, + &dev_attr_size.attr, + &dev_attr_offset.attr, + NULL +}; +ATTRIBUTE_GROUPS(intel_pmt); + +static struct class intel_pmt_class = { + .name = "intel_pmt", + .owner = THIS_MODULE, + .dev_groups = intel_pmt_groups, +}; + +static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, + struct intel_pmt_header *header, + struct device *dev, + struct resource *disc_res) +{ + struct pci_dev *pci_dev = to_pci_dev(dev->parent); + u8 bir; + + /* + * The base offset should always be 8 byte aligned. + * + * For non-local access types the lower 3 bits of base offset + * contains the index of the base address register where the + * telemetry can be found. 
+ */ + bir = GET_BIR(header->base_offset); + + /* Local access and BARID only for now */ + switch (header->access_type) { + case ACCESS_LOCAL: + if (bir) { + dev_err(dev, + "Unsupported BAR index %d for access type %d\n", + bir, header->access_type); + return -EINVAL; + } + /* + * For access_type LOCAL, the base address is as follows: + * base address = end of discovery region + base offset + */ + entry->base_addr = disc_res->end + 1 + header->base_offset; + + /* + * Some hardware uses a different calculation for the base address + * when access_type == ACCESS_LOCAL. On these systems + * ACCESS_LOCAL refers to an address in the same BAR as the + * header but at a fixed offset. But as the header address was + * supplied to the driver, we don't know which BAR it was in. + * So search for the BAR whose range includes the header address. + */ + if (intel_pmt_is_early_client_hw(dev)) { + int i; + + entry->base_addr = 0; + for (i = 0; i < 6; i++) + if (disc_res->start >= pci_resource_start(pci_dev, i) && + (disc_res->start <= pci_resource_end(pci_dev, i))) { + entry->base_addr = pci_resource_start(pci_dev, i) + + header->base_offset; + break; + } + if (!entry->base_addr) + return -EINVAL; + } + + break; + case ACCESS_BARID: + /* + * If another BAR was specified then the base offset + * represents the offset within that BAR. So retrieve the + * address from the parent PCI device and add offset. + */ + entry->base_addr = pci_resource_start(pci_dev, bir) + + GET_ADDRESS(header->base_offset); + break; + default: + dev_err(dev, "Unsupported access type %d\n", + header->access_type); + return -EINVAL; + } + + entry->guid = header->guid; + entry->size = header->size; + + return 0; +} + +static int intel_pmt_dev_register(struct intel_pmt_entry *entry, + struct intel_pmt_namespace *ns, + struct device *parent) +{ + struct resource res = {0}; + struct device *dev; + int ret; + + ret = xa_alloc(ns->xa, &entry->devid, entry, PMT_XA_LIMIT, GFP_KERNEL); + if (ret) + return ret; + + dev = device_create(&intel_pmt_class, parent, MKDEV(0, 0), entry, + "%s%d", ns->name, entry->devid); + + if (IS_ERR(dev)) { + dev_err(parent, "Could not create %s%d device node\n", + ns->name, entry->devid); + ret = PTR_ERR(dev); + goto fail_dev_create; + } + + entry->kobj = &dev->kobj; + + if (ns->attr_grp) { + ret = sysfs_create_group(entry->kobj, ns->attr_grp); + if (ret) + goto fail_sysfs; + } + + /* if size is 0 assume no data buffer, so no file needed */ + if (!entry->size) + return 0; + + res.start = entry->base_addr; + res.end = res.start + entry->size - 1; + res.flags = IORESOURCE_MEM; + + entry->base = devm_ioremap_resource(dev, &res); + if (IS_ERR(entry->base)) { + ret = PTR_ERR(entry->base); + goto fail_ioremap; + } + + sysfs_bin_attr_init(&entry->pmt_bin_attr); + entry->pmt_bin_attr.attr.name = ns->name; + entry->pmt_bin_attr.attr.mode = 0440; + entry->pmt_bin_attr.mmap = intel_pmt_mmap; + entry->pmt_bin_attr.read = intel_pmt_read; + entry->pmt_bin_attr.size = entry->size; + + ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr); + if (!ret) + return 0; + +fail_ioremap: + if (ns->attr_grp) + sysfs_remove_group(entry->kobj, ns->attr_grp); +fail_sysfs: + device_unregister(dev); +fail_dev_create: + xa_erase(ns->xa, entry->devid); + + return ret; +} + +int intel_pmt_dev_create(struct intel_pmt_entry *entry, + struct intel_pmt_namespace *ns, + struct platform_device *pdev, int idx) +{ + struct intel_pmt_header header; + struct resource *disc_res; + int ret = -ENODEV; + + disc_res = platform_get_resource(pdev,
IORESOURCE_MEM, idx); + if (!disc_res) + return ret; + + entry->disc_table = devm_platform_ioremap_resource(pdev, idx); + if (IS_ERR(entry->disc_table)) + return PTR_ERR(entry->disc_table); + + ret = ns->pmt_header_decode(entry, &header, &pdev->dev); + if (ret) + return ret; + + ret = intel_pmt_populate_entry(entry, &header, &pdev->dev, disc_res); + if (ret) + return ret; + + return intel_pmt_dev_register(entry, ns, &pdev->dev); + +} +EXPORT_SYMBOL_GPL(intel_pmt_dev_create); + +void intel_pmt_dev_destroy(struct intel_pmt_entry *entry, + struct intel_pmt_namespace *ns) +{ + struct device *dev = kobj_to_dev(entry->kobj); + + if (entry->size) + sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr); + + if (ns->attr_grp) + sysfs_remove_group(entry->kobj, ns->attr_grp); + + device_unregister(dev); + xa_erase(ns->xa, entry->devid); +} +EXPORT_SYMBOL_GPL(intel_pmt_dev_destroy); + +static int __init pmt_class_init(void) +{ + return class_register(&intel_pmt_class); +} + +static void __exit pmt_class_exit(void) +{ + class_unregister(&intel_pmt_class); +} + +module_init(pmt_class_init); +module_exit(pmt_class_exit); + +MODULE_AUTHOR("Alexander Duyck "); +MODULE_DESCRIPTION("Intel PMT Class driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_pmt_class.h b/drivers/platform/x86/intel_pmt_class.h new file mode 100644 index 0000000000000000000000000000000000000000..1337019c2873eb37b3abda073700228437bb0b36 --- /dev/null +++ b/drivers/platform/x86/intel_pmt_class.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _INTEL_PMT_CLASS_H +#define _INTEL_PMT_CLASS_H + +#include +#include +#include +#include +#include +#include + +/* PMT access types */ +#define ACCESS_BARID 2 +#define ACCESS_LOCAL 3 + +/* PMT discovery base address/offset register layout */ +#define GET_BIR(v) ((v) & GENMASK(2, 0)) +#define GET_ADDRESS(v) ((v) & GENMASK(31, 3)) + +struct intel_pmt_entry { + struct bin_attribute pmt_bin_attr; + struct kobject *kobj; + void __iomem *disc_table; + void __iomem *base; + unsigned long base_addr; + size_t size; + u32 guid; + int devid; +}; + +struct intel_pmt_header { + u32 base_offset; + u32 size; + u32 guid; + u8 access_type; +}; + +struct intel_pmt_namespace { + const char *name; + struct xarray *xa; + const struct attribute_group *attr_grp; + int (*pmt_header_decode)(struct intel_pmt_entry *entry, + struct intel_pmt_header *header, + struct device *dev); +}; + +bool intel_pmt_is_early_client_hw(struct device *dev); +int intel_pmt_dev_create(struct intel_pmt_entry *entry, + struct intel_pmt_namespace *ns, + struct platform_device *pdev, int idx); +void intel_pmt_dev_destroy(struct intel_pmt_entry *entry, + struct intel_pmt_namespace *ns); +#endif diff --git a/drivers/platform/x86/intel_pmt_crashlog.c b/drivers/platform/x86/intel_pmt_crashlog.c new file mode 100644 index 0000000000000000000000000000000000000000..56963ceb6345f88408be9d686ac7f1c3fa057251 --- /dev/null +++ b/drivers/platform/x86/intel_pmt_crashlog.c @@ -0,0 +1,327 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Platform Monitoring Technology Crashlog driver + * + * Copyright (c) 2020, Intel Corporation. + * All Rights Reserved. + * + * Author: "Alexander Duyck" + */ + +#include +#include +#include +#include +#include +#include + +#include "intel_pmt_class.h" + +#define DRV_NAME "pmt_crashlog" + +/* Crashlog discovery header types */ +#define CRASH_TYPE_OOBMSM 1 + +/* Control Flags */ +#define CRASHLOG_FLAG_DISABLE BIT(28) + +/* + * Bits 29 and 30 control the state of bit 31. 
+ * + * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured. + * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31. + * Bit 31 is the read-only status with a 1 indicating log is complete. + */ +#define CRASHLOG_FLAG_TRIGGER_CLEAR BIT(29) +#define CRASHLOG_FLAG_TRIGGER_EXECUTE BIT(30) +#define CRASHLOG_FLAG_TRIGGER_COMPLETE BIT(31) +#define CRASHLOG_FLAG_TRIGGER_MASK GENMASK(31, 28) + +/* Crashlog Discovery Header */ +#define CONTROL_OFFSET 0x0 +#define GUID_OFFSET 0x4 +#define BASE_OFFSET 0x8 +#define SIZE_OFFSET 0xC +#define GET_ACCESS(v) ((v) & GENMASK(3, 0)) +#define GET_TYPE(v) (((v) & GENMASK(7, 4)) >> 4) +#define GET_VERSION(v) (((v) & GENMASK(19, 16)) >> 16) +/* size is in bytes */ +#define GET_SIZE(v) ((v) * sizeof(u32)) + +struct crashlog_entry { + /* entry must be first member of struct */ + struct intel_pmt_entry entry; + struct mutex control_mutex; +}; + +struct pmt_crashlog_priv { + int num_entries; + struct crashlog_entry entry[]; +}; + +/* + * I/O + */ +static bool pmt_crashlog_complete(struct intel_pmt_entry *entry) +{ + u32 control = readl(entry->disc_table + CONTROL_OFFSET); + + /* return current value of the crashlog complete flag */ + return !!(control & CRASHLOG_FLAG_TRIGGER_COMPLETE); +} + +static bool pmt_crashlog_disabled(struct intel_pmt_entry *entry) +{ + u32 control = readl(entry->disc_table + CONTROL_OFFSET); + + /* return current value of the crashlog disabled flag */ + return !!(control & CRASHLOG_FLAG_DISABLE); +} + +static bool pmt_crashlog_supported(struct intel_pmt_entry *entry) +{ + u32 discovery_header = readl(entry->disc_table + CONTROL_OFFSET); + u32 crash_type, version; + + crash_type = GET_TYPE(discovery_header); + version = GET_VERSION(discovery_header); + + /* + * Currently we only recognize OOBMSM version 0 devices. + * We can ignore all other crashlog devices in the system. 
+ */ + return crash_type == CRASH_TYPE_OOBMSM && version == 0; +} + +static void pmt_crashlog_set_disable(struct intel_pmt_entry *entry, + bool disable) +{ + u32 control = readl(entry->disc_table + CONTROL_OFFSET); + + /* clear trigger bits so we are only modifying disable flag */ + control &= ~CRASHLOG_FLAG_TRIGGER_MASK; + + if (disable) + control |= CRASHLOG_FLAG_DISABLE; + else + control &= ~CRASHLOG_FLAG_DISABLE; + + writel(control, entry->disc_table + CONTROL_OFFSET); +} + +static void pmt_crashlog_set_clear(struct intel_pmt_entry *entry) +{ + u32 control = readl(entry->disc_table + CONTROL_OFFSET); + + control &= ~CRASHLOG_FLAG_TRIGGER_MASK; + control |= CRASHLOG_FLAG_TRIGGER_CLEAR; + + writel(control, entry->disc_table + CONTROL_OFFSET); +} + +static void pmt_crashlog_set_execute(struct intel_pmt_entry *entry) +{ + u32 control = readl(entry->disc_table + CONTROL_OFFSET); + + control &= ~CRASHLOG_FLAG_TRIGGER_MASK; + control |= CRASHLOG_FLAG_TRIGGER_EXECUTE; + + writel(control, entry->disc_table + CONTROL_OFFSET); +} + +/* + * sysfs + */ +static ssize_t +enable_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct intel_pmt_entry *entry = dev_get_drvdata(dev); + int enabled = !pmt_crashlog_disabled(entry); + + return sprintf(buf, "%d\n", enabled); +} + +static ssize_t +enable_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct crashlog_entry *entry; + bool enabled; + int result; + + entry = dev_get_drvdata(dev); + + result = kstrtobool(buf, &enabled); + if (result) + return result; + + mutex_lock(&entry->control_mutex); + pmt_crashlog_set_disable(&entry->entry, !enabled); + mutex_unlock(&entry->control_mutex); + + return count; +} +static DEVICE_ATTR_RW(enable); + +static ssize_t +trigger_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct intel_pmt_entry *entry; + int trigger; + + entry = dev_get_drvdata(dev); + trigger = pmt_crashlog_complete(entry); + + return sprintf(buf, "%d\n", trigger); +} + +static ssize_t +trigger_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct crashlog_entry *entry; + bool trigger; + int result; + + entry = dev_get_drvdata(dev); + + result = kstrtobool(buf, &trigger); + if (result) + return result; + + mutex_lock(&entry->control_mutex); + + if (!trigger) { + pmt_crashlog_set_clear(&entry->entry); + } else if (pmt_crashlog_complete(&entry->entry)) { + /* we cannot trigger a new crash if one is still pending */ + result = -EEXIST; + goto err; + } else if (pmt_crashlog_disabled(&entry->entry)) { + /* if device is currently disabled, return busy */ + result = -EBUSY; + goto err; + } else { + pmt_crashlog_set_execute(&entry->entry); + } + + result = count; +err: + mutex_unlock(&entry->control_mutex); + return result; +} +static DEVICE_ATTR_RW(trigger); + +static struct attribute *pmt_crashlog_attrs[] = { + &dev_attr_enable.attr, + &dev_attr_trigger.attr, + NULL +}; + +static const struct attribute_group pmt_crashlog_group = { + .attrs = pmt_crashlog_attrs, +}; + +static int pmt_crashlog_header_decode(struct intel_pmt_entry *entry, + struct intel_pmt_header *header, + struct device *dev) +{ + void __iomem *disc_table = entry->disc_table; + struct crashlog_entry *crashlog; + + if (!pmt_crashlog_supported(entry)) + return 1; + + /* initialize control mutex */ + crashlog = container_of(entry, struct crashlog_entry, entry); + mutex_init(&crashlog->control_mutex); + + header->access_type = 
GET_ACCESS(readl(disc_table)); + header->guid = readl(disc_table + GUID_OFFSET); + header->base_offset = readl(disc_table + BASE_OFFSET); + + /* Size is measured in DWORDS, but accessor returns bytes */ + header->size = GET_SIZE(readl(disc_table + SIZE_OFFSET)); + + return 0; +} + +static DEFINE_XARRAY_ALLOC(crashlog_array); +static struct intel_pmt_namespace pmt_crashlog_ns = { + .name = "crashlog", + .xa = &crashlog_array, + .attr_grp = &pmt_crashlog_group, + .pmt_header_decode = pmt_crashlog_header_decode, +}; + +/* + * initialization + */ +static int pmt_crashlog_remove(struct platform_device *pdev) +{ + struct pmt_crashlog_priv *priv = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < priv->num_entries; i++) + intel_pmt_dev_destroy(&priv->entry[i].entry, &pmt_crashlog_ns); + + return 0; +} + +static int pmt_crashlog_probe(struct platform_device *pdev) +{ + struct pmt_crashlog_priv *priv; + size_t size; + int i, ret; + + size = struct_size(priv, entry, pdev->num_resources); + priv = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + for (i = 0; i < pdev->num_resources; i++) { + struct intel_pmt_entry *entry = &priv->entry[i].entry; + + ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, pdev, i); + if (ret < 0) + goto abort_probe; + if (ret) + continue; + + priv->num_entries++; + } + + return 0; +abort_probe: + pmt_crashlog_remove(pdev); + return ret; +} + +static struct platform_driver pmt_crashlog_driver = { + .driver = { + .name = DRV_NAME, + }, + .remove = pmt_crashlog_remove, + .probe = pmt_crashlog_probe, +}; + +static int __init pmt_crashlog_init(void) +{ + return platform_driver_register(&pmt_crashlog_driver); +} + +static void __exit pmt_crashlog_exit(void) +{ + platform_driver_unregister(&pmt_crashlog_driver); + xa_destroy(&crashlog_array); +} + +module_init(pmt_crashlog_init); +module_exit(pmt_crashlog_exit); + +MODULE_AUTHOR("Alexander Duyck "); +MODULE_DESCRIPTION("Intel PMT Crashlog driver"); +MODULE_ALIAS("platform:" DRV_NAME); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_pmt_telemetry.c b/drivers/platform/x86/intel_pmt_telemetry.c new file mode 100644 index 0000000000000000000000000000000000000000..9f845e70a1f84f9d54fbf07896a786ecc74b7307 --- /dev/null +++ b/drivers/platform/x86/intel_pmt_telemetry.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Platform Monitoring Technology Telemetry driver + * + * Copyright (c) 2020, Intel Corporation. + * All Rights Reserved. + * + * Author: "David E.
Box" + */ + +#include +#include +#include +#include +#include +#include + +#include "intel_pmt_class.h" + +#define TELEM_DEV_NAME "pmt_telemetry" + +#define TELEM_SIZE_OFFSET 0x0 +#define TELEM_GUID_OFFSET 0x4 +#define TELEM_BASE_OFFSET 0x8 +#define TELEM_ACCESS(v) ((v) & GENMASK(3, 0)) +/* size is in bytes */ +#define TELEM_SIZE(v) (((v) & GENMASK(27, 12)) >> 10) + +/* Used by client hardware to identify a fixed telemetry entry*/ +#define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000 + +struct pmt_telem_priv { + int num_entries; + struct intel_pmt_entry entry[]; +}; + +static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry, + struct device *dev) +{ + u32 guid = readl(entry->disc_table + TELEM_GUID_OFFSET); + + if (guid != TELEM_CLIENT_FIXED_BLOCK_GUID) + return false; + + return intel_pmt_is_early_client_hw(dev); +} + +static int pmt_telem_header_decode(struct intel_pmt_entry *entry, + struct intel_pmt_header *header, + struct device *dev) +{ + void __iomem *disc_table = entry->disc_table; + + if (pmt_telem_region_overlaps(entry, dev)) + return 1; + + header->access_type = TELEM_ACCESS(readl(disc_table)); + header->guid = readl(disc_table + TELEM_GUID_OFFSET); + header->base_offset = readl(disc_table + TELEM_BASE_OFFSET); + + /* Size is measured in DWORDS, but accessor returns bytes */ + header->size = TELEM_SIZE(readl(disc_table)); + + /* + * Some devices may expose non-functioning entries that are + * reserved for future use. They have zero size. Do not fail + * probe for these. Just ignore them. + */ + if (header->size == 0) + return 1; + + return 0; +} + +static DEFINE_XARRAY_ALLOC(telem_array); +static struct intel_pmt_namespace pmt_telem_ns = { + .name = "telem", + .xa = &telem_array, + .pmt_header_decode = pmt_telem_header_decode, +}; + +static int pmt_telem_remove(struct platform_device *pdev) +{ + struct pmt_telem_priv *priv = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < priv->num_entries; i++) + intel_pmt_dev_destroy(&priv->entry[i], &pmt_telem_ns); + + return 0; +} + +static int pmt_telem_probe(struct platform_device *pdev) +{ + struct pmt_telem_priv *priv; + size_t size; + int i, ret; + + size = struct_size(priv, entry, pdev->num_resources); + priv = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + for (i = 0; i < pdev->num_resources; i++) { + struct intel_pmt_entry *entry = &priv->entry[i]; + + ret = intel_pmt_dev_create(entry, &pmt_telem_ns, pdev, i); + if (ret < 0) + goto abort_probe; + if (ret) + continue; + + priv->num_entries++; + } + + return 0; +abort_probe: + pmt_telem_remove(pdev); + return ret; +} + +static struct platform_driver pmt_telem_driver = { + .driver = { + .name = TELEM_DEV_NAME, + }, + .remove = pmt_telem_remove, + .probe = pmt_telem_probe, +}; + +static int __init pmt_telem_init(void) +{ + return platform_driver_register(&pmt_telem_driver); +} +module_init(pmt_telem_init); + +static void __exit pmt_telem_exit(void) +{ + platform_driver_unregister(&pmt_telem_driver); + xa_destroy(&telem_array); +} +module_exit(pmt_telem_exit); + +MODULE_AUTHOR("David E. 
Box "); +MODULE_DESCRIPTION("Intel PMT Telemetry driver"); +MODULE_ALIAS("platform:" TELEM_DEV_NAME); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c index 0c2aa22c7a12eaf36d9670dc450868fbc64b4543..407afafc7e83f0592e7e96a992974c462e43af42 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c @@ -532,7 +532,10 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, return ret; } -static DEFINE_MUTEX(punit_misc_dev_lock); +/* Lock to prevent module registration when already opened by user space */ +static DEFINE_MUTEX(punit_misc_dev_open_lock); +/* Lock to allow one share misc device for all ISST interace */ +static DEFINE_MUTEX(punit_misc_dev_reg_lock); static int misc_usage_count; static int misc_device_ret; static int misc_device_open; @@ -542,7 +545,7 @@ static int isst_if_open(struct inode *inode, struct file *file) int i, ret = 0; /* Fail open, if a module is going away */ - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); for (i = 0; i < ISST_IF_DEV_MAX; ++i) { struct isst_if_cmd_cb *cb = &punit_callbacks[i]; @@ -564,7 +567,7 @@ static int isst_if_open(struct inode *inode, struct file *file) } else { misc_device_open++; } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return ret; } @@ -573,7 +576,7 @@ static int isst_if_relase(struct inode *inode, struct file *f) { int i; - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); misc_device_open--; for (i = 0; i < ISST_IF_DEV_MAX; ++i) { struct isst_if_cmd_cb *cb = &punit_callbacks[i]; @@ -581,7 +584,7 @@ static int isst_if_relase(struct inode *inode, struct file *f) if (cb->registered) module_put(cb->owner); } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return 0; } @@ -598,6 +601,43 @@ static struct miscdevice isst_if_char_driver = { .fops = &isst_if_char_driver_ops, }; +static int isst_misc_reg(void) +{ + mutex_lock(&punit_misc_dev_reg_lock); + if (misc_device_ret) + goto unlock_exit; + + if (!misc_usage_count) { + misc_device_ret = isst_if_cpu_info_init(); + if (misc_device_ret) + goto unlock_exit; + + misc_device_ret = misc_register(&isst_if_char_driver); + if (misc_device_ret) { + isst_if_cpu_info_exit(); + goto unlock_exit; + } + } + misc_usage_count++; + +unlock_exit: + mutex_unlock(&punit_misc_dev_reg_lock); + + return misc_device_ret; +} + +static void isst_misc_unreg(void) +{ + mutex_lock(&punit_misc_dev_reg_lock); + if (misc_usage_count) + misc_usage_count--; + if (!misc_usage_count && !misc_device_ret) { + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); + } + mutex_unlock(&punit_misc_dev_reg_lock); +} + /** * isst_if_cdev_register() - Register callback for IOCTL * @device_type: The device type this callback handling. 
@@ -615,38 +655,31 @@ */ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) { - if (misc_device_ret) - return misc_device_ret; + int ret; if (device_type >= ISST_IF_DEV_MAX) return -EINVAL; - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); + /* Device is already open, we don't want to add new callbacks */ if (misc_device_open) { - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return -EAGAIN; } - if (!misc_usage_count) { - int ret; - - misc_device_ret = misc_register(&isst_if_char_driver); - if (misc_device_ret) - goto unlock_exit; - - ret = isst_if_cpu_info_init(); - if (ret) { - misc_deregister(&isst_if_char_driver); - misc_device_ret = ret; - goto unlock_exit; - } - } memcpy(&punit_callbacks[device_type], cb, sizeof(*cb)); punit_callbacks[device_type].registered = 1; - misc_usage_count++; -unlock_exit: - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); - return misc_device_ret; + ret = isst_misc_reg(); + if (ret) { + /* + * No need for the mutex as the misc device registration failed, + * so no one can open the device yet. Hence no contention. + */ + punit_callbacks[device_type].registered = 0; + return ret; + } + return 0; } EXPORT_SYMBOL_GPL(isst_if_cdev_register); @@ -661,16 +694,12 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register); */ void isst_if_cdev_unregister(int device_type) { - mutex_lock(&punit_misc_dev_lock); - misc_usage_count--; + isst_misc_unreg(); + mutex_lock(&punit_misc_dev_open_lock); punit_callbacks[device_type].registered = 0; if (device_type == ISST_IF_DEV_MBOX) isst_delete_hash(); - if (!misc_usage_count && !misc_device_ret) { - misc_deregister(&isst_if_char_driver); - isst_if_cpu_info_exit(); - } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); } EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); diff --git a/drivers/platform/x86/surface3_power.c b/drivers/platform/x86/surface3_power.c index cc4f9cba68563c137c1c71785e8d6c26479ef2c6..01aacf1bee0749d63836c3553337871754c633a4 100644 --- a/drivers/platform/x86/surface3_power.c +++ b/drivers/platform/x86/surface3_power.c @@ -233,14 +233,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix) } bix->last_full_charg_capacity = ret; - /* get serial number */ + /* + * Get the serial number; on some devices (with an unofficial replacement + * battery?) reading any of the serial number range addresses gets + * nacked. In this case just leave the serial number empty.
+ */ ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO, sizeof(buf), buf); - if (ret != sizeof(buf)) { + if (ret == -EREMOTEIO) { + /* no serial number available */ + } else if (ret != sizeof(buf)) { dev_err(&client->dev, "Error reading serial no: %d\n", ret); return ret; + } else { + snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); } - snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf); /* get cycle count */ ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT); diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 59b7e90cd587569f4feef93cf43223caed96bf87..ab6a9369649dba450c19fd14324f45b68838b3b3 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -756,6 +756,21 @@ static const struct ts_dmi_data predia_basic_data = { .properties = predia_basic_props, }; +static const struct property_entry rwc_nanote_p8_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-y", 46), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1728), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data rwc_nanote_p8_data = { + .acpi_name = "MSSL1680:00", + .properties = rwc_nanote_p8_props, +}; + static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1715), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), @@ -1326,6 +1341,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"), }, }, + { + /* RWC NANOTE P8 */ + .driver_data = (void *)&rwc_nanote_p8_data, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"), + DMI_MATCH(DMI_PRODUCT_SKU, "0001") + }, + }, { /* Schneider SCT101CTM */ .driver_data = (void *)&schneider_sct101ctm_data, diff --git a/drivers/power/reset/gemini-poweroff.c b/drivers/power/reset/gemini-poweroff.c index 90e35c07240aee158c3478a4f6a248c15b138552..b7f7a8225f22e133726833e78b39636bec967607 100644 --- a/drivers/power/reset/gemini-poweroff.c +++ b/drivers/power/reset/gemini-poweroff.c @@ -107,8 +107,8 @@ static int gemini_poweroff_probe(struct platform_device *pdev) return PTR_ERR(gpw->base); irq = platform_get_irq(pdev, 0); - if (!irq) - return -EINVAL; + if (irq < 0) + return irq; gpw->dev = dev; diff --git a/drivers/power/reset/mt6323-poweroff.c b/drivers/power/reset/mt6323-poweroff.c index 0532803e6cbc40225f8151d1716279d899d166a5..d90e76fcb938384a6f3e85acc920bb5394799b42 100644 --- a/drivers/power/reset/mt6323-poweroff.c +++ b/drivers/power/reset/mt6323-poweroff.c @@ -57,6 +57,9 @@ static int mt6323_pwrc_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + pwrc->base = res->start; pwrc->regmap = mt6397_chip->regmap; pwrc->dev = &pdev->dev; diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index f1da757c939f8acdfa192997bfc3fb6325b15eee..a6b4a94c276627009ce4dbf4d25b92b3f200f73c 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -2541,8 +2541,10 @@ static int ab8500_fg_sysfs_init(struct ab8500_fg *di) ret = kobject_init_and_add(&di->fg_kobject, &ab8500_fg_ktype, NULL, "battery"); - if (ret < 0) + if (ret < 0) { + 
kobject_put(&di->fg_kobject); dev_err(di->dev, "failed to create sysfs entry\n"); + } return ret; } diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c index e84b6e4da14a8a9e522c18dffad341a403301c88..9fda98b950bab406dbbd4a758e6031edc5288e57 100644 --- a/drivers/power/supply/axp20x_battery.c +++ b/drivers/power/supply/axp20x_battery.c @@ -185,7 +185,6 @@ static int axp20x_battery_get_prop(struct power_supply *psy, union power_supply_propval *val) { struct axp20x_batt_ps *axp20x_batt = power_supply_get_drvdata(psy); - struct iio_channel *chan; int ret = 0, reg, val1; switch (psp) { @@ -265,12 +264,12 @@ static int axp20x_battery_get_prop(struct power_supply *psy, if (ret) return ret; - if (reg & AXP20X_PWR_STATUS_BAT_CHARGING) - chan = axp20x_batt->batt_chrg_i; - else - chan = axp20x_batt->batt_dischrg_i; - - ret = iio_read_channel_processed(chan, &val->intval); + if (reg & AXP20X_PWR_STATUS_BAT_CHARGING) { + ret = iio_read_channel_processed(axp20x_batt->batt_chrg_i, &val->intval); + } else { + ret = iio_read_channel_processed(axp20x_batt->batt_dischrg_i, &val1); + val->intval = -val1; + } if (ret) return ret; diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index a4df1ea923864d46b1fa4892e34a80e9c1592782..f65bf7b295c59412afad9ae419df90d8a4a22ee5 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -41,11 +41,11 @@ #define VBUS_ISPOUT_CUR_LIM_1500MA 0x1 /* 1500mA */ #define VBUS_ISPOUT_CUR_LIM_2000MA 0x2 /* 2000mA */ #define VBUS_ISPOUT_CUR_NO_LIM 0x3 /* 2500mA */ -#define VBUS_ISPOUT_VHOLD_SET_MASK 0x31 +#define VBUS_ISPOUT_VHOLD_SET_MASK 0x38 #define VBUS_ISPOUT_VHOLD_SET_BIT_POS 0x3 #define VBUS_ISPOUT_VHOLD_SET_OFFSET 4000 /* 4000mV */ #define VBUS_ISPOUT_VHOLD_SET_LSB_RES 100 /* 100mV */ -#define VBUS_ISPOUT_VHOLD_SET_4300MV 0x3 /* 4300mV */ +#define VBUS_ISPOUT_VHOLD_SET_4400MV 0x4 /* 4400mV */ #define VBUS_ISPOUT_VBUS_PATH_DIS BIT(7) #define CHRG_CCCV_CC_MASK 0xf /* 4 bits */ @@ -744,6 +744,16 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info) ret = axp288_charger_vbus_path_select(info, true); if (ret < 0) return ret; + } else { + /* Set Vhold to the factory default / recommended 4.4V */ + val = VBUS_ISPOUT_VHOLD_SET_4400MV << VBUS_ISPOUT_VHOLD_SET_BIT_POS; + ret = regmap_update_bits(info->regmap, AXP20X_VBUS_IPSOUT_MGMT, + VBUS_ISPOUT_VHOLD_SET_MASK, val); + if (ret < 0) { + dev_err(&info->pdev->dev, "register(%x) write error(%d)\n", + AXP20X_VBUS_IPSOUT_MGMT, ret); + return ret; + } } /* Read current charge voltage and current limit */ diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index 845af0f44c022c853a2d5dc9f0eb5351211132d3..8c3c378dce0d545076017f54a1c5ce1658fb13a4 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -41,6 +41,7 @@ #define BQ24190_REG_POC_CHG_CONFIG_DISABLE 0x0 #define BQ24190_REG_POC_CHG_CONFIG_CHARGE 0x1 #define BQ24190_REG_POC_CHG_CONFIG_OTG 0x2 +#define BQ24190_REG_POC_CHG_CONFIG_OTG_ALT 0x3 #define BQ24190_REG_POC_SYS_MIN_MASK (BIT(3) | BIT(2) | BIT(1)) #define BQ24190_REG_POC_SYS_MIN_SHIFT 1 #define BQ24190_REG_POC_SYS_MIN_MIN 3000 @@ -552,7 +553,11 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev) pm_runtime_mark_last_busy(bdi->dev); pm_runtime_put_autosuspend(bdi->dev); - return ret ? 
ret : val == BQ24190_REG_POC_CHG_CONFIG_OTG; + if (ret) + return ret; + + return (val == BQ24190_REG_POC_CHG_CONFIG_OTG || + val == BQ24190_REG_POC_CHG_CONFIG_OTG_ALT); } static const struct regulator_ops bq24190_vbus_ops = { diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c index e05cee457471bf1ff39db193930f5090309e1901..908cfd45d262403013fc0020d6a5a77218bfe47e 100644 --- a/drivers/power/supply/wm8350_power.c +++ b/drivers/power/supply/wm8350_power.c @@ -408,44 +408,112 @@ static const struct power_supply_desc wm8350_usb_desc = { * Initialisation *********************************************************************/ -static void wm8350_init_charger(struct wm8350 *wm8350) +static int wm8350_init_charger(struct wm8350 *wm8350) { + int ret; + /* register our interest in charger events */ - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, wm8350_charger_handler, 0, "Battery hot", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, + if (ret) + goto err; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, wm8350_charger_handler, 0, "Battery cold", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, + if (ret) + goto free_chg_bat_hot; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, wm8350_charger_handler, 0, "Battery fail", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO, + if (ret) + goto free_chg_bat_cold; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350_charger_handler, 0, "Charger timeout", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END, + if (ret) + goto free_chg_bat_fail; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_END, wm8350_charger_handler, 0, "Charge end", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START, + if (ret) + goto free_chg_to; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_START, wm8350_charger_handler, 0, "Charge start", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, + if (ret) + goto free_chg_end; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350_charger_handler, 0, "Fast charge ready", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, + if (ret) + goto free_chg_start; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350_charger_handler, 0, "Battery <3.9V", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, + if (ret) + goto free_chg_fast_rdy; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350_charger_handler, 0, "Battery <3.1V", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, + if (ret) + goto free_chg_vbatt_lt_3p9; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350_charger_handler, 0, "Battery <2.85V", wm8350); + if (ret) + goto free_chg_vbatt_lt_3p1; /* and supply change events */ - wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_USB_FB, wm8350_charger_handler, 0, "USB", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, + if (ret) + goto free_chg_vbatt_lt_2p85; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, wm8350_charger_handler, 0, "Wall", wm8350); - wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB, + if (ret) + goto free_ext_usb_fb; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_EXT_BAT_FB, wm8350_charger_handler, 0, "Battery", wm8350); + if (ret) + goto free_ext_wall_fb; + + return 0; + +free_ext_wall_fb: + 
wm8350_free_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, wm8350); +free_ext_usb_fb: + wm8350_free_irq(wm8350, WM8350_IRQ_EXT_USB_FB, wm8350); +free_chg_vbatt_lt_2p85: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350); +free_chg_vbatt_lt_3p1: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350); +free_chg_vbatt_lt_3p9: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350); +free_chg_fast_rdy: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350); +free_chg_start: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350); +free_chg_end: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350); +free_chg_to: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350); +free_chg_bat_fail: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, wm8350); +free_chg_bat_cold: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, wm8350); +free_chg_bat_hot: + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, wm8350); +err: + return ret; } static void free_charger_irq(struct wm8350 *wm8350) @@ -456,6 +524,7 @@ static void free_charger_irq(struct wm8350 *wm8350) wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350); + wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350); wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350); diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index c9e57237d778f3683fe0540889b137ddff8eb7dc..a994c45ca42e5ad83cb24d5825a99e5a93ef24fb 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -61,6 +61,20 @@ #define PERF_STATUS_THROTTLE_TIME_MASK 0xffffffff #define PP_POLICY_MASK 0x1F +/* + * SPR has different layout for Psys Domain PowerLimit registers. + * There are 17 bits of PL1 and PL2 instead of 15 bits. + * The Enable bits and TimeWindow bits are also shifted as a result. 
+ */ +#define PSYS_POWER_LIMIT1_MASK 0x1FFFF +#define PSYS_POWER_LIMIT1_ENABLE BIT(17) + +#define PSYS_POWER_LIMIT2_MASK (0x1FFFFULL<<32) +#define PSYS_POWER_LIMIT2_ENABLE BIT_ULL(49) + +#define PSYS_TIME_WINDOW1_MASK (0x7FULL<<19) +#define PSYS_TIME_WINDOW2_MASK (0x7FULL<<51) + /* Non HW constants */ #define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */ #define RAPL_PRIMITIVE_DUMMY BIT(2) @@ -97,6 +111,7 @@ struct rapl_defaults { bool to_raw); unsigned int dram_domain_energy_unit; unsigned int psys_domain_energy_unit; + bool spr_psys_bits; }; static struct rapl_defaults *rapl_defaults; @@ -669,12 +684,51 @@ static struct rapl_primitive_info rpi[] = { RAPL_DOMAIN_REG_PERF, TIME_UNIT, 0), PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0, RAPL_DOMAIN_REG_POLICY, ARBITRARY_UNIT, 0), + PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT1, PSYS_POWER_LIMIT1_MASK, 0, + RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0), + PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT2, PSYS_POWER_LIMIT2_MASK, 32, + RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0), + PRIMITIVE_INFO_INIT(PSYS_PL1_ENABLE, PSYS_POWER_LIMIT1_ENABLE, 17, + RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), + PRIMITIVE_INFO_INIT(PSYS_PL2_ENABLE, PSYS_POWER_LIMIT2_ENABLE, 49, + RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0), + PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW1, PSYS_TIME_WINDOW1_MASK, 19, + RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0), + PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW2, PSYS_TIME_WINDOW2_MASK, 51, + RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0), /* non-hardware */ PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT, RAPL_PRIMITIVE_DERIVED), {NULL, 0, 0, 0}, }; +static enum rapl_primitives +prim_fixups(struct rapl_domain *rd, enum rapl_primitives prim) +{ + if (!rapl_defaults->spr_psys_bits) + return prim; + + if (rd->id != RAPL_DOMAIN_PLATFORM) + return prim; + + switch (prim) { + case POWER_LIMIT1: + return PSYS_POWER_LIMIT1; + case POWER_LIMIT2: + return PSYS_POWER_LIMIT2; + case PL1_ENABLE: + return PSYS_PL1_ENABLE; + case PL2_ENABLE: + return PSYS_PL2_ENABLE; + case TIME_WINDOW1: + return PSYS_TIME_WINDOW1; + case TIME_WINDOW2: + return PSYS_TIME_WINDOW2; + default: + return prim; + } +} + /* Read primitive data based on its related struct rapl_primitive_info. * if xlate flag is set, return translated data based on data units, i.e. * time, energy, and power. 
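Note that prim_fixups() only swaps which rpi[] entry is used for the Platform (Psys) domain on SPR; the actual field extraction in rapl_read_data_raw() remains the generic (raw & mask) >> shift. A standalone sketch of that extraction using the SPR masks added above, run against a made-up register value:

/*
 * Standalone sketch: decode the SPR Psys power-limit register layout
 * using the PSYS_* masks from this patch. The raw value is invented
 * for illustration. Build: cc -o psys psys.c
 */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)			(1ULL << (n))
#define PSYS_POWER_LIMIT1_MASK		0x1FFFFULL
#define PSYS_POWER_LIMIT1_ENABLE	BIT_ULL(17)
#define PSYS_POWER_LIMIT2_MASK		(0x1FFFFULL << 32)
#define PSYS_POWER_LIMIT2_ENABLE	BIT_ULL(49)
#define PSYS_TIME_WINDOW1_MASK		(0x7FULL << 19)
#define PSYS_TIME_WINDOW2_MASK		(0x7FULL << 51)

/* The same operation rapl_read_data_raw() performs per primitive. */
static uint64_t field(uint64_t raw, uint64_t mask, unsigned int shift)
{
	return (raw & mask) >> shift;
}

int main(void)
{
	uint64_t raw = 0x00AA038000028140ULL;	/* made-up MSR contents */

	printf("PL1 raw: %llu\n", (unsigned long long)field(raw, PSYS_POWER_LIMIT1_MASK, 0));
	printf("PL1 en:  %llu\n", (unsigned long long)field(raw, PSYS_POWER_LIMIT1_ENABLE, 17));
	printf("TW1 raw: %llu\n", (unsigned long long)field(raw, PSYS_TIME_WINDOW1_MASK, 19));
	printf("PL2 raw: %llu\n", (unsigned long long)field(raw, PSYS_POWER_LIMIT2_MASK, 32));
	printf("PL2 en:  %llu\n", (unsigned long long)field(raw, PSYS_POWER_LIMIT2_ENABLE, 49));
	printf("TW2 raw: %llu\n", (unsigned long long)field(raw, PSYS_TIME_WINDOW2_MASK, 51));
	return 0;
}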
@@ -692,7 +746,8 @@ static int rapl_read_data_raw(struct rapl_domain *rd, enum rapl_primitives prim, bool xlate, u64 *data) { u64 value; - struct rapl_primitive_info *rp = &rpi[prim]; + enum rapl_primitives prim_fixed = prim_fixups(rd, prim); + struct rapl_primitive_info *rp = &rpi[prim_fixed]; struct reg_action ra; int cpu; @@ -738,7 +793,8 @@ static int rapl_write_data_raw(struct rapl_domain *rd, enum rapl_primitives prim, unsigned long long value) { - struct rapl_primitive_info *rp = &rpi[prim]; + enum rapl_primitives prim_fixed = prim_fixups(rd, prim); + struct rapl_primitive_info *rp = &rpi[prim_fixed]; int cpu; u64 bits; struct reg_action ra; @@ -981,6 +1037,7 @@ static const struct rapl_defaults rapl_defaults_spr_server = { .compute_time_window = rapl_compute_time_window_core, .dram_domain_energy_unit = 15300, .psys_domain_energy_unit = 1000000000, + .spr_psys_bits = true, }; static const struct rapl_defaults rapl_defaults_byt = { diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index be076a91e20e6090672703f9469453093556daa9..8cd59e84816316d31441b2ff10d8b175f80137f6 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -13,7 +13,7 @@ static ssize_t clock_name_show(struct device *dev, struct device_attribute *attr, char *page) { struct ptp_clock *ptp = dev_get_drvdata(dev); - return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name); + return sysfs_emit(page, "%s\n", ptp->info->name); } static DEVICE_ATTR_RO(clock_name); @@ -227,7 +227,7 @@ static ssize_t ptp_pin_show(struct device *dev, struct device_attribute *attr, mutex_unlock(&ptp->pincfg_mux); - return snprintf(page, PAGE_SIZE, "%u %u\n", func, chan); + return sysfs_emit(page, "%u %u\n", func, chan); } static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c index 5ff11145c1a309716227196c909894a22ca90476..9b15b6a79082a776ee1d77f211b612e1162c79dd 100644 --- a/drivers/pwm/pwm-lpc18xx-sct.c +++ b/drivers/pwm/pwm-lpc18xx-sct.c @@ -400,12 +400,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_LIMIT, BIT(lpc18xx_pwm->period_event)); - ret = pwmchip_add(&lpc18xx_pwm->chip); - if (ret < 0) { - dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret); - goto disable_pwmclk; - } - for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) { struct lpc18xx_pwm_data *data; @@ -415,14 +409,12 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) GFP_KERNEL); if (!data) { ret = -ENOMEM; - goto remove_pwmchip; + goto disable_pwmclk; } pwm_set_chip_data(pwm, data); } - platform_set_drvdata(pdev, lpc18xx_pwm); - val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL); val &= ~LPC18XX_PWM_BIDIR; val &= ~LPC18XX_PWM_CTRL_HALT; @@ -430,10 +422,16 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev) val |= LPC18XX_PWM_PRE(0); lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val); + ret = pwmchip_add(&lpc18xx_pwm->chip); + if (ret < 0) { + dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret); + goto disable_pwmclk; + } + + platform_set_drvdata(pdev, lpc18xx_pwm); + return 0; -remove_pwmchip: - pwmchip_remove(&lpc18xx_pwm->chip); disable_pwmclk: clk_disable_unprepare(lpc18xx_pwm->pwm_clk); return ret; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index a6d27334a71d20cd49de05389dcced0341b4eca3..c65299f8c01d0a079d348f400d8228b76493a134 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -5868,9 +5868,8 @@ 
core_initcall(regulator_init); static int regulator_late_cleanup(struct device *dev, void *data) { struct regulator_dev *rdev = dev_to_rdev(dev); - const struct regulator_ops *ops = rdev->desc->ops; struct regulation_constraints *c = rdev->constraints; - int enabled, ret; + int ret; if (c && c->always_on) return 0; @@ -5883,14 +5882,8 @@ static int regulator_late_cleanup(struct device *dev, void *data) if (rdev->use_count) goto unlock; - /* If we can't read the status assume it's always on. */ - if (ops->is_enabled) - enabled = ops->is_enabled(rdev); - else - enabled = 1; - - /* But if reading the status failed, assume that it's off. */ - if (enabled <= 0) + /* If reading the status failed, assume that it's off. */ + if (_regulator_is_enabled(rdev) <= 0) goto unlock; if (have_full_constraints()) { diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index bb944ee5fe3b174bcdc1d3f12e444e1611ebf270..8d784a2a09d867c04d74486a33ec516d7a767b65 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -9,6 +9,7 @@ #include #include #include +#include #include struct qcom_rpm_reg { @@ -1107,52 +1108,93 @@ static const struct of_device_id rpm_of_match[] = { }; MODULE_DEVICE_TABLE(of, rpm_of_match); -static int rpm_reg_probe(struct platform_device *pdev) +/** + * rpm_regulator_init_vreg() - initialize all attributes of a qcom_smd-regulator + * @vreg: Pointer to the individual qcom_smd-regulator resource + * @dev: Pointer to the top level qcom_smd-regulator PMIC device + * @node: Pointer to the individual qcom_smd-regulator resource + * device node + * @rpm: Pointer to the rpm bus node + * @pmic_rpm_data: Pointer to a null-terminated array of qcom_smd-regulator + * resources defined for the top level PMIC device + * + * Return: 0 on success, errno on failure + */ +static int rpm_regulator_init_vreg(struct qcom_rpm_reg *vreg, struct device *dev, + struct device_node *node, struct qcom_smd_rpm *rpm, + const struct rpm_regulator_data *pmic_rpm_data) { - const struct rpm_regulator_data *reg; - const struct of_device_id *match; - struct regulator_config config = { }; + struct regulator_config config = {}; + const struct rpm_regulator_data *rpm_data; struct regulator_dev *rdev; + int ret; + + for (rpm_data = pmic_rpm_data; rpm_data->name; rpm_data++) + if (of_node_name_eq(node, rpm_data->name)) + break; + + if (!rpm_data->name) { + dev_err(dev, "Unknown regulator %pOFn\n", node); + return -EINVAL; + } + + vreg->dev = dev; + vreg->rpm = rpm; + vreg->type = rpm_data->type; + vreg->id = rpm_data->id; + + memcpy(&vreg->desc, rpm_data->desc, sizeof(vreg->desc)); + vreg->desc.name = rpm_data->name; + vreg->desc.supply_name = rpm_data->supply; + vreg->desc.owner = THIS_MODULE; + vreg->desc.type = REGULATOR_VOLTAGE; + vreg->desc.of_match = rpm_data->name; + + config.dev = dev; + config.of_node = node; + config.driver_data = vreg; + + rdev = devm_regulator_register(dev, &vreg->desc, &config); + if (IS_ERR(rdev)) { + ret = PTR_ERR(rdev); + dev_err(dev, "%pOFn: devm_regulator_register() failed, ret=%d\n", node, ret); + return ret; + } + + return 0; +} + +static int rpm_reg_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct rpm_regulator_data *vreg_data; + struct device_node *node; struct qcom_rpm_reg *vreg; struct qcom_smd_rpm *rpm; + int ret; rpm = dev_get_drvdata(pdev->dev.parent); if (!rpm) { - dev_err(&pdev->dev, "unable to retrieve handle to rpm\n"); + dev_err(&pdev->dev, "Unable to retrieve 
handle to rpm\n"); return -ENODEV; } - match = of_match_device(rpm_of_match, &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "failed to match device\n"); + vreg_data = of_device_get_match_data(dev); + if (!vreg_data) return -ENODEV; - } - for (reg = match->data; reg->name; reg++) { + for_each_available_child_of_node(dev->of_node, node) { vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL); - if (!vreg) + if (!vreg) { + of_node_put(node); return -ENOMEM; + } + + ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data); - vreg->dev = &pdev->dev; - vreg->type = reg->type; - vreg->id = reg->id; - vreg->rpm = rpm; - - memcpy(&vreg->desc, reg->desc, sizeof(vreg->desc)); - - vreg->desc.id = -1; - vreg->desc.owner = THIS_MODULE; - vreg->desc.type = REGULATOR_VOLTAGE; - vreg->desc.name = reg->name; - vreg->desc.supply_name = reg->supply; - vreg->desc.of_match = reg->name; - - config.dev = &pdev->dev; - config.driver_data = vreg; - rdev = devm_regulator_register(&pdev->dev, &vreg->desc, &config); - if (IS_ERR(rdev)) { - dev_err(&pdev->dev, "failed to register %s\n", reg->name); - return PTR_ERR(rdev); + if (ret < 0) { + of_node_put(node); + return ret; } } diff --git a/drivers/regulator/rpi-panel-attiny-regulator.c b/drivers/regulator/rpi-panel-attiny-regulator.c index ee46bfbf5eee7e25380208372c5ac0a47ae8293f..991b4730d7687a89a6f8696d4e36597365d6e8d9 100644 --- a/drivers/regulator/rpi-panel-attiny-regulator.c +++ b/drivers/regulator/rpi-panel-attiny-regulator.c @@ -37,11 +37,24 @@ static const struct regmap_config attiny_regmap_config = { static int attiny_lcd_power_enable(struct regulator_dev *rdev) { unsigned int data; + int ret, i; regmap_write(rdev->regmap, REG_POWERON, 1); + msleep(80); + /* Wait for nPWRDWN to go low to indicate poweron is done. */ - regmap_read_poll_timeout(rdev->regmap, REG_PORTB, data, - data & BIT(0), 10, 1000000); + for (i = 0; i < 20; i++) { + ret = regmap_read(rdev->regmap, REG_PORTB, &data); + if (!ret) { + if (data & BIT(0)) + break; + } + usleep_range(10000, 12000); + } + usleep_range(10000, 12000); + + if (ret) + pr_err("%s: regmap_read_poll_timeout failed %d\n", __func__, ret); /* Default to the same orientation as the closed source * firmware used for the panel. 
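 * (The retry loop added above polls REG_PORTB bit 0 up to 20 times,
 * roughly 10 ms apart, as a stand-in for the dropped
 * regmap_read_poll_timeout() call; that is also why the pr_err()
 * string still names that helper.)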
Runtime rotation @@ -57,23 +70,34 @@ static int attiny_lcd_power_disable(struct regulator_dev *rdev) { regmap_write(rdev->regmap, REG_PWM, 0); regmap_write(rdev->regmap, REG_POWERON, 0); - udelay(1); + msleep(30); return 0; } static int attiny_lcd_power_is_enabled(struct regulator_dev *rdev) { unsigned int data; - int ret; + int ret, i; - ret = regmap_read(rdev->regmap, REG_POWERON, &data); + for (i = 0; i < 10; i++) { + ret = regmap_read(rdev->regmap, REG_POWERON, &data); + if (!ret) + break; + usleep_range(10000, 12000); + } if (ret < 0) return ret; if (!(data & BIT(0))) return 0; - ret = regmap_read(rdev->regmap, REG_PORTB, &data); + for (i = 0; i < 10; i++) { + ret = regmap_read(rdev->regmap, REG_PORTB, &data); + if (!ret) + break; + usleep_range(10000, 12000); + } + if (ret < 0) return ret; @@ -103,20 +127,32 @@ static int attiny_update_status(struct backlight_device *bl) { struct regmap *regmap = bl_get_data(bl); int brightness = bl->props.brightness; + int ret, i; if (bl->props.power != FB_BLANK_UNBLANK || bl->props.fb_blank != FB_BLANK_UNBLANK) brightness = 0; - return regmap_write(regmap, REG_PWM, brightness); + for (i = 0; i < 10; i++) { + ret = regmap_write(regmap, REG_PWM, brightness); + if (!ret) + break; + } + + return ret; } static int attiny_get_brightness(struct backlight_device *bl) { struct regmap *regmap = bl_get_data(bl); - int ret, brightness; + int ret, brightness, i; + + for (i = 0; i < 10; i++) { + ret = regmap_read(regmap, REG_PWM, &brightness); + if (!ret) + break; + } - ret = regmap_read(regmap, REG_PWM, &brightness); if (ret) return ret; @@ -166,7 +202,7 @@ static int attiny_i2c_probe(struct i2c_client *i2c, } regmap_write(regmap, REG_POWERON, 0); - mdelay(1); + msleep(30); config.dev = &i2c->dev; config.regmap = regmap; diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c index cadea0344486fa6e63eabb79cee8159ae0f067e5..40befdd9dfa922bf5878f2d46bc9bf6cec413223 100644 --- a/drivers/regulator/wm8994-regulator.c +++ b/drivers/regulator/wm8994-regulator.c @@ -71,6 +71,35 @@ static const struct regulator_ops wm8994_ldo2_ops = { }; static const struct regulator_desc wm8994_ldo_desc[] = { + { + .name = "LDO1", + .id = 1, + .type = REGULATOR_VOLTAGE, + .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1, + .vsel_reg = WM8994_LDO_1, + .vsel_mask = WM8994_LDO1_VSEL_MASK, + .ops = &wm8994_ldo1_ops, + .min_uV = 2400000, + .uV_step = 100000, + .enable_time = 3000, + .off_on_delay = 36000, + .owner = THIS_MODULE, + }, + { + .name = "LDO2", + .id = 2, + .type = REGULATOR_VOLTAGE, + .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1, + .vsel_reg = WM8994_LDO_2, + .vsel_mask = WM8994_LDO2_VSEL_MASK, + .ops = &wm8994_ldo2_ops, + .enable_time = 3000, + .off_on_delay = 36000, + .owner = THIS_MODULE, + }, +}; + +static const struct regulator_desc wm8958_ldo_desc[] = { { .name = "LDO1", .id = 1, @@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev) * regulator core and we need not worry about it on the * error path. 
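 * Two LDO descriptor tables now exist: the wm8994_ldo_desc entries
 * above add a 36 ms off_on_delay to both LDOs, while wm8958_ldo_desc
 * appears to keep the original timings; the probe code below picks a
 * table based on ldo->wm8994->type.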
*/ - ldo->regulator = devm_regulator_register(&pdev->dev, - &wm8994_ldo_desc[id], - &config); + if (ldo->wm8994->type == WM8994) { + ldo->regulator = devm_regulator_register(&pdev->dev, + &wm8994_ldo_desc[id], + &config); + } else { + ldo->regulator = devm_regulator_register(&pdev->dev, + &wm8958_ldo_desc[id], + &config); + } + if (IS_ERR(ldo->regulator)) { ret = PTR_ERR(ldo->regulator); dev_err(wm8994->dev, "Failed to register LDO%d: %d\n", diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c index 7c007dd7b2000d65d118c8b493ddbca593659e5a..aca21560e20b8ae07fc9c1048b311fad67ff4d7f 100644 --- a/drivers/remoteproc/qcom_pil_info.c +++ b/drivers/remoteproc/qcom_pil_info.c @@ -104,7 +104,7 @@ int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size) return -ENOMEM; found_unused: - memcpy_toio(entry, image, PIL_RELOC_NAME_LEN); + memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN)); found_existing: /* Use two writel() as base is only aligned to 4 bytes on odd entries */ writel(base, entry + PIL_RELOC_NAME_LEN); diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c index 9eb599701f9b046b6369c844954f8429894e5511..c39138d39cf07c6ec01b7b802527a68efe221106 100644 --- a/drivers/remoteproc/qcom_q6v5_adsp.c +++ b/drivers/remoteproc/qcom_q6v5_adsp.c @@ -406,6 +406,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp) } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) return ret; diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index ebc3e755bcbcd488229024a2614e6dbfaf6af675..1b3aa84e36e7ad051daeab86cd776d6236268a96 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1594,18 +1594,20 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) * reserved memory regions from device's memory-region property. 
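 * Each node reference taken here (of_get_child_by_name() and
 * of_parse_phandle()) is now dropped with of_node_put() as soon as
 * of_address_to_resource() has copied out the region, on the error
 * paths included.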
*/ child = of_get_child_by_name(qproc->dev->of_node, "mba"); - if (!child) + if (!child) { node = of_parse_phandle(qproc->dev->of_node, "memory-region", 0); - else + } else { node = of_parse_phandle(child, "memory-region", 0); + of_node_put(child); + } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) { dev_err(qproc->dev, "unable to resolve mba region\n"); return ret; } - of_node_put(node); qproc->mba_phys = r.start; qproc->mba_size = resource_size(&r); @@ -1622,14 +1624,15 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) } else { child = of_get_child_by_name(qproc->dev->of_node, "mpss"); node = of_parse_phandle(child, "memory-region", 0); + of_node_put(child); } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) { dev_err(qproc->dev, "unable to resolve mpss region\n"); return ret; } - of_node_put(node); qproc->mpss_phys = qproc->mpss_reloc = r.start; qproc->mpss_size = resource_size(&r); diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index e2573f79a137d71a00c92916e7c305cc3268cc4d..67286a4505cd1ce401aa57f6e1fbb4cbfb11331b 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -448,6 +448,7 @@ static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss) } ret = of_address_to_resource(node, 0, &r); + of_node_put(node); if (ret) return ret; diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 7e5845376e9faebbe726393bfd7eb14d0567c50f..e8bb0ee6b35ac788637784c7e74a01004fd1fadf 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -76,7 +76,7 @@ static ssize_t rproc_coredump_write(struct file *filp, int ret, err = 0; char buf[20]; - if (count > sizeof(buf)) + if (count < 1 || count > sizeof(buf)) return -EINVAL; ret = copy_from_user(buf, user_buf, count); diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c index 4bbbacdbf3bb7702f185f007b638daf83378d489..be90d77c5168d2784a595e9836fdf1031a4f5326 100644 --- a/drivers/rpmsg/rpmsg_char.c +++ b/drivers/rpmsg/rpmsg_char.c @@ -92,7 +92,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data) /* wake up any blocked readers */ wake_up_interruptible(&eptdev->readq); - device_del(&eptdev->dev); + cdev_device_del(&eptdev->cdev, &eptdev->dev); put_device(&eptdev->dev); return 0; @@ -332,7 +332,6 @@ static void rpmsg_eptdev_release_device(struct device *dev) ida_simple_remove(&rpmsg_ept_ida, dev->id); ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt)); - cdev_del(&eptdev->cdev); kfree(eptdev); } @@ -377,19 +376,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev, dev->id = ret; dev_set_name(dev, "rpmsg%d", ret); - ret = cdev_add(&eptdev->cdev, dev->devt, 1); + ret = cdev_device_add(&eptdev->cdev, &eptdev->dev); if (ret) goto free_ept_ida; /* We can now rely on the release function for cleanup */ dev->release = rpmsg_eptdev_release_device; - ret = device_add(dev); - if (ret) { - dev_err(dev, "device_add failed: %d\n", ret); - put_device(dev); - } - return ret; free_ept_ida: @@ -458,7 +451,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev) ida_simple_remove(&rpmsg_ctrl_ida, dev->id); ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt)); - cdev_del(&ctrldev->cdev); kfree(ctrldev); } @@ -493,19 +485,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev) dev->id = ret; dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret); - ret = cdev_add(&ctrldev->cdev, dev->devt, 1); + ret = 
cdev_device_add(&ctrldev->cdev, &ctrldev->dev); if (ret) goto free_ctrl_ida; /* We can now rely on the release function for cleanup */ dev->release = rpmsg_ctrldev_release_device; - ret = device_add(dev); - if (ret) { - dev_err(&rpdev->dev, "device_add failed: %d\n", ret); - put_device(dev); - } - dev_set_drvdata(&rpdev->dev, ctrldev); return ret; @@ -531,7 +517,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev) if (ret) dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret); - device_del(&ctrldev->dev); + cdev_device_del(&ctrldev->cdev, &ctrldev->dev); put_device(&ctrldev->dev); } diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c index 91de940896e3d03170b8f0661ac0d5abd1883711..028ca5961bc2afff4f9a183f7473925b202ecada 100644 --- a/drivers/rpmsg/rpmsg_core.c +++ b/drivers/rpmsg/rpmsg_core.c @@ -473,13 +473,25 @@ static int rpmsg_dev_probe(struct device *dev) err = rpdrv->probe(rpdev); if (err) { dev_err(dev, "%s: failed: %d\n", __func__, err); - if (ept) - rpmsg_destroy_ept(ept); - goto out; + goto destroy_ept; } - if (ept && rpdev->ops->announce_create) + if (ept && rpdev->ops->announce_create) { err = rpdev->ops->announce_create(rpdev); + if (err) { + dev_err(dev, "failed to announce creation\n"); + goto remove_rpdev; + } + } + + return 0; + +remove_rpdev: + if (rpdrv->remove) + rpdrv->remove(rpdev); +destroy_ept: + if (ept) + rpmsg_destroy_ept(ept); out: return err; } diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 794a4f036b99834c8fa15bcc834dddcfaa1090ba..146056858135e304fce9d0a173d8d667427b5394 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -807,9 +807,13 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue); struct rtc_time tm; ktime_t now; + int err; + + err = __rtc_read_time(rtc, &tm); + if (err) + return err; timer->enabled = 1; - __rtc_read_time(rtc, &tm); now = rtc_tm_to_ktime(tm); /* Skip over expired timers */ @@ -823,7 +827,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) trace_rtc_timer_enqueue(timer); if (!next || ktime_before(timer->node.expires, next->expires)) { struct rtc_wkalrm alarm; - int err; alarm.time = rtc_ktime_to_tm(timer->node.expires); alarm.enabled = 1; diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index c633319cdb91319d8af218ec4eda30503e59ec4e..58c6382a2807caa293ed30da83ab0d4809c15463 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -463,7 +463,10 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t) min = t->time.tm_min; sec = t->time.tm_sec; + spin_lock_irq(&rtc_lock); rtc_control = CMOS_READ(RTC_CONTROL); + spin_unlock_irq(&rtc_lock); + if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { /* Writing 0xff means "don't care" or "match all". */ mon = (mon <= 12) ? 
bin2bcd(mon) : 0xff; diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index 033dcc0645a5ed8e10968d6c1aff39047e898966..9abb178adc10072d06cc0e6c0aecbd51253b2e0b 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -99,7 +99,7 @@ unsigned int mc146818_get_time(struct rtc_time *time) time->tm_year += real_year - 72; #endif - if (century > 20) + if (century > 19) time->tm_year += (century - 19) * 100; /* diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c index ebe03eba8f5ffafa19823082cfe198e8ea658cdb..87c93843d62add93f09e048baa309b868564dbe5 100644 --- a/drivers/rtc/rtc-pl030.c +++ b/drivers/rtc/rtc-pl030.c @@ -137,7 +137,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id) return ret; } -static int pl030_remove(struct amba_device *dev) +static void pl030_remove(struct amba_device *dev) { struct pl030_rtc *rtc = amba_get_drvdata(dev); @@ -146,8 +146,6 @@ static int pl030_remove(struct amba_device *dev) free_irq(dev->irq[0], rtc); iounmap(rtc->base); amba_release_regions(dev); - - return 0; } static struct amba_id pl030_ids[] = { diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index d4b2ab7861266e367ce87a590fb3f4fd8611318d..2f5581ea26fe152b16d374300b12db45d427bfa0 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -280,7 +280,7 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) return 0; } -static int pl031_remove(struct amba_device *adev) +static void pl031_remove(struct amba_device *adev) { struct pl031_local *ldata = dev_get_drvdata(&adev->dev); @@ -289,8 +289,6 @@ static int pl031_remove(struct amba_device *adev) if (adev->irq[0]) free_irq(adev->irq[0], ldata); amba_release_regions(adev); - - return 0; } static int pl031_probe(struct amba_device *adev, const struct amba_id *id) diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c index d2f1d8f754bf3a3797b404ef0dc6cc9fca5d26c9..cf8119b6d32044b343d63e71f669190cc38ecfa1 100644 --- a/drivers/rtc/rtc-pxa.c +++ b/drivers/rtc/rtc-pxa.c @@ -330,6 +330,10 @@ static int __init pxa_rtc_probe(struct platform_device *pdev) if (sa1100_rtc->irq_alarm < 0) return -ENXIO; + sa1100_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(sa1100_rtc->rtc)) + return PTR_ERR(sa1100_rtc->rtc); + pxa_rtc->base = devm_ioremap(dev, pxa_rtc->ress->start, resource_size(pxa_rtc->ress)); if (!pxa_rtc->base) { diff --git a/drivers/rtc/rtc-sw64-virt.c b/drivers/rtc/rtc-sw64-virt.c index 549d2e2d8a01d55989917e1836b2252d7eaa0340..23c93d7ddbae7281d04f4f86137018cc297b77f4 100644 --- a/drivers/rtc/rtc-sw64-virt.c +++ b/drivers/rtc/rtc-sw64-virt.c @@ -14,18 +14,40 @@ #include #define RTC_IO_ADDR (0x804910000000ULL) +unsigned long vtime_old, vtime_new; static int sw64_virt_read_time(struct device *dev, struct rtc_time *tm) { unsigned long *ioaddr; + unsigned long vtime_now; + long vtime_offset; ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); - rtc_time64_to_tm(*ioaddr, tm); + if (!vtime_new) { + rtc_time64_to_tm(*ioaddr, tm); + } else { + vtime_now = *ioaddr; + vtime_offset = vtime_new - vtime_old; + vtime_now += vtime_offset; + rtc_time64_to_tm(vtime_now, tm); + } + return 0; +} + +static int sw64_virt_set_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + + ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); + vtime_old = *ioaddr; + + vtime_new = rtc_tm_to_time64(tm); return 0; } static const struct rtc_class_ops rtc_sw64_virt_ops = { .read_time = sw64_virt_read_time, + .set_time = 
sw64_virt_set_time, }; static int __init rtc_sw64_virt_probe(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c index 2018614f258f6f83cf9897897500022a49fd20ae..6eaa9321c074102c58fdf3aa8069a82936cab8be 100644 --- a/drivers/rtc/rtc-wm8350.c +++ b/drivers/rtc/rtc-wm8350.c @@ -432,14 +432,21 @@ static int wm8350_rtc_probe(struct platform_device *pdev) return ret; } - wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350_rtc_update_handler, 0, "RTC Seconds", wm8350); + if (ret) + return ret; + wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC); - wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM, wm8350_rtc_alarm_handler, 0, "RTC Alarm", wm8350); + if (ret) { + wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350); + return ret; + } return 0; } diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index d24cafe02708fa941346510c6fd8aac21f1110e3..511bf8e0a436c2eb40f2601c6a68edf1724ea887 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data) goto out; } + /* re-init to undo drop from zfcp_fc_adisc() */ + port->d_id = ntoh24(adisc_resp->adisc_port_id); /* port is good, unblock rport without going through erp */ zfcp_scsi_schedule_rport_register(port); out: @@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port) struct zfcp_fc_req *fc_req; struct zfcp_adapter *adapter = port->adapter; struct Scsi_Host *shost = adapter->scsi_host; + u32 d_id; int ret; fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC); @@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port) fc_req->u.adisc.req.adisc_cmd = ELS_ADISC; hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost)); - ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els, + d_id = port->d_id; /* remember as destination for send els below */ + /* + * Force fresh GID_PN lookup on next port recovery. + * Must happen after request setup and before sending request, + * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler(). 
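+ * If the ADISC fails, d_id stays zeroed and the next recovery runs
+ * GID_PN first; on success, zfcp_fc_adisc_handler() rewrites d_id
+ * from the ADISC response before unblocking the rport.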
+ */ + port->d_id = 0; + + ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els, ZFCP_FC_CTELS_TMO); if (ret) kmem_cache_free(zfcp_fc_req_cache, fc_req); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 170d59df48d13c595a3085fb5611bb7c2b852ba0..0fbe4edeccd07e4c6703a598e0ee2bca5df88128 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1151,7 +1151,6 @@ source "drivers/scsi/qla2xxx/Kconfig" source "drivers/scsi/qla4xxx/Kconfig" source "drivers/scsi/qedi/Kconfig" source "drivers/scsi/qedf/Kconfig" -source "drivers/scsi/spfc/Kconfig" source "drivers/scsi/huawei/Kconfig" config SCSI_LPFC diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 299d3318fac889b06429fe5db7437874c5420b9f..78a3c832394c4fcfbea6fd47108de383d1f61cdb 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -85,7 +85,6 @@ obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ -obj-$(CONFIG_SPFC) += spfc/ obj-$(CONFIG_SCSI_LPFC) += lpfc/ obj-$(CONFIG_SCSI_HUAWEI_FC) += huawei/ obj-$(CONFIG_SCSI_BFA_FC) += bfa/ diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index d8e19afa7a14071aafc7ea44d5fd6913f7436a41..c6607c4686bb746d5c701794afe1fd5d6c79cf0d 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -3367,13 +3367,11 @@ static int __init aha152x_setup(char *str) setup[setup_count].synchronous = ints[0] >= 6 ? ints[6] : 1; setup[setup_count].delay = ints[0] >= 7 ? ints[7] : DELAY_DEFAULT; setup[setup_count].ext_trans = ints[0] >= 8 ? ints[8] : 0; - if (ints[0] > 8) { /*}*/ + if (ints[0] > 8) printk(KERN_NOTICE "aha152x: usage: aha152x=[,[," "[,[,[,[,[,]]]]]]]\n"); - } else { + else setup_count++; - return 0; - } return 1; } diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index a13c203ef7a9a88260cfb430ab4f537498f17963..c4881657a807b6fd83647bd57aae8efcd9ead377 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -182,6 +182,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; uint16_t cri_index; + int rc = 0; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) @@ -189,15 +190,17 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, beiscsi_ep = ep->dd_data; - if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) - return -EINVAL; + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { + rc = -EINVAL; + goto put_ep; + } if (beiscsi_ep->phba != phba) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n", beiscsi_ep->phba, phba); - - return -EEXIST; + rc = -EEXIST; + goto put_ep; } cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid); if (phba->conn_table[cri_index]) { @@ -209,7 +212,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, beiscsi_ep->ep_cid, beiscsi_conn, phba->conn_table[cri_index]); - return -EINVAL; + rc = -EINVAL; + goto put_ep; } } @@ -226,7 +230,10 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, "BS_%d : cid %d phba->conn_table[%u]=%p\n", beiscsi_ep->ep_cid, cri_index, beiscsi_conn); phba->conn_table[cri_index] = beiscsi_conn; - return 0; + +put_ep: + iscsi_put_endpoint(ep); + return rc; } static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba) diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 
987dc8135a9b41f3e67c3aea65dd9efdd216d072..06f697bfc49f388c321532721cb5ee15e200f242 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5801,15 +5801,21 @@ static struct pci_error_handlers beiscsi_eeh_handlers = { .resume = beiscsi_eeh_resume, }; +struct iscsi_transport_expand beiscsi_iscsi_expand = { + .unbind_conn = iscsi_conn_unbind, +}; + struct iscsi_transport beiscsi_iscsi_transport = { .owner = THIS_MODULE, .name = DRV_NAME, .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | - CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, + CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD | + CAP_OPS_EXPAND, .create_session = beiscsi_session_create, .destroy_session = beiscsi_session_destroy, .create_conn = beiscsi_conn_create, .bind_conn = beiscsi_conn_bind, + .ops_expand = &beiscsi_iscsi_expand, .destroy_conn = iscsi_conn_teardown, .attr_is_visible = beiscsi_attr_is_visible, .set_iface_param = beiscsi_iface_set_param, diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 5ae1e3f7891018e6d62fe902d000f4fd2f93297f..e049cdb3c286ccde6fc058c194b418f3f46e49cd 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -711,7 +711,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr, char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; bfa_get_adapter_serial_num(&bfad->bfa, serial_num); - return snprintf(buf, PAGE_SIZE, "%s\n", serial_num); + return sysfs_emit(buf, "%s\n", serial_num); } static ssize_t @@ -725,7 +725,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr, char model[BFA_ADAPTER_MODEL_NAME_LEN]; bfa_get_adapter_model(&bfad->bfa, model); - return snprintf(buf, PAGE_SIZE, "%s\n", model); + return sysfs_emit(buf, "%s\n", model); } static ssize_t @@ -805,7 +805,7 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr, snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN, "Invalid Model"); - return snprintf(buf, PAGE_SIZE, "%s\n", model_descr); + return sysfs_emit(buf, "%s\n", model_descr); } static ssize_t @@ -819,7 +819,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr, u64 nwwn; nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port); - return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn)); + return sysfs_emit(buf, "0x%llx\n", cpu_to_be64(nwwn)); } static ssize_t @@ -836,7 +836,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr, bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); strlcpy(symname, port_attr.port_cfg.sym_name.symname, BFA_SYMNAME_MAXLEN); - return snprintf(buf, PAGE_SIZE, "%s\n", symname); + return sysfs_emit(buf, "%s\n", symname); } static ssize_t @@ -850,14 +850,14 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr, char hw_ver[BFA_VERSION_LEN]; bfa_get_pci_chip_rev(&bfad->bfa, hw_ver); - return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver); + return sysfs_emit(buf, "%s\n", hw_ver); } static ssize_t bfad_im_drv_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_VERSION); + return sysfs_emit(buf, "%s\n", BFAD_DRIVER_VERSION); } static ssize_t @@ -871,7 +871,7 @@ bfad_im_optionrom_version_show(struct device *dev, char optrom_ver[BFA_VERSION_LEN]; bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver); - return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver); + return sysfs_emit(buf, "%s\n", optrom_ver); } static ssize_t @@ -885,7 +885,7 @@ 
bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr, char fw_ver[BFA_VERSION_LEN]; bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver); - return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver); + return sysfs_emit(buf, "%s\n", fw_ver); } static ssize_t @@ -897,7 +897,7 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr, (struct bfad_im_port_s *) shost->hostdata[0]; struct bfad_s *bfad = im_port->bfad; - return snprintf(buf, PAGE_SIZE, "%d\n", + return sysfs_emit(buf, "%d\n", bfa_get_nports(&bfad->bfa)); } @@ -905,7 +905,7 @@ static ssize_t bfad_im_drv_name_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%s\n", BFAD_DRIVER_NAME); + return sysfs_emit(buf, "%s\n", BFAD_DRIVER_NAME); } static ssize_t @@ -924,14 +924,14 @@ bfad_im_num_of_discovered_ports_show(struct device *dev, rports = kcalloc(nrports, sizeof(struct bfa_rport_qualifier_s), GFP_ATOMIC); if (rports == NULL) - return snprintf(buf, PAGE_SIZE, "Failed\n"); + return sysfs_emit(buf, "Failed\n"); spin_lock_irqsave(&bfad->bfad_lock, flags); bfa_fcs_lport_get_rport_quals(port->fcs_port, rports, &nrports); spin_unlock_irqrestore(&bfad->bfad_lock, flags); kfree(rports); - return snprintf(buf, PAGE_SIZE, "%d\n", nrports); + return sysfs_emit(buf, "%d\n", nrports); } static DEVICE_ATTR(serial_number, S_IRUGO, diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 6890bbe04a8c14d431b0cfb4990067c43e01060f..8f47bf83694f6b033ba4031684f12d93fed167a7 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -80,7 +80,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, struct device *parent, int npiv); -static void bnx2fc_destroy_work(struct work_struct *work); +static void bnx2fc_port_destroy(struct fcoe_port *port); static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device @@ -506,7 +506,8 @@ static int bnx2fc_l2_rcv_thread(void *arg) static void bnx2fc_recv_frame(struct sk_buff *skb) { - u32 fr_len; + u64 crc_err; + u32 fr_len, fr_crc; struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fc_stats *stats; @@ -540,6 +541,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); + stats = per_cpu_ptr(lport->stats, get_cpu()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + put_cpu(); + fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; @@ -622,16 +628,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) return; } - stats = per_cpu_ptr(lport->stats, smp_processor_id()); - stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + fr_crc = le32_to_cpu(fr_crc(fp)); - if (le32_to_cpu(fr_crc(fp)) != - ~crc32(~0, skb->data, fr_len)) { - if (stats->InvalidCRCCount < 5) + if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) { + stats = per_cpu_ptr(lport->stats, get_cpu()); + crc_err = (stats->InvalidCRCCount++); + put_cpu(); + if (crc_err < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); - stats->InvalidCRCCount++; kfree_skb(skb); return; } @@ -905,9 +910,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, __bnx2fc_destroy(interface); } mutex_unlock(&bnx2fc_dev_lock); - - /* Ensure 
ALL destroy work has been completed before return */ - flush_workqueue(bnx2fc_wq); return; default: @@ -1213,8 +1215,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport) mutex_unlock(&n_port->lp_mutex); bnx2fc_free_vport(interface->hba, port->lport); bnx2fc_port_shutdown(port->lport); + bnx2fc_port_destroy(port); bnx2fc_interface_put(interface); - queue_work(bnx2fc_wq, &port->destroy_work); return 0; } @@ -1523,7 +1525,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, port->lport = lport; port->priv = interface; port->get_netdev = bnx2fc_netdev; - INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); /* Configure fcoe_port */ rc = bnx2fc_lport_config(lport); @@ -1651,8 +1652,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface) bnx2fc_interface_cleanup(interface); bnx2fc_stop(interface); list_del(&interface->list); + bnx2fc_port_destroy(port); bnx2fc_interface_put(interface); - queue_work(bnx2fc_wq, &port->destroy_work); } /** @@ -1692,15 +1693,12 @@ static int bnx2fc_destroy(struct net_device *netdev) return rc; } -static void bnx2fc_destroy_work(struct work_struct *work) +static void bnx2fc_port_destroy(struct fcoe_port *port) { - struct fcoe_port *port; struct fc_lport *lport; - port = container_of(work, struct fcoe_port, destroy_work); lport = port->lport; - - BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); + BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport); bnx2fc_if_destroy(lport); } @@ -2554,9 +2552,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev) __bnx2fc_destroy(interface); mutex_unlock(&bnx2fc_dev_lock); - /* Ensure ALL destroy work has been completed before return */ - flush_workqueue(bnx2fc_wq); - bnx2fc_ulp_stop(hba); /* unregister cnic device */ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 37f5b719050e2e83c5b5e2c62766faec980b53ee..e13c77a76150bc42c79702a9f7785a81482346ce 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1420,17 +1420,23 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, * Forcefully terminate all in progress connection recovery at the * earliest, either in bind(), send_pdu(LOGIN), or conn_start() */ - if (bnx2i_adapter_ready(hba)) - return -EIO; + if (bnx2i_adapter_ready(hba)) { + ret_code = -EIO; + goto put_ep; + } bnx2i_ep = ep->dd_data; if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || - (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) + (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) { /* Peer disconnect via' FIN or RST */ - return -EINVAL; + ret_code = -EINVAL; + goto put_ep; + } - if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) - return -EINVAL; + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { + ret_code = -EINVAL; + goto put_ep; + } if (bnx2i_ep->hba != hba) { /* Error - TCP connection does not belong to this device @@ -1441,7 +1447,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, "belong to hba (%s)\n", hba->netdev->name); - return -EEXIST; + ret_code = -EEXIST; + goto put_ep; } bnx2i_ep->conn = bnx2i_conn; bnx2i_conn->ep = bnx2i_ep; @@ -1458,6 +1465,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, bnx2i_put_rq_buf(bnx2i_conn, 0); bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); +put_ep: + iscsi_put_endpoint(ep); return ret_code; } @@ -2265,17 +2274,23 @@ static struct 
scsi_host_template bnx2i_host_template = { .track_queue_depth = 1, }; + +static struct iscsi_transport_expand bnx2i_iscsi_expand = { + .unbind_conn = iscsi_conn_unbind, +}; + struct iscsi_transport bnx2i_iscsi_transport = { .owner = THIS_MODULE, .name = "bnx2i", .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD | - CAP_TEXT_NEGO, + CAP_TEXT_NEGO | CAP_OPS_EXPAND, .create_session = bnx2i_session_create, .destroy_session = bnx2i_session_destroy, .create_conn = bnx2i_conn_create, .bind_conn = bnx2i_conn_bind, + .ops_expand = &bnx2i_iscsi_expand, .destroy_conn = bnx2i_conn_destroy, .attr_is_visible = bnx2i_attr_is_visible, .set_param = iscsi_set_param, diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 37d99357120faeae6d16b6fdbc18993e29b96af9..65eb6230d390853302ab9ea8b14c9a5e56259af9 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -100,13 +100,18 @@ static struct scsi_host_template cxgb3i_host_template = { .track_queue_depth = 1, }; +static struct iscsi_transport_expand cxgb3i_iscsi_expand = { + .unbind_conn = iscsi_conn_unbind, +}; + static struct iscsi_transport cxgb3i_iscsi_transport = { .owner = THIS_MODULE, .name = DRV_MODULE_NAME, /* owner and name should be set already */ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST | CAP_DIGEST_OFFLOAD | - CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, + CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO | + CAP_OPS_EXPAND, .attr_is_visible = cxgbi_attr_is_visible, .get_host_param = cxgbi_get_host_param, .set_host_param = cxgbi_set_host_param, @@ -117,6 +122,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { /* connection management */ .create_conn = cxgbi_create_conn, .bind_conn = cxgbi_bind_conn, + .ops_expand = &cxgb3i_iscsi_expand, .destroy_conn = iscsi_tcp_conn_teardown, .start_conn = iscsi_conn_start, .stop_conn = iscsi_conn_stop, diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 2c3491528d4245873505bcb09c481215569a5fdc..88f96964c356934a1dc3067df0ebdebd6850550d 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -118,12 +118,17 @@ static struct scsi_host_template cxgb4i_host_template = { .track_queue_depth = 1, }; +static struct iscsi_transport_expand cxgb4i_iscsi_expand = { + .unbind_conn = iscsi_conn_unbind, +}; + static struct iscsi_transport cxgb4i_iscsi_transport = { .owner = THIS_MODULE, .name = DRV_MODULE_NAME, .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST | CAP_DIGEST_OFFLOAD | - CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, + CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO | + CAP_OPS_EXPAND, .attr_is_visible = cxgbi_attr_is_visible, .get_host_param = cxgbi_get_host_param, .set_host_param = cxgbi_set_host_param, @@ -134,6 +139,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = { /* connection management */ .create_conn = cxgbi_create_conn, .bind_conn = cxgbi_bind_conn, + .ops_expand = &cxgb4i_iscsi_expand, .destroy_conn = iscsi_tcp_conn_teardown, .start_conn = iscsi_conn_start, .stop_conn = iscsi_conn_stop, diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index ecb134b4699f2378796c78eab1c71fdf40d5ce2d..506b561670af0f917b1d25ed4cb90b6eaef10b4e 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -2690,11 +2690,13 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, ppm->tformat.pgsz_idx_dflt); if 
(err < 0) - return err; + goto put_ep; err = iscsi_conn_bind(cls_session, cls_conn, is_leading); - if (err) - return -EINVAL; + if (err) { + err = -EINVAL; + goto put_ep; + } /* calculate the tag idx bits needed for this conn based on cmds_max */ cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1; @@ -2715,7 +2717,9 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, /* init recv engine */ iscsi_tcp_hdr_recv_prep(tcp_conn); - return 0; +put_ep: + iscsi_put_endpoint(ep); + return err; } EXPORT_SYMBOL_GPL(cxgbi_bind_conn); diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 436d174f2194bc793c4912556b75b77e76bba0ec..679e38be439a2367b187efb231c3b48d903b0ba3 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -154,6 +154,7 @@ enum hisi_sas_bit_err_type { enum hisi_sas_phy_event { HISI_PHYE_PHY_UP = 0U, HISI_PHYE_LINK_RESET, + HISI_PHYE_PHY_UP_PM, HISI_PHYES_NUM, }; @@ -649,6 +650,7 @@ extern int hisi_sas_probe(struct platform_device *pdev, extern int hisi_sas_remove(struct platform_device *pdev); extern int hisi_sas_slave_configure(struct scsi_device *sdev); +extern int hisi_sas_slave_alloc(struct scsi_device *sdev); extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time); extern void hisi_sas_scan_start(struct Scsi_Host *shost); extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 98a1754907d1cf1c06671a8c62f99d144ae6fdd6..a1c6a67da132a37ff9a1c2d7abf46ec9a85eb049 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -619,12 +619,6 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, if (!phy->phy_attached) return; - if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) && - !sas_phy->suspended) { - dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no); - return; - } - sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); if (sas_phy->phy) { @@ -756,6 +750,20 @@ static int hisi_sas_init_device(struct domain_device *device) return rc; } +int hisi_sas_slave_alloc(struct scsi_device *sdev) +{ + struct domain_device *ddev; + int rc; + + rc = sas_slave_alloc(sdev); + if (rc) + return rc; + ddev = sdev_to_domain_dev(sdev); + + return hisi_sas_init_device(ddev); +} +EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc); + static int hisi_sas_dev_found(struct domain_device *device) { struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); @@ -802,9 +810,6 @@ static int hisi_sas_dev_found(struct domain_device *device) dev_info(dev, "dev[%d:%x] found\n", sas_dev->device_id, sas_dev->dev_type); - rc = hisi_sas_init_device(device); - if (rc) - goto err_out; sas_dev->dev_status = HISI_SAS_DEV_NORMAL; return 0; @@ -849,10 +854,11 @@ int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) } EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); -static void hisi_sas_phyup_work(struct work_struct *work) +static void hisi_sas_phyup_work_common(struct work_struct *work, + enum hisi_sas_phy_event event) { struct hisi_sas_phy *phy = - container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]); + container_of(work, typeof(*phy), works[event]); struct hisi_hba *hisi_hba = phy->hisi_hba; struct asd_sas_phy *sas_phy = &phy->sas_phy; int phy_no = sas_phy->id; @@ -863,6 +869,11 @@ static void hisi_sas_phyup_work(struct work_struct *work) hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); } +static void 
hisi_sas_phyup_work(struct work_struct *work) +{ + hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP); +} + static void hisi_sas_linkreset_work(struct work_struct *work) { struct hisi_sas_phy *phy = @@ -872,9 +883,21 @@ static void hisi_sas_linkreset_work(struct work_struct *work) hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); } +static void hisi_sas_phyup_pm_work(struct work_struct *work) +{ + struct hisi_sas_phy *phy = + container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]); + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = hisi_hba->dev; + + hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM); + pm_runtime_put_sync(dev); +} + static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, + [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work, }; bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, @@ -1135,9 +1158,17 @@ static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata) { + struct hisi_sas_phy *phy = container_of(sas_phy, + struct hisi_sas_phy, sas_phy); struct sas_ha_struct *sas_ha = sas_phy->ha; struct hisi_hba *hisi_hba = sas_ha->lldd_ha; + struct device *dev = hisi_hba->dev; + DECLARE_COMPLETION_ONSTACK(completion); int phy_no = sas_phy->id; + u8 sts = phy->phy_attached; + int ret = 0; + + phy->reset_completion = &completion; switch (func) { case PHY_FUNC_HARD_RESET: @@ -1152,21 +1183,35 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, case PHY_FUNC_DISABLE: hisi_sas_phy_enable(hisi_hba, phy_no, 0); - break; + goto out; case PHY_FUNC_SET_LINK_RATE: - return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); + ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); + break; + case PHY_FUNC_GET_EVENTS: if (hisi_hba->hw->get_events) { hisi_hba->hw->get_events(hisi_hba, phy_no); - break; + goto out; } fallthrough; case PHY_FUNC_RELEASE_SPINUP_HOLD: default: - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto out; } - return 0; + + if (sts && !wait_for_completion_timeout(&completion, 2 * HZ)) { + dev_warn(dev, "phy%d wait phyup timed out for func %d\n", + phy_no, func); + if (phy->in_reset) + ret = -ETIMEDOUT; + } + +out: + phy->reset_completion = NULL; + + return ret; } static void hisi_sas_task_done(struct sas_task *task) @@ -1400,11 +1445,13 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) sas_port = device->port; port = to_hisi_sas_port(sas_port); + spin_lock(&sas_port->phy_list_lock); list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) if (state & BIT(sas_phy->id)) { phy = sas_phy->lldd_phy; break; } + spin_unlock(&sas_port->phy_list_lock); if (phy) { port->id = phy->port_id; @@ -1481,22 +1528,25 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, struct ata_link *link; u8 fis[20] = {0}; u32 state; + int i; state = hisi_hba->hw->get_phys_state(hisi_hba); - list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) { + for (i = 0; i < hisi_hba->n_phy; i++) { if (!(state & BIT(sas_phy->id))) continue; + if (!(sas_port->phy_mask & BIT(i))) + continue; ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); - tmf_task.phy_id = sas_phy->id; + tmf_task.phy_id = i; hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); rc = hisi_sas_exec_internal_tmf_task(device, fis, s, &tmf_task); if (rc != TMF_RESP_FUNC_COMPLETE) { dev_err(dev, 
"phy%d ata reset failed rc=%d\n", - sas_phy->id, rc); + i, rc); break; } } @@ -1773,7 +1823,6 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct sas_ha_struct *sas_ha = &hisi_hba->sha; - DECLARE_COMPLETION_ONSTACK(phyreset); int rc, reset_type; if (!local_phy->enabled) { @@ -1786,8 +1835,11 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) sas_ha->sas_phy[local_phy->number]; struct hisi_sas_phy *phy = container_of(sas_phy, struct hisi_sas_phy, sas_phy); + unsigned long flags; + + spin_lock_irqsave(&phy->lock, flags); phy->in_reset = 1; - phy->reset_completion = &phyreset; + spin_unlock_irqrestore(&phy->lock, flags); } reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || @@ -1801,17 +1853,14 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) sas_ha->sas_phy[local_phy->number]; struct hisi_sas_phy *phy = container_of(sas_phy, struct hisi_sas_phy, sas_phy); - int ret = wait_for_completion_timeout(&phyreset, - I_T_NEXUS_RESET_PHYUP_TIMEOUT); unsigned long flags; spin_lock_irqsave(&phy->lock, flags); - phy->reset_completion = NULL; phy->in_reset = 0; spin_unlock_irqrestore(&phy->lock, flags); /* report PHY down if timed out */ - if (!ret) + if (rc == -ETIMEDOUT) hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) { /* @@ -2803,13 +2852,13 @@ EXPORT_SYMBOL_GPL(hisi_sas_remove); #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) #define DEBUGFS_ENABLE_DEFAULT "enabled" bool hisi_sas_debugfs_enable = true; -u32 hisi_sas_debugfs_dump_count = 50; #else #define DEBUGFS_ENABLE_DEFAULT "disabled" bool hisi_sas_debugfs_enable; -u32 hisi_sas_debugfs_dump_count = 1; #endif +u32 hisi_sas_debugfs_dump_count = 1; + EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); MODULE_PARM_DESC(hisi_sas_debugfs_enable, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index afe639994f3dd9435cadd9b3843c340c33aea54a..fdff327bb0300a106e0d100a51cd1032cd8fcdea 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1327,7 +1327,6 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p) u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; irqreturn_t res = IRQ_HANDLED; - unsigned long flags; irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); if (!(irq_value & CHL_INT2_SL_PHY_ENA_MSK)) { @@ -1380,15 +1379,9 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); - - spin_lock_irqsave(&phy->lock, flags); - if (phy->reset_completion) { - phy->in_reset = 0; - complete(phy->reset_completion); - } - spin_unlock_irqrestore(&phy->lock, flags); - end: + if (phy->reset_completion) + complete(phy->reset_completion); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, CHL_INT2_SL_PHY_ENA_MSK); @@ -1771,7 +1764,7 @@ static struct scsi_host_template sht_v1_hw = { .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, - .slave_alloc = sas_slave_alloc, + .slave_alloc = hisi_sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, #ifdef 
CONFIG_COMPAT diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index b0b2361e63fef5cbef0e039f743fb6ac18584110..9bfa796505aa71e56eb2482a597c066fe8387499 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -2641,7 +2641,6 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) struct device *dev = hisi_hba->dev; u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd; struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd; - unsigned long flags; hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); @@ -2696,14 +2695,9 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) set_link_timer_quirk(hisi_hba); } hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); - spin_lock_irqsave(&phy->lock, flags); - if (phy->reset_completion) { - phy->in_reset = 0; - complete(phy->reset_completion); - } - spin_unlock_irqrestore(&phy->lock, flags); - end: + if (phy->reset_completion) + complete(phy->reset_completion); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_SL_PHY_ENABLE_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0); @@ -3204,7 +3198,6 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate; irqreturn_t res = IRQ_HANDLED; u8 attached_sas_addr[SAS_ADDR_SIZE] = {0}; - unsigned long flags; int phy_no, offset; del_timer(&phy->timer); @@ -3280,12 +3273,8 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP); - spin_lock_irqsave(&phy->lock, flags); - if (phy->reset_completion) { - phy->in_reset = 0; + if (phy->reset_completion) complete(phy->reset_completion); - } - spin_unlock_irqrestore(&phy->lock, flags); end: hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp); hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk); @@ -3584,7 +3573,7 @@ static struct scsi_host_template sht_v2_hw = { .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, - .slave_alloc = sas_slave_alloc, + .slave_alloc = hisi_sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, #ifdef CONFIG_COMPAT diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 8b56d3e81ce1576be03ceee5001e9251555efcb0..fd5bdb0afa715989c3ade7ed9777cf434adbaa1f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -527,7 +527,7 @@ MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)"); /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ static int prot_mask; -module_param(prot_mask, int, 0); +module_param(prot_mask, int, 0444); MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 "); static void debugfs_work_handler_v3_hw(struct work_struct *work); @@ -1480,7 +1480,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct device *dev = hisi_hba->dev; - unsigned long flags; del_timer(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); @@ -1560,15 +1559,13 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) phy->port_id = port_id; phy->phy_attached = 1; - hisi_sas_notify_phy_event(phy, 
HISI_PHYE_PHY_UP); + /* Call pm_runtime_put_sync() with pairs in hisi_sas_phyup_pm_work() */ + pm_runtime_get_noresume(dev); + hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM); res = IRQ_HANDLED; - spin_lock_irqsave(&phy->lock, flags); - if (phy->reset_completion) { - phy->in_reset = 0; - complete(phy->reset_completion); - } - spin_unlock_irqrestore(&phy->lock, flags); end: + if (phy->reset_completion) + complete(phy->reset_completion); hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_SL_PHY_ENABLE_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0); @@ -2392,17 +2389,25 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) return IRQ_WAKE_THREAD; } +static void hisi_sas_v3_free_vectors(void *data) +{ + struct pci_dev *pdev = data; + + pci_free_irq_vectors(pdev); +} + static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba) { int vectors; int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi; struct Scsi_Host *shost = hisi_hba->shost; + struct pci_dev *pdev = hisi_hba->pci_dev; struct irq_affinity desc = { .pre_vectors = BASE_VECTORS_V3_HW, }; min_msi = MIN_AFFINE_VECTORS_V3_HW; - vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev, + vectors = pci_alloc_irq_vectors_affinity(pdev, min_msi, max_msi, PCI_IRQ_MSI | PCI_IRQ_AFFINITY, @@ -2414,6 +2419,7 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba) hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW; shost->nr_hw_queues = hisi_hba->cq_nvecs; + devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev); return 0; } @@ -3155,7 +3161,7 @@ static struct scsi_host_template sht_v3_hw = { .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, - .slave_alloc = sas_slave_alloc, + .slave_alloc = hisi_sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, #ifdef CONFIG_COMPAT @@ -3959,6 +3965,54 @@ static const struct file_operations debugfs_bist_phy_v3_hw_fops = { .owner = THIS_MODULE, }; +static ssize_t debugfs_bist_cnt_v3_hw_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + unsigned int cnt; + int val; + + if (hisi_hba->debugfs_bist_enable) + return -EPERM; + + val = kstrtouint_from_user(buf, count, 0, &cnt); + if (val) + return val; + + if (cnt) + return -EINVAL; + + hisi_hba->debugfs_bist_cnt = 0; + return count; +} + +static int debugfs_bist_cnt_v3_hw_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + + seq_printf(s, "%u\n", hisi_hba->debugfs_bist_cnt); + + return 0; +} + +static int debugfs_bist_cnt_v3_hw_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, debugfs_bist_cnt_v3_hw_show, + inode->i_private); +} + +static const struct file_operations debugfs_bist_cnt_v3_hw_ops = { + .open = debugfs_bist_cnt_v3_hw_open, + .read = seq_read, + .write = debugfs_bist_cnt_v3_hw_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + static const struct { int value; char *name; @@ -4596,8 +4650,8 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba) debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry, hisi_hba, &debugfs_bist_phy_v3_hw_fops); - debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry, - &hisi_hba->debugfs_bist_cnt); + debugfs_create_file("cnt", 0600, hisi_hba->debugfs_bist_dentry, + hisi_hba, &debugfs_bist_cnt_v3_hw_ops); 
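+ /*
+ * The "cnt" attribute rejects writes while BIST is enabled and only
+ * accepts 0 (e.g. "echo 0 > cnt") to clear the counter; any other
+ * value is refused with -EINVAL, see debugfs_bist_cnt_v3_hw_write().
+ */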
debugfs_create_file("loopback_mode", 0600, hisi_hba->debugfs_bist_dentry, @@ -4763,7 +4817,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(dev, "%d hw queues\n", shost->nr_hw_queues); rc = scsi_add_host(shost, dev); if (rc) - goto err_out_free_irq_vectors; + goto err_out_debugfs; rc = sas_register_ha(sha); if (rc) @@ -4775,6 +4829,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) scsi_scan_host(shost); + pm_runtime_set_autosuspend_delay(dev, 5000); + pm_runtime_use_autosuspend(dev); /* * For the situation that there are ATA disks connected with SAS * controller, it additionally creates ata_port which will affect the @@ -4792,8 +4848,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) sas_unregister_ha(sha); err_out_register_ha: scsi_remove_host(shost); -err_out_free_irq_vectors: - pci_free_irq_vectors(pdev); err_out_debugfs: debugfs_exit_v3_hw(hisi_hba); err_out_ha: @@ -4821,7 +4875,6 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq); } - pci_free_irq_vectors(pdev); } static void hisi_sas_v3_remove(struct pci_dev *pdev) @@ -4904,6 +4957,8 @@ static int _suspend_v3_hw(struct device *device) if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) return -1; + dev_warn(dev, "entering suspend state\n"); + scsi_block_requests(shost); set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); flush_workqueue(hisi_hba->wq); @@ -4919,11 +4974,11 @@ static int _suspend_v3_hw(struct device *device) hisi_sas_init_mem(hisi_hba); - dev_warn(dev, "entering suspend state\n"); - hisi_sas_release_tasks(hisi_hba); sas_suspend_ha(sha); + + dev_warn(dev, "end of suspending controller\n"); return 0; } @@ -4950,9 +5005,19 @@ static int _resume_v3_hw(struct device *device) return rc; } phys_init_v3_hw(hisi_hba); - sas_resume_ha(sha); + + /* + * If a directly-attached disk is removed during suspend, a deadlock + * may occur, as the PHYE_RESUME_TIMEOUT processing will require the + * hisi_hba->device to be active, which can only happen when resume + * completes. So don't wait for the HA event workqueue to drain upon + * resume. 
+ */ + sas_resume_ha_no_sync(sha); clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); + dev_warn(dev, "end of resuming controller\n"); + return 0; } diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index cc3908c2d2f943173e0bc6cffe7770032d1eb2a8..a3431485def8f6dcab59cb133542a5c7e5a5a795 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -35,7 +35,7 @@ #define IBMVSCSIS_VERSION "v0.2" -#define INITIAL_SRP_LIMIT 800 +#define INITIAL_SRP_LIMIT 1024 #define DEFAULT_MAX_SECTORS 256 #define MAX_TXU 1024 * 1024 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index a50f1eef0e0cdb4845dbab478ca65d9be3117e3e..4261380af97b4f434e362859e767112b440ab8c3 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1702,6 +1702,7 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp) if (cancel_delayed_work_sync(&ep->timeout_work)) { FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n"); fc_exch_release(ep); /* release from pending timer hold */ + return; } spin_lock_bh(&ep->ex_lock); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 7ef5efd94422487c076e17569dbf045af0926fa7..e361856509d5da2e38269e082cebda8ed50caa61 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1386,23 +1386,32 @@ void iscsi_session_failure(struct iscsi_session *session, } EXPORT_SYMBOL_GPL(iscsi_session_failure); -void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) +static bool iscsi_set_conn_failed(struct iscsi_conn *conn) { struct iscsi_session *session = conn->session; - spin_lock_bh(&session->frwd_lock); - if (session->state == ISCSI_STATE_FAILED) { - spin_unlock_bh(&session->frwd_lock); - return; - } + if (session->state == ISCSI_STATE_FAILED) + return false; if (conn->stop_stage == 0) session->state = ISCSI_STATE_FAILED; - spin_unlock_bh(&session->frwd_lock); set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); - iscsi_conn_error_event(conn->cls_conn, err); + return true; +} + +void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) +{ + struct iscsi_session *session = conn->session; + bool needs_evt; + + spin_lock_bh(&session->frwd_lock); + needs_evt = iscsi_set_conn_failed(conn); + spin_unlock_bh(&session->frwd_lock); + + if (needs_evt) + iscsi_conn_error_event(conn->cls_conn, err); } EXPORT_SYMBOL_GPL(iscsi_conn_failure); @@ -2178,6 +2187,51 @@ static void iscsi_check_transport_timeouts(struct timer_list *t) spin_unlock(&session->frwd_lock); } +/** + * iscsi_conn_unbind - prevent queueing to conn. + * @cls_conn: iscsi conn ep is bound to. + * @is_active: is the conn in use for boot or is this for EH/termination + * + * This must be called by drivers implementing the ep_disconnect callout. + * It disables queueing to the connection from libiscsi in preparation for + * an ep_disconnect call. + */ +void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active) +{ + struct iscsi_session *session; + struct iscsi_conn *conn; + + if (!cls_conn) + return; + + conn = cls_conn->dd_data; + session = conn->session; + /* + * Wait for iscsi_eh calls to exit. We don't wait for the tmf to + * complete or timeout. The caller just wants to know what's running + * is everything that needs to be cleaned up, and no cmds will be + * queued. 
+ */ + mutex_lock(&session->eh_mutex); + + iscsi_suspend_queue(conn); + iscsi_suspend_tx(conn); + + spin_lock_bh(&session->frwd_lock); + if (!is_active) { + /* + * if logout timed out before userspace could even send a PDU + * the state might still be in ISCSI_STATE_LOGGED_IN and + * allowing new cmds and TMFs. + */ + if (session->state == ISCSI_STATE_LOGGED_IN) + iscsi_set_conn_failed(conn); + } + spin_unlock_bh(&session->frwd_lock); + mutex_unlock(&session->eh_mutex); +} +EXPORT_SYMBOL_GPL(iscsi_conn_unbind); + static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, struct iscsi_tm *hdr) { diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index dba3f5bec6bedf10dbcd848628273bbcb9d18e8a..f92b889369c3996bfb6e5a0d0da1379c286185f6 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -203,7 +203,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) task->total_xfer_len = qc->nbytes; task->num_scatter = qc->n_elem; task->data_dir = qc->dma_dir; - } else if (qc->tf.protocol == ATA_PROT_NODATA) { + } else if (!ata_is_data(qc->tf.protocol)) { task->data_dir = DMA_NONE; } else { for_each_sg(qc->sg, sg, qc->n_elem, si) diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c index f703115e7a25be3a8fb64bf5ab2423a7b895cd0a..3613b9b315bc70545064952644b7ca59a8f181f9 100644 --- a/drivers/scsi/libsas/sas_event.c +++ b/drivers/scsi/libsas/sas_event.c @@ -41,12 +41,25 @@ static int sas_queue_event(int event, struct sas_work *work, return rc; } - -void __sas_drain_work(struct sas_ha_struct *ha) +void sas_queue_deferred_work(struct sas_ha_struct *ha) { struct sas_work *sw, *_sw; int ret; + spin_lock_irq(&ha->lock); + list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) { + list_del_init(&sw->drain_node); + ret = sas_queue_work(ha, sw); + if (ret != 1) { + pm_runtime_put(ha->dev); + sas_free_event(to_asd_sas_event(&sw->work)); + } + } + spin_unlock_irq(&ha->lock); +} + +void __sas_drain_work(struct sas_ha_struct *ha) +{ set_bit(SAS_HA_DRAINING, &ha->state); /* flush submitters */ spin_lock_irq(&ha->lock); @@ -55,16 +68,8 @@ void __sas_drain_work(struct sas_ha_struct *ha) drain_workqueue(ha->event_q); drain_workqueue(ha->disco_q); - spin_lock_irq(&ha->lock); clear_bit(SAS_HA_DRAINING, &ha->state); - list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) { - list_del_init(&sw->drain_node); - ret = sas_queue_work(ha, sw); - if (ret != 1) - sas_free_event(to_asd_sas_event(&sw->work)); - - } - spin_unlock_irq(&ha->lock); + sas_queue_deferred_work(ha); } int sas_drain_work(struct sas_ha_struct *ha) @@ -104,11 +109,15 @@ void sas_enable_revalidation(struct sas_ha_struct *ha) if (!test_and_clear_bit(ev, &d->pending)) continue; - if (list_empty(&port->phy_list)) + spin_lock(&port->phy_list_lock); + if (list_empty(&port->phy_list)) { + spin_unlock(&port->phy_list_lock); continue; + } sas_phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el); + spin_unlock(&port->phy_list_lock); sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_KERNEL); } @@ -119,19 +128,43 @@ void sas_enable_revalidation(struct sas_ha_struct *ha) static void sas_port_event_worker(struct work_struct *work) { struct asd_sas_event *ev = to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + struct sas_ha_struct *ha = phy->ha; sas_port_event_fns[ev->event](work); + pm_runtime_put(ha->dev); sas_free_event(ev); } static void sas_phy_event_worker(struct work_struct *work) { struct asd_sas_event *ev = 
to_asd_sas_event(work); + struct asd_sas_phy *phy = ev->phy; + struct sas_ha_struct *ha = phy->ha; sas_phy_event_fns[ev->event](work); + pm_runtime_put(ha->dev); sas_free_event(ev); } +/* defer works of new phys during suspend */ +static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev) +{ + struct sas_ha_struct *ha = phy->ha; + unsigned long flags; + bool deferred = false; + + spin_lock_irqsave(&ha->lock, flags); + if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) { + struct sas_work *sw = &ev->work; + + list_add_tail(&sw->drain_node, &ha->defer_q); + deferred = true; + } + spin_unlock_irqrestore(&ha->lock, flags); + return deferred; +} + int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event, gfp_t gfp_flags) { @@ -145,11 +178,19 @@ int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event, if (!ev) return -ENOMEM; + /* Call pm_runtime_put() with pairs in sas_port_event_worker() */ + pm_runtime_get_noresume(ha->dev); + INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event); + if (sas_defer_event(phy, ev)) + return 0; + ret = sas_queue_event(event, &ev->work, ha); - if (ret != 1) + if (ret != 1) { + pm_runtime_put(ha->dev); sas_free_event(ev); + } return ret; } @@ -168,11 +209,19 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event, if (!ev) return -ENOMEM; + /* Call pm_runtime_put() with pairs in sas_phy_event_worker() */ + pm_runtime_get_noresume(ha->dev); + INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event); + if (sas_defer_event(phy, ev)) + return 0; + ret = sas_queue_event(event, &ev->work, ha); - if (ret != 1) + if (ret != 1) { + pm_runtime_put(ha->dev); sas_free_event(ev); + } return ret; } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f5bf74f278bec979df27291540ee0d17e79367a8..2128992bc2f5c7ca1c8964140466e2d13f12c16a 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -58,7 +58,9 @@ static int smp_execute_task_sg(struct domain_device *dev, struct sas_task *task = NULL; struct sas_internal *i = to_sas_internal(dev->port->ha->core.shost->transportt); + struct sas_ha_struct *ha = dev->port->ha; + pm_runtime_get_sync(ha->dev); mutex_lock(&dev->ex_dev.cmd_mutex); for (retry = 0; retry < 3; retry++) { if (test_bit(SAS_DEV_GONE, &dev->state)) { @@ -131,6 +133,7 @@ static int smp_execute_task_sg(struct domain_device *dev, } } mutex_unlock(&dev->ex_dev.cmd_mutex); + pm_runtime_put_sync(ha->dev); BUG_ON(retry == 3 && task != NULL); sas_free_task(task); diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 23f8d34ccb0d9ef58bcd4df67164257d47cf685f..f1989a98f5114c69755f297a4aa95c606e7085b6 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -359,6 +359,7 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha) int i; set_bit(SAS_HA_REGISTERED, &ha->state); + set_bit(SAS_HA_RESUMING, &ha->state); /* clear out any stale link events/data from the suspension path */ for (i = 0; i < ha->num_phys; i++) { @@ -384,7 +385,31 @@ static int phys_suspended(struct sas_ha_struct *ha) return rc; } -void sas_resume_ha(struct sas_ha_struct *ha) +static void sas_resume_insert_broadcast_ha(struct sas_ha_struct *ha) +{ + int i; + + for (i = 0; i < ha->num_phys; i++) { + struct asd_sas_port *port = ha->sas_port[i]; + struct domain_device *dev = port->port_dev; + + if (dev && dev_is_expander(dev->dev_type)) { + struct asd_sas_phy *first_phy; + + spin_lock(&port->phy_list_lock); + first_phy 
= list_first_entry_or_null( + &port->phy_list, struct asd_sas_phy, + port_phy_el); + spin_unlock(&port->phy_list_lock); + + if (first_phy) + sas_notify_port_event(first_phy, + PORTE_BROADCAST_RCVD, GFP_KERNEL); + } + } +} + +static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain) { const unsigned long tmo = msecs_to_jiffies(25000); int i; @@ -414,10 +439,30 @@ void sas_resume_ha(struct sas_ha_struct *ha) * flush out disks that did not return */ scsi_unblock_requests(ha->core.shost); - sas_drain_work(ha); + if (drain) + sas_drain_work(ha); + clear_bit(SAS_HA_RESUMING, &ha->state); + + sas_queue_deferred_work(ha); + /* send event PORTE_BROADCAST_RCVD to identify some new inserted + * disks for expander + */ + sas_resume_insert_broadcast_ha(ha); +} + +void sas_resume_ha(struct sas_ha_struct *ha) +{ + _sas_resume_ha(ha, true); } EXPORT_SYMBOL(sas_resume_ha); +/* A no-sync variant, which does not call sas_drain_ha(). */ +void sas_resume_ha_no_sync(struct sas_ha_struct *ha) +{ + _sas_resume_ha(ha, false); +} +EXPORT_SYMBOL(sas_resume_ha_no_sync); + void sas_suspend_ha(struct sas_ha_struct *ha) { int i; diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index d7a1fb5c10c6e6d33ddb67d56d6eea3513a2399f..acd515c018610d5fed1af48d1e3eee42d8242bc3 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -14,6 +14,7 @@ #include #include #include +#include #ifdef pr_fmt #undef pr_fmt @@ -56,6 +57,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha); void sas_disable_revalidation(struct sas_ha_struct *ha); void sas_enable_revalidation(struct sas_ha_struct *ha); +void sas_queue_deferred_work(struct sas_ha_struct *ha); void __sas_drain_work(struct sas_ha_struct *ha); void sas_deform_port(struct asd_sas_phy *phy, int gone); diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 93e507677bdcb35934d8a563cb0d5d36df224657..03bc472f302a290607cfde8ac7cddb3bd6389776 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -374,6 +374,7 @@ struct lpfc_vport { #define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */ #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ #define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/ +#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */ #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ @@ -763,7 +764,6 @@ struct lpfc_hba { #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ #define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */ -#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ #define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ #define HBA_FORCED_LINK_SPEED 0x40000 /* * Firmware supports Forced Link Speed @@ -772,6 +772,7 @@ struct lpfc_hba { #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ #define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ + struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -898,6 +899,16 @@ struct lpfc_hba { uint32_t cfg_hostmem_hgp; uint32_t cfg_log_verbose; uint32_t cfg_enable_fc4_type; +#define LPFC_ENABLE_FCP 1 +#define LPFC_ENABLE_NVME 2 +#define LPFC_ENABLE_BOTH 3 +#if 
(IS_ENABLED(CONFIG_NVME_FC)) +#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH +#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH +#else +#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP +#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP +#endif uint32_t cfg_aer_support; uint32_t cfg_sriov_nr_virtfn; uint32_t cfg_request_firmware_upgrade; @@ -918,9 +929,6 @@ struct lpfc_hba { uint32_t cfg_ras_fwlog_func; uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */ uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */ -#define LPFC_ENABLE_FCP 1 -#define LPFC_ENABLE_NVME 2 -#define LPFC_ENABLE_BOTH 3 uint32_t cfg_enable_pbde; struct nvmet_fc_target_port *targetport; lpfc_vpd_t vpd; /* vital product data */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 2c59a5bf353907237c648794608b5510c909e4d0..f0d1ced6301624f71aa274f56dc26cb77f1191c0 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1142,6 +1142,9 @@ lpfc_issue_lip(struct Scsi_Host *shost) pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; pmboxq->u.mb.mbxOwner = OWN_HOST; + if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME)) + vport->fc_flag &= ~FC_PT2PT_NO_NVME; + mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); if ((mbxstatus == MBX_SUCCESS) && @@ -1536,25 +1539,25 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) before_fc_flag = phba->pport->fc_flag; sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn; - /* Disable SR-IOV virtual functions if enabled */ - if (phba->cfg_sriov_nr_virtfn) { - pci_disable_sriov(pdev); - phba->cfg_sriov_nr_virtfn = 0; - } + if (opcode == LPFC_FW_DUMP) { + init_completion(&online_compl); + phba->fw_dump_cmpl = &online_compl; + } else { + /* Disable SR-IOV virtual functions if enabled */ + if (phba->cfg_sriov_nr_virtfn) { + pci_disable_sriov(pdev); + phba->cfg_sriov_nr_virtfn = 0; + } - if (opcode == LPFC_FW_DUMP) - phba->hba_flag |= HBA_FW_DUMP_OP; + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); - status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); + if (status != 0) + return status; - if (status != 0) { - phba->hba_flag &= ~HBA_FW_DUMP_OP; - return status; + /* wait for the device to be quiesced before firmware reset */ + msleep(100); } - /* wait for the device to be quiesced before firmware reset */ - msleep(100); - reg_val = readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); @@ -1583,24 +1586,42 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3153 Fail to perform the requested " "access: x%x\n", reg_val); + if (phba->fw_dump_cmpl) + phba->fw_dump_cmpl = NULL; return rc; } /* keep the original port state */ - if (before_fc_flag & FC_OFFLINE_MODE) - goto out; - - init_completion(&online_compl); - job_posted = lpfc_workq_post_event(phba, &status, &online_compl, - LPFC_EVT_ONLINE); - if (!job_posted) + if (before_fc_flag & FC_OFFLINE_MODE) { + if (phba->fw_dump_cmpl) + phba->fw_dump_cmpl = NULL; goto out; + } - wait_for_completion(&online_compl); + /* Firmware dump will trigger an HA_ERATT event, and + * lpfc_handle_eratt_s4 routine already handles bringing the port back + * online. 
+ */ + if (opcode == LPFC_FW_DUMP) { + wait_for_completion(phba->fw_dump_cmpl); + } else { + init_completion(&online_compl); + job_posted = lpfc_workq_post_event(phba, &status, &online_compl, + LPFC_EVT_ONLINE); + if (!job_posted) + goto out; + wait_for_completion(&online_compl); + } out: /* in any case, restore the virtual functions enabled as before */ if (sriov_nr_virtfn) { + /* If fw_dump was performed, first disable to clean up */ + if (opcode == LPFC_FW_DUMP) { + pci_disable_sriov(pdev); + phba->cfg_sriov_nr_virtfn = 0; + } + sriov_err = lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn); if (!sriov_err) @@ -3779,8 +3800,8 @@ LPFC_ATTR_R(nvmet_mrq_post, * 3 - register both FCP and NVME * Supported values are [1,3]. Default value is 3 */ -LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, - LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, +LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE, + LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE, "Enable FC4 Protocol support - FCP / NVME"); /* diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 3d9889b3d5c8a6ce487d4137501f9bb2ccbcee0b..387b0cd1ea18f8624c49ddd521a4a1db64ece118 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1067,7 +1067,8 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* FLOGI failed, so there is no fabric */ spin_lock_irq(shost->host_lock); - vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP | + FC_PT2PT_NO_NVME); spin_unlock_irq(shost->host_lock); /* If private loop, then allow max outstanding els to be @@ -3945,6 +3946,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Added for Vendor specifc support * Just keep retrying for these Rsn / Exp codes */ + if ((vport->fc_flag & FC_PT2PT) && + cmd == ELS_CMD_NVMEPRLI) { + switch (stat.un.b.lsRjtRsnCode) { + case LSRJT_UNABLE_TPC: + case LSRJT_INVALID_CMD: + case LSRJT_LOGICAL_ERR: + case LSRJT_CMD_UNSUPPORTED: + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "0168 NVME PRLI LS_RJT " + "reason %x port doesn't " + "support NVME, disabling NVME\n", + stat.un.b.lsRjtRsnCode); + retry = 0; + vport->fc_flag |= FC_PT2PT_NO_NVME; + goto out_retry; + } + } switch (stat.un.b.lsRjtRsnCode) { case LSRJT_UNABLE_TPC: /* The driver has a VALID PLOGI but the rport has diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index f4a672e5497161e1cb4666244e0c4a05e2937ffb..68ff233f936e5bde73c1718e48a3c9ebbea09161 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -635,10 +635,16 @@ lpfc_work_done(struct lpfc_hba *phba) if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) lpfc_sli4_post_async_mbox(phba); - if (ha_copy & HA_ERATT) + if (ha_copy & HA_ERATT) { /* Handle the error attention event */ lpfc_handle_eratt(phba); + if (phba->fw_dump_cmpl) { + complete(phba->fw_dump_cmpl); + phba->fw_dump_cmpl = NULL; + } + } + if (ha_copy & HA_MBATT) lpfc_sli_handle_mb_event(phba); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 37612299a34a14ca4825cee107e19b3e04e9035c..134e4ee5dc48153e53240c288e030062e757c22a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1998,7 +1998,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) } if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) { - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "3143 Port Down: Firmware Update " "Detected\n"); 
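The lpfc hunks above replace the HBA_FW_DUMP_OP flag with a completion handshake: lpfc_sli4_pdev_reg_request() publishes phba->fw_dump_cmpl and sleeps on it, and lpfc_work_done() signals it once lpfc_handle_eratt() has processed the HA_ERATT raised by the firmware dump. A stripped-down sketch of that pairing, assuming a hypothetical demo_ctx structure in place of the hba; the real code serializes the pointer through the worker thread:

#include <linux/completion.h>

struct demo_ctx {
	struct completion *dump_cmpl;	/* NULL unless a dump is in flight */
};

/* Requester side: arm the completion, start the dump, then wait. */
static void demo_request_dump(struct demo_ctx *ctx)
{
	DECLARE_COMPLETION_ONSTACK(done);

	ctx->dump_cmpl = &done;
	/* ... trigger the firmware dump; it ends in an error attention ... */
	wait_for_completion(&done);
}

/* Error-attention worker: wake the waiter once recovery has run. */
static void demo_handle_eratt(struct demo_ctx *ctx)
{
	/* ... handle the error attention, bring the port back online ... */
	if (ctx->dump_cmpl) {
		complete(ctx->dump_cmpl);
		ctx->dump_cmpl = NULL;
	}
}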
en_rn_msg = false; @@ -13614,6 +13614,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev) psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); + /* Init cpu_map array */ + lpfc_cpu_map_array_init(phba); /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 6afcb1426e35724f48d61219e8776110204d2b70..e33f752318c198657b6d534c26f0c0fea883fe3f 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -2010,8 +2010,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, * is configured try it. */ ndlp->nlp_fc4_type |= NLP_FC4_FCP; - if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) && + (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || + vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { ndlp->nlp_fc4_type |= NLP_FC4_NVME; /* We need to update the localport also */ lpfc_nvme_update_localport(vport); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 06a23718a7c7feb630addf1fd8e24cde908f18d8..a50f870c5f7250efc2106df130fc8cd509185f56 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4629,12 +4629,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) phba->fcf.fcf_flag = 0; spin_unlock_irq(&phba->hbalock); - /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ - if (phba->hba_flag & HBA_FW_DUMP_OP) { - phba->hba_flag &= ~HBA_FW_DUMP_OP; - return rc; - } - /* Now physically reset the device */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0389 Performing PCI function reset!\n"); @@ -7378,6 +7372,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) struct lpfc_vport *vport = phba->pport; struct lpfc_dmabuf *mp; struct lpfc_rqb *rqbp; + u32 flg; /* Perform a PCI function reset to start from clean */ rc = lpfc_pci_function_reset(phba); @@ -7391,7 +7386,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) else { spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_SLI_ACTIVE; + flg = phba->sli.sli_flag; spin_unlock_irq(&phba->hbalock); + /* Allow a little time after setting SLI_ACTIVE for any polled + * MBX commands to complete via BSG. + */ + for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) { + msleep(20); + spin_lock_irq(&phba->hbalock); + flg = phba->sli.sli_flag; + spin_unlock_irq(&phba->hbalock); + } } lpfc_sli4_dip(phba); @@ -8928,7 +8933,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, "(%d):2541 Mailbox command x%x " "(x%x/x%x) failure: " "mqe_sta: x%x mcqe_sta: x%x/x%x " - "Data: x%x x%x\n,", + "Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, @@ -8962,7 +8967,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, "(%d):2597 Sync Mailbox command " "x%x (x%x/x%x) failure: " "mqe_sta: x%x mcqe_sta: x%x/x%x " - "Data: x%x x%x\n,", + "Data: x%x x%x\n", mboxq->vport ? 
mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, @@ -12408,6 +12413,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) uint32_t uerr_sta_hi, uerr_sta_lo; uint32_t if_type, portsmphr; struct lpfc_register portstat_reg; + u32 logmask; /* * For now, use the SLI4 device internal unrecoverable error @@ -12458,7 +12464,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba) readl(phba->sli4_hba.u.if_type2.ERR1regaddr); phba->work_status[1] = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + logmask = LOG_TRACE_EVENT; + if (phba->work_status[0] == + SLIPORT_ERR1_REG_ERR_CODE_2 && + phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART) + logmask = LOG_SLI; + lpfc_printf_log(phba, KERN_ERR, logmask, "2885 Port Status Event: " "port status reg 0x%x, " "port smphr reg 0x%x, " diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 6b8ec57e8bdfa206bfa5c22dd38588e2d05e850c..c088a848776efc4e96baa1009e25e8d2308fe8e3 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -2554,6 +2554,9 @@ struct megasas_instance_template { #define MEGASAS_IS_LOGICAL(sdev) \ ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1) +#define MEGASAS_IS_LUN_VALID(sdev) \ + (((sdev)->lun == 0) ? 1 : 0) + #define MEGASAS_DEV_INDEX(scp) \ (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ scp->device->id) diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 1c86b67476f09eef370c9a33fe50e206809944f5..718160ca66b3542545449182e4283706579af73d 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2116,6 +2116,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev) goto scan_target; } return -ENXIO; + } else if (!MEGASAS_IS_LUN_VALID(sdev)) { + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); + return -ENXIO; } scan_target: @@ -2146,6 +2149,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev) instance = megasas_lookup_instance(sdev->host->host_no); if (MEGASAS_IS_LOGICAL(sdev)) { + if (!MEGASAS_IS_LUN_VALID(sdev)) { + sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__); + return; + } ld_tgt_id = MEGASAS_TARGET_ID(sdev); instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED; if (megasas_dbg_lvl & LD_PD_DEBUG) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 3fbbdf084d67a3f574b2de37c6394d3e3b2c4639..3153f164554aabaa6fbbed8d847b76316cc2b318 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -1832,9 +1832,10 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll) enable_irq(reply_q->os_irq); } } + + if (poll) + _base_process_reply_queue(reply_q); } - if (poll) - _base_process_reply_queue(reply_q); } /** diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index b03c0f35d7b047208e2315d4a4c532ac6302b027..85ca8421fb862d0565d759ecd22b6584b0beba29 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -646,6 +646,7 @@ static struct pci_device_id mvs_pci_table[] = { { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 }, { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 }, { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 }, + { PCI_VDEVICE(TTI, 0x2640), chip_6440 }, { PCI_VDEVICE(TTI, 0x2710), chip_9480 }, { PCI_VDEVICE(TTI, 0x2720), chip_9480 }, { PCI_VDEVICE(TTI, 0x2721), chip_9480 
}, @@ -697,7 +698,7 @@ static ssize_t mvs_show_driver_version(struct device *cdev, struct device_attribute *attr, char *buffer) { - return snprintf(buffer, PAGE_SIZE, "%s\n", DRV_VERSION); + return sysfs_emit(buffer, "%s\n", DRV_VERSION); } static DEVICE_ATTR(driver_version, @@ -749,7 +750,7 @@ mvs_store_interrupt_coalescing(struct device *cdev, static ssize_t mvs_show_interrupt_coalescing(struct device *cdev, struct device_attribute *attr, char *buffer) { - return snprintf(buffer, PAGE_SIZE, "%d\n", interrupt_coalescing); + return sysfs_emit(buffer, "%d\n", interrupt_coalescing); } static DEVICE_ATTR(interrupt_coalescing, diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 31d1ea5a5dd2bf321184a87ba590ebe95e23d8f1..1e52bc7febfab7a5b253c834a7418745e8022336 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -67,8 +67,10 @@ static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) while (sha->sas_port[i]) { if (sha->sas_port[i] == dev->port) { + spin_lock(&sha->sas_port[i]->phy_list_lock); phy = container_of(sha->sas_port[i]->phy_list.next, struct asd_sas_phy, port_phy_el); + spin_unlock(&sha->sas_port[i]->phy_list_lock); j = 0; while (sha->sas_phy[j]) { if (sha->sas_phy[j] == phy) @@ -96,6 +98,8 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) while (sha->sas_port[i]) { if (sha->sas_port[i] == dev->port) { struct asd_sas_phy *phy; + + spin_lock(&sha->sas_port[i]->phy_list_lock); list_for_each_entry(phy, &sha->sas_port[i]->phy_list, port_phy_el) { j = 0; @@ -109,6 +113,7 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) num++; n++; } + spin_unlock(&sha->sas_port[i]->phy_list_lock); break; } i++; diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index 78c41bbf67562c03c514ff342568071041ed7ddd..e6a6678967e52e0b4a181d3510b8b56f5fa4ff1f 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -2272,7 +2272,8 @@ static void myrs_cleanup(struct myrs_hba *cs) myrs_unmap(cs); if (cs->mmio_base) { - cs->disable_intr(cs); + if (cs->disable_intr) + cs->disable_intr(cs); iounmap(cs->mmio_base); cs->mmio_base = NULL; } diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index a7457a207d63f7c2aa7841539965f3e9253b9f7b..73e6f5d17ca720223176daf15a2f9bc4fd92ef58 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1323,7 +1323,9 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, int q_index = circularQ - pm8001_ha->inbnd_q_tbl; int rv = -1; - WARN_ON(q_index >= PM8001_MAX_INB_NUM); + if (WARN_ON(q_index >= pm8001_ha->max_q_num)) + return -EINVAL; + spin_lock_irqsave(&circularQ->iq_lock, flags); rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size, &pMessage); @@ -1709,7 +1711,6 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, } task = sas_alloc_slow_task(GFP_ATOMIC); - if (!task) { pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n"); return; @@ -1718,13 +1719,16 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, task->task_done = pm8001_task_done; res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); - if (res) + if (res) { + sas_free_task(task); return; + } ccb = &pm8001_ha->ccb_info[ccb_tag]; ccb->device = pm8001_ha_dev; ccb->ccb_tag = ccb_tag; ccb->task = task; + ccb->n_elem = 0; circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -1735,8 +1739,10 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, 
&task_abort, sizeof(task_abort), 0); - if (ret) + if (ret) { + sas_free_task(task); pm8001_tag_free(pm8001_ha, ccb_tag); + } } @@ -1786,6 +1792,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, ccb->device = pm8001_ha_dev; ccb->ccb_tag = ccb_tag; ccb->task = task; + ccb->n_elem = 0; pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; @@ -1802,7 +1809,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, sata_cmd.tag = cpu_to_le32(ccb_tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9)); + sata_cmd.ncqtag_atap_dir_m = cpu_to_le32((0x1 << 7) | (0x5 << 9)); memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, @@ -2363,7 +2370,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) len = sizeof(struct pio_setup_fis); pm8001_dbg(pm8001_ha, IO, "PIO read len = %d\n", len); - } else if (t->ata_task.use_ncq) { + } else if (t->ata_task.use_ncq && + t->data_dir != DMA_NONE) { len = sizeof(struct set_dev_bits_fis); pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n", len); @@ -3664,12 +3672,11 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) mb(); if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) { - pm8001_tag_free(pm8001_ha, tag); sas_free_task(t); - /* clear the flag */ - pm8001_dev->id &= 0xBFFFFFFF; - } else + pm8001_dev->id &= ~NCQ_ABORT_ALL_FLAG; + } else { t->task_done(t); + } return 0; } @@ -4232,22 +4239,22 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; - if (task->data_dir == DMA_NONE) { + + if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) { ATAP = 0x04; /* no data*/ pm8001_dbg(pm8001_ha, IO, "no data\n"); } else if (likely(!task->ata_task.device_control_reg_update)) { - if (task->ata_task.dma_xfer) { + if (task->ata_task.use_ncq && + dev->sata_dev.class != ATA_DEV_ATAPI) { + ATAP = 0x07; /* FPDMA */ + pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); + } else if (task->ata_task.dma_xfer) { ATAP = 0x06; /* DMA */ pm8001_dbg(pm8001_ha, IO, "DMA\n"); } else { ATAP = 0x05; /* PIO*/ pm8001_dbg(pm8001_ha, IO, "PIO\n"); } - if (task->ata_task.use_ncq && - dev->sata_dev.class != ATA_DEV_ATAPI) { - ATAP = 0x07; /* FPDMA */ - pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); - } } if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); @@ -4437,6 +4444,9 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, SAS_ADDR_SIZE); rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + return rc; } @@ -4587,7 +4597,7 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); if (pm8001_ha->chip_id != chip_8001) - sspTMCmd.ds_ads_m = 0x08; + sspTMCmd.ds_ads_m = cpu_to_le32(0x08); circularQ = &pm8001_ha->inbnd_q_tbl[0]; ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, sizeof(sspTMCmd), 0); @@ -4849,6 +4859,11 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, ccb->ccb_tag = tag; rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, tag); + if (rc) { + kfree(fw_control_context); + pm8001_tag_free(pm8001_ha, tag); + } + return rc; } @@ 
-4953,6 +4968,9 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, payload.nds = cpu_to_le32(state); rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + return rc; } diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 56cefad0b23629bcd3296aec1093aaea9d991abb..f7706139687135c037b9bc483caac047105c1f16 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -753,8 +753,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, res = -TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + struct pm8001_ccb_info *ccb = task->lldd_task; + pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n", tmf->tmf); + + if (ccb) + ccb->task = NULL; goto ex_err; } @@ -826,10 +831,10 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, res = PM8001_CHIP_DISP->task_abort(pm8001_ha, pm8001_dev, flag, task_tag, ccb_tag); - if (res) { del_timer(&task->slow_task->timer); pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n"); + pm8001_tag_free(pm8001_ha, ccb_tag); goto ex_err; } wait_for_completion(&task->slow_task->completion); diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 45a8cfccf79da21272288df4ccf6a115fa6f7768..51345f02383d003160564f7dc4901d8b11844018 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -66,18 +66,16 @@ int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value) } static void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset, - const void *destination, + __le32 *destination, u32 dw_count, u32 bus_base_number) { u32 index, value, offset; - u32 *destination1; - destination1 = (u32 *)destination; - for (index = 0; index < dw_count; index += 4, destination1++) { + for (index = 0; index < dw_count; index += 4, destination++) { offset = (soffset + index); if (offset < (64 * 1024)) { value = pm8001_cr32(pm8001_ha, bus_base_number, offset); - *destination1 = cpu_to_le32(value); + *destination = cpu_to_le32(value); } } return; @@ -767,6 +765,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + /* Enable higher IQs and OQs, 32 to 63, bit 16 */ + if (pm8001_ha->max_q_num > 32) + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |= + 1 << 16; /* Disable end to end CRC checking */ pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); @@ -1026,6 +1028,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) if (0x0000 != gst_len_mpistate) return -EBUSY; + /* + * As per controller datasheet, after successful MPI + * initialization minimum 500ms delay is required before + * issuing commands. + */ + msleep(500); + return 0; } @@ -1199,9 +1208,11 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) else page_code = THERMAL_PAGE_CODE_8H; - payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) | - (THERMAL_ENABLE << 8) | page_code; - payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8); + payload.cfg_pg[0] = + cpu_to_le32((THERMAL_LOG_ENABLE << 9) | + (THERMAL_ENABLE << 8) | page_code); + payload.cfg_pg[1] = + cpu_to_le32((LTEMPHIL << 24) | (RTEMPHIL << 8)); pm8001_dbg(pm8001_ha, DEV, "Setting up thermal config. 
cfg_pg 0 0x%x cfg_pg 1 0x%x\n", @@ -1241,43 +1252,41 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) circularQ = &pm8001_ha->inbnd_q_tbl[0]; payload.tag = cpu_to_le32(tag); - SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE; - SASConfigPage.MST_MSI = 3 << 15; - SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO; - SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) | - (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER; - SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME; - - if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF) - SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF; - - - SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) | - SAS_OPNRJT_RTRY_INTVL; - SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16) - | SAS_COPNRJT_RTRY_TMO; - SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16) - | SAS_COPNRJT_RTRY_THR; - SASConfigPage.MAX_AIP = SAS_MAX_AIP; + SASConfigPage.pageCode = cpu_to_le32(SAS_PROTOCOL_TIMER_CONFIG_PAGE); + SASConfigPage.MST_MSI = cpu_to_le32(3 << 15); + SASConfigPage.STP_SSP_MCT_TMO = + cpu_to_le32((STP_MCT_TMO << 16) | SSP_MCT_TMO); + SASConfigPage.STP_FRM_TMO = + cpu_to_le32((SAS_MAX_OPEN_TIME << 24) | + (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER); + SASConfigPage.STP_IDLE_TMO = cpu_to_le32(STP_IDLE_TIME); + + SASConfigPage.OPNRJT_RTRY_INTVL = + cpu_to_le32((SAS_MFD << 16) | SAS_OPNRJT_RTRY_INTVL); + SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = + cpu_to_le32((SAS_DOPNRJT_RTRY_TMO << 16) | SAS_COPNRJT_RTRY_TMO); + SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = + cpu_to_le32((SAS_DOPNRJT_RTRY_THR << 16) | SAS_COPNRJT_RTRY_THR); + SASConfigPage.MAX_AIP = cpu_to_le32(SAS_MAX_AIP); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.pageCode 0x%08x\n", - SASConfigPage.pageCode); + le32_to_cpu(SASConfigPage.pageCode)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MST_MSI 0x%08x\n", - SASConfigPage.MST_MSI); + le32_to_cpu(SASConfigPage.MST_MSI)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_SSP_MCT_TMO 0x%08x\n", - SASConfigPage.STP_SSP_MCT_TMO); + le32_to_cpu(SASConfigPage.STP_SSP_MCT_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_FRM_TMO 0x%08x\n", - SASConfigPage.STP_FRM_TMO); + le32_to_cpu(SASConfigPage.STP_FRM_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.STP_IDLE_TMO 0x%08x\n", - SASConfigPage.STP_IDLE_TMO); + le32_to_cpu(SASConfigPage.STP_IDLE_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.OPNRJT_RTRY_INTVL 0x%08x\n", - SASConfigPage.OPNRJT_RTRY_INTVL); + le32_to_cpu(SASConfigPage.OPNRJT_RTRY_INTVL)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO 0x%08x\n", - SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO); + le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR 0x%08x\n", - SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR); + le32_to_cpu(SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR)); pm8001_dbg(pm8001_ha, INIT, "SASConfigPage.MAX_AIP 0x%08x\n", - SASConfigPage.MAX_AIP); + le32_to_cpu(SASConfigPage.MAX_AIP)); memcpy(&payload.cfg_pg, &SASConfigPage, sizeof(SASProtocolTimerConfig_t)); @@ -1403,12 +1412,13 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) /* Currently only one key is used. New KEK index is 1. * Current KEK index is 1. Store KEK to NVRAM is 1. */ - payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) | - KEK_MGMT_SUBOP_KEYCARDUPDATE); + payload.new_curidx_ksop = + cpu_to_le32(((1 << 24) | (1 << 16) | (1 << 8) | + KEK_MGMT_SUBOP_KEYCARDUPDATE)); pm8001_dbg(pm8001_ha, DEV, "Saving Encryption info to flash. 
payload 0x%x\n", - payload.new_curidx_ksop); + le32_to_cpu(payload.new_curidx_ksop)); rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, sizeof(payload), 0); @@ -1683,10 +1693,11 @@ static void pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX - u32 mask; - mask = (u32)(1 << vec); - - pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF)); + if (vec < 32) + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec); + else + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U, + 1U << (vec - 32)); return; #endif pm80xx_chip_intx_interrupt_enable(pm8001_ha); @@ -1702,12 +1713,15 @@ static void pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX - u32 mask; - if (vec == 0xFF) - mask = 0xFFFFFFFF; + if (vec == 0xFF) { + /* disable all vectors 0-31, 32-63 */ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF); + } else if (vec < 32) + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec); else - mask = (u32)(1 << vec); - pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF)); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, + 1U << (vec - 32)); return; #endif pm80xx_chip_intx_interrupt_disable(pm8001_ha); @@ -1749,6 +1763,7 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, ccb->device = pm8001_ha_dev; ccb->ccb_tag = ccb_tag; ccb->task = task; + ccb->n_elem = 0; circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -1830,7 +1845,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, sata_cmd.tag = cpu_to_le32(ccb_tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9)); + sata_cmd.ncqtag_atap_dir_m_dad = cpu_to_le32(((0x1 << 7) | (0x5 << 9))); memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, @@ -2133,9 +2148,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); if (t->slow_task) complete(&t->slow_task->completion); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); @@ -2464,7 +2479,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) len = sizeof(struct pio_setup_fis); pm8001_dbg(pm8001_ha, IO, "PIO read len = %d\n", len); - } else if (t->ata_task.use_ncq) { + } else if (t->ata_task.use_ncq && + t->data_dir != DMA_NONE) { len = sizeof(struct set_dev_bits_fis); pm8001_dbg(pm8001_ha, IO, "FPDMA len = %d\n", len); @@ -2726,9 +2742,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); if (t->slow_task) complete(&t->slow_task->completion); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); @@ -4066,10 +4082,22 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) unsigned long flags; u32 regval; + /* + * Fatal errors are programmed to be signalled in irq vector + * pm8001_ha->max_q_num - 1 through 
pm8001_ha->main_cfg_tbl.pm80xx_tbl. + * fatal_err_interrupt + */ if (vec == (pm8001_ha->max_q_num - 1)) { + u32 mipsall_ready; + + if (pm8001_ha->chip_id == chip_8008 || + pm8001_ha->chip_id == chip_8009) + mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT; + else + mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT; + regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); - if ((regval & SCRATCH_PAD_MIPSALL_READY) != - SCRATCH_PAD_MIPSALL_READY) { + if ((regval & mipsall_ready) != mipsall_ready) { pm8001_ha->controller_fatal_error = true; pm8001_dbg(pm8001_ha, FAIL, "Firmware Fatal error! Regval:0x%x\n", @@ -4304,13 +4332,15 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, struct ssp_ini_io_start_req ssp_cmd; u32 tag = ccb->ccb_tag; int ret; - u64 phys_addr, start_addr, end_addr; + u64 phys_addr, end_addr; u32 end_addr_high, end_addr_low; struct inbound_queue_table *circularQ; u32 q_index, cpu_id; u32 opc = OPC_INB_SSPINIIOSTART; + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + /* data address domain added for spcv; set to 0 by host, * used internally by controller * 0 for SAS 1.1 and SAS 2.0 compatible TLR @@ -4321,7 +4351,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); ssp_cmd.tag = cpu_to_le32(tag); if (task->ssp_task.enable_first_burst) - ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; + ssp_cmd.ssp_iu.efb_prio_attr = 0x80; ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd, @@ -4353,21 +4383,24 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.enc_esgl = cpu_to_le32(1<<31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.enc_addr_low = cpu_to_le32(lower_32_bits(dma_addr)); ssp_cmd.enc_addr_high = cpu_to_le32(upper_32_bits(dma_addr)); ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); ssp_cmd.enc_esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - end_addr = (start_addr + ssp_cmd.enc_len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); - if (end_addr_high != ssp_cmd.enc_addr_high) { + end_addr = dma_addr + le32_to_cpu(ssp_cmd.enc_len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + + if (end_addr_high != le32_to_cpu(ssp_cmd.enc_addr_high)) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, ssp_cmd.enc_len, + dma_addr, + le32_to_cpu(ssp_cmd.enc_len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); @@ -4376,7 +4409,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(lower_32_bits(phys_addr)); ssp_cmd.enc_addr_high = cpu_to_le32(upper_32_bits(phys_addr)); - ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + ssp_cmd.enc_esgl = cpu_to_le32(1U<<31); } } else if (task->num_scatter == 0) { ssp_cmd.enc_addr_low = 0; @@ -4384,8 +4417,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); ssp_cmd.enc_esgl = 0; } + /* XTS mode. All other fields are 0 */ - ssp_cmd.key_cmode = 0x6 << 4; + ssp_cmd.key_cmode = cpu_to_le32(0x6 << 4); + /* set tweak values. 
Should be the start lba */ ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cmd->cmnd[2] << 24) | (task->ssp_task.cmd->cmnd[3] << 16) | @@ -4407,20 +4442,22 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.esgl = cpu_to_le32(1<<31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr)); ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - end_addr = (start_addr + ssp_cmd.len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); - if (end_addr_high != ssp_cmd.addr_high) { + end_addr = dma_addr + le32_to_cpu(ssp_cmd.len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != le32_to_cpu(ssp_cmd.addr_high)) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, ssp_cmd.len, + dma_addr, + le32_to_cpu(ssp_cmd.len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); @@ -4454,7 +4491,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 q_index, cpu_id; struct sata_start_req sata_cmd; u32 hdr_tag, ncg_tag = 0; - u64 phys_addr, start_addr, end_addr; + u64 phys_addr, end_addr; u32 end_addr_high, end_addr_low; u32 ATAP = 0x0; u32 dir; @@ -4466,22 +4503,21 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, q_index = (u32) (cpu_id) % (pm8001_ha->max_q_num); circularQ = &pm8001_ha->inbnd_q_tbl[q_index]; - if (task->data_dir == DMA_NONE) { + if (task->data_dir == DMA_NONE && !task->ata_task.use_ncq) { ATAP = 0x04; /* no data*/ pm8001_dbg(pm8001_ha, IO, "no data\n"); } else if (likely(!task->ata_task.device_control_reg_update)) { - if (task->ata_task.dma_xfer) { + if (task->ata_task.use_ncq && + dev->sata_dev.class != ATA_DEV_ATAPI) { + ATAP = 0x07; /* FPDMA */ + pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); + } else if (task->ata_task.dma_xfer) { ATAP = 0x06; /* DMA */ pm8001_dbg(pm8001_ha, IO, "DMA\n"); } else { ATAP = 0x05; /* PIO*/ pm8001_dbg(pm8001_ha, IO, "PIO\n"); } - if (task->ata_task.use_ncq && - dev->sata_dev.class != ATA_DEV_ATAPI) { - ATAP = 0x07; /* FPDMA */ - pm8001_dbg(pm8001_ha, IO, "FPDMA\n"); - } } if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); @@ -4515,32 +4551,38 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); phys_addr = ccb->ccb_dma_handle; - sata_cmd.enc_addr_low = lower_32_bits(phys_addr); - sata_cmd.enc_addr_high = upper_32_bits(phys_addr); + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); sata_cmd.enc_esgl = cpu_to_le32(1 << 31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); - sata_cmd.enc_addr_low = lower_32_bits(dma_addr); - sata_cmd.enc_addr_high = upper_32_bits(dma_addr); + + sata_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + sata_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); sata_cmd.enc_esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - 
end_addr = (start_addr + sata_cmd.enc_len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); - if (end_addr_high != sata_cmd.enc_addr_high) { + end_addr = dma_addr + le32_to_cpu(sata_cmd.enc_len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); + if (end_addr_high != le32_to_cpu(sata_cmd.enc_addr_high)) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%x end_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, sata_cmd.enc_len, + dma_addr, + le32_to_cpu(sata_cmd.enc_len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); phys_addr = ccb->ccb_dma_handle; sata_cmd.enc_addr_low = - lower_32_bits(phys_addr); + cpu_to_le32(lower_32_bits(phys_addr)); sata_cmd.enc_addr_high = - upper_32_bits(phys_addr); + cpu_to_le32(upper_32_bits(phys_addr)); sata_cmd.enc_esgl = cpu_to_le32(1 << 31); } @@ -4551,7 +4593,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.enc_esgl = 0; } /* XTS mode. All other fields are 0 */ - sata_cmd.key_index_mode = 0x6 << 4; + sata_cmd.key_index_mode = cpu_to_le32(0x6 << 4); + /* set tweak values. Should be the start lba */ sata_cmd.twk_val0 = cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) | @@ -4577,31 +4620,31 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, phys_addr = ccb->ccb_dma_handle; sata_cmd.addr_low = lower_32_bits(phys_addr); sata_cmd.addr_high = upper_32_bits(phys_addr); - sata_cmd.esgl = cpu_to_le32(1 << 31); + sata_cmd.esgl = cpu_to_le32(1U << 31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.addr_low = lower_32_bits(dma_addr); sata_cmd.addr_high = upper_32_bits(dma_addr); sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; + /* Check 4G Boundary */ - start_addr = cpu_to_le64(dma_addr); - end_addr = (start_addr + sata_cmd.len) - 1; - end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); - end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); + end_addr = dma_addr + le32_to_cpu(sata_cmd.len) - 1; + end_addr_low = lower_32_bits(end_addr); + end_addr_high = upper_32_bits(end_addr); if (end_addr_high != sata_cmd.addr_high) { pm8001_dbg(pm8001_ha, FAIL, "The sg list address start_addr=0x%016llx data_len=0x%xend_addr_high=0x%08x end_addr_low=0x%08x has crossed 4G boundary\n", - start_addr, sata_cmd.len, + dma_addr, + le32_to_cpu(sata_cmd.len), end_addr_high, end_addr_low); pm8001_chip_make_sg(task->scatter, 1, ccb->buf_prd); phys_addr = ccb->ccb_dma_handle; - sata_cmd.addr_low = - lower_32_bits(phys_addr); - sata_cmd.addr_high = - upper_32_bits(phys_addr); - sata_cmd.esgl = cpu_to_le32(1 << 31); + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1U << 31); } } else if (task->num_scatter == 0) { sata_cmd.addr_low = 0; @@ -4609,27 +4652,28 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; } + /* scsi cdb */ sata_cmd.atapi_scsi_cdb[0] = cpu_to_le32(((task->ata_task.atapi_packet[0]) | - (task->ata_task.atapi_packet[1] << 8) | - (task->ata_task.atapi_packet[2] << 16) | - (task->ata_task.atapi_packet[3] << 24))); + (task->ata_task.atapi_packet[1] << 8) | + (task->ata_task.atapi_packet[2] << 16) | + (task->ata_task.atapi_packet[3] << 24))); sata_cmd.atapi_scsi_cdb[1] = 
cpu_to_le32(((task->ata_task.atapi_packet[4]) | - (task->ata_task.atapi_packet[5] << 8) | - (task->ata_task.atapi_packet[6] << 16) | - (task->ata_task.atapi_packet[7] << 24))); + (task->ata_task.atapi_packet[5] << 8) | + (task->ata_task.atapi_packet[6] << 16) | + (task->ata_task.atapi_packet[7] << 24))); sata_cmd.atapi_scsi_cdb[2] = cpu_to_le32(((task->ata_task.atapi_packet[8]) | - (task->ata_task.atapi_packet[9] << 8) | - (task->ata_task.atapi_packet[10] << 16) | - (task->ata_task.atapi_packet[11] << 24))); + (task->ata_task.atapi_packet[9] << 8) | + (task->ata_task.atapi_packet[10] << 16) | + (task->ata_task.atapi_packet[11] << 24))); sata_cmd.atapi_scsi_cdb[3] = cpu_to_le32(((task->ata_task.atapi_packet[12]) | - (task->ata_task.atapi_packet[13] << 8) | - (task->ata_task.atapi_packet[14] << 16) | - (task->ata_task.atapi_packet[15] << 24))); + (task->ata_task.atapi_packet[13] << 8) | + (task->ata_task.atapi_packet[14] << 16) | + (task->ata_task.atapi_packet[15] << 24))); } /* Check for read log for failed drive and return */ @@ -4827,8 +4871,13 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.phyop_phyid = cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); - return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, - sizeof(payload), 0); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); + if (rc) + pm8001_tag_free(pm8001_ha, tag); + + return rc; } static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha) diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h index 701951a0f715bb18f943df9b3771d875a330eb24..0dfe9034f7e7f69b58bee99d0c3294de3ca2a0f2 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.h +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -1391,8 +1391,12 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; #define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0 #define SCRATCH_PAD_IOP0_READY 0xC00 #define SCRATCH_PAD_IOP1_READY 0x3000 -#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \ +#define SCRATCH_PAD_MIPSALL_READY_16PORT (SCRATCH_PAD_IOP1_READY | \ SCRATCH_PAD_IOP0_READY | \ + SCRATCH_PAD_ILA_READY | \ + SCRATCH_PAD_RAAE_READY) +#define SCRATCH_PAD_MIPSALL_READY_8PORT (SCRATCH_PAD_IOP0_READY | \ + SCRATCH_PAD_ILA_READY | \ SCRATCH_PAD_RAAE_READY) /* boot loader state */ diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 63f99f4eeed972bb8d149d599050bc876fde7db1..472374d83cede79f44e41c65d9511ec3c758820b 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -2268,6 +2268,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req, io_req->tm_flags == FCP_TMF_TGT_RESET) { clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags); io_req->sc_cmd = NULL; + kref_put(&io_req->refcount, qedf_release_cmd); complete(&io_req->tm_done); } diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index c63dcc39f76c2ae5df87e67f7b847f490c81bce8..e64457f53da86da662c9b7ee4b3ccfa844b04c70 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1859,6 +1859,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled) vport_qedf->cmd_mgr = base_qedf->cmd_mgr; init_completion(&vport_qedf->flogi_compl); INIT_LIST_HEAD(&vport_qedf->fcports); + INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work); rc = qedf_vport_libfc_config(vport, vn_port); if (rc) { diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 
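pm80xx_chip_phy_ctl_req() above now checks the submit result so the inbound-queue tag is returned on failure instead of leaking. The pattern generalizes to any tag or buffer acquired before an asynchronous submit; reduced to the lines that matter (driver helpers as in the hunk, surrounding code elided):

/* Sketch: undo the tag allocation if the MPI submit fails,
 * instead of returning the error with the tag still held.
 */
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload,
                          sizeof(payload), 0);
if (rc)
        pm8001_tag_free(pm8001_ha, tag);        /* was leaked before */

return rc;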
f51723e2d5227d30646beb6cb057621f76a90856..ba9a22e55e32fdbe9f74988b2b581739578cf9ca 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -387,6 +387,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session, struct qedi_ctx *qedi = iscsi_host_priv(shost); struct qedi_endpoint *qedi_ep; struct iscsi_endpoint *ep; + int rc = 0; ep = iscsi_lookup_endpoint(transport_fd); if (!ep) @@ -394,11 +395,16 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session, qedi_ep = ep->dd_data; if ((qedi_ep->state == EP_STATE_TCP_FIN_RCVD) || - (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) - return -EINVAL; + (qedi_ep->state == EP_STATE_TCP_RST_RCVD)) { + rc = -EINVAL; + goto put_ep; + } + + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) { + rc = -EINVAL; + goto put_ep; + } - if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) - return -EINVAL; qedi_ep->conn = qedi_conn; qedi_conn->ep = qedi_ep; @@ -408,13 +414,18 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session, qedi_conn->cmd_cleanup_req = 0; qedi_conn->cmd_cleanup_cmpl = 0; - if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) - return -EINVAL; + if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) { + rc = -EINVAL; + goto put_ep; + } + spin_lock_init(&qedi_conn->tmf_work_lock); INIT_LIST_HEAD(&qedi_conn->tmf_work_list); init_waitqueue_head(&qedi_conn->wait_queue); - return 0; +put_ep: + iscsi_put_endpoint(ep); + return rc; } static int qedi_iscsi_update_conn(struct qedi_ctx *qedi, @@ -1419,15 +1430,20 @@ static void qedi_cleanup_task(struct iscsi_task *task) cmd->scsi_cmd = NULL; } +static struct iscsi_transport_expand qedi_iscsi_expand = { + .unbind_conn = iscsi_conn_unbind, +}; + struct iscsi_transport qedi_iscsi_transport = { .owner = THIS_MODULE, .name = QEDI_MODULE_NAME, .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST | - CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO, + CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO | CAP_OPS_EXPAND, .create_session = qedi_session_create, .destroy_session = qedi_session_destroy, .create_conn = qedi_conn_create, .bind_conn = qedi_conn_bind, + .ops_expand = &qedi_iscsi_expand, .start_conn = qedi_conn_start, .stop_conn = iscsi_conn_stop, .destroy_conn = qedi_conn_destroy, diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index e40a37236aa10a5d0c2728d957122f3b858d56da..d0407f44de78da687d67b87922cf77eaab2070e3 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -555,7 +555,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EINVAL; - if (IS_NOCACHE_VPD_TYPE(ha)) + if (!IS_NOCACHE_VPD_TYPE(ha)) goto skip; faddr = ha->flt_region_vpd << 2; @@ -739,7 +739,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x706f, "Issuing MPI reset.\n"); - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + if (IS_QLA83XX(ha)) { uint32_t idc_control; qla83xx_idc_lock(vha, 0); @@ -1050,9 +1050,6 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) continue; if (iter->type == 3 && !(IS_CNA_CAPABLE(ha))) continue; - if (iter->type == 0x27 && - (!IS_QLA27XX(ha) || !IS_QLA28XX(ha))) - continue; sysfs_remove_bin_file(&host->shost_gendev.kobj, iter->attr); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e1fd91a58120240b281842dff166e3f10f3d94b5..8a8e0920d2b41e478c173f67445292faa6cc6528 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ 
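With iscsi_lookup_endpoint() now taking a reference on the endpoint device (see the scsi_transport_iscsi.c changes later in this patch), qedi_conn_bind() above is restructured so every exit path drops that reference, hence the goto put_ep rewrites. A skeleton of the idiom, with validate_and_bind() standing in as a hypothetical placeholder for the driver-specific checks:

/* Sketch: single-exit unwind so the lookup reference is always dropped. */
static int validate_and_bind(struct iscsi_endpoint *ep);      /* stand-in */

static int sketch_conn_bind(u64 transport_fd)
{
        struct iscsi_endpoint *ep;
        int rc = 0;

        ep = iscsi_lookup_endpoint(transport_fd);       /* takes a device ref */
        if (!ep)
                return -EINVAL;

        if (validate_and_bind(ep))
                rc = -EINVAL;

        iscsi_put_endpoint(ep);         /* drop the lookup reference */
        return rc;
}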
b/drivers/scsi/qla2xxx/qla_def.h @@ -2796,7 +2796,11 @@ struct ct_fdmi2_hba_attributes { #define FDMI_PORT_SPEED_8GB 0x10 #define FDMI_PORT_SPEED_16GB 0x20 #define FDMI_PORT_SPEED_32GB 0x40 -#define FDMI_PORT_SPEED_64GB 0x80 +#define FDMI_PORT_SPEED_20GB 0x80 +#define FDMI_PORT_SPEED_40GB 0x100 +#define FDMI_PORT_SPEED_128GB 0x200 +#define FDMI_PORT_SPEED_64GB 0x400 +#define FDMI_PORT_SPEED_256GB 0x800 #define FDMI_PORT_SPEED_UNKNOWN 0x8000 #define FC_CLASS_2 0x04 @@ -5171,4 +5175,8 @@ struct sff_8247_a0 { #include "qla_gbl.h" #include "qla_dbg.h" #include "qla_inline.h" + +#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \ + _fcport->disc_state == DSC_DELETED) + #endif diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index e28c4b7ec55ffb55ce4aa10e43c62719955007f5..73015c69b5e892b1c909659064212fa19a4ab726 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -676,8 +676,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type) return (QLA_SUCCESS); } - return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), - FC4_TYPE_FCP_SCSI); + return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type); } static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, @@ -727,7 +726,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id); ct_req->req.rff_id.fc4_feature = fc4feature; - ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */ + ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */ sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index fdae25ec554d9f1ba2ee6b35ba52dea859f76e5f..9452848ede3f86d29e9f1b5df196f8a484e37e9d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -570,6 +570,14 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; + if (IS_SESSION_DELETED(fcport)) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC is being deleted - not sending command.\n", + __func__, fcport->port_name); + fcport->flags &= ~FCF_ASYNC_ACTIVE; + return rval; + } + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; @@ -953,6 +961,9 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } break; + case ISP_CFG_NL: + qla24xx_fcport_handle_login(vha, fcport); + break; default: break; } @@ -1313,14 +1324,21 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) struct port_database_24xx *pd; struct qla_hw_data *ha = vha->hw; - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || - fcport->loop_id == FC_NO_LOOP_ID) { + if (IS_SESSION_DELETED(fcport)) { ql_log(ql_log_warn, vha, 0xffff, - "%s: %8phC - not sending command.\n", - __func__, fcport->port_name); + "%s: %8phC is being deleted - not sending command.\n", + __func__, fcport->port_name); + fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } + if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC online %d flags %x - not sending command.\n", + __func__, fcport->port_name, vha->flags.online, fcport->flags); + goto done; + } + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -1480,6 +1498,11 @@ static void
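IS_SESSION_DELETED() added to qla_def.h above is only ever used with plain fcport pointers, so it works as written; the fully defensive spelling of the same macro parenthesizes its argument, which is the usual kernel idiom whenever a macro argument is dereferenced:

/* Sketch: argument-parenthesized variant of the new helper. */
#define IS_SESSION_DELETED(_fcport)                             \
        ((_fcport)->disc_state == DSC_DELETE_PEND ||            \
         (_fcport)->disc_state == DSC_DELETED)

Either form makes the intent of the new guards explicit: once a session has entered DSC_DELETE_PEND or DSC_DELETED, async ADISC and GPDB submissions are refused and FCF_ASYNC_ACTIVE is cleared so the relogin state machine does not stall on a dying session.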
qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport) u8 login = 0; int rc; + ql_dbg(ql_dbg_disc, vha, 0x307b, + "%s %8phC DS %d LS %d lid %d retries=%d\n", + __func__, fcport->port_name, fcport->disc_state, + fcport->fw_login_state, fcport->loop_id, fcport->login_retry); + if (qla_tgt_mode_enabled(vha)) return; @@ -1537,7 +1560,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen, fcport->login_gen, fcport->loop_id, fcport->scan_state); - if (fcport->scan_state != QLA_FCPORT_FOUND) + if (fcport->scan_state != QLA_FCPORT_FOUND || + fcport->disc_state == DSC_DELETE_PEND) return 0; if ((fcport->loop_id != FC_NO_LOOP_ID) && @@ -1558,7 +1582,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw)) return 0; - if (fcport->flags & FCF_ASYNC_SENT) { + if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return 0; } @@ -2114,12 +2138,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n", __func__, __LINE__, ea->fcport->port_name, ea->data[1]); - ea->fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED); - if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED) - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - else - qla2x00_mark_device_lost(vha, ea->fcport, 1); + qlt_schedule_sess_for_deletion(ea->fcport); break; case MBS_LOOP_ID_USED: /* data[1] = IO PARAM 1 = nport ID */ @@ -3309,6 +3328,14 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) struct rsp_que *rsp = ha->rsp_q_map[0]; struct qla2xxx_fw_dump *fw_dump; + if (ha->fw_dump) { + ql_dbg(ql_dbg_init, vha, 0x00bd, + "Firmware dump already allocated.\n"); + return; + } + + ha->fw_dumped = 0; + ha->fw_dump_cap_flags = 0; dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; req_q_size = rsp_q_size = 0; @@ -3319,7 +3346,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x11000 + 1) * sizeof(uint16_t); } else if (IS_FWI2_CAPABLE(ha)) { - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (IS_QLA83XX(ha)) fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); else if (IS_QLA81XX(ha)) fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); @@ -3331,8 +3358,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t); if (ha->mqenable) { - if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && - !IS_QLA28XX(ha)) + if (!IS_QLA83XX(ha)) mq_size = sizeof(struct qla2xxx_mq_chain); /* * Allocate maximum buffer size for all queues - Q0. 
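qla2x00_alloc_fw_dump() above gains an allocate-once guard: re-entry (after an ISP abort, for instance) previously recomputed all the sizes and could clobber or leak the existing dump buffer. The guard has to come before any size derivation, and stale status from a prior dump is reset at the same time. In brief:

/* Sketch: make the firmware-dump allocation idempotent. */
if (ha->fw_dump) {
        ql_dbg(ql_dbg_init, vha, 0x00bd,
               "Firmware dump already allocated.\n");
        return;
}
ha->fw_dumped = 0;              /* clear status from any prior dump */
ha->fw_dump_cap_flags = 0;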
@@ -3893,8 +3919,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version); - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || - IS_QLA28XX(ha)) { + if (IS_QLA83XX(ha)) { ha->flags.fac_supported = 0; rval = QLA_SUCCESS; } @@ -5382,6 +5407,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) memcpy(fcport->node_name, new_fcport->node_name, WWN_SIZE); fcport->scan_state = QLA_FCPORT_FOUND; + if (fcport->login_retry == 0) { + fcport->login_retry = vha->hw->login_retry_count; + ql_dbg(ql_dbg_disc, vha, 0x2135, + "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", + fcport->port_name, fcport->loop_id, + fcport->login_retry); + } found++; break; } @@ -5515,6 +5547,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) if (atomic_read(&fcport->state) == FCS_ONLINE) return; + qla2x00_set_fcport_state(fcport, FCS_ONLINE); + rport_ids.node_name = wwn_to_u64(fcport->node_name); rport_ids.port_name = wwn_to_u64(fcport->port_name); rport_ids.port_id = fcport->d_id.b.domain << 16 | @@ -5615,6 +5649,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_reg_remote_port(vha, fcport); break; case MODE_TARGET: + qla2x00_set_fcport_state(fcport, FCS_ONLINE); if (!vha->vha_tgt.qla_tgt->tgt_stop && !vha->vha_tgt.qla_tgt->tgt_stopped) qlt_fc_port_added(vha, fcport); @@ -5629,8 +5664,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) break; } - qla2x00_set_fcport_state(fcport, FCS_ONLINE); - if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) { if (fcport->id_changed) { fcport->id_changed = 0; @@ -9127,7 +9160,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->rsp->req = qpair->req; qpair->rsp->qpair = qpair; /* init qpair to this cpu. Will adjust at run time. 
*/ - qla_cpu_update(qpair, smp_processor_id()); + qla_cpu_update(qpair, raw_smp_processor_id()); if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { if (ha->fw_attributes & BIT_4) diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index c532c74ca1ab9e17ece023d6ee69e4b5edc1a799..e54cc2a761dd46ee110c2149b79d327ed0218d2f 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2910,6 +2910,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); + break; } fallthrough; default: @@ -2919,9 +2920,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) fw_status[0], fw_status[1], fw_status[2]); fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_set_fcport_disc_state(fcport, - DSC_LOGIN_FAILED); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + qlt_schedule_sess_for_deletion(fcport); break; } break; @@ -2933,8 +2932,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) fw_status[0], fw_status[1], fw_status[2]); sp->fcport->flags &= ~FCF_ASYNC_SENT; - qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + qlt_schedule_sess_for_deletion(fcport); break; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 5e040b6debc84ef224dbf7e05ffac1dc931eea16..c5c7d60ab25241a0c1ba787e9268d30d6a5c2bcd 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2248,6 +2248,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) iocb->u.tmf.data = QLA_FUNCTION_FAILED; } else if ((le16_to_cpu(sts->scsi_status) & SS_RESPONSE_INFO_LEN_VALID)) { + host_to_fcp_swap(sts->data, sizeof(sts->data)); if (le32_to_cpu(sts->rsp_data_len) < 4) { ql_log(ql_log_warn, fcport->vha, 0x503b, "Async-%s error - hdl=%x not enough response(%d).\n", diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 734745f450211a921beda952e2371011a6f663d5..bbb57edc1f66290b72495b3dcabf7652385aef2f 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -9,6 +9,12 @@ #include <linux/delay.h> #include <linux/gfp.h> +#ifdef CONFIG_PPC +#define IS_PPCARCH true +#else +#define IS_PPCARCH false +#endif + static struct mb_cmd_name { uint16_t cmd; const char *str; @@ -698,6 +704,9 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) vha->min_supported_speed = nv->min_supported_speed; } + + if (IS_PPCARCH) + mcp->mb[11] |= BIT_4; } if (ha->flags.exlogins_enabled) @@ -2984,8 +2993,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha) ha->orig_fw_iocb_count = mcp->mb[10]; if (ha->flags.npiv_supported) ha->max_npiv_vports = mcp->mb[11]; - if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || - IS_QLA28XX(ha)) + if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ha->fw_max_fcf_count = mcp->mb[12]; } @@ -5546,7 +5554,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) - mcp->in_mb |= MBX_3; + mcp->in_mb |= MBX_4|MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 5acee3c798d42771e739201f043e9c44ce42dcb6..ba1b1c7549d359f9e025d1ac61f74466ab193e34 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -35,6 +35,11 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) (fcport->nvme_flag &
NVME_FLAG_REGISTERED)) return 0; + if (atomic_read(&fcport->state) == FCS_ONLINE) + return 0; + + qla2x00_set_fcport_state(fcport, FCS_ONLINE); + fcport->nvme_flag &= ~NVME_FLAG_RESETTING; memset(&req, 0, sizeof(struct nvme_fc_port_info)); @@ -165,6 +170,18 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) qla2xxx_rel_qpair_sp(sp->qpair, sp); } +static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd) +{ + if (sp->flags & SRB_DMA_VALID) { + struct srb_iocb *nvme = &sp->u.iocb_cmd; + struct qla_hw_data *ha = sp->fcport->vha->hw; + + dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma, + fd->rqstlen, DMA_TO_DEVICE); + sp->flags &= ~SRB_DMA_VALID; + } +} + static void qla_nvme_release_ls_cmd_kref(struct kref *kref) { struct srb *sp = container_of(kref, struct srb, cmd_kref); @@ -181,6 +198,8 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref) spin_unlock_irqrestore(&priv->cmd_lock, flags); fd = priv->fd; + + qla_nvme_ls_unmap(sp, fd); fd->done(fd, priv->comp_status); out: qla2x00_rel_sp(sp); @@ -327,6 +346,8 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, fd->rqstlen, DMA_TO_DEVICE); + sp->flags |= SRB_DMA_VALID; + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, @@ -334,6 +355,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, wake_up(&sp->nvme_ls_waitq); sp->priv = NULL; priv->sp = NULL; + qla_nvme_ls_unmap(sp, fd); qla2x00_rel_sp(sp); return rval; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e7f73a167fbd64136e64ad305d4eb0ee399724da..419156121cb59a9ededeef864994c7c9a3b6d05a 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3673,8 +3673,7 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha) if (ha->mqiobase) iounmap(ha->mqiobase); - if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) && - ha->msixbase) + if (ha->msixbase) iounmap(ha->msixbase); } } @@ -5390,6 +5389,11 @@ void qla2x00_relogin(struct scsi_qla_host *vha) memset(&ea, 0, sizeof(ea)); ea.fcport = fcport; qla24xx_handle_relogin_event(vha, &ea); + } else if (vha->hw->current_topology == + ISP_CFG_NL && + IS_QLA2XXX_MIDTYPE(vha->hw)) { + (void)qla24xx_fcport_handle_login(vha, + fcport); } else if (vha->hw->current_topology == ISP_CFG_NL) { fcport->login_retry--; diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 0f92e9a044dcdb4e27c4226b6f96792bde915824..0fa9c529fca11a51d7191a089335c212fec035d3 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -844,7 +844,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_nvram = start; break; case FLT_REG_IMG_PRI_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_img_status_pri = start; break; case FLT_REG_IMG_SEC_27XX: @@ -1356,7 +1356,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr, flash_data_addr(ha, faddr), le32_to_cpu(*dwptr)); if (ret) { ql_dbg(ql_dbg_user, vha, 0x7006, - "Failed slopw write %x (%x)\n", faddr, *dwptr); + "Failed slow write %x (%x)\n", faddr, *dwptr); break; } } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index ebed14bed7835cc46a1512c6afc05198bd67f08a..cf9ae0ab489a0ef2566c0c1d58abcbe897a2c75c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -3256,6 +3256,7 @@ int 
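The qla_nvme.c changes above introduce SRB_DMA_VALID bookkeeping so the LS request buffer is unmapped exactly once: the flag is set after the buffer is mapped and synced, and both the kref release on completion and the failed qla2x00_start_sp() path call the same helper. A condensed sketch of the guard (field paths as in the driver, hypothetical function name):

/* Sketch: flag-guarded unmap, safe to call from either path. */
static void sketch_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
{
        struct qla_hw_data *ha = sp->fcport->vha->hw;

        if (!(sp->flags & SRB_DMA_VALID))
                return;         /* never mapped, or already unmapped */

        dma_unmap_single(&ha->pdev->dev, sp->u.iocb_cmd.u.nvme.cmd_dma,
                         fd->rqstlen, DMA_TO_DEVICE);
        sp->flags &= ~SRB_DMA_VALID;
}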
qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); + res = 0; goto out_unmap_unlock; } @@ -7076,8 +7077,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) if (!QLA_TGT_MODE_ENABLED()) return; - if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || - IS_QLA28XX(ha)) { + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; } else { diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 2c23b692e318c95f5d13022ff7b92c672ad56ad0..377d83762099babfa0e5ec652cdb9012ef24abc7 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -246,19 +246,24 @@ static struct scsi_host_template qla4xxx_driver_template = { .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, }; +static struct iscsi_transport_expand qla4xxx_iscsi_expand = { + .unbind_conn = iscsi_conn_unbind, +}; + static struct iscsi_transport qla4xxx_iscsi_transport = { .owner = THIS_MODULE, .name = DRIVER_NAME, .caps = CAP_TEXT_NEGO | CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | CAP_DATADGST | CAP_LOGIN_OFFLOAD | - CAP_MULTI_R2T, + CAP_MULTI_R2T | CAP_OPS_EXPAND, .attr_is_visible = qla4_attr_is_visible, .create_session = qla4xxx_session_create, .destroy_session = qla4xxx_session_destroy, .start_conn = qla4xxx_conn_start, .create_conn = qla4xxx_conn_create, .bind_conn = qla4xxx_conn_bind, + .ops_expand = &qla4xxx_iscsi_expand, .stop_conn = iscsi_conn_stop, .destroy_conn = qla4xxx_conn_destroy, .set_param = iscsi_set_param, @@ -3237,6 +3242,7 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, conn = cls_conn->dd_data; qla_conn = conn->dd_data; qla_conn->qla_ep = ep->dd_data; + iscsi_put_endpoint(ep); return 0; } diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c index c19ea7ab54cbd2b7daf96e19550454fd75ffaf34..d9a18124cfc9d4604930d431a9f2cca118f9ffc1 100644 --- a/drivers/scsi/scsi_debugfs.c +++ b/drivers/scsi/scsi_debugfs.c @@ -10,6 +10,7 @@ static const char *const scsi_cmd_flags[] = { SCSI_CMD_FLAG_NAME(TAGGED), SCSI_CMD_FLAG_NAME(UNCHECKED_ISA_DMA), SCSI_CMD_FLAG_NAME(INITIALIZED), + SCSI_CMD_FLAG_NAME(LAST), }; #undef SCSI_CMD_FLAG_NAME diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index f11f51e2465f5dea81fef1810d7368e11ff005cb..bcbeadb2d0f068dd912347f15bfe2b3008fae38d 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -2359,7 +2359,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) return -EIO; error = -EIO; - rq = kzalloc(sizeof(struct request) + sizeof(struct scsi_cmnd) + + rq = kzalloc(sizeof(struct request_wrapper) + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size, GFP_KERNEL); if (!rq) goto out_put_autopm_host; diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 3717eea37ecb386fd772e6dd90182b3674d41ceb..e91a0a5bc7a3ec1b8fb4d99c7a4c934517a39363 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -262,7 +262,7 @@ static int sdev_runtime_resume(struct device *dev) blk_pre_runtime_resume(sdev->request_queue); if (pm && pm->runtime_resume) err = pm->runtime_resume(dev); - blk_post_runtime_resume(sdev->request_queue, err); + blk_post_runtime_resume(sdev->request_queue); return err; } diff --git a/drivers/scsi/scsi_transport_iscsi.c 
b/drivers/scsi/scsi_transport_iscsi.c index a5759d0e388a851397ea2eb9fc9e8d72ea02d2f0..e23cb62ab2169d10590da46fe31e9bb2f2ede515 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -86,16 +86,10 @@ struct iscsi_internal { struct transport_container session_cont; }; -/* Worker to perform connection failure on unresponsive connections - * completely in kernel space. - */ -static void stop_conn_work_fn(struct work_struct *work); -static DECLARE_WORK(stop_conn_work, stop_conn_work_fn); - static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ static struct workqueue_struct *iscsi_eh_timer_workq; -static struct workqueue_struct *iscsi_destroy_workq; +static struct workqueue_struct *iscsi_conn_cleanup_workq; static DEFINE_IDA(iscsi_sess_ida); /* @@ -268,9 +262,20 @@ void iscsi_destroy_endpoint(struct iscsi_endpoint *ep) } EXPORT_SYMBOL_GPL(iscsi_destroy_endpoint); +void iscsi_put_endpoint(struct iscsi_endpoint *ep) +{ + put_device(&ep->dev); +} +EXPORT_SYMBOL_GPL(iscsi_put_endpoint); + +/** + * iscsi_lookup_endpoint - get ep from handle + * @handle: endpoint handle + * + * Caller must do an iscsi_put_endpoint(). + */ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) { - struct iscsi_endpoint *ep; struct device *dev; dev = class_find_device(&iscsi_endpoint_class, NULL, &handle, @@ -278,13 +283,7 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) if (!dev) return NULL; - ep = iscsi_dev_to_endpoint(dev); - /* - * we can drop this now because the interface will prevent - * removals and lookups from racing. - */ - put_device(dev); - return ep; + return iscsi_dev_to_endpoint(dev); } EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint); @@ -1598,12 +1597,6 @@ static DECLARE_TRANSPORT_CLASS(iscsi_connection_class, static struct sock *nls; static DEFINE_MUTEX(rx_queue_mutex); -/* - * conn_mutex protects the {start,bind,stop,destroy}_conn from racing - * against the kernel stop_connection recovery mechanism - */ -static DEFINE_MUTEX(conn_mutex); - static LIST_HEAD(sesslist); static DEFINE_SPINLOCK(sesslock); static LIST_HEAD(connlist); @@ -2225,6 +2218,164 @@ void iscsi_remove_session(struct iscsi_cls_session *session) } EXPORT_SYMBOL_GPL(iscsi_remove_session); +static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag) +{ + ISCSI_DBG_TRANS_CONN(conn, "Stopping conn.\n"); + + switch (flag) { + case STOP_CONN_RECOVER: + WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); + break; + case STOP_CONN_TERM: + WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); + break; + default: + iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n", + flag); + return; + } + + conn->transport->stop_conn(conn, flag); + ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n"); +} + +static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active) +{ + struct iscsi_cls_session *session = iscsi_conn_to_session(conn); + struct iscsi_endpoint *ep; + + ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n"); + WRITE_ONCE(conn->state, ISCSI_CONN_FAILED); + + if (!conn->ep || !session->transport->ep_disconnect) + return; + + ep = conn->ep; + conn->ep = NULL; + + if (session->transport->caps & CAP_OPS_EXPAND && + session->transport->ops_expand && + session->transport->ops_expand->unbind_conn) + session->transport->ops_expand->unbind_conn(conn, is_active); + + session->transport->ep_disconnect(ep); + ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n"); +} + +static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn, + struct iscsi_endpoint *ep, + bool is_active) +{ +
struct iscsi_cls_conn_wrapper *conn_wrapper = conn_to_wrapper(conn); + /* Check if this was a conn error and the kernel took ownership */ + spin_lock_irq(&conn_wrapper->lock); + if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn_wrapper->flags)) { + spin_unlock_irq(&conn_wrapper->lock); + iscsi_ep_disconnect(conn, is_active); + } else { + spin_unlock_irq(&conn_wrapper->lock); + ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n"); + mutex_unlock(&conn->ep_mutex); + + flush_work(&conn_wrapper->cleanup_work); + /* + * Userspace is now done with the EP so we can release the ref + * iscsi_cleanup_conn_work_fn took. + */ + iscsi_put_endpoint(ep); + mutex_lock(&conn->ep_mutex); + } +} + +static int iscsi_if_stop_conn(struct iscsi_transport *transport, + struct iscsi_uevent *ev) +{ + int flag = ev->u.stop_conn.flag; + struct iscsi_cls_conn *conn; + struct iscsi_cls_conn_wrapper *conn_wrapper; + + conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid); + if (!conn) + return -EINVAL; + + conn_wrapper = conn_to_wrapper(conn); + ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop.\n"); + /* + * If this is a termination we have to call stop_conn with that flag + * so the correct states get set. If we haven't run the work yet try to + * avoid the extra run. + */ + if (flag == STOP_CONN_TERM) { + cancel_work_sync(&conn_wrapper->cleanup_work); + iscsi_stop_conn(conn, flag); + } else { + /* + * For offload, when iscsid is restarted it won't know about + * existing endpoints so it can't do a ep_disconnect. We clean + * it up here for userspace. + */ + mutex_lock(&conn->ep_mutex); + if (conn->ep) + iscsi_if_disconnect_bound_ep(conn, conn->ep, true); + mutex_unlock(&conn->ep_mutex); + + /* + * Figure out if it was the kernel or userspace initiating this. + */ + spin_lock_irq(&conn_wrapper->lock); + if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn_wrapper->flags)) { + spin_unlock_irq(&conn_wrapper->lock); + iscsi_stop_conn(conn, flag); + } else { + spin_unlock_irq(&conn_wrapper->lock); + ISCSI_DBG_TRANS_CONN(conn, + "flush kernel conn cleanup.\n"); + flush_work(&conn_wrapper->cleanup_work); + } + /* + * Only clear for recovery to avoid extra cleanup runs during + * termination. + */ + spin_lock_irq(&conn_wrapper->lock); + clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn_wrapper->flags); + spin_unlock_irq(&conn_wrapper->lock); + } + ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n"); + return 0; +} + +static void iscsi_cleanup_conn_work_fn(struct work_struct *work) +{ + struct iscsi_cls_conn_wrapper *conn_wrapper = + container_of(work, struct iscsi_cls_conn_wrapper, + cleanup_work); + struct iscsi_cls_conn *conn = &conn_wrapper->conn; + struct iscsi_cls_session *session = iscsi_conn_to_session(conn); + + mutex_lock(&conn->ep_mutex); + /* + * Get a ref to the ep, so we don't release its ID until after + * userspace is done referencing it in iscsi_if_disconnect_bound_ep. + */ + if (conn->ep) + get_device(&conn->ep->dev); + iscsi_ep_disconnect(conn, false); + + if (system_state != SYSTEM_RUNNING) { + /* + * If the user has set up for the session to never timeout + * then hang like they wanted. For all other cases fail right + * away since userspace is not going to relogin. 
+ */ + if (session->recovery_tmo > 0) + session->recovery_tmo = 0; + } + + iscsi_stop_conn(conn, STOP_CONN_RECOVER); + mutex_unlock(&conn->ep_mutex); + ISCSI_DBG_TRANS_CONN(conn, "cleanup done.\n"); +} + void iscsi_free_session(struct iscsi_cls_session *session) { ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n"); @@ -2253,21 +2404,25 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) { struct iscsi_transport *transport = session->transport; struct iscsi_cls_conn *conn; + struct iscsi_cls_conn_wrapper *conn_wrapper; unsigned long flags; int err; - conn = kzalloc(sizeof(*conn) + dd_size, GFP_KERNEL); - if (!conn) + conn_wrapper = kzalloc(sizeof(*conn_wrapper) + dd_size, GFP_KERNEL); + if (!conn_wrapper) return NULL; + + conn = &conn_wrapper->conn; if (dd_size) - conn->dd_data = &conn[1]; + conn->dd_data = &conn_wrapper[1]; mutex_init(&conn->ep_mutex); + spin_lock_init(&conn_wrapper->lock); INIT_LIST_HEAD(&conn->conn_list); - INIT_LIST_HEAD(&conn->conn_list_err); + INIT_WORK(&conn_wrapper->cleanup_work, iscsi_cleanup_conn_work_fn); conn->transport = transport; conn->cid = cid; - conn->state = ISCSI_CONN_DOWN; + WRITE_ONCE(conn->state, ISCSI_CONN_DOWN); /* this is released in the dev's release function */ if (!get_device(&session->dev)) @@ -2321,7 +2476,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn) spin_lock_irqsave(&connlock, flags); list_del(&conn->conn_list); - list_del(&conn->conn_list_err); spin_unlock_irqrestore(&connlock, flags); transport_unregister_device(&conn->dev); @@ -2448,77 +2602,6 @@ int iscsi_offload_mesg(struct Scsi_Host *shost, } EXPORT_SYMBOL_GPL(iscsi_offload_mesg); -/* - * This can be called without the rx_queue_mutex, if invoked by the kernel - * stop work. But, in that case, it is guaranteed not to race with - * iscsi_destroy by conn_mutex. - */ -static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag) -{ - /* - * It is important that this path doesn't rely on - * rx_queue_mutex, otherwise, a thread doing allocation on a - * start_session/start_connection could sleep waiting on a - * writeback to a failed iscsi device, that cannot be recovered - * because the lock is held. If we don't hold it here, the - * kernel stop_conn_work_fn has a chance to stop the broken - * session and resolve the allocation. - * - * Still, the user invoked .stop_conn() needs to be serialized - * with stop_conn_work_fn by a private mutex. Not pretty, but - * it works. 
- */ - mutex_lock(&conn_mutex); - switch (flag) { - case STOP_CONN_RECOVER: - conn->state = ISCSI_CONN_FAILED; - break; - case STOP_CONN_TERM: - conn->state = ISCSI_CONN_DOWN; - break; - default: - iscsi_cls_conn_printk(KERN_ERR, conn, - "invalid stop flag %d\n", flag); - goto unlock; - } - - conn->transport->stop_conn(conn, flag); -unlock: - mutex_unlock(&conn_mutex); -} - -static void stop_conn_work_fn(struct work_struct *work) -{ - struct iscsi_cls_conn *conn, *tmp; - unsigned long flags; - LIST_HEAD(recovery_list); - - spin_lock_irqsave(&connlock, flags); - if (list_empty(&connlist_err)) { - spin_unlock_irqrestore(&connlock, flags); - return; - } - list_splice_init(&connlist_err, &recovery_list); - spin_unlock_irqrestore(&connlock, flags); - - list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) { - uint32_t sid = iscsi_conn_get_sid(conn); - struct iscsi_cls_session *session; - - session = iscsi_session_lookup(sid); - if (session) { - if (system_state != SYSTEM_RUNNING) { - session->recovery_tmo = 0; - iscsi_if_stop_conn(conn, STOP_CONN_TERM); - } else { - iscsi_if_stop_conn(conn, STOP_CONN_RECOVER); - } - } - - list_del_init(&conn->conn_list_err); - } -} - void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) { struct nlmsghdr *nlh; @@ -2526,12 +2609,33 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) struct iscsi_uevent *ev; struct iscsi_internal *priv; int len = nlmsg_total_size(sizeof(*ev)); + struct iscsi_cls_conn_wrapper *conn_wrapper = conn_to_wrapper(conn); unsigned long flags; + int state; - spin_lock_irqsave(&connlock, flags); - list_add(&conn->conn_list_err, &connlist_err); - spin_unlock_irqrestore(&connlock, flags); - queue_work(system_unbound_wq, &stop_conn_work); + spin_lock_irqsave(&conn_wrapper->lock, flags); + /* + * Userspace will only do a stop call if we are at least bound. And, we + * only need to do the in kernel cleanup if in the UP state so cmds can + * be released to upper layers. If in other states just wait for + * userspace to avoid races that can leave the cleanup_work queued. 
+ */ + state = READ_ONCE(conn->state); + switch (state) { + case ISCSI_CONN_BOUND: + case ISCSI_CONN_UP: + if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, + &conn_wrapper->flags)) { + queue_work(iscsi_conn_cleanup_workq, + &conn_wrapper->cleanup_work); + } + break; + default: + ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n", + state); + break; + } + spin_unlock_irqrestore(&conn_wrapper->lock, flags); priv = iscsi_if_transport_lookup(conn->transport); if (!priv) @@ -2861,26 +2965,19 @@ static int iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev) { struct iscsi_cls_conn *conn; - unsigned long flags; + struct iscsi_cls_conn_wrapper *conn_wrapper; conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid); if (!conn) return -EINVAL; - spin_lock_irqsave(&connlock, flags); - if (!list_empty(&conn->conn_list_err)) { - spin_unlock_irqrestore(&connlock, flags); - return -EAGAIN; - } - spin_unlock_irqrestore(&connlock, flags); - + conn_wrapper = conn_to_wrapper(conn); + ISCSI_DBG_TRANS_CONN(conn, "Flushing cleanup during destruction\n"); + flush_work(&conn_wrapper->cleanup_work); ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n"); - mutex_lock(&conn_mutex); if (transport->destroy_conn) transport->destroy_conn(conn); - mutex_unlock(&conn_mutex); - return 0; } @@ -2890,7 +2987,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) char *data = (char*)ev + sizeof(*ev); struct iscsi_cls_conn *conn; struct iscsi_cls_session *session; - int err = 0, value = 0; + int err = 0, value = 0, state; if (ev->u.set_param.len > PAGE_SIZE) return -EINVAL; @@ -2907,8 +3004,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) session->recovery_tmo = value; break; default: - if ((conn->state == ISCSI_CONN_BOUND) || - (conn->state == ISCSI_CONN_UP)) { + state = READ_ONCE(conn->state); + if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) { err = transport->set_param(conn, ev->u.set_param.param, data, ev->u.set_param.len); } else { @@ -2968,15 +3065,22 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, ep = iscsi_lookup_endpoint(ep_handle); if (!ep) return -EINVAL; + conn = ep->conn; - if (conn) { - mutex_lock(&conn->ep_mutex); - conn->ep = NULL; - mutex_unlock(&conn->ep_mutex); - conn->state = ISCSI_CONN_FAILED; + if (!conn) { + /* + * conn was not even bound yet, so we can't get iscsi conn + * failures yet. 
+ */ + transport->ep_disconnect(ep); + goto put_ep; } - transport->ep_disconnect(ep); + mutex_lock(&conn->ep_mutex); + iscsi_if_disconnect_bound_ep(conn, ep, false); + mutex_unlock(&conn->ep_mutex); +put_ep: + iscsi_put_endpoint(ep); return 0; } @@ -3002,6 +3106,7 @@ iscsi_if_transport_ep(struct iscsi_transport *transport, ev->r.retcode = transport->ep_poll(ep, ev->u.ep_poll.timeout_ms); + iscsi_put_endpoint(ep); break; case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: rc = iscsi_if_ep_disconnect(transport, @@ -3018,10 +3123,19 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport, struct Scsi_Host *shost; struct sockaddr *dst_addr; int err; + int (*tgt_dscvr)(struct Scsi_Host *shost, enum iscsi_tgt_dscvr type, + uint32_t enable, struct sockaddr *dst_addr); - if (!transport->tgt_dscvr) - return -EINVAL; - + if (transport->caps & CAP_OPS_EXPAND) { + if (!transport->ops_expand || !transport->ops_expand->tgt_dscvr) + return -EINVAL; + tgt_dscvr = transport->ops_expand->tgt_dscvr; + } else { + if (!transport->ops_expand) + return -EINVAL; + tgt_dscvr = (int (*)(struct Scsi_Host *, enum iscsi_tgt_dscvr, uint32_t, + struct sockaddr *))(transport->ops_expand); + } shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no); if (!shost) { printk(KERN_ERR "target discovery could not find host no %u\n", @@ -3031,8 +3145,8 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport, dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); - err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type, - ev->u.tgt_dscvr.enable, dst_addr); + err = tgt_dscvr(shost, ev->u.tgt_dscvr.type, + ev->u.tgt_dscvr.enable, dst_addr); scsi_host_put(shost); return err; } @@ -3632,18 +3746,125 @@ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) return err; } +static int iscsi_if_transport_conn(struct iscsi_transport *transport, + struct nlmsghdr *nlh) +{ + struct iscsi_uevent *ev = nlmsg_data(nlh); + struct iscsi_cls_session *session; + struct iscsi_cls_conn *conn = NULL; + struct iscsi_cls_conn_wrapper *conn_wrapper; + struct iscsi_endpoint *ep; + uint32_t pdu_len; + int err = 0; + + switch (nlh->nlmsg_type) { + case ISCSI_UEVENT_CREATE_CONN: + return iscsi_if_create_conn(transport, ev); + case ISCSI_UEVENT_DESTROY_CONN: + return iscsi_if_destroy_conn(transport, ev); + case ISCSI_UEVENT_STOP_CONN: + return iscsi_if_stop_conn(transport, ev); + } + + /* + * The following cmds need to be run under the ep_mutex so in kernel + * conn cleanup (ep_disconnect + unbind and conn) is not done while + * these are running. They also must not run if we have just run a conn + * cleanup because they would set the state in a way that might allow + * IO or send IO themselves. 
+ */ + switch (nlh->nlmsg_type) { + case ISCSI_UEVENT_START_CONN: + conn = iscsi_conn_lookup(ev->u.start_conn.sid, + ev->u.start_conn.cid); + break; + case ISCSI_UEVENT_BIND_CONN: + conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); + break; + case ISCSI_UEVENT_SEND_PDU: + conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); + break; + } + + if (!conn) + return -EINVAL; + + conn_wrapper = conn_to_wrapper(conn); + mutex_lock(&conn->ep_mutex); + spin_lock_irq(&conn_wrapper->lock); + if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn_wrapper->flags)) { + spin_unlock_irq(&conn_wrapper->lock); + mutex_unlock(&conn->ep_mutex); + ev->r.retcode = -ENOTCONN; + return 0; + } + spin_unlock_irq(&conn_wrapper->lock); + + switch (nlh->nlmsg_type) { + case ISCSI_UEVENT_BIND_CONN: + session = iscsi_session_lookup(ev->u.b_conn.sid); + if (!session) { + err = -EINVAL; + break; + } + + ev->r.retcode = transport->bind_conn(session, conn, + ev->u.b_conn.transport_eph, + ev->u.b_conn.is_leading); + if (!ev->r.retcode) + WRITE_ONCE(conn->state, ISCSI_CONN_BOUND); + + if (ev->r.retcode || !transport->ep_connect) + break; + + ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); + if (ep) { + ep->conn = conn; + conn->ep = ep; + iscsi_put_endpoint(ep); + } else { + err = -ENOTCONN; + iscsi_cls_conn_printk(KERN_ERR, conn, + "Could not set ep conn binding\n"); + } + break; + case ISCSI_UEVENT_START_CONN: + ev->r.retcode = transport->start_conn(conn); + if (!ev->r.retcode) + WRITE_ONCE(conn->state, ISCSI_CONN_UP); + + break; + case ISCSI_UEVENT_SEND_PDU: + pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); + + if ((ev->u.send_pdu.hdr_size > pdu_len) || + (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { + err = -EINVAL; + break; + } + + ev->r.retcode = transport->send_pdu(conn, + (struct iscsi_hdr *)((char *)ev + sizeof(*ev)), + (char *)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, + ev->u.send_pdu.data_size); + break; + default: + err = -ENOSYS; + } + + mutex_unlock(&conn->ep_mutex); + return err; +} static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; u32 portid; - u32 pdu_len; struct iscsi_uevent *ev = nlmsg_data(nlh); struct iscsi_transport *transport = NULL; struct iscsi_internal *priv; struct iscsi_cls_session *session; - struct iscsi_cls_conn *conn; struct iscsi_endpoint *ep = NULL; if (!netlink_capable(skb, CAP_SYS_ADMIN)) @@ -3684,6 +3905,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) ev->u.c_bound_session.initial_cmdsn, ev->u.c_bound_session.cmds_max, ev->u.c_bound_session.queue_depth); + iscsi_put_endpoint(ep); break; case ISCSI_UEVENT_DESTROY_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); @@ -3708,7 +3930,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) list_del_init(&session->sess_list); spin_unlock_irqrestore(&sesslock, flags); - queue_work(iscsi_destroy_workq, &session->destroy_work); + queue_work(system_unbound_wq, &session->destroy_work); } break; case ISCSI_UEVENT_UNBIND_SESSION: @@ -3719,89 +3941,16 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) else err = -EINVAL; break; - case ISCSI_UEVENT_CREATE_CONN: - err = iscsi_if_create_conn(transport, ev); - break; - case ISCSI_UEVENT_DESTROY_CONN: - err = iscsi_if_destroy_conn(transport, ev); - break; - case ISCSI_UEVENT_BIND_CONN: - session = iscsi_session_lookup(ev->u.b_conn.sid); - conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); - 
- if (conn && conn->ep) - iscsi_if_ep_disconnect(transport, conn->ep->id); - - if (!session || !conn) { - err = -EINVAL; - break; - } - - mutex_lock(&conn_mutex); - ev->r.retcode = transport->bind_conn(session, conn, - ev->u.b_conn.transport_eph, - ev->u.b_conn.is_leading); - if (!ev->r.retcode) - conn->state = ISCSI_CONN_BOUND; - mutex_unlock(&conn_mutex); - - if (ev->r.retcode || !transport->ep_connect) - break; - - ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); - if (ep) { - ep->conn = conn; - - mutex_lock(&conn->ep_mutex); - conn->ep = ep; - mutex_unlock(&conn->ep_mutex); - } else - iscsi_cls_conn_printk(KERN_ERR, conn, - "Could not set ep conn " - "binding\n"); - break; case ISCSI_UEVENT_SET_PARAM: err = iscsi_set_param(transport, ev); break; - case ISCSI_UEVENT_START_CONN: - conn = iscsi_conn_lookup(ev->u.start_conn.sid, ev->u.start_conn.cid); - if (conn) { - mutex_lock(&conn_mutex); - ev->r.retcode = transport->start_conn(conn); - if (!ev->r.retcode) - conn->state = ISCSI_CONN_UP; - mutex_unlock(&conn_mutex); - } - else - err = -EINVAL; - break; + case ISCSI_UEVENT_CREATE_CONN: + case ISCSI_UEVENT_DESTROY_CONN: case ISCSI_UEVENT_STOP_CONN: - conn = iscsi_conn_lookup(ev->u.stop_conn.sid, ev->u.stop_conn.cid); - if (conn) - iscsi_if_stop_conn(conn, ev->u.stop_conn.flag); - else - err = -EINVAL; - break; + case ISCSI_UEVENT_START_CONN: + case ISCSI_UEVENT_BIND_CONN: case ISCSI_UEVENT_SEND_PDU: - pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev); - - if ((ev->u.send_pdu.hdr_size > pdu_len) || - (ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) { - err = -EINVAL; - break; - } - - conn = iscsi_conn_lookup(ev->u.send_pdu.sid, ev->u.send_pdu.cid); - if (conn) { - mutex_lock(&conn_mutex); - ev->r.retcode = transport->send_pdu(conn, - (struct iscsi_hdr*)((char*)ev + sizeof(*ev)), - (char*)ev + sizeof(*ev) + ev->u.send_pdu.hdr_size, - ev->u.send_pdu.data_size); - mutex_unlock(&conn_mutex); - } - else - err = -EINVAL; + err = iscsi_if_transport_conn(transport, nlh); break; case ISCSI_UEVENT_GET_STATS: err = iscsi_if_get_stats(transport, nlh); @@ -3991,10 +4140,11 @@ static ssize_t show_conn_state(struct device *dev, { struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); const char *state = "unknown"; + int conn_state = READ_ONCE(conn->state); - if (conn->state >= 0 && - conn->state < ARRAY_SIZE(connection_state_names)) - state = connection_state_names[conn->state]; + if (conn_state >= 0 && + conn_state < ARRAY_SIZE(connection_state_names)) + state = connection_state_names[conn_state]; return sysfs_emit(buf, "%s\n", state); } @@ -4649,7 +4799,10 @@ iscsi_register_transport(struct iscsi_transport *tt) int err; BUG_ON(!tt); - + if (tt->caps & CAP_OPS_EXPAND) { + BUG_ON(!tt->ops_expand); + WARN_ON(tt->ep_disconnect && !tt->ops_expand->unbind_conn); + } priv = iscsi_if_transport_lookup(tt); if (priv) return NULL; @@ -4803,10 +4956,10 @@ static __init int iscsi_transport_init(void) goto release_nls; } - iscsi_destroy_workq = alloc_workqueue("%s", - WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND, - 1, "iscsi_destroy"); - if (!iscsi_destroy_workq) { + iscsi_conn_cleanup_workq = alloc_workqueue("%s", + WQ_SYSFS | WQ_MEM_RECLAIM | WQ_UNBOUND, 0, + "iscsi_conn_cleanup"); + if (!iscsi_conn_cleanup_workq) { err = -ENOMEM; goto destroy_wq; } @@ -4836,7 +4989,7 @@ static __init int iscsi_transport_init(void) static void __exit iscsi_transport_exit(void) { - destroy_workqueue(iscsi_destroy_workq); + destroy_workqueue(iscsi_conn_cleanup_workq); 
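Two idioms carry most of the scsi_transport_iscsi rework above: conn->state is now accessed locklessly through READ_ONCE()/WRITE_ONCE() since conn_mutex is gone, and kernel-side connection cleanup is queued at most once per failure via test_and_set_bit() on the wrapper's flag word. In sketch form, ignoring the spinlock the real code holds around the test (wrapper layout as introduced by this patch, hypothetical function name):

/* Sketch: queue conn cleanup exactly once per error window. */
static void sketch_conn_error(struct iscsi_cls_conn_wrapper *w)
{
        int state = READ_ONCE(w->conn.state);   /* lockless snapshot */

        /* Only BOUND/UP conns have kernel-owned cleanup to run. */
        if (state != ISCSI_CONN_BOUND && state != ISCSI_CONN_UP)
                return;

        if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &w->flags))
                queue_work(iscsi_conn_cleanup_workq, &w->cleanup_work);
}

The bit is cleared again only on the recovery path, so a termination cannot re-trigger a second cleanup run while the first is being flushed.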
destroy_workqueue(iscsi_eh_timer_workq); netlink_kernel_release(nls); bus_unregister(&iscsi_flashnode_bus); diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 0a1734f34587dd4dc9e8b78468b05d37336e86b5..119a510156430a905b9f09a75780ec8ceabf904f 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -491,9 +491,6 @@ static int ses_enclosure_find_by_addr(struct enclosure_device *edev, int i; struct ses_component *scomp; - if (!edev->component[0].scratch) - return 0; - for (i = 0; i < edev->components; i++) { scomp = edev->component[i].scratch; if (scomp->addr != efd->addr) @@ -579,8 +576,10 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, components++, type_ptr[0], name); - else + else if (components < edev->components) ecomp = &edev->component[components++]; + else + ecomp = ERR_PTR(-EINVAL); if (!IS_ERR(ecomp)) { if (addl_desc_ptr) @@ -745,9 +744,11 @@ static int ses_intf_add(struct device *cdev, buf = NULL; } page2_not_supported: - scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL); - if (!scomp) - goto err_free; + if (components > 0) { + scomp = kcalloc(components, sizeof(struct ses_component), GFP_KERNEL); + if (!scomp) + goto err_free; + } edev = enclosure_register(cdev->parent, dev_name(&sdev->sdev_gendev), components, &ses_enclosure_callbacks); @@ -827,7 +828,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) kfree(ses_dev->page2); kfree(ses_dev); - kfree(edev->component[0].scratch); + if (edev->components > 0) + kfree(edev->component[0].scratch); put_device(&edev->edev); enclosure_unregister(edev); diff --git a/drivers/scsi/spfc/Kconfig b/drivers/scsi/spfc/Kconfig deleted file mode 100644 index 9d4566d90809103dc9336d519c4a480c8f9cc052..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/Kconfig +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Ramaxel SPFC driver configuration -# - -config SPFC - tristate "Ramaxel Fabric Channel Host Adapter Support" - default m - depends on PCI && SCSI - depends on SCSI_FC_ATTRS - depends on ARM64 || X86_64 - help - This driver supports Ramaxel Fabric Channel PCIe host adapter. - To compile this driver as part of the kernel, choose Y here. - If unsure, choose N. - The default is M. 
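Stepping back to the ses.c hunks above: they all defend against enclosures that report zero components. In that case edev->component[0] does not exist, so the per-component scratch array is only allocated, consulted, and freed when edev->components > 0, and the type-descriptor walk now bounds-checks the component index instead of running past the array. The allocation side, in brief:

/* Sketch: allocate per-component scratch only when components exist. */
struct ses_component *scomp = NULL;

if (components > 0) {
        scomp = kcalloc(components, sizeof(*scomp), GFP_KERNEL);
        if (!scomp)
                goto err_free;  /* as in ses_intf_add() above */
}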
diff --git a/drivers/scsi/spfc/Makefile b/drivers/scsi/spfc/Makefile deleted file mode 100644 index 849b730ac7335997255edf56b89eadb60e9252cc..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/Makefile +++ /dev/null @@ -1,47 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_SPFC) += spfc.o - -subdir-ccflags-y += -I$(srctree)/$(src)/../../net/ethernet/ramaxel/spnic/hw -subdir-ccflags-y += -I$(srctree)/$(src)/hw -subdir-ccflags-y += -I$(srctree)/$(src)/common - -spfc-objs := common/unf_init.o \ - common/unf_event.o \ - common/unf_exchg.o \ - common/unf_exchg_abort.o \ - common/unf_io.o \ - common/unf_io_abnormal.o \ - common/unf_lport.o \ - common/unf_npiv.o \ - common/unf_npiv_portman.o \ - common/unf_disc.o \ - common/unf_rport.o \ - common/unf_service.o \ - common/unf_ls.o \ - common/unf_gs.o \ - common/unf_portman.o \ - common/unf_scsi.o \ - hw/spfc_utils.o \ - hw/spfc_lld.o \ - hw/spfc_io.o \ - hw/spfc_wqe.o \ - hw/spfc_service.o \ - hw/spfc_chipitf.o \ - hw/spfc_queue.o \ - hw/spfc_hba.o \ - hw/spfc_cqm_bat_cla.o \ - hw/spfc_cqm_bitmap_table.o \ - hw/spfc_cqm_main.o \ - hw/spfc_cqm_object.o \ - sphw_hwdev.o \ - sphw_hw_cfg.o \ - sphw_hw_comm.o \ - sphw_prof_adap.o \ - sphw_common.o \ - sphw_hwif.o \ - sphw_wq.o \ - sphw_cmdq.o \ - sphw_eqs.o \ - sphw_mbox.o \ - sphw_mgmt.o \ - sphw_api_cmd.o diff --git a/drivers/scsi/spfc/common/unf_common.h b/drivers/scsi/spfc/common/unf_common.h deleted file mode 100644 index bf9d156e07ceec3e4e59bbbfd808fae20a2ec112..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_common.h +++ /dev/null @@ -1,1755 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_COMMON_H -#define UNF_COMMON_H - -#include "unf_type.h" -#include "unf_fcstruct.h" - -/* version num */ -#define SPFC_DRV_VERSION "B101" -#define SPFC_DRV_DESC "Ramaxel Memory Technology Fibre Channel Driver" - -#define UNF_MAX_SECTORS 0xffff -#define UNF_ORIGIN_HOTTAG_MASK 0x7fff -#define UNF_HOTTAG_FLAG (1 << 15) -#define UNF_PKG_FREE_OXID 0x0 -#define UNF_PKG_FREE_RXID 0x1 - -#define UNF_SPFC_MAXRPORT_NUM (2048) -#define SPFC_DEFAULT_RPORT_INDEX (UNF_SPFC_MAXRPORT_NUM - 1) - -/* session use sq num */ -#define UNF_SQ_NUM_PER_SESSION 3 - -extern atomic_t fc_mem_ref; -extern u32 unf_dgb_level; -extern u32 spfc_dif_type; -extern u32 spfc_dif_enable; -extern u8 spfc_guard; -extern int link_lose_tmo; - -/* define bits */ -#define UNF_BIT(n) (0x1UL << (n)) -#define UNF_BIT_0 UNF_BIT(0) -#define UNF_BIT_1 UNF_BIT(1) -#define UNF_BIT_2 UNF_BIT(2) -#define UNF_BIT_3 UNF_BIT(3) -#define UNF_BIT_4 UNF_BIT(4) -#define UNF_BIT_5 UNF_BIT(5) - -#define UNF_BITS_PER_BYTE 8 - -#define UNF_NOTIFY_UP_CLEAN_FLASH 2 - -/* Echo macro define */ -#define ECHO_MG_VERSION_LOCAL 1 -#define ECHO_MG_VERSION_REMOTE 2 - -#define SPFC_WIN_NPIV_NUM 32 - -#define UNF_GET_NAME_HIGH_WORD(name) (((name) >> 32) & 0xffffffff) -#define UNF_GET_NAME_LOW_WORD(name) ((name) & 0xffffffff) - -#define UNF_FIRST_LPORT_ID_MASK 0xffffff00 -#define UNF_PORT_ID_MASK 0x000000ff -#define UNF_FIRST_LPORT_ID 0x00000000 -#define UNF_SECOND_LPORT_ID 0x00000001 -#define UNF_EIGHTH_LPORT_ID 0x00000007 -#define SPFC_MAX_COUNTER_TYPE 128 - -#define UNF_EVENT_ASYN 0 -#define UNF_EVENT_SYN 1 -#define UNF_GLOBAL_EVENT_ASYN 2 -#define UNF_GLOBAL_EVENT_SYN 3 - -#define UNF_GET_SLOT_ID_BY_PORTID(port_id) (((port_id) & 0x001f00) >> 8) -#define UNF_GET_FUNC_ID_BY_PORTID(port_id) ((port_id) & 0x0000ff) -#define 
UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(port_id) \ - (((port_id) & 0x00FF00) >> 8) - -#define UNF_FC_SERVER_BOARD_8_G 13 /* 8G mode */ -#define UNF_FC_SERVER_BOARD_16_G 7 /* 16G mode */ -#define UNF_FC_SERVER_BOARD_32_G 6 /* 32G mode */ - -#define UNF_PORT_TYPE_FC_QSFP 1 -#define UNF_PORT_TYPE_FC_SFP 0 -#define UNF_PORT_UNGRADE_FW_RESET_ACTIVE 0 -#define UNF_PORT_UNGRADE_FW_RESET_INACTIVE 1 - -enum unf_rport_qos_level { - UNF_QOS_LEVEL_DEFAULT = 0, - UNF_QOS_LEVEL_MIDDLE, - UNF_QOS_LEVEL_HIGH, - UNF_QOS_LEVEL_BUTT -}; - -struct buff_list { - u8 *vaddr; - dma_addr_t paddr; -}; - -struct buf_describe { - struct buff_list *buflist; - u32 buf_size; - u32 buf_num; -}; - -#define IO_STATICS -struct unf_port_info { - u32 local_nport_id; - u32 nport_id; - u32 rport_index; - u64 port_name; - enum unf_rport_qos_level qos_level; - u8 cs_ctrl; - u8 rsvd0[3]; - u32 sqn_base; -}; - -struct unf_cfg_item { - char *puc_name; - u32 min_value; - u32 default_value; - u32 max_value; -}; - -struct unf_port_param { - u32 ra_tov; - u32 ed_tov; -}; - -/* get wwpn adn wwnn */ -struct unf_get_chip_info_argout { - u8 board_type; - u64 wwpn; - u64 wwnn; - u64 sys_mac; -}; - -/* get sfp info: present and speed */ -struct unf_get_port_info_argout { - u8 sfp_speed; - u8 present; - u8 rsvd[2]; -}; - -/* SFF-8436(QSFP+) Rev 4.7 */ -struct unf_sfp_plus_field_a0 { - u8 identifier; - /* offset 1~2 */ - struct { - u8 reserved; - u8 status; - } status_indicator; - /* offset 3~21 */ - struct { - u8 rx_tx_los; - u8 tx_fault; - u8 all_resv; - - u8 ini_complete : 1; - u8 bit_resv : 3; - u8 temp_low_warn : 1; - u8 temp_high_warn : 1; - u8 temp_low_alarm : 1; - u8 temp_high_alarm : 1; - - u8 resv : 4; - u8 vcc_low_warn : 1; - u8 vcc_high_warn : 1; - u8 vcc_low_alarm : 1; - u8 vcc_high_alarm : 1; - - u8 resv8; - u8 rx_pow[2]; - u8 tx_bias[2]; - u8 reserved[6]; - u8 vendor_specifics[3]; - } interrupt_flag; - /* offset 22~33 */ - struct { - u8 temp[2]; - u8 reserved[2]; - u8 supply_vol[2]; - u8 reserveds[2]; - u8 vendor_specific[4]; - } module_monitors; - /* offset 34~81 */ - struct { - u8 rx_pow[8]; - u8 tx_bias[8]; - u8 reserved[16]; - u8 vendor_specific[16]; - } channel_monitor_val; - - /* offset 82~85 */ - u8 reserved[4]; - - /* offset 86~97 */ - struct { - /* 86~88 */ - u8 tx_disable; - u8 rx_rate_select; - u8 tx_rate_select; - - /* 89~92 */ - u8 rx4_app_select; - u8 rx3_app_select; - u8 rx2_app_select; - u8 rx1_app_select; - /* 93 */ - u8 power_override : 1; - u8 power_set : 1; - u8 reserved : 6; - - /* 94~97 */ - u8 tx4_app_select; - u8 tx3_app_select; - u8 tx2_app_select; - u8 tx1_app_select; - /* 98~99 */ - u8 reserved2[2]; - } control; - /* 100~106 */ - struct { - /* 100 */ - u8 m_rx1_los : 1; - u8 m_rx2_los : 1; - u8 m_rx3_los : 1; - u8 m_rx4_los : 1; - u8 m_tx1_los : 1; - u8 m_tx2_los : 1; - u8 m_tx3_los : 1; - u8 m_tx4_los : 1; - /* 101 */ - u8 m_tx1_fault : 1; - u8 m_tx2_fault : 1; - u8 m_tx3_fault : 1; - u8 m_tx4_fault : 1; - u8 reserved : 4; - /* 102 */ - u8 reserved1; - /* 103 */ - u8 mini_cmp_flag : 1; - u8 rsv : 3; - u8 m_temp_low_warn : 1; - u8 m_temp_high_warn : 1; - u8 m_temp_low_alarm : 1; - u8 m_temp_high_alarm : 1; - /* 104 */ - u8 rsv1 : 4; - u8 m_vcc_low_warn : 1; - u8 m_vcc_high_warn : 1; - u8 m_vcc_low_alarm : 1; - u8 m_vcc_high_alarm : 1; - /* 105~106 */ - u8 vendor_specific[2]; - } module_channel_mask_bit; - /* 107~118 */ - u8 resv[12]; - /* 119~126 */ - u8 password_reserved[8]; - /* 127 */ - u8 page_select; -}; - -/* page 00 */ -struct unf_sfp_plus_field_00 { - /* 128~191 */ - struct { - u8 id; - u8 
id_ext; - u8 connector; - u8 speci_com[6]; - u8 mode; - u8 speed; - u8 encoding; - u8 br_nominal; - u8 ext_rate_select_com; - u8 length_smf; - u8 length_om3; - u8 length_om2; - u8 length_om1; - u8 length_copper; - u8 device_tech; - u8 vendor_name[16]; - u8 ex_module; - u8 vendor_oui[3]; - u8 vendor_pn[16]; - u8 vendor_rev[2]; - /* Wave length or Copper cable Attenuation*/ - u8 wave_or_copper_attenuation[2]; - u8 wave_length_toler[2]; /* Wavelength tolerance */ - u8 max_temp; - u8 cc_base; - } base_id_fields; - /* 192~223 */ - struct { - u8 options[4]; - u8 vendor_sn[16]; - u8 date_code[8]; - u8 diagn_monit_type; - u8 enhance_opt; - u8 reserved; - u8 ccext; - } ext_id_fields; - /* 224~255 */ - u8 vendor_spec_eeprom[32]; -}; - -/* page 01 */ -struct unf_sfp_plus_field_01 { - u8 optional01[128]; -}; - -/* page 02 */ -struct unf_sfp_plus_field_02 { - u8 optional02[128]; -}; - -/* page 03 */ -struct unf_sfp_plus_field_03 { - u8 temp_high_alarm[2]; - u8 temp_low_alarm[2]; - u8 temp_high_warn[2]; - u8 temp_low_warn[2]; - - u8 reserved1[8]; - - u8 vcc_high_alarm[2]; - u8 vcc_low_alarm[2]; - u8 vcc_high_warn[2]; - u8 vcc_low_warn[2]; - - u8 reserved2[8]; - u8 vendor_specific1[16]; - - u8 pow_high_alarm[2]; - u8 pow_low_alarm[2]; - u8 pow_high_warn[2]; - u8 pow_low_warn[2]; - - u8 bias_high_alarm[2]; - u8 bias_low_alarm[2]; - u8 bias_high_warn[2]; - u8 bias_low_warn[2]; - - u8 tx_power_high_alarm[2]; - u8 tx_power_low_alarm[2]; - u8 reserved3[4]; - - u8 reserved4[8]; - - u8 vendor_specific2[16]; - u8 reserved5[2]; - u8 vendor_specific3[12]; - u8 rx_ampl[2]; - u8 rx_tx_sq_disable; - u8 rx_output_disable; - u8 chan_monit_mask[12]; - u8 reserved6[2]; -}; - -struct unf_sfp_plus_info { - struct unf_sfp_plus_field_a0 sfp_plus_info_a0; - struct unf_sfp_plus_field_00 sfp_plus_info_00; - struct unf_sfp_plus_field_01 sfp_plus_info_01; - struct unf_sfp_plus_field_02 sfp_plus_info_02; - struct unf_sfp_plus_field_03 sfp_plus_info_03; -}; - -struct unf_sfp_data_field_a0 { - /* Offset 0~63 */ - struct { - u8 id; - u8 id_ext; - u8 connector; - u8 transceiver[8]; - u8 encoding; - u8 br_nominal; /* Nominal signalling rate, units of 100MBd. 
*/ - u8 rate_identifier; /* Type of rate select functionality */ - /* Link length supported for single mode fiber, units of km */ - u8 length_smk_km; - /* Link length supported for single mode fiber, - *units of 100 m - */ - u8 length_smf; - /* Link length supported for 50 um OM2 fiber,units of 10 m */ - u8 length_smf_om2; - /* Link length supported for 62.5 um OM1 fiber, units of 10 m */ - u8 length_smf_om1; - /*Link length supported for copper/direct attach cable, - *units of m - */ - u8 length_cable; - /* Link length supported for 50 um OM3 fiber, units of 10m */ - u8 length_om3; - u8 vendor_name[16]; /* ASCII */ - /* Code for electronic or optical compatibility*/ - u8 transceiver2; - u8 vendor_oui[3]; /* SFP vendor IEEE company ID */ - u8 vendor_pn[16]; /* Part number provided by SFP vendor (ASCII) - */ - /* Revision level for part number provided by vendor (ASCII) */ - u8 vendor_rev[4]; - /* Laser wavelength (Passive/Active Cable - *Specification Compliance) - */ - u8 wave_length[2]; - u8 unallocated; - /* Check code for Base ID Fields (addresses 0 to 62)*/ - u8 cc_base; - } base_id_fields; - - /* Offset 64~95 */ - struct { - u8 options[2]; - u8 br_max; - u8 br_min; - u8 vendor_sn[16]; - u8 date_code[8]; - u8 diag_monitoring_type; - u8 enhanced_options; - u8 sff8472_compliance; - u8 cc_ext; - } ext_id_fields; - - /* Offset 96~255 */ - struct { - u8 vendor_spec_eeprom[32]; - u8 rsvd[128]; - } vendor_spec_id_fields; -}; - -struct unf_sfp_data_field_a2 { - /* Offset 0~119 */ - struct { - /* 0~39 */ - struct { - u8 temp_alarm_high[2]; - u8 temp_alarm_low[2]; - u8 temp_warning_high[2]; - u8 temp_warning_low[2]; - - u8 vcc_alarm_high[2]; - u8 vcc_alarm_low[2]; - u8 vcc_warning_high[2]; - u8 vcc_warning_low[2]; - - u8 bias_alarm_high[2]; - u8 bias_alarm_low[2]; - u8 bias_warning_high[2]; - u8 bias_warning_low[2]; - - u8 tx_alarm_high[2]; - u8 tx_alarm_low[2]; - u8 tx_warning_high[2]; - u8 tx_warning_low[2]; - - u8 rx_alarm_high[2]; - u8 rx_alarm_low[2]; - u8 rx_warning_high[2]; - u8 rx_warning_low[2]; - } alarm_warn_th; - - u8 unallocated0[16]; - u8 ext_cal_constants[36]; - u8 unallocated1[3]; - u8 cc_dmi; - - /* 96~105 */ - struct { - u8 temp[2]; - u8 vcc[2]; - u8 tx_bias[2]; - u8 tx_power[2]; - u8 rx_power[2]; - } diag; - - u8 unallocated2[4]; - - struct { - u8 data_rdy_bar_state : 1; - u8 rx_los : 1; - u8 tx_fault_state : 1; - u8 soft_rate_select_state : 1; - u8 rate_select_state : 1; - u8 rs_state : 1; - u8 soft_tx_disable_select : 1; - u8 tx_disable_state : 1; - } status_ctrl; - u8 rsvd; - - /* 112~113 */ - struct { - /* 112 */ - u8 tx_alarm_low : 1; - u8 tx_alarm_high : 1; - u8 tx_bias_alarm_low : 1; - u8 tx_bias_alarm_high : 1; - u8 vcc_alarm_low : 1; - u8 vcc_alarm_high : 1; - u8 temp_alarm_low : 1; - u8 temp_alarm_high : 1; - - /* 113 */ - u8 rsvd : 6; - u8 rx_alarm_low : 1; - u8 rx_alarm_high : 1; - } alarm; - - u8 unallocated3[2]; - - /* 116~117 */ - struct { - /* 116 */ - u8 tx_warn_lo : 1; - u8 tx_warn_hi : 1; - u8 bias_warn_lo : 1; - u8 bias_warn_hi : 1; - u8 vcc_warn_lo : 1; - u8 vcc_warn_hi : 1; - u8 temp_warn_lo : 1; - u8 temp_warn_hi : 1; - - /* 117 */ - u8 rsvd : 6; - u8 rx_warn_lo : 1; - u8 rx_warn_hi : 1; - } warning; - - u8 ext_status_and_ctrl[2]; - } diag; - - /* Offset 120~255 */ - struct { - u8 vendor_spec[8]; - u8 user_eeprom[120]; - u8 vendor_ctrl[8]; - } general_use_fields; -}; - -struct unf_sfp_info { - struct unf_sfp_data_field_a0 sfp_info_a0; - struct unf_sfp_data_field_a2 sfp_info_a2; -}; - -struct unf_sfp_err_rome_info { - struct unf_sfp_info sfp_info; - 
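A note on the A2 page mirrored by struct unf_sfp_data_field_a2 above: per SFF-8472 the diagnostic words are big-endian, temperature is signed two's complement in 1/256 degC steps, and supply voltage is unsigned in 100 uV steps. A minimal decoding sketch under those assumptions (helper names are illustrative, not part of the driver):

	#include <stdint.h>

	/* SFF-8472 A2 temperature: signed two's complement, 1/256 degC per LSB;
	 * integer division truncates toward zero for negative temperatures
	 */
	static int32_t sfp_temp_millideg(const uint8_t raw[2])
	{
		int16_t v = (int16_t)(((uint16_t)raw[0] << 8) | raw[1]);

		return ((int32_t)v * 1000) / 256;
	}

	/* SFF-8472 A2 supply voltage: unsigned, 100 uV per LSB */
	static uint32_t sfp_vcc_microvolt(const uint8_t raw[2])
	{
		uint16_t v = (uint16_t)(((uint16_t)raw[0] << 8) | raw[1]);

		return (uint32_t)v * 100;
	}

For example, raw temperature bytes {0x19, 0x80} decode to 25500 millidegrees, i.e. 25.5 degC.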
struct unf_sfp_plus_info sfp_plus_info; -}; - -struct unf_err_code { - u32 loss_of_signal_count; - u32 bad_rx_char_count; - u32 loss_of_sync_count; - u32 link_fail_count; - u32 rx_eof_a_count; - u32 dis_frame_count; - u32 bad_crc_count; - u32 proto_error_count; -}; - -/* config file */ -enum unf_port_mode { - UNF_PORT_MODE_UNKNOWN = 0x00, - UNF_PORT_MODE_TGT = 0x10, - UNF_PORT_MODE_INI = 0x20, - UNF_PORT_MODE_BOTH = 0x30 -}; - -enum unf_port_upgrade { - UNF_PORT_UNSUPPORT_UPGRADE_REPORT = 0x00, - UNF_PORT_SUPPORT_UPGRADE_REPORT = 0x01, - UNF_PORT_UPGRADE_BUTT -}; - -#define UNF_BYTES_OF_DWORD 0x4 -static inline void __attribute__((unused)) unf_big_end_to_cpu(u8 *buffer, u32 size) -{ - u32 *buf = NULL; - u32 word_sum = 0; - u32 index = 0; - - if (!buffer) - return; - - buf = (u32 *)buffer; - - /* byte to word */ - if (size % UNF_BYTES_OF_DWORD == 0) - word_sum = size / UNF_BYTES_OF_DWORD; - else - return; - - /* word to byte */ - while (index < word_sum) { - *buf = be32_to_cpu(*buf); - buf++; - index++; - } -} - -static inline void __attribute__((unused)) unf_cpu_to_big_end(void *buffer, u32 size) -{ -#define DWORD_BIT 32 -#define BYTE_BIT 8 - u32 *buf = NULL; - u32 word_sum = 0; - u32 index = 0; - u32 tmp = 0; - - if (!buffer) - return; - - buf = (u32 *)buffer; - - /* byte to dword */ - word_sum = size / UNF_BYTES_OF_DWORD; - - /* dword to byte */ - while (index < word_sum) { - *buf = cpu_to_be32(*buf); - buf++; - index++; - } - - if (size % UNF_BYTES_OF_DWORD) { - tmp = cpu_to_be32(*buf); - tmp = - tmp >> (DWORD_BIT - (size % UNF_BYTES_OF_DWORD) * BYTE_BIT); - memcpy(buf, &tmp, (size % UNF_BYTES_OF_DWORD)); - } -} - -#define UNF_TOP_AUTO_MASK 0x0f -#define UNF_TOP_UNKNOWN 0xff -#define SPFC_TOP_AUTO 0x0 - -#define UNF_NORMAL_MODE 0 -#define UNF_SET_NOMAL_MODE(mode) ((mode) = UNF_NORMAL_MODE) - -/* - * * SCSI status - */ -#define SCSI_GOOD 0x00 -#define SCSI_CHECK_CONDITION 0x02 -#define SCSI_CONDITION_MET 0x04 -#define SCSI_BUSY 0x08 -#define SCSI_INTERMEDIATE 0x10 -#define SCSI_INTERMEDIATE_COND_MET 0x14 -#define SCSI_RESERVATION_CONFLICT 0x18 -#define SCSI_TASK_SET_FULL 0x28 -#define SCSI_ACA_ACTIVE 0x30 -#define SCSI_TASK_ABORTED 0x40 - -enum unf_act_topo { - UNF_ACT_TOP_PUBLIC_LOOP = 0x1, - UNF_ACT_TOP_PRIVATE_LOOP = 0x2, - UNF_ACT_TOP_P2P_DIRECT = 0x4, - UNF_ACT_TOP_P2P_FABRIC = 0x8, - UNF_TOP_LOOP_MASK = 0x03, - UNF_TOP_P2P_MASK = 0x0c, - UNF_TOP_FCOE_MASK = 0x30, - UNF_ACT_TOP_UNKNOWN -}; - -#define UNF_FL_PORT_LOOP_ADDR 0x00 -#define UNF_INVALID_LOOP_ADDR 0xff - -#define UNF_LOOP_ROLE_MASTER_OR_SLAVE 0x0 -#define UNF_LOOP_ROLE_ONLY_SLAVE 0x1 - -#define UNF_TOU16_CHECK(dest, src, over_action) \ - do { \ - if (unlikely(0xFFFF < (src))) { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, \ - UNF_ERR, "ToU16 error, src 0x%x ", \ - (src)); \ - over_action; \ - } \ - ((dest) = (u16)(src)); \ - } while (0) - -#define UNF_PORT_SPEED_AUTO 0 -#define UNF_PORT_SPEED_2_G 2 -#define UNF_PORT_SPEED_4_G 4 -#define UNF_PORT_SPEED_8_G 8 -#define UNF_PORT_SPEED_10_G 10 -#define UNF_PORT_SPEED_16_G 16 -#define UNF_PORT_SPEED_32_G 32 - -#define UNF_PORT_SPEED_UNKNOWN (~0) -#define UNF_PORT_SFP_SPEED_ERR 0xFF - -#define UNF_OP_DEBUG_DUMP 0x0001 -#define UNF_OP_FCPORT_INFO 0x0002 -#define UNF_OP_FCPORT_LINK_CMD_TEST 0x0003 -#define UNF_OP_TEST_MBX 0x0004 - -/* max frame size */ -#define UNF_MAX_FRAME_SIZE 2112 - -/* default */ -#define UNF_DEFAULT_FRAME_SIZE 2048 -#define UNF_DEFAULT_EDTOV 2000 -#define UNF_DEFAULT_RATOV 10000 -#define UNF_DEFAULT_FABRIC_RATOV 10000 -#define UNF_MAX_RETRY_COUNT 3 -#define 
UNF_RRQ_MIN_TIMEOUT_INTERVAL 30000 -#define UNF_LOGO_TIMEOUT_INTERVAL 3000 -#define UNF_SFS_MIN_TIMEOUT_INTERVAL 15000 -#define UNF_WRITE_RRQ_SENDERR_INTERVAL 3000 -#define UNF_REC_TOV 3000 - -#define UNF_WAIT_SEM_TIMEOUT (5000UL) -#define UNF_WAIT_ABTS_RSP_TIMEOUT (20000UL) -#define UNF_MAX_ABTS_WAIT_INTERVAL ((UNF_WAIT_SEM_TIMEOUT - 500) / 1000) - -#define UNF_TGT_RRQ_REDUNDANT_TIME 2000 -#define UNF_INI_RRQ_REDUNDANT_TIME 500 -#define UNF_INI_ELS_REDUNDANT_TIME 2000 - -/* ELS command values */ -#define UNF_ELS_CMND_HIGH_MASK 0xff000000 -#define UNF_ELS_CMND_RJT 0x01000000 -#define UNF_ELS_CMND_ACC 0x02000000 -#define UNF_ELS_CMND_PLOGI 0x03000000 -#define UNF_ELS_CMND_FLOGI 0x04000000 -#define UNF_ELS_CMND_LOGO 0x05000000 -#define UNF_ELS_CMND_RLS 0x0F000000 -#define UNF_ELS_CMND_ECHO 0x10000000 -#define UNF_ELS_CMND_REC 0x13000000 -#define UNF_ELS_CMND_RRQ 0x12000000 -#define UNF_ELS_CMND_PRLI 0x20000000 -#define UNF_ELS_CMND_PRLO 0x21000000 -#define UNF_ELS_CMND_PDISC 0x50000000 -#define UNF_ELS_CMND_FDISC 0x51000000 -#define UNF_ELS_CMND_ADISC 0x52000000 -#define UNF_ELS_CMND_FAN 0x60000000 -#define UNF_ELS_CMND_RSCN 0x61000000 -#define UNF_FCP_CMND_SRR 0x14000000 -#define UNF_GS_CMND_SCR 0x62000000 - -#define UNF_PLOGI_VERSION_UPPER 0x20 -#define UNF_PLOGI_VERSION_LOWER 0x20 -#define UNF_PLOGI_CONCURRENT_SEQ 0x00FF -#define UNF_PLOGI_RO_CATEGORY 0x00FE -#define UNF_PLOGI_SEQ_PER_XCHG 0x0001 -#define UNF_LGN_INFRAMESIZE 2048 - -/* CT_IU pream defines */ -#define UNF_REV_NPORTID_INIT 0x01000000 -#define UNF_FSTYPE_OPT_INIT 0xfc020000 -#define UNF_FSTYPE_RFT_ID 0x02170000 -#define UNF_FSTYPE_GID_PT 0x01A10000 -#define UNF_FSTYPE_GID_FT 0x01710000 -#define UNF_FSTYPE_RFF_ID 0x021F0000 -#define UNF_FSTYPE_GFF_ID 0x011F0000 -#define UNF_FSTYPE_GNN_ID 0x01130000 -#define UNF_FSTYPE_GPN_ID 0x01120000 - -#define UNF_CT_IU_RSP_MASK 0xffff0000 -#define UNF_CT_IU_REASON_MASK 0x00ff0000 -#define UNF_CT_IU_EXPLAN_MASK 0x0000ff00 -#define UNF_CT_IU_REJECT 0x80010000 -#define UNF_CT_IU_ACCEPT 0x80020000 - -#define UNF_FABRIC_FULL_REG 0x00000003 - -#define UNF_FC4_SCSI_BIT8 0x00000100 -#define UNF_FC4_FCP_TYPE 0x00000008 -#define UNF_FRAG_REASON_VENDOR 0 - -/* GID_PT, GID_FT */ -#define UNF_GID_PT_TYPE 0x7F000000 -#define UNF_GID_FT_TYPE 0x00000008 - -/* - *FC4 defines - */ -#define UNF_FC4_FRAME_PAGE_SIZE 0x10 -#define UNF_FC4_FRAME_PAGE_SIZE_SHIFT 16 - -#define UNF_FC4_FRAME_PARM_0_FCP 0x08000000 -#define UNF_FC4_FRAME_PARM_0_I_PAIR 0x00002000 -#define UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE 0x00000100 -#define UNF_FC4_FRAME_PARM_0_MASK \ - (UNF_FC4_FRAME_PARM_0_FCP | UNF_FC4_FRAME_PARM_0_I_PAIR | \ - UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE) -#define UNF_FC4_FRAME_PARM_3_INI 0x00000020 -#define UNF_FC4_FRAME_PARM_3_TGT 0x00000010 -#define UNF_FC4_FRAME_PARM_3_BOTH \ - (UNF_FC4_FRAME_PARM_3_INI | UNF_FC4_FRAME_PARM_3_TGT) -#define UNF_FC4_FRAME_PARM_3_R_XFER_DIS 0x00000002 -#define UNF_FC4_FRAME_PARM_3_W_XFER_DIS 0x00000001 -#define UNF_FC4_FRAME_PARM_3_REC_SUPPORT 0x00000400 /* bit 10 */ -#define UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT 0x00000200 /* bit 9 */ -#define UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT 0x00000100 /* bit 8 */ -#define UNF_FC4_FRAME_PARM_3_CONF_ALLOW 0x00000080 /* bit 7 */ - -#define UNF_FC4_FRAME_PARM_3_MASK \ - (UNF_FC4_FRAME_PARM_3_INI | UNF_FC4_FRAME_PARM_3_TGT | \ - UNF_FC4_FRAME_PARM_3_R_XFER_DIS) - -#define UNF_FC4_TYPE_SHIFT 24 -#define UNF_FC4_TYPE_MASK 0xff -/* FC4 feature we support */ -#define UNF_GFF_ACC_MASK 0xFF000000 - -/* Reject CT_IU Reason Codes */ -#define 
UNF_CTIU_RJT_MASK 0xffff0000 -#define UNF_CTIU_RJT_INVALID_COMMAND 0x00010000 -#define UNF_CTIU_RJT_INVALID_VERSION 0x00020000 -#define UNF_CTIU_RJT_LOGIC_ERR 0x00030000 -#define UNF_CTIU_RJT_INVALID_SIZE 0x00040000 -#define UNF_CTIU_RJT_LOGIC_BUSY 0x00050000 -#define UNF_CTIU_RJT_PROTOCOL_ERR 0x00070000 -#define UNF_CTIU_RJT_UNABLE_PERFORM 0x00090000 -#define UNF_CTIU_RJT_NOT_SUPPORTED 0x000B0000 - -/* FS_RJT Reason code explanations, FC-GS-2 6.5 */ -#define UNF_CTIU_RJT_EXP_MASK 0x0000FF00 -#define UNF_CTIU_RJT_EXP_NO_ADDTION 0x00000000 -#define UNF_CTIU_RJT_EXP_PORTID_NO_REG 0x00000100 -#define UNF_CTIU_RJT_EXP_PORTNAME_NO_REG 0x00000200 -#define UNF_CTIU_RJT_EXP_NODENAME_NO_REG 0x00000300 -#define UNF_CTIU_RJT_EXP_FC4TYPE_NO_REG 0x00000700 -#define UNF_CTIU_RJT_EXP_PORTTYPE_NO_REG 0x00000A00 - -/* - * LS_RJT defines - */ -#define UNF_FC_LS_RJT_REASON_MASK 0x00ff0000 - -/* - * LS_RJT reason code defines - */ -#define UNF_LS_OK 0x00000000 -#define UNF_LS_RJT_INVALID_COMMAND 0x00010000 -#define UNF_LS_RJT_LOGICAL_ERROR 0x00030000 -#define UNF_LS_RJT_BUSY 0x00050000 -#define UNF_LS_RJT_PROTOCOL_ERROR 0x00070000 -#define UNF_LS_RJT_REQUEST_DENIED 0x00090000 -#define UNF_LS_RJT_NOT_SUPPORTED 0x000b0000 -#define UNF_LS_RJT_CLASS_ERROR 0x000c0000 - -/* - * LS_RJT code explanation - */ -#define UNF_LS_RJT_NO_ADDITIONAL_INFO 0x00000000 -#define UNF_LS_RJT_INV_DATA_FIELD_SIZE 0x00000700 -#define UNF_LS_RJT_INV_COMMON_SERV_PARAM 0x00000F00 -#define UNF_LS_RJT_INVALID_OXID_RXID 0x00001700 -#define UNF_LS_RJT_COMMAND_IN_PROGRESS 0x00001900 -#define UNF_LS_RJT_INSUFFICIENT_RESOURCES 0x00002900 -#define UNF_LS_RJT_COMMAND_NOT_SUPPORTED 0x00002C00 -#define UNF_LS_RJT_UNABLE_TO_SUPLY_REQ_DATA 0x00002A00 -#define UNF_LS_RJT_INVALID_PAYLOAD_LENGTH 0x00002D00 - -#define UNF_P2P_LOCAL_NPORT_ID 0x000000EF -#define UNF_P2P_REMOTE_NPORT_ID 0x000000D6 - -#define UNF_BBCREDIT_MANAGE_NFPORT 0 -#define UNF_BBCREDIT_MANAGE_LPORT 1 -#define UNF_BBCREDIT_LPORT 0 -#define UNF_CONTIN_INCREASE_SUPPORT 1 -#define UNF_CLASS_VALID 1 -#define UNF_CLASS_INVALID 0 -#define UNF_NOT_MEANINGFUL 0 -#define UNF_NO_SERVICE_PARAMS 0 -#define UNF_CLEAN_ADDRESS_DEFAULT 0 -#define UNF_PRIORITY_ENABLE 1 -#define UNF_PRIORITY_DISABLE 0 -#define UNF_SEQUEN_DELIVERY_REQ 1 /* Sequential delivery requested */ - -#define UNF_FC_PROTOCOL_CLASS_3 0x0 -#define UNF_FC_PROTOCOL_CLASS_2 0x1 -#define UNF_FC_PROTOCOL_CLASS_1 0x2 -#define UNF_FC_PROTOCOL_CLASS_F 0x3 -#define UNF_FC_PROTOCOL_CLASS_OTHER 0x4 - -#define UNF_RSCN_PORT_ADDR 0x0 -#define UNF_RSCN_AREA_ADDR_GROUP 0x1 -#define UNF_RSCN_DOMAIN_ADDR_GROUP 0x2 -#define UNF_RSCN_FABRIC_ADDR_GROUP 0x3 - -#define UNF_GET_RSCN_PLD_LEN(cmnd) ((cmnd) & 0x0000ffff) -#define UNF_RSCN_PAGE_LEN 0x4 - -#define UNF_PORT_LINK_UP 0x0000 -#define UNF_PORT_LINK_DOWN 0x0001 -#define UNF_PORT_RESET_START 0x0002 -#define UNF_PORT_RESET_END 0x0003 -#define UNF_PORT_LINK_UNKNOWN 0x0004 -#define UNF_PORT_NOP 0x0005 -#define UNF_PORT_CORE_FATAL_ERROR 0x0006 -#define UNF_PORT_CORE_UNRECOVERABLE_ERROR 0x0007 -#define UNF_PORT_CORE_RECOVERABLE_ERROR 0x0008 -#define UNF_PORT_LOGOUT 0x0009 -#define UNF_PORT_CLEAR_VLINK 0x000a -#define UNF_PORT_UPDATE_PROCESS 0x000b -#define UNF_PORT_DEBUG_DUMP 0x000c -#define UNF_PORT_GET_FWLOG 0x000d -#define UNF_PORT_CLEAN_DONE 0x000e -#define UNF_PORT_BEGIN_REMOVE 0x000f -#define UNF_PORT_RELEASE_RPORT_INDEX 0x0010 -#define UNF_PORT_ABNORMAL_RESET 0x0012 - -/* - * SCSI begin - */ -#define SCSIOPC_TEST_UNIT_READY 0x00 -#define SCSIOPC_INQUIRY 0x12 -#define SCSIOPC_MODE_SENSE_6 0x1A 
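As an aside on how the SCSI opcode list that continues below is consumed: the IS_READ_COMMAND()/IS_WRITE_COMMAND() predicates defined after it reduce a CDB opcode to a data direction. A switch-based sketch of the same classification (enum and function names are illustrative):

	#include <stdint.h>

	enum io_dir { IO_DIR_NONE, IO_DIR_READ, IO_DIR_WRITE };

	static enum io_dir cdb_direction(uint8_t opcode)
	{
		switch (opcode) {
		case 0x08: case 0x28: case 0xA8: case 0x88: /* READ 6/10/12/16 */
			return IO_DIR_READ;
		case 0x0A: case 0x2A: case 0xAA: case 0x8A: /* WRITE 6/10/12/16 */
			return IO_DIR_WRITE;
		default:
			return IO_DIR_NONE;
		}
	}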
-#define SCSIOPC_MODE_SENSE_10 0x5A -#define SCSIOPC_MODE_SELECT_6 0x15 -#define SCSIOPC_RESERVE 0x16 -#define SCSIOPC_RELEASE 0x17 -#define SCSIOPC_START_STOP_UNIT 0x1B -#define SCSIOPC_READ_CAPACITY_10 0x25 -#define SCSIOPC_READ_CAPACITY_16 0x9E -#define SCSIOPC_READ_6 0x08 -#define SCSIOPC_READ_10 0x28 -#define SCSIOPC_READ_12 0xA8 -#define SCSIOPC_READ_16 0x88 -#define SCSIOPC_WRITE_6 0x0A -#define SCSIOPC_WRITE_10 0x2A -#define SCSIOPC_WRITE_12 0xAA -#define SCSIOPC_WRITE_16 0x8A -#define SCSIOPC_WRITE_VERIFY 0x2E -#define SCSIOPC_VERIFY_10 0x2F -#define SCSIOPC_VERIFY_12 0xAF -#define SCSIOPC_VERIFY_16 0x8F -#define SCSIOPC_REQUEST_SENSE 0x03 -#define SCSIOPC_REPORT_LUN 0xA0 -#define SCSIOPC_FORMAT_UNIT 0x04 -#define SCSIOPC_SEND_DIAGNOSTIC 0x1D -#define SCSIOPC_WRITE_SAME_10 0x41 -#define SCSIOPC_WRITE_SAME_16 0x93 -#define SCSIOPC_READ_BUFFER 0x3C -#define SCSIOPC_WRITE_BUFFER 0x3B - -#define SCSIOPC_LOG_SENSE 0x4D -#define SCSIOPC_MODE_SELECT_10 0x55 -#define SCSIOPC_SYNCHRONIZE_CACHE_10 0x35 -#define SCSIOPC_SYNCHRONIZE_CACHE_16 0x91 -#define SCSIOPC_WRITE_AND_VERIFY_10 0x2E -#define SCSIOPC_WRITE_AND_VERIFY_12 0xAE -#define SCSIOPC_WRITE_AND_VERIFY_16 0x8E -#define SCSIOPC_READ_MEDIA_SERIAL_NUMBER 0xAB -#define SCSIOPC_REASSIGN_BLOCKS 0x07 -#define SCSIOPC_ATA_PASSTHROUGH_16 0x85 -#define SCSIOPC_ATA_PASSTHROUGH_12 0xa1 - -/* - * SCSI end - */ -#define IS_READ_COMMAND(opcode) \ - ((opcode) == SCSIOPC_READ_6 || (opcode) == SCSIOPC_READ_10 || \ - (opcode) == SCSIOPC_READ_12 || (opcode) == SCSIOPC_READ_16) -#define IS_WRITE_COMMAND(opcode) \ - ((opcode) == SCSIOPC_WRITE_6 || (opcode) == SCSIOPC_WRITE_10 || \ - (opcode) == SCSIOPC_WRITE_12 || (opcode) == SCSIOPC_WRITE_16) - -#define IS_VERIFY_COMMAND(opcode) \ - ((opcode) == SCSIOPC_VERIFY_10 || (opcode) == SCSIOPC_VERIFY_12 || \ - (opcode) == SCSIOPC_VERIFY_16) - -#define FCP_RSP_LEN_VALID_MASK 0x1 -#define FCP_SNS_LEN_VALID_MASK 0x2 -#define FCP_RESID_OVER_MASK 0x4 -#define FCP_RESID_UNDER_MASK 0x8 -#define FCP_CONF_REQ_MASK 0x10 -#define FCP_SCSI_STATUS_GOOD 0x0 - -#define UNF_DELAYED_WORK_SYNC(ret, port_id, work, work_symb) \ - do { \ - if (!cancel_delayed_work_sync(work)) { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, \ - UNF_INFO, \ - "[info]LPort or RPort(0x%x) %s worker " \ - "can't destroy, or no " \ - "worker", \ - port_id, work_symb); \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = RETURN_OK; \ - } \ - } while (0) - -#define UNF_GET_SFS_ENTRY(pkg) ((union unf_sfs_u *)(void *)(((struct unf_frame_pkg *)(pkg)) \ - ->unf_cmnd_pload_bl.buffer_ptr)) -/* FLOGI */ -#define UNF_GET_FLOGI_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->flogi.flogi_payload)) -#define UNF_FLOGI_PAYLOAD_LEN sizeof(struct unf_flogi_fdisc_payload) - -/* FLOGI ACC */ -#define UNF_GET_FLOGI_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg))) \ - ->flogi_acc.flogi_payload)) -#define UNF_FLOGI_ACC_PAYLOAD_LEN sizeof(struct unf_flogi_fdisc_payload) - -/* FDISC */ -#define UNF_FDISC_PAYLOAD_LEN UNF_FLOGI_PAYLOAD_LEN -#define UNF_FDISC_ACC_PAYLOAD_LEN UNF_FLOGI_ACC_PAYLOAD_LEN - -/* PLOGI */ -#define UNF_GET_PLOGI_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->plogi.payload)) -#define UNF_PLOGI_PAYLOAD_LEN sizeof(struct unf_plogi_payload) - -/* PLOGI ACC */ -#define UNF_GET_PLOGI_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->plogi_acc.payload)) -#define UNF_PLOGI_ACC_PAYLOAD_LEN sizeof(struct unf_plogi_payload) - -/* LOGO */ -#define UNF_GET_LOGO_PAYLOAD(pkg) \ - (&(((union unf_sfs_u 
*)(UNF_GET_SFS_ENTRY(pkg)))->logo.payload)) -#define UNF_LOGO_PAYLOAD_LEN sizeof(struct unf_logo_payload) - -/* ECHO */ -#define UNF_GET_ECHO_PAYLOAD(pkg) \ - (((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->echo.echo_pld) - -/* ECHO PHYADDR */ -#define UNF_GET_ECHO_PAYLOAD_PHYADDR(pkg) \ - (((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->echo.phy_echo_addr) - -#define UNF_ECHO_PAYLOAD_LEN sizeof(struct unf_echo_payload) - -/* REC */ -#define UNF_GET_REC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->rec.rec_pld)) - -#define UNF_REC_PAYLOAD_LEN sizeof(struct unf_rec_pld) - -/* ECHO ACC */ -#define UNF_GET_ECHO_ACC_PAYLOAD(pkg) \ - (((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->echo_acc.echo_pld) -#define UNF_ECHO_ACC_PAYLOAD_LEN sizeof(struct unf_echo_payload) - -/* RRQ */ -#define UNF_GET_RRQ_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->rrq.cmnd)) -#define UNF_RRQ_PAYLOAD_LEN \ - (sizeof(struct unf_rrq) - sizeof(struct unf_fc_head)) - -/* PRLI */ -#define UNF_GET_PRLI_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->prli.payload)) -#define UNF_PRLI_PAYLOAD_LEN sizeof(struct unf_prli_payload) - -/* PRLI ACC */ -#define UNF_GET_PRLI_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->prli_acc.payload)) -#define UNF_PRLI_ACC_PAYLOAD_LEN sizeof(struct unf_prli_payload) - -/* PRLO */ -#define UNF_GET_PRLO_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->prlo.payload)) -#define UNF_PRLO_PAYLOAD_LEN sizeof(struct unf_prli_payload) - -#define UNF_GET_PRLO_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->prlo_acc.payload)) -#define UNF_PRLO_ACC_PAYLOAD_LEN sizeof(struct unf_prli_payload) - -/* PDISC */ -#define UNF_GET_PDISC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->pdisc.payload)) -#define UNF_PDISC_PAYLOAD_LEN sizeof(struct unf_plogi_payload) - -/* PDISC ACC */ -#define UNF_GET_PDISC_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->pdisc_acc.payload)) -#define UNF_PDISC_ACC_PAYLOAD_LEN sizeof(struct unf_plogi_payload) - -/* ADISC */ -#define UNF_GET_ADISC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->adisc.adisc_payl)) -#define UNF_ADISC_PAYLOAD_LEN sizeof(struct unf_adisc_payload) - -/* ADISC ACC */ -#define UNF_GET_ADISC_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->adisc_acc.adisc_payl)) -#define UNF_ADISC_ACC_PAYLOAD_LEN sizeof(struct unf_adisc_payload) - -/* RSCN ACC */ -#define UNF_GET_RSCN_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->els_acc.cmnd)) -#define UNF_RSCN_ACC_PAYLOAD_LEN \ - (sizeof(struct unf_els_acc) - sizeof(struct unf_fc_head)) - -/* LOGO ACC */ -#define UNF_GET_LOGO_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->els_acc.cmnd)) -#define UNF_LOGO_ACC_PAYLOAD_LEN \ - (sizeof(struct unf_els_acc) - sizeof(struct unf_fc_head)) - -/* RRQ ACC */ -#define UNF_GET_RRQ_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->els_acc.cmnd)) -#define UNF_RRQ_ACC_PAYLOAD_LEN \ - (sizeof(struct unf_els_acc) - sizeof(struct unf_fc_head)) - -/* REC ACC */ -#define UNF_GET_REC_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->els_acc.cmnd)) -#define UNF_REC_ACC_PAYLOAD_LEN \ - (sizeof(struct unf_els_acc) - sizeof(struct unf_fc_head)) - -/* GPN_ID */ -#define UNF_GET_GPNID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gpn_id.ctiu_pream)) -#define UNF_GPNID_PAYLOAD_LEN \ - 
(sizeof(struct unf_gpnid) - sizeof(struct unf_fc_head)) - -#define UNF_GET_GPNID_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gpn_id_rsp.ctiu_pream)) -#define UNF_GPNID_RSP_PAYLOAD_LEN \ - (sizeof(struct unf_gpnid_rsp) - sizeof(struct unf_fc_head)) - -/* GNN_ID */ -#define UNF_GET_GNNID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gnn_id.ctiu_pream)) -#define UNF_GNNID_PAYLOAD_LEN \ - (sizeof(struct unf_gnnid) - sizeof(struct unf_fc_head)) - -#define UNF_GET_GNNID_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gnn_id_rsp.ctiu_pream)) -#define UNF_GNNID_RSP_PAYLOAD_LEN \ - (sizeof(struct unf_gnnid_rsp) - sizeof(struct unf_fc_head)) - -/* GFF_ID */ -#define UNF_GET_GFFID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gff_id.ctiu_pream)) -#define UNF_GFFID_PAYLOAD_LEN \ - (sizeof(struct unf_gffid) - sizeof(struct unf_fc_head)) - -#define UNF_GET_GFFID_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gff_id_rsp.ctiu_pream)) -#define UNF_GFFID_RSP_PAYLOAD_LEN \ - (sizeof(struct unf_gffid_rsp) - sizeof(struct unf_fc_head)) - -/* GID_FT/GID_PT */ -#define UNF_GET_GID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg)) \ - ->get_id.gid_req.ctiu_pream)) - -#define UNF_GID_PAYLOAD_LEN (sizeof(struct unf_ctiu_prem) + sizeof(u32)) -#define UNF_GET_GID_ACC_PAYLOAD(pkg) \ - (((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg)) \ - ->get_id.gid_rsp.gid_acc_pld) -#define UNF_GID_ACC_PAYLOAD_LEN sizeof(struct unf_gid_acc_pld) - -/* RFT_ID */ -#define UNF_GET_RFTID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->rft_id.ctiu_pream)) -#define UNF_RFTID_PAYLOAD_LEN \ - (sizeof(struct unf_rftid) - sizeof(struct unf_fc_head)) - -#define UNF_GET_RFTID_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->rft_id_rsp.ctiu_pream)) -#define UNF_RFTID_RSP_PAYLOAD_LEN sizeof(struct unf_ctiu_prem) - -/* RFF_ID */ -#define UNF_GET_RFFID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->rff_id.ctiu_pream)) -#define UNF_RFFID_PAYLOAD_LEN \ - (sizeof(struct unf_rffid) - sizeof(struct unf_fc_head)) - -#define UNF_GET_RFFID_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->rff_id_rsp.ctiu_pream)) -#define UNF_RFFID_RSP_PAYLOAD_LEN sizeof(struct unf_ctiu_prem) - -/* ACC&RJT */ -#define UNF_GET_ELS_ACC_RJT_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->els_rjt.cmnd)) -#define UNF_ELS_ACC_RJT_LEN \ - (sizeof(struct unf_els_rjt) - sizeof(struct unf_fc_head)) - -/* SCR */ -#define UNF_SCR_PAYLOAD(pkg) \ - (((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->scr.payload) -#define UNF_SCR_PAYLOAD_LEN \ - (sizeof(struct unf_scr) - sizeof(struct unf_fc_head)) - -#define UNF_SCR_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->els_acc.cmnd)) -#define UNF_SCR_RSP_PAYLOAD_LEN \ - (sizeof(struct unf_els_acc) - sizeof(struct unf_fc_head)) - -#define UNF_GS_RSP_PAYLOAD_LEN \ - (sizeof(union unf_sfs_u) - sizeof(struct unf_fc_head)) - -#define UNF_GET_XCHG_TAG(pkg) \ - (((struct unf_frame_pkg *)(pkg)) \ - ->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) -#define UNF_GET_ABTS_XCHG_TAG(pkg) \ - ((u16)(((pkg)->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) >> 16)) -#define UNF_GET_IO_XCHG_TAG(pkg) \ - ((u16)((pkg)->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX])) - -#define UNF_GET_HOTPOOL_TAG(pkg) \ - (((struct unf_frame_pkg *)(pkg)) \ - ->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) -#define UNF_GET_SID(pkg) \ - (((struct unf_frame_pkg 
*)(pkg))->frame_head.csctl_sid & \ - UNF_NPORTID_MASK) -#define UNF_GET_DID(pkg) \ - (((struct unf_frame_pkg *)(pkg))->frame_head.rctl_did & \ - UNF_NPORTID_MASK) -#define UNF_GET_OXID(pkg) \ - (((struct unf_frame_pkg *)(pkg))->frame_head.oxid_rxid >> 16) -#define UNF_GET_RXID(pkg) \ - ((u16)((struct unf_frame_pkg *)(pkg))->frame_head.oxid_rxid) -#define UNF_GET_XID_RELEASE_TIMER(pkg) \ - (((struct unf_frame_pkg *)(pkg))->release_task_id_timer) -#define UNF_GETXCHGALLOCTIME(pkg) \ - (((struct unf_frame_pkg *)(pkg)) \ - ->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]) - -#define UNF_SET_XCHG_ALLOC_TIME(pkg, xchg) \ - (((struct unf_frame_pkg *)(pkg)) \ - ->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = \ - (((struct unf_xchg *)(xchg)) \ - ->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME])) -#define UNF_SET_ABORT_INFO_IOTYPE(pkg, xchg) \ - (((struct unf_frame_pkg *)(pkg)) \ - ->private_data[PKG_PRIVATE_XCHG_ABORT_INFO] |= \ - (((u8)(((struct unf_xchg *)(xchg))->data_direction & 0x7)) \ - << 2)) - -#define UNF_CHECK_NPORT_FPORT_BIT(els_payload) \ - (((struct unf_flogi_fdisc_payload *)(els_payload)) \ - ->fabric_parms.co_parms.nport) - -#define UNF_GET_RSP_BUF(pkg) \ - ((void *)(((struct unf_frame_pkg *)(pkg))->unf_rsp_pload_bl.buffer_ptr)) -#define UNF_GET_RSP_LEN(pkg) \ - (((struct unf_frame_pkg *)(pkg))->unf_rsp_pload_bl.length) - -#define UNF_N_PORT 0 -#define UNF_F_PORT 1 - -#define UNF_GET_RA_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.r_a_tov) -#define UNF_GET_RT_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.r_t_tov) -#define UNF_GET_E_D_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.e_d_tov) -#define UNF_GET_E_D_TOV_RESOLUTION_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.e_d_tov_resolution) -#define UNF_GET_BB_SC_N_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.bbscn) -#define UNF_GET_BB_CREDIT_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.bb_credit) - -enum unf_pcie_error_code { - UNF_PCIE_ERROR_NONE = 0, - UNF_PCIE_DATAPARITYDETECTED = 1, - UNF_PCIE_SIGNALTARGETABORT, - UNF_PCIE_RECEIVEDTARGETABORT, - UNF_PCIE_RECEIVEDMASTERABORT, - UNF_PCIE_SIGNALEDSYSTEMERROR, - UNF_PCIE_DETECTEDPARITYERROR, - UNF_PCIE_CORRECTABLEERRORDETECTED, - UNF_PCIE_NONFATALERRORDETECTED, - UNF_PCIE_FATALERRORDETECTED, - UNF_PCIE_UNSUPPORTEDREQUESTDETECTED, - UNF_PCIE_AUXILIARYPOWERDETECTED, - UNF_PCIE_TRANSACTIONSPENDING, - - UNF_PCIE_UNCORRECTINTERERRSTATUS, - UNF_PCIE_UNSUPPORTREQERRSTATUS, - UNF_PCIE_ECRCERRORSTATUS, - UNF_PCIE_MALFORMEDTLPSTATUS, - UNF_PCIE_RECEIVEROVERFLOWSTATUS, - UNF_PCIE_UNEXPECTCOMPLETESTATUS, - UNF_PCIE_COMPLETERABORTSTATUS, - UNF_PCIE_COMPLETIONTIMEOUTSTATUS, - UNF_PCIE_FLOWCTRLPROTOCOLERRSTATUS, - UNF_PCIE_POISONEDTLPSTATUS, - UNF_PCIE_SURPRISEDOWNERRORSTATUS, - UNF_PCIE_DATALINKPROTOCOLERRSTATUS, - UNF_PCIE_ADVISORYNONFATALERRSTATUS, - UNF_PCIE_REPLAYTIMERTIMEOUTSTATUS, - UNF_PCIE_REPLAYNUMROLLOVERSTATUS, - UNF_PCIE_BADDLLPSTATUS, - UNF_PCIE_BADTLPSTATUS, - UNF_PCIE_RECEIVERERRORSTATUS, - - UNF_PCIE_BUTT -}; - -#define UNF_DMA_HI32(a) (((a) >> 32) & 0xffffffff) -#define UNF_DMA_LO32(a) ((a) & 0xffffffff) - -#define UNF_WWN_LEN 8 -#define UNF_MAC_LEN 6 - -/* send BLS/ELS/BLS REPLY/ELS REPLY/GS/ */ -/* rcvd BLS/ELS/REQ DONE/REPLY DONE */ -#define UNF_PKG_BLS_REQ 0x0100 -#define UNF_PKG_BLS_REQ_DONE 0x0101 -#define UNF_PKG_BLS_REPLY 0x0102 -#define UNF_PKG_BLS_REPLY_DONE 0x0103 - -#define 
UNF_PKG_ELS_REQ 0x0200 -#define UNF_PKG_ELS_REQ_DONE 0x0201 - -#define UNF_PKG_ELS_REPLY 0x0202 -#define UNF_PKG_ELS_REPLY_DONE 0x0203 - -#define UNF_PKG_GS_REQ 0x0300 -#define UNF_PKG_GS_REQ_DONE 0x0301 - -#define UNF_PKG_TGT_XFER 0x0400 -#define UNF_PKG_TGT_RSP 0x0401 -#define UNF_PKG_TGT_RSP_NOSGL 0x0402 -#define UNF_PKG_TGT_RSP_STATUS 0x0403 - -#define UNF_PKG_INI_IO 0x0500 -#define UNF_PKG_INI_RCV_TGT_RSP 0x0507 - -/* external sgl struct start */ -struct unf_esgl_page { - u64 page_address; - dma_addr_t esgl_phy_addr; - u32 page_size; -}; - -/* external sgl struct end */ -struct unf_esgl { - struct list_head entry_esgl; - struct unf_esgl_page page; -}; - -#define UNF_RESPONE_DATA_LEN 8 -struct unf_frame_payld { - u8 *buffer_ptr; - dma_addr_t buf_dma_addr; - u32 length; -}; - -enum pkg_private_index { - PKG_PRIVATE_LOWLEVEL_XCHG_ADD = 0, - PKG_PRIVATE_XCHG_HOT_POOL_INDEX = 1, /* Hot Pool Index */ - PKG_PRIVATE_XCHG_RPORT_INDEX = 2, /* RPort index */ - PKG_PRIVATE_XCHG_VP_INDEX = 3, /* VPort index */ - PKG_PRIVATE_XCHG_SSQ_INDEX, - PKG_PRIVATE_RPORT_RX_SIZE, - PKG_PRIVATE_XCHG_TIMEER, - PKG_PRIVATE_XCHG_ALLOC_TIME, - PKG_PRIVATE_XCHG_ABORT_INFO, - PKG_PRIVATE_ECHO_CMD_SND_TIME, /* local send echo cmd time stamp */ - PKG_PRIVATE_ECHO_ACC_RCV_TIME, /* local receive echo acc time stamp */ - PKG_PRIVATE_ECHO_CMD_RCV_TIME, /* remote receive echo cmd time stamp */ - PKG_PRIVATE_ECHO_RSP_SND_TIME, /* remote send echo rsp time stamp */ - PKG_MAX_PRIVATE_DATA_SIZE -}; - -extern u32 dix_flag; -extern u32 dif_sgl_mode; -extern u32 dif_app_esc_check; -extern u32 dif_ref_esc_check; - -#define UNF_DIF_ACTION_NONE 0 - -enum unf_adm_dif_mode_E { - UNF_SWITCH_DIF_DIX = 0, - UNF_APP_REF_ESCAPE, - ALL_DIF_MODE = 20, -}; - -#define UNF_DIF_CRC_ERR 0x1001 -#define UNF_DIF_APP_ERR 0x1002 -#define UNF_DIF_LBA_ERR 0x1003 - -#define UNF_VERIFY_CRC_MASK (1 << 1) -#define UNF_VERIFY_APP_MASK (1 << 2) -#define UNF_VERIFY_LBA_MASK (1 << 3) - -#define UNF_REPLACE_CRC_MASK (1 << 8) -#define UNF_REPLACE_APP_MASK (1 << 9) -#define UNF_REPLACE_LBA_MASK (1 << 10) - -#define UNF_DIF_ACTION_MASK (0xff << 16) -#define UNF_DIF_ACTION_INSERT (0x1 << 16) -#define UNF_DIF_ACTION_VERIFY_AND_DELETE (0x2 << 16) -#define UNF_DIF_ACTION_VERIFY_AND_FORWARD (0x3 << 16) -#define UNF_DIF_ACTION_VERIFY_AND_REPLACE (0x4 << 16) - -#define UNF_DIF_ACTION_NO_INCREASE_REFTAG (0x1 << 24) - -#define UNF_DEFAULT_CRC_GUARD_SEED (0) -#define UNF_CAL_512_BLOCK_CNT(data_len) ((data_len) >> 9) -#define UNF_CAL_BLOCK_CNT(data_len, sector_size) ((data_len) / (sector_size)) -#define UNF_CAL_CRC_BLK_CNT(crc_data_len, sector_size) \ - ((crc_data_len) / ((sector_size) + 8)) - -#define UNF_DIF_DOUBLE_SGL (1 << 1) -#define UNF_DIF_SECTSIZE_4KB (1 << 2) -#define UNF_DIF_SECTSIZE_512 (0 << 2) -#define UNF_DIF_LBA_NONE_INCREASE (1 << 3) -#define UNF_DIF_TYPE3 (1 << 4) - -#define SECTOR_SIZE_512 512 -#define SECTOR_SIZE_4096 4096 -#define SPFC_DIF_APP_REF_ESC_NOT_CHECK 1 -#define SPFC_DIF_APP_REF_ESC_CHECK 0 - -struct unf_dif { - u16 crc; - u16 app_tag; - u32 lba; -}; - -enum unf_io_state { UNF_INI_IO = 0, UNF_TGT_XFER = 1, UNF_TGT_RSP = 2 }; - -#define UNF_PKG_LAST_RESPONSE 0 -#define UNF_PKG_NOT_LAST_RESPONSE 1 - -struct unf_frame_pkg { - /* pkt type:BLS/ELS/FC4LS/CMND/XFER/RSP */ - u32 type; - u32 last_pkg_flag; - u32 fcp_conf_flag; - -#define UNF_FCP_RESPONSE_VALID 0x01 -#define UNF_FCP_SENSE_VALID 0x02 - u32 response_and_sense_valid_flag; /* resp and sense vailed flag */ - u32 cmnd; - struct unf_fc_head frame_head; - u32 entry_count; - void *xchg_contex; - 
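A note on the DIF length macros above: with T10 protection information each logical block carries an extra 8-byte tuple (guard CRC, application tag, reference tag), which is why UNF_CAL_CRC_BLK_CNT() divides by sector_size + 8. The same arithmetic as a sketch (function name is illustrative):

	#include <stdint.h>

	/* Bytes on the wire for a PI-protected transfer of data_len bytes */
	static uint32_t pi_protected_len(uint32_t data_len, uint32_t sector_size)
	{
		uint32_t blocks = data_len / sector_size; /* cf. UNF_CAL_BLOCK_CNT */

		return data_len + blocks * 8; /* one 8-byte PI tuple per block */
	}

For a 4 KiB transfer of 512-byte sectors this yields 4096 + 8 * 8 = 4160 bytes, and UNF_CAL_CRC_BLK_CNT(4160, 512) recovers the original 8 blocks.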
u32 transfer_len; - u32 residus_len; - u32 status; - u32 status_sub_code; - enum unf_io_state io_state; - u32 qos_level; - u32 private_data[PKG_MAX_PRIVATE_DATA_SIZE]; - struct unf_fcp_cmnd *fcp_cmnd; - struct unf_dif_control_info dif_control; - struct unf_frame_payld unf_cmnd_pload_bl; - struct unf_frame_payld unf_rsp_pload_bl; - struct unf_frame_payld unf_sense_pload_bl; - void *upper_cmd; - u32 abts_maker_status; - u32 release_task_id_timer; - u8 byte_orders; - u8 rx_or_ox_id; - u8 class_mode; - u8 rsvd; - u8 *peresp; - u32 rcvrsp_len; - ulong timeout; - u32 origin_hottag; - u32 origin_magicnum; -}; - -#define UNF_MAX_SFS_XCHG 2048 -#define UNF_RESERVE_SFS_XCHG 128 /* times on exchange mgr num */ - -struct unf_lport_cfg_item { - u32 port_id; - u32 port_mode; /* INI(0x20), TGT(0x10), BOTH(0x30) */ - u32 port_topology; /* 0x3:loop , 0xc:p2p ,0xf:auto */ - u32 max_queue_depth; - u32 max_io; /* Recommended Value 512-4096 */ - u32 max_login; - u32 max_sfs_xchg; - u32 port_speed; /* 0:auto 1:1Gbps 2:2Gbps 4:4Gbps 8:8Gbps 16:16Gbps */ - u32 tape_support; /* ape support */ - u32 fcp_conf; /* fcp confirm support */ - u32 bbscn; -}; - -struct unf_port_dynamic_info { - u32 sfp_posion; - u32 sfp_valid; - u32 phy_link; - u32 firmware_state; - u32 cur_speed; - u32 mailbox_timeout_cnt; -}; - -struct unf_port_intr_coalsec { - u32 delay_timer; - u32 depth; -}; - -struct unf_port_topo { - u32 topo_cfg; - enum unf_act_topo topo_act; -}; - -struct unf_port_transfer_para { - u32 type; - u32 value; -}; - -struct unf_buf { - u8 *buf; - u32 buf_len; -}; - -/* get ucode & up ver */ -#define SPFC_VER_LEN (16) -#define SPFC_COMPILE_TIME_LEN (20) -struct unf_fw_version { - u32 message_type; - u8 fw_version[SPFC_VER_LEN]; -}; - -struct unf_port_wwn { - u64 sys_port_wwn; - u64 sys_node_name; -}; - -enum unf_port_config_set_op { - UNF_PORT_CFG_SET_SPEED, - UNF_PORT_CFG_SET_PORT_SWITCH, - UNF_PORT_CFG_SET_POWER_STATE, - UNF_PORT_CFG_SET_PORT_STATE, - UNF_PORT_CFG_UPDATE_WWN, - UNF_PORT_CFG_TEST_FLASH, - UNF_PORT_CFG_UPDATE_FABRIC_PARAM, - UNF_PORT_CFG_UPDATE_PLOGI_PARAM, - UNF_PORT_CFG_SET_BUTT -}; - -enum unf_port_cfg_get_op { - UNF_PORT_CFG_GET_TOPO_ACT, - UNF_PORT_CFG_GET_LOOP_MAP, - UNF_PORT_CFG_GET_SFP_PRESENT, - UNF_PORT_CFG_GET_FW_VER, - UNF_PORT_CFG_GET_HW_VER, - UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, - UNF_PORT_CFG_GET_WORKBALE_BBSCN, - UNF_PORT_CFG_GET_FC_SERDES, - UNF_PORT_CFG_GET_LOOP_ALPA, - UNF_PORT_CFG_GET_MAC_ADDR, - UNF_PORT_CFG_GET_SFP_VER, - UNF_PORT_CFG_GET_SFP_SUPPORT_UPDATE, - UNF_PORT_CFG_GET_SFP_LOG, - UNF_PORT_CFG_GET_PCIE_LINK_STATE, - UNF_PORT_CFG_GET_FLASH_DATA_INFO, - UNF_PORT_CFG_GET_BUTT, -}; - -enum unf_port_config_state { - UNF_PORT_CONFIG_STATE_START, - UNF_PORT_CONFIG_STATE_STOP, - UNF_PORT_CONFIG_STATE_RESET, - UNF_PORT_CONFIG_STATE_STOP_INTR, - UNF_PORT_CONFIG_STATE_BUTT -}; - -enum unf_port_config_update { - UNF_PORT_CONFIG_UPDATE_FW_MINIMUM, - UNF_PORT_CONFIG_UPDATE_FW_ALL, - UNF_PORT_CONFIG_UPDATE_BUTT -}; - -enum unf_disable_vp_mode { - UNF_DISABLE_VP_MODE_ONLY = 0x8, - UNF_DISABLE_VP_MODE_REINIT_LINK = 0x9, - UNF_DISABLE_VP_MODE_NOFAB_LOGO = 0xA, - UNF_DISABLE_VP_MODE_LOGO_ALL = 0xB -}; - -struct unf_vport_info { - u16 vp_index; - u64 node_name; - u64 port_name; - u32 port_mode; /* INI, TGT or both */ - enum unf_disable_vp_mode disable_mode; - u32 nport_id; /* maybe acquired by lowlevel and update to common */ - void *vport; -}; - -struct unf_port_login_parms { - enum unf_act_topo act_topo; - - u32 rport_index; - u32 seq_cnt : 1; - u32 ed_tov : 1; - u32 reserved : 14; - u32 
tx_mfs : 16; - u32 ed_tov_timer_val; - - u8 remote_rttov_tag; - u8 remote_edtov_tag; - u16 remote_bb_credit; - u16 compared_bbscn; - u32 compared_edtov_val; - u32 compared_ratov_val; - u32 els_cmnd_code; -}; - -struct unf_mbox_head_info { - /* mbox header */ - u8 cmnd_type; - u8 length; - u8 port_id; - u8 pad0; - - /* operation */ - u32 opcode : 4; - u32 pad1 : 28; -}; - -struct unf_mbox_head_sts { - /* mbox header */ - u8 cmnd_type; - u8 length; - u8 port_id; - u8 pad0; - - /* operation */ - u16 pad1; - u8 pad2; - u8 status; -}; - -struct unf_low_level_service_op { - u32 (*unf_ls_gs_send)(void *hba, struct unf_frame_pkg *pkg); - u32 (*unf_bls_send)(void *hba, struct unf_frame_pkg *pkg); - u32 (*unf_cmnd_send)(void *hba, struct unf_frame_pkg *pkg); - u32 (*unf_rsp_send)(void *handle, struct unf_frame_pkg *pkg); - u32 (*unf_release_rport_res)(void *handle, struct unf_port_info *rport_info); - u32 (*unf_flush_ini_resp_que)(void *handle); - u32 (*unf_alloc_rport_res)(void *handle, struct unf_port_info *rport_info); - u32 (*ll_release_xid)(void *handle, struct unf_frame_pkg *pkg); - u32 (*unf_xfer_send)(void *handle, struct unf_frame_pkg *pkg); -}; - -struct unf_low_level_port_mgr_op { - /* fcport/opcode/input parameter */ - u32 (*ll_port_config_set)(void *fc_port, enum unf_port_config_set_op opcode, void *para_in); - - /* fcport/opcode/output parameter */ - u32 (*ll_port_config_get)(void *fc_port, enum unf_port_cfg_get_op opcode, void *para_out); -}; - -struct unf_chip_info { - u8 chip_type; - u8 chip_work_mode; - u8 disable_err_flag; -}; - -struct unf_low_level_functioon_op { - struct unf_chip_info chip_info; - /* low level type */ - u32 low_level_type; - const char *name; - struct pci_dev *dev; - u64 sys_node_name; - u64 sys_port_name; - struct unf_lport_cfg_item lport_cfg_items; -#define UNF_LOW_LEVEL_MGR_TYPE_ACTIVE 0 -#define UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE 1 - const u32 xchg_mgr_type; - -#define UNF_NO_EXTRA_ABTS_XCHG 0x0 -#define UNF_LL_IOC_ABTS_XCHG 0x1 - const u32 abts_xchg; - -#define UNF_CM_RPORT_SET_QUALIFIER 0x0 -#define UNF_CM_RPORT_SET_QUALIFIER_REUSE 0x1 -#define UNF_CM_RPORT_SET_QUALIFIER_SPFC 0x2 - - /* low level pass-through flag. 
*/ -#define UNF_LOW_LEVEL_PASS_THROUGH_FIP 0x0 -#define UNF_LOW_LEVEL_PASS_THROUGH_FABRIC_LOGIN 0x1 -#define UNF_LOW_LEVEL_PASS_THROUGH_PORT_LOGIN 0x2 - u32 passthrough_flag; - - /* low level parameter */ - u32 support_max_npiv_num; - u32 support_max_ssq_num; - u32 support_max_speed; - u32 support_min_speed; - u32 fc_ser_max_speed; - - u32 support_max_rport; - - u32 support_max_hot_tag_range; - u32 sfp_type; - u32 update_fw_reset_active; - u32 support_upgrade_report; - u32 multi_conf_support; - u32 port_type; -#define UNF_LOW_LEVEL_RELEASE_RPORT_SYNC 0x0 -#define UNF_LOW_LEVEL_RELEASE_RPORT_ASYNC 0x1 - u8 rport_release_type; -#define UNF_LOW_LEVEL_SIRT_PAGE_MODE_FIXED 0x0 -#define UNF_LOW_LEVEL_SIRT_PAGE_MODE_XCHG 0x1 - u8 sirt_page_mode; - u8 sfp_speed; - - /* IO reference */ - struct unf_low_level_service_op service_op; - - /* Port Mgr reference */ - struct unf_low_level_port_mgr_op port_mgr_op; - - u8 chip_id; -}; - -struct unf_cm_handle_op { - /* return:L_Port */ - void *(*unf_alloc_local_port)(void *private_data, - struct unf_low_level_functioon_op *low_level_op); - - /* input para:L_Port */ - u32 (*unf_release_local_port)(void *lport); - - /* input para:L_Port, FRAME_PKG_S */ - u32 (*unf_receive_ls_gs_pkg)(void *lport, struct unf_frame_pkg *pkg); - - /* input para:L_Port, FRAME_PKG_S */ - u32 (*unf_receive_bls_pkg)(void *lport, struct unf_frame_pkg *pkg); - /* input para:L_Port, FRAME_PKG_S */ - u32 (*unf_send_els_done)(void *lport, struct unf_frame_pkg *pkg); - - /* input para:L_Port, FRAME_PKG_S */ - u32 (*unf_receive_marker_status)(void *lport, struct unf_frame_pkg *pkg); - u32 (*unf_receive_abts_marker_status)(void *lport, struct unf_frame_pkg *pkg); - /* input para:L_Port, FRAME_PKG_S */ - u32 (*unf_receive_ini_response)(void *lport, struct unf_frame_pkg *pkg); - - int (*unf_get_cfg_parms)(char *section_name, - struct unf_cfg_item *cfg_parm, u32 *cfg_value, - u32 item_num); - - /* TGT IO interface */ - u32 (*unf_process_fcp_cmnd)(void *lport, struct unf_frame_pkg *pkg); - - /* TGT IO Done */ - u32 (*unf_tgt_cmnd_xfer_or_rsp_echo)(void *lport, struct unf_frame_pkg *pkg); - - u32 (*unf_cm_get_sgl_entry)(void *pkg, char **buf, u32 *buf_len); - u32 (*unf_cm_get_dif_sgl_entry)(void *pkg, char **buf, u32 *buf_len); - - struct unf_esgl_page *(*unf_get_one_free_esgl_page)(void *lport, struct unf_frame_pkg *pkg); - - /* input para:L_Port, EVENT */ - u32 (*unf_fc_port_event)(void *lport, u32 events, void *input); - - int (*unf_drv_start_work)(void *lport); - - void (*unf_card_rport_chip_err)(struct pci_dev const *pci_dev); -}; - -u32 unf_get_cm_handle_ops(struct unf_cm_handle_op *cm_handle); -int unf_common_init(void); -void unf_common_exit(void); - -#endif diff --git a/drivers/scsi/spfc/common/unf_disc.c b/drivers/scsi/spfc/common/unf_disc.c deleted file mode 100644 index c48d0ba670d4d11ba99475005321cb64a56619e7..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_disc.c +++ /dev/null @@ -1,1276 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_disc.h" -#include "unf_log.h" -#include "unf_common.h" -#include "unf_event.h" -#include "unf_lport.h" -#include "unf_rport.h" -#include "unf_exchg.h" -#include "unf_ls.h" -#include "unf_gs.h" -#include "unf_portman.h" - -#define UNF_LIST_RSCN_PAGE_CNT 2560 -#define UNF_MAX_PORTS_PRI_LOOP 2 -#define UNF_MAX_GS_SEND_NUM 8 -#define UNF_OS_REMOVE_CARD_TIMEOUT (60 * 1000) - -static void unf_set_disc_state(struct unf_disc *disc, - enum unf_disc_state states) -{ 
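unf_set_disc_state() here encodes a small but deliberate policy: the retry counter is zeroed whenever the discovery state actually changes, so each discovery stage gets a fresh retry budget rather than sharing a single global count. The same pattern in isolation (struct and field names are illustrative, not the driver's):

	struct disc_sm {
		int state;
		unsigned int retry_count;
	};

	static void disc_sm_set_state(struct disc_sm *sm, int new_state)
	{
		if (sm->state != new_state)
			sm->retry_count = 0; /* new stage: fresh retry budget */

		sm->state = new_state;
	}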
- FC_CHECK_RETURN_VOID(disc); - - if (states != disc->states) { - /* Reset disc retry count */ - disc->retry_count = 0; - } - - disc->states = states; -} - -static inline u32 unf_get_loop_map(struct unf_lport *lport, u8 loop_map[], u32 loop_map_size) -{ - struct unf_buf buf = {0}; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport->low_level_func.port_mgr_op.ll_port_config_get, - UNF_RETURN_ERROR); - - buf.buf = loop_map; - buf.buf_len = loop_map_size; - - ret = lport->low_level_func.port_mgr_op.ll_port_config_get(lport->fc_port, - UNF_PORT_CFG_GET_LOOP_MAP, - (void *)&buf); - return ret; -} - -static void unf_login_with_loop_node(struct unf_lport *lport, u32 alpa) -{ - /* Only used for Private Loop LOGIN */ - struct unf_rport *unf_rport = NULL; - ulong rport_flag = 0; - u32 port_feature = 0; - u32 ret; - - /* Check AL_PA validity */ - if (lport->nport_id == alpa) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) is the same as RPort with AL_PA(0x%x), do nothing", - lport->port_id, alpa); - return; - } - - if (alpa == 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) is fabric, do nothing", - lport->port_id, alpa); - return; - } - - /* Get & set R_Port: reuse only */ - unf_rport = unf_get_rport_by_nport_id(lport, alpa); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) RPort(0x%x_0x%p) login with private loop", - lport->port_id, lport->nport_id, alpa, unf_rport); - - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, alpa); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) allocate new RPort(0x%x) failed", - lport->port_id, lport->nport_id, alpa); - return; - } - - /* Update R_Port state & N_Port_ID */ - spin_lock_irqsave(&unf_rport->rport_state_lock, rport_flag); - unf_rport->nport_id = alpa; - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, rport_flag); - - /* Private Loop: check whether need delay to send PLOGI or not */ - port_feature = unf_rport->options; - - /* check Rport and Lport feature */ - if (port_feature == UNF_PORT_MODE_UNKNOWN && - lport->options == UNF_PORT_MODE_INI) { - /* Start to send PLOGI */ - ret = unf_send_plogi(lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI to RPort(0x%x) failed", - lport->port_id, lport->nport_id, unf_rport->nport_id); - - unf_rport_error_recovery(unf_rport); - } - } else { - unf_check_rport_need_delay_plogi(lport, unf_rport, port_feature); - } -} - -static int unf_discover_private_loop(void *arg_in, void *arg_out) -{ - struct unf_lport *unf_lport = (struct unf_lport *)arg_in; - u32 ret = UNF_RETURN_ERROR; - u32 i = 0; - u8 loop_id = 0; - u32 alpa_index = 0; - u8 loop_map[UNF_LOOPMAP_COUNT]; - - FC_CHECK_RETURN_VALUE(unf_lport, UNF_RETURN_ERROR); - memset(loop_map, 0x0, UNF_LOOPMAP_COUNT); - - /* Get Port Loop Map */ - ret = unf_get_loop_map(unf_lport, loop_map, UNF_LOOPMAP_COUNT); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) get loop map failed", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Check Loop Map Ports Count */ - if (loop_map[ARRAY_INDEX_0] > UNF_MAX_PORTS_PRI_LOOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has more than %d ports(%u) in private loop", - unf_lport->port_id, UNF_MAX_PORTS_PRI_LOOP, loop_map[ARRAY_INDEX_0]); - - return UNF_RETURN_ERROR; - } - - /* 
AL_PA = 0 means Public Loop */ - if (loop_map[ARRAY_INDEX_1] == UNF_FL_PORT_LOOP_ADDR || - loop_map[ARRAY_INDEX_2] == UNF_FL_PORT_LOOP_ADDR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) one or more AL_PA is 0x00, indicate it's FL_Port", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Discovery Private Loop Ports */ - for (i = 0; i < loop_map[ARRAY_INDEX_0]; i++) { - alpa_index = i + 1; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) start to disc(0x%x) with count(0x%x)", - unf_lport->port_id, loop_map[alpa_index], i); - - /* Check whether need delay to send PLOGI or not */ - loop_id = loop_map[alpa_index]; - unf_login_with_loop_node(unf_lport, (u32)loop_id); - } - - return RETURN_OK; -} - -u32 unf_disc_start(void *lport) -{ - /* - * Call by: - * 1. Enter Private Loop Login - * 2. Analysis RSCN payload - * 3. SCR callback - * * - * Doing: - * Fabric/Public Loop: Send GID_PT - * Private Loop: (delay to) send PLOGI or send LOGO immediately - * P2P: do nothing - */ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_rport *unf_rport = NULL; - struct unf_disc *disc = NULL; - struct unf_cm_event_report *event = NULL; - u32 ret = RETURN_OK; - ulong flag = 0; - enum unf_act_topo act_topo = UNF_ACT_TOP_UNKNOWN; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - act_topo = unf_lport->act_topo; - disc = &unf_lport->disc; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x) with topo(0x%x) begin to discovery", - unf_lport->port_id, act_topo); - - if (act_topo == UNF_ACT_TOP_P2P_FABRIC || - act_topo == UNF_ACT_TOP_PUBLIC_LOOP) { - /* 1. Fabric or Public Loop Topology: for directory server */ - unf_rport = unf_get_rport_by_nport_id(unf_lport, - UNF_FC_FID_DIR_SERV); /* 0xfffffc */ - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) unable to get SNS RPort(0xfffffc)", - unf_lport->port_id); - - unf_rport = unf_rport_get_free_and_init(unf_lport, UNF_PORT_TYPE_FC, - UNF_FC_FID_DIR_SERV); - if (!unf_rport) - return UNF_RETURN_ERROR; - - unf_rport->nport_id = UNF_FC_FID_DIR_SERV; - } - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_set_disc_state(disc, UNF_DISC_ST_START); /* disc start */ - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_NORMAL_ENTER); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* - * NOTE: Send GID_PT - * The Name Server shall, when it receives a GID_PT request, - * return all Port Identifiers having registered support for the - * specified Port Type. One or more Port Identifiers, having - * registered as the specified Port Type, are returned. 
- */ - ret = unf_send_gid_pt(unf_lport, unf_rport); - if (ret != RETURN_OK) - unf_disc_error_recovery(unf_lport); - } else if (act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { - /* Private Loop: to thread process */ - event = unf_get_one_event_node(unf_lport); - FC_CHECK_RETURN_VALUE(event, UNF_RETURN_ERROR); - - event->lport = unf_lport; - event->event_asy_flag = UNF_EVENT_ASYN; - event->unf_event_task = unf_discover_private_loop; - event->para_in = (void *)unf_lport; - - unf_post_one_event_node(unf_lport, event); - } else { - /* P2P toplogy mode: Do nothing */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) with topo(0x%x) need do nothing", - unf_lport->port_id, act_topo); - } - - return ret; -} - -static u32 unf_disc_stop(void *lport) -{ - /* Call by GID_ACC processer */ - struct unf_lport *unf_lport = NULL; - struct unf_lport *root_lport = NULL; - struct unf_rport *sns_port = NULL; - struct unf_disc_rport *disc_rport = NULL; - struct unf_disc *disc = NULL; - struct unf_disc *root_disc = NULL; - struct list_head *node = NULL; - ulong flag = 0; - u32 ret = RETURN_OK; - u32 nport_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - unf_lport = (struct unf_lport *)lport; - disc = &unf_lport->disc; - root_lport = (struct unf_lport *)unf_lport->root_lport; - root_disc = &root_lport->disc; - - /* Get R_Port for Directory server */ - sns_port = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find fabric RPort(0xfffffc) failed", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* for R_Port from disc pool busy list */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - if (list_empty(&disc->disc_rport_mgr.list_disc_rports_busy)) { - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - return RETURN_OK; - } - - node = UNF_OS_LIST_NEXT(&disc->disc_rport_mgr.list_disc_rports_busy); - do { - /* Delete from Disc busy list */ - disc_rport = list_entry(node, struct unf_disc_rport, entry_rport); - nport_id = disc_rport->nport_id; - list_del_init(node); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Add back to (free) Disc R_Port pool (list) */ - spin_lock_irqsave(&root_disc->rport_busy_pool_lock, flag); - list_add_tail(node, &root_disc->disc_rport_mgr.list_disc_rports_pool); - spin_unlock_irqrestore(&root_disc->rport_busy_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x_0x%x) remove nportid:0x%x from rportbusy list", - unf_lport->port_id, unf_lport->nport_id, disc_rport->nport_id); - /* Send GNN_ID to Name Server */ - ret = unf_get_and_post_disc_event(unf_lport, sns_port, nport_id, - UNF_DISC_GET_NODE_NAME); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - unf_lport->nport_id, UNF_DISC_GET_NODE_NAME, nport_id); - - /* NOTE: go to next stage */ - unf_rcv_gnn_id_rsp_unknown(unf_lport, sns_port, nport_id); - } - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - node = UNF_OS_LIST_NEXT(&disc->disc_rport_mgr.list_disc_rports_busy); - } while (node != &disc->disc_rport_mgr.list_disc_rports_busy); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - return ret; -} - -static u32 unf_init_rport_pool(struct unf_lport *lport) -{ - struct unf_rport_pool *rport_pool = NULL; - struct unf_rport *unf_rport = NULL; - u32 ret = RETURN_OK; - u32 i = 0; - u32 bitmap_cnt = 0; - ulong flag = 0; - u32 max_login = 0; - - 
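unf_init_rport_pool() below combines two allocation schemes: a vmalloc'ed array of struct unf_rport threaded onto a free list, plus a bitmap from which R_Port indices (RPIs) are handed out; the bitmap is sized as support_max_rport / BITS_PER_LONG + 1 longs. A minimal sketch of the bitmap side, assuming the caller already holds the pool spinlock (helper names are illustrative, not part of the driver):

	#include <linux/bitops.h>

	/* Return an unused RPI and mark it busy, or -1 if the pool is exhausted */
	static int rpi_alloc(unsigned long *bitmap, unsigned long max_rpi)
	{
		unsigned long rpi = find_first_zero_bit(bitmap, max_rpi);

		if (rpi >= max_rpi)
			return -1;
		set_bit(rpi, bitmap);
		return (int)rpi;
	}

	static void rpi_free(unsigned long *bitmap, unsigned long rpi)
	{
		clear_bit(rpi, bitmap);
	}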
FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* Init RPort Pool info */ - rport_pool = &lport->rport_pool; - max_login = lport->low_level_func.lport_cfg_items.max_login; - rport_pool->rport_pool_completion = NULL; - rport_pool->rport_pool_count = max_login; - spin_lock_init(&rport_pool->rport_free_pool_lock); - INIT_LIST_HEAD(&rport_pool->list_rports_pool); /* free RPort pool */ - - /* 1. Alloc RPort Pool buffer/resource (memory) */ - rport_pool->rport_pool_add = vmalloc((size_t)(max_login * sizeof(struct unf_rport))); - if (!rport_pool->rport_pool_add) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) allocate RPort(s) resource failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - memset(rport_pool->rport_pool_add, 0, (max_login * sizeof(struct unf_rport))); - - /* 2. Alloc R_Port Pool bitmap */ - bitmap_cnt = (lport->low_level_func.support_max_rport) / BITS_PER_LONG + 1; - rport_pool->rpi_bitmap = vmalloc((size_t)(bitmap_cnt * sizeof(ulong))); - if (!rport_pool->rpi_bitmap) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) allocate RPort Bitmap failed", lport->port_id); - - vfree(rport_pool->rport_pool_add); - rport_pool->rport_pool_add = NULL; - return UNF_RETURN_ERROR; - } - memset(rport_pool->rpi_bitmap, 0, (bitmap_cnt * sizeof(ulong))); - - /* 3. Rport resource Management: Add Rports (buffer) to Rport Pool List - */ - unf_rport = (struct unf_rport *)(rport_pool->rport_pool_add); - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - for (i = 0; i < rport_pool->rport_pool_count; i++) { - spin_lock_init(&unf_rport->rport_state_lock); - list_add_tail(&unf_rport->entry_rport, &rport_pool->list_rports_pool); - sema_init(&unf_rport->task_sema, 0); - unf_rport++; - } - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - - return ret; -} - -static void unf_free_rport_pool(struct unf_lport *lport) -{ - struct unf_rport_pool *rport_pool = NULL; - bool wait = false; - ulong flag = 0; - u32 remain = 0; - u64 timeout = 0; - u32 max_login = 0; - u32 i; - struct unf_rport *unf_rport = NULL; - struct completion rport_pool_completion; - - init_completion(&rport_pool_completion); - FC_CHECK_RETURN_VOID(lport); - - rport_pool = &lport->rport_pool; - max_login = lport->low_level_func.lport_cfg_items.max_login; - - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - if (rport_pool->rport_pool_count != max_login) { - rport_pool->rport_pool_completion = &rport_pool_completion; - remain = max_login - rport_pool->rport_pool_count; - wait = true; - } - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - - if (wait) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to wait for RPort pool completion, remain(0x%x)", - lport->port_id, remain); - - unf_show_all_rport(lport); - - timeout = wait_for_completion_timeout(rport_pool->rport_pool_completion, - msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT)); - if (timeout == 0) - unf_cm_mark_dirty_mem(lport, UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) wait for RPort pool completion end", - lport->port_id); - - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - rport_pool->rport_pool_completion = NULL; - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - } - - unf_rport = (struct unf_rport *)(rport_pool->rport_pool_add); - for (i = 0; i < rport_pool->rport_pool_count; i++) { - if (!unf_rport) - break; - unf_rport++; - } - - if ((lport->dirty_flag & 
UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY) == 0) { - vfree(rport_pool->rport_pool_add); - rport_pool->rport_pool_add = NULL; - vfree(rport_pool->rpi_bitmap); - rport_pool->rpi_bitmap = NULL; - } -} - -static void unf_init_rscn_node(struct unf_port_id_page *port_id_page) -{ - FC_CHECK_RETURN_VOID(port_id_page); - - port_id_page->addr_format = 0; - port_id_page->event_qualifier = 0; - port_id_page->reserved = 0; - port_id_page->port_id_area = 0; - port_id_page->port_id_domain = 0; - port_id_page->port_id_port = 0; -} - -struct unf_port_id_page *unf_get_free_rscn_node(void *rscn_mg) -{ - /* Call by Save RSCN Port_ID */ - struct unf_rscn_mgr *rscn_mgr = NULL; - struct unf_port_id_page *port_id_node = NULL; - struct list_head *list_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(rscn_mg, NULL); - rscn_mgr = (struct unf_rscn_mgr *)rscn_mg; - - spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); - if (list_empty(&rscn_mgr->list_free_rscn_page)) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_WARN, - "[warn]No RSCN node anymore"); - - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); - return NULL; - } - - /* Get from list_free_RSCN_page */ - list_node = UNF_OS_LIST_NEXT(&rscn_mgr->list_free_rscn_page); - list_del(list_node); - rscn_mgr->free_rscn_count--; - port_id_node = list_entry(list_node, struct unf_port_id_page, list_node_rscn); - unf_init_rscn_node(port_id_node); - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); - - return port_id_node; -} - -static void unf_release_rscn_node(void *rscn_mg, void *port_id_node) -{ - /* Call by RSCN GID_ACC */ - struct unf_rscn_mgr *rscn_mgr = NULL; - struct unf_port_id_page *port_id_page = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(rscn_mg); - FC_CHECK_RETURN_VOID(port_id_node); - rscn_mgr = (struct unf_rscn_mgr *)rscn_mg; - port_id_page = (struct unf_port_id_page *)port_id_node; - - /* Back to list_free_RSCN_page */ - spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); - rscn_mgr->free_rscn_count++; - unf_init_rscn_node(port_id_page); - list_add_tail(&port_id_page->list_node_rscn, &rscn_mgr->list_free_rscn_page); - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); -} - -static u32 unf_init_rscn_pool(struct unf_lport *lport) -{ - struct unf_rscn_mgr *rscn_mgr = NULL; - struct unf_port_id_page *port_id_page = NULL; - u32 ret = RETURN_OK; - u32 i = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - rscn_mgr = &lport->disc.rscn_mgr; - - /* Get RSCN Pool buffer */ - rscn_mgr->rscn_pool_add = vmalloc(UNF_LIST_RSCN_PAGE_CNT * sizeof(struct unf_port_id_page)); - if (!rscn_mgr->rscn_pool_add) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate RSCN pool failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - memset(rscn_mgr->rscn_pool_add, 0, - UNF_LIST_RSCN_PAGE_CNT * sizeof(struct unf_port_id_page)); - - spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); - port_id_page = (struct unf_port_id_page *)(rscn_mgr->rscn_pool_add); - for (i = 0; i < UNF_LIST_RSCN_PAGE_CNT; i++) { - /* Add tail to list_free_RSCN_page */ - list_add_tail(&port_id_page->list_node_rscn, &rscn_mgr->list_free_rscn_page); - - rscn_mgr->free_rscn_count++; - port_id_page++; - } - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); - - return ret; -} - -static void unf_freerscn_pool(struct unf_lport *lport) -{ - struct unf_disc *disc = NULL; - - FC_CHECK_RETURN_VOID(lport); - - disc = &lport->disc; - if (disc->rscn_mgr.rscn_pool_add) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, 
"[info]Port(0x%x) free RSCN pool", lport->nport_id); - - vfree(disc->rscn_mgr.rscn_pool_add); - disc->rscn_mgr.rscn_pool_add = NULL; - } -} - -static u32 unf_init_rscn_mgr(struct unf_lport *lport) -{ - struct unf_rscn_mgr *rscn_mgr = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - rscn_mgr = &lport->disc.rscn_mgr; - - INIT_LIST_HEAD(&rscn_mgr->list_free_rscn_page); /* free RSCN page list */ - INIT_LIST_HEAD(&rscn_mgr->list_using_rscn_page); /* busy RSCN page list */ - spin_lock_init(&rscn_mgr->rscn_id_list_lock); - rscn_mgr->free_rscn_count = 0; - rscn_mgr->unf_get_free_rscn_node = unf_get_free_rscn_node; - rscn_mgr->unf_release_rscn_node = unf_release_rscn_node; - - ret = unf_init_rscn_pool(lport); - return ret; -} - -static void unf_destroy_rscn_mngr(struct unf_lport *lport) -{ - struct unf_rscn_mgr *rscn_mgr = NULL; - - FC_CHECK_RETURN_VOID(lport); - rscn_mgr = &lport->disc.rscn_mgr; - - rscn_mgr->free_rscn_count = 0; - rscn_mgr->unf_get_free_rscn_node = NULL; - rscn_mgr->unf_release_rscn_node = NULL; - - unf_freerscn_pool(lport); -} - -static u32 unf_init_disc_rport_pool(struct unf_lport *lport) -{ - struct unf_disc_rport_mg *disc_mgr = NULL; - struct unf_disc_rport *disc_rport = NULL; - u32 i = 0; - u32 max_log_in = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - max_log_in = lport->low_level_func.lport_cfg_items.max_login; - disc_mgr = &lport->disc.disc_rport_mgr; - - /* Alloc R_Port Disc Pool buffer */ - disc_mgr->disc_pool_add = - vmalloc(max_log_in * sizeof(struct unf_disc_rport)); - if (!disc_mgr->disc_pool_add) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate disc RPort pool failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - memset(disc_mgr->disc_pool_add, 0, (max_log_in * sizeof(struct unf_disc_rport))); - - /* Add R_Port to (free) DISC R_Port Pool */ - spin_lock_irqsave(&lport->disc.rport_busy_pool_lock, flag); - disc_rport = (struct unf_disc_rport *)(disc_mgr->disc_pool_add); - for (i = 0; i < max_log_in; i++) { - /* Add tail to list_disc_Rport_pool */ - list_add_tail(&disc_rport->entry_rport, &disc_mgr->list_disc_rports_pool); - - disc_rport++; - } - spin_unlock_irqrestore(&lport->disc.rport_busy_pool_lock, flag); - - return RETURN_OK; -} - -static void unf_free_disc_rport_pool(struct unf_lport *lport) -{ - struct unf_disc *disc = NULL; - - FC_CHECK_RETURN_VOID(lport); - - disc = &lport->disc; - if (disc->disc_rport_mgr.disc_pool_add) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, "[info]Port(0x%x) free disc RPort pool", lport->port_id); - - vfree(disc->disc_rport_mgr.disc_pool_add); - disc->disc_rport_mgr.disc_pool_add = NULL; - } -} - -int unf_discover_port_info(void *arg_in) -{ - struct unf_disc_gs_event_info *disc_gs_info = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - - FC_CHECK_RETURN_VALUE(arg_in, UNF_RETURN_ERROR); - - disc_gs_info = (struct unf_disc_gs_event_info *)arg_in; - unf_lport = (struct unf_lport *)disc_gs_info->lport; - unf_rport = (struct unf_rport *)disc_gs_info->rport; - - switch (disc_gs_info->type) { - case UNF_DISC_GET_PORT_NAME: - ret = unf_send_gpn_id(unf_lport, unf_rport, disc_gs_info->rport_id); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send GPN_ID failed RPort(0x%x)", - unf_lport->nport_id, disc_gs_info->rport_id); - unf_rcv_gpn_id_rsp_unknown(unf_lport, disc_gs_info->rport_id); - } - break; - case UNF_DISC_GET_FEATURE: - ret = 
unf_send_gff_id(unf_lport, unf_rport, disc_gs_info->rport_id); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send GFF_ID failed to get RPort(0x%x)'s feature", - unf_lport->port_id, disc_gs_info->rport_id); - - unf_rcv_gff_id_rsp_unknown(unf_lport, disc_gs_info->rport_id); - } - break; - case UNF_DISC_GET_NODE_NAME: - ret = unf_send_gnn_id(unf_lport, unf_rport, disc_gs_info->rport_id); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) GNN_ID send failed with NPort ID(0x%x)", - unf_lport->port_id, disc_gs_info->rport_id); - - /* NOTE: Continue to next stage */ - unf_rcv_gnn_id_rsp_unknown(unf_lport, unf_rport, disc_gs_info->rport_id); - } - break; - default: - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]Send GS packet type(0x%x) is unknown", disc_gs_info->type); - } - - kfree(disc_gs_info); - - return (int)ret; -} - -u32 unf_get_and_post_disc_event(void *lport, void *sns_port, u32 nport_id, - enum unf_disc_type type) -{ - struct unf_disc_gs_event_info *disc_gs_info = NULL; - ulong flag = 0; - struct unf_lport *root_lport = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_disc_manage_info *disc_info = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - - unf_lport = (struct unf_lport *)lport; - - if (unf_lport->link_up == UNF_PORT_LINK_DOWN) - return RETURN_OK; - - root_lport = unf_lport->root_lport; - disc_info = &root_lport->disc.disc_thread_info; - - if (disc_info->thread_exit) - return RETURN_OK; - - disc_gs_info = kmalloc(sizeof(struct unf_disc_gs_event_info), GFP_ATOMIC); - if (!disc_gs_info) - return UNF_RETURN_ERROR; - - disc_gs_info->type = type; - disc_gs_info->lport = unf_lport; - disc_gs_info->rport = sns_port; - disc_gs_info->rport_id = nport_id; - - INIT_LIST_HEAD(&disc_gs_info->list_entry); - - spin_lock_irqsave(&disc_info->disc_event_list_lock, flag); - list_add_tail(&disc_gs_info->list_entry, &disc_info->list_head); - spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag); - wake_up_process(disc_info->thread); - return RETURN_OK; -} - -static int unf_disc_event_process(void *arg) -{ - struct list_head *node = NULL; - struct unf_disc_gs_event_info *disc_gs_info = NULL; - ulong flags = 0; - struct unf_disc *disc = (struct unf_disc *)arg; - struct unf_disc_manage_info *disc_info = &disc->disc_thread_info; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) enter discovery thread.", disc->lport->port_id); - - while (!kthread_should_stop()) { - if (disc_info->thread_exit) - break; - - spin_lock_irqsave(&disc_info->disc_event_list_lock, flags); - if ((list_empty(&disc_info->list_head)) || - (atomic_read(&disc_info->disc_contrl_size) == 0)) { - spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flags); - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout((long)msecs_to_jiffies(UNF_S_TO_MS)); - } else { - node = UNF_OS_LIST_NEXT(&disc_info->list_head); - list_del_init(node); - disc_gs_info = list_entry(node, struct unf_disc_gs_event_info, list_entry); - spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flags); - unf_discover_port_info(disc_gs_info); - } - } - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "Port(0x%x) discovery thread over.", disc->lport->port_id); - - return RETURN_OK; -} - -void unf_flush_disc_event(void *disc, void *vport) -{ - struct unf_disc *unf_disc = (struct unf_disc *)disc; - struct unf_disc_manage_info *disc_info = NULL; - struct list_head *list = NULL; - struct list_head 
*list_tmp = NULL; - struct unf_disc_gs_event_info *disc_gs_info = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(disc); - - disc_info = &unf_disc->disc_thread_info; - - spin_lock_irqsave(&disc_info->disc_event_list_lock, flag); - list_for_each_safe(list, list_tmp, &disc_info->list_head) { - disc_gs_info = list_entry(list, struct unf_disc_gs_event_info, list_entry); - - if (!vport || disc_gs_info->lport == vport) { - list_del_init(&disc_gs_info->list_entry); - kfree(disc_gs_info); - } - } - - if (!vport) - atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM); - spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag); -} - -void unf_disc_ctrl_size_inc(void *lport, u32 cmnd) -{ - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = (struct unf_lport *)lport; - unf_lport = unf_lport->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - - if (atomic_read(&unf_lport->disc.disc_thread_info.disc_contrl_size) == - UNF_MAX_GS_SEND_NUM) - return; - - if (cmnd == NS_GPN_ID || cmnd == NS_GNN_ID || cmnd == NS_GFF_ID) - atomic_inc(&unf_lport->disc.disc_thread_info.disc_contrl_size); -} - -void unf_destroy_disc_thread(void *disc) -{ - struct unf_disc_manage_info *disc_info = NULL; - struct unf_disc *unf_disc = (struct unf_disc *)disc; - - FC_CHECK_RETURN_VOID(unf_disc); - - disc_info = &unf_disc->disc_thread_info; - - disc_info->thread_exit = true; - unf_flush_disc_event(unf_disc, NULL); - - wake_up_process(disc_info->thread); - kthread_stop(disc_info->thread); - disc_info->thread = NULL; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) destroyed discovery thread successfully.", - unf_disc->lport->port_id); -} - -u32 unf_crerate_disc_thread(void *disc) -{ - struct unf_disc_manage_info *disc_info = NULL; - struct unf_disc *unf_disc = (struct unf_disc *)disc; - - FC_CHECK_RETURN_VALUE(unf_disc, UNF_RETURN_ERROR); - - /* If the thread cannot be found, apply for a new thread. 
*/ - disc_info = &unf_disc->disc_thread_info; - - memset(disc_info, 0, sizeof(struct unf_disc_manage_info)); - - INIT_LIST_HEAD(&disc_info->list_head); - spin_lock_init(&disc_info->disc_event_list_lock); - atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM); - - disc_info->thread_exit = false; - disc_info->thread = kthread_create(unf_disc_event_process, unf_disc, "%x_DiscT", - unf_disc->lport->port_id); - - if (IS_ERR(disc_info->thread) || !disc_info->thread) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) failed to create discovery thread(0x%p).", - unf_disc->lport->port_id, disc_info->thread); - - return UNF_RETURN_ERROR; - } - - wake_up_process(disc_info->thread); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) created discovery thread successfully.", unf_disc->lport->port_id); - - return RETURN_OK; -} - -void unf_disc_ref_cnt_dec(struct unf_disc *disc) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(disc); - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - if (atomic_dec_and_test(&disc->disc_ref_cnt)) { - if (disc->disc_completion) - complete(disc->disc_completion); - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); -} - -void unf_wait_disc_complete(struct unf_lport *lport) -{ - struct unf_disc *disc = NULL; - bool wait = false; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - u64 time_out = 0; - - struct completion disc_completion; - - init_completion(&disc_completion); - disc = &lport->disc; - - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), (&disc->disc_work), - "Disc_work"); - if (ret == RETURN_OK) - unf_disc_ref_cnt_dec(disc); - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - if (atomic_read(&disc->disc_ref_cnt) != 0) { - disc->disc_completion = &disc_completion; - wait = true; - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - if (wait) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to wait for discovery completion", - lport->port_id); - - time_out = - wait_for_completion_timeout(disc->disc_completion, - msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT)); - if (time_out == 0) - unf_cm_mark_dirty_mem(lport, UNF_LPORT_DIRTY_FLAG_DISC_DIRTY); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) wait for discovery completion end", lport->port_id); - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - disc->disc_completion = NULL; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } -} - -void unf_disc_mgr_destroy(void *lport) -{ - struct unf_disc *disc = NULL; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - unf_lport = (struct unf_lport *)lport; - - disc = &unf_lport->disc; - disc->retry_count = 0; - disc->disc_temp.unf_disc_start = NULL; - disc->disc_temp.unf_disc_stop = NULL; - disc->disc_temp.unf_disc_callback = NULL; - - unf_free_disc_rport_pool(unf_lport); - unf_destroy_rscn_mngr(unf_lport); - unf_wait_disc_complete(unf_lport); - - if (unf_lport->root_lport != unf_lport) - return; - - unf_destroy_disc_thread(disc); - unf_free_rport_pool(unf_lport); - unf_lport->destroy_step = UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR; -} - -void unf_disc_error_recovery(void *lport) -{ - struct unf_rport *unf_rport = NULL; - struct unf_disc *disc = NULL; - ulong delay = 0; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = (struct unf_lport *)lport; - disc = &unf_lport->disc; - - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_DIR_SERV); - if (!unf_rport) { - 
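/*
 * Recovery policy: while retry_count < max_retry_count, discovery is
 * simply re-armed as delayed work after one ed_tov; once retries are
 * exhausted, unf_disc_state_ma() degrades the GID_PT wait to GID_FT
 * before giving up entirely.
 */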
FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) find RPort failed", unf_lport->port_id); - return; - } - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - - /* Delay work is pending */ - if (delayed_work_pending(&disc->disc_work)) { - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) disc_work is running and do nothing", - unf_lport->port_id); - return; - } - - /* Continue to retry */ - if (disc->retry_count < disc->max_retry_count) { - disc->retry_count++; - delay = (ulong)unf_lport->ed_tov; - if (queue_delayed_work(unf_wq, &disc->disc_work, - (ulong)msecs_to_jiffies((u32)delay))) - atomic_inc(&disc->disc_ref_cnt); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } else { - /* Go to next stage */ - if (disc->states == UNF_DISC_ST_GIDPT_WAIT) { - /* GID_PT_WAIT --->>> Send GID_FT */ - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_RETRY_TIMEOUT); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - while ((ret != RETURN_OK) && - (disc->retry_count < disc->max_retry_count)) { - ret = unf_send_gid_ft(unf_lport, unf_rport); - disc->retry_count++; - } - } else if (disc->states == UNF_DISC_ST_GIDFT_WAIT) { - /* GID_FT_WAIT --->>> Send LOGO */ - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_RETRY_TIMEOUT); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } else { - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } - } -} - -enum unf_disc_state unf_disc_stat_start(enum unf_disc_state old_state, - enum unf_disc_event event) -{ - enum unf_disc_state next_state = UNF_DISC_ST_END; - - if (event == UNF_EVENT_DISC_NORMAL_ENTER) - next_state = UNF_DISC_ST_GIDPT_WAIT; - else - next_state = old_state; - - return next_state; -} - -enum unf_disc_state unf_disc_stat_gid_pt_wait(enum unf_disc_state old_state, - enum unf_disc_event event) -{ - enum unf_disc_state next_state = UNF_DISC_ST_END; - - switch (event) { - case UNF_EVENT_DISC_FAILED: - next_state = UNF_DISC_ST_GIDPT_WAIT; - break; - - case UNF_EVENT_DISC_RETRY_TIMEOUT: - next_state = UNF_DISC_ST_GIDFT_WAIT; - break; - - case UNF_EVENT_DISC_SUCCESS: - next_state = UNF_DISC_ST_END; - break; - - case UNF_EVENT_DISC_LINKDOWN: - next_state = UNF_DISC_ST_START; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -enum unf_disc_state unf_disc_stat_gid_ft_wait(enum unf_disc_state old_state, - enum unf_disc_event event) -{ - enum unf_disc_state next_state = UNF_DISC_ST_END; - - switch (event) { - case UNF_EVENT_DISC_FAILED: - next_state = UNF_DISC_ST_GIDFT_WAIT; - break; - - case UNF_EVENT_DISC_RETRY_TIMEOUT: - next_state = UNF_DISC_ST_END; - break; - - case UNF_EVENT_DISC_LINKDOWN: - next_state = UNF_DISC_ST_START; - break; - - case UNF_EVENT_DISC_SUCCESS: - next_state = UNF_DISC_ST_END; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -enum unf_disc_state unf_disc_stat_end(enum unf_disc_state old_state, enum unf_disc_event event) -{ - enum unf_disc_state next_state = UNF_DISC_ST_END; - - if (event == UNF_EVENT_DISC_LINKDOWN) - next_state = UNF_DISC_ST_START; - else - next_state = old_state; - - return next_state; -} - -void unf_disc_state_ma(struct unf_lport *lport, enum unf_disc_event event) -{ - struct unf_disc *disc = NULL; - enum unf_disc_state old_state = UNF_DISC_ST_START; - enum unf_disc_state next_state = UNF_DISC_ST_START; - - FC_CHECK_RETURN_VOID(lport); - - disc = &lport->disc; - old_state = disc->states; - - switch 
(disc->states) { - case UNF_DISC_ST_START: - next_state = unf_disc_stat_start(old_state, event); - break; - - case UNF_DISC_ST_GIDPT_WAIT: - next_state = unf_disc_stat_gid_pt_wait(old_state, event); - break; - - case UNF_DISC_ST_GIDFT_WAIT: - next_state = unf_disc_stat_gid_ft_wait(old_state, event); - break; - - case UNF_DISC_ST_END: - next_state = unf_disc_stat_end(old_state, event); - break; - - default: - next_state = old_state; - break; - } - - unf_set_disc_state(disc, next_state); -} - -static void unf_lport_disc_timeout(struct work_struct *work) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_disc *disc = NULL; - enum unf_disc_state state = UNF_DISC_ST_END; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(work); - - disc = container_of(work, struct unf_disc, disc_work.work); - if (!disc) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Get discover pointer failed"); - - return; - } - - unf_lport = disc->lport; - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Find Port by discovery work failed"); - - unf_disc_ref_cnt_dec(disc); - return; - } - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - state = disc->states; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_DIR_SERV); /* 0xfffffc */ - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find fabric RPort failed", unf_lport->port_id); - - unf_disc_ref_cnt_dec(disc); - return; - } - - switch (state) { - case UNF_DISC_ST_START: - break; - - case UNF_DISC_ST_GIDPT_WAIT: - (void)unf_send_gid_pt(unf_lport, unf_rport); - break; - - case UNF_DISC_ST_GIDFT_WAIT: - (void)unf_send_gid_ft(unf_lport, unf_rport); - break; - - case UNF_DISC_ST_END: - break; - - default: - break; - } - - unf_disc_ref_cnt_dec(disc); -} - -u32 unf_init_disc_mgr(struct unf_lport *lport) -{ - struct unf_disc *disc = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - disc = &lport->disc; - disc->max_retry_count = UNF_DISC_RETRY_TIMES; - disc->retry_count = 0; - disc->disc_flag = UNF_DISC_NONE; - INIT_LIST_HEAD(&disc->list_busy_rports); - INIT_LIST_HEAD(&disc->list_delete_rports); - INIT_LIST_HEAD(&disc->list_destroy_rports); - spin_lock_init(&disc->rport_busy_pool_lock); - - disc->disc_rport_mgr.disc_pool_add = NULL; - INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rports_pool); - INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rports_busy); - - disc->disc_completion = NULL; - disc->lport = lport; - INIT_DELAYED_WORK(&disc->disc_work, unf_lport_disc_timeout); - disc->disc_temp.unf_disc_start = unf_disc_start; - disc->disc_temp.unf_disc_stop = unf_disc_stop; - disc->disc_temp.unf_disc_callback = NULL; - atomic_set(&disc->disc_ref_cnt, 0); - - /* Init RSCN Manager */ - ret = unf_init_rscn_mgr(lport); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - - if (lport->root_lport != lport) - return ret; - - ret = unf_crerate_disc_thread(disc); - if (ret != RETURN_OK) { - unf_destroy_rscn_mngr(lport); - - return UNF_RETURN_ERROR; - } - - /* Init R_Port free Pool */ - ret = unf_init_rport_pool(lport); - if (ret != RETURN_OK) { - unf_destroy_disc_thread(disc); - unf_destroy_rscn_mngr(lport); - - return UNF_RETURN_ERROR; - } - - /* Init R_Port free disc Pool */ - ret = unf_init_disc_rport_pool(lport); - if (ret != RETURN_OK) { - unf_destroy_disc_thread(disc); - unf_free_rport_pool(lport); - unf_destroy_rscn_mngr(lport); - - return UNF_RETURN_ERROR; - } - - return 
ret; -} diff --git a/drivers/scsi/spfc/common/unf_disc.h b/drivers/scsi/spfc/common/unf_disc.h deleted file mode 100644 index 7ecad3eec42497bfee385e5138927decb14b837d..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_disc.h +++ /dev/null @@ -1,51 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_DISC_H -#define UNF_DISC_H - -#include "unf_type.h" - -#define UNF_DISC_RETRY_TIMES 3 -#define UNF_DISC_NONE 0 -#define UNF_DISC_FABRIC 1 -#define UNF_DISC_LOOP 2 - -enum unf_disc_state { - UNF_DISC_ST_START = 0x3000, - UNF_DISC_ST_GIDPT_WAIT, - UNF_DISC_ST_GIDFT_WAIT, - UNF_DISC_ST_END -}; - -enum unf_disc_event { - UNF_EVENT_DISC_NORMAL_ENTER = 0x8000, - UNF_EVENT_DISC_FAILED = 0x8001, - UNF_EVENT_DISC_SUCCESS = 0x8002, - UNF_EVENT_DISC_RETRY_TIMEOUT = 0x8003, - UNF_EVENT_DISC_LINKDOWN = 0x8004 -}; - -enum unf_disc_type { - UNF_DISC_GET_PORT_NAME = 0, - UNF_DISC_GET_NODE_NAME, - UNF_DISC_GET_FEATURE -}; - -struct unf_disc_gs_event_info { - void *lport; - void *rport; - u32 rport_id; - enum unf_disc_type type; - struct list_head list_entry; -}; - -u32 unf_get_and_post_disc_event(void *lport, void *sns_port, u32 nport_id, - enum unf_disc_type type); - -void unf_flush_disc_event(void *disc, void *vport); -void unf_disc_ctrl_size_inc(void *lport, u32 cmnd); -void unf_disc_error_recovery(void *lport); -void unf_disc_mgr_destroy(void *lport); - -#endif diff --git a/drivers/scsi/spfc/common/unf_event.c b/drivers/scsi/spfc/common/unf_event.c deleted file mode 100644 index cf51c31ca4a3eeff81cd6defe41532c882a12ca9..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_event.c +++ /dev/null @@ -1,517 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_event.h" -#include "unf_log.h" -#include "unf_common.h" -#include "unf_lport.h" - -struct unf_event_list fc_event_list; -struct unf_global_event_queue global_event_queue; - -/* Max global event node */ -#define UNF_MAX_GLOBAL_ENENT_NODE 24 - -u32 unf_init_event_msg(struct unf_lport *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - struct unf_cm_event_report *event_node = NULL; - u32 ret = RETURN_OK; - u32 index = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - event_mgr = &lport->event_mgr; - - /* Get and Initial Event Node resource */ - event_mgr->mem_add = vmalloc((size_t)event_mgr->free_event_count * - sizeof(struct unf_cm_event_report)); - if (!event_mgr->mem_add) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate event manager failed", - lport->port_id); - - return UNF_RETURN_ERROR; - } - memset(event_mgr->mem_add, 0, - ((size_t)event_mgr->free_event_count * sizeof(struct unf_cm_event_report))); - - event_node = (struct unf_cm_event_report *)(event_mgr->mem_add); - - spin_lock_irqsave(&event_mgr->port_event_lock, flag); - for (index = 0; index < event_mgr->free_event_count; index++) { - INIT_LIST_HEAD(&event_node->list_entry); - list_add_tail(&event_node->list_entry, &event_mgr->list_free_event); - event_node++; - } - spin_unlock_irqrestore(&event_mgr->port_event_lock, flag); - - return ret; -} - -static void unf_del_event_center_fun_op(struct unf_lport *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - - FC_CHECK_RETURN_VOID(lport); - - event_mgr = &lport->event_mgr; - event_mgr->unf_get_free_event_func = NULL; - event_mgr->unf_release_event = NULL; - event_mgr->unf_post_event_func = NULL; -} - -void 
unf_init_event_node(struct unf_cm_event_report *event_node) -{ - FC_CHECK_RETURN_VOID(event_node); - - event_node->event = UNF_EVENT_TYPE_REQUIRE; - event_node->event_asy_flag = UNF_EVENT_ASYN; - event_node->delay_times = 0; - event_node->para_in = NULL; - event_node->para_out = NULL; - event_node->result = 0; - event_node->lport = NULL; - event_node->unf_event_task = NULL; -} - -struct unf_cm_event_report *unf_get_free_event_node(void *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - struct unf_cm_event_report *event_node = NULL; - struct list_head *list_node = NULL; - struct unf_lport *unf_lport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - unf_lport = (struct unf_lport *)lport; - unf_lport = unf_lport->root_lport; - - if (unlikely(atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP)) - return NULL; - - event_mgr = &unf_lport->event_mgr; - - spin_lock_irqsave(&event_mgr->port_event_lock, flags); - if (list_empty(&event_mgr->list_free_event)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) have no event node anymore", - unf_lport->port_id); - - spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); - return NULL; - } - - list_node = UNF_OS_LIST_NEXT(&event_mgr->list_free_event); - list_del(list_node); - event_mgr->free_event_count--; - event_node = list_entry(list_node, struct unf_cm_event_report, list_entry); - - unf_init_event_node(event_node); - spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); - - return event_node; -} - -void unf_post_event(void *lport, void *event_node) -{ - struct unf_cm_event_report *cm_event_node = NULL; - struct unf_chip_manage_info *card_thread_info = NULL; - struct unf_lport *unf_lport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(event_node); - cm_event_node = (struct unf_cm_event_report *)event_node; - - /* If null, post to global event center */ - if (!lport) { - spin_lock_irqsave(&fc_event_list.fc_event_list_lock, flags); - fc_event_list.list_num++; - list_add_tail(&cm_event_node->list_entry, &fc_event_list.list_head); - spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, flags); - - wake_up_process(event_task_thread); - } else { - unf_lport = (struct unf_lport *)lport; - unf_lport = unf_lport->root_lport; - card_thread_info = unf_lport->chip_info; - - /* Post to global event center */ - if (!card_thread_info) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN, - "[warn]Port(0x%x) has strange event with type(0x%x)", - unf_lport->nport_id, cm_event_node->event); - - spin_lock_irqsave(&fc_event_list.fc_event_list_lock, flags); - fc_event_list.list_num++; - list_add_tail(&cm_event_node->list_entry, &fc_event_list.list_head); - spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, flags); - - wake_up_process(event_task_thread); - } else { - spin_lock_irqsave(&card_thread_info->chip_event_list_lock, flags); - card_thread_info->list_num++; - list_add_tail(&cm_event_node->list_entry, &card_thread_info->list_head); - spin_unlock_irqrestore(&card_thread_info->chip_event_list_lock, flags); - - wake_up_process(card_thread_info->thread); - } - } -} - -void unf_check_event_mgr_status(struct unf_event_mgr *event_mgr) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(event_mgr); - - spin_lock_irqsave(&event_mgr->port_event_lock, flag); - if (event_mgr->emg_completion && event_mgr->free_event_count == UNF_MAX_EVENT_NODE) - complete(event_mgr->emg_completion); - - spin_unlock_irqrestore(&event_mgr->port_event_lock, flag); -} - -void unf_release_event(void *lport, void *event_node) -{ - struct 
unf_event_mgr *event_mgr = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_cm_event_report *cm_event_node = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(event_node); - - cm_event_node = (struct unf_cm_event_report *)event_node; - unf_lport = (struct unf_lport *)lport; - unf_lport = unf_lport->root_lport; - event_mgr = &unf_lport->event_mgr; - - spin_lock_irqsave(&event_mgr->port_event_lock, flags); - event_mgr->free_event_count++; - unf_init_event_node(cm_event_node); - list_add_tail(&cm_event_node->list_entry, &event_mgr->list_free_event); - spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); - - unf_check_event_mgr_status(event_mgr); -} - -void unf_release_global_event(void *event_node) -{ - ulong flag = 0; - struct unf_cm_event_report *cm_event_node = NULL; - - FC_CHECK_RETURN_VOID(event_node); - cm_event_node = (struct unf_cm_event_report *)event_node; - - unf_init_event_node(cm_event_node); - - spin_lock_irqsave(&global_event_queue.global_event_list_lock, flag); - global_event_queue.list_number++; - list_add_tail(&cm_event_node->list_entry, &global_event_queue.global_event_list); - spin_unlock_irqrestore(&global_event_queue.global_event_list_lock, flag); -} - -u32 unf_init_event_center(void *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - u32 ret = RETURN_OK; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - unf_lport = (struct unf_lport *)lport; - - /* Initialize the event manager */ - event_mgr = &unf_lport->event_mgr; - event_mgr->free_event_count = UNF_MAX_EVENT_NODE; - event_mgr->unf_get_free_event_func = unf_get_free_event_node; - event_mgr->unf_release_event = unf_release_event; - event_mgr->unf_post_event_func = unf_post_event; - - INIT_LIST_HEAD(&event_mgr->list_free_event); - spin_lock_init(&event_mgr->port_event_lock); - event_mgr->emg_completion = NULL; - - ret = unf_init_event_msg(unf_lport); - - return ret; -} - -void unf_wait_event_mgr_complete(struct unf_event_mgr *event_mgr) -{ - struct unf_event_mgr *event_mgr_temp = NULL; - bool wait = false; - ulong mg_flag = 0; - - struct completion fc_event_completion; - - init_completion(&fc_event_completion); - FC_CHECK_RETURN_VOID(event_mgr); - event_mgr_temp = event_mgr; - - spin_lock_irqsave(&event_mgr_temp->port_event_lock, mg_flag); - if (event_mgr_temp->free_event_count != UNF_MAX_EVENT_NODE) { - event_mgr_temp->emg_completion = &fc_event_completion; - wait = true; - } - spin_unlock_irqrestore(&event_mgr_temp->port_event_lock, mg_flag); - - if (wait) - wait_for_completion(event_mgr_temp->emg_completion); - - spin_lock_irqsave(&event_mgr_temp->port_event_lock, mg_flag); - event_mgr_temp->emg_completion = NULL; - spin_unlock_irqrestore(&event_mgr_temp->port_event_lock, mg_flag); -} - -u32 unf_event_center_destroy(void *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - struct list_head *list = NULL; - struct list_head *list_tmp = NULL; - struct unf_cm_event_report *event_node = NULL; - u32 ret = RETURN_OK; - ulong flag = 0; - ulong list_lock_flag = 0; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - unf_lport = (struct unf_lport *)lport; - event_mgr = &unf_lport->event_mgr; - - spin_lock_irqsave(&fc_event_list.fc_event_list_lock, list_lock_flag); - if (!list_empty(&fc_event_list.list_head)) { - list_for_each_safe(list, list_tmp, &fc_event_list.list_head) { - event_node = list_entry(list, struct unf_cm_event_report, list_entry); - - if (event_node->lport == unf_lport) { - 
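/*
 * Every event still queued globally for this port is unlinked below;
 * a synchronous submitter (UNF_EVENT_SYN) is failed via complete() so
 * it does not block forever, and the node is returned to the port
 * free list under port_event_lock.
 */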
list_del_init(&event_node->list_entry); - if (event_node->event_asy_flag == UNF_EVENT_SYN) { - event_node->result = UNF_RETURN_ERROR; - complete(&event_node->event_comp); - } - - spin_lock_irqsave(&event_mgr->port_event_lock, flag); - event_mgr->free_event_count++; - list_add_tail(&event_node->list_entry, - &event_mgr->list_free_event); - spin_unlock_irqrestore(&event_mgr->port_event_lock, flag); - } - } - } - spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, list_lock_flag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to wait for event processing", - unf_lport->port_id); - - unf_wait_event_mgr_complete(event_mgr); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) wait for event processing end", - unf_lport->port_id); - - unf_del_event_center_fun_op(unf_lport); - - vfree(event_mgr->mem_add); - event_mgr->mem_add = NULL; - unf_lport->destroy_step = UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER; - - return ret; -} - -static void unf_process_asyn_event(struct unf_cm_event_report *event_node) -{ - struct unf_lport *lport = NULL; - u32 ret = UNF_RETURN_ERROR; - - lport = (struct unf_lport *)event_node->lport; - - FC_CHECK_RETURN_VOID(lport); - if (event_node->unf_event_task) { - ret = (u32)event_node->unf_event_task(event_node->para_in, - event_node->para_out); - } - - if (lport->event_mgr.unf_release_event) - lport->event_mgr.unf_release_event(lport, event_node); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN, - "[warn]Port(0x%x) handle event(0x%x) failed", - lport->port_id, event_node->event); - } -} - -void unf_handle_event(struct unf_cm_event_report *event_node) -{ - u32 ret = UNF_RETURN_ERROR; - u32 event = 0; - u32 event_asy_flag = UNF_EVENT_ASYN; - - FC_CHECK_RETURN_VOID(event_node); - - event = event_node->event; - event_asy_flag = event_node->event_asy_flag; - - switch (event_asy_flag) { - case UNF_EVENT_SYN: /* synchronous event node */ - case UNF_GLOBAL_EVENT_SYN: - if (event_node->unf_event_task) - ret = (u32)event_node->unf_event_task(event_node->para_in, - event_node->para_out); - - event_node->result = ret; - complete(&event_node->event_comp); - break; - - case UNF_EVENT_ASYN: /* asynchronous event node */ - unf_process_asyn_event(event_node); - break; - - case UNF_GLOBAL_EVENT_ASYN: - if (event_node->unf_event_task) { - ret = (u32)event_node->unf_event_task(event_node->para_in, - event_node->para_out); - } - - unf_release_global_event(event_node); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN, - "[warn]handle global event(0x%x) failed", event); - } - break; - - default: - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN, - "[warn]Unknown event(0x%x)", event); - break; - } -} - -u32 unf_init_global_event_msg(void) -{ - struct unf_cm_event_report *event_node = NULL; - u32 ret = RETURN_OK; - u32 index = 0; - ulong flag = 0; - - INIT_LIST_HEAD(&global_event_queue.global_event_list); - spin_lock_init(&global_event_queue.global_event_list_lock); - global_event_queue.list_number = 0; - - global_event_queue.global_event_add = vmalloc(UNF_MAX_GLOBAL_ENENT_NODE * - sizeof(struct unf_cm_event_report)); - if (!global_event_queue.global_event_add) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Can't allocate global event queue"); - - return UNF_RETURN_ERROR; - } - memset(global_event_queue.global_event_add, 0, - (UNF_MAX_GLOBAL_ENENT_NODE * sizeof(struct unf_cm_event_report))); - - event_node = (struct unf_cm_event_report *)(global_event_queue.global_event_add); - - spin_lock_irqsave(&global_event_queue.global_event_list_lock, flag); 
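/*
 * The loop below uses the same pool pattern as the RPort, RSCN and
 * per-port event managers: vmalloc() one array of nodes, then thread
 * each element onto a free list under the pool lock, so that later
 * alloc and release are O(1) list operations. A minimal sketch of the
 * idiom (placeholder names, not driver symbols):
 *
 *	nodes = vmalloc(count * sizeof(*nodes));
 *	if (!nodes)
 *		return UNF_RETURN_ERROR;
 *	memset(nodes, 0, count * sizeof(*nodes));
 *	for (i = 0; i < count; i++)
 *		list_add_tail(&nodes[i].entry, &pool->free_list);
 */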
- for (index = 0; index < UNF_MAX_GLOBAL_ENENT_NODE; index++) { - INIT_LIST_HEAD(&event_node->list_entry); - list_add_tail(&event_node->list_entry, &global_event_queue.global_event_list); - - global_event_queue.list_number++; - event_node++; - } - spin_unlock_irqrestore(&global_event_queue.global_event_list_lock, flag); - - return ret; -} - -void unf_destroy_global_event_msg(void) -{ - if (global_event_queue.list_number != UNF_MAX_GLOBAL_ENENT_NODE) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_CRITICAL, - "[warn]Global event release not complete with remain nodes(0x%x)", - global_event_queue.list_number); - } - - vfree(global_event_queue.global_event_add); -} - -u32 unf_schedule_global_event(void *para_in, u32 event_asy_flag, - int (*unf_event_task)(void *arg_in, void *arg_out)) -{ - struct list_head *list_node = NULL; - struct unf_cm_event_report *event_node = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - spinlock_t *event_list_lock = NULL; - - FC_CHECK_RETURN_VALUE(unf_event_task, UNF_RETURN_ERROR); - - if (event_asy_flag != UNF_GLOBAL_EVENT_ASYN && event_asy_flag != UNF_GLOBAL_EVENT_SYN) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Event async flag(0x%x) abnormity", - event_asy_flag); - - return UNF_RETURN_ERROR; - } - - event_list_lock = &global_event_queue.global_event_list_lock; - spin_lock_irqsave(event_list_lock, flag); - if (list_empty(&global_event_queue.global_event_list)) { - spin_unlock_irqrestore(event_list_lock, flag); - - return UNF_RETURN_ERROR; - } - - list_node = UNF_OS_LIST_NEXT(&global_event_queue.global_event_list); - list_del_init(list_node); - global_event_queue.list_number--; - event_node = list_entry(list_node, struct unf_cm_event_report, list_entry); - spin_unlock_irqrestore(event_list_lock, flag); - - /* Initial global event */ - unf_init_event_node(event_node); - init_completion(&event_node->event_comp); - event_node->event_asy_flag = event_asy_flag; - event_node->unf_event_task = unf_event_task; - event_node->para_in = (void *)para_in; - event_node->para_out = NULL; - - unf_post_event(NULL, event_node); - - if (event_asy_flag == UNF_GLOBAL_EVENT_SYN) { - /* must wait for complete */ - wait_for_completion(&event_node->event_comp); - ret = event_node->result; - unf_release_global_event(event_node); - } else { - ret = RETURN_OK; - } - - return ret; -} - -struct unf_cm_event_report *unf_get_one_event_node(void *lport) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(unf_lport->event_mgr.unf_get_free_event_func, NULL); - - return unf_lport->event_mgr.unf_get_free_event_func((void *)unf_lport); -} - -void unf_post_one_event_node(void *lport, struct unf_cm_event_report *event) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(event); - - FC_CHECK_RETURN_VOID(unf_lport->event_mgr.unf_post_event_func); - FC_CHECK_RETURN_VOID(event); - - unf_lport->event_mgr.unf_post_event_func((void *)unf_lport, event); -} diff --git a/drivers/scsi/spfc/common/unf_event.h b/drivers/scsi/spfc/common/unf_event.h deleted file mode 100644 index 3fbd72bff8d779fb9bfdefc75779c685d0f2c4dd..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_event.h +++ /dev/null @@ -1,83 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_EVENT_H -#define UNF_EVENT_H - -#include "unf_type.h" - -#define UNF_MAX_EVENT_NODE 256 - -enum unf_event_type { - 
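/*
 * Usage note for unf_schedule_global_event() above: a
 * UNF_GLOBAL_EVENT_SYN caller sleeps on event_comp until the event
 * thread has run the callback, then recycles the node itself, e.g.
 *
 *	ret = unf_schedule_global_event(arg, UNF_GLOBAL_EVENT_SYN,
 *					my_task);   (blocks until my_task ran)
 *
 * where my_task is an illustrative callback, not a driver symbol.
 * With UNF_GLOBAL_EVENT_ASYN the call returns at once and the event
 * thread frees the node, so the caller must not touch it afterwards.
 */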
UNF_EVENT_TYPE_ALARM = 0, /* Alarm */ - UNF_EVENT_TYPE_REQUIRE, /* Require */ - UNF_EVENT_TYPE_RECOVERY, /* Recovery */ - UNF_EVENT_TYPE_BUTT -}; - -struct unf_cm_event_report { - /* event type */ - u32 event; - - /* ASY flag */ - u32 event_asy_flag; - - /* Delay times,must be async event */ - u32 delay_times; - - struct list_head list_entry; - - void *lport; - - /* parameter */ - void *para_in; - void *para_out; - u32 result; - - /* recovery strategy */ - int (*unf_event_task)(void *arg_in, void *arg_out); - - struct completion event_comp; -}; - -struct unf_event_mgr { - spinlock_t port_event_lock; - u32 free_event_count; - - struct list_head list_free_event; - - struct completion *emg_completion; - - void *mem_add; - struct unf_cm_event_report *(*unf_get_free_event_func)(void *lport); - void (*unf_release_event)(void *lport, void *event_node); - void (*unf_post_event_func)(void *lport, void *event_node); -}; - -struct unf_global_event_queue { - void *global_event_add; - u32 list_number; - struct list_head global_event_list; - spinlock_t global_event_list_lock; -}; - -struct unf_event_list { - struct list_head list_head; - spinlock_t fc_event_list_lock; - u32 list_num; /* list node number */ -}; - -void unf_handle_event(struct unf_cm_event_report *event_node); -u32 unf_init_global_event_msg(void); -void unf_destroy_global_event_msg(void); -u32 unf_schedule_global_event(void *para_in, u32 event_asy_flag, - int (*unf_event_task)(void *arg_in, void *arg_out)); -struct unf_cm_event_report *unf_get_one_event_node(void *lport); -void unf_post_one_event_node(void *lport, struct unf_cm_event_report *event); -u32 unf_event_center_destroy(void *lport); -u32 unf_init_event_center(void *lport); - -extern struct task_struct *event_task_thread; -extern struct unf_global_event_queue global_event_queue; -extern struct unf_event_list fc_event_list; -#endif diff --git a/drivers/scsi/spfc/common/unf_exchg.c b/drivers/scsi/spfc/common/unf_exchg.c deleted file mode 100644 index ab35cc318b6f03cced62f9a69bb82c965f6b0102..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_exchg.c +++ /dev/null @@ -1,2317 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_exchg.h" -#include "unf_log.h" -#include "unf_common.h" -#include "unf_rport.h" -#include "unf_service.h" -#include "unf_io.h" -#include "unf_exchg_abort.h" - -#define SPFC_XCHG_TYPE_MASK 0xFFFF -#define UNF_DEL_XCHG_TIMER_SAFE(xchg) \ - do { \ - if (cancel_delayed_work(&((xchg)->timeout_work))) { \ - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, \ - "Exchange(0x%p) is free, but timer is pending.", \ - xchg); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_IO_ATT, \ - UNF_CRITICAL, \ - "Exchange(0x%p) is free, but timer is running.", \ - xchg); \ - } \ - } while (0) - -static struct unf_io_flow_id io_stage_table[] = { - {"XCHG_ALLOC"}, {"TGT_RECEIVE_ABTS"}, - {"TGT_ABTS_DONE"}, {"TGT_IO_SRR"}, - {"SFS_RESPONSE"}, {"SFS_TIMEOUT"}, - {"INI_SEND_CMND"}, {"INI_RESPONSE_DONE"}, - {"INI_EH_ABORT"}, {"INI_EH_DEVICE_RESET"}, - {"INI_EH_BLS_DONE"}, {"INI_IO_TIMEOUT"}, - {"INI_REQ_TIMEOUT"}, {"XCHG_CANCEL_TIMER"}, - {"XCHG_FREE_XCHG"}, {"SEND_ELS"}, - {"IO_XCHG_WAIT"}, -}; - -static void unf_init_xchg_attribute(struct unf_xchg *xchg); -static void unf_delay_work_del_syn(struct unf_xchg *xchg); -static void unf_free_lport_sfs_xchg(struct unf_xchg_mgr *xchg_mgr, - bool done_ini_flag); -static void unf_free_lport_destroy_xchg(struct unf_xchg_mgr *xchg_mgr); - -void unf_wake_up_scsi_task_cmnd(struct 
unf_lport *lport) -{ - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_xchg *xchg = NULL; - ulong hot_pool_lock_flags = 0; - ulong xchg_flag = 0; - struct unf_xchg_mgr *xchg_mgrs = NULL; - u32 i; - - FC_CHECK_RETURN_VOID(lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - xchg_mgrs = unf_get_xchg_mgr_by_lport(lport, i); - - if (!xchg_mgrs) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MINOR, - "Can't find LPort(0x%x) MgrIdx %u exchange manager.", - lport->port_id, i); - continue; - } - - spin_lock_irqsave(&xchg_mgrs->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - list_for_each_safe(node, next_node, - (&xchg_mgrs->hot_pool->ini_busylist)) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - if (INI_IO_STATE_UPTASK & xchg->io_state && - (atomic_read(&xchg->ref_cnt) > 0)) { - UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS); - up(&xchg->task_sema); - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MINOR, - "Wake up task command exchange(0x%p), Hot Pool Tag(0x%x).", - xchg, xchg->hotpooltag); - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - } - - spin_unlock_irqrestore(&xchg_mgrs->hot_pool->xchg_hotpool_lock, - hot_pool_lock_flags); - } -} - -void *unf_cm_get_free_xchg(void *lport, u32 xchg_type) -{ - struct unf_lport *unf_lport = NULL; - struct unf_cm_xchg_mgr_template *xchg_mgr_temp = NULL; - - FC_CHECK_RETURN_VALUE(unlikely(lport), NULL); - - unf_lport = (struct unf_lport *)lport; - xchg_mgr_temp = &unf_lport->xchg_mgr_temp; - - /* Find the corresponding Lport Xchg management template. */ - FC_CHECK_RETURN_VALUE(unlikely(xchg_mgr_temp->unf_xchg_get_free_and_init), NULL); - - return xchg_mgr_temp->unf_xchg_get_free_and_init(unf_lport, xchg_type); -} - -void unf_cm_free_xchg(void *lport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_cm_xchg_mgr_template *xchg_mgr_temp = NULL; - - FC_CHECK_RETURN_VOID(unlikely(lport)); - FC_CHECK_RETURN_VOID(unlikely(xchg)); - - unf_lport = (struct unf_lport *)lport; - xchg_mgr_temp = &unf_lport->xchg_mgr_temp; - FC_CHECK_RETURN_VOID(unlikely(xchg_mgr_temp->unf_xchg_release)); - - /* - * unf_cm_free_xchg --->>> unf_free_xchg - * --->>> unf_xchg_ref_dec --->>> unf_free_fcp_xchg --->>> - * unf_done_ini_xchg - */ - xchg_mgr_temp->unf_xchg_release(lport, xchg); -} - -void *unf_cm_lookup_xchg_by_tag(void *lport, u16 hot_pool_tag) -{ - struct unf_lport *unf_lport = NULL; - struct unf_cm_xchg_mgr_template *xchg_mgr_temp = NULL; - - FC_CHECK_RETURN_VALUE(unlikely(lport), NULL); - - /* Find the corresponding Lport Xchg management template */ - unf_lport = (struct unf_lport *)lport; - xchg_mgr_temp = &unf_lport->xchg_mgr_temp; - - FC_CHECK_RETURN_VALUE(unlikely(xchg_mgr_temp->unf_look_up_xchg_by_tag), NULL); - - return xchg_mgr_temp->unf_look_up_xchg_by_tag(lport, hot_pool_tag); -} - -void *unf_cm_lookup_xchg_by_id(void *lport, u16 ox_id, u32 oid) -{ - struct unf_lport *unf_lport = NULL; - struct unf_cm_xchg_mgr_template *xchg_mgr_temp = NULL; - - FC_CHECK_RETURN_VALUE(unlikely(lport), NULL); - - unf_lport = (struct unf_lport *)lport; - xchg_mgr_temp = &unf_lport->xchg_mgr_temp; - - /* Find the corresponding Lport Xchg management template */ - FC_CHECK_RETURN_VALUE(unlikely(xchg_mgr_temp->unf_look_up_xchg_by_id), NULL); - - return xchg_mgr_temp->unf_look_up_xchg_by_id(lport, ox_id, oid); -} - -struct unf_xchg *unf_cm_lookup_xchg_by_cmnd_sn(void *lport, u64 command_sn, - u32 world_id, void *pinitiator) -{ - struct unf_lport *unf_lport = NULL; - 
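/*
 * Like the other unf_cm_* wrappers above, this lookup only
 * trampolines through the per-lport unf_cm_xchg_mgr_template ops
 * table, roughly:
 *
 *	if (tmpl->unf_look_up_xchg_by_cmnd_sn)
 *		xchg = tmpl->unf_look_up_xchg_by_cmnd_sn(lport, sn,
 *							 world_id, ini);
 *
 * (tmpl, sn and ini are shorthand here), so the common code never
 * binds to one exchange-manager implementation directly.
 */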
struct unf_cm_xchg_mgr_template *xchg_mgr_temp = NULL; - struct unf_xchg *xchg = NULL; - - FC_CHECK_RETURN_VALUE(unlikely(lport), NULL); - - unf_lport = (struct unf_lport *)lport; - xchg_mgr_temp = &unf_lport->xchg_mgr_temp; - - FC_CHECK_RETURN_VALUE(unlikely(xchg_mgr_temp->unf_look_up_xchg_by_cmnd_sn), NULL); - - xchg = (struct unf_xchg *)xchg_mgr_temp->unf_look_up_xchg_by_cmnd_sn(unf_lport, - command_sn, - world_id, - pinitiator); - - return xchg; -} - -static u32 unf_init_xchg(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr, - u32 xchg_sum, u32 sfs_sum) -{ - struct unf_xchg *xchg_mem = NULL; - union unf_sfs_u *sfs_mm_start = NULL; - dma_addr_t sfs_dma_addr; - struct unf_xchg *xchg = NULL; - struct unf_xchg_free_pool *free_pool = NULL; - ulong flags = 0; - u32 i = 0; - - FC_CHECK_RETURN_VALUE((sfs_sum <= xchg_sum), UNF_RETURN_ERROR); - - free_pool = &xchg_mgr->free_pool; - xchg_mem = xchg_mgr->fcp_mm_start; - xchg = xchg_mem; - - sfs_mm_start = (union unf_sfs_u *)xchg_mgr->sfs_mm_start; - sfs_dma_addr = xchg_mgr->sfs_phy_addr; - /* 1. Allocate the SFS UNION memory to each SFS XCHG - * and mount the SFS XCHG to the corresponding FREE linked list - */ - free_pool->total_sfs_xchg = 0; - free_pool->sfs_xchg_sum = sfs_sum; - for (i = 0; i < sfs_sum; i++) { - INIT_LIST_HEAD(&xchg->list_xchg_entry); - INIT_LIST_HEAD(&xchg->list_esgls); - spin_lock_init(&xchg->xchg_state_lock); - sema_init(&xchg->task_sema, 0); - sema_init(&xchg->echo_info.echo_sync_sema, 0); - - spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags); - xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr = sfs_mm_start; - xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr = sfs_dma_addr; - xchg->fcp_sfs_union.sfs_entry.sfs_buff_len = sizeof(*sfs_mm_start); - list_add_tail(&xchg->list_xchg_entry, &free_pool->list_sfs_xchg_list); - free_pool->total_sfs_xchg++; - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); - - sfs_mm_start++; - sfs_dma_addr = sfs_dma_addr + sizeof(union unf_sfs_u); - xchg++; - } - - free_pool->total_fcp_xchg = 0; - - for (i = 0; (i < xchg_sum - sfs_sum); i++) { - INIT_LIST_HEAD(&xchg->list_xchg_entry); - - INIT_LIST_HEAD(&xchg->list_esgls); - spin_lock_init(&xchg->xchg_state_lock); - sema_init(&xchg->task_sema, 0); - sema_init(&xchg->echo_info.echo_sync_sema, 0); - - /* alloc dma buffer for fcp_rsp_iu */ - spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags); - list_add_tail(&xchg->list_xchg_entry, &free_pool->list_free_xchg_list); - free_pool->total_fcp_xchg++; - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); - - xchg++; - } - - free_pool->fcp_xchg_sum = free_pool->total_fcp_xchg; - - return RETURN_OK; -} - -static u32 unf_get_xchg_config_sum(struct unf_lport *lport, u32 *xchg_sum) -{ - struct unf_lport_cfg_item *lport_cfg_items = NULL; - - lport_cfg_items = &lport->low_level_func.lport_cfg_items; - - /* It has been checked at the bottom layer. Don't need to check it - * again. 
- */ - *xchg_sum = lport_cfg_items->max_sfs_xchg + lport_cfg_items->max_io; - if ((*xchg_sum / UNF_EXCHG_MGR_NUM) == 0 || - lport_cfg_items->max_sfs_xchg / UNF_EXCHG_MGR_NUM == 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) Xchgsum(%u) or SfsXchg(%u) is less than ExchangeMgrNum(%u).", - lport->port_id, *xchg_sum, lport_cfg_items->max_sfs_xchg, - UNF_EXCHG_MGR_NUM); - return UNF_RETURN_ERROR; - } - - if (*xchg_sum > (INVALID_VALUE16 - 1)) { - /* If the format of ox_id/rx_id is exceeded, this function is - * not supported - */ - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) Exchange num(0x%x) is Too Big.", - lport->port_id, *xchg_sum); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -static void unf_xchg_cancel_timer(void *xchg) -{ - struct unf_xchg *tmp_xchg = NULL; - bool need_dec_xchg_ref = false; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - tmp_xchg = (struct unf_xchg *)xchg; - - spin_lock_irqsave(&tmp_xchg->xchg_state_lock, flag); - if (cancel_delayed_work(&tmp_xchg->timeout_work)) - need_dec_xchg_ref = true; - - spin_unlock_irqrestore(&tmp_xchg->xchg_state_lock, flag); - - if (need_dec_xchg_ref) - unf_xchg_ref_dec(xchg, XCHG_CANCEL_TIMER); -} - -void unf_show_all_xchg(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *xchg = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(xchg_mgr); - - unf_lport = lport; - - /* hot Xchg */ - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, flags); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, "INI busy :"); - list_for_each_safe(xchg_node, next_xchg_node, &xchg_mgr->hot_pool->ini_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", - xchg, (u32)xchg->hotpooltag, (u32)xchg->xchg_type, - (u32)xchg->oxid, (u32)xchg->rxid, (u32)xchg->sid, (u32)xchg->did, - atomic_read(&xchg->ref_cnt), (u32)xchg->io_state, xchg->alloc_jif); - } - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, "SFS :"); - list_for_each_safe(xchg_node, next_xchg_node, &xchg_mgr->hot_pool->sfs_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "0x%p---0x%x---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", - xchg, xchg->cmnd_code, (u32)xchg->hotpooltag, - (u32)xchg->xchg_type, (u32)xchg->oxid, (u32)xchg->rxid, (u32)xchg->sid, - (u32)xchg->did, atomic_read(&xchg->ref_cnt), - (u32)xchg->io_state, xchg->alloc_jif); - } - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, "Destroy list."); - list_for_each_safe(xchg_node, next_xchg_node, &xchg_mgr->hot_pool->list_destroy_xchg) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", - xchg, (u32)xchg->hotpooltag, (u32)xchg->xchg_type, - (u32)xchg->oxid, (u32)xchg->rxid, (u32)xchg->sid, (u32)xchg->did, - atomic_read(&xchg->ref_cnt), (u32)xchg->io_state, xchg->alloc_jif); - } - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, flags); -} - -static u32 unf_free_lport_xchg(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr) -{ -#define UNF_OS_WAITIO_TIMEOUT (10 * 1000) - - ulong free_pool_lock_flags = 0; - bool wait = false; - u32 total_xchg = 0; - u32 
total_xchg_sum = 0; - u32 ret = RETURN_OK; - u64 time_out = 0; - struct completion xchg_mgr_completion; - - init_completion(&xchg_mgr_completion); - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg_mgr, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg_mgr->hot_pool, UNF_RETURN_ERROR); - - unf_free_lport_sfs_xchg(xchg_mgr, false); - - /* free INI Mode exchanges belong to L_Port */ - unf_free_lport_ini_xchg(xchg_mgr, false); - - spin_lock_irqsave(&xchg_mgr->free_pool.xchg_freepool_lock, free_pool_lock_flags); - total_xchg = xchg_mgr->free_pool.total_fcp_xchg + xchg_mgr->free_pool.total_sfs_xchg; - total_xchg_sum = xchg_mgr->free_pool.fcp_xchg_sum + xchg_mgr->free_pool.sfs_xchg_sum; - if (total_xchg != total_xchg_sum) { - xchg_mgr->free_pool.xchg_mgr_completion = &xchg_mgr_completion; - wait = true; - } - spin_unlock_irqrestore(&xchg_mgr->free_pool.xchg_freepool_lock, free_pool_lock_flags); - - if (wait) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to wait for exchange manager completion (0x%x:0x%x)", - lport->port_id, total_xchg, total_xchg_sum); - - unf_show_all_xchg(lport, xchg_mgr); - - time_out = wait_for_completion_timeout(xchg_mgr->free_pool.xchg_mgr_completion, - msecs_to_jiffies(UNF_OS_WAITIO_TIMEOUT)); - if (time_out == 0) - unf_free_lport_destroy_xchg(xchg_mgr); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) wait for exchange manager completion end", - lport->port_id); - - spin_lock_irqsave(&xchg_mgr->free_pool.xchg_freepool_lock, free_pool_lock_flags); - xchg_mgr->free_pool.xchg_mgr_completion = NULL; - spin_unlock_irqrestore(&xchg_mgr->free_pool.xchg_freepool_lock, - free_pool_lock_flags); - } - - return ret; -} - -void unf_free_lport_all_xchg(struct unf_lport *lport) -{ - struct unf_xchg_mgr *xchg_mgr = NULL; - u32 i; - - FC_CHECK_RETURN_VOID(lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - xchg_mgr = unf_get_xchg_mgr_by_lport(lport, i); - ; - if (unlikely(!xchg_mgr)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) hot pool is NULL", - lport->port_id); - - continue; - } - unf_free_lport_sfs_xchg(xchg_mgr, false); - - /* free INI Mode exchanges belong to L_Port */ - unf_free_lport_ini_xchg(xchg_mgr, false); - - unf_free_lport_destroy_xchg(xchg_mgr); - } -} - -static void unf_delay_work_del_syn(struct unf_xchg *xchg) -{ - FC_CHECK_RETURN_VOID(xchg); - - /* synchronous release timer */ - if (!cancel_delayed_work_sync(&xchg->timeout_work)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Exchange(0x%p), State(0x%x) can't delete work timer, timer is running or no timer.", - xchg, xchg->io_state); - } else { - /* The reference count cannot be directly subtracted. - * This prevents the XCHG from being moved to the Free linked - * list when the card is unloaded. - */ - unf_cm_free_xchg(xchg->lport, xchg); - } -} - -static void unf_free_lport_sfs_xchg(struct unf_xchg_mgr *xchg_mgr, bool done_ini_flag) -{ - struct list_head *list = NULL; - struct unf_xchg *xchg = NULL; - ulong hot_pool_lock_flags = 0; - - FC_CHECK_RETURN_VOID(xchg_mgr); - FC_CHECK_RETURN_VOID(xchg_mgr->hot_pool); - - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - while (!list_empty(&xchg_mgr->hot_pool->sfs_busylist)) { - list = UNF_OS_LIST_NEXT(&xchg_mgr->hot_pool->sfs_busylist); - list_del_init(list); - - /* Prevent the xchg of the sfs from being accessed repeatedly. - * The xchg is first mounted to the destroy linked list. 
- */ - list_add_tail(list, &xchg_mgr->hot_pool->list_destroy_xchg); - - xchg = list_entry(list, struct unf_xchg, list_xchg_entry); - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - unf_delay_work_del_syn(xchg); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Free SFS Exchange(0x%p), State(0x%x), Reference count(%d), Start time(%llu).", - xchg, xchg->io_state, atomic_read(&xchg->ref_cnt), xchg->alloc_jif); - - unf_cm_free_xchg(xchg->lport, xchg); - - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - } - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); -} - -void unf_free_lport_ini_xchg(struct unf_xchg_mgr *xchg_mgr, bool done_ini_flag) -{ - struct list_head *list = NULL; - struct unf_xchg *xchg = NULL; - ulong hot_pool_lock_flags = 0; - u32 up_status = 0; - - FC_CHECK_RETURN_VOID(xchg_mgr); - FC_CHECK_RETURN_VOID(xchg_mgr->hot_pool); - - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - while (!list_empty(&xchg_mgr->hot_pool->ini_busylist)) { - /* for each INI busy_list (exchange) node */ - list = UNF_OS_LIST_NEXT(&xchg_mgr->hot_pool->ini_busylist); - - /* Put exchange node to destroy_list, prevent done repeatly */ - list_del_init(list); - list_add_tail(list, &xchg_mgr->hot_pool->list_destroy_xchg); - xchg = list_entry(list, struct unf_xchg, list_xchg_entry); - if (atomic_read(&xchg->ref_cnt) <= 0) - continue; - - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, - hot_pool_lock_flags); - unf_delay_work_del_syn(xchg); - - /* In the case of INI done, the command should be set to fail to - * prevent data inconsistency caused by the return of OK - */ - up_status = unf_get_up_level_cmnd_errcode(xchg->scsi_cmnd_info.err_code_table, - xchg->scsi_cmnd_info.err_code_table_cout, - UNF_IO_PORT_LOGOUT); - - if (INI_IO_STATE_UPABORT & xchg->io_state) { - /* - * About L_Port destroy: - * UP_ABORT ---to--->>> ABORT_Port_Removing - */ - up_status = UNF_IO_ABORT_PORT_REMOVING; - } - - xchg->scsi_cmnd_info.result = up_status; - up(&xchg->task_sema); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Free INI exchange(0x%p) state(0x%x) reference count(%d) start time(%llu)", - xchg, xchg->io_state, atomic_read(&xchg->ref_cnt), xchg->alloc_jif); - - unf_cm_free_xchg(xchg->lport, xchg); - - /* go to next INI busy_list (exchange) node */ - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - } - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); -} - -static void unf_free_lport_destroy_xchg(struct unf_xchg_mgr *xchg_mgr) -{ -#define UNF_WAIT_DESTROY_EMPTY_STEP_MS 1000 -#define UNF_WAIT_IO_STATE_TGT_FRONT_MS (10 * 1000) - - struct unf_xchg *xchg = NULL; - struct list_head *next_xchg_node = NULL; - ulong hot_pool_lock_flags = 0; - ulong xchg_flag = 0; - - FC_CHECK_RETURN_VOID(xchg_mgr); - FC_CHECK_RETURN_VOID(xchg_mgr->hot_pool); - - /* In this case, the timer on the destroy linked list is deleted. - * You only need to check whether the timer is released at the end of - * the tgt. 
-	spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags);
-	while (!list_empty(&xchg_mgr->hot_pool->list_destroy_xchg)) {
-		next_xchg_node = UNF_OS_LIST_NEXT(&xchg_mgr->hot_pool->list_destroy_xchg);
-		xchg = list_entry(next_xchg_node, struct unf_xchg, list_xchg_entry);
-
-		spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag);
-
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
-			     "Free Exchange(0x%p), Type(0x%x), State(0x%x), Reference count(%d), Start time(%llu)",
-			     xchg, xchg->xchg_type, xchg->io_state,
-			     atomic_read(&xchg->ref_cnt), xchg->alloc_jif);
-
-		spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag);
-		spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags);
-
-		/* This call either cancels the timer successfully or waits
-		 * until the timer handler has finished running
-		 */
-		unf_delay_work_del_syn(xchg);
-
-		/*
-		 * If the timer was cancelled successfully, free the Xchg here.
-		 * If the timer already fired, the Xchg may have been released;
-		 * in that case this free simply fails
-		 */
-		unf_cm_free_xchg(xchg->lport, xchg);
-
-		spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags);
-	}
-
-	spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags);
-}
-
-static void unf_free_all_big_sfs(struct unf_xchg_mgr *xchg_mgr)
-{
-	struct unf_big_sfs *big_sfs = NULL;
-	struct list_head *node = NULL;
-	struct list_head *next_node = NULL;
-	ulong flag = 0;
-	u32 i;
-
-	FC_CHECK_RETURN_VOID(xchg_mgr);
-
-	/* Move entries still on the busy pool back to the free pool, then
-	 * release everything
-	 */
-	spin_lock_irqsave(&xchg_mgr->big_sfs_pool.big_sfs_pool_lock, flag);
-	list_for_each_safe(node, next_node, &xchg_mgr->big_sfs_pool.list_busypool) {
-		list_del(node);
-		list_add_tail(node, &xchg_mgr->big_sfs_pool.list_freepool);
-	}
-
-	list_for_each_safe(node, next_node, &xchg_mgr->big_sfs_pool.list_freepool) {
-		list_del(node);
-		big_sfs = list_entry(node, struct unf_big_sfs, entry_bigsfs);
-		big_sfs->addr = NULL;
-	}
-	spin_unlock_irqrestore(&xchg_mgr->big_sfs_pool.big_sfs_pool_lock, flag);
-
-	if (xchg_mgr->big_sfs_buf_list.buflist) {
-		for (i = 0; i < xchg_mgr->big_sfs_buf_list.buf_num; i++) {
-			kfree(xchg_mgr->big_sfs_buf_list.buflist[i].vaddr);
-			xchg_mgr->big_sfs_buf_list.buflist[i].vaddr = NULL;
-		}
-
-		kfree(xchg_mgr->big_sfs_buf_list.buflist);
-		xchg_mgr->big_sfs_buf_list.buflist = NULL;
-	}
-}
-
-static void unf_free_big_sfs_pool(struct unf_xchg_mgr *xchg_mgr)
-{
-	FC_CHECK_RETURN_VOID(xchg_mgr);
-
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO,
-		     "Free Big SFS Pool, Count(0x%x).",
-		     xchg_mgr->big_sfs_pool.free_count);
-
-	unf_free_all_big_sfs(xchg_mgr);
-	xchg_mgr->big_sfs_pool.free_count = 0;
-
-	if (xchg_mgr->big_sfs_pool.big_sfs_pool) {
-		vfree(xchg_mgr->big_sfs_pool.big_sfs_pool);
-		xchg_mgr->big_sfs_pool.big_sfs_pool = NULL;
-	}
-}
-
-static void unf_free_xchg_mgr_mem(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr)
-{
-	struct unf_xchg *xchg = NULL;
-	u32 i = 0;
-	u32 xchg_sum = 0;
-	struct unf_xchg_free_pool *free_pool = NULL;
-
-	FC_CHECK_RETURN_VOID(xchg_mgr);
-
-	unf_free_big_sfs_pool(xchg_mgr);
-
-	/* The SFS area is released first. sfs_mm_start comes from the
-	 * page/DMA allocator, so it is checked against 0 rather than NULL
-	 */
-	if (xchg_mgr->sfs_mm_start != 0) {
-		dma_free_coherent(&lport->low_level_func.dev->dev, xchg_mgr->sfs_mem_size,
-				  xchg_mgr->sfs_mm_start, xchg_mgr->sfs_phy_addr);
-		xchg_mgr->sfs_mm_start = 0;
-	}
-
-	/* Release Xchg first */
-	if (xchg_mgr->fcp_mm_start) {
-		unf_get_xchg_config_sum(lport, &xchg_sum);
-		xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM;
-
-		xchg = xchg_mgr->fcp_mm_start;
-		for (i = 0; i < xchg_sum; i++) {
-			if (!xchg)
-				break;
-			xchg++;
-		}
-
-		vfree(xchg_mgr->fcp_mm_start);
-		xchg_mgr->fcp_mm_start = NULL;
-	}
-
-	/* release the hot pool */
-	if (xchg_mgr->hot_pool) {
-		vfree(xchg_mgr->hot_pool);
-		xchg_mgr->hot_pool = NULL;
-	}
-
-	free_pool = &xchg_mgr->free_pool;
-
-	vfree(xchg_mgr);
-}
-
-static void unf_free_xchg_mgr(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr)
-{
-	ulong flags = 0;
-	u32 ret = UNF_RETURN_ERROR;
-
-	FC_CHECK_RETURN_VOID(lport);
-	FC_CHECK_RETURN_VOID(xchg_mgr);
-
-	/* 1. At first, free exchanges for this Exch_Mgr */
-	ret = unf_free_lport_xchg(lport, xchg_mgr);
-
-	/* 2. Delete this Exch_Mgr entry */
-	spin_lock_irqsave(&lport->xchg_mgr_lock, flags);
-	list_del_init(&xchg_mgr->xchg_mgr_entry);
-	spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags);
-
-	/* 3. free Exch_Mgr memory if necessary */
-	if (ret == RETURN_OK) {
-		/* free memory directly */
-		unf_free_xchg_mgr_mem(lport, xchg_mgr);
-	} else {
-		/* Add it to the dirty list */
-		spin_lock_irqsave(&lport->xchg_mgr_lock, flags);
-		list_add_tail(&xchg_mgr->xchg_mgr_entry, &lport->list_drty_xchg_mgr_head);
-		spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags);
-
-		/* Mark dirty flag */
-		unf_cm_mark_dirty_mem(lport, UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY);
-	}
-}
-
-void unf_free_all_xchg_mgr(struct unf_lport *lport)
-{
-	struct unf_xchg_mgr *xchg_mgr = NULL;
-	ulong flags = 0;
-	u32 i = 0;
-
-	FC_CHECK_RETURN_VOID(lport);
-
-	/* for each L_Port->Exch_Mgr_List */
-	spin_lock_irqsave(&lport->xchg_mgr_lock, flags);
-	while (!list_empty(&lport->list_xchg_mgr_head)) {
-		spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags);
-
-		xchg_mgr = unf_get_xchg_mgr_by_lport(lport, i);
-		unf_free_xchg_mgr(lport, xchg_mgr);
-		if (i < UNF_EXCHG_MGR_NUM)
-			lport->xchg_mgr[i] = NULL;
-
-		i++;
-
-		/* go to next */
-		spin_lock_irqsave(&lport->xchg_mgr_lock, flags);
-	}
-	spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags);
-
-	lport->destroy_step = UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR;
-}
-
-static u32 unf_init_xchg_mgr(struct unf_xchg_mgr *xchg_mgr)
-{
-	FC_CHECK_RETURN_VALUE(xchg_mgr, UNF_RETURN_ERROR);
-
-	memset(xchg_mgr, 0, sizeof(struct unf_xchg_mgr));
-
-	INIT_LIST_HEAD(&xchg_mgr->xchg_mgr_entry);
-	xchg_mgr->fcp_mm_start = NULL;
-	xchg_mgr->mem_szie = sizeof(struct unf_xchg_mgr);
-
-	return RETURN_OK;
-}
-
-static u32 unf_init_xchg_mgr_free_pool(struct unf_xchg_mgr *xchg_mgr)
-{
-	struct unf_xchg_free_pool *free_pool = NULL;
-
-	FC_CHECK_RETURN_VALUE(xchg_mgr, UNF_RETURN_ERROR);
-
-	free_pool = &xchg_mgr->free_pool;
-	INIT_LIST_HEAD(&free_pool->list_free_xchg_list);
-	INIT_LIST_HEAD(&free_pool->list_sfs_xchg_list);
-	spin_lock_init(&free_pool->xchg_freepool_lock);
-	free_pool->fcp_xchg_sum = 0;
-	free_pool->xchg_mgr_completion = NULL;
-
-	return RETURN_OK;
-}
-
-static u32 unf_init_xchg_hot_pool(struct unf_lport *lport, struct unf_xchg_hot_pool *hot_pool,
-				  u32 xchg_sum)
-{
-	FC_CHECK_RETURN_VALUE(hot_pool, UNF_RETURN_ERROR);
-
-	INIT_LIST_HEAD(&hot_pool->sfs_busylist);
-	INIT_LIST_HEAD(&hot_pool->ini_busylist);
-	spin_lock_init(&hot_pool->xchg_hotpool_lock);
-	INIT_LIST_HEAD(&hot_pool->list_destroy_xchg);
-	hot_pool->total_xchges = 0;
-	hot_pool->wait_state = false;
-	hot_pool->lport = lport;
-
-	/* Slab Pool Index */
-	hot_pool->slab_next_index = 0;
-	UNF_TOU16_CHECK(hot_pool->slab_total_sum, xchg_sum, return UNF_RETURN_ERROR);
-
-	return RETURN_OK;
-}
-
-static u32 unf_alloc_and_init_big_sfs_pool(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr)
-{
-#define UNF_MAX_RESOURCE_RESERVED_FOR_RSCN 20
-#define UNF_BIG_SFS_POOL_TYPES 6
-	u32 i = 0;
-	u32 size = 0;
-	u32 align_size = 0;
-	u32 npiv_cnt = 0;
-	struct unf_big_sfs_pool *big_sfs_pool = NULL;
-	struct unf_big_sfs *big_sfs_buff = NULL;
-	u32 buf_total_size;
-	u32 buf_num;
-	u32 buf_cnt_per_huge_buf;
-	u32 alloc_idx;
-	u32 cur_buf_idx = 0;
-	u32 cur_buf_offset = 0;
-
-	FC_CHECK_RETURN_VALUE(xchg_mgr, UNF_RETURN_ERROR);
-	FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-	big_sfs_pool = &xchg_mgr->big_sfs_pool;
-
-	INIT_LIST_HEAD(&big_sfs_pool->list_freepool);
-	INIT_LIST_HEAD(&big_sfs_pool->list_busypool);
-	spin_lock_init(&big_sfs_pool->big_sfs_pool_lock);
-	npiv_cnt = lport->low_level_func.support_max_npiv_num;
-
-	/*
-	 * The factor UNF_BIG_SFS_POOL_TYPES (6) covers GID_PT/GID_FT, RSCN
-	 * and ECHO, including the case where another command is received
-	 * while one is still being responded to. A further 20 entries are
-	 * reserved for RSCN: during testing, bursts of RSCNs exhausted the
-	 * pool and caused discovery to fail.
-	 */
-	big_sfs_pool->free_count = (npiv_cnt + 1) * UNF_BIG_SFS_POOL_TYPES +
-				   UNF_MAX_RESOURCE_RESERVED_FOR_RSCN;
-	big_sfs_buff =
-	    (struct unf_big_sfs *)vmalloc(big_sfs_pool->free_count * sizeof(struct unf_big_sfs));
-	if (!big_sfs_buff) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR,
-			     "Allocate Big SFS buf fail.");
-
-		return UNF_RETURN_ERROR;
-	}
-	memset(big_sfs_buff, 0, big_sfs_pool->free_count * sizeof(struct unf_big_sfs));
-	xchg_mgr->mem_szie += (u32)(big_sfs_pool->free_count * sizeof(struct unf_big_sfs));
-	big_sfs_pool->big_sfs_pool = (void *)big_sfs_buff;
-
-	/*
-	 * Use the larger of sizeof(struct unf_gid_acc_pld) and
-	 * sizeof(struct unf_rscn_pld); unf_gid_acc_pld is known to be the
-	 * larger one, so it is assigned directly instead of being compared.
-	 */
-	size = sizeof(struct unf_gid_acc_pld);
-	align_size = ALIGN(size, PAGE_SIZE);
-
-	buf_total_size = align_size * big_sfs_pool->free_count;
-	xchg_mgr->big_sfs_buf_list.buf_size =
-	    buf_total_size > BUF_LIST_PAGE_SIZE ? BUF_LIST_PAGE_SIZE
-						: buf_total_size;
-
-	buf_cnt_per_huge_buf = xchg_mgr->big_sfs_buf_list.buf_size / align_size;
-	buf_num = big_sfs_pool->free_count % buf_cnt_per_huge_buf
-		      ? 
big_sfs_pool->free_count / buf_cnt_per_huge_buf + 1 - : big_sfs_pool->free_count / buf_cnt_per_huge_buf; - - xchg_mgr->big_sfs_buf_list.buflist = (struct buff_list *)kmalloc(buf_num * - sizeof(struct buff_list), GFP_KERNEL); - xchg_mgr->big_sfs_buf_list.buf_num = buf_num; - - if (!xchg_mgr->big_sfs_buf_list.buflist) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate BigSfs pool buf list failed out of memory"); - goto free_buff; - } - memset(xchg_mgr->big_sfs_buf_list.buflist, 0, buf_num * sizeof(struct buff_list)); - for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { - xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr = - kmalloc(xchg_mgr->big_sfs_buf_list.buf_size, GFP_ATOMIC); - if (xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr == - NULL) { - goto free_buff; - } - memset(xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr, 0, - xchg_mgr->big_sfs_buf_list.buf_size); - } - - for (i = 0; i < big_sfs_pool->free_count; i++) { - if (i != 0 && !(i % buf_cnt_per_huge_buf)) - cur_buf_idx++; - - cur_buf_offset = align_size * (i % buf_cnt_per_huge_buf); - big_sfs_buff->addr = xchg_mgr->big_sfs_buf_list.buflist[cur_buf_idx].vaddr + - cur_buf_offset; - big_sfs_buff->size = size; - xchg_mgr->mem_szie += size; - list_add_tail(&big_sfs_buff->entry_bigsfs, &big_sfs_pool->list_freepool); - big_sfs_buff++; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[EVENT]Allocate BigSfs pool size:%d,align_size:%d,buf_num:%u,buf_size:%u", - size, align_size, xchg_mgr->big_sfs_buf_list.buf_num, - xchg_mgr->big_sfs_buf_list.buf_size); - return RETURN_OK; -free_buff: - unf_free_all_big_sfs(xchg_mgr); - vfree(big_sfs_buff); - big_sfs_pool->big_sfs_pool = NULL; - return UNF_RETURN_ERROR; -} - -static void unf_free_one_big_sfs(struct unf_xchg *xchg) -{ - ulong flag = 0; - struct unf_xchg_mgr *xchg_mgr = NULL; - - FC_CHECK_RETURN_VOID(xchg); - xchg_mgr = xchg->xchg_mgr; - FC_CHECK_RETURN_VOID(xchg_mgr); - if (!xchg->big_sfs_buf) - return; - - if (xchg->cmnd_code != NS_GID_PT && xchg->cmnd_code != NS_GID_FT && - xchg->cmnd_code != ELS_ECHO && - xchg->cmnd_code != (UNF_SET_ELS_ACC_TYPE(ELS_ECHO)) && xchg->cmnd_code != ELS_RSCN && - xchg->cmnd_code != (UNF_SET_ELS_ACC_TYPE(ELS_RSCN))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Exchange(0x%p), Command(0x%x) big SFS buf is not NULL.", - xchg, xchg->cmnd_code); - } - - spin_lock_irqsave(&xchg_mgr->big_sfs_pool.big_sfs_pool_lock, flag); - list_del(&xchg->big_sfs_buf->entry_bigsfs); - list_add_tail(&xchg->big_sfs_buf->entry_bigsfs, - &xchg_mgr->big_sfs_pool.list_freepool); - xchg_mgr->big_sfs_pool.free_count++; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Free one big SFS buf(0x%p), Count(0x%x), Exchange(0x%p), Command(0x%x).", - xchg->big_sfs_buf->addr, xchg_mgr->big_sfs_pool.free_count, - xchg, xchg->cmnd_code); - spin_unlock_irqrestore(&xchg_mgr->big_sfs_pool.big_sfs_pool_lock, flag); -} - -static void unf_free_exchg_mgr_info(struct unf_lport *lport) -{ - u32 i; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - struct unf_xchg_mgr *xchg_mgr = NULL; - - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - list_for_each_safe(node, next_node, &lport->list_xchg_mgr_head) { - list_del(node); - xchg_mgr = list_entry(node, struct unf_xchg_mgr, xchg_mgr_entry); - } - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - xchg_mgr = lport->xchg_mgr[i]; - - if (xchg_mgr) { - unf_free_big_sfs_pool(xchg_mgr); - - if (xchg_mgr->sfs_mm_start) { - 
dma_free_coherent(&lport->low_level_func.dev->dev, - xchg_mgr->sfs_mem_size, xchg_mgr->sfs_mm_start, - xchg_mgr->sfs_phy_addr); - xchg_mgr->sfs_mm_start = 0; - } - - if (xchg_mgr->fcp_mm_start) { - vfree(xchg_mgr->fcp_mm_start); - xchg_mgr->fcp_mm_start = NULL; - } - - if (xchg_mgr->hot_pool) { - vfree(xchg_mgr->hot_pool); - xchg_mgr->hot_pool = NULL; - } - - vfree(xchg_mgr); - lport->xchg_mgr[i] = NULL; - } - } -} - -static u32 unf_alloc_and_init_xchg_mgr(struct unf_lport *lport) -{ - struct unf_xchg_mgr *xchg_mgr = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct unf_xchg *xchg_mem = NULL; - void *sfs_mm_start = 0; - dma_addr_t sfs_phy_addr = 0; - u32 xchg_sum = 0; - u32 sfs_xchg_sum = 0; - ulong flags = 0; - u32 ret = UNF_RETURN_ERROR; - u32 slab_num = 0; - u32 i = 0; - - ret = unf_get_xchg_config_sum(lport, &xchg_sum); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) can't get Exchange.", lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* SFS Exchange Sum */ - sfs_xchg_sum = lport->low_level_func.lport_cfg_items.max_sfs_xchg / - UNF_EXCHG_MGR_NUM; - xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM; - slab_num = lport->low_level_func.support_max_hot_tag_range / UNF_EXCHG_MGR_NUM; - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - /* Alloc Exchange Manager */ - xchg_mgr = (struct unf_xchg_mgr *)vmalloc(sizeof(struct unf_xchg_mgr)); - if (!xchg_mgr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) allocate Exchange Manager Memory Fail.", - lport->port_id); - goto exit; - } - - /* Init Exchange Manager */ - ret = unf_init_xchg_mgr(xchg_mgr); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) initialization Exchange Manager unsuccessful.", - lport->port_id); - goto free_xchg_mgr; - } - - /* Initialize the Exchange Free Pool resource */ - ret = unf_init_xchg_mgr_free_pool(xchg_mgr); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) initialization Exchange Manager Free Pool unsuccessful.", - lport->port_id); - goto free_xchg_mgr; - } - - /* Allocate memory for Hot Pool and Xchg slab */ - hot_pool = vmalloc(sizeof(struct unf_xchg_hot_pool) + - sizeof(struct unf_xchg *) * slab_num); - if (!hot_pool) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) allocate Hot Pool Memory Fail.", - lport->port_id); - goto free_xchg_mgr; - } - memset(hot_pool, 0, - sizeof(struct unf_xchg_hot_pool) + sizeof(struct unf_xchg *) * slab_num); - - xchg_mgr->mem_szie += (u32)(sizeof(struct unf_xchg_hot_pool) + - sizeof(struct unf_xchg *) * slab_num); - /* Initialize the Exchange Hot Pool resource */ - ret = unf_init_xchg_hot_pool(lport, hot_pool, slab_num); - if (ret != RETURN_OK) - goto free_hot_pool; - - hot_pool->base += (u16)(i * slab_num); - /* Allocate the memory of all Xchg (IO/SFS) */ - xchg_mem = vmalloc(sizeof(struct unf_xchg) * xchg_sum); - if (!xchg_mem) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) allocate Exchange Memory Fail.", - lport->port_id); - goto free_hot_pool; - } - memset(xchg_mem, 0, sizeof(struct unf_xchg) * xchg_sum); - - xchg_mgr->mem_szie += (u32)(sizeof(struct unf_xchg) * xchg_sum); - xchg_mgr->hot_pool = hot_pool; - xchg_mgr->fcp_mm_start = xchg_mem; - /* Allocate the memory used by the SFS Xchg to carry the - * ELS/BLS/GS command and response - */ - xchg_mgr->sfs_mem_size = (u32)(sizeof(union unf_sfs_u) * sfs_xchg_sum); - - /* Apply for the DMA space for sending sfs frames. 
-	 * DMA32 addresses stay below 4 GB, so the buffer cannot cross the
-	 * 4 GB boundary
-	 */
-	sfs_mm_start = dma_alloc_coherent(&lport->low_level_func.dev->dev,
-					  xchg_mgr->sfs_mem_size,
-					  &sfs_phy_addr, GFP_KERNEL);
-	if (!sfs_mm_start) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR,
-			     "Port(0x%x) Get Free Pages Fail.",
-			     lport->port_id);
-		goto free_xchg_mem;
-	}
-	memset(sfs_mm_start, 0, sizeof(union unf_sfs_u) * sfs_xchg_sum);
-
-	xchg_mgr->mem_szie += xchg_mgr->sfs_mem_size;
-	xchg_mgr->sfs_mm_start = sfs_mm_start;
-	xchg_mgr->sfs_phy_addr = sfs_phy_addr;
-	/* The Xchg is initialized and mounted to the Free Pool */
-	ret = unf_init_xchg(lport, xchg_mgr, xchg_sum, sfs_xchg_sum);
-	if (ret != RETURN_OK) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
-			     "Port(0x%x) initialization Exchange unsuccessful, Exchange Number(%d), SFS Exchange number(%d).",
-			     lport->port_id, xchg_sum, sfs_xchg_sum);
-		dma_free_coherent(&lport->low_level_func.dev->dev, xchg_mgr->sfs_mem_size,
-				  xchg_mgr->sfs_mm_start, xchg_mgr->sfs_phy_addr);
-		xchg_mgr->sfs_mm_start = 0;
-		goto free_xchg_mem;
-	}
-
-	/* Apply for the memory used by GID_PT, GID_FT, and RSCN */
-	ret = unf_alloc_and_init_big_sfs_pool(lport, xchg_mgr);
-	if (ret != RETURN_OK) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR,
-			     "Port(0x%x) allocate big SFS fail", lport->port_id);
-		dma_free_coherent(&lport->low_level_func.dev->dev, xchg_mgr->sfs_mem_size,
-				  xchg_mgr->sfs_mm_start, xchg_mgr->sfs_phy_addr);
-		xchg_mgr->sfs_mm_start = 0;
-		goto free_xchg_mem;
-	}
-
-	spin_lock_irqsave(&lport->xchg_mgr_lock, flags);
-	lport->xchg_mgr[i] = (void *)xchg_mgr;
-	list_add_tail(&xchg_mgr->xchg_mgr_entry, &lport->list_xchg_mgr_head);
-	spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags);
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
-		     "[info]Port(0x%x) ExchangeMgr:(0x%p),Base:(0x%x).",
-		     lport->port_id, lport->xchg_mgr[i], hot_pool->base);
-	}
-
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO,
-		     "Port(0x%x) allocate Exchange Manager size(0x%x).",
-		     lport->port_id, xchg_mgr->mem_szie);
-	return RETURN_OK;
-free_xchg_mem:
-	vfree(xchg_mem);
-free_hot_pool:
-	vfree(hot_pool);
-free_xchg_mgr:
-	vfree(xchg_mgr);
-exit:
-	unf_free_exchg_mgr_info(lport);
-	return UNF_RETURN_ERROR;
-}
-
-void unf_xchg_mgr_destroy(struct unf_lport *lport)
-{
-	FC_CHECK_RETURN_VOID(lport);
-
-	unf_free_all_xchg_mgr(lport);
-}
-
-u32 unf_alloc_xchg_resource(struct unf_lport *lport)
-{
-	FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-
-	INIT_LIST_HEAD(&lport->list_drty_xchg_mgr_head);
-	INIT_LIST_HEAD(&lport->list_xchg_mgr_head);
-	spin_lock_init(&lport->xchg_mgr_lock);
-
-	/* LPort Xchg Management Unit Alloc */
-	if (unf_alloc_and_init_xchg_mgr(lport) != RETURN_OK)
-		return UNF_RETURN_ERROR;
-
-	return RETURN_OK;
-}
-
-void unf_destroy_dirty_xchg(struct unf_lport *lport, bool show_only)
-{
-	u32 dirty_xchg = 0;
-	struct unf_xchg_mgr *xchg_mgr = NULL;
-	ulong flags = 0;
-	struct list_head *node = NULL;
-	struct list_head *next_node = NULL;
-
-	FC_CHECK_RETURN_VOID(lport);
-
-	if (lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) {
-		spin_lock_irqsave(&lport->xchg_mgr_lock, flags);
-		list_for_each_safe(node, next_node, &lport->list_drty_xchg_mgr_head) {
-			xchg_mgr = list_entry(node, struct unf_xchg_mgr, xchg_mgr_entry);
-			spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags);
-			if (xchg_mgr) {
-				dirty_xchg = (xchg_mgr->free_pool.total_fcp_xchg +
-					      xchg_mgr->free_pool.total_sfs_xchg);
-
-				FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
-					     "[info]Port(0x%x) has %u dirty exchange(s)",
-					     lport->port_id, dirty_xchg);
-
-				
unf_show_all_xchg(lport, xchg_mgr); - - if (!show_only) { - /* Delete Dirty Exchange Mgr entry */ - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - list_del_init(&xchg_mgr->xchg_mgr_entry); - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - - /* Free Dirty Exchange Mgr memory */ - unf_free_xchg_mgr_mem(lport, xchg_mgr); - } - } - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - } - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - } -} - -struct unf_xchg_mgr *unf_get_xchg_mgr_by_lport(struct unf_lport *lport, u32 idx) -{ - struct unf_xchg_mgr *xchg_mgr = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE((idx < UNF_EXCHG_MGR_NUM), NULL); - - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - xchg_mgr = lport->xchg_mgr[idx]; - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - - return xchg_mgr; -} - -struct unf_xchg_hot_pool *unf_get_hot_pool_by_lport(struct unf_lport *lport, - u32 mgr_idx) -{ - struct unf_xchg_mgr *xchg_mgr = NULL; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)(lport->root_lport); - - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - - /* Get Xchg Manager */ - xchg_mgr = unf_get_xchg_mgr_by_lport(unf_lport, mgr_idx); - if (!xchg_mgr) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) Exchange Manager is NULL.", - unf_lport->port_id); - - return NULL; - } - - /* Get Xchg Manager Hot Pool */ - return xchg_mgr->hot_pool; -} - -static inline void unf_hot_pool_slab_set(struct unf_xchg_hot_pool *hot_pool, - u16 slab_index, struct unf_xchg *xchg) -{ - FC_CHECK_RETURN_VOID(hot_pool); - - hot_pool->xchg_slab[slab_index] = xchg; -} - -static inline struct unf_xchg *unf_get_xchg_by_xchg_tag(struct unf_xchg_hot_pool *hot_pool, - u16 slab_index) -{ - FC_CHECK_RETURN_VALUE(hot_pool, NULL); - - return hot_pool->xchg_slab[slab_index]; -} - -static void *unf_look_up_xchg_by_tag(void *lport, u16 hot_pool_tag) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct unf_xchg *xchg = NULL; - ulong flags = 0; - u32 exchg_mgr_idx = 0; - struct unf_xchg_mgr *xchg_mgr = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - /* In the case of NPIV, lport is the Vport pointer, - * the share uses the ExchMgr of RootLport - */ - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - - exchg_mgr_idx = (hot_pool_tag * UNF_EXCHG_MGR_NUM) / - unf_lport->low_level_func.support_max_hot_tag_range; - if (unlikely(exchg_mgr_idx >= UNF_EXCHG_MGR_NUM)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) Get ExchgMgr %u err", - unf_lport->port_id, exchg_mgr_idx); - - return NULL; - } - - xchg_mgr = unf_lport->xchg_mgr[exchg_mgr_idx]; - - if (unlikely(!xchg_mgr)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) ExchgMgr %u is null", - unf_lport->port_id, exchg_mgr_idx); - - return NULL; - } - - hot_pool = xchg_mgr->hot_pool; - - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) Hot Pool is NULL.", - unf_lport->port_id); - - return NULL; - } - - if (unlikely(hot_pool_tag >= (hot_pool->slab_total_sum + hot_pool->base))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]LPort(0x%x) can't Input Tag(0x%x), Max(0x%x).", - unf_lport->port_id, hot_pool_tag, - (hot_pool->slab_total_sum + hot_pool->base)); - - return NULL; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - xchg = unf_get_xchg_by_xchg_tag(hot_pool, hot_pool_tag - hot_pool->base); - 
spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - - return (void *)xchg; -} - -static void *unf_find_xchg_by_ox_id(void *lport, u16 ox_id, u32 oid) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct unf_xchg *xchg = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_lport *unf_lport = NULL; - ulong flags = 0; - ulong xchg_flags = 0; - u32 i = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - /* In the case of NPIV, the lport is the Vport pointer, - * and the share uses the ExchMgr of the RootLport - */ - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) MgrIdex %u Hot Pool is NULL.", - unf_lport->port_id, i); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - - /* 1. Traverse sfs_busy list */ - list_for_each_safe(node, next_node, &hot_pool->sfs_busylist) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags); - if (unf_check_oxid_matched(ox_id, oid, xchg)) { - atomic_inc(&xchg->ref_cnt); - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flags); - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - return xchg; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flags); - } - - /* 2. Traverse INI_Busy List */ - list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags); - if (unf_check_oxid_matched(ox_id, oid, xchg)) { - atomic_inc(&xchg->ref_cnt); - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flags); - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - return xchg; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, - xchg_flags); - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - } - - return NULL; -} - -static inline bool unf_check_xchg_matched(struct unf_xchg *xchg, u64 command_sn, - u32 world_id, void *pinitiator) -{ - bool matched = false; - - matched = (command_sn == xchg->cmnd_sn); - if (matched && (atomic_read(&xchg->ref_cnt) > 0)) - return true; - else - return false; -} - -static void *unf_look_up_xchg_by_cmnd_sn(void *lport, u64 command_sn, - u32 world_id, void *pinitiator) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_xchg *xchg = NULL; - ulong flags = 0; - u32 i; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - /* In NPIV, lport is a Vport pointer, and idle resources are shared by - * ExchMgr of RootLport. However, busy resources are mounted on each - * vport. Therefore, vport needs to be used. 
-	 */
-	unf_lport = (struct unf_lport *)lport;
-	FC_CHECK_RETURN_VALUE(unf_lport, NULL);
-
-	for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) {
-		hot_pool = unf_get_hot_pool_by_lport(unf_lport, i);
-		if (unlikely(!hot_pool)) {
-			FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR,
-				     "[err]Port(0x%x) hot pool is NULL",
-				     unf_lport->port_id);
-
-			continue;
-		}
-
-		/* from busy_list */
-		spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags);
-		list_for_each_safe(node, next_node, &hot_pool->ini_busylist) {
-			xchg = list_entry(node, struct unf_xchg, list_xchg_entry);
-			if (unf_check_xchg_matched(xchg, command_sn, world_id, pinitiator)) {
-				spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags);
-
-				return xchg;
-			}
-		}
-
-		/* vport: from destroy_list */
-		if (unf_lport != unf_lport->root_lport) {
-			list_for_each_safe(node, next_node, &hot_pool->list_destroy_xchg) {
-				xchg = list_entry(node, struct unf_xchg, list_xchg_entry);
-				if (unf_check_xchg_matched(xchg, command_sn, world_id,
-							   pinitiator)) {
-					spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags);
-
-					FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR,
-						     "[info]Port(0x%x) lookup exchange from destroy list",
-						     unf_lport->port_id);
-
-					return xchg;
-				}
-			}
-		}
-
-		spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags);
-	}
-
-	return NULL;
-}
-
-static inline u32 unf_alloc_hot_pool_slab(struct unf_xchg_hot_pool *hot_pool, struct unf_xchg *xchg)
-{
-	u16 slab_index = 0;
-
-	FC_CHECK_RETURN_VALUE(hot_pool, UNF_RETURN_ERROR);
-	FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-	/* Check whether the hot pool tag lies in the specified SIRT range.
-	 * If it does, set up the slab mapping; if not, handle it as a normal
-	 * I/O. If the SIRT digit map is in use but the tag slot is already
-	 * occupied, the I/O is discarded.
-	 */
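/*
 * Tag layout, for reference: each of the UNF_EXCHG_MGR_NUM managers owns a
 * contiguous tag window, hot_pool->base .. base + slab_total_sum - 1, and
 * unf_look_up_xchg_by_tag() above inverts it with
 * exchg_mgr_idx = (tag * UNF_EXCHG_MGR_NUM) / support_max_hot_tag_range.
 * A worked example with assumed, purely illustrative numbers - 4 managers
 * and a 4096-tag range, so each manager holds 1024 tags:
 *
 *	tag 2500 -> manager (2500 * 4) / 4096 = 2
 *	         -> slab index 2500 - 2048 = 452 (manager 2 starts at 2048)
 *
 * The allocator below is then a linear probe over the slab array starting
 * at slab_next_index, wrapping at slab_total_sum; if the scan comes back
 * to its starting point, the pool is exhausted.
 */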
-	 */
-
-	slab_index = hot_pool->slab_next_index;
-	while (unf_get_xchg_by_xchg_tag(hot_pool, slab_index)) {
-		slab_index++;
-		slab_index = slab_index % hot_pool->slab_total_sum;
-
-		/* Rewind occurs */
-		if (slab_index == hot_pool->slab_next_index) {
-			FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_MAJOR,
-				     "There is No Slab At Hot Pool(0x%p) for xchg(0x%p).",
-				     hot_pool, xchg);
-
-			return UNF_RETURN_ERROR;
-		}
-	}
-
-	unf_hot_pool_slab_set(hot_pool, slab_index, xchg);
-	xchg->hotpooltag = slab_index + hot_pool->base;
-	slab_index++;
-	hot_pool->slab_next_index = slab_index % hot_pool->slab_total_sum;
-
-	return RETURN_OK;
-}
-
-struct unf_esgl_page *
-unf_get_and_add_one_free_esgl_page(struct unf_lport *lport, struct unf_xchg *xchg)
-{
-	struct unf_esgl *esgl = NULL;
-	struct list_head *list_head = NULL;
-	ulong flag = 0;
-
-	FC_CHECK_RETURN_VALUE(lport, NULL);
-	FC_CHECK_RETURN_VALUE(xchg, NULL);
-
-	/* Obtain a new Esgl from the EsglPool and add it to the list_esgls of
-	 * the Xchg
-	 */
-	spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag);
-	if (!list_empty(&lport->esgl_pool.list_esgl_pool)) {
-		list_head = UNF_OS_LIST_NEXT(&lport->esgl_pool.list_esgl_pool);
-		list_del(list_head);
-		lport->esgl_pool.esgl_pool_count--;
-		list_add_tail(list_head, &xchg->list_esgls);
-
-		esgl = list_entry(list_head, struct unf_esgl, entry_esgl);
-		atomic_inc(&xchg->esgl_cnt);
-		spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag);
-	} else {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) esgl pool is empty",
-			     lport->nport_id);
-
-		spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag);
-		return NULL;
-	}
-
-	return &esgl->page;
-}
-
-void unf_release_esgls(struct unf_xchg *xchg)
-{
-	struct unf_lport *unf_lport = NULL;
-	struct list_head *list = NULL;
-	struct list_head *list_tmp = NULL;
-	ulong flag = 0;
-
-	FC_CHECK_RETURN_VOID(xchg);
-	FC_CHECK_RETURN_VOID(xchg->lport);
-
-	if (atomic_read(&xchg->esgl_cnt) <= 0)
-		return;
-
-	/* In the case of NPIV, xchg->lport holds the Vport pointer, and the
-	 * EsglPool of the root L_Port is shared.
-	 */
-	unf_lport = (xchg->lport)->root_lport;
-	FC_CHECK_RETURN_VOID(unf_lport);
-
-	spin_lock_irqsave(&unf_lport->esgl_pool.esgl_pool_lock, flag);
-	if (!list_empty(&xchg->list_esgls)) {
-		list_for_each_safe(list, list_tmp, &xchg->list_esgls) {
-			list_del(list);
-			list_add_tail(list, &unf_lport->esgl_pool.list_esgl_pool);
-			unf_lport->esgl_pool.esgl_pool_count++;
-			atomic_dec(&xchg->esgl_cnt);
-		}
-	}
-	spin_unlock_irqrestore(&unf_lport->esgl_pool.esgl_pool_lock, flag);
-}
-
-static void unf_add_back_to_fcp_list(struct unf_xchg_free_pool *free_pool, struct unf_xchg *xchg)
-{
-	ulong flags = 0;
-
-	FC_CHECK_RETURN_VOID(free_pool);
-	FC_CHECK_RETURN_VOID(xchg);
-
-	unf_init_xchg_attribute(xchg);
-
-	/* The released I/O resources are added to the queue tail to
-	 * facilitate fault locating
-	 */
-	spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags);
-	list_add_tail(&xchg->list_xchg_entry, &free_pool->list_free_xchg_list);
-	free_pool->total_fcp_xchg++;
-	spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags);
-}
-
-static void unf_check_xchg_mgr_status(struct unf_xchg_mgr *xchg_mgr)
-{
-	ulong flags = 0;
-	u32 total_xchg = 0;
-	u32 total_xchg_sum = 0;
-
-	FC_CHECK_RETURN_VOID(xchg_mgr);
-
-	spin_lock_irqsave(&xchg_mgr->free_pool.xchg_freepool_lock, flags);
-
-	total_xchg = xchg_mgr->free_pool.total_fcp_xchg + xchg_mgr->free_pool.total_sfs_xchg;
-	total_xchg_sum = xchg_mgr->free_pool.fcp_xchg_sum + xchg_mgr->free_pool.sfs_xchg_sum;
-
-	if (xchg_mgr->free_pool.xchg_mgr_completion && total_xchg == total_xchg_sum)
-		complete(xchg_mgr->free_pool.xchg_mgr_completion);
-
-	spin_unlock_irqrestore(&xchg_mgr->free_pool.xchg_freepool_lock, flags);
-}
-
-static void unf_free_fcp_xchg(struct unf_xchg *xchg)
-{
-	struct unf_xchg_free_pool *free_pool = NULL;
-	struct unf_xchg_mgr *xchg_mgr = NULL;
-	struct unf_lport *unf_lport = NULL;
-	struct unf_rport *unf_rport = NULL;
-
-	FC_CHECK_RETURN_VOID(xchg);
-
-	/* Release the specified INI I/O and invoke the scsi_done process */
-	unf_done_ini_xchg(xchg);
-	free_pool = xchg->free_pool;
-	xchg_mgr = xchg->xchg_mgr;
-	unf_lport = xchg->lport;
-	unf_rport = xchg->rport;
-
-	atomic_dec(&unf_rport->pending_io_cnt);
-	/* Release the Esgls in the Xchg structure and return them to the
-	 * EsglPool of the Lport
-	 */
-	unf_release_esgls(xchg);
-
-	if (unlikely(xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)) {
-		kfree(xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu);
-		xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu = NULL;
-	}
-
-	/* Mount the I/O resource to the FCP free linked list */
-	unf_add_back_to_fcp_list(free_pool, xchg);
-
-	/* If the port is being removed, check whether the manager has
-	 * drained, so the waiter in unf_free_lport_xchg() can complete and
-	 * the normal I/O path never touches a force-freed Xchg
-	 */
-	if (unlikely(unf_lport->port_removing))
-		unf_check_xchg_mgr_status(xchg_mgr);
-}
-
-static void unf_init_io_xchg_param(struct unf_xchg *xchg, struct unf_lport *lport,
-				   struct unf_xchg_mgr *xchg_mgr)
-{
-	static atomic64_t exhd_id;
-
-	xchg->start_jif = atomic64_inc_return(&exhd_id);
-	xchg->xchg_mgr = xchg_mgr;
-	xchg->free_pool = &xchg_mgr->free_pool;
-	xchg->hot_pool = xchg_mgr->hot_pool;
-	xchg->lport = lport;
-	xchg->xchg_type = UNF_XCHG_TYPE_INI;
-	xchg->free_xchg = unf_free_fcp_xchg;
-	xchg->scsi_or_tgt_cmnd_func = NULL;
-	xchg->io_state = UNF_IO_STATE_NEW;
-	xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE;
-	xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID;
-	xchg->io_send_abort = false;
-	xchg->io_abort_result = false;
-	xchg->oxid = INVALID_VALUE16;
-	xchg->abort_oxid = INVALID_VALUE16;
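/*
 * The PKG_PRIVATE_XCHG_ALLOC_TIME stamping further below retries the counter
 * because two values act as sentinels elsewhere: 0 ("never stamped") and
 * INVALID_VALUE32. The loop form of the same idea, as a sketch (the helper
 * name is illustrative, not a driver function):
 *
 *	static u32 next_alloc_time(atomic64_t *counter)
 *	{
 *		u32 v;
 *
 *		do {
 *			v = (u32)atomic64_inc_return(counter);
 *		} while (v == 0 || v == INVALID_VALUE32);
 *		return v;
 *	}
 */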
-	xchg->rxid = INVALID_VALUE16;
-	xchg->sid = INVALID_VALUE32;
-	xchg->did = INVALID_VALUE32;
-	xchg->oid = INVALID_VALUE32;
-	xchg->seq_id = INVALID_VALUE8;
-	xchg->cmnd_code = INVALID_VALUE32;
-	xchg->data_len = 0;
-	xchg->resid_len = 0;
-	xchg->data_direction = DMA_NONE;
-	xchg->may_consume_res_cnt = 0;
-	xchg->fast_consume_res_cnt = 0;
-	xchg->io_front_jif = 0;
-	xchg->tmf_state = 0;
-	xchg->ucode_abts_state = INVALID_VALUE32;
-	xchg->abts_state = 0;
-	xchg->rport_bind_jifs = INVALID_VALUE64;
-	xchg->scsi_id = INVALID_VALUE32;
-	xchg->qos_level = 0;
-	xchg->world_id = INVALID_VALUE32;
-
-	memset(&xchg->dif_control, 0, sizeof(struct unf_dif_control_info));
-	memset(&xchg->req_sgl_info, 0, sizeof(struct unf_req_sgl_info));
-	memset(&xchg->dif_sgl_info, 0, sizeof(struct unf_req_sgl_info));
-	xchg->scsi_cmnd_info.result = 0;
-
-	xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] =
-	    (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index);
-
-	if (xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] == INVALID_VALUE32) {
-		xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] =
-		    (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index);
-	}
-
-	if (xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0) {
-		xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] =
-		    (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index);
-	}
-
-	atomic_set(&xchg->ref_cnt, 0);
-	atomic_set(&xchg->delay_flag, 0);
-
-	if (delayed_work_pending(&xchg->timeout_work))
-		UNF_DEL_XCHG_TIMER_SAFE(xchg);
-
-	INIT_DELAYED_WORK(&xchg->timeout_work, unf_fc_ini_io_xchg_time_out);
-}
-
-static struct unf_xchg *unf_alloc_io_xchg(struct unf_lport *lport,
-					  struct unf_xchg_mgr *xchg_mgr)
-{
-	struct unf_xchg *xchg = NULL;
-	struct list_head *list_node = NULL;
-	struct unf_xchg_free_pool *free_pool = NULL;
-	struct unf_xchg_hot_pool *hot_pool = NULL;
-	ulong flags = 0;
-
-	FC_CHECK_RETURN_VALUE(xchg_mgr, NULL);
-	FC_CHECK_RETURN_VALUE(lport, NULL);
-
-	free_pool = &xchg_mgr->free_pool;
-	hot_pool = xchg_mgr->hot_pool;
-	FC_CHECK_RETURN_VALUE(free_pool, NULL);
-	FC_CHECK_RETURN_VALUE(hot_pool, NULL);
-
-	/* 1. Free Pool */
-	spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags);
-	if (unlikely(list_empty(&free_pool->list_free_xchg_list))) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO,
-			     "Port(0x%x) has no free exchanges anymore.",
-			     lport->port_id);
-		spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags);
-		return NULL;
-	}
-
-	/* Select an idle node from the free pool */
-	list_node = UNF_OS_LIST_NEXT(&free_pool->list_free_xchg_list);
-	list_del(list_node);
-	free_pool->total_fcp_xchg--;
-	spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags);
-
-	xchg = list_entry(list_node, struct unf_xchg, list_xchg_entry);
-	/*
-	 * 2. Hot Pool:
-	 * When the xchg is mounted on the hot pool, its mount and release
-	 * modes must be specified; INI xchgs are kept on the ini busy list.
-	 */
-	flags = 0;
-	spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags);
-	if (unf_alloc_hot_pool_slab(hot_pool, xchg) != RETURN_OK) {
-		spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags);
-		unf_add_back_to_fcp_list(free_pool, xchg);
-		if (unlikely(lport->port_removing))
-			unf_check_xchg_mgr_status(xchg_mgr);
-
-		return NULL;
-	}
-	list_add_tail(&xchg->list_xchg_entry, &hot_pool->ini_busylist);
-	hot_pool->total_xchges++;
-	spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags);
-
-	/* 3. 
Exchange State */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_init_io_xchg_param(xchg, lport, xchg_mgr); - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - return xchg; -} - -static void unf_add_back_to_sfs_list(struct unf_xchg_free_pool *free_pool, - struct unf_xchg *xchg) -{ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(free_pool); - FC_CHECK_RETURN_VOID(xchg); - - unf_init_xchg_attribute(xchg); - - spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags); - - list_add_tail(&xchg->list_xchg_entry, &free_pool->list_sfs_xchg_list); - free_pool->total_sfs_xchg++; - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); -} - -static void unf_free_sfs_xchg(struct unf_xchg *xchg) -{ - struct unf_xchg_free_pool *free_pool = NULL; - struct unf_xchg_mgr *xchg_mgr = NULL; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - - free_pool = xchg->free_pool; - unf_lport = xchg->lport; - xchg_mgr = xchg->xchg_mgr; - - /* The memory is applied for when the GID_PT/GID_FT is sent. - * If no response is received, the GID_PT/GID_FT needs to be forcibly - * released. - */ - - unf_free_one_big_sfs(xchg); - - unf_add_back_to_sfs_list(free_pool, xchg); - - if (unlikely(unf_lport->port_removing)) - unf_check_xchg_mgr_status(xchg_mgr); -} - -static void unf_fc_xchg_add_timer(void *xchg, ulong time_ms, - enum unf_timer_type time_type) -{ - ulong flag = 0; - struct unf_xchg *unf_xchg = NULL; - ulong times_ms = time_ms; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - unf_xchg = (struct unf_xchg *)xchg; - unf_lport = unf_xchg->lport; - FC_CHECK_RETURN_VOID(unf_lport); - - /* update timeout */ - switch (time_type) { - /* The processing of TGT RRQ timeout is the same as that of TGT IO - * timeout. The timeout period is different. - */ - case UNF_TIMER_TYPE_TGT_RRQ: - times_ms = times_ms + UNF_TGT_RRQ_REDUNDANT_TIME; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "TGT RRQ Timer set."); - break; - - case UNF_TIMER_TYPE_INI_RRQ: - times_ms = times_ms - UNF_INI_RRQ_REDUNDANT_TIME; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "INI RRQ Timer set."); - break; - - case UNF_TIMER_TYPE_SFS: - times_ms = times_ms + UNF_INI_ELS_REDUNDANT_TIME; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "INI ELS Timer set."); - break; - default: - break; - } - - /* The xchg of the timer must be valid. 
If the reference count of xchg - * is 0, the timer must not be added - */ - if (atomic_read(&unf_xchg->ref_cnt) <= 0) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT, - "[warn]Abnormal Exchange(0x%p), Reference count(0x%x), Can't add timer.", - unf_xchg, atomic_read(&unf_xchg->ref_cnt)); - return; - } - - /* Delay Work: Hold for timer */ - spin_lock_irqsave(&unf_xchg->xchg_state_lock, flag); - if (queue_delayed_work(unf_lport->xchg_wq, &unf_xchg->timeout_work, - (ulong)msecs_to_jiffies((u32)times_ms))) { - /* hold for timer */ - atomic_inc(&unf_xchg->ref_cnt); - } - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flag); -} - -static void unf_init_sfs_xchg_param(struct unf_xchg *xchg, - struct unf_lport *lport, - struct unf_xchg_mgr *xchg_mgr) -{ - xchg->free_pool = &xchg_mgr->free_pool; - xchg->hot_pool = xchg_mgr->hot_pool; - xchg->lport = lport; - xchg->xchg_mgr = xchg_mgr; - xchg->free_xchg = unf_free_sfs_xchg; - xchg->xchg_type = UNF_XCHG_TYPE_SFS; - xchg->io_state = UNF_IO_STATE_NEW; - xchg->scsi_cmnd_info.result = 0; - xchg->ob_callback_sts = UNF_IO_SUCCESS; - - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index); - - if (xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] == - INVALID_VALUE32) { - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index); - } - - if (xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0) { - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index); - } - - if (delayed_work_pending(&xchg->timeout_work)) - UNF_DEL_XCHG_TIMER_SAFE(xchg); - - INIT_DELAYED_WORK(&xchg->timeout_work, unf_sfs_xchg_time_out); -} - -static struct unf_xchg *unf_alloc_sfs_xchg(struct unf_lport *lport, - struct unf_xchg_mgr *xchg_mgr) -{ - struct unf_xchg *xchg = NULL; - struct list_head *list_node = NULL; - struct unf_xchg_free_pool *free_pool = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(xchg_mgr, NULL); - free_pool = &xchg_mgr->free_pool; - hot_pool = xchg_mgr->hot_pool; - FC_CHECK_RETURN_VALUE(free_pool, NULL); - FC_CHECK_RETURN_VALUE(hot_pool, NULL); - - /* Select an idle node from free pool */ - spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags); - if (list_empty(&free_pool->list_sfs_xchg_list)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) have no Exchange anymore.", - lport->port_id); - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); - return NULL; - } - - list_node = UNF_OS_LIST_NEXT(&free_pool->list_sfs_xchg_list); - list_del(list_node); - free_pool->total_sfs_xchg--; - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); - - xchg = list_entry(list_node, struct unf_xchg, list_xchg_entry); - /* - * The xchg is mounted to the Hot Pool. - * The mount mode and release mode of the xchg must be specified - * and stored in the sfs linked list. 
-	 */
-	flags = 0;
-	spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags);
-	if (unf_alloc_hot_pool_slab(hot_pool, xchg) != RETURN_OK) {
-		spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags);
-		unf_add_back_to_sfs_list(free_pool, xchg);
-		if (unlikely(lport->port_removing))
-			unf_check_xchg_mgr_status(xchg_mgr);
-
-		return NULL;
-	}
-
-	list_add_tail(&xchg->list_xchg_entry, &hot_pool->sfs_busylist);
-	hot_pool->total_xchges++;
-	spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags);
-
-	spin_lock_irqsave(&xchg->xchg_state_lock, flags);
-	unf_init_sfs_xchg_param(xchg, lport, xchg_mgr);
-	spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
-
-	return xchg;
-}
-
-static void *unf_get_new_xchg(void *lport, u32 xchg_type)
-{
-	struct unf_lport *unf_lport = NULL;
-	struct unf_xchg_mgr *xchg_mgr = NULL;
-	struct unf_xchg *xchg = NULL;
-	u32 exchg_type = 0;
-	u16 xchg_mgr_type;
-	u32 rtry_cnt = 0;
-	u32 last_exchg_mgr_idx;
-
-	xchg_mgr_type = (xchg_type >> UNF_SHIFT_16);
-	exchg_type = xchg_type & SPFC_XCHG_TYPE_MASK;
-	FC_CHECK_RETURN_VALUE(lport, NULL);
-
-	/* In the case of NPIV, the lport is the Vport pointer, and the
-	 * ExchMgr of the root L_Port is shared.
-	 */
-	unf_lport = ((struct unf_lport *)lport)->root_lport;
-	FC_CHECK_RETURN_VALUE(unf_lport, NULL);
-
-	if (unlikely((atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP) ||
-		     (atomic_read(&((struct unf_lport *)lport)->lport_no_operate_flag) ==
-		      UNF_LPORT_NOP))) {
-		return NULL;
-	}
-
-	last_exchg_mgr_idx = (u32)atomic64_inc_return(&unf_lport->last_exchg_mgr_idx);
-try_next_mgr:
-	rtry_cnt++;
-	if (unlikely(rtry_cnt > UNF_EXCHG_MGR_NUM))
-		return NULL;
-
-	/* In fixed mode, only XchgMgr 0 is used */
-	if (unlikely(xchg_mgr_type == UNF_XCHG_MGR_TYPE_FIXED)) {
-		xchg_mgr = (struct unf_xchg_mgr *)unf_lport->xchg_mgr[ARRAY_INDEX_0];
-	} else {
-		xchg_mgr = (struct unf_xchg_mgr *)unf_lport
-			       ->xchg_mgr[last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM];
-	}
-	if (unlikely(!xchg_mgr)) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR,
-			     "[err]Port(0x%x) get exchangemgr %u is null.",
-			     unf_lport->port_id, last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM);
-		return NULL;
-	}
-	last_exchg_mgr_idx++;
-
-	/* Allocate entries based on the Exchange type */
-	switch (exchg_type) {
-	case UNF_XCHG_TYPE_SFS:
-		xchg = unf_alloc_sfs_xchg(lport, xchg_mgr);
-		break;
-	case UNF_XCHG_TYPE_INI:
-		xchg = unf_alloc_io_xchg(lport, xchg_mgr);
-		break;
-
-	default:
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR,
-			     "Port(0x%x) unsupported Exchange type(0x%x).",
-			     unf_lport->port_id, exchg_type);
-		break;
-	}
-
-	if (likely(xchg)) {
-		xchg->oxid = INVALID_VALUE16;
-		xchg->abort_oxid = INVALID_VALUE16;
-		xchg->rxid = INVALID_VALUE16;
-		xchg->debug_hook = false;
-		xchg->alloc_jif = jiffies;
-
-		atomic_set(&xchg->ref_cnt, 1);
-		atomic_set(&xchg->esgl_cnt, 0);
-	} else {
-		goto try_next_mgr;
-	}
-
-	return xchg;
-}
-
-static void unf_free_xchg(void *lport, void *xchg)
-{
-	struct unf_xchg *unf_xchg = NULL;
-
-	FC_CHECK_RETURN_VOID(xchg);
-
-	unf_xchg = (struct unf_xchg *)xchg;
-	unf_xchg_ref_dec(unf_xchg, XCHG_FREE_XCHG);
-}
-
-u32 unf_init_xchg_mgr_temp(struct unf_lport *lport)
-{
-	FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-
-	lport->xchg_mgr_temp.unf_xchg_get_free_and_init = unf_get_new_xchg;
-	lport->xchg_mgr_temp.unf_xchg_release = unf_free_xchg;
-	lport->xchg_mgr_temp.unf_look_up_xchg_by_tag = unf_look_up_xchg_by_tag;
-	lport->xchg_mgr_temp.unf_look_up_xchg_by_id = unf_find_xchg_by_ox_id;
-	lport->xchg_mgr_temp.unf_xchg_add_timer = unf_fc_xchg_add_timer;
-	
lport->xchg_mgr_temp.unf_xchg_cancel_timer = unf_xchg_cancel_timer; - lport->xchg_mgr_temp.unf_xchg_abort_all_io = unf_xchg_abort_all_xchg; - lport->xchg_mgr_temp.unf_look_up_xchg_by_cmnd_sn = unf_look_up_xchg_by_cmnd_sn; - lport->xchg_mgr_temp.unf_xchg_abort_by_lun = unf_xchg_abort_by_lun; - lport->xchg_mgr_temp.unf_xchg_abort_by_session = unf_xchg_abort_by_session; - lport->xchg_mgr_temp.unf_xchg_mgr_io_xchg_abort = unf_xchg_mgr_io_xchg_abort; - lport->xchg_mgr_temp.unf_xchg_mgr_sfs_xchg_abort = unf_xchg_mgr_sfs_xchg_abort; - - return RETURN_OK; -} - -void unf_release_xchg_mgr_temp(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - if (lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) has dirty exchange, Don't release exchange manager template.", - lport->port_id); - - return; - } - - memset(&lport->xchg_mgr_temp, 0, sizeof(struct unf_cm_xchg_mgr_template)); - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP; -} - -void unf_set_hot_pool_wait_state(struct unf_lport *lport, bool wait_state) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - ulong pool_lock_flags = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) hot pool is NULL", - lport->port_id); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - hot_pool->wait_state = wait_state; - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - } -} - -u32 unf_xchg_ref_inc(struct unf_xchg *xchg, enum unf_ioflow_id io_stage) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - ulong flags = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - if (unlikely(xchg->debug_hook)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OX_ID_RX_ID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Stage(%s)", - xchg, xchg->io_state, xchg->sid, xchg->did, - xchg->oxid, xchg->rxid, xchg->alloc_jif, - atomic_read(&xchg->ref_cnt), - io_stage_table[io_stage].stage); - } - - hot_pool = xchg->hot_pool; - FC_CHECK_RETURN_VALUE(hot_pool, UNF_RETURN_ERROR); - - /* Exchange -> Hot Pool Tag check */ - if (unlikely((xchg->hotpooltag >= (hot_pool->slab_total_sum + hot_pool->base)) || - xchg->hotpooltag < hot_pool->base)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Xchg(0x%p) S_ID(%xh) D_ID(0x%x) hot_pool_tag(0x%x) is bigger than slab total num(0x%x) base(0x%x)", - xchg, xchg->sid, xchg->did, xchg->hotpooltag, - hot_pool->slab_total_sum + hot_pool->base, hot_pool->base); - - return UNF_RETURN_ERROR; - } - - /* atomic read & inc */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (unlikely(atomic_read(&xchg->ref_cnt) <= 0)) { - ret = UNF_RETURN_ERROR; - } else { - if (unf_get_xchg_by_xchg_tag(hot_pool, xchg->hotpooltag - hot_pool->base) == xchg) { - atomic_inc(&xchg->ref_cnt); - ret = RETURN_OK; - } else { - ret = UNF_RETURN_ERROR; - } - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - return ret; -} - -void unf_xchg_ref_dec(struct unf_xchg *xchg, enum unf_ioflow_id io_stage) -{ - /* Atomic dec ref_cnt & test, free exchange if necessary (ref_cnt==0) */ - struct unf_xchg_hot_pool *hot_pool = NULL; - void (*free_xchg)(struct unf_xchg *) = NULL; - ulong flags = 0; - ulong xchg_lock_falgs = 0; - - FC_CHECK_RETURN_VOID(xchg); - - if (xchg->debug_hook) { - 
FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR,
-			     "[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OXID_RXID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Stage(%s)",
-			     xchg, xchg->io_state, xchg->sid, xchg->did, xchg->oxid,
-			     xchg->rxid, xchg->alloc_jif,
-			     atomic_read(&xchg->ref_cnt),
-			     io_stage_table[io_stage].stage);
-	}
-
-	hot_pool = xchg->hot_pool;
-	FC_CHECK_RETURN_VOID(hot_pool);
-	FC_CHECK_RETURN_VOID((xchg->hotpooltag >= hot_pool->base));
-
-	/*
-	 * 1. Atomic dec & test
-	 * 2. Free exchange if necessary (ref_cnt == 0)
-	 */
-	spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_falgs);
-	if (atomic_dec_and_test(&xchg->ref_cnt)) {
-		free_xchg = xchg->free_xchg;
-		spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_falgs);
-		spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags);
-		unf_hot_pool_slab_set(hot_pool,
-				      xchg->hotpooltag - hot_pool->base, NULL);
-		/* Delete exchange list entry */
-		list_del_init(&xchg->list_xchg_entry);
-		hot_pool->total_xchges--;
-		spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags);
-
-		/* unf_free_fcp_xchg --->>> unf_done_ini_xchg */
-		if (free_xchg)
-			free_xchg(xchg);
-	} else {
-		spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_falgs);
-	}
-}
-
-static void unf_init_xchg_attribute(struct unf_xchg *xchg)
-{
-	ulong flags = 0;
-
-	FC_CHECK_RETURN_VOID(xchg);
-
-	spin_lock_irqsave(&xchg->xchg_state_lock, flags);
-	xchg->xchg_mgr = NULL;
-	xchg->free_pool = NULL;
-	xchg->hot_pool = NULL;
-	xchg->lport = NULL;
-	xchg->rport = NULL;
-	xchg->disc_rport = NULL;
-	xchg->io_state = UNF_IO_STATE_NEW;
-	xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE;
-	xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID;
-	xchg->io_send_abort = false;
-	xchg->io_abort_result = false;
-	xchg->abts_state = 0;
-	xchg->oxid = INVALID_VALUE16;
-	xchg->abort_oxid = INVALID_VALUE16;
-	xchg->rxid = INVALID_VALUE16;
-	xchg->sid = INVALID_VALUE32;
-	xchg->did = INVALID_VALUE32;
-	xchg->oid = INVALID_VALUE32;
-	xchg->disc_portid = INVALID_VALUE32;
-	xchg->seq_id = INVALID_VALUE8;
-	xchg->cmnd_code = INVALID_VALUE32;
-	xchg->cmnd_sn = INVALID_VALUE64;
-	xchg->data_len = 0;
-	xchg->resid_len = 0;
-	xchg->data_direction = DMA_NONE;
-	xchg->hotpooltag = INVALID_VALUE16;
-	xchg->big_sfs_buf = NULL;
-	xchg->may_consume_res_cnt = 0;
-	xchg->fast_consume_res_cnt = 0;
-	xchg->io_front_jif = INVALID_VALUE64;
-	xchg->ob_callback_sts = UNF_IO_SUCCESS;
-	xchg->start_jif = 0;
-	xchg->rport_bind_jifs = INVALID_VALUE64;
-	xchg->scsi_id = INVALID_VALUE32;
-	xchg->qos_level = 0;
-	xchg->world_id = INVALID_VALUE32;
-
-	memset(&xchg->seq, 0, sizeof(struct unf_seq));
-	memset(&xchg->fcp_cmnd, 0, sizeof(struct unf_fcp_cmnd));
-	memset(&xchg->scsi_cmnd_info, 0, sizeof(struct unf_scsi_cmd_info));
-	memset(&xchg->dif_info, 0, sizeof(struct dif_info));
-	memset(xchg->private_data, 0, (PKG_MAX_PRIVATE_DATA_SIZE * sizeof(u32)));
-	xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK;
-	xchg->echo_info.response_time = 0;
-
-	if (xchg->xchg_type == UNF_XCHG_TYPE_SFS) {
-		if (xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) {
-			memset(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, 0,
-			       sizeof(union unf_sfs_u));
-			xchg->fcp_sfs_union.sfs_entry.cur_offset = 0;
-		}
-	} else if (xchg->xchg_type != UNF_XCHG_TYPE_INI) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR,
-			     "Exchange Type(0x%x) SFS union not initialized.",
-			     xchg->xchg_type);
-	}
-	xchg->xchg_type = UNF_XCHG_TYPE_INVALID;
-	xchg->xfer_or_rsp_echo = NULL;
-	xchg->scsi_or_tgt_cmnd_func = NULL;
-	xchg->ob_callback = NULL;
-	xchg->callback = NULL;
-	xchg->free_xchg = NULL;
-
-	
atomic_set(&xchg->ref_cnt, 0); - atomic_set(&xchg->esgl_cnt, 0); - atomic_set(&xchg->delay_flag, 0); - - if (delayed_work_pending(&xchg->timeout_work)) - UNF_DEL_XCHG_TIMER_SAFE(xchg); - - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); -} - -bool unf_busy_io_completed(struct unf_lport *lport) -{ - struct unf_xchg_mgr *xchg_mgr = NULL; - ulong pool_lock_flags = 0; - u32 i; - - FC_CHECK_RETURN_VALUE(lport, true); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - xchg_mgr = unf_get_xchg_mgr_by_lport(lport, i); - if (unlikely(!xchg_mgr)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) Exchange Manager is NULL", - lport->port_id); - continue; - } - - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, - pool_lock_flags); - if (!list_empty(&xchg_mgr->hot_pool->ini_busylist)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) ini busylist is not empty.", - lport->port_id); - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, - pool_lock_flags); - return false; - } - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, - pool_lock_flags); - } - return true; -} diff --git a/drivers/scsi/spfc/common/unf_exchg.h b/drivers/scsi/spfc/common/unf_exchg.h deleted file mode 100644 index 0a48be31b971076da5c65fe254864646044f5544..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_exchg.h +++ /dev/null @@ -1,436 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_EXCHG_H -#define UNF_EXCHG_H - -#include "unf_type.h" -#include "unf_fcstruct.h" -#include "unf_lport.h" -#include "unf_scsi_common.h" - -enum unf_ioflow_id { - XCHG_ALLOC = 0, - TGT_RECEIVE_ABTS, - TGT_ABTS_DONE, - TGT_IO_SRR, - SFS_RESPONSE, - SFS_TIMEOUT, - INI_SEND_CMND, - INI_RESPONSE_DONE, - INI_EH_ABORT, - INI_EH_DEVICE_RESET, - INI_EH_BLS_DONE, - INI_IO_TIMEOUT, - INI_REQ_TIMEOUT, - XCHG_CANCEL_TIMER, - XCHG_FREE_XCHG, - SEND_ELS, - IO_XCHG_WAIT, - XCHG_BUTT -}; - -enum unf_xchg_type { - UNF_XCHG_TYPE_INI = 0, /* INI IO */ - UNF_XCHG_TYPE_SFS = 1, - UNF_XCHG_TYPE_INVALID -}; - -enum unf_xchg_mgr_type { - UNF_XCHG_MGR_TYPE_RANDOM = 0, - UNF_XCHG_MGR_TYPE_FIXED = 1, - UNF_XCHG_MGR_TYPE_INVALID -}; - -enum tgt_io_send_stage { - TGT_IO_SEND_STAGE_NONE = 0, - TGT_IO_SEND_STAGE_DOING = 1, /* xfer/rsp into queue */ - TGT_IO_SEND_STAGE_DONE = 2, /* xfer/rsp into queue complete */ - TGT_IO_SEND_STAGE_ECHO = 3, /* driver handled TSTS */ - TGT_IO_SEND_STAGE_INVALID -}; - -enum tgt_io_send_result { - TGT_IO_SEND_RESULT_OK = 0, /* xfer/rsp enqueue succeed */ - TGT_IO_SEND_RESULT_FAIL = 1, /* xfer/rsp enqueue fail */ - TGT_IO_SEND_RESULT_INVALID -}; - -struct unf_io_flow_id { - char *stage; -}; - -#define unf_check_oxid_matched(ox_id, oid, xchg) \ - (((ox_id) == (xchg)->oxid) && ((oid) == (xchg)->oid) && \ - (atomic_read(&(xchg)->ref_cnt) > 0)) - -#define UNF_CHECK_ALLOCTIME_VALID(lport, xchg_tag, exchg, pkg_alloc_time, \ - xchg_alloc_time) \ - do { \ - if (unlikely(((pkg_alloc_time) != 0) && \ - ((pkg_alloc_time) != (xchg_alloc_time)))) { \ - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, \ - "Lport(0x%x_0x%x_0x%x_0x%p) AllocTime is not " \ - "equal,PKG " \ - "AllocTime:0x%x,Exhg AllocTime:0x%x", \ - (lport)->port_id, (lport)->nport_id, xchg_tag, \ - exchg, pkg_alloc_time, xchg_alloc_time); \ - return UNF_RETURN_ERROR; \ - }; \ - if (unlikely((pkg_alloc_time) == 0)) { \ - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, \ - "Lport(0x%x_0x%x_0x%x_0x%p) pkgtime err,PKG " \ - "AllocTime:0x%x,Exhg AllocTime:0x%x", \ - 
(lport)->port_id, (lport)->nport_id, xchg_tag, \ - exchg, pkg_alloc_time, xchg_alloc_time); \ - }; \ - } while (0) - -#define UNF_SET_SCSI_CMND_RESULT(xchg, cmnd_result) \ - ((xchg)->scsi_cmnd_info.result = (cmnd_result)) - -#define UNF_GET_GS_SFS_XCHG_TIMER(lport) (3 * (ulong)(lport)->ra_tov) - -#define UNF_GET_BLS_SFS_XCHG_TIMER(lport) (2 * (ulong)(lport)->ra_tov) - -#define UNF_GET_ELS_SFS_XCHG_TIMER(lport) (2 * (ulong)(lport)->ra_tov) - -#define UNF_ELS_ECHO_RESULT_OK 0 -#define UNF_ELS_ECHO_RESULT_FAIL 1 - -struct unf_xchg; -/* Xchg hot pool, busy IO lookup Xchg */ -struct unf_xchg_hot_pool { - /* Xchg sum, in hot pool */ - u16 total_xchges; - bool wait_state; - - /* pool lock */ - spinlock_t xchg_hotpool_lock; - - /* Xchg posiontion list */ - struct list_head sfs_busylist; - struct list_head ini_busylist; - struct list_head list_destroy_xchg; - - /* Next free hot point */ - u16 slab_next_index; - u16 slab_total_sum; - u16 base; - - struct unf_lport *lport; - - struct unf_xchg *xchg_slab[ARRAY_INDEX_0]; -}; - -/* Xchg's FREE POOL */ -struct unf_xchg_free_pool { - spinlock_t xchg_freepool_lock; - - u32 fcp_xchg_sum; - - /* IO used Xchg */ - struct list_head list_free_xchg_list; - u32 total_fcp_xchg; - - /* SFS used Xchg */ - struct list_head list_sfs_xchg_list; - u32 total_sfs_xchg; - u32 sfs_xchg_sum; - - struct completion *xchg_mgr_completion; -}; - -struct unf_big_sfs { - struct list_head entry_bigsfs; - void *addr; - u32 size; -}; - -struct unf_big_sfs_pool { - void *big_sfs_pool; - u32 free_count; - struct list_head list_freepool; - struct list_head list_busypool; - spinlock_t big_sfs_pool_lock; -}; - -/* Xchg Manager for vport Xchg */ -struct unf_xchg_mgr { - /* MG type */ - u32 mgr_type; - - /* MG entry */ - struct list_head xchg_mgr_entry; - - /* MG attribution */ - u32 mem_szie; - - /* MG alloced resource */ - void *fcp_mm_start; - - u32 sfs_mem_size; - void *sfs_mm_start; - dma_addr_t sfs_phy_addr; - - struct unf_xchg_free_pool free_pool; - struct unf_xchg_hot_pool *hot_pool; - - struct unf_big_sfs_pool big_sfs_pool; - - struct buf_describe big_sfs_buf_list; -}; - -struct unf_seq { - /* Seq ID */ - u8 seq_id; - - /* Seq Cnt */ - u16 seq_cnt; - - /* Seq state and len,maybe used for fcoe */ - u16 seq_stat; - u32 rec_data_len; -}; - -union unf_xchg_fcp_sfs { - struct unf_sfs_entry sfs_entry; - struct unf_fcp_rsp_iu_entry fcp_rsp_entry; -}; - -#define UNF_IO_STATE_NEW 0 -#define TGT_IO_STATE_SEND_XFERRDY (1 << 2) /* succeed to send XFer rdy */ -#define TGT_IO_STATE_RSP (1 << 5) /* chip send rsp */ -#define TGT_IO_STATE_ABORT (1 << 7) - -#define INI_IO_STATE_UPTASK \ - (1 << 15) /* INI Upper-layer Task Management Commands */ -#define INI_IO_STATE_UPABORT \ - (1 << 16) /* INI Upper-layer timeout Abort flag \ - */ -#define INI_IO_STATE_DRABORT (1 << 17) /* INI driver Abort flag */ -#define INI_IO_STATE_DONE (1 << 18) /* INI complete flag */ -#define INI_IO_STATE_WAIT_RRQ (1 << 19) /* INI wait send rrq */ -#define INI_IO_STATE_UPSEND_ERR (1 << 20) /* INI send fail flag */ -/* INI only clear firmware resource flag */ -#define INI_IO_STATE_ABORT_RESOURCE (1 << 21) -/* ioc abort:INI send ABTS ,5S timeout Semaphore,than set 1 */ -#define INI_IO_STATE_ABORT_TIMEOUT (1 << 22) -#define INI_IO_STATE_RRQSEND_ERR (1 << 23) /* INI send RRQ fail flag */ -#define INI_IO_STATE_LOGO (1 << 24) /* INI busy IO session logo status */ -#define INI_IO_STATE_TMF_ABORT (1 << 25) /* INI TMF ABORT IO flag */ -#define INI_IO_STATE_REC_TIMEOUT_WAIT (1 << 26) /* INI REC TIMEOUT WAIT */ -#define 
INI_IO_STATE_REC_TIMEOUT (1 << 27) /* INI REC TIMEOUT */ - -#define TMF_RESPONSE_RECEIVED (1 << 0) -#define MARKER_STS_RECEIVED (1 << 1) -#define ABTS_RESPONSE_RECEIVED (1 << 2) - -struct unf_scsi_cmd_info { - ulong time_out; - ulong abort_time_out; - void *scsi_cmnd; - void (*done)(struct unf_scsi_cmnd *scsi_cmd); - ini_get_sgl_entry_buf unf_get_sgl_entry_buf; - struct unf_ini_error_code *err_code_table; /* error code table */ - char *sense_buf; - u32 err_code_table_cout; /* Size of the error code table */ - u32 buf_len; - u32 entry_cnt; - u32 result; /* Stores command execution results */ - u32 port_id; -/* Re-search for rport based on scsiid during retry. Otherwise, - *data inconsistency will occur - */ - u32 scsi_id; - void *sgl; - uplevel_cmd_done uplevel_done; -}; - -struct unf_req_sgl_info { - void *sgl; - void *sgl_start; - u32 req_index; - u32 entry_index; -}; - -struct unf_els_echo_info { - u64 response_time; - struct semaphore echo_sync_sema; - u32 echo_result; -}; - -struct unf_xchg { - /* Mg resource relative */ - /* list delete from HotPool */ - struct unf_xchg_hot_pool *hot_pool; - - /* attach to FreePool */ - struct unf_xchg_free_pool *free_pool; - struct unf_xchg_mgr *xchg_mgr; - struct unf_lport *lport; /* Local LPort/VLPort */ - struct unf_rport *rport; /* Rmote Port */ - struct unf_rport *disc_rport; /* Discover Rmote Port */ - struct list_head list_xchg_entry; - struct list_head list_abort_xchg_entry; - spinlock_t xchg_state_lock; - - /* Xchg reference */ - atomic_t ref_cnt; - atomic_t esgl_cnt; - bool debug_hook; - /* Xchg attribution */ - u16 hotpooltag; - u16 abort_oxid; - u32 xchg_type; /* LS,TGT CMND ,REQ,or SCSI Cmnd */ - u16 oxid; - u16 rxid; - u32 sid; - u32 did; - u32 oid; /* ID of the exchange initiator */ - u32 disc_portid; /* Send GNN_ID/GFF_ID NPortId */ - u8 seq_id; - u8 byte_orders; /* Byte order */ - struct unf_seq seq; - - u32 cmnd_code; - u32 world_id; - /* Dif control */ - struct unf_dif_control_info dif_control; - struct dif_info dif_info; - /* IO status Abort,timer out */ - u32 io_state; /* TGT_IO_STATE_E */ - u32 tmf_state; /* TMF STATE */ - u32 ucode_abts_state; - u32 abts_state; - - /* IO Enqueuing */ - enum tgt_io_send_stage io_send_stage; /* tgt_io_send_stage */ - /* IO Enqueuing result, success or failure */ - enum tgt_io_send_result io_send_result; /* tgt_io_send_result */ - - u8 io_send_abort; /* is or not send io abort */ - /*result of io abort cmd(succ:true; fail:false)*/ - u8 io_abort_result; - /* for INI,Indicates the length of the data transmitted over the PCI - * link - */ - u32 data_len; - /* ResidLen,greater than 0 UnderFlow or Less than Overflow */ - int resid_len; - /* +++++++++++++++++IO Special++++++++++++++++++++ */ - /* point to tgt cmnd/req/scsi cmnd */ - /* Fcp cmnd */ - struct unf_fcp_cmnd fcp_cmnd; - - struct unf_scsi_cmd_info scsi_cmnd_info; - - struct unf_req_sgl_info req_sgl_info; - - struct unf_req_sgl_info dif_sgl_info; - - u64 cmnd_sn; - void *pinitiator; - - /* timestamp */ - u64 start_jif; - u64 alloc_jif; - - u64 io_front_jif; - - u32 may_consume_res_cnt; - u32 fast_consume_res_cnt; - - /* scsi req info */ - u32 data_direction; - - struct unf_big_sfs *big_sfs_buf; - - /* scsi cmnd sense_buffer pointer */ - union unf_xchg_fcp_sfs fcp_sfs_union; - - /* One exchange may use several External Sgls */ - struct list_head list_esgls; - struct unf_els_echo_info echo_info; - struct semaphore task_sema; - - /* for RRQ ,IO Xchg add to SFS Xchg */ - void *io_xchg; - - /* Xchg delay work */ - struct delayed_work timeout_work; - 
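/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * INI_IO_STATE_* values above are one-bit-per-condition flags OR-ed into
 * xchg->io_state while holding xchg_state_lock, and every reader tests them
 * under the same lock. A hedged sketch of the claim-the-abort-path idiom that
 * the abort code later in this patch relies on; the struct and flag names are
 * hypothetical stand-ins:
 */
struct demo_state {
	spinlock_t lock;
	u32 io_state;
};

#define DEMO_STATE_UPABORT	(1U << 16)	/* mirrors INI_IO_STATE_UPABORT */
#define DEMO_STATE_DONE		(1U << 18)	/* mirrors INI_IO_STATE_DONE */

static bool demo_try_mark_upabort(struct demo_state *x)
{
	unsigned long flags;
	bool marked = false;

	spin_lock_irqsave(&x->lock, flags);
	if (!(x->io_state & (DEMO_STATE_UPABORT | DEMO_STATE_DONE))) {
		x->io_state |= DEMO_STATE_UPABORT;	/* this caller now owns the abort */
		marked = true;
	}
	spin_unlock_irqrestore(&x->lock, flags);
	return marked;
}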
- void (*xfer_or_rsp_echo)(struct unf_xchg *xchg, u32 status); - - /* wait list XCHG send function */ - int (*scsi_or_tgt_cmnd_func)(struct unf_xchg *xchg); - - /* send result callback */ - void (*ob_callback)(struct unf_xchg *xchg); - - /* Response IO callback */ - void (*callback)(void *lport, void *rport, void *xchg); - - /* Xchg release function */ - void (*free_xchg)(struct unf_xchg *xchg); - - /* +++++++++++++++++low level Special++++++++++++++++++++ */ - /* private data,provide for low level */ - u32 private_data[PKG_MAX_PRIVATE_DATA_SIZE]; - - u64 rport_bind_jifs; - - /* sfs exchg ob callback status */ - u32 ob_callback_sts; - u32 scsi_id; - u32 qos_level; - void *ls_rsp_addr; - void *ls_req; - u32 status; - atomic_t delay_flag; - void *upper_ct; -}; - -struct unf_esgl_page * -unf_get_and_add_one_free_esgl_page(struct unf_lport *lport, - struct unf_xchg *xchg); -void unf_release_xchg_mgr_temp(struct unf_lport *lport); -u32 unf_init_xchg_mgr_temp(struct unf_lport *lport); -u32 unf_alloc_xchg_resource(struct unf_lport *lport); -void unf_free_all_xchg_mgr(struct unf_lport *lport); -void unf_xchg_mgr_destroy(struct unf_lport *lport); -u32 unf_xchg_ref_inc(struct unf_xchg *xchg, enum unf_ioflow_id io_stage); -void unf_xchg_ref_dec(struct unf_xchg *xchg, enum unf_ioflow_id io_stage); -struct unf_xchg_mgr *unf_get_xchg_mgr_by_lport(struct unf_lport *lport, - u32 mgr_idx); -struct unf_xchg_hot_pool *unf_get_hot_pool_by_lport(struct unf_lport *lport, - u32 mgr_idx); -void unf_free_lport_ini_xchg(struct unf_xchg_mgr *xchg_mgr, bool done_ini_flag); -struct unf_xchg *unf_cm_lookup_xchg_by_cmnd_sn(void *lport, u64 command_sn, - u32 world_id, void *pinitiator); -void *unf_cm_lookup_xchg_by_id(void *lport, u16 ox_id, u32 oid); -void unf_cm_xchg_abort_by_lun(struct unf_lport *lport, struct unf_rport *rport, - u64 lun_id, void *tm_xchg, - bool abort_all_lun_flag); -void unf_cm_xchg_abort_by_session(struct unf_lport *lport, - struct unf_rport *rport); - -void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport *lport, - struct unf_rport *rport, u32 sid, u32 did, - u32 extra_io_stat); -void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport *lport, - struct unf_rport *rport, u32 sid, u32 did); -void unf_cm_free_xchg(void *lport, void *xchg); -void *unf_cm_get_free_xchg(void *lport, u32 xchg_type); -void *unf_cm_lookup_xchg_by_tag(void *lport, u16 hot_pool_tag); -void unf_release_esgls(struct unf_xchg *xchg); -void unf_show_all_xchg(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr); -void unf_destroy_dirty_xchg(struct unf_lport *lport, bool show_only); -void unf_wake_up_scsi_task_cmnd(struct unf_lport *lport); -void unf_set_hot_pool_wait_state(struct unf_lport *lport, bool wait_state); -void unf_free_lport_all_xchg(struct unf_lport *lport); -extern u32 unf_get_up_level_cmnd_errcode(struct unf_ini_error_code *err_table, - u32 err_table_count, u32 drv_err_code); -bool unf_busy_io_completed(struct unf_lport *lport); - -#endif diff --git a/drivers/scsi/spfc/common/unf_exchg_abort.c b/drivers/scsi/spfc/common/unf_exchg_abort.c deleted file mode 100644 index 68f751be04aa1e41594b99686b09be4fb8257d5d..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_exchg_abort.c +++ /dev/null @@ -1,825 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_exchg_abort.h" -#include "unf_log.h" -#include "unf_common.h" -#include "unf_rport.h" -#include "unf_service.h" -#include "unf_ls.h" -#include "unf_io.h" - -void 
unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport *lport, struct unf_rport *rport, u32 sid, - u32 did, u32 extra_io_state) -{ - /* - * for target session: set ABORT - * 1. R_Port remove - * 2. Send PLOGI_ACC callback - * 3. RCVD PLOGI - * 4. RCVD LOGO - */ - FC_CHECK_RETURN_VOID(lport); - - if (lport->xchg_mgr_temp.unf_xchg_mgr_io_xchg_abort) { - /* The SID/DID of the Xchg is in reverse direction in different - * phases. Therefore, the reverse direction needs to be - * considered - */ - lport->xchg_mgr_temp.unf_xchg_mgr_io_xchg_abort(lport, rport, sid, did, - extra_io_state); - lport->xchg_mgr_temp.unf_xchg_mgr_io_xchg_abort(lport, rport, did, sid, - extra_io_state); - } -} - -void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport *lport, - struct unf_rport *rport, u32 sid, u32 did) -{ - FC_CHECK_RETURN_VOID(lport); - - if (lport->xchg_mgr_temp.unf_xchg_mgr_sfs_xchg_abort) { - /* The SID/DID of the Xchg is in reverse direction in different - * phases, therefore, the reverse direction needs to be - * considered - */ - lport->xchg_mgr_temp.unf_xchg_mgr_sfs_xchg_abort(lport, rport, sid, did); - lport->xchg_mgr_temp.unf_xchg_mgr_sfs_xchg_abort(lport, rport, did, sid); - } -} - -void unf_cm_xchg_abort_by_lun(struct unf_lport *lport, struct unf_rport *rport, - u64 lun_id, void *xchg, bool abort_all_lun_flag) -{ - /* - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - void (*unf_xchg_abort_by_lun)(void *, void *, u64, void *, bool) = NULL; - - FC_CHECK_RETURN_VOID(lport); - - unf_xchg_abort_by_lun = lport->xchg_mgr_temp.unf_xchg_abort_by_lun; - if (unf_xchg_abort_by_lun) - unf_xchg_abort_by_lun((void *)lport, (void *)rport, lun_id, - xchg, abort_all_lun_flag); -} - -void unf_cm_xchg_abort_by_session(struct unf_lport *lport, struct unf_rport *rport) -{ - void (*unf_xchg_abort_by_session)(void *, void *) = NULL; - - FC_CHECK_RETURN_VOID(lport); - - unf_xchg_abort_by_session = lport->xchg_mgr_temp.unf_xchg_abort_by_session; - if (unf_xchg_abort_by_session) - unf_xchg_abort_by_session((void *)lport, (void *)rport); -} - -static void unf_xchg_abort_all_sfs_xchg(struct unf_lport *lport, bool clean) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_xchg *xchg = NULL; - ulong pool_lock_falgs = 0; - ulong xchg_lock_flags = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, - UNF_MAJOR, "Port(0x%x) Hot Pool is NULL.", lport->port_id); - - continue; - } - - if (!clean) { - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - - /* Clearing the SFS_Busy_list Exchange Resource */ - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->sfs_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags); - if (atomic_read(&xchg->ref_cnt) > 0) - xchg->io_state |= TGT_IO_STATE_ABORT; - - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_flags); - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - } else { - continue; - } - } -} - -static void unf_xchg_abort_ini_io_xchg(struct unf_lport *lport, bool clean) -{ - /* Clean L_Port/V_Port Link Down I/O: Abort */ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - 
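/*
 * Editor's note (illustrative sketch, not part of the patch): because an
 * exchange's SID/DID pair is reversed depending on which side originated it,
 * the manager-level abort entry points above invoke the low-level abort hook
 * twice, once per direction, rather than trying to guess the phase. Sketch of
 * that idiom; the hook typedef is a simplified stand-in:
 */
typedef void (*demo_abort_hook)(void *lport, void *rport, u32 sid, u32 did);

static void demo_abort_both_directions(demo_abort_hook hook, void *lport,
				       void *rport, u32 sid, u32 did)
{
	if (!hook)
		return;
	hook(lport, rport, sid, did);	/* exchange as locally originated */
	hook(lport, rport, did, sid);	/* exchange as remotely originated */
}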
struct unf_xchg *xchg = NULL; - ulong pool_lock_falgs = 0; - ulong xchg_lock_flags = 0; - u32 io_state = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) hot pool is NULL", - lport->port_id); - - continue; - } - - if (!clean) { - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - - /* 1. Abort INI_Busy_List IO */ - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->ini_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags); - if (atomic_read(&xchg->ref_cnt) > 0) - xchg->io_state |= INI_IO_STATE_DRABORT | io_state; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_flags); - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - } else { - /* Do nothing, just return */ - continue; - } - } -} - -void unf_xchg_abort_all_xchg(void *lport, u32 xchg_type, bool clean) -{ - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - unf_lport = (struct unf_lport *)lport; - - switch (xchg_type) { - case UNF_XCHG_TYPE_SFS: - unf_xchg_abort_all_sfs_xchg(unf_lport, clean); - break; - /* Clean L_Port/V_Port Link Down I/O: Abort */ - case UNF_XCHG_TYPE_INI: - unf_xchg_abort_ini_io_xchg(unf_lport, clean); - break; - default: - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) unknown exch type(0x%x)", - unf_lport->port_id, xchg_type); - break; - } -} - -static void unf_xchg_abort_ini_send_tm_cmd(void *lport, void *rport, u64 lun_id) -{ - /* - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_xchg *xchg = NULL; - ulong flags = 0; - ulong xchg_flag = 0; - u32 i = 0; - u64 raw_lun_id = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - unf_rport = (struct unf_rport *)rport; - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) hot pool is NULL", - unf_lport->port_id); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - - /* 1. 
for each exchange from busy list */ - list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - - raw_lun_id = *(u64 *)(xchg->fcp_cmnd.lun) >> UNF_SHIFT_16 & - UNF_RAW_LUN_ID_MASK; - if (lun_id == raw_lun_id && unf_rport == xchg->rport) { - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - xchg->io_state |= INI_IO_STATE_TMF_ABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD", - xchg, xchg->io_state, - ((struct unf_lport *)lport)->nport_id, - unf_rport->nport_id, xchg->hotpooltag); - } - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - } -} - -static void unf_xchg_abort_ini_tmf_target_reset(void *lport, void *rport) -{ - /* - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_xchg *xchg = NULL; - ulong flags = 0; - ulong xchg_flag = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - unf_rport = (struct unf_rport *)rport; - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) hot pool is NULL", - unf_lport->port_id); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - - /* 1. 
for each exchange from busy_list */ - list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - if (unf_rport == xchg->rport) { - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - xchg->io_state |= INI_IO_STATE_TMF_ABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD", - xchg, xchg->io_state, unf_lport->nport_id, - unf_rport->nport_id, xchg->hotpooltag); - } - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - } -} - -void unf_xchg_abort_by_lun(void *lport, void *rport, u64 lun_id, void *xchg, - bool abort_all_lun_flag) -{ - /* ABORT: set UP_ABORT tag for target LUN I/O */ - struct unf_xchg *tm_xchg = (struct unf_xchg *)xchg; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[event]Port(0x%x) LUN_ID(0x%llx) TM_EXCH(0x%p) flag(%d)", - ((struct unf_lport *)lport)->port_id, lun_id, xchg, - abort_all_lun_flag); - - /* for INI Mode */ - if (!tm_xchg) { - /* - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - unf_xchg_abort_ini_send_tm_cmd(lport, rport, lun_id); - - return; - } -} - -void unf_xchg_abort_by_session(void *lport, void *rport) -{ - /* - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[event]Port(0x%x) Rport(0x%x) start session reset with TMF", - ((struct unf_lport *)lport)->port_id, ((struct unf_rport *)rport)->nport_id); - - unf_xchg_abort_ini_tmf_target_reset(lport, rport); -} - -void unf_xchg_up_abort_io_by_scsi_id(void *lport, u32 scsi_id) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_xchg *xchg = NULL; - ulong flags = 0; - ulong xchg_flag = 0; - u32 i; - u32 io_abort_flag = INI_IO_STATE_UPABORT | INI_IO_STATE_UPSEND_ERR | - INI_IO_STATE_TMF_ABORT; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) hot pool is NULL", - unf_lport->port_id); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - - /* 1. for each exchange from busy_list */ - list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - if (lport == xchg->lport && scsi_id == xchg->scsi_id && - !(xchg->io_state & io_abort_flag)) { - xchg->io_state |= INI_IO_STATE_UPABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Exchange(%p) scsi_cmd(0x%p) state(0x%x) scsi_id(0x%x) tag(0x%x) upabort by scsi id", - xchg, xchg->scsi_cmnd_info.scsi_cmnd, - xchg->io_state, scsi_id, xchg->hotpooltag); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - } - } - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - } -} - -static void unf_ini_busy_io_xchg_abort(void *xchg_hot_pool, void *rport, - u32 sid, u32 did, u32 extra_io_state) -{ - /* - * for target session: Set (DRV) ABORT - * 1. R_Port remove - * 2. 
Send PLOGI_ACC callback - * 3. RCVD PLOGI - * 4. RCVD LOGO - */ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct unf_xchg *xchg = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_rport *unf_rport = NULL; - ulong xchg_lock_flags = 0; - - unf_rport = (struct unf_rport *)rport; - hot_pool = (struct unf_xchg_hot_pool *)xchg_hot_pool; - - /* ABORT INI IO: INI_BUSY_LIST */ - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->ini_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags); - if (did == xchg->did && sid == xchg->sid && - unf_rport == xchg->rport && - (atomic_read(&xchg->ref_cnt) > 0)) { - xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_IMM_RETRY); - xchg->io_state |= INI_IO_STATE_DRABORT; - xchg->io_state |= extra_io_state; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Abort INI:0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", - xchg, (u32)xchg->hotpooltag, (u32)xchg->xchg_type, - (u32)xchg->oxid, (u32)xchg->rxid, - (u32)xchg->sid, (u32)xchg->did, (u32)xchg->io_state, - atomic_read(&xchg->ref_cnt), xchg->alloc_jif); - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_flags); - } -} - -void unf_xchg_mgr_io_xchg_abort(void *lport, void *rport, u32 sid, u32 did, u32 extra_io_state) -{ - /* - * for target session: set ABORT - * 1. R_Port remove - * 2. Send PLOGI_ACC callback - * 3. RCVD PLOGI - * 4. RCVD LOGO - */ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct unf_lport *unf_lport = NULL; - ulong pool_lock_falgs = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) hot pool is NULL", - unf_lport->port_id); - - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - - /* 1. 
Clear INI (session) IO: INI Mode */ - unf_ini_busy_io_xchg_abort(hot_pool, rport, sid, did, extra_io_state); - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - } -} - -void unf_xchg_mgr_sfs_xchg_abort(void *lport, void *rport, u32 sid, u32 did) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_xchg *xchg = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong pool_lock_falgs = 0; - ulong xchg_lock_flags = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (!hot_pool) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, - UNF_MAJOR, "Port(0x%x) Hot Pool is NULL.", - unf_lport->port_id); - - continue; - } - - unf_rport = (struct unf_rport *)rport; - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - - /* Clear the SFS exchange of the corresponding connection */ - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->sfs_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags); - if (did == xchg->did && sid == xchg->sid && - unf_rport == xchg->rport && (atomic_read(&xchg->ref_cnt) > 0)) { - xchg->io_state |= TGT_IO_STATE_ABORT; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Abort SFS:0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", - xchg, (u32)xchg->hotpooltag, (u32)xchg->xchg_type, - (u32)xchg->oxid, (u32)xchg->rxid, (u32)xchg->sid, - (u32)xchg->did, (u32)xchg->io_state, - atomic_read(&xchg->ref_cnt), xchg->alloc_jif); - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_flags); - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - } -} - -static void unf_fc_wait_abts_complete(struct unf_lport *lport, struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = lport; - struct unf_scsi_cmnd scsi_cmnd = {0}; - ulong flag = 0; - u32 time_out_value = 2000; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - u32 io_result; - - scsi_cmnd.scsi_id = xchg->scsi_cmnd_info.scsi_id; - scsi_cmnd.upper_cmnd = xchg->scsi_cmnd_info.scsi_cmnd; - scsi_cmnd.done = xchg->scsi_cmnd_info.done; - scsi_image_table = &unf_lport->rport_scsi_table; - - if (down_timeout(&xchg->task_sema, (s64)msecs_to_jiffies(time_out_value))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) recv abts marker timeout,Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x)", - unf_lport->port_id, xchg, xchg->oxid, xchg->rxid); - goto ABTS_FIAILED; - } - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - if (xchg->ucode_abts_state == UNF_IO_SUCCESS || - xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Port(0x%x) Send ABTS succeed and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)", - unf_lport->port_id, xchg, xchg->oxid, xchg->rxid, - xchg->ucode_abts_state); - io_result = DID_BUS_BUSY; - UNF_IO_RESULT_CNT(scsi_image_table, scsi_cmnd.scsi_id, io_result); - unf_complete_cmnd(&scsi_cmnd, io_result << UNF_SHIFT_16); - return; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) send ABTS failed. 
Exch(0x%p) hot_tag(0x%x) ret(0x%x) xchg->io_state (0x%x)", - unf_lport->port_id, xchg, xchg->hotpooltag, - xchg->scsi_cmnd_info.result, xchg->io_state); - goto ABTS_FIAILED; - -ABTS_FIAILED: - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - xchg->io_state &= ~INI_IO_STATE_UPABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); -} - -void unf_fc_abort_time_out_cmnd(struct unf_lport *lport, struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = lport; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - if (xchg->io_state & INI_IO_STATE_UPABORT) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "LPort(0x%x) xchange(0x%p) OX_ID(0x%x), RX_ID(0x%x) Cmdsn(0x%lx) has been aborted.", - unf_lport->port_id, xchg, xchg->oxid, - xchg->rxid, (ulong)xchg->cmnd_sn); - return; - } - xchg->io_state |= INI_IO_STATE_UPABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_KEVENT, - "LPort(0x%x) exchg(0x%p) OX_ID(0x%x) RX_ID(0x%x) Cmdsn(0x%lx) timeout abort it", - unf_lport->port_id, xchg, xchg->oxid, xchg->rxid, (ulong)xchg->cmnd_sn); - - unf_lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, - (ulong)UNF_WAIT_ABTS_RSP_TIMEOUT, UNF_TIMER_TYPE_INI_ABTS); - - sema_init(&xchg->task_sema, 0); - - if (unf_send_abts(unf_lport, xchg) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "LPort(0x%x) send ABTS, Send ABTS unsuccessful. Exchange OX_ID(0x%x), RX_ID(0x%x).", - unf_lport->port_id, xchg->oxid, xchg->rxid); - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - xchg->io_state &= ~INI_IO_STATE_UPABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - return; - } - unf_fc_wait_abts_complete(unf_lport, xchg); -} - -static void unf_fc_ini_io_rec_wait_time_out(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - ulong time_out = 0; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) Rec timeout exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - lport->port_id, rport->nport_id, xchg, xchg->oxid, - xchg->rxid, xchg->io_state); - - if (xchg->rport_bind_jifs == rport->rport_alloc_jifs) { - unf_send_rec(lport, rport, xchg); - - if (xchg->scsi_cmnd_info.abort_time_out > 0) { - time_out = (xchg->scsi_cmnd_info.abort_time_out > UNF_REC_TOV) ? 
- (xchg->scsi_cmnd_info.abort_time_out - UNF_REC_TOV) : 0; - if (time_out > 0) { - lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, time_out, - UNF_TIMER_TYPE_REQ_IO); - } else { - unf_fc_abort_time_out_cmnd(lport, xchg); - } - } - } -} - -static void unf_fc_ini_send_abts_time_out(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - if (xchg->rport_bind_jifs == rport->rport_alloc_jifs && - xchg->rport_bind_jifs != INVALID_VALUE64) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) first time to send abts timeout, retry again OX_ID(0x%x) RX_ID(0x%x) HotTag(0x%x) state(0x%x)", - lport->port_id, rport->nport_id, xchg, xchg->oxid, - xchg->rxid, xchg->hotpooltag, xchg->io_state); - - lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, - (ulong)UNF_WAIT_ABTS_RSP_TIMEOUT, UNF_TIMER_TYPE_INI_ABTS); - - if (unf_send_abts(lport, xchg) != RETURN_OK) { - lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - - unf_abts_timeout_recovery_default(rport, xchg); - - unf_cm_free_xchg(lport, xchg); - } - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) rport is invalid, exchg rport jiff(0x%llx 0x%llx), free exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - lport->port_id, rport->nport_id, xchg, - xchg->rport_bind_jifs, rport->rport_alloc_jifs, - xchg->oxid, xchg->rxid, xchg->io_state); - - unf_cm_free_xchg(lport, xchg); - } -} - -void unf_fc_ini_io_xchg_time_out(struct work_struct *work) -{ - struct unf_xchg *xchg = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - u32 ret = UNF_RETURN_ERROR; - u32 port_valid_flag = 0; - - xchg = container_of(work, struct unf_xchg, timeout_work.work); - FC_CHECK_RETURN_VOID(xchg); - - ret = unf_xchg_ref_inc(xchg, INI_IO_TIMEOUT); - FC_CHECK_RETURN_VOID(ret == RETURN_OK); - - unf_lport = xchg->lport; - unf_rport = xchg->rport; - - port_valid_flag = (!unf_lport) || (!unf_rport); - if (port_valid_flag) { - unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); - unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); - return; - } - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - /* 1. 
for Send RRQ failed Timer timeout */ - if (INI_IO_STATE_RRQSEND_ERR & xchg->io_state) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[info]LPort(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for RRQ send failed OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - unf_lport->port_id, unf_rport->nport_id, xchg, - xchg->oxid, xchg->rxid, xchg->io_state); - unf_notify_chip_free_xid(xchg); - unf_cm_free_xchg(unf_lport, xchg); - } - /* Second ABTS timeout and enter LOGO process */ - else if ((INI_IO_STATE_ABORT_TIMEOUT & xchg->io_state) && - (!(ABTS_RESPONSE_RECEIVED & xchg->abts_state))) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for second abts send OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - unf_lport->port_id, unf_rport->nport_id, xchg, - xchg->oxid, xchg->rxid, xchg->io_state); - unf_abts_timeout_recovery_default(unf_rport, xchg); - unf_cm_free_xchg(unf_lport, xchg); - } - /* First time to send ABTS, timeout and retry to send ABTS again */ - else if ((INI_IO_STATE_UPABORT & xchg->io_state) && - (!(ABTS_RESPONSE_RECEIVED & xchg->abts_state))) { - xchg->io_state |= INI_IO_STATE_ABORT_TIMEOUT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_fc_ini_send_abts_time_out(unf_lport, unf_rport, xchg); - } - /* 3. IO_DONE */ - else if ((INI_IO_STATE_DONE & xchg->io_state) && - (ABTS_RESPONSE_RECEIVED & xchg->abts_state)) { - /* - * for IO_DONE: - * 1. INI ABTS first timer time out - * 2. INI RCVD ABTS Response - * 3. Normal case for I/O Done - */ - /* Send ABTS & RCVD RSP & no timeout */ - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - if (unf_send_rrq(unf_lport, unf_rport, xchg) == RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]LPort(0x%x) send RRQ succeed to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - unf_lport->port_id, unf_rport->nport_id, xchg, - xchg->oxid, xchg->rxid, xchg->io_state); - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]LPort(0x%x) can't send RRQ to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - unf_lport->port_id, unf_rport->nport_id, xchg, - xchg->oxid, xchg->rxid, xchg->io_state); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - xchg->io_state |= INI_IO_STATE_RRQSEND_ERR; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, - (ulong)UNF_WRITE_RRQ_SENDERR_INTERVAL, UNF_TIMER_TYPE_INI_IO); - } - } else if (INI_IO_STATE_REC_TIMEOUT_WAIT & xchg->io_state) { - xchg->io_state &= ~INI_IO_STATE_REC_TIMEOUT_WAIT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_fc_ini_io_rec_wait_time_out(unf_lport, unf_rport, xchg); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_fc_abort_time_out_cmnd(unf_lport, xchg); - } - - unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); - unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); -} - -void unf_sfs_xchg_time_out(struct work_struct *work) -{ - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(work); - xchg = container_of(work, struct unf_xchg, timeout_work.work); - FC_CHECK_RETURN_VOID(xchg); - - ret = unf_xchg_ref_inc(xchg, SFS_TIMEOUT); - FC_CHECK_RETURN_VOID(ret == RETURN_OK); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_lport = xchg->lport; - unf_rport = xchg->rport; - 
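/*
 * Editor's note (illustrative sketch, not part of the patch): both timeout
 * handlers here follow the same delayed_work discipline: recover the exchange
 * with container_of(), take a temporary reference up front, and on every exit
 * path drop two references -- the temporary one plus the one held on behalf of
 * the now-expired timer. Simplified rendering under those assumptions; the ref
 * helpers stand in for unf_xchg_ref_inc()/unf_xchg_ref_dec():
 */
struct demo_timed_xchg {
	struct delayed_work timeout_work;
	atomic_t ref_cnt;
};

static void demo_timeout_handler(struct work_struct *work)
{
	struct demo_timed_xchg *xchg =
		container_of(work, struct demo_timed_xchg, timeout_work.work);

	if (!atomic_inc_not_zero(&xchg->ref_cnt))
		return;			/* exchange already being torn down */

	/* ... dispatch on io_state: retry ABTS, send RRQ, recover, etc. ... */

	atomic_dec(&xchg->ref_cnt);	/* drop the temporary handler reference */
	atomic_dec(&xchg->ref_cnt);	/* drop the reference held for the timer */
}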
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - unf_xchg_ref_dec(xchg, SFS_TIMEOUT); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]SFS Exch(%p) Cmnd(0x%x) IO Exch(0x%p) Sid_Did(0x%x:0x%x) HotTag(0x%x) State(0x%x) Timeout.", - xchg, xchg->cmnd_code, xchg->io_xchg, xchg->sid, xchg->did, - xchg->hotpooltag, xchg->io_state); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if ((xchg->io_state & TGT_IO_STATE_ABORT) && - xchg->cmnd_code != ELS_RRQ && xchg->cmnd_code != ELS_LOGO) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "SFS Exch(0x%p) Cmnd(0x%x) Hot Pool Tag(0x%x) timeout, but aborted, no need to handle.", - xchg, xchg->cmnd_code, xchg->hotpooltag); - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - unf_xchg_ref_dec(xchg, SFS_TIMEOUT); - unf_xchg_ref_dec(xchg, SFS_TIMEOUT); - - return; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - /* The sfs times out. If the sfs is ELS reply, - * go to UNF_RPortErrorRecovery/unf_lport_error_recovery. - * Otherwise, go to the corresponding obCallback. - */ - if (UNF_XCHG_IS_ELS_REPLY(xchg) && unf_rport) { - if (unf_rport->nport_id >= UNF_FC_FID_DOM_MGR) - unf_lport_error_recovery(unf_lport); - else - unf_rport_error_recovery(unf_rport); - - } else if (xchg->ob_callback) { - xchg->ob_callback(xchg); - } else { - /* Do nothing */ - } - unf_notify_chip_free_xid(xchg); - unf_xchg_ref_dec(xchg, SFS_TIMEOUT); - unf_xchg_ref_dec(xchg, SFS_TIMEOUT); -} diff --git a/drivers/scsi/spfc/common/unf_exchg_abort.h b/drivers/scsi/spfc/common/unf_exchg_abort.h deleted file mode 100644 index 75b5a1bab7337ad0e63bc599bde81f52a345dcb1..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_exchg_abort.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_EXCHG_ABORT_H -#define UNF_EXCHG_ABORT_H - -#include "unf_type.h" -#include "unf_exchg.h" - -#define UNF_RAW_LUN_ID_MASK 0x000000000000ffff - -void unf_xchg_abort_by_lun(void *lport, void *rport, u64 lun_id, void *tm_xchg, - bool abort_all_lun_flag); -void unf_xchg_abort_by_session(void *lport, void *rport); -void unf_xchg_mgr_io_xchg_abort(void *lport, void *rport, u32 sid, u32 did, - u32 extra_io_state); -void unf_xchg_mgr_sfs_xchg_abort(void *lport, void *rport, u32 sid, u32 did); -void unf_xchg_abort_all_xchg(void *lport, u32 xchg_type, bool clean); -void unf_fc_abort_time_out_cmnd(struct unf_lport *lport, struct unf_xchg *xchg); -void unf_fc_ini_io_xchg_time_out(struct work_struct *work); -void unf_sfs_xchg_time_out(struct work_struct *work); -void unf_xchg_up_abort_io_by_scsi_id(void *lport, u32 scsi_id); -#endif diff --git a/drivers/scsi/spfc/common/unf_fcstruct.h b/drivers/scsi/spfc/common/unf_fcstruct.h deleted file mode 100644 index d6eb8592994b80145461718dc83b8e7e06a0723d..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_fcstruct.h +++ /dev/null @@ -1,459 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_FCSTRUCT_H -#define UNF_FCSTRUCT_H - -#include "unf_type.h" -#include "unf_scsi_common.h" - -#define FC_RCTL_BLS 0x80000000 - -/* - * * R_CTL Basic Link Data defines - */ - -#define FC_RCTL_BLS_ACC (FC_RCTL_BLS | 0x04000000) -#define FC_RCTL_BLS_RJT (FC_RCTL_BLS | 0x05000000) - -/* - * * BA_RJT reason code defines - */ -#define FCXLS_BA_RJT_LOGICAL_ERROR 0x00030000 - -/* - * * BA_RJT code explanation - */ - -#define FCXLS_LS_RJT_INVALID_OXID_RXID 0x00001700 
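/*
 * Editor's note (illustrative sketch, not part of the patch): the constants
 * above pack the BLS R_CTL value and the BA_RJT reason/explanation into single
 * big-endian 32-bit words -- reason in bits 23:16 and explanation in bits 15:8
 * of the payload word. A small helper showing how the two #defines shown here
 * compose, under that bit-layout assumption:
 */
static inline u32 demo_ba_rjt_word(u8 reason, u8 explanation)
{
	return ((u32)reason << 16) | ((u32)explanation << 8);
}

/*
 * e.g. demo_ba_rjt_word(0x03, 0x17) == 0x00030000 | 0x00001700, i.e.
 * FCXLS_BA_RJT_LOGICAL_ERROR combined with FCXLS_LS_RJT_INVALID_OXID_RXID.
 */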
- -/* - * * ELS ACC - */ -struct unf_els_acc { - struct unf_fc_head frame_hdr; - u32 cmnd; -}; - -/* - * * ELS RJT - */ -struct unf_els_rjt { - struct unf_fc_head frame_hdr; - u32 cmnd; - u32 reason_code; -}; - -/* - * * FLOGI payload, - * * FC-LS-2 FLOGI, PLOGI, FDISC or LS_ACC Payload - */ -struct unf_flogi_fdisc_payload { - u32 cmnd; - struct unf_fabric_parm fabric_parms; -}; - -/* - * * Flogi and Flogi accept frames. They are the same structure - */ -struct unf_flogi_fdisc_acc { - struct unf_fc_head frame_hdr; - struct unf_flogi_fdisc_payload flogi_payload; -}; - -/* - * * Fdisc and Fdisc accept frames. They are the same structure - */ - -struct unf_fdisc_acc { - struct unf_fc_head frame_hdr; - struct unf_flogi_fdisc_payload fdisc_payload; -}; - -/* - * * PLOGI payload - */ -struct unf_plogi_payload { - u32 cmnd; - struct unf_lgn_parm stparms; -}; - -/* - *Plogi, Plogi accept, Pdisc and Pdisc accept frames. They are all the same - *structure. - */ -struct unf_plogi_pdisc { - struct unf_fc_head frame_hdr; - struct unf_plogi_payload payload; -}; - -/* - * * LOGO logout link service requests invalidation of service parameters and - * * port name. - * * see FC-PH 4.3 Section 21.4.8 - */ -struct unf_logo_payload { - u32 cmnd; - u32 nport_id; - u32 high_port_name; - u32 low_port_name; -}; - -/* - * * payload to hold LOGO command - */ -struct unf_logo { - struct unf_fc_head frame_hdr; - struct unf_logo_payload payload; -}; - -/* - * * payload for ECHO command, refer to FC-LS-2 4.2.4 - */ -struct unf_echo_payload { - u32 cmnd; -#define UNF_FC_ECHO_PAYLOAD_LENGTH 255 /* Length in words */ - u32 data[UNF_FC_ECHO_PAYLOAD_LENGTH]; -}; - -struct unf_echo { - struct unf_fc_head frame_hdr; - struct unf_echo_payload *echo_pld; - dma_addr_t phy_echo_addr; -}; - -#define UNF_PRLI_SIRT_EXTRA_SIZE 12 - -/* - * * payload for PRLI and PRLO - */ -struct unf_prli_payload { - u32 cmnd; -#define UNF_FC_PRLI_PAYLOAD_LENGTH 7 /* Length in words */ - u32 parms[UNF_FC_PRLI_PAYLOAD_LENGTH]; -}; - -/* - * * FCHS structure with payload - */ -struct unf_prli_prlo { - struct unf_fc_head frame_hdr; - struct unf_prli_payload payload; -}; - -struct unf_adisc_payload { - u32 cmnd; - u32 hard_address; - u32 high_port_name; - u32 low_port_name; - u32 high_node_name; - u32 low_node_name; - u32 nport_id; -}; - -/* - * * FCHS structure with payload - */ -struct unf_adisc { - struct unf_fc_head frame_hdr; /* FCHS structure */ - struct unf_adisc_payload - adisc_payl; /* Payload data containing ADISC info - */ -}; - -/* - * * RLS payload - */ -struct unf_rls_payload { - u32 cmnd; - u32 nport_id; /* in litle endian format */ -}; - -/* - * * RLS - */ -struct unf_rls { - struct unf_fc_head frame_hdr; /* FCHS structure */ - struct unf_rls_payload rls; /* payload data containing the RLS info */ -}; - -/* - * * RLS accept payload - */ -struct unf_rls_acc_payload { - u32 cmnd; - u32 link_failure_count; - u32 loss_of_sync_count; - u32 loss_of_signal_count; - u32 primitive_seq_count; - u32 invalid_trans_word_count; - u32 invalid_crc_count; -}; - -/* - * * RLS accept - */ -struct unf_rls_acc { - struct unf_fc_head frame_hdr; /* FCHS structure */ - struct unf_rls_acc_payload - rls; /* payload data containing the RLS ACC info - */ -}; - -/* - * * FCHS structure with payload - */ -struct unf_rrq { - struct unf_fc_head frame_hdr; - u32 cmnd; - u32 sid; - u32 oxid_rxid; -}; - -#define UNF_SCR_PAYLOAD_CNT 2 -struct unf_scr { - struct unf_fc_head frame_hdr; - u32 payload[UNF_SCR_PAYLOAD_CNT]; -}; - -struct unf_ctiu_prem { - u32 rev_inid; - u32 
gstype_gssub_options; - u32 cmnd_rsp_size; - u32 frag_reason_exp_vend; -}; - -#define UNF_FC4TYPE_CNT 8 -struct unf_rftid { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 nport_id; - u32 fc4_types[UNF_FC4TYPE_CNT]; -}; - -struct unf_rffid { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 nport_id; - u32 fc4_feature; -}; - -struct unf_rffid_rsp { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; -}; - -struct unf_gffid { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 nport_id; -}; - -struct unf_gffid_rsp { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 fc4_feature[32]; -}; - -struct unf_gnnid { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 nport_id; -}; - -struct unf_gnnid_rsp { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 node_name[2]; -}; - -struct unf_gpnid { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 nport_id; -}; - -struct unf_gpnid_rsp { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 port_name[2]; -}; - -struct unf_rft_rsp { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; -}; - -struct unf_ls_rjt_pld { - u32 srr_op; /* 01000000h */ - u8 vandor; - u8 reason_exp; - u8 reason; - u8 reserved; -}; - -struct unf_ls_rjt { - struct unf_fc_head frame_hdr; - struct unf_ls_rjt_pld pld; -}; - -struct unf_rec_pld { - u32 rec_cmnd; - u32 xchg_org_sid; /* bit0-bit23 */ - u16 rx_id; - u16 ox_id; -}; - -struct unf_rec { - struct unf_fc_head frame_hdr; - struct unf_rec_pld rec_pld; -}; - -struct unf_rec_acc_pld { - u32 cmnd; - u16 rx_id; - u16 ox_id; - u32 org_addr_id; /* bit0-bit23 */ - u32 rsp_addr_id; /* bit0-bit23 */ -}; - -struct unf_rec_acc { - struct unf_fc_head frame_hdr; - struct unf_rec_acc_pld payload; -}; - -struct unf_gid { - struct unf_ctiu_prem ctiu_pream; - u32 scope_type; -}; - -struct unf_gid_acc { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; -}; - -#define UNF_LOOPMAP_COUNT 128 -struct unf_loop_init { - struct unf_fc_head frame_hdr; - u32 cmnd; -#define UNF_FC_ALPA_BIT_MAP_SIZE 4 - u32 alpha_bit_map[UNF_FC_ALPA_BIT_MAP_SIZE]; -}; - -struct unf_loop_map { - struct unf_fc_head frame_hdr; - u32 cmnd; - u32 loop_map[32]; -}; - -struct unf_ctiu_rjt { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; -}; - -struct unf_gid_acc_pld { - struct unf_ctiu_prem ctiu_pream; - - u32 gid_port_id[UNF_GID_PORT_CNT]; -}; - -struct unf_gid_rsp { - struct unf_gid_acc_pld *gid_acc_pld; -}; - -struct unf_gid_req_rsp { - struct unf_fc_head frame_hdr; - struct unf_gid gid_req; - struct unf_gid_rsp gid_rsp; -}; - -/* FC-LS-2 Table 31 RSCN Payload */ -struct unf_rscn_port_id_page { - u8 port_id_port; - u8 port_id_area; - u8 port_id_domain; - - u8 addr_format : 2; - u8 event_qualifier : 4; - u8 reserved : 2; -}; - -struct unf_rscn_pld { - u32 cmnd; - struct unf_rscn_port_id_page port_id_page[UNF_RSCN_PAGE_SUM]; -}; - -struct unf_rscn { - struct unf_fc_head frame_hdr; - struct unf_rscn_pld *rscn_pld; -}; - -union unf_sfs_u { - struct { - struct unf_fc_head frame_head; - u8 data[0]; - } sfs_common; - struct unf_els_acc els_acc; - struct unf_els_rjt els_rjt; - struct unf_plogi_pdisc plogi; - struct unf_logo logo; - struct unf_echo echo; - struct unf_echo echo_acc; - struct unf_prli_prlo prli; - struct unf_prli_prlo prlo; - struct unf_rls rls; - struct unf_rls_acc rls_acc; - struct unf_plogi_pdisc pdisc; - struct unf_adisc adisc; - 
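/*
 * Editor's note (illustrative sketch, not part of the patch): in struct
 * unf_ctiu_prem above, cmnd_rsp_size carries the CT command (or response) code
 * in its high 16 bits; the GS code later in this patch extracts it with
 * ">> UNF_SHIFT_16" and classifies responses by masking the same field. Sketch
 * under those assumptions -- the mask and the FC-GS Accept/Reject codes below
 * are the editor's stand-ins, not the driver's UNF_CT_IU_* definitions:
 */
#define DEMO_CT_RSP_MASK	0xffff0000U
#define DEMO_CT_ACCEPT		0x80020000U	/* FC-GS CT_IU Accept (8002h) */
#define DEMO_CT_REJECT		0x80010000U	/* FC-GS CT_IU Reject (8001h) */

static inline u16 demo_ct_opcode(u32 cmnd_rsp_size)
{
	return (u16)(cmnd_rsp_size >> 16);	/* e.g. NS_GFF_ID, NS_GNN_ID */
}

static inline bool demo_ct_accepted(u32 cmnd_rsp_size)
{
	return (cmnd_rsp_size & DEMO_CT_RSP_MASK) == DEMO_CT_ACCEPT;
}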
struct unf_rrq rrq; - struct unf_flogi_fdisc_acc flogi; - struct unf_fdisc_acc fdisc; - struct unf_scr scr; - struct unf_rec rec; - struct unf_rec_acc rec_acc; - struct unf_ls_rjt ls_rjt; - struct unf_rscn rscn; - struct unf_gid_req_rsp get_id; - struct unf_rftid rft_id; - struct unf_rft_rsp rft_id_rsp; - struct unf_rffid rff_id; - struct unf_rffid_rsp rff_id_rsp; - struct unf_gffid gff_id; - struct unf_gffid_rsp gff_id_rsp; - struct unf_gnnid gnn_id; - struct unf_gnnid_rsp gnn_id_rsp; - struct unf_gpnid gpn_id; - struct unf_gpnid_rsp gpn_id_rsp; - struct unf_plogi_pdisc plogi_acc; - struct unf_plogi_pdisc pdisc_acc; - struct unf_adisc adisc_acc; - struct unf_prli_prlo prli_acc; - struct unf_prli_prlo prlo_acc; - struct unf_flogi_fdisc_acc flogi_acc; - struct unf_fdisc_acc fdisc_acc; - struct unf_loop_init lpi; - struct unf_loop_map loop_map; - struct unf_ctiu_rjt ctiu_rjt; -}; - -struct unf_sfs_entry { - union unf_sfs_u *fc_sfs_entry_ptr; /* Virtual addr of SFS buffer */ - u64 sfs_buff_phy_addr; /* Physical addr of SFS buffer */ - u32 sfs_buff_len; /* Length of bytes in SFS buffer */ - u32 cur_offset; -}; - -struct unf_fcp_rsp_iu_entry { - u8 *fcp_rsp_iu; - u32 fcp_sense_len; -}; - -struct unf_rjt_info { - u32 els_cmnd_code; - u32 reason_code; - u32 reason_explanation; - u8 class_mode; - u8 ucrsvd[3]; -}; - -#endif diff --git a/drivers/scsi/spfc/common/unf_gs.c b/drivers/scsi/spfc/common/unf_gs.c deleted file mode 100644 index cb5fc1a5d246ddc36078d517b03a807cc7b9947b..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_gs.c +++ /dev/null @@ -1,2521 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_gs.h" -#include "unf_log.h" -#include "unf_exchg.h" -#include "unf_rport.h" -#include "unf_service.h" -#include "unf_portman.h" -#include "unf_ls.h" - -static void unf_gpn_id_callback(void *lport, void *sns_port, void *xchg); -static void unf_gpn_id_ob_callback(struct unf_xchg *xchg); -static void unf_gnn_id_ob_callback(struct unf_xchg *xchg); -static void unf_scr_callback(void *lport, void *rport, void *xchg); -static void unf_scr_ob_callback(struct unf_xchg *xchg); -static void unf_gff_id_ob_callback(struct unf_xchg *xchg); -static void unf_gff_id_callback(void *lport, void *sns_port, void *xchg); -static void unf_gnn_id_callback(void *lport, void *sns_port, void *xchg); -static void unf_gid_ft_ob_callback(struct unf_xchg *xchg); -static void unf_gid_ft_callback(void *lport, void *rport, void *xchg); -static void unf_gid_pt_ob_callback(struct unf_xchg *xchg); -static void unf_gid_pt_callback(void *lport, void *rport, void *xchg); -static void unf_rft_id_ob_callback(struct unf_xchg *xchg); -static void unf_rft_id_callback(void *lport, void *rport, void *xchg); -static void unf_rff_id_callback(void *lport, void *rport, void *xchg); -static void unf_rff_id_ob_callback(struct unf_xchg *xchg); - -#define UNF_GET_DOMAIN_ID(x) (((x) & 0xFF0000) >> 16) -#define UNF_GET_AREA_ID(x) (((x) & 0x00FF00) >> 8) - -#define UNF_GID_LAST_PORT_ID 0x80 -#define UNF_GID_CONTROL(nport_id) ((nport_id) >> 24) -#define UNF_GET_PORT_OPTIONS(fc_4feature) ((fc_4feature) >> 20) - -#define UNF_SERVICE_GET_NPORTID_FORM_GID_PAGE(port_id_page) \ - (((u32)(port_id_page)->port_id_domain << 16) | \ - ((u32)(port_id_page)->port_id_area << 8) | \ - ((u32)(port_id_page)->port_id_port)) - -#define UNF_GNN_GFF_ID_RJT_REASON(rjt_reason) \ - ((UNF_CTIU_RJT_UNABLE_PERFORM == \ - ((rjt_reason) & UNF_CTIU_RJT_MASK)) && \ - 
((UNF_CTIU_RJT_EXP_PORTID_NO_REG == \ - ((rjt_reason) & UNF_CTIU_RJT_EXP_MASK)) || \ - (UNF_CTIU_RJT_EXP_PORTNAME_NO_REG == \ - ((rjt_reason) & UNF_CTIU_RJT_EXP_MASK)) || \ - (UNF_CTIU_RJT_EXP_NODENAME_NO_REG == \ - ((rjt_reason) & UNF_CTIU_RJT_EXP_MASK)))) - -u32 unf_send_scr(struct unf_lport *lport, struct unf_rport *rport) -{ - /* after RCVD RFF_ID ACC */ - struct unf_scr *scr = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, NULL, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for SCR", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_SCR; - - xchg->callback = unf_scr_callback; - xchg->ob_callback = unf_scr_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - scr = &fc_entry->scr; - memset(scr, 0, sizeof(struct unf_scr)); - scr->payload[ARRAY_INDEX_0] = (UNF_GS_CMND_SCR); /* SCR is 0x62 */ - scr->payload[ARRAY_INDEX_1] = (UNF_FABRIC_FULL_REG); /* Full registration */ - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: SCR send %s. Port(0x%x_0x%x)--->RPort(0x%x) with hottag(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - lport->nport_id, rport->nport_id, xchg->hotpooltag); - - return ret; -} - -static void unf_fill_gff_id_pld(struct unf_gffid *gff_id, u32 nport_id) -{ - FC_CHECK_RETURN_VOID(gff_id); - - gff_id->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - gff_id->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - gff_id->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GFF_ID); - gff_id->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - gff_id->nport_id = nport_id; -} - -static void unf_ctpass_thru_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_gid_acc_pld *gid_acc_pld = NULL; - struct unf_xchg *unf_xchg = NULL; - union unf_sfs_u *sfs = NULL; - u32 cmnd_rsp_size = 0; - - struct send_com_trans_out *out_send = NULL; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - sfs = unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - - gid_acc_pld = sfs->get_id.gid_rsp.gid_acc_pld; - if (!gid_acc_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) CT PassThru response payload is NULL", - unf_lport->port_id); - - return; - } - - out_send = (struct send_com_trans_out *)unf_xchg->upper_ct; - - cmnd_rsp_size = (gid_acc_pld->ctiu_pream.cmnd_rsp_size); - if (UNF_CT_IU_ACCEPT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - out_send->hba_status = 0; /* HBA_STATUS_OK 0 */ - out_send->total_resp_buffer_cnt = unf_xchg->fcp_sfs_union.sfs_entry.cur_offset; - out_send->actual_resp_buffer_cnt = unf_xchg->fcp_sfs_union.sfs_entry.cur_offset; - unf_cpu_to_big_end(out_send->resp_buffer, (u32)out_send->total_resp_buffer_cnt); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) CT PassThru was receive len is(0x%0x)", - unf_lport->port_id, unf_lport->nport_id, - out_send->total_resp_buffer_cnt); - } else if (UNF_CT_IU_REJECT == (cmnd_rsp_size & 
UNF_CT_IU_RSP_MASK)) { - out_send->hba_status = 13; /* HBA_STATUS_ERROR_ELS_REJECT 13 */ - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) CT PassThru was rejected", - unf_lport->port_id, unf_lport->nport_id); - } else { - out_send->hba_status = 1; /* HBA_STATUS_ERROR 1 */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) CT PassThru was UNKNOWN", - unf_lport->port_id, unf_lport->nport_id); - } - - up(&unf_lport->wmi_task_sema); -} - -u32 unf_send_ctpass_thru(struct unf_lport *lport, void *buffer, u32 bufflen) -{ - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_rport *sns_port = NULL; - struct send_com_trans_in *in_send = (struct send_com_trans_in *)buffer; - struct send_com_trans_out *out_send = - (struct send_com_trans_out *)buffer; - struct unf_ctiu_prem *ctiu_pream = NULL; - struct unf_gid *gs_pld = NULL; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buffer, UNF_RETURN_ERROR); - - ctiu_pream = (struct unf_ctiu_prem *)in_send->req_buffer; - unf_cpu_to_big_end(ctiu_pream, sizeof(struct unf_gid)); - - if (ctiu_pream->cmnd_rsp_size >> UNF_SHIFT_16 == NS_GIEL) { - sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_MGMT_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) can't find SNS port", - lport->port_id); - - return UNF_RETURN_ERROR; - } - } else if (ctiu_pream->cmnd_rsp_size >> UNF_SHIFT_16 == NS_GA_NXT) { - sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) can't find SNS port", - lport->port_id); - - return UNF_RETURN_ERROR; - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[info]%s cmnd(0x%x) is error:", __func__, - ctiu_pream->cmnd_rsp_size >> UNF_SHIFT_16); - - return UNF_RETURN_ERROR; - } - - xchg = unf_get_sfs_free_xchg_and_init(lport, sns_port->nport_id, sns_port, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for GFF_ID", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - xchg->cmnd_code = ctiu_pream->cmnd_rsp_size >> UNF_SHIFT_16; - xchg->upper_ct = buffer; - xchg->ob_callback = NULL; - xchg->callback = unf_ctpass_thru_callback; - xchg->oxid = xchg->hotpooltag; - unf_fill_package(&pkg, xchg, sns_port); - pkg.type = UNF_PKG_GS_REQ; - xchg->fcp_sfs_union.sfs_entry.sfs_buff_len = bufflen; - gs_pld = &fc_entry->get_id.gid_req; /* GID req payload */ - memset(gs_pld, 0, sizeof(struct unf_gid)); - memcpy(gs_pld, (struct unf_gid *)in_send->req_buffer, sizeof(struct unf_gid)); - fc_entry->get_id.gid_rsp.gid_acc_pld = (struct unf_gid_acc_pld *)out_send->resp_buffer; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - - return ret; -} - -u32 unf_send_gff_id(struct unf_lport *lport, struct unf_rport *sns_port, - u32 nport_id) -{ - struct unf_gffid *gff_id = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - - struct unf_frame_pkg pkg; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - - if (unf_is_lport_valid(lport) != RETURN_OK) - /* Lport is invalid, no retry or handle required, return ok */ - return RETURN_OK; - - unf_lport = (struct unf_lport *)lport->root_lport; - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, sns_port->nport_id, sns_port, 
&fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for GFF_ID", - lport->port_id); - - return unf_get_and_post_disc_event(lport, sns_port, nport_id, UNF_DISC_GET_FEATURE); - } - - xchg->cmnd_code = NS_GFF_ID; - xchg->disc_portid = nport_id; - - xchg->ob_callback = unf_gff_id_ob_callback; - xchg->callback = unf_gff_id_callback; - - unf_fill_package(&pkg, xchg, sns_port); - pkg.type = UNF_PKG_GS_REQ; - - gff_id = &fc_entry->gff_id; - memset(gff_id, 0, sizeof(struct unf_gffid)); - unf_fill_gff_id_pld(gff_id, nport_id); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - else - atomic_dec(&unf_lport->disc.disc_thread_info.disc_contrl_size); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: GFF_ID send %s. Port(0x%x)--->RPort(0x%x). Inquire RPort(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - sns_port->nport_id, nport_id); - - return ret; -} - -static void unf_fill_gnnid_pld(struct unf_gnnid *gnnid_pld, u32 nport_id) -{ - /* Inquiry R_Port node name from SW */ - FC_CHECK_RETURN_VOID(gnnid_pld); - - gnnid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - gnnid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - gnnid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GNN_ID); - gnnid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - - gnnid_pld->nport_id = nport_id; -} - -u32 unf_send_gnn_id(struct unf_lport *lport, struct unf_rport *sns_port, - u32 nport_id) -{ - /* from DISC stop/re-login */ - struct unf_gnnid *unf_gnnid = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x_0x%x) send gnnid to 0x%x.", lport->port_id, - lport->nport_id, nport_id); - - if (unf_is_lport_valid(lport) != RETURN_OK) - /* Lport is invalid, no retry or handle required, return ok */ - return RETURN_OK; - - unf_lport = (struct unf_lport *)lport->root_lport; - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, sns_port->nport_id, - sns_port, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) exchange can't be NULL for GNN_ID", - lport->port_id); - - return unf_get_and_post_disc_event(lport, sns_port, nport_id, - UNF_DISC_GET_NODE_NAME); - } - - xchg->cmnd_code = NS_GNN_ID; - xchg->disc_portid = nport_id; - - xchg->ob_callback = unf_gnn_id_ob_callback; - xchg->callback = unf_gnn_id_callback; - - unf_fill_package(&pkg, xchg, sns_port); - pkg.type = UNF_PKG_GS_REQ; - - unf_gnnid = &fc_entry->gnn_id; /* GNNID payload */ - memset(unf_gnnid, 0, sizeof(struct unf_gnnid)); - unf_fill_gnnid_pld(unf_gnnid, nport_id); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - else - atomic_dec(&unf_lport->disc.disc_thread_info.disc_contrl_size); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: GNN_ID send %s. Port(0x%x_0x%x)--->RPort(0x%x) inquire Nportid(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - lport->nport_id, sns_port->nport_id, nport_id); - - return ret; -} - -static void unf_fill_gpnid_pld(struct unf_gpnid *gpnid_pld, u32 nport_id) -{ - FC_CHECK_RETURN_VOID(gpnid_pld); - - gpnid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - gpnid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - gpnid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GPN_ID); - gpnid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - - /* Inquiry WWN from SW */ - gpnid_pld->nport_id = nport_id; -} - -u32 unf_send_gpn_id(struct unf_lport *lport, struct unf_rport *sns_port, - u32 nport_id) -{ - struct unf_gpnid *gpnid_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - - if (unf_is_lport_valid(lport) != RETURN_OK) - /* Lport is invalid, no retry or handle required, return ok */ - return RETURN_OK; - - unf_lport = (struct unf_lport *)lport->root_lport; - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, sns_port->nport_id, - sns_port, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for GPN_ID", - lport->port_id); - - return unf_get_and_post_disc_event(lport, sns_port, nport_id, - UNF_DISC_GET_PORT_NAME); - } - - xchg->cmnd_code = NS_GPN_ID; - xchg->disc_portid = nport_id; - - xchg->callback = unf_gpn_id_callback; - xchg->ob_callback = unf_gpn_id_ob_callback; - - unf_fill_package(&pkg, xchg, sns_port); - pkg.type = UNF_PKG_GS_REQ; - - gpnid_pld = &fc_entry->gpn_id; - memset(gpnid_pld, 0, sizeof(struct unf_gpnid)); - unf_fill_gpnid_pld(gpnid_pld, nport_id); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - else - atomic_dec(&unf_lport->disc.disc_thread_info.disc_contrl_size); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: GPN_ID send %s. Port(0x%x)--->RPort(0x%x). Inquire RPort(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - sns_port->nport_id, nport_id); - - return ret; -} - -static void unf_fill_gid_ft_pld(struct unf_gid *gid_pld) -{ - FC_CHECK_RETURN_VOID(gid_pld); - - gid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - gid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - gid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GID_FT); - gid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - - gid_pld->scope_type = (UNF_GID_FT_TYPE); -} - -u32 unf_send_gid_ft(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_gid *gid_pld = NULL; - struct unf_gid_rsp *gid_rsp = NULL; - struct unf_gid_acc_pld *gid_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, - rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for GID_FT", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = NS_GID_FT; - - xchg->ob_callback = unf_gid_ft_ob_callback; - xchg->callback = unf_gid_ft_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_GS_REQ; - - gid_pld = &fc_entry->get_id.gid_req; /* GID req payload */ - unf_fill_gid_ft_pld(gid_pld); - gid_rsp = &fc_entry->get_id.gid_rsp; /* GID rsp payload */ - - gid_acc_pld = (struct unf_gid_acc_pld *)unf_get_one_big_sfs_buf(xchg); - if (!gid_acc_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate GID_FT response buffer failed", - lport->port_id); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - memset(gid_acc_pld, 0, sizeof(struct unf_gid_acc_pld)); - gid_rsp->gid_acc_pld = gid_acc_pld; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: GID_FT send %s. Port(0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - rport->nport_id); - - return ret; -} - -static void unf_fill_gid_pt_pld(struct unf_gid *gid_pld, - struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(gid_pld); - FC_CHECK_RETURN_VOID(lport); - - gid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - gid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - gid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GID_PT); - gid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - - /* 0x7F000000 means NX_Port */ - gid_pld->scope_type = (UNF_GID_PT_TYPE); - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, gid_pld, - sizeof(struct unf_gid)); -} - -u32 unf_send_gid_pt(struct unf_lport *lport, struct unf_rport *rport) -{ - /* from DISC start */ - struct unf_gid *gid_pld = NULL; - struct unf_gid_rsp *gid_rsp = NULL; - struct unf_gid_acc_pld *gid_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, - rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for GID_PT", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = NS_GID_PT; - - xchg->ob_callback = unf_gid_pt_ob_callback; - xchg->callback = unf_gid_pt_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_GS_REQ; - - gid_pld = &fc_entry->get_id.gid_req; /* GID req payload */ - unf_fill_gid_pt_pld(gid_pld, lport); - gid_rsp = &fc_entry->get_id.gid_rsp; /* GID rsp payload */ - - gid_acc_pld = (struct unf_gid_acc_pld *)unf_get_one_big_sfs_buf(xchg); - if (!gid_acc_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%0x) Allocate GID_PT response buffer failed", - lport->port_id); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - memset(gid_acc_pld, 0, sizeof(struct unf_gid_acc_pld)); - gid_rsp->gid_acc_pld = gid_acc_pld; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: GID_PT send %s. Port(0x%x_0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - lport->nport_id, rport->nport_id); - - return ret; -} - -static void unf_fill_rft_id_pld(struct unf_rftid *rftid_pld, - struct unf_lport *lport) -{ - u32 index = 1; - - FC_CHECK_RETURN_VOID(rftid_pld); - FC_CHECK_RETURN_VOID(lport); - - rftid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - rftid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - rftid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_RFT_ID); - rftid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - rftid_pld->nport_id = (lport->nport_id); - rftid_pld->fc4_types[ARRAY_INDEX_0] = (UNF_FC4_SCSI_BIT8); - - for (index = ARRAY_INDEX_2; index < UNF_FC4TYPE_CNT; index++) - rftid_pld->fc4_types[index] = 0; -} - -u32 unf_send_rft_id(struct unf_lport *lport, struct unf_rport *rport) -{ - /* After PLOGI process */ - struct unf_rftid *rft_id = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, - rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for RFT_ID", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = NS_RFT_ID; - - xchg->callback = unf_rft_id_callback; - xchg->ob_callback = unf_rft_id_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_GS_REQ; - - rft_id = &fc_entry->rft_id; - memset(rft_id, 0, sizeof(struct unf_rftid)); - unf_fill_rft_id_pld(rft_id, lport); - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: RFT_ID send %s. Port(0x%x_0x%x)--->RPort(0x%x). rport(0x%p) wwpn(0x%llx) ", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - lport->nport_id, rport->nport_id, rport, rport->port_name); - - return ret; -} - -static void unf_fill_rff_id_pld(struct unf_rffid *rffid_pld, - struct unf_lport *lport, u32 fc4_type) -{ - FC_CHECK_RETURN_VOID(rffid_pld); - FC_CHECK_RETURN_VOID(lport); - - rffid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - rffid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - rffid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_RFF_ID); - rffid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - rffid_pld->nport_id = (lport->nport_id); - rffid_pld->fc4_feature = (fc4_type | (lport->options << UNF_SHIFT_4)); -} - -u32 unf_send_rff_id(struct unf_lport *lport, struct unf_rport *rport, - u32 fc4_type) -{ - /* from RFT_ID, then Send SCR */ - struct unf_rffid *rff_id = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, "%s Enter", __func__); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, - rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for RFF_ID", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = NS_RFF_ID; - - xchg->callback = unf_rff_id_callback; - xchg->ob_callback = unf_rff_id_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_GS_REQ; - - rff_id = &fc_entry->rff_id; - memset(rff_id, 0, sizeof(struct unf_rffid)); - unf_fill_rff_id_pld(rff_id, lport, fc4_type); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: RFF_ID feature 0x%x(10:TGT,20:INI,30:COM) send %s. Port(0x%x_0x%x)--->RPortid(0x%x) rport(0x%p)", - lport->options, (ret != RETURN_OK) ? "failed" : "succeed", - lport->port_id, lport->nport_id, rport->nport_id, rport); - - return ret; -} - -void unf_handle_init_gid_acc(struct unf_gid_acc_pld *gid_acc_pld, - struct unf_lport *lport) -{ - /* - * from SCR ACC callback - * NOTE: inquiry disc R_Port used for NPIV - */ - struct unf_disc_rport *disc_rport = NULL; - struct unf_disc *disc = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 gid_port_id = 0; - u32 nport_id = 0; - u32 index = 0; - u8 control = 0; - - FC_CHECK_RETURN_VOID(gid_acc_pld); - FC_CHECK_RETURN_VOID(lport); - - /* - * 1. 
Find & Check & Get (new) R_Port from list_disc_rports_pool - * then, Add to R_Port Disc_busy_list - */ - while (index < UNF_GID_PORT_CNT) { - gid_port_id = (gid_acc_pld->gid_port_id[index]); - nport_id = UNF_NPORTID_MASK & gid_port_id; - control = UNF_GID_CONTROL(gid_port_id); - - /* for each N_Port_ID from GID_ACC payload */ - if (lport->nport_id != nport_id && nport_id != 0 && - (!unf_lookup_lport_by_nportid(lport, nport_id))) { - /* for New Port, not L_Port */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) get nportid(0x%x) from GID_ACC", - lport->port_id, lport->nport_id, nport_id); - - /* Get R_Port from list of RPort Disc Pool */ - disc_rport = unf_rport_get_free_and_init(lport, - UNF_PORT_TYPE_DISC, nport_id); - if (!disc_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) can't allocate new rport(0x%x) from disc pool", - lport->port_id, lport->nport_id, - nport_id); - - index++; - continue; - } - } - - if (UNF_GID_LAST_PORT_ID == (UNF_GID_LAST_PORT_ID & control)) - break; - - index++; - } - - /* - * 2. Do port disc stop operation: - * NOTE: Do DISC & release R_Port from busy_list back to - * list_disc_rports_pool - */ - disc = &lport->disc; - if (!disc->disc_temp.unf_disc_stop) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) disc stop function is NULL", - lport->port_id, lport->nport_id); - - return; - } - - ret = disc->disc_temp.unf_disc_stop(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) do disc stop failed", - lport->port_id, lport->nport_id); - } -} - -u32 unf_rport_relogin(struct unf_lport *lport, u32 nport_id) -{ - /* Send GNN_ID */ - struct unf_rport *sns_port = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* Get SNS R_Port */ - sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't find fabric Port", lport->nport_id); - - return UNF_RETURN_ERROR; - } - - /* Send GNN_ID now to SW */ - ret = unf_get_and_post_disc_event(lport, sns_port, nport_id, - UNF_DISC_GET_NODE_NAME); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - lport->nport_id, UNF_DISC_GET_NODE_NAME, nport_id); - - /* NOTE: Continue to next stage */ - unf_rcv_gnn_id_rsp_unknown(lport, sns_port, nport_id); - } - - return ret; -} - -u32 unf_rport_check_wwn(struct unf_lport *lport, struct unf_rport *rport) -{ - /* Send GPN_ID */ - struct unf_rport *sns_port = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - /* Get SNS R_Port */ - sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't find fabric Port", lport->nport_id); - - return UNF_RETURN_ERROR; - } - - /* Send GPN_ID to SW */ - ret = unf_get_and_post_disc_event(lport, sns_port, rport->nport_id, - UNF_DISC_GET_PORT_NAME); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - lport->nport_id, UNF_DISC_GET_PORT_NAME, - rport->nport_id); - - unf_rcv_gpn_id_rsp_unknown(lport, rport->nport_id); - } - - return ret; -} - -u32 unf_handle_rscn_port_not_indisc(struct unf_lport *lport, u32 rscn_nport_id) -{ - /* RSCN Port_ID not in GID_ACC payload table: Link Down 
*/ - struct unf_rport *unf_rport = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* from R_Port busy list by N_Port_ID */ - unf_rport = unf_get_rport_by_nport_id(lport, rscn_nport_id); - if (unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) RPort(0x%x) wwpn(0x%llx) has been removed and link down it", - lport->port_id, rscn_nport_id, unf_rport->port_name); - - unf_rport_linkdown(lport, unf_rport); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) has no RPort(0x%x) and do nothing", - lport->nport_id, rscn_nport_id); - } - - return ret; -} - -u32 unf_handle_rscn_port_indisc(struct unf_lport *lport, u32 rscn_nport_id) -{ - /* Send GPN_ID or re-login(GNN_ID) */ - struct unf_rport *unf_rport = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* from R_Port busy list by N_Port_ID */ - unf_rport = unf_get_rport_by_nport_id(lport, rscn_nport_id); - if (unf_rport) { - /* R_Port exist: send GPN_ID */ - ret = unf_rport_check_wwn(lport, unf_rport); - } else { - if (UNF_PORT_MODE_INI == (lport->options & UNF_PORT_MODE_INI)) - /* Re-LOGIN with INI mode: Send GNN_ID */ - ret = unf_rport_relogin(lport, rscn_nport_id); - else - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) with no INI feature. Do nothing", - lport->nport_id); - } - - return ret; -} - -static u32 unf_handle_rscn_port_addr(struct unf_port_id_page *portid_page, - struct unf_gid_acc_pld *gid_acc_pld, - struct unf_lport *lport) -{ - /* - * Input parameters: - * 1. Port_ID_page: saved from RSCN payload - * 2. GID_ACC_payload: back from GID_ACC (GID_PT or GID_FT) - * * - * Do work: check whether RSCN Port_ID within GID_ACC payload or not - * then, re-login or link down rport - */ - u32 rscn_nport_id = 0; - u32 gid_port_id = 0; - u32 nport_id = 0; - u32 index = 0; - u8 control = 0; - u32 ret = RETURN_OK; - bool have_same_id = false; - - FC_CHECK_RETURN_VALUE(portid_page, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(gid_acc_pld, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* 1. get RSCN_NPort_ID from (L_Port->Disc->RSCN_Mgr)->RSCN_Port_ID_Page - */ - rscn_nport_id = UNF_SERVICE_GET_NPORTID_FORM_GID_PAGE(portid_page); - - /* - * 2. for RSCN_NPort_ID - * check whether RSCN_NPort_ID within GID_ACC_Payload or not - */ - while (index < UNF_GID_PORT_CNT) { - gid_port_id = (gid_acc_pld->gid_port_id[index]); - nport_id = UNF_NPORTID_MASK & gid_port_id; - control = UNF_GID_CONTROL(gid_port_id); - - if (lport->nport_id != nport_id && nport_id != 0) { - /* is not L_Port */ - if (nport_id == rscn_nport_id) { - /* RSCN Port_ID within GID_ACC payload */ - have_same_id = true; - break; - } - } - - if (UNF_GID_LAST_PORT_ID == (UNF_GID_LAST_PORT_ID & control)) - break; - - index++; - } - - /* 3. RSCN_Port_ID not within GID_ACC payload table */ - if (!have_same_id) { - /* rport has been removed */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[warn]Port(0x%x_0x%x) find RSCN N_Port_ID(0x%x) in GID_ACC table failed", - lport->port_id, lport->nport_id, rscn_nport_id); - - /* Link down rport */ - ret = unf_handle_rscn_port_not_indisc(lport, rscn_nport_id); - - } else { /* 4. 
RSCN_Port_ID within GID_ACC payload table */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x_0x%x) find RSCN N_Port_ID(0x%x) in GID_ACC table succeed", - lport->port_id, lport->nport_id, rscn_nport_id); - - /* Re-login with INI mode */ - ret = unf_handle_rscn_port_indisc(lport, rscn_nport_id); - } - - return ret; -} - -void unf_check_rport_rscn_process(struct unf_rport *rport, - struct unf_port_id_page *portid_page) -{ - struct unf_rport *unf_rport = rport; - struct unf_port_id_page *unf_portid_page = portid_page; - u8 addr_format = unf_portid_page->addr_format; - - switch (addr_format) { - /* domain+area */ - case UNF_RSCN_AREA_ADDR_GROUP: - if (UNF_GET_DOMAIN_ID(unf_rport->nport_id) == unf_portid_page->port_id_domain && - UNF_GET_AREA_ID(unf_rport->nport_id) == unf_portid_page->port_id_area) - unf_rport->rscn_position = UNF_RPORT_NEED_PROCESS; - - break; - /* domain */ - case UNF_RSCN_DOMAIN_ADDR_GROUP: - if (UNF_GET_DOMAIN_ID(unf_rport->nport_id) == unf_portid_page->port_id_domain) - unf_rport->rscn_position = UNF_RPORT_NEED_PROCESS; - - break; - /* all */ - case UNF_RSCN_FABRIC_ADDR_GROUP: - unf_rport->rscn_position = UNF_RPORT_NEED_PROCESS; - break; - default: - break; - } -} - -static void unf_set_rport_rscn_position(struct unf_lport *lport, - struct unf_port_id_page *portid_page) -{ - struct unf_rport *unf_rport = NULL; - struct list_head *list_node = NULL; - struct list_head *list_nextnode = NULL; - struct unf_disc *disc = NULL; - ulong disc_flag = 0; - ulong rport_flag = 0; - - FC_CHECK_RETURN_VOID(lport); - disc = &lport->disc; - - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - list_for_each_safe(list_node, list_nextnode, &disc->list_busy_rports) { - unf_rport = list_entry(list_node, struct unf_rport, entry_rport); - spin_lock_irqsave(&unf_rport->rport_state_lock, rport_flag); - - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) { - if (unf_rport->rscn_position == UNF_RPORT_NOT_NEED_PROCESS) - unf_check_rport_rscn_process(unf_rport, portid_page); - } else { - unf_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - } - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, rport_flag); - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); -} - -static void unf_set_rport_rscn_position_local(struct unf_lport *lport) -{ - struct unf_rport *unf_rport = NULL; - struct list_head *list_node = NULL; - struct list_head *list_nextnode = NULL; - struct unf_disc *disc = NULL; - ulong disc_flag = 0; - ulong rport_flag = 0; - - FC_CHECK_RETURN_VOID(lport); - disc = &lport->disc; - - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - list_for_each_safe(list_node, list_nextnode, &disc->list_busy_rports) { - unf_rport = list_entry(list_node, struct unf_rport, entry_rport); - spin_lock_irqsave(&unf_rport->rport_state_lock, rport_flag); - - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) { - if (unf_rport->rscn_position == UNF_RPORT_NEED_PROCESS) - unf_rport->rscn_position = UNF_RPORT_ONLY_IN_LOCAL_PROCESS; - } else { - unf_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - } - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, rport_flag); - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); -} - -static void unf_reset_rport_rscn_setting(struct unf_lport *lport) -{ - struct unf_rport *rport = NULL; - struct list_head *list_node = NULL; - struct list_head *list_nextnode = NULL; - struct unf_disc *disc = NULL; - ulong rport_flag = 0; - - FC_CHECK_RETURN_VOID(lport); - disc = &lport->disc; - - list_for_each_safe(list_node, 
list_nextnode, &disc->list_busy_rports) { - rport = list_entry(list_node, struct unf_rport, entry_rport); - spin_lock_irqsave(&rport->rport_state_lock, rport_flag); - rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - } -} - -void unf_compare_nport_id_with_rport_list(struct unf_lport *lport, u32 nport_id, - struct unf_port_id_page *portid_page) -{ - struct unf_rport *rport = NULL; - ulong rport_flag = 0; - u8 addr_format = portid_page->addr_format; - - FC_CHECK_RETURN_VOID(lport); - - switch (addr_format) { - /* domain+area */ - case UNF_RSCN_AREA_ADDR_GROUP: - if ((UNF_GET_DOMAIN_ID(nport_id) != portid_page->port_id_domain) || - (UNF_GET_AREA_ID(nport_id) != portid_page->port_id_area)) - return; - - break; - /* domain */ - case UNF_RSCN_DOMAIN_ADDR_GROUP: - if (UNF_GET_DOMAIN_ID(nport_id) != portid_page->port_id_domain) - return; - - break; - /* all */ - case UNF_RSCN_FABRIC_ADDR_GROUP: - break; - /* can't enter this branch guarantee by outer */ - default: - break; - } - - rport = unf_get_rport_by_nport_id(lport, nport_id); - - if (!rport) { - if (UNF_PORT_MODE_INI == (lport->options & UNF_PORT_MODE_INI)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x) Find Rport(0x%x) by RSCN", - lport->nport_id, nport_id); - unf_rport_relogin(lport, nport_id); - } - } else { - spin_lock_irqsave(&rport->rport_state_lock, rport_flag); - if (rport->rscn_position == UNF_RPORT_NEED_PROCESS) - rport->rscn_position = UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS; - - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - } -} - -static void unf_compare_disc_with_local_rport(struct unf_lport *lport, - struct unf_gid_acc_pld *pld, - struct unf_port_id_page *page) -{ - u32 gid_port_id = 0; - u32 nport_id = 0; - u32 index = 0; - u8 control = 0; - - FC_CHECK_RETURN_VOID(pld); - FC_CHECK_RETURN_VOID(lport); - - while (index < UNF_GID_PORT_CNT) { - gid_port_id = (pld->gid_port_id[index]); - nport_id = UNF_NPORTID_MASK & gid_port_id; - control = UNF_GID_CONTROL(gid_port_id); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, "[info]Port(0x%x) DISC N_Port_ID(0x%x)", - lport->nport_id, nport_id); - - if (nport_id != 0 && - (!unf_lookup_lport_by_nportid(lport, nport_id))) - unf_compare_nport_id_with_rport_list(lport, nport_id, page); - - if (UNF_GID_LAST_PORT_ID == (UNF_GID_LAST_PORT_ID & control)) - break; - - index++; - } - - unf_set_rport_rscn_position_local(lport); -} - -static u32 unf_process_each_rport_after_rscn(struct unf_lport *lport, - struct unf_rport *sns_port, - struct unf_rport *rport) -{ - ulong rport_flag = 0; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - - spin_lock_irqsave(&rport->rport_state_lock, rport_flag); - - if (rport->rscn_position == UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x_0x%x) RPort(0x%x) rescan position(0x%x), check wwpn", - lport->port_id, lport->nport_id, rport->nport_id, - rport->rscn_position); - rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - ret = unf_rport_check_wwn(lport, rport); - } else if (rport->rscn_position == UNF_RPORT_ONLY_IN_LOCAL_PROCESS) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x_0x%x) RPort(0x%x) rescan position(0x%x), linkdown it", - lport->port_id, lport->nport_id, rport->nport_id, - 
rport->rscn_position); - rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - unf_rport_linkdown(lport, rport); - } else { - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - } - - return ret; -} - -static u32 unf_process_local_rport_after_rscn(struct unf_lport *lport, - struct unf_rport *sns_port) -{ - struct unf_rport *unf_rport = NULL; - struct list_head *list_node = NULL; - struct unf_disc *disc = NULL; - ulong disc_flag = 0; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - disc = &lport->disc; - - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - if (list_empty(&disc->list_busy_rports)) { - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - - return UNF_RETURN_ERROR; - } - - list_node = UNF_OS_LIST_NEXT(&disc->list_busy_rports); - - do { - unf_rport = list_entry(list_node, struct unf_rport, entry_rport); - - if (unf_rport->rscn_position == UNF_RPORT_NOT_NEED_PROCESS) { - list_node = UNF_OS_LIST_NEXT(list_node); - continue; - } else { - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - ret = unf_process_each_rport_after_rscn(lport, sns_port, unf_rport); - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - list_node = UNF_OS_LIST_NEXT(&disc->list_busy_rports); - } - } while (list_node != &disc->list_busy_rports); - - unf_reset_rport_rscn_setting(lport); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - - return ret; -} - -static u32 unf_handle_rscn_group_addr(struct unf_port_id_page *portid_page, - struct unf_gid_acc_pld *gid_acc_pld, - struct unf_lport *lport) -{ - struct unf_rport *sns_port = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(portid_page, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(gid_acc_pld, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find fabric port failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - - unf_set_rport_rscn_position(lport, portid_page); - unf_compare_disc_with_local_rport(lport, gid_acc_pld, portid_page); - - ret = unf_process_local_rport_after_rscn(lport, sns_port); - - return ret; -} - -static void unf_handle_rscn_gid_acc(struct unf_gid_acc_pld *gid_acc_pid, - struct unf_lport *lport) -{ - /* for N_Port_ID table return from RSCN */ - struct unf_port_id_page *port_id_page = NULL; - struct unf_rscn_mgr *rscn_mgr = NULL; - struct list_head *list_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(gid_acc_pid); - FC_CHECK_RETURN_VOID(lport); - rscn_mgr = &lport->disc.rscn_mgr; - - spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); - while (!list_empty(&rscn_mgr->list_using_rscn_page)) { - /* - * for each RSCN_Using_Page(NPortID) - * for each - * L_Port->Disc->RSCN_Mgr->RSCN_Using_Page(Port_ID_Page) - * * NOTE: - * check using_page_port_id whether within GID_ACC payload or - * not - */ - list_node = UNF_OS_LIST_NEXT(&rscn_mgr->list_using_rscn_page); - port_id_page = list_entry(list_node, struct unf_port_id_page, list_node_rscn); - list_del(list_node); /* NOTE: here delete node (from RSCN using Page) */ - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); - - switch (port_id_page->addr_format) { - /* each page of RSNC corresponding one of N_Port_ID */ - case UNF_RSCN_PORT_ADDR: - (void)unf_handle_rscn_port_addr(port_id_page, 
gid_acc_pid, lport); - break; - - /* each page of RSNC corresponding address group */ - case UNF_RSCN_AREA_ADDR_GROUP: - case UNF_RSCN_DOMAIN_ADDR_GROUP: - case UNF_RSCN_FABRIC_ADDR_GROUP: - (void)unf_handle_rscn_group_addr(port_id_page, gid_acc_pid, lport); - break; - - default: - break; - } - - /* NOTE: release this RSCN_Node */ - rscn_mgr->unf_release_rscn_node(rscn_mgr, port_id_page); - - /* go to next */ - spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); - } - - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); -} - -static void unf_gid_acc_handle(struct unf_gid_acc_pld *gid_acc_pid, - struct unf_lport *lport) -{ -#define UNF_NONE_DISC 0X0 /* before enter DISC */ - struct unf_disc *disc = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(gid_acc_pid); - FC_CHECK_RETURN_VOID(lport); - disc = &lport->disc; - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - switch (disc->disc_option) { - case UNF_INIT_DISC: /* from SCR callback with INI mode */ - disc->disc_option = UNF_NONE_DISC; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_handle_init_gid_acc(gid_acc_pid, lport); /* R_Port from Disc_list */ - break; - - case UNF_RSCN_DISC: /* from RSCN payload parse(analysis) */ - disc->disc_option = UNF_NONE_DISC; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_handle_rscn_gid_acc(gid_acc_pid, lport); /* R_Port from busy_list */ - break; - - default: - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x)'s disc option(0x%x) is abnormal", - lport->port_id, lport->nport_id, disc->disc_option); - break; - } -} - -static void unf_gid_ft_ob_callback(struct unf_xchg *xchg) -{ - /* Do recovery */ - struct unf_lport *lport = NULL; - union unf_sfs_u *sfs_ptr = NULL; - struct unf_disc *disc = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - sfs_ptr = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!sfs_ptr) - return; - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - if (!lport) - return; - - disc = &lport->disc; - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Do DISC recovery operation */ - unf_disc_error_recovery(lport); -} - -static void unf_gid_ft_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_disc *disc = NULL; - struct unf_gid_acc_pld *gid_acc_pld = NULL; - struct unf_xchg *unf_xchg = NULL; - union unf_sfs_u *sfs_ptr = NULL; - u32 cmnd_rsp_size = 0; - u32 rjt_reason = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - disc = &unf_lport->disc; - - sfs_ptr = unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - gid_acc_pld = sfs_ptr->get_id.gid_rsp.gid_acc_pld; - if (!gid_acc_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) GID_FT response payload is NULL", - unf_lport->port_id); - - return; - } - - cmnd_rsp_size = gid_acc_pld->ctiu_pream.cmnd_rsp_size; - if (UNF_CT_IU_ACCEPT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_SUCCESS); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Process GID_FT ACC */ 
- unf_gid_acc_handle(gid_acc_pld, unf_lport); - } else if (UNF_CT_IU_REJECT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - rjt_reason = (gid_acc_pld->ctiu_pream.frag_reason_exp_vend); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) GID_FT was rejected with reason code(0x%x)", - unf_lport->port_id, rjt_reason); - - if (UNF_CTIU_RJT_EXP_FC4TYPE_NO_REG == - (rjt_reason & UNF_CTIU_RJT_EXP_MASK)) { - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_SUCCESS); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_gid_acc_handle(gid_acc_pld, unf_lport); - } else { - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_SUCCESS); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } - } else { - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_FAILED); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Do DISC recovery operation */ - unf_disc_error_recovery(unf_lport); - } -} - -static void unf_gid_pt_ob_callback(struct unf_xchg *xchg) -{ - /* Do recovery */ - struct unf_lport *lport = NULL; - union unf_sfs_u *sfs_ptr = NULL; - struct unf_disc *disc = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - sfs_ptr = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!sfs_ptr) - return; - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - if (!lport) - return; - - disc = &lport->disc; - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Do DISC recovery operation */ - unf_disc_error_recovery(lport); -} - -static void unf_gid_pt_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_disc *disc = NULL; - struct unf_gid_acc_pld *gid_acc_pld = NULL; - struct unf_xchg *unf_xchg = NULL; - union unf_sfs_u *sfs_ptr = NULL; - u32 cmnd_rsp_size = 0; - u32 rjt_reason = 0; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_rport = (struct unf_rport *)rport; - disc = &unf_lport->disc; - unf_xchg = (struct unf_xchg *)xchg; - sfs_ptr = unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - - gid_acc_pld = sfs_ptr->get_id.gid_rsp.gid_acc_pld; - if (!gid_acc_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) GID_PT response payload is NULL", - unf_lport->port_id); - return; - } - - cmnd_rsp_size = (gid_acc_pld->ctiu_pream.cmnd_rsp_size); - if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_SUCCESS); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_gid_acc_handle(gid_acc_pld, unf_lport); - } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { - rjt_reason = (gid_acc_pld->ctiu_pream.frag_reason_exp_vend); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) GID_PT was rejected with reason code(0x%x)", - unf_lport->port_id, unf_lport->nport_id, rjt_reason); - - if ((rjt_reason & UNF_CTIU_RJT_EXP_MASK) == - UNF_CTIU_RJT_EXP_PORTTYPE_NO_REG) { - 
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_SUCCESS); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_gid_acc_handle(gid_acc_pld, unf_lport); - } else { - ret = unf_send_gid_ft(unf_lport, unf_rport); - if (ret != RETURN_OK) - goto SEND_GID_PT_FT_FAILED; - } - } else { - goto SEND_GID_PT_FT_FAILED; - } - - return; -SEND_GID_PT_FT_FAILED: - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_FAILED); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - unf_disc_error_recovery(unf_lport); -} - -static void unf_gnn_id_ob_callback(struct unf_xchg *xchg) -{ - /* Send GFF_ID */ - struct unf_lport *lport = NULL; - struct unf_rport *sns_port = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 nport_id = 0; - struct unf_lport *root_lport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - lport = xchg->lport; - FC_CHECK_RETURN_VOID(lport); - sns_port = xchg->rport; - FC_CHECK_RETURN_VOID(sns_port); - nport_id = xchg->disc_portid; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send GNN_ID failed to inquire RPort(0x%x)", - lport->port_id, nport_id); - - root_lport = (struct unf_lport *)lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - /* NOTE: continue next stage */ - ret = unf_get_and_post_disc_event(lport, sns_port, nport_id, UNF_DISC_GET_FEATURE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - lport->port_id, UNF_DISC_GET_FEATURE, nport_id); - - unf_rcv_gff_id_rsp_unknown(lport, nport_id); - } -} - -static void unf_rcv_gnn_id_acc(struct unf_lport *lport, - struct unf_rport *sns_port, - struct unf_gnnid_rsp *gnnid_rsp_pld, - u32 nport_id) -{ - /* Send GFF_ID or Link down immediately */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_sns_port = sns_port; - struct unf_gnnid_rsp *unf_gnnid_rsp_pld = gnnid_rsp_pld; - struct unf_rport *rport = NULL; - u64 node_name = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - FC_CHECK_RETURN_VOID(gnnid_rsp_pld); - - node_name = ((u64)(unf_gnnid_rsp_pld->node_name[ARRAY_INDEX_0]) << UNF_SHIFT_32) | - ((u64)(unf_gnnid_rsp_pld->node_name[ARRAY_INDEX_1])); - - if (unf_lport->node_name == node_name) { - /* R_Port & L_Port with same Node Name */ - rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - if (rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) has the same node name(0x%llx) with RPort(0x%x), linkdown it", - unf_lport->port_id, node_name, nport_id); - - /* Destroy immediately */ - unf_rport_immediate_link_down(unf_lport, rport); - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x) got RPort(0x%x) with node name(0x%llx) by GNN_ID", - unf_lport->port_id, nport_id, node_name); - - /* Start to Send GFF_ID */ - ret = unf_get_and_post_disc_event(unf_lport, unf_sns_port, - nport_id, UNF_DISC_GET_FEATURE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - unf_lport->port_id, UNF_DISC_GET_FEATURE, nport_id); - - unf_rcv_gff_id_rsp_unknown(unf_lport, nport_id); - } - } -} - -static void unf_rcv_gnn_id_rjt(struct unf_lport *lport, - struct unf_rport *sns_port, - struct unf_gnnid_rsp *gnnid_rsp_pld, - u32 nport_id) -{ - /* Send 
GFF_ID */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_sns_port = sns_port; - struct unf_gnnid_rsp *unf_gnnid_rsp_pld = gnnid_rsp_pld; - u32 rjt_reason = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - FC_CHECK_RETURN_VOID(gnnid_rsp_pld); - - rjt_reason = (unf_gnnid_rsp_pld->ctiu_pream.frag_reason_exp_vend); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) GNN_ID was rejected with reason code(0x%x)", - unf_lport->port_id, unf_lport->nport_id, rjt_reason); - - if (!UNF_GNN_GFF_ID_RJT_REASON(rjt_reason)) { - /* Node existence: Continue next stage */ - ret = unf_get_and_post_disc_event(unf_lport, unf_sns_port, - nport_id, UNF_DISC_GET_FEATURE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - unf_lport->port_id, UNF_DISC_GET_FEATURE, nport_id); - - unf_rcv_gff_id_rsp_unknown(unf_lport, nport_id); - } - } -} - -void unf_rcv_gnn_id_rsp_unknown(struct unf_lport *lport, - struct unf_rport *sns_port, u32 nport_id) -{ - /* Send GFF_ID */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_sns_port = sns_port; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) Rportid(0x%x) GNN_ID response is unknown. Sending GFF_ID", - unf_lport->port_id, unf_lport->nport_id, nport_id); - - ret = unf_get_and_post_disc_event(unf_lport, unf_sns_port, nport_id, UNF_DISC_GET_FEATURE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - unf_lport->port_id, UNF_DISC_GET_FEATURE, - nport_id); - - /* NOTE: go to next stage */ - unf_rcv_gff_id_rsp_unknown(unf_lport, nport_id); - } -} - -static void unf_gnn_id_callback(void *lport, void *sns_port, void *xchg) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_rport *unf_sns_port = (struct unf_rport *)sns_port; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_gnnid_rsp *gnnid_rsp_pld = NULL; - u32 cmnd_rsp_size = 0; - u32 nport_id = 0; - struct unf_lport *root_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - FC_CHECK_RETURN_VOID(xchg); - - nport_id = unf_xchg->disc_portid; - gnnid_rsp_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gnn_id_rsp; - cmnd_rsp_size = gnnid_rsp_pld->ctiu_pream.cmnd_rsp_size; - - root_lport = (struct unf_lport *)unf_lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { - /* Case ACC: send GFF_ID or Link down immediately */ - unf_rcv_gnn_id_acc(unf_lport, unf_sns_port, gnnid_rsp_pld, nport_id); - } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { - /* Case RJT: send GFF_ID */ - unf_rcv_gnn_id_rjt(unf_lport, unf_sns_port, gnnid_rsp_pld, nport_id); - } else { /* NOTE: continue next stage */ - /* Case unknown: send GFF_ID */ - unf_rcv_gnn_id_rsp_unknown(unf_lport, unf_sns_port, nport_id); - } -} - -static void unf_gff_id_ob_callback(struct unf_xchg *xchg) -{ - /* Send PLOGI */ - struct unf_lport *lport = NULL; - struct unf_lport *root_lport = NULL; - struct unf_rport *rport = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - u32 nport_id = 0; - - FC_CHECK_RETURN_VOID(xchg); - - 
spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - nport_id = xchg->disc_portid; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_CHECK_RETURN_VOID(lport); - - root_lport = (struct unf_lport *)lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - /* Get (safe) R_Port */ - rport = unf_get_rport_by_nport_id(lport, nport_id); - rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, nport_id); - if (!rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't allocate new RPort(0x%x)", - lport->port_id, nport_id); - return; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) send GFF_ID(0x%x_0x%x) to RPort(0x%x_0x%x) abnormal", - lport->port_id, lport->nport_id, xchg->oxid, xchg->rxid, - rport->rport_index, rport->nport_id); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* NOTE: Start to send PLOGI */ - ret = unf_send_plogi(lport, rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send PLOGI failed, enter recovry", - lport->port_id); - - /* Do R_Port recovery */ - unf_rport_error_recovery(rport); - } -} - -void unf_rcv_gff_id_acc(struct unf_lport *lport, - struct unf_gffid_rsp *gffid_rsp_pld, u32 nport_id) -{ - /* Delay to LOGIN */ - struct unf_lport *unf_lport = lport; - struct unf_rport *rport = NULL; - struct unf_gffid_rsp *unf_gffid_rsp_pld = gffid_rsp_pld; - u32 fc_4feacture = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(gffid_rsp_pld); - - fc_4feacture = unf_gffid_rsp_pld->fc4_feature[ARRAY_INDEX_1]; - if ((UNF_GFF_ACC_MASK & fc_4feacture) == 0) - fc_4feacture = be32_to_cpu(unf_gffid_rsp_pld->fc4_feature[ARRAY_INDEX_1]); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) RPort(0x%x) received GFF_ID ACC. 
FC4 feature is 0x%x(1:TGT,2:INI,3:COM)", - unf_lport->port_id, unf_lport->nport_id, nport_id, fc_4feacture); - - /* Check (& Get new) R_Port */ - rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - if (rport) - rport = unf_find_rport(unf_lport, nport_id, rport->port_name); - - if (rport || (UNF_GET_PORT_OPTIONS(fc_4feacture) != UNF_PORT_MODE_INI)) { - rport = unf_get_safe_rport(unf_lport, rport, UNF_RPORT_REUSE_ONLY, nport_id); - FC_CHECK_RETURN_VOID(rport); - } else { - return; - } - - if ((fc_4feacture & UNF_GFF_ACC_MASK) != 0) { - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->options = UNF_GET_PORT_OPTIONS(fc_4feacture); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } else if (rport->port_name != INVALID_WWPN) { - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->options = unf_get_port_feature(rport->port_name); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } - - /* NOTE: Send PLOGI if necessary */ - unf_check_rport_need_delay_plogi(unf_lport, rport, rport->options); -} - -void unf_rcv_gff_id_rjt(struct unf_lport *lport, - struct unf_gffid_rsp *gffid_rsp_pld, u32 nport_id) -{ - /* Delay LOGIN or LOGO */ - struct unf_lport *unf_lport = lport; - struct unf_rport *rport = NULL; - struct unf_gffid_rsp *unf_gffid_rsp_pld = gffid_rsp_pld; - u32 rjt_reason = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(gffid_rsp_pld); - - /* Check (& Get new) R_Port */ - rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - if (rport) - rport = unf_find_rport(unf_lport, nport_id, rport->port_name); - - if (!rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) get RPort by N_Port_ID(0x%x) failed and alloc new", - unf_lport->port_id, nport_id); - - rport = unf_rport_get_free_and_init(unf_lport, UNF_PORT_TYPE_FC, nport_id); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } - - rjt_reason = unf_gffid_rsp_pld->ctiu_pream.frag_reason_exp_vend; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send GFF_ID for RPort(0x%x) but was rejected. 
Reason code(0x%x)", - unf_lport->port_id, nport_id, rjt_reason); - - if (!UNF_GNN_GFF_ID_RJT_REASON(rjt_reason)) { - rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, nport_id); - FC_CHECK_RETURN_VOID(rport); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Delay to send PLOGI */ - unf_rport_delay_login(rport); - } else { - spin_lock_irqsave(&rport->rport_state_lock, flag); - if (rport->rp_state == UNF_RPORT_ST_INIT) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Enter closing state */ - unf_rport_enter_logo(unf_lport, rport); - } else { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } - } -} - -void unf_rcv_gff_id_rsp_unknown(struct unf_lport *lport, u32 nport_id) -{ - /* Send PLOGI */ - struct unf_lport *unf_lport = lport; - struct unf_rport *rport = NULL; - ulong flag = 0; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send GFF_ID for RPort(0x%x) but response is unknown", - unf_lport->port_id, nport_id); - - /* Get (Safe) R_Port & Set State */ - rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - if (rport) - rport = unf_find_rport(unf_lport, nport_id, rport->port_name); - - if (!rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) can't get RPort by NPort ID(0x%x), allocate new RPort", - unf_lport->port_id, unf_lport->nport_id, nport_id); - - rport = unf_rport_get_free_and_init(unf_lport, UNF_PORT_TYPE_FC, nport_id); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } - - rport = unf_get_safe_rport(unf_lport, rport, UNF_RPORT_REUSE_ONLY, nport_id); - FC_CHECK_RETURN_VOID(rport); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Start to send PLOGI */ - ret = unf_send_plogi(unf_lport, rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) can not send PLOGI for RPort(0x%x), enter recovery", - unf_lport->port_id, nport_id); - - unf_rport_error_recovery(rport); - } -} - -static void unf_gff_id_callback(void *lport, void *sns_port, void *xchg) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_lport *root_lport = NULL; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_gffid_rsp *gffid_rsp_pld = NULL; - u32 cmnd_rsp_size = 0; - u32 nport_id = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - FC_CHECK_RETURN_VOID(xchg); - - nport_id = unf_xchg->disc_portid; - - gffid_rsp_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gff_id_rsp; - cmnd_rsp_size = (gffid_rsp_pld->ctiu_pream.cmnd_rsp_size); - - root_lport = (struct unf_lport *)unf_lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { - /* Case for GFF_ID ACC: (Delay)PLOGI */ - unf_rcv_gff_id_acc(unf_lport, gffid_rsp_pld, nport_id); - } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == 
UNF_CT_IU_REJECT) { - /* Case for GFF_ID RJT: Delay PLOGI or LOGO directly */ - unf_rcv_gff_id_rjt(unf_lport, gffid_rsp_pld, nport_id); - } else { - /* Send PLOGI */ - unf_rcv_gff_id_rsp_unknown(unf_lport, nport_id); - } -} - -static void unf_rcv_gpn_id_acc(struct unf_lport *lport, - u32 nport_id, u64 port_name) -{ - /* then PLOGI or re-login */ - struct unf_lport *unf_lport = lport; - struct unf_rport *rport = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - rport = unf_find_valid_rport(unf_lport, port_name, nport_id); - if (rport) { - /* R_Port with TGT mode & L_Port with INI mode: - * send PLOGI with INIT state - */ - if ((rport->options & UNF_PORT_MODE_TGT) == UNF_PORT_MODE_TGT) { - rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_INIT, nport_id); - FC_CHECK_RETURN_VOID(rport); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Start to send PLOGI */ - ret = unf_send_plogi(unf_lport, rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI failed for 0x%x, enter recovry", - unf_lport->port_id, unf_lport->nport_id, nport_id); - - unf_rport_error_recovery(rport); - } - } else { - spin_lock_irqsave(&rport->rport_state_lock, flag); - if (rport->rp_state != UNF_RPORT_ST_PLOGI_WAIT && - rport->rp_state != UNF_RPORT_ST_PRLI_WAIT && - rport->rp_state != UNF_RPORT_ST_READY) { - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Do LOGO operation */ - unf_rport_enter_logo(unf_lport, rport); - } else { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } - } - } else { - /* Send GNN_ID */ - (void)unf_rport_relogin(unf_lport, nport_id); - } -} - -static void unf_rcv_gpn_id_rjt(struct unf_lport *lport, u32 nport_id) -{ - struct unf_lport *unf_lport = lport; - struct unf_rport *rport = NULL; - - FC_CHECK_RETURN_VOID(lport); - - rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - if (rport) - /* Do R_Port Link down */ - unf_rport_linkdown(unf_lport, rport); -} - -void unf_rcv_gpn_id_rsp_unknown(struct unf_lport *lport, u32 nport_id) -{ - struct unf_lport *unf_lport = lport; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) wrong response of GPN_ID with RPort(0x%x)", - unf_lport->port_id, nport_id); - - /* NOTE: go to next stage */ - (void)unf_rport_relogin(unf_lport, nport_id); -} - -static void unf_gpn_id_ob_callback(struct unf_xchg *xchg) -{ - struct unf_lport *lport = NULL; - u32 nport_id = 0; - struct unf_lport *root_lport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - - lport = xchg->lport; - nport_id = xchg->disc_portid; - FC_CHECK_RETURN_VOID(lport); - - root_lport = (struct unf_lport *)lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send GPN_ID failed to inquire RPort(0x%x)", - lport->port_id, nport_id); - - /* NOTE: go to next stage */ - (void)unf_rport_relogin(lport, nport_id); -} - -static void unf_gpn_id_callback(void *lport, void *sns_port, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_gpnid_rsp *gpnid_rsp_pld = NULL; - u64 port_name = 0; - u32 cmnd_rsp_size = 0; - 
u32 nport_id = 0; - struct unf_lport *root_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - nport_id = unf_xchg->disc_portid; - - root_lport = (struct unf_lport *)unf_lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - gpnid_rsp_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gpn_id_rsp; - cmnd_rsp_size = gpnid_rsp_pld->ctiu_pream.cmnd_rsp_size; - if (UNF_CT_IU_ACCEPT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - /* GPN_ID ACC */ - port_name = ((u64)(gpnid_rsp_pld->port_name[ARRAY_INDEX_0]) - << UNF_SHIFT_32) | - ((u64)(gpnid_rsp_pld->port_name[ARRAY_INDEX_1])); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x) GPN_ID ACC with WWN(0x%llx) RPort NPort ID(0x%x)", - unf_lport->port_id, port_name, nport_id); - - /* Send PLOGI or LOGO or GNN_ID */ - unf_rcv_gpn_id_acc(unf_lport, nport_id, port_name); - } else if (UNF_CT_IU_REJECT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - /* GPN_ID RJT: Link Down */ - unf_rcv_gpn_id_rjt(unf_lport, nport_id); - } else { - /* GPN_ID response type unknown: Send GNN_ID */ - unf_rcv_gpn_id_rsp_unknown(unf_lport, nport_id); - } -} - -static void unf_rff_id_ob_callback(struct unf_xchg *xchg) -{ - /* Do recovery */ - struct unf_lport *lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send RFF_ID failed", - lport->port_id, lport->nport_id); - - unf_lport_error_recovery(lport); -} - -static void unf_rff_id_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_ctiu_prem *ctiu_prem = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 cmnd_rsp_size = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - if (unlikely(!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr)) - return; - - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_FCTRL); - unf_rport = unf_get_safe_rport(unf_lport, unf_rport, - UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FCTRL); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't allocate RPort(0x%x)", - unf_lport->port_id, UNF_FC_FID_FCTRL); - return; - } - - unf_rport->nport_id = UNF_FC_FID_FCTRL; - ctiu_prem = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rff_id_rsp.ctiu_pream; - cmnd_rsp_size = ctiu_prem->cmnd_rsp_size; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x_0x%x) RFF_ID rsp is (0x%x)", - unf_lport->port_id, unf_lport->nport_id, - (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)); - - /* RSP Type check: some SW not support RFF_ID, go to next stage also */ - if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) receive RFF ACC(0x%x) in state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), unf_lport->states); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - 
"[warn]LOGIN: Port(0x%x_0x%x) receive RFF RJT(0x%x) in state(0x%x) with RJT reason code(0x%x) explanation(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), unf_lport->states, - (ctiu_prem->frag_reason_exp_vend) & UNF_CT_IU_REASON_MASK, - (ctiu_prem->frag_reason_exp_vend) & UNF_CT_IU_EXPLAN_MASK); - } - - /* L_Port state check */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_RFF_ID_WAIT) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) receive RFF reply in state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_lport->states); - - return; - } - /* LPort: RFF_ID_WAIT --> SCR_WAIT */ - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_REMOTE_ACC); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - ret = unf_send_scr(unf_lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send SCR failed", - unf_lport->port_id, unf_lport->nport_id); - unf_lport_error_recovery(unf_lport); - } -} - -static void unf_rft_id_ob_callback(struct unf_xchg *xchg) -{ - struct unf_lport *lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send RFT_ID failed", - lport->port_id, lport->nport_id); - unf_lport_error_recovery(lport); -} - -static void unf_rft_id_callback(void *lport, void *rport, void *xchg) -{ - /* RFT_ID --->>> RFF_ID */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_ctiu_prem *ctiu_prem = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 cmnd_rsp_size = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_rport = (struct unf_rport *)rport; - unf_xchg = (struct unf_xchg *)xchg; - - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) SFS entry is NULL with state(0x%x)", - unf_lport->port_id, unf_lport->states); - return; - } - - ctiu_prem = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr - ->rft_id_rsp.ctiu_pream; - cmnd_rsp_size = (ctiu_prem->cmnd_rsp_size); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) RFT_ID response is (0x%x)", - (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), unf_lport->port_id, - unf_lport->nport_id); - - if (UNF_CT_IU_ACCEPT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - /* Case for RFT_ID ACC: send RFF_ID */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_RFT_ID_WAIT) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) receive RFT_ID ACC in state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - unf_lport->states); - - return; - } - - /* LPort: RFT_ID_WAIT --> RFF_ID_WAIT */ - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_REMOTE_ACC); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - /* Start to send RFF_ID GS command */ - ret = unf_send_rff_id(unf_lport, unf_rport, UNF_FC4_FCP_TYPE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - 
"[warn]LOGIN: Port(0x%x_0x%x) send RFF_ID failed", - unf_lport->port_id, unf_lport->nport_id); - unf_lport_error_recovery(unf_lport); - } - } else { - /* Case for RFT_ID RJT: do recovery */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) receive RFT_ID RJT with reason_code(0x%x) explanation(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - (ctiu_prem->frag_reason_exp_vend) & UNF_CT_IU_REASON_MASK, - (ctiu_prem->frag_reason_exp_vend) & UNF_CT_IU_EXPLAN_MASK); - - /* Do L_Port recovery */ - unf_lport_error_recovery(unf_lport); - } -} - -static void unf_scr_ob_callback(struct unf_xchg *xchg) -{ - /* Callback fucnion for exception: Do L_Port error recovery */ - struct unf_lport *lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send SCR failed and do port recovery", - lport->port_id); - - unf_lport_error_recovery(lport); -} - -static void unf_scr_callback(void *lport, void *rport, void *xchg) -{ - /* Callback function for SCR response: Send GID_PT with INI mode */ - struct unf_lport *unf_lport = NULL; - struct unf_disc *disc = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_els_acc *els_acc = NULL; - u32 ret = UNF_RETURN_ERROR; - ulong port_flag = 0; - ulong disc_flag = 0; - u32 cmnd = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - disc = &unf_lport->disc; - - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) - return; - - els_acc = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_acc; - if (unf_xchg->byte_orders & UNF_BIT_2) - cmnd = be32_to_cpu(els_acc->cmnd); - else - cmnd = (els_acc->cmnd); - - if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { - spin_lock_irqsave(&unf_lport->lport_state_lock, port_flag); - if (unf_lport->states != UNF_LPORT_ST_SCR_WAIT) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, - port_flag); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) receive SCR ACC with error state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - unf_lport->states); - return; - } - - /* LPort: SCR_WAIT --> READY */ - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_REMOTE_ACC); - if (unf_lport->states == UNF_LPORT_ST_READY) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) enter READY state when received SCR response", - unf_lport->port_id, unf_lport->nport_id); - } - - /* Start to Discovery with INI mode: GID_PT */ - if ((unf_lport->options & UNF_PORT_MODE_INI) == - UNF_PORT_MODE_INI) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, - port_flag); - - if (unf_lport->disc.disc_temp.unf_disc_start) { - spin_lock_irqsave(&disc->rport_busy_pool_lock, - disc_flag); - unf_lport->disc.disc_option = UNF_INIT_DISC; - disc->last_disc_jiff = jiffies; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - - ret = unf_lport->disc.disc_temp.unf_disc_start(unf_lport); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x) DISC %s with INI mode", - unf_lport->port_id, - (ret != RETURN_OK) ? 
"failed" : "succeed"); - } - return; - } - - spin_unlock_irqrestore(&unf_lport->lport_state_lock, port_flag); - /* NOTE: set state with UNF_DISC_ST_END used for - * RSCN process - */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - unf_lport->disc.states = UNF_DISC_ST_END; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) is TGT mode, no need to discovery", - unf_lport->port_id); - - return; - } - unf_lport_error_recovery(unf_lport); -} - -void unf_check_rport_need_delay_plogi(struct unf_lport *lport, - struct unf_rport *rport, u32 port_feature) -{ - /* - * Called by: - * 1. Private loop - * 2. RCVD GFF_ID ACC - */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - ulong flag = 0; - u32 nport_id = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - nport_id = unf_rport->nport_id; - - /* - * Send GFF_ID means L_Port has INI attribute - * * - * When to send PLOGI: - * 1. R_Port has TGT mode (COM or TGT), send PLOGI immediately - * 2. R_Port only with INI, send LOGO immediately - * 3. R_Port with unknown attribute, delay to send PLOGI - */ - if ((UNF_PORT_MODE_TGT & port_feature) || - (UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF & - unf_lport->enhanced_features)) { - /* R_Port has TGT mode: send PLOGI immediately */ - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, nport_id); - FC_CHECK_RETURN_VOID(unf_rport); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = nport_id; - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Start to send PLOGI */ - ret = unf_send_plogi(unf_lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI to RPort(0x%x) failed", - unf_lport->port_id, unf_lport->nport_id, - nport_id); - - unf_rport_error_recovery(unf_rport); - } - } else if (port_feature == UNF_PORT_MODE_INI) { - /* R_Port only with INI mode: can't send PLOGI - * --->>> LOGO/nothing - */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - if (unf_rport->rp_state == UNF_RPORT_ST_INIT) { - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send LOGO to RPort(0x%x) which only with INI mode", - unf_lport->port_id, unf_lport->nport_id, nport_id); - - /* Enter Closing state */ - unf_rport_enter_logo(unf_lport, unf_rport); - } else { - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - } - } else { - /* Unknown R_Port attribute: Delay to send PLOGI */ - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, nport_id); - FC_CHECK_RETURN_VOID(unf_rport); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = nport_id; - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - unf_rport_delay_login(unf_rport); - } -} diff --git a/drivers/scsi/spfc/common/unf_gs.h b/drivers/scsi/spfc/common/unf_gs.h deleted file mode 100644 index d9856133b3cdbc6022f5a2bc3112f839622c4c29..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_gs.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel 
Memory Technology, Ltd */ - -#ifndef UNF_GS_H -#define UNF_GS_H - -#include "unf_type.h" -#include "unf_lport.h" - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -u32 unf_send_scr(struct unf_lport *lport, - struct unf_rport *rport); -u32 unf_send_ctpass_thru(struct unf_lport *lport, - void *buffer, u32 bufflen); - -u32 unf_send_gid_ft(struct unf_lport *lport, - struct unf_rport *rport); -u32 unf_send_gid_pt(struct unf_lport *lport, - struct unf_rport *rport); -u32 unf_send_gpn_id(struct unf_lport *lport, - struct unf_rport *sns_port, u32 nport_id); -u32 unf_send_gnn_id(struct unf_lport *lport, - struct unf_rport *sns_port, u32 nport_id); -u32 unf_send_gff_id(struct unf_lport *lport, - struct unf_rport *sns_port, u32 nport_id); - -u32 unf_send_rff_id(struct unf_lport *lport, - struct unf_rport *rport, u32 fc4_type); -u32 unf_send_rft_id(struct unf_lport *lport, - struct unf_rport *rport); -void unf_rcv_gnn_id_rsp_unknown(struct unf_lport *lport, - struct unf_rport *sns_port, u32 nport_id); -void unf_rcv_gpn_id_rsp_unknown(struct unf_lport *lport, u32 nport_id); -void unf_rcv_gff_id_rsp_unknown(struct unf_lport *lport, u32 nport_id); -void unf_check_rport_need_delay_plogi(struct unf_lport *lport, - struct unf_rport *rport, u32 port_feature); - -struct send_com_trans_in { - unsigned char port_wwn[8]; - u32 req_buffer_count; - unsigned char req_buffer[ARRAY_INDEX_1]; -}; - -struct send_com_trans_out { - u32 hba_status; - u32 total_resp_buffer_cnt; - u32 actual_resp_buffer_cnt; - unsigned char resp_buffer[ARRAY_INDEX_1]; -}; - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/drivers/scsi/spfc/common/unf_init.c b/drivers/scsi/spfc/common/unf_init.c deleted file mode 100644 index 7e6f98d169776df252f79c2c104c04b49cdcdebc..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_init.c +++ /dev/null @@ -1,353 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_type.h" -#include "unf_log.h" -#include "unf_scsi_common.h" -#include "unf_event.h" -#include "unf_exchg.h" -#include "unf_portman.h" -#include "unf_rport.h" -#include "unf_service.h" -#include "unf_io.h" -#include "unf_io_abnormal.h" - -#define UNF_PID 12 -#define MY_PID UNF_PID - -#define RPORT_FEATURE_POOL_SIZE 4096 -struct task_struct *event_task_thread; -struct workqueue_struct *unf_wq; - -atomic_t fc_mem_ref; - -struct unf_global_card_thread card_thread_mgr; -u32 unf_dgb_level = UNF_MAJOR; -u32 log_print_level = UNF_INFO; -u32 log_limited_times = UNF_LOGIN_ATT_PRINT_TIMES; - -static struct unf_esgl_page *unf_get_one_free_esgl_page - (void *lport, struct unf_frame_pkg *pkg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(pkg, NULL); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)pkg->xchg_contex; - - return unf_get_and_add_one_free_esgl_page(unf_lport, unf_xchg); -} - -static int unf_get_cfg_parms(char *section_name, struct unf_cfg_item *cfg_itm, - u32 *cfg_value, u32 itemnum) -{ - /* Maximum length of a configuration item value, including the end - * character - */ -#define UNF_MAX_ITEM_VALUE_LEN (256) - - u32 *unf_cfg_value = NULL; - struct unf_cfg_item *unf_cfg_itm = NULL; - u32 i = 0; - - unf_cfg_itm = cfg_itm; - unf_cfg_value = cfg_value; - - for (i = 0; i < itemnum; i++) { - if (!unf_cfg_itm || !unf_cfg_value) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, - "[err]Config name or value is 
NULL"); - - return UNF_RETURN_ERROR; - } - - if (strcmp("End", unf_cfg_itm->puc_name) == 0x0) - break; - - if (strcmp("fw_path", unf_cfg_itm->puc_name) == 0x0) { - unf_cfg_itm++; - unf_cfg_value += UNF_MAX_ITEM_VALUE_LEN / sizeof(u32); - continue; - } - - *unf_cfg_value = unf_cfg_itm->default_value; - unf_cfg_itm++; - unf_cfg_value++; - } - - return RETURN_OK; -} - -struct unf_cm_handle_op unf_cm_handle_ops = { - .unf_alloc_local_port = unf_lport_create_and_init, - .unf_release_local_port = unf_release_local_port, - .unf_receive_ls_gs_pkg = unf_receive_ls_gs_pkg, - .unf_receive_bls_pkg = unf_receive_bls_pkg, - .unf_send_els_done = unf_send_els_done, - .unf_receive_ini_response = unf_ini_scsi_completed, - .unf_get_cfg_parms = unf_get_cfg_parms, - .unf_receive_marker_status = unf_recv_tmf_marker_status, - .unf_receive_abts_marker_status = unf_recv_abts_marker_status, - - .unf_process_fcp_cmnd = NULL, - .unf_tgt_cmnd_xfer_or_rsp_echo = NULL, - .unf_cm_get_sgl_entry = unf_ini_get_sgl_entry, - .unf_cm_get_dif_sgl_entry = unf_ini_get_dif_sgl_entry, - .unf_get_one_free_esgl_page = unf_get_one_free_esgl_page, - .unf_fc_port_event = unf_fc_port_link_event, -}; - -u32 unf_get_cm_handle_ops(struct unf_cm_handle_op *cm_handle) -{ - FC_CHECK_RETURN_VALUE(cm_handle, UNF_RETURN_ERROR); - - memcpy(cm_handle, &unf_cm_handle_ops, sizeof(struct unf_cm_handle_op)); - - return RETURN_OK; -} - -static void unf_deinit_cm_handle_ops(void) -{ - memset(&unf_cm_handle_ops, 0, sizeof(struct unf_cm_handle_op)); -} - -int unf_event_process(void *worker_ptr) -{ - struct list_head *event_list = NULL; - struct unf_cm_event_report *event_node = NULL; - struct completion *create_done = (struct completion *)worker_ptr; - ulong flags = 0; - - set_user_nice(current, UNF_OS_THRD_PRI_LOW); - recalc_sigpending(); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[event]Enter event thread"); - - if (create_done) - complete(create_done); - - do { - spin_lock_irqsave(&fc_event_list.fc_event_list_lock, flags); - if (list_empty(&fc_event_list.list_head)) { - spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, flags); - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout((long)msecs_to_jiffies(UNF_S_TO_MS)); - } else { - event_list = UNF_OS_LIST_NEXT(&fc_event_list.list_head); - list_del_init(event_list); - fc_event_list.list_num--; - event_node = list_entry(event_list, - struct unf_cm_event_report, - list_entry); - spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, flags); - - /* Process event node */ - unf_handle_event(event_node); - } - } while (!kthread_should_stop()); - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "[event]Event thread exit"); - - return RETURN_OK; -} - -static int unf_creat_event_center(void) -{ - struct completion create_done; - - init_completion(&create_done); - INIT_LIST_HEAD(&fc_event_list.list_head); - fc_event_list.list_num = 0; - spin_lock_init(&fc_event_list.fc_event_list_lock); - - event_task_thread = kthread_run(unf_event_process, &create_done, "spfc_event"); - if (IS_ERR(event_task_thread)) { - complete_and_exit(&create_done, 0); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create event thread failed(0x%p)", - event_task_thread); - - return UNF_RETURN_ERROR; - } - wait_for_completion(&create_done); - return RETURN_OK; -} - -static void unf_cm_event_thread_exit(void) -{ - if (event_task_thread) - kthread_stop(event_task_thread); -} - -static void unf_init_card_mgr_list(void) -{ - /* So far, do not care */ - INIT_LIST_HEAD(&card_thread_mgr.card_list_head); - - 
spin_lock_init(&card_thread_mgr.global_card_list_lock); - - card_thread_mgr.card_num = 0; -} - -int unf_port_feature_pool_init(void) -{ - u32 index = 0; - u32 rport_feature_pool_size = 0; - struct unf_rport_feature_recard *rport_feature = NULL; - unsigned long flags = 0; - - rport_feature_pool_size = sizeof(struct unf_rport_feature_pool); - port_feature_pool = vmalloc(rport_feature_pool_size); - if (!port_feature_pool) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]cannot allocate rport feature pool"); - - return UNF_RETURN_ERROR; - } - memset(port_feature_pool, 0, rport_feature_pool_size); - spin_lock_init(&port_feature_pool->port_fea_pool_lock); - INIT_LIST_HEAD(&port_feature_pool->list_busy_head); - INIT_LIST_HEAD(&port_feature_pool->list_free_head); - - port_feature_pool->port_feature_pool_addr = - vmalloc((size_t)(RPORT_FEATURE_POOL_SIZE * sizeof(struct unf_rport_feature_recard))); - if (!port_feature_pool->port_feature_pool_addr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]cannot allocate rport feature pool address"); - - vfree(port_feature_pool); - port_feature_pool = NULL; - - return UNF_RETURN_ERROR; - } - - memset(port_feature_pool->port_feature_pool_addr, 0, - RPORT_FEATURE_POOL_SIZE * sizeof(struct unf_rport_feature_recard)); - rport_feature = (struct unf_rport_feature_recard *) - port_feature_pool->port_feature_pool_addr; - - spin_lock_irqsave(&port_feature_pool->port_fea_pool_lock, flags); - for (index = 0; index < RPORT_FEATURE_POOL_SIZE; index++) { - list_add_tail(&rport_feature->entry_feature, &port_feature_pool->list_free_head); - rport_feature++; - } - spin_unlock_irqrestore(&port_feature_pool->port_fea_pool_lock, flags); - - return RETURN_OK; -} - -void unf_free_port_feature_pool(void) -{ - if (port_feature_pool->port_feature_pool_addr) { - vfree(port_feature_pool->port_feature_pool_addr); - port_feature_pool->port_feature_pool_addr = NULL; - } - - vfree(port_feature_pool); - port_feature_pool = NULL; -} - -int unf_common_init(void) -{ - int ret = RETURN_OK; - - unf_dgb_level = UNF_MAJOR; - log_print_level = UNF_KEVENT; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "UNF Driver Version:%s.", SPFC_DRV_VERSION); - - atomic_set(&fc_mem_ref, 0); - ret = unf_port_feature_pool_init(); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port Feature Pool init failed"); - return ret; - } - - ret = (int)unf_register_ini_transport(); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]INI interface init failed"); - goto REG_INITRANSPORT_FAIL; - } - - unf_port_mgmt_init(); - unf_init_card_mgr_list(); - ret = (int)unf_init_global_event_msg(); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create global event center failed"); - goto CREAT_GLBEVENTMSG_FAIL; - } - - ret = (int)unf_creat_event_center(); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create event center (thread) failed"); - goto CREAT_EVENTCENTER_FAIL; - } - - unf_wq = create_workqueue("unf_wq"); - if (!unf_wq) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create work queue failed"); - goto CREAT_WORKQUEUE_FAIL; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Init common layer succeed"); - return ret; -CREAT_WORKQUEUE_FAIL: - unf_cm_event_thread_exit(); -CREAT_EVENTCENTER_FAIL: - unf_destroy_global_event_msg(); -CREAT_GLBEVENTMSG_FAIL: - unf_unregister_ini_transport(); -REG_INITRANSPORT_FAIL: - unf_free_port_feature_pool(); - return UNF_RETURN_ERROR; -} - -static void 
unf_destroy_dirty_port(void) -{ - u32 dirty_port_num = 0; - - unf_show_dirty_port(false, &dirty_port_num); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Sys has %u dirty L_Port(s)", dirty_port_num); -} - -void unf_common_exit(void) -{ - unf_free_port_feature_pool(); - - unf_destroy_dirty_port(); - - flush_workqueue(unf_wq); - destroy_workqueue(unf_wq); - unf_wq = NULL; - - unf_cm_event_thread_exit(); - - unf_destroy_global_event_msg(); - - unf_deinit_cm_handle_ops(); - - unf_port_mgmt_deinit(); - - unf_unregister_ini_transport(); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[info]SPFC module removed successfully, memory reference count is %d", - atomic_read(&fc_mem_ref)); -}
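unf_common_init() above acquires its resources in a fixed order and unwinds them through cascading goto labels, so a failure at any step releases exactly the steps that already succeeded, in reverse order. A self-contained sketch of the idiom; all names here are illustrative stand-ins (the stubs always succeed), not driver API:

static int ex_step_a_init(void) { return 0; }
static void ex_step_a_exit(void) { }
static int ex_step_b_init(void) { return 0; }
static void ex_step_b_exit(void) { }
static int ex_step_c_init(void) { return 0; }

static int ex_module_init(void)
{
	int ret;

	ret = ex_step_a_init();
	if (ret != 0)
		return ret;	/* nothing to unwind yet */

	ret = ex_step_b_init();
	if (ret != 0)
		goto undo_a;	/* step A succeeded; roll it back */

	ret = ex_step_c_init();
	if (ret != 0)
		goto undo_b;	/* roll back B, then fall through to A */

	return 0;

undo_b:
	ex_step_b_exit();
undo_a:
	ex_step_a_exit();
	return ret;
}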
diff --git a/drivers/scsi/spfc/common/unf_io.c b/drivers/scsi/spfc/common/unf_io.c deleted file mode 100644 index b1255ecba88c35b89f4af941a82937e7ef57c2fb..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_io.c +++ /dev/null @@ -1,1220 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_io.h" -#include "unf_log.h" -#include "unf_portman.h" -#include "unf_service.h" -#include "unf_io_abnormal.h" - -u32 sector_size_flag; - -#define UNF_GET_FCP_CTL(pkg) ((((pkg)->status) >> UNF_SHIFT_8) & 0xFF) -#define UNF_GET_SCSI_STATUS(pkg) (((pkg)->status) & 0xFF) - -static u32 unf_io_success_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status); -static u32 unf_ini_error_default_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, - u32 up_status); -static u32 unf_io_underflow_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status); -static u32 unf_ini_dif_error_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status); - -struct unf_ini_error_handler_s { - u32 ini_error_code; - u32 (*unf_ini_error_handler)(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status); -}; - -struct unf_ini_error_handler_s ini_error_handler_table[] = { - {UNF_IO_SUCCESS, unf_io_success_handler}, - {UNF_IO_ABORTED, unf_ini_error_default_handler}, - {UNF_IO_FAILED, unf_ini_error_default_handler}, - {UNF_IO_ABORT_ABTS, unf_ini_error_default_handler}, - {UNF_IO_ABORT_LOGIN, unf_ini_error_default_handler}, - {UNF_IO_ABORT_REET, unf_ini_error_default_handler}, - {UNF_IO_ABORT_FAILED, unf_ini_error_default_handler}, - {UNF_IO_OUTOF_ORDER, unf_ini_error_default_handler}, - {UNF_IO_FTO, unf_ini_error_default_handler}, - {UNF_IO_LINK_FAILURE, unf_ini_error_default_handler}, - {UNF_IO_OVER_FLOW, unf_ini_error_default_handler}, - {UNF_IO_RSP_OVER, unf_ini_error_default_handler}, - {UNF_IO_LOST_FRAME, unf_ini_error_default_handler}, - {UNF_IO_UNDER_FLOW, unf_io_underflow_handler}, - {UNF_IO_HOST_PROG_ERROR, unf_ini_error_default_handler}, - {UNF_IO_SEST_PROG_ERROR, unf_ini_error_default_handler}, - {UNF_IO_INVALID_ENTRY, unf_ini_error_default_handler}, - {UNF_IO_ABORT_SEQ_NOT, unf_ini_error_default_handler}, - {UNF_IO_REJECT, unf_ini_error_default_handler}, - {UNF_IO_EDC_IN_ERROR, unf_ini_error_default_handler}, - {UNF_IO_EDC_OUT_ERROR, unf_ini_error_default_handler}, - {UNF_IO_UNINIT_KEK_ERR, unf_ini_error_default_handler}, - {UNF_IO_DEK_OUTOF_RANGE, unf_ini_error_default_handler}, - {UNF_IO_KEY_UNWRAP_ERR, unf_ini_error_default_handler}, - {UNF_IO_KEY_TAG_ERR, unf_ini_error_default_handler}, - {UNF_IO_KEY_ECC_ERR, unf_ini_error_default_handler}, - {UNF_IO_BLOCK_SIZE_ERROR, unf_ini_error_default_handler}, - {UNF_IO_ILLEGAL_CIPHER_MODE, unf_ini_error_default_handler}, - {UNF_IO_CLEAN_UP, unf_ini_error_default_handler}, - {UNF_IO_ABORTED_BY_TARGET, unf_ini_error_default_handler}, - {UNF_IO_TRANSPORT_ERROR, unf_ini_error_default_handler}, - {UNF_IO_LINK_FLASH, unf_ini_error_default_handler}, - {UNF_IO_TIMEOUT, unf_ini_error_default_handler}, - {UNF_IO_DMA_ERROR, unf_ini_error_default_handler}, - {UNF_IO_DIF_ERROR, unf_ini_dif_error_handler}, - {UNF_IO_INCOMPLETE, unf_ini_error_default_handler}, - {UNF_IO_DIF_REF_ERROR, unf_ini_dif_error_handler}, - {UNF_IO_DIF_GEN_ERROR, unf_ini_dif_error_handler}, - {UNF_IO_NO_XCHG, unf_ini_error_default_handler} - }; - -void unf_done_ini_xchg(struct unf_xchg *xchg) -{ - /* - * About I/O Done - * 1. normal case - * 2. Send ABTS & RCVD RSP - * 3. Send ABTS & timer timeout - */ - struct unf_scsi_cmnd scsi_cmd = {0}; - ulong flags = 0; - struct unf_scsi_cmd_info *scsi_cmnd_info = NULL; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - u32 scsi_id = 0; - - FC_CHECK_RETURN_VOID(xchg); - - if (unlikely(!xchg->scsi_cmnd_info.scsi_cmnd)) - return; - - /* 1. Free RX_ID for INI SIRT: Do not care */ - - /* - * 2. set & check exchange state - * * - * for Set UP_ABORT Tag: - * 1) L_Port destroy - * 2) LUN reset - * 3) Target/Session reset - * 4) SCSI send Abort(ABTS) - */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - xchg->io_state |= INI_IO_STATE_DONE; - if (unlikely(xchg->io_state & - (INI_IO_STATE_UPABORT | INI_IO_STATE_UPSEND_ERR | INI_IO_STATE_TMF_ABORT))) { - /* - * a. UPABORT: SCSI has sent ABTS - * --->>> do not call SCSI_Done, return directly - * b. UPSEND_ERR: error happened while the LLDD was sending the SCSI_CMD - * --->>> do not call SCSI_Done, SCSI needs to retry - */ - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT, - "[event]Exchange(0x%p) Cmdsn:0x%lx upCmd:%p hottag(0x%x) with state(0x%x) has been aborted or send error", - xchg, (ulong)xchg->cmnd_sn, xchg->scsi_cmnd_info.scsi_cmnd, - xchg->hotpooltag, xchg->io_state); - - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - return; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - scsi_cmnd_info = &xchg->scsi_cmnd_info; - - /* - * 3. Set: - * scsi_cmnd; - * cmnd_done_func; - * cmnd up_level_done; - * sense_buff_addr; - * resid_length; - * cmnd_result; - * dif_info - * ** - * UNF_SCSI_CMND <<-- UNF_SCSI_CMND_INFO - */ - UNF_SET_HOST_CMND((&scsi_cmd), scsi_cmnd_info->scsi_cmnd); - UNF_SER_CMND_DONE_FUNC((&scsi_cmd), scsi_cmnd_info->done); - UNF_SET_UP_LEVEL_CMND_DONE_FUNC(&scsi_cmd, scsi_cmnd_info->uplevel_done); - scsi_cmd.drv_private = xchg->lport; - if (unlikely((UNF_SCSI_STATUS(xchg->scsi_cmnd_info.result)) & FCP_SNS_LEN_VALID_MASK)) { - unf_save_sense_data(scsi_cmd.upper_cmnd, - (char *)xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, - (int)xchg->fcp_sfs_union.fcp_rsp_entry.fcp_sense_len); - } - UNF_SET_RESID((&scsi_cmd), (u32)xchg->resid_len); - UNF_SET_CMND_RESULT((&scsi_cmd), scsi_cmnd_info->result); - memcpy(&scsi_cmd.dif_info, &xchg->dif_info, sizeof(struct dif_info)); - - scsi_id = scsi_cmnd_info->scsi_id; - - UNF_DONE_SCSI_CMND((&scsi_cmd)); - - /* 4. Update IO result CNT */ - if (likely(xchg->lport)) { - scsi_image_table = &xchg->lport->rport_scsi_table; - UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, - (scsi_cmnd_info->result >> UNF_SHIFT_16)); - } -} -
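The ini_error_handler_table above drives a simple table-based dispatch: the completion path scans it for the low-level error code and invokes the matching handler, falling back to the default handler for unknown codes. A minimal standalone model of the same idea (all names and code values are illustrative, not driver API):

#include <linux/kernel.h>
#include <linux/types.h>

struct ex_err_entry {
	u32 code;
	u32 (*handler)(u32 up_status);
};

static u32 ex_default_handler(u32 up_status) { return up_status; }
static u32 ex_success_handler(u32 up_status) { (void)up_status; return 0; }

static const struct ex_err_entry ex_table[] = {
	{0x0, ex_success_handler},	/* e.g. UNF_IO_SUCCESS */
	{0x1, ex_default_handler},	/* e.g. UNF_IO_ABORTED */
};

static u32 ex_dispatch(u32 code, u32 up_status)
{
	u32 i;

	/* Linear scan, exactly like unf_ini_status_handle() below. */
	for (i = 0; i < ARRAY_SIZE(ex_table); i++) {
		if (ex_table[i].code == code)
			return ex_table[i].handler(up_status);
	}
	return ex_default_handler(up_status);	/* unknown code: default path */
}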
-static inline u32 unf_ini_get_sgl_entry_buf(ini_get_sgl_entry_buf ini_get_sgl, - void *cmnd, void *driver_sgl, - void **upper_sgl, u32 *req_index, - u32 *index, char **buf, - u32 *buf_len) -{ - if (unlikely(!ini_get_sgl)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Command(0x%p) get SGL entry function is NULL.", cmnd); - - return UNF_RETURN_ERROR; - } - - return ini_get_sgl(cmnd, driver_sgl, upper_sgl, req_index, index, buf, buf_len); -} - -u32 unf_ini_get_sgl_entry(void *pkg, char **buf, u32 *buf_len) -{ - struct unf_frame_pkg *unf_pkg = (struct unf_frame_pkg *)pkg; - struct unf_xchg *unf_xchg = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf_len, UNF_RETURN_ERROR); - - unf_xchg = (struct unf_xchg *)unf_pkg->xchg_contex; - FC_CHECK_RETURN_VALUE(unf_xchg, UNF_RETURN_ERROR); - - /* Get SGL Entry buffer for INI Mode */ - ret = unf_ini_get_sgl_entry_buf(unf_xchg->scsi_cmnd_info.unf_get_sgl_entry_buf, - unf_xchg->scsi_cmnd_info.scsi_cmnd, NULL, - &unf_xchg->req_sgl_info.sgl, - &unf_xchg->scsi_cmnd_info.port_id, - &((unf_xchg->req_sgl_info).entry_index), buf, buf_len); - - return ret; -} - -u32 unf_ini_get_dif_sgl_entry(void *pkg, char **buf, u32 *buf_len) -{ - struct unf_frame_pkg *unf_pkg = (struct unf_frame_pkg *)pkg; - struct unf_xchg *unf_xchg = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf_len, UNF_RETURN_ERROR); - - unf_xchg = (struct unf_xchg *)unf_pkg->xchg_contex; - FC_CHECK_RETURN_VALUE(unf_xchg, UNF_RETURN_ERROR); - - /* Get SGL Entry buffer for INI Mode */ - ret = unf_ini_get_sgl_entry_buf(unf_xchg->scsi_cmnd_info.unf_get_sgl_entry_buf, - unf_xchg->scsi_cmnd_info.scsi_cmnd, NULL, - &unf_xchg->dif_sgl_info.sgl, - &unf_xchg->scsi_cmnd_info.port_id, - &((unf_xchg->dif_sgl_info).entry_index), buf, buf_len); - return ret; -} - -u32 unf_get_up_level_cmnd_errcode(struct unf_ini_error_code *err_table, - u32 err_table_count, u32 drv_err_code) -{ - u32 loop = 0; - - /* on failure return UNF_RETURN_ERROR; adjusted by the upper level */ - if (unlikely(!err_table)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Error code table is NULL, error code(0x%x).", drv_err_code); - - return (u32)UNF_SCSI_HOST(DID_ERROR); - } - - for (loop = 0; loop < err_table_count; loop++) { - if (err_table[loop].drv_errcode == drv_err_code) - return err_table[loop].ap_errcode; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Unsupported AP error code for error code(0x%x).", drv_err_code); - - return (u32)UNF_SCSI_HOST(DID_ERROR); -} - -static u32 unf_ini_status_handle(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg) -{ - u32 loop = 0; - u32 ret = UNF_RETURN_ERROR; - u32 up_status = 0; - - for (loop = 0; loop < sizeof(ini_error_handler_table) / - sizeof(struct unf_ini_error_handler_s); loop++) { - if (UNF_GET_LL_ERR(pkg) == ini_error_handler_table[loop].ini_error_code) { - up_status = - unf_get_up_level_cmnd_errcode(xchg->scsi_cmnd_info.err_code_table, - xchg->scsi_cmnd_info.err_code_table_cout, - UNF_GET_LL_ERR(pkg)); - - if (ini_error_handler_table[loop].unf_ini_error_handler) { - ret = ini_error_handler_table[loop] - .unf_ini_error_handler(xchg, pkg, up_status); - } else { - /* set exchange->result 
---to--->>>scsi_result */ - ret = unf_ini_error_default_handler(xchg, pkg, up_status); - } - - return ret; - } - } - - up_status = unf_get_up_level_cmnd_errcode(xchg->scsi_cmnd_info.err_code_table, - xchg->scsi_cmnd_info.err_code_table_cout, - UNF_IO_SOFT_ERR); - - ret = unf_ini_error_default_handler(xchg, pkg, up_status); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Can not find com status, SID(0x%x) exchange(0x%p) com_status(0x%x) DID(0x%x) hot_pool_tag(0x%x)", - xchg->sid, xchg, pkg->status, xchg->did, xchg->hotpooltag); - - return ret; -} - -static void unf_analysis_response_info(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, - u32 *up_status) -{ - u8 *resp_buf = NULL; - - /* LL_Driver use Little End, and copy RSP_INFO to COM_Driver */ - if (unlikely(pkg->unf_rsp_pload_bl.length > UNF_RESPONE_DATA_LEN)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Receive FCP response resp buffer len is invalid 0x%x", - pkg->unf_rsp_pload_bl.length); - return; - } - - resp_buf = (u8 *)pkg->unf_rsp_pload_bl.buffer_ptr; - if (resp_buf) { - /* If chip use Little End, then change it to Big End */ - if ((pkg->byte_orders & UNF_BIT_3) == 0) - unf_cpu_to_big_end(resp_buf, pkg->unf_rsp_pload_bl.length); - - /* Chip DAM data with Big End */ - if (resp_buf[ARRAY_INDEX_3] != UNF_FCP_TM_RSP_COMPLETE) { - *up_status = UNF_SCSI_HOST(DID_BUS_BUSY); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%p) DID bus busy, scsi_status(0x%x)", - xchg->lport, UNF_GET_SCSI_STATUS(pkg)); - } - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Receive FCP response, resp buffer is NULL resp buffer len is 0x%x", - pkg->unf_rsp_pload_bl.length); - } -} - -static void unf_analysis_sense_info(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 *up_status) -{ - u32 length = 0; - - /* 4 bytes Align */ - length = MIN(SCSI_SENSE_DATA_LEN, pkg->unf_sense_pload_bl.length); - - if (unlikely(pkg->unf_sense_pload_bl.length > SCSI_SENSE_DATA_LEN)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[info]Receive FCP response resp buffer len is 0x%x", - pkg->unf_sense_pload_bl.length); - } - /* - * If have sense info then copy directly - * else, the chip has been dma the data to sense buffer - */ - - if (length != 0 && pkg->unf_rsp_pload_bl.buffer_ptr) { - /* has been dma to exchange buffer */ - if (unlikely(pkg->unf_rsp_pload_bl.length > UNF_RESPONE_DATA_LEN)) { - *up_status = UNF_SCSI_HOST(DID_ERROR); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Receive FCP response resp buffer len is invalid 0x%x", - pkg->unf_rsp_pload_bl.length); - - return; - } - - xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu = (u8 *)kmalloc(length, GFP_ATOMIC); - if (!xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Alloc FCP sense buffer failed"); - return; - } - - memcpy(xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, - ((u8 *)(pkg->unf_rsp_pload_bl.buffer_ptr)) + - pkg->unf_rsp_pload_bl.length, length); - - xchg->fcp_sfs_union.fcp_rsp_entry.fcp_sense_len = length; - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Receive FCP response, sense buffer is NULL sense buffer len is 0x%x", - length); - } -} - -static u32 unf_io_success_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status) -{ - u8 scsi_status = 0; - u8 control = 0; - u32 status = up_status; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - control = UNF_GET_FCP_CTL(pkg); - scsi_status = UNF_GET_SCSI_STATUS(pkg); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, 
UNF_INFO, - "[info]Port(0x%p), Exchange(0x%p) Completed, Control(0x%x), Scsi Status(0x%x)", - xchg->lport, xchg, control, scsi_status); - - if (control & FCP_SNS_LEN_VALID_MASK) { - /* has sense info */ - if (scsi_status == FCP_SCSI_STATUS_GOOD) - scsi_status = SCSI_CHECK_CONDITION; - - unf_analysis_sense_info(xchg, pkg, &status); - } else { - /* - * When the FCP_RSP_LEN_VALID bit is set to one, - * the content of the SCSI STATUS CODE field is not reliable - * and shall be ignored by the application client. - */ - if (control & FCP_RSP_LEN_VALID_MASK) - unf_analysis_response_info(xchg, pkg, &status); - } - - xchg->scsi_cmnd_info.result = status | UNF_SCSI_STATUS(scsi_status); - - return RETURN_OK; -} - -static u32 unf_ini_error_default_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, - u32 up_status) -{ - /* set exchange->result ---to--->>> scsi_cmnd->result */ - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_WARN, - "[warn]SID(0x%x) exchange(0x%p) com_status(0x%x) up_status(0x%x) DID(0x%x) hot_pool_tag(0x%x) response_len(0x%x)", - xchg->sid, xchg, pkg->status, up_status, xchg->did, - xchg->hotpooltag, pkg->residus_len); - - xchg->scsi_cmnd_info.result = - up_status | UNF_SCSI_STATUS(UNF_GET_SCSI_STATUS(pkg)); - - return RETURN_OK; -} - -static u32 unf_ini_dif_error_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status) -{ - u8 *sense_data = NULL; - u16 sense_code = 0; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - /* - * According to the DIF scheme, the driver sets - * CHECK CONDITION (0x2) when a DIF error occurs, - * and returns values based on the upper-layer verification result. - * Check sequence: CRC, LBA, APP; - * if a CRC error is found, the subsequent checks are not performed. - */ - xchg->scsi_cmnd_info.result = UNF_SCSI_STATUS(SCSI_CHECK_CONDITION); - - sense_code = (u16)pkg->status_sub_code; - sense_data = (u8 *)kmalloc(SCSI_SENSE_DATA_LEN, GFP_ATOMIC); - if (!sense_data) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Alloc FCP sense buffer failed"); - - return UNF_RETURN_ERROR; - } - memset(sense_data, 0, SCSI_SENSE_DATA_LEN); - sense_data[ARRAY_INDEX_0] = SENSE_DATA_RESPONSE_CODE; /* response code:0x70 */ - sense_data[ARRAY_INDEX_2] = ILLEGAL_REQUEST; /* sense key:0x05; */ - sense_data[ARRAY_INDEX_7] = ADDITINONAL_SENSE_LEN; /* additional sense length:0x7 */ - sense_data[ARRAY_INDEX_12] = (u8)(sense_code >> UNF_SHIFT_8); - sense_data[ARRAY_INDEX_13] = (u8)sense_code; - - xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu = sense_data; - xchg->fcp_sfs_union.fcp_rsp_entry.fcp_sense_len = SCSI_SENSE_DATA_LEN; - - /* valid sense data length snscode[13] */ - return RETURN_OK; -} - -static u32 unf_io_underflow_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status) -{ - /* under flow: residlen > 0 */ - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (xchg->fcp_cmnd.cdb[ARRAY_INDEX_0] != SCSIOPC_REPORT_LUN && - xchg->fcp_cmnd.cdb[ARRAY_INDEX_0] != SCSIOPC_INQUIRY) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]IO under flow: SID(0x%x) exchange(0x%p) com status(0x%x) up_status(0x%x) DID(0x%x) hot_pool_tag(0x%x) response SID(0x%x)", - xchg->sid, xchg, pkg->status, up_status, - xchg->did, xchg->hotpooltag, pkg->residus_len); - } - - xchg->resid_len = (int)pkg->residus_len; - (void)unf_io_success_handler(xchg, pkg, up_status); - - return RETURN_OK; -} - 
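unf_ini_dif_error_handler() above reports DIF failures by hand-building a fixed-format sense block: byte 0 carries the response code (0x70), byte 2 the ILLEGAL REQUEST sense key (0x05), byte 7 the additional length, and bytes 12/13 the ASC/ASCQ derived from the DIF sub-code. A compact sketch of that construction; the buffer size and macro names are assumptions for illustration, while the byte values come from the comments in the original code:

#include <linux/slab.h>
#include <linux/types.h>

#define EX_SENSE_DATA_LEN 96	/* assumed size, stands in for SCSI_SENSE_DATA_LEN */
#define EX_RESP_CODE_CURRENT 0x70	/* fixed format, current errors */
#define EX_KEY_ILLEGAL_REQUEST 0x05

static u8 *ex_build_dif_sense(u16 sub_code)
{
	u8 *sense = kzalloc(EX_SENSE_DATA_LEN, GFP_ATOMIC);

	if (!sense)
		return NULL;
	sense[0] = EX_RESP_CODE_CURRENT;	/* response code */
	sense[2] = EX_KEY_ILLEGAL_REQUEST;	/* sense key */
	sense[7] = 0x7;				/* additional sense length */
	sense[12] = (u8)(sub_code >> 8);	/* ASC from DIF sub-code */
	sense[13] = (u8)sub_code;		/* ASCQ from DIF sub-code */
	return sense;
}

-void 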
unf_complete_cmnd(struct unf_scsi_cmnd *scsi_cmnd, u32 result_size) -{ - /* - * Exception during process Que_CMND - * 1. L_Port == NULL; - * 2. L_Port == removing; - * 3. R_Port == NULL; - * 4. Xchg == NULL. - */ - FC_CHECK_RETURN_VOID((UNF_GET_CMND_DONE_FUNC(scsi_cmnd))); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]Command(0x%p), Result(0x%x).", scsi_cmnd, result_size); - - UNF_SET_CMND_RESULT(scsi_cmnd, result_size); - - UNF_DONE_SCSI_CMND(scsi_cmnd); -} - -static inline void unf_bind_xchg_scsi_cmd(struct unf_xchg *xchg, - struct unf_scsi_cmnd *scsi_cmnd) -{ - struct unf_scsi_cmd_info *scsi_cmnd_info = NULL; - - scsi_cmnd_info = &xchg->scsi_cmnd_info; - - /* UNF_SCSI_CMND_INFO <<-- UNF_SCSI_CMND */ - scsi_cmnd_info->err_code_table = UNF_GET_ERR_CODE_TABLE(scsi_cmnd); - scsi_cmnd_info->err_code_table_cout = UNF_GET_ERR_CODE_TABLE_COUNT(scsi_cmnd); - scsi_cmnd_info->done = UNF_GET_CMND_DONE_FUNC(scsi_cmnd); - scsi_cmnd_info->scsi_cmnd = UNF_GET_HOST_CMND(scsi_cmnd); - scsi_cmnd_info->sense_buf = (char *)UNF_GET_SENSE_BUF_ADDR(scsi_cmnd); - scsi_cmnd_info->uplevel_done = UNF_GET_UP_LEVEL_CMND_DONE(scsi_cmnd); - scsi_cmnd_info->unf_get_sgl_entry_buf = UNF_GET_SGL_ENTRY_BUF_FUNC(scsi_cmnd); - scsi_cmnd_info->sgl = UNF_GET_CMND_SGL(scsi_cmnd); - scsi_cmnd_info->time_out = scsi_cmnd->time_out; - scsi_cmnd_info->entry_cnt = scsi_cmnd->entry_count; - scsi_cmnd_info->port_id = (u32)scsi_cmnd->port_id; - scsi_cmnd_info->scsi_id = UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd); -} - -u32 unf_ini_scsi_completed(void *lport, struct unf_frame_pkg *pkg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_fcp_cmnd *fcp_cmnd = NULL; - u32 control = 0; - u16 xchg_tag = 0x0ffff; - u32 ret = UNF_RETURN_ERROR; - ulong xchg_flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - unf_lport = (struct unf_lport *)lport; - xchg_tag = (u16)pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]; - - /* 1. Find Exchange Context */ - unf_xchg = unf_cm_lookup_xchg_by_tag(lport, (u16)xchg_tag); - if (unlikely(!unf_xchg)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) can not find exchange by tag(0x%x)", - unf_lport->port_id, unf_lport->nport_id, xchg_tag); - - /* NOTE: return directly */ - return UNF_RETURN_ERROR; - } - - /* 2. Consistency check */ - UNF_CHECK_ALLOCTIME_VALID(unf_lport, xchg_tag, unf_xchg, - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - unf_xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]); - - /* 3. Increase ref_cnt for exchange protecting */ - ret = unf_xchg_ref_inc(unf_xchg, INI_RESPONSE_DONE); /* hold */ - FC_CHECK_RETURN_VALUE((ret == RETURN_OK), UNF_RETURN_ERROR); - - fcp_cmnd = &unf_xchg->fcp_cmnd; - control = fcp_cmnd->control; - control = UNF_GET_TASK_MGMT_FLAGS(control); - - /* 4. Cancel timer if necessary */ - if (unf_xchg->scsi_cmnd_info.time_out != 0) - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer(unf_xchg); - - /* 5. process scsi TMF if necessary */ - if (control != 0) { - unf_process_scsi_mgmt_result(pkg, unf_xchg); - unf_xchg_ref_dec(unf_xchg, INI_RESPONSE_DONE); /* cancel hold */ - - /* NOTE: return directly */ - return RETURN_OK; - } - - /* 6. 
Xchg Abort state check */ - spin_lock_irqsave(&unf_xchg->xchg_state_lock, xchg_flag); - unf_xchg->oxid = UNF_GET_OXID(pkg); - unf_xchg->rxid = UNF_GET_RXID(pkg); - if (INI_IO_STATE_UPABORT & unf_xchg->io_state) { - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, xchg_flag); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]Port(0x%x) find exchange(%p) state(0x%x) has been aborted", - unf_lport->port_id, unf_xchg, unf_xchg->io_state); - - /* NOTE: release exchange during SCSI ABORT(ABTS) */ - unf_xchg_ref_dec(unf_xchg, INI_RESPONSE_DONE); /* cancel hold */ - - return ret; - } - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, xchg_flag); - - /* - * 7. INI SCSI CMND Status process - * set exchange->result ---to--->>> scsi_result - */ - ret = unf_ini_status_handle(unf_xchg, pkg); - - /* 8. release exchangenecessary */ - unf_cm_free_xchg(unf_lport, unf_xchg); - - /* 9. dec exch ref_cnt */ - unf_xchg_ref_dec(unf_xchg, INI_RESPONSE_DONE); /* cancel hold: release resource now */ - - return ret; -} - -u32 unf_hardware_start_io(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - if (unlikely(!lport->low_level_func.service_op.unf_cmnd_send)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) low level send scsi function is NULL", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - return lport->low_level_func.service_op.unf_cmnd_send(lport->fc_port, pkg); -} - -struct unf_rport *unf_find_rport_by_scsi_id(struct unf_lport *lport, - struct unf_ini_error_code *err_code_table, - u32 err_code_table_cout, u32 scsi_id, u32 *scsi_result) -{ - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - struct unf_wwpn_rport_info *wwpn_rport_info = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - - /* scsi_table -> session_table ->image_table */ - scsi_image_table = &lport->rport_scsi_table; - - /* 1. Scsi_Id validity check */ - if (unlikely(scsi_id >= scsi_image_table->max_scsi_id)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Input scsi_id(0x%x) bigger than max_scsi_id(0x%x).", - scsi_id, scsi_image_table->max_scsi_id); - - *scsi_result = unf_get_up_level_cmnd_errcode(err_code_table, err_code_table_cout, - UNF_IO_SOFT_ERR); /* did_soft_error */ - - return NULL; - } - - /* 2. 
GetR_Port_Info/R_Port: use Scsi_Id find from L_Port's - * Rport_Scsi_Table (image table) - */ - spin_lock_irqsave(&scsi_image_table->scsi_image_table_lock, flags); - wwpn_rport_info = &scsi_image_table->wwn_rport_info_table[scsi_id]; - unf_rport = wwpn_rport_info->rport; - spin_unlock_irqrestore(&scsi_image_table->scsi_image_table_lock, flags); - - if (unlikely(!unf_rport)) { - *scsi_result = unf_get_up_level_cmnd_errcode(err_code_table, - err_code_table_cout, - UNF_IO_PORT_LOGOUT); - - return NULL; - } - - return unf_rport; -} - -static u32 unf_build_xchg_fcpcmnd(struct unf_fcp_cmnd *fcp_cmnd, - struct unf_scsi_cmnd *scsi_cmnd) -{ - memcpy(fcp_cmnd->cdb, &UNF_GET_FCP_CMND(scsi_cmnd), scsi_cmnd->cmnd_len); - - if ((fcp_cmnd->control == UNF_FCP_WR_DATA && - (IS_READ_COMMAND(fcp_cmnd->cdb[ARRAY_INDEX_0]))) || - (fcp_cmnd->control == UNF_FCP_RD_DATA && - (IS_WRITE_COMMAND(fcp_cmnd->cdb[ARRAY_INDEX_0])))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MINOR, - "Scsi command direction inconsistent, CDB[ARRAY_INDEX_0](0x%x), direction(0x%x).", - fcp_cmnd->cdb[ARRAY_INDEX_0], fcp_cmnd->control); - - return UNF_RETURN_ERROR; - } - - memcpy(fcp_cmnd->lun, scsi_cmnd->lun_id, sizeof(fcp_cmnd->lun)); - - unf_big_end_to_cpu((void *)fcp_cmnd->cdb, sizeof(fcp_cmnd->cdb)); - fcp_cmnd->data_length = UNF_GET_DATA_LEN(scsi_cmnd); - - return RETURN_OK; -} - -static void unf_adjust_xchg_len(struct unf_xchg *xchg, u32 scsi_cmnd) -{ - switch (scsi_cmnd) { - case SCSIOPC_REQUEST_SENSE: /* requires different buffer */ - xchg->data_len = UNF_SCSI_SENSE_BUFFERSIZE; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MINOR, "Request Sense new."); - break; - - case SCSIOPC_TEST_UNIT_READY: - case SCSIOPC_RESERVE: - case SCSIOPC_RELEASE: - case SCSIOPC_START_STOP_UNIT: - xchg->data_len = 0; - break; - - default: - break; - } -} - -static void unf_copy_dif_control(struct unf_dif_control_info *dif_control, - struct unf_scsi_cmnd *scsi_cmnd) -{ - dif_control->fcp_dl = scsi_cmnd->dif_control.fcp_dl; - dif_control->protect_opcode = scsi_cmnd->dif_control.protect_opcode; - dif_control->start_lba = scsi_cmnd->dif_control.start_lba; - dif_control->app_tag = scsi_cmnd->dif_control.app_tag; - - dif_control->flags = scsi_cmnd->dif_control.flags; - dif_control->dif_sge_count = scsi_cmnd->dif_control.dif_sge_count; - dif_control->dif_sgl = scsi_cmnd->dif_control.dif_sgl; -} - -static void unf_adjust_dif_pci_transfer_len(struct unf_xchg *xchg, u32 direction) -{ - struct unf_dif_control_info *dif_control = NULL; - u32 sector_size = 0; - - dif_control = &xchg->dif_control; - - if (dif_control->protect_opcode == UNF_DIF_ACTION_NONE) - return; - if ((dif_control->flags & UNF_DIF_SECTSIZE_4KB) == 0) - sector_size = SECTOR_SIZE_512; - else - sector_size = SECTOR_SIZE_4096; - switch (dif_control->protect_opcode & UNF_DIF_ACTION_MASK) { - case UNF_DIF_ACTION_INSERT: - if (direction == DMA_TO_DEVICE) { - /* write IO,insert,Indicates that data with DIF is - * transmitted over the link. - */ - dif_control->fcp_dl = xchg->data_len + - UNF_CAL_BLOCK_CNT(xchg->data_len, sector_size) * UNF_DIF_AREA_SIZE; - } else { - /* read IO,insert,Indicates that the internal DIf is - * carried, and the link does not carry the DIf. - */ - dif_control->fcp_dl = xchg->data_len; - } - break; - - case UNF_DIF_ACTION_VERIFY_AND_DELETE: - if (direction == DMA_TO_DEVICE) { - /* write IO,Delete,Indicates that the internal DIf is - * carried, and the link does not carry the DIf. 
- */ - dif_control->fcp_dl = xchg->data_len; - } else { - /* read IO,Delete,Indicates that data with DIF is - * carried on the link and does not contain DIF on - * internal. - */ - dif_control->fcp_dl = xchg->data_len + - UNF_CAL_BLOCK_CNT(xchg->data_len, sector_size) * UNF_DIF_AREA_SIZE; - } - break; - - case UNF_DIF_ACTION_VERIFY_AND_FORWARD: - dif_control->fcp_dl = xchg->data_len + - UNF_CAL_BLOCK_CNT(xchg->data_len, sector_size) * UNF_DIF_AREA_SIZE; - break; - - default: - dif_control->fcp_dl = xchg->data_len; - break; - } - - xchg->fcp_cmnd.data_length = dif_control->fcp_dl; -} - -static void unf_get_dma_direction(struct unf_fcp_cmnd *fcp_cmnd, - struct unf_scsi_cmnd *scsi_cmnd) -{ - if (UNF_GET_DATA_DIRECTION(scsi_cmnd) == DMA_TO_DEVICE) { - fcp_cmnd->control = UNF_FCP_WR_DATA; - } else if (UNF_GET_DATA_DIRECTION(scsi_cmnd) == DMA_FROM_DEVICE) { - fcp_cmnd->control = UNF_FCP_RD_DATA; - } else { - /* DMA Direction None */ - fcp_cmnd->control = 0; - } -} - -static int unf_save_scsi_cmnd_to_xchg(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_xchg *xchg, - struct unf_scsi_cmnd *scsi_cmnd) -{ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - struct unf_xchg *unf_xchg = xchg; - u32 result_size = 0; - - scsi_cmnd->driver_scribble = (void *)unf_xchg->start_jif; - unf_xchg->rport = unf_rport; - unf_xchg->rport_bind_jifs = unf_rport->rport_alloc_jifs; - - /* Build Xchg SCSI_CMND info */ - unf_bind_xchg_scsi_cmd(unf_xchg, scsi_cmnd); - - unf_xchg->data_len = UNF_GET_DATA_LEN(scsi_cmnd); - unf_xchg->data_direction = UNF_GET_DATA_DIRECTION(scsi_cmnd); - unf_xchg->sid = unf_lport->nport_id; - unf_xchg->did = unf_rport->nport_id; - unf_xchg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = unf_rport->rport_index; - unf_xchg->world_id = scsi_cmnd->world_id; - unf_xchg->cmnd_sn = scsi_cmnd->cmnd_sn; - unf_xchg->pinitiator = scsi_cmnd->pinitiator; - unf_xchg->scsi_id = scsi_cmnd->scsi_id; - if (scsi_cmnd->qos_level == UNF_QOS_LEVEL_DEFAULT) - unf_xchg->qos_level = unf_rport->qos_level; - else - unf_xchg->qos_level = scsi_cmnd->qos_level; - - unf_get_dma_direction(&unf_xchg->fcp_cmnd, scsi_cmnd); - result_size = unf_build_xchg_fcpcmnd(&unf_xchg->fcp_cmnd, scsi_cmnd); - if (unlikely(result_size != RETURN_OK)) - return UNF_RETURN_ERROR; - - unf_adjust_xchg_len(unf_xchg, UNF_GET_FCP_CMND(scsi_cmnd)); - - /* Dif (control) info */ - unf_copy_dif_control(&unf_xchg->dif_control, scsi_cmnd); - memcpy(&unf_xchg->dif_info, &scsi_cmnd->dif_info, sizeof(struct dif_info)); - unf_adjust_dif_pci_transfer_len(unf_xchg, UNF_GET_DATA_DIRECTION(scsi_cmnd)); - - /* single sgl info */ - if (unf_xchg->data_direction != DMA_NONE && UNF_GET_CMND_SGL(scsi_cmnd)) { - unf_xchg->req_sgl_info.sgl = UNF_GET_CMND_SGL(scsi_cmnd); - unf_xchg->req_sgl_info.sgl_start = unf_xchg->req_sgl_info.sgl; - /* Save the sgl header for easy - * location and printing. 
- */ - unf_xchg->req_sgl_info.req_index = 0; - unf_xchg->req_sgl_info.entry_index = 0; - } - - if (scsi_cmnd->dif_control.dif_sgl) { - unf_xchg->dif_sgl_info.sgl = UNF_INI_GET_DIF_SGL(scsi_cmnd); - unf_xchg->dif_sgl_info.entry_index = 0; - unf_xchg->dif_sgl_info.req_index = 0; - unf_xchg->dif_sgl_info.sgl_start = unf_xchg->dif_sgl_info.sgl; - } - - return RETURN_OK; -} - -static int unf_send_fcpcmnd(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ -#define UNF_MAX_PENDING_IO_CNT 3 - struct unf_scsi_cmd_info *scsi_cmnd_info = NULL; - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - struct unf_xchg *unf_xchg = xchg; - struct unf_frame_pkg pkg = {0}; - u32 result_size = 0; - ulong flags = 0; - - memcpy(&pkg.dif_control, &unf_xchg->dif_control, sizeof(struct unf_dif_control_info)); - pkg.dif_control.fcp_dl = unf_xchg->dif_control.fcp_dl; - pkg.transfer_len = unf_xchg->data_len; /* Pcie data transfer length */ - pkg.xchg_contex = unf_xchg; - pkg.qos_level = unf_xchg->qos_level; - scsi_cmnd_info = &xchg->scsi_cmnd_info; - pkg.entry_count = unf_xchg->scsi_cmnd_info.entry_cnt; - if (unf_xchg->data_direction == DMA_NONE || !scsi_cmnd_info->sgl) - pkg.entry_count = 0; - - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - unf_xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - pkg.private_data[PKG_PRIVATE_XCHG_VP_INDEX] = unf_lport->vp_index; - pkg.private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = unf_rport->rport_index; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = - unf_xchg->hotpooltag | UNF_HOTTAG_FLAG; - - unf_select_sq(unf_xchg, &pkg); - pkg.fcp_cmnd = &unf_xchg->fcp_cmnd; - pkg.frame_head.csctl_sid = unf_lport->nport_id; - pkg.frame_head.rctl_did = unf_rport->nport_id; - pkg.upper_cmd = unf_xchg->scsi_cmnd_info.scsi_cmnd; - - /* exch->fcp_rsp_id --->>> pkg->buffer_ptr */ - pkg.frame_head.oxid_rxid = ((u32)unf_xchg->oxid << (u32)UNF_SHIFT_16 | unf_xchg->rxid); - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "[info]LPort (0x%p), Nport ID(0x%x) RPort ID(0x%x) direction(0x%x) magic number(0x%x) IO to entry count(0x%x) hottag(0x%x)", - unf_lport, unf_lport->nport_id, unf_rport->nport_id, - xchg->data_direction, pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - pkg.entry_count, unf_xchg->hotpooltag); - - atomic_inc(&unf_rport->pending_io_cnt); - if (unf_rport->tape_support_needed && - (atomic_read(&unf_rport->pending_io_cnt) <= UNF_MAX_PENDING_IO_CNT)) { - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_xchg->io_state |= INI_IO_STATE_REC_TIMEOUT_WAIT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - scsi_cmnd_info->abort_time_out = scsi_cmnd_info->time_out; - scsi_cmnd_info->time_out = UNF_REC_TOV; - } - /* 3. add INI I/O timer if necessary */ - if (scsi_cmnd_info->time_out != 0) { - /* I/O inner timer, do not used at this time */ - unf_lport->xchg_mgr_temp.unf_xchg_add_timer(unf_xchg, - scsi_cmnd_info->time_out, UNF_TIMER_TYPE_REQ_IO); - } - - /* 4. 
R_Port state check */ - if (unlikely(unf_rport->lport_ini_state != UNF_PORT_STATE_LINKUP || - unf_rport->rp_state > UNF_RPORT_ST_READY)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[info]Port(0x%x) RPort(0x%p) NPortId(0x%x) inistate(0x%x): RPort state(0x%x) pUpperCmd(0x%p) is not ready", - unf_lport->port_id, unf_rport, unf_rport->nport_id, - unf_rport->lport_ini_state, unf_rport->rp_state, pkg.upper_cmd); - - result_size = unf_get_up_level_cmnd_errcode(scsi_cmnd_info->err_code_table, - scsi_cmnd_info->err_code_table_cout, - UNF_IO_INCOMPLETE); - scsi_cmnd_info->result = result_size; - - if (scsi_cmnd_info->time_out != 0) - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer(unf_xchg); - - unf_cm_free_xchg(unf_lport, unf_xchg); - - /* DID_IMM_RETRY */ - return RETURN_OK; - } else if (unf_rport->rp_state < UNF_RPORT_ST_READY) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[info]Port(0x%x) RPort(0x%p) NPortId(0x%x) inistate(0x%x): RPort state(0x%x) pUpperCmd(0x%p) is not ready", - unf_lport->port_id, unf_rport, unf_rport->nport_id, - unf_rport->lport_ini_state, unf_rport->rp_state, pkg.upper_cmd); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_xchg->io_state |= INI_IO_STATE_UPSEND_ERR; /* need retry */ - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - if (unlikely(scsi_cmnd_info->time_out != 0)) - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)unf_xchg); - - /* Host busy & need scsi retry */ - return UNF_RETURN_ERROR; - } - - /* 5. send scsi_cmnd to FC_LL Driver */ - if (unf_hardware_start_io(unf_lport, &pkg) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port (0x%x) pUpperCmd(0x%p) Hardware Send IO failed.", - unf_lport->port_id, pkg.upper_cmd); - - unf_release_esgls(unf_xchg); - - result_size = unf_get_up_level_cmnd_errcode(scsi_cmnd_info->err_code_table, - scsi_cmnd_info->err_code_table_cout, - UNF_IO_INCOMPLETE); - scsi_cmnd_info->result = result_size; - - if (scsi_cmnd_info->time_out != 0) - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer(unf_xchg); - - unf_cm_free_xchg(unf_lport, unf_xchg); - - /* SCSI_DONE */ - return RETURN_OK; - } - - return RETURN_OK; -} - -int unf_prefer_to_send_scsi_cmnd(struct unf_xchg *xchg) -{ - /* - * About INI_IO_STATE_DRABORT: - * 1. Set ABORT tag: Clean L_Port/V_Port Link Down I/O - * with: INI_busy_list, delay_list, delay_transfer_list, wait_list - * * - * 2. Set ABORT tag: for target session: - * with: INI_busy_list, delay_list, delay_transfer_list, wait_list - * a. R_Port remove - * b. Send PLOGI_ACC callback - * c. RCVD PLOGI - * d. RCVD LOGO - * * - * 3. if set ABORT: prevent send scsi_cmnd to target - */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - int ret = RETURN_OK; - ulong flags = 0; - - unf_lport = xchg->lport; - - unf_rport = xchg->rport; - if (unlikely(!unf_lport || !unf_rport)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%p) or RPort(0x%p) is NULL", unf_lport, unf_rport); - - /* if happened (never happen): need retry */ - return UNF_RETURN_ERROR; - } - - /* 1. 
inc ref_cnt to protect exchange */
-	ret = (int)unf_xchg_ref_inc(xchg, INI_SEND_CMND);
-	if (unlikely(ret != RETURN_OK)) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) exchg(%p) exception ref(%d)", unf_lport->port_id,
-			     xchg, atomic_read(&xchg->ref_cnt));
-		/* exchange exception, need retry */
-		spin_lock_irqsave(&xchg->xchg_state_lock, flags);
-		xchg->io_state |= INI_IO_STATE_UPSEND_ERR;
-		spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
-
-		/* INI_IO_STATE_UPSEND_ERR: Host busy --->>> need retry */
-		return UNF_RETURN_ERROR;
-	}
-
-	/* 2. Xchg Abort state check: Free EXCH if necessary */
-	spin_lock_irqsave(&xchg->xchg_state_lock, flags);
-	if (unlikely((xchg->io_state & INI_IO_STATE_UPABORT) ||
-		     (xchg->io_state & INI_IO_STATE_DRABORT))) {
-		/* Prevent to send: UP_ABORT/DRV_ABORT */
-		spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
-		xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_IMM_RETRY);
-		ret = RETURN_OK;
-
-		unf_xchg_ref_dec(xchg, INI_SEND_CMND);
-		unf_cm_free_xchg(unf_lport, xchg);
-
-		/*
-		 * Release exchange & return directly:
-		 * 1. FC LLDD rcvd ABTS before scsi_cmnd: do nothing
-		 * 2. INI_IO_STATE_UPABORT/INI_IO_STATE_DRABORT: discard this
-		 * cmnd directly
-		 */
-		return ret;
-	}
-	spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
-
-	/* 3. Send FCP_CMND to FC_LL Driver */
-	ret = unf_send_fcpcmnd(unf_lport, unf_rport, xchg);
-	if (unlikely(ret != RETURN_OK)) {
-		/* exchange exception, need retry */
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) send exchg(%p) hottag(0x%x) to Rport(%p) NPortID(0x%x) state(0x%x) scsi_id(0x%x) failed",
-			     unf_lport->port_id, xchg, xchg->hotpooltag, unf_rport,
-			     unf_rport->nport_id, unf_rport->rp_state, unf_rport->scsi_id);
-
-		spin_lock_irqsave(&xchg->xchg_state_lock, flags);
-
-		xchg->io_state |= INI_IO_STATE_UPSEND_ERR;
-		/* need retry */
-		spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
-		/* INI_IO_STATE_UPSEND_ERR: Host busy --->>> need retry */
-		unf_cm_free_xchg(unf_lport, xchg);
-	}
-
-	/* 4. dec ref_cnt */
-	unf_xchg_ref_dec(xchg, INI_SEND_CMND);
-
-	return ret;
-}
-
-struct unf_lport *unf_find_lport_by_scsi_cmd(struct unf_scsi_cmnd *scsi_cmnd)
-{
-	struct unf_lport *unf_lport = NULL;
-
-	/* cmd -->> L_Port */
-	unf_lport = (struct unf_lport *)UNF_GET_HOST_PORT_BY_CMND(scsi_cmnd);
-	if (unlikely(!unf_lport)) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-			     "[warn]Find Port by scsi_cmnd(0x%p) failed", scsi_cmnd);
-
-		/* cmnd -->> scsi_host_id -->> L_Port */
-		unf_lport = unf_find_lport_by_scsi_hostid(UNF_GET_SCSI_HOST_ID_BY_CMND(scsi_cmnd));
-	}
-
-	return unf_lport;
-}
-
-int unf_cm_queue_command(struct unf_scsi_cmnd *scsi_cmnd)
-{
-	/* SCSI Command --->>> FC FCP Command */
-	struct unf_lport *unf_lport = NULL;
-	struct unf_xchg *unf_xchg = NULL;
-	struct unf_rport *unf_rport = NULL;
-	struct unf_rport_scsi_id_image *scsi_image_table = NULL;
-	u32 cmnd_result = 0;
-	int ret = RETURN_OK;
-	ulong flags = 0;
-	u32 scsi_id = 0;
-	u32 exhg_mgr_type = UNF_XCHG_MGR_TYPE_RANDOM;
-
-	/* 1. Get L_Port */
-	unf_lport = unf_find_lport_by_scsi_cmd(scsi_cmnd);
-
-	/*
-	 * This covers the hot-insertion/removal and card-removal scenarios:
-	 * the LPort information is searched for by SCSI_HOST_ID. slave_alloc
-	 * is not invoked while LUNs have not been scanned, so the LPort
-	 * cannot be taken from the command itself and must be obtained from
-	 * the LPort linked list.
-	 * *
-	 * After FC link up, the first SCSI command is INQUIRY;
-	 * before INQUIRY, SCSI delivers slave_alloc.
- */ - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Find Port by scsi cmd(0x%p) failed", scsi_cmnd); - - /* find from ini_error_code_table1 */ - cmnd_result = unf_get_up_level_cmnd_errcode(scsi_cmnd->err_code_table, - scsi_cmnd->err_code_table_cout, - UNF_IO_NO_LPORT); - - /* DID_NOT_CONNECT & SCSI_DONE & RETURN_OK(0) & I/O error */ - unf_complete_cmnd(scsi_cmnd, cmnd_result); - return RETURN_OK; - } - - /* Get Local SCSI_Image_table & SCSI_ID */ - scsi_image_table = &unf_lport->rport_scsi_table; - scsi_id = scsi_cmnd->scsi_id; - - /* 2. L_Port State check */ - if (unlikely(unf_lport->port_removing || unf_lport->pcie_link_down)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) is removing(%d) or pcielinkdown(%d) and return with scsi_id(0x%x)", - unf_lport->port_id, unf_lport->port_removing, - unf_lport->pcie_link_down, UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd)); - - cmnd_result = unf_get_up_level_cmnd_errcode(scsi_cmnd->err_code_table, - scsi_cmnd->err_code_table_cout, - UNF_IO_NO_LPORT); - - UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, (cmnd_result >> UNF_SHIFT_16)); - - /* DID_NOT_CONNECT & SCSI_DONE & RETURN_OK(0) & I/O error */ - unf_complete_cmnd(scsi_cmnd, cmnd_result); - return RETURN_OK; - } - - /* 3. Get R_Port */ - unf_rport = unf_find_rport_by_scsi_id(unf_lport, scsi_cmnd->err_code_table, - scsi_cmnd->err_code_table_cout, - UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd), &cmnd_result); - if (unlikely(!unf_rport)) { - /* never happen: do not care */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) find RPort by scsi_id(0x%x) failed", - unf_lport->port_id, UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd)); - - UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, (cmnd_result >> UNF_SHIFT_16)); - - /* DID_NOT_CONNECT/DID_SOFT_ERROR & SCSI_DONE & RETURN_OK(0) & - * I/O error - */ - unf_complete_cmnd(scsi_cmnd, cmnd_result); - return RETURN_OK; - } - - /* 4. Can't get exchange & return host busy, retry by uplevel */ - unf_xchg = (struct unf_xchg *)unf_cm_get_free_xchg(unf_lport, - exhg_mgr_type << UNF_SHIFT_16 | UNF_XCHG_TYPE_INI); - if (unlikely(!unf_xchg)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[err]Port(0x%x) get free exchange for INI IO(0x%x) failed", - unf_lport->port_id, UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd)); - - /* NOTE: need scsi retry */ - return UNF_RETURN_ERROR; - } - - unf_xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_ERROR); - - /* 5. Save the SCSI CMND information in advance. */ - ret = unf_save_scsi_cmnd_to_xchg(unf_lport, unf_rport, unf_xchg, scsi_cmnd); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[err]Port(0x%x) save scsi_cmnd info(0x%x) to exchange failed", - unf_lport->port_id, UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd)); - - spin_lock_irqsave(&unf_xchg->xchg_state_lock, flags); - unf_xchg->io_state |= INI_IO_STATE_UPSEND_ERR; - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags); - - /* INI_IO_STATE_UPSEND_ERR: Don't Do SCSI_DONE, need retry I/O */ - unf_cm_free_xchg(unf_lport, unf_xchg); - - /* NOTE: need scsi retry */ - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]Get exchange(0x%p) hottag(0x%x) for Pcmd:%p,Cmdsn:0x%lx,WorldId:%d", - unf_xchg, unf_xchg->hotpooltag, scsi_cmnd->upper_cmnd, - (ulong)scsi_cmnd->cmnd_sn, scsi_cmnd->world_id); - /* 6. 
Send SCSI CMND */ - ret = unf_prefer_to_send_scsi_cmnd(unf_xchg); - - return ret; -} diff --git a/drivers/scsi/spfc/common/unf_io.h b/drivers/scsi/spfc/common/unf_io.h deleted file mode 100644 index d8e50eb8035ec5855e3a731730f4bd7262084722..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_io.h +++ /dev/null @@ -1,96 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_IO_H -#define UNF_IO_H - -#include "unf_type.h" -#include "unf_scsi_common.h" -#include "unf_exchg.h" -#include "unf_rport.h" - -#define UNF_MAX_TARGET_NUMBER 2048 -#define UNF_DEFAULT_MAX_LUN 0xFFFF -#define UNF_MAX_DMA_SEGS 0x400 -#define UNF_MAX_SCSI_CMND_LEN 16 -#define UNF_MAX_BUS_CHANNEL 0 -#define UNF_DMA_BOUNDARY 0xffffffffffffffff -#define UNF_MAX_CMND_PER_LUN 64 /* LUN max command */ -#define UNF_CHECK_LUN_ID_MATCH(lun_id, raw_lun_id, scsi_id, xchg) \ - (((lun_id) == (raw_lun_id) || (lun_id) == INVALID_VALUE64) && \ - ((scsi_id) == (xchg)->scsi_id)) - -#define NO_SENSE 0x00 -#define RECOVERED_ERROR 0x01 -#define NOT_READY 0x02 -#define MEDIUM_ERROR 0x03 -#define HARDWARE_ERROR 0x04 -#define ILLEGAL_REQUEST 0x05 -#define UNIT_ATTENTION 0x06 -#define DATA_PROTECT 0x07 -#define BLANK_CHECK 0x08 -#define COPY_ABORTED 0x0a -#define ABORTED_COMMAND 0x0b -#define VOLUME_OVERFLOW 0x0d -#define MISCOMPARE 0x0e - -#define SENSE_DATA_RESPONSE_CODE 0x70 -#define ADDITINONAL_SENSE_LEN 0x7 - -extern u32 sector_size_flag; - -#define UNF_GET_SCSI_HOST_ID_BY_CMND(cmd) ((cmd)->scsi_host_id) -#define UNF_GET_SCSI_ID_BY_CMND(cmd) ((cmd)->scsi_id) -#define UNF_GET_HOST_PORT_BY_CMND(cmd) ((cmd)->drv_private) -#define UNF_GET_FCP_CMND(cmd) ((cmd)->pcmnd[ARRAY_INDEX_0]) -#define UNF_GET_DATA_LEN(cmd) ((cmd)->transfer_len) -#define UNF_GET_DATA_DIRECTION(cmd) ((cmd)->data_direction) - -#define UNF_GET_HOST_CMND(cmd) ((cmd)->upper_cmnd) -#define UNF_GET_CMND_DONE_FUNC(cmd) ((cmd)->done) -#define UNF_GET_UP_LEVEL_CMND_DONE(cmd) ((cmd)->uplevel_done) -#define UNF_GET_SGL_ENTRY_BUF_FUNC(cmd) ((cmd)->unf_ini_get_sgl_entry) -#define UNF_GET_SENSE_BUF_ADDR(cmd) ((cmd)->sense_buf) -#define UNF_GET_ERR_CODE_TABLE(cmd) ((cmd)->err_code_table) -#define UNF_GET_ERR_CODE_TABLE_COUNT(cmd) ((cmd)->err_code_table_cout) - -#define UNF_SET_HOST_CMND(cmd, host_cmd) ((cmd)->upper_cmnd = (host_cmd)) -#define UNF_SER_CMND_DONE_FUNC(cmd, pfn) ((cmd)->done = (pfn)) -#define UNF_SET_UP_LEVEL_CMND_DONE_FUNC(cmd, pfn) ((cmd)->uplevel_done = (pfn)) - -#define UNF_SET_RESID(cmd, uiresid) ((cmd)->resid = (uiresid)) -#define UNF_SET_CMND_RESULT(cmd, uiresult) ((cmd)->result = ((int)(uiresult))) - -#define UNF_DONE_SCSI_CMND(cmd) ((cmd)->done(cmd)) - -#define UNF_GET_CMND_SGL(cmd) ((cmd)->sgl) -#define UNF_INI_GET_DIF_SGL(cmd) ((cmd)->dif_control.dif_sgl) - -u32 unf_ini_scsi_completed(void *lport, struct unf_frame_pkg *pkg); -u32 unf_ini_get_sgl_entry(void *pkg, char **buf, u32 *buf_len); -u32 unf_ini_get_dif_sgl_entry(void *pkg, char **buf, u32 *buf_len); -void unf_complete_cmnd(struct unf_scsi_cmnd *scsi_cmnd, u32 result_size); -void unf_done_ini_xchg(struct unf_xchg *xchg); -u32 unf_tmf_timeout_recovery_special(void *rport, void *xchg); -u32 unf_tmf_timeout_recovery_default(void *rport, void *xchg); -void unf_abts_timeout_recovery_default(void *rport, void *xchg); -int unf_cm_queue_command(struct unf_scsi_cmnd *scsi_cmnd); -int unf_cm_eh_abort_handler(struct unf_scsi_cmnd *scsi_cmnd); -int unf_cm_eh_device_reset_handler(struct unf_scsi_cmnd *scsi_cmnd); -int 
unf_cm_target_reset_handler(struct unf_scsi_cmnd *scsi_cmnd); -int unf_cm_bus_reset_handler(struct unf_scsi_cmnd *scsi_cmnd); -int unf_cm_virtual_reset_handler(struct unf_scsi_cmnd *scsi_cmnd); -struct unf_rport *unf_find_rport_by_scsi_id(struct unf_lport *lport, - struct unf_ini_error_code *errcode_table, - u32 errcode_table_count, - u32 scsi_id, u32 *scsi_result); -u32 UNF_IOExchgDelayProcess(struct unf_lport *lport, struct unf_xchg *xchg); -struct unf_lport *unf_find_lport_by_scsi_cmd(struct unf_scsi_cmnd *scsi_cmnd); -int unf_send_scsi_mgmt_cmnd(struct unf_xchg *xchg, struct unf_lport *lport, - struct unf_rport *rport, - struct unf_scsi_cmnd *scsi_cmnd, - enum unf_task_mgmt_cmd task_mgnt_cmd_type); -void unf_tmf_abnormal_recovery(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg); - -#endif diff --git a/drivers/scsi/spfc/common/unf_io_abnormal.c b/drivers/scsi/spfc/common/unf_io_abnormal.c deleted file mode 100644 index fece7aa5f44183f88559275be630cb49af307a14..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_io_abnormal.c +++ /dev/null @@ -1,986 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_io_abnormal.h" -#include "unf_log.h" -#include "unf_scsi_common.h" -#include "unf_rport.h" -#include "unf_io.h" -#include "unf_portman.h" -#include "unf_service.h" - -static int unf_send_abts_success(struct unf_lport *lport, struct unf_xchg *xchg, - struct unf_scsi_cmnd *scsi_cmnd, - u32 time_out_value) -{ - bool need_wait_marker = true; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - u32 scsi_id = 0; - u32 return_value = 0; - ulong xchg_flag = 0; - - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - need_wait_marker = (xchg->abts_state & MARKER_STS_RECEIVED) ? 
false : true;
-	spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag);
-
-	if (need_wait_marker) {
-		if (down_timeout(&xchg->task_sema, (s64)msecs_to_jiffies(time_out_value))) {
-			FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-				     "[warn]Port(0x%x) recv abts marker timeout, Exch(0x%p) OX_ID(0x%x 0x%x) RX_ID(0x%x)",
-				     lport->port_id, xchg, xchg->oxid,
-				     xchg->hotpooltag, xchg->rxid);
-
-			/* Cancel abts rsp timer when sema timeout */
-			lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg);
-
-			/* Clear the INI_IO_STATE_UPABORT flag and process
-			 * the I/O in TMF
-			 */
-			spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag);
-			xchg->io_state &= ~INI_IO_STATE_UPABORT;
-			xchg->io_state |= INI_IO_STATE_TMF_ABORT;
-			spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag);
-
-			return UNF_SCSI_ABORT_FAIL;
-		}
-	} else {
-		xchg->ucode_abts_state = UNF_IO_SUCCESS;
-	}
-
-	scsi_image_table = &lport->rport_scsi_table;
-	scsi_id = scsi_cmnd->scsi_id;
-
-	spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag);
-	if (xchg->ucode_abts_state == UNF_IO_SUCCESS ||
-	    xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING) {
-		spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag);
-
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR,
-			     "[info]Port(0x%x) Send ABTS succeed and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)",
-			     lport->port_id, xchg, xchg->oxid, xchg->rxid, xchg->ucode_abts_state);
-		return_value = DID_RESET;
-		UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, return_value);
-		unf_complete_cmnd(scsi_cmnd, DID_RESET << UNF_SHIFT_16);
-		return UNF_SCSI_ABORT_SUCCESS;
-	}
-
-	xchg->io_state &= ~INI_IO_STATE_UPABORT;
-	xchg->io_state |= INI_IO_STATE_TMF_ABORT;
-
-	spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag);
-
-	/* Cancel abts rsp timer before returning fail */
-	lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg);
-
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN,
-		     "[warn]Port(0x%x) send ABTS failed. Exch(0x%p) oxid(0x%x) hot_tag(0x%x) ret(0x%x) xchg->io_state (0x%x)",
-		     lport->port_id, xchg, xchg->oxid, xchg->hotpooltag,
-		     xchg->scsi_cmnd_info.result, xchg->io_state);
-
-	/* return fail and then enter TMF */
-	return UNF_SCSI_ABORT_FAIL;
-}
-
-static int unf_ini_abort_cmnd(struct unf_lport *lport, struct unf_xchg *xchg,
-			      struct unf_scsi_cmnd *scsi_cmnd)
-{
-	/*
-	 * About INI_IO_STATE_UPABORT:
-	 * *
-	 * 1. Check: L_Port destroy
-	 * 2. Check: I/O XCHG timeout
-	 * 3. Set ABORT: send ABTS
-	 * 4. Set ABORT: LUN reset
-	 * 5. Set ABORT: Target reset
-	 * 6. Check: Prevent to send I/O to target
-	 * (unf_prefer_to_send_scsi_cmnd)
-	 * 7. Check: Done INI XCHG --->>> do not call scsi_done, return directly
-	 * 8. Check: INI SCSI Complete --->>> do not call scsi_done, return
-	 * directly
-	 */
-#define UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT (2000)
-
-	struct unf_lport *unf_lport = NULL;
-	struct unf_rport *unf_rport = NULL;
-	ulong rport_flag = 0;
-	ulong xchg_flag = 0;
-	struct unf_rport_scsi_id_image *scsi_image_table = NULL;
-	u32 scsi_id = 0;
-	u32 time_out_value = (u32)UNF_WAIT_SEM_TIMEOUT;
-	u32 return_value = 0;
-
-	FC_CHECK_RETURN_VALUE(lport, UNF_SCSI_ABORT_FAIL);
-	unf_lport = lport;
-
-	/* 1. Xchg State Set: INI_IO_STATE_UPABORT */
-	spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag);
-	xchg->io_state |= INI_IO_STATE_UPABORT;
-	unf_rport = xchg->rport;
-	spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag);
-
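Editorial aside: the UPABORT bit set in step 1 above is one half of a handshake with the send path in unf_prefer_to_send_scsi_cmnd(), which re-checks the bit under the same state lock before handing a command to the low-level driver. A condensed sketch of the two sides, pulled from the surrounding driver code with error handling trimmed:

	/* Abort side: mark the exchange under its state lock. */
	spin_lock_irqsave(&xchg->xchg_state_lock, flags);
	xchg->io_state |= INI_IO_STATE_UPABORT;
	spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);

	/* Send side: re-check the mark under the same lock before sending. */
	spin_lock_irqsave(&xchg->xchg_state_lock, flags);
	if (xchg->io_state & (INI_IO_STATE_UPABORT | INI_IO_STATE_DRABORT)) {
		spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
		/* being aborted: discard instead of sending */
	} else {
		spin_unlock_irqrestore(&xchg->xchg_state_lock, flags);
		/* safe to hand the FCP_CMND to the low-level driver */
	}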
-	/* 2. R_Port check */
-	if (unlikely(!unf_rport)) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) send ABTS but no RPort, OX_ID(0x%x) RX_ID(0x%x)",
-			     unf_lport->port_id, xchg->oxid, xchg->rxid);
-
-		return UNF_SCSI_ABORT_SUCCESS;
-	}
-
-	spin_lock_irqsave(&unf_rport->rport_state_lock, rport_flag);
-	if (unlikely(unf_rport->rp_state != UNF_RPORT_ST_READY)) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) RPort state(0x%x) is not ready but ABTS is sent anyway, exchange(0x%p) tag(0x%x)",
-			     unf_lport->port_id, unf_rport->rp_state, xchg, xchg->hotpooltag);
-
-		/*
-		 * Important: send ABTS anyway & update the timer
-		 * Purpose: only used to release the chip (uCode) resource,
-		 * then continue
-		 */
-		time_out_value = UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT;
-	}
-	spin_unlock_irqrestore(&unf_rport->rport_state_lock, rport_flag);
-
-	/* 3. L_Port State check */
-	if (unlikely(unf_lport->port_removing)) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) is removing", unf_lport->port_id);
-
-		xchg->io_state &= ~INI_IO_STATE_UPABORT;
-
-		return UNF_SCSI_ABORT_FAIL;
-	}
-
-	scsi_image_table = &unf_lport->rport_scsi_table;
-	scsi_id = scsi_cmnd->scsi_id;
-
-	/* If pcie linkdown, complete this io and flush all io */
-	if (unlikely(unf_lport->pcie_link_down)) {
-		return_value = DID_RESET;
-		UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, return_value);
-		unf_complete_cmnd(scsi_cmnd, DID_RESET << UNF_SHIFT_16);
-		unf_free_lport_all_xchg(lport);
-		return UNF_SCSI_ABORT_SUCCESS;
-	}
-
-	FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT,
-		     "[abort]Port(0x%x) Exchg(0x%p) delay(%llu) SID(0x%x) DID(0x%x) wwpn(0x%llx) hottag(0x%x) scsi_id(0x%x) lun_id(0x%x) cmdsn(0x%llx) Ini:%p",
-		     unf_lport->port_id, xchg,
-		     (u64)jiffies_to_msecs(jiffies) - (u64)jiffies_to_msecs(xchg->alloc_jif),
-		     xchg->sid, xchg->did, unf_rport->port_name, xchg->hotpooltag,
-		     scsi_cmnd->scsi_id, (u32)scsi_cmnd->raw_lun_id, scsi_cmnd->cmnd_sn,
-		     scsi_cmnd->pinitiator);
-
-	/* Init abts marker semaphore */
-	sema_init(&xchg->task_sema, 0);
-
-	if (xchg->scsi_cmnd_info.time_out != 0)
-		unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer(xchg);
-
-	lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, (ulong)UNF_WAIT_ABTS_RSP_TIMEOUT,
-						UNF_TIMER_TYPE_INI_ABTS);
-
-	/* 4. Send INI ABTS CMND */
-	if (unf_send_abts(unf_lport, xchg) != RETURN_OK) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) Send ABTS failed. Exch(0x%p) hottag(0x%x)",
-			     unf_lport->port_id, xchg, xchg->hotpooltag);
-
-		lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg);
-
-		spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag);
-		xchg->io_state &= ~INI_IO_STATE_UPABORT;
-		xchg->io_state |= INI_IO_STATE_TMF_ABORT;
-
-		spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag);
-
-		return UNF_SCSI_ABORT_FAIL;
-	}
-
-	return unf_send_abts_success(unf_lport, xchg, scsi_cmnd, time_out_value);
-}
-
-static void unf_flush_ini_resp_que(struct unf_lport *lport)
-{
-	FC_CHECK_RETURN_VOID(lport);
-
-	if (lport->low_level_func.service_op.unf_flush_ini_resp_que)
-		(void)lport->low_level_func.service_op.unf_flush_ini_resp_que(lport->fc_port);
-}
-
-int unf_cm_eh_abort_handler(struct unf_scsi_cmnd *scsi_cmnd)
-{
-	/*
-	 * SCSI ABORT Command --->>> FC ABTS Command
-	 * If return ABORT_FAIL, then enter TMF process
-	 */
-	struct unf_lport *unf_lport = NULL;
-	struct unf_xchg *unf_xchg = NULL;
-	struct unf_rport *unf_rport = NULL;
-	struct unf_lport *xchg_lport = NULL;
-	int ret = UNF_SCSI_ABORT_SUCCESS;
-	ulong flag = 0;
-
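Editorial aside: the ABTS marker wait above (sema_init() plus down_timeout() in unf_ini_abort_cmnd()/unf_send_abts_success()) is a plain kernel semaphore used as a one-shot completion. A minimal self-contained illustration of that idiom, with hypothetical demo_* names; down_timeout() returns 0 on success and -ETIME on timeout:

	#include <linux/semaphore.h>
	#include <linux/jiffies.h>

	static struct semaphore demo_sema;

	/* Completion path (e.g. marker received): wake the waiter. */
	static void demo_on_event(void)
	{
		up(&demo_sema);
	}

	/* Request path: arm the semaphore, issue the request, then block. */
	static int demo_send_and_wait(unsigned int timeout_ms)
	{
		sema_init(&demo_sema, 0);	/* locked: nothing signalled yet */
		/* ... issue the request to the hardware here ... */
		return down_timeout(&demo_sema, msecs_to_jiffies(timeout_ms));
	}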
-	/* 1. Get L_Port: Point to Scsi_host */
-	unf_lport = unf_find_lport_by_scsi_cmd(scsi_cmnd);
-	if (!unf_lport) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Can't find port by scsi host id(0x%x)",
-			     UNF_GET_SCSI_HOST_ID_BY_CMND(scsi_cmnd));
-		return UNF_SCSI_ABORT_FAIL;
-	}
-
-	/* 2. find target Xchg for INI Abort CMND */
-	unf_xchg = unf_cm_lookup_xchg_by_cmnd_sn(unf_lport, scsi_cmnd->cmnd_sn,
-						 scsi_cmnd->world_id,
-						 scsi_cmnd->pinitiator);
-	if (unlikely(!unf_xchg)) {
-		FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_WARN,
-			     "[warn]Port(0x%x) can't find exchange by Cmdsn(0x%lx), Ini:%p",
-			     unf_lport->port_id, (ulong)scsi_cmnd->cmnd_sn,
-			     scsi_cmnd->pinitiator);
-
-		unf_flush_ini_resp_que(unf_lport);
-
-		return UNF_SCSI_ABORT_SUCCESS;
-	}
-
-	/* 3. increase ref_cnt to protect exchange */
-	ret = (int)unf_xchg_ref_inc(unf_xchg, INI_EH_ABORT);
-	if (unlikely(ret != RETURN_OK)) {
-		unf_flush_ini_resp_que(unf_lport);
-
-		return UNF_SCSI_ABORT_SUCCESS;
-	}
-
-	scsi_cmnd->upper_cmnd = unf_xchg->scsi_cmnd_info.scsi_cmnd;
-	unf_xchg->debug_hook = true;
-
-	/* 4. Exchange L_Port/R_Port Get & check */
-	spin_lock_irqsave(&unf_xchg->xchg_state_lock, flag);
-	xchg_lport = unf_xchg->lport;
-	unf_rport = unf_xchg->rport;
-	spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flag);
-
-	if (unlikely(!xchg_lport || !unf_rport)) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Exchange(0x%p)'s L_Port or R_Port is NULL, state(0x%x)",
-			     unf_xchg, unf_xchg->io_state);
-
-		unf_xchg_ref_dec(unf_xchg, INI_EH_ABORT);
-
-		if (!xchg_lport)
-			/* for L_Port */
-			return UNF_SCSI_ABORT_FAIL;
-		/* for R_Port */
-		return UNF_SCSI_ABORT_SUCCESS;
-	}
-
-	/* 5. Send INI Abort Cmnd */
-	ret = unf_ini_abort_cmnd(xchg_lport, unf_xchg, scsi_cmnd);
-
-	/* 6. decrease exchange ref_cnt */
-	unf_xchg_ref_dec(unf_xchg, INI_EH_ABORT);
-
-	return ret;
-}
-
-u32 unf_tmf_timeout_recovery_default(void *rport, void *xchg)
-{
-	struct unf_lport *unf_lport = NULL;
-	ulong flag = 0;
-	struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg;
-	struct unf_rport *unf_rport = (struct unf_rport *)rport;
-
-	unf_lport = unf_xchg->lport;
-	FC_CHECK_RETURN_VALUE(unf_lport, UNF_RETURN_ERROR);
-
-	spin_lock_irqsave(&unf_rport->rport_state_lock, flag);
-	unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO);
-	spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag);
-
-	unf_rport_enter_logo(unf_lport, unf_rport);
-
-	return RETURN_OK;
-}
-
-void unf_abts_timeout_recovery_default(void *rport, void *xchg)
-{
-	struct unf_lport *unf_lport = NULL;
-	ulong flag = 0;
-	ulong flags = 0;
-	struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg;
-	struct unf_rport *unf_rport = (struct unf_rport *)rport;
-
-	unf_lport = unf_xchg->lport;
-	FC_CHECK_RETURN_VOID(unf_lport);
-
-	spin_lock_irqsave(&unf_xchg->xchg_state_lock, flags);
-	if (INI_IO_STATE_DONE & unf_xchg->io_state) {
-		spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags);
-
-		return;
-	}
-	spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags);
-
-	if (unf_xchg->rport_bind_jifs != unf_rport->rport_alloc_jifs)
-		return;
-
-	spin_lock_irqsave(&unf_rport->rport_state_lock, flag);
-	unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO);
-	spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag);
-
-	unf_rport_enter_logo(unf_lport, unf_rport);
-}
-
-u32 unf_tmf_timeout_recovery_special(void *rport, void *xchg)
-{
-	/* Do port reset or R_Port LOGO */
-	int ret = UNF_RETURN_ERROR;
-	struct unf_lport *unf_lport = NULL;
-	struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg;
-	struct unf_rport *unf_rport = (struct unf_rport
*)rport; - - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(unf_xchg->lport, UNF_RETURN_ERROR); - - unf_lport = unf_xchg->lport->root_lport; - FC_CHECK_RETURN_VALUE(unf_lport, UNF_RETURN_ERROR); - - /* 1. TMF response timeout & Marker STS timeout */ - if (!(unf_xchg->tmf_state & - (MARKER_STS_RECEIVED | TMF_RESPONSE_RECEIVED))) { - /* TMF timeout & marker timeout */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive marker status timeout and do recovery", - unf_lport->port_id); - - /* Do port reset */ - ret = unf_cm_reset_port(unf_lport->port_id); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) do reset failed", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; - } - - /* 2. default case: Do LOGO process */ - unf_tmf_timeout_recovery_default(unf_rport, unf_xchg); - - return RETURN_OK; -} - -void unf_tmf_abnormal_recovery(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - /* - * for device(lun)/target(session) reset: - * Do port reset or R_Port LOGO - */ - if (lport->unf_tmf_abnormal_recovery) - lport->unf_tmf_abnormal_recovery((void *)rport, (void *)xchg); -} - -int unf_cm_eh_device_reset_handler(struct unf_scsi_cmnd *scsi_cmnd) -{ - /* SCSI Device/LUN Reset Command --->>> FC LUN/Device Reset Command */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - u32 cmnd_result = 0; - int ret = SUCCESS; - - FC_CHECK_RETURN_VALUE(scsi_cmnd, FAILED); - FC_CHECK_RETURN_VALUE(scsi_cmnd->lun_id, FAILED); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[event]Enter device/LUN reset handler"); - - /* 1. Get L_Port */ - unf_lport = unf_find_lport_by_scsi_cmd(scsi_cmnd); - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Can't find port by scsi_host_id(0x%x)", - UNF_GET_SCSI_HOST_ID_BY_CMND(scsi_cmnd)); - - return FAILED; - } - - /* 2. L_Port State checking */ - if (unlikely(unf_lport->port_removing)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%p) is removing", unf_lport); - - return FAILED; - } - - /* - * 3. Get R_Port: no rport is found or rport is not ready,return ok - * from: L_Port -->> rport_scsi_table (image table) -->> - * rport_info_table - */ - unf_rport = unf_find_rport_by_scsi_id(unf_lport, scsi_cmnd->err_code_table, - scsi_cmnd->err_code_table_cout, - UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd), &cmnd_result); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) Can't find rport by scsi_id(0x%x)", - unf_lport->port_id, UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd)); - - return SUCCESS; - } - - /* - * 4. Set the I/O of the corresponding LUN to abort. - * * - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - unf_cm_xchg_abort_by_lun(unf_lport, unf_rport, *((u64 *)scsi_cmnd->lun_id), NULL, false); - - /* 5. R_Port state check */ - if (unlikely(unf_rport->rp_state != UNF_RPORT_ST_READY)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) state(0x%x) SCSI Command(0x%p), rport is not ready", - unf_lport->port_id, unf_rport->nport_id, - unf_rport->rp_state, scsi_cmnd); - - return SUCCESS; - } - - /* 6. 
Get & inc ref_cnt free Xchg for Device reset */
-	unf_xchg = (struct unf_xchg *)unf_cm_get_free_xchg(unf_lport, UNF_XCHG_TYPE_INI);
-	if (unlikely(!unf_xchg)) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%p) can't get free exchange", unf_lport);
-
-		return FAILED;
-	}
-
-	/* increase ref_cnt for protecting exchange */
-	ret = (int)unf_xchg_ref_inc(unf_xchg, INI_EH_DEVICE_RESET);
-	FC_CHECK_RETURN_VALUE((ret == RETURN_OK), FAILED);
-
-	/* 7. Send Device/LUN Reset to Low level */
-	ret = unf_send_scsi_mgmt_cmnd(unf_xchg, unf_lport, unf_rport, scsi_cmnd,
-				      UNF_FCP_TM_LOGICAL_UNIT_RESET);
-	if (unlikely(ret == FAILED)) {
-		/*
-		 * Do port reset or R_Port LOGO:
-		 * 1. FAILED: send failed
-		 * 2. FAILED: semaphore timeout
-		 * 3. SUCCESS: rcvd rsp & semaphore has been woken up
-		 */
-		unf_tmf_abnormal_recovery(unf_lport, unf_rport, unf_xchg);
-	}
-
-	/*
-	 * 8. Release resource immediately if necessary
-	 * NOTE: here, semaphore timeout or rcvd rsp (semaphore has been woken
-	 * up)
-	 */
-	if (likely(!unf_lport->port_removing || unf_lport->root_lport != unf_lport))
-		unf_cm_free_xchg(unf_xchg->lport, unf_xchg);
-
-	/* decrease ref_cnt */
-	unf_xchg_ref_dec(unf_xchg, INI_EH_DEVICE_RESET);
-
-	return SUCCESS;
-}
-
-int unf_cm_target_reset_handler(struct unf_scsi_cmnd *scsi_cmnd)
-{
-	/* SCSI Target Reset Command --->>> FC Session Reset/Delete Command */
-	struct unf_lport *unf_lport = NULL;
-	struct unf_rport *unf_rport = NULL;
-	struct unf_xchg *unf_xchg = NULL;
-	u32 cmnd_result = 0;
-	int ret = SUCCESS;
-
-	FC_CHECK_RETURN_VALUE(scsi_cmnd, FAILED);
-	FC_CHECK_RETURN_VALUE(scsi_cmnd->lun_id, FAILED);
-
-	/* 1. Get L_Port */
-	unf_lport = unf_find_lport_by_scsi_cmd(scsi_cmnd);
-	if (!unf_lport) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Can't find port by scsi_host_id(0x%x)",
-			     UNF_GET_SCSI_HOST_ID_BY_CMND(scsi_cmnd));
-
-		return FAILED;
-	}
-
-	/* 2. L_Port State check */
-	if (unlikely(unf_lport->port_removing)) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%p) is removing", unf_lport);
-
-		return FAILED;
-	}
-
-	/*
-	 * 3. Get R_Port: if no rport is found or the rport is not ready, return OK
-	 * from: L_Port -->> rport_scsi_table (image table) -->>
-	 * rport_info_table
-	 */
-	unf_rport = unf_find_rport_by_scsi_id(unf_lport, scsi_cmnd->err_code_table,
-					      scsi_cmnd->err_code_table_cout,
-					      UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd), &cmnd_result);
-	if (unlikely(!unf_rport)) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Can't find rport by scsi_id(0x%x)",
-			     UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd));
-
-		return SUCCESS;
-	}
-
-	/*
-	 * 4. set UP_ABORT on Target IO and Session IO
-	 * *
-	 * LUN Reset: set UP_ABORT tag, with:
-	 * INI_Busy_list, IO_Wait_list,
-	 * IO_Delay_list, IO_Delay_transfer_list
-	 */
-	unf_cm_xchg_abort_by_session(unf_lport, unf_rport);
-
-	/* 5. R_Port state check */
-	if (unlikely(unf_rport->rp_state != UNF_RPORT_ST_READY)) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) RPort(0x%x) state(0x%x) is not ready, SCSI Command(0x%p)",
-			     unf_lport->port_id, unf_rport->nport_id,
-			     unf_rport->rp_state, scsi_cmnd);
-
-		return SUCCESS;
-	}
-
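Editorial aside: unf_cm_eh_abort_handler(), unf_cm_eh_device_reset_handler(), unf_cm_target_reset_handler() and unf_cm_bus_reset_handler() are SCSI error-handler entry points; the glue binding them to the SCSI midlayer lives elsewhere in the driver. A hypothetical sketch of that wiring (the spfc_eh_* wrappers, which would translate struct scsi_cmnd into struct unf_scsi_cmnd, are assumed and not actual driver code):

	/* Hypothetical glue; the real scsi_host_template lives elsewhere. */
	static struct scsi_host_template demo_spfc_sht = {
		.eh_abort_handler        = spfc_eh_abort,        /* -> unf_cm_eh_abort_handler() */
		.eh_device_reset_handler = spfc_eh_device_reset, /* -> unf_cm_eh_device_reset_handler() */
		.eh_target_reset_handler = spfc_eh_target_reset, /* -> unf_cm_target_reset_handler() */
		.eh_bus_reset_handler    = spfc_eh_bus_reset,    /* -> unf_cm_bus_reset_handler() */
	};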
-	/* 6. Get free Xchg for Target Reset CMND */
-	unf_xchg = (struct unf_xchg *)unf_cm_get_free_xchg(unf_lport, UNF_XCHG_TYPE_INI);
-	if (unlikely(!unf_xchg)) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%p) can't get free exchange", unf_lport);
-
-		return FAILED;
-	}
-
-	/* increase ref_cnt to protect exchange */
-	ret = (int)unf_xchg_ref_inc(unf_xchg, INI_EH_DEVICE_RESET);
-	FC_CHECK_RETURN_VALUE((ret == RETURN_OK), FAILED);
-
-	/* 7. Send Target Reset Cmnd to low-level */
-	ret = unf_send_scsi_mgmt_cmnd(unf_xchg, unf_lport, unf_rport, scsi_cmnd,
-				      UNF_FCP_TM_TARGET_RESET);
-	if (unlikely(ret == FAILED)) {
-		/*
-		 * Do port reset or R_Port LOGO:
-		 * 1. FAILED: send failed
-		 * 2. FAILED: semaphore timeout
-		 * 3. SUCCESS: rcvd rsp & semaphore has been woken up
-		 */
-		unf_tmf_abnormal_recovery(unf_lport, unf_rport, unf_xchg);
-	}
-
-	/*
-	 * 8. Release resource immediately if necessary
-	 * NOTE: here, semaphore timeout or rcvd rsp (semaphore has been woken
-	 * up)
-	 */
-	if (likely(!unf_lport->port_removing || unf_lport->root_lport != unf_lport))
-		unf_cm_free_xchg(unf_xchg->lport, unf_xchg);
-
-	/* decrease exchange ref_cnt */
-	unf_xchg_ref_dec(unf_xchg, INI_EH_DEVICE_RESET);
-
-	return SUCCESS;
-}
-
-int unf_cm_bus_reset_handler(struct unf_scsi_cmnd *scsi_cmnd)
-{
-	/* SCSI BUS Reset Command --->>> FC Port Reset Command */
-	struct unf_lport *unf_lport = NULL;
-	int cmnd_result = 0;
-
-	/* 1. Get L_Port */
-	unf_lport = unf_find_lport_by_scsi_cmd(scsi_cmnd);
-	if (!unf_lport) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Can't find port by scsi_host_id(0x%x)",
-			     UNF_GET_SCSI_HOST_ID_BY_CMND(scsi_cmnd));
-
-		return FAILED;
-	}
-
-	FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT,
-		     "[event]Do port reset with scsi_bus_reset");
-
-	cmnd_result = unf_cm_reset_port(unf_lport->port_id);
-	if (unlikely(cmnd_result == UNF_RETURN_ERROR))
-		return FAILED;
-	else
-		return SUCCESS;
-}
-
-void unf_process_scsi_mgmt_result(struct unf_frame_pkg *pkg,
-				  struct unf_xchg *xchg)
-{
-	u8 *rsp_info = NULL;
-	u8 rsp_code = 0;
-	u32 code_index = 0;
-
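	/*
	 * Editor's note, per FCP-4 Table 26: FCP_RSP_INFO is 8 bytes, with
	 * bytes 0-2 reserved, RSP_CODE in byte 3 and bytes 4-7 reserved,
	 * which is why a 32-bit byte swap moves RSP_CODE to offset 0
	 * (code_index). The two accepted values checked below,
	 * UNF_FCP_TM_RSP_COMPLETE and UNF_FCP_TM_RSP_SUCCEED, correspond to
	 * the "TMF complete" and "TMF succeeded" response codes.
	 */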
wake up semaphore - */ - FC_CHECK_RETURN_VOID(pkg); - FC_CHECK_RETURN_VOID(xchg); - - xchg->tmf_state |= TMF_RESPONSE_RECEIVED; - - if (UNF_GET_LL_ERR(pkg) != UNF_IO_SUCCESS || - pkg->unf_rsp_pload_bl.length > UNF_RESPONE_DATA_LEN) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Send scsi manage command failed with error code(0x%x) resp len(0x%x)", - UNF_GET_LL_ERR(pkg), pkg->unf_rsp_pload_bl.length); - - xchg->scsi_cmnd_info.result = UNF_IO_FAILED; - - /* wakeup semaphore & return */ - up(&xchg->task_sema); - - return; - } - - rsp_info = pkg->unf_rsp_pload_bl.buffer_ptr; - if (rsp_info && pkg->unf_rsp_pload_bl.length != 0) { - /* change to little end if necessary */ - if (pkg->byte_orders & UNF_BIT_3) - unf_big_end_to_cpu(rsp_info, pkg->unf_rsp_pload_bl.length); - } - - if (!rsp_info) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]FCP response data pointer is NULL with Xchg TAG(0x%x)", - xchg->hotpooltag); - - xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS; - - /* wakeup semaphore & return */ - up(&xchg->task_sema); - - return; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]FCP response data length(0x%x), RSP_CODE(0x%x:%x:%x:%x:%x:%x:%x:%x)", - pkg->unf_rsp_pload_bl.length, rsp_info[ARRAY_INDEX_0], - rsp_info[ARRAY_INDEX_1], rsp_info[ARRAY_INDEX_2], - rsp_info[ARRAY_INDEX_3], rsp_info[ARRAY_INDEX_4], - rsp_info[ARRAY_INDEX_5], rsp_info[ARRAY_INDEX_6], - rsp_info[ARRAY_INDEX_7]); - - rsp_code = rsp_info[code_index]; - if (rsp_code == UNF_FCP_TM_RSP_COMPLETE || rsp_code == UNF_FCP_TM_RSP_SUCCEED) - xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS; - else - xchg->scsi_cmnd_info.result = UNF_IO_FAILED; - - /* wakeup semaphore & return */ - up(&xchg->task_sema); -} - -static void unf_build_task_mgmt_fcp_cmnd(struct unf_fcp_cmnd *fcp_cmnd, - struct unf_scsi_cmnd *scsi_cmnd, - enum unf_task_mgmt_cmd task_mgmt) -{ - FC_CHECK_RETURN_VOID(fcp_cmnd); - FC_CHECK_RETURN_VOID(scsi_cmnd); - - unf_big_end_to_cpu((void *)scsi_cmnd->lun_id, UNF_FCP_LUNID_LEN_8); - (*(u64 *)(scsi_cmnd->lun_id)) >>= UNF_SHIFT_8; - memcpy(fcp_cmnd->lun, scsi_cmnd->lun_id, sizeof(fcp_cmnd->lun)); - - /* - * If the TASK MANAGEMENT FLAGS field is set to a nonzero value, - * the FCP_CDB field, the FCP_DL field, the TASK ATTRIBUTE field, - * the RDDATA bit, and the WRDATA bit shall be ignored and the - * FCP_BIDIRECTIONAL_READ_DL field shall not be included in the FCP_CMND - * IU payload - */ - fcp_cmnd->control = UNF_SET_TASK_MGMT_FLAGS((u32)(task_mgmt)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "SCSI cmnd(0x%x) is task mgmt cmnd. ntrl Flag(LITTLE END) is 0x%x.", - task_mgmt, fcp_cmnd->control); -} - -int unf_send_scsi_mgmt_cmnd(struct unf_xchg *xchg, struct unf_lport *lport, - struct unf_rport *rport, - struct unf_scsi_cmnd *scsi_cmnd, - enum unf_task_mgmt_cmd task_mgnt_cmd_type) -{ - /* - * 1. Device/LUN reset - * 2. Target/Session reset - */ - struct unf_xchg *unf_xchg = NULL; - int ret = SUCCESS; - struct unf_frame_pkg pkg = {0}; - ulong xchg_flag = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, FAILED); - FC_CHECK_RETURN_VALUE(rport, FAILED); - FC_CHECK_RETURN_VALUE(xchg, FAILED); - FC_CHECK_RETURN_VALUE(scsi_cmnd, FAILED); - FC_CHECK_RETURN_VALUE(task_mgnt_cmd_type <= UNF_FCP_TM_TERMINATE_TASK && - task_mgnt_cmd_type >= UNF_FCP_TM_QUERY_TASK_SET, FAILED); - - unf_xchg = xchg; - unf_xchg->lport = lport; - unf_xchg->rport = rport; - - /* 1. 
-	/* 1. State: Up_Task */
-	spin_lock_irqsave(&unf_xchg->xchg_state_lock, xchg_flag);
-	unf_xchg->io_state |= INI_IO_STATE_UPTASK;
-	spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, xchg_flag);
-	pkg.frame_head.oxid_rxid = ((u32)unf_xchg->oxid << (u32)UNF_SHIFT_16) | unf_xchg->rxid;
-
-	/* 2. Set TASK MANAGEMENT FLAGS of FCP_CMND to the corresponding task
-	 * management command
-	 */
-	unf_build_task_mgmt_fcp_cmnd(&unf_xchg->fcp_cmnd, scsi_cmnd, task_mgnt_cmd_type);
-
-	pkg.xchg_contex = unf_xchg;
-	pkg.private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = rport->rport_index;
-	pkg.fcp_cmnd = &unf_xchg->fcp_cmnd;
-	pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = unf_xchg->hotpooltag | UNF_HOTTAG_FLAG;
-	pkg.frame_head.csctl_sid = lport->nport_id;
-	pkg.frame_head.rctl_did = rport->nport_id;
-
-	pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] =
-	    xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME];
-
-	if (unlikely(lport->pcie_link_down)) {
-		unf_free_lport_all_xchg(lport);
-		return SUCCESS;
-	}
-
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT,
-		     "[event]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) Hottag(0x%x) lunid(0x%llx)",
-		     lport->port_id, task_mgnt_cmd_type, rport->nport_id,
-		     unf_xchg->hotpooltag, *((u64 *)scsi_cmnd->lun_id));
-
-	/* 3. Init exchange task semaphore */
-	sema_init(&unf_xchg->task_sema, 0);
-
-	/* 4. Send Mgmt Task to low-level */
-	if (unf_hardware_start_io(lport, &pkg) != RETURN_OK) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) failed",
-			     lport->port_id, task_mgnt_cmd_type, rport->nport_id);
-
-		return FAILED;
-	}
-
-	/*
-	 * semaphore timeout handling
-	 * *
-	 * Code review: the second input parameter needs to be converted to
-	 * jiffies; the semaphore is set up after the message is sent
-	 * successfully, and is returned when it times out or is woken up.
-	 * *
-	 * 5. The semaphore is cleared and counted when the Mgmt Task message
-	 * is sent, and is woken up when the RSP message is received. If the
-	 * semaphore is not woken up, it is triggered after the timeout, that
-	 * is, no RSP message was received within the timeout period.
-	 */
-	if (down_timeout(&unf_xchg->task_sema, (s64)msecs_to_jiffies((u32)UNF_WAIT_SEM_TIMEOUT))) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) timeout scsi id(0x%x) lun id(0x%x)",
-			     lport->nport_id, task_mgnt_cmd_type,
-			     rport->nport_id, scsi_cmnd->scsi_id,
-			     (u32)scsi_cmnd->raw_lun_id);
-		unf_notify_chip_free_xid(unf_xchg);
-		/* semaphore timeout */
-		ret = FAILED;
-		spin_lock_irqsave(&lport->lport_state_lock, flag);
-		if (lport->states == UNF_LPORT_ST_RESET)
-			ret = SUCCESS;
-		spin_unlock_irqrestore(&lport->lport_state_lock, flag);
-
-		return ret;
-	}
-
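	/*
	 * Editor's note on the "convert to jiffies" review comment above:
	 * down_timeout() expects a timeout in jiffies, hence the
	 * msecs_to_jiffies() conversion. For example, with HZ=250 a 2000 ms
	 * timeout becomes 2000 * 250 / 1000 = 500 jiffies.
	 */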
-	/*
-	 * 6. NOTE: no timeout (has been woken up)
-	 * Do Scsi_Cmnd(Mgmt Task) result checking
-	 * *
-	 * FAILED: with error code or RSP is error
-	 * SUCCESS: others
-	 */
-	if (unf_xchg->scsi_cmnd_info.result == UNF_IO_SUCCESS) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR,
-			     "[info]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp succeed",
-			     lport->nport_id, task_mgnt_cmd_type, rport->nport_id);
-
-		ret = SUCCESS;
-	} else {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp failed scsi id(0x%x) lun id(0x%x)",
-			     lport->nport_id, task_mgnt_cmd_type, rport->nport_id,
-			     scsi_cmnd->scsi_id, (u32)scsi_cmnd->raw_lun_id);
-
-		ret = FAILED;
-	}
-
-	return ret;
-}
-
-u32 unf_recv_tmf_marker_status(void *lport, struct unf_frame_pkg *pkg)
-{
-	struct unf_lport *unf_lport = NULL;
-	u32 uret = RETURN_OK;
-	struct unf_xchg *unf_xchg = NULL;
-	u16 hot_pool_tag = 0;
-
-	FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-	FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR);
-	unf_lport = (struct unf_lport *)lport;
-
-	/* Find the exchange that the marker STS points to */
-	if (!unf_lport->xchg_mgr_temp.unf_look_up_xchg_by_tag) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-			     "[err]Port(0x%x) tag function is NULL", unf_lport->port_id);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	hot_pool_tag =
-	    (u16)(pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]);
-
-	unf_xchg =
-	    (struct unf_xchg *)(unf_lport->xchg_mgr_temp
-				.unf_look_up_xchg_by_tag((void *)unf_lport, hot_pool_tag));
-	if (!unf_xchg) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-			     "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed",
-			     unf_lport->port_id, unf_lport->nport_id, hot_pool_tag);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	/*
-	 * NOTE: set exchange TMF state with MARKER_STS_RECEIVED
-	 * *
-	 * About TMF state
-	 * 1. STS received
-	 * 2. Response received
-	 * 3. Do check if necessary
-	 */
-	unf_xchg->tmf_state |= MARKER_STS_RECEIVED;
-
-	FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR,
-		     "[info]Marker STS: D_ID(0x%x) S_ID(0x%x) OX_ID(0x%x) RX_ID(0x%x), EXCH: D_ID(0x%x) S_ID(0x%x) OX_ID(0x%x) RX_ID(0x%x)",
-		     pkg->frame_head.rctl_did & UNF_NPORTID_MASK,
-		     pkg->frame_head.csctl_sid & UNF_NPORTID_MASK,
-		     (u16)(pkg->frame_head.oxid_rxid >> UNF_SHIFT_16),
-		     (u16)(pkg->frame_head.oxid_rxid), unf_xchg->did, unf_xchg->sid,
-		     unf_xchg->oxid, unf_xchg->rxid);
-
-	return uret;
-}
-
-u32 unf_recv_abts_marker_status(void *lport, struct unf_frame_pkg *pkg)
-{
-	struct unf_lport *unf_lport = NULL;
-	struct unf_xchg *unf_xchg = NULL;
-	u16 hot_pool_tag = 0;
-	ulong flags = 0;
-
-	FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-	FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR);
-	unf_lport = (struct unf_lport *)lport;
-
-	/* Find exchange by tag */
-	if (!unf_lport->xchg_mgr_temp.unf_look_up_xchg_by_tag) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-			     "[err]Port(0x%x) tag function is NULL", unf_lport->port_id);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	hot_pool_tag = (u16)(pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]);
-
-	unf_xchg =
-	    (struct unf_xchg *)(unf_lport->xchg_mgr_temp.unf_look_up_xchg_by_tag((void *)unf_lport,
-										 hot_pool_tag));
-	if (!unf_xchg) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-			     "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed",
-			     unf_lport->port_id, unf_lport->nport_id, hot_pool_tag);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	/*
-	 * NOTE: set exchange ABTS state with MARKER_STS_RECEIVED
-	 * *
-	 * About exchange ABTS state
-	 * 1. STS received
-	 * 2. Response received
-	 * 3.
Do check if necessary - * * - * About Exchange status get from low level - * 1. Set: when RCVD ABTS Marker - * 2. Set: when RCVD ABTS Req Done - * 3. value: set value with pkg->status - */ - spin_lock_irqsave(&unf_xchg->xchg_state_lock, flags); - unf_xchg->ucode_abts_state = pkg->status; - unf_xchg->abts_state |= MARKER_STS_RECEIVED; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) wake up SEMA for Abts marker exchange(0x%p) oxid(0x%x 0x%x) hottag(0x%x) status(0x%x)", - unf_lport->port_id, unf_xchg, unf_xchg->oxid, unf_xchg->rxid, - unf_xchg->hotpooltag, pkg->abts_maker_status); - - /* - * NOTE: Second time for ABTS marker received, or - * ABTS response have been received, no need to wake up sema - */ - if ((INI_IO_STATE_ABORT_TIMEOUT & unf_xchg->io_state) || - (ABTS_RESPONSE_RECEIVED & unf_xchg->abts_state)) { - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) no need to wake up SEMA for Abts marker ABTS_STATE(0x%x) IO_STATE(0x%x)", - unf_lport->port_id, unf_xchg->abts_state, unf_xchg->io_state); - - return RETURN_OK; - } - - if (unf_xchg->io_state & INI_IO_STATE_TMF_ABORT) { - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) receive Abts marker, exchange(%p) state(0x%x) free it", - unf_lport->port_id, unf_xchg, unf_xchg->io_state); - - unf_cm_free_xchg(unf_lport, unf_xchg); - } else { - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags); - up(&unf_xchg->task_sema); - } - - return RETURN_OK; -} diff --git a/drivers/scsi/spfc/common/unf_io_abnormal.h b/drivers/scsi/spfc/common/unf_io_abnormal.h deleted file mode 100644 index 31cc8e30e51afad6b7041b61ed1685adb5ecd5fc..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_io_abnormal.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_IO_ABNORMAL_H -#define UNF_IO_ABNORMAL_H - -#include "unf_type.h" -#include "unf_lport.h" -#include "unf_exchg.h" - -#define UNF_GET_LL_ERR(pkg) (((pkg)->status) >> 16) - -void unf_process_scsi_mgmt_result(struct unf_frame_pkg *pkg, - struct unf_xchg *xchg); -u32 unf_hardware_start_io(struct unf_lport *lport, struct unf_frame_pkg *pkg); -u32 unf_recv_abts_marker_status(void *lport, struct unf_frame_pkg *pkg); -u32 unf_recv_tmf_marker_status(void *lport, struct unf_frame_pkg *pkg); - -#endif diff --git a/drivers/scsi/spfc/common/unf_log.h b/drivers/scsi/spfc/common/unf_log.h deleted file mode 100644 index 801e23ac0829c3ea41701cff50c685920608e3f2..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_log.h +++ /dev/null @@ -1,178 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_LOG_H -#define UNF_LOG_H -#include "unf_type.h" - -#define UNF_CRITICAL 1 -#define UNF_ERR 2 -#define UNF_WARN 3 -#define UNF_KEVENT 4 -#define UNF_MAJOR 5 -#define UNF_MINOR 6 -#define UNF_INFO 7 -#define UNF_DATA 7 -#define UNF_ALL 7 - -enum unf_debug_type { - UNF_DEBUG_TYPE_MML = 0, - UNF_DEBUG_TYPE_DIAGNOSE = 1, - UNF_DEBUG_TYPE_MESSAGE = 2, - UNF_DEBUG_TYPE_BUTT -}; - -enum unf_log_attr { - UNF_LOG_LOGIN_ATT = 0x1, - UNF_LOG_IO_ATT = 0x2, - UNF_LOG_EQUIP_ATT = 0x4, - UNF_LOG_REG_ATT = 0x8, - UNF_LOG_REG_MML_TEST = 0x10, - UNF_LOG_EVENT = 0x20, - UNF_LOG_NORMAL = 0x40, - UNF_LOG_ABNORMAL = 0X80, - UNF_LOG_BUTT -}; - -enum event_log { - UNF_EVTLOG_DRIVER_SUC 
= 0, - UNF_EVTLOG_DRIVER_INFO, - UNF_EVTLOG_DRIVER_WARN, - UNF_EVTLOG_DRIVER_ERR, - UNF_EVTLOG_LINK_SUC, - UNF_EVTLOG_LINK_INFO, - UNF_EVTLOG_LINK_WARN, - UNF_EVTLOG_LINK_ERR, - UNF_EVTLOG_IO_SUC, - UNF_EVTLOG_IO_INFO, - UNF_EVTLOG_IO_WARN, - UNF_EVTLOG_IO_ERR, - UNF_EVTLOG_TOOL_SUC, - UNF_EVTLOG_TOOL_INFO, - UNF_EVTLOG_TOOL_WARN, - UNF_EVTLOG_TOOL_ERR, - UNF_EVTLOG_BUT -}; - -#define UNF_IO_ATT_PRINT_TIMES 2 -#define UNF_LOGIN_ATT_PRINT_TIMES 100 - -#define UNF_IO_ATT_PRINT_LIMIT msecs_to_jiffies(2 * 1000) - -extern u32 unf_dgb_level; -extern u32 log_print_level; -extern u32 log_limited_times; - -#define DRV_LOG_LIMIT(module_id, log_level, log_att, format, ...) \ - do { \ - static unsigned long pre; \ - static int should_print = UNF_LOGIN_ATT_PRINT_TIMES; \ - if (time_after_eq(jiffies, pre + (UNF_IO_ATT_PRINT_LIMIT))) { \ - if (log_att == UNF_LOG_ABNORMAL) { \ - should_print = UNF_IO_ATT_PRINT_TIMES; \ - } else { \ - should_print = log_limited_times; \ - } \ - } \ - if (should_print < 0) { \ - if (log_att != UNF_LOG_ABNORMAL) \ - pre = jiffies; \ - break; \ - } \ - if (should_print-- > 0) { \ - printk(log_level "[%d][FC_UNF]" format "[%s][%-5d]\n", \ - smp_processor_id(), ##__VA_ARGS__, __func__, \ - __LINE__); \ - } \ - if (should_print == 0) { \ - printk(log_level "[FC_UNF]log is limited[%s][%-5d]\n", \ - __func__, __LINE__); \ - } \ - pre = jiffies; \ - } while (0) - -#define FC_CHECK_RETURN_VALUE(condition, ret) \ - do { \ - if (unlikely(!(condition))) { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, \ - UNF_ERR, "Para check(%s) invalid", \ - #condition); \ - return ret; \ - } \ - } while (0) - -#define FC_CHECK_RETURN_VOID(condition) \ - do { \ - if (unlikely(!(condition))) { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, \ - UNF_ERR, "Para check(%s) invalid", \ - #condition); \ - return; \ - } \ - } while (0) - -#define FC_DRV_PRINT(log_att, log_level, format, ...) 
\ - do { \ - if (unlikely((log_level) <= log_print_level)) { \ - if (log_level == UNF_CRITICAL) { \ - DRV_LOG_LIMIT(UNF_PID, KERN_CRIT, \ - log_att, format, ##__VA_ARGS__); \ - } else if (log_level == UNF_WARN) { \ - DRV_LOG_LIMIT(UNF_PID, KERN_WARNING, \ - log_att, format, ##__VA_ARGS__); \ - } else if (log_level == UNF_ERR) { \ - DRV_LOG_LIMIT(UNF_PID, KERN_ERR, \ - log_att, format, ##__VA_ARGS__); \ - } else if (log_level == UNF_MAJOR || \ - log_level == UNF_MINOR || \ - log_level == UNF_KEVENT) { \ - DRV_LOG_LIMIT(UNF_PID, KERN_NOTICE, \ - log_att, format, ##__VA_ARGS__); \ - } else if (log_level == UNF_INFO || \ - log_level == UNF_DATA) { \ - DRV_LOG_LIMIT(UNF_PID, KERN_INFO, \ - log_att, format, ##__VA_ARGS__); \ - } \ - } \ - } while (0) - -#define UNF_PRINT_SFS(dbg_level, portid, data, size) \ - do { \ - if ((dbg_level) <= log_print_level) { \ - u32 cnt = 0; \ - printk(KERN_INFO "[INFO]Port(0x%x) sfs:0x", (portid)); \ - for (cnt = 0; cnt < (size) / 4; cnt++) { \ - printk(KERN_INFO "%08x ", \ - ((u32 *)(data))[cnt]); \ - } \ - printk(KERN_INFO "[FC_UNF][%s]\n", __func__); \ - } \ - } while (0) - -#define UNF_PRINT_SFS_LIMIT(dbg_level, portid, data, size) \ - do { \ - if ((dbg_level) <= log_print_level) { \ - static ulong pre; \ - static int should_print = UNF_LOGIN_ATT_PRINT_TIMES; \ - if (time_after_eq( \ - jiffies, pre + UNF_IO_ATT_PRINT_LIMIT)) { \ - should_print = log_limited_times; \ - } \ - if (should_print < 0) { \ - pre = jiffies; \ - break; \ - } \ - if (should_print-- > 0) { \ - UNF_PRINT_SFS(dbg_level, portid, data, size); \ - } \ - if (should_print == 0) { \ - printk( \ - KERN_INFO \ - "[FC_UNF]sfs log is limited[%s][%-5d]\n", \ - __func__, __LINE__); \ - } \ - pre = jiffies; \ - } \ - } while (0) - -#endif diff --git a/drivers/scsi/spfc/common/unf_lport.c b/drivers/scsi/spfc/common/unf_lport.c deleted file mode 100644 index 66d3ac14d67651912b4e065af8ebe0567b3f7ac0..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_lport.c +++ /dev/null @@ -1,1008 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_lport.h" -#include "unf_log.h" -#include "unf_rport.h" -#include "unf_exchg.h" -#include "unf_service.h" -#include "unf_ls.h" -#include "unf_gs.h" -#include "unf_portman.h" - -static void unf_lport_config(struct unf_lport *lport); -void unf_cm_mark_dirty_mem(struct unf_lport *lport, enum unf_lport_dirty_flag type) -{ - FC_CHECK_RETURN_VOID((lport)); - - lport->dirty_flag |= (u32)type; -} - -u32 unf_init_lport_route(struct unf_lport *lport) -{ - u32 ret = RETURN_OK; - int ret_val = 0; - - FC_CHECK_RETURN_VALUE((lport), UNF_RETURN_ERROR); - - /* Init L_Port route work */ - INIT_DELAYED_WORK(&lport->route_timer_work, unf_lport_route_work); - - /* Delay route work */ - ret_val = queue_delayed_work(unf_wq, &lport->route_timer_work, - (ulong)msecs_to_jiffies(UNF_LPORT_POLL_TIMER)); - if (unlikely((!(bool)(ret_val)))) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_WARN, - "[warn]Port(0x%x) schedule route work failed", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - ret = unf_lport_ref_inc(lport); - return ret; -} - -void unf_destroy_lport_route(struct unf_lport *lport) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - - /* Cancel (route timer) delay work */ - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), (&lport->route_timer_work), - "Route Timer work"); - if (ret == RETURN_OK) - /* Corresponding to ADD operation */ - unf_lport_ref_dec(lport); - - lport->destroy_step = 
UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE; -} - -void unf_init_port_parms(struct unf_lport *lport) -{ - INIT_LIST_HEAD(&lport->list_vports_head); - INIT_LIST_HEAD(&lport->list_intergrad_vports); - INIT_LIST_HEAD(&lport->list_destroy_vports); - INIT_LIST_HEAD(&lport->entry_lport); - INIT_LIST_HEAD(&lport->list_qos_head); - - spin_lock_init(&lport->qos_mgr_lock); - spin_lock_init(&lport->lport_state_lock); - - lport->max_frame_size = max_frame_size; - lport->ed_tov = UNF_DEFAULT_EDTOV; - lport->ra_tov = UNF_DEFAULT_RATOV; - lport->fabric_node_name = 0; - lport->qos_level = UNF_QOS_LEVEL_DEFAULT; - lport->qos_cs_ctrl = false; - lport->priority = (bool)UNF_PRIORITY_DISABLE; - lport->port_dirt_exchange = false; - - unf_lport_config(lport); - - unf_set_lport_state(lport, UNF_LPORT_ST_ONLINE); - - lport->link_up = UNF_PORT_LINK_DOWN; - lport->port_removing = false; - lport->lport_free_completion = NULL; - lport->last_tx_fault_jif = 0; - lport->enhanced_features = 0; - lport->destroy_step = INVALID_VALUE32; - lport->dirty_flag = 0; - lport->switch_state = false; - lport->bbscn_support = false; - lport->loop_back_test_mode = false; - lport->start_work_state = UNF_START_WORK_STOP; - lport->sfp_power_fault_count = 0; - lport->sfp_9545_fault_count = 0; - - atomic_set(&lport->lport_no_operate_flag, UNF_LPORT_NORMAL); - atomic_set(&lport->port_ref_cnt, 0); - atomic_set(&lport->scsi_session_add_success, 0); - atomic_set(&lport->scsi_session_add_failed, 0); - atomic_set(&lport->scsi_session_del_success, 0); - atomic_set(&lport->scsi_session_del_failed, 0); - atomic_set(&lport->add_start_work_failed, 0); - atomic_set(&lport->add_closing_work_failed, 0); - atomic_set(&lport->alloc_scsi_id, 0); - atomic_set(&lport->resume_scsi_id, 0); - atomic_set(&lport->reuse_scsi_id, 0); - atomic_set(&lport->device_alloc, 0); - atomic_set(&lport->device_destroy, 0); - atomic_set(&lport->session_loss_tmo, 0); - atomic_set(&lport->host_no, 0); - atomic64_set(&lport->exchg_index, 0x1000); - atomic_inc(&lport->port_ref_cnt); - - memset(&lport->port_dynamic_info, 0, sizeof(struct unf_port_dynamic_info)); - memset(&lport->link_service_info, 0, sizeof(struct unf_link_service_collect)); - memset(&lport->err_code_sum, 0, sizeof(struct unf_err_code)); -} - -void unf_reset_lport_params(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = lport; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport->link_up = UNF_PORT_LINK_DOWN; - unf_lport->nport_id = 0; - unf_lport->max_frame_size = max_frame_size; - unf_lport->ed_tov = UNF_DEFAULT_EDTOV; - unf_lport->ra_tov = UNF_DEFAULT_RATOV; - unf_lport->fabric_node_name = 0; -} - -static enum unf_lport_login_state -unf_lport_state_online(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_LINK_UP: - next_state = UNF_LPORT_ST_LINK_UP; - break; - - case UNF_EVENT_LPORT_NORMAL_ENTER: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_initial(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_LINK_UP: - next_state = UNF_LPORT_ST_LINK_UP; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_linkup(enum 
unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_NORMAL_ENTER: - next_state = UNF_LPORT_ST_FLOGI_WAIT; - break; - - case UNF_EVENT_LPORT_READY: - next_state = UNF_LPORT_ST_READY; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_flogi_wait(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_REMOTE_ACC: - next_state = UNF_LPORT_ST_PLOGI_WAIT; - break; - - case UNF_EVENT_LPORT_READY: - next_state = UNF_LPORT_ST_READY; - break; - - case UNF_EVENT_LPORT_REMOTE_TIMEOUT: - next_state = UNF_LPORT_ST_LOGO; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_plogi_wait(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_REMOTE_ACC: - next_state = UNF_LPORT_ST_RFT_ID_WAIT; - break; - - case UNF_EVENT_LPORT_REMOTE_TIMEOUT: - next_state = UNF_LPORT_ST_LOGO; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state -unf_lport_state_rftid_wait(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_REMOTE_ACC: - next_state = UNF_LPORT_ST_RFF_ID_WAIT; - break; - - case UNF_EVENT_LPORT_REMOTE_TIMEOUT: - next_state = UNF_LPORT_ST_LOGO; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_rffid_wait(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_REMOTE_ACC: - next_state = UNF_LPORT_ST_SCR_WAIT; - break; - - case UNF_EVENT_LPORT_REMOTE_TIMEOUT: - next_state = UNF_LPORT_ST_LOGO; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_scr_wait(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_REMOTE_ACC: - next_state = UNF_LPORT_ST_READY; - break; - - case UNF_EVENT_LPORT_REMOTE_TIMEOUT: - next_state = UNF_LPORT_ST_LOGO; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state -unf_lport_state_logo(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - 
switch (lport_event) { - case UNF_EVENT_LPORT_NORMAL_ENTER: - next_state = UNF_LPORT_ST_OFFLINE; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_offline(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_ONLINE: - next_state = UNF_LPORT_ST_ONLINE; - break; - - case UNF_EVENT_LPORT_RESET: - next_state = UNF_LPORT_ST_RESET; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_reset(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_NORMAL_ENTER: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_ready(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - case UNF_EVENT_LPORT_RESET: - next_state = UNF_LPORT_ST_RESET; - break; - - case UNF_EVENT_LPORT_OFFLINE: - next_state = UNF_LPORT_ST_LOGO; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static struct unf_lport_state_ma lport_state[] = { - {UNF_LPORT_ST_ONLINE, unf_lport_state_online}, - {UNF_LPORT_ST_INITIAL, unf_lport_state_initial}, - {UNF_LPORT_ST_LINK_UP, unf_lport_state_linkup}, - {UNF_LPORT_ST_FLOGI_WAIT, unf_lport_state_flogi_wait}, - {UNF_LPORT_ST_PLOGI_WAIT, unf_lport_state_plogi_wait}, - {UNF_LPORT_ST_RFT_ID_WAIT, unf_lport_state_rftid_wait}, - {UNF_LPORT_ST_RFF_ID_WAIT, unf_lport_state_rffid_wait}, - {UNF_LPORT_ST_SCR_WAIT, unf_lport_state_scr_wait}, - {UNF_LPORT_ST_LOGO, unf_lport_state_logo}, - {UNF_LPORT_ST_OFFLINE, unf_lport_state_offline}, - {UNF_LPORT_ST_RESET, unf_lport_state_reset}, - {UNF_LPORT_ST_READY, unf_lport_state_ready}, -}; - -void unf_lport_state_ma(struct unf_lport *lport, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state old_state = UNF_LPORT_ST_ONLINE; - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - u32 index = 0; - - FC_CHECK_RETURN_VOID(lport); - - old_state = lport->states; - - while (index < (sizeof(lport_state) / sizeof(struct unf_lport_state_ma))) { - if (lport->states == lport_state[index].lport_state) { - next_state = lport_state[index].lport_state_ma(old_state, lport_event); - break; - } - index++; - } - - if (index >= (sizeof(lport_state) / sizeof(struct unf_lport_state_ma))) { - next_state = old_state; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "[info]Port(0x%x) hold state(0x%x)", - lport->port_id, lport->states); - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) with old state(0x%x) event(0x%x) next state(0x%x)", - lport->port_id, old_state, lport_event, next_state); - - unf_set_lport_state(lport, next_state); -} - -u32 unf_lport_retry_flogi(struct unf_lport *lport) -{ - struct unf_rport *unf_rport = NULL; - u32 ret = UNF_RETURN_ERROR; - ulong flag = 0; - - 
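/*
 * Retry fabric login: reuse (or allocate) the R_Port for the well-known
 * fabric login address (UNF_FC_FID_FLOGI), bail out if the L_Port has
 * already left the FLOGI_WAIT state, then resend FLOGI (or FDISC when
 * this is an NPIV vport, i.e. root_lport != lport). A failed send kicks
 * off L_Port error recovery.
 */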
FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* Get (new) R_Port */ - unf_rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI); - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) allocate RPort failed", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Check L_Port state */ - spin_lock_irqsave(&lport->lport_state_lock, flag); - if (lport->states != UNF_LPORT_ST_FLOGI_WAIT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) no need to retry FLOGI with state(0x%x)", - lport->port_id, lport->states); - - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - return RETURN_OK; - } - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = UNF_FC_FID_FLOGI; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Send FLOGI or FDISC */ - if (lport->root_lport != lport) { - ret = unf_send_fdisc(lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send FDISC failed", lport->port_id); - - /* Do L_Port recovery */ - unf_lport_error_recovery(lport); - } - } else { - ret = unf_send_flogi(lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send FLOGI failed\n", lport->port_id); - - /* Do L_Port recovery */ - unf_lport_error_recovery(lport); - } - } - - return ret; -} - -u32 unf_lport_name_server_register(struct unf_lport *lport, - enum unf_lport_login_state state) -{ - struct unf_rport *unf_rport = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - u32 fabric_id = UNF_FC_FID_DIR_SERV; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (state == UNF_LPORT_ST_SCR_WAIT) - fabric_id = UNF_FC_FID_FCTRL; - - /* Get (safe) R_Port */ - unf_rport = - unf_get_rport_by_nport_id(lport, fabric_id); - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, - fabric_id); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) allocate RPort failed", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Update R_Port & L_Port state */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = fabric_id; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_lport_state_ma(lport, UNF_EVENT_LPORT_NORMAL_ENTER); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - switch (state) { - /* RFT_ID */ - case UNF_LPORT_ST_RFT_ID_WAIT: - ret = unf_send_rft_id(lport, unf_rport); - break; - /* RFF_ID */ - case UNF_LPORT_ST_RFF_ID_WAIT: - ret = unf_send_rff_id(lport, unf_rport, UNF_FC4_FCP_TYPE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) register SCSI FC4Type to fabric(0xfffffc) failed", - lport->nport_id); - unf_lport_error_recovery(lport); - } - break; - - /* SCR */ - case UNF_LPORT_ST_SCR_WAIT: - ret = unf_send_scr(lport, unf_rport); - break; - - /* PLOGI */ - case UNF_LPORT_ST_PLOGI_WAIT: - default: - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - ret = unf_send_plogi(lport, unf_rport); - break; - } - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: 
Port(0x%x) register fabric(0xfffffc) failed", - lport->nport_id); - - /* Do L_Port recovery */ - unf_lport_error_recovery(lport); - } - - return ret; -} - -u32 unf_lport_enter_sns_logo(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_rport *unf_rport = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (!rport) - unf_rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); - else - unf_rport = rport; - - if (!unf_rport) { - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_lport_state_ma(lport, UNF_EVENT_LPORT_NORMAL_ENTER); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - return RETURN_OK; - } - - /* Update L_Port & R_Port state */ - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_lport_state_ma(lport, UNF_EVENT_LPORT_NORMAL_ENTER); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Do R_Port LOGO state */ - unf_rport_enter_logo(lport, unf_rport); - - return ret; -} - -void unf_lport_enter_sns_plogi(struct unf_lport *lport) -{ - /* Fabric or Public Loop Mode: Login with Name server */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - - /* Get (safe) R_Port */ - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_DIR_SERV); - if (unf_rport) { - /* for port swap: Delete old R_Port if necessary */ - if (unf_rport->local_nport_id != lport->nport_id) { - unf_rport_immediate_link_down(lport, unf_rport); - unf_rport = NULL; - } - } - - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, - UNF_FC_FID_DIR_SERV); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) allocate RPort failed", - lport->port_id); - - unf_lport_error_recovery(unf_lport); - return; - } - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = UNF_FC_FID_DIR_SERV; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Send PLOGI to Fabric(0xfffffc) */ - ret = unf_send_plogi(unf_lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send PLOGI to name server failed", - lport->port_id); - - unf_lport_error_recovery(unf_lport); - } -} - -int unf_get_port_params(void *arg_in, void *arg_out) -{ - struct unf_lport *unf_lport = (struct unf_lport *)arg_in; - struct unf_low_level_port_mgr_op *port_mgr = NULL; - struct unf_port_param port_params = {0}; - - FC_CHECK_RETURN_VALUE(arg_in, UNF_RETURN_ERROR); - - port_mgr = &unf_lport->low_level_func.port_mgr_op; - if (!port_mgr->ll_port_config_get) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_WARN, - "[warn]Port(0x%x) low level port_config_get function is NULL", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "[warn]Port(0x%x) get parameters with default:R_A_TOV(%d) E_D_TOV(%d)", - unf_lport->port_id, UNF_DEFAULT_FABRIC_RATOV, - UNF_DEFAULT_EDTOV); - - port_params.ra_tov = UNF_DEFAULT_FABRIC_RATOV; - port_params.ed_tov = UNF_DEFAULT_EDTOV; - - /* Update parameters with Fabric mode */ - if (unf_lport->act_topo == UNF_ACT_TOP_PUBLIC_LOOP || - unf_lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) { - unf_lport->ra_tov = port_params.ra_tov; - unf_lport->ed_tov = port_params.ed_tov; - } - 
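/*
 * The default R_A_TOV/E_D_TOV values are only applied to the L_Port in
 * fabric-attached topologies (public loop or P2P fabric); in other
 * topologies the current values are left untouched.
 */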
- return RETURN_OK; -} - -u32 unf_lport_enter_flogi(struct unf_lport *lport) -{ - struct unf_rport *unf_rport = NULL; - struct unf_cm_event_report *event = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - u32 nport_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* Get (safe) R_Port */ - nport_id = UNF_FC_FID_FLOGI; - unf_rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI); - - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, nport_id); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate RPort failed", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Update L_Port state */ - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_lport_state_ma(lport, UNF_EVENT_LPORT_NORMAL_ENTER); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - /* Update R_Port N_Port_ID */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = UNF_FC_FID_FLOGI; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - event = unf_get_one_event_node(lport); - if (event) { - event->lport = lport; - event->event_asy_flag = UNF_EVENT_ASYN; - event->unf_event_task = unf_get_port_params; - event->para_in = (void *)lport; - unf_post_one_event_node(lport, event); - } - - if (lport->root_lport != lport) { - /* for NPIV */ - ret = unf_send_fdisc(lport, unf_rport); - if (ret != RETURN_OK) - unf_lport_error_recovery(lport); - } else { - /* for Physical Port */ - ret = unf_send_flogi(lport, unf_rport); - if (ret != RETURN_OK) - unf_lport_error_recovery(lport); - } - - return ret; -} - -void unf_set_lport_state(struct unf_lport *lport, enum unf_lport_login_state state) -{ - FC_CHECK_RETURN_VOID(lport); - if (lport->states != state) - lport->retries = 0; - - lport->states = state; -} - -static void unf_lport_timeout(struct work_struct *work) -{ - struct unf_lport *unf_lport = NULL; - enum unf_lport_login_state state = UNF_LPORT_ST_READY; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(work); - unf_lport = container_of(work, struct unf_lport, retry_work.work); - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - state = unf_lport->states; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is timeout with state(0x%x)", - unf_lport->port_id, state); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - switch (state) { - /* FLOGI retry */ - case UNF_LPORT_ST_FLOGI_WAIT: - (void)unf_lport_retry_flogi(unf_lport); - break; - - case UNF_LPORT_ST_PLOGI_WAIT: - case UNF_LPORT_ST_RFT_ID_WAIT: - case UNF_LPORT_ST_RFF_ID_WAIT: - case UNF_LPORT_ST_SCR_WAIT: - (void)unf_lport_name_server_register(unf_lport, state); - break; - - /* Send LOGO External */ - case UNF_LPORT_ST_LOGO: - break; - - /* Do nothing */ - case UNF_LPORT_ST_OFFLINE: - case UNF_LPORT_ST_READY: - case UNF_LPORT_ST_RESET: - case UNF_LPORT_ST_ONLINE: - case UNF_LPORT_ST_INITIAL: - case UNF_LPORT_ST_LINK_UP: - - unf_lport->retries = 0; - break; - default: - break; - } - - unf_lport_ref_dec_to_destroy(unf_lport); -} - -static void unf_lport_config(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - INIT_DELAYED_WORK(&lport->retry_work, unf_lport_timeout); - - lport->max_retry_count = UNF_MAX_RETRY_COUNT; - lport->retries = 0; -} - -void unf_lport_error_recovery(struct unf_lport *lport) -{ - ulong delay = 0; - ulong flag = 0; - int ret_val = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - - ret = unf_lport_ref_inc(lport); - if (unlikely(ret != RETURN_OK)) { - 
FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is removing and no need process", - lport->port_id); - return; - } - - spin_lock_irqsave(&lport->lport_state_lock, flag); - - /* Port State: removing */ - if (lport->port_removing) { - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is removing and no need process", - lport->port_id); - - unf_lport_ref_dec_to_destroy(lport); - return; - } - - /* Port State: offline */ - if (lport->states == UNF_LPORT_ST_OFFLINE) { - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is offline and no need process", - lport->port_id); - - unf_lport_ref_dec_to_destroy(lport); - return; - } - - /* Queue work state check */ - if (delayed_work_pending(&lport->retry_work)) { - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - unf_lport_ref_dec_to_destroy(lport); - return; - } - - /* Do retry operation */ - if (lport->retries < lport->max_retry_count) { - lport->retries++; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) enter recovery and retry %u times", - lport->port_id, lport->nport_id, lport->retries); - - delay = (ulong)lport->ed_tov; - ret_val = queue_delayed_work(unf_wq, &lport->retry_work, - (ulong)msecs_to_jiffies((u32)delay)); - if (ret_val != 0) { - atomic_inc(&lport->port_ref_cnt); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) queue work success and reference count is %d", - lport->port_id, - atomic_read(&lport->port_ref_cnt)); - } - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - } else { - unf_lport_state_ma(lport, UNF_EVENT_LPORT_REMOTE_TIMEOUT); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) register operation timeout and do LOGO", - lport->port_id); - - (void)unf_lport_enter_sns_logo(lport, NULL); - } - - unf_lport_ref_dec_to_destroy(lport); -} - -struct unf_lport *unf_cm_lookup_vport_by_vp_index(struct unf_lport *lport, u16 vp_index) -{ - FC_CHECK_RETURN_VALUE(lport, NULL); - - if (vp_index == 0) - return lport; - - if (!lport->lport_mgr_temp.unf_look_up_vport_by_index) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) function do look up vport by index is NULL", - lport->port_id); - - return NULL; - } - - return lport->lport_mgr_temp.unf_look_up_vport_by_index(lport, vp_index); -} - -struct unf_lport *unf_cm_lookup_vport_by_did(struct unf_lport *lport, u32 did) -{ - FC_CHECK_RETURN_VALUE(lport, NULL); - - if (!lport->lport_mgr_temp.unf_look_up_vport_by_did) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) function do look up vport by D_ID is NULL", - lport->port_id); - - return NULL; - } - - return lport->lport_mgr_temp.unf_look_up_vport_by_did(lport, did); -} - -struct unf_lport *unf_cm_lookup_vport_by_wwpn(struct unf_lport *lport, u64 wwpn) -{ - FC_CHECK_RETURN_VALUE(lport, NULL); - - if (!lport->lport_mgr_temp.unf_look_up_vport_by_wwpn) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) function do look up vport by WWPN is NULL", - lport->port_id); - - return NULL; - } - - return lport->lport_mgr_temp.unf_look_up_vport_by_wwpn(lport, wwpn); -} - -void unf_cm_vport_remove(struct unf_lport *vport) -{ - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(vport); - unf_lport = vport->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - - if (!unf_lport->lport_mgr_temp.unf_vport_remove) { - 
FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) function do vport remove is NULL", - unf_lport->port_id); - return; - } - - unf_lport->lport_mgr_temp.unf_vport_remove(vport); -} diff --git a/drivers/scsi/spfc/common/unf_lport.h b/drivers/scsi/spfc/common/unf_lport.h deleted file mode 100644 index dbd531f15b1316c0303bb9de96ed26f626bee4f3..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_lport.h +++ /dev/null @@ -1,519 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_LPORT_H -#define UNF_LPORT_H - -#include "unf_type.h" -#include "unf_disc.h" -#include "unf_event.h" -#include "unf_common.h" - -#define UNF_PORT_TYPE_FC 0 -#define UNF_PORT_TYPE_DISC 1 -#define UNF_FW_UPDATE_PATH_LEN_MAX 255 -#define UNF_EXCHG_MGR_NUM (4) -#define UNF_ERR_CODE_PRINT_TIME 10 /* error code print times */ -#define UNF_MAX_IO_TYPE_STAT_NUM 48 /* IO abnormal max counter */ -#define UNF_MAX_IO_RETURN_VALUE 0x12 -#define UNF_MAX_SCSI_CMD 0xFF -#define UNF_MAX_LPRT_SCSI_ID_MAP 2048 - -enum unf_scsi_error_handle_type { - UNF_SCSI_ABORT_IO_TYPE = 0, - UNF_SCSI_DEVICE_RESET_TYPE, - UNF_SCSI_TARGET_RESET_TYPE, - UNF_SCSI_BUS_RESET_TYPE, - UNF_SCSI_HOST_RESET_TYPE, - UNF_SCSI_VIRTUAL_RESET_TYPE, - UNF_SCSI_ERROR_HANDLE_BUTT -}; - -enum unf_lport_destroy_step { - UNF_LPORT_DESTROY_STEP_0_SET_REMOVING = 0, - UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT, - UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE, - UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER, - UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR, - UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL, - UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR, - UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP, - UNF_LPORT_DESTROY_STEP_8_DESTROY_RPORT_MG_TMP, - UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP, - UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE, - UNF_LPORT_DESTROY_STEP_11_UNREG_TGT_HOST, - UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST, - UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE, - UNF_LPORT_DESTROY_STEP_BUTT -}; - -enum unf_lport_enhanced_feature { - /* Enhance GFF feature connect even if fail to get GFF feature */ - UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF = 0x0001, - UNF_LPORT_ENHANCED_FEATURE_IO_TRANSFERLIST = 0x0002, /* Enhance IO balance */ - UNF_LPORT_ENHANCED_FEATURE_IO_CHECKPOINT = 0x0004, /* Enhance IO check */ - UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE = 0x0008, /* Close FW ROUTE */ - /* lowest frequency read SFP information */ - UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE = 0x0010, - UNF_LPORT_ENHANCED_FEATURE_BUTT -}; - -enum unf_lport_login_state { - UNF_LPORT_ST_ONLINE = 0x2000, /* uninitialized */ - UNF_LPORT_ST_INITIAL, /* initialized and LinkDown */ - UNF_LPORT_ST_LINK_UP, /* initialized and Link UP */ - UNF_LPORT_ST_FLOGI_WAIT, /* waiting for FLOGI completion */ - UNF_LPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */ - UNF_LPORT_ST_RNN_ID_WAIT, /* waiting for RNN_ID completion */ - UNF_LPORT_ST_RSNN_NN_WAIT, /* waiting for RSNN_NN completion */ - UNF_LPORT_ST_RSPN_ID_WAIT, /* waiting for RSPN_ID completion */ - UNF_LPORT_ST_RPN_ID_WAIT, /* waiting for RPN_ID completion */ - UNF_LPORT_ST_RFT_ID_WAIT, /* waiting for RFT_ID completion */ - UNF_LPORT_ST_RFF_ID_WAIT, /* waiting for RFF_ID completion */ - UNF_LPORT_ST_SCR_WAIT, /* waiting for SCR completion */ - UNF_LPORT_ST_READY, /* ready for use */ - UNF_LPORT_ST_LOGO, /* waiting for LOGO completion */ - UNF_LPORT_ST_RESET, /* being reset and will restart */ - UNF_LPORT_ST_OFFLINE, /* offline */ - UNF_LPORT_ST_BUTT -}; - -enum 
unf_lport_event { - UNF_EVENT_LPORT_NORMAL_ENTER = 0x8000, /* next state enter */ - UNF_EVENT_LPORT_ONLINE = 0x8001, /* LPort link up */ - UNF_EVENT_LPORT_LINK_UP = 0x8002, /* LPort link up */ - UNF_EVENT_LPORT_LINK_DOWN = 0x8003, /* LPort link down */ - UNF_EVENT_LPORT_OFFLINE = 0x8004, /* LPort being stopped */ - UNF_EVENT_LPORT_RESET = 0x8005, - UNF_EVENT_LPORT_REMOTE_ACC = 0x8006, /* next state enter */ - UNF_EVENT_LPORT_REMOTE_RJT = 0x8007, /* rport reject */ - UNF_EVENT_LPORT_REMOTE_TIMEOUT = 0x8008, /* rport time out */ - UNF_EVENT_LPORT_READY = 0x8009, - UNF_EVENT_LPORT_REMOTE_BUTT -}; - -struct unf_cm_disc_mg_template { - /* start input: L_Port, return: ok/fail */ - u32 (*unf_disc_start)(void *lport); - /* stop input: L_Port, return: ok/fail */ - u32 (*unf_disc_stop)(void *lport); - - /* Callback after disc complete[with event:ok/fail]. */ - void (*unf_disc_callback)(void *lport, u32 result); -}; - -struct unf_chip_manage_info { - struct list_head list_chip_thread_entry; - struct list_head list_head; - spinlock_t chip_event_list_lock; - struct task_struct *thread; - u32 list_num; - u32 slot_id; - u8 chip_id; - u8 rsv; - u8 sfp_9545_fault; - u8 sfp_power_fault; - atomic_t ref_cnt; - u32 thread_exit; - struct unf_chip_info chip_info; - atomic_t card_loop_test_flag; - spinlock_t card_loop_back_state_lock; - char update_path[UNF_FW_UPDATE_PATH_LEN_MAX]; -}; - -enum unf_timer_type { - UNF_TIMER_TYPE_TGT_IO, - UNF_TIMER_TYPE_INI_IO, - UNF_TIMER_TYPE_REQ_IO, - UNF_TIMER_TYPE_TGT_RRQ, - UNF_TIMER_TYPE_INI_RRQ, - UNF_TIMER_TYPE_SFS, - UNF_TIMER_TYPE_INI_ABTS -}; - -struct unf_cm_xchg_mgr_template { - void *(*unf_xchg_get_free_and_init)(void *lport, u32 xchg_type); - void *(*unf_look_up_xchg_by_id)(void *lport, u16 ox_id, u32 oid); - void *(*unf_look_up_xchg_by_tag)(void *lport, u16 hot_pool_tag); - void (*unf_xchg_release)(void *lport, void *xchg); - void (*unf_xchg_mgr_io_xchg_abort)(void *lport, void *rport, u32 sid, u32 did, - u32 extra_io_state); - void (*unf_xchg_mgr_sfs_xchg_abort)(void *lport, void *rport, u32 sid, u32 did); - void (*unf_xchg_add_timer)(void *xchg, ulong time_ms, enum unf_timer_type time_type); - void (*unf_xchg_cancel_timer)(void *xchg); - void (*unf_xchg_abort_all_io)(void *lport, u32 xchg_type, bool clean); - void *(*unf_look_up_xchg_by_cmnd_sn)(void *lport, u64 command_sn, - u32 world_id, void *pinitiator); - void (*unf_xchg_abort_by_lun)(void *lport, void *rport, u64 lun_id, void *xchg, - bool abort_all_lun_flag); - - void (*unf_xchg_abort_by_session)(void *lport, void *rport); -}; - -struct unf_cm_lport_template { - void *(*unf_look_up_vport_by_index)(void *lport, u16 vp_index); - void *(*unf_look_up_vport_by_port_id)(void *lport, u32 port_id); - void *(*unf_look_up_vport_by_wwpn)(void *lport, u64 wwpn); - void *(*unf_look_up_vport_by_did)(void *lport, u32 did); - void (*unf_vport_remove)(void *vport); -}; - -struct unf_lport_state_ma { - enum unf_lport_login_state lport_state; - enum unf_lport_login_state (*lport_state_ma)(enum unf_lport_login_state old_state, - enum unf_lport_event event); -}; - -struct unf_rport_pool { - u32 rport_pool_count; - void *rport_pool_add; - struct list_head list_rports_pool; - spinlock_t rport_free_pool_lock; - /* for synchronous reuse RPort POOL completion */ - struct completion *rport_pool_completion; - ulong *rpi_bitmap; -}; - -struct unf_vport_pool { - u16 vport_pool_count; - void *vport_pool_addr; - struct list_head list_vport_pool; - spinlock_t vport_pool_lock; - struct completion *vport_pool_completion; - u16 slab_next_index; /* 
Next free vport */ - u16 slab_total_sum; /* Total Vport num */ - struct unf_lport *vport_slab[ARRAY_INDEX_0]; -}; - -struct unf_esgl_pool { - u32 esgl_pool_count; - void *esgl_pool_addr; - struct list_head list_esgl_pool; - spinlock_t esgl_pool_lock; - struct buf_describe esgl_buff_list; -}; - -/* little endian */ -struct unf_port_id_page { - struct list_head list_node_rscn; - u8 port_id_port; - u8 port_id_area; - u8 port_id_domain; - u8 addr_format : 2; - u8 event_qualifier : 4; - u8 reserved : 2; -}; - -struct unf_rscn_mgr { - spinlock_t rscn_id_list_lock; - u32 free_rscn_count; - struct list_head list_free_rscn_page; - struct list_head list_using_rscn_page; - void *rscn_pool_add; - struct unf_port_id_page *(*unf_get_free_rscn_node)(void *rscn_mg); - void (*unf_release_rscn_node)(void *rscn_mg, void *rscn_node); -}; - -struct unf_disc_rport_mg { - void *disc_pool_add; - struct list_head list_disc_rports_pool; - struct list_head list_disc_rports_busy; -}; - -struct unf_disc_manage_info { - struct list_head list_head; - spinlock_t disc_event_list_lock; - atomic_t disc_contrl_size; - - u32 thread_exit; - struct task_struct *thread; -}; - -struct unf_disc { - u32 retry_count; - u32 max_retry_count; - u32 disc_flag; - - struct completion *disc_completion; - atomic_t disc_ref_cnt; - - struct list_head list_busy_rports; - struct list_head list_delete_rports; - struct list_head list_destroy_rports; - - spinlock_t rport_busy_pool_lock; - - struct unf_lport *lport; - enum unf_disc_state states; - struct delayed_work disc_work; - - /* Disc operation template */ - struct unf_cm_disc_mg_template disc_temp; - - /* UNF_INIT_DISC/UNF_RSCN_DISC */ - u32 disc_option; - - /* RSCN list */ - struct unf_rscn_mgr rscn_mgr; - struct unf_disc_rport_mg disc_rport_mgr; - struct unf_disc_manage_info disc_thread_info; - - u64 last_disc_jiff; -}; - -enum unf_service_item { - UNF_SERVICE_ITEM_FLOGI = 0, - UNF_SERVICE_ITEM_PLOGI, - UNF_SERVICE_ITEM_PRLI, - UNF_SERVICE_ITEM_RSCN, - UNF_SERVICE_ITEM_ABTS, - UNF_SERVICE_ITEM_PDISC, - UNF_SERVICE_ITEM_ADISC, - UNF_SERVICE_ITEM_LOGO, - UNF_SERVICE_ITEM_SRR, - UNF_SERVICE_ITEM_RRQ, - UNF_SERVICE_ITEM_ECHO, - UNF_SERVICE_BUTT -}; - -/* Link service counter */ -struct unf_link_service_collect { - u64 service_cnt[UNF_SERVICE_BUTT]; -}; - -struct unf_pcie_error_count { - u32 pcie_error_count[UNF_PCIE_BUTT]; -}; - -#define INVALID_WWPN 0 - -enum unf_device_scsi_state { - UNF_SCSI_ST_INIT = 0, - UNF_SCSI_ST_OFFLINE, - UNF_SCSI_ST_ONLINE, - UNF_SCSI_ST_DEAD, - UNF_SCSI_ST_BUTT -}; - -struct unf_wwpn_dfx_counter_info { - atomic64_t io_done_cnt[UNF_MAX_IO_RETURN_VALUE]; - atomic64_t scsi_cmd_cnt[UNF_MAX_SCSI_CMD]; - atomic64_t target_busy; - atomic64_t host_busy; - atomic_t error_handle[UNF_SCSI_ERROR_HANDLE_BUTT]; - atomic_t error_handle_result[UNF_SCSI_ERROR_HANDLE_BUTT]; - atomic_t device_alloc; - atomic_t device_destroy; -}; - -#define UNF_MAX_LUN_PER_TARGET 256 -struct unf_wwpn_rport_info { - u64 wwpn; - struct unf_rport *rport; /* Rport which linkup */ - void *lport; /* Lport */ - u32 target_id; /* target_id distributed by scsi */ - u32 las_ten_scsi_state; - atomic_t scsi_state; - struct unf_wwpn_dfx_counter_info *dfx_counter; - struct delayed_work loss_tmo_work; - bool need_scan; - struct list_head fc_lun_list; - u8 *lun_qos_level; -}; - -struct unf_rport_scsi_id_image { - spinlock_t scsi_image_table_lock; - struct unf_wwpn_rport_info - *wwn_rport_info_table; - u32 max_scsi_id; -}; - -enum unf_lport_dirty_flag { - UNF_LPORT_DIRTY_FLAG_NONE = 0, - 
UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY = 0x100, - UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY = 0x200, - UNF_LPORT_DIRTY_FLAG_DISC_DIRTY = 0x400, - UNF_LPORT_DIRTY_FLAG_BUTT -}; - -typedef struct unf_rport *(*unf_rport_set_qualifier)(struct unf_lport *lport, - struct unf_rport *rport_by_nport_id, - struct unf_rport *rport_by_wwpn, - u64 wwpn, u32 sid); - -typedef u32 (*unf_tmf_status_recovery)(void *rport, void *xchg); - -enum unf_start_work_state { - UNF_START_WORK_STOP, - UNF_START_WORK_BEGIN, - UNF_START_WORK_COMPLETE -}; - -struct unf_qos_info { - u64 wwpn; - u32 nport_id; - enum unf_rport_qos_level qos_level; - struct list_head entry_qos_info; -}; - -struct unf_ini_private_info { - u32 driver_type; /* Driver Type */ - void *lower; /* driver private pointer */ -}; - -struct unf_product_host_info { - void *tgt_host; - struct Scsi_Host *host; - struct unf_ini_private_info drv_private_info; - struct Scsi_Host scsihost; -}; - -struct unf_lport { - u32 port_type; /* Port Type, fc or fcoe */ - atomic_t port_ref_cnt; /* LPort reference counter */ - void *fc_port; /* hard adapter hba pointer */ - void *rport, *drport; /* Used for SCSI interface */ - void *vport; - ulong system_io_bus_num; - - struct unf_product_host_info host_info; /* scsi host mg */ - struct unf_rport_scsi_id_image rport_scsi_table; - bool port_removing; - bool io_allowed; - bool port_dirt_exchange; - - spinlock_t xchg_mgr_lock; - struct list_head list_xchg_mgr_head; - struct list_head list_drty_xchg_mgr_head; - void *xchg_mgr[UNF_EXCHG_MGR_NUM]; - bool qos_cs_ctrl; - bool priority; - enum unf_rport_qos_level qos_level; - spinlock_t qos_mgr_lock; - struct list_head list_qos_head; - struct list_head list_vports_head; /* Vport Mg */ - struct list_head list_intergrad_vports; /* Vport intergrad list */ - struct list_head list_destroy_vports; /* Vport destroy list */ - - struct list_head entry_vport; /* VPort entry, hook in list_vports_head */ - - struct list_head entry_lport; /* LPort entry */ - spinlock_t lport_state_lock; /* UL Port Lock */ - struct unf_disc disc; /* Disc and rport Mg */ - struct unf_rport_pool rport_pool; /* rport pool, Vport shares Lport pool */ - struct unf_esgl_pool esgl_pool; /* external sgl pool */ - u32 port_id; /* Port Management, 0x11000 etc. 
*/ - enum unf_lport_login_state states; - u32 link_up; - u32 speed; - - u64 node_name; - u64 port_name; - u64 fabric_node_name; - u32 nport_id; - u32 max_frame_size; - u32 ed_tov; - u32 ra_tov; - u32 class_of_service; - u32 options; /* ini or tgt */ - u32 retries; - u32 max_retry_count; - enum unf_act_topo act_topo; - bool switch_state; /* TRUE---->ON,false---->OFF */ - bool last_switch_state; /* TRUE---->ON,false---->OFF */ - bool bbscn_support; /* TRUE---->ON,false---->OFF */ - - enum unf_start_work_state start_work_state; - struct unf_cm_xchg_mgr_template xchg_mgr_temp; /* Xchg Mg operation template */ - struct unf_cm_lport_template lport_mgr_temp; /* LPort operation template */ - struct unf_low_level_functioon_op low_level_func; - struct unf_event_mgr event_mgr; /* Event manager */ - struct delayed_work retry_work; /* poll work or delay work */ - - struct workqueue_struct *link_event_wq; - struct workqueue_struct *xchg_wq; - atomic64_t io_stat[UNF_MAX_IO_TYPE_STAT_NUM]; - struct unf_err_code err_code_sum; /* Error code counter */ - struct unf_port_dynamic_info port_dynamic_info; - struct unf_link_service_collect link_service_info; - struct unf_pcie_error_count pcie_error_cnt; - unf_rport_set_qualifier unf_qualify_rport; /* Qualify Rport */ - - unf_tmf_status_recovery unf_tmf_abnormal_recovery; /* tmf marker recovery */ - - struct delayed_work route_timer_work; /* L_Port timer route */ - - u16 vp_index; /* Vport Index, Lport:0 */ - u16 path_id; - struct unf_vport_pool *vport_pool; /* Only for Lport */ - void *lport_mgr[UNF_MAX_LPRT_SCSI_ID_MAP]; - bool vport_remove_flags; - - void *root_lport; /* Point to physical Lport */ - - struct completion *lport_free_completion; /* Free LPort Completion */ - -#define UNF_LPORT_NOP 1 -#define UNF_LPORT_NORMAL 0 - - atomic_t lport_no_operate_flag; - - bool loop_back_test_mode; - bool switch_state_before_test_mode; /* TRUE---->ON,false---->OFF */ - u32 enhanced_features; /* Enhanced Features */ - - u32 destroy_step; - u32 dirty_flag; - struct unf_chip_manage_info *chip_info; - - u8 unique_position; - u8 sfp_power_fault_count; - u8 sfp_9545_fault_count; - u64 last_tx_fault_jif; /* SFP last tx fault jiffies */ - u32 target_cnt; - /* Server card: UNF_FC_SERVER_BOARD_32_G(6) for 32G mode, - * UNF_FC_SERVER_BOARD_16_G(7) for 16G mode - */ - u32 card_type; - atomic_t scsi_session_add_success; - atomic_t scsi_session_add_failed; - atomic_t scsi_session_del_success; - atomic_t scsi_session_del_failed; - atomic_t add_start_work_failed; - atomic_t add_closing_work_failed; - atomic_t device_alloc; - atomic_t device_destroy; - atomic_t session_loss_tmo; - atomic_t alloc_scsi_id; - atomic_t resume_scsi_id; - atomic_t reuse_scsi_id; - atomic64_t last_exchg_mgr_idx; - atomic_t host_no; - atomic64_t exchg_index; - int scan_world_id; - struct semaphore wmi_task_sema; - bool ready_to_remove; - u32 pcie_link_down_cnt; - bool pcie_link_down; - u8 fw_version[SPFC_VER_LEN]; - atomic_t link_lose_tmo; - u32 max_ssq_num; -}; - -void unf_lport_state_ma(struct unf_lport *lport, enum unf_lport_event lport_event); -void unf_lport_error_recovery(struct unf_lport *lport); -void unf_set_lport_state(struct unf_lport *lport, enum unf_lport_login_state state); -void unf_init_port_parms(struct unf_lport *lport); -u32 unf_lport_enter_flogi(struct unf_lport *lport); -void unf_lport_enter_sns_plogi(struct unf_lport *lport); -u32 unf_init_disc_mgr(struct unf_lport *lport); -u32 unf_init_lport_route(struct unf_lport *lport); -void unf_destroy_lport_route(struct unf_lport *lport); 
-void unf_reset_lport_params(struct unf_lport *lport); -void unf_cm_mark_dirty_mem(struct unf_lport *lport, enum unf_lport_dirty_flag type); -struct unf_lport *unf_cm_lookup_vport_by_vp_index(struct unf_lport *lport, u16 vp_index); -struct unf_lport *unf_cm_lookup_vport_by_did(struct unf_lport *lport, u32 did); -struct unf_lport *unf_cm_lookup_vport_by_wwpn(struct unf_lport *lport, u64 wwpn); -void unf_cm_vport_remove(struct unf_lport *vport); - -#endif diff --git a/drivers/scsi/spfc/common/unf_ls.c b/drivers/scsi/spfc/common/unf_ls.c deleted file mode 100644 index 6a2e1fd1872f73157fd64ccc9cf96e7e2521567a..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_ls.c +++ /dev/null @@ -1,4883 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_ls.h" -#include "unf_log.h" -#include "unf_service.h" -#include "unf_portman.h" -#include "unf_gs.h" -#include "unf_npiv.h" - -static void unf_flogi_acc_ob_callback(struct unf_xchg *xchg); -static void unf_plogi_acc_ob_callback(struct unf_xchg *xchg); -static void unf_prli_acc_ob_callback(struct unf_xchg *xchg); -static void unf_rscn_acc_ob_callback(struct unf_xchg *xchg); -static void unf_pdisc_acc_ob_callback(struct unf_xchg *xchg); -static void unf_adisc_acc_ob_callback(struct unf_xchg *xchg); -static void unf_logo_acc_ob_callback(struct unf_xchg *xchg); -static void unf_logo_ob_callback(struct unf_xchg *xchg); -static void unf_logo_callback(void *lport, void *rport, void *xchg); -static void unf_rrq_callback(void *lport, void *rport, void *xchg); -static void unf_rrq_ob_callback(struct unf_xchg *xchg); -static void unf_lport_update_nport_id(struct unf_lport *lport, u32 nport_id); -static void -unf_lport_update_time_params(struct unf_lport *lport, - struct unf_flogi_fdisc_payload *flogi_payload); - -static void unf_login_with_rport_in_n2n(struct unf_lport *lport, - u64 remote_port_name, - u64 remote_node_name); -#define UNF_LOWLEVEL_BBCREDIT 0x6 -#define UNF_DEFAULT_BB_SC_N 0 - -#define UNF_ECHO_REQ_SIZE 0 -#define UNF_ECHO_WAIT_SEM_TIMEOUT(lport) (2 * (ulong)(lport)->ra_tov) - -#define UNF_SERVICE_COLLECT(service_collect, item) \ - do { \ - if ((item) < UNF_SERVICE_BUTT) { \ - (service_collect).service_cnt[(item)]++; \ - } \ - } while (0) - -static void unf_check_rport_need_delay_prli(struct unf_lport *lport, - struct unf_rport *rport, - u32 port_feature) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - port_feature &= UNF_PORT_MODE_BOTH; - - /* Used for: L_Port has INI mode & R_Port is not SW */ - if (rport->nport_id < UNF_FC_FID_DOM_MGR) { - /* - * 1. immediately: R_Port only with TGT, or - * L_Port only with INI & R_Port has TGT mode, send PRLI - * immediately - */ - if ((port_feature == UNF_PORT_MODE_TGT || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) || - (UNF_PORT_MODE_TGT == (port_feature & UNF_PORT_MODE_TGT))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) send PRLI", - lport->port_id, lport->nport_id, - rport->nport_id, port_feature); - ret = unf_send_prli(lport, rport, ELS_PRLI); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) send PRLI failed", - lport->port_id, lport->nport_id, - rport->nport_id, port_feature); - - unf_rport_error_recovery(rport); - } - } - /* 2. 
R_Port has BOTH mode or unknown, Delay to send PRLI */ - else if (port_feature != UNF_PORT_MODE_INI) { - /* Prevent: PRLI done before PLOGI */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) delay to send PRLI", - lport->port_id, lport->nport_id, - rport->nport_id, port_feature); - - /* Delay to send PRLI to R_Port */ - unf_rport_delay_login(rport); - } else { - /* 3. R_Port only with INI mode: wait for R_Port's PRLI: - * Do not care - */ - /* Cancel recovery(timer) work */ - if (delayed_work_pending(&rport->recovery_work)) { - if (cancel_delayed_work(&rport->recovery_work)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) is pure INI", - lport->port_id, - lport->nport_id, - rport->nport_id, - port_feature); - - unf_rport_ref_dec(rport); - } - } - - /* Server: R_Port only support INI, do not care this - * case - */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) wait for PRLI", - lport->port_id, lport->nport_id, - rport->nport_id, port_feature); - } - } -} - -static u32 unf_low_level_bb_credit(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 bb_credit = UNF_LOWLEVEL_BBCREDIT; - - if (unlikely(!lport)) - return bb_credit; - - unf_lport = lport; - - if (unlikely(!unf_lport->low_level_func.port_mgr_op.ll_port_config_get)) - return bb_credit; - - ret = unf_lport->low_level_func.port_mgr_op.ll_port_config_get((void *)unf_lport->fc_port, - UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, - (void *)&bb_credit); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[warn]Port(0x%x) get BB_Credit failed, use default value(%d)", - unf_lport->port_id, UNF_LOWLEVEL_BBCREDIT); - - bb_credit = UNF_LOWLEVEL_BBCREDIT; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) with BB_Credit(%u)", unf_lport->port_id, - bb_credit); - - return bb_credit; -} - -u32 unf_low_level_bb_scn(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = NULL; - struct unf_low_level_port_mgr_op *port_mgr = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 bb_scn = UNF_DEFAULT_BB_SC_N; - - if (unlikely(!lport)) - return bb_scn; - - unf_lport = lport; - port_mgr = &unf_lport->low_level_func.port_mgr_op; - - if (unlikely(!port_mgr->ll_port_config_get)) - return bb_scn; - - ret = port_mgr->ll_port_config_get((void *)unf_lport->fc_port, - UNF_PORT_CFG_GET_WORKBALE_BBSCN, - (void *)&bb_scn); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[warn]Port(0x%x) get bbscn failed, use default value(%d)", - unf_lport->port_id, UNF_DEFAULT_BB_SC_N); - - bb_scn = UNF_DEFAULT_BB_SC_N; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x)'s bbscn(%d)", unf_lport->port_id, bb_scn); - - return bb_scn; -} - -static void unf_fill_rec_pld(struct unf_rec_pld *rec_pld, u32 sid) -{ - FC_CHECK_RETURN_VOID(rec_pld); - - rec_pld->rec_cmnd = (UNF_ELS_CMND_REC); - rec_pld->xchg_org_sid = sid; - rec_pld->ox_id = INVALID_VALUE16; - rec_pld->rx_id = INVALID_VALUE16; -} - -u32 unf_send_rec(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *io_xchg) -{ - struct unf_rec_pld *rec_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - 
FC_CHECK_RETURN_VALUE(io_xchg, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for PLOGI", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_REC; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - pkg.origin_hottag = io_xchg->hotpooltag; - pkg.origin_magicnum = io_xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - rec_pld = &fc_entry->rec.rec_pld; - memset(rec_pld, 0, sizeof(struct unf_rec_pld)); - - unf_fill_rec_pld(rec_pld, lport->nport_id); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]LOGIN: Send REC %s. Port(0x%x_0x%x_0x%llx)--->RPort(0x%x_0x%llx) with hottag(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - lport->nport_id, lport->port_name, rport->nport_id, - rport->port_name, xchg->hotpooltag); - - return ret; -} - -static void unf_fill_flogi_pld(struct unf_flogi_fdisc_payload *flogi_pld, - struct unf_lport *lport) -{ - struct unf_fabric_parm *fabric_parms = NULL; - - FC_CHECK_RETURN_VOID(flogi_pld); - FC_CHECK_RETURN_VOID(lport); - - fabric_parms = &flogi_pld->fabric_parms; - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT || - lport->act_topo == UNF_TOP_P2P_MASK) { - /* Fabric or P2P or FCoE VN2VN topology */ - fabric_parms->co_parms.bb_credit = unf_low_level_bb_credit(lport); - fabric_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - fabric_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - fabric_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - fabric_parms->co_parms.bbscn = unf_low_level_bb_scn(lport); - } else { - /* Loop topology here */ - fabric_parms->co_parms.clean_address = UNF_CLEAN_ADDRESS_DEFAULT; - fabric_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; - fabric_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - fabric_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - fabric_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_LPORT; - fabric_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - } - - if (lport->low_level_func.support_max_npiv_num != 0) - /* support NPIV */ - fabric_parms->co_parms.clean_address = 1; - - fabric_parms->cl_parms[ARRAY_INDEX_2].valid = UNF_CLASS_VALID; - - /* according the user value to set the priority */ - if (lport->qos_cs_ctrl) - fabric_parms->cl_parms[ARRAY_INDEX_2].priority = UNF_PRIORITY_ENABLE; - else - fabric_parms->cl_parms[ARRAY_INDEX_2].priority = UNF_PRIORITY_DISABLE; - - fabric_parms->cl_parms[ARRAY_INDEX_2].sequential_delivery = UNF_SEQUEN_DELIVERY_REQ; - fabric_parms->cl_parms[ARRAY_INDEX_2].received_data_field_size = (lport->max_frame_size); - - fabric_parms->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - fabric_parms->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - fabric_parms->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - fabric_parms->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); -} - -u32 unf_send_flogi(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_xchg *xchg = NULL; - struct unf_flogi_fdisc_payload *flogi_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - 
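/*
 * FLOGI transmit path: allocate an SFS exchange, hook up the completion
 * callbacks (unf_flogi_callback for a received ACC/RJT,
 * unf_flogi_ob_callback for a send failure), fill the common service
 * parameters, and hand the frame to the low-level driver; the exchange
 * is freed again if the send itself fails.
 */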
FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for FLOGI", - lport->port_id); - - return ret; - } - - /* FLOGI */ - xchg->cmnd_code = ELS_FLOGI; - - /* for rcvd flogi acc/rjt processor */ - xchg->callback = unf_flogi_callback; - /* for send flogi failed processor */ - xchg->ob_callback = unf_flogi_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - flogi_pld = &fc_entry->flogi.flogi_payload; - memset(flogi_pld, 0, sizeof(struct unf_flogi_fdisc_payload)); - unf_fill_flogi_pld(flogi_pld, lport); - flogi_pld->cmnd = (UNF_ELS_CMND_FLOGI); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Begin to send FLOGI. Port(0x%x)--->RPort(0x%x) with hottag(0x%x)", - lport->port_id, rport->nport_id, xchg->hotpooltag); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, flogi_pld, - sizeof(struct unf_flogi_fdisc_payload)); - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]LOGIN: Send FLOGI failed. Port(0x%x)--->RPort(0x%x)", - lport->port_id, rport->nport_id); - - unf_cm_free_xchg((void *)lport, (void *)xchg); - } - - return ret; -} - -u32 unf_send_fdisc(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_xchg *exch = NULL; - struct unf_flogi_fdisc_payload *fdisc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - exch = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!exch) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for FDISC", - lport->port_id); - - return ret; - } - - exch->cmnd_code = ELS_FDISC; - - exch->callback = unf_fdisc_callback; - exch->ob_callback = unf_fdisc_ob_callback; - - unf_fill_package(&pkg, exch, rport); - pkg.type = UNF_PKG_ELS_REQ; - - fdisc_pld = &fc_entry->fdisc.fdisc_payload; - memset(fdisc_pld, 0, sizeof(struct unf_flogi_fdisc_payload)); - unf_fill_flogi_pld(fdisc_pld, lport); - fdisc_pld->cmnd = UNF_ELS_CMND_FDISC; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, exch); - - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)exch); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: FDISC send %s. Port(0x%x)--->RPort(0x%x) with hottag(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, exch->hotpooltag); - - return ret; -} - -static void unf_fill_plogi_pld(struct unf_plogi_payload *plogi_pld, - struct unf_lport *lport) -{ - struct unf_lgn_parm *login_parms = NULL; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(plogi_pld); - FC_CHECK_RETURN_VOID(lport); - - unf_lport = lport->root_lport; - plogi_pld->cmnd = (UNF_ELS_CMND_PLOGI); - login_parms = &plogi_pld->stparms; - - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - /* P2P or Fabric mode or FCoE VN2VN */ - login_parms->co_parms.bb_credit = (unf_low_level_bb_credit(lport)); - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_NFPORT; - login_parms->co_parms.bbscn = - (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) - ? 
0 - : unf_low_level_bb_scn(lport); - } else { - /* Public loop & Private loop mode */ - login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_LPORT; - } - - login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - login_parms->co_parms.continuously_increasing = UNF_CONTIN_INCREASE_SUPPORT; - login_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - login_parms->co_parms.nport_total_concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); - login_parms->co_parms.e_d_tov = UNF_DEFAULT_EDTOV; - if (unf_lport->priority == UNF_PRIORITY_ENABLE) { - login_parms->cl_parms[ARRAY_INDEX_2].priority = - UNF_PRIORITY_ENABLE; - } else { - login_parms->cl_parms[ARRAY_INDEX_2].priority = - UNF_PRIORITY_DISABLE; - } - - /* for class_3 */ - login_parms->cl_parms[ARRAY_INDEX_2].valid = UNF_CLASS_VALID; - login_parms->cl_parms[ARRAY_INDEX_2].received_data_field_size = (lport->max_frame_size); - login_parms->cl_parms[ARRAY_INDEX_2].concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->cl_parms[ARRAY_INDEX_2].open_sequence_per_exchange = (UNF_PLOGI_SEQ_PER_XCHG); - - login_parms->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - login_parms->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - login_parms->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - login_parms->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, plogi_pld, sizeof(struct unf_plogi_payload)); -} - -u32 unf_send_plogi(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_plogi_payload *plogi_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for PLOGI", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_PLOGI; - - xchg->callback = unf_plogi_callback; - xchg->ob_callback = unf_plogi_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - unf_cm_xchg_mgr_abort_io_by_id(lport, rport, xchg->sid, xchg->did, 0); - - plogi_pld = &fc_entry->plogi.payload; - memset(plogi_pld, 0, sizeof(struct unf_plogi_payload)); - unf_fill_plogi_pld(plogi_pld, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send PLOGI %s. Port(0x%x_0x%x_0x%llx)--->RPort(0x%x_0x%llx) with hottag(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - lport->nport_id, lport->port_name, rport->nport_id, - rport->port_name, xchg->hotpooltag); - - return ret; -} - -static void unf_fill_logo_pld(struct unf_logo_payload *logo_pld, - struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(logo_pld); - FC_CHECK_RETURN_VOID(lport); - - logo_pld->cmnd = (UNF_ELS_CMND_LOGO); - logo_pld->nport_id = (lport->nport_id); - logo_pld->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - logo_pld->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, logo_pld, sizeof(struct unf_logo_payload)); -} - -u32 unf_send_logo(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_logo_payload *logo_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for LOGO", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_LOGO; - /* retry or link down immediately */ - xchg->callback = unf_logo_callback; - /* do nothing */ - xchg->ob_callback = unf_logo_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - logo_pld = &fc_entry->logo.payload; - memset(logo_pld, 0, sizeof(struct unf_logo_payload)); - unf_fill_logo_pld(logo_pld, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - rport->logo_retries++; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]LOGIN: LOGO send %s. Port(0x%x)--->RPort(0x%x) hottag(0x%x) Retries(%d)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, xchg->hotpooltag, rport->logo_retries); - - return ret; -} - -u32 unf_send_logo_by_did(struct unf_lport *lport, u32 did) -{ - struct unf_logo_payload *logo_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, did, NULL, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for LOGO", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_LOGO; - - unf_fill_package(&pkg, xchg, NULL); - pkg.type = UNF_PKG_ELS_REQ; - - logo_pld = &fc_entry->logo.payload; - memset(logo_pld, 0, sizeof(struct unf_logo_payload)); - unf_fill_logo_pld(logo_pld, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: LOGO send %s. Port(0x%x)--->RPort(0x%x) with hottag(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - did, xchg->hotpooltag); - - return ret; -} - -static void unf_echo_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_rport *unf_rport = (struct unf_rport *)rport; - struct unf_xchg *unf_xchg = NULL; - struct unf_echo_payload *echo_rsp_pld = NULL; - u32 cmnd = 0; - u32 mag_ver_local = 0; - u32 mag_ver_remote = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_xchg = (struct unf_xchg *)xchg; - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) - return; - - echo_rsp_pld = unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.echo_pld; - FC_CHECK_RETURN_VOID(echo_rsp_pld); - - if (unf_xchg->byte_orders & UNF_BIT_2) { - unf_big_end_to_cpu((u8 *)echo_rsp_pld, sizeof(struct unf_echo_payload)); - cmnd = echo_rsp_pld->cmnd; - } else { - cmnd = echo_rsp_pld->cmnd; - } - - mag_ver_local = echo_rsp_pld->data[ARRAY_INDEX_0]; - mag_ver_remote = echo_rsp_pld->data[ARRAY_INDEX_1]; - - if (UNF_ELS_CMND_ACC == (cmnd & UNF_ELS_CMND_HIGH_MASK)) { - if (mag_ver_local == ECHO_MG_VERSION_LOCAL && - mag_ver_remote == ECHO_MG_VERSION_REMOTE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. local snd echo:(0x%x), remote rcv echo:(0x%x), remote snd echo acc:(0x%x), local rcv echo acc:(0x%x)", - unf_lport->port_id, unf_rport->nport_id, - unf_xchg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME], - unf_xchg->private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME], - unf_xchg->private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME], - unf_xchg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); - } else if ((mag_ver_local == ECHO_MG_VERSION_LOCAL) && - (mag_ver_remote != ECHO_MG_VERSION_REMOTE)) { - /* the peer don't supprt smartping, only local snd and - * rcv rsp time stamp - */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. local snd echo:(0x%x), local rcv echo acc:(0x%x)", - unf_lport->port_id, unf_rport->nport_id, - unf_xchg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME], - unf_xchg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); - } else if ((mag_ver_local != ECHO_MG_VERSION_LOCAL) && - (mag_ver_remote != ECHO_MG_VERSION_REMOTE)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, - "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. 
local and remote is not FC HBA", - unf_lport->port_id, unf_rport->nport_id); - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send ECHO to RPort(0x%x) and received RJT", - unf_lport->port_id, unf_rport->nport_id); - } - - unf_xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK; - unf_xchg->echo_info.response_time = jiffies - unf_xchg->echo_info.response_time; - - /* wake up semaphore */ - up(&unf_xchg->echo_info.echo_sync_sema); -} - -static void unf_echo_ob_callback(struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - unf_lport = xchg->lport; - FC_CHECK_RETURN_VOID(unf_lport); - unf_rport = xchg->rport; - FC_CHECK_RETURN_VOID(unf_rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send ECHO to RPort(0x%x) but timeout", - unf_lport->port_id, unf_rport->nport_id); - - xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_FAIL; - - /* wake up semaphore */ - up(&xchg->echo_info.echo_sync_sema); -} - -u32 unf_send_echo(struct unf_lport *lport, struct unf_rport *rport, u32 *time) -{ - struct unf_echo_payload *echo_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - ulong delay = 0; - dma_addr_t phy_echo_addr; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(time, UNF_RETURN_ERROR); - - delay = UNF_ECHO_WAIT_SEM_TIMEOUT(lport); - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for ECHO", - lport->port_id); - - return ret; - } - - /* ECHO */ - xchg->cmnd_code = ELS_ECHO; - xchg->fcp_sfs_union.sfs_entry.cur_offset = UNF_ECHO_REQ_SIZE; - - /* Set callback function, wake up semaphore */ - xchg->callback = unf_echo_callback; - /* wake up semaphore */ - xchg->ob_callback = unf_echo_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - echo_pld = (struct unf_echo_payload *)unf_get_one_big_sfs_buf(xchg); - if (!echo_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't allocate buffer for ECHO", - lport->port_id); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - fc_entry->echo.echo_pld = echo_pld; - phy_echo_addr = pci_map_single(lport->low_level_func.dev, echo_pld, - UNF_ECHO_PAYLOAD_LEN, - DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(lport->low_level_func.dev, phy_echo_addr)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) pci map err", lport->port_id); - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - fc_entry->echo.phy_echo_addr = phy_echo_addr; - memset(echo_pld, 0, sizeof(struct unf_echo_payload)); - echo_pld->cmnd = (UNF_ELS_CMND_ECHO); - echo_pld->data[ARRAY_INDEX_0] = ECHO_MG_VERSION_LOCAL; - - ret = unf_xchg_ref_inc(xchg, SEND_ELS); - FC_CHECK_RETURN_VALUE((ret == RETURN_OK), UNF_RETURN_ERROR); - - xchg->echo_info.response_time = jiffies; - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) { - unf_cm_free_xchg((void *)lport, (void *)xchg); - } else { - if (down_timeout(&xchg->echo_info.echo_sync_sema, - (long)msecs_to_jiffies((u32)delay))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]ECHO send %s. Port(0x%x)--->RPort(0x%x) but response timeout ", - (ret != RETURN_OK) ? 
"failed" : "succeed", - lport->port_id, rport->nport_id); - - xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_FAIL; - } - - if (xchg->echo_info.echo_result == UNF_ELS_ECHO_RESULT_FAIL) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "Echo send fail or timeout"); - - ret = UNF_RETURN_ERROR; - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "echo acc rsp,echo_cmd_snd(0x%xus)-->echo_cmd_rcv(0x%xus)-->echo_acc_ snd(0x%xus)-->echo_acc_rcv(0x%xus).", - xchg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME], - xchg->private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME], - xchg->private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME], - xchg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); - - *time = - (xchg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME] - - xchg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME]) - - (xchg->private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME] - - xchg->private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME]); - } - } - - pci_unmap_single(lport->low_level_func.dev, phy_echo_addr, - UNF_ECHO_PAYLOAD_LEN, DMA_BIDIRECTIONAL); - fc_entry->echo.phy_echo_addr = 0; - unf_xchg_ref_dec(xchg, SEND_ELS); - - return ret; -} - -static void unf_fill_prli_pld(struct unf_prli_payload *prli_pld, - struct unf_lport *lport) -{ - u32 pld_len = 0; - - FC_CHECK_RETURN_VOID(prli_pld); - FC_CHECK_RETURN_VOID(lport); - - pld_len = sizeof(struct unf_prli_payload) - UNF_PRLI_SIRT_EXTRA_SIZE; - prli_pld->cmnd = - (UNF_ELS_CMND_PRLI | - ((u32)UNF_FC4_FRAME_PAGE_SIZE << UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | - ((u32)pld_len)); - - prli_pld->parms[ARRAY_INDEX_0] = (UNF_FC4_FRAME_PARM_0_FCP | UNF_FC4_FRAME_PARM_0_I_PAIR); - prli_pld->parms[ARRAY_INDEX_1] = UNF_NOT_MEANINGFUL; - prli_pld->parms[ARRAY_INDEX_2] = UNF_NOT_MEANINGFUL; - - /* About Read Xfer_rdy disable */ - prli_pld->parms[ARRAY_INDEX_3] = (UNF_FC4_FRAME_PARM_3_R_XFER_DIS | lport->options); - - /* About FCP confirm */ - if (lport->low_level_func.lport_cfg_items.fcp_conf) - prli_pld->parms[ARRAY_INDEX_3] |= UNF_FC4_FRAME_PARM_3_CONF_ALLOW; - - /* About Tape support */ - if (lport->low_level_func.lport_cfg_items.tape_support) - prli_pld->parms[ARRAY_INDEX_3] |= - (UNF_FC4_FRAME_PARM_3_REC_SUPPORT | - UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT | - UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT | - UNF_FC4_FRAME_PARM_3_CONF_ALLOW); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x)'s PRLI payload: options(0x%x) parameter-3(0x%x)", - lport->port_id, lport->options, - prli_pld->parms[ARRAY_INDEX_3]); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, prli_pld, sizeof(struct unf_prli_payload)); -} - -u32 unf_send_prli(struct unf_lport *lport, struct unf_rport *rport, - u32 cmnd_code) -{ - struct unf_prli_payload *prli_pal = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for PRLI", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = cmnd_code; - - /* for rcvd prli acc/rjt processer */ - xchg->callback = unf_prli_callback; - /* for send prli failed processer */ - xchg->ob_callback = unf_prli_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - prli_pal = &fc_entry->prli.payload; - memset(prli_pal, 0, sizeof(struct unf_prli_payload)); - unf_fill_prli_pld(prli_pal, lport); - - 
ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PRLI send %s. Port(0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id); - - return ret; -} - -static void unf_fill_prlo_pld(struct unf_prli_payload *prlo_pld, - struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(prlo_pld); - FC_CHECK_RETURN_VOID(lport); - - prlo_pld->cmnd = (UNF_ELS_CMND_PRLO); - prlo_pld->parms[ARRAY_INDEX_0] = (UNF_FC4_FRAME_PARM_0_FCP); - prlo_pld->parms[ARRAY_INDEX_1] = UNF_NOT_MEANINGFUL; - prlo_pld->parms[ARRAY_INDEX_2] = UNF_NOT_MEANINGFUL; - prlo_pld->parms[ARRAY_INDEX_3] = UNF_NO_SERVICE_PARAMS; - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, prlo_pld, sizeof(struct unf_prli_payload)); -} - -u32 unf_send_prlo(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_prli_payload *prlo_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for PRLO", lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_PRLO; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - prlo_pld = &fc_entry->prlo.payload; - memset(prlo_pld, 0, sizeof(struct unf_prli_payload)); - unf_fill_prlo_pld(prlo_pld, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PRLO send %s. Port(0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id); - - return ret; -} - -static void unf_fill_pdisc_pld(struct unf_plogi_payload *pdisc_pld, - struct unf_lport *lport) -{ - struct unf_lgn_parm *login_parms = NULL; - - FC_CHECK_RETURN_VOID(pdisc_pld); - FC_CHECK_RETURN_VOID(lport); - - pdisc_pld->cmnd = (UNF_ELS_CMND_PDISC); - login_parms = &pdisc_pld->stparms; - - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - /* P2P & Fabric */ - login_parms->co_parms.bb_credit = (unf_low_level_bb_credit(lport)); - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_NFPORT; - login_parms->co_parms.bbscn = - (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) - ? 
0 - : unf_low_level_bb_scn(lport); - } else { - /* Public loop & Private loop */ - login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; - /* :1 */ - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_LPORT; - } - - login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - login_parms->co_parms.continuously_increasing = UNF_CONTIN_INCREASE_SUPPORT; - login_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - login_parms->co_parms.nport_total_concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); - login_parms->co_parms.e_d_tov = (lport->ed_tov); - - login_parms->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - login_parms->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - login_parms->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - login_parms->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - /* class-3 */ - login_parms->cl_parms[ARRAY_INDEX_2].valid = UNF_CLASS_VALID; - login_parms->cl_parms[ARRAY_INDEX_2].received_data_field_size = (lport->max_frame_size); - login_parms->cl_parms[ARRAY_INDEX_2].concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->cl_parms[ARRAY_INDEX_2].open_sequence_per_exchange = (UNF_PLOGI_SEQ_PER_XCHG); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, pdisc_pld, sizeof(struct unf_plogi_payload)); -} - -u32 unf_send_pdisc(struct unf_lport *lport, struct unf_rport *rport) -{ - /* PLOGI/PDISC with same payload */ - struct unf_plogi_payload *pdisc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for PDISC", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_PDISC; - xchg->callback = NULL; - xchg->ob_callback = NULL; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - pdisc_pld = &fc_entry->pdisc.payload; - memset(pdisc_pld, 0, sizeof(struct unf_plogi_payload)); - unf_fill_pdisc_pld(pdisc_pld, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PDISC send %s. Port(0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, rport->nport_id); - - return ret; -} - -static void unf_fill_adisc_pld(struct unf_adisc_payload *adisc_pld, - struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(adisc_pld); - FC_CHECK_RETURN_VOID(lport); - - adisc_pld->cmnd = (UNF_ELS_CMND_ADISC); - adisc_pld->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - adisc_pld->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - adisc_pld->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - adisc_pld->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, adisc_pld, sizeof(struct unf_adisc_payload)); -} - -u32 unf_send_adisc(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_adisc_payload *adisc_pal = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for ADISC", lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_ADISC; - - xchg->callback = NULL; - xchg->ob_callback = NULL; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - adisc_pal = &fc_entry->adisc.adisc_payl; - memset(adisc_pal, 0, sizeof(struct unf_adisc_payload)); - unf_fill_adisc_pld(adisc_pal, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: ADISC send %s. Port(0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id); - - return ret; -} - -static void unf_fill_rrq_pld(struct unf_rrq *rrq_pld, struct unf_xchg *xchg) -{ - FC_CHECK_RETURN_VOID(rrq_pld); - FC_CHECK_RETURN_VOID(xchg); - - rrq_pld->cmnd = UNF_ELS_CMND_RRQ; - rrq_pld->sid = xchg->sid; - rrq_pld->oxid_rxid = ((u32)xchg->oxid << UNF_SHIFT_16 | xchg->rxid); -} - -u32 unf_send_rrq(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - /* after ABTS Done */ - struct unf_rrq *rrq_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - unf_xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!unf_xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for RRQ", - lport->port_id); - - return ret; - } - - unf_xchg->cmnd_code = ELS_RRQ; /* RRQ */ - - unf_xchg->callback = unf_rrq_callback; /* release I/O exchange context */ - unf_xchg->ob_callback = unf_rrq_ob_callback; /* release I/O exchange context */ - unf_xchg->io_xchg = xchg; /* pointer to IO XCHG */ - - unf_fill_package(&pkg, unf_xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - rrq_pld = &fc_entry->rrq; - memset(rrq_pld, 0, sizeof(struct unf_rrq)); - unf_fill_rrq_pld(rrq_pld, xchg); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, unf_xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)unf_xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]RRQ send %s. 
Port(0x%x)--->RPort(0x%x) free old exchange(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, xchg->hotpooltag); - - return ret; -} - -u32 unf_send_flogi_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_flogi_fdisc_payload *flogi_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_FLOGI); - - xchg->did = 0; /* D_ID must be 0 */ - xchg->sid = UNF_FC_FID_FLOGI; /* S_ID must be 0xfffffe */ - xchg->oid = xchg->sid; - xchg->callback = NULL; - xchg->lport = lport; - xchg->rport = rport; - xchg->ob_callback = unf_flogi_acc_ob_callback; /* call back for sending - * FLOGI response - */ - - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - flogi_acc_pld = &fc_entry->flogi_acc.flogi_payload; - flogi_acc_pld->cmnd = (UNF_ELS_CMND_ACC); - unf_fill_flogi_pld(flogi_acc_pld, lport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]LOGIN: FLOGI ACC send %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - return ret; -} - -static void unf_fill_plogi_acc_pld(struct unf_plogi_payload *plogi_acc_pld, - struct unf_lport *lport) -{ - struct unf_lgn_parm *login_parms = NULL; - - FC_CHECK_RETURN_VOID(plogi_acc_pld); - FC_CHECK_RETURN_VOID(lport); - - plogi_acc_pld->cmnd = (UNF_ELS_CMND_ACC); - login_parms = &plogi_acc_pld->stparms; - - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - login_parms->co_parms.bb_credit = (unf_low_level_bb_credit(lport)); - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_NFPORT; /* 0 */ - login_parms->co_parms.bbscn = - (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) - ? 
0 - : unf_low_level_bb_scn(lport); - } else { - login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_LPORT; /* 1 */ - } - - login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - login_parms->co_parms.continuously_increasing = UNF_CONTIN_INCREASE_SUPPORT; - login_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - login_parms->co_parms.nport_total_concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); - login_parms->co_parms.e_d_tov = (lport->ed_tov); - login_parms->cl_parms[ARRAY_INDEX_2].valid = UNF_CLASS_VALID; /* class-3 */ - login_parms->cl_parms[ARRAY_INDEX_2].received_data_field_size = (lport->max_frame_size); - login_parms->cl_parms[ARRAY_INDEX_2].concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->cl_parms[ARRAY_INDEX_2].open_sequence_per_exchange = (UNF_PLOGI_SEQ_PER_XCHG); - login_parms->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - login_parms->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - login_parms->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - login_parms->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, plogi_acc_pld, - sizeof(struct unf_plogi_payload)); -} - -u32 unf_send_plogi_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_plogi_payload *plogi_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PLOGI); - - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->callback = NULL; - xchg->lport = lport; - xchg->rport = rport; - - xchg->ob_callback = unf_plogi_acc_ob_callback; /* call back for sending PLOGI ACC */ - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - plogi_acc_pld = &fc_entry->plogi_acc.payload; - unf_fill_plogi_acc_pld(plogi_acc_pld, lport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - if (rport->nport_id < UNF_FC_FID_DOM_MGR || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PLOGI ACC send %s. Port(0x%x_0x%x_0x%llx)--->RPort(0x%x_0x%llx) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", - lport->port_id, lport->nport_id, lport->port_name, - rport->nport_id, rport->port_name, ox_id, rx_id); - } - - return ret; -} - -static void unf_fill_prli_acc_pld(struct unf_prli_payload *prli_acc_pld, - struct unf_lport *lport, - struct unf_rport *rport) -{ - u32 port_mode = UNF_FC4_FRAME_PARM_3_TGT; - - FC_CHECK_RETURN_VOID(prli_acc_pld); - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - prli_acc_pld->cmnd = - (UNF_ELS_CMND_ACC | - ((u32)UNF_FC4_FRAME_PAGE_SIZE << UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | - ((u32)(sizeof(struct unf_prli_payload) - UNF_PRLI_SIRT_EXTRA_SIZE))); - - prli_acc_pld->parms[ARRAY_INDEX_0] = - (UNF_FC4_FRAME_PARM_0_FCP | UNF_FC4_FRAME_PARM_0_I_PAIR | - UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE); - prli_acc_pld->parms[ARRAY_INDEX_1] = UNF_NOT_MEANINGFUL; - prli_acc_pld->parms[ARRAY_INDEX_2] = UNF_NOT_MEANINGFUL; - - /* About INI/TGT mode */ - if (rport->nport_id < UNF_FC_FID_DOM_MGR) { - /* return INI (0x20): R_Port has TGT mode, L_Port has INI mode - */ - port_mode = UNF_FC4_FRAME_PARM_3_INI; - } else { - port_mode = lport->options; - } - - /* About Read xfer_rdy disable */ - prli_acc_pld->parms[ARRAY_INDEX_3] = - (UNF_FC4_FRAME_PARM_3_R_XFER_DIS | port_mode); /* 0x2 */ - - /* About Tape support */ - if (rport->tape_support_needed) { - prli_acc_pld->parms[ARRAY_INDEX_3] |= - (UNF_FC4_FRAME_PARM_3_REC_SUPPORT | - UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT | - UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT | - UNF_FC4_FRAME_PARM_3_CONF_ALLOW); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "PRLI ACC tape support"); - } - - /* About confirm */ - if (lport->low_level_func.lport_cfg_items.fcp_conf) - prli_acc_pld->parms[ARRAY_INDEX_3] |= - UNF_FC4_FRAME_PARM_3_CONF_ALLOW; /* 0x80 */ - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, prli_acc_pld, - sizeof(struct unf_prli_payload)); -} - -u32 unf_send_prli_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_prli_payload *prli_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PRLI); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - - xchg->callback = NULL; - xchg->ob_callback = - unf_prli_acc_ob_callback; /* callback when send succeed */ - - unf_fill_package(&pkg, xchg, rport); - - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - prli_acc_pld = &fc_entry->prli_acc.payload; - unf_fill_prli_acc_pld(prli_acc_pld, lport, rport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - if (rport->nport_id < UNF_FC_FID_DOM_MGR || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PRLI ACC send %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", - lport->port_id, rport->nport_id, ox_id, rx_id); - } - - return ret; -} - -u32 unf_send_rec_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - /* Reserved */ - unf_cm_free_xchg((void *)lport, (void *)xchg); - - return RETURN_OK; -} - -static void unf_rrq_acc_ob_callback(struct unf_xchg *xchg) -{ - FC_CHECK_RETURN_VOID(xchg); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]RRQ ACC Xchg(0x%p) tag(0x%x)", xchg, - xchg->hotpooltag); -} - -static void unf_fill_els_acc_pld(struct unf_els_acc *els_acc_pld) -{ - FC_CHECK_RETURN_VOID(els_acc_pld); - - els_acc_pld->cmnd = (UNF_ELS_CMND_ACC); -} - -u32 unf_send_rscn_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_els_acc *rscn_acc = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RSCN); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - - xchg->callback = NULL; - xchg->ob_callback = unf_rscn_acc_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - rscn_acc = &fc_entry->els_acc; - unf_fill_els_acc_pld(rscn_acc); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: RSCN ACC send %s. Port(0x%x)--->RPort(0x%x) with OXID(0x%x) RXID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - - return ret; -} - -u32 unf_send_logo_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_els_acc *logo_acc = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_LOGO); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - xchg->callback = NULL; - xchg->ob_callback = unf_logo_acc_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - logo_acc = &fc_entry->els_acc; - unf_fill_els_acc_pld(logo_acc); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - if (rport->nport_id < UNF_FC_FID_DOM_MGR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: LOGO ACC send %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", - lport->port_id, rport->nport_id, ox_id, rx_id); - } - - return ret; -} - -static u32 unf_send_rrq_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_els_acc *rrq_acc = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - xchg->callback = NULL; /* do noting */ - - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - rrq_acc = &fc_entry->els_acc; - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RRQ); - xchg->ob_callback = unf_rrq_acc_ob_callback; /* do noting */ - unf_fill_els_acc_pld(rrq_acc); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]RRQ ACC send %s. Port(0x%x)--->RPort(0x%x) with Xchg(0x%p) OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - rport->nport_id, xchg, ox_id, rx_id); - - return ret; -} - -static void unf_fill_pdisc_acc_pld(struct unf_plogi_payload *pdisc_acc_pld, - struct unf_lport *lport) -{ - struct unf_lgn_parm *login_parms = NULL; - - FC_CHECK_RETURN_VOID(pdisc_acc_pld); - FC_CHECK_RETURN_VOID(lport); - - pdisc_acc_pld->cmnd = (UNF_ELS_CMND_ACC); - login_parms = &pdisc_acc_pld->stparms; - - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - login_parms->co_parms.bb_credit = (unf_low_level_bb_credit(lport)); - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_NFPORT; - login_parms->co_parms.bbscn = - (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) - ? 0 - : unf_low_level_bb_scn(lport); - } else { - login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_LPORT; - } - - login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - login_parms->co_parms.continuously_increasing = UNF_CONTIN_INCREASE_SUPPORT; - login_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - login_parms->co_parms.nport_total_concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); - login_parms->co_parms.e_d_tov = (lport->ed_tov); - - login_parms->cl_parms[ARRAY_INDEX_2].valid = UNF_CLASS_VALID; /* class-3 */ - login_parms->cl_parms[ARRAY_INDEX_2].received_data_field_size = (lport->max_frame_size); - login_parms->cl_parms[ARRAY_INDEX_2].concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->cl_parms[ARRAY_INDEX_2].open_sequence_per_exchange = (UNF_PLOGI_SEQ_PER_XCHG); - - login_parms->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - login_parms->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - login_parms->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - login_parms->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, pdisc_acc_pld, - sizeof(struct unf_plogi_payload)); -} - -u32 unf_send_pdisc_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_plogi_payload *pdisc_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PDISC); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - - xchg->callback = NULL; - xchg->ob_callback = unf_pdisc_acc_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - pdisc_acc_pld = &fc_entry->pdisc_acc.payload; - unf_fill_pdisc_acc_pld(pdisc_acc_pld, lport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - 
unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send PDISC ACC %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - - return ret; -} - -static void unf_fill_adisc_acc_pld(struct unf_adisc_payload *adisc_acc_pld, - struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(adisc_acc_pld); - FC_CHECK_RETURN_VOID(lport); - - adisc_acc_pld->cmnd = (UNF_ELS_CMND_ACC); - - adisc_acc_pld->hard_address = (lport->nport_id & UNF_ALPA_MASK); - adisc_acc_pld->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - adisc_acc_pld->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - adisc_acc_pld->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - adisc_acc_pld->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - adisc_acc_pld->nport_id = lport->nport_id; - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, adisc_acc_pld, - sizeof(struct unf_adisc_payload)); -} - -u32 unf_send_adisc_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_adisc_payload *adisc_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_ADISC); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - - xchg->callback = NULL; - xchg->ob_callback = unf_adisc_acc_ob_callback; - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - adisc_acc_pld = &fc_entry->adisc_acc.adisc_payl; - unf_fill_adisc_acc_pld(adisc_acc_pld, lport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send ADISC ACC %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - - return ret; -} - -static void unf_fill_prlo_acc_pld(struct unf_prli_prlo *prlo_acc, - struct unf_lport *lport) -{ - struct unf_prli_payload *prlo_acc_pld = NULL; - - FC_CHECK_RETURN_VOID(prlo_acc); - - prlo_acc_pld = &prlo_acc->payload; - prlo_acc_pld->cmnd = - (UNF_ELS_CMND_ACC | - ((u32)UNF_FC4_FRAME_PAGE_SIZE << UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | - ((u32)sizeof(struct unf_prli_payload))); - prlo_acc_pld->parms[ARRAY_INDEX_0] = - (UNF_FC4_FRAME_PARM_0_FCP | UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE); - prlo_acc_pld->parms[ARRAY_INDEX_1] = 0; - prlo_acc_pld->parms[ARRAY_INDEX_2] = 0; - prlo_acc_pld->parms[ARRAY_INDEX_3] = 0; - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, prlo_acc_pld, - sizeof(struct unf_prli_payload)); -} - -u32 unf_send_prlo_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_prli_prlo *prlo_acc = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PRLO); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - - xchg->callback = NULL; - xchg->ob_callback = NULL; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - prlo_acc = &fc_entry->prlo_acc; - unf_fill_prlo_acc_pld(prlo_acc, lport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send PRLO ACC %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - - return ret; -} - -static void unf_prli_acc_ob_callback(struct unf_xchg *xchg) -{ - /* Report R_Port scsi Link Up */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - enum unf_rport_login_state rport_state = UNF_RPORT_ST_INIT; - - FC_CHECK_RETURN_VOID(xchg); - unf_lport = xchg->lport; - unf_rport = xchg->rport; - FC_CHECK_RETURN_VOID(unf_lport); - FC_CHECK_RETURN_VOID(unf_rport); - - /* Update & Report Link Up */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_READY); - rport_state = unf_rport->rp_state; - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]LOGIN: Port(0x%x) RPort(0x%x) state(0x%x) WWN(0x%llx) prliacc", - unf_lport->port_id, unf_rport->nport_id, - unf_rport->rp_state, unf_rport->port_name); - } - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - if (rport_state == UNF_RPORT_ST_READY) { - unf_rport->logo_retries = 0; - unf_update_lport_state_by_linkup_event(unf_lport, unf_rport, - unf_rport->options); - } -} - -static void unf_schedule_open_work(struct unf_lport *lport, - struct unf_rport *rport) -{ - /* Used for L_Port port only with TGT, or R_Port only with INI */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - ulong delay = 0; - ulong flag = 0; - u32 ret = 0; - u32 port_feature = INVALID_VALUE32; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - delay = (ulong)unf_lport->ed_tov; - port_feature = unf_rport->options & UNF_PORT_MODE_BOTH; - - if (unf_lport->options == UNF_PORT_MODE_TGT || - port_feature == UNF_PORT_MODE_INI) { - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - - ret = unf_rport_ref_inc(unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) RPort(0x%x) abnormal, no need open", - unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - return; - } - - /* Delay work pending check */ - if (delayed_work_pending(&unf_rport->open_work)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) RPort(0x%x) open work is running, no need re-open", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - unf_rport_ref_dec(unf_rport); - return; - } - - /* start open work */ - if (queue_delayed_work(unf_wq, &unf_rport->open_work, - (ulong)msecs_to_jiffies((u32)delay))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%x) start open work", - unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id); - - (void)unf_rport_ref_inc(unf_rport); - } - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - unf_rport_ref_dec(unf_rport); - } -} - -static void unf_plogi_acc_ob_callback(struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_lport = xchg->lport; - unf_rport = xchg->rport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - FC_CHECK_RETURN_VOID(unf_lport); - FC_CHECK_RETURN_VOID(unf_rport); - - /* - * 1. 
According to FC-LS 4.2.7.1:
-	 * after RCVD PLOGI or sending PLOGI ACC, need to terminate open EXCH
-	 */
-	unf_cm_xchg_mgr_abort_io_by_id(unf_lport, unf_rport,
-				       unf_rport->nport_id, unf_lport->nport_id, 0);
-
-	/* 2. Send PLOGI ACC fail */
-	if (xchg->ob_callback_sts != UNF_IO_SUCCESS) {
-		/* Do R_Port recovery */
-		unf_rport_error_recovery(unf_rport);
-
-		/* Do not care: Just used for L_Port only is TGT mode or R_Port
-		 * only is INI mode
-		 */
-		unf_schedule_open_work(unf_lport, unf_rport);
-
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-			     "[warn]LOGIN: Port(0x%x_0x%x_0x%x) send PLOGI ACC failed(0x%x) with RPort(0x%x) feature(0x%x)",
-			     unf_lport->port_id, unf_lport->nport_id,
-			     unf_lport->options, xchg->ob_callback_sts,
-			     unf_rport->nport_id, unf_rport->options);
-
-		return;
-	}
-
-	/* 3. Private Loop: check whether PRLI needs to be sent */
-	spin_lock_irqsave(&unf_rport->rport_state_lock, flags);
-	if (unf_lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP &&
-	    (unf_rport->rp_state == UNF_RPORT_ST_PRLI_WAIT ||
-	     unf_rport->rp_state == UNF_RPORT_ST_READY)) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-			     "[info]Port(0x%x_0x%x) RPort(0x%x) with State(0x%x) return directly",
-			     unf_lport->port_id, unf_lport->nport_id,
-			     unf_rport->nport_id, unf_rport->rp_state);
-
-		spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-		return;
-	}
-	unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PRLI);
-	spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-	/* 4. Set Port Feature with BOTH: cancel */
-	if (unf_rport->options == UNF_PORT_MODE_UNKNOWN && unf_rport->port_name != INVALID_WWPN)
-		unf_rport->options = unf_get_port_feature(unf_rport->port_name);
-
-	/*
-	 * 5. Check whether PRLI needs to be sent with a delay
-	 * Called by: RCVD PLOGI ACC, or the callback after PLOGI ACC was sent successfully
-	 */
-	unf_check_rport_need_delay_prli(unf_lport, unf_rport, unf_rport->options);
-
-	/* 6.
Do not care: Just used for L_Port only is TGT mode or R_Port only - * is INI mode - */ - unf_schedule_open_work(unf_lport, unf_rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x_0x%x) send PLOGI ACC succeed with RPort(0x%x) feature(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_lport->options, - unf_rport->nport_id, unf_rport->options); -} - -static void unf_flogi_acc_ob_callback(struct unf_xchg *xchg) -{ - /* Callback for Sending FLOGI ACC succeed */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - u64 rport_port_name = 0; - u64 rport_node_name = 0; - - FC_CHECK_RETURN_VOID(xchg); - FC_CHECK_RETURN_VOID(xchg->lport); - FC_CHECK_RETURN_VOID(xchg->rport); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_lport = xchg->lport; - unf_rport = xchg->rport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - if (unf_rport->port_name == 0 && unf_rport->node_name == 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x_0x%x) already send Plogi with RPort(0x%x) feature(0x%x).", - unf_lport->port_id, unf_lport->nport_id, unf_lport->options, - unf_rport->nport_id, unf_rport->options); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - return; - } - - rport_port_name = unf_rport->port_name; - rport_node_name = unf_rport->node_name; - - /* Swap case: Set WWPN & WWNN with zero */ - unf_rport->port_name = 0; - unf_rport->node_name = 0; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - /* Enter PLOGI stage: after send FLOGI ACC succeed */ - unf_login_with_rport_in_n2n(unf_lport, rport_port_name, rport_node_name); -} - -static void unf_rscn_acc_ob_callback(struct unf_xchg *xchg) -{ -} - -static void unf_logo_acc_ob_callback(struct unf_xchg *xchg) -{ -} - -static void unf_adisc_acc_ob_callback(struct unf_xchg *xchg) -{ -} - -static void unf_pdisc_acc_ob_callback(struct unf_xchg *xchg) -{ -} - -static inline u8 unf_determin_bbscn(u8 local_bbscn, u8 remote_bbscn) -{ - if (remote_bbscn == 0 || local_bbscn == 0) - local_bbscn = 0; - else - local_bbscn = local_bbscn > remote_bbscn ? local_bbscn : remote_bbscn; - - return local_bbscn; -} - -static void unf_cfg_lowlevel_fabric_params(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_fabric_parm *login_parms) -{ - struct unf_port_login_parms login_co_parms = {0}; - u32 remote_edtov = 0; - u32 ret = 0; - u8 remote_edtov_resolution = 0; /* 0:ms; 1:ns */ - - if (!lport->low_level_func.port_mgr_op.ll_port_config_set) - return; - - login_co_parms.remote_rttov_tag = (u8)UNF_GET_RT_TOV_FROM_PARAMS(login_parms); - login_co_parms.remote_edtov_tag = 0; - login_co_parms.remote_bb_credit = (u16)UNF_GET_BB_CREDIT_FROM_PARAMS(login_parms); - login_co_parms.compared_bbscn = - (u32)unf_determin_bbscn((u8)lport->low_level_func.lport_cfg_items.bbscn, - (u8)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); - - remote_edtov_resolution = (u8)UNF_GET_E_D_TOV_RESOLUTION_FROM_PARAMS(login_parms); - remote_edtov = UNF_GET_E_D_TOV_FROM_PARAMS(login_parms); - login_co_parms.compared_edtov_val = - remote_edtov_resolution ? (remote_edtov / UNF_OS_MS_TO_NS) - : remote_edtov; - - login_co_parms.compared_ratov_val = UNF_GET_RA_TOV_FROM_PARAMS(login_parms); - login_co_parms.els_cmnd_code = ELS_FLOGI; - - if (UNF_TOP_P2P_MASK & (u32)lport->act_topo) { - login_co_parms.act_topo = (login_parms->co_parms.nport == UNF_F_PORT) - ? 
UNF_ACT_TOP_P2P_FABRIC - : UNF_ACT_TOP_P2P_DIRECT; - } else { - login_co_parms.act_topo = lport->act_topo; - } - - ret = lport->low_level_func.port_mgr_op.ll_port_config_set((void *)lport->fc_port, - UNF_PORT_CFG_UPDATE_FABRIC_PARAM, (void *)&login_co_parms); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Lowlevel unsupport fabric config"); - } -} - -u32 unf_check_flogi_params(struct unf_lport *lport, struct unf_rport *rport, - struct unf_fabric_parm *fabric_parms) -{ - u32 ret = RETURN_OK; - u32 high_port_name; - u32 low_port_name; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(fabric_parms, UNF_RETURN_ERROR); - - if (fabric_parms->cl_parms[ARRAY_INDEX_2].valid == UNF_CLASS_INVALID) { - /* Discard directly */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) NPort_ID(0x%x) FLOGI not support class3", - lport->port_id, rport->nport_id); - - return UNF_RETURN_ERROR; - } - - high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - if (fabric_parms->high_port_name == high_port_name && - fabric_parms->low_port_name == low_port_name) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]The wwpn(0x%x%x) of lport(0x%x) is same as the wwpn of rport(0x%x)", - high_port_name, low_port_name, lport->port_id, rport->nport_id); - return UNF_RETURN_ERROR; - } - - return ret; -} - -static void unf_save_fabric_params(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_fabric_parm *fabric_parms) -{ - u64 fabric_node_name = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(fabric_parms); - - fabric_node_name = (u64)(((u64)(fabric_parms->high_node_name) << UNF_SHIFT_32) | - ((u64)(fabric_parms->low_node_name))); - - /* R_Port for 0xfffffe is used for FLOGI, not need to save WWN */ - if (fabric_parms->co_parms.bb_receive_data_field_size > UNF_MAX_FRAME_SIZE) - rport->max_frame_size = UNF_MAX_FRAME_SIZE; /* 2112 */ - else - rport->max_frame_size = fabric_parms->co_parms.bb_receive_data_field_size; - - /* with Fabric attribute */ - if (fabric_parms->co_parms.nport == UNF_F_PORT) { - rport->ed_tov = fabric_parms->co_parms.e_d_tov; - rport->ra_tov = fabric_parms->co_parms.r_a_tov; - lport->ed_tov = fabric_parms->co_parms.e_d_tov; - lport->ra_tov = fabric_parms->co_parms.r_a_tov; - lport->fabric_node_name = fabric_node_name; - } - - /* Configure info from FLOGI to chip */ - unf_cfg_lowlevel_fabric_params(lport, rport, fabric_parms); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) Rport(0x%x) login parameter: E_D_TOV = %u. LPort E_D_TOV = %u. 
fabric nodename: 0x%x%x", - lport->port_id, rport->nport_id, (fabric_parms->co_parms.e_d_tov), - lport->ed_tov, fabric_parms->high_node_name, fabric_parms->low_node_name); -} - -u32 unf_flogi_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - struct unf_rport *unf_rport = NULL; - struct unf_flogi_fdisc_acc *flogi_frame = NULL; - struct unf_fabric_parm *fabric_login_parms = NULL; - u32 ret = UNF_RETURN_ERROR; - ulong flag = 0; - u64 wwpn = 0; - u64 wwnn = 0; - enum unf_act_topo unf_active_topo; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x)<---RPort(0x%x) Receive FLOGI with OX_ID(0x%x)", - lport->port_id, sid, xchg->oxid); - - UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_FLOGI); - - /* Check L_Port state: Offline */ - if (lport->states >= UNF_LPORT_ST_OFFLINE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with state(0x%x) not need to handle FLOGI", - lport->port_id, lport->states); - - unf_cm_free_xchg(lport, xchg); - return ret; - } - - flogi_frame = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi; - fabric_login_parms = &flogi_frame->flogi_payload.fabric_parms; - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, &flogi_frame->flogi_payload, - sizeof(struct unf_flogi_fdisc_payload)); - wwpn = (u64)(((u64)(fabric_login_parms->high_port_name) << UNF_SHIFT_32) | - ((u64)fabric_login_parms->low_port_name)); - wwnn = (u64)(((u64)(fabric_login_parms->high_node_name) << UNF_SHIFT_32) | - ((u64)fabric_login_parms->low_node_name)); - - /* Get (new) R_Port: reuse only */ - unf_rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI); - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has no RPort. do nothing", lport->port_id); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - /* Update R_Port info */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->port_name = wwpn; - unf_rport->node_name = wwnn; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Check RCVD FLOGI parameters: only for class-3 */ - ret = unf_check_flogi_params(lport, unf_rport, fabric_login_parms); - if (ret != RETURN_OK) { - /* Discard directly */ - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - /* Save fabric parameters */ - unf_save_fabric_params(lport, unf_rport, fabric_login_parms); - - if ((u32)lport->act_topo & UNF_TOP_P2P_MASK) { - unf_active_topo = - (fabric_login_parms->co_parms.nport == UNF_F_PORT) - ? 
UNF_ACT_TOP_P2P_FABRIC - : UNF_ACT_TOP_P2P_DIRECT; - unf_lport_update_topo(lport, unf_active_topo); - } - /* Send ACC for FLOGI */ - ret = unf_send_flogi_acc(lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send FLOGI ACC failed and do recover", - lport->port_id); - - /* Do L_Port recovery */ - unf_lport_error_recovery(lport); - } - - return ret; -} - -static void unf_cfg_lowlevel_port_params(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_lgn_parm *login_parms, - u32 cmd_type) -{ - struct unf_port_login_parms login_co_parms = {0}; - u32 ret = 0; - - if (!lport->low_level_func.port_mgr_op.ll_port_config_set) - return; - - login_co_parms.rport_index = rport->rport_index; - login_co_parms.seq_cnt = 0; - login_co_parms.ed_tov = 0; /* ms */ - login_co_parms.ed_tov_timer_val = lport->ed_tov; - login_co_parms.tx_mfs = rport->max_frame_size; - - login_co_parms.remote_rttov_tag = (u8)UNF_GET_RT_TOV_FROM_PARAMS(login_parms); - login_co_parms.remote_edtov_tag = 0; - login_co_parms.remote_bb_credit = (u16)UNF_GET_BB_CREDIT_FROM_PARAMS(login_parms); - login_co_parms.els_cmnd_code = cmd_type; - - if (lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { - login_co_parms.compared_bbscn = 0; - } else { - login_co_parms.compared_bbscn = - (u32)unf_determin_bbscn((u8)lport->low_level_func.lport_cfg_items.bbscn, - (u8)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); - } - - login_co_parms.compared_edtov_val = lport->ed_tov; - login_co_parms.compared_ratov_val = lport->ra_tov; - - ret = lport->low_level_func.port_mgr_op.ll_port_config_set((void *)lport->fc_port, - UNF_PORT_CFG_UPDATE_PLOGI_PARAM, (void *)&login_co_parms); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) Lowlevel unsupport port config", lport->port_id); - } -} - -u32 unf_check_plogi_params(struct unf_lport *lport, struct unf_rport *rport, - struct unf_lgn_parm *login_parms) -{ - u32 ret = RETURN_OK; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(login_parms, UNF_RETURN_ERROR); - - /* Parameters check: Class-type */ - if (login_parms->cl_parms[ARRAY_INDEX_2].valid == UNF_CLASS_INVALID || - login_parms->co_parms.bb_receive_data_field_size == 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort N_Port_ID(0x%x) with PLOGI parameters invalid: class3(%u), BBReceiveDataFieldSize(0x%x), send LOGO", - lport->port_id, rport->nport_id, - login_parms->cl_parms[ARRAY_INDEX_2].valid, - login_parms->co_parms.bb_receive_data_field_size); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); /* --->>> LOGO */ - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Enter LOGO stage */ - unf_rport_enter_logo(lport, rport); - return UNF_RETURN_ERROR; - } - - /* 16G FC Brocade SW, Domain Controller's PLOGI both support CLASS-1 & - * CLASS-2 - */ - if (login_parms->cl_parms[ARRAY_INDEX_0].valid == UNF_CLASS_VALID || - login_parms->cl_parms[ARRAY_INDEX_1].valid == UNF_CLASS_VALID) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) get PLOGI class1(%u) class2(%u) from N_Port_ID(0x%x)", - lport->port_id, login_parms->cl_parms[ARRAY_INDEX_0].valid, - login_parms->cl_parms[ARRAY_INDEX_1].valid, rport->nport_id); - } - - return ret; -} - -static void unf_save_plogi_params(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_lgn_parm *login_parms, - 
u32 cmd_code)
-{
-#define UNF_DELAY_TIME 100 /* delay (ms) before sending PRLI in COM mode when the local WWPN is smaller */
-
-	u64 wwpn = INVALID_VALUE64;
-	u64 wwnn = INVALID_VALUE64;
-	u32 ed_tov = 0;
-	u32 remote_edtov = 0;
-
-	if (login_parms->co_parms.bb_receive_data_field_size > UNF_MAX_FRAME_SIZE)
-		rport->max_frame_size = UNF_MAX_FRAME_SIZE; /* 2112 */
-	else
-		rport->max_frame_size = login_parms->co_parms.bb_receive_data_field_size;
-
-	wwnn = (u64)(((u64)(login_parms->high_node_name) << UNF_SHIFT_32) |
-		     ((u64)login_parms->low_node_name));
-	wwpn = (u64)(((u64)(login_parms->high_port_name) << UNF_SHIFT_32) |
-		     ((u64)login_parms->low_port_name));
-
-	remote_edtov = login_parms->co_parms.e_d_tov;
-	ed_tov = login_parms->co_parms.e_d_tov_resolution
-		     ? (remote_edtov / UNF_OS_MS_TO_NS)
-		     : remote_edtov;
-
-	rport->port_name = wwpn;
-	rport->node_name = wwnn;
-	rport->local_nport_id = lport->nport_id;
-
-	if (lport->act_topo == UNF_ACT_TOP_P2P_DIRECT ||
-	    lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP) {
-		/* P2P or Private Loop or FCoE VN2VN */
-		lport->ed_tov = (lport->ed_tov > ed_tov) ? lport->ed_tov : ed_tov;
-		lport->ra_tov = 2 * lport->ed_tov; /* 2 * E_D_TOV */
-
-		if (ed_tov != 0)
-			rport->ed_tov = ed_tov;
-		else
-			rport->ed_tov = UNF_DEFAULT_EDTOV;
-	} else {
-		/* SAN: E_D_TOV updated by FLOGI */
-		rport->ed_tov = lport->ed_tov;
-	}
-
-	/* local WWPN is smaller: delay sending PRLI */
-	if (rport->port_name > lport->port_name)
-		rport->ed_tov += UNF_DELAY_TIME; /* 100ms */
-
-	/* Configure port parameters to low level (chip) */
-	unf_cfg_lowlevel_port_params(lport, rport, login_parms, cmd_code);
-
-	FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO,
-		     "[info]Port(0x%x) RPort(0x%x) with WWPN(0x%llx) WWNN(0x%llx) login: ED_TOV(%u) Port: ED_TOV(%u)",
-		     lport->port_id, rport->nport_id, rport->port_name, rport->node_name,
-		     ed_tov, lport->ed_tov);
-}
-
-static bool unf_check_bbscn_is_enabled(u8 local_bbscn, u8 remote_bbscn)
-{
-	return unf_determin_bbscn(local_bbscn, remote_bbscn) ?
-static bool unf_check_bbscn_is_enabled(u8 local_bbscn, u8 remote_bbscn)
-{
-    return unf_determin_bbscn(local_bbscn, remote_bbscn) ? true : false;
-}
-
-static u32 unf_irq_process_switch2thread(void *lport, struct unf_xchg *xchg,
-                     unf_event_task evt_task)
-{
-    struct unf_cm_event_report *event = NULL;
-    struct unf_xchg *unf_xchg = NULL;
-    u32 ret = 0;
-    struct unf_lport *unf_lport = NULL;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-    unf_lport = lport;
-    unf_xchg = xchg;
-
-    if (unlikely(!unf_lport->event_mgr.unf_get_free_event_func ||
-             !unf_lport->event_mgr.unf_post_event_func ||
-             !unf_lport->event_mgr.unf_release_event)) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) event function is NULL",
-                 unf_lport->port_id);
-
-        return UNF_RETURN_ERROR;
-    }
-
-    ret = unf_xchg_ref_inc(unf_xchg, SFS_RESPONSE);
-    FC_CHECK_RETURN_VALUE((ret == RETURN_OK), UNF_RETURN_ERROR);
-
-    event = unf_lport->event_mgr.unf_get_free_event_func((void *)lport);
-    FC_CHECK_RETURN_VALUE(event, UNF_RETURN_ERROR);
-
-    event->lport = unf_lport;
-    event->event_asy_flag = UNF_EVENT_ASYN;
-    event->unf_event_task = evt_task;
-    event->para_in = xchg;
-    unf_lport->event_mgr.unf_post_event_func(unf_lport, event);
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-             "[info]Port(0x%x) start to switch to thread context now",
-             unf_lport->port_id);
-
-    return ret;
-}
-
-u32 unf_plogi_handler_com_process(struct unf_xchg *xchg)
-{
-    struct unf_xchg *unf_xchg = xchg;
-    struct unf_lport *unf_lport = NULL;
-    struct unf_rport *unf_rport = NULL;
-    struct unf_plogi_pdisc *plogi_frame = NULL;
-    struct unf_lgn_parm *login_parms = NULL;
-    u32 ret = UNF_RETURN_ERROR;
-    ulong flag = 0;
-
-    FC_CHECK_RETURN_VALUE(unf_xchg, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(unf_xchg->lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(unf_xchg->rport, UNF_RETURN_ERROR);
-
-    unf_lport = unf_xchg->lport;
-    unf_rport = unf_xchg->rport;
-    plogi_frame = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi;
-    login_parms = &plogi_frame->payload.stparms;
-
-    unf_save_plogi_params(unf_lport, unf_rport, login_parms, ELS_PLOGI);
-
-    /* Update state: PLOGI_WAIT */
-    spin_lock_irqsave(&unf_rport->rport_state_lock, flag);
-    unf_rport->nport_id = unf_xchg->sid;
-    unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI);
-    spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag);
-
-    /* Send PLOGI ACC to remote port */
-    ret = unf_send_plogi_acc(unf_lport, unf_rport, unf_xchg);
-    if (ret != RETURN_OK) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]LOGIN: Port(0x%x) send PLOGI ACC failed",
-                 unf_lport->port_id);
-
-        /* NOTE: the exchange has already been freed (inside the send path) */
-        unf_rport_error_recovery(unf_rport);
-        return ret;
-    }
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO,
-             "[info]LOGIN: Port(0x%x) send PLOGI ACC to Port(0x%x) succeed",
-             unf_lport->port_id, unf_rport->nport_id);
-
-    return ret;
-}
-
-int unf_plogi_async_handle(void *argc_in, void *argc_out)
-{
-    struct unf_xchg *xchg = (struct unf_xchg *)argc_in;
-    u32 ret = RETURN_OK;
-
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    ret = unf_plogi_handler_com_process(xchg);
-
-    unf_xchg_ref_dec(xchg, SFS_RESPONSE);
-
-    return (int)ret;
-}
-
-u32 unf_plogi_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg)
-{
-    struct unf_xchg *unf_xchg = xchg;
-    struct unf_lport *unf_lport = lport;
-    struct unf_rport *unf_rport = NULL;
-    struct unf_plogi_pdisc *plogi_frame = NULL;
-    struct unf_lgn_parm *login_parms = NULL;
-    struct unf_rjt_info rjt_info = {0};
-    u64 wwpn = INVALID_VALUE64;
-    u32 ret = UNF_RETURN_ERROR;
-    bool bbscn_enabled = false;
-    bool switch2thread = false;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    /* 1. Maybe: PLOGI is sent by Name server */
-    if (sid < UNF_FC_FID_DOM_MGR ||
-        lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                 "[info]LOGIN: Receive PLOGI. Port(0x%x_0x%x)<---RPort(0x%x) with OX_ID(0x%x)",
-                 lport->port_id, lport->nport_id, sid, xchg->oxid);
-    }
-
-    UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_PLOGI);
-
-    /* 2. State check: Offline */
-    if (unf_lport->states >= UNF_LPORT_ST_OFFLINE) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x_0x%x) received PLOGI with state(0x%x)",
-                 unf_lport->port_id, unf_lport->nport_id, unf_lport->states);
-
-        unf_cm_free_xchg(unf_lport, unf_xchg);
-        return UNF_RETURN_ERROR;
-    }
-
-    /* Get R_Port by WWPN */
-    plogi_frame = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi;
-    login_parms = &plogi_frame->payload.stparms;
-
-    UNF_PRINT_SFS_LIMIT(UNF_INFO, unf_lport->port_id, &plogi_frame->payload,
-                sizeof(struct unf_plogi_payload));
-
-    wwpn = (u64)(((u64)(login_parms->high_port_name) << UNF_SHIFT_32) |
-           ((u64)login_parms->low_port_name));
-
-    /* 3. Get (new) R_Port (by wwpn) */
-    unf_rport = unf_find_rport(unf_lport, sid, wwpn);
-    unf_rport = unf_get_safe_rport(unf_lport, unf_rport, UNF_RPORT_REUSE_ONLY, sid);
-    if (!unf_rport) {
-        memset(&rjt_info, 0, sizeof(struct unf_rjt_info));
-        rjt_info.els_cmnd_code = ELS_PLOGI;
-        rjt_info.reason_code = UNF_LS_RJT_BUSY;
-        rjt_info.reason_explanation = UNF_LS_RJT_INSUFFICIENT_RESOURCES;
-
-        /* R_Port is NULL: Send ELS RJT for PLOGI */
-        (void)unf_send_els_rjt_by_did(unf_lport, unf_xchg, sid, &rjt_info);
-
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) has no RPort and send PLOGI reject",
-                 unf_lport->port_id);
-        return RETURN_OK;
-    }
-
-    /*
-     * 4. According to FC-LS 4.2.7.1:
-     * after RCVD PLOGI or sending PLOGI ACC, open exchanges must be terminated
-     */
-    unf_cm_xchg_mgr_abort_io_by_id(unf_lport, unf_rport, sid, unf_lport->nport_id, 0);
-
-    /* 5. Cancel recovery timer work after RCVD PLOGI */
-    if (cancel_delayed_work(&unf_rport->recovery_work))
-        atomic_dec(&unf_rport->rport_ref_cnt);
-
-    /*
-     * 6. PLOGI parameters check
-     * Call by: (RCVD) PLOGI handler & callback function for RCVD PLOGI_ACC
-     */
-    ret = unf_check_plogi_params(unf_lport, unf_rport, login_parms);
-    if (ret != RETURN_OK) {
-        unf_cm_free_xchg(unf_lport, unf_xchg);
-        return UNF_RETURN_ERROR;
-    }
-
-    unf_xchg->lport = lport;
-    unf_xchg->rport = unf_rport;
-    unf_xchg->sid = sid;
-
-    /* 7. bbscn: decide whether a context switch is needed */
-    bbscn_enabled =
-        unf_check_bbscn_is_enabled((u8)unf_lport->low_level_func.lport_cfg_items.bbscn,
-                       (u8)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms));
-    if (unf_lport->act_topo == UNF_ACT_TOP_P2P_DIRECT && bbscn_enabled) {
-        switch2thread = true;
-        unf_lport->bbscn_support = true;
-    }
-
-    /* 8. Process PLOGI frame: switch to thread context if necessary */
-    if (switch2thread && unf_lport->root_lport == unf_lport) {
-        /* Wait for LR complete sync */
-        ret = unf_irq_process_switch2thread(unf_lport, unf_xchg, unf_plogi_async_handle);
-    } else {
-        ret = unf_plogi_handler_com_process(unf_xchg);
-    }
-
-    return ret;
-}
-
-static void unf_obtain_tape_capacity(struct unf_lport *lport,
-                     struct unf_rport *rport, u32 tape_parm)
-{
-    u32 rec_support = 0;
-    u32 task_retry_support = 0;
-    u32 retry_support = 0;
-
-    rec_support = tape_parm & UNF_FC4_FRAME_PARM_3_REC_SUPPORT;
-    task_retry_support =
-        tape_parm & UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT;
-    retry_support = tape_parm & UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT;
-
-    if (lport->low_level_func.lport_cfg_items.tape_support &&
-        rec_support && task_retry_support && retry_support) {
-        rport->tape_support_needed = true;
-
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                 "[info]Port(0x%x_0x%x) FC_tape is needed for RPort(0x%x)",
-                 lport->port_id, lport->nport_id, rport->nport_id);
-    }
-
-    if ((tape_parm & UNF_FC4_FRAME_PARM_3_CONF_ALLOW) &&
-        lport->low_level_func.lport_cfg_items.fcp_conf) {
-        rport->fcp_conf_needed = true;
-
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                 "[info]Port(0x%x_0x%x) FCP confirm is needed for RPort(0x%x)",
-                 lport->port_id, lport->nport_id, rport->nport_id);
-    }
-}
-
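/*
 * Editor's sketch (illustrative, not from the patch): how the FC-TAPE
 * decision above works. PRLI service-parameter word 3 carries per-feature
 * bits; FC-TAPE is only enabled when REC, task-retry-identification and
 * retry are all advertised by the peer and tape support is configured
 * locally. Bit positions below are placeholders, not the driver's values.
 */
#include <stdbool.h>
#include <stdint.h>

#define PARM3_REC_SUPPORT        (1u << 10) /* placeholder bit positions */
#define PARM3_TASK_RETRY_SUPPORT (1u << 9)
#define PARM3_RETRY_SUPPORT      (1u << 8)

static bool fc_tape_usable(uint32_t parm3, bool local_tape_cfg)
{
    uint32_t need = PARM3_REC_SUPPORT | PARM3_TASK_RETRY_SUPPORT |
            PARM3_RETRY_SUPPORT;

    /* all three capability bits must be present, plus local configuration */
    return local_tape_cfg && (parm3 & need) == need;
}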
-static u32 unf_prli_handler_com_process(struct unf_xchg *xchg)
-{
-    struct unf_prli_prlo *prli = NULL;
-    u32 ret = UNF_RETURN_ERROR;
-    ulong flags = 0;
-    u32 sid = 0;
-    struct unf_lport *unf_lport = NULL;
-    struct unf_rport *unf_rport = NULL;
-    struct unf_xchg *unf_xchg = NULL;
-
-    unf_xchg = xchg;
-    FC_CHECK_RETURN_VALUE(unf_xchg->lport, UNF_RETURN_ERROR);
-    unf_lport = unf_xchg->lport;
-    sid = xchg->sid;
-
-    UNF_SERVICE_COLLECT(unf_lport->link_service_info, UNF_SERVICE_ITEM_PRLI);
-
-    /* 1. Get R_Port: for each R_Port from rport_busy_list */
-    unf_rport = unf_get_rport_by_nport_id(unf_lport, sid);
-    if (!unf_rport) {
-        /* no session (R_Port) exists */
-        (void)unf_send_logo_by_did(unf_lport, sid);
-
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x_0x%x) received PRLI but no RPort SID(0x%x) OX_ID(0x%x)",
-                 unf_lport->port_id, unf_lport->nport_id, sid, xchg->oxid);
-
-        unf_cm_free_xchg(unf_lport, xchg);
-        return ret;
-    }
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO,
-             "[info]LOGIN: Receive PRLI. Port(0x%x)<---RPort(0x%x) with S_ID(0x%x)",
-             unf_lport->port_id, unf_rport->nport_id, sid);
-
-    /* 2. Get PRLI info */
-    prli = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prli;
-    if (sid < UNF_FC_FID_DOM_MGR || unf_lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                 "[info]LOGIN: Receive PRLI. Port(0x%x_0x%x)<---RPort(0x%x) parameter-3(0x%x) OX_ID(0x%x)",
-                 unf_lport->port_id, unf_lport->nport_id, sid,
-                 prli->payload.parms[ARRAY_INDEX_3], xchg->oxid);
-    }
-
-    UNF_PRINT_SFS_LIMIT(UNF_INFO, unf_lport->port_id, &prli->payload,
-                sizeof(struct unf_prli_payload));
-
-    spin_lock_irqsave(&unf_rport->rport_state_lock, flags);
-
-    /* 3. Increase R_Port ref_cnt */
-    ret = unf_rport_ref_inc(unf_rport);
-    if (ret != RETURN_OK) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) RPort(0x%x_0x%p) is being removed, do nothing",
-                 unf_lport->port_id, unf_rport->nport_id, unf_rport);
-
-        spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-        unf_cm_free_xchg(unf_lport, xchg);
-        return RETURN_ERROR;
-    }
-
-    /* 4. Cancel R_Port Open work */
-    if (cancel_delayed_work(&unf_rport->open_work)) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                 "[info]Port(0x%x_0x%x) RPort(0x%x) cancel open work succeed",
-                 unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id);
-
-        /* This is not the last counter */
-        atomic_dec(&unf_rport->rport_ref_cnt);
-    }
-
-    /* 5. Check R_Port state */
-    if (unf_rport->rp_state != UNF_RPORT_ST_PRLI_WAIT &&
-        unf_rport->rp_state != UNF_RPORT_ST_READY) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x_0x%x) RPort(0x%x) with state(0x%x) when received PRLI, send LOGO",
-                 unf_lport->port_id, unf_lport->nport_id,
-                 unf_rport->nport_id, unf_rport->rp_state);
-
-        unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO);
-        spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-        /* NOTE: Start to send LOGO */
-        unf_rport_enter_logo(unf_lport, unf_rport);
-
-        unf_cm_free_xchg(unf_lport, xchg);
-        unf_rport_ref_dec(unf_rport);
-
-        return RETURN_ERROR;
-    }
-
-    spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-    /* 6. Update R_Port options(INI/TGT/BOTH) */
-    unf_rport->options =
-        prli->payload.parms[ARRAY_INDEX_3] &
-        (UNF_FC4_FRAME_PARM_3_TGT | UNF_FC4_FRAME_PARM_3_INI);
-
-    unf_update_port_feature(unf_rport->port_name, unf_rport->options);
-
-    /* for Confirm */
-    unf_rport->fcp_conf_needed = false;
-
-    unf_obtain_tape_capacity(unf_lport, unf_rport, prli->payload.parms[ARRAY_INDEX_3]);
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO,
-             "[info]Port(0x%x_0x%x) RPort(0x%x) parameter-3(0x%x) options(0x%x)",
-             unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id,
-             prli->payload.parms[ARRAY_INDEX_3], unf_rport->options);
-
-    /* 7. Send PRLI ACC */
-    ret = unf_send_prli_acc(unf_lport, unf_rport, xchg);
-    if (ret != RETURN_OK) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]LOGIN: Port(0x%x_0x%x) RPort(0x%x) send PRLI ACC failed",
-                 unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id);
-
-        /* NOTE: the exchange has already been freed (inside the send path) */
-        unf_rport_error_recovery(unf_rport);
-    }
-
-    /* 8. Decrease R_Port ref_cnt */
-    unf_rport_ref_dec(unf_rport);
-
-    return ret;
-}
-
-int unf_prli_async_handle(void *argc_in, void *argc_out)
-{
-    struct unf_xchg *xchg = (struct unf_xchg *)argc_in;
-    u32 ret = RETURN_OK;
-
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    ret = unf_prli_handler_com_process(xchg);
-
-    unf_xchg_ref_dec(xchg, SFS_RESPONSE);
-
-    return (int)ret;
-}
-
-u32 unf_prli_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg)
-{
-    u32 ret = UNF_RETURN_ERROR;
-    bool switch2thread = false;
-    struct unf_lport *unf_lport = NULL;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    xchg->sid = sid;
-    xchg->lport = lport;
-    unf_lport = lport;
-
-    if (lport->bbscn_support &&
-        lport->act_topo == UNF_ACT_TOP_P2P_DIRECT)
-        switch2thread = true;
-
-    if (switch2thread && unf_lport->root_lport == unf_lport) {
-        /* Wait for LR done sync */
-        ret = unf_irq_process_switch2thread(lport, xchg, unf_prli_async_handle);
-    } else {
-        ret = unf_prli_handler_com_process(xchg);
-    }
-
-    return ret;
-}
-
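/*
 * Editor's sketch (illustrative): the switch2thread pattern used by the
 * PLOGI/PRLI handlers above. In IRQ context the handler only takes a
 * reference on the exchange and queues a work item; the heavy processing
 * runs later in thread context. The plain Linux workqueue API is shown
 * here; the unf_* event-manager calls in the driver are its own equivalent.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct els_work {
    struct work_struct work;
    void *xchg; /* referenced exchange, released by the worker */
};

static void els_worker(struct work_struct *w)
{
    struct els_work *ew = container_of(w, struct els_work, work);

    /* ... process the ELS frame in ew->xchg, then drop the reference ... */
    kfree(ew);
}

static int defer_els(void *xchg)
{
    struct els_work *ew = kzalloc(sizeof(*ew), GFP_ATOMIC);

    if (!ew)
        return -ENOMEM;
    ew->xchg = xchg;
    INIT_WORK(&ew->work, els_worker);
    schedule_work(&ew->work); /* els_worker later runs in process context */
    return 0;
}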
-static void unf_save_rscn_port_id(struct unf_rscn_mgr *rscn_mg,
-                  struct unf_rscn_port_id_page *rscn_port_id)
-{
-    struct unf_port_id_page *exit_port_id_page = NULL;
-    struct unf_port_id_page *new_port_id_page = NULL;
-    struct list_head *node = NULL;
-    struct list_head *next_node = NULL;
-    ulong flag = 0;
-    bool is_repeat = false;
-
-    FC_CHECK_RETURN_VOID(rscn_mg);
-    FC_CHECK_RETURN_VOID(rscn_port_id);
-
-    /* 1. check whether the new RSCN Port_ID (RSCN_Page) is already in the RSCN_Mgr */
-    spin_lock_irqsave(&rscn_mg->rscn_id_list_lock, flag);
-    if (list_empty(&rscn_mg->list_using_rscn_page)) {
-        is_repeat = false;
-    } else {
-        /* Check repeat: for each existing RSCN page from the RSCN_Mgr page list */
-        list_for_each_safe(node, next_node, &rscn_mg->list_using_rscn_page) {
-            exit_port_id_page = list_entry(node, struct unf_port_id_page,
-                               list_node_rscn);
-            if (exit_port_id_page->port_id_port == rscn_port_id->port_id_port &&
-                exit_port_id_page->port_id_area == rscn_port_id->port_id_area &&
-                exit_port_id_page->port_id_domain == rscn_port_id->port_id_domain) {
-                is_repeat = true;
-                break;
-            }
-        }
-    }
-    spin_unlock_irqrestore(&rscn_mg->rscn_id_list_lock, flag);
-
-    FC_CHECK_RETURN_VOID(rscn_mg->unf_get_free_rscn_node);
-
-    /* 2. Get & add a free RSCN node --->>> RSCN_Mgr */
-    if (!is_repeat) {
-        new_port_id_page = rscn_mg->unf_get_free_rscn_node(rscn_mg);
-        if (!new_port_id_page) {
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT,
-                     UNF_ERR, "[err]Get free RSCN node failed");
-
-            return;
-        }
-
-        new_port_id_page->addr_format = rscn_port_id->addr_format;
-        new_port_id_page->event_qualifier = rscn_port_id->event_qualifier;
-        new_port_id_page->reserved = rscn_port_id->reserved;
-        new_port_id_page->port_id_domain = rscn_port_id->port_id_domain;
-        new_port_id_page->port_id_area = rscn_port_id->port_id_area;
-        new_port_id_page->port_id_port = rscn_port_id->port_id_port;
-
-        /* Add entry to list: using_rscn_page */
-        spin_lock_irqsave(&rscn_mg->rscn_id_list_lock, flag);
-        list_add_tail(&new_port_id_page->list_node_rscn, &rscn_mg->list_using_rscn_page);
-        spin_unlock_irqrestore(&rscn_mg->rscn_id_list_lock, flag);
-    } else {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                 "[info]Has repeated RSCN node with domain(0x%x) area(0x%x) port(0x%x)",
-                 rscn_port_id->port_id_domain, rscn_port_id->port_id_area,
-                 rscn_port_id->port_id_port);
-    }
-}
-
-static u32 unf_analysis_rscn_payload(struct unf_lport *lport,
-                     struct unf_rscn_pld *rscn_pld)
-{
-#define UNF_OS_DISC_REDISC_TIME 10000
-
-    struct unf_rscn_port_id_page *rscn_port_id = NULL;
-    struct unf_disc *disc = NULL;
-    struct unf_rscn_mgr *rscn_mgr = NULL;
-    u32 index = 0;
-    u32 pld_len = 0;
-    u32 port_id_page_cnt = 0;
-    u32 ret = RETURN_OK;
-    ulong flag = 0;
-    bool eb_need_disc_flag = false;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(rscn_pld, UNF_RETURN_ERROR);
-
-    /* This field is the length in bytes of the entire payload, inclusive of word 0 */
-    pld_len = UNF_GET_RSCN_PLD_LEN(rscn_pld->cmnd);
-    pld_len -= sizeof(rscn_pld->cmnd);
-    port_id_page_cnt = pld_len / UNF_RSCN_PAGE_LEN;
-
-    /* The payload may carry no more than 255 pages */
-    if (port_id_page_cnt > UNF_RSCN_PAGE_SUM) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-                 "[err]Port(0x%x_0x%x) page num(0x%x) exceed 255 in RSCN",
-                 lport->port_id, lport->nport_id, port_id_page_cnt);
-
-        return UNF_RETURN_ERROR;
-    }
-
-    /* L_Port-->Disc-->Rscn_Mgr */
-    disc = &lport->disc;
-    rscn_mgr = &disc->rscn_mgr;
-
-    /* for each ID from RSCN_Page: check whether DISC is needed */
-    while (index < port_id_page_cnt) {
-        rscn_port_id = &rscn_pld->port_id_page[index];
-        if (unf_lookup_lport_by_nportid(lport, *(u32 *)rscn_port_id)) {
-            /* Prevent creating a session with an L_Port that has the same N_Port_ID */
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO,
-                     "[info]Port(0x%x) find local N_Port_ID(0x%x) within RSCN payload",
-                     ((struct unf_lport *)(lport->root_lport))->nport_id,
-                     *(u32 *)rscn_port_id);
-        } else {
-            /* New RSCN_Page ID found, save it to the RSCN_Mgr */
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO,
-                     "[info]Port(0x%x_0x%x) save RSCN N_Port_ID(0x%x)",
-                     lport->port_id, lport->nport_id,
-                     *(u32 *)rscn_port_id);
-
-            /* 1. new RSCN_Page ID found, save it to the RSCN_Mgr */
-            unf_save_rscn_port_id(rscn_mgr, rscn_port_id);
-            eb_need_disc_flag = true;
-        }
-        index++;
-    }
-
-    if (!eb_need_disc_flag) {
-        FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR,
-                 "[info]Port(0x%x) find all N_Port_ID and do not need to disc",
-                 ((struct unf_lport *)(lport->root_lport))->nport_id);
-
-        return RETURN_OK;
-    }
-
-    /* 2. Do/Start DISC: check & do DISC (GID_PT) process */
-    if (!disc->disc_temp.unf_disc_start) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x_0x%x) DISC start function is NULL",
-                 lport->port_id, lport->nport_id);
-
-        return UNF_RETURN_ERROR;
-    }
-
-    spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
-    if (disc->states == UNF_DISC_ST_END ||
-        ((jiffies - disc->last_disc_jiff) > msecs_to_jiffies(UNF_OS_DISC_REDISC_TIME))) {
-        disc->disc_option = UNF_RSCN_DISC;
-        disc->last_disc_jiff = jiffies;
-        spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
-
-        ret = disc->disc_temp.unf_disc_start(lport);
-    } else {
-        FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_INFO,
-                 "[info]Port(0x%x_0x%x) DISC state(0x%x) with last time(%llu) and don't do DISC",
-                 lport->port_id, lport->nport_id, disc->states,
-                 disc->last_disc_jiff);
-
-        spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);
-    }
-
-    return ret;
-}
-
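/*
 * Editor's sketch (illustrative): the page arithmetic used by
 * unf_analysis_rscn_payload above. The RSCN payload length field counts the
 * whole payload including the 4-byte command word; each affected Port_ID
 * page is 4 bytes, and FC-LS caps the page count at 255. For example, a
 * length field of 12 yields two pages.
 */
#include <stdint.h>

#define RSCN_CMND_LEN 4u /* command/len word, included in the length field */
#define RSCN_PAGE_LEN 4u /* one affected Port_ID page */
#define RSCN_PAGE_MAX 255u

static int rscn_page_count(uint32_t pld_len_field)
{
    uint32_t pages = (pld_len_field - RSCN_CMND_LEN) / RSCN_PAGE_LEN;

    /* reject malformed payloads that claim more than 255 pages */
    return pages > RSCN_PAGE_MAX ? -1 : (int)pages;
}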
-u32 unf_rscn_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg)
-{
-    /*
-     * An RSCN ELS shall be sent to registered Nx_Ports
-     * when an event occurs that may have affected the state of
-     * one or more Nx_Ports, or the ULP state within the Nx_Port.
-     *
-     * The Payload of an RSCN Request includes a list
-     * containing the addresses of the affected Nx_Ports.
-     *
-     * Each affected Port_ID page contains the ID of the Nx_Port,
-     * Fabric Controller, E_Port, domain, or area for which the event was
-     * detected.
-     */
-    struct unf_rscn_pld *rscn_pld = NULL;
-    struct unf_rport *unf_rport = NULL;
-    u32 ret = UNF_RETURN_ERROR;
-    u32 pld_len = 0;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-             "[info]Receive RSCN Port(0x%x_0x%x)<---RPort(0x%x) OX_ID(0x%x)",
-             lport->port_id, lport->nport_id, sid, xchg->oxid);
-
-    UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_RSCN);
-
-    /* 1. Get R_Port by S_ID */
-    unf_rport = unf_get_rport_by_nport_id(lport, sid); /* rport busy_list */
-    if (!unf_rport) {
-        unf_rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, sid);
-        if (!unf_rport) {
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                     "[warn]Port(0x%x_0x%x) received RSCN but has no RPort(0x%x) with OX_ID(0x%x)",
-                     lport->port_id, lport->nport_id, sid, xchg->oxid);
-
-            unf_cm_free_xchg(lport, xchg);
-            return UNF_RETURN_ERROR;
-        }
-
-        unf_rport->nport_id = sid;
-    }
-
-    rscn_pld = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld;
-    FC_CHECK_RETURN_VALUE(rscn_pld, UNF_RETURN_ERROR);
-    pld_len = UNF_GET_RSCN_PLD_LEN(rscn_pld->cmnd);
-    UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, rscn_pld, pld_len);
-
-    /* 2. NOTE: Analyze RSCN payload (save & disc if necessary) */
-    ret = unf_analysis_rscn_payload(lport, rscn_pld);
-    if (ret != RETURN_OK) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x_0x%x) analysis RSCN failed",
-                 lport->port_id, lport->nport_id);
-    }
-
-    /* 3. send rscn_acc after analyzing the payload */
-    ret = unf_send_rscn_acc(lport, unf_rport, xchg);
-    if (ret != RETURN_OK) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x_0x%x) send RSCN response failed",
-                 lport->port_id, lport->nport_id);
-    }
-
-    return ret;
-}
-
-static void unf_analysis_pdisc_pld(struct unf_lport *lport,
-                   struct unf_rport *rport,
-                   struct unf_plogi_pdisc *pdisc)
-{
-    struct unf_lgn_parm *pdisc_params = NULL;
-    u64 wwpn = INVALID_VALUE64;
-    u64 wwnn = INVALID_VALUE64;
-
-    FC_CHECK_RETURN_VOID(lport);
-    FC_CHECK_RETURN_VOID(rport);
-    FC_CHECK_RETURN_VOID(pdisc);
-
-    pdisc_params = &pdisc->payload.stparms;
-    if (pdisc_params->co_parms.bb_receive_data_field_size > UNF_MAX_FRAME_SIZE)
-        rport->max_frame_size = UNF_MAX_FRAME_SIZE;
-    else
-        rport->max_frame_size = pdisc_params->co_parms.bb_receive_data_field_size;
-
-    wwnn = (u64)(((u64)(pdisc_params->high_node_name) << UNF_SHIFT_32) |
-           ((u64)pdisc_params->low_node_name));
-    wwpn = (u64)(((u64)(pdisc_params->high_port_name) << UNF_SHIFT_32) |
-           ((u64)pdisc_params->low_port_name));
-
-    rport->port_name = wwpn;
-    rport->node_name = wwnn;
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-             "[info]Port(0x%x) save PDISC parameters to RPort(0x%x) WWPN(0x%llx) WWNN(0x%llx)",
-             lport->port_id, rport->nport_id, rport->port_name,
-             rport->node_name);
-}
-
-u32 unf_send_pdisc_rjt(struct unf_lport *lport, struct unf_rport *rport, struct unf_xchg *xchg)
-{
-    u32 ret = UNF_RETURN_ERROR;
-    struct unf_rjt_info rjt_info;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    memset(&rjt_info, 0, sizeof(struct unf_rjt_info));
-    rjt_info.els_cmnd_code = ELS_PDISC;
-    rjt_info.reason_code = UNF_LS_RJT_LOGICAL_ERROR;
-    rjt_info.reason_explanation = UNF_LS_RJT_NO_ADDITIONAL_INFO;
-
-    ret = unf_send_els_rjt_by_rport(lport, xchg, rport, &rjt_info);
-
-    return ret;
-}
-
-u32 unf_pdisc_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg)
-{
-    struct unf_plogi_pdisc *pdisc = NULL;
-    struct unf_rport *unf_rport = NULL;
-    ulong flags = 0;
-    u32 ret = RETURN_OK;
-    u64 wwpn = 0;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-             "[info]LOGIN: Receive PDISC. Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)",
-             lport->port_id, sid, xchg->oxid);
-
-    UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_PDISC);
-    pdisc = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->pdisc;
-    UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, &pdisc->payload,
-                sizeof(struct unf_plogi_payload));
-    wwpn = (u64)(((u64)(pdisc->payload.stparms.high_port_name) << UNF_SHIFT_32) |
-           ((u64)pdisc->payload.stparms.low_port_name));
-
-    unf_rport = unf_find_rport(lport, sid, wwpn);
-    if (!unf_rport) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) can't find RPort by NPort ID(0x%x). Free exchange and send LOGO",
-                 lport->port_id, sid);
-
-        unf_cm_free_xchg(lport, xchg);
-        (void)unf_send_logo_by_did(lport, sid);
-    } else {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MINOR,
-                 "[info]Port(0x%x) get exist RPort(0x%x) when receive PDISC with S_Id(0x%x)",
-                 lport->port_id, unf_rport->nport_id, sid);
-
-        if (sid >= UNF_FC_FID_DOM_MGR)
-            return unf_send_pdisc_rjt(lport, unf_rport, xchg);
-
-        unf_analysis_pdisc_pld(lport, unf_rport, pdisc);
-
-        /* State: READY */
-        spin_lock_irqsave(&unf_rport->rport_state_lock, flags);
-        if (unf_rport->rp_state == UNF_RPORT_ST_READY) {
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                     "[info]Port(0x%x) find RPort(0x%x) state is READY when receiving PDISC",
-                     lport->port_id, sid);
-
-            spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-            ret = unf_send_pdisc_acc(lport, unf_rport, xchg);
-            if (ret != RETURN_OK) {
-                FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                         "[warn]Port(0x%x) handle PDISC failed",
-                         lport->port_id);
-
-                return ret;
-            }
-
-            /* Report Down/Up event to scsi */
-            unf_update_lport_state_by_linkup_event(lport,
-                                   unf_rport, unf_rport->options);
-        } else if ((unf_rport->rp_state == UNF_RPORT_ST_CLOSING) &&
-               (unf_rport->session)) {
-            /* State: Closing */
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                     "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC",
-                     lport->port_id, sid, unf_rport->rp_state);
-
-            spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-            unf_cm_free_xchg(lport, xchg);
-            (void)unf_send_logo_by_did(lport, sid);
-        } else if (unf_rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) {
-            /* State: PRLI_WAIT */
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                     "[info]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC",
-                     lport->port_id, sid, unf_rport->rp_state);
-
-            spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-            ret = unf_send_pdisc_acc(lport, unf_rport, xchg);
-            if (ret != RETURN_OK) {
-                FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                         "[warn]Port(0x%x) handle PDISC failed",
-                         lport->port_id);
-
-                return ret;
-            }
-        } else {
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                     "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC, send LOGO",
-                     lport->port_id, sid, unf_rport->rp_state);
-
-            unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO);
-            spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-            unf_rport_enter_logo(lport, unf_rport);
-            unf_cm_free_xchg(lport, xchg);
-        }
-    }
-
-    return ret;
-}
-
-static void unf_analysis_adisc_pld(struct unf_lport *lport,
-                   struct unf_rport *rport,
-                   struct unf_adisc_payload *adisc_pld)
-{
-    u64 wwpn = INVALID_VALUE64;
-    u64 wwnn = INVALID_VALUE64;
-
-    FC_CHECK_RETURN_VOID(lport);
-    FC_CHECK_RETURN_VOID(rport);
-    FC_CHECK_RETURN_VOID(adisc_pld);
-
-    wwnn = (u64)(((u64)(adisc_pld->high_node_name) << UNF_SHIFT_32) |
-           ((u64)adisc_pld->low_node_name));
-    wwpn = (u64)(((u64)(adisc_pld->high_port_name) << UNF_SHIFT_32) |
-           ((u64)adisc_pld->low_port_name));
-
-    rport->port_name = wwpn;
-    rport->node_name = wwnn;
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-             "[info]Port(0x%x) save ADISC parameters to RPort(0x%x), WWPN(0x%llx) WWNN(0x%llx) NPort ID(0x%x)",
-             lport->port_id, rport->nport_id, rport->port_name,
-             rport->node_name, adisc_pld->nport_id);
-}
-
-u32 unf_adisc_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg)
-{
-    struct unf_rport *unf_rport = NULL;
-    struct unf_adisc_payload *adisc_pld = NULL;
-    ulong flags = 0;
-    u64 wwpn = 0;
-    u32 ret = RETURN_ERROR;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-             "[info]LOGIN: Receive ADISC. Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)",
-             lport->port_id, sid, xchg->oxid);
-
-    UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_ADISC);
-    adisc_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->adisc.adisc_payl;
-    UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, adisc_pld, sizeof(struct unf_adisc_payload));
-    wwpn = (u64)(((u64)(adisc_pld->high_port_name) << UNF_SHIFT_32) |
-           ((u64)adisc_pld->low_port_name));
-
-    unf_rport = unf_find_rport(lport, sid, wwpn);
-    if (!unf_rport) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) can't find RPort by NPort ID(0x%x). Free exchange and send LOGO",
-                 lport->port_id, sid);
-
-        unf_cm_free_xchg(lport, xchg);
-        (void)unf_send_logo_by_did(lport, sid);
-
-        return ret;
-    }
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MINOR,
-             "[info]Port(0x%x) get exist RPort(0x%x) when receive ADISC with S_ID(0x%x)",
-             lport->port_id, unf_rport->nport_id, sid);
-
-    unf_analysis_adisc_pld(lport, unf_rport, adisc_pld);
-
-    spin_lock_irqsave(&unf_rport->rport_state_lock, flags);
-    if (unf_rport->rp_state == UNF_RPORT_ST_READY) {
-        /* State: READY */
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                 "[info]Port(0x%x) find RPort(0x%x) state is READY when receiving ADISC",
-                 lport->port_id, sid);
-
-        spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-        /* Return ACC directly */
-        ret = unf_send_adisc_acc(lport, unf_rport, xchg);
-        if (ret != RETURN_OK) {
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                     "[warn]Port(0x%x) send ADISC ACC failed", lport->port_id);
-
-            return ret;
-        }
-
-        /* Report Down/Up event to SCSI */
-        unf_update_lport_state_by_linkup_event(lport, unf_rport, unf_rport->options);
-    } else if ((unf_rport->rp_state == UNF_RPORT_ST_CLOSING) &&
-           (unf_rport->session)) {
-        /* State: Closing */
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC",
-                 lport->port_id, sid, unf_rport->rp_state);
-
-        spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-        unf_rport = unf_get_safe_rport(lport, unf_rport,
-                           UNF_RPORT_REUSE_RECOVER,
-                           unf_rport->nport_id);
-        if (unf_rport) {
-            spin_lock_irqsave(&unf_rport->rport_state_lock, flags);
-            unf_rport->nport_id = sid;
-            spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-            ret = unf_send_adisc_acc(lport, unf_rport, xchg);
-            if (ret != RETURN_OK) {
-                FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                         "[warn]Port(0x%x) send ADISC ACC failed",
-                         lport->port_id);
-
-                return ret;
-            }
-
-            unf_update_lport_state_by_linkup_event(lport,
-                                   unf_rport, unf_rport->options);
-        } else {
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                     "[warn]Port(0x%x) can't find RPort by NPort_ID(0x%x). Free exchange and send LOGO",
-                     lport->port_id, sid);
-
-            unf_cm_free_xchg(lport, xchg);
-            (void)unf_send_logo_by_did(lport, sid);
-        }
-    } else if (unf_rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) {
-        /* State: PRLI_WAIT */
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                 "[info]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC",
-                 lport->port_id, sid, unf_rport->rp_state);
-
-        spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-        ret = unf_send_adisc_acc(lport, unf_rport, xchg);
-        if (ret != RETURN_OK) {
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                     "[warn]Port(0x%x) send ADISC ACC failed", lport->port_id);
-
-            return ret;
-        }
-    } else {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC, send LOGO",
-                 lport->port_id, sid, unf_rport->rp_state);
-
-        unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO);
-        spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags);
-
-        unf_rport_enter_logo(lport, unf_rport);
-        unf_cm_free_xchg(lport, xchg);
-    }
-
-    return ret;
-}
-
-u32 unf_rec_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg)
-{
-    struct unf_rport *unf_rport = NULL;
-    u32 ret = UNF_RETURN_ERROR;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-             "[info]LOGIN: Port(0x%x) receive REC", lport->port_id);
-
-    /* Send rec acc */
-    ret = unf_send_rec_acc(lport, unf_rport, xchg); /* discard directly */
-
-    return ret;
-}
-
-u32 unf_rrq_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg)
-{
-    struct unf_rport *unf_rport = NULL;
-    struct unf_rrq *rrq = NULL;
-    struct unf_xchg *xchg_reused = NULL;
-    u32 ret = UNF_RETURN_ERROR;
-    u16 ox_id = 0;
-    u16 rx_id = 0;
-    u32 unf_sid = 0;
-    ulong flags = 0;
-    struct unf_rjt_info rjt_info = {0};
-    struct unf_xchg_hot_pool *hot_pool = NULL;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_RRQ);
-    rrq = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rrq;
-    ox_id = (u16)(rrq->oxid_rxid >> UNF_SHIFT_16);
-    rx_id = (u16)(rrq->oxid_rxid);
-    unf_sid = rrq->sid & UNF_NPORTID_MASK;
-
-    FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT,
-             "[warn]Receive RRQ. Port(0x%x)<---RPort(0x%x) sfsXchg(0x%p) OX_ID(0x%x,0x%x) RX_ID(0x%x)",
-             lport->port_id, sid, xchg, ox_id, xchg->oxid, rx_id);
-
-    /* Get R_Port */
-    unf_rport = unf_get_rport_by_nport_id(lport, sid);
-    if (!unf_rport) {
-        FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) receive RRQ but has no RPort(0x%x)",
-                 lport->port_id, sid);
-
-        /* NOTE: send LOGO */
-        unf_send_logo_by_did(lport, unf_sid);
-
-        unf_cm_free_xchg(lport, xchg);
-        return ret;
-    }
-
-    /* Get Target (Abort I/O) exchange context */
-    xchg_reused = unf_cm_lookup_xchg_by_id(lport, ox_id, unf_sid); /* unf_find_xchg_by_ox_id */
-    if (!xchg_reused) {
-        FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) cannot find exchange with OX_ID(0x%x) RX_ID(0x%x) S_ID(0x%x)",
-                 lport->port_id, ox_id, rx_id, unf_sid);
-
-        rjt_info.els_cmnd_code = ELS_RRQ;
-        rjt_info.reason_code = FCXLS_BA_RJT_LOGICAL_ERROR | FCXLS_LS_RJT_INVALID_OXID_RXID;
-
-        /* NOTE: send ELS RJT */
-        if (unf_send_els_rjt_by_rport(lport, xchg, unf_rport, &rjt_info) != RETURN_OK) {
-            unf_cm_free_xchg(lport, xchg);
-            return UNF_RETURN_ERROR;
-        }
-
-        return RETURN_OK;
-    }
-
-    hot_pool = xchg_reused->hot_pool;
-    if (unlikely(!hot_pool)) {
-        FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR,
-                 "Port(0x%x) OxId(0x%x) Rxid(0x%x) Sid(0x%x) Hot Pool is NULL.",
-                 lport->port_id, ox_id, rx_id, unf_sid);
-
-        return ret;
-    }
-
-    spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags);
-    xchg_reused->oxid = INVALID_VALUE16;
-    xchg_reused->rxid = INVALID_VALUE16;
-    spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags);
-
-    /* NOTE: release I/O exchange context */
-    unf_xchg_ref_dec(xchg_reused, SFS_RESPONSE);
-
-    /* Send RRQ ACC */
-    ret = unf_send_rrq_acc(lport, unf_rport, xchg);
-    if (ret != RETURN_OK) {
-        FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) cannot send RRQ rsp. Xchg(0x%p) Ioxchg(0x%p) OX_RX_ID(0x%x 0x%x) S_ID(0x%x)",
-                 lport->port_id, xchg, xchg_reused, ox_id, rx_id, unf_sid);
-
-        unf_cm_free_xchg(lport, xchg);
-    }
-
-    return ret;
-}
-
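/*
 * Editor's sketch (illustrative): the RRQ payload packs the originator and
 * responder exchange IDs into one 32-bit word, OX_ID in the upper 16 bits
 * and RX_ID in the lower 16, which is what the shifts above extract.
 */
#include <stdint.h>

static inline uint16_t rrq_ox_id(uint32_t oxid_rxid)
{
    return (uint16_t)(oxid_rxid >> 16); /* upper half: OX_ID */
}

static inline uint16_t rrq_rx_id(uint32_t oxid_rxid)
{
    return (uint16_t)oxid_rxid; /* lower half: RX_ID */
}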
-u32 unf_logo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg)
-{
-    struct unf_rport *unf_rport = NULL;
-    struct unf_rport *logo_rport = NULL;
-    struct unf_logo *logo = NULL;
-    u32 ret = UNF_RETURN_ERROR;
-    u32 nport_id = 0;
-    struct unf_rjt_info rjt_info = {0};
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_LOGO);
-    logo = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->logo;
-    nport_id = logo->payload.nport_id & UNF_NPORTID_MASK;
-
-    if (sid < UNF_FC_FID_DOM_MGR) {
-        /* R_Port is not a fabric port */
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT,
-                 "[info]LOGIN: Receive LOGO. Port(0x%x)<---RPort(0x%x) NPort_ID(0x%x) OXID(0x%x)",
-                 lport->port_id, sid, nport_id, xchg->oxid);
-    }
-
-    UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, &logo->payload,
-                sizeof(struct unf_logo_payload));
-
-    /*
-     * 1. S_ID unequal to NPort_ID:
-     * link down the R_Port found by NPort_ID immediately
-     */
-    if (sid != nport_id) {
-        logo_rport = unf_get_rport_by_nport_id(lport, nport_id);
-        if (logo_rport)
-            unf_rport_immediate_link_down(lport, logo_rport);
-    }
-
-    /* 2. Get R_Port by S_ID (frame header) */
-    unf_rport = unf_get_rport_by_nport_id(lport, sid);
-    unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_INIT, sid); /* INIT */
-    if (!unf_rport) {
-        memset(&rjt_info, 0, sizeof(struct unf_rjt_info));
-        rjt_info.els_cmnd_code = ELS_LOGO;
-        rjt_info.reason_code = UNF_LS_RJT_LOGICAL_ERROR;
-        rjt_info.reason_explanation = UNF_LS_RJT_NO_ADDITIONAL_INFO;
-        ret = unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info);
-
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) receive LOGO but has no RPort(0x%x)",
-                 lport->port_id, sid);
-
-        return ret;
-    }
-
-    /*
-     * 3. I/O resource release: set ABORT tag
-     *
-     * Call by: R_Port remove; RCVD LOGO; RCVD PLOGI; send PLOGI ACC
-     */
-    unf_cm_xchg_mgr_abort_io_by_id(lport, unf_rport, sid, lport->nport_id, INI_IO_STATE_LOGO);
-
-    /* 4. Send LOGO ACC */
-    ret = unf_send_logo_acc(lport, unf_rport, xchg);
-    if (ret != RETURN_OK) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT,
-                 UNF_WARN, "[warn]Port(0x%x) send LOGO ACC failed", lport->port_id);
-    }
-    /*
-     * 5. Do the same operations as for RCVD LOGO/PRLO & Send LOGO:
-     * retry (LOGIN or LOGO) or link down immediately
-     */
-    unf_process_rport_after_logo(lport, unf_rport);
-
-    return ret;
-}
-
-u32 unf_prlo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg)
-{
-    struct unf_rport *unf_rport = NULL;
-    struct unf_prli_prlo *prlo = NULL;
-    u32 ret = UNF_RETURN_ERROR;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-             "[info]LOGIN: Receive PRLO. Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)",
-             lport->port_id, sid, xchg->oxid);
-
-    UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_LOGO);
-
-    /* Get (new) R_Port */
-    unf_rport = unf_get_rport_by_nport_id(lport, sid);
-    unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_INIT, sid); /* INIT */
-    if (!unf_rport) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) receive PRLO but has no RPort",
-                 lport->port_id);
-
-        /* Discard directly */
-        unf_cm_free_xchg(lport, xchg);
-        return ret;
-    }
-
-    prlo = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prlo;
-    UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, &prlo->payload,
-                sizeof(struct unf_prli_payload));
-
-    /* Send PRLO ACC to remote */
-    ret = unf_send_prlo_acc(lport, unf_rport, xchg);
-    if (ret != RETURN_OK) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) send PRLO ACC failed", lport->port_id);
-    }
-
-    /* Enter enhanced action after LOGO (retry LOGIN or LOGO) */
-    unf_process_rport_after_logo(lport, unf_rport);
-
-    return ret;
-}
-
-static void unf_fill_echo_acc_pld(struct unf_echo *echo_acc)
-{
-    struct unf_echo_payload *echo_acc_pld = NULL;
-
-    FC_CHECK_RETURN_VOID(echo_acc);
-
-    echo_acc_pld = echo_acc->echo_pld;
-    FC_CHECK_RETURN_VOID(echo_acc_pld);
-
-    echo_acc_pld->cmnd = UNF_ELS_CMND_ACC;
-}
-
-static void unf_echo_acc_callback(struct unf_xchg *xchg)
-{
-    struct unf_lport *unf_lport = NULL;
-
-    FC_CHECK_RETURN_VOID(xchg);
-
-    unf_lport = xchg->lport;
-
-    FC_CHECK_RETURN_VOID(unf_lport);
-    if (xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr) {
-        pci_unmap_single(unf_lport->low_level_func.dev,
-                 xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc
-                 .phy_echo_addr,
-                 UNF_ECHO_PAYLOAD_LEN, DMA_BIDIRECTIONAL);
-        xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr = 0;
-    }
-}
-
-static u32 unf_send_echo_acc(struct unf_lport *lport, u32 did,
-                 struct unf_xchg *xchg)
-{
-    struct unf_echo *echo_acc = NULL;
-    union unf_sfs_u *fc_entry = NULL;
-    u32 ret = UNF_RETURN_ERROR;
-    u16 ox_id = 0;
-    u16 rx_id = 0;
-    struct unf_frame_pkg pkg;
-    dma_addr_t phy_echo_acc_addr;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    memset(&pkg, 0, sizeof(struct unf_frame_pkg));
-    xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_ECHO);
-    xchg->did = did;
-    xchg->sid = lport->nport_id;
-    xchg->oid = xchg->sid;
-    xchg->lport = lport;
-
-    xchg->callback = NULL;
-    xchg->ob_callback = unf_echo_acc_callback;
-
-    unf_fill_package(&pkg, xchg, xchg->rport);
-    pkg.type = UNF_PKG_ELS_REPLY;
-    fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr;
-    if (!fc_entry) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)",
-                 lport->port_id, xchg->hotpooltag);
-
-        unf_cm_free_xchg(lport, xchg);
-        return UNF_RETURN_ERROR;
-    }
-
-    echo_acc = &fc_entry->echo_acc;
-    unf_fill_echo_acc_pld(echo_acc);
-    ox_id = xchg->oxid;
-    rx_id = xchg->rxid;
-    phy_echo_acc_addr = pci_map_single(lport->low_level_func.dev,
-                       echo_acc->echo_pld,
-                       UNF_ECHO_PAYLOAD_LEN,
-                       DMA_BIDIRECTIONAL);
-    if (pci_dma_mapping_error(lport->low_level_func.dev, phy_echo_acc_addr)) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT,
-                 UNF_WARN, "[warn]Port(0x%x) pci map err",
-                 lport->port_id);
-        unf_cm_free_xchg(lport, xchg);
-        return UNF_RETURN_ERROR;
-    }
-    echo_acc->phy_echo_addr = phy_echo_acc_addr;
-
-    ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg);
-    if (ret != RETURN_OK) {
-        unf_cm_free_xchg((void *)lport, (void *)xchg);
-        pci_unmap_single(lport->low_level_func.dev,
-                 phy_echo_acc_addr, UNF_ECHO_PAYLOAD_LEN,
-                 DMA_BIDIRECTIONAL);
-        echo_acc->phy_echo_addr = 0;
-    }
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-             "[info]ECHO ACC send %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)",
-             (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id,
-             did, ox_id, rx_id);
-
-    return ret;
-}
-
-u32 unf_echo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg)
-{
-    struct unf_echo_payload *echo_pld = NULL;
-    struct unf_rport *unf_rport = NULL;
-    u32 ret = UNF_RETURN_ERROR;
-    u32 data_len = 0;
-
-    FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR);
-    FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR);
-
-    data_len = xchg->fcp_sfs_union.sfs_entry.cur_offset;
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-             "[info]Receive ECHO. Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)",
-             lport->port_id, sid, xchg->oxid);
-
-    UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_ECHO);
-    echo_pld = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld;
-    UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, echo_pld, data_len);
-    unf_rport = unf_get_rport_by_nport_id(lport, sid);
-    xchg->rport = unf_rport;
-
-    ret = unf_send_echo_acc(lport, sid, xchg);
-    if (ret != RETURN_OK) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT,
-                 UNF_WARN, "[warn]Port(0x%x) send ECHO ACC failed", lport->port_id);
-    }
-
-    return ret;
-}
-
-static void unf_login_with_rport_in_n2n(struct unf_lport *lport,
-                    u64 remote_port_name,
-                    u64 remote_node_name)
-{
-    /*
-     * Call by (P2P):
-     * 1. RCVD FLOGI ACC
-     * 2. Send FLOGI ACC succeeded
-     *
-     * Compare WWNs; the larger one is master and sends PLOGI
-     */
-    struct unf_lport *unf_lport = lport;
-    struct unf_rport *unf_rport = NULL;
-    ulong lport_flag = 0;
-    ulong rport_flag = 0;
-    u64 port_name = 0;
-    u64 node_name = 0;
-    u32 ret = RETURN_OK;
-
-    FC_CHECK_RETURN_VOID(lport);
-
-    spin_lock_irqsave(&unf_lport->lport_state_lock, lport_flag);
-    unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_READY); /* LPort: FLOGI_WAIT --> READY */
-    spin_unlock_irqrestore(&unf_lport->lport_state_lock, lport_flag);
-
-    port_name = remote_port_name;
-    node_name = remote_node_name;
-
-    if (unf_lport->port_name > port_name) {
-        /* Master case: send PLOGI */
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                 "[info]Port(0x%x)'s WWN(0x%llx) is larger than rport(0x%llx), should be master",
-                 unf_lport->port_id, unf_lport->port_name, port_name);
-
-        /* Update N_Port_ID now: 0xEF */
-        unf_lport->nport_id = UNF_P2P_LOCAL_NPORT_ID;
-
-        unf_rport = unf_find_valid_rport(lport, port_name, UNF_P2P_REMOTE_NPORT_ID);
-        unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY,
-                           UNF_P2P_REMOTE_NPORT_ID);
-        if (unf_rport) {
-            unf_rport->node_name = node_name;
-            unf_rport->port_name = port_name;
-            unf_rport->nport_id = UNF_P2P_REMOTE_NPORT_ID; /* 0xD6 */
-            unf_rport->local_nport_id = UNF_P2P_LOCAL_NPORT_ID; /* 0xEF */
-
-            spin_lock_irqsave(&unf_rport->rport_state_lock, rport_flag);
-            if (unf_rport->rp_state == UNF_RPORT_ST_PLOGI_WAIT ||
-                unf_rport->rp_state == UNF_RPORT_ST_PRLI_WAIT ||
-                unf_rport->rp_state == UNF_RPORT_ST_READY) {
-                FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                         "[info]LOGIN: Port(0x%x) Rport(0x%x) have sent PLOGI or PRLI with state(0x%x)",
-                         unf_lport->port_id,
-                         unf_rport->nport_id,
-                         unf_rport->rp_state);
-
-                spin_unlock_irqrestore(&unf_rport->rport_state_lock,
-                               rport_flag);
-                return;
-            }
-            /* Update R_Port state: PLOGI_WAIT */
-            unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI);
-            spin_unlock_irqrestore(&unf_rport->rport_state_lock, rport_flag);
-
-            /* P2P with master: Start to Send PLOGI */
-            ret = unf_send_plogi(unf_lport, unf_rport);
-            if (ret != RETURN_OK) {
-                FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                         "[warn]LOGIN: Port(0x%x) with WWN(0x%llx) send PLOGI to(0x%llx) failed",
-                         unf_lport->port_id,
-                         unf_lport->port_name, port_name);
-
-                unf_rport_error_recovery(unf_rport);
-            }
-        } else {
-            /* Get/Alloc R_Port failed */
-            FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                     "[warn]Port(0x%x) with WWN(0x%llx) allocate RPort(ID:0x%x,WWPN:0x%llx) failed",
-                     unf_lport->port_id, unf_lport->port_name,
-                     UNF_P2P_REMOTE_NPORT_ID, port_name);
-        }
-    } else {
-        /* Slave case: L_Port's Port Name is smaller than the R_Port's */
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-                 "[info]Port(0x%x) with WWN(0x%llx) is smaller than rport(0x%llx), do nothing",
-                 unf_lport->port_id, unf_lport->port_name, port_name);
-    }
-}
-
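/*
 * Editor's sketch (illustrative): point-to-point role election as performed
 * by unf_login_with_rport_in_n2n above. After FLOGI ACC in N2N mode the two
 * ports compare WWPNs; the numerically larger one acts as master, assigns
 * itself the local N_Port_ID (0xEF here, 0xD6 for the peer, the driver's
 * fixed P2P values) and initiates PLOGI, while the smaller side waits.
 */
#include <stdbool.h>
#include <stdint.h>

#define P2P_LOCAL_ID  0xEF
#define P2P_REMOTE_ID 0xD6

/* Returns true when the local side must send PLOGI (master role) */
static bool p2p_is_master(uint64_t local_wwpn, uint64_t remote_wwpn)
{
    return local_wwpn > remote_wwpn;
}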
-void unf_lport_enter_mns_plogi(struct unf_lport *lport)
-{
-    /* Fabric or Public Loop Mode: Login with Name server */
-    struct unf_lport *unf_lport = lport;
-    struct unf_rport *unf_rport = NULL;
-    ulong flag = 0;
-    u32 ret = UNF_RETURN_ERROR;
-    struct unf_plogi_payload *plogi_pld = NULL;
-    union unf_sfs_u *fc_entry = NULL;
-    struct unf_xchg *xchg = NULL;
-    struct unf_frame_pkg pkg;
-
-    FC_CHECK_RETURN_VOID(lport);
-
-    /* Get (safe) R_Port */
-    unf_rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, UNF_FC_FID_MGMT_SERV);
-    if (!unf_rport) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) allocate RPort failed", lport->port_id);
-        return;
-    }
-
-    spin_lock_irqsave(&unf_rport->rport_state_lock, flag);
-    unf_rport->nport_id = UNF_FC_FID_MGMT_SERV; /* 0xfffffa */
-    spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag);
-
-    memset(&pkg, 0, sizeof(struct unf_frame_pkg));
-
-    /* Get & Set new free exchange */
-    xchg = unf_cm_get_free_xchg(lport, UNF_XCHG_TYPE_SFS);
-    if (!xchg) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-                 "[err]Port(0x%x) exchange can't be NULL for PLOGI", lport->port_id);
-
-        return;
-    }
-
-    xchg->cmnd_code = ELS_PLOGI; /* PLOGI */
-    xchg->did = unf_rport->nport_id;
-    xchg->sid = lport->nport_id;
-    xchg->oid = xchg->sid;
-    xchg->lport = unf_lport;
-    xchg->rport = unf_rport;
-
-    /* Set callback function */
-    xchg->callback = NULL;    /* for received PLOGI ACC/RJT processing */
-    xchg->ob_callback = NULL; /* for PLOGI send-failure processing */
-
-    unf_fill_package(&pkg, xchg, unf_rport);
-    pkg.type = UNF_PKG_ELS_REQ;
-    /* Fill PLOGI payload */
-    fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr;
-    if (!fc_entry) {
-        FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-                 "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)",
-                 lport->port_id, xchg->hotpooltag);
-
-        unf_cm_free_xchg(lport, xchg);
-        return;
-    }
-
-    plogi_pld = &fc_entry->plogi.payload;
-    memset(plogi_pld, 0, sizeof(struct unf_plogi_payload));
-    unf_fill_plogi_pld(plogi_pld, lport);
-
-    /* Start to Send PLOGI command */
-    ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg);
-    if (ret != RETURN_OK)
-        unf_cm_free_xchg((void *)lport, (void *)xchg);
-}
-
-static void unf_register_to_switch(struct unf_lport *lport)
-{
-    /* Register to Fabric, used for: FABRIC & PUBLIC LOOP */
-    ulong flag = 0;
-
-    FC_CHECK_RETURN_VOID(lport);
-
-    spin_lock_irqsave(&lport->lport_state_lock, flag);
-    unf_lport_state_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC);
-    spin_unlock_irqrestore(&lport->lport_state_lock, flag);
-
-    /* Login with Name server: PLOGI */
-    unf_lport_enter_sns_plogi(lport);
-
-    unf_lport_enter_mns_plogi(lport);
-
-    /* Physical Port */
-    if (lport->root_lport == lport &&
-        lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) {
-        unf_linkup_all_vports(lport);
-    }
-}
-
-void unf_fdisc_ob_callback(struct unf_xchg *xchg)
-{
-    /* Do recovery */
-    struct unf_lport *unf_lport = NULL;
-    ulong flag = 0;
-
-    FC_CHECK_RETURN_VOID(xchg);
-
-    spin_lock_irqsave(&xchg->xchg_state_lock, flag);
-    unf_lport = xchg->lport;
-    spin_unlock_irqrestore(&xchg->xchg_state_lock, flag);
-
-    FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-             "[warn]LOGIN: FDISC send failed");
-
-    FC_CHECK_RETURN_VOID(unf_lport);
-
-    /* Do L_Port error recovery */
-    unf_lport_error_recovery(unf_lport);
-}
-
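/*
 * Editor's sketch (illustrative): why the FDISC/FLOGI/PLOGI callbacks below
 * test xchg->byte_orders before use. FC payloads arrive big-endian; when the
 * hardware has not already swapped them, each 32-bit word of the payload is
 * converted to CPU order once, in place, before any field is parsed. Call
 * this only while the payload is still big-endian.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t be32_to_host(uint32_t v)
{
    return ((v & 0x000000FFu) << 24) | ((v & 0x0000FF00u) << 8) |
           ((v & 0x00FF0000u) >> 8) | ((v & 0xFF000000u) >> 24);
}

static void payload_to_cpu(uint32_t *words, size_t nwords)
{
    size_t i;

    for (i = 0; i < nwords; i++)
        words[i] = be32_to_host(words[i]); /* word-wise in-place swap */
}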
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - cmd, unf_lport->port_id, unf_rport->nport_id, xchg->oxid); - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_FLOGI); - unf_rport = unf_get_safe_rport(unf_lport, unf_rport, - UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has no Rport", unf_lport->port_id); - return; - } - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = UNF_FC_FID_FLOGI; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - if ((cmd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { - /* Case for ACC */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_FLOGI_WAIT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) receive Flogi/Fdisc ACC in state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_lport->states); - - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - return; - } - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - unf_lport_update_nport_id(unf_lport, xchg->sid); - unf_lport_update_time_params(unf_lport, fdisc_pld); - unf_register_to_switch(unf_lport); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: FDISC response is (0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - cmd, unf_lport->port_id, unf_rport->nport_id, xchg->oxid); - - /* Case for RJT: Do L_Port recovery */ - unf_lport_error_recovery(unf_lport); - } -} - -void unf_flogi_ob_callback(struct unf_xchg *xchg) -{ - /* Send FLOGI failed & Do L_Port recovery */ - struct unf_lport *unf_lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - /* Get L_port from exchange context */ - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - unf_lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - FC_CHECK_RETURN_VOID(unf_lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send FLOGI failed", - unf_lport->port_id); - - /* Check L_Port state */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_FLOGI_WAIT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send FLOGI failed with state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_lport->states); - - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - return; - } - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - /* Do L_Port error recovery */ - unf_lport_error_recovery(unf_lport); -} - -static void unf_lport_update_nport_id(struct unf_lport *lport, u32 nport_id) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - lport->nport_id = nport_id; - spin_unlock_irqrestore(&lport->lport_state_lock, flag); -} - -static void -unf_lport_update_time_params(struct unf_lport *lport, - struct unf_flogi_fdisc_payload *flogi_payload) -{ - ulong flag = 0; - u32 ed_tov = 0; - u32 ra_tov = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(flogi_payload); - - ed_tov = flogi_payload->fabric_parms.co_parms.e_d_tov; - ra_tov = flogi_payload->fabric_parms.co_parms.r_a_tov; - - spin_lock_irqsave(&lport->lport_state_lock, flag); - - /* FC-FS-3: 21.3.4, 21.3.5 */ - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_PUBLIC_LOOP) { - lport->ed_tov = ed_tov; - lport->ra_tov = ra_tov; - } else { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) with topo(0x%x) no need 
to save time parameters", - lport->port_id, lport->nport_id, lport->act_topo); - } - - spin_unlock_irqrestore(&lport->lport_state_lock, flag); -} - -static void unf_rcv_flogi_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_flogi_fdisc_payload *flogi_pld, - u32 nport_id, struct unf_xchg *xchg) -{ - /* PLOGI to Name server or remote port */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - struct unf_flogi_fdisc_payload *unf_flogi_pld = flogi_pld; - struct unf_fabric_parm *fabric_params = NULL; - u64 port_name = 0; - u64 node_name = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(flogi_pld); - - /* Check L_Port state: FLOGI_WAIT */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_FLOGI_WAIT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[info]Port(0x%x_0x%x) receive FLOGI ACC with state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_lport->states); - - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - return; - } - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - fabric_params = &unf_flogi_pld->fabric_parms; - node_name = - (u64)(((u64)(fabric_params->high_node_name) << UNF_SHIFT_32) | - ((u64)(fabric_params->low_node_name))); - port_name = - (u64)(((u64)(fabric_params->high_port_name) << UNF_SHIFT_32) | - ((u64)(fabric_params->low_port_name))); - - /* flogi acc pyload class 3 service priority value */ - if (unf_lport->root_lport == unf_lport && unf_lport->qos_cs_ctrl && - fabric_params->cl_parms[ARRAY_INDEX_2].priority == UNF_PRIORITY_ENABLE) - unf_lport->priority = (bool)UNF_PRIORITY_ENABLE; - else - unf_lport->priority = (bool)UNF_PRIORITY_DISABLE; - - /* Save Flogi parameters */ - unf_save_fabric_params(unf_lport, unf_rport, fabric_params); - - if (UNF_CHECK_NPORT_FPORT_BIT(unf_flogi_pld) == UNF_N_PORT) { - /* P2P Mode */ - unf_lport_update_topo(unf_lport, UNF_ACT_TOP_P2P_DIRECT); - unf_login_with_rport_in_n2n(unf_lport, port_name, node_name); - } else { - /* for: - * UNF_ACT_TOP_PUBLIC_LOOP/UNF_ACT_TOP_P2P_FABRIC - * /UNF_TOP_P2P_MASK - */ - if (unf_lport->act_topo != UNF_ACT_TOP_PUBLIC_LOOP) - unf_lport_update_topo(unf_lport, UNF_ACT_TOP_P2P_FABRIC); - - unf_lport_update_nport_id(unf_lport, nport_id); - unf_lport_update_time_params(unf_lport, unf_flogi_pld); - - /* Save process both for Public loop & Fabric */ - unf_register_to_switch(unf_lport); - } -} - -static void unf_flogi_acc_com_process(struct unf_xchg *xchg) -{ - /* Maybe within interrupt or thread context */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_flogi_fdisc_payload *flogi_pld = NULL; - u32 nport_id = 0; - u32 cmnd = 0; - ulong flags = 0; - struct unf_xchg *unf_xchg = xchg; - - FC_CHECK_RETURN_VOID(unf_xchg); - FC_CHECK_RETURN_VOID(unf_xchg->lport); - - unf_lport = unf_xchg->lport; - unf_rport = unf_xchg->rport; - flogi_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi_acc.flogi_payload; - cmnd = flogi_pld->cmnd; - - /* Get N_Port_ID & R_Port */ - /* Others: 0xFFFFFE */ - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_FLOGI); - nport_id = UNF_FC_FID_FLOGI; - - /* Get Safe R_Port: reuse only */ - unf_rport = unf_get_safe_rport(unf_lport, unf_rport, UNF_RPORT_REUSE_ONLY, nport_id); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can not allocate new Rport", unf_lport->port_id); - - return; - } - - 
spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - unf_rport->nport_id = UNF_FC_FID_FLOGI; - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - /* Process FLOGI ACC or RJT */ - if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: FLOGI response is(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - cmnd, unf_lport->port_id, unf_rport->nport_id, unf_xchg->oxid); - - /* Case for ACC */ - unf_rcv_flogi_acc(unf_lport, unf_rport, flogi_pld, unf_xchg->sid, unf_xchg); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: FLOGI response is(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - cmnd, unf_lport->port_id, unf_rport->nport_id, - unf_xchg->oxid); - - /* Case for RJT: do L_Port error recovery */ - unf_lport_error_recovery(unf_lport); - } -} - -static int unf_rcv_flogi_acc_async_callback(void *argc_in, void *argc_out) -{ - struct unf_xchg *xchg = (struct unf_xchg *)argc_in; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - unf_flogi_acc_com_process(xchg); - - unf_xchg_ref_dec(xchg, SFS_RESPONSE); - - return RETURN_OK; -} - -void unf_flogi_callback(void *lport, void *rport, void *xchg) -{ - /* Callback function for FLOGI ACC or RJT */ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_flogi_fdisc_payload *flogi_pld = NULL; - bool bbscn_enabled = false; - enum unf_act_topo act_topo = UNF_ACT_TOP_UNKNOWN; - bool switch2thread = false; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - FC_CHECK_RETURN_VOID(unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr); - - unf_xchg->lport = lport; - flogi_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi_acc.flogi_payload; - - if (unf_xchg->byte_orders & UNF_BIT_2) - unf_big_end_to_cpu((u8 *)flogi_pld, sizeof(struct unf_flogi_fdisc_payload)); - - if (unf_lport->act_topo != UNF_ACT_TOP_PUBLIC_LOOP && - (UNF_CHECK_NPORT_FPORT_BIT(flogi_pld) == UNF_F_PORT)) - /* Get Top Mode (P2P_F) --->>> used for BBSCN */ - act_topo = UNF_ACT_TOP_P2P_FABRIC; - - bbscn_enabled = - unf_check_bbscn_is_enabled((u8)unf_lport->low_level_func.lport_cfg_items.bbscn, - (u8)UNF_GET_BB_SC_N_FROM_PARAMS(&flogi_pld->fabric_parms)); - if (act_topo == UNF_ACT_TOP_P2P_FABRIC && bbscn_enabled) { - /* BBSCN Enable or not --->>> used for Context change */ - unf_lport->bbscn_support = true; - switch2thread = true; - } - - if (switch2thread && unf_lport->root_lport == unf_lport) { - /* Wait for LR done sync: for Root Port */ - (void)unf_irq_process_switch2thread(unf_lport, unf_xchg, - unf_rcv_flogi_acc_async_callback); - } else { - /* Process FLOGI response directly */ - unf_flogi_acc_com_process(unf_xchg); - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ALL, - "[info]Port(0x%x) process FLOGI response: switch(%d) to thread done", - unf_lport->port_id, switch2thread); -} - -void unf_plogi_ob_callback(struct unf_xchg *xchg) -{ - /* Do L_Port or R_Port recovery */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - unf_lport = xchg->lport; - unf_rport = xchg->rport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_CHECK_RETURN_VOID(unf_lport); - FC_CHECK_RETURN_VOID(unf_rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI(0x%x_0x%x) to RPort(%p:0x%x_0x%x) failed", - 
unf_lport->port_id, unf_lport->nport_id, xchg->oxid, - xchg->rxid, unf_rport, unf_rport->rport_index, - unf_rport->nport_id); - - /* Start to recovery */ - if (unf_rport->nport_id > UNF_FC_FID_DOM_MGR) { - /* with Name server: R_Port is fabric --->>> L_Port error - * recovery - */ - unf_lport_error_recovery(unf_lport); - } else { - /* R_Port is not fabric --->>> R_Port error recovery */ - unf_rport_error_recovery(unf_rport); - } -} - -void unf_rcv_plogi_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_lgn_parm *login_parms) -{ - /* PLOGI ACC: PRLI(non fabric) or RFT_ID(fabric) */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - struct unf_lgn_parm *unf_login_parms = login_parms; - u64 node_name = 0; - u64 port_name = 0; - ulong flag = 0; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(login_parms); - - node_name = (u64)(((u64)(unf_login_parms->high_node_name) << UNF_SHIFT_32) | - ((u64)(unf_login_parms->low_node_name))); - port_name = (u64)(((u64)(unf_login_parms->high_port_name) << UNF_SHIFT_32) | - ((u64)(unf_login_parms->low_port_name))); - - /* ACC & Case for: R_Port is fabric (RFT_ID) */ - if (unf_rport->nport_id >= UNF_FC_FID_DOM_MGR) { - /* Check L_Port state */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_PLOGI_WAIT) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive PLOGI ACC with error state(0x%x)", - lport->port_id, unf_lport->states); - - return; - } - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_REMOTE_ACC); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - /* PLOGI parameters save */ - unf_save_plogi_params(unf_lport, unf_rport, unf_login_parms, ELS_ACC); - - /* Update R_Port WWPN & WWNN */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->node_name = node_name; - unf_rport->port_name = port_name; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Start to Send RFT_ID */ - ret = unf_send_rft_id(unf_lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send RFT_ID failed", - lport->port_id); - - unf_lport_error_recovery(unf_lport); - } - } else { - /* ACC & Case for: R_Port is not fabric */ - if (unf_rport->options == UNF_PORT_MODE_UNKNOWN && - unf_rport->port_name != INVALID_WWPN) - unf_rport->options = unf_get_port_feature(port_name); - - /* Set Port Feature with BOTH: cancel */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->node_name = node_name; - unf_rport->port_name = port_name; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x)<---LS_ACC(DID:0x%x SID:0x%x) for PLOGI ACC with RPort state(0x%x) NodeName(0x%llx) E_D_TOV(%u)", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id, unf_rport->rp_state, - unf_rport->node_name, unf_rport->ed_tov); - - if (unf_lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP && - (unf_rport->rp_state == UNF_RPORT_ST_PRLI_WAIT || - unf_rport->rp_state == UNF_RPORT_ST_READY)) { - /* Do nothing, return directly */ - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - return; - } - - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PRLI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* PLOGI parameters save */ - unf_save_plogi_params(unf_lport, unf_rport, unf_login_parms, ELS_ACC); - - /* - * Need Delay 
to Send PRLI or not - * Used for: L_Port with INI mode & R_Port is not Fabric - */ - unf_check_rport_need_delay_prli(unf_lport, unf_rport, unf_rport->options); - - /* Do not care: Just used for L_Port only is TGT mode or R_Port - * only is INI mode - */ - unf_schedule_open_work(unf_lport, unf_rport); - } -} - -void unf_plogi_acc_com_process(struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_plogi_payload *plogi_pld = NULL; - struct unf_lgn_parm *login_parms = NULL; - ulong flag = 0; - u64 port_name = 0; - u32 rport_nport_id = 0; - u32 cmnd = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(unf_xchg); - FC_CHECK_RETURN_VOID(unf_xchg->lport); - FC_CHECK_RETURN_VOID(unf_xchg->rport); - - unf_lport = unf_xchg->lport; - unf_rport = unf_xchg->rport; - rport_nport_id = unf_rport->nport_id; - plogi_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi_acc.payload; - login_parms = &plogi_pld->stparms; - cmnd = (plogi_pld->cmnd); - - if (UNF_ELS_CMND_ACC == (cmnd & UNF_ELS_CMND_HIGH_MASK)) { - /* Case for PLOGI ACC: Go to next stage */ - port_name = - (u64)(((u64)(login_parms->high_port_name) << UNF_SHIFT_32) | - ((u64)(login_parms->low_port_name))); - - /* Get (new) R_Port: 0xfffffc has same WWN with 0xfffcxx */ - unf_rport = unf_find_rport(unf_lport, rport_nport_id, port_name); - unf_rport = unf_get_safe_rport(unf_lport, unf_rport, - UNF_RPORT_REUSE_ONLY, rport_nport_id); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) alloc new RPort with wwpn(0x%llx) failed", - unf_lport->port_id, unf_lport->nport_id, port_name); - return; - } - - /* PLOGI parameters check */ - ret = unf_check_plogi_params(unf_lport, unf_rport, login_parms); - if (ret != RETURN_OK) - return; - - /* Update R_Port state */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = rport_nport_id; - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Start to process PLOGI ACC */ - unf_rcv_plogi_acc(unf_lport, unf_rport, login_parms); - } else { - /* Case for PLOGI RJT: L_Port or R_Port recovery */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x)<---RPort(0x%p) with LS_RJT(DID:0x%x SID:0x%x) for PLOGI", - unf_lport->port_id, unf_rport, unf_lport->nport_id, - unf_rport->nport_id); - - if (unf_rport->nport_id >= UNF_FC_FID_DOM_MGR) - unf_lport_error_recovery(unf_lport); - else - unf_rport_error_recovery(unf_rport); - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PLOGI response(0x%x). 
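unf_rcv_plogi_acc above rebuilds each 64-bit WWNN/WWPN from the two 32-bit words carried in the common service parameters. A minimal sketch of that shift-and-or idiom; the struct and field names are illustrative stand-ins for the driver's unf_lgn_parm high_/low_ pairs.

#include <stdint.h>

/* Illustrative stand-in for the WWN halves in struct unf_lgn_parm. */
struct wwn_words {
        uint32_t high; /* most significant 32 bits */
        uint32_t low;  /* least significant 32 bits */
};

static inline uint64_t wwn_assemble(const struct wwn_words *w)
{
        /* Same shape as ((u64)high << UNF_SHIFT_32) | (u64)low above */
        return ((uint64_t)w->high << 32) | (uint64_t)w->low;
}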
Port(0x%x_0x%x)<---RPort(0x%x_0x%p) wwpn(0x%llx) OX_ID(0x%x)", - cmnd, unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id, - unf_rport, port_name, unf_xchg->oxid); -} - -static int unf_rcv_plogi_acc_async_callback(void *argc_in, void *argc_out) -{ - struct unf_xchg *xchg = (struct unf_xchg *)argc_in; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - unf_plogi_acc_com_process(xchg); - - unf_xchg_ref_dec(xchg, SFS_RESPONSE); - - return RETURN_OK; -} - -void unf_plogi_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_plogi_payload *plogi_pld = NULL; - struct unf_lgn_parm *login_parms = NULL; - bool bbscn_enabled = false; - bool switch2thread = false; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - FC_CHECK_RETURN_VOID(unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr); - - plogi_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi_acc.payload; - login_parms = &plogi_pld->stparms; - unf_xchg->lport = lport; - - if (unf_xchg->byte_orders & UNF_BIT_2) - unf_big_end_to_cpu((u8 *)plogi_pld, sizeof(struct unf_plogi_payload)); - - bbscn_enabled = - unf_check_bbscn_is_enabled((u8)unf_lport->low_level_func.lport_cfg_items.bbscn, - (u8)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); - if ((bbscn_enabled) && - unf_lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - switch2thread = true; - unf_lport->bbscn_support = true; - } - - if (switch2thread && unf_lport->root_lport == unf_lport) { - /* Wait for LR done sync: just for ROOT Port */ - (void)unf_irq_process_switch2thread(unf_lport, unf_xchg, - unf_rcv_plogi_acc_async_callback); - } else { - unf_plogi_acc_com_process(unf_xchg); - } -} - -static void unf_logo_ob_callback(struct unf_xchg *xchg) -{ - struct unf_lport *lport = NULL; - struct unf_rport *rport = NULL; - struct unf_rport *old_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - u32 nport_id = 0; - u32 logo_retry = 0; - u32 max_frame_size = 0; - u64 port_name = 0; - - FC_CHECK_RETURN_VOID(xchg); - unf_xchg = xchg; - old_rport = unf_xchg->rport; - logo_retry = old_rport->logo_retries; - max_frame_size = old_rport->max_frame_size; - port_name = old_rport->port_name; - unf_rport_enter_closing(old_rport); - - lport = unf_xchg->lport; - if (unf_is_lport_valid(lport) != RETURN_OK) - return; - - /* Get R_Port by exchange info: Init state */ - nport_id = unf_xchg->did; - rport = unf_get_rport_by_nport_id(lport, nport_id); - rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_INIT, nport_id); - if (!rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) cannot allocate RPort", lport->port_id); - return; - } - - rport->logo_retries = logo_retry; - rport->max_frame_size = max_frame_size; - rport->port_name = port_name; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[info]LOGIN: Port(0x%x) received LOGO RSP timeout topo(0x%x) retries(%u)", - lport->port_id, lport->act_topo, rport->logo_retries); - - /* RCVD LOGO/PRLO & SEND LOGO: the same process */ - if (rport->logo_retries < UNF_MAX_RETRY_COUNT) { - /* <: retry (LOGIN or LOGO) if necessary */ - unf_process_rport_after_logo(lport, rport); - } else { - /* >=: Link down */ - unf_rport_immediate_link_down(lport, rport); - } -} - -static void unf_logo_callback(void *lport, void *rport, void *xchg) -{ - /* RCVD LOGO ACC/RJT: retry(LOGIN/LOGO) or link down immediately */ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_rport 
*unf_rport = NULL; - struct unf_rport *old_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_els_rjt *els_acc_rjt = NULL; - u32 cmnd = 0; - u32 nport_id = 0; - u32 logo_retry = 0; - u32 max_frame_size = 0; - u64 port_name = 0; - - FC_CHECK_RETURN_VOID(xchg); - - unf_xchg = (struct unf_xchg *)xchg; - old_rport = unf_xchg->rport; - - logo_retry = old_rport->logo_retries; - max_frame_size = old_rport->max_frame_size; - port_name = old_rport->port_name; - unf_rport_enter_closing(old_rport); - - if (unf_is_lport_valid(lport) != RETURN_OK) - return; - - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) - return; - - /* Get R_Port by exchange info: Init state */ - els_acc_rjt = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_rjt; - nport_id = unf_xchg->did; - unf_rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - unf_rport = unf_get_safe_rport(unf_lport, unf_rport, UNF_RPORT_REUSE_INIT, nport_id); - - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) cannot allocate RPort", - unf_lport->port_id); - return; - } - - unf_rport->logo_retries = logo_retry; - unf_rport->max_frame_size = max_frame_size; - unf_rport->port_name = port_name; - cmnd = be32_to_cpu(els_acc_rjt->cmnd); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x) received LOGO RSP(0x%x),topo(0x%x) Port options(0x%x) RPort options(0x%x) retries(%u)", - unf_lport->port_id, (cmnd & UNF_ELS_CMND_HIGH_MASK), - unf_lport->act_topo, unf_lport->options, unf_rport->options, - unf_rport->logo_retries); - - /* RCVD LOGO/PRLO & SEND LOGO: the same process */ - if (unf_rport->logo_retries < UNF_MAX_RETRY_COUNT) { - /* <: retry (LOGIN or LOGO) if necessary */ - unf_process_rport_after_logo(unf_lport, unf_rport); - } else { - /* >=: Link down */ - unf_rport_immediate_link_down(unf_lport, unf_rport); - } -} - -void unf_prli_ob_callback(struct unf_xchg *xchg) -{ - /* Do R_Port recovery */ - struct unf_lport *lport = NULL; - struct unf_rport *rport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - rport = xchg->rport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) RPort(0x%x) send PRLI failed and do recovery", - lport->port_id, lport->nport_id, rport->nport_id); - - /* Start to do R_Port error recovery */ - unf_rport_error_recovery(rport); -} - -void unf_prli_callback(void *lport, void *rport, void *xchg) -{ - /* RCVD PRLI RSP: ACC or RJT --->>> SCSI Link Up */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_prli_payload *prli_acc_pld = NULL; - ulong flag = 0; - u32 cmnd = 0; - u32 options = 0; - u32 fcp_conf = 0; - u32 rec_support = 0; - u32 task_retry_support = 0; - u32 retry_support = 0; - u32 tape_support = 0; - u32 fc4_type = 0; - enum unf_rport_login_state rport_state = UNF_RPORT_ST_INIT; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - unf_lport = (struct unf_lport *)lport; - unf_rport = (struct unf_rport *)rport; - unf_xchg = (struct unf_xchg *)xchg; - - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange(%p) entry is NULL", - unf_lport->port_id, unf_xchg); - return; - } - - /* Get PRLI ACC payload */ - prli_acc_pld = 
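Both LOGO completion paths above share one trick: the old R_Port object is pushed into closing, a replacement is fetched for the same N_Port ID, and the retry counter, max frame size, and WWPN are copied across so the retry budget survives the swap. A standalone sketch of that hand-off, with hypothetical type and constant names:

#include <stdbool.h>
#include <stdint.h>

#define MAX_RETRY_COUNT 3 /* assumed stand-in for UNF_MAX_RETRY_COUNT */

struct rport_sketch {
        uint32_t logo_retries;
        uint32_t max_frame_size;
        uint64_t port_name;
};

/* Carry retry bookkeeping from the closing R_Port to its replacement. */
static void rport_carry_over(struct rport_sketch *fresh,
                             const struct rport_sketch *closing)
{
        fresh->logo_retries   = closing->logo_retries;
        fresh->max_frame_size = closing->max_frame_size;
        fresh->port_name      = closing->port_name;
}

/* The decision both callbacks then make: retry, or force link-down. */
static bool logo_should_retry(const struct rport_sketch *rp)
{
        return rp->logo_retries < MAX_RETRY_COUNT;
}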
&unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prli_acc.payload; - if (unf_xchg->byte_orders & UNF_BIT_2) { - /* Change to little End, About INI/TGT mode & confirm info */ - options = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_3]) & - (UNF_FC4_FRAME_PARM_3_TGT | UNF_FC4_FRAME_PARM_3_INI); - - cmnd = be32_to_cpu(prli_acc_pld->cmnd); - fcp_conf = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_3]) & - UNF_FC4_FRAME_PARM_3_CONF_ALLOW; - rec_support = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_3]) & - UNF_FC4_FRAME_PARM_3_REC_SUPPORT; - task_retry_support = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_3]) & - UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; - retry_support = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_3]) & - UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; - fc4_type = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_0]) >> - UNF_FC4_TYPE_SHIFT & UNF_FC4_TYPE_MASK; - } else { - options = (prli_acc_pld->parms[ARRAY_INDEX_3]) & - (UNF_FC4_FRAME_PARM_3_TGT | UNF_FC4_FRAME_PARM_3_INI); - - cmnd = (prli_acc_pld->cmnd); - fcp_conf = prli_acc_pld->parms[ARRAY_INDEX_3] & UNF_FC4_FRAME_PARM_3_CONF_ALLOW; - rec_support = prli_acc_pld->parms[ARRAY_INDEX_3] & UNF_FC4_FRAME_PARM_3_REC_SUPPORT; - task_retry_support = prli_acc_pld->parms[ARRAY_INDEX_3] & - UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; - retry_support = prli_acc_pld->parms[ARRAY_INDEX_3] & - UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; - fc4_type = prli_acc_pld->parms[ARRAY_INDEX_0] >> UNF_FC4_TYPE_SHIFT; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PRLI RSP: RPort(0x%x) parameter-3(0x%x) option(0x%x) cmd(0x%x) uiRecSupport:%u", - unf_rport->nport_id, prli_acc_pld->parms[ARRAY_INDEX_3], - options, cmnd, rec_support); - - /* PRLI ACC: R_Port READY & Report R_Port Link Up */ - if (UNF_ELS_CMND_ACC == (cmnd & UNF_ELS_CMND_HIGH_MASK)) { - /* Update R_Port options(INI/TGT/BOTH) */ - unf_rport->options = options; - - unf_update_port_feature(unf_rport->port_name, unf_rport->options); - - /* NOTE: R_Port only with INI mode, send LOGO */ - if (unf_rport->options == UNF_PORT_MODE_INI) { - /* Update R_Port state: LOGO */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* NOTE: Start to Send LOGO */ - unf_rport_enter_logo(unf_lport, unf_rport); - return; - } - - /* About confirm */ - if (fcp_conf && unf_lport->low_level_func.lport_cfg_items.fcp_conf) { - unf_rport->fcp_conf_needed = true; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) FCP config is need for RPort(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id); - } - - tape_support = (rec_support && task_retry_support && retry_support); - if (tape_support && unf_lport->low_level_func.lport_cfg_items.tape_support) { - unf_rport->tape_support_needed = true; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x_0x%x) Rec is enabled for RPort(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id); - } - - /* Update R_Port state: READY */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_READY); - rport_state = unf_rport->rp_state; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Report R_Port online (Link Up) event to SCSI */ - if (rport_state == UNF_RPORT_ST_READY) { - unf_rport->logo_retries = 0; - unf_update_lport_state_by_linkup_event(unf_lport, unf_rport, - unf_rport->options); - } - } else { - /* PRLI RJT: Do 
R_Port error recovery */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x)<---LS_RJT(DID:0x%x SID:0x%x) for PRLI. RPort(0x%p) OX_ID(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id, unf_rport, unf_xchg->oxid); - - unf_rport_error_recovery(unf_rport); - } -} - -static void unf_rrq_callback(void *lport, void *rport, void *xchg) -{ - /* Release I/O */ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_xchg *io_xchg = NULL; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) exchange(0x%p) SfsEntryPtr is NULL", - unf_lport->port_id, unf_xchg); - return; - } - - io_xchg = (struct unf_xchg *)unf_xchg->io_xchg; - if (!io_xchg) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) IO exchange is NULL. RRQ cb sfs xchg(0x%p) tag(0x%x)", - unf_lport->port_id, unf_xchg, unf_xchg->hotpooltag); - return; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Port(0x%x) release IO exch(0x%p) tag(0x%x). RRQ cb sfs xchg(0x%p) tag(0x%x)", - unf_lport->port_id, unf_xchg->io_xchg, io_xchg->hotpooltag, - unf_xchg, unf_xchg->hotpooltag); - - /* After RRQ succeeds, free the xid */ - unf_notify_chip_free_xid(io_xchg); - - /* NOTE: release I/O exchange resource */ - unf_xchg_ref_dec(io_xchg, XCHG_ALLOC); -} - -static void unf_rrq_ob_callback(struct unf_xchg *xchg) -{ - /* Release I/O */ - struct unf_xchg *unf_xchg = NULL; - struct unf_xchg *io_xchg = NULL; - - unf_xchg = (struct unf_xchg *)xchg; - if (!unf_xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Exchange can't be NULL"); - return; - } - - io_xchg = (struct unf_xchg *)unf_xchg->io_xchg; - if (!io_xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]IO exchange can't be NULL with Sfs exch(0x%p) tag(0x%x)", - unf_xchg, unf_xchg->hotpooltag); - return; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]send RRQ failed: SFS exch(0x%p) tag(0x%x) exch(0x%p) tag(0x%x) OXID_RXID(0x%x_0x%x) SID_DID(0x%x_0x%x)", - unf_xchg, unf_xchg->hotpooltag, io_xchg, io_xchg->hotpooltag, - io_xchg->oxid, io_xchg->rxid, io_xchg->sid, io_xchg->did); - - /* If the RRQ failed or timed out, free the xid. 
*/ - unf_notify_chip_free_xid(io_xchg); - - /* NOTE: Free I/O exchange resource */ - unf_xchg_ref_dec(io_xchg, XCHG_ALLOC); -} diff --git a/drivers/scsi/spfc/common/unf_ls.h b/drivers/scsi/spfc/common/unf_ls.h deleted file mode 100644 index 5fdd9e1a258d0e2c5f6e66ea0df20b28e6958f84..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_ls.h +++ /dev/null @@ -1,61 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_LS_H -#define UNF_LS_H - -#include "unf_type.h" -#include "unf_exchg.h" -#include "unf_rport.h" - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -u32 unf_send_adisc(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_pdisc(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_flogi(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_fdisc(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_plogi(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_prli(struct unf_lport *lport, struct unf_rport *rport, - u32 cmnd_code); -u32 unf_send_prlo(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_logo(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_logo_by_did(struct unf_lport *lport, u32 did); -u32 unf_send_echo(struct unf_lport *lport, struct unf_rport *rport, u32 *time); -u32 unf_send_plogi_rjt_by_did(struct unf_lport *lport, u32 did); -u32 unf_send_rrq(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg); -void unf_flogi_ob_callback(struct unf_xchg *xchg); -void unf_flogi_callback(void *lport, void *rport, void *xchg); -void unf_fdisc_ob_callback(struct unf_xchg *xchg); -void unf_fdisc_callback(void *lport, void *rport, void *xchg); - -void unf_plogi_ob_callback(struct unf_xchg *xchg); -void unf_plogi_callback(void *lport, void *rport, void *xchg); -void unf_prli_ob_callback(struct unf_xchg *xchg); -void unf_prli_callback(void *lport, void *rport, void *xchg); -u32 unf_flogi_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_plogi_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_rec_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_prli_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_prlo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_rscn_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_logo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_echo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_pdisc_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_send_pdisc_rjt(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg); -u32 unf_adisc_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_rrq_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_send_rec(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *io_xchg); - -u32 unf_low_level_bb_scn(struct unf_lport *lport); -typedef int (*unf_event_task)(void *arg_in, void *arg_out); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* __UNF_SERVICE_H__ */ diff --git a/drivers/scsi/spfc/common/unf_npiv.c b/drivers/scsi/spfc/common/unf_npiv.c deleted file mode 100644 index 0d441f1c9e060b20f5f0b72154ca60fde97b8871..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_npiv.c +++ /dev/null @@ 
-1,1005 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_npiv.h" -#include "unf_log.h" -#include "unf_rport.h" -#include "unf_exchg.h" -#include "unf_portman.h" -#include "unf_npiv_portman.h" - -#define UNF_DELETE_VPORT_MAX_WAIT_TIME_MS 60000 - -u32 unf_init_vport_pool(struct unf_lport *lport) -{ - u32 ret = RETURN_OK; - u32 i; - u16 vport_cnt = 0; - struct unf_lport *vport = NULL; - struct unf_vport_pool *vport_pool = NULL; - u32 vport_pool_size; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, RETURN_ERROR); - - UNF_TOU16_CHECK(vport_cnt, lport->low_level_func.support_max_npiv_num, - return RETURN_ERROR); - if (vport_cnt == 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) do not support NPIV", - lport->port_id); - - return RETURN_OK; - } - - vport_pool_size = sizeof(struct unf_vport_pool) + sizeof(struct unf_lport *) * vport_cnt; - lport->vport_pool = vmalloc(vport_pool_size); - if (!lport->vport_pool) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) cannot allocate vport pool", - lport->port_id); - - return RETURN_ERROR; - } - memset(lport->vport_pool, 0, vport_pool_size); - vport_pool = lport->vport_pool; - vport_pool->vport_pool_count = vport_cnt; - vport_pool->vport_pool_completion = NULL; - spin_lock_init(&vport_pool->vport_pool_lock); - INIT_LIST_HEAD(&vport_pool->list_vport_pool); - - vport_pool->vport_pool_addr = - vmalloc((size_t)(vport_cnt * sizeof(struct unf_lport))); - if (!vport_pool->vport_pool_addr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) cannot allocate vport pool address", - lport->port_id); - vfree(lport->vport_pool); - lport->vport_pool = NULL; - - return RETURN_ERROR; - } - - memset(vport_pool->vport_pool_addr, 0, - vport_cnt * sizeof(struct unf_lport)); - vport = (struct unf_lport *)vport_pool->vport_pool_addr; - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - for (i = 0; i < vport_cnt; i++) { - list_add_tail(&vport->entry_vport, &vport_pool->list_vport_pool); - vport++; - } - - vport_pool->slab_next_index = 0; - vport_pool->slab_total_sum = vport_cnt; - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - return ret; -} - -void unf_free_vport_pool(struct unf_lport *lport) -{ - struct unf_vport_pool *vport_pool = NULL; - bool wait = false; - ulong flag = 0; - u32 remain = 0; - struct completion vport_pool_completion; - - init_completion(&vport_pool_completion); - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(lport->vport_pool); - vport_pool = lport->vport_pool; - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - - if (vport_pool->slab_total_sum != vport_pool->vport_pool_count) { - vport_pool->vport_pool_completion = &vport_pool_completion; - remain = vport_pool->slab_total_sum - vport_pool->vport_pool_count; - wait = true; - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - if (wait) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to wait for vport pool completion remain(0x%x)", - lport->port_id, remain); - - wait_for_completion(vport_pool->vport_pool_completion); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) wait for vport pool completion end", - lport->port_id); - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - vport_pool->vport_pool_completion = NULL; - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - } - - if (lport->vport_pool->vport_pool_addr) { - vfree(lport->vport_pool->vport_pool_addr); - 
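unf_init_vport_pool above sizes the pool as a fixed header plus a trailing array of per-slab pointers, then performs a second allocation for the vport objects themselves, unwinding the first on failure. The same layout in miniature, using a C99 flexible array member; all names here are illustrative:

#include <stdlib.h>

struct vport_pool_sketch {
        unsigned int count;
        void *objects; /* second allocation: the vport object array */
        void *slab[];  /* per-slab pointer table, sized at alloc time */
};

static struct vport_pool_sketch *pool_create(unsigned int n, size_t obj_size)
{
        struct vport_pool_sketch *p =
                calloc(1, sizeof(*p) + n * sizeof(void *));

        if (!p)
                return NULL;

        p->objects = calloc(n, obj_size);
        if (!p->objects) {
                free(p); /* mirror the driver's unwind on failure */
                return NULL;
        }
        p->count = n;
        return p;
}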
lport->vport_pool->vport_pool_addr = NULL; - } - - vfree(lport->vport_pool); - lport->vport_pool = NULL; -} - -struct unf_lport *unf_get_vport_by_slab_index(struct unf_vport_pool *vport_pool, - u16 slab_index) -{ - FC_CHECK_RETURN_VALUE(vport_pool, NULL); - - return vport_pool->vport_slab[slab_index]; -} - -static inline void unf_vport_pool_slab_set(struct unf_vport_pool *vport_pool, - u16 slab_index, - struct unf_lport *vport) -{ - FC_CHECK_RETURN_VOID(vport_pool); - - vport_pool->vport_slab[slab_index] = vport; -} - -u32 unf_alloc_vp_index(struct unf_vport_pool *vport_pool, - struct unf_lport *vport, u16 vpid) -{ - u16 slab_index; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(vport_pool, RETURN_ERROR); - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - if (vpid == 0) { - slab_index = vport_pool->slab_next_index; - while (unf_get_vport_by_slab_index(vport_pool, slab_index)) { - slab_index = (slab_index + 1) % vport_pool->slab_total_sum; - - if (vport_pool->slab_next_index == slab_index) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]VPort pool has no free slab"); - - return RETURN_ERROR; - } - } - } else { - slab_index = vpid - 1; - if (unf_get_vport_by_slab_index(vport_pool, slab_index)) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_WARN, - "[warn]VPort Index(0x%x) is occupied", vpid); - - return RETURN_ERROR; - } - } - - unf_vport_pool_slab_set(vport_pool, slab_index, vport); - - vport_pool->slab_next_index = (slab_index + 1) % vport_pool->slab_total_sum; - - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - spin_lock_irqsave(&vport->lport_state_lock, flags); - vport->vp_index = slab_index + 1; - spin_unlock_irqrestore(&vport->lport_state_lock, flags); - - return RETURN_OK; -} - -void unf_free_vp_index(struct unf_vport_pool *vport_pool, - struct unf_lport *vport) -{ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(vport_pool); - FC_CHECK_RETURN_VOID(vport); - - if (vport->vp_index == 0 || - vport->vp_index > vport_pool->slab_total_sum) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Input vport index(0x%x) is beyond the normal range, min(0x1), max(0x%x).", - vport->vp_index, vport_pool->slab_total_sum); - return; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - unf_vport_pool_slab_set(vport_pool, vport->vp_index - 1, - NULL); /* SlabIndex=VpIndex-1 */ - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - spin_lock_irqsave(&vport->lport_state_lock, flags); - vport->vp_index = INVALID_VALUE16; - spin_unlock_irqrestore(&vport->lport_state_lock, flags); -} - -struct unf_lport *unf_get_free_vport(struct unf_lport *lport) -{ - struct unf_lport *vport = NULL; - struct list_head *list_head = NULL; - struct unf_vport_pool *vport_pool = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(lport->vport_pool, NULL); - - vport_pool = lport->vport_pool; - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - if (!list_empty(&vport_pool->list_vport_pool)) { - list_head = UNF_OS_LIST_NEXT(&vport_pool->list_vport_pool); - list_del(list_head); - vport_pool->vport_pool_count--; - list_add_tail(list_head, &lport->list_vports_head); - vport = list_entry(list_head, struct unf_lport, entry_vport); - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]LPort(0x%x)'s vport pool is empty", lport->port_id); - 
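The vpid == 0 branch of unf_alloc_vp_index above is a circular next-fit scan: start at slab_next_index, wrap modulo the table size, and give up once the scan returns to its starting point. Extracted as a standalone helper (locking omitted; the driver holds vport_pool_lock around this, and the caller stores into the returned slot):

/* Returns a free slot index in slab[] or -1 if the table is full.
 * 'next' is the rotating start position kept between calls. */
static int next_fit_alloc(void *slab[], unsigned int total, unsigned int *next)
{
        unsigned int idx = *next;

        while (slab[idx]) {
                idx = (idx + 1) % total;
                if (idx == *next)
                        return -1;         /* wrapped around: no free slab */
        }
        *next = (idx + 1) % total;         /* resume after the hit next time */
        return (int)idx;
}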
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - return NULL; - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - return vport; -} - -void unf_vport_back_to_pool(void *vport) -{ - struct unf_lport *unf_lport = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *list = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(vport); - unf_vport = vport; - unf_lport = (struct unf_lport *)(unf_vport->root_lport); - FC_CHECK_RETURN_VOID(unf_lport); - FC_CHECK_RETURN_VOID(unf_lport->vport_pool); - - unf_free_vp_index(unf_lport->vport_pool, unf_vport); - - spin_lock_irqsave(&unf_lport->vport_pool->vport_pool_lock, flag); - - list = &unf_vport->entry_vport; - list_del(list); - list_add_tail(list, &unf_lport->vport_pool->list_vport_pool); - unf_lport->vport_pool->vport_pool_count++; - - spin_unlock_irqrestore(&unf_lport->vport_pool->vport_pool_lock, flag); -} - -void unf_init_vport_from_lport(struct unf_lport *vport, struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(vport); - FC_CHECK_RETURN_VOID(lport); - - vport->port_type = lport->port_type; - vport->fc_port = lport->fc_port; - vport->act_topo = lport->act_topo; - vport->root_lport = lport; - vport->unf_qualify_rport = lport->unf_qualify_rport; - vport->link_event_wq = lport->link_event_wq; - vport->xchg_wq = lport->xchg_wq; - - memcpy(&vport->xchg_mgr_temp, &lport->xchg_mgr_temp, - sizeof(struct unf_cm_xchg_mgr_template)); - - memcpy(&vport->event_mgr, &lport->event_mgr, sizeof(struct unf_event_mgr)); - - memset(&vport->lport_mgr_temp, 0, sizeof(struct unf_cm_lport_template)); - - memcpy(&vport->low_level_func, &lport->low_level_func, - sizeof(struct unf_low_level_functioon_op)); -} - -void unf_check_vport_pool_status(struct unf_lport *lport) -{ - struct unf_vport_pool *vport_pool = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - vport_pool = lport->vport_pool; - FC_CHECK_RETURN_VOID(vport_pool); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - - if (vport_pool->vport_pool_completion && - vport_pool->slab_total_sum == vport_pool->vport_pool_count) { - complete(vport_pool->vport_pool_completion); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); -} - -void unf_vport_fabric_logo(struct unf_lport *vport) -{ - struct unf_rport *unf_rport = NULL; - ulong flag = 0; - - unf_rport = unf_get_rport_by_nport_id(vport, UNF_FC_FID_FLOGI); - FC_CHECK_RETURN_VOID(unf_rport); - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - unf_rport_enter_logo(vport, unf_rport); -} - -void unf_vport_deinit(void *vport) -{ - struct unf_lport *unf_vport = NULL; - - FC_CHECK_RETURN_VOID(vport); - unf_vport = (struct unf_lport *)vport; - - unf_unregister_scsi_host(unf_vport); - - unf_disc_mgr_destroy(unf_vport); - - unf_release_xchg_mgr_temp(unf_vport); - - unf_release_vport_mgr_temp(unf_vport); - - unf_destroy_scsi_id_table(unf_vport); - - unf_lport_release_lw_funop(unf_vport); - unf_vport->fc_port = NULL; - unf_vport->vport = NULL; - - if (unf_vport->lport_free_completion) { - complete(unf_vport->lport_free_completion); - } else { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]VPort(0x%x) point(0x%p) completion free function is NULL", - unf_vport->port_id, unf_vport); - dump_stack(); - } -} - -void unf_vport_ref_dec(struct unf_lport *vport) -{ - FC_CHECK_RETURN_VOID(vport); - - if (atomic_dec_and_test(&vport->port_ref_cnt)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, 
- "[info]VPort(0x%x) point(0x%p) reference count is 0 and freevport", - vport->port_id, vport); - - unf_vport_deinit(vport); - } -} - -u32 unf_vport_init(void *vport) -{ - struct unf_lport *unf_vport = NULL; - - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - unf_vport = (struct unf_lport *)vport; - - unf_vport->options = UNF_PORT_MODE_INI; - unf_vport->nport_id = 0; - - if (unf_init_scsi_id_table(unf_vport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Vport(0x%x) can not initialize SCSI ID table", - unf_vport->port_id); - - return RETURN_ERROR; - } - - if (unf_init_disc_mgr(unf_vport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Vport(0x%x) can not initialize discover manager", - unf_vport->port_id); - unf_destroy_scsi_id_table(unf_vport); - - return RETURN_ERROR; - } - - if (unf_register_scsi_host(unf_vport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Vport(0x%x) vport can not register SCSI host", - unf_vport->port_id); - unf_disc_mgr_destroy(unf_vport); - unf_destroy_scsi_id_table(unf_vport); - - return RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Vport(0x%x) Create succeed with wwpn(0x%llx)", - unf_vport->port_id, unf_vport->port_name); - - return RETURN_OK; -} - -void unf_vport_remove(void *vport) -{ - struct unf_lport *unf_vport = NULL; - struct unf_lport *unf_lport = NULL; - struct completion port_free_completion; - - init_completion(&port_free_completion); - FC_CHECK_RETURN_VOID(vport); - unf_vport = (struct unf_lport *)vport; - unf_lport = (struct unf_lport *)(unf_vport->root_lport); - unf_vport->lport_free_completion = &port_free_completion; - - unf_set_lport_removing(unf_vport); - - unf_vport_ref_dec(unf_vport); - - wait_for_completion(unf_vport->lport_free_completion); - unf_vport_back_to_pool(unf_vport); - - unf_check_vport_pool_status(unf_lport); -} - -u32 unf_npiv_conf(u32 port_id, u64 wwpn, enum unf_rport_qos_level qos_level) -{ -#define VPORT_WWN_MASK 0xff00ffffffffffff -#define VPORT_WWN_SHIFT 48 - - struct fc_vport_identifiers vid = {0}; - struct Scsi_Host *host = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_lport *unf_vport = NULL; - u16 vport_id = 0; - - unf_lport = unf_find_lport_by_port_id(port_id); - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Cannot find LPort by (0x%x).", port_id); - - return RETURN_ERROR; - } - - unf_vport = unf_cm_lookup_vport_by_wwpn(unf_lport, wwpn); - if (unf_vport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Port(0x%x) has find vport with wwpn(0x%llx), can't create again", - unf_lport->port_id, wwpn); - - return RETURN_ERROR; - } - - unf_vport = unf_get_free_vport(unf_lport); - if (!unf_vport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Can not get free vport from pool"); - - return RETURN_ERROR; - } - - unf_init_port_parms(unf_vport); - unf_init_vport_from_lport(unf_vport, unf_lport); - - if ((unf_lport->port_name & VPORT_WWN_MASK) == (wwpn & VPORT_WWN_MASK)) { - vport_id = (wwpn & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT; - if (vport_id == 0) - vport_id = (unf_lport->port_name & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT; - } - - if (unf_alloc_vp_index(unf_lport->vport_pool, unf_vport, vport_id) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Vport can not allocate vport index"); - unf_vport_back_to_pool(unf_vport); - - return RETURN_ERROR; - } - unf_vport->port_id = (((u32)unf_vport->vp_index) << PORTID_VPINDEX_SHIT) | - unf_lport->port_id; - - vid.roles = FC_PORT_ROLE_FCP_INITIATOR; - 
vid.vport_type = FC_PORTTYPE_NPIV; - vid.disable = false; - vid.node_name = unf_lport->node_name; - - if (wwpn) { - vid.port_name = wwpn; - } else { - if ((unf_lport->port_name & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT != - unf_vport->vp_index) { - vid.port_name = (unf_lport->port_name & VPORT_WWN_MASK) | - (((u64)unf_vport->vp_index) << VPORT_WWN_SHIFT); - } else { - vid.port_name = (unf_lport->port_name & VPORT_WWN_MASK); - } - } - - unf_vport->port_name = vid.port_name; - - host = unf_lport->host_info.host; - - if (!fc_vport_create(host, 0, &vid)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) Cannot Failed to create vport wwpn=%llx", - unf_lport->port_id, vid.port_name); - - unf_vport_back_to_pool(unf_vport); - - return RETURN_ERROR; - } - - unf_vport->qos_level = qos_level; - return RETURN_OK; -} - -struct unf_lport *unf_creat_vport(struct unf_lport *lport, - struct vport_config *vport_config) -{ - u32 ret = RETURN_OK; - struct unf_lport *unf_lport = NULL; - struct unf_lport *vport = NULL; - enum unf_act_topo lport_topo; - enum unf_lport_login_state lport_state; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(vport_config, NULL); - - if (vport_config->port_mode != FC_PORT_ROLE_FCP_INITIATOR) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Only support INITIATOR port mode(0x%x)", - vport_config->port_mode); - - return NULL; - } - unf_lport = lport; - - if (unf_lport->root_lport != unf_lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) not root port return", - unf_lport->port_id); - - return NULL; - } - - vport = unf_cm_lookup_vport_by_wwpn(unf_lport, vport_config->port_name); - if (!vport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Port(0x%x) can not find vport with wwpn(0x%llx)", - unf_lport->port_id, vport_config->port_name); - - return NULL; - } - - ret = unf_vport_init(vport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]VPort(0x%x) can not initialize vport", - vport->port_id); - - return NULL; - } - - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - lport_topo = unf_lport->act_topo; - lport_state = unf_lport->states; - - vport_config->node_name = unf_lport->node_name; - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - vport->port_name = vport_config->port_name; - vport->node_name = vport_config->node_name; - - if (lport_topo == UNF_ACT_TOP_P2P_FABRIC && - lport_state >= UNF_LPORT_ST_PLOGI_WAIT && - lport_state <= UNF_LPORT_ST_READY) { - vport->link_up = unf_lport->link_up; - (void)unf_lport_login(vport, lport_topo); - } - - return vport; -} - -u32 unf_drop_vport(struct unf_lport *vport) -{ - u32 ret = RETURN_ERROR; - struct fc_vport *unf_vport = NULL; - - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - - unf_vport = vport->vport; - if (!unf_vport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]VPort(0x%x) find vport in scsi is NULL", - vport->port_id); - - return ret; - } - - ret = fc_vport_terminate(unf_vport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]VPort(0x%x) terminate vport(%p) in scsi failed", - vport->port_id, unf_vport); - - return ret; - } - return ret; -} - -u32 unf_delete_vport(u32 port_id, u32 vp_index) -{ - struct unf_lport *unf_lport = NULL; - u16 unf_vp_index = 0; - struct unf_lport *vport = NULL; - - unf_lport = unf_find_lport_by_port_id(port_id); - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can not be found by portid", port_id); - - return RETURN_ERROR; - } - - if 
(atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) is in NOP, destroy all vports function will be called", - unf_lport->port_id); - - return RETURN_OK; - } - - UNF_TOU16_CHECK(unf_vp_index, vp_index, return RETURN_ERROR); - vport = unf_cm_lookup_vport_by_vp_index(unf_lport, unf_vp_index); - if (!vport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Can not lookup VPort by VPort index(0x%x)", - unf_vp_index); - - return RETURN_ERROR; - } - - return unf_drop_vport(vport); -} - -void unf_vport_abort_all_sfs_exch(struct unf_lport *vport) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_xchg *exch = NULL; - ulong pool_lock_flags = 0; - ulong exch_lock_flags = 0; - u32 i; - - FC_CHECK_RETURN_VOID(vport); - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport((struct unf_lport *)(vport->root_lport), i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) hot pool is NULL", - ((struct unf_lport *)(vport->root_lport))->port_id); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->sfs_busylist) { - exch = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&exch->xchg_state_lock, exch_lock_flags); - if (vport == exch->lport && (atomic_read(&exch->ref_cnt) > 0)) { - exch->io_state |= TGT_IO_STATE_ABORT; - spin_unlock_irqrestore(&exch->xchg_state_lock, exch_lock_flags); - unf_disc_ctrl_size_inc(vport, exch->cmnd_code); - /* Transfer exch to destroy chain */ - list_del(xchg_node); - list_add_tail(xchg_node, &hot_pool->list_destroy_xchg); - } else { - spin_unlock_irqrestore(&exch->xchg_state_lock, exch_lock_flags); - } - } - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - } -} - -void unf_vport_abort_ini_io_exch(struct unf_lport *vport) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_xchg *exch = NULL; - ulong pool_lock_flags = 0; - ulong exch_lock_flags = 0; - u32 i; - - FC_CHECK_RETURN_VOID(vport); - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport((struct unf_lport *)(vport->root_lport), i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) MgrIdex %u hot pool is NULL", - ((struct unf_lport *)(vport->root_lport))->port_id, i); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->ini_busylist) { - exch = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - - if (vport == exch->lport && atomic_read(&exch->ref_cnt) > 0) { - /* Transfer exch to destroy chain */ - list_del(xchg_node); - list_add_tail(xchg_node, &hot_pool->list_destroy_xchg); - - spin_lock_irqsave(&exch->xchg_state_lock, exch_lock_flags); - exch->io_state |= INI_IO_STATE_DRABORT; - spin_unlock_irqrestore(&exch->xchg_state_lock, exch_lock_flags); - } - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - } -} - -void unf_vport_abort_exch(struct unf_lport *vport) -{ - FC_CHECK_RETURN_VOID(vport); - - unf_vport_abort_all_sfs_exch(vport); - - unf_vport_abort_ini_io_exch(vport); -} - -u32 unf_vport_wait_all_exch_removed(struct unf_lport *vport) -{ -#define 
UNF_WAIT_EXCH_REMOVE_ONE_TIME_MS 1000 - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_xchg *exch = NULL; - u32 vport_uses = 0; - ulong flags = 0; - u32 wait_timeout = 0; - u32 i = 0; - - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - - while (1) { - vport_uses = 0; - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = - unf_get_hot_pool_by_lport((struct unf_lport *)(vport->root_lport), i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) hot Pool is NULL", - ((struct unf_lport *)(vport->root_lport))->port_id); - - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - list_for_each_safe(xchg_node, next_xchg_node, - &hot_pool->list_destroy_xchg) { - exch = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - - if (exch->lport != vport) - continue; - vport_uses++; - if (wait_timeout >= - UNF_DELETE_VPORT_MAX_WAIT_TIME_MS) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[error]VPort(0x%x) Abort Exch(0x%p) Type(0x%x) OxRxid(0x%x 0x%x),sid did(0x%x 0x%x) SeqId(0x%x) IOState(0x%x) Ref(0x%x)", - vport->port_id, exch, - (u32)exch->xchg_type, - (u32)exch->oxid, - (u32)exch->rxid, (u32)exch->sid, - (u32)exch->did, (u32)exch->seq_id, - (u32)exch->io_state, - atomic_read(&exch->ref_cnt)); - } - } - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - } - - if (vport_uses == 0) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]VPort(0x%x) has removed all exchanges it used", - vport->port_id); - break; - } - - if (wait_timeout >= UNF_DELETE_VPORT_MAX_WAIT_TIME_MS) - return RETURN_ERROR; - - msleep(UNF_WAIT_EXCH_REMOVE_ONE_TIME_MS); - wait_timeout += UNF_WAIT_EXCH_REMOVE_ONE_TIME_MS; - } - - return RETURN_OK; -} - -u32 unf_vport_wait_rports_removed(struct unf_lport *vport) -{ -#define UNF_WAIT_RPORT_REMOVE_ONE_TIME_MS 5000 - - struct unf_disc *disc = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - u32 vport_uses = 0; - ulong flags = 0; - u32 wait_timeout = 0; - struct unf_rport *unf_rport = NULL; - - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - disc = &vport->disc; - - while (1) { - vport_uses = 0; - spin_lock_irqsave(&disc->rport_busy_pool_lock, flags); - list_for_each_safe(node, next_node, &disc->list_delete_rports) { - unf_rport = list_entry(node, struct unf_rport, entry_rport); - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Vport(0x%x) Rport(0x%x) point(%p) is in Delete", - vport->port_id, unf_rport->nport_id, unf_rport); - vport_uses++; - } - - list_for_each_safe(node, next_node, &disc->list_destroy_rports) { - unf_rport = list_entry(node, struct unf_rport, entry_rport); - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Vport(0x%x) Rport(0x%x) point(%p) is in Destroy", - vport->port_id, unf_rport->nport_id, unf_rport); - vport_uses++; - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flags); - - if (vport_uses == 0) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]VPort(0x%x) has removed all RPorts it used", - vport->port_id); - break; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Vport(0x%x) has %u RPorts not removed wait timeout(%u ms)", - vport->port_id, vport_uses, wait_timeout); - - if (wait_timeout >= UNF_DELETE_VPORT_MAX_WAIT_TIME_MS) - return RETURN_ERROR; - - msleep(UNF_WAIT_RPORT_REMOVE_ONE_TIME_MS); - wait_timeout += UNF_WAIT_RPORT_REMOVE_ONE_TIME_MS; - } - - return RETURN_OK; -} - -u32 unf_destroy_one_vport(struct unf_lport *vport) -{ - u32 ret; - struct 
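Both wait routines above share one skeleton: count the stragglers under the lock, succeed when the count reaches zero, otherwise sleep a fixed interval and charge it against a hard cap. Sketched below with the counting abstracted into a callback; the interval and cap mirror UNF_WAIT_EXCH_REMOVE_ONE_TIME_MS and UNF_DELETE_VPORT_MAX_WAIT_TIME_MS, and the helper name is hypothetical.

#include <stdbool.h>
#include <unistd.h>

#define POLL_INTERVAL_MS 1000
#define MAX_WAIT_MS      60000

/* Poll 'in_use' until it reports zero or the time budget runs out. */
static bool wait_drained(unsigned int (*in_use)(void *ctx), void *ctx)
{
        unsigned int waited_ms = 0;

        for (;;) {
                if (in_use(ctx) == 0)
                        return true;       /* all resources returned */
                if (waited_ms >= MAX_WAIT_MS)
                        return false;      /* give up: caller handles dirt */
                usleep(POLL_INTERVAL_MS * 1000);
                waited_ms += POLL_INTERVAL_MS;
        }
}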
unf_lport *root_port = NULL; - - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - - root_port = (struct unf_lport *)vport->root_lport; - - unf_vport_fabric_logo(vport); - - /* 1 set NOP */ - atomic_set(&vport->lport_no_operate_flag, UNF_LPORT_NOP); - vport->port_removing = true; - - /* 2 report linkdown to scsi and delete rport */ - unf_linkdown_one_vport(vport); - - /* 3 set abort for exchange */ - unf_vport_abort_exch(vport); - - /* 4 wait for exch to return to freepool */ - if (!root_port->port_dirt_exchange) { - ret = unf_vport_wait_all_exch_removed(vport); - if (ret != RETURN_OK) { - if (!root_port->port_removing) { - vport->port_removing = false; - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]VPort(0x%x) cannot wait for Exchange to return to freepool", - vport->port_id); - - return RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]Port(0x%x) is removing, there is dirty exchange, continue", - root_port->port_id); - - root_port->port_dirt_exchange = true; - } - } - - /* wait for rport to return to rportpool */ - ret = unf_vport_wait_rports_removed(vport); - if (ret != RETURN_OK) { - vport->port_removing = false; - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]VPort(0x%x) cannot wait for Rport to return to freepool", - vport->port_id); - - return RETURN_ERROR; - } - - unf_cm_vport_remove(vport); - - return RETURN_OK; -} - -void unf_destroy_all_vports(struct unf_lport *lport) -{ - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_lport *vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - - unf_lport = lport; - FC_CHECK_RETURN_VOID(unf_lport); - - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Lport(0x%x) VPort pool is NULL", unf_lport->port_id); - - return; - } - - /* Transfer to the transition chain */ - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - vport = list_entry(node, struct unf_lport, entry_vport); - list_del_init(&vport->entry_vport); - list_add_tail(&vport->entry_vport, &unf_lport->list_destroy_vports); - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - vport = list_entry(node, struct unf_lport, entry_vport); - list_del_init(&vport->entry_vport); - list_add_tail(&vport->entry_vport, &unf_lport->list_destroy_vports); - atomic_dec(&vport->port_ref_cnt); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - while (!list_empty(&unf_lport->list_destroy_vports)) { - node = UNF_OS_LIST_NEXT(&unf_lport->list_destroy_vports); - vport = list_entry(node, struct unf_lport, entry_vport); - - list_del_init(&vport->entry_vport); - list_add_tail(&vport->entry_vport, &unf_lport->list_vports_head); - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]VPort(0x%x) Destroy begin", vport->port_id); - unf_drop_vport(vport); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[info]VPort(0x%x) Destroy end", vport->port_id); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); -} - -u32 unf_init_vport_mgr_temp(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - lport->lport_mgr_temp.unf_look_up_vport_by_index = unf_lookup_vport_by_index; - lport->lport_mgr_temp.unf_look_up_vport_by_port_id = 
unf_lookup_vport_by_portid; - lport->lport_mgr_temp.unf_look_up_vport_by_did = unf_lookup_vport_by_did; - lport->lport_mgr_temp.unf_look_up_vport_by_wwpn = unf_lookup_vport_by_wwpn; - lport->lport_mgr_temp.unf_vport_remove = unf_vport_remove; - - return RETURN_OK; -} - -void unf_release_vport_mgr_temp(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - memset(&lport->lport_mgr_temp, 0, sizeof(struct unf_cm_lport_template)); - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP; -} diff --git a/drivers/scsi/spfc/common/unf_npiv.h b/drivers/scsi/spfc/common/unf_npiv.h deleted file mode 100644 index 6f522470f47aba9cf1da9b31a6148e2be0a2b33b..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_npiv.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_NPIV_H -#define UNF_NPIV_H - -#include "unf_type.h" -#include "unf_common.h" -#include "unf_lport.h" - -/* product VPORT configure */ -struct vport_config { - u64 node_name; - u64 port_name; - u32 port_mode; /* INI, TGT or both */ -}; - -/* product Vport function */ -#define PORTID_VPINDEX_MASK 0xff000000 -#define PORTID_VPINDEX_SHIT 24 -u32 unf_npiv_conf(u32 port_id, u64 wwpn, enum unf_rport_qos_level qos_level); -struct unf_lport *unf_creat_vport(struct unf_lport *lport, - struct vport_config *vport_config); -u32 unf_delete_vport(u32 port_id, u32 vp_index); - -/* Vport pool creat and release function */ -u32 unf_init_vport_pool(struct unf_lport *lport); -void unf_free_vport_pool(struct unf_lport *lport); - -/* Lport resigster stLPortMgTemp function */ -void unf_vport_remove(void *vport); -void unf_vport_ref_dec(struct unf_lport *vport); - -/* linkdown all Vport after receive linkdown event */ -void unf_linkdown_all_vports(void *lport); -/* Lport receive Flogi Acc linkup all Vport */ -void unf_linkup_all_vports(struct unf_lport *lport); -/* Lport remove delete all Vport */ -void unf_destroy_all_vports(struct unf_lport *lport); -void unf_vport_fabric_logo(struct unf_lport *vport); -u32 unf_destroy_one_vport(struct unf_lport *vport); -u32 unf_drop_vport(struct unf_lport *vport); -u32 unf_init_vport_mgr_temp(struct unf_lport *lport); -void unf_release_vport_mgr_temp(struct unf_lport *lport); -struct unf_lport *unf_get_vport_by_slab_index(struct unf_vport_pool *vport_pool, - u16 slab_index); -#endif diff --git a/drivers/scsi/spfc/common/unf_npiv_portman.c b/drivers/scsi/spfc/common/unf_npiv_portman.c deleted file mode 100644 index b4f393f2e732c873ea5850fcbc7e4aad22566375..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_npiv_portman.c +++ /dev/null @@ -1,360 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_npiv_portman.h" -#include "unf_log.h" -#include "unf_common.h" -#include "unf_rport.h" -#include "unf_npiv.h" -#include "unf_portman.h" - -void *unf_lookup_vport_by_index(void *lport, u16 vp_index) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)lport; - - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) vport pool is NULL", unf_lport->port_id); - - return NULL; - } - - if (vp_index == 0 || vp_index > vport_pool->slab_total_sum) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, 
- "[err]Port(0x%x) input vport index(0x%x) is beyond the normal range(0x1~0x%x)", - unf_lport->port_id, vp_index, vport_pool->slab_total_sum); - - return NULL; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - unf_vport = unf_get_vport_by_slab_index(vport_pool, vp_index - 1); - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - return (void *)unf_vport; -} - -void *unf_lookup_vport_by_portid(void *lport, u32 port_id) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)lport; - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) vport pool is NULL", unf_lport->port_id); - - return NULL; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->port_id == port_id) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->port_id == port_id) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has no vport ID(0x%x).", - unf_lport->port_id, port_id); - return NULL; -} - -void *unf_lookup_vport_by_did(void *lport, u32 did) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)lport; - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) vport pool is NULL", unf_lport->port_id); - - return NULL; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->nport_id == did) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - return unf_vport; - } - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->nport_id == did) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has no vport Nport ID(0x%x)", unf_lport->port_id, did); - return NULL; -} - -void *unf_lookup_vport_by_wwpn(void *lport, u64 wwpn) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)lport; - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) vport 
pool is NULL", unf_lport->port_id); - - return NULL; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->port_name == wwpn) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - return unf_vport; - } - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->port_name == wwpn) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) has no vport WWPN(0x%llx)", - unf_lport->port_id, wwpn); - - return NULL; -} - -void unf_linkdown_one_vport(struct unf_lport *vport) -{ - ulong flag = 0; - struct unf_lport *root_lport = NULL; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT, - "[info]VPort(0x%x) linkdown", vport->port_id); - - spin_lock_irqsave(&vport->lport_state_lock, flag); - vport->link_up = UNF_PORT_LINK_DOWN; - vport->nport_id = 0; /* set nportid 0 before send fdisc again */ - unf_lport_state_ma(vport, UNF_EVENT_LPORT_LINK_DOWN); - spin_unlock_irqrestore(&vport->lport_state_lock, flag); - - root_lport = (struct unf_lport *)vport->root_lport; - - unf_flush_disc_event(&root_lport->disc, vport); - - unf_clean_linkdown_rport(vport); -} - -void unf_linkdown_all_vports(void *lport) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = (struct unf_lport *)lport; - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) VPort pool is NULL", unf_lport->port_id); - - return; - } - - /* Transfer to the transition chain */ - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - list_del_init(&unf_vport->entry_vport); - list_add_tail(&unf_vport->entry_vport, &unf_lport->list_intergrad_vports); - (void)unf_lport_ref_inc(unf_vport); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - while (!list_empty(&unf_lport->list_intergrad_vports)) { - node = UNF_OS_LIST_NEXT(&unf_lport->list_intergrad_vports); - unf_vport = list_entry(node, struct unf_lport, entry_vport); - - list_del_init(&unf_vport->entry_vport); - list_add_tail(&unf_vport->entry_vport, &unf_lport->list_vports_head); - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - unf_linkdown_one_vport(unf_vport); - - unf_vport_ref_dec(unf_vport); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); -} - -int unf_process_vports_linkup(void *arg_in, void *arg_out) -{ -#define UNF_WAIT_VPORT_LOGIN_ONE_TIME_MS 100 - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - int ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(arg_in, RETURN_ERROR); - - unf_lport = (struct unf_lport *)arg_in; - - if 
(atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is NOP don't continue", unf_lport->port_id); - - return RETURN_OK; - } - - if (unf_lport->link_up != UNF_PORT_LINK_UP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is not linkup don't continue.", - unf_lport->port_id); - - return RETURN_OK; - } - - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) VPort pool is NULL.", unf_lport->port_id); - - return RETURN_OK; - } - - /* Transfer to the transition chain */ - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - list_del_init(&unf_vport->entry_vport); - list_add_tail(&unf_vport->entry_vport, &unf_lport->list_intergrad_vports); - (void)unf_lport_ref_inc(unf_vport); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - while (!list_empty(&unf_lport->list_intergrad_vports)) { - node = UNF_OS_LIST_NEXT(&unf_lport->list_intergrad_vports); - unf_vport = list_entry(node, struct unf_lport, entry_vport); - - list_del_init(&unf_vport->entry_vport); - list_add_tail(&unf_vport->entry_vport, &unf_lport->list_vports_head); - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - if (atomic_read(&unf_vport->lport_no_operate_flag) == UNF_LPORT_NOP) { - unf_vport_ref_dec(unf_vport); - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - continue; - } - - if (unf_lport->link_up == UNF_PORT_LINK_UP && - unf_lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Vport(0x%x) begin login", unf_vport->port_id); - - unf_vport->link_up = UNF_PORT_LINK_UP; - (void)unf_lport_login(unf_vport, unf_lport->act_topo); - - msleep(UNF_WAIT_VPORT_LOGIN_ONE_TIME_MS); - } else { - unf_linkdown_one_vport(unf_vport); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Vport(0x%x) login failed because root port linkdown", - unf_vport->port_id); - } - - unf_vport_ref_dec(unf_vport); - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - return ret; -} - -void unf_linkup_all_vports(struct unf_lport *lport) -{ - struct unf_cm_event_report *event = NULL; - - FC_CHECK_RETURN_VOID(lport); - - if (unlikely(!lport->event_mgr.unf_get_free_event_func || - !lport->event_mgr.unf_post_event_func || - !lport->event_mgr.unf_release_event)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) Event fun is NULL", - lport->port_id); - return; - } - - event = lport->event_mgr.unf_get_free_event_func((void *)lport); - FC_CHECK_RETURN_VOID(event); - - event->lport = lport; - event->event_asy_flag = UNF_EVENT_ASYN; - event->unf_event_task = unf_process_vports_linkup; - event->para_in = (void *)lport; - - lport->event_mgr.unf_post_event_func(lport, event); -} diff --git a/drivers/scsi/spfc/common/unf_npiv_portman.h b/drivers/scsi/spfc/common/unf_npiv_portman.h deleted file mode 100644 index 284c23c9abe40523459510f645e29c62beb4afc0..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_npiv_portman.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_NPIV_PORTMAN_H -#define UNF_NPIV_PORTMAN_H - -#include "unf_type.h" 
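/*
 * Editor's note -- a minimal userspace sketch, not the driver's API, of the
 * locked dual-list scan shared by the unf_lookup_vport_by_*() helpers this
 * header declares just below (implemented in unf_npiv_portman.c above):
 * take the pool lock, search the active list, fall back to the in-transition
 * list, and drop the lock once a match is found. All names here (vport,
 * vport_table, find_vport_by_id) are illustrative; pthread mutexes stand in
 * for the driver's spinlocks.
 */
#include <pthread.h>
#include <stddef.h>

struct vport {
        unsigned int port_id;
        struct vport *next;
};

struct vport_table {
        pthread_mutex_t lock;
        struct vport *active;     /* list_vports_head analogue */
        struct vport *in_transit; /* list_intergrad_vports analogue */
};

static struct vport *scan(struct vport *head, unsigned int id)
{
        for (; head; head = head->next)
                if (head->port_id == id)
                        return head;
        return NULL;
}

static struct vport *find_vport_by_id(struct vport_table *t, unsigned int id)
{
        struct vport *v;

        pthread_mutex_lock(&t->lock);
        v = scan(t->active, id);
        if (!v)
                v = scan(t->in_transit, id);
        pthread_mutex_unlock(&t->lock);
        return v; /* caller must ensure the entry cannot be freed concurrently */
}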
-#include "unf_lport.h" - -/* Lport resigster stLPortMgTemp function */ -void *unf_lookup_vport_by_index(void *lport, u16 vp_index); -void *unf_lookup_vport_by_portid(void *lport, u32 port_id); -void *unf_lookup_vport_by_did(void *lport, u32 did); -void *unf_lookup_vport_by_wwpn(void *lport, u64 wwpn); -void unf_linkdown_one_vport(struct unf_lport *vport); - -#endif diff --git a/drivers/scsi/spfc/common/unf_portman.c b/drivers/scsi/spfc/common/unf_portman.c deleted file mode 100644 index ef8f90eb377702963a41b41ef3291caf6a82838a..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_portman.c +++ /dev/null @@ -1,2431 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_portman.h" -#include "unf_log.h" -#include "unf_exchg.h" -#include "unf_rport.h" -#include "unf_io.h" -#include "unf_npiv.h" -#include "unf_scsi_common.h" - -#define UNF_LPORT_CHIP_ERROR(unf_lport) \ - ((unf_lport)->pcie_error_cnt.pcie_error_count[UNF_PCIE_FATALERRORDETECTED]) - -struct unf_global_lport global_lport_mgr; - -static int unf_port_switch(struct unf_lport *lport, bool switch_flag); -static u32 unf_build_lport_wwn(struct unf_lport *lport); -static int unf_lport_destroy(void *lport, void *arg_out); -static u32 unf_port_linkup(struct unf_lport *lport, void *input); -static u32 unf_port_linkdown(struct unf_lport *lport, void *input); -static u32 unf_port_abnormal_reset(struct unf_lport *lport, void *input); -static u32 unf_port_reset_start(struct unf_lport *lport, void *input); -static u32 unf_port_reset_end(struct unf_lport *lport, void *input); -static u32 unf_port_nop(struct unf_lport *lport, void *input); -static void unf_destroy_card_thread(struct unf_lport *lport); -static u32 unf_creat_card_thread(struct unf_lport *lport); -static u32 unf_find_card_thread(struct unf_lport *lport); -static u32 unf_port_begin_remove(struct unf_lport *lport, void *input); - -static struct unf_port_action g_lport_action[] = { - {UNF_PORT_LINK_UP, unf_port_linkup}, - {UNF_PORT_LINK_DOWN, unf_port_linkdown}, - {UNF_PORT_RESET_START, unf_port_reset_start}, - {UNF_PORT_RESET_END, unf_port_reset_end}, - {UNF_PORT_NOP, unf_port_nop}, - {UNF_PORT_BEGIN_REMOVE, unf_port_begin_remove}, - {UNF_PORT_RELEASE_RPORT_INDEX, unf_port_release_rport_index}, - {UNF_PORT_ABNORMAL_RESET, unf_port_abnormal_reset}, -}; - -static void unf_destroy_dirty_rport(struct unf_lport *lport, bool show_only) -{ - u32 dirty_rport = 0; - - /* for whole L_Port */ - if (lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY) { - dirty_rport = lport->rport_pool.rport_pool_count; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) has %d dirty RPort(s)", - lport->port_id, dirty_rport); - - /* Show L_Port's R_Port(s) from busy_list & destroy_list */ - unf_show_all_rport(lport); - - /* free R_Port pool memory & bitmap */ - if (!show_only) { - vfree(lport->rport_pool.rport_pool_add); - lport->rport_pool.rport_pool_add = NULL; - vfree(lport->rport_pool.rpi_bitmap); - lport->rport_pool.rpi_bitmap = NULL; - } - } -} - -void unf_show_dirty_port(bool show_only, u32 *dirty_port_num) -{ - struct list_head *node = NULL; - struct list_head *node_next = NULL; - struct unf_lport *unf_lport = NULL; - ulong flags = 0; - u32 port_num = 0; - - FC_CHECK_RETURN_VOID(dirty_port_num); - - /* for each dirty L_Port from global L_Port list */ - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - list_for_each_safe(node, node_next, &global_lport_mgr.dirty_list_head) { - 
unf_lport = list_entry(node, struct unf_lport, entry_lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) has dirty data(0x%x)", - unf_lport->port_id, unf_lport->dirty_flag); - - /* Destroy dirty L_Port's exchange(s) & R_Port(s) */ - unf_destroy_dirty_xchg(unf_lport, show_only); - unf_destroy_dirty_rport(unf_lport, show_only); - - /* Delete (dirty L_Port) list entry if necessary */ - if (!show_only) { - list_del_init(node); - vfree(unf_lport); - } - - port_num++; - } - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - - *dirty_port_num = port_num; -} - -void unf_show_all_rport(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_disc *disc = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - u32 rport_cnt = 0; - u32 target_cnt = 0; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = lport; - disc = &unf_lport->disc; - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Port(0x%x) disc state(0x%x)", unf_lport->port_id, disc->states); - - /* for each R_Port from busy_list */ - list_for_each_safe(node, next_node, &disc->list_busy_rports) { - unf_rport = list_entry(node, struct unf_rport, entry_rport); - rport_cnt++; - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Port(0x%x) busy RPorts(%u_%p) WWPN(0x%016llx) scsi_id(0x%x) local N_Port_ID(0x%x) N_Port_ID(0x%06x). State(0x%04x) options(0x%04x) index(0x%04x) ref(%d) pend:%d", - unf_lport->port_id, rport_cnt, unf_rport, - unf_rport->port_name, unf_rport->scsi_id, - unf_rport->local_nport_id, unf_rport->nport_id, - unf_rport->rp_state, unf_rport->options, - unf_rport->rport_index, - atomic_read(&unf_rport->rport_ref_cnt), - atomic_read(&unf_rport->pending_io_cnt)); - - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) - target_cnt++; - } - - unf_lport->target_cnt = target_cnt; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) targetnum=(%u)", unf_lport->port_id, - unf_lport->target_cnt); - - /* for each R_Port from destroy_list */ - list_for_each_safe(node, next_node, &disc->list_destroy_rports) { - unf_rport = list_entry(node, struct unf_rport, entry_rport); - rport_cnt++; - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Port(0x%x) destroy RPorts(%u) WWPN(0x%016llx) N_Port_ID(0x%06x) State(0x%04x) options(0x%04x) index(0x%04x) ref(%d)", - unf_lport->port_id, rport_cnt, unf_rport->port_name, - unf_rport->nport_id, unf_rport->rp_state, - unf_rport->options, unf_rport->rport_index, - atomic_read(&unf_rport->rport_ref_cnt)); - } - - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); -} - -u32 unf_lport_ref_inc(struct unf_lport *lport) -{ - ulong lport_flags = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&lport->lport_state_lock, lport_flags); - if (atomic_read(&lport->port_ref_cnt) <= 0) { - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - - return UNF_RETURN_ERROR; - } - - atomic_inc(&lport->port_ref_cnt); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%p) port_id(0x%x) reference count is %d", - lport, lport->port_id, atomic_read(&lport->port_ref_cnt)); - - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - - return RETURN_OK; -} - -void unf_lport_ref_dec(struct unf_lport *lport) -{ - ulong flags = 0; - ulong lport_flags = 0; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "LPort(0x%p), port ID(0x%x), 
reference count is %d.", - lport, lport->port_id, atomic_read(&lport->port_ref_cnt)); - - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - spin_lock_irqsave(&lport->lport_state_lock, lport_flags); - if (atomic_dec_and_test(&lport->port_ref_cnt)) { - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - list_del(&lport->entry_lport); - global_lport_mgr.lport_sum--; - - /* attaches the lport to the destroy linked list for dfx */ - list_add_tail(&lport->entry_lport, &global_lport_mgr.destroy_list_head); - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - - (void)unf_lport_destroy(lport, NULL); - } else { - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - } -} - -void unf_lport_update_topo(struct unf_lport *lport, - enum unf_act_topo active_topo) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - - if (active_topo > UNF_ACT_TOP_UNKNOWN || active_topo < UNF_ACT_TOP_PUBLIC_LOOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) set invalid topology(0x%x) with current value(0x%x)", - lport->nport_id, active_topo, lport->act_topo); - - return; - } - - spin_lock_irqsave(&lport->lport_state_lock, flag); - lport->act_topo = active_topo; - spin_unlock_irqrestore(&lport->lport_state_lock, flag); -} - -void unf_set_lport_removing(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - lport->fc_port = NULL; - lport->port_removing = true; - lport->destroy_step = UNF_LPORT_DESTROY_STEP_0_SET_REMOVING; -} - -u32 unf_release_local_port(void *lport) -{ - struct unf_lport *unf_lport = lport; - struct completion lport_free_completion; - - init_completion(&lport_free_completion); - FC_CHECK_RETURN_VALUE(unf_lport, UNF_RETURN_ERROR); - - unf_lport->lport_free_completion = &lport_free_completion; - unf_set_lport_removing(unf_lport); - unf_lport_ref_dec(unf_lport); - wait_for_completion(unf_lport->lport_free_completion); - /* for dirty case */ - if (unf_lport->dirty_flag == 0) - vfree(unf_lport); - - return RETURN_OK; -} - -static void unf_free_all_esgl_pages(struct unf_lport *lport) -{ - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - u32 i; - - FC_CHECK_RETURN_VOID(lport); - spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag); - list_for_each_safe(node, next_node, &lport->esgl_pool.list_esgl_pool) { - list_del(node); - } - - spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); - if (lport->esgl_pool.esgl_buff_list.buflist) { - for (i = 0; i < lport->esgl_pool.esgl_buff_list.buf_num; i++) { - if (lport->esgl_pool.esgl_buff_list.buflist[i].vaddr) { - dma_free_coherent(&lport->low_level_func.dev->dev, - lport->esgl_pool.esgl_buff_list.buf_size, - lport->esgl_pool.esgl_buff_list.buflist[i].vaddr, - lport->esgl_pool.esgl_buff_list.buflist[i].paddr); - lport->esgl_pool.esgl_buff_list.buflist[i].vaddr = NULL; - } - } - kfree(lport->esgl_pool.esgl_buff_list.buflist); - lport->esgl_pool.esgl_buff_list.buflist = NULL; - } -} - -static u32 unf_init_esgl_pool(struct unf_lport *lport) -{ - struct unf_esgl *esgl = NULL; - u32 ret = RETURN_OK; - u32 index = 0; - u32 buf_total_size; - u32 buf_num; - u32 alloc_idx; - u32 curbuf_idx = 0; - u32 curbuf_offset = 0; - u32 buf_cnt_perhugebuf; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - lport->esgl_pool.esgl_pool_count = lport->low_level_func.lport_cfg_items.max_io; - spin_lock_init(&lport->esgl_pool.esgl_pool_lock); - 
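/*
 * Editor's note -- unf_init_esgl_pool() (this function) carves one page per
 * ESGL entry out of a small number of large DMA buffers rather than doing a
 * per-page allocation. A standalone sketch of just the sizing arithmetic,
 * under assumed PAGE_SZ/HUGE_SZ values (HUGE_SZ plays the role of
 * BUF_LIST_PAGE_SIZE): a ceiling division picks the buffer count, and each
 * entry index maps to a (buffer, byte offset) pair. The driver expresses the
 * same ceiling with a modulo test; the idiom below is equivalent.
 */
#include <stdio.h>

#define PAGE_SZ 4096u
#define HUGE_SZ (1024u * 1024u)   /* illustrative big-buffer size */

int main(void)
{
        unsigned int count = 600;                  /* pool entries (example) */
        unsigned int per_huge = HUGE_SZ / PAGE_SZ; /* pages per big buffer: 256 */
        unsigned int buf_num = (count + per_huge - 1) / per_huge; /* ceil -> 3 */
        unsigned int idx;

        for (idx = 0; idx < count; idx++) {
                unsigned int buf = idx / per_huge;             /* which big buffer */
                unsigned int off = PAGE_SZ * (idx % per_huge); /* offset inside it */

                if (idx % 250 == 0) /* print a few sample mappings */
                        printf("entry %u -> buffer %u offset %u\n", idx, buf, off);
        }
        printf("buffers needed: %u\n", buf_num);
        return 0;
}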
INIT_LIST_HEAD(&lport->esgl_pool.list_esgl_pool); - - lport->esgl_pool.esgl_pool_addr = - vmalloc((size_t)((lport->esgl_pool.esgl_pool_count) * sizeof(struct unf_esgl))); - if (!lport->esgl_pool.esgl_pool_addr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "LPort(0x%x) cannot allocate ESGL Pool.", lport->port_id); - - return UNF_RETURN_ERROR; - } - esgl = (struct unf_esgl *)lport->esgl_pool.esgl_pool_addr; - memset(esgl, 0, ((lport->esgl_pool.esgl_pool_count) * sizeof(struct unf_esgl))); - - buf_total_size = (u32)(PAGE_SIZE * lport->esgl_pool.esgl_pool_count); - - lport->esgl_pool.esgl_buff_list.buf_size = - buf_total_size > BUF_LIST_PAGE_SIZE ? BUF_LIST_PAGE_SIZE : buf_total_size; - buf_cnt_perhugebuf = lport->esgl_pool.esgl_buff_list.buf_size / PAGE_SIZE; - buf_num = lport->esgl_pool.esgl_pool_count % buf_cnt_perhugebuf - ? lport->esgl_pool.esgl_pool_count / buf_cnt_perhugebuf + 1 - : lport->esgl_pool.esgl_pool_count / buf_cnt_perhugebuf; - lport->esgl_pool.esgl_buff_list.buflist = - (struct buff_list *)kmalloc(buf_num * sizeof(struct buff_list), GFP_KERNEL); - lport->esgl_pool.esgl_buff_list.buf_num = buf_num; - - if (!lport->esgl_pool.esgl_buff_list.buflist) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate Esgl pool buf list failed out of memory"); - goto free_buff; - } - memset(lport->esgl_pool.esgl_buff_list.buflist, 0, buf_num * sizeof(struct buff_list)); - - for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { - lport->esgl_pool.esgl_buff_list.buflist[alloc_idx] - .vaddr = dma_alloc_coherent(&lport->low_level_func.dev->dev, - lport->esgl_pool.esgl_buff_list.buf_size, - &lport->esgl_pool.esgl_buff_list.buflist[alloc_idx].paddr, GFP_KERNEL); - if (!lport->esgl_pool.esgl_buff_list.buflist[alloc_idx].vaddr) - goto free_buff; - memset(lport->esgl_pool.esgl_buff_list.buflist[alloc_idx].vaddr, 0, - lport->esgl_pool.esgl_buff_list.buf_size); - } - - /* allocates the Esgl page, and the DMA uses the */ - for (index = 0; index < lport->esgl_pool.esgl_pool_count; index++) { - if (index != 0 && !(index % buf_cnt_perhugebuf)) - curbuf_idx++; - curbuf_offset = (u32)(PAGE_SIZE * (index % buf_cnt_perhugebuf)); - esgl->page.page_address = - (u64)lport->esgl_pool.esgl_buff_list.buflist[curbuf_idx].vaddr + curbuf_offset; - esgl->page.page_size = PAGE_SIZE; - esgl->page.esgl_phy_addr = - lport->esgl_pool.esgl_buff_list.buflist[curbuf_idx].paddr + curbuf_offset; - list_add_tail(&esgl->entry_esgl, &lport->esgl_pool.list_esgl_pool); - esgl++; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[EVENT]Allocate bufnum:%u,buf_total_size:%u", buf_num, buf_total_size); - - return ret; -free_buff: - unf_free_all_esgl_pages(lport); - vfree(lport->esgl_pool.esgl_pool_addr); - - return UNF_RETURN_ERROR; -} - -static void unf_free_esgl_pool(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - unf_free_all_esgl_pages(lport); - lport->esgl_pool.esgl_pool_count = 0; - - if (lport->esgl_pool.esgl_pool_addr) { - vfree(lport->esgl_pool.esgl_pool_addr); - lport->esgl_pool.esgl_pool_addr = NULL; - } - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL; -} - -struct unf_lport *unf_find_lport_by_port_id(u32 port_id) -{ - struct unf_lport *unf_lport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - u32 portid = port_id & (~PORTID_VPINDEX_MASK); - u16 vport_index; - spinlock_t *lport_list_lock = NULL; - - lport_list_lock = &global_lport_mgr.global_lport_list_lock; - vport_index = (port_id & PORTID_VPINDEX_MASK) >> PORTID_VPINDEX_SHIT; - 
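/*
 * Editor's note -- unf_find_lport_by_port_id() (this function) splits a
 * composite port id into the physical port id and a packed vport index,
 * where index 0 denotes the root L_Port itself. A standalone sketch of that
 * mask/shift decode; the mask and shift values are assumptions for
 * illustration, since the driver's PORTID_VPINDEX_* constants are not
 * defined in this hunk.
 */
#include <stdint.h>
#include <stdio.h>

#define VPINDEX_MASK  0xff000000u /* assumed layout: index in bits 31..24 */
#define VPINDEX_SHIFT 24

int main(void)
{
        uint32_t composite = 0x02011234; /* example input */
        uint32_t base_id = composite & ~VPINDEX_MASK;
        uint32_t vp_index = (composite & VPINDEX_MASK) >> VPINDEX_SHIFT;

        printf("port_id=0x%x vp_index=0x%x\n",
               (unsigned)base_id, (unsigned)vp_index); /* 0x11234, 0x2 */
        return 0;
}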
spin_lock_irqsave(lport_list_lock, flags); - - list_for_each_safe(node, next_node, &global_lport_mgr.lport_list_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - if (unf_lport->port_id == portid && !unf_lport->port_removing) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_cm_lookup_vport_by_vp_index(unf_lport, vport_index); - } - } - - list_for_each_safe(node, next_node, &global_lport_mgr.intergrad_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - if (unf_lport->port_id == portid && !unf_lport->port_removing) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_cm_lookup_vport_by_vp_index(unf_lport, vport_index); - } - } - - spin_unlock_irqrestore(lport_list_lock, flags); - - return NULL; -} - -u32 unf_is_vport_valid(struct unf_lport *lport, struct unf_lport *vport) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - spinlock_t *vport_pool_lock = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(vport, UNF_RETURN_ERROR); - - unf_lport = lport; - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) vport pool is NULL", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - vport_pool_lock = &vport_pool->vport_pool_lock; - spin_lock_irqsave(vport_pool_lock, flag); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - - if (unf_vport == vport && !unf_vport->port_removing) { - spin_unlock_irqrestore(vport_pool_lock, flag); - - return RETURN_OK; - } - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - - if (unf_vport == vport && !unf_vport->port_removing) { - spin_unlock_irqrestore(vport_pool_lock, flag); - - return RETURN_OK; - } - } - spin_unlock_irqrestore(vport_pool_lock, flag); - - return UNF_RETURN_ERROR; -} - -u32 unf_is_lport_valid(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - spinlock_t *lport_list_lock = NULL; - - lport_list_lock = &global_lport_mgr.global_lport_list_lock; - spin_lock_irqsave(lport_list_lock, flags); - - list_for_each_safe(node, next_node, &global_lport_mgr.lport_list_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - - if (unf_lport == lport && !unf_lport->port_removing) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - - if (unf_is_vport_valid(unf_lport, lport) == RETURN_OK) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - } - - list_for_each_safe(node, next_node, &global_lport_mgr.intergrad_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - - if (unf_lport == lport && !unf_lport->port_removing) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - - if (unf_is_vport_valid(unf_lport, lport) == RETURN_OK) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - } - - list_for_each_safe(node, next_node, &global_lport_mgr.destroy_list_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - - if (unf_lport == lport && !unf_lport->port_removing) { - 
spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - - if (unf_is_vport_valid(unf_lport, lport) == RETURN_OK) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - } - - spin_unlock_irqrestore(lport_list_lock, flags); - - return UNF_RETURN_ERROR; -} - -static void unf_clean_linkdown_io(struct unf_lport *lport, bool clean_flag) -{ - /* Clean L_Port/V_Port Link Down I/O: Set Abort Tag */ - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(lport->xchg_mgr_temp.unf_xchg_abort_all_io); - - lport->xchg_mgr_temp.unf_xchg_abort_all_io(lport, UNF_XCHG_TYPE_INI, clean_flag); - lport->xchg_mgr_temp.unf_xchg_abort_all_io(lport, UNF_XCHG_TYPE_SFS, clean_flag); -} - -u32 unf_fc_port_link_event(void *lport, u32 events, void *input) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 index = 0; - - if (unlikely(!lport)) - return UNF_RETURN_ERROR; - unf_lport = (struct unf_lport *)lport; - - ret = unf_lport_ref_inc(unf_lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) is removing and do nothing", - unf_lport->port_id); - - return RETURN_OK; - } - - /* process port event */ - while (index < (sizeof(g_lport_action) / sizeof(struct unf_port_action))) { - if (g_lport_action[index].action == events) { - ret = g_lport_action[index].unf_action(unf_lport, input); - - unf_lport_ref_dec_to_destroy(unf_lport); - - return ret; - } - index++; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive unknown event(0x%x)", - unf_lport->port_id, events); - - unf_lport_ref_dec_to_destroy(unf_lport); - - return ret; -} - -void unf_port_mgmt_init(void) -{ - memset(&global_lport_mgr, 0, sizeof(struct unf_global_lport)); - - INIT_LIST_HEAD(&global_lport_mgr.lport_list_head); - - INIT_LIST_HEAD(&global_lport_mgr.intergrad_head); - - INIT_LIST_HEAD(&global_lport_mgr.destroy_list_head); - - INIT_LIST_HEAD(&global_lport_mgr.dirty_list_head); - - spin_lock_init(&global_lport_mgr.global_lport_list_lock); - - UNF_SET_NOMAL_MODE(global_lport_mgr.dft_mode); - - global_lport_mgr.start_work = true; -} - -void unf_port_mgmt_deinit(void) -{ - if (global_lport_mgr.lport_sum != 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]There are %u port pool memory giveaway", - global_lport_mgr.lport_sum); - } - - memset(&global_lport_mgr, 0, sizeof(struct unf_global_lport)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Common port manager exit succeed"); -} - -static void unf_port_register(struct unf_lport *lport) -{ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Register LPort(0x%p), port ID(0x%x).", lport, lport->port_id); - - /* Add to the global management linked list header */ - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - list_add_tail(&lport->entry_lport, &global_lport_mgr.lport_list_head); - global_lport_mgr.lport_sum++; - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); -} - -static void unf_port_unregister(struct unf_lport *lport) -{ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Unregister LPort(0x%p), port ID(0x%x).", lport, lport->port_id); - - /* Remove from the global management linked list header */ - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - list_del(&lport->entry_lport); - global_lport_mgr.lport_sum--; - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); -} - -int 
unf_port_start_work(struct unf_lport *lport) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - if (lport->start_work_state != UNF_START_WORK_STOP) { - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - return RETURN_OK; - } - lport->start_work_state = UNF_START_WORK_COMPLETE; - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - /* switch sfp to start work */ - (void)unf_port_switch(lport, true); - - return RETURN_OK; -} - -static u32 -unf_lport_init_lw_funop(struct unf_lport *lport, - struct unf_low_level_functioon_op *low_level_op) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(low_level_op, UNF_RETURN_ERROR); - - lport->port_id = low_level_op->lport_cfg_items.port_id; - lport->port_name = low_level_op->sys_port_name; - lport->node_name = low_level_op->sys_node_name; - lport->options = low_level_op->lport_cfg_items.port_mode; - lport->act_topo = UNF_ACT_TOP_UNKNOWN; - lport->max_ssq_num = low_level_op->support_max_ssq_num; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) .", lport->port_id); - - memcpy(&lport->low_level_func, low_level_op, sizeof(struct unf_low_level_functioon_op)); - - return RETURN_OK; -} - -void unf_lport_release_lw_funop(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - memset(&lport->low_level_func, 0, sizeof(struct unf_low_level_functioon_op)); - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE; -} - -struct unf_lport *unf_find_lport_by_scsi_hostid(u32 scsi_host_id) -{ - struct list_head *node = NULL, *next_node = NULL; - struct list_head *vp_node = NULL, *next_vp_node = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_lport *unf_vport = NULL; - ulong flags = 0; - ulong pool_flags = 0; - spinlock_t *vp_pool_lock = NULL; - spinlock_t *lport_list_lock = &global_lport_mgr.global_lport_list_lock; - - spin_lock_irqsave(lport_list_lock, flags); - list_for_each_safe(node, next_node, &global_lport_mgr.lport_list_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - vp_pool_lock = &unf_lport->vport_pool->vport_pool_lock; - if (scsi_host_id == UNF_GET_SCSI_HOST_ID(unf_lport->host_info.host)) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_lport; - } - - /* support NPIV */ - if (unf_lport->vport_pool) { - spin_lock_irqsave(vp_pool_lock, pool_flags); - list_for_each_safe(vp_node, next_vp_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(vp_node, struct unf_lport, entry_vport); - - if (scsi_host_id == - UNF_GET_SCSI_HOST_ID(unf_vport->host_info.host)) { - spin_unlock_irqrestore(vp_pool_lock, pool_flags); - spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_vport; - } - } - spin_unlock_irqrestore(vp_pool_lock, pool_flags); - } - } - - list_for_each_safe(node, next_node, &global_lport_mgr.intergrad_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - vp_pool_lock = &unf_lport->vport_pool->vport_pool_lock; - if (scsi_host_id == - UNF_GET_SCSI_HOST_ID(unf_lport->host_info.host)) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_lport; - } - - /* support NPIV */ - if (unf_lport->vport_pool) { - spin_lock_irqsave(vp_pool_lock, pool_flags); - list_for_each_safe(vp_node, next_vp_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(vp_node, struct unf_lport, entry_vport); - - if (scsi_host_id == - UNF_GET_SCSI_HOST_ID(unf_vport->host_info.host)) { - spin_unlock_irqrestore(vp_pool_lock, pool_flags); - 
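/*
 * Editor's note -- a reduced sketch of the table-driven event dispatch used
 * by unf_fc_port_link_event() with g_lport_action earlier in this file: a
 * static {event, handler} array is scanned linearly and the first matching
 * handler runs; an unmatched event falls through to a warn path. Enum
 * values and handler names below are illustrative, not the driver's.
 */
#include <stdio.h>

typedef unsigned int u32;
enum { EV_LINK_UP, EV_LINK_DOWN };

struct port_action {
        u32 event;
        u32 (*handler)(void *port, void *input);
};

static u32 on_link_up(void *port, void *input)
{
        (void)port; (void)input;
        puts("link up");
        return 0;
}

static u32 on_link_down(void *port, void *input)
{
        (void)port; (void)input;
        puts("link down");
        return 0;
}

static const struct port_action actions[] = {
        { EV_LINK_UP,   on_link_up },
        { EV_LINK_DOWN, on_link_down },
};

static u32 dispatch(u32 event, void *port, void *input)
{
        size_t i;

        for (i = 0; i < sizeof(actions) / sizeof(actions[0]); i++)
                if (actions[i].event == event)
                        return actions[i].handler(port, input);
        return (u32)-1; /* unknown event, mirrors the driver's warn path */
}

int main(void) { return (int)dispatch(EV_LINK_UP, NULL, NULL); }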
spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_vport; - } - } - spin_unlock_irqrestore(vp_pool_lock, pool_flags); - } - } - spin_unlock_irqrestore(lport_list_lock, flags); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Can not find port by scsi_host_id(0x%x), may be removing", - scsi_host_id); - - return NULL; -} - -u32 unf_init_scsi_id_table(struct unf_lport *lport) -{ - struct unf_rport_scsi_id_image *rport_scsi_id_image = NULL; - struct unf_wwpn_rport_info *wwpn_port_info = NULL; - u32 idx; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - rport_scsi_id_image = &lport->rport_scsi_table; - rport_scsi_id_image->max_scsi_id = UNF_MAX_SCSI_ID; - - /* If the number of remote connections supported by the L_Port is 0, an - * exception occurs - */ - if (rport_scsi_id_image->max_scsi_id == 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x), supported maximum login is zero.", lport->port_id); - - return UNF_RETURN_ERROR; - } - - rport_scsi_id_image->wwn_rport_info_table = - vmalloc(rport_scsi_id_image->max_scsi_id * sizeof(struct unf_wwpn_rport_info)); - if (!rport_scsi_id_image->wwn_rport_info_table) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can't allocate SCSI ID Table(0x%x).", - lport->port_id, rport_scsi_id_image->max_scsi_id); - - return UNF_RETURN_ERROR; - } - memset(rport_scsi_id_image->wwn_rport_info_table, 0, - rport_scsi_id_image->max_scsi_id * sizeof(struct unf_wwpn_rport_info)); - - wwpn_port_info = rport_scsi_id_image->wwn_rport_info_table; - - for (idx = 0; idx < rport_scsi_id_image->max_scsi_id; idx++) { - INIT_DELAYED_WORK(&wwpn_port_info->loss_tmo_work, unf_sesion_loss_timeout); - INIT_LIST_HEAD(&wwpn_port_info->fc_lun_list); - wwpn_port_info->lport = lport; - wwpn_port_info->target_id = INVALID_VALUE32; - wwpn_port_info++; - } - - spin_lock_init(&rport_scsi_id_image->scsi_image_table_lock); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) supported maximum login is %u.", - lport->port_id, rport_scsi_id_image->max_scsi_id); - - return RETURN_OK; -} - -void unf_destroy_scsi_id_table(struct unf_lport *lport) -{ - struct unf_rport_scsi_id_image *rport_scsi_id_image = NULL; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - u32 i = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - - rport_scsi_id_image = &lport->rport_scsi_table; - if (rport_scsi_id_image->wwn_rport_info_table) { - for (i = 0; i < UNF_MAX_SCSI_ID; i++) { - wwn_rport_info = &rport_scsi_id_image->wwn_rport_info_table[i]; - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), - (&wwn_rport_info->loss_tmo_work), - "loss tmo Timer work"); - if (wwn_rport_info->lun_qos_level) { - vfree(wwn_rport_info->lun_qos_level); - wwn_rport_info->lun_qos_level = NULL; - } - } - - if (ret) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x) cancel loss tmo work success", lport->port_id); - } - vfree(rport_scsi_id_image->wwn_rport_info_table); - rport_scsi_id_image->wwn_rport_info_table = NULL; - } - - rport_scsi_id_image->max_scsi_id = 0; - lport->destroy_step = UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE; -} - -static u32 unf_lport_init(struct unf_lport *lport, void *private_data, - struct unf_low_level_functioon_op *low_level_op) -{ - u32 ret = RETURN_OK; - char work_queue_name[13]; - - unf_init_port_parms(lport); - - /* Associating LPort with FCPort */ - lport->fc_port = private_data; - - /* VpIndx=0 is reserved for Lport, and rootLport points to its own */ - lport->vp_index = 0; - lport->root_lport = lport; - lport->chip_info = NULL; - 
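/*
 * Editor's note -- unf_init_scsi_id_table() above allocates one flat table
 * and walks it once so every slot starts with its back-pointer, work item
 * and an "invalid" target id. A minimal userspace sketch of that
 * allocate-then-initialize pass, with calloc standing in for
 * vmalloc+memset and the delayed-work setup omitted; struct and function
 * names are illustrative.
 */
#include <stdlib.h>

#define INVALID_VALUE32 0xffffffffu

struct wwpn_rport_info {
        void *lport;            /* back-pointer to the owning port */
        unsigned int target_id; /* INVALID until a WWPN is bound */
};

static struct wwpn_rport_info *alloc_scsi_id_table(void *lport, size_t n)
{
        struct wwpn_rport_info *tbl;
        size_t i;

        if (n == 0)                    /* the driver rejects a zero capacity */
                return NULL;
        tbl = calloc(n, sizeof(*tbl)); /* vmalloc + memset analogue */
        if (!tbl)
                return NULL;
        for (i = 0; i < n; i++) {      /* per-slot defaults, as in the code above */
                tbl[i].lport = lport;
                tbl[i].target_id = INVALID_VALUE32;
        }
        return tbl;
}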
- /* Initialize the units related to L_Port and lw func */ - ret = unf_lport_init_lw_funop(lport, low_level_op); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) initialize lowlevel function unsuccessful.", - lport->port_id); - - return ret; - } - - /* Init Linkevent workqueue */ - snprintf(work_queue_name, sizeof(work_queue_name), "%x_lkq", lport->port_id); - - lport->link_event_wq = create_singlethread_workqueue(work_queue_name); - if (!lport->link_event_wq) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]Port(0x%x) creat link event work queue failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - snprintf(work_queue_name, sizeof(work_queue_name), "%x_xchgwq", lport->port_id); - lport->xchg_wq = create_workqueue(work_queue_name); - if (!lport->xchg_wq) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]Port(0x%x) creat Exchg work queue failed", - lport->port_id); - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - return UNF_RETURN_ERROR; - } - /* scsi table (R_Port) required for initializing INI - * Initialize the scsi id Table table to manage the mapping between SCSI - * ID, WWN, and Rport. - */ - - ret = unf_init_scsi_id_table(lport); - if (ret != RETURN_OK) { - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - return ret; - } - - /* Initialize the EXCH resource */ - ret = unf_alloc_xchg_resource(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) can't allocate exchange resource.", lport->port_id); - - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - unf_destroy_scsi_id_table(lport); - - return ret; - } - - /* Initialize the ESGL resource pool used by Lport */ - ret = unf_init_esgl_pool(lport); - if (ret != RETURN_OK) { - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - unf_free_all_xchg_mgr(lport); - unf_destroy_scsi_id_table(lport); - - return ret; - } - /* Initialize the disc manager under Lport */ - ret = unf_init_disc_mgr(lport); - if (ret != RETURN_OK) { - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - unf_free_esgl_pool(lport); - unf_free_all_xchg_mgr(lport); - unf_destroy_scsi_id_table(lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) initialize discover manager unsuccessful.", - lport->port_id); - - return ret; - } - - /* Initialize the LPort manager */ - ret = unf_init_vport_mgr_temp(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) initialize RPort manager unsuccessful.", lport->port_id); - - goto RELEASE_LPORT; - } - - /* Initialize the EXCH manager */ - ret = unf_init_xchg_mgr_temp(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) initialize exchange manager unsuccessful.", - lport->port_id); - goto RELEASE_LPORT; - } - /* Initialize the resources required by the event 
processing center */ - ret = unf_init_event_center(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) initialize event center unsuccessful.", lport->port_id); - goto RELEASE_LPORT; - } - /* Initialize the initialization status of Lport */ - unf_set_lport_state(lport, UNF_LPORT_ST_INITIAL); - - /* Initialize the Lport route test case */ - ret = unf_init_lport_route(lport); - if (ret != RETURN_OK) { - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - (void)unf_event_center_destroy(lport); - unf_disc_mgr_destroy(lport); - unf_free_esgl_pool(lport); - unf_free_all_xchg_mgr(lport); - unf_destroy_scsi_id_table(lport); - - return ret; - } - /* Thesupports the initialization stepof the NPIV */ - ret = unf_init_vport_pool(lport); - if (ret != RETURN_OK) { - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - - unf_destroy_lport_route(lport); - (void)unf_event_center_destroy(lport); - unf_disc_mgr_destroy(lport); - unf_free_esgl_pool(lport); - unf_free_all_xchg_mgr(lport); - unf_destroy_scsi_id_table(lport); - - return ret; - } - - /* qualifier rport callback */ - lport->unf_qualify_rport = unf_rport_set_qualifier_key_reuse; - lport->unf_tmf_abnormal_recovery = unf_tmf_timeout_recovery_special; - return RETURN_OK; -RELEASE_LPORT: - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - - unf_disc_mgr_destroy(lport); - unf_free_esgl_pool(lport); - unf_free_all_xchg_mgr(lport); - unf_destroy_scsi_id_table(lport); - - return ret; -} - -void unf_free_qos_info(struct unf_lport *lport) -{ - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_qos_info *qos_info = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - - spin_lock_irqsave(&lport->qos_mgr_lock, flag); - list_for_each_safe(node, next_node, &lport->list_qos_head) { - qos_info = (struct unf_qos_info *)list_entry(node, - struct unf_qos_info, entry_qos_info); - list_del_init(&qos_info->entry_qos_info); - kfree(qos_info); - } - - spin_unlock_irqrestore(&lport->qos_mgr_lock, flag); -} - -u32 unf_lport_deinit(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - unf_free_qos_info(lport); - - unf_unregister_scsi_host(lport); - - /* If the card is unloaded normally, the thread is stopped once. The - * problem does not occur if you stop the thread again. - */ - unf_destroy_lport_route(lport); - - /* minus the reference count of the card event; the last port deletes - * the card thread - */ - unf_destroy_card_thread(lport); - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - (void)unf_event_center_destroy(lport); - unf_free_vport_pool(lport); - unf_xchg_mgr_destroy(lport); - - unf_free_esgl_pool(lport); - - /* reliability review :Disc should release after Xchg. 
Destroy the disc - * manager - */ - unf_disc_mgr_destroy(lport); - - unf_release_xchg_mgr_temp(lport); - - unf_release_vport_mgr_temp(lport); - - unf_destroy_scsi_id_table(lport); - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - - /* Releasing the lw Interface Template */ - unf_lport_release_lw_funop(lport); - lport->fc_port = NULL; - - return RETURN_OK; -} - -static int unf_card_event_process(void *arg) -{ - struct list_head *node = NULL; - struct unf_cm_event_report *event_node = NULL; - ulong flags = 0; - struct unf_chip_manage_info *chip_info = (struct unf_chip_manage_info *)arg; - - set_user_nice(current, UNF_OS_THRD_PRI_LOW); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Slot(%u) chip(0x%x) enter event thread.", - chip_info->slot_id, chip_info->chip_id); - - while (!kthread_should_stop()) { - if (chip_info->thread_exit) - break; - - spin_lock_irqsave(&chip_info->chip_event_list_lock, flags); - if (list_empty(&chip_info->list_head)) { - spin_unlock_irqrestore(&chip_info->chip_event_list_lock, flags); - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout((long)msecs_to_jiffies(UNF_S_TO_MS)); - } else { - node = UNF_OS_LIST_NEXT(&chip_info->list_head); - list_del_init(node); - chip_info->list_num--; - event_node = list_entry(node, struct unf_cm_event_report, list_entry); - spin_unlock_irqrestore(&chip_info->chip_event_list_lock, flags); - unf_handle_event(event_node); - } - } - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "Slot(%u) chip(0x%x) exit event thread.", - chip_info->slot_id, chip_info->chip_id); - - return RETURN_OK; -} - -static void unf_destroy_card_thread(struct unf_lport *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - struct unf_chip_manage_info *chip_info = NULL; - struct list_head *list = NULL; - struct list_head *list_tmp = NULL; - struct unf_cm_event_report *event_node = NULL; - ulong event_lock_flag = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - - /* If the thread cannot be found, apply for a new thread. */ - chip_info = lport->chip_info; - if (!chip_info) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) has no event thread.", lport->port_id); - return; - } - event_mgr = &lport->event_mgr; - - spin_lock_irqsave(&chip_info->chip_event_list_lock, flag); - if (!list_empty(&chip_info->list_head)) { - list_for_each_safe(list, list_tmp, &chip_info->list_head) { - event_node = list_entry(list, struct unf_cm_event_report, list_entry); - - /* The LPort under the global event node is null. */ - if (event_node->lport == lport) { - list_del_init(&event_node->list_entry); - if (event_node->event_asy_flag == UNF_EVENT_SYN) { - event_node->result = UNF_RETURN_ERROR; - complete(&event_node->event_comp); - } - - spin_lock_irqsave(&event_mgr->port_event_lock, event_lock_flag); - event_mgr->free_event_count++; - list_add_tail(&event_node->list_entry, &event_mgr->list_free_event); - spin_unlock_irqrestore(&event_mgr->port_event_lock, - event_lock_flag); - } - } - } - spin_unlock_irqrestore(&chip_info->chip_event_list_lock, flag); - - /* If the number of events introduced by the event thread is 0, - * it indicates that no interface is used. 
In this case, thread - * resources need to be consumed - */ - if (atomic_dec_and_test(&chip_info->ref_cnt)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) destroy slot(%u) chip(0x%x) event thread succeed.", - lport->port_id, chip_info->slot_id, chip_info->chip_id); - chip_info->thread_exit = true; - wake_up_process(chip_info->thread); - kthread_stop(chip_info->thread); - chip_info->thread = NULL; - - spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); - list_del_init(&chip_info->list_chip_thread_entry); - card_thread_mgr.card_num--; - spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); - - vfree(chip_info); - } - - lport->chip_info = NULL; -} - -static u32 unf_creat_card_thread(struct unf_lport *lport) -{ - ulong flag = 0; - struct unf_chip_manage_info *chip_manage_info = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* If the thread cannot be found, apply for a new thread. */ - chip_manage_info = (struct unf_chip_manage_info *) - vmalloc(sizeof(struct unf_chip_manage_info)); - if (!chip_manage_info) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) cannot allocate thread memory.", lport->port_id); - - return UNF_RETURN_ERROR; - } - memset(chip_manage_info, 0, sizeof(struct unf_chip_manage_info)); - - memcpy(&chip_manage_info->chip_info, &lport->low_level_func.chip_info, - sizeof(struct unf_chip_info)); - chip_manage_info->slot_id = UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(lport->port_id); - chip_manage_info->chip_id = lport->low_level_func.chip_id; - chip_manage_info->list_num = 0; - chip_manage_info->sfp_9545_fault = false; - chip_manage_info->sfp_power_fault = false; - atomic_set(&chip_manage_info->ref_cnt, 1); - atomic_set(&chip_manage_info->card_loop_test_flag, false); - spin_lock_init(&chip_manage_info->card_loop_back_state_lock); - INIT_LIST_HEAD(&chip_manage_info->list_head); - spin_lock_init(&chip_manage_info->chip_event_list_lock); - - chip_manage_info->thread_exit = false; - chip_manage_info->thread = kthread_create(unf_card_event_process, - chip_manage_info, "%x_et", lport->port_id); - - if (IS_ERR(chip_manage_info->thread) || !chip_manage_info->thread) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) creat event thread(0x%p) unsuccessful.", - lport->port_id, chip_manage_info->thread); - - vfree(chip_manage_info); - - return UNF_RETURN_ERROR; - } - - lport->chip_info = chip_manage_info; - wake_up_process(chip_manage_info->thread); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) creat slot(%u) chip(0x%x) event thread succeed.", - lport->port_id, chip_manage_info->slot_id, - chip_manage_info->chip_id); - - spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); - list_add_tail(&chip_manage_info->list_chip_thread_entry, &card_thread_mgr.card_list_head); - card_thread_mgr.card_num++; - spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); - - return RETURN_OK; -} - -static u32 unf_find_card_thread(struct unf_lport *lport) -{ - ulong flag = 0; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_chip_manage_info *chip_info = NULL; - u32 ret = UNF_RETURN_ERROR; - - spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); - list_for_each_safe(node, next_node, &card_thread_mgr.card_list_head) { - chip_info = list_entry(node, struct unf_chip_manage_info, list_chip_thread_entry); - - if (chip_info->chip_id == lport->low_level_func.chip_id && - chip_info->slot_id == - UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(lport->port_id)) { - 
atomic_inc(&chip_info->ref_cnt); - lport->chip_info = chip_info; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) find card(%u) chip(0x%x) event thread succeed.", - lport->port_id, chip_info->slot_id, chip_info->chip_id); - - spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); - - return RETURN_OK; - } - } - spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); - - ret = unf_creat_card_thread(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) creat event thread unsuccessful. Destroy LPort.", - lport->port_id); - - return UNF_RETURN_ERROR; - } else { - return RETURN_OK; - } -} - -void *unf_lport_create_and_init(void *private_data, struct unf_low_level_functioon_op *low_level_op) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = UNF_RETURN_ERROR; - - if (!private_data) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Private Data is NULL"); - - return NULL; - } - if (!low_level_op) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LowLevel port(0x%p) function is NULL", private_data); - - return NULL; - } - - /* 1. vmalloc & Memset L_Port */ - unf_lport = vmalloc(sizeof(struct unf_lport)); - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Alloc LPort memory failed."); - - return NULL; - } - memset(unf_lport, 0, sizeof(struct unf_lport)); - - /* 2. L_Port Init */ - if (unf_lport_init(unf_lport, private_data, low_level_op) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort initialize unsuccessful."); - - vfree(unf_lport); - - return NULL; - } - - /* 4. Get or Create Chip Thread - * Chip_ID & Slot_ID - */ - ret = unf_find_card_thread(unf_lport); - if (ret != RETURN_OK) { - (void)unf_lport_deinit(unf_lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) Find Chip thread unsuccessful. Destroy LPort.", - unf_lport->port_id); - - vfree(unf_lport); - return NULL; - } - - /* 5. Registers with in the port management global linked list */ - unf_port_register(unf_lport); - /* update WWN */ - if (unf_build_lport_wwn(unf_lport) != RETURN_OK) { - unf_port_unregister(unf_lport); - (void)unf_lport_deinit(unf_lport); - vfree(unf_lport); - return NULL; - } - - // unf_init_link_lose_tmo(unf_lport);//TO DO - - /* initialize Scsi Host */ - if (unf_register_scsi_host(unf_lport) != RETURN_OK) { - unf_port_unregister(unf_lport); - (void)unf_lport_deinit(unf_lport); - vfree(unf_lport); - return NULL; - } - /* 7. Here, start work now */ - if (global_lport_mgr.start_work) { - if (unf_port_start_work(unf_lport) != RETURN_OK) { - unf_port_unregister(unf_lport); - - (void)unf_lport_deinit(unf_lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) start work failed", unf_lport->port_id); - vfree(unf_lport); - return NULL; - } - } - - return unf_lport; -} - -static int unf_lport_destroy(void *lport, void *arg_out) -{ - struct unf_lport *unf_lport = NULL; - ulong flags = 0; - - if (!lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, "LPort is NULL."); - - return UNF_RETURN_ERROR; - } - - unf_lport = (struct unf_lport *)lport; - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "Destroy LPort(0x%p), ID(0x%x).", unf_lport, unf_lport->port_id); - /* NPIV Ensure that all Vport are deleted */ - unf_destroy_all_vports(unf_lport); - unf_lport->destroy_step = UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT; - - (void)unf_lport_deinit(lport); - - /* The port is removed from the destroy linked list. 
The next step is to - * release the memory - */ - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - list_del(&unf_lport->entry_lport); - - /* If the port has dirty memory, the port is mounted to the linked list - * of dirty ports - */ - if (unf_lport->dirty_flag) - list_add_tail(&unf_lport->entry_lport, &global_lport_mgr.dirty_list_head); - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - - if (unf_lport->lport_free_completion) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Complete LPort(0x%p), port ID(0x%x)'s Free Completion.", - unf_lport, unf_lport->port_id); - complete(unf_lport->lport_free_completion); - } else { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%p), port ID(0x%x)'s Free Completion is NULL.", - unf_lport, unf_lport->port_id); - dump_stack(); - } - - return RETURN_OK; -} - -static int unf_port_switch(struct unf_lport *lport, bool switch_flag) -{ - struct unf_lport *unf_lport = lport; - int ret = UNF_RETURN_ERROR; - bool flag = false; - - FC_CHECK_RETURN_VALUE(unf_lport, UNF_RETURN_ERROR); - - if (!unf_lport->low_level_func.port_mgr_op.ll_port_config_set) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_WARN, - "[warn]Port(0x%x)'s config(switch) function is NULL", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - flag = switch_flag ? true : false; - - ret = (int)unf_lport->low_level_func.port_mgr_op.ll_port_config_set(unf_lport->fc_port, - UNF_PORT_CFG_SET_PORT_SWITCH, (void *)&flag); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_WARN, "[warn]Port(0x%x) switch %s failed", - unf_lport->port_id, switch_flag ? "On" : "Off"); - - return UNF_RETURN_ERROR; - } - - unf_lport->switch_state = (bool)flag; - - return RETURN_OK; -} - -static int unf_send_event(u32 port_id, u32 syn_flag, void *argc_in, void *argc_out, - int (*func)(void *argc_in, void *argc_out)) -{ - struct unf_lport *lport = NULL; - struct unf_cm_event_report *event = NULL; - int ret = 0; - - lport = unf_find_lport_by_port_id(port_id); - if (!lport) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_INFO, "Cannot find LPort(0x%x).", port_id); - - return UNF_RETURN_ERROR; - } - - if (unf_lport_ref_inc(lport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "LPort(0x%x) is removing, no need process.", - lport->port_id); - - return UNF_RETURN_ERROR; - } - if (unlikely(!lport->event_mgr.unf_get_free_event_func || - !lport->event_mgr.unf_post_event_func || - !lport->event_mgr.unf_release_event)) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_MAJOR, "Event function is NULL."); - - unf_lport_ref_dec_to_destroy(lport); - - return UNF_RETURN_ERROR; - } - - if (lport->port_removing) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "LPort(0x%x) is removing, no need process.", - lport->port_id); - - unf_lport_ref_dec_to_destroy(lport); - - return UNF_RETURN_ERROR; - } - - event = lport->event_mgr.unf_get_free_event_func((void *)lport); - if (!event) { - unf_lport_ref_dec_to_destroy(lport); - - return UNF_RETURN_ERROR; - } - - init_completion(&event->event_comp); - event->lport = lport; - event->event_asy_flag = syn_flag; - event->unf_event_task = func; - event->para_in = argc_in; - event->para_out = argc_out; - lport->event_mgr.unf_post_event_func(lport, event); - - if (event->event_asy_flag) { - /* You must wait for the other party to return. Otherwise, the - * linked list may be in disorder. 
- */ - wait_for_completion(&event->event_comp); - ret = (int)event->result; - lport->event_mgr.unf_release_event(lport, event); - } else { - ret = RETURN_OK; - } - - unf_lport_ref_dec_to_destroy(lport); - return ret; -} - -static int unf_reset_port(void *arg_in, void *arg_out) -{ - struct unf_reset_port_argin *input = (struct unf_reset_port_argin *)arg_in; - struct unf_lport *lport = NULL; - u32 ret = UNF_RETURN_ERROR; - enum unf_port_config_state port_state = UNF_PORT_CONFIG_STATE_RESET; - - FC_CHECK_RETURN_VALUE(input, UNF_RETURN_ERROR); - - lport = unf_find_lport_by_port_id(input->port_id); - if (!lport) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_MAJOR, "Not find LPort(0x%x).", - input->port_id); - - return UNF_RETURN_ERROR; - } - - /* reset port */ - if (!lport->low_level_func.port_mgr_op.ll_port_config_set) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_MAJOR, - "Port(0x%x)'s corresponding function is NULL.", lport->port_id); - - return UNF_RETURN_ERROR; - } - - lport->act_topo = UNF_ACT_TOP_UNKNOWN; - lport->speed = UNF_PORT_SPEED_UNKNOWN; - lport->fabric_node_name = 0; - - ret = lport->low_level_func.port_mgr_op.ll_port_config_set(lport->fc_port, - UNF_PORT_CFG_SET_PORT_STATE, - (void *)&port_state); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_MAJOR, "Reset port(0x%x) unsuccessful.", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -int unf_cm_reset_port(u32 port_id) -{ - int ret = UNF_RETURN_ERROR; - - ret = unf_send_event(port_id, UNF_EVENT_SYN, (void *)&port_id, - (void *)NULL, unf_reset_port); - return ret; -} - -int unf_lport_reset_port(struct unf_lport *lport, u32 flag) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - return unf_send_event(lport->port_id, flag, (void *)&lport->port_id, - (void *)NULL, unf_reset_port); -} - -static inline u32 unf_get_loop_alpa(struct unf_lport *lport, void *loop_alpa) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport->low_level_func.port_mgr_op.ll_port_config_get, - UNF_RETURN_ERROR); - - ret = lport->low_level_func.port_mgr_op.ll_port_config_get(lport->fc_port, - UNF_PORT_CFG_GET_LOOP_ALPA, loop_alpa); - - return ret; -} - -static u32 unf_lport_enter_private_loop_login(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = lport; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_READY); /* LPort: LINK_UP --> READY */ - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - unf_lport_update_topo(unf_lport, UNF_ACT_TOP_PRIVATE_LOOP); - - /* NOP: check L_Port state */ - if (atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "[info]Port(0x%x) is NOP, do nothing", - unf_lport->port_id); - - return RETURN_OK; - } - - /* INI: check L_Port mode */ - if (UNF_PORT_MODE_INI != (unf_lport->options & UNF_PORT_MODE_INI)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) has no INI feature(0x%x), do nothing", - unf_lport->port_id, unf_lport->options); - - return RETURN_OK; - } - - if (unf_lport->disc.disc_temp.unf_disc_start) { - ret = unf_lport->disc.disc_temp.unf_disc_start(unf_lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with nportid(0x%x) start discovery failed", - unf_lport->port_id, unf_lport->nport_id); - } - } - - return ret; -} - -u32 unf_lport_login(struct unf_lport *lport, enum 
unf_act_topo act_topo) -{ - u32 loop_alpa = 0; - u32 ret = RETURN_OK; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* 1. Update (set) L_Port topo which get from low level */ - unf_lport_update_topo(lport, act_topo); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - - /* 2. Link state check */ - if (lport->link_up != UNF_PORT_LINK_UP) { - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with link_state(0x%x) port_state(0x%x) when login", - lport->port_id, lport->link_up, lport->states); - - return UNF_RETURN_ERROR; - } - - /* 3. Update L_Port state */ - unf_lport_state_ma(lport, UNF_EVENT_LPORT_LINK_UP); /* LPort: INITIAL --> LINK UP */ - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x) start to login with topology(0x%x)", - lport->port_id, lport->act_topo); - - /* 4. Start logoin */ - if (act_topo == UNF_TOP_P2P_MASK || - act_topo == UNF_ACT_TOP_P2P_FABRIC || - act_topo == UNF_ACT_TOP_P2P_DIRECT) { - /* P2P or Fabric mode */ - ret = unf_lport_enter_flogi(lport); - } else if (act_topo == UNF_ACT_TOP_PUBLIC_LOOP) { - /* Public loop */ - (void)unf_get_loop_alpa(lport, &loop_alpa); - - /* Before FLOGI ALPA just low 8 bit, after FLOGI ACC, switch - * will assign complete addresses - */ - spin_lock_irqsave(&lport->lport_state_lock, flag); - lport->nport_id = loop_alpa; - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - ret = unf_lport_enter_flogi(lport); - } else if (act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { - /* Private loop */ - (void)unf_get_loop_alpa(lport, &loop_alpa); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - lport->nport_id = loop_alpa; - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - ret = unf_lport_enter_private_loop_login(lport); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]LOGIN: Port(0x%x) login with unknown topology(0x%x)", - lport->port_id, lport->act_topo); - } - - return ret; -} - -static u32 unf_port_linkup(struct unf_lport *lport, void *input) -{ - struct unf_lport *unf_lport = lport; - u32 ret = RETURN_OK; - enum unf_act_topo act_topo = UNF_ACT_TOP_UNKNOWN; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* If NOP state, stop */ - if (atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Port(0x%x) is NOP and do nothing", unf_lport->port_id); - - return RETURN_OK; - } - - /* Update port state */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - unf_lport->link_up = UNF_PORT_LINK_UP; - unf_lport->speed = *((u32 *)input); - unf_set_lport_state(lport, UNF_LPORT_ST_INITIAL); /* INITIAL state */ - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - /* set hot pool wait state: so far, do not care */ - unf_set_hot_pool_wait_state(unf_lport, true); - - unf_lport->enhanced_features |= UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE; - - /* Get port active topopolgy (from low level) */ - if (!unf_lport->low_level_func.port_mgr_op.ll_port_config_get) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Port(0x%x) get topo function is NULL", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - ret = unf_lport->low_level_func.port_mgr_op.ll_port_config_get(unf_lport->fc_port, - UNF_PORT_CFG_GET_TOPO_ACT, (void *)&act_topo); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Port(0x%x) get topo from low level 
failed", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Start Login process */ - ret = unf_lport_login(unf_lport, act_topo); - - return ret; -} - -static u32 unf_port_linkdown(struct unf_lport *lport, void *input) -{ - ulong flag = 0; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - unf_lport = lport; - - /* To prevent repeated reporting linkdown */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - unf_lport->speed = UNF_PORT_SPEED_UNKNOWN; - unf_lport->act_topo = UNF_ACT_TOP_UNKNOWN; - if (unf_lport->link_up == UNF_PORT_LINK_DOWN) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - return RETURN_OK; - } - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_LINK_DOWN); - unf_reset_lport_params(unf_lport); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - unf_set_hot_pool_wait_state(unf_lport, false); - - /* - * clear I/O: - * 1. INI do ABORT only, - * 2. TGT need do source clear with Wait_IO - * * - * for INI: busy/delay/delay_transfer/wait - * Clean L_Port/V_Port Link Down I/O: only set ABORT tag - */ - unf_flush_disc_event(&unf_lport->disc, NULL); - - unf_clean_linkdown_io(unf_lport, false); - - /* for L_Port's R_Ports */ - unf_clean_linkdown_rport(unf_lport); - /* for L_Port's all Vports */ - unf_linkdown_all_vports(lport); - return RETURN_OK; -} - -static u32 unf_port_abnormal_reset(struct unf_lport *lport, void *input) -{ - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - unf_lport = lport; - - ret = (u32)unf_lport_reset_port(unf_lport, UNF_EVENT_ASYN); - - return ret; -} - -static u32 unf_port_reset_start(struct unf_lport *lport, void *input) -{ - u32 ret = RETURN_OK; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_set_lport_state(lport, UNF_LPORT_ST_RESET); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x) begin to reset.", lport->port_id); - - return ret; -} - -static u32 unf_port_reset_end(struct unf_lport *lport, void *input) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x) reset end.", lport->port_id); - - /* Task management command returns success and avoid repair measures - * case offline device - */ - unf_wake_up_scsi_task_cmnd(lport); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_set_lport_state(lport, UNF_LPORT_ST_INITIAL); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - return RETURN_OK; -} - -static u32 unf_port_nop(struct unf_lport *lport, void *input) -{ - struct unf_lport *unf_lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - unf_lport = lport; - - atomic_set(&unf_lport->lport_no_operate_flag, UNF_LPORT_NOP); - - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_LINK_DOWN); - unf_reset_lport_params(unf_lport); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - /* Set Tag prevent pending I/O to wait_list when close sfp failed */ - unf_set_hot_pool_wait_state(unf_lport, false); - - unf_flush_disc_event(&unf_lport->disc, NULL); - - /* L_Port/V_Port's I/O(s): Clean Link Down I/O: Set Abort Tag */ - unf_clean_linkdown_io(unf_lport, false); - - /* L_Port/V_Port's R_Port(s): report link down event to scsi & clear - * resource - */ - 
unf_clean_linkdown_rport(unf_lport); - unf_linkdown_all_vports(unf_lport); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) report NOP event done", unf_lport->nport_id); - - return RETURN_OK; -} - -static u32 unf_port_begin_remove(struct unf_lport *lport, void *input) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - /* Cancel route timer delay work */ - unf_destroy_lport_route(lport); - - return RETURN_OK; -} - -static u32 unf_get_pcie_link_state(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = lport; - bool linkstate = true; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(unf_lport->low_level_func.port_mgr_op.ll_port_config_get, - UNF_RETURN_ERROR); - - ret = unf_lport->low_level_func.port_mgr_op.ll_port_config_get(unf_lport->fc_port, - UNF_PORT_CFG_GET_PCIE_LINK_STATE, (void *)&linkstate); - if (ret != RETURN_OK || linkstate != true) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_KEVENT, "[err]Can't Get Pcie Link State"); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -void unf_root_lport_ref_dec(struct unf_lport *lport) -{ - ulong flags = 0; - ulong lport_flags = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%p) port_id(0x%x) reference count is %d", - lport, lport->port_id, atomic_read(&lport->port_ref_cnt)); - - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - spin_lock_irqsave(&lport->lport_state_lock, lport_flags); - if (atomic_dec_and_test(&lport->port_ref_cnt)) { - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - - list_del(&lport->entry_lport); - global_lport_mgr.lport_sum--; - - /* Put L_Port to destroy list for debugging */ - list_add_tail(&lport->entry_lport, &global_lport_mgr.destroy_list_head); - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - - ret = unf_schedule_global_event((void *)lport, UNF_GLOBAL_EVENT_ASYN, - unf_lport_destroy); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_CRITICAL, - "[warn]Schedule global event failed.
remain nodes(0x%x)", - global_event_queue.list_number); - } - } else { - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - } -} - -void unf_lport_ref_dec_to_destroy(struct unf_lport *lport) -{ - if (lport->root_lport != lport) - unf_vport_ref_dec(lport); - else - unf_root_lport_ref_dec(lport); -} - -void unf_lport_route_work(struct work_struct *work) -{ -#define UNF_MAX_PCIE_LINK_DOWN_TIMES 3 - struct unf_lport *unf_lport = NULL; - int ret = 0; - - FC_CHECK_RETURN_VOID(work); - - unf_lport = container_of(work, struct unf_lport, route_timer_work.work); - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_KEVENT, "[err]LPort is NULL"); - - return; - } - - if (unlikely(unf_lport->port_removing)) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_KEVENT, - "[warn]LPort(0x%x) route work is closing.", unf_lport->port_id); - - unf_lport_ref_dec_to_destroy(unf_lport); - - return; - } - - if (unlikely(unf_get_pcie_link_state(unf_lport))) - unf_lport->pcie_link_down_cnt++; - else - unf_lport->pcie_link_down_cnt = 0; - - if (unf_lport->pcie_link_down_cnt >= UNF_MAX_PCIE_LINK_DOWN_TIMES) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_KEVENT, - "[warn]LPort(0x%x) detected pcie linkdown, closing route work", - unf_lport->port_id); - unf_lport->pcie_link_down = true; - unf_free_lport_all_xchg(unf_lport); - unf_lport_ref_dec_to_destroy(unf_lport); - return; - } - - if (unlikely(UNF_LPORT_CHIP_ERROR(unf_lport))) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_KEVENT, - "[warn]LPort(0x%x) reported chip error, closing route work. ", - unf_lport->port_id); - - unf_lport_ref_dec_to_destroy(unf_lport); - - return; - } - - if (unf_lport->enhanced_features & - UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_KEVENT, - "[warn]User close LPort(0x%x) route work. 
", unf_lport->port_id); - - unf_lport_ref_dec_to_destroy(unf_lport); - - return; - } - - /* Scheduling 1 second */ - ret = queue_delayed_work(unf_wq, &unf_lport->route_timer_work, - (ulong)msecs_to_jiffies(UNF_LPORT_POLL_TIMER)); - if (ret == 0) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_KEVENT, - "[warn]LPort(0x%x) schedule work unsuccessful.", unf_lport->port_id); - - unf_lport_ref_dec_to_destroy(unf_lport); - } -} - -static int unf_cm_get_mac_adr(void *argc_in, void *argc_out) -{ - struct unf_lport *unf_lport = NULL; - struct unf_get_chip_info_argout *chip_info = NULL; - - FC_CHECK_RETURN_VALUE(argc_in, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(argc_out, UNF_RETURN_ERROR); - - unf_lport = (struct unf_lport *)argc_in; - chip_info = (struct unf_get_chip_info_argout *)argc_out; - - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_MAJOR, " LPort is null."); - - return UNF_RETURN_ERROR; - } - - if (!unf_lport->low_level_func.port_mgr_op.ll_port_config_get) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x)'s corresponding function is NULL.", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - if (unf_lport->low_level_func.port_mgr_op.ll_port_config_get(unf_lport->fc_port, - UNF_PORT_CFG_GET_MAC_ADDR, - chip_info) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) get .", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -int unf_build_sys_wwn(u32 port_id, u64 *sys_port_name, u64 *sys_node_name) -{ - struct unf_get_chip_info_argout wwn = {0}; - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE((sys_port_name), UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE((sys_node_name), UNF_RETURN_ERROR); - - unf_lport = unf_find_lport_by_port_id(port_id); - if (!unf_lport) - return UNF_RETURN_ERROR; - - ret = (u32)unf_send_event(unf_lport->port_id, UNF_EVENT_SYN, - (void *)unf_lport, (void *)&wwn, unf_cm_get_mac_adr); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "send event(port get mac adr) fail."); - return UNF_RETURN_ERROR; - } - - /* save card mode: UNF_FC_SERVER_BOARD_32_G(6):32G; - * UNF_FC_SERVER_BOARD_16_G(7):16G MODE - */ - unf_lport->card_type = wwn.board_type; - - /* update port max speed */ - if (wwn.board_type == UNF_FC_SERVER_BOARD_32_G) - unf_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_32_G; - else if (wwn.board_type == UNF_FC_SERVER_BOARD_16_G) - unf_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_16_G; - else if (wwn.board_type == UNF_FC_SERVER_BOARD_8_G) - unf_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_8_G; - else - unf_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_32_G; - - *sys_port_name = wwn.wwpn; - *sys_node_name = wwn.wwnn; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) Port Name(0x%llx), Node Name(0x%llx.)", - port_id, *sys_port_name, *sys_node_name); - - return RETURN_OK; -} - -static u32 unf_update_port_wwn(struct unf_lport *lport, - struct unf_port_wwn *port_wwn) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(port_wwn, UNF_RETURN_ERROR); - - /* Now notice lowlevel to update */ - if (!lport->low_level_func.port_mgr_op.ll_port_config_set) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x)'s corresponding function is NULL.", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - if (lport->low_level_func.port_mgr_op.ll_port_config_set(lport->fc_port, - UNF_PORT_CFG_UPDATE_WWN, - port_wwn) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - 
"Port(0x%x) update WWN unsuccessful.", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) update WWN: previous(0x%llx, 0x%llx), now(0x%llx, 0x%llx).", - lport->port_id, lport->port_name, lport->node_name, - port_wwn->sys_port_wwn, port_wwn->sys_node_name); - - lport->port_name = port_wwn->sys_port_wwn; - lport->node_name = port_wwn->sys_node_name; - - return RETURN_OK; -} - -static u32 unf_build_lport_wwn(struct unf_lport *lport) -{ - struct unf_port_wwn port_wwn = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (unf_build_sys_wwn(lport->port_id, &port_wwn.sys_port_wwn, - &port_wwn.sys_node_name) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) build WWN unsuccessful.", lport->port_id); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) build WWN succeed", lport->port_id); - - if (unf_update_port_wwn(lport, &port_wwn) != RETURN_OK) - return UNF_RETURN_ERROR; - - return RETURN_OK; -} - -u32 unf_port_release_rport_index(struct unf_lport *lport, void *input) -{ - u32 rport_index = INVALID_VALUE32; - ulong flag = 0; - struct unf_rport_pool *rport_pool = NULL; - struct unf_lport *unf_lport = NULL; - spinlock_t *rport_pool_lock = NULL; - - unf_lport = (struct unf_lport *)lport->root_lport; - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (input) { - rport_index = *(u32 *)input; - if (rport_index < lport->low_level_func.support_max_rport) { - rport_pool = &unf_lport->rport_pool; - rport_pool_lock = &rport_pool->rport_free_pool_lock; - spin_lock_irqsave(rport_pool_lock, flag); - if (test_bit((int)rport_index, rport_pool->rpi_bitmap)) { - clear_bit((int)rport_index, rport_pool->rpi_bitmap); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) try to release a free rport index(0x%x)", - lport->port_id, rport_index); - } - spin_unlock_irqrestore(rport_pool_lock, flag); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) try to release a not exist rport index(0x%x)", - lport->port_id, rport_index); - } - } - - return RETURN_OK; -} - -void *unf_lookup_lport_by_nportid(void *lport, u32 nport_id) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)lport; - unf_lport = unf_lport->root_lport; - vport_pool = unf_lport->vport_pool; - - if (unf_lport->nport_id == nport_id) - return unf_lport; - - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) vport pool is NULL", unf_lport->port_id); - - return NULL; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->nport_id == nport_id) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->nport_id == nport_id) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x) has no vport Nport ID(0x%x)", - 
unf_lport->port_id, nport_id); - - return NULL; -} - -int unf_get_link_lose_tmo(struct unf_lport *lport) -{ - u32 tmo_value = 0; - - if (!lport) - return UNF_LOSE_TMO; - - tmo_value = atomic_read(&lport->link_lose_tmo); - - if (!tmo_value) - tmo_value = UNF_LOSE_TMO; - - return (int)tmo_value; -} - -u32 unf_register_scsi_host(struct unf_lport *lport) -{ - struct unf_host_param host_param = {0}; - - struct Scsi_Host **scsi_host = NULL; - struct unf_lport_cfg_item *lport_cfg_items = NULL; - - FC_CHECK_RETURN_VALUE((lport), UNF_RETURN_ERROR); - - /* Point to -->> L_port->Scsi_host */ - scsi_host = &lport->host_info.host; - - lport_cfg_items = &lport->low_level_func.lport_cfg_items; - host_param.can_queue = (int)lport_cfg_items->max_queue_depth; - - /* Performance optimization */ - host_param.cmnd_per_lun = UNF_MAX_CMND_PER_LUN; - - host_param.sg_table_size = UNF_MAX_DMA_SEGS; - host_param.max_id = UNF_MAX_TARGET_NUMBER; - host_param.max_lun = UNF_DEFAULT_MAX_LUN; - host_param.max_channel = UNF_MAX_BUS_CHANNEL; - host_param.max_cmnd_len = UNF_MAX_SCSI_CMND_LEN; /* CDB-16 */ - host_param.dma_boundary = UNF_DMA_BOUNDARY; - host_param.max_sectors = UNF_MAX_SECTORS; - host_param.port_id = lport->port_id; - host_param.lport = lport; - host_param.pdev = &lport->low_level_func.dev->dev; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) allocate scsi host: can queue(%u), commands per LUN(%u), max lun(%u)", - lport->port_id, host_param.can_queue, host_param.cmnd_per_lun, - host_param.max_lun); - - if (unf_alloc_scsi_host(scsi_host, &host_param) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) allocate scsi host failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Port(0x%x) allocate scsi host(0x%x) succeed", - lport->port_id, UNF_GET_SCSI_HOST_ID(*scsi_host)); - - return RETURN_OK; -} - -void unf_unregister_scsi_host(struct unf_lport *lport) -{ - struct Scsi_Host *scsi_host = NULL; - u32 host_no = 0; - - FC_CHECK_RETURN_VOID(lport); - - scsi_host = lport->host_info.host; - - if (scsi_host) { - host_no = UNF_GET_SCSI_HOST_ID(scsi_host); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[event]Port(0x%x) starting to unregister scsi host(0x%x)", - lport->port_id, host_no); - unf_free_scsi_host(scsi_host); - /* can't set scsi_host to NULL, since it is not allocated here */ - } else { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[warn]Port(0x%x) unregister scsi host, invalid scsi_host ", - lport->port_id); - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[event]Port(0x%x) unregister scsi host(0x%x) succeed", - lport->port_id, host_no); - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST; -} diff --git a/drivers/scsi/spfc/common/unf_portman.h b/drivers/scsi/spfc/common/unf_portman.h deleted file mode 100644 index 4ad93d32bcaa436ce4c139f06dbc2e7d9e3c6020..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_portman.h +++ /dev/null @@ -1,96 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_PORTMAN_H -#define UNF_PORTMAN_H - -#include "unf_type.h" -#include "unf_lport.h" - -#define UNF_LPORT_POLL_TIMER ((u32)(1 * 1000)) -#define UNF_TX_CREDIT_REG_32_G 0x2289420 -#define UNF_RX_CREDIT_REG_32_G 0x228950c -#define UNF_CREDIT_REG_16_G 0x2283418 -#define UNF_PORT_OFFSET_BASE 0x10000 -#define UNF_CREDIT_EMU_VALUE 0x20 -#define UNF_CREDIT_VALUE_32_G 0x8 -#define UNF_CREDIT_VALUE_16_G 0x8000000080008 -
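UNF_LPORT_POLL_TIMER above is the 1-second period with which unf_lport_route_work() (deleted earlier in this hunk) re-queues itself. As a minimal, self-contained sketch of that self-rescheduling delayed-work pattern (poll_ctx, poll_fn and POLL_MS are hypothetical names, not part of this driver; system_wq, INIT_DELAYED_WORK and queue_delayed_work are the standard kernel APIs the driver also uses):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define POLL_MS 1000 /* mirrors UNF_LPORT_POLL_TIMER */

struct poll_ctx {
        struct delayed_work work;
        bool stop; /* stands in for the PCIe-linkdown/chip-error/user-close checks */
};

static void poll_fn(struct work_struct *work)
{
        struct poll_ctx *ctx = container_of(work, struct poll_ctx, work.work);

        if (ctx->stop)
                return; /* the real handler also drops its L_Port reference here */

        /* ... periodic health checks ... */

        queue_delayed_work(system_wq, &ctx->work, msecs_to_jiffies(POLL_MS));
}

A user would arm this once with INIT_DELAYED_WORK(&ctx->work, poll_fn) followed by a first queue_delayed_work() call; as in unf_lport_route_work(), every exit path that stops re-queueing must also release whatever reference keeps the context alive, which is the unf_lport_ref_dec_to_destroy() discipline visible above.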
-struct unf_nportid_map { - u32 sid; - u32 did; - void *rport[1024]; - void *lport; -}; - -struct unf_global_card_thread { - struct list_head card_list_head; - spinlock_t global_card_list_lock; - u32 card_num; -}; - -/* Global L_Port MG,manage all L_Port */ -struct unf_global_lport { - struct list_head lport_list_head; - - /* Temporary list,used in hold list traverse */ - struct list_head intergrad_head; - - /* destroy list,used in card remove */ - struct list_head destroy_list_head; - - /* Dirty list,abnormal port */ - struct list_head dirty_list_head; - spinlock_t global_lport_list_lock; - u32 lport_sum; - u8 dft_mode; - bool start_work; -}; - -struct unf_port_action { - u32 action; - u32 (*unf_action)(struct unf_lport *lport, void *input); -}; - -struct unf_reset_port_argin { - u32 port_id; -}; - -extern struct unf_global_lport global_lport_mgr; -extern struct unf_global_card_thread card_thread_mgr; -extern struct workqueue_struct *unf_wq; - -struct unf_lport *unf_find_lport_by_port_id(u32 port_id); -struct unf_lport *unf_find_lport_by_scsi_hostid(u32 scsi_host_id); -void * -unf_lport_create_and_init(void *private_data, - struct unf_low_level_functioon_op *low_level_op); -u32 unf_fc_port_link_event(void *lport, u32 events, void *input); -u32 unf_release_local_port(void *lport); -void unf_lport_route_work(struct work_struct *work); -void unf_lport_update_topo(struct unf_lport *lport, - enum unf_act_topo active_topo); -void unf_lport_ref_dec(struct unf_lport *lport); -u32 unf_lport_ref_inc(struct unf_lport *lport); -void unf_lport_ref_dec_to_destroy(struct unf_lport *lport); -void unf_port_mgmt_deinit(void); -void unf_port_mgmt_init(void); -void unf_show_dirty_port(bool show_only, u32 *dirty_port_num); -void *unf_lookup_lport_by_nportid(void *lport, u32 nport_id); -u32 unf_is_lport_valid(struct unf_lport *lport); -int unf_lport_reset_port(struct unf_lport *lport, u32 flag); -int unf_cm_ops_handle(u32 type, void **arg_in); -u32 unf_register_scsi_host(struct unf_lport *lport); -void unf_unregister_scsi_host(struct unf_lport *lport); -void unf_destroy_scsi_id_table(struct unf_lport *lport); -u32 unf_lport_login(struct unf_lport *lport, enum unf_act_topo act_topo); -u32 unf_init_scsi_id_table(struct unf_lport *lport); -void unf_set_lport_removing(struct unf_lport *lport); -void unf_lport_release_lw_funop(struct unf_lport *lport); -void unf_show_all_rport(struct unf_lport *lport); -void unf_disc_state_ma(struct unf_lport *lport, enum unf_disc_event evnet); -int unf_get_link_lose_tmo(struct unf_lport *lport); -u32 unf_port_release_rport_index(struct unf_lport *lport, void *input); -int unf_cm_reset_port(u32 port_id); - -#endif diff --git a/drivers/scsi/spfc/common/unf_rport.c b/drivers/scsi/spfc/common/unf_rport.c deleted file mode 100644 index aa4967fc0ab6fb8d2c59c1ae88847ebb969e00dc..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_rport.c +++ /dev/null @@ -1,2286 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_rport.h" -#include "unf_log.h" -#include "unf_exchg.h" -#include "unf_ls.h" -#include "unf_service.h" -#include "unf_portman.h" - -/* rport state:ready --->>> link_down --->>> closing --->>> timeout --->>> delete */ -struct unf_rport_feature_pool *port_feature_pool; - -void unf_sesion_loss_timeout(struct work_struct *work) -{ - struct unf_wwpn_rport_info *wwpn_rport_info = NULL; - - FC_CHECK_RETURN_VOID(work); - - wwpn_rport_info = container_of(work, struct unf_wwpn_rport_info, 
loss_tmo_work.work); - if (unlikely(!wwpn_rport_info)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]wwpn_rport_info is NULL"); - return; - } - - atomic_set(&wwpn_rport_info->scsi_state, UNF_SCSI_ST_DEAD); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) wwpn(0x%llx) set target(0x%x) scsi state to dead", - ((struct unf_lport *)(wwpn_rport_info->lport))->port_id, - wwpn_rport_info->wwpn, wwpn_rport_info->target_id); -} - -u32 unf_alloc_scsi_id(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_rport_scsi_id_image *rport_scsi_table = NULL; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - ulong flags = 0; - u32 index = 0; - u32 ret = UNF_RETURN_ERROR; - spinlock_t *rport_scsi_tb_lock = NULL; - - rport_scsi_table = &lport->rport_scsi_table; - rport_scsi_tb_lock = &rport_scsi_table->scsi_image_table_lock; - spin_lock_irqsave(rport_scsi_tb_lock, flags); - - /* 1. At first, existence check */ - for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[index]; - if (rport->port_name == wwn_rport_info->wwpn) { - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), - (&wwn_rport_info->loss_tmo_work), - "loss tmo Timer work"); - - /* Plug case: reuse again */ - spin_lock_irqsave(rport_scsi_tb_lock, flags); - wwn_rport_info->rport = rport; - wwn_rport_info->las_ten_scsi_state = - atomic_read(&wwn_rport_info->scsi_state); - atomic_set(&wwn_rport_info->scsi_state, UNF_SCSI_ST_ONLINE); - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) find the same scsi_id(0x%x) by wwpn(0x%llx) RPort(%p) N_Port_ID(0x%x)", - lport->port_id, index, wwn_rport_info->wwpn, rport, - rport->nport_id); - - atomic_inc(&lport->resume_scsi_id); - goto find; - } - } - - /* 2. Alloc new SCSI ID */ - for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[index]; - if (wwn_rport_info->wwpn == INVALID_WWPN) { - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), - (&wwn_rport_info->loss_tmo_work), - "loss tmo Timer work"); - /* Use the free space */ - spin_lock_irqsave(rport_scsi_tb_lock, flags); - wwn_rport_info->rport = rport; - wwn_rport_info->wwpn = rport->port_name; - wwn_rport_info->las_ten_scsi_state = - atomic_read(&wwn_rport_info->scsi_state); - atomic_set(&wwn_rport_info->scsi_state, UNF_SCSI_ST_ONLINE); - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) alloc new scsi_id(0x%x) by wwpn(0x%llx) RPort(%p) N_Port_ID(0x%x)", - lport->port_id, index, wwn_rport_info->wwpn, rport, - rport->nport_id); - - atomic_inc(&lport->alloc_scsi_id); - goto find; - } - } - - /* 3.
Reuse space that has been used */ - for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[index]; - if (atomic_read(&wwn_rport_info->scsi_state) == UNF_SCSI_ST_DEAD) { - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), - (&wwn_rport_info->loss_tmo_work), - "loss tmo Timer work"); - - spin_lock_irqsave(rport_scsi_tb_lock, flags); - if (wwn_rport_info->dfx_counter) { - memset(wwn_rport_info->dfx_counter, 0, - sizeof(struct unf_wwpn_dfx_counter_info)); - } - if (wwn_rport_info->lun_qos_level) { - memset(wwn_rport_info->lun_qos_level, 0, - sizeof(u8) * UNF_MAX_LUN_PER_TARGET); - } - wwn_rport_info->rport = rport; - wwn_rport_info->wwpn = rport->port_name; - wwn_rport_info->las_ten_scsi_state = - atomic_read(&wwn_rport_info->scsi_state); - atomic_set(&wwn_rport_info->scsi_state, UNF_SCSI_ST_ONLINE); - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[info]Port(0x%x) reuse a dead scsi_id(0x%x) by wwpn(0x%llx) RPort(%p) N_Port_ID(0x%x)", - lport->port_id, index, wwn_rport_info->wwpn, rport, - rport->nport_id); - - atomic_inc(&lport->reuse_scsi_id); - goto find; - } - } - - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has no free scsi_id, max_value(0x%x)", - lport->port_id, index); - - return INVALID_VALUE32; - -find: - if (!wwn_rport_info->dfx_counter) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) allocate Rport(0x%x) DFX buffer", - lport->port_id, wwn_rport_info->rport->nport_id); - wwn_rport_info->dfx_counter = vmalloc(sizeof(struct unf_wwpn_dfx_counter_info)); - if (!wwn_rport_info->dfx_counter) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) allocate DFX buffer fail", - lport->port_id); - - return INVALID_VALUE32; - } - - memset(wwn_rport_info->dfx_counter, 0, sizeof(struct unf_wwpn_dfx_counter_info)); - } - - return index; -} - -u32 unf_get_scsi_id_by_wwpn(struct unf_lport *lport, u64 wwpn) -{ - struct unf_rport_scsi_id_image *rport_scsi_table = NULL; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - ulong flags = 0; - u32 index = 0; - spinlock_t *rport_scsi_tb_lock = NULL; - - FC_CHECK_RETURN_VALUE(lport, INVALID_VALUE32); - rport_scsi_table = &lport->rport_scsi_table; - rport_scsi_tb_lock = &rport_scsi_table->scsi_image_table_lock; - - if (wwpn == 0) - return INVALID_VALUE32; - - spin_lock_irqsave(rport_scsi_tb_lock, flags); - - for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[index]; - if (wwn_rport_info->wwpn == wwpn) { - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - return index; - } - } - - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - return INVALID_VALUE32; -} - -void unf_set_device_state(struct unf_lport *lport, u32 scsi_id, int scsi_state) -{ - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - struct unf_wwpn_rport_info *wwpn_rport_info = NULL; - - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) RPort scsi_id(0x%x) is bigger than 0x%x", - lport->port_id, scsi_id, UNF_MAX_SCSI_ID); - return; - } - - scsi_image_table = &lport->rport_scsi_table; - wwpn_rport_info = &scsi_image_table->wwn_rport_info_table[scsi_id]; - atomic_set(&wwpn_rport_info->scsi_state, scsi_state); -} - -void unf_rport_linkdown(struct unf_lport *lport, struct unf_rport *rport) -{
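- /* A link-down event only drives the R_Port state machine toward the closing state here; the actual delete/release is deferred to the closing work (see unf_rport_closing_timeout() below). */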
- /* - * 1. port_logout - * 2. rcvd_rscn_port_not_in_disc - * 3. each_rport_after_rscn - * 4. rcvd_gpnid_rjt - * 5. rport_after_logout(rport is fabric port) - */ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - /* 1. Update R_Port state: Link Down Event --->>> closing state */ - spin_lock_irqsave(&rport->rport_state_lock, flag); - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* 2. Port enter closing (then enter to Delete) process */ - unf_rport_enter_closing(rport); -} - -static struct unf_rport *unf_rport_is_changed(struct unf_lport *lport, - struct unf_rport *rport, u32 sid) -{ - if (rport) { - /* S_ID or D_ID has been changed */ - if (rport->nport_id != sid || rport->local_nport_id != lport->nport_id) { - /* 1. Swap case: (SID or DID changed): Report link down - * & delete immediately - */ - unf_rport_immediate_link_down(lport, rport); - return NULL; - } - } - - return rport; -} - -struct unf_rport *unf_rport_set_qualifier_key_reuse(struct unf_lport *lport, - struct unf_rport *rport_by_nport_id, - struct unf_rport *rport_by_wwpn, - u64 wwpn, u32 sid) -{ - /* Used for SPFC Chip */ - struct unf_rport *rport = NULL; - struct unf_rport *rporta = NULL; - struct unf_rport *rportb = NULL; - bool wwpn_flag = false; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - /* About R_Port by N_Port_ID */ - rporta = unf_rport_is_changed(lport, rport_by_nport_id, sid); - - /* About R_Port by WWpn */ - rportb = unf_rport_is_changed(lport, rport_by_wwpn, sid); - - if (!rporta && !rportb) { - return NULL; - } else if (!rporta && rportb) { - /* 3. Plug case: reuse again */ - rport = rportb; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by wwpn", - lport->port_id, rport, rport->port_name, - rport->nport_id, rport->local_nport_id); - - return rport; - } else if (rporta && !rportb) { - wwpn_flag = (rporta->port_name != wwpn && rporta->port_name != 0 && - rporta->port_name != INVALID_VALUE64); - if (wwpn_flag) { - /* 4. WWPN changed: Report link down & delete - * immediately - */ - unf_rport_immediate_link_down(lport, rporta); - return NULL; - } - - /* Update WWPN */ - rporta->port_name = wwpn; - rport = rporta; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by N_Port_ID", - lport->port_id, rport, rport->port_name, - rport->nport_id, rport->local_nport_id); - - return rport; - } - - /* 5. Case for A == B && A != NULL && B != NULL */ - if (rportb == rporta) { - rport = rporta; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) find the same RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x)", - lport->port_id, rport, rport->port_name, rport->nport_id, - rport->local_nport_id); - - return rport; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) found two duplicate logins. RPort(A:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x) RPort(B:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x)", - lport->port_id, rporta, rporta->port_name, rporta->nport_id, - rporta->local_nport_id, rportb, rportb->port_name, rportb->nport_id, - rportb->local_nport_id); - - /* 6.
Case for A != B && A != NULL && B != NULL: Immediate - * Report && Deletion - */ - unf_rport_immediate_link_down(lport, rporta); - unf_rport_immediate_link_down(lport, rportb); - - return NULL; -} - -struct unf_rport *unf_find_valid_rport(struct unf_lport *lport, u64 wwpn, u32 sid) -{ - struct unf_rport *rport = NULL; - struct unf_rport *rport_by_nport_id = NULL; - struct unf_rport *rport_by_wwpn = NULL; - ulong flags = 0; - spinlock_t *rport_state_lock = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(lport->unf_qualify_rport, NULL); - - /* Get R_Port by WWN & N_Port_ID */ - rport_by_nport_id = unf_get_rport_by_nport_id(lport, sid); - rport_by_wwpn = unf_get_rport_by_wwn(lport, wwpn); - - /* R_Port check: by WWPN (take the state lock only when the lookup succeeded) */ - if (rport_by_wwpn) { - rport_state_lock = &rport_by_wwpn->rport_state_lock; - spin_lock_irqsave(rport_state_lock, flags); - if (rport_by_wwpn->nport_id == UNF_FC_FID_FLOGI) { - spin_unlock_irqrestore(rport_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[err]Port(0x%x) RPort(0x%p) find by WWPN(0x%llx) is invalid", - lport->port_id, rport_by_wwpn, wwpn); - - rport_by_wwpn = NULL; - } else { - spin_unlock_irqrestore(rport_state_lock, flags); - } - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%p) find by N_Port_ID(0x%x) and RPort(0x%p) by WWPN(0x%llx)", - lport->port_id, lport->nport_id, rport_by_nport_id, sid, rport_by_wwpn, wwpn); - - /* R_Port validity check: get by WWPN & N_Port_ID */ - rport = lport->unf_qualify_rport(lport, rport_by_nport_id, - rport_by_wwpn, wwpn, sid); - - return rport; -} - -void unf_rport_delay_login(struct unf_rport *rport) -{ - FC_CHECK_RETURN_VOID(rport); - - /* Do R_Port recovery: PLOGI or PRLI or LOGO */ - unf_rport_error_recovery(rport); -} - -void unf_rport_enter_logo(struct unf_lport *lport, struct unf_rport *rport) -{ - /* - * 1. TMF/ABTS timeout recovery :Y - * 2. L_Port error recovery --->>> larger than retry_count :Y - * 3. R_Port error recovery --->>> larger than retry_count :Y - * 4. Check PLOGI parameter --->>> parameter is error :Y - * 5. PRLI handler --->>> R_Port state is error :Y - * 6. PDISC handler --->>> R_Port state is not PRLI_WAIT :Y - * 7. ADISC handler --->>> R_Port state is not PRLI_WAIT :Y - * 8. PLOGI wait timeout with R_PORT is INI mode :Y - * 9. RCVD GFFID_RJT --->>> R_Port state is INIT :Y - * 10. RCVD GPNID_ACC --->>> R_Port state is error :Y - * 11. Private Loop mode with LOGO case :Y - * 12. P2P mode with LOGO case :Y - * 13. Fabric mode with LOGO case :Y - * 14. RCVD PRLI_ACC with R_Port is INI :Y - * 15. TGT RCVD BLS_REQ with session is error :Y - */ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flags); - - if (rport->rp_state == UNF_RPORT_ST_CLOSING || - rport->rp_state == UNF_RPORT_ST_DELETE) { - /* 1. Already within Closing or Delete: Do nothing */ - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - return; - } else if (rport->rp_state == UNF_RPORT_ST_LOGO) { - /* 2. Update R_Port state: Normal Enter Event --->>> closing - * state - */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_NORMAL_ENTER); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - /* Send Logo if necessary */ - if (unf_send_logo(lport, rport) != RETURN_OK) - unf_rport_enter_closing(rport); - } else { - /* 3.
Update R_Port state: Link Down Event --->>> closing state - */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - unf_rport_enter_closing(rport); - } -} - -u32 unf_free_scsi_id(struct unf_lport *lport, u32 scsi_id) -{ - ulong flags = 0; - struct unf_rport_scsi_id_image *rport_scsi_table = NULL; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - spinlock_t *rport_scsi_tb_lock = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (unlikely(lport->port_removing)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) is removing and do nothing", - lport->port_id, lport->nport_id); - - return UNF_RETURN_ERROR; - } - - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x_0x%x) scsi_id(0x%x) is bigger than %d", - lport->port_id, lport->nport_id, scsi_id, UNF_MAX_SCSI_ID); - - return UNF_RETURN_ERROR; - } - - rport_scsi_table = &lport->rport_scsi_table; - rport_scsi_tb_lock = &rport_scsi_table->scsi_image_table_lock; - if (rport_scsi_table->wwn_rport_info_table) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[warn]Port(0x%x_0x%x) RPort(0x%p) free scsi_id(0x%x) wwpn(0x%llx) target_id(0x%x) succeed", - lport->port_id, lport->nport_id, - rport_scsi_table->wwn_rport_info_table[scsi_id].rport, - scsi_id, rport_scsi_table->wwn_rport_info_table[scsi_id].wwpn, - rport_scsi_table->wwn_rport_info_table[scsi_id].target_id); - - spin_lock_irqsave(rport_scsi_tb_lock, flags); - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; - if (wwn_rport_info->rport) { - wwn_rport_info->rport->rport = NULL; - wwn_rport_info->rport = NULL; - } - wwn_rport_info->target_id = INVALID_VALUE32; - atomic_set(&wwn_rport_info->scsi_state, UNF_SCSI_ST_DEAD); - - /* NOTE: remain WWPN/Port_Name unchanged(un-cleared) */ - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - return RETURN_OK; - } - - return UNF_RETURN_ERROR; -} - -static void unf_report_ini_linkwown_event(struct unf_lport *lport, struct unf_rport *rport) -{ - u32 scsi_id = 0; - struct fc_rport *unf_rport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - /* - * 1. set local device(rport/rport_info_table) state - * -------------------------------------------------OFF_LINE - * * - * about rport->scsi_id - * valid during rport link up to link down - */ - - spin_lock_irqsave(&rport->rport_state_lock, flag); - scsi_id = rport->scsi_id; - unf_set_device_state(lport, scsi_id, UNF_SCSI_ST_OFFLINE); - - /* 2. 
delete scsi's rport */ - unf_rport = (struct fc_rport *)rport->rport; - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - if (unf_rport) { - fc_remote_port_delete(unf_rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x_0x%x) delete RPort(0x%x) wwpn(0x%llx) scsi_id(0x%x) succeed", - lport->port_id, lport->nport_id, rport->nport_id, - rport->port_name, scsi_id); - - atomic_inc(&lport->scsi_session_del_success); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[warn]Port(0x%x_0x%x) delete RPort(0x%x_0x%p) failed", - lport->port_id, lport->nport_id, rport->nport_id, rport); - } -} - -static void unf_report_ini_linkup_event(struct unf_lport *lport, struct unf_rport *rport) -{ - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[event]Port(0x%x) RPort(0x%x_0x%p) put INI link up work(%p) to work_queue", - lport->port_id, rport->nport_id, rport, &rport->start_work); - - if (unlikely(!queue_work(lport->link_event_wq, &rport->start_work))) { - atomic_inc(&lport->add_start_work_failed); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]Port(0x%x) RPort(0x%x_0x%p) put INI link up to work_queue failed", - lport->port_id, rport->nport_id, rport); - } -} - -void unf_update_lport_state_by_linkup_event(struct unf_lport *lport, - struct unf_rport *rport, - u32 rport_att) -{ - /* Report R_Port Link Up/Down Event */ - ulong flag = 0; - enum unf_port_state lport_state = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - - /* 1. R_Port does not have TGT mode any more */ - if (((rport_att & UNF_FC4_FRAME_PARM_3_TGT) == 0) && - rport->lport_ini_state == UNF_PORT_STATE_LINKUP) { - rport->last_lport_ini_state = rport->lport_ini_state; - rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) does not have TGT attribute(0x%x) any more", - lport->port_id, rport->nport_id, rport_att); - } - - /* 2. R_Port with TGT mode, L_Port with INI mode */ - if ((rport_att & UNF_FC4_FRAME_PARM_3_TGT) && - (lport->options & UNF_FC4_FRAME_PARM_3_INI)) { - rport->last_lport_ini_state = rport->lport_ini_state; - rport->lport_ini_state = UNF_PORT_STATE_LINKUP; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[warn]Port(0x%x) update INI state with last(0x%x) and now(0x%x)", - lport->port_id, rport->last_lport_ini_state, - rport->lport_ini_state); - } - - /* 3.
Report L_Port INI/TGT Down/Up event to SCSI */ - if (rport->last_lport_ini_state == rport->lport_ini_state) { - if (rport->nport_id < UNF_FC_FID_DOM_MGR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed", - lport->port_id, rport->nport_id, rport, - rport->lport_ini_state); - } - - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - return; - } - - lport_state = rport->lport_ini_state; - - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - switch (lport_state) { - case UNF_PORT_STATE_LINKDOWN: - unf_report_ini_linkwown_event(lport, rport); - break; - case UNF_PORT_STATE_LINKUP: - unf_report_ini_linkup_event(lport, rport); - break; - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with unknown link status(0x%x)", - lport->port_id, rport->lport_ini_state); - break; - } -} - -static void unf_rport_callback(void *rport, void *lport, u32 result) -{ - struct unf_rport *unf_rport = NULL; - struct unf_lport *unf_lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(lport); - unf_rport = (struct unf_rport *)rport; - unf_lport = (struct unf_lport *)lport; - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->last_lport_ini_state = unf_rport->lport_ini_state; - unf_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; - unf_rport->last_lport_tgt_state = unf_rport->lport_tgt_state; - unf_rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; - - /* Report R_Port Link Down Event to scsi */ - if (unf_rport->last_lport_ini_state == unf_rport->lport_ini_state) { - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed", - unf_lport->port_id, unf_rport->nport_id, - unf_rport, unf_rport->lport_ini_state); - } - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - return; - } - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - unf_report_ini_linkwown_event(unf_lport, unf_rport); -} - -static void unf_rport_recovery_timeout(struct work_struct *work) -{ - struct unf_lport *lport = NULL; - struct unf_rport *rport = NULL; - u32 ret = RETURN_OK; - ulong flag = 0; - enum unf_rport_login_state rp_state = UNF_RPORT_ST_INIT; - - FC_CHECK_RETURN_VOID(work); - - rport = container_of(work, struct unf_rport, recovery_work.work); - if (unlikely(!rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort is NULL"); - - return; - } - - lport = rport->lport; - if (unlikely(!lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort(0x%x) Port is NULL", rport->nport_id); - - /* for timer */ - unf_rport_ref_dec(rport); - return; - } - - spin_lock_irqsave(&rport->rport_state_lock, flag); - rp_state = rport->rp_state; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%x) state(0x%x) recovery timer timeout", - lport->port_id, lport->nport_id, rport->nport_id, rp_state); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - switch (rp_state) { - case UNF_RPORT_ST_PLOGI_WAIT: - if ((lport->act_topo == UNF_ACT_TOP_P2P_DIRECT && - lport->port_name > rport->port_name) || - lport->act_topo != UNF_ACT_TOP_P2P_DIRECT) { - /* P2P: Name is master with P2P_D - * or has INI Mode - */ - ret = unf_send_plogi(rport->lport, rport); - } - break; - case UNF_RPORT_ST_PRLI_WAIT: - ret = unf_send_prli(rport->lport, rport, ELS_PRLI); - if (ret != RETURN_OK) - unf_rport_error_recovery(rport); - fallthrough; - 
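- /* Note: when unf_send_prli() fails, unf_rport_error_recovery() runs both in the case above and again in the shared check after this switch, because ret is still set on the fallthrough path. */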
default: - break; - } - - if (ret != RETURN_OK) - unf_rport_error_recovery(rport); - - /* company with timer */ - unf_rport_ref_dec(rport); -} - -void unf_schedule_closing_work(struct unf_lport *lport, struct unf_rport *rport) -{ - ulong flags = 0; - struct unf_rport_scsi_id_image *rport_scsi_table = NULL; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - u32 scsi_id = 0; - u32 ret = 0; - u32 delay = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - delay = (u32)(unf_get_link_lose_tmo(lport) * 1000); - - rport_scsi_table = &lport->rport_scsi_table; - scsi_id = rport->scsi_id; - spin_lock_irqsave(&rport->rport_state_lock, flags); - - /* 1. Cancel recovery_work */ - if (cancel_delayed_work(&rport->recovery_work)) { - atomic_dec(&rport->rport_ref_cnt); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel recovery work succeed", - lport->port_id, lport->nport_id, rport->nport_id, rport); - } - - /* 2. Cancel Open_work */ - if (cancel_delayed_work(&rport->open_work)) { - atomic_dec(&rport->rport_ref_cnt); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel open work succeed", - lport->port_id, lport->nport_id, rport->nport_id, rport); - } - - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - /* 3. Work in-queue (switch to thread context) */ - if (!queue_work(lport->link_event_wq, &rport->closing_work)) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[warn]Port(0x%x) RPort(0x%x_0x%p) add link down to work queue failed", - lport->port_id, rport->nport_id, rport); - - atomic_inc(&lport->add_closing_work_failed); - } else { - spin_lock_irqsave(&rport->rport_state_lock, flags); - (void)unf_rport_ref_inc(rport); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%x_0x%p) add link down to work(%p) queue succeed", - lport->port_id, rport->nport_id, rport, - &rport->closing_work); - } - - if (rport->nport_id > UNF_FC_FID_DOM_MGR) - return; - - if (scsi_id >= UNF_MAX_SCSI_ID) { - scsi_id = unf_get_scsi_id_by_wwpn(lport, rport->port_name); - if (scsi_id >= UNF_MAX_SCSI_ID) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%p) NPortId(0x%x) wwpn(0x%llx) option(0x%x) scsi_id(0x%x) is bigger than(0x%x)", - lport->port_id, rport, rport->nport_id, - rport->port_name, rport->options, scsi_id, - UNF_MAX_SCSI_ID); - - return; - } - } - - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; - ret = queue_delayed_work(unf_wq, &wwn_rport_info->loss_tmo_work, - (ulong)msecs_to_jiffies((u32)delay)); - if (!ret) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Port(0x%x) add RPort(0x%p) NPortId(0x%x) scsi_id(0x%x) wwpn(0x%llx) loss timeout work failed", - lport->port_id, rport, rport->nport_id, scsi_id, - rport->port_name); - } -} - -static void unf_rport_closing_timeout(struct work_struct *work) -{ - /* closing --->>>(timeout)--->>> delete */ - struct unf_rport *rport = NULL; - struct unf_lport *lport = NULL; - struct unf_disc *disc = NULL; - ulong rport_flag = 0; - ulong disc_flag = 0; - void (*unf_rport_callback)(void *, void *, u32) = NULL; - enum unf_rport_login_state old_state; - - FC_CHECK_RETURN_VOID(work); - - /* Get R_Port & L_Port & Disc */ - rport = container_of(work, struct unf_rport, closing_work); - if (unlikely(!rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort is NULL"); - return; - } - - lport = rport->lport; - if (unlikely(!lport)) { -
FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort(0x%x_0x%p) Port is NULL", - rport->nport_id, rport); - - /* Release directly (for timer) */ - unf_rport_ref_dec(rport); - return; - } - disc = &lport->disc; - - spin_lock_irqsave(&rport->rport_state_lock, rport_flag); - - old_state = rport->rp_state; - /* 1. Update R_Port state: event_timeout --->>> state_delete */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_CLS_TIMEOUT); - - /* Check R_Port state */ - if (rport->rp_state != UNF_RPORT_ST_DELETE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) closing timeout with error state(0x%x->0x%x)", - lport->port_id, lport->nport_id, rport->nport_id, - rport, old_state, rport->rp_state); - - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - - /* Dec ref_cnt for timer */ - unf_rport_ref_dec(rport); - return; - } - - unf_rport_callback = rport->unf_rport_callback; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - - /* 2. Put R_Port to delete list */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - list_del_init(&rport->entry_rport); - list_add_tail(&rport->entry_rport, &disc->list_delete_rports); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - - /* 3. Report rport link down event to scsi */ - if (unf_rport_callback) { - unf_rport_callback((void *)rport, (void *)rport->lport, RETURN_OK); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort(0x%x) callback is NULL", - rport->nport_id); - } - - /* 4. Remove/delete R_Port */ - unf_rport_ref_dec(rport); - unf_rport_ref_dec(rport); -} - -static void unf_rport_linkup_to_scsi(struct work_struct *work) -{ - struct fc_rport_identifiers rport_ids; - struct fc_rport *rport = NULL; - ulong flags = RETURN_OK; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - struct unf_rport_scsi_id_image *rport_scsi_table = NULL; - u32 scsi_id = 0; - - struct unf_lport *lport = NULL; - struct unf_rport *unf_rport = NULL; - - FC_CHECK_RETURN_VOID(work); - - unf_rport = container_of(work, struct unf_rport, start_work); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort is NULL for work(%p)", work); - return; - } - - lport = unf_rport->lport; - if (unlikely(!lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort(0x%x_0x%p) Port is NULL", - unf_rport->nport_id, unf_rport); - return; - } - - /* 1. Alloc R_Port SCSI_ID (image table) */ - unf_rport->scsi_id = unf_alloc_scsi_id(lport, unf_rport); - if (unlikely(unf_rport->scsi_id == INVALID_VALUE32)) { - atomic_inc(&lport->scsi_session_add_failed); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) scsi_id(0x%x) is invalid", - lport->port_id, lport->nport_id, - unf_rport->nport_id, unf_rport, - unf_rport->port_name, unf_rport->scsi_id); - - /* NOTE: return */ - return; - } - - /* 2. 
Add rport to scsi */ - scsi_id = unf_rport->scsi_id; - rport_ids.node_name = unf_rport->node_name; - rport_ids.port_name = unf_rport->port_name; - rport_ids.port_id = unf_rport->nport_id; - rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; - rport = fc_remote_port_add(lport->host_info.host, 0, &rport_ids); - if (unlikely(!rport)) { - atomic_inc(&lport->scsi_session_add_failed); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) report link up to scsi failed", - lport->port_id, lport->nport_id, unf_rport->nport_id, unf_rport, - unf_rport->port_name); - - unf_free_scsi_id(lport, scsi_id); - return; - } - - /* 3. Change rport role */ - *((u32 *)rport->dd_data) = scsi_id; /* save local SCSI_ID to scsi rport */ - rport->supported_classes = FC_COS_CLASS3; - rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; - rport->dev_loss_tmo = (u32)unf_get_link_lose_tmo(lport); /* default 30s */ - fc_remote_port_rolechg(rport, rport_ids.roles); - - /* 4. Save scsi rport info to local R_Port */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - unf_rport->rport = rport; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - rport_scsi_table = &lport->rport_scsi_table; - spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags); - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; - wwn_rport_info->target_id = rport->scsi_target_id; - wwn_rport_info->rport = unf_rport; - atomic_set(&wwn_rport_info->scsi_state, UNF_SCSI_ST_ONLINE); - spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x_0x%x) RPort(0x%x) wwpn(0x%llx) scsi_id(0x%x) link up to scsi succeed", - lport->port_id, lport->nport_id, unf_rport->nport_id, - unf_rport->port_name, scsi_id); - - atomic_inc(&lport->scsi_session_add_success); -} - -static void unf_rport_open_timeout(struct work_struct *work) -{ - struct unf_rport *rport = NULL; - struct unf_lport *lport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(work); - - rport = container_of(work, struct unf_rport, open_work.work); - if (!rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]RPort is NULL"); - - return; - } - - spin_lock_irqsave(&rport->rport_state_lock, flags); - lport = rport->lport; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) RPort(0x%x) open work timeout with state(0x%x)", - lport->port_id, lport->nport_id, rport->nport_id, - rport->rp_state); - - /* NOTE: R_Port state check */ - if (rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) { - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - /* Dec ref_cnt for timer case */ - unf_rport_ref_dec(rport); - return; - } - - /* Report R_Port Link Down event */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - unf_rport_enter_closing(rport); - /* Dec ref_cnt for timer case */ - unf_rport_ref_dec(rport); -} - -static u32 unf_alloc_index_for_rport(struct unf_lport *lport, struct unf_rport *rport) -{ - ulong rport_flag = 0; - ulong pool_flag = 0; - u32 alloc_indx = 0; - u32 max_rport = 0; - struct unf_rport_pool *rport_pool = NULL; - spinlock_t *rport_scsi_tb_lock = NULL; - - rport_pool = &lport->rport_pool; - rport_scsi_tb_lock = &rport_pool->rport_free_pool_lock; - max_rport = lport->low_level_func.lport_cfg_items.max_login; - - max_rport = max_rport > SPFC_DEFAULT_RPORT_INDEX ? 
SPFC_DEFAULT_RPORT_INDEX : max_rport; - - spin_lock_irqsave(rport_scsi_tb_lock, pool_flag); - while (alloc_indx < max_rport) { - if (!test_bit((int)alloc_indx, rport_pool->rpi_bitmap)) { - /* Case for SPFC */ - if (unlikely(atomic_read(&lport->lport_no_operate_flag) == UNF_LPORT_NOP)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is within NOP", lport->port_id); - - spin_unlock_irqrestore(rport_scsi_tb_lock, pool_flag); - return UNF_RETURN_ERROR; - } - - spin_lock_irqsave(&rport->rport_state_lock, rport_flag); - rport->rport_index = alloc_indx; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) RPort(0x%x) alloc index(0x%x) succeed", - lport->port_id, rport->nport_id, alloc_indx); - - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - - /* Set (index) bit */ - set_bit((int)alloc_indx, rport_pool->rpi_bitmap); - - /* Break here */ - break; - } - alloc_indx++; - } - spin_unlock_irqrestore(rport_scsi_tb_lock, pool_flag); - - if (max_rport == alloc_indx) - return UNF_RETURN_ERROR; - return RETURN_OK; -} - -static void unf_check_rport_pool_status(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = lport; - struct unf_rport_pool *rport_pool = NULL; - ulong flags = 0; - u32 max_rport = 0; - - FC_CHECK_RETURN_VOID(lport); - rport_pool = &unf_lport->rport_pool; - - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flags); - max_rport = unf_lport->low_level_func.lport_cfg_items.max_login; - if (rport_pool->rport_pool_completion && - rport_pool->rport_pool_count == max_rport) { - complete(rport_pool->rport_pool_completion); - } - - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flags); -} - -static void unf_init_rport_sq_num(struct unf_rport *rport, struct unf_lport *lport) -{ - u32 session_order; - u32 ssq_average_session_num; - - ssq_average_session_num = (lport->max_ssq_num - 1) / UNF_SQ_NUM_PER_SESSION; - session_order = (rport->rport_index) % ssq_average_session_num; - rport->sqn_base = (session_order * UNF_SQ_NUM_PER_SESSION); -} - -void unf_init_rport_params(struct unf_rport *rport, struct unf_lport *lport) -{ - struct unf_rport *unf_rport = rport; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(unf_rport); - FC_CHECK_RETURN_VOID(lport); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_set_rport_state(unf_rport, UNF_RPORT_ST_INIT); - unf_rport->unf_rport_callback = unf_rport_callback; - unf_rport->lport = lport; - unf_rport->fcp_conf_needed = false; - unf_rport->tape_support_needed = false; - unf_rport->max_retries = UNF_MAX_RETRY_COUNT; - unf_rport->logo_retries = 0; - unf_rport->retries = 0; - unf_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - unf_rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN; - unf_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; - unf_rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN; - unf_rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; - unf_rport->node_name = 0; - unf_rport->port_name = INVALID_WWPN; - unf_rport->disc_done = 0; - unf_rport->scsi_id = INVALID_VALUE32; - unf_rport->data_thread = NULL; - sema_init(&unf_rport->task_sema, 0); - atomic_set(&unf_rport->rport_ref_cnt, 0); - atomic_set(&unf_rport->pending_io_cnt, 0); - unf_rport->rport_alloc_jifs = jiffies; - - unf_rport->ed_tov = UNF_DEFAULT_EDTOV + 500; - unf_rport->ra_tov = UNF_DEFAULT_RATOV; - - INIT_WORK(&unf_rport->closing_work, unf_rport_closing_timeout); - INIT_WORK(&unf_rport->start_work, unf_rport_linkup_to_scsi); - INIT_DELAYED_WORK(&unf_rport->recovery_work, unf_rport_recovery_timeout); -
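- /* Each R_Port carries four work items: closing_work and start_work are queued on the port's link_event_wq (see unf_schedule_closing_work() and unf_report_ini_linkup_event() above), while recovery_work and open_work are delayed works whose timeout handlers are defined above. */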
INIT_DELAYED_WORK(&unf_rport->open_work, unf_rport_open_timeout); - - atomic_inc(&unf_rport->rport_ref_cnt); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); -} - -static u32 unf_alloc_ll_rport_resource(struct unf_lport *lport, - struct unf_rport *rport, u32 nport_id) -{ - u32 ret = RETURN_OK; - struct unf_port_info rport_info = {0}; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_qos_info *qos_info = NULL; - struct unf_lport *unf_lport = NULL; - ulong flag = 0; - - unf_lport = lport->root_lport; - - if (unf_lport->low_level_func.service_op.unf_alloc_rport_res) { - spin_lock_irqsave(&lport->qos_mgr_lock, flag); - rport_info.qos_level = lport->qos_level; - list_for_each_safe(node, next_node, &lport->list_qos_head) { - qos_info = (struct unf_qos_info *)list_entry(node, struct unf_qos_info, - entry_qos_info); - - if (qos_info && qos_info->nport_id == nport_id) { - rport_info.qos_level = qos_info->qos_level; - break; - } - } - - spin_unlock_irqrestore(&lport->qos_mgr_lock, flag); - - unf_init_rport_sq_num(rport, unf_lport); - - rport->qos_level = rport_info.qos_level; - rport_info.nport_id = nport_id; - rport_info.rport_index = rport->rport_index; - rport_info.local_nport_id = lport->nport_id; - rport_info.port_name = 0; - rport_info.cs_ctrl = UNF_CSCTRL_INVALID; - rport_info.sqn_base = rport->sqn_base; - - if (unf_lport->priority == UNF_PRIORITY_ENABLE) { - if (rport_info.qos_level == UNF_QOS_LEVEL_DEFAULT) - rport_info.cs_ctrl = UNF_CSCTRL_LOW; - else if (rport_info.qos_level == UNF_QOS_LEVEL_MIDDLE) - rport_info.cs_ctrl = UNF_CSCTRL_MIDDLE; - else if (rport_info.qos_level == UNF_QOS_LEVEL_HIGH) - rport_info.cs_ctrl = UNF_CSCTRL_HIGH; - } - - ret = unf_lport->low_level_func.service_op.unf_alloc_rport_res(unf_lport->fc_port, - &rport_info); - } else { - ret = RETURN_OK; - } - - return ret; -} - -static void *unf_add_rport_to_busy_list(struct unf_lport *lport, - struct unf_rport *new_rport, - u32 nport_id) -{ - struct unf_rport_pool *rport_pool = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_disc *disc = NULL; - struct unf_rport *unf_new_rport = new_rport; - struct unf_rport *old_rport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - spinlock_t *rport_free_lock = NULL; - spinlock_t *rport_busy_lock = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(new_rport, NULL); - - unf_lport = lport->root_lport; - disc = &lport->disc; - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - rport_pool = &unf_lport->rport_pool; - rport_free_lock = &rport_pool->rport_free_pool_lock; - rport_busy_lock = &disc->rport_busy_pool_lock; - - spin_lock_irqsave(rport_busy_lock, flag); - list_for_each_safe(node, next_node, &disc->list_busy_rports) { - /* According to N_Port_ID */ - old_rport = list_entry(node, struct unf_rport, entry_rport); - if (old_rport->nport_id == nport_id) - break; - old_rport = NULL; - } - - if (old_rport) { - spin_unlock_irqrestore(rport_busy_lock, flag); - - /* Use old R_Port & Add new R_Port back to R_Port Pool */ - spin_lock_irqsave(rport_free_lock, flag); - clear_bit((int)unf_new_rport->rport_index, rport_pool->rpi_bitmap); - list_add_tail(&unf_new_rport->entry_rport, &rport_pool->list_rports_pool); - rport_pool->rport_pool_count++; - spin_unlock_irqrestore(rport_free_lock, flag); - - unf_check_rport_pool_status(unf_lport); - return (void *)old_rport; - } - spin_unlock_irqrestore(rport_busy_lock, flag); - if (nport_id != UNF_FC_FID_FLOGI) { - if 
(unf_alloc_ll_rport_resource(lport, unf_new_rport, nport_id) != RETURN_OK) { - /* Add new R_Port back to R_Port Pool */ - spin_lock_irqsave(rport_free_lock, flag); - clear_bit((int)unf_new_rport->rport_index, rport_pool->rpi_bitmap); - list_add_tail(&unf_new_rport->entry_rport, &rport_pool->list_rports_pool); - rport_pool->rport_pool_count++; - spin_unlock_irqrestore(rport_free_lock, flag); - unf_check_rport_pool_status(unf_lport); - - return NULL; - } - } - - spin_lock_irqsave(rport_busy_lock, flag); - /* Add new R_Port to busy list */ - list_add_tail(&unf_new_rport->entry_rport, &disc->list_busy_rports); - unf_new_rport->nport_id = nport_id; - unf_new_rport->local_nport_id = lport->nport_id; - spin_unlock_irqrestore(rport_busy_lock, flag); - unf_init_rport_params(unf_new_rport, lport); - - return (void *)unf_new_rport; -} - -void *unf_rport_get_free_and_init(void *lport, u32 port_type, u32 nport_id) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport_pool *rport_pool = NULL; - struct unf_disc *disc = NULL; - struct unf_disc *vport_disc = NULL; - struct unf_rport *rport = NULL; - struct list_head *list_head = NULL; - ulong flag = 0; - struct unf_disc_rport *disc_rport = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - - /* Check L_Port state: NOP */ - if (unlikely(atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP)) - return NULL; - - rport_pool = &unf_lport->rport_pool; - disc = &unf_lport->disc; - - /* 1. UNF_PORT_TYPE_DISC: Get from disc_rport_pool */ - if (port_type == UNF_PORT_TYPE_DISC) { - vport_disc = &((struct unf_lport *)lport)->disc; - /* NOTE: list_disc_rports_pool used with list_disc_rports_busy */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - if (!list_empty(&disc->disc_rport_mgr.list_disc_rports_pool)) { - /* Get & delete from Disc R_Port Pool & Add it to Busy list */ - list_head = UNF_OS_LIST_NEXT(&disc->disc_rport_mgr.list_disc_rports_pool); - list_del_init(list_head); - disc_rport = list_entry(list_head, struct unf_disc_rport, entry_rport); - disc_rport->nport_id = nport_id; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Add to list_disc_rports_busy */ - spin_lock_irqsave(&vport_disc->rport_busy_pool_lock, flag); - list_add_tail(list_head, &vport_disc->disc_rport_mgr.list_disc_rports_busy); - spin_unlock_irqrestore(&vport_disc->rport_busy_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x_0x%x) add nportid:0x%x to rportbusy list", - unf_lport->port_id, unf_lport->nport_id, - disc_rport->nport_id); - } else { - disc_rport = NULL; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } - - /* NOTE: return */ - return disc_rport; - } - - /* 2. UNF_PORT_TYPE_FC (rport_pool): Get from list_rports_pool */ - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - if (!list_empty(&rport_pool->list_rports_pool)) { - /* Get & delete from R_Port free Pool */ - list_head = UNF_OS_LIST_NEXT(&rport_pool->list_rports_pool); - list_del_init(list_head); - rport_pool->rport_pool_count--; - rport = list_entry(list_head, struct unf_rport, entry_rport); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) RPort pool is empty", - unf_lport->port_id, unf_lport->nport_id); - - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - - return NULL; - } - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - - /* 3. 
Alloc (& set bit) R_Port index */ - if (unf_alloc_index_for_rport(unf_lport, rport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate index for new RPort failed", - unf_lport->nport_id); - - /* Alloc failed: Add R_Port back to R_Port Pool */ - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - list_add_tail(&rport->entry_rport, &rport_pool->list_rports_pool); - rport_pool->rport_pool_count++; - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - unf_check_rport_pool_status(unf_lport); - return NULL; - } - - /* 4. Add R_Port to busy list */ - rport = unf_add_rport_to_busy_list(lport, rport, nport_id); - - return (void *)rport; -} - -u32 unf_release_rport_res(struct unf_lport *lport, struct unf_rport *rport) -{ - u32 ret = UNF_RETURN_ERROR; - struct unf_port_info rport_info; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - memset(&rport_info, 0, sizeof(struct unf_port_info)); - - rport_info.rport_index = rport->rport_index; - rport_info.nport_id = rport->nport_id; - rport_info.port_name = rport->port_name; - rport_info.sqn_base = rport->sqn_base; - - /* 2. release R_Port(parent context/Session) resource */ - if (!lport->low_level_func.service_op.unf_release_rport_res) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) release rport resource function can't be NULL", - lport->port_id); - - return ret; - } - - ret = lport->low_level_func.service_op.unf_release_rport_res(lport->fc_port, &rport_info); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) rport_index(0x%x, %p) send release session CMND failed", - lport->port_id, rport_info.rport_index, rport); - } - - return ret; -} - -static void unf_reset_rport_attribute(struct unf_rport *rport) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->unf_rport_callback = NULL; - rport->lport = NULL; - rport->node_name = INVALID_VALUE64; - rport->port_name = INVALID_WWPN; - rport->nport_id = INVALID_VALUE32; - rport->local_nport_id = INVALID_VALUE32; - rport->max_frame_size = UNF_MAX_FRAME_SIZE; - rport->ed_tov = UNF_DEFAULT_EDTOV; - rport->ra_tov = UNF_DEFAULT_RATOV; - rport->rport_index = INVALID_VALUE32; - rport->scsi_id = INVALID_VALUE32; - rport->rport_alloc_jifs = INVALID_VALUE64; - - /* ini or tgt */ - rport->options = 0; - - /* fcp conf */ - rport->fcp_conf_needed = false; - - /* special req retry times */ - rport->retries = 0; - rport->logo_retries = 0; - - /* special req retry times */ - rport->max_retries = UNF_MAX_RETRY_COUNT; - - /* for target mode */ - rport->session = NULL; - rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN; - rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; - rport->rp_state = UNF_RPORT_ST_INIT; - rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN; - rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; - rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - rport->disc_done = 0; - rport->sqn_base = 0; - - /* for scsi */ - rport->data_thread = NULL; - spin_unlock_irqrestore(&rport->rport_state_lock, flag); -} - -u32 unf_rport_remove(void *rport) -{ - struct unf_lport *lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_rport_pool *rport_pool = NULL; - ulong flag = 0; - u32 rport_index = 0; - u32 nport_id = 0; - - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - unf_rport = (struct unf_rport *)rport; - lport = unf_rport->lport; - FC_CHECK_RETURN_VALUE(lport, 
UNF_RETURN_ERROR); - rport_pool = &((struct unf_lport *)lport->root_lport)->rport_pool; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Remove RPort(0x%p) with remote_nport_id(0x%x) local_nport_id(0x%x)", - unf_rport, unf_rport->nport_id, unf_rport->local_nport_id); - - /* 1. Terminate open exchange before rport remove: set ABORT tag */ - unf_cm_xchg_mgr_abort_io_by_id(lport, unf_rport, unf_rport->nport_id, lport->nport_id, 0); - - /* 2. Abort sfp exchange before rport remove */ - unf_cm_xchg_mgr_abort_sfs_by_id(lport, unf_rport, unf_rport->nport_id, lport->nport_id); - - /* 3. Release R_Port resource: session reset/delete */ - if (likely(unf_rport->nport_id != UNF_FC_FID_FLOGI)) - (void)unf_release_rport_res(lport, unf_rport); - - nport_id = unf_rport->nport_id; - - /* 4.1 Delete R_Port from disc destroy/delete list */ - spin_lock_irqsave(&lport->disc.rport_busy_pool_lock, flag); - list_del_init(&unf_rport->entry_rport); - spin_unlock_irqrestore(&lport->disc.rport_busy_pool_lock, flag); - - rport_index = unf_rport->rport_index; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x) release RPort(0x%x_%p) with index(0x%x)", - lport->port_id, unf_rport->nport_id, unf_rport, - unf_rport->rport_index); - - unf_reset_rport_attribute(unf_rport); - - /* 4.2 Add rport to --->>> rport_pool (free pool) & clear bitmap */ - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - if (unlikely(nport_id == UNF_FC_FID_FLOGI)) { - if (test_bit((int)rport_index, rport_pool->rpi_bitmap)) - clear_bit((int)rport_index, rport_pool->rpi_bitmap); - } - - list_add_tail(&unf_rport->entry_rport, &rport_pool->list_rports_pool); - rport_pool->rport_pool_count++; - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - - unf_check_rport_pool_status((struct unf_lport *)lport->root_lport); - up(&unf_rport->task_sema); - - return RETURN_OK; -} - -u32 unf_rport_ref_inc(struct unf_rport *rport) -{ - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - if (atomic_read(&rport->rport_ref_cnt) <= 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Rport(0x%x) reference count is wrong %d", - rport->nport_id, - atomic_read(&rport->rport_ref_cnt)); - return UNF_RETURN_ERROR; - } - - atomic_inc(&rport->rport_ref_cnt); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Rport(0x%x) reference count is %d", rport->nport_id, - atomic_read(&rport->rport_ref_cnt)); - - return RETURN_OK; -} - -void unf_rport_ref_dec(struct unf_rport *rport) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Rport(0x%x) reference count is %d", rport->nport_id, - atomic_read(&rport->rport_ref_cnt)); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - if (atomic_dec_and_test(&rport->rport_ref_cnt)) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - (void)unf_rport_remove(rport); - } else { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } -} - -void unf_set_rport_state(struct unf_rport *rport, - enum unf_rport_login_state states) -{ - FC_CHECK_RETURN_VOID(rport); - - if (rport->rp_state != states) { - /* Reset R_Port retry count */ - rport->retries = 0; - } - - rport->rp_state = states; -} - -static enum unf_rport_login_state -unf_rport_stat_init(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_LOGO: - next_state = UNF_RPORT_ST_LOGO; - break; - - case UNF_EVENT_RPORT_ENTER_PLOGI: - next_state = 
UNF_RPORT_ST_PLOGI_WAIT; - break; - - case UNF_EVENT_RPORT_LINK_DOWN: - next_state = UNF_RPORT_ST_CLOSING; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_rport_login_state unf_rport_stat_plogi_wait(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_ENTER_PRLI: - next_state = UNF_RPORT_ST_PRLI_WAIT; - break; - - case UNF_EVENT_RPORT_LINK_DOWN: - next_state = UNF_RPORT_ST_CLOSING; - break; - - case UNF_EVENT_RPORT_LOGO: - next_state = UNF_RPORT_ST_LOGO; - break; - - case UNF_EVENT_RPORT_RECOVERY: - next_state = UNF_RPORT_ST_READY; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_rport_login_state unf_rport_stat_prli_wait(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_READY: - next_state = UNF_RPORT_ST_READY; - break; - - case UNF_EVENT_RPORT_LOGO: - next_state = UNF_RPORT_ST_LOGO; - break; - - case UNF_EVENT_RPORT_LINK_DOWN: - next_state = UNF_RPORT_ST_CLOSING; - break; - - case UNF_EVENT_RPORT_RECOVERY: - next_state = UNF_RPORT_ST_READY; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_rport_login_state unf_rport_stat_ready(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_LOGO: - next_state = UNF_RPORT_ST_LOGO; - break; - - case UNF_EVENT_RPORT_LINK_DOWN: - next_state = UNF_RPORT_ST_CLOSING; - break; - - case UNF_EVENT_RPORT_ENTER_PLOGI: - next_state = UNF_RPORT_ST_PLOGI_WAIT; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_rport_login_state unf_rport_stat_closing(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_CLS_TIMEOUT: - next_state = UNF_RPORT_ST_DELETE; - break; - - case UNF_EVENT_RPORT_RELOGIN: - next_state = UNF_RPORT_ST_INIT; - break; - - case UNF_EVENT_RPORT_RECOVERY: - next_state = UNF_RPORT_ST_READY; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_rport_login_state unf_rport_stat_logo(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_NORMAL_ENTER: - next_state = UNF_RPORT_ST_CLOSING; - break; - - case UNF_EVENT_RPORT_RECOVERY: - next_state = UNF_RPORT_ST_READY; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -void unf_rport_state_ma(struct unf_rport *rport, enum unf_rport_event event) -{ - enum unf_rport_login_state old_state = UNF_RPORT_ST_INIT; - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - FC_CHECK_RETURN_VOID(rport); - - old_state = rport->rp_state; - - switch (rport->rp_state) { - case UNF_RPORT_ST_INIT: - next_state = unf_rport_stat_init(old_state, event); - break; - case UNF_RPORT_ST_PLOGI_WAIT: - next_state = unf_rport_stat_plogi_wait(old_state, event); - break; - case UNF_RPORT_ST_PRLI_WAIT: - next_state = unf_rport_stat_prli_wait(old_state, event); - break; - case UNF_RPORT_ST_LOGO: - next_state = 
unf_rport_stat_logo(old_state, event); - break; - case UNF_RPORT_ST_CLOSING: - next_state = unf_rport_stat_closing(old_state, event); - break; - case UNF_RPORT_ST_READY: - next_state = unf_rport_stat_ready(old_state, event); - break; - case UNF_RPORT_ST_DELETE: - default: - next_state = UNF_RPORT_ST_INIT; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "[info]RPort(0x%x) hold state(0x%x)", - rport->nport_id, rport->rp_state); - break; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MINOR, - "[info]RPort(0x%x) with oldstate(0x%x) event(0x%x) nextstate(0x%x)", - rport->nport_id, old_state, event, next_state); - - unf_set_rport_state(rport, next_state); -} - -void unf_clean_linkdown_rport(struct unf_lport *lport) -{ - /* for L_Port's R_Port(s) */ - struct unf_disc *disc = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_rport *rport = NULL; - struct unf_lport *unf_lport = NULL; - ulong disc_lock_flag = 0; - ulong rport_lock_flag = 0; - - FC_CHECK_RETURN_VOID(lport); - disc = &lport->disc; - - /* for each busy R_Port */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_lock_flag); - list_for_each_safe(node, next_node, &disc->list_busy_rports) { - rport = list_entry(node, struct unf_rport, entry_rport); - - /* 1. Prevent process Repeatly: Closing */ - spin_lock_irqsave(&rport->rport_state_lock, rport_lock_flag); - if (rport->rp_state == UNF_RPORT_ST_CLOSING) { - spin_unlock_irqrestore(&rport->rport_state_lock, rport_lock_flag); - continue; - } - - /* 2. Increase ref_cnt to protect R_Port */ - if (unf_rport_ref_inc(rport) != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, rport_lock_flag); - continue; - } - - /* 3. Update R_Port state: Link Down Event --->>> closing state - */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); - - /* 4. Put R_Port from busy to destroy list */ - list_del_init(&rport->entry_rport); - list_add_tail(&rport->entry_rport, &disc->list_destroy_rports); - - unf_lport = rport->lport; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_lock_flag); - - /* 5. Schedule Closing work (Enqueuing workqueue) */ - unf_schedule_closing_work(unf_lport, rport); - - /* 6. decrease R_Port ref_cnt (company with 2) */ - unf_rport_ref_dec(rport); - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_lock_flag); -} - -void unf_rport_enter_closing(struct unf_rport *rport) -{ - /* - * call by - * 1. with RSCN processer - * 2. with LOGOUT processer - * * - * from - * 1. R_Port Link Down - * 2. R_Port enter LOGO - */ - ulong rport_lock_flag = 0; - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *lport = NULL; - struct unf_disc *disc = NULL; - - FC_CHECK_RETURN_VOID(rport); - - /* 1. Increase ref_cnt to protect R_Port */ - spin_lock_irqsave(&rport->rport_state_lock, rport_lock_flag); - ret = unf_rport_ref_inc(rport); - if (ret != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, rport_lock_flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) is removing and no need process", - rport->nport_id, rport); - - return; - } - - /* NOTE: R_Port state has been set(with closing) */ - - lport = rport->lport; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_lock_flag); - - /* 2. 
Put R_Port from busy to destroy list */ - disc = &lport->disc; - spin_lock_irqsave(&disc->rport_busy_pool_lock, rport_lock_flag); - list_del_init(&rport->entry_rport); - list_add_tail(&rport->entry_rport, &disc->list_destroy_rports); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, rport_lock_flag); - - /* 3. Schedule Closing work (Enqueuing workqueue) */ - unf_schedule_closing_work(lport, rport); - - /* 4. dec R_Port ref_cnt */ - unf_rport_ref_dec(rport); -} - -void unf_rport_error_recovery(struct unf_rport *rport) -{ - ulong delay = 0; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - - ret = unf_rport_ref_inc(rport); - if (ret != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) is removing and no need process", - rport->nport_id, rport); - return; - } - - /* Check R_Port state */ - if (rport->rp_state == UNF_RPORT_ST_CLOSING || - rport->rp_state == UNF_RPORT_ST_DELETE) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]RPort(0x%x_0x%p) offline and no need process", - rport->nport_id, rport); - - unf_rport_ref_dec(rport); - return; - } - - /* Check repeatability with recovery work */ - if (delayed_work_pending(&rport->recovery_work)) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]RPort(0x%x_0x%p) recovery work is running and no need process", - rport->nport_id, rport); - - unf_rport_ref_dec(rport); - return; - } - - /* NOTE: Re-login or Logout directly (recovery work) */ - if (rport->retries < rport->max_retries) { - rport->retries++; - delay = UNF_DEFAULT_EDTOV / 4; - - if (queue_delayed_work(unf_wq, &rport->recovery_work, - (ulong)msecs_to_jiffies((u32)delay))) { - /* Inc ref_cnt: corresponding to this work timer */ - (void)unf_rport_ref_inc(rport); - } - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) state(0x%x) retry login failed", - rport->nport_id, rport, rport->rp_state); - - /* Update R_Port state: LOGO event --->>> ST_LOGO */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - unf_rport_enter_logo(rport->lport, rport); - } - - unf_rport_ref_dec(rport); -} - -static u32 unf_rport_reuse_only(struct unf_rport *rport) -{ - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - ret = unf_rport_ref_inc(rport); - if (ret != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* R_Port with delete state */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) is removing and no need process", - rport->nport_id, rport); - - return UNF_RETURN_ERROR; - } - - /* R_Port State check: delete */ - if (rport->rp_state == UNF_RPORT_ST_DELETE || - rport->rp_state == UNF_RPORT_ST_CLOSING) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) state(0x%x) is delete or closing no need process", - rport->nport_id, rport, rport->rp_state); - - ret = UNF_RETURN_ERROR; - } - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - unf_rport_ref_dec(rport); - - return ret; -} - -static u32 unf_rport_reuse_recover(struct unf_rport *rport) -{ - ulong flags = 0; - u32 ret = 
UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&rport->rport_state_lock, flags); - ret = unf_rport_ref_inc(rport); - if (ret != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - /* R_Port with delete state */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) is removing and no need process", - rport->nport_id, rport); - - return UNF_RETURN_ERROR; - } - - /* R_Port state check: delete */ - if (rport->rp_state == UNF_RPORT_ST_DELETE || - rport->rp_state == UNF_RPORT_ST_CLOSING) { - ret = UNF_RETURN_ERROR; - } - - /* Update R_Port state: recovery --->>> ready */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_RECOVERY); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - unf_rport_ref_dec(rport); - - return ret; -} - -static u32 unf_rport_reuse_init(struct unf_rport *rport) -{ - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - ret = unf_rport_ref_inc(rport); - if (ret != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* R_Port with delete state */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) is removing and no need process", - rport->nport_id, rport); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]RPort(0x%x)'s state is 0x%x with use_init flag", - rport->nport_id, rport->rp_state); - - /* R_Port State check: delete */ - if (rport->rp_state == UNF_RPORT_ST_DELETE || - rport->rp_state == UNF_RPORT_ST_CLOSING) { - ret = UNF_RETURN_ERROR; - } else { - /* Update R_Port state: re-enter Init state */ - unf_set_rport_state(rport, UNF_RPORT_ST_INIT); - } - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - unf_rport_ref_dec(rport); - - return ret; -} - -struct unf_rport *unf_get_rport_by_nport_id(struct unf_lport *lport, - u32 nport_id) -{ - struct unf_lport *unf_lport = NULL; - struct unf_disc *disc = NULL; - struct unf_rport *rport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - struct unf_rport *find_rport = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - unf_lport = (struct unf_lport *)lport; - disc = &unf_lport->disc; - - /* for each r_port from rport_busy_list: compare N_Port_ID */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - list_for_each_safe(node, next_node, &disc->list_busy_rports) { - rport = list_entry(node, struct unf_rport, entry_rport); - if (rport && rport->nport_id == nport_id) { - find_rport = rport; - break; - } - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - return find_rport; -} - -struct unf_rport *unf_get_rport_by_wwn(struct unf_lport *lport, u64 wwpn) -{ - struct unf_lport *unf_lport = NULL; - struct unf_disc *disc = NULL; - struct unf_rport *rport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - struct unf_rport *find_rport = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - unf_lport = (struct unf_lport *)lport; - disc = &unf_lport->disc; - - /* for each r_port from busy_list: compare wwpn(port name) */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - list_for_each_safe(node, next_node, &disc->list_busy_rports) { - rport = list_entry(node, struct unf_rport, entry_rport); - if (rport && rport->port_name == wwpn) { - find_rport = rport; - break; - } - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - return find_rport; 
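
Both lookup helpers above walk disc->list_busy_rports under rport_busy_pool_lock and hand back a bare pointer without taking a reference, so a caller that keeps the result past the lock must pair it with unf_rport_ref_inc()/unf_rport_ref_dec() itself. A hypothetical variant (not part of the driver, and deliberately ignoring the rport_state_lock ordering the driver uses around reference counting) that takes the reference before the lock drops might look like this:

static struct unf_rport *unf_get_rport_by_nport_id_ref(struct unf_lport *lport,
						       u32 nport_id)
{
	struct unf_disc *disc = &lport->disc;
	struct unf_rport *rport = NULL;
	struct unf_rport *found = NULL;
	ulong flag = 0;

	spin_lock_irqsave(&disc->rport_busy_pool_lock, flag);
	list_for_each_entry(rport, &disc->list_busy_rports, entry_rport) {
		if (rport->nport_id == nport_id &&
		    unf_rport_ref_inc(rport) == RETURN_OK) {
			/* Reference taken while the list is stable, so the
			 * R_Port cannot be recycled by unf_rport_remove()
			 * until the caller calls unf_rport_ref_dec().
			 */
			found = rport;
			break;
		}
	}
	spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag);

	return found;
}
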
-} - -struct unf_rport *unf_get_safe_rport(struct unf_lport *lport, - struct unf_rport *rport, - enum unf_rport_reuse_flag reuse_flag, - u32 nport_id) -{ - /* - * New add or plug - * * - * retry_flogi --->>> reuse_only - * name_server_register --->>> reuse_only - * SNS_plogi --->>> reuse_only - * enter_flogi --->>> reuse_only - * logout --->>> reuse_only - * flogi_handler --->>> reuse_only - * plogi_handler --->>> reuse_only - * adisc_handler --->>> reuse_recovery - * logout_handler --->>> reuse_init - * prlo_handler --->>> reuse_init - * login_with_loop --->>> reuse_only - * gffid_callback --->>> reuse_only - * delay_plogi --->>> reuse_only - * gffid_rjt --->>> reuse_only - * gffid_rsp_unknown --->>> reuse_only - * gpnid_acc --->>> reuse_init - * fdisc_callback --->>> reuse_only - * flogi_acc --->>> reuse_only - * plogi_acc --->>> reuse_only - * logo_callback --->>> reuse_init - * rffid_callback --->>> reuse_only - */ -#define UNF_AVOID_LINK_FLASH_TIME 3000 - - struct unf_rport *unf_rport = rport; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - /* 1. Alloc New R_Port or Update R_Port Property */ - if (!unf_rport) { - /* If NULL, get/Alloc new node (R_Port from R_Port pool) - * directly - */ - unf_rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, nport_id); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, - "[info]Port(0x%x) get exist RPort(0x%x) with state(0x%x) and reuse_flag(0x%x)", - lport->port_id, unf_rport->nport_id, - unf_rport->rp_state, reuse_flag); - - switch (reuse_flag) { - case UNF_RPORT_REUSE_ONLY: - ret = unf_rport_reuse_only(unf_rport); - if (ret != RETURN_OK) { - /* R_Port within delete list: need get new */ - unf_rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, - nport_id); - } - break; - - case UNF_RPORT_REUSE_INIT: - ret = unf_rport_reuse_init(unf_rport); - if (ret != RETURN_OK) { - /* R_Port within delete list: need get new */ - unf_rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, - nport_id); - } - break; - - case UNF_RPORT_REUSE_RECOVER: - ret = unf_rport_reuse_recover(unf_rport); - if (ret != RETURN_OK) { - /* R_Port within delete list, - * NOTE: do nothing - */ - unf_rport = NULL; - } - break; - - default: - break; - } - } // end else: R_Port != NULL - - return unf_rport; -} - -u32 unf_get_port_feature(u64 wwpn) -{ - struct unf_rport_feature_recard *port_fea = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - struct list_head list_temp_node; - struct list_head *list_busy_head = NULL; - struct list_head *list_free_head = NULL; - spinlock_t *feature_lock = NULL; - - list_busy_head = &port_feature_pool->list_busy_head; - list_free_head = &port_feature_pool->list_free_head; - feature_lock = &port_feature_pool->port_fea_pool_lock; - spin_lock_irqsave(feature_lock, flags); - list_for_each_safe(node, next_node, list_busy_head) { - port_fea = list_entry(node, struct unf_rport_feature_recard, entry_feature); - - if (port_fea->wwpn == wwpn) { - list_del(&port_fea->entry_feature); - list_add(&port_fea->entry_feature, list_busy_head); - spin_unlock_irqrestore(feature_lock, flags); - - return port_fea->port_feature; - } - } - - list_for_each_safe(node, next_node, list_free_head) { - port_fea = list_entry(node, struct unf_rport_feature_recard, entry_feature); - - if (port_fea->wwpn == wwpn) { - list_del(&port_fea->entry_feature); - list_add(&port_fea->entry_feature, list_busy_head); - spin_unlock_irqrestore(feature_lock, flags); - - return port_fea->port_feature; 
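
unf_get_safe_rport() above centralizes the reuse-or-reallocate decision: UNF_RPORT_REUSE_ONLY and UNF_RPORT_REUSE_INIT fall back to a fresh R_Port from the free pool when the existing one is deleting or closing, while UNF_RPORT_REUSE_RECOVER returns NULL in that case. A hypothetical caller sketch for inbound PLOGI, which per the mapping table above uses reuse_only (demo_* name is illustrative, not a driver function):

static struct unf_rport *demo_resolve_plogi_rport(struct unf_lport *lport, u32 sid)
{
	struct unf_rport *rport = unf_get_rport_by_nport_id(lport, sid);

	/* Reuse the known R_Port for this N_Port_ID if it is still usable;
	 * otherwise unf_get_safe_rport() allocates a fresh one. NULL here
	 * means the pool is exhausted or the L_Port is in NOP.
	 */
	return unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, sid);
}
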
- } - } - - /* can't find wwpn */ - if (list_empty(list_free_head)) { - /* free is empty, transport busy to free */ - list_temp_node = port_feature_pool->list_free_head; - port_feature_pool->list_free_head = port_feature_pool->list_busy_head; - port_feature_pool->list_busy_head = list_temp_node; - } - - port_fea = list_entry(UNF_OS_LIST_PREV(list_free_head), - struct unf_rport_feature_recard, - entry_feature); - list_del(&port_fea->entry_feature); - list_add(&port_fea->entry_feature, list_busy_head); - - port_fea->wwpn = wwpn; - port_fea->port_feature = UNF_PORT_MODE_UNKNOWN; - - spin_unlock_irqrestore(feature_lock, flags); - return UNF_PORT_MODE_UNKNOWN; -} - -void unf_update_port_feature(u64 wwpn, u32 port_feature) -{ - struct unf_rport_feature_recard *port_fea = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct list_head *busy_head = NULL; - struct list_head *free_head = NULL; - ulong flags = 0; - spinlock_t *feature_lock = NULL; - - feature_lock = &port_feature_pool->port_fea_pool_lock; - busy_head = &port_feature_pool->list_busy_head; - free_head = &port_feature_pool->list_free_head; - - spin_lock_irqsave(feature_lock, flags); - list_for_each_safe(node, next_node, busy_head) { - port_fea = list_entry(node, struct unf_rport_feature_recard, entry_feature); - - if (port_fea->wwpn == wwpn) { - port_fea->port_feature = port_feature; - list_del(&port_fea->entry_feature); - list_add(&port_fea->entry_feature, busy_head); - spin_unlock_irqrestore(feature_lock, flags); - - return; - } - } - - list_for_each_safe(node, next_node, free_head) { - port_fea = list_entry(node, struct unf_rport_feature_recard, entry_feature); - - if (port_fea->wwpn == wwpn) { - port_fea->port_feature = port_feature; - list_del(&port_fea->entry_feature); - list_add(&port_fea->entry_feature, busy_head); - - spin_unlock_irqrestore(feature_lock, flags); - - return; - } - } - - spin_unlock_irqrestore(feature_lock, flags); -} diff --git a/drivers/scsi/spfc/common/unf_rport.h b/drivers/scsi/spfc/common/unf_rport.h deleted file mode 100644 index a9d58cb29b8a97935cc6141c587959a815cb1a40..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_rport.h +++ /dev/null @@ -1,301 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_RPORT_H -#define UNF_RPORT_H - -#include "unf_type.h" -#include "unf_common.h" -#include "unf_lport.h" - -extern struct unf_rport_feature_pool *port_feature_pool; - -#define UNF_MAX_SCSI_ID 2048 -#define UNF_LOSE_TMO 30 -#define UNF_RPORT_INVALID_INDEX 0xffff - -/* RSCN compare DISC list with local RPort macro */ -#define UNF_RPORT_NEED_PROCESS 0x1 -#define UNF_RPORT_ONLY_IN_DISC_PROCESS 0x2 -#define UNF_RPORT_ONLY_IN_LOCAL_PROCESS 0x3 -#define UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS 0x4 -#define UNF_RPORT_NOT_NEED_PROCESS 0x5 - -#define UNF_ECHO_SEND_MAX_TIMES 1 - -/* csctrl level value */ -#define UNF_CSCTRL_LOW 0x81 -#define UNF_CSCTRL_MIDDLE 0x82 -#define UNF_CSCTRL_HIGH 0x83 -#define UNF_CSCTRL_INVALID 0x0 - -enum unf_rport_login_state { - UNF_RPORT_ST_INIT = 0x1000, /* initialized */ - UNF_RPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */ - UNF_RPORT_ST_PRLI_WAIT, /* waiting for PRLI completion */ - UNF_RPORT_ST_READY, /* ready for use */ - UNF_RPORT_ST_LOGO, /* port logout sent */ - UNF_RPORT_ST_CLOSING, /* being closed */ - UNF_RPORT_ST_DELETE, /* port being deleted */ - UNF_RPORT_ST_BUTT -}; - -enum unf_rport_event { - UNF_EVENT_RPORT_NORMAL_ENTER = 0x9000, - 
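
The six unf_rport_stat_*() handlers dispatched from unf_rport_state_ma() encode a single transition table over these states and events. A purely illustrative table-driven equivalent, shown for the INIT row only (demo_* names are hypothetical; unmatched events hold the current state, as the handlers' default branches do):

struct demo_rport_transition {
	enum unf_rport_login_state state;
	enum unf_rport_event event;
	enum unf_rport_login_state next;
};

static const struct demo_rport_transition demo_rport_fsm[] = {
	/* INIT row, mirroring unf_rport_stat_init() */
	{UNF_RPORT_ST_INIT, UNF_EVENT_RPORT_LOGO, UNF_RPORT_ST_LOGO},
	{UNF_RPORT_ST_INIT, UNF_EVENT_RPORT_ENTER_PLOGI, UNF_RPORT_ST_PLOGI_WAIT},
	{UNF_RPORT_ST_INIT, UNF_EVENT_RPORT_LINK_DOWN, UNF_RPORT_ST_CLOSING},
	/* ...remaining rows elided... */
};

static enum unf_rport_login_state
demo_rport_next_state(enum unf_rport_login_state cur, enum unf_rport_event event)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(demo_rport_fsm); i++) {
		if (demo_rport_fsm[i].state == cur &&
		    demo_rport_fsm[i].event == event)
			return demo_rport_fsm[i].next;
	}

	return cur; /* unmatched event: hold current state */
}
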
UNF_EVENT_RPORT_ENTER_PLOGI = 0x9001, - UNF_EVENT_RPORT_ENTER_PRLI = 0x9002, - UNF_EVENT_RPORT_READY = 0x9003, - UNF_EVENT_RPORT_LOGO = 0x9004, - UNF_EVENT_RPORT_CLS_TIMEOUT = 0x9005, - UNF_EVENT_RPORT_RECOVERY = 0x9006, - UNF_EVENT_RPORT_RELOGIN = 0x9007, - UNF_EVENT_RPORT_LINK_DOWN = 0x9008, - UNF_EVENT_RPORT_BUTT -}; - -/* RPort local link state */ -enum unf_port_state { - UNF_PORT_STATE_LINKUP = 0x1001, - UNF_PORT_STATE_LINKDOWN = 0x1002 -}; - -enum unf_rport_reuse_flag { - UNF_RPORT_REUSE_ONLY = 0x1001, - UNF_RPORT_REUSE_INIT = 0x1002, - UNF_RPORT_REUSE_RECOVER = 0x1003 -}; - -struct unf_disc_rport { - /* RPort entry */ - struct list_head entry_rport; - - u32 nport_id; /* Remote port NPortID */ - u32 disc_done; /* 1:Disc done */ -}; - -struct unf_rport_feature_pool { - struct list_head list_busy_head; - struct list_head list_free_head; - void *port_feature_pool_addr; - spinlock_t port_fea_pool_lock; -}; - -struct unf_rport_feature_recard { - struct list_head entry_feature; - u64 wwpn; - u32 port_feature; - u32 reserved; -}; - -struct unf_os_thread_private_data { - struct list_head list; - spinlock_t spin_lock; - struct task_struct *thread; - unsigned int in_process; - unsigned int cpu_id; - atomic_t user_count; -}; - -/* Remote Port struct */ -struct unf_rport { - u32 max_frame_size; - u32 supported_classes; - - /* Dynamic Attributes */ - /* Remote Port loss timeout in seconds. */ - u32 dev_loss_tmo; - - u64 node_name; - u64 port_name; - u32 nport_id; /* Remote port NPortID */ - u32 local_nport_id; - - u32 roles; - - /* Remote port local INI state */ - enum unf_port_state lport_ini_state; - enum unf_port_state last_lport_ini_state; - - /* Remote port local TGT state */ - enum unf_port_state lport_tgt_state; - enum unf_port_state last_lport_tgt_state; - - /* Port Type,fc or fcoe */ - u32 port_type; - - /* RPort reference counter */ - atomic_t rport_ref_cnt; - - /* Pending IO count */ - atomic_t pending_io_cnt; - - /* RPort entry */ - struct list_head entry_rport; - - /* Port State,delay reclaim when uiRpState == complete. 
*/ - enum unf_rport_login_state rp_state; - u32 disc_done; /* 1:Disc done */ - - struct unf_lport *lport; - void *rport; - spinlock_t rport_state_lock; - - /* Port attribution */ - u32 ed_tov; - u32 ra_tov; - u32 options; /* ini or tgt */ - u32 last_report_link_up_options; - u32 fcp_conf_needed; /* INI Rport send FCP CONF flag */ - u32 tape_support_needed; /* INI tape support flag */ - u32 retries; /* special req retry times */ - u32 logo_retries; /* logo error recovery retry times */ - u32 max_retries; /* special req retry times */ - u64 rport_alloc_jifs; /* Rport alloc jiffies */ - - void *session; - - /* binding with SCSI */ - u32 scsi_id; - - /* disc list compare flag */ - u32 rscn_position; - - u32 rport_index; - - u32 sqn_base; - enum unf_rport_qos_level qos_level; - - /* RPort timer,closing status */ - struct work_struct closing_work; - - /* RPort timer,rport linkup */ - struct work_struct start_work; - - /* RPort timer,recovery */ - struct delayed_work recovery_work; - - /* RPort timer,TGT mode,PRLI waiting */ - struct delayed_work open_work; - - struct semaphore task_sema; - /* Callback after rport Ready/delete.[with state:ok/fail].Creat/free TGT session here */ - /* input : L_Port,R_Port,state:ready --creat session/delete--free session */ - void (*unf_rport_callback)(void *rport, void *lport, u32 result); - - struct unf_os_thread_private_data *data_thread; -}; - -#define UNF_IO_RESULT_CNT(scsi_table, scsi_id, io_result) \ - do { \ - if (likely(((io_result) < UNF_MAX_IO_RETURN_VALUE) && \ - ((scsi_id) < UNF_MAX_SCSI_ID) && \ - ((scsi_table)->wwn_rport_info_table) && \ - (((scsi_table)->wwn_rport_info_table[scsi_id].dfx_counter)))) {\ - atomic64_inc(&((scsi_table)->wwn_rport_info_table[scsi_id] \ - .dfx_counter->io_done_cnt[(io_result)])); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, \ - UNF_ERR, \ - "[err] io return value(0x%x) or " \ - "scsi id(0x%x) is invalid", \ - io_result, scsi_id); \ - } \ - } while (0) - -#define UNF_SCSI_CMD_CNT(scsi_table, scsi_id, io_type) \ - do { \ - if (likely(((io_type) < UNF_MAX_SCSI_CMD) && \ - ((scsi_id) < UNF_MAX_SCSI_ID) && \ - ((scsi_table)->wwn_rport_info_table) && \ - (((scsi_table)->wwn_rport_info_table[scsi_id].dfx_counter)))) { \ - atomic64_inc(&(((scsi_table)->wwn_rport_info_table[scsi_id]) \ - .dfx_counter->scsi_cmd_cnt[io_type])); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, \ - UNF_ERR, \ - "[err] scsi_cmd(0x%x) or scsi id(0x%x) " \ - "is invalid", \ - io_type, scsi_id); \ - } \ - } while (0) - -#define UNF_SCSI_ERROR_HANDLE_CNT(scsi_table, scsi_id, io_type) \ - do { \ - if (likely(((io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \ - ((scsi_id) < UNF_MAX_SCSI_ID) && \ - ((scsi_table)->wwn_rport_info_table) && \ - (((scsi_table)->wwn_rport_info_table[scsi_id] \ - .dfx_counter)))) { \ - atomic_inc(&((scsi_table)->wwn_rport_info_table[scsi_id] \ - .dfx_counter->error_handle[io_type])); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, \ - UNF_ERR, \ - "[err] scsi_cmd(0x%x) or scsi id(0x%x) " \ - "is invalid", \ - (io_type), (scsi_id)); \ - } \ - } while (0) - -#define UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_table, scsi_id, io_type) \ - do { \ - if (likely(((io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \ - ((scsi_id) < UNF_MAX_SCSI_ID) && \ - ((scsi_table)->wwn_rport_info_table) &&\ - (((scsi_table)-> \ - wwn_rport_info_table[scsi_id].dfx_counter)))) { \ - atomic_inc(&( \ - (scsi_table) \ - ->wwn_rport_info_table[scsi_id] \ - .dfx_counter->error_handle_result[io_type])); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, \ - UNF_ERR, \ - 
"[err] scsi_cmd(0x%x) or scsi id(0x%x) " \ - "is invalid", \ - io_type, scsi_id); \ - } \ - } while (0) - -void unf_rport_state_ma(struct unf_rport *rport, enum unf_rport_event event); -void unf_update_lport_state_by_linkup_event(struct unf_lport *lport, - struct unf_rport *rport, - u32 rport_att); - -void unf_set_rport_state(struct unf_rport *rport, enum unf_rport_login_state states); -void unf_rport_enter_closing(struct unf_rport *rport); -u32 unf_release_rport_res(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_initrport_mgr_temp(struct unf_lport *lport); -void unf_clean_linkdown_rport(struct unf_lport *lport); -void unf_rport_error_recovery(struct unf_rport *rport); -struct unf_rport *unf_get_rport_by_nport_id(struct unf_lport *lport, u32 nport_id); -struct unf_rport *unf_get_rport_by_wwn(struct unf_lport *lport, u64 wwpn); -void unf_rport_enter_logo(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_rport_ref_inc(struct unf_rport *rport); -void unf_rport_ref_dec(struct unf_rport *rport); - -struct unf_rport *unf_rport_set_qualifier_key_reuse(struct unf_lport *lport, - struct unf_rport *rport_by_nport_id, - struct unf_rport *rport_by_wwpn, - u64 wwpn, u32 sid); -void unf_rport_delay_login(struct unf_rport *rport); -struct unf_rport *unf_find_valid_rport(struct unf_lport *lport, u64 wwpn, - u32 sid); -void unf_rport_linkdown(struct unf_lport *lport, struct unf_rport *rport); -void unf_apply_for_session(struct unf_lport *lport, struct unf_rport *rport); -struct unf_rport *unf_get_safe_rport(struct unf_lport *lport, - struct unf_rport *rport, - enum unf_rport_reuse_flag reuse_flag, - u32 nport_id); -void *unf_rport_get_free_and_init(void *lport, u32 port_type, u32 nport_id); - -void unf_set_device_state(struct unf_lport *lport, u32 scsi_id, int scsi_state); -u32 unf_get_scsi_id_by_wwpn(struct unf_lport *lport, u64 wwpn); -u32 unf_get_device_state(struct unf_lport *lport, u32 scsi_id); -u32 unf_free_scsi_id(struct unf_lport *lport, u32 scsi_id); -void unf_schedule_closing_work(struct unf_lport *lport, struct unf_rport *rport); -void unf_sesion_loss_timeout(struct work_struct *work); -u32 unf_get_port_feature(u64 wwpn); -void unf_update_port_feature(u64 wwpn, u32 port_feature); - -#endif diff --git a/drivers/scsi/spfc/common/unf_scsi.c b/drivers/scsi/spfc/common/unf_scsi.c deleted file mode 100644 index 3615d95c77e981a3fb7a6d1fc69f1103fee0a219..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_scsi.c +++ /dev/null @@ -1,1462 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_type.h" -#include "unf_log.h" -#include "unf_scsi_common.h" -#include "unf_lport.h" -#include "unf_rport.h" -#include "unf_portman.h" -#include "unf_exchg.h" -#include "unf_exchg_abort.h" -#include "unf_npiv.h" -#include "unf_io.h" - -#define UNF_LUN_ID_MASK 0x00000000ffff0000 -#define UNF_CMD_PER_LUN 3 - -static int unf_scsi_queue_cmd(struct Scsi_Host *phost, struct scsi_cmnd *pcmd); -static int unf_scsi_abort_scsi_cmnd(struct scsi_cmnd *v_cmnd); -static int unf_scsi_device_reset_handler(struct scsi_cmnd *v_cmnd); -static int unf_scsi_bus_reset_handler(struct scsi_cmnd *v_cmnd); -static int unf_scsi_target_reset_handler(struct scsi_cmnd *v_cmnd); -static int unf_scsi_slave_alloc(struct scsi_device *sdev); -static void unf_scsi_destroy_slave(struct scsi_device *sdev); -static int unf_scsi_slave_configure(struct scsi_device *sdev); -static int unf_scsi_scan_finished(struct Scsi_Host *shost, unsigned long 
time); -static void unf_scsi_scan_start(struct Scsi_Host *shost); - -static struct scsi_transport_template *scsi_transport_template; -static struct scsi_transport_template *scsi_transport_template_v; - -struct unf_ini_error_code ini_error_code_table1[] = { - {UNF_IO_SUCCESS, UNF_SCSI_HOST(DID_OK)}, - {UNF_IO_ABORTED, UNF_SCSI_HOST(DID_ABORT)}, - {UNF_IO_FAILED, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_ABORT_ABTS, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_ABORT_LOGIN, UNF_SCSI_HOST(DID_NO_CONNECT)}, - {UNF_IO_ABORT_REET, UNF_SCSI_HOST(DID_RESET)}, - {UNF_IO_ABORT_FAILED, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_OUTOF_ORDER, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_FTO, UNF_SCSI_HOST(DID_TIME_OUT)}, - {UNF_IO_LINK_FAILURE, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_OVER_FLOW, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_RSP_OVER, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_LOST_FRAME, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_UNDER_FLOW, UNF_SCSI_HOST(DID_OK)}, - {UNF_IO_HOST_PROG_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_SEST_PROG_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_INVALID_ENTRY, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_ABORT_SEQ_NOT, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_REJECT, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_EDC_IN_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_EDC_OUT_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_UNINIT_KEK_ERR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_DEK_OUTOF_RANGE, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_KEY_UNWRAP_ERR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_KEY_TAG_ERR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_KEY_ECC_ERR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_BLOCK_SIZE_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_ILLEGAL_CIPHER_MODE, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_CLEAN_UP, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_ABORTED_BY_TARGET, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_TRANSPORT_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_LINK_FLASH, UNF_SCSI_HOST(DID_NO_CONNECT)}, - {UNF_IO_TIMEOUT, UNF_SCSI_HOST(DID_TIME_OUT)}, - {UNF_IO_DMA_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_NO_LPORT, UNF_SCSI_HOST(DID_NO_CONNECT)}, - {UNF_IO_NO_XCHG, UNF_SCSI_HOST(DID_SOFT_ERROR)}, - {UNF_IO_SOFT_ERR, UNF_SCSI_HOST(DID_SOFT_ERROR)}, - {UNF_IO_PORT_LOGOUT, UNF_SCSI_HOST(DID_NO_CONNECT)}, - {UNF_IO_ERREND, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_DIF_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION))}, - {UNF_IO_INCOMPLETE, UNF_SCSI_HOST(DID_IMM_RETRY)}, - {UNF_IO_DIF_REF_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION))}, - {UNF_IO_DIF_GEN_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION))} -}; - -u32 ini_err_code_table_cnt1 = sizeof(ini_error_code_table1) / sizeof(struct unf_ini_error_code); - -static void unf_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) -{ - if (timeout) - rport->dev_loss_tmo = timeout; - else - rport->dev_loss_tmo = 1; -} - -static void unf_get_host_port_id(struct Scsi_Host *shost) -{ - struct unf_lport *unf_lport = NULL; - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is null"); - return; - } - - fc_host_port_id(shost) = unf_lport->port_id; -} - -static void unf_get_host_speed(struct Scsi_Host *shost) -{ - struct unf_lport *unf_lport = NULL; - u32 speed = FC_PORTSPEED_UNKNOWN; - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is null"); - return; - } - - switch (unf_lport->speed) { - case UNF_PORT_SPEED_2_G: - speed = FC_PORTSPEED_2GBIT; - break; - case 
UNF_PORT_SPEED_4_G: - speed = FC_PORTSPEED_4GBIT; - break; - case UNF_PORT_SPEED_8_G: - speed = FC_PORTSPEED_8GBIT; - break; - case UNF_PORT_SPEED_16_G: - speed = FC_PORTSPEED_16GBIT; - break; - case UNF_PORT_SPEED_32_G: - speed = FC_PORTSPEED_32GBIT; - break; - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with unknown speed(0x%x) for FC mode", - unf_lport->port_id, unf_lport->speed); - break; - } - - fc_host_speed(shost) = speed; -} - -static void unf_get_host_port_type(struct Scsi_Host *shost) -{ - struct unf_lport *unf_lport = NULL; - u32 port_type = FC_PORTTYPE_UNKNOWN; - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is null"); - return; - } - - switch (unf_lport->act_topo) { - case UNF_ACT_TOP_PRIVATE_LOOP: - port_type = FC_PORTTYPE_LPORT; - break; - case UNF_ACT_TOP_PUBLIC_LOOP: - port_type = FC_PORTTYPE_NLPORT; - break; - case UNF_ACT_TOP_P2P_DIRECT: - port_type = FC_PORTTYPE_PTP; - break; - case UNF_ACT_TOP_P2P_FABRIC: - port_type = FC_PORTTYPE_NPORT; - break; - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with unknown topo type(0x%x) for FC mode", - unf_lport->port_id, unf_lport->act_topo); - break; - } - - fc_host_port_type(shost) = port_type; -} - -static void unf_get_symbolic_name(struct Scsi_Host *shost) -{ - u8 *name = NULL; - struct unf_lport *unf_lport = NULL; - - unf_lport = (struct unf_lport *)(uintptr_t)shost->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Check l_port failed"); - return; - } - - name = fc_host_symbolic_name(shost); - if (name) - snprintf(name, FC_SYMBOLIC_NAME_SIZE, "SPFC_FW_RELEASE:%s SPFC_DRV_RELEASE:%s", - unf_lport->fw_version, SPFC_DRV_VERSION); -} - -static void unf_get_host_fabric_name(struct Scsi_Host *shost) -{ - struct unf_lport *unf_lport = NULL; - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is null"); - return; - } - fc_host_fabric_name(shost) = unf_lport->fabric_node_name; -} - -static void unf_get_host_port_state(struct Scsi_Host *shost) -{ - struct unf_lport *unf_lport = NULL; - enum fc_port_state port_state; - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is null"); - return; - } - - switch (unf_lport->link_up) { - case UNF_PORT_LINK_DOWN: - port_state = FC_PORTSTATE_OFFLINE; - break; - case UNF_PORT_LINK_UP: - port_state = FC_PORTSTATE_ONLINE; - break; - default: - port_state = FC_PORTSTATE_UNKNOWN; - break; - } - - fc_host_port_state(shost) = port_state; -} - -static void unf_dev_loss_timeout_callbk(struct fc_rport *rport) -{ - /* - * NOTE: about rport->dd_data - * --->>> local SCSI_ID - * 1. Assignment during scsi rport link up - * 2. Released when scsi rport link down & timeout(30s) - * 3. 
Used during scsi do callback with slave_alloc function - */ - struct Scsi_Host *host = NULL; - struct unf_lport *unf_lport = NULL; - u32 scsi_id = 0; - - if (unlikely(!rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]SCSI rport is null"); - return; - } - - host = rport_to_shost(rport); - if (unlikely(!host)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Host is null"); - return; - } - - scsi_id = *(u32 *)(rport->dd_data); /* according to Local SCSI_ID */ - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]rport(0x%p) scsi_id(0x%x) is max than(0x%x)", - rport, scsi_id, UNF_MAX_SCSI_ID); - return; - } - - unf_lport = (struct unf_lport *)host->hostdata[0]; - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[event]Port(0x%x_0x%x) rport(0x%p) scsi_id(0x%x) target_id(0x%x) loss timeout", - unf_lport->port_id, unf_lport->nport_id, rport, - scsi_id, rport->scsi_target_id); - - atomic_inc(&unf_lport->session_loss_tmo); - - /* Free SCSI ID & set table state with DEAD */ - (void)unf_free_scsi_id(unf_lport, scsi_id); - unf_xchg_up_abort_io_by_scsi_id(unf_lport, scsi_id); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(%p) is invalid", unf_lport); - } - - *((u32 *)rport->dd_data) = INVALID_VALUE32; -} - -int unf_scsi_create_vport(struct fc_vport *fc_port, bool disabled) -{ - struct unf_lport *vport = NULL; - struct unf_lport *unf_lport = NULL; - struct Scsi_Host *shost = NULL; - struct vport_config vport_config = {0}; - - shost = vport_to_shost(fc_port); - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - if (unf_is_lport_valid(unf_lport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(%p) is invalid", unf_lport); - - return RETURN_ERROR; - } - - vport_config.port_name = fc_port->port_name; - - vport_config.port_mode = fc_port->roles; - - vport = unf_creat_vport(unf_lport, &vport_config); - if (!vport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) Create Vport failed on lldrive", - unf_lport->port_id); - - return RETURN_ERROR; - } - - fc_port->dd_data = vport; - vport->vport = fc_port; - - return RETURN_OK; -} - -int unf_scsi_delete_vport(struct fc_vport *fc_port) -{ - int ret = RETURN_ERROR; - struct unf_lport *vport = NULL; - - vport = (struct unf_lport *)fc_port->dd_data; - if (unf_is_lport_valid(vport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]VPort(%p) is invalid or is removing", vport); - - fc_port->dd_data = NULL; - - return ret; - } - - ret = (int)unf_destroy_one_vport(vport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]VPort(0x%x) destroy failed on drive", vport->port_id); - - return ret; - } - - fc_port->dd_data = NULL; - return ret; -} - -struct fc_function_template function_template = { - .show_host_node_name = 1, - .show_host_port_name = 1, - .show_host_supported_classes = 1, - .show_host_supported_speeds = 1, - - .get_host_port_id = unf_get_host_port_id, - .show_host_port_id = 1, - .get_host_speed = unf_get_host_speed, - .show_host_speed = 1, - .get_host_port_type = unf_get_host_port_type, - .show_host_port_type = 1, - .get_host_symbolic_name = unf_get_symbolic_name, - .show_host_symbolic_name = 1, - .set_host_system_hostname = NULL, - .show_host_system_hostname = 1, - .get_host_fabric_name = unf_get_host_fabric_name, - .show_host_fabric_name = 1, - .get_host_port_state = unf_get_host_port_state, - .show_host_port_state = 1, - - .dd_fcrport_size = 
sizeof(void *), - .show_rport_supported_classes = 1, - - .get_starget_node_name = NULL, - .show_starget_node_name = 1, - .get_starget_port_name = NULL, - .show_starget_port_name = 1, - .get_starget_port_id = NULL, - .show_starget_port_id = 1, - - .set_rport_dev_loss_tmo = unf_set_rport_loss_tmo, - .show_rport_dev_loss_tmo = 0, - - .issue_fc_host_lip = NULL, - .dev_loss_tmo_callbk = unf_dev_loss_timeout_callbk, - .terminate_rport_io = NULL, - .get_fc_host_stats = NULL, - - .vport_create = unf_scsi_create_vport, - .vport_disable = NULL, - .vport_delete = unf_scsi_delete_vport, - .bsg_request = NULL, - .bsg_timeout = NULL, -}; - -struct fc_function_template function_template_v = { - .show_host_node_name = 1, - .show_host_port_name = 1, - .show_host_supported_classes = 1, - .show_host_supported_speeds = 1, - - .get_host_port_id = unf_get_host_port_id, - .show_host_port_id = 1, - .get_host_speed = unf_get_host_speed, - .show_host_speed = 1, - .get_host_port_type = unf_get_host_port_type, - .show_host_port_type = 1, - .get_host_symbolic_name = unf_get_symbolic_name, - .show_host_symbolic_name = 1, - .set_host_system_hostname = NULL, - .show_host_system_hostname = 1, - .get_host_fabric_name = unf_get_host_fabric_name, - .show_host_fabric_name = 1, - .get_host_port_state = unf_get_host_port_state, - .show_host_port_state = 1, - - .dd_fcrport_size = sizeof(void *), - .show_rport_supported_classes = 1, - - .get_starget_node_name = NULL, - .show_starget_node_name = 1, - .get_starget_port_name = NULL, - .show_starget_port_name = 1, - .get_starget_port_id = NULL, - .show_starget_port_id = 1, - - .set_rport_dev_loss_tmo = unf_set_rport_loss_tmo, - .show_rport_dev_loss_tmo = 0, - - .issue_fc_host_lip = NULL, - .dev_loss_tmo_callbk = unf_dev_loss_timeout_callbk, - .terminate_rport_io = NULL, - .get_fc_host_stats = NULL, - - .vport_create = NULL, - .vport_disable = NULL, - .vport_delete = NULL, - .bsg_request = NULL, - .bsg_timeout = NULL, -}; - -struct scsi_host_template scsi_host_template = { - .module = THIS_MODULE, - .name = "SPFC", - - .queuecommand = unf_scsi_queue_cmd, - .eh_timed_out = fc_eh_timed_out, - .eh_abort_handler = unf_scsi_abort_scsi_cmnd, - .eh_device_reset_handler = unf_scsi_device_reset_handler, - - .eh_target_reset_handler = unf_scsi_target_reset_handler, - .eh_bus_reset_handler = unf_scsi_bus_reset_handler, - .eh_host_reset_handler = NULL, - - .slave_configure = unf_scsi_slave_configure, - .slave_alloc = unf_scsi_slave_alloc, - .slave_destroy = unf_scsi_destroy_slave, - - .scan_finished = unf_scsi_scan_finished, - .scan_start = unf_scsi_scan_start, - - .this_id = -1, /* this_id: -1 */ - .cmd_per_lun = UNF_CMD_PER_LUN, - .shost_attrs = NULL, - .sg_tablesize = SG_ALL, - .max_sectors = UNF_MAX_SECTORS, - .supported_mode = MODE_INITIATOR, -}; - -void unf_unmap_prot_sgl(struct scsi_cmnd *cmnd) -{ - struct device *dev = NULL; - - if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && spfc_dif_enable && - (scsi_prot_sg_count(cmnd))) { - dev = cmnd->device->host->dma_dev; - dma_unmap_sg(dev, scsi_prot_sglist(cmnd), - (int)scsi_prot_sg_count(cmnd), - cmnd->sc_data_direction); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "scsi done cmd:%p op:%u, difsglcount:%u", cmnd, - scsi_get_prot_op(cmnd), scsi_prot_sg_count(cmnd)); - } -} - -void unf_scsi_done(struct unf_scsi_cmnd *scsi_cmd) -{ - struct scsi_cmnd *cmd = NULL; - - cmd = (struct scsi_cmnd *)scsi_cmd->upper_cmnd; - FC_CHECK_RETURN_VOID(scsi_cmd); - FC_CHECK_RETURN_VOID(cmd); - FC_CHECK_RETURN_VOID(cmd->scsi_done); - scsi_set_resid(cmd, 
(int)scsi_cmd->resid); - - cmd->result = scsi_cmd->result; - scsi_dma_unmap(cmd); - unf_unmap_prot_sgl(cmd); - return cmd->scsi_done(cmd); -} - -static void unf_get_protect_op(struct scsi_cmnd *cmd, - struct unf_dif_control_info *dif_control_info) -{ - switch (scsi_get_prot_op(cmd)) { - /* OS-HBA: Unprotected, HBA-Target: Protected */ - case SCSI_PROT_READ_STRIP: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_VERIFY_AND_DELETE; - break; - case SCSI_PROT_WRITE_INSERT: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_INSERT; - break; - - /* OS-HBA: Protected, HBA-Target: Unprotected */ - case SCSI_PROT_READ_INSERT: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_INSERT; - break; - case SCSI_PROT_WRITE_STRIP: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_VERIFY_AND_DELETE; - break; - - /* OS-HBA: Protected, HBA-Target: Protected */ - case SCSI_PROT_READ_PASS: - case SCSI_PROT_WRITE_PASS: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_VERIFY_AND_FORWARD; - break; - - default: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_VERIFY_AND_FORWARD; - break; - } -} - -int unf_get_protect_mode(struct unf_lport *lport, struct scsi_cmnd *scsi_cmd, - struct unf_scsi_cmnd *unf_scsi_cmd) -{ - struct scsi_cmnd *cmd = NULL; - int dif_seg_cnt = 0; - struct unf_dif_control_info *dif_control_info = NULL; - - cmd = scsi_cmd; - dif_control_info = &unf_scsi_cmd->dif_control; - - unf_get_protect_op(cmd, dif_control_info); - - if (dif_sgl_mode) - dif_control_info->flags |= UNF_DIF_DOUBLE_SGL; - dif_control_info->flags |= ((cmd->device->sector_size) == SECTOR_SIZE_4096) - ? UNF_DIF_SECTSIZE_4KB : UNF_DIF_SECTSIZE_512; - dif_control_info->protect_opcode |= UNF_VERIFY_CRC_MASK | UNF_VERIFY_LBA_MASK; - dif_control_info->dif_sge_count = scsi_prot_sg_count(cmd); - dif_control_info->dif_sgl = scsi_prot_sglist(cmd); - dif_control_info->start_lba = cpu_to_le32(((uint32_t)(0xffffffff & scsi_get_lba(cmd)))); - - if (cmd->device->sector_size == SECTOR_SIZE_4096) - dif_control_info->start_lba = dif_control_info->start_lba >> UNF_SHIFT_3; - - if (scsi_prot_sg_count(cmd)) { - dif_seg_cnt = dma_map_sg(&lport->low_level_func.dev->dev, scsi_prot_sglist(cmd), - (int)scsi_prot_sg_count(cmd), cmd->sc_data_direction); - if (unlikely(!dif_seg_cnt)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) cmd:%p map dif sgl err", - lport->port_id, cmd); - return UNF_RETURN_ERROR; - } - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "build scsi cmd:%p op:%u,difsglcount:%u,difsegcnt:%u", cmd, - scsi_get_prot_op(cmd), scsi_prot_sg_count(cmd), - dif_seg_cnt); - return RETURN_OK; -} - -static u32 unf_get_rport_qos_level(struct scsi_cmnd *cmd, u32 scsi_id, - struct unf_rport_scsi_id_image *scsi_image_table) -{ - enum unf_rport_qos_level level = 0; - - if (!scsi_image_table->wwn_rport_info_table[scsi_id].lun_qos_level || - cmd->device->lun >= UNF_MAX_LUN_PER_TARGET) { - level = 0; - } else { - level = (scsi_image_table->wwn_rport_info_table[scsi_id] - .lun_qos_level[cmd->device->lun]); - } - return level; -} - -u32 unf_get_frame_entry_buf(void *up_cmnd, void *driver_sgl, void **upper_sgl, - u32 *port_id, u32 *index, char **buf, u32 *buf_len) -{ -#define SPFC_MAX_DMA_LENGTH (0x20000 - 1) - struct scatterlist *scsi_sgl = *upper_sgl; - - if (unlikely(!scsi_sgl)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Command(0x%p) can not get SGL.", up_cmnd); - return RETURN_ERROR; - } - *buf = (char *)sg_dma_address(scsi_sgl); - *buf_len = sg_dma_len(scsi_sgl); - *upper_sgl = (void *)sg_next(scsi_sgl); - if 
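-	/*
-	 * unf_get_frame_entry_buf() is the per-entry cursor the low level
-	 * driver uses to walk a DMA-mapped scatterlist: each call returns
-	 * one segment's bus address and length through *buf / *buf_len and
-	 * advances *upper_sgl via sg_next(). The check that follows rejects
-	 * segments that are empty or larger than SPFC_MAX_DMA_LENGTH
-	 * (0x20000 - 1 bytes).
-	 */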
(unlikely((*buf_len > SPFC_MAX_DMA_LENGTH) || (*buf_len == 0))) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR,
-			     "[err]Command(0x%p) dmalen:0x%x is not supported.",
-			     up_cmnd, *buf_len);
-		return RETURN_ERROR;
-	}
-
-	return RETURN_OK;
-}
-
-static void unf_init_scsi_cmnd(struct Scsi_Host *host, struct scsi_cmnd *cmd,
-			       struct unf_scsi_cmnd *scsi_cmnd,
-			       struct unf_rport_scsi_id_image *scsi_image_table,
-			       int datasegcnt)
-{
-	static atomic64_t count;
-	enum unf_rport_qos_level level = 0;
-	u32 scsi_id = 0;
-
-	scsi_id = (u32)((u64)cmd->device->hostdata);
-	level = unf_get_rport_qos_level(cmd, scsi_id, scsi_image_table);
-	scsi_cmnd->scsi_host_id = host->host_no; /* save host_no to scsi_cmnd->scsi_host_id */
-	scsi_cmnd->scsi_id = scsi_id;
-	scsi_cmnd->raw_lun_id = ((u64)cmd->device->lun << 16) & UNF_LUN_ID_MASK;
-	scsi_cmnd->data_direction = cmd->sc_data_direction;
-	scsi_cmnd->under_flow = cmd->underflow;
-	scsi_cmnd->cmnd_len = cmd->cmd_len;
-	scsi_cmnd->pcmnd = cmd->cmnd;
-	scsi_cmnd->transfer_len = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-	scsi_cmnd->sense_buflen = UNF_SCSI_SENSE_BUFFERSIZE;
-	scsi_cmnd->sense_buf = cmd->sense_buffer;
-	scsi_cmnd->time_out = 0;
-	scsi_cmnd->upper_cmnd = cmd;
-	scsi_cmnd->drv_private = (void *)(*(u64 *)shost_priv(host));
-	scsi_cmnd->entry_count = datasegcnt;
-	scsi_cmnd->sgl = scsi_sglist(cmd);
-	scsi_cmnd->unf_ini_get_sgl_entry = unf_get_frame_entry_buf;
-	scsi_cmnd->done = unf_scsi_done;
-	scsi_cmnd->lun_id = (u8 *)&scsi_cmnd->raw_lun_id;
-	scsi_cmnd->err_code_table_cout = ini_err_code_table_cnt1;
-	scsi_cmnd->err_code_table = ini_error_code_table1;
-	scsi_cmnd->world_id = INVALID_WORLD_ID;
-	scsi_cmnd->cmnd_sn = atomic64_inc_return(&count);
-	scsi_cmnd->qos_level = level;
-	if (unlikely(scsi_cmnd->cmnd_sn == 0))
-		scsi_cmnd->cmnd_sn = atomic64_inc_return(&count);
-}
-
-static void unf_io_error_done(struct scsi_cmnd *cmd,
-			      struct unf_rport_scsi_id_image *scsi_image_table,
-			      u32 scsi_id, u32 result)
-{
-	cmd->result = (int)(result << UNF_SHIFT_16);
-	cmd->scsi_done(cmd);
-	if (scsi_image_table)
-		UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, result);
-}
-
-static bool unf_scan_device_cmd(struct scsi_cmnd *cmd)
-{
-	return ((cmd->cmnd[0] == INQUIRY) || (cmd->cmnd[0] == REPORT_LUNS));
-}
-
-static int unf_scsi_queue_cmd(struct Scsi_Host *phost, struct scsi_cmnd *pcmd)
-{
-	struct Scsi_Host *host = NULL;
-	struct scsi_cmnd *cmd = NULL;
-	struct unf_scsi_cmnd scsi_cmd = {0};
-	u32 scsi_id = 0;
-	u32 scsi_state = 0;
-	int ret = SCSI_MLQUEUE_HOST_BUSY;
-	struct unf_lport *unf_lport = NULL;
-	struct fc_rport *rport = NULL;
-	struct unf_rport_scsi_id_image *scsi_image_table = NULL;
-	struct unf_rport *unf_rport = NULL;
-	u32 cmnd_result = 0;
-	u32 rport_state_err = 0;
-	bool scan_device_cmd = false;
-	int datasegcnt = 0;
-
-	host = phost;
-	cmd = pcmd;
-	FC_CHECK_RETURN_VALUE(host, RETURN_ERROR);
-	FC_CHECK_RETURN_VALUE(cmd, RETURN_ERROR);
-
-	/* Get L_Port from scsi_cmd */
-	unf_lport = (struct unf_lport *)host->hostdata[0];
-	if (unlikely(!unf_lport)) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-			     "[err]Check l_port failed, cmd(%p)", cmd);
-		unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT);
-		return 0;
-	}
-
-	/* Check device/session local state by device_id */
-	scsi_id = (u32)((u64)cmd->device->hostdata);
-	if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-			     "[err]Port(0x%x) scsi_id(0x%x) exceeds max %d",
-			     unf_lport->port_id, scsi_id, UNF_MAX_SCSI_ID);
-		unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT);
-		return 0;
-	}
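-	/*
-	 * unf_io_error_done() shifts the host byte left by 16 because the
-	 * midlayer packs cmd->result as
-	 * (driver << 24 | host << 16 | msg << 8 | status); a DID_* code in
-	 * the host byte fails the command without retry. This matches the
-	 * UNF_SCSI_HOST(byte) macro in unf_scsi_common.h below.
-	 */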
-
-	scsi_image_table = &unf_lport->rport_scsi_table;
-	UNF_SCSI_CMD_CNT(scsi_image_table, scsi_id, cmd->cmnd[0]);
-
-	/* Get scsi r_port */
-	rport = starget_to_rport(scsi_target(cmd->device));
-	if (unlikely(!rport)) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-			     "[err]Port(0x%x) cmd(%p) to get scsi rport failed",
-			     unf_lport->port_id, cmd);
-		unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT);
-		return 0;
-	}
-
-	if (unlikely(!scsi_image_table->wwn_rport_info_table)) {
-		FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_WARN,
-			     "[warn]LPort portid(0x%x) WwnRportInfoTable NULL",
-			     unf_lport->port_id);
-		unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT);
-		return 0;
-	}
-
-	if (unlikely(unf_lport->port_removing)) {
-		FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_WARN,
-			     "[warn]Port(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p) unf_lport removing",
-			     unf_lport->port_id, scsi_id, rport, rport->scsi_target_id, cmd);
-		unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT);
-		return 0;
-	}
-
-	scsi_state = atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].scsi_state);
-	if (unlikely(scsi_state != UNF_SCSI_ST_ONLINE)) {
-		if (scsi_state == UNF_SCSI_ST_OFFLINE) {
-			FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-				     "[warn]Port(0x%x) scsi_state(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), target is busy",
-				     unf_lport->port_id, scsi_state, scsi_id, rport,
-				     rport->scsi_target_id, cmd);
-
-			scan_device_cmd = unf_scan_device_cmd(cmd);
-			/* For REPORT LUNS or INQUIRY commands, do not retry
-			 * on send failure, to prevent tasks deadlocking on
-			 * the scsi_host scan_mutex
-			 */
-			if (scan_device_cmd) {
-				FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-					     "[warn]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) cmd(0x%x) DID_NO_CONNECT",
-					     unf_lport->port_id, host->host_no, scsi_id,
-					     (u64)cmd->device->lun, cmd->cmnd[0]);
-				unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT);
-				return 0;
-			}
-
-			if (likely(scsi_image_table->wwn_rport_info_table)) {
-				if (likely(scsi_image_table->wwn_rport_info_table[scsi_id]
-				    .dfx_counter)) {
-					atomic64_inc(&(scsi_image_table
-					    ->wwn_rport_info_table[scsi_id]
-					    .dfx_counter->target_busy));
-				}
-			}
-
-			/* Target busy: need scsi retry */
-			return SCSI_MLQUEUE_TARGET_BUSY;
-		}
-		/* timeout(DEAD): scsi_done & return 0 & I/O error */
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), target dev loss timed out",
-			     unf_lport->port_id, scsi_id, rport,
-			     rport->scsi_target_id, cmd);
-		unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT);
-		return 0;
-	}
-
-	if (scsi_sg_count(cmd)) {
-		datasegcnt = dma_map_sg(&unf_lport->low_level_func.dev->dev, scsi_sglist(cmd),
-					(int)scsi_sg_count(cmd), cmd->sc_data_direction);
-		if (unlikely(!datasegcnt)) {
-			FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-				     "[warn]Port(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), dma map sg err",
-				     unf_lport->port_id, scsi_id, rport,
-				     rport->scsi_target_id, cmd);
-			unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_BUS_BUSY);
-			return SCSI_MLQUEUE_HOST_BUSY;
-		}
-	}
-
-	/* Construct local SCSI CMND info */
-	unf_init_scsi_cmnd(host, cmd, &scsi_cmd, scsi_image_table, datasegcnt);
-
-	if ((scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) && spfc_dif_enable) {
-		ret = unf_get_protect_mode(unf_lport, cmd, &scsi_cmd);
-		if (ret != RETURN_OK) {
-			unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_BUS_BUSY);
-			scsi_dma_unmap(cmd);
-			return SCSI_MLQUEUE_HOST_BUSY;
-		}
-	}
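-	/*
-	 * Return-value convention in this function: 0 means the command was
-	 * consumed (its done callback has run or will run), while
-	 * SCSI_MLQUEUE_TARGET_BUSY and SCSI_MLQUEUE_HOST_BUSY ask the
-	 * midlayer to requeue the command and retry it later.
-	 */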
-
-	FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO,
-		     "[info]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) transfer length(0x%x) cmd_len(0x%x) direction(0x%x) cmd(0x%x) under_flow(0x%x) protect_opcode is (0x%x) dif_sgl_mode is %d, sector size(%d)",
-		     unf_lport->port_id, host->host_no, scsi_id, (u64)cmd->device->lun,
-		     scsi_cmd.transfer_len, scsi_cmd.cmnd_len, cmd->sc_data_direction,
-		     scsi_cmd.pcmnd[0], scsi_cmd.under_flow,
-		     scsi_cmd.dif_control.protect_opcode, dif_sgl_mode,
-		     (cmd->device->sector_size));
-
-	/* Bind the Exchange address corresponding to scsi_cmd to
-	 * scsi_cmd->host_scribble
-	 */
-	cmd->host_scribble = (unsigned char *)scsi_cmd.cmnd_sn;
-	ret = unf_cm_queue_command(&scsi_cmd);
-	if (ret != RETURN_OK) {
-		unf_rport = unf_find_rport_by_scsi_id(unf_lport, ini_error_code_table1,
-						      ini_err_code_table_cnt1,
-						      scsi_id, &cmnd_result);
-		rport_state_err = (!unf_rport) ||
-		    (unf_rport->lport_ini_state != UNF_PORT_STATE_LINKUP) ||
-		    (unf_rport->rp_state == UNF_RPORT_ST_CLOSING);
-		scan_device_cmd = unf_scan_device_cmd(cmd);
-
-		/* For REPORT LUNS or INQUIRY commands, do not retry on send
-		 * failure, to prevent tasks deadlocking on the scsi_host
-		 * scan_mutex
-		 */
-		if (rport_state_err && scan_device_cmd) {
-			FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-				     "[warn]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) cmd(0x%x) cmResult(0x%x) DID_NO_CONNECT",
-				     unf_lport->port_id, host->host_no, scsi_id,
-				     (u64)cmd->device->lun, cmd->cmnd[0],
-				     cmnd_result);
-			unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT);
-			scsi_dma_unmap(cmd);
-			unf_unmap_prot_sgl(cmd);
-			return 0;
-		}
-
-		/* Host busy: scsi need to retry */
-		ret = SCSI_MLQUEUE_HOST_BUSY;
-		if (likely(scsi_image_table->wwn_rport_info_table)) {
-			if (likely(scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter)) {
-				atomic64_inc(&(scsi_image_table->wwn_rport_info_table[scsi_id]
-				    .dfx_counter->host_busy));
-			}
-		}
-		scsi_dma_unmap(cmd);
-		unf_unmap_prot_sgl(cmd);
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) return(0x%x) to process INI IO failed",
-			     unf_lport->port_id, ret);
-	}
-	return ret;
-}
-
-static void unf_init_abts_tmf_scsi_cmd(struct scsi_cmnd *cmnd,
-				       struct unf_scsi_cmnd *scsi_cmd,
-				       bool abort_cmd)
-{
-	struct Scsi_Host *scsi_host = NULL;
-
-	scsi_host = cmnd->device->host;
-	scsi_cmd->scsi_host_id = scsi_host->host_no;
-	scsi_cmd->scsi_id = (u32)((u64)cmnd->device->hostdata);
-	scsi_cmd->raw_lun_id = (u64)cmnd->device->lun;
-	scsi_cmd->upper_cmnd = cmnd;
-	scsi_cmd->drv_private = (void *)(*(u64 *)shost_priv(scsi_host));
-	scsi_cmd->cmnd_sn = (u64)(cmnd->host_scribble);
-	scsi_cmd->lun_id = (u8 *)&scsi_cmd->raw_lun_id;
-	if (abort_cmd) {
-		scsi_cmd->done = unf_scsi_done;
-		scsi_cmd->world_id = INVALID_WORLD_ID;
-	}
-}
-
-int unf_scsi_abort_scsi_cmnd(struct scsi_cmnd *cmnd)
-{
-	/* SCSI ABORT Command --->>> FC ABTS */
-	struct unf_scsi_cmnd scsi_cmd = {0};
-	int ret = FAILED;
-	struct unf_rport_scsi_id_image *scsi_image_table = NULL;
-	struct unf_lport *unf_lport = NULL;
-	u32 scsi_id = 0;
-	u32 err_handle = 0;
-
-	FC_CHECK_RETURN_VALUE(cmnd, FAILED);
-
-	unf_lport = (struct unf_lport *)cmnd->device->host->hostdata[0];
-	scsi_id = (u32)((u64)cmnd->device->hostdata);
-
-	if (unf_is_lport_valid(unf_lport) == RETURN_OK) {
-		scsi_image_table = &unf_lport->rport_scsi_table;
-		err_handle = UNF_SCSI_ABORT_IO_TYPE;
-		UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, scsi_id, err_handle);
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-			     "[abort]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) 
cmnd_type(0x%x)", - unf_lport->port_id, scsi_id, - (u32)cmnd->device->lun, cmnd->cmnd[0]); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Lport(%p) is moving or null", unf_lport); - return UNF_SCSI_ABORT_FAIL; - } - - /* Check local SCSI_ID validity */ - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]scsi_id(0x%x) is max than(0x%x)", scsi_id, - UNF_MAX_SCSI_ID); - return UNF_SCSI_ABORT_FAIL; - } - - /* Block scsi (check rport state -> whether offline or not) */ - ret = fc_block_scsi_eh(cmnd); - if (unlikely(ret != 0)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Block scsi eh failed(0x%x)", ret); - return ret; - } - - unf_init_abts_tmf_scsi_cmd(cmnd, &scsi_cmd, true); - /* Process scsi Abort cmnd */ - ret = unf_cm_eh_abort_handler(&scsi_cmd); - if (ret == UNF_SCSI_ABORT_SUCCESS) { - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_ABORT_IO_TYPE; - UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, - scsi_id, err_handle); - } - } - - return ret; -} - -int unf_scsi_device_reset_handler(struct scsi_cmnd *cmnd) -{ - /* LUN reset */ - struct unf_scsi_cmnd scsi_cmd = {0}; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - int ret = FAILED; - struct unf_lport *unf_lport = NULL; - u32 scsi_id = 0; - u32 err_handle = 0; - - FC_CHECK_RETURN_VALUE(cmnd, FAILED); - - unf_lport = (struct unf_lport *)cmnd->device->host->hostdata[0]; - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_DEVICE_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, scsi_id, err_handle); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[device_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", - unf_lport->port_id, scsi_id, (u32)cmnd->device->lun, cmnd->cmnd[0]); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is invalid"); - - return FAILED; - } - - /* Check local SCSI_ID validity */ - scsi_id = (u32)((u64)cmnd->device->hostdata); - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]scsi_id(0x%x) is max than(0x%x)", scsi_id, - UNF_MAX_SCSI_ID); - - return FAILED; - } - - /* Block scsi (check rport state -> whether offline or not) */ - ret = fc_block_scsi_eh(cmnd); - if (unlikely(ret != 0)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Block scsi eh failed(0x%x)", ret); - - return ret; - } - - unf_init_abts_tmf_scsi_cmd(cmnd, &scsi_cmd, false); - /* Process scsi device/LUN reset cmnd */ - ret = unf_cm_eh_device_reset_handler(&scsi_cmd); - if (ret == UNF_SCSI_ABORT_SUCCESS) { - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_DEVICE_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, - scsi_id, err_handle); - } - } - - return ret; -} - -int unf_scsi_bus_reset_handler(struct scsi_cmnd *cmnd) -{ - /* BUS Reset */ - struct unf_scsi_cmnd scsi_cmd = {0}; - struct unf_lport *unf_lport = NULL; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - int ret = FAILED; - u32 scsi_id = 0; - u32 err_handle = 0; - - FC_CHECK_RETURN_VALUE(cmnd, FAILED); - - unf_lport = (struct unf_lport *)cmnd->device->host->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port is null"); - - return FAILED; - } - - /* Check local SCSI_ID validity */ - scsi_id = (u32)((u64)cmnd->device->hostdata); - if 
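-	/*
-	 * All four error handlers here follow the same pattern: validate the
-	 * lport and scsi_id, bump the per-type error-handling counter, call
-	 * fc_block_scsi_eh() so a temporarily blocked rport can recover (it
-	 * returns 0, or FAST_IO_FAIL if the fast-fail timeout fired), then
-	 * build a task-management request via unf_init_abts_tmf_scsi_cmd()
-	 * and hand it to the matching unf_cm_* handler.
-	 */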
(unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]scsi_id(0x%x) is max than(0x%x)", scsi_id, - UNF_MAX_SCSI_ID); - - return FAILED; - } - - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_BUS_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, scsi_id, err_handle); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info][bus_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", - unf_lport->port_id, scsi_id, (u32)cmnd->device->lun, - cmnd->cmnd[0]); - } - - /* Block scsi (check rport state -> whether offline or not) */ - ret = fc_block_scsi_eh(cmnd); - if (unlikely(ret != 0)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Block scsi eh failed(0x%x)", ret); - - return ret; - } - - unf_init_abts_tmf_scsi_cmd(cmnd, &scsi_cmd, false); - /* Process scsi BUS Reset cmnd */ - ret = unf_cm_bus_reset_handler(&scsi_cmd); - if (ret == UNF_SCSI_ABORT_SUCCESS) { - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_BUS_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, scsi_id, err_handle); - } - } - - return ret; -} - -int unf_scsi_target_reset_handler(struct scsi_cmnd *cmnd) -{ - /* Session reset/delete */ - struct unf_scsi_cmnd scsi_cmd = {0}; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - int ret = FAILED; - struct unf_lport *unf_lport = NULL; - u32 scsi_id = 0; - u32 err_handle = 0; - - FC_CHECK_RETURN_VALUE(cmnd, RETURN_ERROR); - - unf_lport = (struct unf_lport *)cmnd->device->host->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port is null"); - - return FAILED; - } - - /* Check local SCSI_ID validity */ - scsi_id = (u32)((u64)cmnd->device->hostdata); - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]scsi_id(0x%x) is max than(0x%x)", scsi_id, UNF_MAX_SCSI_ID); - - return FAILED; - } - - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_TARGET_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, scsi_id, err_handle); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[target_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", - unf_lport->port_id, scsi_id, (u32)cmnd->device->lun, cmnd->cmnd[0]); - } - - /* Block scsi (check rport state -> whether offline or not) */ - ret = fc_block_scsi_eh(cmnd); - if (unlikely(ret != 0)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Block scsi eh failed(0x%x)", ret); - - return ret; - } - - unf_init_abts_tmf_scsi_cmd(cmnd, &scsi_cmd, false); - /* Process scsi Target/Session reset/delete cmnd */ - ret = unf_cm_target_reset_handler(&scsi_cmd); - if (ret == UNF_SCSI_ABORT_SUCCESS) { - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_TARGET_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, scsi_id, err_handle); - } - } - - return ret; -} - -static int unf_scsi_slave_alloc(struct scsi_device *sdev) -{ - struct fc_rport *rport = NULL; - u32 scsi_id = 0; - struct unf_lport *unf_lport = NULL; - struct Scsi_Host *host = NULL; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - - /* About device */ - if (unlikely(!sdev)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]SDev is null"); - return -ENXIO; - } - - /* About scsi rport */ - rport = 
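-	/*
-	 * slave_alloc resolves the sdev's rport and copies the u32 scsi_id
-	 * stashed in rport->dd_data into sdev->hostdata, so later fast-path
-	 * lookups (queuecommand and the error handlers) can recover it with
-	 * a plain cast: scsi_id = (u32)((u64)sdev->hostdata).
-	 */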
starget_to_rport(scsi_target(sdev)); - if (unlikely(!rport || fc_remote_port_chkready(rport))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]SCSI rport is null"); - - if (rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]SCSI rport is not ready(0x%x)", - fc_remote_port_chkready(rport)); - } - - return -ENXIO; - } - - /* About host */ - host = rport_to_shost(rport); - if (unlikely(!host)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Host is null"); - - return -ENXIO; - } - - /* About Local Port */ - unf_lport = (struct unf_lport *)host->hostdata[0]; - if (unf_is_lport_valid(unf_lport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is invalid"); - - return -ENXIO; - } - - /* About Local SCSI_ID */ - scsi_id = - *(u32 *)rport->dd_data; - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]scsi_id(0x%x) is max than(0x%x)", scsi_id, UNF_MAX_SCSI_ID); - - return -ENXIO; - } - - scsi_image_table = &unf_lport->rport_scsi_table; - if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) { - atomic_inc(&scsi_image_table->wwn_rport_info_table[scsi_id] - .dfx_counter->device_alloc); - } - atomic_inc(&unf_lport->device_alloc); - sdev->hostdata = (void *)(u64)scsi_id; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x) use scsi_id(%u) to alloc device[%u:%u:%u:%u]", - unf_lport->port_id, scsi_id, host->host_no, sdev->channel, sdev->id, - (u32)sdev->lun); - - return 0; -} - -static void unf_scsi_destroy_slave(struct scsi_device *sdev) -{ - /* - * NOTE: about sdev->hostdata - * --->>> pointing to local SCSI_ID - * 1. Assignment during slave allocation - * 2. Released when callback for slave destroy - * 3. Used during: Queue_CMND, Abort CMND, Device Reset, Target Reset & - * Bus Reset - */ - struct fc_rport *rport = NULL; - u32 scsi_id = 0; - struct unf_lport *unf_lport = NULL; - struct Scsi_Host *host = NULL; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - - /* About scsi device */ - if (unlikely(!sdev)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]SDev is null"); - - return; - } - - /* About scsi rport */ - rport = starget_to_rport(scsi_target(sdev)); - if (unlikely(!rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]SCSI rport is null or remote port is not ready"); - return; - } - - /* About host */ - host = rport_to_shost(rport); - if (unlikely(!host)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Host is null"); - - return; - } - - /* About L_Port */ - unf_lport = (struct unf_lport *)host->hostdata[0]; - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - atomic_inc(&unf_lport->device_destroy); - - scsi_id = (u32)((u64)sdev->hostdata); - if (scsi_id < UNF_MAX_SCSI_ID && scsi_image_table->wwn_rport_info_table) { - if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) { - atomic_inc(&scsi_image_table->wwn_rport_info_table[scsi_id] - .dfx_counter->device_destroy); - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x) with scsi_id(%u) to destroy slave device[%u:%u:%u:%u]", - unf_lport->port_id, scsi_id, host->host_no, - sdev->channel, sdev->id, (u32)sdev->lun); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[err]Port(0x%x) scsi_id(%u) is invalid and destroy device[%u:%u:%u:%u]", - unf_lport->port_id, scsi_id, host->host_no, - sdev->channel, sdev->id, (u32)sdev->lun); - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(%p) is invalid", unf_lport); - } - 
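-	/*
-	 * Clearing hostdata happens unconditionally: even when the lport has
-	 * already been invalidated, the stale scsi_id must not survive into
-	 * a reused scsi_device. This mirrors slave_alloc, the only place
-	 * that sets it.
-	 */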
- sdev->hostdata = NULL; -} - -static int unf_scsi_slave_configure(struct scsi_device *sdev) -{ -#define UNF_SCSI_DEV_DEPTH 32 - blk_queue_update_dma_alignment(sdev->request_queue, 0x7); - - scsi_change_queue_depth(sdev, UNF_SCSI_DEV_DEPTH); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[event]Enter slave configure, set depth is %d, sdev->tagged_supported is (%d)", - UNF_SCSI_DEV_DEPTH, sdev->tagged_supported); - - return 0; -} - -static int unf_scsi_scan_finished(struct Scsi_Host *shost, unsigned long time) -{ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Scan finished"); - - return 1; -} - -static void unf_scsi_scan_start(struct Scsi_Host *shost) -{ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Start scsi scan..."); -} - -void unf_host_init_attr_setting(struct Scsi_Host *scsi_host) -{ - struct unf_lport *unf_lport = NULL; - u32 speed = FC_PORTSPEED_UNKNOWN; - - unf_lport = (struct unf_lport *)scsi_host->hostdata[0]; - fc_host_supported_classes(scsi_host) = FC_COS_CLASS3; - fc_host_dev_loss_tmo(scsi_host) = (u32)unf_get_link_lose_tmo(unf_lport); - fc_host_node_name(scsi_host) = unf_lport->node_name; - fc_host_port_name(scsi_host) = unf_lport->port_name; - - fc_host_max_npiv_vports(scsi_host) = (u16)((unf_lport == unf_lport->root_lport) ? - unf_lport->low_level_func.support_max_npiv_num - : 0); - fc_host_npiv_vports_inuse(scsi_host) = 0; - fc_host_next_vport_number(scsi_host) = 0; - - /* About speed mode */ - if (unf_lport->low_level_func.fc_ser_max_speed == UNF_PORT_SPEED_32_G && - unf_lport->card_type == UNF_FC_SERVER_BOARD_32_G) { - speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT; - } else if (unf_lport->low_level_func.fc_ser_max_speed == UNF_PORT_SPEED_16_G && - unf_lport->card_type == UNF_FC_SERVER_BOARD_16_G) { - speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT; - } else if (unf_lport->low_level_func.fc_ser_max_speed == UNF_PORT_SPEED_8_G && - unf_lport->card_type == UNF_FC_SERVER_BOARD_8_G) { - speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT; - } - - fc_host_supported_speeds(scsi_host) = speed; -} - -int unf_alloc_scsi_host(struct Scsi_Host **unf_scsi_host, - struct unf_host_param *host_param) -{ - int ret = RETURN_ERROR; - struct Scsi_Host *scsi_host = NULL; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(unf_scsi_host, RETURN_ERROR); - FC_CHECK_RETURN_VALUE(host_param, RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, "[event]Alloc scsi host..."); - - /* Check L_Port validity */ - unf_lport = (struct unf_lport *)(host_param->lport); - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port is NULL and return directly"); - - return RETURN_ERROR; - } - - scsi_host_template.can_queue = host_param->can_queue; - scsi_host_template.cmd_per_lun = host_param->cmnd_per_lun; - scsi_host_template.sg_tablesize = host_param->sg_table_size; - scsi_host_template.max_sectors = host_param->max_sectors; - - /* Alloc scsi host */ - scsi_host = scsi_host_alloc(&scsi_host_template, sizeof(u64)); - if (unlikely(!scsi_host)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Register scsi host failed"); - - return RETURN_ERROR; - } - - scsi_host->max_channel = host_param->max_channel; - scsi_host->max_lun = host_param->max_lun; - scsi_host->max_cmd_len = host_param->max_cmnd_len; - scsi_host->unchecked_isa_dma = 0; - scsi_host->hostdata[0] = (unsigned long)(uintptr_t)unf_lport; /* save lport to scsi */ - scsi_host->unique_id = scsi_host->host_no; - 
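-	/*
-	 * The shared scsi_host_template is parameterized per HBA (can_queue,
-	 * cmd_per_lun, sg_tablesize, max_sectors) just before
-	 * scsi_host_alloc(), and hostdata[0] carries the lport pointer that
-	 * every callback above recovers with
-	 * (struct unf_lport *)host->hostdata[0].
-	 */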
scsi_host->max_id = host_param->max_id; - scsi_host->transportt = (unf_lport == unf_lport->root_lport) - ? scsi_transport_template - : scsi_transport_template_v; - - /* register DIF/DIX protection */ - if (spfc_dif_enable) { - /* Enable DIF and DIX function */ - scsi_host_set_prot(scsi_host, spfc_dif_type); - - spfc_guard = SHOST_DIX_GUARD_CRC; - /* Enable IP checksum algorithm in DIX */ - if (dix_flag) - spfc_guard |= SHOST_DIX_GUARD_IP; - scsi_host_set_guard(scsi_host, spfc_guard); - } - - /* Add scsi host */ - ret = scsi_add_host(scsi_host, host_param->pdev); - if (unlikely(ret)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Add scsi host failed with return value %d", ret); - - scsi_host_put(scsi_host); - return RETURN_ERROR; - } - - /* Set scsi host attribute */ - unf_host_init_attr_setting(scsi_host); - *unf_scsi_host = scsi_host; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Alloc and add scsi host(0x%llx) succeed", - (u64)scsi_host); - - return RETURN_OK; -} - -void unf_free_scsi_host(struct Scsi_Host *unf_scsi_host) -{ - struct Scsi_Host *scsi_host = NULL; - - scsi_host = unf_scsi_host; - fc_remove_host(scsi_host); - scsi_remove_host(scsi_host); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Remove scsi host(%u) succeed", scsi_host->host_no); - - scsi_host_put(scsi_host); -} - -u32 unf_register_ini_transport(void) -{ - /* Register INI Transport */ - scsi_transport_template = fc_attach_transport(&function_template); - - if (!scsi_transport_template) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Register FC transport to scsi failed"); - - return RETURN_ERROR; - } - - scsi_transport_template_v = fc_attach_transport(&function_template_v); - if (!scsi_transport_template_v) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Register FC vport transport to scsi failed"); - - fc_release_transport(scsi_transport_template); - - return RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Register FC transport to scsi succeed"); - - return RETURN_OK; -} - -void unf_unregister_ini_transport(void) -{ - fc_release_transport(scsi_transport_template); - fc_release_transport(scsi_transport_template_v); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Unregister FC transport succeed"); -} - -void unf_save_sense_data(void *scsi_cmd, const char *sense, int sens_len) -{ - struct scsi_cmnd *cmd = NULL; - - FC_CHECK_RETURN_VOID(scsi_cmd); - FC_CHECK_RETURN_VOID(sense); - - cmd = (struct scsi_cmnd *)scsi_cmd; - memcpy(cmd->sense_buffer, sense, sens_len); -} diff --git a/drivers/scsi/spfc/common/unf_scsi_common.h b/drivers/scsi/spfc/common/unf_scsi_common.h deleted file mode 100644 index f20cdd7f04792c4b29f7b18d3e257b2e342ec671..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_scsi_common.h +++ /dev/null @@ -1,570 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_SCSI_COMMON_H -#define UNF_SCSI_COMMON_H - -#include "unf_type.h" - -#define SCSI_SENSE_DATA_LEN 96 -#define DRV_SCSI_CDB_LEN 16 -#define DRV_SCSI_LUN_LEN 8 - -#define DRV_ENTRY_PER_SGL 64 /* Size of an entry array in a hash table */ - -#define UNF_DIF_AREA_SIZE (8) - -struct unf_dif_control_info { - u16 app_tag; - u16 flags; - u32 protect_opcode; - u32 fcp_dl; - u32 start_lba; - u8 actual_dif[UNF_DIF_AREA_SIZE]; - u8 expected_dif[UNF_DIF_AREA_SIZE]; - u32 dif_sge_count; - void *dif_sgl; -}; - -struct dif_result_info { - unsigned char actual_idf[UNF_DIF_AREA_SIZE]; - unsigned char 
expect_dif[UNF_DIF_AREA_SIZE];
-};
-
-struct drv_sge {
-	char *buf;
-	void *page_ctrl;
-	u32 Length;
-	u32 offset;
-};
-
-struct drv_scsi_cmd_result {
-	u32 Status;
-	u16 sense_data_length; /* sense data length */
-	u8 sense_data[SCSI_SENSE_DATA_LEN]; /* fail sense info */
-};
-
-enum drv_io_direction {
-	DRV_IO_BIDIRECTIONAL = 0,
-	DRV_IO_DIRECTION_WRITE = 1,
-	DRV_IO_DIRECTION_READ = 2,
-	DRV_IO_DIRECTION_NONE = 3,
-};
-
-struct drv_sgl {
-	struct drv_sgl *next_sgl; /* point to the next SGL in the SGL list */
-	unsigned short num_sges_in_chain;
-	unsigned short num_sges_in_sgl;
-	u32 flag;
-	u64 serial_num;
-	struct drv_sge sge[DRV_ENTRY_PER_SGL];
-	struct list_head node;
-	u32 cpu_id;
-};
-
-struct dif_info {
-/* Indicates the result returned when the data protection
- * information is inconsistent, added by pangea
- */
-	struct dif_result_info dif_result;
-/* Data protection information operation code
- * bit[31-24] other operation code
- * bit[23-16] data protection information operation
- * bit[15-8]  data protection information verification
- * bit[7-0]   data protection information replace
- */
-	u32 protect_opcode;
-	unsigned short apptag;
-	u64 start_lba; /* IO start LBA */
-	struct drv_sgl *protection_sgl;
-};
-
-struct drv_device_address {
-	u16 initiator_id; /* ini id */
-	u16 bus_id; /* device bus id */
-	u16 target_id; /* device target id, for PCIe SSD, device id */
-	u16 function_id; /* function id */
-};
-
-struct drv_ini_cmd {
-	struct drv_scsi_cmd_result result;
-	void *upper; /* product private pointer */
-	void *lower; /* driver private pointer */
-	u8 cdb[DRV_SCSI_CDB_LEN]; /* CDB edit by product */
-	u8 lun[DRV_SCSI_LUN_LEN];
-	u16 cmd_len;
-	u16 tag; /* SCSI cmd add by driver */
-	enum drv_io_direction io_direciton;
-	u32 data_length;
-	u32 underflow;
-	u32 overflow;
-	u32 resid;
-	u64 port_id;
-	u64 cmd_sn;
-	struct drv_device_address addr;
-	struct drv_sgl *sgl;
-	void *device;
-	void (*done)(struct drv_ini_cmd *cmd); /* callback pointer */
-	struct dif_info dif_info;
-};
-
-typedef void (*uplevel_cmd_done)(struct drv_ini_cmd *scsi_cmnd);
-
-#ifndef SUCCESS
-#define SUCCESS 0x2002
-#endif
-#ifndef FAILED
-#define FAILED 0x2003
-#endif
-
-#ifndef DRIVER_OK
-#define DRIVER_OK 0x00 /* Driver status */
-#endif
-
-#ifndef PCI_FUNC
-#define PCI_FUNC(devfn) ((devfn) & 0x07)
-#endif
-
-#define UNF_SCSI_ABORT_SUCCESS SUCCESS
-#define UNF_SCSI_ABORT_FAIL FAILED
-
-#define UNF_SCSI_STATUS(byte) (byte)
-#define UNF_SCSI_MSG(byte) ((byte) << 8)
-#define UNF_SCSI_HOST(byte) ((byte) << 16)
-#define UNF_SCSI_DRIVER(byte) ((byte) << 24)
-#define UNF_GET_SCSI_HOST_ID(scsi_host) ((scsi_host)->host_no)
-
-struct unf_ini_error_code {
-	u32 drv_errcode; /* driver error code */
-	u32 ap_errcode; /* up level error code */
-};
-
-typedef u32 (*ini_get_sgl_entry_buf)(void *upper_cmnd, void *driver_sgl,
-				     void **upper_sgl, u32 *req_index,
-				     u32 *index, char **buf,
-				     u32 *buf_len);
-
-#define UNF_SCSI_SENSE_BUFFERSIZE 96
-struct unf_scsi_cmnd {
-	u32 scsi_host_id;
-	u32 scsi_id; /* cmd->dev->id */
-	u64 raw_lun_id;
-	u64 port_id;
-	u32 under_flow; /* Underflow */
-	u32 transfer_len; /* Transfer Length */
-	u32 resid; /* Resid */
-	u32 sense_buflen;
-	int result;
-	u32 entry_count; /* IO Buffer counter */
-	u32 abort;
-	u32 err_code_table_cout; /* error code size */
-	u64 cmnd_sn;
-	ulong time_out; /* EPL driver add timer */
-	u16 cmnd_len; /* Cdb length */
-	u8 data_direction; /* data direction */
-	u8 *pcmnd; /* SCSI CDB */
-	u8 *sense_buf;
-	void *drv_private; /* driver host pointer */
-	void *driver_scribble;
/* Xchg pionter */ - void *upper_cmnd; /* UpperCmnd pointer by driver */ - u8 *lun_id; /* new lunid */ - u32 world_id; - struct unf_dif_control_info dif_control; /* DIF control */ - struct unf_ini_error_code *err_code_table; /* error code table */ - void *sgl; /* Sgl pointer */ - ini_get_sgl_entry_buf unf_ini_get_sgl_entry; - void (*done)(struct unf_scsi_cmnd *cmd); - uplevel_cmd_done uplevel_done; - struct dif_info dif_info; - u32 qos_level; - void *pinitiator; -}; - -#ifndef FC_PORTSPEED_32GBIT -#define FC_PORTSPEED_32GBIT 0x40 -#endif - -#define UNF_GID_PORT_CNT 2048 -#define UNF_RSCN_PAGE_SUM 255 - -#define UNF_CPU_ENDIAN - -#define UNF_NPORTID_MASK 0x00FFFFFF -#define UNF_DOMAIN_MASK 0x00FF0000 -#define UNF_AREA_MASK 0x0000FF00 -#define UNF_ALPA_MASK 0x000000FF - -struct unf_fc_head { - u32 rctl_did; /* Routing control and Destination address of the seq */ - u32 csctl_sid; /* Class control and Source address of the sequence */ - u32 type_fctl; /* Data type and Initial frame control value of the seq - */ - u32 seqid_dfctl_seqcnt; /* Seq ID, Data Field and Initial seq count */ - u32 oxid_rxid; /* Originator & Responder exchange IDs for the sequence - */ - u32 parameter; /* Relative offset of the first frame of the sequence */ -}; - -#define UNF_FCPRSP_CTL_LEN (24) -#define UNF_MAX_RSP_INFO_LEN (8) -#define UNF_RSP_LEN_VLD (1 << 0) -#define UNF_SENSE_LEN_VLD (1 << 1) -#define UNF_RESID_OVERRUN (1 << 2) -#define UNF_RESID_UNDERRUN (1 << 3) -#define UNF_FCP_CONF_REQ (1 << 4) - -/* T10: FCP2r.07 9.4.1 Overview and format of FCP_RSP IU */ -struct unf_fcprsp_iu { - u32 reserved[2]; - u8 reserved2[2]; - u8 control; - u8 fcp_status; - u32 fcp_residual; - u32 fcp_sense_len; /* Length of sense info field */ - u32 fcp_response_len; /* Length of response info field in bytes 0,4 or 8 - */ - u8 fcp_resp_info[UNF_MAX_RSP_INFO_LEN]; /* Buffer for response info */ - u8 fcp_sense_info[SCSI_SENSE_DATA_LEN]; /* Buffer for sense info */ -} __attribute__((packed)); - -#define UNF_CMD_REF_MASK 0xFF000000 -#define UNF_TASK_ATTR_MASK 0x00070000 -#define UNF_TASK_MGMT_MASK 0x0000FF00 -#define UNF_FCP_WR_DATA 0x00000001 -#define UNF_FCP_RD_DATA 0x00000002 -#define UNF_CDB_LEN_MASK 0x0000007C -#define UNF_FCP_CDB_LEN_16 (16) -#define UNF_FCP_CDB_LEN_32 (32) -#define UNF_FCP_LUNID_LEN_8 (8) - -/* FCP-4 :Table 27 - RSP_CODE field */ -#define UNF_FCP_TM_RSP_COMPLETE (0) -#define UNF_FCP_TM_INVALID_CMND (0x2) -#define UNF_FCP_TM_RSP_REJECT (0x4) -#define UNF_FCP_TM_RSP_FAIL (0x5) -#define UNF_FCP_TM_RSP_SUCCEED (0x8) -#define UNF_FCP_TM_RSP_INCRECT_LUN (0x9) - -#define UNF_SET_TASK_MGMT_FLAGS(fcp_tm_code) ((fcp_tm_code) << 8) -#define UNF_GET_TASK_MGMT_FLAGS(control) (((control) & UNF_TASK_MGMT_MASK) >> 8) - -enum unf_task_mgmt_cmd { - UNF_FCP_TM_QUERY_TASK_SET = (1 << 0), - UNF_FCP_TM_ABORT_TASK_SET = (1 << 1), - UNF_FCP_TM_CLEAR_TASK_SET = (1 << 2), - UNF_FCP_TM_QUERY_UNIT_ATTENTION = (1 << 3), - UNF_FCP_TM_LOGICAL_UNIT_RESET = (1 << 4), - UNF_FCP_TM_TARGET_RESET = (1 << 5), - UNF_FCP_TM_CLEAR_ACA = (1 << 6), - UNF_FCP_TM_TERMINATE_TASK = (1 << 7) /* obsolete */ -}; - -struct unf_fcp_cmnd { - u8 lun[UNF_FCP_LUNID_LEN_8]; /* Logical unit number */ - u32 control; - u8 cdb[UNF_FCP_CDB_LEN_16]; /* Payload data containing cdb info */ - u32 data_length; /* Number of bytes expected to be transferred */ -} __attribute__((packed)); - -struct unf_fcp_cmd_hdr { - struct unf_fc_head frame_hdr; /* FCHS structure */ - struct unf_fcp_cmnd fcp_cmnd; /* Fcp Cmnd struct */ -}; - -/* FC-LS-2 Common Service Parameter applicability */ 
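-/*
- * The login parameter structures below are kept in two bit-field layouts,
- * selected by UNF_CPU_ENDIAN, so that each 32-bit word of a FLOGI/PLOGI
- * payload can be overlaid directly on the wire format. For example, word 0
- * packs highest_version/lowest_version/bb_credit as 8+8+16 bits; a sketch
- * of the equivalent manual unpacking from a big-endian wire word:
- *
- *	u32 w0 = be32_to_cpu(raw[0]);
- *	u8  highest = (w0 >> 24) & 0xff;
- *	u8  lowest  = (w0 >> 16) & 0xff;
- *	u16 bbcred  = w0 & 0xffff;
- */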
-struct unf_fabric_coparm { -#if defined(UNF_CPU_ENDIAN) - u32 bb_credit : 16; /* 0 [0-15] */ - u32 lowest_version : 8; /* 0 [16-23] */ - u32 highest_version : 8; /* 0 [24-31] */ -#else - u32 highest_version : 8; /* 0 [24-31] */ - u32 lowest_version : 8; /* 0 [16-23] */ - u32 bb_credit : 16; /* 0 [0-15] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 bb_receive_data_field_size : 12; /* 1 [0-11] */ - u32 bbscn : 4; /* 1 [12-15] */ - u32 payload_length : 1; /* 1 [16] */ - u32 seq_cnt : 1; /* 1 [17] */ - u32 dynamic_half_duplex : 1; /* 1 [18] */ - u32 r_t_tov : 1; /* 1 [19] */ - u32 reserved_co2 : 6; /* 1 [20-25] */ - u32 e_d_tov_resolution : 1; /* 1 [26] */ - u32 alternate_bb_credit_mgmt : 1; /* 1 [27] */ - u32 nport : 1; /* 1 [28] */ - u32 mnid_assignment : 1; /* 1 [29] */ - u32 random_relative_offset : 1; /* 1 [30] */ - u32 clean_address : 1; /* 1 [31] */ -#else - u32 reserved_co2 : 2; /* 1 [24-25] */ - u32 e_d_tov_resolution : 1; /* 1 [26] */ - u32 alternate_bb_credit_mgmt : 1; /* 1 [27] */ - u32 nport : 1; /* 1 [28] */ - u32 mnid_assignment : 1; /* 1 [29] */ - u32 random_relative_offset : 1; /* 1 [30] */ - u32 clean_address : 1; /* 1 [31] */ - - u32 payload_length : 1; /* 1 [16] */ - u32 seq_cnt : 1; /* 1 [17] */ - u32 dynamic_half_duplex : 1; /* 1 [18] */ - u32 r_t_tov : 1; /* 1 [19] */ - u32 reserved_co5 : 4; /* 1 [20-23] */ - - u32 bb_receive_data_field_size : 12; /* 1 [0-11] */ - u32 bbscn : 4; /* 1 [12-15] */ -#endif - u32 r_a_tov; /* 2 [0-31] */ - u32 e_d_tov; /* 3 [0-31] */ -}; - -/* FC-LS-2 Common Service Parameter applicability */ -/*Common Service Parameters - PLOGI and PLOGI LS_ACC */ -struct lgn_port_coparm { -#if defined(UNF_CPU_ENDIAN) - u32 bb_credit : 16; /* 0 [0-15] */ - u32 lowest_version : 8; /* 0 [16-23] */ - u32 highest_version : 8; /* 0 [24-31] */ -#else - u32 highest_version : 8; /* 0 [24-31] */ - u32 lowest_version : 8; /* 0 [16-23] */ - u32 bb_credit : 16; /* 0 [0-15] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 bb_receive_data_field_size : 12; /* 1 [0-11] */ - u32 bbscn : 4; /* 1 [12-15] */ - u32 payload_length : 1; /* 1 [16] */ - u32 seq_cnt : 1; /* 1 [17] */ - u32 dynamic_half_duplex : 1; /* 1 [18] */ - u32 reserved_co2 : 7; /* 1 [19-25] */ - u32 e_d_tov_resolution : 1; /* 1 [26] */ - u32 alternate_bb_credit_mgmt : 1; /* 1 [27] */ - u32 nport : 1; /* 1 [28] */ - u32 vendor_version_level : 1; /* 1 [29] */ - u32 random_relative_offset : 1; /* 1 [30] */ - u32 continuously_increasing : 1; /* 1 [31] */ -#else - u32 reserved_co2 : 2; /* 1 [24-25] */ - u32 e_d_tov_resolution : 1; /* 1 [26] */ - u32 alternate_bb_credit_mgmt : 1; /* 1 [27] */ - u32 nport : 1; /* 1 [28] */ - u32 vendor_version_level : 1; /* 1 [29] */ - u32 random_relative_offset : 1; /* 1 [30] */ - u32 continuously_increasing : 1; /* 1 [31] */ - - u32 payload_length : 1; /* 1 [16] */ - u32 seq_cnt : 1; /* 1 [17] */ - u32 dynamic_half_duplex : 1; /* 1 [18] */ - u32 reserved_co5 : 5; /* 1 [19-23] */ - - u32 bb_receive_data_field_size : 12; /* 1 [0-11] */ - u32 reserved_co1 : 4; /* 1 [12-15] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 relative_offset : 16; /* 2 [0-15] */ - u32 nport_total_concurrent_sequences : 16; /* 2 [16-31] */ -#else - u32 nport_total_concurrent_sequences : 16; /* 2 [16-31] */ - u32 relative_offset : 16; /* 2 [0-15] */ -#endif - - u32 e_d_tov; -}; - -/* FC-LS-2 Class Service Parameters Applicability */ -struct unf_lgn_port_clparm { -#if defined(UNF_CPU_ENDIAN) - u32 reserved_cl1 : 6; /* 0 [0-5] */ - u32 ic_data_compression_history_buffer_size : 2; /* 0 [6-7] */ - u32 
ic_data_compression_capable : 1; /* 0 [8] */ - - u32 ic_ack_generation_assistance : 1; /* 0 [9] */ - u32 ic_ack_n_capable : 1; /* 0 [10] */ - u32 ic_ack_o_capable : 1; /* 0 [11] */ - u32 ic_initial_responder_processes_accociator : 2; /* 0 [12-13] */ - u32 ic_x_id_reassignment : 2; /* 0 [14-15] */ - - u32 reserved_cl2 : 7; /* 0 [16-22] */ - u32 priority : 1; /* 0 [23] */ - u32 buffered_class : 1; /* 0 [24] */ - u32 camp_on : 1; /* 0 [25] */ - u32 dedicated_simplex : 1; /* 0 [26] */ - u32 sequential_delivery : 1; /* 0 [27] */ - u32 stacked_connect_request : 2; /* 0 [28-29] */ - u32 intermix_mode : 1; /* 0 [30] */ - u32 valid : 1; /* 0 [31] */ -#else - u32 buffered_class : 1; /* 0 [24] */ - u32 camp_on : 1; /* 0 [25] */ - u32 dedicated_simplex : 1; /* 0 [26] */ - u32 sequential_delivery : 1; /* 0 [27] */ - u32 stacked_connect_request : 2; /* 0 [28-29] */ - u32 intermix_mode : 1; /* 0 [30] */ - u32 valid : 1; /* 0 [31] */ - u32 reserved_cl2 : 7; /* 0 [16-22] */ - u32 priority : 1; /* 0 [23] */ - u32 ic_data_compression_capable : 1; /* 0 [8] */ - u32 ic_ack_generation_assistance : 1; /* 0 [9] */ - u32 ic_ack_n_capable : 1; /* 0 [10] */ - u32 ic_ack_o_capable : 1; /* 0 [11] */ - u32 ic_initial_responder_processes_accociator : 2; /* 0 [12-13] */ - u32 ic_x_id_reassignment : 2; /* 0 [14-15] */ - - u32 reserved_cl1 : 6; /* 0 [0-5] */ - u32 ic_data_compression_history_buffer_size : 2; /* 0 [6-7] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 received_data_field_size : 16; /* 1 [0-15] */ - - u32 reserved_cl3 : 5; /* 1 [16-20] */ - u32 rc_data_compression_history_buffer_size : 2; /* 1 [21-22] */ - u32 rc_data_compression_capable : 1; /* 1 [23] */ - - u32 rc_data_categories_per_sequence : 2; /* 1 [24-25] */ - u32 reserved_cl4 : 1; /* 1 [26] */ - u32 rc_error_policy_supported : 2; /* 1 [27-28] */ - u32 rc_x_id_interlock : 1; /* 1 [29] */ - u32 rc_ack_n_capable : 1; /* 1 [30] */ - u32 rc_ack_o_capable : 1; /* 1 [31] */ -#else - u32 rc_data_categories_per_sequence : 2; /* 1 [24-25] */ - u32 reserved_cl4 : 1; /* 1 [26] */ - u32 rc_error_policy_supported : 2; /* 1 [27-28] */ - u32 rc_x_id_interlock : 1; /* 1 [29] */ - u32 rc_ack_n_capable : 1; /* 1 [30] */ - u32 rc_ack_o_capable : 1; /* 1 [31] */ - - u32 reserved_cl3 : 5; /* 1 [16-20] */ - u32 rc_data_compression_history_buffer_size : 2; /* 1 [21-22] */ - u32 rc_data_compression_capable : 1; /* 1 [23] */ - - u32 received_data_field_size : 16; /* 1 [0-15] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 nport_end_to_end_credit : 15; /* 2 [0-14] */ - u32 reserved_cl5 : 1; /* 2 [15] */ - - u32 concurrent_sequences : 16; /* 2 [16-31] */ -#else - u32 concurrent_sequences : 16; /* 2 [16-31] */ - - u32 nport_end_to_end_credit : 15; /* 2 [0-14] */ - u32 reserved_cl5 : 1; /* 2 [15] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 reserved_cl6 : 16; /* 3 [0-15] */ - u32 open_sequence_per_exchange : 16; /* 3 [16-31] */ -#else - u32 open_sequence_per_exchange : 16; /* 3 [16-31] */ - u32 reserved_cl6 : 16; /* 3 [0-15] */ -#endif -}; - -struct unf_fabric_parm { - struct unf_fabric_coparm co_parms; - u32 high_port_name; - u32 low_port_name; - u32 high_node_name; - u32 low_node_name; - struct unf_lgn_port_clparm cl_parms[3]; - u32 reserved_1[4]; - u32 vendor_version_level[4]; -}; - -struct unf_lgn_parm { - struct lgn_port_coparm co_parms; - u32 high_port_name; - u32 low_port_name; - u32 high_node_name; - u32 low_node_name; - struct unf_lgn_port_clparm cl_parms[3]; - u32 reserved_1[4]; - u32 vendor_version_level[4]; -}; - -#define ELS_RJT 0x1 -#define ELS_ACC 0x2 -#define 
ELS_PLOGI 0x3 -#define ELS_FLOGI 0x4 -#define ELS_LOGO 0x5 -#define ELS_ECHO 0x10 -#define ELS_RRQ 0x12 -#define ELS_REC 0x13 -#define ELS_PRLI 0x20 -#define ELS_PRLO 0x21 -#define ELS_TPRLO 0x24 -#define ELS_PDISC 0x50 -#define ELS_FDISC 0x51 -#define ELS_ADISC 0x52 -#define ELS_RSCN 0x61 /* registered state change notification */ -#define ELS_SCR 0x62 /* state change registration */ - -#define NS_GIEL 0X0101 -#define NS_GA_NXT 0X0100 -#define NS_GPN_ID 0x0112 /* get port name by ID */ -#define NS_GNN_ID 0x0113 /* get node name by ID */ -#define NS_GFF_ID 0x011f /* get FC-4 features by ID */ -#define NS_GID_PN 0x0121 /* get ID for port name */ -#define NS_GID_NN 0x0131 /* get IDs for node name */ -#define NS_GID_FT 0x0171 /* get IDs by FC4 type */ -#define NS_GPN_FT 0x0172 /* get port names by FC4 type */ -#define NS_GID_PT 0x01a1 /* get IDs by port type */ -#define NS_RFT_ID 0x0217 /* reg FC4 type for ID */ -#define NS_RPN_ID 0x0212 /* reg port name for ID */ -#define NS_RNN_ID 0x0213 /* reg node name for ID */ -#define NS_RSNPN 0x0218 /* reg symbolic port name */ -#define NS_RFF_ID 0x021f /* reg FC4 Features for ID */ -#define NS_RSNN 0x0239 /* reg symbolic node name */ -#define ST_NULL 0xffff /* reg symbolic node name */ - -#define BLS_ABTS 0xA001 /* ABTS */ - -#define FCP_SRR 0x14 /* Sequence Retransmission Request */ -#define UNF_FC_FID_DOM_MGR 0xfffc00 /* domain manager base */ -enum unf_fc_well_known_fabric_id { - UNF_FC_FID_NONE = 0x000000, /* No destination */ - UNF_FC_FID_DOM_CTRL = 0xfffc01, /* domain controller */ - UNF_FC_FID_BCAST = 0xffffff, /* broadcast */ - UNF_FC_FID_FLOGI = 0xfffffe, /* fabric login */ - UNF_FC_FID_FCTRL = 0xfffffd, /* fabric controller */ - UNF_FC_FID_DIR_SERV = 0xfffffc, /* directory server */ - UNF_FC_FID_TIME_SERV = 0xfffffb, /* time server */ - UNF_FC_FID_MGMT_SERV = 0xfffffa, /* management server */ - UNF_FC_FID_QOS = 0xfffff9, /* QoS Facilitator */ - UNF_FC_FID_ALIASES = 0xfffff8, /* alias server (FC-PH2) */ - UNF_FC_FID_SEC_KEY = 0xfffff7, /* Security key dist. 
server */ - UNF_FC_FID_CLOCK = 0xfffff6, /* clock synch server */ - UNF_FC_FID_MCAST_SERV = 0xfffff5 /* multicast server */ -}; - -#define INVALID_WORLD_ID 0xfffffffc - -struct unf_host_param { - int can_queue; - u16 sg_table_size; - short cmnd_per_lun; - u32 max_id; - u32 max_lun; - u32 max_channel; - u16 max_cmnd_len; - u16 max_sectors; - u64 dma_boundary; - u32 port_id; - void *lport; - struct device *pdev; -}; - -int unf_alloc_scsi_host(struct Scsi_Host **unf_scsi_host, struct unf_host_param *host_param); -void unf_free_scsi_host(struct Scsi_Host *unf_scsi_host); -u32 unf_register_ini_transport(void); -void unf_unregister_ini_transport(void); -void unf_save_sense_data(void *scsi_cmd, const char *sense, int sens_len); - -#endif diff --git a/drivers/scsi/spfc/common/unf_service.c b/drivers/scsi/spfc/common/unf_service.c deleted file mode 100644 index 8f72f647064700a3e567725fcfbf4c735e380b1a..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_service.c +++ /dev/null @@ -1,1439 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_service.h" -#include "unf_log.h" -#include "unf_rport.h" -#include "unf_ls.h" -#include "unf_gs.h" - -struct unf_els_handle_table els_handle_table[] = { - {ELS_PLOGI, unf_plogi_handler}, {ELS_FLOGI, unf_flogi_handler}, - {ELS_LOGO, unf_logo_handler}, {ELS_ECHO, unf_echo_handler}, - {ELS_RRQ, unf_rrq_handler}, {ELS_REC, unf_rec_handler}, - {ELS_PRLI, unf_prli_handler}, {ELS_PRLO, unf_prlo_handler}, - {ELS_PDISC, unf_pdisc_handler}, {ELS_ADISC, unf_adisc_handler}, - {ELS_RSCN, unf_rscn_handler} }; - -u32 max_frame_size = UNF_DEFAULT_FRAME_SIZE; - -#define UNF_NEED_BIG_RESPONSE_BUFF(cmnd_code) \ - (((cmnd_code) == ELS_ECHO) || ((cmnd_code) == NS_GID_PT) || \ - ((cmnd_code) == NS_GID_FT)) - -#define NEED_REFRESH_NPORTID(pkg) \ - ((((pkg)->cmnd == ELS_PLOGI) || ((pkg)->cmnd == ELS_PDISC) || \ - ((pkg)->cmnd == ELS_ADISC))) - -void unf_select_sq(struct unf_xchg *xchg, struct unf_frame_pkg *pkg) -{ - u32 ssq_index = 0; - struct unf_rport *unf_rport = NULL; - - if (likely(xchg)) { - unf_rport = xchg->rport; - - if (unf_rport) { - ssq_index = (xchg->hotpooltag % UNF_SQ_NUM_PER_SESSION) + - unf_rport->sqn_base; - } - } - - pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX] = ssq_index; -} - -u32 unf_ls_gs_cmnd_send(struct unf_lport *lport, struct unf_frame_pkg *pkg, - struct unf_xchg *xchg) -{ - u32 ret = UNF_RETURN_ERROR; - ulong time_out = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - if (unlikely(!lport->low_level_func.service_op.unf_ls_gs_send)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) LS/GS send function is NULL", - lport->port_id); - - return ret; - } - - if (pkg->type == UNF_PKG_GS_REQ) - time_out = UNF_GET_GS_SFS_XCHG_TIMER(lport); - else - time_out = UNF_GET_ELS_SFS_XCHG_TIMER(lport); - - if (xchg->cmnd_code == ELS_RRQ) { - time_out = ((ulong)UNF_GET_ELS_SFS_XCHG_TIMER(lport) > UNF_RRQ_MIN_TIMEOUT_INTERVAL) - ? 
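-		/*
-		 * i.e. time_out = max(ELS timer, UNF_RRQ_MIN_TIMEOUT_INTERVAL):
-		 * an RRQ must outlive the exchange it is reclaiming, so its
-		 * timeout is clamped to a floor; LOGO below instead uses the
-		 * fixed UNF_LOGO_TIMEOUT_INTERVAL.
-		 */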
(ulong)UNF_GET_ELS_SFS_XCHG_TIMER(lport) - : UNF_RRQ_MIN_TIMEOUT_INTERVAL; - } else if (xchg->cmnd_code == ELS_LOGO) { - time_out = UNF_LOGO_TIMEOUT_INTERVAL; - } - - pkg->private_data[PKG_PRIVATE_XCHG_TIMEER] = (u32)time_out; - lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, time_out, UNF_TIMER_TYPE_SFS); - - unf_select_sq(xchg, pkg); - - ret = lport->low_level_func.service_op.unf_ls_gs_send(lport->fc_port, pkg); - if (unlikely(ret != RETURN_OK)) - lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - - return ret; -} - -static u32 unf_bls_cmnd_send(struct unf_lport *lport, struct unf_frame_pkg *pkg, - struct unf_xchg *xchg) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - pkg->private_data[PKG_PRIVATE_XCHG_TIMEER] = (u32)UNF_GET_BLS_SFS_XCHG_TIMER(lport); - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - - unf_select_sq(xchg, pkg); - - return lport->low_level_func.service_op.unf_bls_send(lport->fc_port, pkg); -} - -void unf_fill_package(struct unf_frame_pkg *pkg, struct unf_xchg *xchg, - struct unf_rport *rport) -{ - /* v_pstRport maybe NULL */ - FC_CHECK_RETURN_VOID(pkg); - FC_CHECK_RETURN_VOID(xchg); - - pkg->cmnd = xchg->cmnd_code; - pkg->fcp_cmnd = &xchg->fcp_cmnd; - pkg->frame_head.csctl_sid = xchg->sid; - pkg->frame_head.rctl_did = xchg->did; - pkg->frame_head.oxid_rxid = ((u32)xchg->oxid << UNF_SHIFT_16 | xchg->rxid); - pkg->xchg_contex = xchg; - - FC_CHECK_RETURN_VOID(xchg->lport); - pkg->private_data[PKG_PRIVATE_XCHG_VP_INDEX] = xchg->lport->vp_index; - - if (!rport) { - pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = UNF_RPORT_INVALID_INDEX; - pkg->private_data[PKG_PRIVATE_RPORT_RX_SIZE] = INVALID_VALUE32; - } else { - if (likely(rport->nport_id != UNF_FC_FID_FLOGI)) - pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = rport->rport_index; - else - pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = SPFC_DEFAULT_RPORT_INDEX; - - pkg->private_data[PKG_PRIVATE_RPORT_RX_SIZE] = rport->max_frame_size; - } - - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag | UNF_HOTTAG_FLAG; - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - pkg->private_data[PKG_PRIVATE_LOWLEVEL_XCHG_ADD] = - xchg->private_data[PKG_PRIVATE_LOWLEVEL_XCHG_ADD]; - pkg->unf_cmnd_pload_bl.buffer_ptr = - (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - pkg->unf_cmnd_pload_bl.buf_dma_addr = - xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr; - - /* Low level need to know payload length if send ECHO response */ - pkg->unf_cmnd_pload_bl.length = xchg->fcp_sfs_union.sfs_entry.cur_offset; -} - -struct unf_xchg *unf_get_sfs_free_xchg_and_init(struct unf_lport *lport, u32 did, - struct unf_rport *rport, - union unf_sfs_u **fc_entry) -{ - struct unf_xchg *xchg = NULL; - union unf_sfs_u *sfs_fc_entry = NULL; - - xchg = unf_cm_get_free_xchg(lport, UNF_XCHG_TYPE_SFS); - if (!xchg) - return NULL; - - xchg->did = did; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - xchg->disc_rport = NULL; - xchg->callback = NULL; - xchg->ob_callback = NULL; - - sfs_fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!sfs_fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return NULL; - } - - *fc_entry 
= sfs_fc_entry; - - return xchg; -} - -void *unf_get_one_big_sfs_buf(struct unf_xchg *xchg) -{ - struct unf_big_sfs *big_sfs = NULL; - struct list_head *list_head = NULL; - struct unf_xchg_mgr *xchg_mgr = NULL; - ulong flag = 0; - spinlock_t *big_sfs_pool_lock = NULL; - - FC_CHECK_RETURN_VALUE(xchg, NULL); - xchg_mgr = xchg->xchg_mgr; - FC_CHECK_RETURN_VALUE(xchg_mgr, NULL); - big_sfs_pool_lock = &xchg_mgr->big_sfs_pool.big_sfs_pool_lock; - - spin_lock_irqsave(big_sfs_pool_lock, flag); - if (!list_empty(&xchg_mgr->big_sfs_pool.list_freepool)) { - /* from free to busy */ - list_head = UNF_OS_LIST_NEXT(&xchg_mgr->big_sfs_pool.list_freepool); - list_del(list_head); - xchg_mgr->big_sfs_pool.free_count--; - list_add_tail(list_head, &xchg_mgr->big_sfs_pool.list_busypool); - big_sfs = list_entry(list_head, struct unf_big_sfs, entry_bigsfs); - } else { - spin_unlock_irqrestore(big_sfs_pool_lock, flag); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Allocate big sfs buf failed, count(0x%x) exchange(0x%p) command(0x%x)", - xchg_mgr->big_sfs_pool.free_count, xchg, xchg->cmnd_code); - - return NULL; - } - spin_unlock_irqrestore(big_sfs_pool_lock, flag); - - xchg->big_sfs_buf = big_sfs; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Allocate one big sfs buffer(0x%p), remaining count(0x%x) exchange(0x%p) command(0x%x)", - big_sfs->addr, xchg_mgr->big_sfs_pool.free_count, xchg, - xchg->cmnd_code); - - return big_sfs->addr; -} - -static void unf_fill_rjt_pld(struct unf_els_rjt *els_rjt, u32 reason_code, - u32 reason_explanation) -{ - FC_CHECK_RETURN_VOID(els_rjt); - - els_rjt->cmnd = UNF_ELS_CMND_RJT; - els_rjt->reason_code = (reason_code | reason_explanation); -} - -u32 unf_send_abts(struct unf_lport *lport, struct unf_xchg *xchg) -{ - struct unf_rport *unf_rport = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - unf_rport = xchg->rport; - FC_CHECK_RETURN_VALUE(unf_rport, UNF_RETURN_ERROR); - - /* set pkg info */ - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - pkg.type = UNF_PKG_BLS_REQ; - pkg.frame_head.csctl_sid = xchg->sid; - pkg.frame_head.rctl_did = xchg->did; - pkg.frame_head.oxid_rxid = ((u32)xchg->oxid << UNF_SHIFT_16 | xchg->rxid); - pkg.xchg_contex = xchg; - pkg.unf_cmnd_pload_bl.buffer_ptr = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - - pkg.unf_cmnd_pload_bl.buf_dma_addr = xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag | UNF_HOTTAG_FLAG; - - UNF_SET_XCHG_ALLOC_TIME(&pkg, xchg); - UNF_SET_ABORT_INFO_IOTYPE(&pkg, xchg); - - pkg.private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = - xchg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]; - - /* Send ABTS frame to target */ - ret = unf_bls_cmnd_send(lport, &pkg, xchg); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) send ABTS %s. Abort exch(0x%p) Cmdsn:0x%lx, tag(0x%x) iotype(0x%x)", - lport->port_id, lport->nport_id, - (ret == UNF_RETURN_ERROR) ? 
"failed" : "succeed", xchg, - (ulong)xchg->cmnd_sn, xchg->hotpooltag, xchg->data_direction); - - return ret; -} - -u32 unf_send_els_rjt_by_rport(struct unf_lport *lport, struct unf_xchg *xchg, - struct unf_rport *rport, struct unf_rjt_info *rjt_info) -{ - struct unf_els_rjt *els_rjt = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_RJT_TYPE(rjt_info->els_cmnd_code); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - xchg->disc_rport = NULL; - - xchg->callback = NULL; - xchg->ob_callback = NULL; - - unf_fill_package(&pkg, xchg, rport); - pkg.class_mode = UNF_FC_PROTOCOL_CLASS_3; - pkg.type = UNF_PKG_ELS_REPLY; - - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - els_rjt = &fc_entry->els_rjt; - memset(els_rjt, 0, sizeof(struct unf_els_rjt)); - unf_fill_rjt_pld(els_rjt, rjt_info->reason_code, rjt_info->reason_explanation); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send LS_RJT for 0x%x %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - rjt_info->els_cmnd_code, - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - - return ret; -} - -u32 unf_send_els_rjt_by_did(struct unf_lport *lport, struct unf_xchg *xchg, - u32 did, struct unf_rjt_info *rjt_info) -{ - struct unf_els_rjt *els_rjt = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_RJT_TYPE(rjt_info->els_cmnd_code); - xchg->did = did; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = NULL; - xchg->disc_rport = NULL; - - xchg->callback = NULL; - xchg->ob_callback = NULL; - - unf_fill_package(&pkg, xchg, NULL); - pkg.class_mode = UNF_FC_PROTOCOL_CLASS_3; - pkg.type = UNF_PKG_ELS_REPLY; - - if (rjt_info->reason_code == UNF_LS_RJT_CLASS_ERROR && - rjt_info->class_mode != UNF_FC_PROTOCOL_CLASS_3) { - pkg.class_mode = rjt_info->class_mode; - } - - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - els_rjt = &fc_entry->els_rjt; - memset(els_rjt, 0, sizeof(struct unf_els_rjt)); - unf_fill_rjt_pld(els_rjt, rjt_info->reason_code, rjt_info->reason_explanation); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send LS_RJT %s. 
Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, did, ox_id, rx_id); - - return ret; -} - -static u32 unf_els_cmnd_default_handler(struct unf_lport *lport, struct unf_xchg *xchg, u32 sid, - u32 els_cmnd_code) -{ - struct unf_rport *unf_rport = NULL; - struct unf_rjt_info rjt_info = {0}; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_KEVENT, - "[info]Receive Unknown ELS command(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - els_cmnd_code, lport->port_id, sid, xchg->oxid); - - memset(&rjt_info, 0, sizeof(struct unf_rjt_info)); - rjt_info.els_cmnd_code = els_cmnd_code; - rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; - - unf_rport = unf_get_rport_by_nport_id(lport, sid); - if (unf_rport) { - if (unf_rport->rport_index != - xchg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) NPort handle(0x%x) from low level is not equal to RPort index(0x%x)", - lport->port_id, lport->nport_id, - xchg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX], - unf_rport->rport_index); - } - ret = unf_send_els_rjt_by_rport(lport, xchg, unf_rport, &rjt_info); - } else { - ret = unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); - } - - return ret; -} - -static struct unf_xchg *unf_alloc_xchg_for_rcv_cmnd(struct unf_lport *lport, - struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - ulong flags = 0; - u32 i = 0; - u32 offset = 0; - u8 *cmnd_pld = NULL; - u32 first_dword = 0; - u32 alloc_time = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(pkg, NULL); - - if (!pkg->xchg_contex) { - xchg = unf_cm_get_free_xchg(lport, UNF_XCHG_TYPE_SFS); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[warn]Port(0x%x) get new exchange failed", - lport->port_id); - - return NULL; - } - - offset = (xchg->fcp_sfs_union.sfs_entry.cur_offset); - cmnd_pld = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; - first_dword = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr - ->sfs_common.frame_head.rctl_did; - - if (cmnd_pld || first_dword != 0 || offset != 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) exchange(0x%p) abnormal, maybe data overrun, start(%llu) command(0x%x)", - lport->port_id, xchg, xchg->alloc_jif, pkg->cmnd); - - UNF_PRINT_SFS(UNF_INFO, lport->port_id, - xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, - sizeof(union unf_sfs_u)); - } - - memset(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, 0, sizeof(union unf_sfs_u)); - - pkg->xchg_contex = (void *)xchg; - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - xchg->fcp_sfs_union.sfs_entry.cur_offset = 0; - alloc_time = xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - for (i = 0; i < PKG_MAX_PRIVATE_DATA_SIZE; i++) - xchg->private_data[i] = pkg->private_data[i]; - - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = alloc_time; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } else { - xchg = (struct unf_xchg *)pkg->xchg_contex; - } - - if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { - unf_cm_free_xchg((void *)lport, (void *)xchg); - - return NULL; - } - - return xchg; -} - -static u8 *unf_calc_big_cmnd_pld_buffer(struct unf_xchg *xchg, u32 cmnd_code) -{ - u8 *cmnd_pld = NULL; - void *buf = NULL; - u8 *dest = NULL; - - FC_CHECK_RETURN_VALUE(xchg, NULL); - - if (cmnd_code == ELS_RSCN) - cmnd_pld = (u8 
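unf_fill_rjt_pld() finishes the LS_RJT payload with a single OR, which only works because the caller passes a reason code and a reason explanation that occupy disjoint bit fields. A worked sketch of that convention, assuming the usual FC-LS LS_RJT word layout (reason code in bits 23:16, explanation in bits 15:8); the pre-shifted constants below are hypothetical stand-ins, not the driver's UNF_LS_RJT_* values:

    /* Hypothetical pre-shifted values following the FC-LS LS_RJT layout. */
    #define EX_RJT_CMD_NOT_SUPPORTED        (0x0B << 16)    /* reason code */
    #define EX_RJT_NO_ADDITIONAL_INFO       (0x00 << 8)     /* explanation */

    static inline u32 ex_rjt_word(u32 reason_code, u32 reason_explanation)
    {
            /* Same packing as unf_fill_rjt_pld(): one OR, no shifting here. */
            return reason_code | reason_explanation;
    }

With pre-shifted constants, ex_rjt_word(EX_RJT_CMD_NOT_SUPPORTED, EX_RJT_NO_ADDITIONAL_INFO) yields 0x000B0000, ready to be written after the UNF_ELS_CMND_RJT command word.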
*)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; - else - cmnd_pld = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; - - if (!cmnd_pld) { - buf = unf_get_one_big_sfs_buf(xchg); - if (!buf) - return NULL; - - if (cmnd_code == ELS_RSCN) { - memset(buf, 0, sizeof(struct unf_rscn_pld)); - xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld = buf; - } else { - memset(buf, 0, sizeof(struct unf_echo_payload)); - xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld = buf; - } - - dest = (u8 *)buf; - } else { - dest = (u8 *)(cmnd_pld + xchg->fcp_sfs_union.sfs_entry.cur_offset); - } - - return dest; -} - -static u8 *unf_calc_other_pld_buffer(struct unf_xchg *xchg) -{ - u8 *dest = NULL; - u32 offset = 0; - - FC_CHECK_RETURN_VALUE(xchg, NULL); - - offset = (sizeof(struct unf_fc_head)) + (xchg->fcp_sfs_union.sfs_entry.cur_offset); - dest = (u8 *)((u8 *)(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + offset); - - return dest; -} - -struct unf_xchg *unf_mv_data_2_xchg(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - u8 *dest = NULL; - u32 length = 0; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(pkg, NULL); - - xchg = unf_alloc_xchg_for_rcv_cmnd(lport, pkg); - if (!xchg) - return NULL; - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - - memcpy(&xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->sfs_common.frame_head, - &pkg->frame_head, sizeof(pkg->frame_head)); - - if (pkg->cmnd == ELS_RSCN || pkg->cmnd == ELS_ECHO) - dest = unf_calc_big_cmnd_pld_buffer(xchg, pkg->cmnd); - else - dest = unf_calc_other_pld_buffer(xchg); - - if (!dest) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_cm_free_xchg((void *)lport, (void *)xchg); - - return NULL; - } - - if (((xchg->fcp_sfs_union.sfs_entry.cur_offset + - pkg->unf_cmnd_pload_bl.length) > (u32)sizeof(union unf_sfs_u)) && - pkg->cmnd != ELS_RSCN && pkg->cmnd != ELS_ECHO) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) exchange(0x%p) command(0x%x,0x%x) copy payload overrun(0x%x:0x%x:0x%x)", - lport->port_id, xchg, pkg->cmnd, xchg->hotpooltag, - xchg->fcp_sfs_union.sfs_entry.cur_offset, - pkg->unf_cmnd_pload_bl.length, (u32)sizeof(union unf_sfs_u)); - - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_cm_free_xchg((void *)lport, (void *)xchg); - - return NULL; - } - - length = pkg->unf_cmnd_pload_bl.length; - if (length > 0) - memcpy(dest, pkg->unf_cmnd_pload_bl.buffer_ptr, length); - - xchg->fcp_sfs_union.sfs_entry.cur_offset += length; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - return xchg; -} - -static u32 unf_check_els_cmnd_valid(struct unf_lport *lport, struct unf_frame_pkg *pkg, - struct unf_xchg *xchg) -{ - struct unf_rjt_info rjt_info = {0}; - struct unf_lport *vport = NULL; - u32 sid = 0; - u32 did = 0; - - sid = (pkg->frame_head.csctl_sid) & UNF_NPORTID_MASK; - did = (pkg->frame_head.rctl_did) & UNF_NPORTID_MASK; - - memset(&rjt_info, 0, sizeof(struct unf_rjt_info)); - - if (pkg->class_mode != UNF_FC_PROTOCOL_CLASS_3) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) unsupported class 0x%x cmd 0x%x and send RJT", - lport->port_id, pkg->class_mode, pkg->cmnd); - - rjt_info.reason_code = UNF_LS_RJT_CLASS_ERROR; - rjt_info.els_cmnd_code = pkg->cmnd; - rjt_info.class_mode = pkg->class_mode; - (void)unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); - - return UNF_RETURN_ERROR; - } - - rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; - - if 
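unf_mv_data_2_xchg() above accumulates a multi-frame payload with a bounds-check, copy, advance discipline around cur_offset, freeing the exchange instead of overrunning the buffer. The same idea reduced to a self-contained sketch (struct reassembly_buf is an illustrative stand-in for the SFS entry, not a driver type):

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct reassembly_buf {                 /* stand-in for the SFS entry */
            u8 *data;
            u32 cur_offset;
            u32 capacity;
    };

    /* Append one frame's payload; fail rather than overrun the buffer. */
    static int reassembly_append(struct reassembly_buf *buf,
                                 const u8 *payload, u32 length)
    {
            /* Written this way the check cannot wrap around in u32. */
            if (length > buf->capacity - buf->cur_offset)
                    return -EOVERFLOW;      /* caller frees the exchange */

            if (length > 0)
                    memcpy(buf->data + buf->cur_offset, payload, length);
            buf->cur_offset += length;

            return 0;
    }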
(pkg->cmnd == ELS_FLOGI && lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) receive FLOGI in top (0x%x) and send LS_RJT", - lport->port_id, lport->act_topo); - - rjt_info.els_cmnd_code = ELS_FLOGI; - (void)unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); - - return UNF_RETURN_ERROR; - } - - if (pkg->cmnd == ELS_PLOGI && did >= UNF_FC_FID_DOM_MGR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x)receive PLOGI with wellknown address(0x%x) and Send LS_RJT", - lport->port_id, did); - - rjt_info.els_cmnd_code = ELS_PLOGI; - (void)unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); - - return UNF_RETURN_ERROR; - } - - if ((lport->nport_id == 0 || lport->nport_id == INVALID_VALUE32) && - (NEED_REFRESH_NPORTID(pkg))) { - lport->nport_id = did; - } else if ((lport->nport_id != did) && (pkg->cmnd != ELS_FLOGI)) { - vport = unf_cm_lookup_vport_by_did(lport, did); - if (!vport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive ELS cmd(0x%x) with abnormal D_ID(0x%x)", - lport->nport_id, pkg->cmnd, did); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - } - - return RETURN_OK; -} - -static u32 unf_rcv_els_cmnd_req(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 i = 0; - u32 sid = 0; - u32 did = 0; - struct unf_lport *vport = NULL; - u32 (*els_cmnd_handler)(struct unf_lport *, u32, struct unf_xchg *) = NULL; - - sid = (pkg->frame_head.csctl_sid) & UNF_NPORTID_MASK; - did = (pkg->frame_head.rctl_did) & UNF_NPORTID_MASK; - - xchg = unf_mv_data_2_xchg(lport, pkg); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive ElsCmnd(0x%x), exchange is NULL", - lport->port_id, pkg->cmnd); - return UNF_RETURN_ERROR; - } - - if (!pkg->last_pkg_flag) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Exchange(%u) waiting for last WQE", - xchg->hotpooltag); - return RETURN_OK; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Exchange(%u) get last WQE", xchg->hotpooltag); - - xchg->oxid = UNF_GET_OXID(pkg); - xchg->abort_oxid = xchg->oxid; - xchg->rxid = UNF_GET_RXID(pkg); - xchg->cmnd_code = pkg->cmnd; - - ret = unf_check_els_cmnd_valid(lport, pkg, xchg); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - - if (lport->nport_id != did && pkg->cmnd != ELS_FLOGI) { - vport = unf_cm_lookup_vport_by_did(lport, did); - if (!vport) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) received unknown ELS command with S_ID(0x%x) D_ID(0x%x))", - lport->port_id, sid, did); - return UNF_RETURN_ERROR; - } - lport = vport; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]VPort(0x%x) received ELS command with S_ID(0x%x) D_ID(0x%x)", - lport->port_id, sid, did); - } - - do { - if (pkg->cmnd == els_handle_table[i].cmnd) { - els_cmnd_handler = els_handle_table[i].els_cmnd_handler; - break; - } - i++; - } while (i < (sizeof(els_handle_table) / sizeof(struct unf_els_handle_table))); - - if (els_cmnd_handler) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) receive ELS(0x%x) from RPort(0x%x) and process it", - lport->port_id, pkg->cmnd, sid); - ret = els_cmnd_handler(lport, sid, xchg); - } else { - ret = unf_els_cmnd_default_handler(lport, xchg, sid, pkg->cmnd); - } - return ret; -} - -u32 unf_send_els_rsp_succ(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - u32 ret = RETURN_OK; - u16 hot_pool_tag = 0; - ulong flags = 
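unf_rcv_els_cmnd_req() above resolves its handler by a linear scan of els_handle_table, falling back to unf_els_cmnd_default_handler(), which answers LS_RJT, on a miss. A condensed sketch of that table-driven dispatch; the two table entries and handler prototypes are hypothetical:

    #include <linux/kernel.h>

    /* Assumed handler prototypes; the real ones live elsewhere in the driver. */
    u32 unf_plogi_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);
    u32 unf_logo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg);

    static const struct unf_els_handle_table ex_els_table[] = {
            { ELS_PLOGI, unf_plogi_handler },
            { ELS_LOGO,  unf_logo_handler },
    };

    static u32 ex_dispatch_els(struct unf_lport *lport, u32 cmnd, u32 sid,
                               struct unf_xchg *xchg)
    {
            u32 i;

            for (i = 0; i < ARRAY_SIZE(ex_els_table); i++)
                    if (ex_els_table[i].cmnd == cmnd)
                            return ex_els_table[i].els_cmnd_handler(lport, sid, xchg);

            /* No match: reject the unknown ELS with LS_RJT. */
            return unf_els_cmnd_default_handler(lport, xchg, sid, cmnd);
    }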
0; - void (*ob_callback)(struct unf_xchg *) = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (!lport->xchg_mgr_temp.unf_look_up_xchg_by_tag) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) lookup exchange by tag function is NULL", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - hot_pool_tag = (u16)(pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); - xchg = (struct unf_xchg *)(lport->xchg_mgr_temp.unf_look_up_xchg_by_tag((void *)lport, - hot_pool_tag)); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) find exhange by tag(0x%x) failed", - lport->port_id, hot_pool_tag); - - return UNF_RETURN_ERROR; - } - - lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (xchg->ob_callback && - (!(xchg->io_state & TGT_IO_STATE_ABORT))) { - ob_callback = xchg->ob_callback; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) with exchange(0x%p) tag(0x%x) do callback", - lport->port_id, xchg, hot_pool_tag); - - ob_callback(xchg); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } - - unf_cm_free_xchg((void *)lport, (void *)xchg); - return ret; -} - -static u8 *unf_calc_big_resp_pld_buffer(struct unf_xchg *xchg, u32 cmnd_code) -{ - u8 *resp_pld = NULL; - u8 *dest = NULL; - - FC_CHECK_RETURN_VALUE(xchg, NULL); - - if (cmnd_code == ELS_ECHO) { - resp_pld = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; - } else { - resp_pld = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr - ->get_id.gid_rsp.gid_acc_pld; - } - - if (resp_pld) - dest = (u8 *)(resp_pld + xchg->fcp_sfs_union.sfs_entry.cur_offset); - - return dest; -} - -static u8 *unf_calc_other_resp_pld_buffer(struct unf_xchg *xchg) -{ - u8 *dest = NULL; - u32 offset = 0; - - FC_CHECK_RETURN_VALUE(xchg, NULL); - - offset = xchg->fcp_sfs_union.sfs_entry.cur_offset; - dest = (u8 *)((u8 *)(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + offset); - - return dest; -} - -u32 unf_mv_resp_2_xchg(struct unf_xchg *xchg, struct unf_frame_pkg *pkg) -{ - u8 *dest = NULL; - u32 length = 0; - u32 offset = 0; - u32 max_frame_len = 0; - ulong flags = 0; - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - - if (UNF_NEED_BIG_RESPONSE_BUFF(xchg->cmnd_code)) { - dest = unf_calc_big_resp_pld_buffer(xchg, xchg->cmnd_code); - offset = 0; - max_frame_len = sizeof(struct unf_gid_acc_pld); - } else if (NS_GA_NXT == xchg->cmnd_code || NS_GIEL == xchg->cmnd_code) { - dest = unf_calc_big_resp_pld_buffer(xchg, xchg->cmnd_code); - offset = 0; - max_frame_len = xchg->fcp_sfs_union.sfs_entry.sfs_buff_len; - } else { - dest = unf_calc_other_resp_pld_buffer(xchg); - offset = sizeof(struct unf_fc_head); - max_frame_len = sizeof(union unf_sfs_u); - } - - if (!dest) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - return UNF_RETURN_ERROR; - } - - if (xchg->fcp_sfs_union.sfs_entry.cur_offset == 0) { - xchg->fcp_sfs_union.sfs_entry.cur_offset += offset; - dest = dest + offset; - } - - length = pkg->unf_cmnd_pload_bl.length; - - if ((xchg->fcp_sfs_union.sfs_entry.cur_offset + length) > - max_frame_len) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Exchange(0x%p) command(0x%x) hotpooltag(0x%x) OX_RX_ID(0x%x) S_ID(0x%x) D_ID(0x%x) copy payload overrun(0x%x:0x%x:0x%x)", - xchg, xchg->cmnd_code, xchg->hotpooltag, pkg->frame_head.oxid_rxid, - pkg->frame_head.csctl_sid & 
UNF_NPORTID_MASK, - pkg->frame_head.rctl_did & UNF_NPORTID_MASK, - xchg->fcp_sfs_union.sfs_entry.cur_offset, - pkg->unf_cmnd_pload_bl.length, max_frame_len); - - length = max_frame_len - xchg->fcp_sfs_union.sfs_entry.cur_offset; - } - - if (length > 0) - memcpy(dest, pkg->unf_cmnd_pload_bl.buffer_ptr, length); - - xchg->fcp_sfs_union.sfs_entry.cur_offset += length; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - return RETURN_OK; -} - -static void unf_ls_gs_do_callback(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg) -{ - ulong flags = 0; - void (*callback)(void *, void *, void *) = NULL; - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (xchg->callback && - (xchg->cmnd_code == ELS_RRQ || - xchg->cmnd_code == ELS_LOGO || - !(xchg->io_state & TGT_IO_STATE_ABORT))) { - callback = xchg->callback; - - if (xchg->cmnd_code == ELS_FLOGI || xchg->cmnd_code == ELS_FDISC) - xchg->sid = pkg->frame_head.rctl_did & UNF_NPORTID_MASK; - - if (xchg->cmnd_code == ELS_ECHO) { - xchg->private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME] = - pkg->private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME]; - xchg->private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME] = - pkg->private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME]; - xchg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME] = - pkg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME]; - xchg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME] = - pkg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME]; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - callback(xchg->lport, xchg->rport, xchg); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } -} - -u32 unf_send_ls_gs_cmnd_succ(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - u32 ret = RETURN_OK; - u16 hot_pool_tag = 0; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - unf_lport = lport; - - if (!unf_lport->xchg_mgr_temp.unf_look_up_xchg_by_tag) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) lookup exchange by tag function can't be NULL", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - hot_pool_tag = (u16)(pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); - xchg = (struct unf_xchg *)(unf_lport->xchg_mgr_temp - .unf_look_up_xchg_by_tag((void *)unf_lport, hot_pool_tag)); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed", - unf_lport->port_id, unf_lport->nport_id, hot_pool_tag); - - return UNF_RETURN_ERROR; - } - - UNF_CHECK_ALLOCTIME_VALID(unf_lport, hot_pool_tag, xchg, - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]); - - if ((pkg->frame_head.csctl_sid & UNF_NPORTID_MASK) != xchg->did) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find exhange invalid, package S_ID(0x%x) exchange S_ID(0x%x) D_ID(0x%x)", - unf_lport->port_id, pkg->frame_head.csctl_sid, xchg->sid, xchg->did); - - return UNF_RETURN_ERROR; - } - - if (pkg->last_pkg_flag == UNF_PKG_NOT_LAST_RESPONSE) { - ret = unf_mv_resp_2_xchg(xchg, pkg); - return ret; - } - - xchg->byte_orders = pkg->byte_orders; - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - unf_ls_gs_do_callback(xchg, pkg); - unf_cm_free_xchg((void *)unf_lport, (void *)xchg); - return ret; -} - -u32 unf_send_ls_gs_cmnd_failed(struct unf_lport *lport, - struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - u32 ret = RETURN_OK; - u16 hot_pool_tag = 0; - ulong flags = 0; - 
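unf_send_els_rsp_succ() and unf_send_ls_gs_cmnd_succ() share one completion shape: recover the exchange from the hot-pool tag carried in the package private data, cancel its timer, then run the saved callback and free the exchange. Condensed into a single illustrative helper (ex_complete_by_tag is not a function in this driver):

    static u32 ex_complete_by_tag(struct unf_lport *lport, struct unf_frame_pkg *pkg)
    {
            struct unf_xchg *xchg;
            u16 tag = (u16)pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX];

            if (!lport->xchg_mgr_temp.unf_look_up_xchg_by_tag)
                    return UNF_RETURN_ERROR;

            xchg = (struct unf_xchg *)
                   lport->xchg_mgr_temp.unf_look_up_xchg_by_tag((void *)lport, tag);
            if (!xchg)
                    return UNF_RETURN_ERROR;        /* stale or duplicate completion */

            /* Stop the exchange timer before any callback can run. */
            lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg);
            unf_ls_gs_do_callback(xchg, pkg);
            unf_cm_free_xchg((void *)lport, (void *)xchg);

            return RETURN_OK;
    }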
void (*ob_callback)(struct unf_xchg *) = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (!lport->xchg_mgr_temp.unf_look_up_xchg_by_tag) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) lookup exchange by tag function can't be NULL", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - hot_pool_tag = (u16)(pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); - xchg = (struct unf_xchg *)(lport->xchg_mgr_temp.unf_look_up_xchg_by_tag((void *)lport, - hot_pool_tag)); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) find exhange by tag(0x%x) failed", - lport->port_id, lport->nport_id, hot_pool_tag); - - return UNF_RETURN_ERROR; - } - - UNF_CHECK_ALLOCTIME_VALID(lport, hot_pool_tag, xchg, - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]); - - lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (xchg->ob_callback && - (xchg->cmnd_code == ELS_RRQ || xchg->cmnd_code == ELS_LOGO || - (!(xchg->io_state & TGT_IO_STATE_ABORT)))) { - ob_callback = xchg->ob_callback; - xchg->ob_callback_sts = pkg->status; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - ob_callback(xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) exchange(0x%p) tag(0x%x) do callback", - lport->port_id, xchg, hot_pool_tag); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } - - unf_cm_free_xchg((void *)lport, (void *)xchg); - return ret; -} - -static u32 unf_rcv_ls_gs_cmnd_reply(struct unf_lport *lport, - struct unf_frame_pkg *pkg) -{ - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (pkg->status == UNF_IO_SUCCESS || pkg->status == UNF_IO_UNDER_FLOW) - ret = unf_send_ls_gs_cmnd_succ(lport, pkg); - else - ret = unf_send_ls_gs_cmnd_failed(lport, pkg); - - return ret; -} - -u32 unf_receive_ls_gs_pkg(void *lport, struct unf_frame_pkg *pkg) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - unf_lport = (struct unf_lport *)lport; - - switch (pkg->type) { - case UNF_PKG_ELS_REQ_DONE: - case UNF_PKG_GS_REQ_DONE: - ret = unf_rcv_ls_gs_cmnd_reply(unf_lport, pkg); - break; - - case UNF_PKG_ELS_REQ: - ret = unf_rcv_els_cmnd_req(unf_lport, pkg); - break; - - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) with exchange type(0x%x) abnormal", - unf_lport->port_id, unf_lport->nport_id, pkg->type); - break; - } - - return ret; -} - -u32 unf_send_els_done(void *lport, struct unf_frame_pkg *pkg) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (pkg->type == UNF_PKG_ELS_REPLY_DONE) { - if (pkg->status == UNF_IO_SUCCESS || pkg->status == UNF_IO_UNDER_FLOW) - ret = unf_send_els_rsp_succ(lport, pkg); - else - ret = unf_send_ls_gs_cmnd_failed(lport, pkg); - } - - return ret; -} - -void unf_rport_immediate_link_down(struct unf_lport *lport, struct unf_rport *rport) -{ - /* Swap case: Report Link Down immediately & release R_Port */ - ulong flags = 0; - struct unf_disc *disc = NULL; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flags); - /* 1. 
Inc R_Port ref_cnt */ - if (unf_rport_ref_inc(rport) != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) Rport(0x%p,0x%x) is removing and no need process", - lport->port_id, rport, rport->nport_id); - - return; - } - - /* 2. R_PORT state update: Link Down Event --->>> closing state */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - /* 3. Put R_Port from busy to destroy list */ - disc = &lport->disc; - spin_lock_irqsave(&disc->rport_busy_pool_lock, flags); - list_del_init(&rport->entry_rport); - list_add_tail(&rport->entry_rport, &disc->list_destroy_rports); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flags); - - /* 4. Schedule Closing work (Enqueuing workqueue) */ - unf_schedule_closing_work(lport, rport); - - unf_rport_ref_dec(rport); -} - -struct unf_rport *unf_find_rport(struct unf_lport *lport, u32 rport_nport_id, - u64 lport_name) -{ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - if (rport_nport_id >= UNF_FC_FID_DOM_MGR) { - /* R_Port is Fabric: by N_Port_ID */ - unf_rport = unf_get_rport_by_nport_id(unf_lport, rport_nport_id); - } else { - /* Others: by WWPN & N_Port_ID */ - unf_rport = unf_find_valid_rport(unf_lport, lport_name, rport_nport_id); - } - - return unf_rport; -} - -void unf_process_logo_in_pri_loop(struct unf_lport *lport, struct unf_rport *rport) -{ - /* Send PLOGI or LOGO */ - struct unf_rport *unf_rport = rport; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); /* PLOGI WAIT */ - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Private Loop with INI mode, Avoid COM Mode problem */ - unf_rport_delay_login(unf_rport); -} - -void unf_process_logo_in_n2n(struct unf_lport *lport, struct unf_rport *rport) -{ - /* Send PLOGI or LOGO */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - if (unf_lport->port_name > unf_rport->port_name) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x)'s WWN(0x%llx) is larger than(0x%llx), should be master", - unf_lport->port_id, unf_lport->port_name, unf_rport->port_name); - - ret = unf_send_plogi(unf_lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send PLOGI failed, enter recovery", - lport->port_id); - - unf_rport_error_recovery(unf_rport); - } - } else { - unf_rport_enter_logo(unf_lport, unf_rport); - } -} - -void unf_process_logo_in_fabric(struct unf_lport *lport, - struct unf_rport *rport) -{ - /* Send GFF_ID or LOGO */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - struct unf_rport *sns_port = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - /* L_Port with INI Mode: Send GFF_ID */ - sns_port = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't 
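unf_process_logo_in_n2n() above settles the point-to-point relogin race with a WWPN comparison: the side holding the numerically larger port name acts as master and re-sends PLOGI, the other side sends LOGO and waits. The arbitration itself is one comparison:

    /* Point-to-point relogin arbitration: the larger WWPN re-initiates PLOGI. */
    static bool ex_is_plogi_master(u64 local_wwpn, u64 remote_wwpn)
    {
            return local_wwpn > remote_wwpn;
    }

Because both ends evaluate the same pair of names, exactly one of them elects itself master, so the two ports cannot both send PLOGI or both send LOGO.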
find fabric port", - unf_lport->port_id); - return; - } - - ret = unf_get_and_post_disc_event(lport, sns_port, unf_rport->nport_id, - UNF_DISC_GET_FEATURE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - unf_lport->port_id, UNF_DISC_GET_FEATURE, - unf_rport->nport_id); - - unf_rcv_gff_id_rsp_unknown(unf_lport, unf_rport->nport_id); - } -} - -void unf_process_rport_after_logo(struct unf_lport *lport, struct unf_rport *rport) -{ - /* - * 1. LOGO handler - * 2. PRLO handler - * 3. LOGO_CALL_BACK (send LOGO ACC) handler - */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) { - /* R_Port is not fabric port (retry LOGIN or LOGO) */ - if (unf_lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { - /* Private Loop: PLOGI or LOGO */ - unf_process_logo_in_pri_loop(unf_lport, unf_rport); - } else if (unf_lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - /* Point to Point: LOGIN or LOGO */ - unf_process_logo_in_n2n(unf_lport, unf_rport); - } else { - /* Fabric or Public Loop: GFF_ID or LOGO */ - unf_process_logo_in_fabric(unf_lport, unf_rport); - } - } else { - /* Rport is fabric port: link down now */ - unf_rport_linkdown(unf_lport, unf_rport); - } -} - -static u32 unf_rcv_bls_req_done(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - /* - * About I/O resource: - * 1. normal: Release I/O resource during RRQ processing - * 2. exception: Release I/O resource immediately - */ - struct unf_xchg *xchg = NULL; - u16 hot_pool_tag = 0; - ulong flags = 0; - ulong time_ms = 0; - u32 ret = RETURN_OK; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - unf_lport = lport; - - hot_pool_tag = (u16)pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]; - xchg = (struct unf_xchg *)unf_cm_lookup_xchg_by_tag((void *)unf_lport, hot_pool_tag); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) can't find exchange by tag(0x%x) when receiving ABTS response", - unf_lport->port_id, hot_pool_tag); - return UNF_RETURN_ERROR; - } - - UNF_CHECK_ALLOCTIME_VALID(lport, hot_pool_tag, xchg, - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]); - - ret = unf_xchg_ref_inc(xchg, TGT_ABTS_DONE); - FC_CHECK_RETURN_VALUE((ret == RETURN_OK), UNF_RETURN_ERROR); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - xchg->oxid = UNF_GET_OXID(pkg); - xchg->rxid = UNF_GET_RXID(pkg); - xchg->io_state |= INI_IO_STATE_DONE; - xchg->abts_state |= ABTS_RESPONSE_RECEIVED; - if (!(INI_IO_STATE_UPABORT & xchg->io_state)) { - /* NOTE: I/O exchange has been released and used again */ - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) SID(0x%x) exch(0x%p) (0x%x:0x%x:0x%x:0x%x) state(0x%x) is abnormal with cnt(0x%x)", - unf_lport->port_id, unf_lport->nport_id, xchg->sid, - xchg, xchg->hotpooltag, xchg->oxid, xchg->rxid, - xchg->oid, xchg->io_state, - atomic_read(&xchg->ref_cnt)); - - unf_xchg_ref_dec(xchg, TGT_ABTS_DONE); - return UNF_RETURN_ERROR; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - /* - * Exchange I/O status check: Succ-> Add RRQ Timer - * ***** pkg->status --- to --->>> scsi_cmnd->result ***** - * * - * FAILED: 
ERR_Code or X_ID is err, or BA_RSP type is err - */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (pkg->status == UNF_IO_SUCCESS) { - /* Succeed: PKG status -->> EXCH status -->> scsi status */ - UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS); - xchg->io_state |= INI_IO_STATE_WAIT_RRQ; - xchg->rxid = UNF_GET_RXID(pkg); - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - /* Add RRQ timer */ - time_ms = (ulong)(unf_lport->ra_tov); - unf_lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, time_ms, - UNF_TIMER_TYPE_INI_RRQ); - } else { - /* Failed: PKG status -->> EXCH status -->> scsi status */ - UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_FAILED); - if (MARKER_STS_RECEIVED & xchg->abts_state) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - /* NOTE: release I/O resource immediately */ - unf_cm_free_xchg(unf_lport, xchg); - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) exch(0x%p) OX_RX(0x%x:0x%x) IOstate(0x%x) ABTSstate(0x%x) receive response abnormal ref(0x%x)", - unf_lport->port_id, xchg, xchg->oxid, xchg->rxid, - xchg->io_state, xchg->abts_state, atomic_read(&xchg->ref_cnt)); - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } - } - - /* - * If abts response arrived before - * marker sts received just wake up abts marker sema - */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (!(MARKER_STS_RECEIVED & xchg->abts_state)) { - xchg->ucode_abts_state = pkg->status; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - up(&xchg->task_sema); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } - - unf_xchg_ref_dec(xchg, TGT_ABTS_DONE); - return ret; -} - -u32 unf_receive_bls_pkg(void *lport, struct unf_frame_pkg *pkg) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = UNF_RETURN_ERROR; - - unf_lport = (struct unf_lport *)lport; - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (pkg->type == UNF_PKG_BLS_REQ_DONE) { - /* INI: RCVD BLS Req Done */ - ret = unf_rcv_bls_req_done(lport, pkg); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) received BLS packet type(%xh) is error", - unf_lport->port_id, pkg->type); - - return UNF_RETURN_ERROR; - } - - return ret; -} - -static void unf_fill_free_xid_pkg(struct unf_xchg *xchg, struct unf_frame_pkg *pkg) -{ - pkg->frame_head.csctl_sid = xchg->sid; - pkg->frame_head.rctl_did = xchg->did; - pkg->frame_head.oxid_rxid = (u32)(((u32)xchg->oxid << UNF_SHIFT_16) | xchg->rxid); - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag | UNF_HOTTAG_FLAG; - UNF_SET_XCHG_ALLOC_TIME(pkg, xchg); - - if (xchg->xchg_type == UNF_XCHG_TYPE_SFS) { - if (UNF_XCHG_IS_ELS_REPLY(xchg)) { - pkg->type = UNF_PKG_ELS_REPLY; - pkg->rx_or_ox_id = UNF_PKG_FREE_RXID; - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = INVALID_VALUE32; - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = INVALID_VALUE32; - } else { - pkg->type = UNF_PKG_ELS_REQ; - pkg->rx_or_ox_id = UNF_PKG_FREE_OXID; - } - } else if (xchg->xchg_type == UNF_XCHG_TYPE_INI) { - pkg->type = UNF_PKG_INI_IO; - pkg->rx_or_ox_id = UNF_PKG_FREE_OXID; - } -} - -void unf_notify_chip_free_xid(struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VOID(xchg); - unf_lport = xchg->lport; - FC_CHECK_RETURN_VOID(unf_lport); - - unf_fill_free_xid_pkg(xchg, &pkg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Sid_Did(0x%x)(0x%x) Xchg(0x%p) RXorOX(0x%x) 
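Note the asymmetry in unf_rcv_bls_req_done() above: a successful ABTS does not free the exchange, it parks it in WAIT_RRQ state behind an R_A_TOV timer so the RRQ flow can retire the X_IDs safely, while a failed ABTS whose marker status already arrived is freed on the spot. A sketch of just that decision (ex_abts_done is illustrative, assembled from the calls seen above):

    static void ex_abts_done(struct unf_lport *lport, struct unf_xchg *xchg,
                             u32 pkg_status)
    {
            if (pkg_status == UNF_IO_SUCCESS) {
                    xchg->io_state |= INI_IO_STATE_WAIT_RRQ;
                    /* R_A_TOV bounds how long the aborted X_IDs stay reserved. */
                    lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg,
                                                            (ulong)lport->ra_tov,
                                                            UNF_TIMER_TYPE_INI_RRQ);
            } else {
                    unf_cm_free_xchg(lport, xchg);  /* exception path: free now */
            }
    }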
tag(0x%x) xid(0x%x) magic(0x%x) Stat(0x%x)type(0x%x) wait timeout.", - xchg->sid, xchg->did, xchg, pkg.rx_or_ox_id, - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX], pkg.frame_head.oxid_rxid, - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], xchg->io_state, pkg.type); - - ret = unf_lport->low_level_func.service_op.ll_release_xid(unf_lport->fc_port, &pkg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Free xid abnormal:Sid_Did(0x%x 0x%x) Xchg(0x%p) RXorOX(0x%x) xid(0x%x) Stat(0x%x) tag(0x%x) magic(0x%x) type(0x%x).", - xchg->sid, xchg->did, xchg, pkg.rx_or_ox_id, - pkg.frame_head.oxid_rxid, xchg->io_state, - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX], - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - pkg.type); - } -} diff --git a/drivers/scsi/spfc/common/unf_service.h b/drivers/scsi/spfc/common/unf_service.h deleted file mode 100644 index 0dd2975c6a7b5e1d4250d47ab2a000c29065b528..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_service.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_SERVICE_H -#define UNF_SERVICE_H - -#include "unf_type.h" -#include "unf_exchg.h" -#include "unf_rport.h" - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -extern u32 max_frame_size; -#define UNF_INIT_DISC 0x1 /* first time DISC */ -#define UNF_RSCN_DISC 0x2 /* RSCN Port Addr DISC */ -#define UNF_SET_ELS_ACC_TYPE(els_cmd) ((u32)(els_cmd) << 16 | ELS_ACC) -#define UNF_SET_ELS_RJT_TYPE(els_cmd) ((u32)(els_cmd) << 16 | ELS_RJT) -#define UNF_XCHG_IS_ELS_REPLY(xchg) \ - ((ELS_ACC == ((xchg)->cmnd_code & 0x0ffff)) || \ - (ELS_RJT == ((xchg)->cmnd_code & 0x0ffff))) - -struct unf_els_handle_table { - u32 cmnd; - u32 (*els_cmnd_handler)(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -}; - -void unf_select_sq(struct unf_xchg *xchg, struct unf_frame_pkg *pkg); -void unf_fill_package(struct unf_frame_pkg *pkg, struct unf_xchg *xchg, - struct unf_rport *rport); -struct unf_xchg *unf_get_sfs_free_xchg_and_init(struct unf_lport *lport, - u32 did, - struct unf_rport *rport, - union unf_sfs_u **fc_entry); -void *unf_get_one_big_sfs_buf(struct unf_xchg *xchg); -u32 unf_mv_resp_2_xchg(struct unf_xchg *xchg, struct unf_frame_pkg *pkg); -void unf_rport_immediate_link_down(struct unf_lport *lport, - struct unf_rport *rport); -struct unf_rport *unf_find_rport(struct unf_lport *lport, u32 rport_nport_id, - u64 port_name); -void unf_process_logo_in_fabric(struct unf_lport *lport, - struct unf_rport *rport); -void unf_notify_chip_free_xid(struct unf_xchg *xchg); - -u32 unf_ls_gs_cmnd_send(struct unf_lport *lport, struct unf_frame_pkg *pkg, - struct unf_xchg *xchg); -u32 unf_receive_ls_gs_pkg(void *lport, struct unf_frame_pkg *pkg); -struct unf_xchg *unf_mv_data_2_xchg(struct unf_lport *lport, - struct unf_frame_pkg *pkg); -u32 unf_receive_bls_pkg(void *lport, struct unf_frame_pkg *pkg); -u32 unf_send_els_done(void *lport, struct unf_frame_pkg *pkg); -u32 unf_send_els_rjt_by_did(struct unf_lport *lport, struct unf_xchg *xchg, - u32 did, struct unf_rjt_info *rjt_info); -u32 unf_send_els_rjt_by_rport(struct unf_lport *lport, struct unf_xchg *xchg, - struct unf_rport *rport, - struct unf_rjt_info *rjt_info); -u32 unf_send_abts(struct unf_lport *lport, struct unf_xchg *xchg); -void unf_process_rport_after_logo(struct unf_lport *lport, - struct unf_rport *rport); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* __UNF_SERVICE_H__ */ diff 
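In unf_service.h just above, an ELS reply is encoded into a single u32 cmnd_code: the ELS opcode in the high 16 bits and ELS_ACC or ELS_RJT in the low 16, which is exactly what UNF_XCHG_IS_ELS_REPLY() recovers with its 0x0ffff mask. A worked example, assuming the standard FC-LS opcode values (ELS_LOGO = 0x05, ELS_RJT = 0x01):

    /*
     * UNF_SET_ELS_RJT_TYPE(ELS_LOGO)
     *     = ((u32)0x05 << 16) | 0x01
     *     = 0x00050001
     *
     * UNF_XCHG_IS_ELS_REPLY() then masks with 0x0ffff:
     *     0x00050001 & 0x0ffff = 0x0001 == ELS_RJT
     *
     * so the exchange is classified as a reply rather than a new request,
     * and unf_fill_free_xid_pkg() frees its RX_ID instead of its OX_ID.
     */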
--git a/drivers/scsi/spfc/common/unf_type.h b/drivers/scsi/spfc/common/unf_type.h deleted file mode 100644 index 28e163d0543ce70201df8f9f06ef55bb5e71f20c..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/common/unf_type.h +++ /dev/null @@ -1,216 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_TYPE_H -#define UNF_TYPE_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#ifndef SPFC_FT -#define SPFC_FT -#endif - -#define BUF_LIST_PAGE_SIZE (PAGE_SIZE << 8) - -#define UNF_S_TO_NS (1000000000) -#define UNF_S_TO_MS (1000) - -enum UNF_OS_THRD_PRI_E { - UNF_OS_THRD_PRI_HIGHEST = 0, - UNF_OS_THRD_PRI_HIGH, - UNF_OS_THRD_PRI_SUBHIGH, - UNF_OS_THRD_PRI_MIDDLE, - UNF_OS_THRD_PRI_LOW, - UNF_OS_THRD_PRI_BUTT -}; - -#define UNF_OS_LIST_NEXT(a) ((a)->next) -#define UNF_OS_LIST_PREV(a) ((a)->prev) - -#define UNF_OS_PER_NS (1000000000) -#define UNF_OS_MS_TO_NS (1000000) - -#ifndef MIN -#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) -#endif - -#ifndef MAX -#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) -#endif - -#ifndef INVALID_VALUE64 -#define INVALID_VALUE64 0xFFFFFFFFFFFFFFFFULL -#endif /* INVALID_VALUE64 */ - -#ifndef INVALID_VALUE32 -#define INVALID_VALUE32 0xFFFFFFFF -#endif /* INVALID_VALUE32 */ - -#ifndef INVALID_VALUE16 -#define INVALID_VALUE16 0xFFFF -#endif /* INVALID_VALUE16 */ - -#ifndef INVALID_VALUE8 -#define INVALID_VALUE8 0xFF -#endif /* INVALID_VALUE8 */ - -#ifndef RETURN_OK -#define RETURN_OK 0 -#endif - -#ifndef RETURN_ERROR -#define RETURN_ERROR (~0) -#endif -#define UNF_RETURN_ERROR (~0) - -/* define shift bits */ -#define UNF_SHIFT_1 1 -#define UNF_SHIFT_2 2 -#define UNF_SHIFT_3 3 -#define UNF_SHIFT_4 4 -#define UNF_SHIFT_6 6 -#define UNF_SHIFT_7 7 -#define UNF_SHIFT_8 8 -#define UNF_SHIFT_11 11 -#define UNF_SHIFT_12 12 -#define UNF_SHIFT_15 15 -#define UNF_SHIFT_16 16 -#define UNF_SHIFT_17 17 -#define UNF_SHIFT_19 19 -#define UNF_SHIFT_20 20 -#define UNF_SHIFT_23 23 -#define UNF_SHIFT_24 24 -#define UNF_SHIFT_25 25 -#define UNF_SHIFT_26 26 -#define UNF_SHIFT_28 28 -#define UNF_SHIFT_29 29 -#define UNF_SHIFT_32 32 -#define UNF_SHIFT_35 35 -#define UNF_SHIFT_37 37 -#define UNF_SHIFT_39 39 -#define UNF_SHIFT_40 40 -#define UNF_SHIFT_43 43 -#define UNF_SHIFT_48 48 -#define UNF_SHIFT_51 51 -#define UNF_SHIFT_56 56 -#define UNF_SHIFT_57 57 -#define UNF_SHIFT_59 59 -#define UNF_SHIFT_60 60 -#define UNF_SHIFT_61 61 - -/* array index */ -#define ARRAY_INDEX_0 0 -#define ARRAY_INDEX_1 1 -#define ARRAY_INDEX_2 2 -#define ARRAY_INDEX_3 3 -#define ARRAY_INDEX_4 4 -#define ARRAY_INDEX_5 5 -#define ARRAY_INDEX_6 6 -#define ARRAY_INDEX_7 7 -#define ARRAY_INDEX_8 8 -#define ARRAY_INDEX_10 10 -#define ARRAY_INDEX_11 11 -#define ARRAY_INDEX_12 12 -#define ARRAY_INDEX_13 13 - -/* define mask bits */ -#define UNF_MASK_BIT_7_0 0xff -#define UNF_MASK_BIT_15_0 0x0000ffff -#define UNF_MASK_BIT_31_16 0xffff0000 - -#define UNF_IO_SUCCESS 0x00000000 -#define UNF_IO_ABORTED 0x00000001 /* the host system aborted the command */ -#define UNF_IO_FAILED 0x00000002 -#define UNF_IO_ABORT_ABTS 0x00000003 -#define UNF_IO_ABORT_LOGIN 0x00000004 /* abort login */ -#define UNF_IO_ABORT_REET 0x00000005 /* reset event aborted the transport */ -#define UNF_IO_ABORT_FAILED 0x00000006 /* 
abort failed */ -/* data out of order ,data reassembly error */ -#define UNF_IO_OUTOF_ORDER 0x00000007 -#define UNF_IO_FTO 0x00000008 /* frame time out */ -#define UNF_IO_LINK_FAILURE 0x00000009 -#define UNF_IO_OVER_FLOW 0x0000000a /* data over run */ -#define UNF_IO_RSP_OVER 0x0000000b -#define UNF_IO_LOST_FRAME 0x0000000c -#define UNF_IO_UNDER_FLOW 0x0000000d /* data under run */ -#define UNF_IO_HOST_PROG_ERROR 0x0000000e -#define UNF_IO_SEST_PROG_ERROR 0x0000000f -#define UNF_IO_INVALID_ENTRY 0x00000010 -#define UNF_IO_ABORT_SEQ_NOT 0x00000011 -#define UNF_IO_REJECT 0x00000012 -#define UNF_IO_RS_INFO 0x00000013 -#define UNF_IO_EDC_IN_ERROR 0x00000014 -#define UNF_IO_EDC_OUT_ERROR 0x00000015 -#define UNF_IO_UNINIT_KEK_ERR 0x00000016 -#define UNF_IO_DEK_OUTOF_RANGE 0x00000017 -#define UNF_IO_KEY_UNWRAP_ERR 0x00000018 -#define UNF_IO_KEY_TAG_ERR 0x00000019 -#define UNF_IO_KEY_ECC_ERR 0x0000001a -#define UNF_IO_BLOCK_SIZE_ERROR 0x0000001b -#define UNF_IO_ILLEGAL_CIPHER_MODE 0x0000001c -#define UNF_IO_CLEAN_UP 0x0000001d -#define UNF_SRR_RECEIVE 0x0000001e /* receive srr */ -/* The target device sent an ABTS to abort the I/O.*/ -#define UNF_IO_ABORTED_BY_TARGET 0x0000001f -#define UNF_IO_TRANSPORT_ERROR 0x00000020 -#define UNF_IO_LINK_FLASH 0x00000021 -#define UNF_IO_TIMEOUT 0x00000022 -#define UNF_IO_PORT_UNAVAILABLE 0x00000023 -#define UNF_IO_PORT_LOGOUT 0x00000024 -#define UNF_IO_PORT_CFG_CHG 0x00000025 -#define UNF_IO_FIRMWARE_RES_UNAVAILABLE 0x00000026 -#define UNF_IO_TASK_MGT_OVERRUN 0x00000027 -#define UNF_IO_DMA_ERROR 0x00000028 -#define UNF_IO_DIF_ERROR 0x00000029 -#define UNF_IO_NO_LPORT 0x0000002a -#define UNF_IO_NO_XCHG 0x0000002b -#define UNF_IO_SOFT_ERR 0x0000002c -#define UNF_IO_XCHG_ADD_ERROR 0x0000002d -#define UNF_IO_NO_LOGIN 0x0000002e -#define UNF_IO_NO_BUFFER 0x0000002f -#define UNF_IO_DID_ERROR 0x00000030 -#define UNF_IO_UNSUPPORT 0x00000031 -#define UNF_IO_NOREADY 0x00000032 -#define UNF_IO_NPORTID_REUSED 0x00000033 -#define UNF_IO_NPORT_HANDLE_REUSED 0x00000034 -#define UNF_IO_NO_NPORT_HANDLE 0x00000035 -#define UNF_IO_ABORT_BY_FW 0x00000036 -#define UNF_IO_ABORT_PORT_REMOVING 0x00000037 -#define UNF_IO_INCOMPLETE 0x00000038 -#define UNF_IO_DIF_REF_ERROR 0x00000039 -#define UNF_IO_DIF_GEN_ERROR 0x0000003a - -#define UNF_IO_ERREND 0xFFFFFFFF - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_chipitf.c b/drivers/scsi/spfc/hw/spfc_chipitf.c deleted file mode 100644 index be6073ff4dc079969a026700c0063399d1c04e20..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_chipitf.c +++ /dev/null @@ -1,1105 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_chipitf.h" -#include "sphw_hw.h" -#include "sphw_crm.h" - -#define SPFC_MBOX_TIME_SEC_MAX (60) - -#define SPFC_LINK_UP_COUNT 1 -#define SPFC_LINK_DOWN_COUNT 2 -#define SPFC_FC_DELETE_CMND_COUNT 3 - -#define SPFC_MBX_MAX_TIMEOUT 10000 - -u32 spfc_get_chip_msg(void *hba, void *mac) -{ - struct spfc_hba_info *spfc_hba = NULL; - struct unf_get_chip_info_argout *wwn = NULL; - struct spfc_inmbox_get_chip_info get_chip_info; - union spfc_outmbox_generic *get_chip_info_sts = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(mac, UNF_RETURN_ERROR); - - spfc_hba = (struct spfc_hba_info *)hba; - wwn = (struct unf_get_chip_info_argout *)mac; - - memset(&get_chip_info, 0, sizeof(struct spfc_inmbox_get_chip_info)); - - get_chip_info_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - 
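    /*
     * Illustrative aside, not part of the original driver: each mailbox
     * helper in this file pairs kmalloc() with a later memset(); the two
     * calls could be collapsed into one zeroed allocation, e.g.
     *
     *      get_chip_info_sts = kzalloc(sizeof(union spfc_outmbox_generic),
     *                                  GFP_ATOMIC);
     *
     * with identical behavior.
     */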
if (!get_chip_info_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(get_chip_info_sts, 0, sizeof(union spfc_outmbox_generic)); - - get_chip_info.header.cmnd_type = SPFC_MBOX_GET_CHIP_INFO; - get_chip_info.header.length = - SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_get_chip_info)); - - if (spfc_mb_send_and_wait_mbox(spfc_hba, &get_chip_info, - sizeof(struct spfc_inmbox_get_chip_info), - get_chip_info_sts) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]spfc can't send and wait mailbox, command type: 0x%x.", - get_chip_info.header.cmnd_type); - - goto exit; - } - - if (get_chip_info_sts->get_chip_info_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) mailbox status incorrect status(0x%x) .", - spfc_hba->port_cfg.port_id, - get_chip_info_sts->get_chip_info_sts.status); - - goto exit; - } - - if (get_chip_info_sts->get_chip_info_sts.header.cmnd_type != SPFC_MBOX_GET_CHIP_INFO_STS) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) receive mailbox type incorrect type: 0x%x.", - spfc_hba->port_cfg.port_id, - get_chip_info_sts->get_chip_info_sts.header.cmnd_type); - - goto exit; - } - - wwn->board_type = get_chip_info_sts->get_chip_info_sts.board_type; - spfc_hba->card_info.card_type = get_chip_info_sts->get_chip_info_sts.board_type; - wwn->wwnn = get_chip_info_sts->get_chip_info_sts.wwnn; - wwn->wwpn = get_chip_info_sts->get_chip_info_sts.wwpn; - - ret = RETURN_OK; -exit: - kfree(get_chip_info_sts); - - return ret; -} - -u32 spfc_get_chip_capability(void *hwdev_handle, - struct spfc_chip_info *chip_info) -{ - struct spfc_inmbox_get_chip_info get_chip_info; - union spfc_outmbox_generic *get_chip_info_sts = NULL; - u16 out_size = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hwdev_handle, UNF_RETURN_ERROR); - - memset(&get_chip_info, 0, sizeof(struct spfc_inmbox_get_chip_info)); - - get_chip_info_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!get_chip_info_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(get_chip_info_sts, 0, sizeof(union spfc_outmbox_generic)); - - get_chip_info.header.cmnd_type = SPFC_MBOX_GET_CHIP_INFO; - get_chip_info.header.length = - SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_get_chip_info)); - get_chip_info.header.port_id = (u8)sphw_global_func_id(hwdev_handle); - out_size = sizeof(union spfc_outmbox_generic); - - if (sphw_msg_to_mgmt_sync(hwdev_handle, COMM_MOD_FC, SPFC_MBOX_GET_CHIP_INFO, - (void *)&get_chip_info.header, - sizeof(struct spfc_inmbox_get_chip_info), - (union spfc_outmbox_generic *)(get_chip_info_sts), &out_size, - (SPFC_MBX_MAX_TIMEOUT), SPHW_CHANNEL_FC) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "spfc can't send and wait mailbox, command type: 0x%x.", - SPFC_MBOX_GET_CHIP_INFO); - - goto exit; - } - - if (get_chip_info_sts->get_chip_info_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port mailbox status incorrect status(0x%x) .", - get_chip_info_sts->get_chip_info_sts.status); - - goto exit; - } - - if (get_chip_info_sts->get_chip_info_sts.header.cmnd_type != SPFC_MBOX_GET_CHIP_INFO_STS) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port receive mailbox type incorrect type: 0x%x.", - get_chip_info_sts->get_chip_info_sts.header.cmnd_type); - - goto exit; - } - - chip_info->wwnn = get_chip_info_sts->get_chip_info_sts.wwnn; - chip_info->wwpn = 
get_chip_info_sts->get_chip_info_sts.wwpn; - - ret = RETURN_OK; -exit: - kfree(get_chip_info_sts); - - return ret; -} - -u32 spfc_config_port_table(struct spfc_hba_info *hba) -{ - struct spfc_inmbox_config_api config_api; - union spfc_outmbox_generic *out_mbox = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - memset(&config_api, 0, sizeof(config_api)); - out_mbox = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!out_mbox) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(out_mbox, 0, sizeof(union spfc_outmbox_generic)); - - config_api.header.cmnd_type = SPFC_MBOX_CONFIG_API; - config_api.header.length = SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_config_api)); - - config_api.op_code = UNDEFINEOPCODE; - - /* change switching top cmd of CM to the cmd that up recognize */ - /* if the cmd equals UNF_TOP_P2P_MASK sending in CM means that it - * should be changed into P2P top, LL using SPFC_TOP_NON_LOOP_MASK - */ - if (((u8)(hba->port_topo_cfg)) == UNF_TOP_P2P_MASK) { - config_api.topy_mode = 0x2; - /* if the cmd equals UNF_TOP_LOOP_MASK sending in CM means that it - *should be changed into loop top, LL using SPFC_TOP_LOOP_MASK - */ - } else if (((u8)(hba->port_topo_cfg)) == UNF_TOP_LOOP_MASK) { - config_api.topy_mode = 0x1; - /* if the cmd equals UNF_TOP_AUTO_MASK sending in CM means that it - *should be changed into loop top, LL using SPFC_TOP_AUTO_MASK - */ - } else if (((u8)(hba->port_topo_cfg)) == UNF_TOP_AUTO_MASK) { - config_api.topy_mode = 0x0; - } else { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) topo cmd is error, command type: 0x%x", - hba->port_cfg.port_id, (u8)(hba->port_topo_cfg)); - - goto exit; - } - - /* About speed */ - config_api.sfp_speed = (u8)(hba->port_speed_cfg); - config_api.max_speed = (u8)(hba->max_support_speed); - - config_api.rx_6432g_bb_credit = SPFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT; - config_api.rx_16g_bb_credit = SPFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT; - config_api.rx_84g_bb_credit = SPFC_LOWLEVEL_DEFAULT_8G_BB_CREDIT; - config_api.rdy_cnt_bf_fst_frm = SPFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT; - config_api.esch_32g_value = SPFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE; - config_api.esch_16g_value = SPFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE; - config_api.esch_8g_value = SPFC_LOWLEVEL_DEFAULT_8G_ESCH_VALUE; - config_api.esch_4g_value = SPFC_LOWLEVEL_DEFAULT_8G_ESCH_VALUE; - config_api.esch_64g_value = SPFC_LOWLEVEL_DEFAULT_8G_ESCH_VALUE; - config_api.esch_bust_size = SPFC_LOWLEVEL_DEFAULT_ESCH_BUST_SIZE; - - /* default value:0xFF */ - config_api.hard_alpa = 0xFF; - memcpy(config_api.port_name, hba->sys_port_name, UNF_WWN_LEN); - - /* if only for slave, the value is 1; if participate master choosing, - * the value is 0 - */ - config_api.slave = hba->port_loop_role; - - /* 1:auto negotiate, 0:fixed mode negotiate */ - if (config_api.sfp_speed == 0) - config_api.auto_sneg = 0x1; - else - config_api.auto_sneg = 0x0; - - if (spfc_mb_send_and_wait_mbox(hba, &config_api, sizeof(config_api), - out_mbox) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[warn]Port(0x%x) SPFC can't send and wait mailbox, command type: 0x%x", - hba->port_cfg.port_id, - config_api.header.cmnd_type); - - goto exit; - } - - if (out_mbox->config_api_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[err]Port(0x%x) receive mailbox type(0x%x) with status(0x%x) error", - hba->port_cfg.port_id, - out_mbox->config_api_sts.header.cmnd_type, - 
out_mbox->config_api_sts.status); - - goto exit; - } - - if (out_mbox->config_api_sts.header.cmnd_type != SPFC_MBOX_CONFIG_API_STS) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[err]Port(0x%x) receive mailbox type(0x%x) error", - hba->port_cfg.port_id, - out_mbox->config_api_sts.header.cmnd_type); - - goto exit; - } - - ret = RETURN_OK; -exit: - kfree(out_mbox); - - return ret; -} - -u32 spfc_port_switch(struct spfc_hba_info *hba, bool turn_on) -{ - struct spfc_inmbox_port_switch port_switch; - union spfc_outmbox_generic *port_switch_sts = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - memset(&port_switch, 0, sizeof(port_switch)); - - port_switch_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!port_switch_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(port_switch_sts, 0, sizeof(union spfc_outmbox_generic)); - - port_switch.header.cmnd_type = SPFC_MBOX_PORT_SWITCH; - port_switch.header.length = SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_port_switch)); - port_switch.op_code = (u8)turn_on; - - if (spfc_mb_send_and_wait_mbox(hba, &port_switch, sizeof(port_switch), - (union spfc_outmbox_generic *)((void *)port_switch_sts)) != - RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[warn]Port(0x%x) SPFC can't send and wait mailbox, command type(0x%x) opcode(0x%x)", - hba->port_cfg.port_id, - port_switch.header.cmnd_type, port_switch.op_code); - - goto exit; - } - - if (port_switch_sts->port_switch_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[err]Port(0x%x) receive mailbox type(0x%x) status(0x%x) error", - hba->port_cfg.port_id, - port_switch_sts->port_switch_sts.header.cmnd_type, - port_switch_sts->port_switch_sts.status); - - goto exit; - } - - if (port_switch_sts->port_switch_sts.header.cmnd_type != SPFC_MBOX_PORT_SWITCH_STS) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[err]Port(0x%x) receive mailbox type(0x%x) error", - hba->port_cfg.port_id, - port_switch_sts->port_switch_sts.header.cmnd_type); - - goto exit; - } - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_MAJOR, - "[event]Port(0x%x) switch succeed, turns to %s", - hba->port_cfg.port_id, (turn_on) ? 
"on" : "off"); - - ret = RETURN_OK; -exit: - kfree(port_switch_sts); - - return ret; -} - -u32 spfc_config_login_api(struct spfc_hba_info *hba, - struct unf_port_login_parms *login_parms) -{ -#define SPFC_LOOP_RDYNUM 8 - int iret = RETURN_OK; - u32 ret = UNF_RETURN_ERROR; - struct spfc_inmbox_config_login config_login; - union spfc_outmbox_generic *cfg_login_sts = NULL; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - memset(&config_login, 0, sizeof(config_login)); - cfg_login_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!cfg_login_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(cfg_login_sts, 0, sizeof(union spfc_outmbox_generic)); - - config_login.header.cmnd_type = SPFC_MBOX_CONFIG_LOGIN_API; - config_login.header.length = SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_config_login)); - config_login.header.port_id = hba->port_index; - - config_login.op_code = UNDEFINEOPCODE; - - config_login.tx_bb_credit = hba->remote_bb_credit; - - config_login.etov = hba->compared_edtov_val; - config_login.rtov = hba->compared_ratov_val; - - config_login.rt_tov_tag = hba->remote_rttov_tag; - config_login.ed_tov_tag = hba->remote_edtov_tag; - config_login.bb_credit = hba->remote_bb_credit; - config_login.bb_scn = SPFC_LSB(hba->compared_bb_scn); - - if (config_login.bb_scn) { - config_login.lr_flag = (login_parms->els_cmnd_code == ELS_PLOGI) ? 0 : 1; - ret = spfc_mb_send_and_wait_mbox(hba, &config_login, sizeof(config_login), - (union spfc_outmbox_generic *)cfg_login_sts); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) SPFC can't send and wait mailbox, command type: 0x%x.", - hba->port_cfg.port_id, config_login.header.cmnd_type); - - goto exit; - } - - if (cfg_login_sts->config_login_sts.header.cmnd_type != - SPFC_MBOX_CONFIG_LOGIN_API_STS) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) Receive mailbox type incorrect. Type: 0x%x.", - hba->port_cfg.port_id, - cfg_login_sts->config_login_sts.header.cmnd_type); - - goto exit; - } - - if (cfg_login_sts->config_login_sts.status != STATUS_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "Port(0x%x) Receive mailbox type(0x%x) status incorrect. 
Status: 0x%x.", - hba->port_cfg.port_id, - cfg_login_sts->config_login_sts.header.cmnd_type, - cfg_login_sts->config_login_sts.status); - - goto exit; - } - } else { - iret = sphw_msg_to_mgmt_async(hba->dev_handle, COMM_MOD_FC, - SPFC_MBOX_CONFIG_LOGIN_API, &config_login, - sizeof(config_login), SPHW_CHANNEL_FC); - - if (iret != 0) { - SPFC_MAILBOX_STAT(hba, SPFC_SEND_CONFIG_LOGINAPI_FAIL); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) spfc can't send config login cmd to up,ret:%d.", - hba->port_cfg.port_id, iret); - - goto exit; - } - - SPFC_MAILBOX_STAT(hba, SPFC_SEND_CONFIG_LOGINAPI); - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) Topo(0x%x) Config login param to up: txbbcredit(0x%x), BB_SC_N(0x%x).", - hba->port_cfg.port_id, hba->active_topo, - config_login.tx_bb_credit, config_login.bb_scn); - - ret = RETURN_OK; -exit: - kfree(cfg_login_sts); - - return ret; -} - -u32 spfc_mb_send_and_wait_mbox(struct spfc_hba_info *hba, const void *in_mbox, - u16 in_size, - union spfc_outmbox_generic *out_mbox) -{ - void *handle = NULL; - u16 out_size = 0; - ulong time_out = 0; - int ret = 0; - struct spfc_mbox_header *header = NULL; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(in_mbox, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(out_mbox, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(hba->dev_handle, UNF_RETURN_ERROR); - header = (struct spfc_mbox_header *)in_mbox; - out_size = sizeof(union spfc_outmbox_generic); - handle = hba->dev_handle; - header->port_id = (u8)sphw_global_func_id(handle); - - /* Wait for las mailbox completion: */ - time_out = wait_for_completion_timeout(&hba->mbox_complete, - (ulong)msecs_to_jiffies(SPFC_MBOX_TIME_SEC_MAX * - UNF_S_TO_MS)); - if (time_out == SPFC_ZERO) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[err]Port(0x%x) wait mailbox(0x%x) completion timeout: %d sec", - hba->port_cfg.port_id, header->cmnd_type, - SPFC_MBOX_TIME_SEC_MAX); - - return UNF_RETURN_ERROR; - } - - /* Send Msg to uP Sync: timer 10s */ - ret = sphw_msg_to_mgmt_sync(handle, COMM_MOD_FC, header->cmnd_type, - (void *)in_mbox, in_size, - (union spfc_outmbox_generic *)out_mbox, - &out_size, (SPFC_MBX_MAX_TIMEOUT), - SPHW_CHANNEL_FC); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[warn]Port(0x%x) can not send mailbox(0x%x) with ret:%d", - hba->port_cfg.port_id, header->cmnd_type, ret); - - complete(&hba->mbox_complete); - return UNF_RETURN_ERROR; - } - - complete(&hba->mbox_complete); - - return RETURN_OK; -} - -void spfc_initial_dynamic_info(struct spfc_hba_info *fc_port) -{ - struct spfc_hba_info *hba = fc_port; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(hba); - - spin_lock_irqsave(&hba->hba_lock, flag); - hba->active_port_speed = UNF_PORT_SPEED_UNKNOWN; - hba->active_topo = UNF_ACT_TOP_UNKNOWN; - hba->phy_link = UNF_PORT_LINK_DOWN; - hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_INIT; - hba->loop_map_valid = LOOP_MAP_INVALID; - hba->srq_delay_info.srq_delay_flag = 0; - hba->srq_delay_info.root_rq_rcvd_flag = 0; - spin_unlock_irqrestore(&hba->hba_lock, flag); -} - -static u32 spfc_recv_fc_linkup(struct spfc_hba_info *hba, void *buf_in) -{ -#define SPFC_LOOP_MASK 0x1 -#define SPFC_LOOPMAP_COUNT 128 - - u32 ret = UNF_RETURN_ERROR; - struct spfc_link_event *link_event = NULL; - - link_event = (struct spfc_link_event *)buf_in; - hba->phy_link = UNF_PORT_LINK_UP; - hba->active_port_speed = link_event->speed; - hba->led_states.green_speed_led = (u8)(link_event->green_speed_led); - hba->led_states.yellow_speed_led = 
(u8)(link_event->yellow_speed_led); - hba->led_states.ac_led = (u8)(link_event->ac_led); - - if (link_event->top_type == SPFC_LOOP_MASK && - (link_event->loop_map_info[ARRAY_INDEX_1] == UNF_FL_PORT_LOOP_ADDR || - link_event->loop_map_info[ARRAY_INDEX_2] == UNF_FL_PORT_LOOP_ADDR)) { - hba->active_topo = UNF_ACT_TOP_PUBLIC_LOOP; /* Public Loop */ - hba->active_alpa = link_event->alpa_value; /* AL_PA */ - memcpy(hba->loop_map, link_event->loop_map_info, SPFC_LOOPMAP_COUNT); - hba->loop_map_valid = LOOP_MAP_VALID; - } else if (link_event->top_type == SPFC_LOOP_MASK) { - hba->active_topo = UNF_ACT_TOP_PRIVATE_LOOP; /* Private Loop */ - hba->active_alpa = link_event->alpa_value; /* AL_PA */ - memcpy(hba->loop_map, link_event->loop_map_info, SPFC_LOOPMAP_COUNT); - hba->loop_map_valid = LOOP_MAP_VALID; - } else { - hba->active_topo = UNF_TOP_P2P_MASK; /* P2P_D or P2P_F */ - } - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_KEVENT, - "[event]Port(0x%x) receive link up event(0x%x) with speed(0x%x) uP_topo(0x%x) driver_topo(0x%x)", - hba->port_cfg.port_id, link_event->link_event, - link_event->speed, link_event->top_type, hba->active_topo); - - /* Set clear & flush state */ - spfc_set_hba_clear_state(hba, false); - spfc_set_hba_flush_state(hba, false); - spfc_set_rport_flush_state(hba, false); - - /* Report link up event to COM */ - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_LINK_UP, - &hba->active_port_speed); - - SPFC_LINK_EVENT_STAT(hba, SPFC_LINK_UP_COUNT); - - return ret; -} - -static u32 spfc_recv_fc_linkdown(struct spfc_hba_info *hba, void *buf_in) -{ - u32 ret = UNF_RETURN_ERROR; - struct spfc_link_event *link_event = NULL; - - link_event = (struct spfc_link_event *)buf_in; - - /* 1. Led state setting */ - hba->led_states.green_speed_led = (u8)(link_event->green_speed_led); - hba->led_states.yellow_speed_led = (u8)(link_event->yellow_speed_led); - hba->led_states.ac_led = (u8)(link_event->ac_led); - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_KEVENT, - "[event]Port(0x%x) receive link down event(0x%x) reason(0x%x)", - hba->port_cfg.port_id, link_event->link_event, link_event->reason); - - spfc_initial_dynamic_info(hba); - - /* 2. set HBA flush state */ - spfc_set_hba_flush_state(hba, true); - - /* 3. set R_Port (parent SQ) flush state */ - spfc_set_rport_flush_state(hba, true); - - /* 4. 
Report link down event to COM */ - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_LINK_DOWN, 0); - - /* DFX setting */ - SPFC_LINK_REASON_STAT(hba, link_event->reason); - SPFC_LINK_EVENT_STAT(hba, SPFC_LINK_DOWN_COUNT); - - return ret; -} - -static u32 spfc_recv_fc_delcmd(struct spfc_hba_info *hba, void *buf_in) -{ - u32 ret = UNF_RETURN_ERROR; - struct spfc_link_event *link_event = NULL; - - link_event = (struct spfc_link_event *)buf_in; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x) receive delete cmd event(0x%x)", - hba->port_cfg.port_id, link_event->link_event); - - /* Send buffer clear cmnd */ - ret = spfc_clear_fetched_sq_wqe(hba); - - hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_SCANNING; - SPFC_LINK_EVENT_STAT(hba, SPFC_FC_DELETE_CMND_COUNT); - - return ret; -} - -static u32 spfc_recv_fc_error(struct spfc_hba_info *hba, void *buf_in) -{ -#define FC_ERR_LEVEL_DEAD 0 -#define FC_ERR_LEVEL_HIGH 1 -#define FC_ERR_LEVEL_LOW 2 - - u32 ret = UNF_RETURN_ERROR; - struct spfc_up_error_event *up_error_event = NULL; - - up_error_event = (struct spfc_up_error_event *)buf_in; - if (up_error_event->error_type >= SPFC_UP_ERR_BUTT || - up_error_event->error_value >= SPFC_ERR_VALUE_BUTT) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) receive an unsupported UP Error Event Type(0x%x) Value(0x%x).", - hba->port_cfg.port_id, up_error_event->error_type, - up_error_event->error_value); - return ret; - } - - switch (up_error_event->error_level) { - case FC_ERR_LEVEL_DEAD: - ret = RETURN_OK; - break; - - case FC_ERR_LEVEL_HIGH: - /* port reset */ - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, - UNF_PORT_ABNORMAL_RESET, NULL); - break; - - case FC_ERR_LEVEL_LOW: - ret = RETURN_OK; - break; - - default: - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) receive an unsupported UP Error Event Level(0x%x), cannot process.", - hba->port_cfg.port_id, - up_error_event->error_level); - return ret; - } - if (up_error_event->error_value < SPFC_ERR_VALUE_BUTT) - SPFC_UP_ERR_EVENT_STAT(hba, up_error_event->error_value); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Port(0x%x) process UP Error Event Level(0x%x) Type(0x%x) Value(0x%x) %s.", - hba->port_cfg.port_id, up_error_event->error_level, - up_error_event->error_type, up_error_event->error_value, - (ret == UNF_RETURN_ERROR) ? "ERROR" : "OK"); - - return ret; -} -
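[Editorial note] The table that follows maps each SPFC_MBOX_RECV_* event code to one of the handlers above, and spfc_up_msg2driver_proc() walks it linearly, so supporting a new asynchronous firmware event only means one more handler and one more row. A minimal sketch of such an extension, assuming the same calling convention as the handlers above; SPFC_MBOX_RECV_FC_EXAMPLE and spfc_recv_fc_example() are hypothetical names, not symbols from this driver:

/* Hypothetical handler: logs the event and reports success. */
static u32 spfc_recv_fc_example(struct spfc_hba_info *hba, void *buf_in)
{
	struct spfc_link_event *link_event = (struct spfc_link_event *)buf_in;

	FC_DRV_PRINT(UNF_LOG_EVENT, UNF_KEVENT,
		     "[event]Port(0x%x) receive example event(0x%x)",
		     hba->port_cfg.port_id, link_event->link_event);

	return RETURN_OK;
}

/* The matching row would then be appended to up_msg_handle[]:
 *   {SPFC_MBOX_RECV_FC_EXAMPLE, spfc_recv_fc_example}
 */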
"ERROR" : "OK"); - - return ret; -} - -static struct spfc_up2drv_msg_handle up_msg_handle[] = { - {SPFC_MBOX_RECV_FC_LINKUP, spfc_recv_fc_linkup}, - {SPFC_MBOX_RECV_FC_LINKDOWN, spfc_recv_fc_linkdown}, - {SPFC_MBOX_RECV_FC_DELCMD, spfc_recv_fc_delcmd}, - {SPFC_MBOX_RECV_FC_ERROR, spfc_recv_fc_error} -}; - -void spfc_up_msg2driver_proc(void *hwdev_handle, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size) -{ - u32 ret = UNF_RETURN_ERROR; - u32 index = 0; - struct spfc_hba_info *hba = NULL; - struct spfc_mbox_header *mbx_header = NULL; - - FC_CHECK_RETURN_VOID(hwdev_handle); - FC_CHECK_RETURN_VOID(pri_handle); - FC_CHECK_RETURN_VOID(buf_in); - FC_CHECK_RETURN_VOID(buf_out); - FC_CHECK_RETURN_VOID(out_size); - - hba = (struct spfc_hba_info *)pri_handle; - if (!hba) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_ERR, "[err]Hba is null"); - return; - } - - mbx_header = (struct spfc_mbox_header *)buf_in; - if (mbx_header->cmnd_type != cmd) { - *out_size = sizeof(struct spfc_link_event); - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_ERR, - "[err]Port(0x%x) cmd(0x%x) is not matched with header cmd type(0x%x)", - hba->port_cfg.port_id, cmd, mbx_header->cmnd_type); - return; - } - - while (index < (sizeof(up_msg_handle) / sizeof(struct spfc_up2drv_msg_handle))) { - if (up_msg_handle[index].cmd == cmd && - up_msg_handle[index].spfc_msg_up2driver_handler) { - ret = up_msg_handle[index].spfc_msg_up2driver_handler(hba, buf_in); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_ERR, - "[warn]Port(0x%x) process up cmd(0x%x) failed", - hba->port_cfg.port_id, cmd); - } - *out_size = sizeof(struct spfc_link_event); - return; - } - index++; - } - - *out_size = sizeof(struct spfc_link_event); - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_ERR, - "[err]Port(0x%x) process up cmd(0x%x) failed", - hba->port_cfg.port_id, cmd); -} - -u32 spfc_get_topo_act(void *hba, void *topo_act) -{ - struct spfc_hba_info *spfc_hba = hba; - enum unf_act_topo *pen_topo_act = topo_act; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(topo_act, UNF_RETURN_ERROR); - - /* Get topo from low_level */ - *pen_topo_act = spfc_hba->active_topo; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Get active topology: 0x%x", *pen_topo_act); - - return RETURN_OK; -} - -u32 spfc_get_loop_alpa(void *hba, void *alpa) -{ - ulong flags = 0; - struct spfc_hba_info *spfc_hba = hba; - u8 *alpa_temp = alpa; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(alpa, UNF_RETURN_ERROR); - - spin_lock_irqsave(&spfc_hba->hba_lock, flags); - *alpa_temp = spfc_hba->active_alpa; - spin_unlock_irqrestore(&spfc_hba->hba_lock, flags); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Get active AL_PA(0x%x)", *alpa_temp); - - return RETURN_OK; -} - -static void spfc_get_fabric_login_params(struct spfc_hba_info *hba, - struct unf_port_login_parms *params_addr) -{ - ulong flag = 0; - - spin_lock_irqsave(&hba->hba_lock, flag); - hba->active_topo = params_addr->act_topo; - hba->compared_ratov_val = params_addr->compared_ratov_val; - hba->compared_edtov_val = params_addr->compared_edtov_val; - hba->compared_bb_scn = params_addr->compared_bbscn; - hba->remote_edtov_tag = params_addr->remote_edtov_tag; - hba->remote_rttov_tag = params_addr->remote_rttov_tag; - hba->remote_bb_credit = params_addr->remote_bb_credit; - spin_unlock_irqrestore(&hba->hba_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) topo(0x%x) get fabric params: R_A_TOV(0x%x) E_D_TOV(%u) BB_CREDIT(0x%x) 
BB_SC_N(0x%x)", - hba->port_cfg.port_id, hba->active_topo, - hba->compared_ratov_val, hba->compared_edtov_val, - hba->remote_bb_credit, hba->compared_bb_scn); -} - -static void spfc_get_port_login_params(struct spfc_hba_info *hba, - struct unf_port_login_parms *params_addr) -{ - ulong flag = 0; - - spin_lock_irqsave(&hba->hba_lock, flag); - hba->compared_ratov_val = params_addr->compared_ratov_val; - hba->compared_edtov_val = params_addr->compared_edtov_val; - hba->compared_bb_scn = params_addr->compared_bbscn; - hba->remote_edtov_tag = params_addr->remote_edtov_tag; - hba->remote_rttov_tag = params_addr->remote_rttov_tag; - hba->remote_bb_credit = params_addr->remote_bb_credit; - spin_unlock_irqrestore(&hba->hba_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) Topo(0x%x) Get Port Params: R_A_TOV(0x%x), E_D_TOV(0x%x), BB_CREDIT(0x%x), BB_SC_N(0x%x).", - hba->port_cfg.port_id, hba->active_topo, - hba->compared_ratov_val, hba->compared_edtov_val, - hba->remote_bb_credit, hba->compared_bb_scn); -} - -u32 spfc_update_fabric_param(void *hba, void *para_in) -{ - u32 ret = RETURN_OK; - struct spfc_hba_info *spfc_hba = hba; - struct unf_port_login_parms *login_coparms = para_in; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(para_in, UNF_RETURN_ERROR); - - spfc_get_fabric_login_params(spfc_hba, login_coparms); - - if (spfc_hba->active_topo == UNF_ACT_TOP_P2P_FABRIC || - spfc_hba->active_topo == UNF_ACT_TOP_PUBLIC_LOOP) { - if (spfc_hba->work_mode == SPFC_SMARTIO_WORK_MODE_FC) - ret = spfc_config_login_api(spfc_hba, login_coparms); - } - - return ret; -} - -u32 spfc_update_port_param(void *hba, void *para_in) -{ - u32 ret = RETURN_OK; - struct spfc_hba_info *spfc_hba = hba; - struct unf_port_login_parms *login_coparms = - (struct unf_port_login_parms *)para_in; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(para_in, UNF_RETURN_ERROR); - - if (spfc_hba->active_topo == UNF_ACT_TOP_PRIVATE_LOOP || - spfc_hba->active_topo == UNF_ACT_TOP_P2P_DIRECT) { - spfc_get_port_login_params(spfc_hba, login_coparms); - ret = spfc_config_login_api(spfc_hba, login_coparms); - } - - spfc_save_login_parms_in_sq_info(spfc_hba, login_coparms); - - return ret; -} - -u32 spfc_get_workable_bb_credit(void *hba, void *bb_credit) -{ - u32 *bb_credit_temp = (u32 *)bb_credit; - struct spfc_hba_info *spfc_hba = hba; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(bb_credit, UNF_RETURN_ERROR); - if (spfc_hba->active_port_speed == UNF_PORT_SPEED_32_G) - *bb_credit_temp = SPFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT; - else if (spfc_hba->active_port_speed == UNF_PORT_SPEED_16_G) - *bb_credit_temp = SPFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT; - else - *bb_credit_temp = SPFC_LOWLEVEL_DEFAULT_8G_BB_CREDIT; - - return RETURN_OK; -} - -u32 spfc_get_workable_bb_scn(void *hba, void *bb_scn) -{ - u32 *bb_scn_temp = (u32 *)bb_scn; - struct spfc_hba_info *spfc_hba = (struct spfc_hba_info *)hba; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(bb_scn, UNF_RETURN_ERROR); - - *bb_scn_temp = spfc_hba->port_bb_scn_cfg; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Return BBSCN(0x%x) to CM", *bb_scn_temp); - - return RETURN_OK; -} - -u32 spfc_get_loop_map(void *hba, void *buf) -{ - ulong flags = 0; - struct unf_buf *buf_temp = (struct unf_buf *)buf; - struct spfc_hba_info *spfc_hba = hba; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf_temp, UNF_RETURN_ERROR); - 
FC_CHECK_RETURN_VALUE(buf_temp->buf, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf_temp->buf_len, UNF_RETURN_ERROR); - - if (buf_temp->buf_len > UNF_LOOPMAP_COUNT) - return UNF_RETURN_ERROR; - - spin_lock_irqsave(&spfc_hba->hba_lock, flags); - if (spfc_hba->loop_map_valid != LOOP_MAP_VALID) { - spin_unlock_irqrestore(&spfc_hba->hba_lock, flags); - return UNF_RETURN_ERROR; - } - memcpy(buf_temp->buf, spfc_hba->loop_map, buf_temp->buf_len); - spin_unlock_irqrestore(&spfc_hba->hba_lock, flags); - - return RETURN_OK; -} - -u32 spfc_mb_reset_chip(struct spfc_hba_info *hba, u8 sub_type) -{ - struct spfc_inmbox_port_reset port_reset; - union spfc_outmbox_generic *port_reset_sts = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - memset(&port_reset, 0, sizeof(port_reset)); - - port_reset_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!port_reset_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(port_reset_sts, 0, sizeof(union spfc_outmbox_generic)); - port_reset.header.cmnd_type = SPFC_MBOX_PORT_RESET; - port_reset.header.length = SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_port_reset)); - port_reset.op_code = sub_type; - - if (spfc_mb_send_and_wait_mbox(hba, &port_reset, sizeof(port_reset), - port_reset_sts) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[warn]Port(0x%x) can't send and wait mailbox with command type(0x%x)", - hba->port_cfg.port_id, port_reset.header.cmnd_type); - - goto exit; - } - - if (port_reset_sts->port_reset_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[warn]Port(0x%x) receive mailbox type(0x%x) status(0x%x) incorrect", - hba->port_cfg.port_id, - port_reset_sts->port_reset_sts.header.cmnd_type, - port_reset_sts->port_reset_sts.status); - - goto exit; - } - - if (port_reset_sts->port_reset_sts.header.cmnd_type != SPFC_MBOX_PORT_RESET_STS) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[warn]Port(0x%x) recv mailbox type(0x%x) incorrect", - hba->port_cfg.port_id, - port_reset_sts->port_reset_sts.header.cmnd_type); - - goto exit; - } - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_MAJOR, - "[info]Port(0x%x) reset chip mailbox success", - hba->port_cfg.port_id); - - ret = RETURN_OK; -exit: - kfree(port_reset_sts); - - return ret; -} - -u32 spfc_clear_sq_wqe_done(struct spfc_hba_info *hba) -{ - int ret1 = RETURN_OK; - u32 ret2 = RETURN_OK; - struct spfc_inmbox_clear_done clear_done; - - clear_done.header.cmnd_type = SPFC_MBOX_BUFFER_CLEAR_DONE; - clear_done.header.length = SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_clear_done)); - clear_done.header.port_id = hba->port_index; - - ret1 = sphw_msg_to_mgmt_async(hba->dev_handle, COMM_MOD_FC, - SPFC_MBOX_BUFFER_CLEAR_DONE, &clear_done, - sizeof(clear_done), SPHW_CHANNEL_FC); - - if (ret1 != 0) { - SPFC_MAILBOX_STAT(hba, SPFC_SEND_CLEAR_DONE_FAIL); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC Port(0x%x) can't send clear done cmd to up, ret:%d", - hba->port_cfg.port_id, ret1); - - return UNF_RETURN_ERROR; - } - - SPFC_MAILBOX_STAT(hba, SPFC_SEND_CLEAR_DONE); - hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_FLUSHDONE; - hba->next_clear_sq = 0; - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_KEVENT, - "[info]Port(0x%x) clear done msg(0x%x) sent to up succeed with stage(0x%x)", - hba->port_cfg.port_id, clear_done.header.cmnd_type, - hba->queue_set_stage); - - return ret2; -} - -u32 spfc_mbx_get_fw_clear_stat(struct spfc_hba_info *hba, u32 *clear_state) -{ - struct 
spfc_inmbox_get_clear_state get_clr_state; - union spfc_outmbox_generic *port_clear_state_sts = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(clear_state, UNF_RETURN_ERROR); - - memset(&get_clr_state, 0, sizeof(get_clr_state)); - - port_clear_state_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!port_clear_state_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(port_clear_state_sts, 0, sizeof(union spfc_outmbox_generic)); - - get_clr_state.header.cmnd_type = SPFC_MBOX_GET_CLEAR_STATE; - get_clr_state.header.length = - SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_get_clear_state)); - - if (spfc_mb_send_and_wait_mbox(hba, &get_clr_state, sizeof(get_clr_state), - port_clear_state_sts) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "spfc can't send and wait mailbox, command type: 0x%x", - get_clr_state.header.cmnd_type); - - goto exit; - } - - if (port_clear_state_sts->get_clr_state_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x, state 0x%x.", - hba->port_cfg.port_id, - port_clear_state_sts->get_clr_state_sts.header.cmnd_type, - port_clear_state_sts->get_clr_state_sts.status, - port_clear_state_sts->get_clr_state_sts.state); - - goto exit; - } - - if (port_clear_state_sts->get_clr_state_sts.header.cmnd_type != - SPFC_MBOX_GET_CLEAR_STATE_STS) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "Port(0x%x) recv mailbox type(0x%x) incorrect.", - hba->port_cfg.port_id, - port_clear_state_sts->get_clr_state_sts.header.cmnd_type); - - goto exit; - } - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "Port(0x%x) get port clear state 0x%x.", - hba->port_cfg.port_id, - port_clear_state_sts->get_clr_state_sts.state); - - *clear_state = port_clear_state_sts->get_clr_state_sts.state; - - ret = RETURN_OK; -exit: - kfree(port_clear_state_sts); - - return ret; -} - -u32 spfc_mbx_config_default_session(void *hba, u32 flag) -{ - struct spfc_hba_info *spfc_hba = NULL; - struct spfc_inmbox_default_sq_info default_sq_info; - union spfc_outmbox_generic default_sq_info_sts; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - spfc_hba = (struct spfc_hba_info *)hba; - - memset(&default_sq_info, 0, sizeof(struct spfc_inmbox_default_sq_info)); - memset(&default_sq_info_sts, 0, sizeof(union spfc_outmbox_generic)); - - default_sq_info.header.cmnd_type = SPFC_MBOX_SEND_DEFAULT_SQ_INFO; - default_sq_info.header.length = - SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_default_sq_info)); - default_sq_info.func_id = sphw_global_func_id(spfc_hba->dev_handle); - - /* When flag is 1, set default SQ info when probe, when 0, clear when - * remove - */ - if (flag) { - default_sq_info.sq_cid = spfc_hba->default_sq_info.sq_cid; - default_sq_info.sq_xid = spfc_hba->default_sq_info.sq_xid; - default_sq_info.valid = 1; - } - - ret = - spfc_mb_send_and_wait_mbox(spfc_hba, &default_sq_info, sizeof(default_sq_info), - (union spfc_outmbox_generic *)(void *)&default_sq_info_sts); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "spfc can't send and wait mailbox, command type: 0x%x.", - default_sq_info.header.cmnd_type); - - return UNF_RETURN_ERROR; - } - - if (default_sq_info_sts.default_sq_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) mailbox status incorrect status(0x%x) .", - 
spfc_hba->port_cfg.port_id, - default_sq_info_sts.default_sq_sts.status); - - return UNF_RETURN_ERROR; - } - - if (SPFC_MBOX_SEND_DEFAULT_SQ_INFO_STS != - default_sq_info_sts.default_sq_sts.header.cmnd_type) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) receive mailbox type incorrect type: 0x%x.", - spfc_hba->port_cfg.port_id, - default_sq_info_sts.default_sq_sts.header.cmnd_type); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} diff --git a/drivers/scsi/spfc/hw/spfc_chipitf.h b/drivers/scsi/spfc/hw/spfc_chipitf.h deleted file mode 100644 index acd770514edffcacb2711d9277d966cf10a1a58c..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_chipitf.h +++ /dev/null @@ -1,797 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_CHIPITF_H -#define SPFC_CHIPITF_H - -#include "unf_type.h" -#include "unf_log.h" -#include "spfc_utils.h" -#include "spfc_module.h" - -#include "spfc_service.h" - -/* CONF_API_CMND */ -#define SPFC_MBOX_CONFIG_API (0x00) -#define SPFC_MBOX_CONFIG_API_STS (0xA0) - -/* GET_CHIP_INFO_API_CMD */ -#define SPFC_MBOX_GET_CHIP_INFO (0x01) -#define SPFC_MBOX_GET_CHIP_INFO_STS (0xA1) - -/* PORT_RESET */ -#define SPFC_MBOX_PORT_RESET (0x02) -#define SPFC_MBOX_PORT_RESET_STS (0xA2) - -/* SFP_SWITCH_API_CMND */ -#define SPFC_MBOX_PORT_SWITCH (0x03) -#define SPFC_MBOX_PORT_SWITCH_STS (0xA3) - -/* CONF_AF_LOGIN_API_CMND */ -#define SPFC_MBOX_CONFIG_LOGIN_API (0x06) -#define SPFC_MBOX_CONFIG_LOGIN_API_STS (0xA6) - -/* BUFFER_CLEAR_DONE_CMND */ -#define SPFC_MBOX_BUFFER_CLEAR_DONE (0x07) -#define SPFC_MBOX_BUFFER_CLEAR_DONE_STS (0xA7) - -#define SPFC_MBOX_GET_UP_STATE (0x09) -#define SPFC_MBOX_GET_UP_STATE_STS (0xA9) - -/* GET CLEAR DONE STATE */ -#define SPFC_MBOX_GET_CLEAR_STATE (0x0E) -#define SPFC_MBOX_GET_CLEAR_STATE_STS (0xAE) - -/* CONFIG TIMER */ -#define SPFC_MBOX_CONFIG_TIMER (0x10) -#define SPFC_MBOX_CONFIG_TIMER_STS (0xB0) - -/* Led Test */ -#define SPFC_MBOX_LED_TEST (0x12) -#define SPFC_MBOX_LED_TEST_STS (0xB2) - -/* set esch */ -#define SPFC_MBOX_SET_ESCH (0x13) -#define SPFC_MBOX_SET_ESCH_STS (0xB3) - -/* set get tx serdes */ -#define SPFC_MBOX_SET_GET_SERDES_TX (0x14) -#define SPFC_MBOX_SET_GET_SERDES_TX_STS (0xB4) - -/* get rx serdes */ -#define SPFC_MBOX_GET_SERDES_RX (0x15) -#define SPFC_MBOX_GET_SERDES_RX_STS (0xB5) - -/* i2c read write */ -#define SPFC_MBOX_I2C_WR_RD (0x16) -#define SPFC_MBOX_I2C_WR_RD_STS (0xB6) - -/* GET UCODE STATS CMD */ -#define SPFC_MBOX_GET_UCODE_STAT (0x18) -#define SPFC_MBOX_GET_UCODE_STAT_STS (0xB8) - -/* gpio read write */ -#define SPFC_MBOX_GPIO_WR_RD (0x19) -#define SPFC_MBOX_GPIO_WR_RD_STS (0xB9) - -#define SPFC_MBOX_SEND_DEFAULT_SQ_INFO (0x26) -#define SPFC_MBOX_SEND_DEFAULT_SQ_INFO_STS (0xc6) - -/* FC: DRV->UP */ -#define SPFC_MBOX_SEND_ELS_CMD (0x2A) -#define SPFC_MBOX_SEND_VPORT_INFO (0x2B) - -/* FC: UP->DRV */ -#define SPFC_MBOX_RECV_FC_LINKUP (0x40) -#define SPFC_MBOX_RECV_FC_LINKDOWN (0x41) -#define SPFC_MBOX_RECV_FC_DELCMD (0x42) -#define SPFC_MBOX_RECV_FC_ERROR (0x43) - -#define LOOP_MAP_VALID (1) -#define LOOP_MAP_INVALID (0) - -#define SPFC_MBOX_SIZE (1024) -#define SPFC_MBOX_HEADER_SIZE (4) - -#define UNDEFINEOPCODE (0) - -#define VALUEMASK_L 0x00000000FFFFFFFF -#define VALUEMASK_H 0xFFFFFFFF00000000 - -#define STATUS_OK (0) -#define STATUS_FAIL (1) - -enum spfc_drv2up_unblock_msg_cmd_code { - SPFC_SEND_ELS_CMD, - SPFC_SEND_ELS_CMD_FAIL, - SPFC_RCV_ELS_CMD_RSP, - SPFC_SEND_CONFIG_LOGINAPI, - 
SPFC_SEND_CONFIG_LOGINAPI_FAIL, - SPFC_RCV_CONFIG_LOGIN_API_RSP, - SPFC_SEND_CLEAR_DONE, - SPFC_SEND_CLEAR_DONE_FAIL, - SPFC_RCV_CLEAR_DONE_RSP, - SPFC_SEND_VPORT_INFO_DONE, - SPFC_SEND_VPORT_INFO_FAIL, - SPFC_SEND_VPORT_INFO_RSP, - SPFC_MBOX_CMD_BUTT -}; - -/* up to driver cmd code */ -enum spfc_up2drv_msg_cmd_code { - SPFC_UP2DRV_MSG_CMD_LINKUP = 0x1, - SPFC_UP2DRV_MSG_CMD_LINKDOWN = 0x2, - SPFC_UP2DRV_MSG_CMD_BUTT -}; - -/* up to driver handle template */ -struct spfc_up2drv_msg_handle { - u8 cmd; - u32 (*spfc_msg_up2driver_handler)(struct spfc_hba_info *hba, void *buf_in); -}; - -/* tile to driver cmd code */ -enum spfc_tile2drv_msg_cmd_code { - SPFC_TILE2DRV_MSG_CMD_SCAN_DONE, - SPFC_TILE2DRV_MSG_CMD_FLUSH_DONE, - SPFC_TILE2DRV_MSG_CMD_BUTT -}; - -/* tile to driver handle template */ -struct spfc_tile2drv_msg_handle { - u8 cmd; - u32 (*spfc_msg_tile2driver_handler)(struct spfc_hba_info *hba, u8 cmd, u64 val); -}; - -/* Mbox Common Header */ -struct spfc_mbox_header { - u8 cmnd_type; - u8 length; - u8 port_id; - u8 reserved; -}; - -/* open or close the sfp */ -struct spfc_inmbox_port_switch { - struct spfc_mbox_header header; - u32 op_code : 8; - u32 rsvd0 : 24; - u32 rsvd1[6]; -}; - -struct spfc_inmbox_send_vport_info { - struct spfc_mbox_header header; - - u64 sys_port_wwn; - u64 sys_node_name; - - u32 nport_id : 24; - u32 vpi : 8; -}; - -struct spfc_outmbox_port_switch_sts { - struct spfc_mbox_header header; - - u16 reserved1; - u8 reserved2; - u8 status; -}; - -/* config API */ -struct spfc_inmbox_config_api { - struct spfc_mbox_header header; - - u32 op_code : 8; - u32 reserved1 : 24; - - u8 topy_mode; - u8 sfp_speed; - u8 max_speed; - u8 hard_alpa; - - u8 port_name[UNF_WWN_LEN]; - - u32 slave : 1; - u32 auto_sneg : 1; - u32 reserved2 : 30; - - u32 rx_6432g_bb_credit : 16; /* 160 */ - u32 rx_16g_bb_credit : 16; /* 80 */ - u32 rx_84g_bb_credit : 16; /* 50 */ - u32 rdy_cnt_bf_fst_frm : 16; /* 8 */ - - u32 esch_32g_value; - u32 esch_16g_value; - u32 esch_8g_value; - u32 esch_4g_value; - u32 esch_64g_value; - u32 esch_bust_size; -}; - -struct spfc_outmbox_config_api_sts { - struct spfc_mbox_header header; - u16 reserved1; - u8 reserved2; - u8 status; -}; - -/* Get chip info */ -struct spfc_inmbox_get_chip_info { - struct spfc_mbox_header header; -}; - -struct spfc_outmbox_get_chip_info_sts { - struct spfc_mbox_header header; - u8 status; - u8 board_type; - u8 rvsd0[2]; - u64 wwpn; - u64 wwnn; - u64 rsvd1; -}; - -/* Get reg info */ -struct spfc_inmbox_get_reg_info { - struct spfc_mbox_header header; - u32 op_code : 1; - u32 reg_len : 8; - u32 rsvd1 : 23; - u32 reg_addr; - u32 reg_value_l32; - u32 reg_value_h32; - u32 rsvd2[27]; -}; - -/* Get reg info sts */ -struct spfc_outmbox_get_reg_info_sts { - struct spfc_mbox_header header; - - u16 rsvd0; - u8 rsvd1; - u8 status; - u32 reg_value_l32; - u32 reg_value_h32; - u32 rsvd2[28]; -}; - -/* Config login API */ -struct spfc_inmbox_config_login { - struct spfc_mbox_header header; - - u32 op_code : 8; - u32 reserved1 : 24; - - u16 tx_bb_credit; - u16 reserved2; - - u32 rtov; - u32 etov; - - u32 rt_tov_tag : 1; - u32 ed_tov_tag : 1; - u32 bb_credit : 6; - u32 bb_scn : 8; - u32 lr_flag : 16; -}; - -struct spfc_outmbox_config_login_sts { - struct spfc_mbox_header header; - - u16 reserved1; - u8 reserved2; - u8 status; -}; - -/* port reset */ -#define SPFC_MBOX_SUBTYPE_LIGHT_RESET (0x0) -#define SPFC_MBOX_SUBTYPE_HEAVY_RESET (0x1) - -struct spfc_inmbox_port_reset { - struct spfc_mbox_header header; - - u32 op_code : 8; - u32 reserved : 24; -}; -
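[Editorial note] One regularity worth calling out before the remaining status structures: every request/status pair defined at the top of this header encodes the status type as the request type plus 0xA0 (SPFC_MBOX_PORT_RESET 0x02 pairs with SPFC_MBOX_PORT_RESET_STS 0xA2, SPFC_MBOX_CONFIG_TIMER 0x10 with 0xB0, SPFC_MBOX_SEND_DEFAULT_SQ_INFO 0x26 with 0xC6), and every caller in spfc_chipitf.c repeats the same two checks on each reply. A minimal sketch of a shared validator built on that observation, assuming only the spfc_mbox_header layout and STATUS_OK definition above; spfc_mbox_reply_ok() is a hypothetical helper name, not a symbol from this driver:

static inline bool spfc_mbox_reply_ok(const struct spfc_mbox_header *reply,
				      u8 req_cmnd_type, u8 status)
{
	/* A reply is usable when the firmware reported success (0) and the
	 * reply carries the *_STS type matching the request, i.e. the
	 * request type offset by 0xA0, per the definitions above.
	 */
	return status == STATUS_OK &&
	       reply->cmnd_type == (u8)(req_cmnd_type + 0xA0);
}

With something of this shape, each spfc_mb_send_and_wait_mbox() caller could collapse its two error branches into one.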
-struct spfc_outmbox_port_reset_sts { - struct spfc_mbox_header header; - - u16 reserved1; - u8 reserved2; - u8 status; -}; - -/* led test */ -struct spfc_inmbox_led_test { - struct spfc_mbox_header header; - - /* 0->act type;1->low speed;2->high speed */ - u8 led_type; - /* 0:twinkle;1:light on;2:light off;0xff:default */ - u8 led_mode; - u8 resvd[ARRAY_INDEX_2]; -}; - -struct spfc_outmbox_led_test_sts { - struct spfc_mbox_header header; - - u16 rsvd1; - u8 rsvd2; - u8 status; -}; - -/* set esch */ -struct spfc_inmbox_set_esch { - struct spfc_mbox_header header; - - u32 esch_value; - u32 esch_bust_size; -}; - -struct spfc_outmbox_set_esch_sts { - struct spfc_mbox_header header; - - u16 rsvd1; - u8 rsvd2; - u8 status; -}; - -struct spfc_inmbox_set_serdes_tx { - struct spfc_mbox_header header; - - u8 swing; /* amplitude setting */ - char serdes_pre1; /* pre1 setting */ - char serdes_pre2; /* pre2 setting */ - char serdes_post; /* post setting */ - u8 serdes_main; /* main setting */ - u8 op_code; /* opcode,0:setting;1:read */ - u8 rsvd[ARRAY_INDEX_2]; -}; - -struct spfc_outmbox_set_serdes_tx_sts { - struct spfc_mbox_header header; - u16 rvsd0; - u8 rvsd1; - u8 status; - u8 swing; - char serdes_pre1; - char serdes_pre2; - char serdes_post; - u8 serdes_main; - u8 rsvd2[ARRAY_INDEX_3]; -}; - -struct spfc_inmbox_i2c_wr_rd { - struct spfc_mbox_header header; - u8 op_code; /* 0 write, 1 read */ - u8 rsvd[ARRAY_INDEX_3]; - - u32 dev_addr; - u32 offset; - u32 wr_data; -}; - -struct spfc_outmbox_i2c_wr_rd_sts { - struct spfc_mbox_header header; - u8 status; - u8 resvd[ARRAY_INDEX_3]; - - u32 rd_data; -}; - -struct spfc_inmbox_gpio_wr_rd { - struct spfc_mbox_header header; - u8 op_code; /* 0 write,1 read */ - u8 rsvd[ARRAY_INDEX_3]; - - u32 pin; - u32 wr_data; -}; - -struct spfc_outmbox_gpio_wr_rd_sts { - struct spfc_mbox_header header; - u8 status; - u8 resvd[ARRAY_INDEX_3]; - - u32 rd_data; -}; - -struct spfc_inmbox_get_serdes_rx { - struct spfc_mbox_header header; - - u8 op_code; - u8 h16_macro; - u8 h16_lane; - u8 rsvd; -}; - -struct spfc_inmbox_get_serdes_rx_sts { - struct spfc_mbox_header header; - u16 rvsd0; - u8 rvsd1; - u8 status; - int left_eye; - int right_eye; - int low_eye; - int high_eye; -}; - -struct spfc_ser_op_m_l { - u8 op_code; - u8 h16_macro; - u8 h16_lane; - u8 rsvd; -}; - -/* get sfp info */ -#define SPFC_MBOX_GET_SFP_INFO_MB_LENGTH 1 -#define OFFSET_TWO_DWORD 2 -#define OFFSET_ONE_DWORD 1 - -struct spfc_inmbox_get_sfp_info { - struct spfc_mbox_header header; -}; - -struct spfc_outmbox_get_sfp_info_sts { - struct spfc_mbox_header header; - - u32 rcvd : 8; - u32 length : 16; - u32 status : 8; -}; - -/* get ucode stats */ -#define SPFC_UCODE_STAT_NUM 64 - -struct spfc_outmbox_get_ucode_stat { - struct spfc_mbox_header header; -}; - -struct spfc_outmbox_get_ucode_stat_sts { - struct spfc_mbox_header header; - - u16 rsvd; - u8 rsvd2; - u8 status; - - u32 ucode_stat[SPFC_UCODE_STAT_NUM]; -}; - -/* uP-->Driver async event API */ -struct spfc_link_event { - struct spfc_mbox_header header; - - u8 link_event; - u8 reason; - u8 speed; - u8 top_type; - - u8 alpa_value; - u8 reserved1; - u16 paticpate : 1; - u16 ac_led : 1; - u16 yellow_speed_led : 1; - u16 green_speed_led : 1; - u16 reserved2 : 12; - - u8 loop_map_info[128]; -}; - -enum spfc_up_err_type { - SPFC_UP_ERR_DRV_PARA = 0, - SPFC_UP_ERR_SFP = 1, - SPFC_UP_ERR_32G_PUB = 2, - SPFC_UP_ERR_32G_UA = 3, - SPFC_UP_ERR_32G_MAC = 4, - SPFC_UP_ERR_NON32G_DFX = 5, - SPFC_UP_ERR_NON32G_MAC = 6, - SPFC_UP_ERR_BUTT - -}; - -enum
spfc_up_err_value { - /* ERR type 0 */ - SPFC_DRV_2_UP_PARA_ERR = 0, - - /* ERR type 1 */ - SPFC_SFP_SPEED_ERR, - - /* ERR type 2 */ - SPFC_32GPUB_UA_RXESCH_FIFO_OF, - SPFC_32GPUB_UA_RXESCH_FIFO_UCERR, - - /* ERR type 3 */ - SPFC_32G_UA_UATX_LEN_ABN, - SPFC_32G_UA_RXAFIFO_OF, - SPFC_32G_UA_TXAFIFO_OF, - SPFC_32G_UA_RXAFIFO_UCERR, - SPFC_32G_UA_TXAFIFO_UCERR, - - /* ERR type 4 */ - SPFC_32G_MAC_RX_BBC_FATAL, - SPFC_32G_MAC_TX_BBC_FATAL, - SPFC_32G_MAC_TXFIFO_UF, - SPFC_32G_MAC_PCS_TXFIFO_UF, - SPFC_32G_MAC_RXBBC_CRDT_TO, - SPFC_32G_MAC_PCS_RXAFIFO_OF, - SPFC_32G_MAC_PCS_TXFIFO_OF, - SPFC_32G_MAC_FC2P_RXFIFO_OF, - SPFC_32G_MAC_FC2P_TXFIFO_OF, - SPFC_32G_MAC_FC2P_CAFIFO_OF, - SPFC_32G_MAC_PCS_RXRSFECM_UCEER, - SPFC_32G_MAC_PCS_RXAFIFO_UCEER, - SPFC_32G_MAC_PCS_TXFIFO_UCEER, - SPFC_32G_MAC_FC2P_RXFIFO_UCEER, - SPFC_32G_MAC_FC2P_TXFIFO_UCEER, - - /* ERR type 5 */ - SPFC_NON32G_DFX_FC1_DFX_BF_FIFO, - SPFC_NON32G_DFX_FC1_DFX_BP_FIFO, - SPFC_NON32G_DFX_FC1_DFX_RX_AFIFO_ERR, - SPFC_NON32G_DFX_FC1_DFX_TX_AFIFO_ERR, - SPFC_NON32G_DFX_FC1_DFX_DIRQ_RXBUF_FIFO1, - SPFC_NON32G_DFX_FC1_DFX_DIRQ_RXBBC_TO, - SPFC_NON32G_DFX_FC1_DFX_DIRQ_TXDAT_FIFO, - SPFC_NON32G_DFX_FC1_DFX_DIRQ_TXCMD_FIFO, - SPFC_NON32G_DFX_FC1_ERR_R_RDY, - - /* ERR type 6 */ - SPFC_NON32G_MAC_FC1_FAIRNESS_ERROR, - - SPFC_ERR_VALUE_BUTT - -}; - -struct spfc_up_error_event { - struct spfc_mbox_header header; - - u8 link_event; - u8 error_level; - u8 error_type; - u8 error_value; -}; - -struct spfc_inmbox_clear_done { - struct spfc_mbox_header header; -}; - -/* receive els cmd */ -struct spfc_inmbox_rcv_els { - struct spfc_mbox_header header; - u16 pkt_type; - u16 pkt_len; - u8 frame[ARRAY_INDEX_0]; -}; - -/* FCF event type */ -enum spfc_fcf_event_type { - SPFC_FCF_SELECTED = 0, - SPFC_FCF_DEAD, - SPFC_FCF_CLEAR_VLINK, - SPFC_FCF_CLEAR_VLINK_APPOINTED -}; - -struct spfc_nport_id_info { - u32 nport_id : 24; - u32 vp_index : 8; -}; - -struct spfc_inmbox_fcf_event { - struct spfc_mbox_header header; - - u8 fcf_map[ARRAY_INDEX_3]; - u8 event_type; - - u8 fcf_mac_h4[ARRAY_INDEX_4]; - - u16 vlan_info; - u8 fcf_mac_l2[ARRAY_INDEX_2]; - - struct spfc_nport_id_info nport_id_info[UNF_SPFC_MAXNPIV_NUM + 1]; -}; - -/* send els cmd */ -struct spfc_inmbox_send_els { - struct spfc_mbox_header header; - - u8 oper_code; - u8 rsvd[ARRAY_INDEX_3]; - - u8 resvd; - u8 els_cmd_type; - u16 pkt_len; - - u8 fcf_mac_h4[ARRAY_INDEX_4]; - - u16 vlan_info; - u8 fcf_mac_l2[ARRAY_INDEX_2]; - - u8 fc_frame[SPFC_FC_HEAD_LEN + UNF_FLOGI_PAYLOAD_LEN]; -}; - -struct spfc_inmbox_send_els_sts { - struct spfc_mbox_header header; - - u16 rx_id; - u16 err_code; - - u16 ox_id; - u16 rsvd; -}; - -struct spfc_inmbox_get_clear_state { - struct spfc_mbox_header header; - u32 resvd[31]; -}; - -struct spfc_outmbox_get_clear_state_sts { - struct spfc_mbox_header header; - u16 rsvd1; - u8 state; /* 1--clear doing. 0---clear done. 
*/ - u8 status; /* 0--ok,!0---fail */ - u32 rsvd2[30]; -}; - -#define SPFC_FIP_MODE_VN2VF (0) -#define SPFC_FIP_MODE_VN2VN (1) - -/* get up state */ -struct spfc_inmbox_get_up_state { - struct spfc_mbox_header header; - - u64 cur_jiff_time; -}; - -/* get port state */ -struct spfc_inmbox_get_port_info { - struct spfc_mbox_header header; -}; - -struct spfc_outmbox_get_up_state_sts { - struct spfc_mbox_header header; - - u8 status; - u8 rsv0; - u16 rsv1; - struct unf_port_dynamic_info dymic_info; -}; - -struct spfc_outmbox_get_port_info_sts { - struct spfc_mbox_header header; - - u32 status : 8; - u32 fe_16g_cvis_tts : 8; - u32 bb_scn : 8; - u32 loop_credit : 8; - - u32 non_loop_rx_credit : 8; - u32 non_loop_tx_credit : 8; - u32 sfp_speed : 8; - u32 present : 8; -}; - -struct spfc_inmbox_config_timer { - struct spfc_mbox_header header; - - u16 op_code; - u16 fun_id; - u32 user_data; -}; - -struct spfc_inmbox_config_srqc { - struct spfc_mbox_header header; - - u16 valid; - u16 fun_id; - u32 srqc_gpa_hi; - u32 srqc_gpa_lo; -}; - -struct spfc_outmbox_config_timer_sts { - struct spfc_mbox_header header; - - u8 status; - u8 rsv[ARRAY_INDEX_3]; -}; - -struct spfc_outmbox_config_srqc_sts { - struct spfc_mbox_header header; - - u8 status; - u8 rsv[ARRAY_INDEX_3]; -}; - -struct spfc_inmbox_default_sq_info { - struct spfc_mbox_header header; - u32 sq_cid; - u32 sq_xid; - u16 func_id; - u16 valid; -}; - -struct spfc_outmbox_default_sq_info_sts { - struct spfc_mbox_header header; - u8 status; - u8 rsv[ARRAY_INDEX_3]; -}; - -/* Generic Inmailbox and Outmailbox */ -union spfc_inmbox_generic { - struct { - struct spfc_mbox_header header; - u32 rsvd[(SPFC_MBOX_SIZE - SPFC_MBOX_HEADER_SIZE) / sizeof(u32)]; - } generic; - - struct spfc_inmbox_port_switch port_switch; - struct spfc_inmbox_config_api config_api; - struct spfc_inmbox_get_chip_info get_chip_info; - struct spfc_inmbox_config_login config_login; - struct spfc_inmbox_port_reset port_reset; - struct spfc_inmbox_set_esch esch_set; - struct spfc_inmbox_led_test led_test; - struct spfc_inmbox_get_sfp_info get_sfp_info; - struct spfc_inmbox_clear_done clear_done; - struct spfc_outmbox_get_ucode_stat get_ucode_stat; - struct spfc_inmbox_get_clear_state get_clr_state; - struct spfc_inmbox_send_vport_info send_vport_info; - struct spfc_inmbox_get_up_state get_up_state; - struct spfc_inmbox_config_timer timer_config; - struct spfc_inmbox_config_srqc config_srqc; - struct spfc_inmbox_get_port_info get_port_info; -}; - -union spfc_outmbox_generic { - struct { - struct spfc_mbox_header header; - u32 rsvd[(SPFC_MBOX_SIZE - SPFC_MBOX_HEADER_SIZE) / sizeof(u32)]; - } generic; - - struct spfc_outmbox_port_switch_sts port_switch_sts; - struct spfc_outmbox_config_api_sts config_api_sts; - struct spfc_outmbox_get_chip_info_sts get_chip_info_sts; - struct spfc_outmbox_get_reg_info_sts get_reg_info_sts; - struct spfc_outmbox_config_login_sts config_login_sts; - struct spfc_outmbox_port_reset_sts port_reset_sts; - struct spfc_outmbox_led_test_sts led_test_sts; - struct spfc_outmbox_set_esch_sts esch_set_sts; - struct spfc_inmbox_get_serdes_rx_sts serdes_rx_get_sts; - struct spfc_outmbox_set_serdes_tx_sts serdes_tx_set_sts; - struct spfc_outmbox_i2c_wr_rd_sts i2c_wr_rd_sts; - struct spfc_outmbox_gpio_wr_rd_sts gpio_wr_rd_sts; - struct spfc_outmbox_get_sfp_info_sts get_sfp_info_sts; - struct spfc_outmbox_get_ucode_stat_sts get_ucode_stat_sts; - struct spfc_outmbox_get_clear_state_sts get_clr_state_sts; - struct spfc_outmbox_get_up_state_sts get_up_state_sts; - struct 
spfc_outmbox_config_timer_sts timer_config_sts; - struct spfc_outmbox_config_srqc_sts config_srqc_sts; - struct spfc_outmbox_get_port_info_sts get_port_info_sts; - struct spfc_outmbox_default_sq_info_sts default_sq_sts; -}; - -u32 spfc_get_chip_msg(void *hba, void *mac); -u32 spfc_config_port_table(struct spfc_hba_info *hba); -u32 spfc_port_switch(struct spfc_hba_info *hba, bool turn_on); -u32 spfc_get_loop_map(void *hba, void *buf); -u32 spfc_get_workable_bb_credit(void *hba, void *bb_credit); -u32 spfc_get_workable_bb_scn(void *hba, void *bb_scn); -u32 spfc_get_port_current_info(void *hba, void *port_info); -u32 spfc_get_port_fec(void *hba, void *para_out); - -u32 spfc_get_loop_alpa(void *hba, void *alpa); -u32 spfc_get_topo_act(void *hba, void *topo_act); -u32 spfc_config_login_api(struct spfc_hba_info *hba, struct unf_port_login_parms *login_parms); -u32 spfc_mb_send_and_wait_mbox(struct spfc_hba_info *hba, const void *in_mbox, u16 in_size, - union spfc_outmbox_generic *out_mbox); -void spfc_up_msg2driver_proc(void *hwdev_handle, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size); - -u32 spfc_mb_reset_chip(struct spfc_hba_info *hba, u8 sub_type); -u32 spfc_clear_sq_wqe_done(struct spfc_hba_info *hba); -u32 spfc_update_fabric_param(void *hba, void *para_in); -u32 spfc_update_port_param(void *hba, void *para_in); -u32 spfc_update_fdisc_param(void *hba, void *vport_info); -u32 spfc_mbx_get_fw_clear_stat(struct spfc_hba_info *hba, u32 *clear_state); -u32 spfc_get_chip_capability(void *hwdev_handle, struct spfc_chip_info *chip_info); -u32 spfc_mbx_config_default_session(void *hba, u32 flag); - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.c b/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.c deleted file mode 100644 index 0c1d97d9e3e6cc8d68e6156caf83dcf8b9f08096..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.c +++ /dev/null @@ -1,1611 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" - -#include "spfc_cqm_object.h" -#include "spfc_cqm_bitmap_table.h" -#include "spfc_cqm_bat_cla.h" -#include "spfc_cqm_main.h" - -static unsigned char cqm_ver = 8; -module_param(cqm_ver, byte, 0644); -MODULE_PARM_DESC(cqm_ver, "for cqm version control (default=8)"); - -static void -cqm_bat_fill_cla_common_gpa(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - struct cqm_bat_entry_standerd *bat_entry_standerd) -{ - u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; - struct sphw_func_attr *func_attr = NULL; - struct cqm_bat_entry_vf2pf gpa = {0}; - u32 cla_gpa_h = 0; - dma_addr_t pa; - - if (cla_table->cla_lvl == CQM_CLA_LVL_0) - pa = cla_table->cla_z_buf.buf_list[0].pa; - else if (cla_table->cla_lvl == CQM_CLA_LVL_1) - pa = cla_table->cla_y_buf.buf_list[0].pa; - else - pa = cla_table->cla_x_buf.buf_list[0].pa; - - gpa.cla_gpa_h = CQM_ADDR_HI(pa) & CQM_CHIP_GPA_HIMASK; - - /* On the SPU, the value of spu_en in the GPA address - * in the BAT is determined by the host ID and fun IDx. 
- */ - if (sphw_host_id(cqm_handle->ex_handle) == CQM_SPU_HOST_ID) { - func_attr = &cqm_handle->func_attribute; - gpa.acs_spu_en = func_attr->func_global_idx & 0x1; - } else { - gpa.acs_spu_en = 0; - } - - memcpy(&cla_gpa_h, &gpa, sizeof(u32)); - bat_entry_standerd->cla_gpa_h = cla_gpa_h; - - /* GPA is valid when gpa[0] = 1. - * CQM_BAT_ENTRY_T_REORDER does not support GPA validity check. - */ - if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) - bat_entry_standerd->cla_gpa_l = CQM_ADDR_LW(pa); - else - bat_entry_standerd->cla_gpa_l = CQM_ADDR_LW(pa) | gpa_check_enable; -} - -static void cqm_bat_fill_cla_common(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 *entry_base_addr) -{ - struct cqm_bat_entry_standerd *bat_entry_standerd = NULL; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 cache_line = 0; - - if (cla_table->type == CQM_BAT_ENTRY_T_TIMER && cqm_ver == 8) - cache_line = CQM_CHIP_TIMER_CACHELINE; - else - cache_line = CQM_CHIP_CACHELINE; - - if (cla_table->obj_num == 0) { - cqm_info(handle->dev_hdl, - "Cla alloc: cla_type %u, obj_num=0, don't init bat entry\n", - cla_table->type); - return; - } - - bat_entry_standerd = (struct cqm_bat_entry_standerd *)entry_base_addr; - - /* The QPC value is 256/512/1024 and the timer value is 512. - * The other cacheline value is 256B. - * The conversion operation is performed inside the chip. - */ - if (cla_table->obj_size > cache_line) { - if (cla_table->obj_size == CQM_OBJECT_512) - bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_512; - else - bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_1024; - bat_entry_standerd->max_number = cla_table->max_buffer_size / cla_table->obj_size; - } else { - if (cache_line == CQM_CHIP_CACHELINE) { - bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_256; - bat_entry_standerd->max_number = cla_table->max_buffer_size / cache_line; - } else { - bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_512; - bat_entry_standerd->max_number = cla_table->max_buffer_size / cache_line; - } - } - - bat_entry_standerd->max_number = bat_entry_standerd->max_number - 1; - - bat_entry_standerd->bypass = CQM_BAT_NO_BYPASS_CACHE; - bat_entry_standerd->z = cla_table->cacheline_z; - bat_entry_standerd->y = cla_table->cacheline_y; - bat_entry_standerd->x = cla_table->cacheline_x; - bat_entry_standerd->cla_level = cla_table->cla_lvl; - - cqm_bat_fill_cla_common_gpa(cqm_handle, cla_table, bat_entry_standerd); -} - -static void cqm_bat_fill_cla_cfg(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 **entry_base_addr) -{ - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - struct cqm_bat_entry_cfg *bat_entry_cfg = NULL; - - bat_entry_cfg = (struct cqm_bat_entry_cfg *)(*entry_base_addr); - bat_entry_cfg->cur_conn_cache = 0; - bat_entry_cfg->max_conn_cache = - func_cap->flow_table_based_conn_cache_number; - bat_entry_cfg->cur_conn_num_h_4 = 0; - bat_entry_cfg->cur_conn_num_l_16 = 0; - bat_entry_cfg->max_conn_num = func_cap->flow_table_based_conn_number; - - /* Aligns with 64 buckets and shifts rightward by 6 bits. - * The maximum value of this field is 16 bits. A maximum of 4M buckets - * can be supported. The value is subtracted by 1. It is used for &hash - * value. 
- */ - if ((func_cap->hash_number >> CQM_HASH_NUMBER_UNIT) != 0) { - bat_entry_cfg->bucket_num = ((func_cap->hash_number >> - CQM_HASH_NUMBER_UNIT) - 1); - } - if (func_cap->bloomfilter_length != 0) { - bat_entry_cfg->bloom_filter_len = func_cap->bloomfilter_length - - 1; - bat_entry_cfg->bloom_filter_addr = func_cap->bloomfilter_addr; - } - - (*entry_base_addr) += sizeof(struct cqm_bat_entry_cfg); -} - -static void cqm_bat_fill_cla_other(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 **entry_base_addr) -{ - cqm_bat_fill_cla_common(cqm_handle, cla_table, *entry_base_addr); - - (*entry_base_addr) += sizeof(struct cqm_bat_entry_standerd); -} - -static void cqm_bat_fill_cla_taskmap(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 **entry_base_addr) -{ - struct cqm_bat_entry_taskmap *bat_entry_taskmap = NULL; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - int i; - - if (cqm_handle->func_capability.taskmap_number != 0) { - bat_entry_taskmap = - (struct cqm_bat_entry_taskmap *)(*entry_base_addr); - for (i = 0; i < CQM_BAT_ENTRY_TASKMAP_NUM; i++) { - bat_entry_taskmap->addr[i].gpa_h = - (u32)(cla_table->cla_z_buf.buf_list[i].pa >> - CQM_CHIP_GPA_HSHIFT); - bat_entry_taskmap->addr[i].gpa_l = - (u32)(cla_table->cla_z_buf.buf_list[i].pa & - CQM_CHIP_GPA_LOMASK); - cqm_info(handle->dev_hdl, - "Cla alloc: taskmap bat entry: 0x%x 0x%x\n", - bat_entry_taskmap->addr[i].gpa_h, - bat_entry_taskmap->addr[i].gpa_l); - } - } - - (*entry_base_addr) += sizeof(struct cqm_bat_entry_taskmap); -} - -static void cqm_bat_fill_cla_timer(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 **entry_base_addr) -{ - /* Only the PPF allocates timer resources. */ - if (cqm_handle->func_attribute.func_type != CQM_PPF) { - (*entry_base_addr) += CQM_BAT_ENTRY_SIZE; - } else { - cqm_bat_fill_cla_common(cqm_handle, cla_table, *entry_base_addr); - - (*entry_base_addr) += sizeof(struct cqm_bat_entry_standerd); - } -} - -static void cqm_bat_fill_cla_invalid(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 **entry_base_addr) -{ - (*entry_base_addr) += CQM_BAT_ENTRY_SIZE; -} - -static void cqm_bat_fill_cla(struct cqm_handle *cqm_handle) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = NULL; - u32 entry_type = CQM_BAT_ENTRY_T_INVALID; - u8 *entry_base_addr = NULL; - u32 i = 0; - - /* Fills each item in the BAT table according to the BAT format. */ - entry_base_addr = bat_table->bat; - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - entry_type = bat_table->bat_entry_type[i]; - cla_table = &bat_table->entry[i]; - - if (entry_type == CQM_BAT_ENTRY_T_CFG) - cqm_bat_fill_cla_cfg(cqm_handle, cla_table, &entry_base_addr); - else if (entry_type == CQM_BAT_ENTRY_T_TASKMAP) - cqm_bat_fill_cla_taskmap(cqm_handle, cla_table, &entry_base_addr); - else if (entry_type == CQM_BAT_ENTRY_T_INVALID) - cqm_bat_fill_cla_invalid(cqm_handle, cla_table, &entry_base_addr); - else if (entry_type == CQM_BAT_ENTRY_T_TIMER) - cqm_bat_fill_cla_timer(cqm_handle, cla_table, &entry_base_addr); - else - cqm_bat_fill_cla_other(cqm_handle, cla_table, &entry_base_addr); - - /* Check whether entry_base_addr is out-of-bounds array. 
*/ - if (entry_base_addr >= (bat_table->bat + CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE)) - break; - } -} - -u32 cqm_funcid2smfid(struct cqm_handle *cqm_handle) -{ - u32 funcid = 0; - u32 smf_sel = 0; - u32 smf_id = 0; - u32 smf_pg_partial = 0; - /* SMF_Selection is selected based on - * the lower two bits of the function id - */ - u32 lbf_smfsel[4] = {0, 2, 1, 3}; - /* SMFID is selected based on SMF_PG[1:0] and SMF_Selection(0-1) */ - u32 smfsel_smfid01[4][2] = { {0, 0}, {0, 0}, {1, 1}, {0, 1} }; - /* SMFID is selected based on SMF_PG[3:2] and SMF_Selection(2-4) */ - u32 smfsel_smfid23[4][2] = { {2, 2}, {2, 2}, {3, 3}, {2, 3} }; - - /* When the LB mode is disabled, SMF0 is always returned. */ - if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL) { - smf_id = 0; - } else { - funcid = cqm_handle->func_attribute.func_global_idx & 0x3; - smf_sel = lbf_smfsel[funcid]; - - if (smf_sel < 2) { - smf_pg_partial = cqm_handle->func_capability.smf_pg & 0x3; - smf_id = smfsel_smfid01[smf_pg_partial][smf_sel]; - } else { - smf_pg_partial = (cqm_handle->func_capability.smf_pg >> 2) & 0x3; - smf_id = smfsel_smfid23[smf_pg_partial][smf_sel - 2]; - } - } - - return smf_id; -} - -/* This function is used in LB mode 1/2. The timer spoker info - * of independent space needs to be configured for 4 SMFs. - */ -static void cqm_update_timer_gpa(struct cqm_handle *cqm_handle, u32 smf_id) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = NULL; - u32 entry_type = CQM_BAT_ENTRY_T_INVALID; - u8 *entry_base_addr = NULL; - u32 i = 0; - - if (cqm_handle->func_attribute.func_type != CQM_PPF) - return; - - if (cqm_handle->func_capability.lb_mode != CQM_LB_MODE_1 && - cqm_handle->func_capability.lb_mode != CQM_LB_MODE_2) - return; - - cla_table = &bat_table->timer_entry[smf_id]; - entry_base_addr = bat_table->bat; - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - entry_type = bat_table->bat_entry_type[i]; - - if (entry_type == CQM_BAT_ENTRY_T_TIMER) { - cqm_bat_fill_cla_timer(cqm_handle, cla_table, &entry_base_addr); - break; - } - - if (entry_type == CQM_BAT_ENTRY_T_TASKMAP) - entry_base_addr += sizeof(struct cqm_bat_entry_taskmap); - else - entry_base_addr += CQM_BAT_ENTRY_SIZE; - - /* Check whether entry_base_addr is out-of-bounds array. 
*/ - if (entry_base_addr >= - (bat_table->bat + CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE)) - break; - } -} - -static s32 cqm_bat_update_cmd(struct cqm_handle *cqm_handle, struct cqm_cmd_buf *buf_in, - u32 smf_id, u32 func_id) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cmdq_bat_update *bat_update_cmd = NULL; - s32 ret = CQM_FAIL; - - bat_update_cmd = (struct cqm_cmdq_bat_update *)(buf_in->buf); - bat_update_cmd->offset = 0; - - if (cqm_handle->bat_table.bat_size > CQM_BAT_MAX_SIZE) { - cqm_err(handle->dev_hdl, - "bat_size = %u, which is more than %d.\n", - cqm_handle->bat_table.bat_size, CQM_BAT_MAX_SIZE); - return CQM_FAIL; - } - bat_update_cmd->byte_len = cqm_handle->bat_table.bat_size; - - memcpy(bat_update_cmd->data, cqm_handle->bat_table.bat, bat_update_cmd->byte_len); - - bat_update_cmd->smf_id = smf_id; - bat_update_cmd->func_id = func_id; - - cqm_info(handle->dev_hdl, "Bat update: smf_id=%u\n", bat_update_cmd->smf_id); - cqm_info(handle->dev_hdl, "Bat update: func_id=%u\n", bat_update_cmd->func_id); - - cqm_swab32((u8 *)bat_update_cmd, sizeof(struct cqm_cmdq_bat_update) >> CQM_DW_SHIFT); - - ret = cqm3_send_cmd_box((void *)(cqm_handle->ex_handle), CQM_MOD_CQM, - CQM_CMD_T_BAT_UPDATE, buf_in, NULL, NULL, - CQM_CMD_TIMEOUT, SPHW_CHANNEL_DEFAULT); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_send_cmd_box)); - cqm_err(handle->dev_hdl, "%s: send_cmd_box ret=%d\n", __func__, - ret); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_bat_update(struct cqm_handle *cqm_handle) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cmd_buf *buf_in = NULL; - s32 ret = CQM_FAIL; - u32 smf_id = 0; - u32 func_id = 0; - u32 i = 0; - - buf_in = cqm3_cmd_alloc((void *)(cqm_handle->ex_handle)); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); - buf_in->size = sizeof(struct cqm_cmdq_bat_update); - - /* In non-fake mode, func_id is set to 0xffff */ - func_id = 0xffff; - - /* The LB scenario is supported. - * The normal mode is the traditional mode and is configured on SMF0. - * In mode 0, load is balanced to four SMFs based on the func ID (except - * the PPF func ID). The PPF in mode 0 needs to be configured on four - * SMF, so the timer resources can be shared by the four timer engine. - * Mode 1/2 is load balanced to four SMF by flow. Therefore, one - * function needs to be configured to four SMF. - */ - if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && - cqm_handle->func_attribute.func_type != CQM_PPF)) { - smf_id = cqm_funcid2smfid(cqm_handle); - ret = cqm_bat_update_cmd(cqm_handle, buf_in, smf_id, func_id); - } else if ((cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1) || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2) || - ((cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0) && - (cqm_handle->func_attribute.func_type == CQM_PPF))) { - for (i = 0; i < CQM_LB_SMF_MAX; i++) { - cqm_update_timer_gpa(cqm_handle, i); - - /* The smf_pg variable stores the currently enabled SMF. 
if (cqm_handle->func_capability.smf_pg & (1U << i)) { - smf_id = i; - ret = cqm_bat_update_cmd(cqm_handle, buf_in, smf_id, func_id); - if (ret != CQM_SUCCESS) - goto out; - } - } - } else { - cqm_err(handle->dev_hdl, "Bat update: unsupported lb mode=%u\n", - cqm_handle->func_capability.lb_mode); - ret = CQM_FAIL; - } - -out: - cqm3_cmd_free((void *)(cqm_handle->ex_handle), buf_in); - return ret; -} - -s32 cqm_bat_init_ft(struct cqm_handle *cqm_handle, struct cqm_bat_table *bat_table, - enum func_type function_type) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 i = 0; - - bat_table->bat_entry_type[CQM_BAT_INDEX0] = CQM_BAT_ENTRY_T_CFG; - bat_table->bat_entry_type[CQM_BAT_INDEX1] = CQM_BAT_ENTRY_T_HASH; - bat_table->bat_entry_type[CQM_BAT_INDEX2] = CQM_BAT_ENTRY_T_QPC; - bat_table->bat_entry_type[CQM_BAT_INDEX3] = CQM_BAT_ENTRY_T_SCQC; - bat_table->bat_entry_type[CQM_BAT_INDEX4] = CQM_BAT_ENTRY_T_LUN; - bat_table->bat_entry_type[CQM_BAT_INDEX5] = CQM_BAT_ENTRY_T_TASKMAP; - - if (function_type == CQM_PF || function_type == CQM_PPF) { - bat_table->bat_entry_type[CQM_BAT_INDEX6] = CQM_BAT_ENTRY_T_L3I; - bat_table->bat_entry_type[CQM_BAT_INDEX7] = CQM_BAT_ENTRY_T_CHILDC; - bat_table->bat_entry_type[CQM_BAT_INDEX8] = CQM_BAT_ENTRY_T_TIMER; - bat_table->bat_entry_type[CQM_BAT_INDEX9] = CQM_BAT_ENTRY_T_XID2CID; - bat_table->bat_entry_type[CQM_BAT_INDEX10] = CQM_BAT_ENTRY_T_REORDER; - bat_table->bat_size = CQM_BAT_SIZE_FT_PF; - } else if (function_type == CQM_VF) { - bat_table->bat_size = CQM_BAT_SIZE_FT_VF; - } else { - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) - bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; - - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(function_type)); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_bat_init(struct cqm_handle *cqm_handle) -{ - struct cqm_func_capability *capability = &cqm_handle->func_capability; - enum func_type function_type = cqm_handle->func_attribute.func_type; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 i; - - memset(bat_table, 0, sizeof(struct cqm_bat_table)); - - /* Initialize the type of each bat entry. */ - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) - bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; - - /* Select BATs based on service types. Currently, - * feature-related resources of the VF are stored in the BATs of the VF. - */ - if (capability->ft_enable) - return cqm_bat_init_ft(cqm_handle, bat_table, function_type); - - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(capability->ft_enable)); - - return CQM_FAIL; -} - -void cqm_bat_uninit(struct cqm_handle *cqm_handle) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 i; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) - bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; - - memset(bat_table->bat, 0, CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE); - - /* Instruct the chip to update the BAT table.
- if (cqm_bat_update(cqm_handle) != CQM_SUCCESS) - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update)); -} - -s32 cqm_cla_fill_buf(struct cqm_handle *cqm_handle, struct cqm_buf *cla_base_buf, - struct cqm_buf *cla_sub_buf, u8 gpa_check_enable) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct sphw_func_attr *func_attr = NULL; - dma_addr_t *base = NULL; - u64 fake_en = 0; - u64 spu_en = 0; - u64 pf_id = 0; - u32 i = 0; - u32 addr_num; - u32 buf_index = 0; - - /* Apply for space for base_buf */ - if (!cla_base_buf->buf_list) { - if (cqm_buf_alloc(cqm_handle, cla_base_buf, false) == - CQM_FAIL) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(cla_base_buf)); - return CQM_FAIL; - } - } - - /* Apply for space for sub_buf */ - if (!cla_sub_buf->buf_list) { - if (cqm_buf_alloc(cqm_handle, cla_sub_buf, false) == CQM_FAIL) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(cla_sub_buf)); - cqm_buf_free(cla_base_buf, cqm_handle->dev); - return CQM_FAIL; - } - } - - /* Fill base_buf with the GPA of sub_buf */ - addr_num = cla_base_buf->buf_size / sizeof(dma_addr_t); - base = (dma_addr_t *)(cla_base_buf->buf_list[0].va); - for (i = 0; i < cla_sub_buf->buf_number; i++) { - /* The SPU SMF supports load balancing from the SMF to the CPI, - * depending on the host ID and func ID. - */ - if (sphw_host_id(cqm_handle->ex_handle) == CQM_SPU_HOST_ID) { - func_attr = &cqm_handle->func_attribute; - spu_en = (u64)(func_attr->func_global_idx & 0x1) << 63; - } else { - spu_en = 0; - } - - *base = (((((cla_sub_buf->buf_list[i].pa & CQM_CHIP_GPA_MASK) | - spu_en) | - fake_en) | - pf_id) | - gpa_check_enable); - - cqm_swab64((u8 *)base, 1); - if ((i + 1) % addr_num == 0) { - buf_index++; - if (buf_index < cla_base_buf->buf_number) - base = cla_base_buf->buf_list[buf_index].va; - } else { - base++; - } - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_xyz_lvl1(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 trunk_size) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf *cla_y_buf = NULL; - struct cqm_buf *cla_z_buf = NULL; - s32 shift = 0; - s32 ret = CQM_FAIL; - u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; - u32 cache_line = 0; - - if (cla_table->type == CQM_BAT_ENTRY_T_TIMER && cqm_ver == 8) - cache_line = CQM_CHIP_TIMER_CACHELINE; - else - cache_line = CQM_CHIP_CACHELINE; - - if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) - gpa_check_enable = 0; - - cla_table->cla_lvl = CQM_CLA_LVL_1; - - shift = cqm_shift(trunk_size / cla_table->obj_size); - cla_table->z = shift ? (shift - 1) : (shift); - cla_table->y = CQM_MAX_INDEX_BIT; - cla_table->x = 0; - - if (cla_table->obj_size >= cache_line) { - cla_table->cacheline_z = cla_table->z; - cla_table->cacheline_y = cla_table->y; - cla_table->cacheline_x = cla_table->x; - } else { - shift = cqm_shift(trunk_size / cache_line); - cla_table->cacheline_z = shift ?
(shift - 1) : (shift); - cla_table->cacheline_y = CQM_MAX_INDEX_BIT; - cla_table->cacheline_x = 0; - } - - /* Applying for CLA_Y_BUF Space */ - cla_y_buf = &cla_table->cla_y_buf; - cla_y_buf->buf_size = trunk_size; - cla_y_buf->buf_number = 1; - cla_y_buf->page_number = cla_y_buf->buf_number << - cla_table->trunk_order; - ret = cqm_buf_alloc(cqm_handle, cla_y_buf, false); - CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, CQM_FAIL, - CQM_ALLOC_FAIL(lvl_1_y_buf)); - - /* Applying for CLA_Z_BUF Space */ - cla_z_buf = &cla_table->cla_z_buf; - cla_z_buf->buf_size = trunk_size; - cla_z_buf->buf_number = - (ALIGN(cla_table->max_buffer_size, trunk_size)) / trunk_size; - cla_z_buf->page_number = cla_z_buf->buf_number << - cla_table->trunk_order; - /* All buffer space must be statically allocated. */ - if (cla_table->alloc_static) { - ret = cqm_cla_fill_buf(cqm_handle, cla_y_buf, cla_z_buf, - gpa_check_enable); - CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, CQM_FAIL, - CQM_FUNCTION_FAIL(cqm_cla_fill_buf)); - } else { /* Only the buffer list space is initialized. The buffer space - * is dynamically allocated in services. - */ - cla_z_buf->buf_list = vmalloc(cla_z_buf->buf_number * - sizeof(struct cqm_buf_list)); - if (!cla_z_buf->buf_list) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_1_z_buf)); - cqm_buf_free(cla_y_buf, cqm_handle->dev); - return CQM_FAIL; - } - memset(cla_z_buf->buf_list, 0, - cla_z_buf->buf_number * sizeof(struct cqm_buf_list)); - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_xyz_lvl2(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 trunk_size) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf *cla_x_buf = NULL; - struct cqm_buf *cla_y_buf = NULL; - struct cqm_buf *cla_z_buf = NULL; - s32 shift = 0; - s32 ret = CQM_FAIL; - u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; - u32 cache_line = 0; - - if (cla_table->type == CQM_BAT_ENTRY_T_TIMER && cqm_ver == 8) - cache_line = CQM_CHIP_TIMER_CACHELINE; - else - cache_line = CQM_CHIP_CACHELINE; - - if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) - gpa_check_enable = 0; - - cla_table->cla_lvl = CQM_CLA_LVL_2; - - shift = cqm_shift(trunk_size / cla_table->obj_size); - cla_table->z = shift ? (shift - 1) : (shift); - shift = cqm_shift(trunk_size / sizeof(dma_addr_t)); - cla_table->y = cla_table->z + shift; - cla_table->x = CQM_MAX_INDEX_BIT; - - if (cla_table->obj_size >= cache_line) { - cla_table->cacheline_z = cla_table->z; - cla_table->cacheline_y = cla_table->y; - cla_table->cacheline_x = cla_table->x; - } else { - shift = cqm_shift(trunk_size / cache_line); - cla_table->cacheline_z = shift ? 
(shift - 1) : (shift); - shift = cqm_shift(trunk_size / sizeof(dma_addr_t)); - cla_table->cacheline_y = cla_table->cacheline_z + shift; - cla_table->cacheline_x = CQM_MAX_INDEX_BIT; - } - - /* Apply for CLA_X_BUF Space */ - cla_x_buf = &cla_table->cla_x_buf; - cla_x_buf->buf_size = trunk_size; - cla_x_buf->buf_number = 1; - cla_x_buf->page_number = cla_x_buf->buf_number << - cla_table->trunk_order; - ret = cqm_buf_alloc(cqm_handle, cla_x_buf, false); - CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, CQM_FAIL, - CQM_ALLOC_FAIL(lvl_2_x_buf)); - - /* Apply for CLA_Z_BUF and CLA_Y_BUF Space */ - cla_z_buf = &cla_table->cla_z_buf; - cla_z_buf->buf_size = trunk_size; - cla_z_buf->buf_number = - (ALIGN(cla_table->max_buffer_size, trunk_size)) / trunk_size; - cla_z_buf->page_number = cla_z_buf->buf_number << - cla_table->trunk_order; - - cla_y_buf = &cla_table->cla_y_buf; - cla_y_buf->buf_size = trunk_size; - cla_y_buf->buf_number = - (ALIGN(cla_z_buf->buf_number * sizeof(dma_addr_t), trunk_size)) / - trunk_size; - cla_y_buf->page_number = cla_y_buf->buf_number << - cla_table->trunk_order; - /* All buffer space must be statically allocated. */ - if (cla_table->alloc_static) { - /* Apply for y buf and z buf, and fill the gpa of - * z buf list in y buf - */ - if (cqm_cla_fill_buf(cqm_handle, cla_y_buf, cla_z_buf, - gpa_check_enable) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_cla_fill_buf)); - cqm_buf_free(cla_x_buf, cqm_handle->dev); - return CQM_FAIL; - } - - /* Fill the gpa of the y buf list into the x buf. - * After the x and y bufs are applied for, - * this function will not fail. - * Use void to forcibly convert the return of the function. - */ - (void)cqm_cla_fill_buf(cqm_handle, cla_x_buf, cla_y_buf, - gpa_check_enable); - } else { /* Only the buffer list space is initialized. The buffer space - * is dynamically allocated in services. - */ - cla_z_buf->buf_list = vmalloc(cla_z_buf->buf_number * - sizeof(struct cqm_buf_list)); - if (!cla_z_buf->buf_list) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_z_buf)); - cqm_buf_free(cla_x_buf, cqm_handle->dev); - return CQM_FAIL; - } - memset(cla_z_buf->buf_list, 0, - cla_z_buf->buf_number * sizeof(struct cqm_buf_list)); - - cla_y_buf->buf_list = vmalloc(cla_y_buf->buf_number * - sizeof(struct cqm_buf_list)); - if (!cla_y_buf->buf_list) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_y_buf)); - cqm_buf_free(cla_z_buf, cqm_handle->dev); - cqm_buf_free(cla_x_buf, cqm_handle->dev); - return CQM_FAIL; - } - memset(cla_y_buf->buf_list, 0, - cla_y_buf->buf_number * sizeof(struct cqm_buf_list)); - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_xyz_check(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 *size) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 trunk_size = 0; - - /* If the capability(obj_num) is set to 0, the CLA does not need to be - * initialized and exits directly. - */ - if (cla_table->obj_num == 0) { - cqm_info(handle->dev_hdl, - "Cla alloc: cla_type %u, obj_num=0, don't alloc buffer\n", - cla_table->type); - return CQM_SUCCESS; - } - - cqm_info(handle->dev_hdl, - "Cla alloc: cla_type %u, obj_num=0x%x, gpa_check_enable=%d\n", - cla_table->type, cla_table->obj_num, - cqm_handle->func_capability.gpa_check_enable); - - /* Check whether obj_size is 2^n-aligned. An error is reported when - * obj_size is 0 or 1. 
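cqm_check_align() (defined later in this patch) implements this test with a divide-by-two loop and accepts exactly the powers of two greater than 1. For reference, a standalone equivalent using the usual bit trick:

/* Standalone equivalent of cqm_check_align(): true only for powers of
 * two greater than 1 (0 and 1 are rejected, matching the loop). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool is_pow2_gt1(uint32_t x)
{
	return x > 1 && (x & (x - 1)) == 0;
}

int main(void)
{
	assert(!is_pow2_gt1(0) && !is_pow2_gt1(1) && !is_pow2_gt1(48));
	assert(is_pow2_gt1(2) && is_pow2_gt1(256) && is_pow2_gt1(4096));
	return 0;
}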
- */ - if (!cqm_check_align(cla_table->obj_size)) { - cqm_err(handle->dev_hdl, - "Cla alloc: cla_type %u, obj_size 0x%x is not align on 2^n\n", - cla_table->type, cla_table->obj_size); - return CQM_FAIL; - } - - trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); - - if (trunk_size < cla_table->obj_size) { - cqm_err(handle->dev_hdl, - "Cla alloc: cla type %u, obj_size 0x%x is out of trunk size\n", - cla_table->type, cla_table->obj_size); - return CQM_FAIL; - } - - *size = trunk_size; - - return CQM_CONTINUE; -} - -s32 cqm_cla_xyz(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf *cla_z_buf = NULL; - u32 trunk_size = 0; - s32 ret = CQM_FAIL; - - ret = cqm_cla_xyz_check(cqm_handle, cla_table, &trunk_size); - if (ret != CQM_CONTINUE) - return ret; - - /* Level-0 CLA occupies a small space. - * Only CLA_Z_BUF can be allocated during initialization. - */ - if (cla_table->max_buffer_size <= trunk_size) { - cla_table->cla_lvl = CQM_CLA_LVL_0; - - cla_table->z = CQM_MAX_INDEX_BIT; - cla_table->y = 0; - cla_table->x = 0; - - cla_table->cacheline_z = cla_table->z; - cla_table->cacheline_y = cla_table->y; - cla_table->cacheline_x = cla_table->x; - - /* Applying for CLA_Z_BUF Space */ - cla_z_buf = &cla_table->cla_z_buf; - cla_z_buf->buf_size = trunk_size; /* (u32)(PAGE_SIZE << - * cla_table->trunk_order); - */ - cla_z_buf->buf_number = 1; - cla_z_buf->page_number = cla_z_buf->buf_number << cla_table->trunk_order; - ret = cqm_buf_alloc(cqm_handle, cla_z_buf, false); - CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, CQM_FAIL, - CQM_ALLOC_FAIL(lvl_0_z_buf)); - } - /* Level-1 CLA - * Allocates CLA_Y_BUF and CLA_Z_BUF during initialization. - */ - else if (cla_table->max_buffer_size <= (trunk_size * (trunk_size / sizeof(dma_addr_t)))) { - if (cqm_cla_xyz_lvl1(cqm_handle, cla_table, trunk_size) == CQM_FAIL) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl1)); - return CQM_FAIL; - } - } - /* Level-2 CLA - * Allocates CLA_X_BUF, CLA_Y_BUF, and CLA_Z_BUF during initialization. - */ - else if (cla_table->max_buffer_size <= - (trunk_size * (trunk_size / sizeof(dma_addr_t)) * - (trunk_size / sizeof(dma_addr_t)))) { - if (cqm_cla_xyz_lvl2(cqm_handle, cla_table, trunk_size) == - CQM_FAIL) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl2)); - return CQM_FAIL; - } - } else { /* The current memory management mode does not support such - * a large buffer addressing. The order value needs to - * be increased. 
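For concreteness, the three level checks above bound max_buffer_size as follows when PAGE_SIZE is 4096, trunk_order is 0 and a GPA entry is 8 bytes (illustrative assumptions; sizeof(uint64_t) stands in for sizeof(dma_addr_t)): level 0 covers one trunk, level 1 covers 512 trunks, level 2 covers 512 x 512 trunks. A small standalone calculation:

/* Worked example of the three capacity thresholds checked by
 * cqm_cla_xyz(), under the assumptions stated above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t trunk = 4096ULL << 0;              /* PAGE_SIZE << trunk_order */
	uint64_t fanout = trunk / sizeof(uint64_t); /* GPAs per trunk: 512 */

	printf("lvl0 max: %llu B\n", (unsigned long long)trunk);            /* 4 KiB */
	printf("lvl1 max: %llu B\n", (unsigned long long)(trunk * fanout)); /* 2 MiB */
	printf("lvl2 max: %llu B\n",
	       (unsigned long long)(trunk * fanout * fanout));              /* 1 GiB */
	return 0;
}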
- */ - cqm_err(handle->dev_hdl, - "Cla alloc: cla max_buffer_size 0x%x exceeds support range\n", - cla_table->max_buffer_size); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -void cqm_cla_init_entry_normal(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - struct cqm_func_capability *capability) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - - switch (cla_table->type) { - case CQM_BAT_ENTRY_T_HASH: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->hash_number * capability->hash_basic_size; - cla_table->obj_size = capability->hash_basic_size; - cla_table->obj_num = capability->hash_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_QPC: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->qpc_number * capability->qpc_basic_size; - cla_table->obj_size = capability->qpc_basic_size; - cla_table->obj_num = capability->qpc_number; - cla_table->alloc_static = capability->qpc_alloc_static; - cqm_info(handle->dev_hdl, "Cla alloc: qpc alloc_static=%d\n", - cla_table->alloc_static); - break; - case CQM_BAT_ENTRY_T_MPT: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->mpt_number * capability->mpt_basic_size; - cla_table->obj_size = capability->mpt_basic_size; - cla_table->obj_num = capability->mpt_number; - /* CCB decided. MPT uses only static application scenarios. */ - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_SCQC: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->scqc_number * capability->scqc_basic_size; - cla_table->obj_size = capability->scqc_basic_size; - cla_table->obj_num = capability->scqc_number; - cla_table->alloc_static = capability->scqc_alloc_static; - cqm_info(handle->dev_hdl, "Cla alloc: scqc alloc_static=%d\n", - cla_table->alloc_static); - break; - case CQM_BAT_ENTRY_T_SRQC: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->srqc_number * capability->srqc_basic_size; - cla_table->obj_size = capability->srqc_basic_size; - cla_table->obj_num = capability->srqc_number; - cla_table->alloc_static = false; - break; - default: - break; - } -} - -void cqm_cla_init_entry_extern(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - struct cqm_func_capability *capability) -{ - switch (cla_table->type) { - case CQM_BAT_ENTRY_T_GID: - /* Level-0 CLA table required */ - cla_table->max_buffer_size = capability->gid_number * capability->gid_basic_size; - cla_table->trunk_order = - (u32)cqm_shift(ALIGN(cla_table->max_buffer_size, PAGE_SIZE) / PAGE_SIZE); - cla_table->obj_size = capability->gid_basic_size; - cla_table->obj_num = capability->gid_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_LUN: - cla_table->trunk_order = CLA_TABLE_PAGE_ORDER; - cla_table->max_buffer_size = capability->lun_number * capability->lun_basic_size; - cla_table->obj_size = capability->lun_basic_size; - cla_table->obj_num = capability->lun_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_TASKMAP: - cla_table->trunk_order = CQM_4K_PAGE_ORDER; - cla_table->max_buffer_size = capability->taskmap_number * - capability->taskmap_basic_size; - cla_table->obj_size = capability->taskmap_basic_size; - cla_table->obj_num = capability->taskmap_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_L3I: - cla_table->trunk_order = 
CLA_TABLE_PAGE_ORDER; - cla_table->max_buffer_size = capability->l3i_number * capability->l3i_basic_size; - cla_table->obj_size = capability->l3i_basic_size; - cla_table->obj_num = capability->l3i_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_CHILDC: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->childc_number * - capability->childc_basic_size; - cla_table->obj_size = capability->childc_basic_size; - cla_table->obj_num = capability->childc_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_TIMER: - /* Ensure that the basic size of the timer buffer page does not - * exceed 128 x 4 KB. Otherwise, clearing the timer buffer of - * the function is complex. - */ - cla_table->trunk_order = CQM_4K_PAGE_ORDER; - cla_table->max_buffer_size = capability->timer_number * - capability->timer_basic_size; - cla_table->obj_size = capability->timer_basic_size; - cla_table->obj_num = capability->timer_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_XID2CID: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->xid2cid_number * - capability->xid2cid_basic_size; - cla_table->obj_size = capability->xid2cid_basic_size; - cla_table->obj_num = capability->xid2cid_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_REORDER: - /* This entry supports only IWARP and does not support GPA validity check. */ - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->reorder_number * - capability->reorder_basic_size; - cla_table->obj_size = capability->reorder_basic_size; - cla_table->obj_num = capability->reorder_number; - cla_table->alloc_static = true; - break; - default: - break; - } -} - -s32 cqm_cla_init_entry_condition(struct cqm_handle *cqm_handle, u32 entry_type) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = &bat_table->entry[entry_type]; - struct cqm_cla_table *cla_table_timer = NULL; - u32 i; - - /* When the timer is in LB mode 1 or 2, the timer needs to be - * configured for four SMFs and the address space is independent. - */ - if (cla_table->type == CQM_BAT_ENTRY_T_TIMER && - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || - cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2)) { - for (i = 0; i < CQM_LB_SMF_MAX; i++) { - cla_table_timer = &bat_table->timer_entry[i]; - memcpy(cla_table_timer, cla_table, sizeof(struct cqm_cla_table)); - - if (cqm_cla_xyz(cqm_handle, cla_table_timer) == CQM_FAIL) { - cqm_cla_uninit(cqm_handle, entry_type); - return CQM_FAIL; - } - } - } - - if (cqm_cla_xyz(cqm_handle, cla_table) == CQM_FAIL) { - cqm_cla_uninit(cqm_handle, entry_type); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_init_entry(struct cqm_handle *cqm_handle, - struct cqm_func_capability *capability) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = NULL; - s32 ret; - u32 i = 0; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - cla_table->type = bat_table->bat_entry_type[i]; - - cqm_cla_init_entry_normal(cqm_handle, cla_table, capability); - cqm_cla_init_entry_extern(cqm_handle, cla_table, capability); - - /* Allocate CLA entry space at each level. 
*/ - if (cla_table->type < CQM_BAT_ENTRY_T_HASH || - cla_table->type > CQM_BAT_ENTRY_T_REORDER) { - mutex_init(&cla_table->lock); - continue; - } - - /* For the PPF, resources (8 wheels x 2k scales x 32B x - * func_num) need to be applied for to the timer. The - * structure of the timer entry in the BAT table needs - * to be filled. For the PF, no resource needs to be - * applied for the timer and no structure needs to be - * filled in the timer entry in the BAT table. - */ - if (!(cla_table->type == CQM_BAT_ENTRY_T_TIMER && - cqm_handle->func_attribute.func_type != CQM_PPF)) { - ret = cqm_cla_init_entry_condition(cqm_handle, i); - if (ret != CQM_SUCCESS) - return CQM_FAIL; - } - mutex_init(&cla_table->lock); - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_init(struct cqm_handle *cqm_handle) -{ - struct cqm_func_capability *capability = &cqm_handle->func_capability; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - s32 ret; - - /* Applying for CLA Entries */ - ret = cqm_cla_init_entry(cqm_handle, capability); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_init_entry)); - return ret; - } - - /* After the CLA entry is applied, the address is filled in the BAT table. */ - cqm_bat_fill_cla(cqm_handle); - - /* Instruct the chip to update the BAT table. */ - ret = cqm_bat_update(cqm_handle); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update)); - goto err; - } - - cqm_info(handle->dev_hdl, "Timer start: func_type=%d, timer_enable=%u\n", - cqm_handle->func_attribute.func_type, - cqm_handle->func_capability.timer_enable); - - if (cqm_handle->func_attribute.func_type == CQM_PPF && - cqm_handle->func_capability.timer_enable == CQM_TIMER_ENABLE) { - /* Enable the timer after the timer resources are applied for */ - cqm_info(handle->dev_hdl, "Timer start: spfc ppf timer start\n"); - ret = sphw_ppf_tmr_start((void *)(cqm_handle->ex_handle)); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, "Timer start: spfc ppf timer start, ret=%d\n", - ret); - goto err; - } - } - - return CQM_SUCCESS; - -err: - cqm_cla_uninit(cqm_handle, CQM_BAT_ENTRY_MAX); - return CQM_FAIL; -} - -void cqm_cla_uninit(struct cqm_handle *cqm_handle, u32 entry_numb) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = NULL; - s32 inv_flag = 0; - u32 i; - - for (i = 0; i < entry_numb; i++) { - cla_table = &bat_table->entry[i]; - if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_x_buf, &inv_flag); - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_y_buf, &inv_flag); - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_z_buf, &inv_flag); - } - } - - /* When the lb mode is 1/2, the timer space allocated to the 4 SMFs - * needs to be released. 
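Taking the figures in the timer comment above at face value (8 wheels x 2k scales x 32 B per function), the PPF's timer footprint works out as below; func_num = 64 is an illustrative value, not a driver constant.

/* Arithmetic check of the timer sizing mentioned above:
 * 8 wheels x 2048 scales x 32 B per entry, times the function count. */
#include <stdio.h>

int main(void)
{
	unsigned long per_func = 8UL * 2048 * 32; /* 524288 B = 512 KiB */
	unsigned long func_num = 64;              /* example value only */

	printf("%lu KiB per function, %lu MiB for %lu functions\n",
	       per_func >> 10, (per_func * func_num) >> 20, func_num);
	return 0;
}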
- */ - if (cqm_handle->func_attribute.func_type == CQM_PPF && - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || - cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2)) { - for (i = 0; i < CQM_LB_SMF_MAX; i++) { - cla_table = &bat_table->timer_entry[i]; - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_x_buf, &inv_flag); - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_y_buf, &inv_flag); - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_z_buf, &inv_flag); - } - } -} - -s32 cqm_cla_update_cmd(struct cqm_handle *cqm_handle, struct cqm_cmd_buf *buf_in, - struct cqm_cla_update_cmd *cmd) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cla_update_cmd *cla_update_cmd = NULL; - s32 ret = CQM_FAIL; - - cla_update_cmd = (struct cqm_cla_update_cmd *)(buf_in->buf); - - cla_update_cmd->gpa_h = cmd->gpa_h; - cla_update_cmd->gpa_l = cmd->gpa_l; - cla_update_cmd->value_h = cmd->value_h; - cla_update_cmd->value_l = cmd->value_l; - cla_update_cmd->smf_id = cmd->smf_id; - cla_update_cmd->func_id = cmd->func_id; - - cqm_swab32((u8 *)cla_update_cmd, - (sizeof(struct cqm_cla_update_cmd) >> CQM_DW_SHIFT)); - - ret = cqm3_send_cmd_box((void *)(cqm_handle->ex_handle), CQM_MOD_CQM, - CQM_CMD_T_CLA_UPDATE, buf_in, NULL, NULL, - CQM_CMD_TIMEOUT, SPHW_CHANNEL_DEFAULT); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_send_cmd_box)); - cqm_err(handle->dev_hdl, "Cla alloc: cqm_cla_update, cqm3_send_cmd_box_ret=%d\n", - ret); - cqm_err(handle->dev_hdl, "Cla alloc: cqm_cla_update, cla_update_cmd: 0x%x 0x%x 0x%x 0x%x\n", - cmd->gpa_h, cmd->gpa_l, cmd->value_h, cmd->value_l); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_update(struct cqm_handle *cqm_handle, struct cqm_buf_list *buf_node_parent, - struct cqm_buf_list *buf_node_child, u32 child_index, u8 cla_update_mode) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cmd_buf *buf_in = NULL; - struct cqm_cla_update_cmd cmd; - dma_addr_t pa = 0; - s32 ret = CQM_FAIL; - u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; - u32 i = 0; - u64 spu_en; - - buf_in = cqm3_cmd_alloc(cqm_handle->ex_handle); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); - buf_in->size = sizeof(struct cqm_cla_update_cmd); - - /* Fill command format, convert to big endian. */ - /* SPU function sets bit63: acs_spu_en based on function id. */ - if (sphw_host_id(cqm_handle->ex_handle) == CQM_SPU_HOST_ID) - spu_en = ((u64)(cqm_handle->func_attribute.func_global_idx & - 0x1)) << 63; - else - spu_en = 0; - - pa = ((buf_node_parent->pa + (child_index * sizeof(dma_addr_t))) | - spu_en); - cmd.gpa_h = CQM_ADDR_HI(pa); - cmd.gpa_l = CQM_ADDR_LW(pa); - - pa = (buf_node_child->pa | spu_en); - cmd.value_h = CQM_ADDR_HI(pa); - cmd.value_l = CQM_ADDR_LW(pa); - - /* current CLA GPA CHECK */ - if (gpa_check_enable) { - switch (cla_update_mode) { - /* gpa[0]=1 means this GPA is valid */ - case CQM_CLA_RECORD_NEW_GPA: - cmd.value_l |= 1; - break; - /* gpa[0]=0 means this GPA is invalid */ - case CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID: - case CQM_CLA_DEL_GPA_WITH_CACHE_INVALID: - cmd.value_l &= (~1); - break; - default: - cqm_err(handle->dev_hdl, - "Cla alloc: %s, wrong cla_update_mode=%u\n", - __func__, cla_update_mode); - break; - } - } - - /* In non-fake mode, set func_id to 0xffff. */ - cmd.func_id = 0xffff; - - /* Mode 0 is hashed to 4 SMF engines (excluding PPF) by func ID. */
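The command assembled above carries two pieces: the GPA of the 8-byte parent slot being rewritten (parent PA plus child_index slots, optionally with the SPU bit 63), and the child trunk PA whose bit 0 doubles as the validity flag when GPA checking is enabled. A standalone model with made-up addresses:

/* Standalone model of the gpa/value pair built by cqm_cla_update():
 * 'gpa' addresses one 8-byte slot in the parent trunk, 'value' is the
 * child trunk address with bit 0 used as the validity flag. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t parent_pa = 0x1000000000ULL; /* illustrative addresses */
	uint64_t child_pa  = 0x2000000000ULL;
	uint32_t child_index = 3;

	uint64_t gpa = parent_pa + child_index * sizeof(uint64_t);
	uint64_t value = child_pa | 1;  /* CQM_CLA_RECORD_NEW_GPA: mark valid */

	printf("slot 0x%llx <- 0x%llx\n",
	       (unsigned long long)gpa, (unsigned long long)value);
	value &= ~1ULL;                 /* CQM_CLA_DEL_GPA_*: mark invalid */
	printf("slot 0x%llx <- 0x%llx\n",
	       (unsigned long long)gpa, (unsigned long long)value);
	return 0;
}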
- if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && - cqm_handle->func_attribute.func_type != CQM_PPF)) { - cmd.smf_id = cqm_funcid2smfid(cqm_handle); - ret = cqm_cla_update_cmd(cqm_handle, buf_in, &cmd); - } - /* Modes 1/2 are allocated to four SMF engines by flow. - * Therefore, one function needs to be allocated to four SMF engines. - */ - /* Mode 0 PPF needs to be configured on 4 engines, - * and the timer resources need to be shared by the 4 engines. - */ - else if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || - cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2 || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && - cqm_handle->func_attribute.func_type == CQM_PPF)) { - for (i = 0; i < CQM_LB_SMF_MAX; i++) { - /* The smf_pg variable stores the currently enabled SMFs. */ - if (cqm_handle->func_capability.smf_pg & (1U << i)) { - cmd.smf_id = i; - ret = cqm_cla_update_cmd(cqm_handle, buf_in, - &cmd); - if (ret != CQM_SUCCESS) - goto out; - } - } - } else { - cqm_err(handle->dev_hdl, "Cla update: unsupported lb mode=%u\n", - cqm_handle->func_capability.lb_mode); - ret = CQM_FAIL; - } - -out: - cqm3_cmd_free((void *)(cqm_handle->ex_handle), buf_in); - return ret; -} - -s32 cqm_cla_alloc(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - struct cqm_buf_list *buf_node_parent, - struct cqm_buf_list *buf_node_child, u32 child_index) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - s32 ret = CQM_FAIL; - - /* Apply for trunk page */ - buf_node_child->va = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - cla_table->trunk_order); - CQM_PTR_CHECK_RET(buf_node_child->va, CQM_FAIL, CQM_ALLOC_FAIL(va)); - - /* PCI mapping */ - buf_node_child->pa = pci_map_single(cqm_handle->dev, buf_node_child->va, - PAGE_SIZE << cla_table->trunk_order, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(cqm_handle->dev, buf_node_child->pa)) { - cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_node_child->pa)); - goto err1; - } - - /* Notify the chip of trunk_pa so that the chip fills in cla entry */ - ret = cqm_cla_update(cqm_handle, buf_node_parent, buf_node_child, - child_index, CQM_CLA_RECORD_NEW_GPA); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update)); - goto err2; - } - - return CQM_SUCCESS; - -err2: - pci_unmap_single(cqm_handle->dev, buf_node_child->pa, - PAGE_SIZE << cla_table->trunk_order, - PCI_DMA_BIDIRECTIONAL); -err1: - free_pages((ulong)(buf_node_child->va), cla_table->trunk_order); - buf_node_child->va = NULL; - return CQM_FAIL; -} - -void cqm_cla_free(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - struct cqm_buf_list *buf_node_parent, - struct cqm_buf_list *buf_node_child, u32 child_index, u8 cla_update_mode) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 trunk_size; - - if (cqm_cla_update(cqm_handle, buf_node_parent, buf_node_child, - child_index, cla_update_mode) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update)); - return; - } - - if (cla_update_mode == CQM_CLA_DEL_GPA_WITH_CACHE_INVALID) { - trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); - if (cqm_cla_cache_invalid(cqm_handle, buf_node_child->pa, - trunk_size) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_cla_cache_invalid)); - return; - } - } - - /* Remove PCI mapping from the trunk page */ - pci_unmap_single(cqm_handle->dev, buf_node_child->pa, - PAGE_SIZE << cla_table->trunk_order, -
PCI_DMA_BIDIRECTIONAL); - - /* Release trunk page */ - free_pages((ulong)(buf_node_child->va), cla_table->trunk_order); - buf_node_child->va = NULL; -} - -u8 *cqm_cla_get_unlock_lvl0(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa) -{ - struct cqm_buf *cla_z_buf = &cla_table->cla_z_buf; - u8 *ret_addr = NULL; - u32 offset = 0; - - /* Level 0 CLA pages are statically allocated. */ - offset = index * cla_table->obj_size; - ret_addr = (u8 *)(cla_z_buf->buf_list->va) + offset; - *pa = cla_z_buf->buf_list->pa + offset; - - return ret_addr; -} - -u8 *cqm_cla_get_unlock_lvl1(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa) -{ - struct cqm_buf *cla_y_buf = &cla_table->cla_y_buf; - struct cqm_buf *cla_z_buf = &cla_table->cla_z_buf; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf_list *buf_node_y = NULL; - struct cqm_buf_list *buf_node_z = NULL; - u32 y_index = 0; - u32 z_index = 0; - u8 *ret_addr = NULL; - u32 offset = 0; - - z_index = index & ((1U << (cla_table->z + 1)) - 1); - y_index = index >> (cla_table->z + 1); - - if (y_index >= cla_z_buf->buf_number) { - cqm_err(handle->dev_hdl, - "Cla get: index exceeds buf_number, y_index %u, z_buf_number %u\n", - y_index, cla_z_buf->buf_number); - return NULL; - } - buf_node_z = &cla_z_buf->buf_list[y_index]; - buf_node_y = cla_y_buf->buf_list; - - /* The z buf node does not exist, applying for a page first. */ - if (!buf_node_z->va) { - if (cqm_cla_alloc(cqm_handle, cla_table, buf_node_y, buf_node_z, - y_index) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_cla_alloc)); - cqm_err(handle->dev_hdl, - "Cla get: cla_table->type=%u\n", - cla_table->type); - return NULL; - } - } - - buf_node_z->refcount += count; - offset = z_index * cla_table->obj_size; - ret_addr = (u8 *)(buf_node_z->va) + offset; - *pa = buf_node_z->pa + offset; - - return ret_addr; -} - -u8 *cqm_cla_get_unlock_lvl2(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa) -{ - struct cqm_buf *cla_x_buf = &cla_table->cla_x_buf; - struct cqm_buf *cla_y_buf = &cla_table->cla_y_buf; - struct cqm_buf *cla_z_buf = &cla_table->cla_z_buf; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf_list *buf_node_x = NULL; - struct cqm_buf_list *buf_node_y = NULL; - struct cqm_buf_list *buf_node_z = NULL; - u32 x_index = 0; - u32 y_index = 0; - u32 z_index = 0; - u32 trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); - u8 *ret_addr = NULL; - u32 offset = 0; - u64 tmp; - - z_index = index & ((1U << (cla_table->z + 1)) - 1); - y_index = (index >> (cla_table->z + 1)) & - ((1U << (cla_table->y - cla_table->z)) - 1); - x_index = index >> (cla_table->y + 1); - tmp = x_index * (trunk_size / sizeof(dma_addr_t)) + y_index; - - if (x_index >= cla_y_buf->buf_number || tmp >= cla_z_buf->buf_number) { - cqm_err(handle->dev_hdl, - "Cla get: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n", - x_index, y_index, cla_y_buf->buf_number, - cla_z_buf->buf_number); - return NULL; - } - - buf_node_x = cla_x_buf->buf_list; - buf_node_y = &cla_y_buf->buf_list[x_index]; - buf_node_z = &cla_z_buf->buf_list[tmp]; - - /* The y buf node does not exist, applying for pages for y node.
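The z/y/x splits above carve one object index into per-level offsets. A standalone worked example follows; the widths z = 6 (128 objects per trunk) and y = 15 are illustrative values, not taken from hardware.

/* Worked example of the lvl2 index decomposition used by
 * cqm_cla_get_unlock_lvl2(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t z = 6, y = 15, index = 0x123456;

	uint32_t z_index = index & ((1U << (z + 1)) - 1);              /* offset in trunk */
	uint32_t y_index = (index >> (z + 1)) & ((1U << (y - z)) - 1); /* slot in y trunk */
	uint32_t x_index = index >> (y + 1);                           /* slot in x trunk */

	printf("x=%u y=%u z=%u\n", x_index, y_index, z_index);
	return 0;
}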
*/ - if (!buf_node_y->va) { - if (cqm_cla_alloc(cqm_handle, cla_table, buf_node_x, buf_node_y, - x_index) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_cla_alloc)); - return NULL; - } - } - - /* The z buf node does not exist, applying for pages for z node. */ - if (!buf_node_z->va) { - if (cqm_cla_alloc(cqm_handle, cla_table, buf_node_y, buf_node_z, - y_index) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_cla_alloc)); - if (buf_node_y->refcount == 0) - /* To release node Y, cache_invalid is - * required. - */ - cqm_cla_free(cqm_handle, cla_table, buf_node_x, buf_node_y, x_index, - CQM_CLA_DEL_GPA_WITH_CACHE_INVALID); - return NULL; - } - - /* reference counting of the y buffer node needs to increase - * by 1. - */ - buf_node_y->refcount++; - } - - buf_node_z->refcount += count; - offset = z_index * cla_table->obj_size; - ret_addr = (u8 *)(buf_node_z->va) + offset; - *pa = buf_node_z->pa + offset; - - return ret_addr; -} - -u8 *cqm_cla_get_unlock(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa) -{ - u8 *ret_addr = NULL; - - if (cla_table->cla_lvl == CQM_CLA_LVL_0) - ret_addr = cqm_cla_get_unlock_lvl0(cqm_handle, cla_table, index, - count, pa); - else if (cla_table->cla_lvl == CQM_CLA_LVL_1) - ret_addr = cqm_cla_get_unlock_lvl1(cqm_handle, cla_table, index, - count, pa); - else - ret_addr = cqm_cla_get_unlock_lvl2(cqm_handle, cla_table, index, - count, pa); - - return ret_addr; -} - -u8 *cqm_cla_get_lock(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa) -{ - u8 *ret_addr = NULL; - - mutex_lock(&cla_table->lock); - - ret_addr = cqm_cla_get_unlock(cqm_handle, cla_table, index, count, pa); - - mutex_unlock(&cla_table->lock); - - return ret_addr; -} - -void cqm_cla_put(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count) -{ - struct cqm_buf *cla_z_buf = &cla_table->cla_z_buf; - struct cqm_buf *cla_y_buf = &cla_table->cla_y_buf; - struct cqm_buf *cla_x_buf = &cla_table->cla_x_buf; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf_list *buf_node_z = NULL; - struct cqm_buf_list *buf_node_y = NULL; - struct cqm_buf_list *buf_node_x = NULL; - u32 x_index = 0; - u32 y_index = 0; - u32 trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); - u64 tmp; - - /* The buffer is applied statically, and the reference counting - * does not need to be controlled. - */ - if (cla_table->alloc_static) - return; - - mutex_lock(&cla_table->lock); - - if (cla_table->cla_lvl == CQM_CLA_LVL_1) { - y_index = index >> (cla_table->z + 1); - - if (y_index >= cla_z_buf->buf_number) { - cqm_err(handle->dev_hdl, - "Cla put: index exceeds buf_number, y_index %u, z_buf_number %u\n", - y_index, cla_z_buf->buf_number); - cqm_err(handle->dev_hdl, - "Cla put: cla_table->type=%u\n", - cla_table->type); - mutex_unlock(&cla_table->lock); - return; - } - - buf_node_z = &cla_z_buf->buf_list[y_index]; - buf_node_y = cla_y_buf->buf_list; - - /* When the value of reference counting on the z node page is 0, - * the z node page is released. - */ - buf_node_z->refcount -= count; - if (buf_node_z->refcount == 0) - /* The cache invalid is not required for the Z node. 
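The lookup above and cqm_cla_put() below form a get/put pair around each child trunk: the first get maps the page, every get adds count references, and the matching put frees the page once the count returns to zero. A minimal standalone model of that lifecycle:

/* Minimal model of the buf_node refcounting in cqm_cla_get_*() and
 * cqm_cla_put(): pages appear on first use and disappear with the
 * last reference. */
#include <assert.h>
#include <stdbool.h>

struct node { bool mapped; unsigned int refcount; };

static void node_get(struct node *n, unsigned int count)
{
	if (!n->mapped)
		n->mapped = true;     /* stands in for cqm_cla_alloc() */
	n->refcount += count;
}

static void node_put(struct node *n, unsigned int count)
{
	n->refcount -= count;
	if (n->refcount == 0)
		n->mapped = false;    /* stands in for cqm_cla_free() */
}

int main(void)
{
	struct node z = { false, 0 };

	node_get(&z, 2);
	node_put(&z, 1);
	assert(z.mapped);
	node_put(&z, 1);
	assert(!z.mapped);
	return 0;
}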
*/ - cqm_cla_free(cqm_handle, cla_table, buf_node_y, - buf_node_z, y_index, - CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID); - } else if (cla_table->cla_lvl == CQM_CLA_LVL_2) { - y_index = (index >> (cla_table->z + 1)) & - ((1U << (cla_table->y - cla_table->z)) - 1); - x_index = index >> (cla_table->y + 1); - tmp = x_index * (trunk_size / sizeof(dma_addr_t)) + y_index; - - if (x_index >= cla_y_buf->buf_number || tmp >= cla_z_buf->buf_number) { - cqm_err(handle->dev_hdl, - "Cla put: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n", - x_index, y_index, cla_y_buf->buf_number, - cla_z_buf->buf_number); - mutex_unlock(&cla_table->lock); - return; - } - - buf_node_x = cla_x_buf->buf_list; - buf_node_y = &cla_y_buf->buf_list[x_index]; - buf_node_z = &cla_z_buf->buf_list[tmp]; - - /* When the value of reference counting on the z node page is 0, - * the z node page is released. - */ - buf_node_z->refcount -= count; - if (buf_node_z->refcount == 0) { - cqm_cla_free(cqm_handle, cla_table, buf_node_y, - buf_node_z, y_index, - CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID); - - /* When the value of reference counting on the y node - * page is 0, the y node page is released. - */ - buf_node_y->refcount--; - if (buf_node_y->refcount == 0) - /* Node y requires cache to be invalid. */ - cqm_cla_free(cqm_handle, cla_table, buf_node_x, buf_node_y, x_index, - CQM_CLA_DEL_GPA_WITH_CACHE_INVALID); - } - } - - mutex_unlock(&cla_table->lock); -} - -struct cqm_cla_table *cqm_cla_table_get(struct cqm_bat_table *bat_table, u32 entry_type) -{ - struct cqm_cla_table *cla_table = NULL; - u32 i = 0; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - if (entry_type == cla_table->type) - return cla_table; - } - - return NULL; -} diff --git a/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.h b/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.h deleted file mode 100644 index 85b060e7935cbbcf6d856072ad43487c1d95d304..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.h +++ /dev/null @@ -1,215 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_CQM_BAT_CLA_H -#define SPFC_CQM_BAT_CLA_H - -/* When the connection check is enabled, the maximum number of connections - * supported by the chip is 1M - 63, which cannot reach 1M - */ -#define CQM_BAT_MAX_CONN_NUM (0x100000 - 63) -#define CQM_BAT_MAX_CACHE_CONN_NUM (0x100000 - 63) - -#define CLA_TABLE_PAGE_ORDER 0 -#define CQM_4K_PAGE_ORDER 0 -#define CQM_4K_PAGE_SIZE 4096 - -#define CQM_BAT_ENTRY_MAX 16 -#define CQM_BAT_ENTRY_SIZE 16 - -#define CQM_BAT_SIZE_FT_PF 192 -#define CQM_BAT_SIZE_FT_VF 112 - -#define CQM_BAT_INDEX0 0 -#define CQM_BAT_INDEX1 1 -#define CQM_BAT_INDEX2 2 -#define CQM_BAT_INDEX3 3 -#define CQM_BAT_INDEX4 4 -#define CQM_BAT_INDEX5 5 -#define CQM_BAT_INDEX6 6 -#define CQM_BAT_INDEX7 7 -#define CQM_BAT_INDEX8 8 -#define CQM_BAT_INDEX9 9 -#define CQM_BAT_INDEX10 10 -#define CQM_BAT_INDEX11 11 -#define CQM_BAT_INDEX12 12 -#define CQM_BAT_INDEX13 13 -#define CQM_BAT_INDEX14 14 -#define CQM_BAT_INDEX15 15 - -enum cqm_bat_entry_type { - CQM_BAT_ENTRY_T_CFG = 0, - CQM_BAT_ENTRY_T_HASH = 1, - CQM_BAT_ENTRY_T_QPC = 2, - CQM_BAT_ENTRY_T_SCQC = 3, - CQM_BAT_ENTRY_T_SRQC = 4, - CQM_BAT_ENTRY_T_MPT = 5, - CQM_BAT_ENTRY_T_GID = 6, - CQM_BAT_ENTRY_T_LUN = 7, - CQM_BAT_ENTRY_T_TASKMAP = 8, - CQM_BAT_ENTRY_T_L3I = 9, - CQM_BAT_ENTRY_T_CHILDC = 10, - CQM_BAT_ENTRY_T_TIMER = 11, - CQM_BAT_ENTRY_T_XID2CID = 12, - CQM_BAT_ENTRY_T_REORDER = 13, - 
CQM_BAT_ENTRY_T_INVALID = 14, - CQM_BAT_ENTRY_T_MAX = 15, -}; - -/* CLA update mode */ -#define CQM_CLA_RECORD_NEW_GPA 0 -#define CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID 1 -#define CQM_CLA_DEL_GPA_WITH_CACHE_INVALID 2 - -#define CQM_CLA_LVL_0 0 -#define CQM_CLA_LVL_1 1 -#define CQM_CLA_LVL_2 2 - -#define CQM_MAX_INDEX_BIT 19 - -#define CQM_CHIP_CACHELINE 256 -#define CQM_CHIP_TIMER_CACHELINE 512 -#define CQM_OBJECT_256 256 -#define CQM_OBJECT_512 512 -#define CQM_OBJECT_1024 1024 -#define CQM_CHIP_GPA_MASK 0x1ffffffffffffff -#define CQM_CHIP_GPA_HIMASK 0x1ffffff -#define CQM_CHIP_GPA_LOMASK 0xffffffff -#define CQM_CHIP_GPA_HSHIFT 32 - -/* Aligns with 64 buckets and shifts rightward by 6 bits */ -#define CQM_HASH_NUMBER_UNIT 6 - -struct cqm_cla_table { - u32 type; - u32 max_buffer_size; - u32 obj_num; - bool alloc_static; /* Whether the buffer is statically allocated */ - u32 cla_lvl; - u32 cacheline_x; /* x value calculated based on cacheline, used by the chip */ - u32 cacheline_y; /* y value calculated based on cacheline, used by the chip */ - u32 cacheline_z; /* z value calculated based on cacheline, used by the chip */ - u32 x; /* x value calculated based on obj_size, used by software */ - u32 y; /* y value calculated based on obj_size, used by software */ - u32 z; /* z value calculated based on obj_size, used by software */ - struct cqm_buf cla_x_buf; - struct cqm_buf cla_y_buf; - struct cqm_buf cla_z_buf; - u32 trunk_order; /* A continuous physical page contains 2^order pages */ - u32 obj_size; - struct mutex lock; /* Lock for cla buffer allocation and free */ - - struct cqm_bitmap bitmap; - - struct cqm_object_table obj_table; /* Mapping table between indexes and objects */ -}; - -struct cqm_bat_entry_cfg { - u32 cur_conn_num_h_4 : 4; - u32 rsv1 : 4; - u32 max_conn_num : 20; - u32 rsv2 : 4; - - u32 max_conn_cache : 10; - u32 rsv3 : 6; - u32 cur_conn_num_l_16 : 16; - - u32 bloom_filter_addr : 16; - u32 cur_conn_cache : 10; - u32 rsv4 : 6; - - u32 bucket_num : 16; - u32 bloom_filter_len : 16; -}; - -#define CQM_BAT_NO_BYPASS_CACHE 0 -#define CQM_BAT_BYPASS_CACHE 1 - -#define CQM_BAT_ENTRY_SIZE_256 0 -#define CQM_BAT_ENTRY_SIZE_512 1 -#define CQM_BAT_ENTRY_SIZE_1024 2 - -struct cqm_bat_entry_standerd { - u32 entry_size : 2; - u32 rsv1 : 6; - u32 max_number : 20; - u32 rsv2 : 4; - - u32 cla_gpa_h : 32; - - u32 cla_gpa_l : 32; - - u32 rsv3 : 8; - u32 z : 5; - u32 y : 5; - u32 x : 5; - u32 rsv24 : 1; - u32 bypass : 1; - u32 cla_level : 2; - u32 rsv5 : 5; -}; - -struct cqm_bat_entry_vf2pf { - u32 cla_gpa_h : 25; - u32 pf_id : 5; - u32 fake_vf_en : 1; - u32 acs_spu_en : 1; -}; - -#define CQM_BAT_ENTRY_TASKMAP_NUM 4 -struct cqm_bat_entry_taskmap_addr { - u32 gpa_h; - u32 gpa_l; -}; - -struct cqm_bat_entry_taskmap { - struct cqm_bat_entry_taskmap_addr addr[CQM_BAT_ENTRY_TASKMAP_NUM]; -}; - -struct cqm_bat_table { - u32 bat_entry_type[CQM_BAT_ENTRY_MAX]; - u8 bat[CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE]; - struct cqm_cla_table entry[CQM_BAT_ENTRY_MAX]; - /* In LB mode 1, the timer needs to be configured in 4 SMFs, - * and the GPAs must be different and independent. 
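The CQM_BAT_ENTRY_SIZE_256/512/1024 encodings above (0/1/2) suggest that the 2-bit entry_size field of struct cqm_bat_entry_standerd encodes the object size as 256 << entry_size; treat that as an inference from the defines rather than a documented hardware fact. A standalone decode helper under that assumption:

/* Assumed decode of the 2-bit entry_size field: 256 << entry_size. */
#include <assert.h>
#include <stdint.h>

static uint32_t bat_entry_size_bytes(uint32_t entry_size_field)
{
	return 256U << entry_size_field;
}

int main(void)
{
	assert(bat_entry_size_bytes(0) == 256);  /* CQM_BAT_ENTRY_SIZE_256 */
	assert(bat_entry_size_bytes(1) == 512);  /* CQM_BAT_ENTRY_SIZE_512 */
	assert(bat_entry_size_bytes(2) == 1024); /* CQM_BAT_ENTRY_SIZE_1024 */
	return 0;
}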
- */ - struct cqm_cla_table timer_entry[4]; - u32 bat_size; -}; - -#define CQM_BAT_MAX_SIZE 256 -struct cqm_cmdq_bat_update { - u32 offset; - u32 byte_len; - u8 data[CQM_BAT_MAX_SIZE]; - u32 smf_id; - u32 func_id; -}; - -struct cqm_cla_update_cmd { - /* Gpa address to be updated */ - u32 gpa_h; - u32 gpa_l; - - /* Updated Value */ - u32 value_h; - u32 value_l; - - u32 smf_id; - u32 func_id; -}; - -s32 cqm_bat_init(struct cqm_handle *cqm_handle); -void cqm_bat_uninit(struct cqm_handle *cqm_handle); -s32 cqm_cla_init(struct cqm_handle *cqm_handle); -void cqm_cla_uninit(struct cqm_handle *cqm_handle, u32 entry_numb); -u8 *cqm_cla_get_unlock(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa); -u8 *cqm_cla_get_lock(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa); -void cqm_cla_put(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count); -struct cqm_cla_table *cqm_cla_table_get(struct cqm_bat_table *bat_table, u32 entry_type); -u32 cqm_funcid2smfid(struct cqm_handle *cqm_handle); - -#endif /* SPFC_CQM_BAT_CLA_H */ diff --git a/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.c b/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.c deleted file mode 100644 index 21100e8db8f46f288be55fd0bd535be9e5c8b293..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.c +++ /dev/null @@ -1,885 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" - -#include "spfc_cqm_object.h" -#include "spfc_cqm_bitmap_table.h" -#include "spfc_cqm_bat_cla.h" -#include "spfc_cqm_main.h" - -#define common_section - -void cqm_swab64(u8 *addr, u32 cnt) -{ - u64 *temp = (u64 *)addr; - u64 value = 0; - u32 i; - - for (i = 0; i < cnt; i++) { - value = __swab64(*temp); - *temp = value; - temp++; - } -} - -void cqm_swab32(u8 *addr, u32 cnt) -{ - u32 *temp = (u32 *)addr; - u32 value = 0; - u32 i; - - for (i = 0; i < cnt; i++) { - value = __swab32(*temp); - *temp = value; - temp++; - } -} - -s32 cqm_shift(u32 data) -{ - s32 shift = -1; - - do { - data >>= 1; - shift++; - } while (data); - - return shift; -} - -bool cqm_check_align(u32 data) -{ - if (data == 0) - return false; - - do { - /* When the value can be exactly divided by 2, - * the value of data is shifted right by one bit, that is, - * divided by 2. - */ - if ((data & 0x1) == 0) - data >>= 1; - /* If the value cannot be divisible by 2, the value is - * not 2^n-aligned and false is returned. - */ - else - return false; - } while (data != 1); - - return true; -} - -void *cqm_kmalloc_align(size_t size, gfp_t flags, u16 align_order) -{ - void *orig_addr = NULL; - void *align_addr = NULL; - void *index_addr = NULL; - - orig_addr = kmalloc(size + ((u64)1 << align_order) + sizeof(void *), - flags); - if (!orig_addr) - return NULL; - - index_addr = (void *)((char *)orig_addr + sizeof(void *)); - align_addr = - (void *)((((u64)index_addr + ((u64)1 << align_order) - 1) >> - align_order) << align_order); - - /* Record the original memory address for memory release. */ - index_addr = (void *)((char *)align_addr - sizeof(void *)); - *(void **)index_addr = orig_addr; - - return align_addr; -} - -void cqm_kfree_align(void *addr) -{ - void *index_addr = NULL; - - /* Release the original memory address. 
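cqm_kmalloc_align() above over-allocates, rounds the address up to the requested alignment, and stashes the original pointer in the word just below the aligned block so the free path can recover it. A userspace model of the same trick using malloc/free:

/* Userspace model of the cqm_kmalloc_align()/cqm_kfree_align() trick. */
#include <stdint.h>
#include <stdlib.h>

static void *malloc_align(size_t size, unsigned int align_order)
{
	size_t align = (size_t)1 << align_order;
	void *orig = malloc(size + align + sizeof(void *));
	uintptr_t p;

	if (!orig)
		return NULL;
	p = ((uintptr_t)orig + sizeof(void *) + align - 1) & ~(align - 1);
	((void **)p)[-1] = orig;      /* record for the matching free */
	return (void *)p;
}

static void free_align(void *addr)
{
	free(((void **)addr)[-1]);    /* release the original address */
}

int main(void)
{
	void *p = malloc_align(100, 8); /* 256-byte aligned block */

	if (p)
		free_align(p);
	return 0;
}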
*/ - index_addr = (void *)((char *)addr - sizeof(void *)); - - kfree(*(void **)index_addr); -} - -void cqm_write_lock(rwlock_t *lock, bool bh) -{ - if (bh) - write_lock_bh(lock); - else - write_lock(lock); -} - -void cqm_write_unlock(rwlock_t *lock, bool bh) -{ - if (bh) - write_unlock_bh(lock); - else - write_unlock(lock); -} - -void cqm_read_lock(rwlock_t *lock, bool bh) -{ - if (bh) - read_lock_bh(lock); - else - read_lock(lock); -} - -void cqm_read_unlock(rwlock_t *lock, bool bh) -{ - if (bh) - read_unlock_bh(lock); - else - read_unlock(lock); -} - -s32 cqm_buf_alloc_direct(struct cqm_handle *cqm_handle, struct cqm_buf *buf, bool direct) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct page **pages = NULL; - u32 i, j, order; - - order = get_order(buf->buf_size); - - if (!direct) { - buf->direct.va = NULL; - return CQM_SUCCESS; - } - - pages = vmalloc(sizeof(struct page *) * buf->page_number); - if (!pages) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(pages)); - return CQM_FAIL; - } - - for (i = 0; i < buf->buf_number; i++) { - for (j = 0; j < ((u32)1 << order); j++) - pages[(ulong)(unsigned int)((i << order) + j)] = - (void *)virt_to_page((u8 *)(buf->buf_list[i].va) + - (PAGE_SIZE * j)); - } - - buf->direct.va = vmap(pages, buf->page_number, VM_MAP, PAGE_KERNEL); - vfree(pages); - if (!buf->direct.va) { - cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf->direct.va)); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_buf_alloc_page(struct cqm_handle *cqm_handle, struct cqm_buf *buf) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct page *newpage = NULL; - u32 order; - void *va = NULL; - s32 i, node; - - order = get_order(buf->buf_size); - /* Page for applying for each buffer for non-ovs */ - if (handle->board_info.service_mode != 0) { - for (i = 0; i < (s32)buf->buf_number; i++) { - va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - order); - if (!va) { - cqm_err(handle->dev_hdl, - CQM_ALLOC_FAIL(buf_page)); - break; - } - /* Initialize the page after the page is applied for. - * If hash entries are involved, the initialization - * value must be 0. - */ - memset(va, 0, buf->buf_size); - buf->buf_list[i].va = va; - } - } else { - node = dev_to_node(handle->dev_hdl); - for (i = 0; i < (s32)buf->buf_number; i++) { - newpage = alloc_pages_node(node, - GFP_KERNEL | __GFP_ZERO, - order); - if (!newpage) { - cqm_err(handle->dev_hdl, - CQM_ALLOC_FAIL(buf_page)); - break; - } - va = (void *)page_address(newpage); - /* Initialize the page after the page is applied for. - * If hash entries are involved, the initialization - * value must be 0. 
- */ - memset(va, 0, buf->buf_size); - buf->buf_list[i].va = va; - } - } - - if (i != buf->buf_number) { - i--; - for (; i >= 0; i--) { - free_pages((ulong)(buf->buf_list[i].va), order); - buf->buf_list[i].va = NULL; - } - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_buf_alloc_map(struct cqm_handle *cqm_handle, struct cqm_buf *buf) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct pci_dev *dev = cqm_handle->dev; - void *va = NULL; - s32 i; - - for (i = 0; i < (s32)buf->buf_number; i++) { - va = buf->buf_list[i].va; - buf->buf_list[i].pa = pci_map_single(dev, va, buf->buf_size, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(dev, buf->buf_list[i].pa)) { - cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_list)); - break; - } - } - - if (i != buf->buf_number) { - i--; - for (; i >= 0; i--) - pci_unmap_single(dev, buf->buf_list[i].pa, - buf->buf_size, PCI_DMA_BIDIRECTIONAL); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_buf_alloc(struct cqm_handle *cqm_handle, struct cqm_buf *buf, bool direct) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct pci_dev *dev = cqm_handle->dev; - u32 order; - s32 i; - - order = get_order(buf->buf_size); - - /* Applying for the buffer list descriptor space */ - buf->buf_list = vmalloc(buf->buf_number * sizeof(struct cqm_buf_list)); - CQM_PTR_CHECK_RET(buf->buf_list, CQM_FAIL, - CQM_ALLOC_FAIL(linux_buf_list)); - memset(buf->buf_list, 0, buf->buf_number * sizeof(struct cqm_buf_list)); - - /* Page for applying for each buffer */ - if (cqm_buf_alloc_page(cqm_handle, buf) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(linux_cqm_buf_alloc_page)); - goto err1; - } - - /* PCI mapping of the buffer */ - if (cqm_buf_alloc_map(cqm_handle, buf) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(linux_cqm_buf_alloc_map)); - goto err2; - } - - /* direct remapping */ - if (cqm_buf_alloc_direct(cqm_handle, buf, direct) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_buf_alloc_direct)); - goto err3; - } - - return CQM_SUCCESS; - -err3: - for (i = 0; i < (s32)buf->buf_number; i++) - pci_unmap_single(dev, buf->buf_list[i].pa, buf->buf_size, - PCI_DMA_BIDIRECTIONAL); -err2: - for (i = 0; i < (s32)buf->buf_number; i++) { - free_pages((ulong)(buf->buf_list[i].va), order); - buf->buf_list[i].va = NULL; - } -err1: - vfree(buf->buf_list); - buf->buf_list = NULL; - return CQM_FAIL; -} - -void cqm_buf_free(struct cqm_buf *buf, struct pci_dev *dev) -{ - u32 order; - s32 i; - - order = get_order(buf->buf_size); - - if (buf->direct.va) { - vunmap(buf->direct.va); - buf->direct.va = NULL; - } - - if (buf->buf_list) { - for (i = 0; i < (s32)(buf->buf_number); i++) { - if (buf->buf_list[i].va) { - pci_unmap_single(dev, buf->buf_list[i].pa, - buf->buf_size, - PCI_DMA_BIDIRECTIONAL); - - free_pages((ulong)(buf->buf_list[i].va), order); - buf->buf_list[i].va = NULL; - } - } - - vfree(buf->buf_list); - buf->buf_list = NULL; - } -} - -s32 cqm_cla_cache_invalid_cmd(struct cqm_handle *cqm_handle, struct cqm_cmd_buf *buf_in, - struct cqm_cla_cache_invalid_cmd *cmd) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cla_cache_invalid_cmd *cla_cache_invalid_cmd = NULL; - s32 ret; - - cla_cache_invalid_cmd = (struct cqm_cla_cache_invalid_cmd *)(buf_in->buf); - cla_cache_invalid_cmd->gpa_h = cmd->gpa_h; - cla_cache_invalid_cmd->gpa_l = cmd->gpa_l; - cla_cache_invalid_cmd->cache_size = cmd->cache_size; - cla_cache_invalid_cmd->smf_id = cmd->smf_id; - cla_cache_invalid_cmd->func_id = 
cmd->func_id; - - cqm_swab32((u8 *)cla_cache_invalid_cmd, - /* shift right by 2 bits to get the length in dw (4B) */ - (sizeof(struct cqm_cla_cache_invalid_cmd) >> 2)); - - /* Send the cmdq command. */ - ret = cqm3_send_cmd_box((void *)(cqm_handle->ex_handle), CQM_MOD_CQM, - CQM_CMD_T_CLA_CACHE_INVALID, buf_in, NULL, NULL, - CQM_CMD_TIMEOUT, SPHW_CHANNEL_DEFAULT); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_send_cmd_box)); - cqm_err(handle->dev_hdl, - "Cla cache invalid: cqm3_send_cmd_box_ret=%d\n", - ret); - cqm_err(handle->dev_hdl, - "Cla cache invalid: cla_cache_invalid_cmd: 0x%x 0x%x 0x%x\n", - cmd->gpa_h, cmd->gpa_l, cmd->cache_size); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_cache_invalid(struct cqm_handle *cqm_handle, dma_addr_t gpa, u32 cache_size) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cmd_buf *buf_in = NULL; - struct cqm_cla_cache_invalid_cmd cmd; - s32 ret = CQM_FAIL; - u32 i; - - buf_in = cqm3_cmd_alloc((void *)(cqm_handle->ex_handle)); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); - buf_in->size = sizeof(struct cqm_cla_cache_invalid_cmd); - - /* Fill command and convert it to big endian */ - cmd.cache_size = cache_size; - cmd.gpa_h = CQM_ADDR_HI(gpa); - cmd.gpa_l = CQM_ADDR_LW(gpa); - - /* In non-fake mode, set func_id to 0xffff. */ - cmd.func_id = 0xffff; - - /* Mode 0 is hashed to 4 SMF engines (excluding PPF) by func ID. */ - if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && - cqm_handle->func_attribute.func_type != CQM_PPF)) { - cmd.smf_id = cqm_funcid2smfid(cqm_handle); - ret = cqm_cla_cache_invalid_cmd(cqm_handle, buf_in, &cmd); - } - /* Modes 1/2 are allocated to 4 SMF engines by flow. Therefore, - * one function needs to be allocated to 4 SMF engines. - */ - /* The PPF in mode 0 needs to be configured on 4 engines, - * and the timer resources need to be shared by the 4 engines. - */ - else if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || - cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2 || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && - cqm_handle->func_attribute.func_type == CQM_PPF)) { - for (i = 0; i < CQM_LB_SMF_MAX; i++) { - /* The smf_pg variable stores the currently enabled SMF engines. */ - if (cqm_handle->func_capability.smf_pg & (1U << i)) { - cmd.smf_id = i; - ret = cqm_cla_cache_invalid_cmd(cqm_handle, - buf_in, &cmd); - if (ret != CQM_SUCCESS) - goto out; - } - } - } else { - cqm_err(handle->dev_hdl, "Cla cache invalid: unsupported lb mode=%u\n", - cqm_handle->func_capability.lb_mode); - ret = CQM_FAIL; - } - -out: - cqm3_cmd_free((void *)(cqm_handle->ex_handle), buf_in); - return ret; -} - -static void free_cache_inv(struct cqm_handle *cqm_handle, struct cqm_buf *buf, - s32 *inv_flag) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 order; - s32 i; - - order = get_order(buf->buf_size); - - if (!handle->chip_present_flag) - return; - - if (!buf->buf_list) - return; - - for (i = 0; i < (s32)(buf->buf_number); i++) { - if (!buf->buf_list[i].va) - continue; - - if (*inv_flag != CQM_SUCCESS) - continue; - - /* In the Pangea environment, if the cmdq times out, - * no subsequent message is sent.
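The inv_flag parameter makes the failure sticky: once one invalidation times out, the loop keeps freeing pages but stops issuing cmdq messages. A standalone model of that pattern (the simulated failure at page 2 is arbitrary):

/* Standalone model of the sticky inv_flag pattern in free_cache_inv(). */
#include <stdio.h>

static int invalidate(int page)
{
	return page == 2 ? -1 : 0; /* pretend page 2 times out */
}

int main(void)
{
	int inv_flag = 0, i;

	for (i = 0; i < 5; i++) {
		if (inv_flag != 0)
			continue;  /* keep freeing, stop talking to the chip */
		inv_flag = invalidate(i);
		if (inv_flag != 0)
			printf("invalidate failed at page %d\n", i);
	}
	return 0;
}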
- */ - *inv_flag = cqm_cla_cache_invalid(cqm_handle, buf->buf_list[i].pa, - (u32)(PAGE_SIZE << order)); - if (*inv_flag != CQM_SUCCESS) - cqm_err(handle->dev_hdl, - "Buffer free: fail to invalid buf_list pa cache, inv_flag=%d\n", - *inv_flag); - } -} - -void cqm_buf_free_cache_inv(struct cqm_handle *cqm_handle, struct cqm_buf *buf, - s32 *inv_flag) -{ - /* Send a command to the chip to kick out the cache. */ - free_cache_inv(cqm_handle, buf, inv_flag); - - /* Clear host resources */ - cqm_buf_free(buf, cqm_handle->dev); -} - -#define bitmap_section - -s32 cqm_single_bitmap_init(struct cqm_bitmap *bitmap) -{ - u32 bit_number; - - spin_lock_init(&bitmap->lock); - - /* Max_num of the bitmap is 8-aligned and then - * shifted rightward by 3 bits to obtain the number of bytes required. - */ - bit_number = (ALIGN(bitmap->max_num, CQM_NUM_BIT_BYTE) >> CQM_BYTE_BIT_SHIFT); - bitmap->table = vmalloc(bit_number); - CQM_PTR_CHECK_RET(bitmap->table, CQM_FAIL, CQM_ALLOC_FAIL(bitmap->table)); - memset(bitmap->table, 0, bit_number); - - return CQM_SUCCESS; -} - -s32 cqm_bitmap_init(struct cqm_handle *cqm_handle) -{ - struct cqm_func_capability *capability = &cqm_handle->func_capability; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cla_table *cla_table = NULL; - struct cqm_bitmap *bitmap = NULL; - s32 ret = CQM_SUCCESS; - u32 i; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - if (cla_table->obj_num == 0) { - cqm_info(handle->dev_hdl, - "Cla alloc: cla_type %u, obj_num=0, don't init bitmap\n", - cla_table->type); - continue; - } - - bitmap = &cla_table->bitmap; - - switch (cla_table->type) { - case CQM_BAT_ENTRY_T_QPC: - bitmap->max_num = capability->qpc_number; - bitmap->reserved_top = capability->qpc_reserved; - bitmap->last = capability->qpc_reserved; - cqm_info(handle->dev_hdl, - "Bitmap init: cla_table_type=%u, max_num=0x%x\n", - cla_table->type, bitmap->max_num); - ret = cqm_single_bitmap_init(bitmap); - break; - case CQM_BAT_ENTRY_T_MPT: - bitmap->max_num = capability->mpt_number; - bitmap->reserved_top = capability->mpt_reserved; - bitmap->last = capability->mpt_reserved; - cqm_info(handle->dev_hdl, - "Bitmap init: cla_table_type=%u, max_num=0x%x\n", - cla_table->type, bitmap->max_num); - ret = cqm_single_bitmap_init(bitmap); - break; - case CQM_BAT_ENTRY_T_SCQC: - bitmap->max_num = capability->scqc_number; - bitmap->reserved_top = capability->scq_reserved; - bitmap->last = capability->scq_reserved; - cqm_info(handle->dev_hdl, - "Bitmap init: cla_table_type=%u, max_num=0x%x\n", - cla_table->type, bitmap->max_num); - ret = cqm_single_bitmap_init(bitmap); - break; - case CQM_BAT_ENTRY_T_SRQC: - bitmap->max_num = capability->srqc_number; - bitmap->reserved_top = capability->srq_reserved; - bitmap->last = capability->srq_reserved; - cqm_info(handle->dev_hdl, - "Bitmap init: cla_table_type=%u, max_num=0x%x\n", - cla_table->type, bitmap->max_num); - ret = cqm_single_bitmap_init(bitmap); - break; - default: - break; - } - - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - "Bitmap init: failed to init cla_table_type=%u, obj_num=0x%x\n", - cla_table->type, cla_table->obj_num); - goto err; - } - } - - return CQM_SUCCESS; - -err: - cqm_bitmap_uninit(cqm_handle); - return CQM_FAIL; -} - -void cqm_bitmap_uninit(struct cqm_handle *cqm_handle) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = NULL; - struct cqm_bitmap *bitmap = NULL; - u32 i; - 
- for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - bitmap = &cla_table->bitmap; - if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { - if (bitmap->table) { - vfree(bitmap->table); - bitmap->table = NULL; - } - } - } -} - -u32 cqm_bitmap_check_range(const ulong *table, u32 step, u32 max_num, u32 begin, - u32 count) -{ - u32 end = (begin + (count - 1)); - u32 i; - - /* Single-bit check is not performed. */ - if (count == 1) - return begin; - - /* The end value exceeds the threshold. */ - if (end >= max_num) - return max_num; - - /* Bit check, the next bit is returned when a non-zero bit is found. */ - for (i = (begin + 1); i <= end; i++) { - if (test_bit((s32)i, table)) - return i + 1; - } - - /* Check whether it's in different steps. */ - if ((begin & (~(step - 1))) != (end & (~(step - 1)))) - return (end & (~(step - 1))); - - /* If the check succeeds, begin is returned. */ - return begin; -} - -void cqm_bitmap_find(struct cqm_bitmap *bitmap, u32 *index, u32 last, u32 step, u32 count) -{ - u32 max_num = bitmap->max_num; - ulong *table = bitmap->table; - - do { - *index = (u32)find_next_zero_bit(table, max_num, last); - if (*index < max_num) - last = cqm_bitmap_check_range(table, step, max_num, - *index, count); - else - break; - } while (last != *index); -} - -u32 cqm_bitmap_alloc(struct cqm_bitmap *bitmap, u32 step, u32 count, bool update_last) -{ - u32 index = 0; - u32 max_num = bitmap->max_num; - u32 last = bitmap->last; - ulong *table = bitmap->table; - u32 i; - - spin_lock(&bitmap->lock); - - /* Search for an idle bit from the last position. */ - cqm_bitmap_find(bitmap, &index, last, step, count); - - /* The preceding search fails. Search for an idle bit - * from the beginning. - */ - if (index >= max_num) { - last = bitmap->reserved_top; - cqm_bitmap_find(bitmap, &index, last, step, count); - } - - /* Set the found bit to 1 and reset last. 
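- * When update_last is set, the next allocation resumes scanning just
- * after this run; once last reaches max_num it wraps back to
- * reserved_top, so indexes below reserved_top are handed out only by
- * cqm_bitmap_alloc_reserved(). Illustrative caller pattern (compare
- * cqm_qpc_mpt_bitmap_alloc()):
- *   index = cqm_bitmap_alloc(bitmap, 1U << (cla_table->z + 1),
- *                            count, func_cap->xid_alloc_mode);
- *   if (index >= bitmap->max_num)
- *       return CQM_FAIL;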
*/ - if (index < max_num) { - for (i = index; i < (index + count); i++) - set_bit(i, table); - - if (update_last) { - bitmap->last = (index + count); - if (bitmap->last >= bitmap->max_num) - bitmap->last = bitmap->reserved_top; - } - } - - spin_unlock(&bitmap->lock); - return index; -} - -u32 cqm_bitmap_alloc_reserved(struct cqm_bitmap *bitmap, u32 count, u32 index) -{ - ulong *table = bitmap->table; - u32 ret_index; - - if (index >= bitmap->reserved_top || index >= bitmap->max_num || count != 1) - return CQM_INDEX_INVALID; - - spin_lock(&bitmap->lock); - - if (test_bit((s32)index, table)) { - ret_index = CQM_INDEX_INVALID; - } else { - set_bit(index, table); - ret_index = index; - } - - spin_unlock(&bitmap->lock); - return ret_index; -} - -void cqm_bitmap_free(struct cqm_bitmap *bitmap, u32 index, u32 count) -{ - u32 i; - - spin_lock(&bitmap->lock); - - for (i = index; i < (index + count); i++) - clear_bit((s32)i, bitmap->table); - - spin_unlock(&bitmap->lock); -} - -#define obj_table_section -s32 cqm_single_object_table_init(struct cqm_object_table *obj_table) -{ - rwlock_init(&obj_table->lock); - - obj_table->table = vmalloc(obj_table->max_num * sizeof(void *)); - CQM_PTR_CHECK_RET(obj_table->table, CQM_FAIL, CQM_ALLOC_FAIL(table)); - memset(obj_table->table, 0, obj_table->max_num * sizeof(void *)); - return CQM_SUCCESS; -} - -s32 cqm_object_table_init(struct cqm_handle *cqm_handle) -{ - struct cqm_func_capability *capability = &cqm_handle->func_capability; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_object_table *obj_table = NULL; - struct cqm_cla_table *cla_table = NULL; - s32 ret = CQM_SUCCESS; - u32 i; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - if (cla_table->obj_num == 0) { - cqm_info(handle->dev_hdl, - "Obj table init: cla_table_type %u, obj_num=0, don't init obj table\n", - cla_table->type); - continue; - } - - obj_table = &cla_table->obj_table; - - switch (cla_table->type) { - case CQM_BAT_ENTRY_T_QPC: - obj_table->max_num = capability->qpc_number; - ret = cqm_single_object_table_init(obj_table); - break; - case CQM_BAT_ENTRY_T_MPT: - obj_table->max_num = capability->mpt_number; - ret = cqm_single_object_table_init(obj_table); - break; - case CQM_BAT_ENTRY_T_SCQC: - obj_table->max_num = capability->scqc_number; - ret = cqm_single_object_table_init(obj_table); - break; - case CQM_BAT_ENTRY_T_SRQC: - obj_table->max_num = capability->srqc_number; - ret = cqm_single_object_table_init(obj_table); - break; - default: - break; - } - - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - "Obj table init: failed to init cla_table_type=%u, obj_num=0x%x\n", - cla_table->type, cla_table->obj_num); - goto err; - } - } - - return CQM_SUCCESS; - -err: - cqm_object_table_uninit(cqm_handle); - return CQM_FAIL; -} - -void cqm_object_table_uninit(struct cqm_handle *cqm_handle) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_object_table *obj_table = NULL; - struct cqm_cla_table *cla_table = NULL; - u32 i; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - obj_table = &cla_table->obj_table; - if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { - if (obj_table->table) { - vfree(obj_table->table); - obj_table->table = NULL; - } - } - } -} - -s32 cqm_object_table_insert(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, struct cqm_object *obj, bool bh) -{ - struct sphw_hwdev *handle = 
cqm_handle->ex_handle; - - if (index >= object_table->max_num) { - cqm_err(handle->dev_hdl, - "Obj table insert: index 0x%x exceeds max_num 0x%x\n", - index, object_table->max_num); - return CQM_FAIL; - } - - cqm_write_lock(&object_table->lock, bh); - - if (!object_table->table[index]) { - object_table->table[index] = obj; - cqm_write_unlock(&object_table->lock, bh); - return CQM_SUCCESS; - } - - cqm_write_unlock(&object_table->lock, bh); - cqm_err(handle->dev_hdl, - "Obj table insert: object_table->table[0x%x] has been inserted\n", - index); - - return CQM_FAIL; -} - -void cqm_object_table_remove(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, const struct cqm_object *obj, bool bh) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - - if (index >= object_table->max_num) { - cqm_err(handle->dev_hdl, - "Obj table remove: index 0x%x exceeds max_num 0x%x\n", - index, object_table->max_num); - return; - } - - cqm_write_lock(&object_table->lock, bh); - - if (object_table->table[index] && object_table->table[index] == obj) - object_table->table[index] = NULL; - else - cqm_err(handle->dev_hdl, - "Obj table remove: object_table->table[0x%x] has been removed\n", - index); - - cqm_write_unlock(&object_table->lock, bh); -} - -struct cqm_object *cqm_object_table_get(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, bool bh) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_object *obj = NULL; - - if (index >= object_table->max_num) { - cqm_err(handle->dev_hdl, - "Obj table get: index 0x%x exceeds max_num 0x%x\n", - index, object_table->max_num); - return NULL; - } - - cqm_read_lock(&object_table->lock, bh); - - obj = object_table->table[index]; - if (obj) - atomic_inc(&obj->refcount); - - cqm_read_unlock(&object_table->lock, bh); - - return obj; -} diff --git a/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.h b/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.h deleted file mode 100644 index 5ae554eac54ae69c98f84b5cc89d74e2f11d93f6..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.h +++ /dev/null @@ -1,65 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_CQM_BITMAP_TABLE_H -#define SPFC_CQM_BITMAP_TABLE_H - -struct cqm_bitmap { - ulong *table; - u32 max_num; - u32 last; - u32 reserved_top; /* reserved index */ - spinlock_t lock; -}; - -struct cqm_object_table { - /* Now is big array. Later will be optimized as a red-black tree. 
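- * A flat array costs max_num * sizeof(void *) bytes but keeps the
- * index-to-object lookup O(1) under the read/write lock, which also
- * has to work from bottom-half context (see the bh flag).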
*/ - struct cqm_object **table; - u32 max_num; - rwlock_t lock; -}; - -struct cqm_cla_cache_invalid_cmd { - u32 gpa_h; - u32 gpa_l; - - u32 cache_size; /* CLA cache size=4096B */ - - u32 smf_id; - u32 func_id; -}; - -struct cqm_handle; - -s32 cqm_bitmap_init(struct cqm_handle *cqm_handle); -void cqm_bitmap_uninit(struct cqm_handle *cqm_handle); -u32 cqm_bitmap_alloc(struct cqm_bitmap *bitmap, u32 step, u32 count, bool update_last); -u32 cqm_bitmap_alloc_reserved(struct cqm_bitmap *bitmap, u32 count, u32 index); -void cqm_bitmap_free(struct cqm_bitmap *bitmap, u32 index, u32 count); -s32 cqm_object_table_init(struct cqm_handle *cqm_handle); -void cqm_object_table_uninit(struct cqm_handle *cqm_handle); -s32 cqm_object_table_insert(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, struct cqm_object *obj, bool bh); -void cqm_object_table_remove(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, const struct cqm_object *obj, bool bh); -struct cqm_object *cqm_object_table_get(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, bool bh); - -void cqm_swab64(u8 *addr, u32 cnt); -void cqm_swab32(u8 *addr, u32 cnt); -bool cqm_check_align(u32 data); -s32 cqm_shift(u32 data); -s32 cqm_buf_alloc(struct cqm_handle *cqm_handle, struct cqm_buf *buf, bool direct); -s32 cqm_buf_alloc_direct(struct cqm_handle *cqm_handle, struct cqm_buf *buf, bool direct); -void cqm_buf_free(struct cqm_buf *buf, struct pci_dev *dev); -void cqm_buf_free_cache_inv(struct cqm_handle *cqm_handle, struct cqm_buf *buf, - s32 *inv_flag); -s32 cqm_cla_cache_invalid(struct cqm_handle *cqm_handle, dma_addr_t gpa, - u32 cache_size); -void *cqm_kmalloc_align(size_t size, gfp_t flags, u16 align_order); -void cqm_kfree_align(void *addr); - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_cqm_main.c b/drivers/scsi/spfc/hw/spfc_cqm_main.c deleted file mode 100644 index 52cc2c7838e99a75dd5b6426a88cdbe63cebdc0e..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_main.c +++ /dev/null @@ -1,987 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_hw_cfg.h" -#include "spfc_cqm_main.h" - -s32 cqm3_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - s32 ret; - - CQM_PTR_CHECK_RET(ex_handle, CQM_FAIL, CQM_PTR_NULL(ex_handle)); - - cqm_handle = kmalloc(sizeof(*cqm_handle), GFP_KERNEL | __GFP_ZERO); - CQM_PTR_CHECK_RET(cqm_handle, CQM_FAIL, CQM_ALLOC_FAIL(cqm_handle)); - - /* Clear the memory to prevent other systems from - * not clearing the memory. - */ - memset(cqm_handle, 0, sizeof(struct cqm_handle)); - - cqm_handle->ex_handle = handle; - cqm_handle->dev = (struct pci_dev *)(handle->pcidev_hdl); - handle->cqm_hdl = (void *)cqm_handle; - - /* Clearing Statistics */ - memset(&handle->hw_stats.cqm_stats, 0, sizeof(struct cqm_stats)); - - /* Reads VF/PF information. 
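- * The attributes (global function index and PF/VF/PPF type) come from
- * the hardware interface and drive the capability setup that follows.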
*/ - cqm_handle->func_attribute = handle->hwif->attr; - cqm_info(handle->dev_hdl, "Func init: function[%u] type %d(0:PF,1:VF,2:PPF)\n", - cqm_handle->func_attribute.func_global_idx, - cqm_handle->func_attribute.func_type); - - /* Read capability from configuration management module */ - ret = cqm_capability_init(ex_handle); - if (ret == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_capability_init)); - goto err1; - } - - /* Initialize memory entries such as BAT, CLA, and bitmap. */ - if (cqm_mem_init(ex_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_mem_init)); - goto err1; - } - - /* Event callback initialization */ - if (cqm_event_init(ex_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_event_init)); - goto err2; - } - - /* Doorbell initiation */ - if (cqm_db_init(ex_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_init)); - goto err3; - } - - /* Initialize the bloom filter. */ - if (cqm_bloomfilter_init(ex_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_bloomfilter_init)); - goto err4; - } - - /* The timer bitmap is set directly at the beginning of the CQM. - * The ifconfig up/down command is not used to set or clear the bitmap. - */ - if (sphw_func_tmr_bitmap_set(ex_handle, true) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, "Timer start: enable timer bitmap failed\n"); - goto err5; - } - - return CQM_SUCCESS; - -err5: - cqm_bloomfilter_uninit(ex_handle); -err4: - cqm_db_uninit(ex_handle); -err3: - cqm_event_uninit(ex_handle); -err2: - cqm_mem_uninit(ex_handle); -err1: - handle->cqm_hdl = NULL; - kfree(cqm_handle); - return CQM_FAIL; -} - -void cqm3_uninit(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - s32 ret; - - CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle)); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle)); - - /* The timer bitmap is set directly at the beginning of the CQM. - * The ifconfig up/down command is not used to set or clear the bitmap. - */ - cqm_info(handle->dev_hdl, "Timer stop: disable timer\n"); - if (sphw_func_tmr_bitmap_set(ex_handle, false) != CQM_SUCCESS) - cqm_err(handle->dev_hdl, "Timer stop: disable timer bitmap failed\n"); - - /* After the TMR timer stops, the system releases resources - * after a delay of one or two milliseconds. - */ - if (cqm_handle->func_attribute.func_type == CQM_PPF && - cqm_handle->func_capability.timer_enable == CQM_TIMER_ENABLE) { - cqm_info(handle->dev_hdl, "Timer stop: spfc ppf timer stop\n"); - ret = sphw_ppf_tmr_stop(handle); - if (ret != CQM_SUCCESS) - /* The timer fails to be stopped, - * and the resource release is not affected. - */ - cqm_info(handle->dev_hdl, "Timer stop: spfc ppf timer stop, ret=%d\n", - ret); - /* Somebody requires a delay of 1 ms, which is inaccurate. */ - usleep_range(900, 1000); - } - - /* Release Bloom Filter Table */ - cqm_bloomfilter_uninit(ex_handle); - - /* Release hardware doorbell */ - cqm_db_uninit(ex_handle); - - /* Cancel the callback of the event */ - cqm_event_uninit(ex_handle); - - /* Release various memory tables and require the service - * to release all objects. 
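- * Teardown mirrors cqm_mem_init() in reverse order: object tables,
- * bitmaps, CLA tables and finally the BAT.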
- */ - cqm_mem_uninit(ex_handle); - - /* Release cqm_handle */ - handle->cqm_hdl = NULL; - kfree(cqm_handle); -} - -void cqm_test_mode_init(struct cqm_handle *cqm_handle, - struct service_cap *service_capability) -{ - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - - if (service_capability->test_mode == 0) - return; - - cqm_info(handle->dev_hdl, "Enter CQM test mode\n"); - - func_cap->qpc_number = service_capability->test_qpc_num; - func_cap->qpc_reserved = - GET_MAX(func_cap->qpc_reserved, - service_capability->test_qpc_resvd_num); - func_cap->xid_alloc_mode = service_capability->test_xid_alloc_mode; - func_cap->gpa_check_enable = service_capability->test_gpa_check_enable; - func_cap->pagesize_reorder = service_capability->test_page_size_reorder; - func_cap->qpc_alloc_static = - (bool)(service_capability->test_qpc_alloc_mode); - func_cap->scqc_alloc_static = - (bool)(service_capability->test_scqc_alloc_mode); - func_cap->flow_table_based_conn_number = - service_capability->test_max_conn_num; - func_cap->flow_table_based_conn_cache_number = - service_capability->test_max_cache_conn_num; - func_cap->scqc_number = service_capability->test_scqc_num; - func_cap->mpt_number = service_capability->test_mpt_num; - func_cap->mpt_reserved = service_capability->test_mpt_recvd_num; - func_cap->reorder_number = service_capability->test_reorder_num; - /* 256K buckets, 256K*64B = 16MB */ - func_cap->hash_number = service_capability->test_hash_num; -} - -void cqm_service_capability_update(struct cqm_handle *cqm_handle) -{ - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - - func_cap->qpc_number = GET_MIN(CQM_MAX_QPC_NUM, func_cap->qpc_number); - func_cap->scqc_number = GET_MIN(CQM_MAX_SCQC_NUM, func_cap->scqc_number); - func_cap->srqc_number = GET_MIN(CQM_MAX_SRQC_NUM, func_cap->srqc_number); - func_cap->childc_number = GET_MIN(CQM_MAX_CHILDC_NUM, func_cap->childc_number); -} - -void cqm_service_valid_init(struct cqm_handle *cqm_handle, - struct service_cap *service_capability) -{ - enum cfg_svc_type_en type = service_capability->chip_svc_type; - struct cqm_service *svc = cqm_handle->service; - - svc[CQM_SERVICE_T_FC].valid = ((u32)type & CFG_SVC_FC_BIT5) ? 
true : false; -} - -void cqm_service_capability_init_fc(struct cqm_handle *cqm_handle, void *pra) -{ - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - struct service_cap *service_capability = (struct service_cap *)pra; - struct fc_service_cap *fc_cap = &service_capability->fc_cap; - struct dev_fc_svc_cap *dev_fc_cap = &fc_cap->dev_fc_cap; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - - cqm_info(handle->dev_hdl, "Cap init: fc is valid\n"); - cqm_info(handle->dev_hdl, "Cap init: fc qpc 0x%x, scqc 0x%x, srqc 0x%x\n", - dev_fc_cap->max_parent_qpc_num, dev_fc_cap->scq_num, - dev_fc_cap->srq_num); - func_cap->hash_number += dev_fc_cap->max_parent_qpc_num; - func_cap->hash_basic_size = CQM_HASH_BUCKET_SIZE_64; - func_cap->qpc_number += dev_fc_cap->max_parent_qpc_num; - func_cap->qpc_basic_size = GET_MAX(fc_cap->parent_qpc_size, - func_cap->qpc_basic_size); - func_cap->qpc_alloc_static = true; - func_cap->scqc_number += dev_fc_cap->scq_num; - func_cap->scqc_basic_size = GET_MAX(fc_cap->scqc_size, - func_cap->scqc_basic_size); - func_cap->srqc_number += dev_fc_cap->srq_num; - func_cap->srqc_basic_size = GET_MAX(fc_cap->srqc_size, - func_cap->srqc_basic_size); - func_cap->lun_number = CQM_LUN_FC_NUM; - func_cap->lun_basic_size = CQM_LUN_SIZE_8; - func_cap->taskmap_number = CQM_TASKMAP_FC_NUM; - func_cap->taskmap_basic_size = PAGE_SIZE; - func_cap->childc_number += dev_fc_cap->max_child_qpc_num; - func_cap->childc_basic_size = GET_MAX(fc_cap->child_qpc_size, - func_cap->childc_basic_size); - func_cap->pagesize_reorder = CQM_FC_PAGESIZE_ORDER; -} - -void cqm_service_capability_init(struct cqm_handle *cqm_handle, - struct service_cap *service_capability) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 i; - - for (i = 0; i < CQM_SERVICE_T_MAX; i++) { - cqm_handle->service[i].valid = false; - cqm_handle->service[i].has_register = false; - cqm_handle->service[i].buf_order = 0; - } - - cqm_service_valid_init(cqm_handle, service_capability); - - cqm_info(handle->dev_hdl, "Cap init: service type %d\n", - service_capability->chip_svc_type); - - if (cqm_handle->service[CQM_SERVICE_T_FC].valid) - cqm_service_capability_init_fc(cqm_handle, (void *)service_capability); -} - -/* Set func_type in fake_cqm_handle to ppf, pf, or vf. 
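- * Global function index 0 is the PPF, indexes below CQM_MAX_PF_NUM are
- * PFs, and any higher index is treated as a VF.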
*/ -void cqm_set_func_type(struct cqm_handle *cqm_handle) -{ - u32 idx = cqm_handle->func_attribute.func_global_idx; - - if (idx == 0) - cqm_handle->func_attribute.func_type = CQM_PPF; - else if (idx < CQM_MAX_PF_NUM) - cqm_handle->func_attribute.func_type = CQM_PF; - else - cqm_handle->func_attribute.func_type = CQM_VF; -} - -void cqm_lb_fake_mode_init(struct cqm_handle *cqm_handle, struct service_cap *svc_cap) -{ - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - - func_cap->lb_mode = svc_cap->lb_mode; - - /* Initializing the LB Mode */ - if (func_cap->lb_mode == CQM_LB_MODE_NORMAL) - func_cap->smf_pg = 0; - else - func_cap->smf_pg = svc_cap->smf_pg; - - func_cap->fake_cfg_number = 0; - func_cap->fake_func_type = CQM_FAKE_FUNC_NORMAL; -} - -s32 cqm_capability_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; - struct sphw_func_attr *func_attr = &cqm_handle->func_attribute; - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - u32 total_function_num = 0; - int err = 0; - - /* Initializes the PPF capabilities: include timer, pf, vf. */ - if (func_attr->func_type == CQM_PPF) { - total_function_num = service_capability->host_total_function; - func_cap->timer_enable = service_capability->timer_en; - func_cap->pf_num = service_capability->pf_num; - func_cap->pf_id_start = service_capability->pf_id_start; - func_cap->vf_num = service_capability->vf_num; - func_cap->vf_id_start = service_capability->vf_id_start; - - cqm_info(handle->dev_hdl, "Cap init: total function num 0x%x\n", - total_function_num); - cqm_info(handle->dev_hdl, "Cap init: pf_num 0x%x, pf_id_start 0x%x, vf_num 0x%x, vf_id_start 0x%x\n", - func_cap->pf_num, func_cap->pf_id_start, - func_cap->vf_num, func_cap->vf_id_start); - cqm_info(handle->dev_hdl, "Cap init: timer_enable %u (1: enable; 0: disable)\n", - func_cap->timer_enable); - } - - func_cap->flow_table_based_conn_number = service_capability->max_connect_num; - func_cap->flow_table_based_conn_cache_number = service_capability->max_stick2cache_num; - cqm_info(handle->dev_hdl, "Cap init: cfg max_conn_num 0x%x, max_cache_conn_num 0x%x\n", - func_cap->flow_table_based_conn_number, - func_cap->flow_table_based_conn_cache_number); - - func_cap->bloomfilter_enable = service_capability->bloomfilter_en; - cqm_info(handle->dev_hdl, "Cap init: bloomfilter_enable %u (1: enable; 0: disable)\n", - func_cap->bloomfilter_enable); - - if (func_cap->bloomfilter_enable) { - func_cap->bloomfilter_length = service_capability->bfilter_len; - func_cap->bloomfilter_addr = - service_capability->bfilter_start_addr; - if (func_cap->bloomfilter_length != 0 && - !cqm_check_align(func_cap->bloomfilter_length)) { - cqm_err(handle->dev_hdl, "Cap init: bloomfilter_length %u is not the power of 2\n", - func_cap->bloomfilter_length); - - err = CQM_FAIL; - goto out; - } - } - - cqm_info(handle->dev_hdl, "Cap init: bloomfilter_length 0x%x, bloomfilter_addr 0x%x\n", - func_cap->bloomfilter_length, func_cap->bloomfilter_addr); - - func_cap->qpc_reserved = 0; - func_cap->mpt_reserved = 0; - func_cap->scq_reserved = 0; - func_cap->srq_reserved = 0; - func_cap->qpc_alloc_static = false; - func_cap->scqc_alloc_static = false; - - func_cap->l3i_number = CQM_L3I_COMM_NUM; - func_cap->l3i_basic_size = CQM_L3I_SIZE_8; - - func_cap->timer_number = CQM_TIMER_ALIGN_SCALE_NUM * total_function_num; - 
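- /* The timer area is sized per function: CQM_TIMER_ALIGN_SCALE_NUM
- * entries of CQM_TIMER_SIZE_32 bytes for each function counted above
- * (total_function_num is non-zero only on the PPF).
- */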
func_cap->timer_basic_size = CQM_TIMER_SIZE_32; - - func_cap->gpa_check_enable = true; - - cqm_lb_fake_mode_init(cqm_handle, service_capability); - cqm_info(handle->dev_hdl, "Cap init: lb_mode=%u\n", func_cap->lb_mode); - cqm_info(handle->dev_hdl, "Cap init: smf_pg=%u\n", func_cap->smf_pg); - cqm_info(handle->dev_hdl, "Cap init: fake_func_type=%u\n", func_cap->fake_func_type); - cqm_info(handle->dev_hdl, "Cap init: fake_cfg_number=%u\n", func_cap->fake_cfg_number); - - cqm_service_capability_init(cqm_handle, service_capability); - - cqm_test_mode_init(cqm_handle, service_capability); - - cqm_service_capability_update(cqm_handle); - - func_cap->ft_enable = service_capability->sf_svc_attr.ft_en; - func_cap->rdma_enable = service_capability->sf_svc_attr.rdma_en; - - cqm_info(handle->dev_hdl, "Cap init: pagesize_reorder %u\n", func_cap->pagesize_reorder); - cqm_info(handle->dev_hdl, "Cap init: xid_alloc_mode %d, gpa_check_enable %d\n", - func_cap->xid_alloc_mode, func_cap->gpa_check_enable); - cqm_info(handle->dev_hdl, "Cap init: qpc_alloc_mode %d, scqc_alloc_mode %d\n", - func_cap->qpc_alloc_static, func_cap->scqc_alloc_static); - cqm_info(handle->dev_hdl, "Cap init: hash_number 0x%x\n", func_cap->hash_number); - cqm_info(handle->dev_hdl, "Cap init: qpc_number 0x%x, qpc_reserved 0x%x, qpc_basic_size 0x%x\n", - func_cap->qpc_number, func_cap->qpc_reserved, func_cap->qpc_basic_size); - cqm_info(handle->dev_hdl, "Cap init: scqc_number 0x%x scqc_reserved 0x%x, scqc_basic_size 0x%x\n", - func_cap->scqc_number, func_cap->scq_reserved, func_cap->scqc_basic_size); - cqm_info(handle->dev_hdl, "Cap init: srqc_number 0x%x, srqc_basic_size 0x%x\n", - func_cap->srqc_number, func_cap->srqc_basic_size); - cqm_info(handle->dev_hdl, "Cap init: mpt_number 0x%x, mpt_reserved 0x%x\n", - func_cap->mpt_number, func_cap->mpt_reserved); - cqm_info(handle->dev_hdl, "Cap init: gid_number 0x%x, lun_number 0x%x\n", - func_cap->gid_number, func_cap->lun_number); - cqm_info(handle->dev_hdl, "Cap init: taskmap_number 0x%x, l3i_number 0x%x\n", - func_cap->taskmap_number, func_cap->l3i_number); - cqm_info(handle->dev_hdl, "Cap init: timer_number 0x%x, childc_number 0x%x\n", - func_cap->timer_number, func_cap->childc_number); - cqm_info(handle->dev_hdl, "Cap init: childc_basic_size 0x%x\n", - func_cap->childc_basic_size); - cqm_info(handle->dev_hdl, "Cap init: xid2cid_number 0x%x, reorder_number 0x%x\n", - func_cap->xid2cid_number, func_cap->reorder_number); - cqm_info(handle->dev_hdl, "Cap init: ft_enable %d, rdma_enable %d\n", - func_cap->ft_enable, func_cap->rdma_enable); - - return CQM_SUCCESS; - -out: - if (func_attr->func_type == CQM_PPF) - func_cap->timer_enable = 0; - - return err; -} - -s32 cqm_mem_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - - if (cqm_bat_init(cqm_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_init)); - return CQM_FAIL; - } - - if (cqm_cla_init(cqm_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_init)); - goto err1; - } - - if (cqm_bitmap_init(cqm_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_init)); - goto err2; - } - - if (cqm_object_table_init(cqm_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_object_table_init)); - goto err3; - } - - return CQM_SUCCESS; - -err3: - cqm_bitmap_uninit(cqm_handle); -err2: - cqm_cla_uninit(cqm_handle, 
CQM_BAT_ENTRY_MAX); -err1: - cqm_bat_uninit(cqm_handle); - return CQM_FAIL; -} - -void cqm_mem_uninit(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - - cqm_object_table_uninit(cqm_handle); - cqm_bitmap_uninit(cqm_handle); - cqm_cla_uninit(cqm_handle, CQM_BAT_ENTRY_MAX); - cqm_bat_uninit(cqm_handle); -} - -s32 cqm_event_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - if (sphw_aeq_register_swe_cb(ex_handle, SPHW_STATEFULL_EVENT, - cqm_aeq_callback) != CHIPIF_SUCCESS) { - cqm_err(handle->dev_hdl, "Event: fail to register aeq callback\n"); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -void cqm_event_uninit(void *ex_handle) -{ - sphw_aeq_unregister_swe_cb(ex_handle, SPHW_STATEFULL_EVENT); -} - -u32 cqm_aeq_event2type(u8 event) -{ - u32 service_type; - - /* Distributes events to different service modules - * based on the event type. - */ - if (event >= CQM_AEQ_BASE_T_FC && event < CQM_AEQ_MAX_T_FC) - service_type = CQM_SERVICE_T_FC; - else - service_type = CQM_SERVICE_T_MAX; - - return service_type; -} - -u8 cqm_aeq_callback(void *ex_handle, u8 event, u8 *data) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct service_register_template *service_template = NULL; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - u8 event_level = FAULT_LEVEL_MAX; - u32 service_type; - - CQM_PTR_CHECK_RET(ex_handle, event_level, - CQM_PTR_NULL(aeq_callback_ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_aeq_callback_cnt[event]); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_RET(cqm_handle, event_level, - CQM_PTR_NULL(aeq_callback_cqm_handle)); - - /* Distributes events to different service modules - * based on the event type. 
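- * Only FC events, i.e. those in [CQM_AEQ_BASE_T_FC, CQM_AEQ_MAX_T_FC),
- * are mapped to a service here; any other event is rejected as a
- * wrong value.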
- */ - service_type = cqm_aeq_event2type(event); - if (service_type == CQM_SERVICE_T_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(event)); - return event_level; - } - - service = &cqm_handle->service[service_type]; - service_template = &service->service_template; - - if (!service_template->aeq_level_callback) - cqm_err(handle->dev_hdl, "Event: service_type %u aeq_level_callback unregistered\n", - service_type); - else - event_level = service_template->aeq_level_callback(service_template->service_handle, - event, data); - - if (!service_template->aeq_callback) - cqm_err(handle->dev_hdl, "Event: service_type %u aeq_callback unregistered\n", - service_type); - else - service_template->aeq_callback(service_template->service_handle, - event, data); - - return event_level; -} - -s32 cqm3_service_register(void *ex_handle, struct service_register_template *service_template) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - - CQM_PTR_CHECK_RET(ex_handle, CQM_FAIL, CQM_PTR_NULL(ex_handle)); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_RET(cqm_handle, CQM_FAIL, CQM_PTR_NULL(cqm_handle)); - CQM_PTR_CHECK_RET(service_template, CQM_FAIL, - CQM_PTR_NULL(service_template)); - - if (service_template->service_type >= CQM_SERVICE_T_MAX) { - cqm_err(handle->dev_hdl, - CQM_WRONG_VALUE(service_template->service_type)); - return CQM_FAIL; - } - service = &cqm_handle->service[service_template->service_type]; - if (!service->valid) { - cqm_err(handle->dev_hdl, "Service register: service_type %u is invalid\n", - service_template->service_type); - return CQM_FAIL; - } - - if (service->has_register) { - cqm_err(handle->dev_hdl, "Service register: service_type %u has registered\n", - service_template->service_type); - return CQM_FAIL; - } - - service->has_register = true; - (void)memcpy((void *)(&service->service_template), - (void *)service_template, - sizeof(struct service_register_template)); - - return CQM_SUCCESS; -} - -void cqm3_service_unregister(void *ex_handle, u32 service_type) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - - CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle)); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle)); - - if (service_type >= CQM_SERVICE_T_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return; - } - - service = &cqm_handle->service[service_type]; - if (!service->valid) - cqm_err(handle->dev_hdl, "Service unregister: service_type %u is disable\n", - service_type); - - service->has_register = false; - memset(&service->service_template, 0, sizeof(struct service_register_template)); -} - -struct cqm_cmd_buf *cqm3_cmd_alloc(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - CQM_PTR_CHECK_RET(ex_handle, NULL, CQM_PTR_NULL(ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_alloc_cnt); - - return (struct cqm_cmd_buf *)sphw_alloc_cmd_buf(ex_handle); -} - -void cqm3_cmd_free(void *ex_handle, struct cqm_cmd_buf *cmd_buf) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle)); - CQM_PTR_CHECK_NO_RET(cmd_buf, CQM_PTR_NULL(cmd_buf)); - CQM_PTR_CHECK_NO_RET(cmd_buf->buf, CQM_PTR_NULL(buf)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_free_cnt); - - 
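- /* cqm_cmd_buf is layout-compatible with sphw_cmd_buf (note the cast),
- * so the buffer is handed straight back to the hardware cmdq layer.
- */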
sphw_free_cmd_buf(ex_handle, (struct sphw_cmd_buf *)cmd_buf); -} - -s32 cqm3_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, struct cqm_cmd_buf *buf_in, - struct cqm_cmd_buf *buf_out, u64 *out_param, u32 timeout, - u16 channel) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - CQM_PTR_CHECK_RET(ex_handle, CQM_FAIL, CQM_PTR_NULL(ex_handle)); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_PTR_NULL(buf_in)); - CQM_PTR_CHECK_RET(buf_in->buf, CQM_FAIL, CQM_PTR_NULL(buf)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_send_cmd_box_cnt); - - return sphw_cmdq_detail_resp(ex_handle, mod, cmd, - (struct sphw_cmd_buf *)buf_in, - (struct sphw_cmd_buf *)buf_out, - out_param, timeout, channel); -} - -int cqm_alloc_fc_db_addr(void *hwdev, void __iomem **db_base, - void __iomem **dwqe_base) -{ - struct sphw_hwif *hwif = NULL; - u32 idx = 0; -#define SPFC_DB_ADDR_RSVD 12 -#define SPFC_DB_MASK 128 - u64 db_base_phy_fc; - - if (!hwdev || !db_base) - return -EINVAL; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - db_base_phy_fc = hwif->db_base_phy >> SPFC_DB_ADDR_RSVD; - - if (db_base_phy_fc & (SPFC_DB_MASK - 1)) - idx = SPFC_DB_MASK - (db_base_phy_fc & (SPFC_DB_MASK - 1)); - - *db_base = hwif->db_base + idx * SPHW_DB_PAGE_SIZE; - - if (!dwqe_base) - return 0; - - *dwqe_base = (u8 *)*db_base + SPHW_DWQE_OFFSET; - - return 0; -} - -s32 cqm3_db_addr_alloc(void *ex_handle, void __iomem **db_addr, - void __iomem **dwqe_addr) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - CQM_PTR_CHECK_RET(ex_handle, CQM_FAIL, CQM_PTR_NULL(ex_handle)); - CQM_PTR_CHECK_RET(db_addr, CQM_FAIL, CQM_PTR_NULL(db_addr)); - CQM_PTR_CHECK_RET(dwqe_addr, CQM_FAIL, CQM_PTR_NULL(dwqe_addr)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_alloc_cnt); - - return cqm_alloc_fc_db_addr(ex_handle, db_addr, dwqe_addr); -} - -s32 cqm_db_phy_addr_alloc(void *ex_handle, u64 *db_paddr, u64 *dwqe_addr) -{ - return sphw_alloc_db_phy_addr(ex_handle, db_paddr, dwqe_addr); -} - -void cqm3_db_addr_free(void *ex_handle, const void __iomem *db_addr, - void __iomem *dwqe_addr) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_free_cnt); - - sphw_free_db_addr(ex_handle, db_addr, dwqe_addr); -} - -void cqm_db_phy_addr_free(void *ex_handle, u64 *db_paddr, u64 *dwqe_addr) -{ - sphw_free_db_phy_addr(ex_handle, *db_paddr, *dwqe_addr); -} - -s32 cqm_db_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - s32 i; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - - /* Allocate hardware doorbells to services.
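- * Every valid service gets a mapped doorbell page plus its physical
- * address; if either allocation fails, the pages already granted are
- * rolled back before returning CQM_FAIL.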
*/ - for (i = 0; i < CQM_SERVICE_T_MAX; i++) { - service = &cqm_handle->service[i]; - if (!service->valid) - continue; - - if (cqm3_db_addr_alloc(ex_handle, &service->hardware_db_vaddr, - &service->dwqe_vaddr) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_db_addr_alloc)); - break; - } - - if (cqm_db_phy_addr_alloc(handle, &service->hardware_db_paddr, - &service->dwqe_paddr) != CQM_SUCCESS) { - cqm3_db_addr_free(ex_handle, service->hardware_db_vaddr, - service->dwqe_vaddr); - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_phy_addr_alloc)); - break; - } - } - - if (i != CQM_SERVICE_T_MAX) { - i--; - for (; i >= 0; i--) { - service = &cqm_handle->service[i]; - if (!service->valid) - continue; - - cqm3_db_addr_free(ex_handle, service->hardware_db_vaddr, - service->dwqe_vaddr); - cqm_db_phy_addr_free(ex_handle, - &service->hardware_db_paddr, - &service->dwqe_paddr); - } - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -void cqm_db_uninit(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - s32 i; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - - /* Release hardware doorbell. */ - for (i = 0; i < CQM_SERVICE_T_MAX; i++) { - service = &cqm_handle->service[i]; - if (service->valid) - cqm3_db_addr_free(ex_handle, service->hardware_db_vaddr, - service->dwqe_vaddr); - } -} - -s32 cqm3_ring_hardware_db_fc(void *ex_handle, u32 service_type, u8 db_count, - u8 pagenum, u64 db) -{ -#define SPFC_DB_FAKE_VF_OFFSET 32 - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - struct sphw_hwdev *handle = NULL; - void *dbaddr = NULL; - - handle = (struct sphw_hwdev *)ex_handle; - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - service = &cqm_handle->service[service_type]; - /* Considering the performance of ringing hardware db, - * the parameter is not checked. - */ - wmb(); - dbaddr = (u8 *)service->hardware_db_vaddr + - ((pagenum + SPFC_DB_FAKE_VF_OFFSET) * SPHW_DB_PAGE_SIZE); - *((u64 *)dbaddr + db_count) = db; - return CQM_SUCCESS; -} - -s32 cqm_ring_direct_wqe_db_fc(void *ex_handle, u32 service_type, - void *direct_wqe) -{ - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - struct sphw_hwdev *handle = NULL; - u64 *tmp = (u64 *)direct_wqe; - int i; - - handle = (struct sphw_hwdev *)ex_handle; - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - service = &cqm_handle->service[service_type]; - - /* Considering the performance of ringing hardware db, - * the parameter is not checked. - */ - wmb(); - *((u64 *)service->dwqe_vaddr + 0) = tmp[2]; - *((u64 *)service->dwqe_vaddr + 1) = tmp[3]; - *((u64 *)service->dwqe_vaddr + 2) = tmp[0]; - *((u64 *)service->dwqe_vaddr + 3) = tmp[1]; - tmp += 4; - - /* The FC use 256B WQE. 
The directwqe is written at block0, - * and the length is 256B - */ - for (i = 4; i < 32; i++) - *((u64 *)service->dwqe_vaddr + i) = *tmp++; - - return CQM_SUCCESS; -} - -static s32 bloomfilter_init_cmd(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - struct cqm_func_capability *capability = &cqm_handle->func_capability; - struct cqm_bloomfilter_init_cmd *cmd = NULL; - struct cqm_cmd_buf *buf_in = NULL; - s32 ret; - - buf_in = cqm3_cmd_alloc((void *)(cqm_handle->ex_handle)); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); - - /* Fill the command format and convert it to big-endian. */ - buf_in->size = sizeof(struct cqm_bloomfilter_init_cmd); - cmd = (struct cqm_bloomfilter_init_cmd *)(buf_in->buf); - cmd->bloom_filter_addr = capability->bloomfilter_addr; - cmd->bloom_filter_len = capability->bloomfilter_length; - - cqm_swab32((u8 *)cmd, (sizeof(struct cqm_bloomfilter_init_cmd) >> CQM_DW_SHIFT)); - - ret = cqm3_send_cmd_box((void *)(cqm_handle->ex_handle), - CQM_MOD_CQM, CQM_CMD_T_BLOOMFILTER_INIT, buf_in, - NULL, NULL, CQM_CMD_TIMEOUT, - SPHW_CHANNEL_DEFAULT); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_send_cmd_box)); - cqm_err(handle->dev_hdl, "Bloomfilter: %s ret=%d\n", __func__, - ret); - cqm_err(handle->dev_hdl, "Bloomfilter: %s: 0x%x 0x%x\n", - __func__, cmd->bloom_filter_addr, - cmd->bloom_filter_len); - cqm3_cmd_free((void *)(cqm_handle->ex_handle), buf_in); - return CQM_FAIL; - } - cqm3_cmd_free((void *)(cqm_handle->ex_handle), buf_in); - return CQM_SUCCESS; -} - -s32 cqm_bloomfilter_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_bloomfilter_table *bloomfilter_table = NULL; - struct cqm_func_capability *capability = NULL; - struct cqm_handle *cqm_handle = NULL; - u32 array_size; - s32 ret; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - bloomfilter_table = &cqm_handle->bloomfilter_table; - capability = &cqm_handle->func_capability; - - if (capability->bloomfilter_length == 0) { - cqm_info(handle->dev_hdl, - "Bloomfilter: bf_length=0, don't need to init bloomfilter\n"); - return CQM_SUCCESS; - } - - /* The unit of bloomfilter_length is 64B(512bits). Each bit is a table - * node. Therefore the value must be shift 9 bits to the left. - */ - bloomfilter_table->table_size = capability->bloomfilter_length << - CQM_BF_LENGTH_UNIT; - /* The unit of bloomfilter_length is 64B. The unit of array entryis 32B. - */ - array_size = capability->bloomfilter_length << 1; - if (array_size == 0 || array_size > CQM_BF_BITARRAY_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(array_size)); - return CQM_FAIL; - } - - bloomfilter_table->array_mask = array_size - 1; - /* This table is not a bitmap, it is the counter of corresponding bit. 
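- * Keeping a per-bit counter (a counting bloom filter) rather than a
- * plain bit array is what allows ids to be removed as well as
- * inserted.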
- */ - bloomfilter_table->table = vmalloc(bloomfilter_table->table_size * (sizeof(u32))); - CQM_PTR_CHECK_RET(bloomfilter_table->table, CQM_FAIL, CQM_ALLOC_FAIL(table)); - - memset(bloomfilter_table->table, 0, - (bloomfilter_table->table_size * sizeof(u32))); - - /* The the bloomfilter must be initialized to 0 by ucode, - * because the bloomfilter is mem mode - */ - if (cqm_handle->func_capability.bloomfilter_enable) { - ret = bloomfilter_init_cmd(ex_handle); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - "Bloomfilter: bloomfilter_init_cmd ret=%d\n", - ret); - vfree(bloomfilter_table->table); - bloomfilter_table->table = NULL; - return CQM_FAIL; - } - } - - mutex_init(&bloomfilter_table->lock); - return CQM_SUCCESS; -} - -void cqm_bloomfilter_uninit(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_bloomfilter_table *bloomfilter_table = NULL; - struct cqm_handle *cqm_handle = NULL; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - bloomfilter_table = &cqm_handle->bloomfilter_table; - - if (bloomfilter_table->table) { - vfree(bloomfilter_table->table); - bloomfilter_table->table = NULL; - } -} - -s32 cqm_bloomfilter_cmd(void *ex_handle, u32 op, u32 k_flag, u64 id) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_cmd_buf *buf_in = NULL; - struct cqm_bloomfilter_cmd *cmd = NULL; - s32 ret; - - buf_in = cqm3_cmd_alloc(ex_handle); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); - - /* Fill the command format and convert it to big-endian. */ - buf_in->size = sizeof(struct cqm_bloomfilter_cmd); - cmd = (struct cqm_bloomfilter_cmd *)(buf_in->buf); - memset((void *)cmd, 0, sizeof(struct cqm_bloomfilter_cmd)); - cmd->k_en = k_flag; - cmd->index_h = (u32)(id >> CQM_DW_OFFSET); - cmd->index_l = (u32)(id & CQM_DW_MASK); - - cqm_swab32((u8 *)cmd, (sizeof(struct cqm_bloomfilter_cmd) >> CQM_DW_SHIFT)); - - ret = cqm3_send_cmd_box(ex_handle, CQM_MOD_CQM, (u8)op, buf_in, NULL, - NULL, CQM_CMD_TIMEOUT, SPHW_CHANNEL_DEFAULT); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_send_cmd_box)); - cqm_err(handle->dev_hdl, "Bloomfilter: bloomfilter_cmd ret=%d\n", ret); - cqm_err(handle->dev_hdl, "Bloomfilter: op=0x%x, cmd: 0x%x 0x%x 0x%x 0x%x\n", - op, *((u32 *)cmd), *(((u32 *)cmd) + CQM_DW_INDEX1), - *(((u32 *)cmd) + CQM_DW_INDEX2), - *(((u32 *)cmd) + CQM_DW_INDEX3)); - cqm3_cmd_free(ex_handle, buf_in); - return CQM_FAIL; - } - - cqm3_cmd_free(ex_handle, buf_in); - - return CQM_SUCCESS; -} diff --git a/drivers/scsi/spfc/hw/spfc_cqm_main.h b/drivers/scsi/spfc/hw/spfc_cqm_main.h deleted file mode 100644 index cf10d7f5c33962dceaaba2e026faf355fbf7f0b3..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_main.h +++ /dev/null @@ -1,411 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_CQM_MAIN_H -#define SPFC_CQM_MAIN_H - -#include "sphw_hwdev.h" -#include "sphw_hwif.h" -#include "spfc_cqm_object.h" -#include "spfc_cqm_bitmap_table.h" -#include "spfc_cqm_bat_cla.h" - -#define GET_MAX(a, b) ((a) > (b) ? (a) : (b)) -#define GET_MIN(a, b) ((a) < (b) ? (a) : (b)) -#define CQM_DW_SHIFT 2 -#define CQM_QW_SHIFT 3 -#define CQM_BYTE_BIT_SHIFT 3 -#define CQM_NUM_BIT_BYTE 8 - -#define CHIPIF_SUCCESS 0 -#define CHIPIF_FAIL (-1) - -#define CQM_TIMER_ENABLE 1 -#define CQM_TIMER_DISABLE 0 - -/* The value must be the same as that of sphw_service_type in sphw_crm.h. 
*/ -#define CQM_SERVICE_T_FC SERVICE_T_FC -#define CQM_SERVICE_T_MAX SERVICE_T_MAX - -struct cqm_service { - bool valid; /* Whether to enable this service on the function. */ - bool has_register; /* Registered or Not */ - u64 hardware_db_paddr; - void __iomem *hardware_db_vaddr; - u64 dwqe_paddr; - void __iomem *dwqe_vaddr; - u32 buf_order; /* The size of each buf node is 2^buf_order pages. */ - struct service_register_template service_template; -}; - -struct cqm_fake_cfg { - u32 parent_func; /* The parent func_id of the fake vfs. */ - u32 child_func_start; /* The start func_id of the child fake vfs. */ - u32 child_func_number; /* The number of the child fake vfs. */ -}; - -#define CQM_MAX_FACKVF_GROUP 4 - -struct cqm_func_capability { - /* BAT_PTR table(SMLC) */ - bool ft_enable; /* BAT for flow table enable: support fc service - */ - bool rdma_enable; /* BAT for rdma enable: support RoCE */ - /* VAT table(SMIR) */ - bool ft_pf_enable; /* Same as ft_enable. BAT entry for fc on pf - */ - bool rdma_pf_enable; /* Same as rdma_enable. BAT entry for rdma on pf */ - - /* Dynamic or static memory allocation during the application of - * specified QPC/SCQC for each service. - */ - bool qpc_alloc_static; - bool scqc_alloc_static; - - u8 timer_enable; /* Whether the timer function is enabled */ - u8 bloomfilter_enable; /* Whether the bloomgfilter function is enabled */ - /* Maximum number of connections for fc, whitch cannot excedd qpc_number */ - u32 flow_table_based_conn_number; - u32 flow_table_based_conn_cache_number; /* Maximum number of sticky caches */ - u32 bloomfilter_length; /* Size of the bloomfilter table, 64-byte aligned */ - u32 bloomfilter_addr; /* Start position of the bloomfilter table in the SMF main cache. */ - u32 qpc_reserved; /* Reserved bit in bitmap */ - u32 mpt_reserved; /* The ROCE/IWARP MPT also has a reserved bit. */ - - /* All basic_size must be 2^n-aligned. */ - /* The number of hash bucket. The size of BAT table is aliaed with 64 bucket. - *At least 64 buckets is required. - */ - u32 hash_number; - /* THe basic size of hash bucket is 64B, including 5 valid entry and one next entry. */ - u32 hash_basic_size; - u32 qpc_number; - u32 qpc_basic_size; - - /* NUmber of PFs/VFs on the current host */ - u32 pf_num; - u32 pf_id_start; - u32 vf_num; - u32 vf_id_start; - - u32 lb_mode; - /* Only lower 4bit is valid, indicating which SMFs are enabled. - * For example, 0101B indicates that SMF0 and SMF2 are enabled. - */ - u32 smf_pg; - - u32 fake_mode; - /* Whether the current function belongs to the fake group (parent or child) */ - u32 fake_func_type; - u32 fake_cfg_number; /* Number of current configuration groups */ - struct cqm_fake_cfg fake_cfg[CQM_MAX_FACKVF_GROUP]; - - /* Note: for cqm specail test */ - u32 pagesize_reorder; - bool xid_alloc_mode; - bool gpa_check_enable; - u32 scq_reserved; - u32 srq_reserved; - - u32 mpt_number; - u32 mpt_basic_size; - u32 scqc_number; - u32 scqc_basic_size; - u32 srqc_number; - u32 srqc_basic_size; - - u32 gid_number; - u32 gid_basic_size; - u32 lun_number; - u32 lun_basic_size; - u32 taskmap_number; - u32 taskmap_basic_size; - u32 l3i_number; - u32 l3i_basic_size; - u32 childc_number; - u32 childc_basic_size; - u32 child_qpc_id_start; /* FC service Child CTX is global addressing. */ - u32 childc_number_all_function; /* The chip supports a maximum of 8096 child CTXs. 
*/ - u32 timer_number; - u32 timer_basic_size; - u32 xid2cid_number; - u32 xid2cid_basic_size; - u32 reorder_number; - u32 reorder_basic_size; -}; - -#define CQM_PF TYPE_PF -#define CQM_VF TYPE_VF -#define CQM_PPF TYPE_PPF -#define CQM_UNKNOWN TYPE_UNKNOWN -#define CQM_MAX_PF_NUM 32 - -#define CQM_LB_MODE_NORMAL 0xff -#define CQM_LB_MODE_0 0 -#define CQM_LB_MODE_1 1 -#define CQM_LB_MODE_2 2 - -#define CQM_LB_SMF_MAX 4 - -#define CQM_FPGA_MODE 0 -#define CQM_EMU_MODE 1 -#define CQM_FAKE_MODE_DISABLE 0 -#define CQM_FAKE_CFUNC_START 32 - -#define CQM_FAKE_FUNC_NORMAL 0 -#define CQM_FAKE_FUNC_PARENT 1 -#define CQM_FAKE_FUNC_CHILD 2 -#define CQM_FAKE_FUNC_CHILD_CONFLICT 3 -#define CQM_FAKE_FUNC_MAX 32 - -#define CQM_SPU_HOST_ID 4 - -#define CQM_QPC_ROCE_PER_DRCT 12 -#define CQM_QPC_NORMAL_RESERVE_DRC 0 -#define CQM_QPC_ROCEAA_ENABLE 1 -#define CQM_QPC_ROCE_VBS_MODE 2 -#define CQM_QPC_NORMAL_WITHOUT_RSERVER_DRC 3 - -struct cqm_db_common { - u32 rsvd1 : 23; - u32 c : 1; - u32 cos : 3; - u32 service_type : 5; - - u32 rsvd2; -}; - -struct cqm_bloomfilter_table { - u32 *table; - u32 table_size; /* The unit is bit */ - u32 array_mask; /* The unit of array entry is 32B, used to address entry - */ - struct mutex lock; -}; - -struct cqm_bloomfilter_init_cmd { - u32 bloom_filter_len; - u32 bloom_filter_addr; -}; - -struct cqm_bloomfilter_cmd { - u32 rsv1; - - u32 k_en : 4; - u32 rsv2 : 28; - - u32 index_h; - u32 index_l; -}; - -struct cqm_handle { - struct sphw_hwdev *ex_handle; - struct pci_dev *dev; - struct sphw_func_attr func_attribute; /* vf/pf attributes */ - struct cqm_func_capability func_capability; /* function capability set */ - struct cqm_service service[CQM_SERVICE_T_MAX]; /* Service-related structure */ - struct cqm_bat_table bat_table; - struct cqm_bloomfilter_table bloomfilter_table; - /* fake-vf-related structure */ - struct cqm_handle *fake_cqm_handle[CQM_FAKE_FUNC_MAX]; - struct cqm_handle *parent_cqm_handle; -}; - -enum cqm_cmd_type { - CQM_CMD_T_INVALID = 0, - CQM_CMD_T_BAT_UPDATE, - CQM_CMD_T_CLA_UPDATE, - CQM_CMD_T_CLA_CACHE_INVALID = 6, - CQM_CMD_T_BLOOMFILTER_INIT, - CQM_CMD_T_MAX -}; - -#define CQM_CQN_FROM_CEQE(data) ((data) & 0xfffff) -#define CQM_XID_FROM_CEQE(data) ((data) & 0xfffff) -#define CQM_QID_FROM_CEQE(data) (((data) >> 20) & 0x7) -#define CQM_TYPE_FROM_CEQE(data) (((data) >> 23) & 0x7) - -#define CQM_HASH_BUCKET_SIZE_64 64 - -#define CQM_MAX_QPC_NUM 0x100000 -#define CQM_MAX_SCQC_NUM 0x100000 -#define CQM_MAX_SRQC_NUM 0x100000 -#define CQM_MAX_CHILDC_NUM 0x100000 - -#define CQM_QPC_SIZE_256 256 -#define CQM_QPC_SIZE_512 512 -#define CQM_QPC_SIZE_1024 1024 - -#define CQM_SCQC_SIZE_32 32 -#define CQM_SCQC_SIZE_64 64 -#define CQM_SCQC_SIZE_128 128 - -#define CQM_SRQC_SIZE_32 32 -#define CQM_SRQC_SIZE_64 64 -#define CQM_SRQC_SIZE_128 128 - -#define CQM_MPT_SIZE_64 64 - -#define CQM_GID_SIZE_32 32 - -#define CQM_LUN_SIZE_8 8 - -#define CQM_L3I_SIZE_8 8 - -#define CQM_TIMER_SIZE_32 32 - -#define CQM_XID2CID_SIZE_8 8 - -#define CQM_XID2CID_SIZE_8K 8192 - -#define CQM_REORDER_SIZE_256 256 - -#define CQM_CHILDC_SIZE_256 256 - -#define CQM_XID2CID_VBS_NUM (18 * 1024) /* 16K virtio VQ + 2K nvme Q */ - -#define CQM_VBS_QPC_NUM 2048 /* 2K VOLQ */ - -#define CQM_VBS_QPC_SIZE 512 - -#define CQM_XID2CID_VIRTIO_NUM (16 * 1024) - -#define CQM_GID_RDMA_NUM 128 - -#define CQM_LUN_FC_NUM 64 - -#define CQM_TASKMAP_FC_NUM 4 - -#define CQM_L3I_COMM_NUM 64 - -#define CQM_CHILDC_ROCE_NUM (8 * 1024) -#define CQM_CHILDC_OVS_VBS_NUM (8 * 1024) -#define CQM_CHILDC_TOE_NUM 256 -#define 
CQM_CHILDC_IPSEC_NUM (4 * 1024) - -#define CQM_TIMER_SCALE_NUM (2 * 1024) -#define CQM_TIMER_ALIGN_WHEEL_NUM 8 -#define CQM_TIMER_ALIGN_SCALE_NUM \ - (CQM_TIMER_SCALE_NUM * CQM_TIMER_ALIGN_WHEEL_NUM) - -#define CQM_QPC_OVS_RSVD (1024 * 1024) -#define CQM_QPC_ROCE_RSVD 2 -#define CQM_QPC_ROCEAA_SWITCH_QP_NUM 4 -#define CQM_QPC_ROCEAA_RSVD \ - (4 * 1024 + CQM_QPC_ROCEAA_SWITCH_QP_NUM) /* 4096 Normal QP + 4 Switch QP */ -#define CQM_CQ_ROCEAA_RSVD 64 -#define CQM_SRQ_ROCEAA_RSVD 64 -#define CQM_QPC_ROCE_VBS_RSVD \ - (1024 + CQM_QPC_ROCE_RSVD) /* (204800 + CQM_QPC_ROCE_RSVD) */ - -#define CQM_OVS_PAGESIZE_ORDER 8 -#define CQM_OVS_MAX_TIMER_FUNC 48 - -#define CQM_FC_PAGESIZE_ORDER 0 - -#define CQM_QHEAD_ALIGN_ORDER 6 - -#define CQM_CMD_TIMEOUT 300000 /* ms */ - -#define CQM_DW_MASK 0xffffffff -#define CQM_DW_OFFSET 32 -#define CQM_DW_INDEX0 0 -#define CQM_DW_INDEX1 1 -#define CQM_DW_INDEX2 2 -#define CQM_DW_INDEX3 3 - -/* The unit of bloomfilter_length is 64B(512bits). */ -#define CQM_BF_LENGTH_UNIT 9 -#define CQM_BF_BITARRAY_MAX BIT(17) - -typedef void (*serv_cap_init_cb)(struct cqm_handle *, void *); - -/* Only for llt test */ -s32 cqm_capability_init(void *ex_handle); -/* Can be defined as static */ -s32 cqm_mem_init(void *ex_handle); -void cqm_mem_uninit(void *ex_handle); -s32 cqm_event_init(void *ex_handle); -void cqm_event_uninit(void *ex_handle); -u8 cqm_aeq_callback(void *ex_handle, u8 event, u8 *data); - -s32 cqm3_init(void *ex_handle); -void cqm3_uninit(void *ex_handle); -s32 cqm3_service_register(void *ex_handle, struct service_register_template *service_template); -void cqm3_service_unregister(void *ex_handle, u32 service_type); - -struct cqm_cmd_buf *cqm3_cmd_alloc(void *ex_handle); -void cqm3_cmd_free(void *ex_handle, struct cqm_cmd_buf *cmd_buf); -s32 cqm3_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, struct cqm_cmd_buf *buf_in, - struct cqm_cmd_buf *buf_out, u64 *out_param, u32 timeout, - u16 channel); - -s32 cqm3_db_addr_alloc(void *ex_handle, void __iomem **db_addr, void __iomem **dwqe_addr); -s32 cqm_db_phy_addr_alloc(void *ex_handle, u64 *db_paddr, u64 *dwqe_addr); -s32 cqm_db_init(void *ex_handle); -void cqm_db_uninit(void *ex_handle); - -s32 cqm_bloomfilter_cmd(void *ex_handle, u32 op, u32 k_flag, u64 id); -s32 cqm_bloomfilter_init(void *ex_handle); -void cqm_bloomfilter_uninit(void *ex_handle); - -#define CQM_LOG_ID 0 - -#define CQM_PTR_NULL(x) "%s: " #x " is null\n", __func__ -#define CQM_ALLOC_FAIL(x) "%s: " #x " alloc fail\n", __func__ -#define CQM_MAP_FAIL(x) "%s: " #x " map fail\n", __func__ -#define CQM_FUNCTION_FAIL(x) "%s: " #x " return failure\n", __func__ -#define CQM_WRONG_VALUE(x) "%s: " #x " %u is wrong\n", __func__, (u32)(x) - -#define cqm_err(dev, format, ...) dev_err(dev, "[CQM]" format, ##__VA_ARGS__) -#define cqm_warn(dev, format, ...) dev_warn(dev, "[CQM]" format, ##__VA_ARGS__) -#define cqm_notice(dev, format, ...) \ - dev_notice(dev, "[CQM]" format, ##__VA_ARGS__) -#define cqm_info(dev, format, ...) 
dev_info(dev, "[CQM]" format, ##__VA_ARGS__) - -#define CQM_32_ALIGN_CHECK_RET(dev_hdl, x, ret, desc) \ - do { \ - if (unlikely(((x) & 0x1f) != 0)) { \ - cqm_err(dev_hdl, desc); \ - return ret; \ - } \ - } while (0) -#define CQM_64_ALIGN_CHECK_RET(dev_hdl, x, ret, desc) \ - do { \ - if (unlikely(((x) & 0x3f) != 0)) { \ - cqm_err(dev_hdl, desc); \ - return ret; \ - } \ - } while (0) - -#define CQM_PTR_CHECK_RET(ptr, ret, desc) \ - do { \ - if (unlikely((ptr) == NULL)) { \ - pr_err("[CQM]" desc); \ - return ret; \ - } \ - } while (0) - -#define CQM_PTR_CHECK_NO_RET(ptr, desc) \ - do { \ - if (unlikely((ptr) == NULL)) { \ - pr_err("[CQM]" desc); \ - return; \ - } \ - } while (0) -#define CQM_CHECK_EQUAL_RET(dev_hdl, actual, expect, ret, desc) \ - do { \ - if (unlikely((expect) != (actual))) { \ - cqm_err(dev_hdl, desc); \ - return ret; \ - } \ - } while (0) -#define CQM_CHECK_EQUAL_NO_RET(dev_hdl, actual, expect, desc) \ - do { \ - if (unlikely((expect) != (actual))) { \ - cqm_err(dev_hdl, desc); \ - return; \ - } \ - } while (0) - -#endif /* SPFC_CQM_MAIN_H */ diff --git a/drivers/scsi/spfc/hw/spfc_cqm_object.c b/drivers/scsi/spfc/hw/spfc_cqm_object.c deleted file mode 100644 index 165794e9c7e5814eca66a7af8fe21a2f6d6893b2..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_object.c +++ /dev/null @@ -1,937 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" - -#include "spfc_cqm_object.h" -#include "spfc_cqm_bitmap_table.h" -#include "spfc_cqm_bat_cla.h" -#include "spfc_cqm_main.h" - -s32 cqm_qpc_mpt_bitmap_alloc(struct cqm_object *object, struct cqm_cla_table *cla_table) -{ - struct cqm_qpc_mpt *common = container_of(object, struct cqm_qpc_mpt, object); - struct cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, - struct cqm_qpc_mpt_info, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_bitmap *bitmap = &cla_table->bitmap; - u32 index, count; - - count = (ALIGN(object->object_size, cla_table->obj_size)) / cla_table->obj_size; - qpc_mpt_info->index_count = count; - - if (qpc_mpt_info->common.xid == CQM_INDEX_INVALID) { - /* apply for an index normally */ - index = cqm_bitmap_alloc(bitmap, 1U << (cla_table->z + 1), - count, func_cap->xid_alloc_mode); - if (index < bitmap->max_num) { - qpc_mpt_info->common.xid = index; - } else { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_bitmap_alloc)); - return CQM_FAIL; - } - } else { - /* apply for index to be reserved */ - index = cqm_bitmap_alloc_reserved(bitmap, count, - qpc_mpt_info->common.xid); - if (index != qpc_mpt_info->common.xid) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_bitmap_alloc_reserved)); - return CQM_FAIL; - } - } - - return CQM_SUCCESS; -} - -s32 cqm_qpc_mpt_create(struct cqm_object *object) -{ - struct cqm_qpc_mpt *common = container_of(object, struct cqm_qpc_mpt, object); - struct cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, - struct cqm_qpc_mpt_info, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_object_table *object_table 
= NULL; - struct cqm_cla_table *cla_table = NULL; - struct cqm_bitmap *bitmap = NULL; - u32 index, count; - - /* find the corresponding cla table */ - if (object->object_type == CQM_OBJECT_SERVICE_CTX) { - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); - } else { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); - return CQM_FAIL; - } - - CQM_PTR_CHECK_RET(cla_table, CQM_FAIL, - CQM_FUNCTION_FAIL(cqm_cla_table_get)); - - /* Bitmap applies for index. */ - if (cqm_qpc_mpt_bitmap_alloc(object, cla_table) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_qpc_mpt_bitmap_alloc)); - return CQM_FAIL; - } - - bitmap = &cla_table->bitmap; - index = qpc_mpt_info->common.xid; - count = qpc_mpt_info->index_count; - - /* Find the trunk page from the BAT/CLA and allocate the buffer. - * Ensure that the released buffer has been cleared. - */ - if (cla_table->alloc_static) - qpc_mpt_info->common.vaddr = cqm_cla_get_unlock(cqm_handle, - cla_table, - index, count, - &common->paddr); - else - qpc_mpt_info->common.vaddr = cqm_cla_get_lock(cqm_handle, - cla_table, index, - count, - &common->paddr); - - if (!qpc_mpt_info->common.vaddr) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_get_lock)); - cqm_err(handle->dev_hdl, "Qpc mpt init: qpc mpt vaddr is null, cla_table->alloc_static=%d\n", - cla_table->alloc_static); - goto err1; - } - - /* Indexes are associated with objects, and FC is executed - * in the interrupt context. - */ - object_table = &cla_table->obj_table; - if (object->service_type == CQM_SERVICE_T_FC) { - if (cqm_object_table_insert(cqm_handle, object_table, index, - object, false) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_object_table_insert)); - goto err2; - } - } else { - if (cqm_object_table_insert(cqm_handle, object_table, index, - object, true) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_object_table_insert)); - goto err2; - } - } - - return CQM_SUCCESS; - -err2: - cqm_cla_put(cqm_handle, cla_table, index, count); -err1: - cqm_bitmap_free(bitmap, index, count); - return CQM_FAIL; -} - -struct cqm_qpc_mpt *cqm3_object_qpc_mpt_create(void *ex_handle, u32 service_type, - enum cqm_object_type object_type, - u32 object_size, void *object_priv, - u32 index) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_qpc_mpt_info *qpc_mpt_info = NULL; - struct cqm_handle *cqm_handle = NULL; - s32 ret = CQM_FAIL; - - CQM_PTR_CHECK_RET(ex_handle, NULL, CQM_PTR_NULL(ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_qpc_mpt_create_cnt); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_RET(cqm_handle, NULL, CQM_PTR_NULL(cqm_handle)); - - if (service_type >= CQM_SERVICE_T_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return NULL; - } - /* exception of service registrion check */ - if (!cqm_handle->service[service_type].has_register) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return NULL; - } - - if (object_type != CQM_OBJECT_SERVICE_CTX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); - return NULL; - } - - qpc_mpt_info = kmalloc(sizeof(*qpc_mpt_info), GFP_ATOMIC | __GFP_ZERO); - CQM_PTR_CHECK_RET(qpc_mpt_info, NULL, CQM_ALLOC_FAIL(qpc_mpt_info)); - - qpc_mpt_info->common.object.service_type = service_type; - qpc_mpt_info->common.object.object_type = object_type; - qpc_mpt_info->common.object.object_size = object_size; - atomic_set(&qpc_mpt_info->common.object.refcount, 1); - 
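/* The refcount initialized to 1 above is the creation reference:
 * cqm3_object_get() (later in this file) takes extra references on the
 * data path and cqm3_object_put() drops them, completing object->free
 * when the count reaches zero so that the delete paths can safely
 * wait_for_completion().
 */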
init_completion(&qpc_mpt_info->common.object.free); - qpc_mpt_info->common.object.cqm_handle = cqm_handle; - qpc_mpt_info->common.xid = index; - - qpc_mpt_info->common.priv = object_priv; - - ret = cqm_qpc_mpt_create(&qpc_mpt_info->common.object); - if (ret == CQM_SUCCESS) - return &qpc_mpt_info->common; - - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_qpc_mpt_create)); - kfree(qpc_mpt_info); - return NULL; -} - -void cqm_linkwqe_fill(struct cqm_buf *buf, u32 wqe_per_buf, u32 wqe_size, - u32 wqe_number, bool tail, u8 link_mode) -{ - struct cqm_linkwqe_128B *linkwqe = NULL; - struct cqm_linkwqe *wqe = NULL; - dma_addr_t addr; - u8 *tmp = NULL; - u8 *va = NULL; - u32 i; - - /* The linkwqe of other buffer except the last buffer - * is directly filled to the tail. - */ - for (i = 0; i < buf->buf_number; i++) { - va = (u8 *)(buf->buf_list[i].va); - - if (i != (buf->buf_number - 1)) { - wqe = (struct cqm_linkwqe *)(va + (u32)(wqe_size * wqe_per_buf)); - wqe->wf = CQM_WQE_WF_LINK; - wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; - wqe->lp = CQM_LINK_WQE_LP_INVALID; - /* The valid value of link wqe needs to be set to 1. - * Each service ensures that o-bit=1 indicates that - * link wqe is valid and o-bit=0 indicates that - * link wqe is invalid. - */ - wqe->o = CQM_LINK_WQE_OWNER_VALID; - addr = buf->buf_list[(u32)(i + 1)].pa; - wqe->next_page_gpa_h = CQM_ADDR_HI(addr); - wqe->next_page_gpa_l = CQM_ADDR_LW(addr); - } else { /* linkwqe special padding of the last buffer */ - if (tail) { - /* must be filled at the end of the page */ - tmp = va + (u32)(wqe_size * wqe_per_buf); - wqe = (struct cqm_linkwqe *)tmp; - } else { - /* The last linkwqe is filled - * following the last wqe. - */ - tmp = va + (u32)(wqe_size * (wqe_number - - wqe_per_buf * - (buf->buf_number - - 1))); - wqe = (struct cqm_linkwqe *)tmp; - } - wqe->wf = CQM_WQE_WF_LINK; - wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; - - /* In link mode, the last link WQE is invalid; - * In ring mode, the last link wqe is valid, pointing to - * the home page, and the lp is set. - */ - if (link_mode == CQM_QUEUE_LINK_MODE) { - wqe->o = CQM_LINK_WQE_OWNER_INVALID; - } else { - /* The lp field of the last link_wqe is set to - * 1, indicating that the meaning of the o-bit - * is reversed. - */ - wqe->lp = CQM_LINK_WQE_LP_VALID; - wqe->o = CQM_LINK_WQE_OWNER_VALID; - addr = buf->buf_list[0].pa; - wqe->next_page_gpa_h = CQM_ADDR_HI(addr); - wqe->next_page_gpa_l = CQM_ADDR_LW(addr); - } - } - - if (wqe_size == CQM_LINKWQE_128B) { - /* After the B800 version, the WQE obit scheme is - * changed. The 64B bits before and after the 128B WQE - * need to be assigned a value: - * ifoe the 63rd bit from the end of the last 64B is - * obit; - * toe the 157th bit from the end of the last 64B is - * obit. 
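* As a sanity check on the byte swap below (assuming the structures
* match their names, 128B and 64B): sizeof(struct cqm_linkwqe_128B) >> 2
* = 128 / 4 = 32 dwords are swapped to big-endian, and the plain
* cqm_linkwqe case swaps 64 / 4 = 16 dwords.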
- */ - linkwqe = (struct cqm_linkwqe_128B *)wqe; - linkwqe->second64B.forth_16B.bs.ifoe_o = CQM_LINK_WQE_OWNER_VALID; - - /* shift 2 bits by right to get length of dw(4B) */ - cqm_swab32((u8 *)wqe, sizeof(struct cqm_linkwqe_128B) >> 2); - } else { - /* shift 2 bits by right to get length of dw(4B) */ - cqm_swab32((u8 *)wqe, sizeof(struct cqm_linkwqe) >> 2); - } - } -} - -s32 cqm_nonrdma_queue_ctx_create(struct cqm_object *object) -{ - struct cqm_queue *common = container_of(object, struct cqm_queue, object); - struct cqm_nonrdma_qinfo *qinfo = container_of(common, struct cqm_nonrdma_qinfo, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_object_table *object_table = NULL; - struct cqm_cla_table *cla_table = NULL; - struct cqm_bitmap *bitmap = NULL; - s32 shift; - - if (object->object_type == CQM_OBJECT_NONRDMA_SRQ) { - shift = cqm_shift(qinfo->q_ctx_size); - common->q_ctx_vaddr = cqm_kmalloc_align(qinfo->q_ctx_size, - GFP_KERNEL | __GFP_ZERO, - (u16)shift); - if (!common->q_ctx_vaddr) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_ctx_vaddr)); - return CQM_FAIL; - } - - common->q_ctx_paddr = pci_map_single(cqm_handle->dev, - common->q_ctx_vaddr, - qinfo->q_ctx_size, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(cqm_handle->dev, - common->q_ctx_paddr)) { - cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_ctx_vaddr)); - cqm_kfree_align(common->q_ctx_vaddr); - common->q_ctx_vaddr = NULL; - return CQM_FAIL; - } - } else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { - /* find the corresponding cla table */ - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); - if (!cla_table) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(nonrdma_cqm_cla_table_get)); - return CQM_FAIL; - } - - /* bitmap applies for index */ - bitmap = &cla_table->bitmap; - qinfo->index_count = - (ALIGN(qinfo->q_ctx_size, cla_table->obj_size)) / - cla_table->obj_size; - qinfo->common.index = cqm_bitmap_alloc(bitmap, 1U << (cla_table->z + 1), - qinfo->index_count, - cqm_handle->func_capability.xid_alloc_mode); - if (qinfo->common.index >= bitmap->max_num) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(nonrdma_cqm_bitmap_alloc)); - return CQM_FAIL; - } - - /* find the trunk page from BAT/CLA and allocate the buffer */ - common->q_ctx_vaddr = cqm_cla_get_lock(cqm_handle, cla_table, - qinfo->common.index, - qinfo->index_count, - &common->q_ctx_paddr); - if (!common->q_ctx_vaddr) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(nonrdma_cqm_cla_get_lock)); - cqm_bitmap_free(bitmap, qinfo->common.index, - qinfo->index_count); - return CQM_FAIL; - } - - /* index and object association */ - object_table = &cla_table->obj_table; - if (object->service_type == CQM_SERVICE_T_FC) { - if (cqm_object_table_insert(cqm_handle, object_table, - qinfo->common.index, object, - false) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(nonrdma_cqm_object_table_insert)); - cqm_cla_put(cqm_handle, cla_table, - qinfo->common.index, - qinfo->index_count); - cqm_bitmap_free(bitmap, qinfo->common.index, - qinfo->index_count); - return CQM_FAIL; - } - } else { - if (cqm_object_table_insert(cqm_handle, object_table, - qinfo->common.index, object, - true) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(nonrdma_cqm_object_table_insert)); - cqm_cla_put(cqm_handle, cla_table, - qinfo->common.index, - qinfo->index_count); - cqm_bitmap_free(bitmap, 
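/* The error unwind in this SCQ branch runs in reverse allocation order:
 * when the object-table insert fails, the CLA buffer is returned first
 * (cqm_cla_put) and the bitmap indexes are freed last, mirroring the
 * bitmap-then-buffer order on the way in.
 */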
qinfo->common.index, - qinfo->index_count); - return CQM_FAIL; - } - } - } - - return CQM_SUCCESS; -} - -s32 cqm_nonrdma_queue_create(struct cqm_object *object) -{ - struct cqm_queue *common = container_of(object, struct cqm_queue, object); - struct cqm_nonrdma_qinfo *qinfo = container_of(common, struct cqm_nonrdma_qinfo, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_service *service = cqm_handle->service + object->service_type; - struct cqm_buf *q_room_buf = &common->q_room_buf_1; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 wqe_number = qinfo->common.object.object_size; - u32 wqe_size = qinfo->wqe_size; - u32 order = service->buf_order; - u32 buf_number, buf_size; - bool tail = false; /* determine whether the linkwqe is at the end of the page */ - - /* When creating a CQ/SCQ queue, the page size is 4 KB, - * the linkwqe must be at the end of the page. - */ - if (object->object_type == CQM_OBJECT_NONRDMA_EMBEDDED_CQ || - object->object_type == CQM_OBJECT_NONRDMA_SCQ) { - /* depth: 2^n-aligned; depth range: 256-32 K */ - if (wqe_number < CQM_CQ_DEPTH_MIN || - wqe_number > CQM_CQ_DEPTH_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_number)); - return CQM_FAIL; - } - if (!cqm_check_align(wqe_number)) { - cqm_err(handle->dev_hdl, "Nonrdma queue alloc: wqe_number is not align on 2^n\n"); - return CQM_FAIL; - } - - order = CQM_4K_PAGE_ORDER; /* wqe page 4k */ - tail = true; /* The linkwqe must be at the end of the page. */ - buf_size = CQM_4K_PAGE_SIZE; - } else { - buf_size = (u32)(PAGE_SIZE << order); - } - - /* Calculate the total number of buffers required, - * -1 indicates that the link wqe in a buffer is deducted. - */ - qinfo->wqe_per_buf = (buf_size / wqe_size) - 1; - /* number of linkwqes that are included in the depth transferred - * by the service - */ - buf_number = ALIGN((wqe_size * wqe_number), buf_size) / buf_size; - - /* apply for buffer */ - q_room_buf->buf_number = buf_number; - q_room_buf->buf_size = buf_size; - q_room_buf->page_number = buf_number << order; - if (cqm_buf_alloc(cqm_handle, q_room_buf, false) == CQM_FAIL) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc)); - return CQM_FAIL; - } - /* fill link wqe, wqe_number - buf_number is the number of wqe without - * link wqe - */ - cqm_linkwqe_fill(q_room_buf, qinfo->wqe_per_buf, wqe_size, - wqe_number - buf_number, tail, - common->queue_link_mode); - - /* create queue header */ - qinfo->common.q_header_vaddr = cqm_kmalloc_align(sizeof(struct cqm_queue_header), - GFP_KERNEL | __GFP_ZERO, - CQM_QHEAD_ALIGN_ORDER); - if (!qinfo->common.q_header_vaddr) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_header_vaddr)); - goto err1; - } - - common->q_header_paddr = pci_map_single(cqm_handle->dev, - qinfo->common.q_header_vaddr, - sizeof(struct cqm_queue_header), - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(cqm_handle->dev, common->q_header_paddr)) { - cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_header_vaddr)); - goto err2; - } - - /* create queue ctx */ - if (cqm_nonrdma_queue_ctx_create(object) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_nonrdma_queue_ctx_create)); - goto err3; - } - - return CQM_SUCCESS; - -err3: - pci_unmap_single(cqm_handle->dev, common->q_header_paddr, - sizeof(struct cqm_queue_header), PCI_DMA_BIDIRECTIONAL); -err2: - cqm_kfree_align(qinfo->common.q_header_vaddr); - qinfo->common.q_header_vaddr = NULL; -err1: - cqm_buf_free(q_room_buf, cqm_handle->dev); - return CQM_FAIL; -} - -struct cqm_queue 
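/* A worked example of the sizing in cqm_nonrdma_queue_create() above,
 * with hypothetical values: for an SCQ, buf_size = 4096 and wqe_size = 64
 * give wqe_per_buf = 4096 / 64 - 1 = 63 usable WQEs per buffer (one slot
 * is reserved for the link WQE); wqe_number = 256 then needs
 * buf_number = ALIGN(64 * 256, 4096) / 4096 = 4 buffers, and
 * wqe_number - buf_number = 252 normal WQEs surround the 4 link WQEs.
 */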
*cqm3_object_fc_srq_create(void *ex_handle, u32 service_type, - enum cqm_object_type object_type, - u32 wqe_number, u32 wqe_size, - void *object_priv) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_nonrdma_qinfo *nonrdma_qinfo = NULL; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - u32 valid_wqe_per_buffer; - u32 wqe_sum; /* include linkwqe, normal wqe */ - u32 buf_size; - u32 buf_num; - s32 ret; - - CQM_PTR_CHECK_RET(ex_handle, NULL, CQM_PTR_NULL(ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_fc_srq_create_cnt); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_RET(cqm_handle, NULL, CQM_PTR_NULL(cqm_handle)); - - /* service_type must be fc */ - if (service_type != CQM_SERVICE_T_FC) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return NULL; - } - - /* exception of service unregistered check */ - if (!cqm_handle->service[service_type].has_register) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return NULL; - } - - /* wqe_size cannot exceed PAGE_SIZE and must be 2^n aligned. */ - if (wqe_size >= PAGE_SIZE || (!cqm_check_align(wqe_size))) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size)); - return NULL; - } - - /* FC RQ is SRQ. (Different from the SRQ concept of TOE, FC indicates - * that packets received by all flows are placed on the same RQ. - * The SRQ of TOE is similar to the RQ resource pool.) - */ - if (object_type != CQM_OBJECT_NONRDMA_SRQ) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); - return NULL; - } - - service = &cqm_handle->service[service_type]; - buf_size = (u32)(PAGE_SIZE << (service->buf_order)); - /* subtract 1 link wqe */ - valid_wqe_per_buffer = buf_size / wqe_size - 1; - buf_num = wqe_number / valid_wqe_per_buffer; - if (wqe_number % valid_wqe_per_buffer != 0) - buf_num++; - - /* calculate the total number of WQEs */ - wqe_sum = buf_num * (valid_wqe_per_buffer + 1); - nonrdma_qinfo = kmalloc(sizeof(*nonrdma_qinfo), GFP_KERNEL | __GFP_ZERO); - CQM_PTR_CHECK_RET(nonrdma_qinfo, NULL, CQM_ALLOC_FAIL(nonrdma_qinfo)); - - /* initialize object member */ - nonrdma_qinfo->common.object.service_type = service_type; - nonrdma_qinfo->common.object.object_type = object_type; - /* total number of WQEs */ - nonrdma_qinfo->common.object.object_size = wqe_sum; - atomic_set(&nonrdma_qinfo->common.object.refcount, 1); - init_completion(&nonrdma_qinfo->common.object.free); - nonrdma_qinfo->common.object.cqm_handle = cqm_handle; - - /* Initialize the doorbell used by the current queue. - * The default doorbell is the hardware doorbell. - */ - nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL; - /* Currently, the connection mode is fixed. In the future, - * the service needs to transfer the connection mode. - */ - nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE; - - /* initialize public members */ - nonrdma_qinfo->common.priv = object_priv; - nonrdma_qinfo->common.valid_wqe_num = wqe_sum - buf_num; - - /* initialize internal private members */ - nonrdma_qinfo->wqe_size = wqe_size; - /* RQ (also called SRQ of FC) created by FC services, - * CTX needs to be created. 
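* A worked example with hypothetical numbers: wqe_size = 128 and
* buf_size = 4096 give valid_wqe_per_buffer = 4096 / 128 - 1 = 31;
* wqe_number = 100 then rounds up to buf_num = 4 buffers, and
* wqe_sum = 4 * (31 + 1) = 128 WQEs including the four link WQEs.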
- */ - nonrdma_qinfo->q_ctx_size = service->service_template.srq_ctx_size; - - ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); - if (ret == CQM_SUCCESS) - return &nonrdma_qinfo->common; - - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_fc_queue_create)); - kfree(nonrdma_qinfo); - return NULL; -} - -struct cqm_queue *cqm3_object_nonrdma_queue_create(void *ex_handle, u32 service_type, - enum cqm_object_type object_type, - u32 wqe_number, u32 wqe_size, - void *object_priv) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_nonrdma_qinfo *nonrdma_qinfo = NULL; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - s32 ret; - - CQM_PTR_CHECK_RET(ex_handle, NULL, CQM_PTR_NULL(ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_nonrdma_queue_create_cnt); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_RET(cqm_handle, NULL, CQM_PTR_NULL(cqm_handle)); - - /* exception of service registration check */ - if (!cqm_handle->service[service_type].has_register) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return NULL; - } - /* wqe_size can't be more than PAGE_SIZE, can't be zero, and must be a - * power of 2; cqm_check_align() verifies all of the above. - */ - if (wqe_size >= PAGE_SIZE || (!cqm_check_align(wqe_size))) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size)); - return NULL; - } - - /* nonrdma supports: RQ, SQ, SRQ, CQ, SCQ */ - if (object_type < CQM_OBJECT_NONRDMA_EMBEDDED_RQ || - object_type > CQM_OBJECT_NONRDMA_SCQ) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); - return NULL; - } - - nonrdma_qinfo = kmalloc(sizeof(*nonrdma_qinfo), GFP_KERNEL | __GFP_ZERO); - CQM_PTR_CHECK_RET(nonrdma_qinfo, NULL, CQM_ALLOC_FAIL(nonrdma_qinfo)); - - nonrdma_qinfo->common.object.service_type = service_type; - nonrdma_qinfo->common.object.object_type = object_type; - nonrdma_qinfo->common.object.object_size = wqe_number; - atomic_set(&nonrdma_qinfo->common.object.refcount, 1); - init_completion(&nonrdma_qinfo->common.object.free); - nonrdma_qinfo->common.object.cqm_handle = cqm_handle; - - /* Initialize the doorbell used by the current queue. - * The default value is the hardware doorbell. - */ - nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL; - /* Currently, the link mode is hardcoded and needs to be passed in by - * the service side. - */ - nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE; - - nonrdma_qinfo->common.priv = object_priv; - - /* Initialize internal private members */ - nonrdma_qinfo->wqe_size = wqe_size; - service = &cqm_handle->service[service_type]; - switch (object_type) { - case CQM_OBJECT_NONRDMA_SCQ: - nonrdma_qinfo->q_ctx_size = - service->service_template.scq_ctx_size; - break; - case CQM_OBJECT_NONRDMA_SRQ: - /* Currently, the SRQ of the service is created through a - * dedicated interface. 
- */ - nonrdma_qinfo->q_ctx_size = - service->service_template.srq_ctx_size; - break; - default: - break; - } - - ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); - if (ret == CQM_SUCCESS) - return &nonrdma_qinfo->common; - - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_nonrdma_queue_create)); - kfree(nonrdma_qinfo); - return NULL; -} - -void cqm_qpc_mpt_delete(struct cqm_object *object) -{ - struct cqm_qpc_mpt *common = container_of(object, struct cqm_qpc_mpt, object); - struct cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, - struct cqm_qpc_mpt_info, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_object_table *object_table = NULL; - struct cqm_cla_table *cla_table = NULL; - u32 count = qpc_mpt_info->index_count; - u32 index = qpc_mpt_info->common.xid; - struct cqm_bitmap *bitmap = NULL; - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_qpc_mpt_delete_cnt); - - /* find the corresponding cla table */ - if (object->object_type == CQM_OBJECT_SERVICE_CTX) { - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); - } else { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); - return; - } - - CQM_PTR_CHECK_NO_RET(cla_table, - CQM_FUNCTION_FAIL(cqm_cla_table_get_qpc)); - - /* disassociate index and object */ - object_table = &cla_table->obj_table; - if (object->service_type == CQM_SERVICE_T_FC) - cqm_object_table_remove(cqm_handle, object_table, index, object, - false); - else - cqm_object_table_remove(cqm_handle, object_table, index, object, - true); - - /* wait for completion to ensure that all references to - * the QPC are complete - */ - if (atomic_dec_and_test(&object->refcount)) - complete(&object->free); - else - cqm_err(handle->dev_hdl, "Qpc mpt del: object is referred by others, has to wait for completion\n"); - - /* Static QPC allocation must be non-blocking. - * Services ensure that the QPC is referenced - * when the QPC is deleted. - */ - if (!cla_table->alloc_static) - wait_for_completion(&object->free); - - /* release qpc buffer */ - cqm_cla_put(cqm_handle, cla_table, index, count); - - /* release the index to the bitmap */ - bitmap = &cla_table->bitmap; - cqm_bitmap_free(bitmap, index, count); -} - -s32 cqm_qpc_mpt_delete_ret(struct cqm_object *object) -{ - u32 object_type; - - object_type = object->object_type; - switch (object_type) { - case CQM_OBJECT_SERVICE_CTX: - cqm_qpc_mpt_delete(object); - return CQM_SUCCESS; - default: - return CQM_FAIL; - } -} - -void cqm_nonrdma_queue_delete(struct cqm_object *object) -{ - struct cqm_queue *common = container_of(object, struct cqm_queue, object); - struct cqm_nonrdma_qinfo *qinfo = container_of(common, struct cqm_nonrdma_qinfo, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_buf *q_room_buf = &common->q_room_buf_1; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_object_table *object_table = NULL; - struct cqm_cla_table *cla_table = NULL; - struct cqm_bitmap *bitmap = NULL; - u32 index = qinfo->common.index; - u32 count = qinfo->index_count; - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_nonrdma_queue_delete_cnt); - - /* The SCQ has an independent SCQN association. 
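* The teardown below drops the creation reference taken at create time;
* if data-path users still hold references from cqm3_object_get(), the
* wait_for_completion(&object->free) blocks until the final
* cqm3_object_put() completes it.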
*/ - if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); - CQM_PTR_CHECK_NO_RET(cla_table, CQM_FUNCTION_FAIL(cqm_cla_table_get_queue)); - - /* disassociate index and object */ - object_table = &cla_table->obj_table; - if (object->service_type == CQM_SERVICE_T_FC) - cqm_object_table_remove(cqm_handle, object_table, index, - object, false); - else - cqm_object_table_remove(cqm_handle, object_table, index, - object, true); - } - - /* wait for completion to ensure that all references to - * the queue are complete - */ - if (atomic_dec_and_test(&object->refcount)) - complete(&object->free); - else - cqm_err(handle->dev_hdl, "Nonrdma queue del: object is referred by others, has to wait for completion\n"); - - wait_for_completion(&object->free); - - /* If the queue header exists, release it. */ - if (qinfo->common.q_header_vaddr) { - pci_unmap_single(cqm_handle->dev, common->q_header_paddr, - sizeof(struct cqm_queue_header), - PCI_DMA_BIDIRECTIONAL); - - cqm_kfree_align(qinfo->common.q_header_vaddr); - qinfo->common.q_header_vaddr = NULL; - } - - cqm_buf_free(q_room_buf, cqm_handle->dev); - /* SRQ and SCQ have independent CTXs; release them. */ - if (object->object_type == CQM_OBJECT_NONRDMA_SRQ) { - /* The CTX of the nonrdma SRQ is - * allocated independently. - */ - if (common->q_ctx_vaddr) { - pci_unmap_single(cqm_handle->dev, common->q_ctx_paddr, - qinfo->q_ctx_size, - PCI_DMA_BIDIRECTIONAL); - - cqm_kfree_align(common->q_ctx_vaddr); - common->q_ctx_vaddr = NULL; - } - } else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { - /* The CTX of the nonrdma SCQ is managed by BAT/CLA. */ - cqm_cla_put(cqm_handle, cla_table, index, count); - - /* release the index to the bitmap */ - bitmap = &cla_table->bitmap; - cqm_bitmap_free(bitmap, index, count); - } -} - -s32 cqm_nonrdma_queue_delete_ret(struct cqm_object *object) -{ - u32 object_type; - - object_type = object->object_type; - switch (object_type) { - case CQM_OBJECT_NONRDMA_EMBEDDED_RQ: - case CQM_OBJECT_NONRDMA_EMBEDDED_SQ: - case CQM_OBJECT_NONRDMA_EMBEDDED_CQ: - case CQM_OBJECT_NONRDMA_SCQ: - cqm_nonrdma_queue_delete(object); - return CQM_SUCCESS; - case CQM_OBJECT_NONRDMA_SRQ: - cqm_nonrdma_queue_delete(object); - return CQM_SUCCESS; - default: - return CQM_FAIL; - } -} - -void cqm3_object_delete(struct cqm_object *object) -{ - struct cqm_handle *cqm_handle = NULL; - struct sphw_hwdev *handle = NULL; - - CQM_PTR_CHECK_NO_RET(object, CQM_PTR_NULL(object)); - if (!object->cqm_handle) { - pr_err("[CQM]object del: cqm_handle is null, service type %u, refcount %d\n", - object->service_type, (int)object->refcount.counter); - kfree(object); - return; - } - - cqm_handle = (struct cqm_handle *)object->cqm_handle; - - if (!cqm_handle->ex_handle) { - pr_err("[CQM]object del: ex_handle is null, service type %u, refcount %d\n", - object->service_type, (int)object->refcount.counter); - kfree(object); - return; - } - - handle = cqm_handle->ex_handle; - - if (object->service_type >= CQM_SERVICE_T_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->service_type)); - kfree(object); - return; - } - - if (cqm_qpc_mpt_delete_ret(object) == CQM_SUCCESS) { - kfree(object); - return; - } - - if (cqm_nonrdma_queue_delete_ret(object) == CQM_SUCCESS) { - kfree(object); - return; - } - - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); - kfree(object); -} - -struct cqm_object *cqm3_object_get(void *ex_handle, enum cqm_object_type object_type, - u32 index, bool bh) -{ - 
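/* A minimal usage sketch of the get/put pair (hypothetical caller):
 *
 *	struct cqm_object *obj;
 *
 *	obj = cqm3_object_get(ex_handle, CQM_OBJECT_SERVICE_CTX, xid, false);
 *	if (obj) {
 *		... use the service context ...
 *		cqm3_object_put(obj);
 *	}
 */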
struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_object_table *object_table = NULL; - struct cqm_cla_table *cla_table = NULL; - struct cqm_object *object = NULL; - - /* The data flow path takes performance into consideration and - * does not check input parameters. - */ - switch (object_type) { - case CQM_OBJECT_SERVICE_CTX: - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); - break; - case CQM_OBJECT_NONRDMA_SCQ: - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); - break; - default: - return NULL; - } - - if (!cla_table) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_table_get)); - return NULL; - } - - object_table = &cla_table->obj_table; - object = cqm_object_table_get(cqm_handle, object_table, index, bh); - return object; -} - -void cqm3_object_put(struct cqm_object *object) -{ - /* The data flow path takes performance into consideration and - * does not check input parameters. - */ - if (atomic_dec_and_test(&object->refcount)) - complete(&object->free); -} diff --git a/drivers/scsi/spfc/hw/spfc_cqm_object.h b/drivers/scsi/spfc/hw/spfc_cqm_object.h deleted file mode 100644 index 02a3e9070162b8cf21e09e013c98a9ac0224dd32..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_object.h +++ /dev/null @@ -1,279 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_CQM_OBJECT_H -#define SPFC_CQM_OBJECT_H - -#ifdef __cplusplus -#if __cplusplus -extern "C" { -#endif -#endif /* __cplusplus */ - -#define CQM_SUCCESS 0 -#define CQM_FAIL (-1) -/* Ignore the return value and continue */ -#define CQM_CONTINUE 1 - -/* type of WQE is LINK WQE */ -#define CQM_WQE_WF_LINK 1 - -/* chain queue mode */ -#define CQM_QUEUE_LINK_MODE 0 -/* RING queue mode */ -#define CQM_QUEUE_RING_MODE 1 - -#define CQM_CQ_DEPTH_MAX 32768 -#define CQM_CQ_DEPTH_MIN 256 - -/* linkwqe */ -#define CQM_LINK_WQE_CTRLSL_VALUE 2 -#define CQM_LINK_WQE_LP_VALID 1 -#define CQM_LINK_WQE_LP_INVALID 0 -#define CQM_LINK_WQE_OWNER_VALID 1 -#define CQM_LINK_WQE_OWNER_INVALID 0 - -#define CQM_ADDR_HI(addr) ((u32)((u64)(addr) >> 32)) -#define CQM_ADDR_LW(addr) ((u32)((u64)(addr) & 0xffffffff)) - -#define CQM_QPC_LAYOUT_TABLE_SIZE 16 - -#define CQM_MOD_CQM 8 - -/* generic linkwqe structure */ -struct cqm_linkwqe { - u32 rsv1 : 14; /* sys_node_name = port_wwn->sys_node_name; - *(u64 *)spfc_hba->sys_port_name = port_wwn->sys_port_wwn; - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "[info]Port(0x%x) updates WWNN(0x%llx) WWPN(0x%llx)", - spfc_hba->port_cfg.port_id, - *(u64 *)spfc_hba->sys_node_name, - *(u64 *)spfc_hba->sys_port_name); - - return RETURN_OK; -} - -static u32 spfc_port_config_set(void *hba, enum unf_port_config_set_op opcode, - void *var_in) -{ - u32 op_idx = 0; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - for (op_idx = 0; op_idx < sizeof(spfc_config_set_op) / - sizeof(struct spfc_port_cfg_op); op_idx++) { - if (opcode == spfc_config_set_op[op_idx].opcode) { - if (!spfc_config_set_op[op_idx].spfc_operation) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Null operation for configuration, opcode(0x%x), operation ID(0x%x)", - opcode, op_idx); - - return UNF_RETURN_ERROR; - } - return spfc_config_set_op[op_idx].spfc_operation(hba, var_in); - } - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]No operation code for configuration, 
opcode(0x%x)", - opcode); - - return UNF_RETURN_ERROR; -} - -static u32 spfc_port_config_get(void *hba, enum unf_port_cfg_get_op opcode, - void *para_out) -{ - u32 op_idx = 0; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - for (op_idx = 0; op_idx < sizeof(spfc_config_get_op) / - sizeof(struct spfc_port_cfg_get_op); op_idx++) { - if (opcode == spfc_config_get_op[op_idx].opcode) { - if (!spfc_config_get_op[op_idx].spfc_operation) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Null operation to get configuration, opcode(0x%x), operation ID(0x%x)", - opcode, op_idx); - return UNF_RETURN_ERROR; - } - return spfc_config_get_op[op_idx].spfc_operation(hba, para_out); - } - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]No operation to get configuration, opcode(0x%x)", - opcode); - - return UNF_RETURN_ERROR; -} - -static u32 spfc_fc_mode_check(void *hw_dev_handle) -{ - FC_CHECK_RETURN_VALUE(hw_dev_handle, UNF_RETURN_ERROR); - - if (!sphw_support_fc(hw_dev_handle, NULL)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Work mode is error"); - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Selected work mode is FC"); - - return RETURN_OK; -} - -static u32 spfc_check_port_cfg(const struct spfc_port_cfg *port_cfg) -{ - bool topo_condition = false; - bool speed_condition = false; - /* About Work Topology */ - topo_condition = ((port_cfg->port_topology != UNF_TOP_LOOP_MASK) && - (port_cfg->port_topology != UNF_TOP_P2P_MASK) && - (port_cfg->port_topology != UNF_TOP_AUTO_MASK)); - if (topo_condition) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Configured port topology(0x%x) is incorrect", - port_cfg->port_topology); - - return UNF_RETURN_ERROR; - } - - /* About Work Mode */ - if (port_cfg->port_mode != UNF_PORT_MODE_INI) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Configured port mode(0x%x) is incorrect", - port_cfg->port_mode); - - return UNF_RETURN_ERROR; - } - - /* About Work Speed */ - speed_condition = ((port_cfg->port_speed != UNF_PORT_SPEED_AUTO) && - (port_cfg->port_speed != UNF_PORT_SPEED_2_G) && - (port_cfg->port_speed != UNF_PORT_SPEED_4_G) && - (port_cfg->port_speed != UNF_PORT_SPEED_8_G) && - (port_cfg->port_speed != UNF_PORT_SPEED_16_G) && - (port_cfg->port_speed != UNF_PORT_SPEED_32_G)); - if (speed_condition) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Configured port speed(0x%x) is incorrect", - port_cfg->port_speed); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Check port configuration OK"); - - return RETURN_OK; -} - -static u32 spfc_get_port_cfg(struct spfc_hba_info *hba, - struct spfc_chip_info *chip_info, u8 card_num) -{ -#define UNF_CONFIG_ITEM_LEN 15 - /* Maximum length of a configuration item name, including the end - * character - */ -#define UNF_MAX_ITEM_NAME_LEN (32 + 1) - - /* Get and check parameters */ - char cfg_item[UNF_MAX_ITEM_NAME_LEN]; - u32 ret = UNF_RETURN_ERROR; - struct spfc_hba_info *spfc_hba = hba; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - memset((void *)cfg_item, 0, sizeof(cfg_item)); - - spfc_hba->card_info.func_num = (sphw_global_func_id(hba->dev_handle)) & UNF_FUN_ID_MASK; - spfc_hba->card_info.card_num = card_num; - - /* The range of PF of FC server is from PF1 to PF2 */ - snprintf(cfg_item, UNF_MAX_ITEM_NAME_LEN, "spfc_cfg_%1u", (spfc_hba->card_info.func_num)); - - cfg_item[UNF_MAX_ITEM_NAME_LEN - 1] = 0; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Get port configuration: %s", cfg_item); - - /* Get configuration 
parameters from file */ - UNF_LOWLEVEL_GET_CFG_PARMS(ret, cfg_item, &spfc_port_cfg_parm[ARRAY_INDEX_0], - (u32 *)(void *)(&spfc_hba->port_cfg), - sizeof(spfc_port_cfg_parm) / sizeof(struct unf_cfg_item)); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can't get configuration", - spfc_hba->port_cfg.port_id); - - return ret; - } - - if (max_parent_qpc_num <= SPFC_MAX_PARENT_QPC_NUM) { - spfc_hba->port_cfg.sest_num = UNF_SPFC_MAXRPORT_NUM; - spfc_hba->port_cfg.max_login = UNF_SPFC_MAXRPORT_NUM; - } - - spfc_hba->port_cfg.port_id &= SPFC_PORT_ID_MASK; - spfc_hba->port_cfg.port_id |= spfc_hba->card_info.card_num << UNF_SHIFT_8; - spfc_hba->port_cfg.port_id |= spfc_hba->card_info.func_num; - spfc_hba->port_cfg.tape_support = (u32)chip_info->tape_support; - - /* Parameters check */ - ret = spfc_check_port_cfg(&spfc_hba->port_cfg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) check configuration incorrect", - spfc_hba->port_cfg.port_id); - - return ret; - } - - /* Set configuration which is got from file */ - spfc_hba->port_speed_cfg = spfc_hba->port_cfg.port_speed; - spfc_hba->port_topo_cfg = spfc_hba->port_cfg.port_topology; - spfc_hba->port_mode = (enum unf_port_mode)(spfc_hba->port_cfg.port_mode); - - return ret; -} - -void spfc_generate_sys_wwn(struct spfc_hba_info *hba) -{ - FC_CHECK_RETURN_VOID(hba); - - *(u64 *)hba->sys_node_name = (((u64)hba->port_cfg.node_name_hi << UNF_SHIFT_32) | - (hba->port_cfg.node_name_lo)); - *(u64 *)hba->sys_port_name = (((u64)hba->port_cfg.port_name_hi << UNF_SHIFT_32) | - (hba->port_cfg.port_name_lo)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]NodeName = 0x%llx, PortName = 0x%llx", - *(u64 *)hba->sys_node_name, *(u64 *)hba->sys_port_name); -} - -static u32 spfc_create_queues(struct spfc_hba_info *hba) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - SPFC_FUNCTION_ENTER; - - /* Initialize shared resources of SCQ and SRQ in parent queue */ - ret = spfc_create_common_share_queues(hba); - if (ret != RETURN_OK) - goto out_create_common_queue_fail; - - /* Initialize parent queue manager resources */ - ret = spfc_alloc_parent_queue_mgr(hba); - if (ret != RETURN_OK) - goto out_free_share_queue_resource; - - /* Initialize shared WQE page pool in parent SQ */ - ret = spfc_alloc_parent_sq_wqe_page_pool(hba); - if (ret != RETURN_OK) - goto out_free_parent_queue_resource; - - ret = spfc_create_ssq(hba); - if (ret != RETURN_OK) - goto out_free_parent_wqe_page_pool; - - /* - * Notice: the configuration of SQ and QID(default_sqid) - * must be the same in FC - */ - hba->next_clear_sq = 0; - hba->default_sqid = SPFC_QID_SQ; - - SPFC_FUNCTION_RETURN; - return RETURN_OK; -out_free_parent_wqe_page_pool: - spfc_free_parent_sq_wqe_page_pool(hba); - -out_free_parent_queue_resource: - spfc_free_parent_queue_mgr(hba); - -out_free_share_queue_resource: - spfc_flush_scq_ctx(hba); - spfc_flush_srq_ctx(hba); - spfc_destroy_common_share_queues(hba); - -out_create_common_queue_fail: - SPFC_FUNCTION_RETURN; - - return ret; -} - -static u32 spfc_alloc_dma_buffers(struct spfc_hba_info *hba) -{ - struct pci_dev *pci_dev = NULL; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - pci_dev = hba->pci_dev; - FC_CHECK_RETURN_VALUE(pci_dev, UNF_RETURN_ERROR); - - hba->sfp_buf = dma_alloc_coherent(&hba->pci_dev->dev, - sizeof(struct unf_sfp_err_rome_info), - &hba->sfp_dma_addr, GFP_KERNEL); - if (!hba->sfp_buf) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) 
can't allocate SFP DMA buffer", - hba->port_cfg.port_id); - - return UNF_RETURN_ERROR; - } - memset(hba->sfp_buf, 0, sizeof(struct unf_sfp_err_rome_info)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) allocate sfp buffer(0x%p 0x%llx)", - hba->port_cfg.port_id, hba->sfp_buf, - (u64)hba->sfp_dma_addr); - - return RETURN_OK; -} - -static void spfc_free_dma_buffers(struct spfc_hba_info *hba) -{ - struct pci_dev *pci_dev = NULL; - - FC_CHECK_RETURN_VOID(hba); - pci_dev = hba->pci_dev; - FC_CHECK_RETURN_VOID(pci_dev); - - if (hba->sfp_buf) { - dma_free_coherent(&pci_dev->dev, sizeof(struct unf_sfp_err_rome_info), - hba->sfp_buf, hba->sfp_dma_addr); - - hba->sfp_buf = NULL; - hba->sfp_dma_addr = 0; - } -} - -static void spfc_destroy_queues(struct spfc_hba_info *hba) -{ - /* Free ssq */ - spfc_free_ssq(hba, SPFC_MAX_SSQ_NUM); - - /* Free parent queue resource */ - spfc_free_parent_queues(hba); - - /* Free queue manager resource */ - spfc_free_parent_queue_mgr(hba); - - /* Free linked List SQ and WQE page pool resource */ - spfc_free_parent_sq_wqe_page_pool(hba); - - /* Free shared SRQ and SCQ queue resource */ - spfc_destroy_common_share_queues(hba); -} - -static u32 spfc_alloc_default_session(struct spfc_hba_info *hba) -{ - struct unf_port_info rport_info = {0}; - u32 wait_sq_cnt = 0; - - rport_info.nport_id = 0xffffff; - rport_info.rport_index = SPFC_DEFAULT_RPORT_INDEX; - rport_info.local_nport_id = 0xffffff; - rport_info.port_name = 0; - rport_info.cs_ctrl = 0x81; - - if (spfc_alloc_parent_resource((void *)hba, &rport_info) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Alloc default session resource failed"); - goto failed; - } - - for (;;) { - if (hba->default_sq_info.default_sq_flag == 1) - break; - - msleep(SPFC_WAIT_SESS_ENABLE_ONE_TIME_MS); - wait_sq_cnt++; - if (wait_sq_cnt >= SPFC_MAX_WAIT_LOOP_TIMES) { - hba->default_sq_info.default_sq_flag = 0xF; - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Wait Default Session enable timeout"); - goto failed; - } - } - - if (spfc_mbx_config_default_session(hba, 1) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Notify up config default session table fail"); - goto failed; - } - - return RETURN_OK; - -failed: - spfc_sess_resource_free_sync((void *)hba, &rport_info); - return UNF_RETURN_ERROR; -} - -static u32 spfc_init_host_res(struct spfc_hba_info *hba) -{ - u32 ret = RETURN_OK; - struct spfc_hba_info *spfc_hba = hba; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - - SPFC_FUNCTION_ENTER; - - /* Initialize spin lock */ - spin_lock_init(&spfc_hba->hba_lock); - spin_lock_init(&spfc_hba->flush_state_lock); - spin_lock_init(&spfc_hba->clear_state_lock); - spin_lock_init(&spfc_hba->spin_lock); - spin_lock_init(&spfc_hba->srq_delay_info.srq_lock); - /* Initialize init_completion */ - init_completion(&spfc_hba->hba_init_complete); - init_completion(&spfc_hba->mbox_complete); - init_completion(&spfc_hba->vpf_complete); - init_completion(&spfc_hba->fcfi_complete); - init_completion(&spfc_hba->get_sfp_complete); - /* Step-1: initialize the communication channel between driver and uP */ - ret = spfc_initial_chip_access(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't initialize chip access", - spfc_hba->port_cfg.port_id); - - goto out_unmap_memory; - } - /* Step-2: get chip configuration information before creating - * queue resources - */ - ret = spfc_get_chip_info(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, 
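/* A minimal sketch of the coherent DMA pairing used by
 * spfc_alloc_dma_buffers()/spfc_free_dma_buffers() above:
 * dma_alloc_coherent() returns a CPU virtual address and writes the bus
 * address into hba->sfp_dma_addr; the release must pass the same size
 * and DMA handle back to dma_free_coherent().
 */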
UNF_ERR, - "[err]SPFC port(0x%x) can't get chip information", - spfc_hba->port_cfg.port_id); - - goto out_unmap_memory; - } - - /* Step-3: create queue resources */ - ret = spfc_create_queues(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't create queues", - spfc_hba->port_cfg.port_id); - - goto out_release_chip_access; - } - /* Allocate DMA buffer (SFP information) */ - ret = spfc_alloc_dma_buffers(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't allocate DMA buffers", - spfc_hba->port_cfg.port_id); - - goto out_destroy_queues; - } - /* Initialize status parameters */ - spfc_hba->active_port_speed = UNF_PORT_SPEED_UNKNOWN; - spfc_hba->active_topo = UNF_ACT_TOP_UNKNOWN; - spfc_hba->sfp_on = false; - spfc_hba->port_loop_role = UNF_LOOP_ROLE_MASTER_OR_SLAVE; - spfc_hba->phy_link = UNF_PORT_LINK_DOWN; - spfc_hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_INIT; - - /* Initialize parameters referring to the lowlevel */ - spfc_hba->remote_rttov_tag = 0; - spfc_hba->port_bb_scn_cfg = SPFC_LOWLEVEL_DEFAULT_BB_SCN; - - /* Initialize timer, and the unit of E_D_TOV is ms */ - spfc_hba->remote_edtov_tag = 0; - spfc_hba->remote_bb_credit = 0; - spfc_hba->compared_bb_scn = 0; - spfc_hba->compared_edtov_val = UNF_DEFAULT_EDTOV; - spfc_hba->compared_ratov_val = UNF_DEFAULT_RATOV; - spfc_hba->removing = false; - spfc_hba->dev_present = true; - - /* Initialize parameters about cos */ - spfc_hba->cos_bitmap = cos_bit_map; - memset(spfc_hba->cos_rport_cnt, 0, SPFC_MAX_COS_NUM * sizeof(atomic_t)); - - /* Mailbox access completion */ - complete(&spfc_hba->mbox_complete); - - ret = spfc_alloc_default_session(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't allocate Default Session", - spfc_hba->port_cfg.port_id); - - goto out_destroy_dma_buff; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]SPFC port(0x%x) initialize host resources succeeded", - spfc_hba->port_cfg.port_id); - - return ret; - -out_destroy_dma_buff: - spfc_free_dma_buffers(spfc_hba); -out_destroy_queues: - spfc_flush_scq_ctx(spfc_hba); - spfc_flush_srq_ctx(spfc_hba); - spfc_destroy_queues(spfc_hba); - -out_release_chip_access: - spfc_release_chip_access(spfc_hba); - -out_unmap_memory: - return ret; -} - -static u32 spfc_get_chip_info(struct spfc_hba_info *hba) -{ - u32 ret = RETURN_OK; - u32 exi_count = 0; - u32 exi_base = 0; - u32 exi_stride = 0; - u32 fun_idx = 0; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - hba->vpid_start = hba->service_cap.dev_fc_cap.vp_id_start; - hba->vpid_end = hba->service_cap.dev_fc_cap.vp_id_end; - fun_idx = sphw_global_func_id(hba->dev_handle); - - exi_count = (max_parent_qpc_num <= SPFC_MAX_PARENT_QPC_NUM) ? - exit_count >> UNF_SHIFT_1 : exit_count; - exi_stride = (max_parent_qpc_num <= SPFC_MAX_PARENT_QPC_NUM) ? 
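/* A worked example with hypothetical module parameters: exit_count = 2048
 * and exit_stride = 256 are halved to 1024 and 128 when the smaller
 * parent QPC table is in use; function index 2 then starts its exchange
 * range at exit_base + 2 * 128.
 */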
- exit_stride >> UNF_SHIFT_1 : exit_stride; - exi_base = exit_base; - - exi_base += (fun_idx * exi_stride); - hba->exi_base = SPFC_LSW(exi_base); - hba->exi_count = SPFC_LSW(exi_count); - hba->max_support_speed = max_speed; - hba->port_index = SPFC_LSB(fun_idx); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) base information: PortIndex=0x%x, ExiBase=0x%x, ExiCount=0x%x, VpIdStart=0x%x, VpIdEnd=0x%x, MaxSpeed=0x%x, Speed=0x%x, Topo=0x%x", - hba->port_cfg.port_id, hba->port_index, hba->exi_base, - hba->exi_count, hba->vpid_start, hba->vpid_end, - hba->max_support_speed, hba->port_speed_cfg, hba->port_topo_cfg); - - return ret; -} - -static u32 spfc_initial_chip_access(struct spfc_hba_info *hba) -{ - int ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - /* 1. Initialize cqm access related with scq, emb cq, aeq(ucode-->driver) */ - service_cqm_temp.service_handle = hba; - - ret = cqm3_service_register(hba->dev_handle, &service_cqm_temp); - if (ret != CQM_SUCCESS) - return UNF_RETURN_ERROR; - - /* 2. Initialize mailbox(driver-->up), aeq(up--->driver) access */ - ret = sphw_register_mgmt_msg_cb(hba->dev_handle, COMM_MOD_FC, hba, - spfc_up_msg2driver_proc); - if (ret != CQM_SUCCESS) - goto out_unreg_cqm; - - return RETURN_OK; - -out_unreg_cqm: - cqm3_service_unregister(hba->dev_handle, SERVICE_T_FC); - - return UNF_RETURN_ERROR; -} - -static void spfc_release_chip_access(struct spfc_hba_info *hba) -{ - FC_CHECK_RETURN_VOID(hba); - FC_CHECK_RETURN_VOID(hba->dev_handle); - - sphw_unregister_mgmt_msg_cb(hba->dev_handle, COMM_MOD_FC); - - cqm3_service_unregister(hba->dev_handle, SERVICE_T_FC); -} - -static void spfc_update_lport_config(struct spfc_hba_info *hba, - struct unf_low_level_functioon_op *lowlevel_func) -{ -#define SPFC_MULTI_CONF_NONSUPPORT 0 - - struct unf_lport_cfg_item *lport_cfg = NULL; - - lport_cfg = &lowlevel_func->lport_cfg_items; - - if (hba->port_cfg.max_login < lowlevel_func->support_max_rport) - lport_cfg->max_login = hba->port_cfg.max_login; - else - lport_cfg->max_login = lowlevel_func->support_max_rport; - - if (hba->port_cfg.sest_num >> UNF_SHIFT_1 < UNF_RESERVE_SFS_XCHG) - lport_cfg->max_io = hba->port_cfg.sest_num; - else - lport_cfg->max_io = hba->port_cfg.sest_num - UNF_RESERVE_SFS_XCHG; - - lport_cfg->max_sfs_xchg = UNF_MAX_SFS_XCHG; - lport_cfg->port_id = hba->port_cfg.port_id; - lport_cfg->port_mode = hba->port_cfg.port_mode; - lport_cfg->port_topology = hba->port_cfg.port_topology; - lport_cfg->max_queue_depth = hba->port_cfg.max_queue_depth; - - lport_cfg->port_speed = hba->port_cfg.port_speed; - lport_cfg->tape_support = hba->port_cfg.tape_support; - - lowlevel_func->sys_port_name = *(u64 *)hba->sys_port_name; - lowlevel_func->sys_node_name = *(u64 *)hba->sys_node_name; - - /* Update chip information */ - lowlevel_func->dev = hba->pci_dev; - lowlevel_func->chip_info.chip_work_mode = hba->work_mode; - lowlevel_func->chip_info.chip_type = hba->chip_type; - lowlevel_func->chip_info.disable_err_flag = 0; - lowlevel_func->support_max_speed = hba->max_support_speed; - lowlevel_func->support_min_speed = hba->min_support_speed; - - lowlevel_func->chip_id = 0; - - lowlevel_func->sfp_type = UNF_PORT_TYPE_FC_SFP; - - lowlevel_func->multi_conf_support = SPFC_MULTI_CONF_NONSUPPORT; - lowlevel_func->support_max_hot_tag_range = hba->port_cfg.sest_num; - lowlevel_func->update_fw_reset_active = UNF_PORT_UNGRADE_FW_RESET_INACTIVE; - lowlevel_func->port_type = 0; /* DRV_PORT_ENTITY_TYPE_PHYSICAL */ - - if ((lport_cfg->port_id & 
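/* A worked example of the max_io sizing above, with hypothetical values:
 * sest_num = 2048 and UNF_RESERVE_SFS_XCHG = 64 give
 * max_io = 2048 - 64 = 1984, reserving a slice of exchanges for SFS;
 * when sest_num >> 1 < UNF_RESERVE_SFS_XCHG, max_io stays at sest_num.
 */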
UNF_FIRST_LPORT_ID_MASK) == lport_cfg->port_id) - lowlevel_func->support_upgrade_report = UNF_PORT_SUPPORT_UPGRADE_REPORT; - else - lowlevel_func->support_upgrade_report = UNF_PORT_UNSUPPORT_UPGRADE_REPORT; -} - -static u32 spfc_create_lport(struct spfc_hba_info *hba) -{ - void *lport = NULL; - struct unf_low_level_functioon_op lowlevel_func; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - spfc_func_op.dev = hba->pci_dev; - memcpy(&lowlevel_func, &spfc_func_op, sizeof(struct unf_low_level_functioon_op)); - - /* Update port configuration table */ - spfc_update_lport_config(hba, &lowlevel_func); - - /* Apply for lport resources */ - UNF_LOWLEVEL_ALLOC_LPORT(lport, hba, &lowlevel_func); - if (!lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can't allocate Lport", - hba->port_cfg.port_id); - - return UNF_RETURN_ERROR; - } - hba->lport = lport; - - return RETURN_OK; -} - -void spfc_release_probe_index(u32 probe_index) -{ - if (probe_index >= SPFC_MAX_PROBE_PORT_NUM) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Probe index(0x%x) is invalid", probe_index); - - return; - } - - spin_lock(&probe_spin_lock); - if (!test_bit((int)probe_index, (const ulong *)probe_bit_map)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Probe index(0x%x) is not probed", - probe_index); - - spin_unlock(&probe_spin_lock); - - return; - } - - clear_bit((int)probe_index, probe_bit_map); - spin_unlock(&probe_spin_lock); -} - -static void spfc_delete_default_session(struct spfc_hba_info *hba) -{ - struct unf_port_info rport_info = {0}; - - rport_info.nport_id = 0xffffff; - rport_info.rport_index = SPFC_DEFAULT_RPORT_INDEX; - rport_info.local_nport_id = 0xffffff; - rport_info.port_name = 0; - rport_info.cs_ctrl = 0x81; - - /* Need config table to up first, then delete default session */ - (void)spfc_mbx_config_default_session(hba, 0); - spfc_sess_resource_free_sync((void *)hba, &rport_info); -} - -static void spfc_release_host_res(struct spfc_hba_info *hba) -{ - spfc_free_dma_buffers(hba); - - spfc_destroy_queues(hba); - - spfc_release_chip_access(hba); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) release low level resource done", - hba->port_cfg.port_id); -} - -static struct spfc_hba_info *spfc_init_hba(struct pci_dev *pci_dev, - void *hw_dev_handle, - struct spfc_chip_info *chip_info, - u8 card_num) -{ - u32 ret = RETURN_OK; - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VALUE(pci_dev, NULL); - FC_CHECK_RETURN_VALUE(hw_dev_handle, NULL); - - /* Allocate HBA */ - hba = kmalloc(sizeof(struct spfc_hba_info), GFP_ATOMIC); - FC_CHECK_RETURN_VALUE(hba, NULL); - memset(hba, 0, sizeof(struct spfc_hba_info)); - - /* Heartbeat default */ - hba->heart_status = 1; - /* Private data in pciDev */ - hba->pci_dev = pci_dev; - hba->dev_handle = hw_dev_handle; - - /* Work mode */ - hba->work_mode = chip_info->work_mode; - /* Create work queue */ - hba->work_queue = create_singlethread_workqueue("spfc"); - if (!hba->work_queue) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Spfc creat workqueue failed"); - - goto out_free_hba; - } - /* Init delay work */ - INIT_DELAYED_WORK(&hba->srq_delay_info.del_work, spfc_rcvd_els_from_srq_timeout); - INIT_WORK(&hba->els_srq_clear_work, spfc_wq_destroy_els_srq); - - /* Notice: Only use FC features */ - (void)sphw_support_fc(hw_dev_handle, &hba->service_cap); - /* Check parent context available */ - if (hba->service_cap.dev_fc_cap.max_parent_qpc_num == 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]FC parent context is not 
allocated in this function"); - - goto out_destroy_workqueue; - } - max_parent_qpc_num = hba->service_cap.dev_fc_cap.max_parent_qpc_num; - - /* Get port configuration */ - ret = spfc_get_port_cfg(hba, chip_info, card_num); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Can't get port configuration"); - - goto out_destroy_workqueue; - } - /* Get WWN */ - spfc_generate_sys_wwn(hba); - - /* Initialize host resources */ - ret = spfc_init_host_res(hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't initialize host resource", - hba->port_cfg.port_id); - - goto out_destroy_workqueue; - } - /* Local Port create */ - ret = spfc_create_lport(hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't create lport", - hba->port_cfg.port_id); - goto out_release_host_res; - } - complete(&hba->hba_init_complete); - - /* Print reference count */ - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[info]Port(0x%x) probe succeeded. Memory reference is 0x%x", - hba->port_cfg.port_id, atomic_read(&fc_mem_ref)); - - return hba; - -out_release_host_res: - spfc_delete_default_session(hba); - spfc_flush_scq_ctx(hba); - spfc_flush_srq_ctx(hba); - spfc_release_host_res(hba); - -out_destroy_workqueue: - flush_workqueue(hba->work_queue); - destroy_workqueue(hba->work_queue); - hba->work_queue = NULL; - -out_free_hba: - kfree(hba); - - return NULL; -} - -void spfc_get_total_probed_num(u32 *probe_cnt) -{ - u32 i = 0; - u32 cnt = 0; - - spin_lock(&probe_spin_lock); - for (i = 0; i < SPFC_MAX_PROBE_PORT_NUM; i++) { - if (test_bit((int)i, (const ulong *)probe_bit_map)) - cnt++; - } - - *probe_cnt = cnt; - spin_unlock(&probe_spin_lock); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Probed port total number is 0x%x", cnt); -} - -u32 spfc_assign_card_num(struct spfc_lld_dev *lld_dev, - struct spfc_chip_info *chip_info, u8 *card_num) -{ - u8 i = 0; - u64 card_index = 0; - - card_index = (!pci_is_root_bus(lld_dev->pdev->bus)) ? 
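/* Hypothetical example: two FC functions behind one PCIe bridge observe
 * the same parent bus number, resolve to the same card_index below, and
 * therefore share a single card_num slot (port_count is incremented
 * rather than a new bit being set).
 */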
- lld_dev->pdev->bus->parent->number : lld_dev->pdev->bus->number; - - spin_lock(&probe_spin_lock); - - for (i = 0; i < SPFC_MAX_CARD_NUM; i++) { - if (test_bit((int)i, (const ulong *)card_num_bit_map)) { - if (card_num_manage[i].card_number == - card_index && !card_num_manage[i].is_removing - ) { - card_num_manage[i].port_count++; - *card_num = i; - spin_unlock(&probe_spin_lock); - return RETURN_OK; - } - } - } - - for (i = 0; i < SPFC_MAX_CARD_NUM; i++) { - if (!test_bit((int)i, (const ulong *)card_num_bit_map)) { - card_num_manage[i].card_number = card_index; - card_num_manage[i].port_count = 1; - card_num_manage[i].is_removing = false; - - *card_num = i; - set_bit(i, card_num_bit_map); - - spin_unlock(&probe_spin_lock); - - return RETURN_OK; - } - } - - spin_unlock(&probe_spin_lock); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Have probe more than 0x%x port, probe failed", i); - - return UNF_RETURN_ERROR; -} - -static void spfc_dec_and_free_card_num(u8 card_num) -{ - /* 2 ports per card */ - if (card_num >= SPFC_MAX_CARD_NUM) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Card number(0x%x) is invalid", card_num); - - return; - } - - spin_lock(&probe_spin_lock); - - if (test_bit((int)card_num, (const ulong *)card_num_bit_map)) { - card_num_manage[card_num].port_count--; - card_num_manage[card_num].is_removing = true; - - if (card_num_manage[card_num].port_count == 0) { - card_num_manage[card_num].card_number = 0; - card_num_manage[card_num].is_removing = false; - clear_bit((int)card_num, card_num_bit_map); - } - } else { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Can not find card number(0x%x)", card_num); - } - - spin_unlock(&probe_spin_lock); -} - -u32 spfc_assign_probe_index(u32 *probe_index) -{ - u32 i = 0; - - spin_lock(&probe_spin_lock); - for (i = 0; i < SPFC_MAX_PROBE_PORT_NUM; i++) { - if (!test_bit((int)i, (const ulong *)probe_bit_map)) { - *probe_index = i; - set_bit(i, probe_bit_map); - - spin_unlock(&probe_spin_lock); - - return RETURN_OK; - } - } - spin_unlock(&probe_spin_lock); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Have probe more than 0x%x port, probe failed", i); - - return UNF_RETURN_ERROR; -} - -u32 spfc_get_probe_index_by_port_id(u32 port_id, u32 *probe_index) -{ - u32 total_probe_num = 0; - u32 i = 0; - u32 probe_cnt = 0; - - spfc_get_total_probed_num(&total_probe_num); - - for (i = 0; i < SPFC_MAX_PROBE_PORT_NUM; i++) { - if (!spfc_hba[i]) - continue; - - if (total_probe_num == probe_cnt) - break; - - if (port_id == spfc_hba[i]->port_cfg.port_id) { - *probe_index = spfc_hba[i]->probe_index; - - return RETURN_OK; - } - - probe_cnt++; - } - - return UNF_RETURN_ERROR; -} - -static int spfc_probe(struct spfc_lld_dev *lld_dev, void **uld_dev, - char *uld_dev_name) -{ - struct pci_dev *pci_dev = NULL; - struct spfc_hba_info *hba = NULL; - u32 ret = UNF_RETURN_ERROR; - const u8 work_mode = SPFC_SMARTIO_WORK_MODE_FC; - u32 probe_index = 0; - u32 probe_total_num = 0; - u8 card_num = INVALID_VALUE8; - struct spfc_chip_info chip_info; - - FC_CHECK_RETURN_VALUE(lld_dev, UNF_RETURN_ERROR_S32); - FC_CHECK_RETURN_VALUE(lld_dev->hwdev, UNF_RETURN_ERROR_S32); - FC_CHECK_RETURN_VALUE(lld_dev->pdev, UNF_RETURN_ERROR_S32); - FC_CHECK_RETURN_VALUE(uld_dev, UNF_RETURN_ERROR_S32); - FC_CHECK_RETURN_VALUE(uld_dev_name, UNF_RETURN_ERROR_S32); - - pci_dev = lld_dev->pdev; - memset(&chip_info, 0, sizeof(struct spfc_chip_info)); - /* 1. 
Get & check Total_Probed_number */ - spfc_get_total_probed_num(&probe_total_num); - if (probe_total_num >= allowed_probe_num) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Total probe num (0x%x) is larger than allowed number(0x%x)", - probe_total_num, allowed_probe_num); - - return UNF_RETURN_ERROR_S32; - } - /* 2. Check device work mode */ - ret = spfc_fc_mode_check(lld_dev->hwdev); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR_S32; - - /* 3. Assign & Get new Probe index */ - ret = spfc_assign_probe_index(&probe_index); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]AssignProbeIndex fail"); - - return UNF_RETURN_ERROR_S32; - } - - ret = spfc_get_chip_capability((void *)lld_dev->hwdev, &chip_info); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]GetChipCapability fail"); - return UNF_RETURN_ERROR_S32; - } - chip_info.work_mode = work_mode; - - /* Assign & Get new Card number */ - ret = spfc_assign_card_num(lld_dev, &chip_info, &card_num); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]spfc_assign_card_num fail"); - spfc_release_probe_index(probe_index); - - return UNF_RETURN_ERROR_S32; - } - - /* Init HBA resource */ - hba = spfc_init_hba(pci_dev, lld_dev->hwdev, &chip_info, card_num); - if (!hba) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Probe HBA(0x%x) failed. Memory reference = 0x%x", - probe_index, atomic_read(&fc_mem_ref)); - - spfc_release_probe_index(probe_index); - spfc_dec_and_free_card_num(card_num); - - return UNF_RETURN_ERROR_S32; - } - - /* Name by the order of probe */ - *uld_dev = hba; - snprintf(uld_dev_name, SPFC_PORT_NAME_STR_LEN, "%s%02x%02x", - SPFC_PORT_NAME_LABEL, hba->card_info.card_num, - hba->card_info.func_num); - memcpy(hba->port_name, uld_dev_name, SPFC_PORT_NAME_STR_LEN); - hba->probe_index = probe_index; - spfc_hba[probe_index] = hba; - - return RETURN_OK; -} - -u32 spfc_sfp_switch(void *hba, void *para_in) -{ - struct spfc_hba_info *spfc_hba = (struct spfc_hba_info *)hba; - bool turn_on = false; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(para_in, UNF_RETURN_ERROR); - - /* Redundancy check */ - turn_on = *((bool *)para_in); - if ((u32)turn_on == (u32)spfc_hba->sfp_on) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) FC physical port is already %s", - spfc_hba->port_cfg.port_id, (turn_on) ? 
"on" : "off"); - - return ret; - } - - if (turn_on) { - ret = spfc_port_check_fw_ready(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Get port(0x%x) clear state failed, turn on fail", - spfc_hba->port_cfg.port_id); - return ret; - } - /* At first, configure port table info if necessary */ - ret = spfc_config_port_table(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can't configurate port table", - spfc_hba->port_cfg.port_id); - - return ret; - } - } - - /* Switch physical port */ - ret = spfc_port_switch(spfc_hba, turn_on); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Port(0x%x) switch failed", - spfc_hba->port_cfg.port_id); - - return ret; - } - - /* Update HBA's sfp state */ - spfc_hba->sfp_on = turn_on; - - return ret; -} - -static u32 spfc_destroy_lport(struct spfc_hba_info *hba) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - UNF_LOWLEVEL_RELEASE_LOCAL_PORT(ret, hba->lport); - hba->lport = NULL; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) destroy L_Port done", - hba->port_cfg.port_id); - - return ret; -} - -static u32 spfc_port_check_fw_ready(struct spfc_hba_info *hba) -{ -#define SPFC_PORT_CLEAR_DONE 0 -#define SPFC_PORT_CLEAR_DOING 1 -#define SPFC_WAIT_ONE_TIME_MS 1000 -#define SPFC_LOOP_TIMES 30 - - u32 clear_state = SPFC_PORT_CLEAR_DOING; - u32 ret = RETURN_OK; - u32 wait_timeout = 0; - - do { - msleep(SPFC_WAIT_ONE_TIME_MS); - wait_timeout += SPFC_WAIT_ONE_TIME_MS; - ret = spfc_mbx_get_fw_clear_stat(hba, &clear_state); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - - /* Total time more than 30s retry more than 3 times failed */ - if (wait_timeout > SPFC_LOOP_TIMES * SPFC_WAIT_ONE_TIME_MS && - clear_state != SPFC_PORT_CLEAR_DONE) - return UNF_RETURN_ERROR; - } while (clear_state != SPFC_PORT_CLEAR_DONE); - - return RETURN_OK; -} - -u32 spfc_port_reset(struct spfc_hba_info *hba) -{ - u32 ret = RETURN_OK; - ulong timeout = 0; - bool sfp_before_reset = false; - bool off_para_in = false; - struct pci_dev *pci_dev = NULL; - struct spfc_hba_info *spfc_hba = hba; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - pci_dev = spfc_hba->pci_dev; - FC_CHECK_RETURN_VALUE(pci_dev, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Port(0x%x) reset HBA begin", - spfc_hba->port_cfg.port_id); - - /* Wait for last init/reset completion */ - timeout = wait_for_completion_timeout(&spfc_hba->hba_init_complete, - (ulong)SPFC_PORT_INIT_TIME_SEC_MAX * HZ); - - if (timeout == SPFC_ZERO) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Last HBA initialize/reset timeout: %d second", - SPFC_PORT_INIT_TIME_SEC_MAX); - - return UNF_RETURN_ERROR; - } - - /* Save current port state */ - sfp_before_reset = spfc_hba->sfp_on; - - /* Inform the reset event to CM level before beginning */ - UNF_LOWLEVEL_PORT_EVENT(ret, spfc_hba->lport, UNF_PORT_RESET_START, NULL); - spfc_hba->reset_time = jiffies; - - /* Close SFP */ - ret = spfc_sfp_switch(spfc_hba, &off_para_in); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can't close SFP", - spfc_hba->port_cfg.port_id); - spfc_hba->sfp_on = sfp_before_reset; - - complete(&spfc_hba->hba_init_complete); - - return ret; - } - - ret = spfc_port_check_fw_ready(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Get port(0x%x) clear state failed, hang port and report chip error", - 
-			     spfc_hba->port_cfg.port_id);
-
-		complete(&spfc_hba->hba_init_complete);
-
-		return ret;
-	}
-
-	spfc_queue_pre_process(spfc_hba, false);
-
-	ret = spfc_mb_reset_chip(spfc_hba, SPFC_MBOX_SUBTYPE_LIGHT_RESET);
-	if (ret != RETURN_OK) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR,
-			     "[err]SPFC port(0x%x) can't reset chip mailbox",
-			     spfc_hba->port_cfg.port_id);
-
-		UNF_LOWLEVEL_PORT_EVENT(ret, spfc_hba->lport, UNF_PORT_GET_FWLOG, NULL);
-		UNF_LOWLEVEL_PORT_EVENT(ret, spfc_hba->lport, UNF_PORT_DEBUG_DUMP, NULL);
-	}
-
-	/* Inform the success to CM level */
-	UNF_LOWLEVEL_PORT_EVENT(ret, spfc_hba->lport, UNF_PORT_RESET_END, NULL);
-
-	/* Queue open */
-	spfc_queue_post_process(spfc_hba);
-
-	/* Open SFP */
-	(void)spfc_sfp_switch(spfc_hba, &sfp_before_reset);
-
-	complete(&spfc_hba->hba_init_complete);
-
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
-		     "[event]Port(0x%x) reset HBA done",
-		     spfc_hba->port_cfg.port_id);
-
-	return ret;
-#undef SPFC_WAIT_LINKDOWN_EVENT_MS
-}
-
-static u32 spfc_delete_scqc_via_cmdq_sync(struct spfc_hba_info *hba, u32 scqn)
-{
-	/* Via CMND Queue */
-#define SPFC_DEL_SCQC_TIMEOUT 3000
-
-	int ret;
-	struct spfc_cmdqe_delete_scqc del_scqc_cmd;
-	struct sphw_cmd_buf *cmd_buf;
-
-	/* Alloc cmd buffer */
-	cmd_buf = sphw_alloc_cmd_buf(hba->dev_handle);
-	if (!cmd_buf) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR,
-			     "[err]cmdq in_cmd_buf alloc failed");
-
-		SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_DEL_SCQC);
-		return UNF_RETURN_ERROR;
-	}
-
-	/* Build & Send Cmnd */
-	memset(&del_scqc_cmd, 0, sizeof(del_scqc_cmd));
-	del_scqc_cmd.wd0.task_type = SPFC_TASK_T_DEL_SCQC;
-	del_scqc_cmd.wd1.scqn = SPFC_LSW(scqn);
-	spfc_cpu_to_big32(&del_scqc_cmd, sizeof(del_scqc_cmd));
-	memcpy(cmd_buf->buf, &del_scqc_cmd, sizeof(del_scqc_cmd));
-	cmd_buf->size = sizeof(del_scqc_cmd);
-
-	ret = sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0, cmd_buf,
-				    NULL, NULL, SPFC_DEL_SCQC_TIMEOUT,
-				    SPHW_CHANNEL_FC);
-
-	/* Free cmnd buffer */
-	sphw_free_cmd_buf(hba->dev_handle, cmd_buf);
-
-	if (ret) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR,
-			     "[err]Send del scqc via cmdq failed, ret=0x%x",
-			     ret);
-
-		SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_DEL_SCQC);
-		return UNF_RETURN_ERROR;
-	}
-
-	SPFC_IO_STAT(hba, SPFC_TASK_T_DEL_SCQC);
-
-	return RETURN_OK;
-}
-
-static u32 spfc_delete_srqc_via_cmdq_sync(struct spfc_hba_info *hba, u64 srqc_gpa)
-{
-	/* Via CMND Queue */
-#define SPFC_DEL_SRQC_TIMEOUT 3000
-
-	int ret;
-	struct spfc_cmdqe_delete_srqc del_srqc_cmd;
-	struct sphw_cmd_buf *cmd_buf;
-
-	/* Alloc Cmnd buffer */
-	cmd_buf = sphw_alloc_cmd_buf(hba->dev_handle);
-	if (!cmd_buf) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR,
-			     "[err]cmdq in_cmd_buf allocate failed");
-
-		SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_DEL_SRQC);
-		return UNF_RETURN_ERROR;
-	}
-
-	/* Build & Send Cmnd */
-	memset(&del_srqc_cmd, 0, sizeof(del_srqc_cmd));
-	del_srqc_cmd.wd0.task_type = SPFC_TASK_T_DEL_SRQC;
-	del_srqc_cmd.srqc_gpa_h = SPFC_HIGH_32_BITS(srqc_gpa);
-	del_srqc_cmd.srqc_gpa_l = SPFC_LOW_32_BITS(srqc_gpa);
-	spfc_cpu_to_big32(&del_srqc_cmd, sizeof(del_srqc_cmd));
-	memcpy(cmd_buf->buf, &del_srqc_cmd, sizeof(del_srqc_cmd));
-	cmd_buf->size = sizeof(del_srqc_cmd);
-
-	ret = sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0, cmd_buf,
-				    NULL, NULL, SPFC_DEL_SRQC_TIMEOUT,
-				    SPHW_CHANNEL_FC);
-
-	/* Free Cmnd Buffer */
-	sphw_free_cmd_buf(hba->dev_handle, cmd_buf);
-
-	if (ret) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR,
-			     "[err]Send del srqc via cmdq failed, ret=0x%x",
-			     ret);
-
-		SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_DEL_SRQC);
-		return UNF_RETURN_ERROR;
-	}
-
-	SPFC_IO_STAT(hba, SPFC_TASK_T_DEL_SRQC);
-
-	return RETURN_OK;
-}
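Both delete helpers above follow the same synchronous cmdq sequence: allocate a command buffer, fill a host-endian struct, byte-swap it wholesale with spfc_cpu_to_big32(), copy it in, and post it. The helper's body is not part of this hunk; one plausible reading of its semantics, assumed here purely for illustration:

```c
#include <linux/types.h>
#include <asm/byteorder.h>

/* Assumed semantics: swap every 32-bit word of the command in place so
 * the firmware sees big-endian data regardless of host endianness.
 */
static void example_cpu_to_big32(void *data, u32 size)
{
	__be32 *dst = data;
	u32 *src = data;
	u32 i;

	for (i = 0; i < size / sizeof(u32); i++)
		dst[i] = cpu_to_be32(src[i]);
}
```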
-
-void spfc_flush_scq_ctx(struct spfc_hba_info *hba)
-{
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
-		     "[info]Start destroying total 0x%x SCQC", SPFC_TOTAL_SCQ_NUM);
-
-	FC_CHECK_RETURN_VOID(hba);
-
-	(void)spfc_delete_scqc_via_cmdq_sync(hba, 0);
-}
-
-void spfc_flush_srq_ctx(struct spfc_hba_info *hba)
-{
-	struct spfc_srq_info *srq_info = NULL;
-
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
-		     "[info]Start destroying ELS&IMMI SRQC");
-
-	FC_CHECK_RETURN_VOID(hba);
-
-	/* Check state to avoid flushing the SRQC twice */
-	srq_info = &hba->els_srq_info;
-	if (srq_info->srq_type == SPFC_SRQ_ELS && srq_info->enable) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR,
-			     "[event]HBA(0x%x) flush ELS SRQC",
-			     hba->port_index);
-
-		(void)spfc_delete_srqc_via_cmdq_sync(hba, srq_info->cqm_srq_info->q_ctx_paddr);
-	}
-}
-
-void spfc_set_hba_flush_state(struct spfc_hba_info *hba, bool in_flush)
-{
-	ulong flag = 0;
-
-	spin_lock_irqsave(&hba->flush_state_lock, flag);
-	hba->in_flushing = in_flush;
-	spin_unlock_irqrestore(&hba->flush_state_lock, flag);
-}
-
-void spfc_set_hba_clear_state(struct spfc_hba_info *hba, bool clear_flag)
-{
-	ulong flag = 0;
-
-	spin_lock_irqsave(&hba->clear_state_lock, flag);
-	hba->port_is_cleared = clear_flag;
-	spin_unlock_irqrestore(&hba->clear_state_lock, flag);
-}
-
-bool spfc_hba_is_present(struct spfc_hba_info *hba)
-{
-	int ret_val = RETURN_OK;
-	bool present_flag = false;
-	u32 vendor_id = 0;
-
-	ret_val = pci_read_config_dword(hba->pci_dev, 0, &vendor_id);
-	vendor_id &= SPFC_PCI_VENDOR_ID_MASK;
-	if (ret_val == RETURN_OK && vendor_id == SPFC_PCI_VENDOR_ID_RAMAXEL) {
-		present_flag = true;
-	} else {
-		present_flag = false;
-		hba->dev_present = false;
-	}
-
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT,
-		     "[info]Port %s remove: vendor_id=0x%x, ret=0x%x",
-		     present_flag ? "normal" : "surprise", vendor_id, ret_val);
-
-	return present_flag;
-}
-
-static void spfc_exit(struct pci_dev *pci_dev, struct spfc_hba_info *hba)
-{
-#define SPFC_WAIT_CLR_RESOURCE_MS 1000
-	u32 ret = UNF_RETURN_ERROR;
-	bool sfp_switch = false;
-	bool present_flag = true;
-
-	FC_CHECK_RETURN_VOID(pci_dev);
-	FC_CHECK_RETURN_VOID(hba);
-
-	hba->removing = true;
-
-	/* 1. Check HBA present or not */
-	present_flag = spfc_hba_is_present(hba);
-	if (present_flag) {
-		if (hba->phy_link == UNF_PORT_LINK_DOWN)
-			hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_FLUSHDONE;
-
-		/* At first, close sfp */
-		sfp_switch = false;
-		(void)spfc_sfp_switch((void *)hba, (void *)&sfp_switch);
-	}
-
-	/* 2. Report COM with HBA removing: delete route timer delay work */
-	UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_BEGIN_REMOVE, NULL);
-
-	/* 3. Report COM with HBA Nop, COM releases I/O(s) & R_Port(s) forcibly */
-	UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_NOP, NULL);
-	if (ret != RETURN_OK) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR,
-			     "[err]PCI device(%p) remove port(0x%x) failed",
-			     pci_dev, hba->port_index);
-	}
-
-	spfc_delete_default_session(hba);
-
-	if (present_flag)
-		/* 4.1 Wait for all SQs to empty, free SRQ buffer & SRQC */
-		spfc_queue_pre_process(hba, true);
-
-	/* 5. Destroy L_Port */
-	(void)spfc_destroy_lport(hba);
-
-	/* 6. If the HBA is present */
-	if (present_flag) {
-		/* Enable Queues dispatch */
-		spfc_queue_post_process(hba);
-
-		/* Reset the port if necessary */
-		(void)spfc_mb_reset_chip(hba, SPFC_MBOX_SUBTYPE_HEAVY_RESET);
-
-		/* Flush SCQ context */
-		spfc_flush_scq_ctx(hba);
-
-		/* Flush SRQ context */
-		spfc_flush_srq_ctx(hba);
-
-		sphw_func_rx_tx_flush(hba->dev_handle, SPHW_CHANNEL_FC);
-
-		/* NOTE: while flushing tx/rx, the hash bucket will be cached
-		 * out in UP. Wait for resources to be cleared completely.
-		 */
-		msleep(SPFC_WAIT_CLR_RESOURCE_MS);
-
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-			     "[info]Port(0x%x) flush scq & srq & root context done",
-			     hba->port_cfg.port_id);
-	}
-
-	/* 7. Release host resources */
-	spfc_release_host_res(hba);
-
-	/* 8. Destroy FC work queue */
-	if (hba->work_queue) {
-		flush_workqueue(hba->work_queue);
-		destroy_workqueue(hba->work_queue);
-		hba->work_queue = NULL;
-	}
-
-	/* 9. Release Probe index & Decrease card number */
-	spfc_release_probe_index(hba->probe_index);
-	spfc_dec_and_free_card_num((u8)hba->card_info.card_num);
-
-	/* 10. Free HBA memory */
-	kfree(hba);
-
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
-		     "[event]PCI device(%p) remove succeeded, memory reference is 0x%x",
-		     pci_dev, atomic_read(&fc_mem_ref));
-}
-
-static void spfc_remove(struct spfc_lld_dev *lld_dev, void *uld_dev)
-{
-	struct pci_dev *pci_dev = NULL;
-	struct spfc_hba_info *hba = (struct spfc_hba_info *)uld_dev;
-	u32 probe_total_num = 0;
-	u32 probe_index = 0;
-
-	FC_CHECK_RETURN_VOID(lld_dev);
-	FC_CHECK_RETURN_VOID(uld_dev);
-	FC_CHECK_RETURN_VOID(lld_dev->hwdev);
-	FC_CHECK_RETURN_VOID(lld_dev->pdev);
-
-	pci_dev = hba->pci_dev;
-
-	/* Get total probed port number */
-	spfc_get_total_probed_num(&probe_total_num);
-	if (probe_total_num < 1) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN,
-			     "[warn]Port manager is empty and no need to remove");
-		return;
-	}
-
-	/* check pci vendor id */
-	if (pci_dev->vendor != SPFC_PCI_VENDOR_ID_RAMAXEL) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN,
-			     "[warn]Wrong vendor id(0x%x) and exit",
-			     pci_dev->vendor);
-		return;
-	}
-
-	/* Check function ability */
-	if (!sphw_support_fc(lld_dev->hwdev, NULL)) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR,
-			     "[err]FC is not enabled on this function");
-		return;
-	}
-
-	/* Get probe index */
-	probe_index = hba->probe_index;
-
-	/* Parent context alloc check */
-	if (hba->service_cap.dev_fc_cap.max_parent_qpc_num == 0) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR,
-			     "[err]FC parent context is not allocated on this function");
-		return;
-	}
-
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
-		     "[info]HBA(0x%x) start removing...", hba->port_index);
-
-	/* HBA removing... */
-	spfc_exit(pci_dev, hba);
-
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT,
-		     "[event]Port(0x%x) pci device removed, vendorid(0x%04x) devid(0x%04x)",
-		     probe_index, pci_dev->vendor, pci_dev->device);
-
-	/* Probe index check */
-	if (probe_index < SPFC_HBA_PORT_MAX_NUM) {
-		spfc_hba[probe_index] = NULL;
-	} else {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR,
-			     "[err]Probe index(0x%x) is invalid and remove failed",
-			     probe_index);
-	}
-
-	spfc_get_total_probed_num(&probe_total_num);
-
-	FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
-		     "[event]Removed index=%u, RemainNum=%u, AllowNum=%u",
-		     probe_index, probe_total_num, allowed_probe_num);
-}
-
-static u32 spfc_get_hba_pcie_link_state(void *hba, void *link_state)
-{
-	bool *link_state_info = link_state;
-	bool present_flag = true;
-	struct spfc_hba_info *spfc_hba = hba;
-	int ret;
-	bool last_dev_state = true;
-	bool cur_dev_state = true;
-
-	FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR);
-	FC_CHECK_RETURN_VALUE(link_state, UNF_RETURN_ERROR);
-	last_dev_state = spfc_hba->dev_present;
-	ret = sphw_get_card_present_state(spfc_hba->dev_handle, (bool *)&present_flag);
-	if (ret || !present_flag) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT,
-			     "[event]port(0x%x) is not present, ret:%d, present_flag:%d",
-			     spfc_hba->port_cfg.port_id, ret, present_flag);
-		cur_dev_state = false;
-	} else {
-		cur_dev_state = true;
-	}
-
-	spfc_hba->dev_present = cur_dev_state;
-
-	/* To prevent false alarms, the heartbeat is considered lost only
-	 * when the PCIe link is down for two consecutive times.
-	 */
-	if (!last_dev_state && !cur_dev_state)
-		spfc_hba->heart_status = false;
-
-	*link_state_info = spfc_hba->dev_present;
-
-	return RETURN_OK;
-}
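The comment in spfc_get_hba_pcie_link_state() above describes a two-sample debounce: heartbeat loss is declared only after two consecutive "not present" reads. The same rule in isolation, as a hypothetical helper with illustrative names:

```c
#include <linux/types.h>

/* Returns true only when the current and the previous poll both saw the
 * device absent; a single bad read does not trip the heartbeat.
 */
static bool example_link_really_down(bool *last_present, bool cur_present)
{
	bool down_twice = !*last_present && !cur_present;

	*last_present = cur_present;	/* remember for the next poll */

	return down_twice;
}
```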
"spfc" -#define SPFC_PORT_NAME_STR_LEN (16) - -#define SPFC_MAX_PROBE_PORT_NUM (64) -#define SPFC_PORT_NUM_PER_TABLE (64) -#define SPFC_MAX_CARD_NUM (32) - -#define SPFC_HBA_PORT_MAX_NUM SPFC_MAX_PROBE_PORT_NUM -#define SPFC_SIRT_MIN_RXID 0 -#define SPFC_SIRT_MAX_RXID 255 - -#define SPFC_GET_HBA_PORT_ID(hba) ((hba)->port_index) - -#define SPFC_MAX_WAIT_LOOP_TIMES 10000 -#define SPFC_WAIT_SESS_ENABLE_ONE_TIME_MS 1 -#define SPFC_WAIT_SESS_FREE_ONE_TIME_MS 1 - -#define SPFC_PORT_ID_MASK 0xff0000 - -#define SPFC_MAX_PARENT_QPC_NUM 2048 -struct spfc_port_cfg { - u32 port_id; /* Port ID */ - u32 port_mode; /* Port mode:INI(0x20), TGT(0x10), BOTH(0x30) */ - u32 port_topology; /* Port topo:0x3:loop,0xc:p2p,0xf:auto */ - u32 port_alpa; /* Port ALPA */ - u32 max_queue_depth; /* Max Queue depth Registration to SCSI */ - u32 sest_num; /* IO burst num:512-4096 */ - u32 max_login; /* Max Login Session. */ - u32 node_name_hi; /* nodename high 32 bits */ - u32 node_name_lo; /* nodename low 32 bits */ - u32 port_name_hi; /* portname high 32 bits */ - u32 port_name_lo; /* portname low 32 bits */ - u32 port_speed; /* Port speed 0:auto 4:4Gbps 8:8Gbps 16:16Gbps */ - u32 interrupt_delay; /* Delay times(ms) in interrupt */ - u32 tape_support; /* tape support */ -}; - -#define SPFC_VER_INFO_SIZE 128 -struct spfc_drv_version { - char ver[SPFC_VER_INFO_SIZE]; -}; - -struct spfc_card_info { - u32 card_num : 8; - u32 func_num : 8; - u32 base_func : 8; - /* Card type:UNF_FC_SERVER_BOARD_32_G(6) 32G mode, - * UNF_FC_SERVER_BOARD_16_G(7)16G mode - */ - u32 card_type : 8; -}; - -struct spfc_card_num_manage { - bool is_removing; - u32 port_count; - u64 card_number; -}; - -struct spfc_sim_ini_err { - u32 err_code; - u32 times; -}; - -struct spfc_sim_pcie_err { - u32 err_code; - u32 times; -}; - -struct spfc_led_state { - u8 green_speed_led; - u8 yellow_speed_led; - u8 ac_led; - u8 rsvd; -}; - -enum spfc_led_activity { - SPFC_LED_CFG_ACTVE_FRAME = 0, - SPFC_LED_CFG_ACTVE_FC = 3 -}; - -enum spfc_queue_set_stage { - SPFC_QUEUE_SET_STAGE_INIT = 0, - SPFC_QUEUE_SET_STAGE_SCANNING, - SPFC_QUEUE_SET_STAGE_FLUSHING, - SPFC_QUEUE_SET_STAGE_FLUSHDONE, - SPFC_QUEUE_SET_STAGE_BUTT -}; - -struct spfc_vport_info { - u64 node_name; - u64 port_name; - u32 port_mode; /* INI, TGT or both */ - u32 nport_id; /* maybe acquired by lowlevel and update to common */ - void *vport; - u16 vp_index; -}; - -struct spfc_srq_delay_info { - u8 srq_delay_flag; /* Check whether need to delay */ - u8 root_rq_rcvd_flag; - u16 rsd; - - spinlock_t srq_lock; - struct unf_frame_pkg frame_pkg; - - struct delayed_work del_work; -}; - -struct spfc_fw_ver_detail { - u8 ucode_ver[SPFC_VER_LEN]; - u8 ucode_compile_time[SPFC_COMPILE_TIME_LEN]; - - u8 up_ver[SPFC_VER_LEN]; - u8 up_compile_time[SPFC_COMPILE_TIME_LEN]; - - u8 boot_ver[SPFC_VER_LEN]; - u8 boot_compile_time[SPFC_COMPILE_TIME_LEN]; -}; - -/* get wwpn and wwnn */ -struct spfc_chip_info { - u8 work_mode; - u8 tape_support; - u64 wwpn; - u64 wwnn; -}; - -/* Default SQ info */ -struct spfc_default_sq_info { - u32 sq_cid; - u32 sq_xid; - u32 fun_cid; - u32 default_sq_flag; -}; - -struct spfc_hba_info { - struct pci_dev *pci_dev; - void *dev_handle; - - struct fc_service_cap service_cap; /* struct fc_service_cap pstFcoeServiceCap; */ - - struct spfc_scq_info scq_info[SPFC_TOTAL_SCQ_NUM]; - struct spfc_srq_info els_srq_info; - - struct spfc_vport_info vport_info[UNF_SPFC_MAXNPIV_NUM + 1]; - - /* PCI IO Memory */ - void __iomem *bar0; - u32 bar0_len; - - struct spfc_parent_queue_mgr *parent_queue_mgr; - - /* Link 
list Sq WqePage Pool */ - struct spfc_sq_wqepage_pool sq_wpg_pool; - - enum spfc_queue_set_stage queue_set_stage; - u32 next_clear_sq; - u32 default_sqid; - - /* Port parameters, Obtained through firmware */ - u16 queue_set_max_count; - u8 port_type; /* FC or FCoE Port */ - u8 port_index; /* Phy Port */ - u32 default_scqn; - char fw_ver[FW_VER_LEN]; /* FW version */ - char hw_ver[HW_VER_LEN]; /* HW version */ - char mst_fw_ver[FW_SUB_VER_LEN]; - char fc_fw_ver[FW_SUB_VER_LEN]; - u8 chip_type; /* chiptype:Smart or fc */ - u8 work_mode; - struct spfc_card_info card_info; - char port_name[SPFC_PORT_NAME_STR_LEN]; - u32 probe_index; - - u16 exi_base; - u16 exi_count; - u16 vpf_count; - u8 vpid_start; - u8 vpid_end; - - spinlock_t flush_state_lock; - bool in_flushing; - - spinlock_t clear_state_lock; - bool port_is_cleared; - - struct spfc_port_cfg port_cfg; /* Obtained through Config */ - - void *lport; /* Used in UNF level */ - - u8 sys_node_name[UNF_WWN_LEN]; - u8 sys_port_name[UNF_WWN_LEN]; - - struct completion hba_init_complete; - struct completion mbox_complete; - struct completion vpf_complete; - struct completion fcfi_complete; - struct completion get_sfp_complete; - - u16 init_stage; - u16 removing; - bool sfp_on; - bool dev_present; - bool heart_status; - spinlock_t hba_lock; - u32 port_topo_cfg; - u32 port_bb_scn_cfg; - u32 port_loop_role; - u32 port_speed_cfg; - u32 max_support_speed; - u32 min_support_speed; - u32 server_max_speed; - - u8 remote_rttov_tag; - u8 remote_edtov_tag; - u16 compared_bb_scn; - u16 remote_bb_credit; - u32 compared_edtov_val; - u32 compared_ratov_val; - enum unf_act_topo active_topo; - u32 active_port_speed; - u32 active_rxbb_credit; - u32 active_bb_scn; - - u32 phy_link; - - enum unf_port_mode port_mode; - - u32 fcp_cfg; - - /* loop */ - u8 active_alpa; - u8 loop_map_valid; - u8 loop_map[UNF_LOOPMAP_COUNT]; - - /* sfp info dma */ - void *sfp_buf; - dma_addr_t sfp_dma_addr; - u32 sfp_status; - int chip_temp; - u32 sfp_posion; - - u32 cos_bitmap; - atomic_t cos_rport_cnt[SPFC_MAX_COS_NUM]; - - /* fw debug dma buffer */ - void *debug_buf; - dma_addr_t debug_buf_dma_addr; - void *log_buf; - dma_addr_t fw_log_dma_addr; - - void *dma_addr; - dma_addr_t update_dma_addr; - - struct spfc_sim_ini_err sim_ini_err; - struct spfc_sim_pcie_err sim_pcie_err; - - struct spfc_led_state led_states; - - u32 fec_status; - - struct workqueue_struct *work_queue; - struct work_struct els_srq_clear_work; - u64 reset_time; - - spinlock_t spin_lock; - - struct spfc_srq_delay_info srq_delay_info; - struct spfc_fw_ver_detail hardinfo; - struct spfc_default_sq_info default_sq_info; -}; - -extern struct spfc_hba_info *spfc_hba[SPFC_HBA_PORT_MAX_NUM]; -extern spinlock_t probe_spin_lock; -extern ulong probe_bit_map[SPFC_MAX_PROBE_PORT_NUM / SPFC_PORT_NUM_PER_TABLE]; - -u32 spfc_port_reset(struct spfc_hba_info *hba); -void spfc_flush_scq_ctx(struct spfc_hba_info *hba); -void spfc_flush_srq_ctx(struct spfc_hba_info *hba); -void spfc_set_hba_flush_state(struct spfc_hba_info *hba, bool in_flush); -void spfc_set_hba_clear_state(struct spfc_hba_info *hba, bool clear_flag); -u32 spfc_get_probe_index_by_port_id(u32 port_id, u32 *probe_index); -void spfc_get_total_probed_num(u32 *probe_cnt); -u32 spfc_sfp_switch(void *hba, void *para_in); -bool spfc_hba_is_present(struct spfc_hba_info *hba); - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_hw_wqe.h b/drivers/scsi/spfc/hw/spfc_hw_wqe.h deleted file mode 100644 index 
e03d24a985790c58b3376ead6cba360788b9a8ca..0000000000000000000000000000000000000000
--- a/drivers/scsi/spfc/hw/spfc_hw_wqe.h
+++ /dev/null
@@ -1,1645 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
-
-#ifndef SPFC_HW_WQE_H
-#define SPFC_HW_WQE_H
-
-#define FC_ICQ_EN
-#define FC_SCSI_CMDIU_LEN 48
-#define FC_NVME_CMDIU_LEN 96
-#define FC_LS_GS_USERID_CNT_MAX 10
-#define FC_SENSEDATA_USERID_CNT_MAX 2
-#define FC_INVALID_MAGIC_NUM 0xFFFFFFFF
-#define FC_INVALID_HOTPOOLTAG 0xFFFF
-
-/* TASK TYPE: to stay compatible with EDA, please add new types before BUTT. */
-enum spfc_task_type {
-	SPFC_TASK_T_EMPTY = 0, /* SCQE TYPE: means task type not initialized */
-
-	SPFC_TASK_T_IWRITE = 1, /* SQE TYPE: ini send FCP Write Command */
-	SPFC_TASK_T_IREAD = 2, /* SQE TYPE: ini send FCP Read Command */
-	SPFC_TASK_T_IRESP = 3, /* SCQE TYPE: ini recv fcp rsp for IREAD/IWRITE/ITMF */
-	SPFC_TASK_T_TCMND = 4, /* NA */
-	SPFC_TASK_T_TREAD = 5, /* SQE TYPE: tgt send FCP Read Command */
-	SPFC_TASK_T_TWRITE = 6, /* SQE TYPE: tgt send FCP Write Command (XFER_RDY) */
-	SPFC_TASK_T_TRESP = 7, /* SQE TYPE: tgt send fcp rsp of Read/Write */
-	SPFC_TASK_T_TSTS = 8, /* SCQE TYPE: tgt sts for TREAD/TWRITE/TRESP */
-	SPFC_TASK_T_ABTS = 9, /* SQE TYPE: ini send abts request Command */
-	SPFC_TASK_T_IELS = 10, /* NA */
-	SPFC_TASK_T_ITMF = 11, /* SQE TYPE: ini send tmf request Command */
-	SPFC_TASK_T_CLEAN_UP = 12, /* NA */
-	SPFC_TASK_T_CLEAN_UP_ALL = 13, /* NA */
-	SPFC_TASK_T_UNSOLICITED = 14, /* NA */
-	SPFC_TASK_T_ERR_WARN = 15, /* NA */
-	SPFC_TASK_T_SESS_EN = 16, /* CMDQ TYPE: enable session */
-	SPFC_TASK_T_SESS_DIS = 17, /* NA */
-	SPFC_TASK_T_SESS_DEL = 18, /* NA */
-	SPFC_TASK_T_RQE_REPLENISH = 19, /* NA */
-
-	SPFC_TASK_T_RCV_TCMND = 20, /* SCQE TYPE: tgt recv fcp cmd */
-	SPFC_TASK_T_RCV_ELS_CMD = 21, /* SCQE TYPE: tgt recv els cmd */
-	SPFC_TASK_T_RCV_ABTS_CMD = 22, /* SCQE TYPE: tgt recv abts cmd */
-	SPFC_TASK_T_RCV_IMMEDIATE = 23, /* SCQE TYPE: tgt recv immediate data */
-	/* SQE TYPE: send ELS rsp. PLOGI_ACC, PRLI_ACC will carry the parent
-	 * context parameter indication.
-	 */
-	SPFC_TASK_T_ELS_RSP = 24,
-	SPFC_TASK_T_ELS_RSP_STS = 25, /* SCQE TYPE: ELS rsp sts */
-	SPFC_TASK_T_ABTS_RSP = 26, /* CMDQ TYPE: tgt send abts rsp */
-	SPFC_TASK_T_ABTS_RSP_STS = 27, /* SCQE TYPE: tgt abts rsp sts */
-
-	SPFC_TASK_T_ABORT = 28, /* CMDQ TYPE: tgt send Abort Command */
-	SPFC_TASK_T_ABORT_STS = 29, /* SCQE TYPE: Abort sts */
-
-	SPFC_TASK_T_ELS = 30, /* SQE TYPE: send ELS request Command */
-	SPFC_TASK_T_RCV_ELS_RSP = 31, /* SCQE TYPE: recv ELS response */
-
-	SPFC_TASK_T_GS = 32, /* SQE TYPE: send GS request Command */
-	SPFC_TASK_T_RCV_GS_RSP = 33, /* SCQE TYPE: recv GS response */
-
-	SPFC_TASK_T_SESS_EN_STS = 34, /* SCQE TYPE: enable session sts */
-	SPFC_TASK_T_SESS_DIS_STS = 35, /* NA */
-	SPFC_TASK_T_SESS_DEL_STS = 36, /* NA */
-
-	SPFC_TASK_T_RCV_ABTS_RSP = 37, /* SCQE TYPE: ini recv abts rsp */
-
-	SPFC_TASK_T_BUFFER_CLEAR = 38, /* CMDQ TYPE: Buffer Clear */
-	SPFC_TASK_T_BUFFER_CLEAR_STS = 39, /* SCQE TYPE: Buffer Clear sts */
-	SPFC_TASK_T_FLUSH_SQ = 40, /* CMDQ TYPE: flush sq */
-	SPFC_TASK_T_FLUSH_SQ_STS = 41, /* SCQE TYPE: flush sq sts */
-
-	SPFC_TASK_T_SESS_RESET = 42, /* SQE TYPE: Reset session */
-	SPFC_TASK_T_SESS_RESET_STS = 43, /* SCQE TYPE: Reset session sts */
-	SPFC_TASK_T_RQE_REPLENISH_STS = 44, /* NA */
-	SPFC_TASK_T_DUMP_EXCH = 45, /* CMDQ TYPE: dump exch */
-	SPFC_TASK_T_INIT_SRQC = 46, /* CMDQ TYPE: init SRQC */
-	SPFC_TASK_T_CLEAR_SRQ = 47, /* CMDQ TYPE: clear SRQ */
-	SPFC_TASK_T_CLEAR_SRQ_STS = 48, /* SCQE TYPE: clear SRQ sts */
-	SPFC_TASK_T_INIT_SCQC = 49, /* CMDQ TYPE: init SCQC */
-	SPFC_TASK_T_DEL_SCQC = 50, /* CMDQ TYPE: delete SCQC */
-	SPFC_TASK_T_TMF_RESP = 51, /* SQE TYPE: tgt send tmf rsp */
-	SPFC_TASK_T_DEL_SRQC = 52, /* CMDQ TYPE: delete SRQC */
-	SPFC_TASK_T_RCV_IMMI_CONTINUE = 53, /* SCQE TYPE: tgt recv continue immediate data */
-
-	SPFC_TASK_T_ITMF_RESP = 54, /* SCQE TYPE: ini recv tmf rsp */
-	SPFC_TASK_T_ITMF_MARKER_STS = 55, /* SCQE TYPE: tmf marker sts */
-	SPFC_TASK_T_TACK = 56,
-	SPFC_TASK_T_SEND_AEQERR = 57,
-	SPFC_TASK_T_ABTS_MARKER_STS = 58, /* SCQE TYPE: abts marker sts */
-	SPFC_TASK_T_FLR_CLEAR_IO = 59, /* FLR clear io type */
-	SPFC_TASK_T_CREATE_SSQ_CONTEXT = 60,
-	SPFC_TASK_T_CLEAR_SSQ_CONTEXT = 61,
-	SPFC_TASK_T_EXCH_ID_FREE = 62,
-	SPFC_TASK_T_DIFX_RESULT_STS = 63,
-	SPFC_TASK_T_EXCH_ID_FREE_ABORT = 64,
-	SPFC_TASK_T_EXCH_ID_FREE_ABORT_STS = 65,
-	SPFC_TASK_T_PARAM_CHECK_FAIL = 66,
-	SPFC_TASK_T_TGT_UNKNOWN = 67,
-	SPFC_TASK_T_NVME_LS = 70, /* SQE TYPE: Snd Ls Req */
-	SPFC_TASK_T_RCV_NVME_LS_RSP = 71, /* SCQE TYPE: Rcv Ls Rsp */
-
-	SPFC_TASK_T_NVME_LS_RSP = 72, /* SQE TYPE: Snd Ls Rsp */
-	SPFC_TASK_T_RCV_NVME_LS_RSP_STS = 73, /* SCQE TYPE: Rcv Ls Rsp sts */
-
-	SPFC_TASK_T_RCV_NVME_LS_CMD = 74, /* SCQE TYPE: Rcv ls cmd */
-
-	SPFC_TASK_T_NVME_IREAD = 75, /* SQE TYPE: Ini Snd Nvme Read Cmd */
-	SPFC_TASK_T_NVME_IWRITE = 76, /* SQE TYPE: Ini Snd Nvme write Cmd */
-
-	SPFC_TASK_T_NVME_TREAD = 77, /* SQE TYPE: Tgt Snd Nvme Read Cmd */
-	SPFC_TASK_T_NVME_TWRITE = 78, /* SQE TYPE: Tgt Snd Nvme write Cmd */
-
-	SPFC_TASK_T_NVME_IRESP = 79, /* SCQE TYPE: Ini recv nvme rsp for NVMEIREAD/NVMEIWRITE */
-
-	SPFC_TASK_T_INI_IO_ABORT = 80, /* SQE type: INI Abort Cmd */
-	SPFC_TASK_T_INI_IO_ABORT_STS = 81, /* SCQE type: INI Abort sts */
-
-	SPFC_TASK_T_INI_LS_ABORT = 82, /* SQE type: INI ls abort Cmd */
-	SPFC_TASK_T_INI_LS_ABORT_STS = 83, /* SCQE type: INI ls abort sts */
-	SPFC_TASK_T_EXCHID_TIMEOUT_STS = 84, /* SCQE TYPE: EXCH_ID TIME OUT */
-	SPFC_TASK_T_PARENT_ERR_STS = 85, /* SCQE TYPE: PARENT ERR */
-
-	SPFC_TASK_T_NOP = 86,
-	SPFC_TASK_T_NOP_STS = 87,
-
-	SPFC_TASK_T_DFX_INFO = 126,
-	SPFC_TASK_T_BUTT
-};
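Most of these values name either an SQE the driver posts or an SCQE type the hardware completes with. A hedged sketch of how a completion handler might branch on the type field; the driver's real dispatch logic is not part of this hunk, so the cases and actions are illustrative only:

```c
#include <linux/types.h>

static void example_handle_scqe(u8 task_type)
{
	switch (task_type) {
	case SPFC_TASK_T_IRESP:		/* FCP rsp for an initiator I/O */
		/* complete the SCSI command */
		break;
	case SPFC_TASK_T_RCV_ELS_CMD:	/* unsolicited ELS from the wire */
		/* hand the frame to the ELS path */
		break;
	case SPFC_TASK_T_SESS_EN_STS:	/* session-enable status */
		/* finish the offload handshake */
		break;
	default:
		/* unknown types would be counted and dropped */
		break;
	}
}
```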
-
-/* error code for error report */
-
-enum spfc_err_code {
-	FC_CQE_COMPLETED = 0, /* Successful */
-	FC_SESS_HT_INSERT_FAIL = 1, /* Offload fail: hash insert fail */
-	FC_SESS_HT_INSERT_DUPLICATE = 2, /* Offload fail: duplicate offload */
-	FC_SESS_HT_BIT_SET_FAIL = 3, /* Offload fail: bloom filter set fail */
-	FC_SESS_HT_DELETE_FAIL = 4, /* Offload fail: hash delete fail (duplicate delete) */
-	FC_CQE_BUFFER_CLEAR_IO_COMPLETED = 5, /* IO done in buffer clear */
-	FC_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED = 6, /* IO done in session rst mode=1 */
-	FC_CQE_SESSION_RST_CLEAR_IO_COMPLETED = 7, /* IO done in session rst mode=3 */
-	FC_CQE_TMF_RSP_IO_COMPLETED = 8, /* IO done in tgt tmf rsp */
-	FC_CQE_TMF_IO_COMPLETED = 9, /* IO done in ini tmf */
-	FC_CQE_DRV_ABORT_IO_COMPLETED = 10, /* IO done in tgt abort */
-	/*
-	 * IO done in fcp rsp process. Used for the scenario: 1. abort before cmd;
-	 * 2. send fcp rsp directly after recv cmd.
-	 */
-	FC_CQE_DRV_ABORT_IO_IN_RSP_COMPLETED = 11,
-	/*
-	 * IO done in fcp cmd process. Used for the scenario: 1. abort before cmd;
-	 * 2. child setup fail.
-	 */
-	FC_CQE_DRV_ABORT_IO_IN_CMD_COMPLETED = 12,
-	FC_CQE_WQE_FLUSH_IO_COMPLETED = 13, /* IO done in FLUSH SQ */
-	FC_ERROR_CODE_DATA_DIFX_FAILED = 14, /* fcp data format check: DIFX check error */
-	/* fcp data format check: task_type is not read */
-	FC_ERROR_CODE_DATA_TASK_TYPE_INCORRECT = 15,
-	FC_ERROR_CODE_DATA_OOO_RO = 16, /* fcp data format check: data offset is not continuous */
-	FC_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS = 17, /* fcp data format check: data overrun */
-	/* fcp rsp format check: payload is too short */
-	FC_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD = 18,
-	/* fcp rsp format check: fcp_conf needed, but exch doesn't hold seq initiative */
-	FC_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET = 19,
-	/* fcp rsp format check: fcp_conf is required, but it's the last seq */
-	FC_ERROR_CODE_FCP_RSP_OPENED_SEQ = 20,
-	/* xfer rdy format check: payload is too short */
-	FC_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE = 21,
-	/* xfer rdy format check: last data out hasn't finished */
-	FC_ERROR_CODE_XFER_PEND_XFER_SET = 22,
-	/* xfer rdy format check: data offset is not continuous */
-	FC_ERROR_CODE_XFER_OOO_RO = 23,
-	FC_ERROR_CODE_XFER_NULL_BURST_LEN = 24, /* xfer rdy format check: burst len is 0 */
-	FC_ERROR_CODE_REC_TIMER_EXPIRE = 25, /* Timer expire: REC_TIMER */
-	FC_ERROR_CODE_E_D_TIMER_EXPIRE = 26, /* Timer expire: E_D_TIMER */
-	FC_ERROR_CODE_ABORT_TIMER_EXPIRE = 27, /* Timer expire: Abort timer */
-	FC_ERROR_CODE_ABORT_MAGIC_NUM_NOT_MATCH = 28, /* Abort IO magic number mismatch */
-	FC_IMMI_CMDPKT_SETUP_FAIL = 29, /* RX immediate data cmd pkt child setup fail */
-	FC_ERROR_CODE_DATA_SEQ_ID_NOT_EQUAL = 30, /* RX fcp data sequence id not equal */
-	FC_ELS_GS_RSP_EXCH_CHECK_FAIL = 31, /* ELS/GS exch info check fail */
-	FC_CQE_ELS_GS_SRQE_GET_FAIL = 32, /* ELS/GS process get SRQE fail */
-	FC_CQE_DATA_DMA_REQ_FAIL = 33, /* SMF soli-childdma rsp error */
-	FC_CQE_SESSION_CLOSED = 34, /* Session is closed */
-	FC_SCQ_IS_FULL = 35, /* SCQ is full */
-	FC_SRQ_IS_FULL = 36, /* SRQ is full */
-	FC_ERROR_DUCHILDCTX_SETUP_FAIL = 37, /* dpchild ctx setup fail */
-	FC_ERROR_INVALID_TXMFS = 38, /* invalid txmfs */
-	FC_ERROR_OFFLOAD_LACKOF_SCQE_FAIL = 39, /* offload fail, lack of SCQE, reported through AEQ */
-	FC_ERROR_INVALID_TASK_ID = 40, /* tx invalid task id */
-	FC_ERROR_INVALID_PKT_LEN = 41, /* tx els gs packet len check */
-	FC_CQE_ELS_GS_REQ_CLR_IO_COMPLETED = 42, /* IO done in els gs tx */
-	FC_CQE_ELS_RSP_CLR_IO_COMPLETED = 43, /* IO done in els rsp tx */
-	FC_ERROR_CODE_RESID_UNDER_ERR = 44, /* FCP RSP RESID ERROR */
-	FC_ERROR_EXCH_ID_FREE_ERR = 45, /* Abnormal free xid failed */
-	FC_ALLOC_EXCH_ID_FAILED = 46, /* ucode alloc EXCH ID failed */
-	FC_ERROR_DUPLICATE_IO_RECEIVED = 47, /* Duplicate tcmnd or tmf rsp received */
-	FC_ERROR_RXID_MISCOMPARE = 48,
-	FC_ERROR_FAILOVER_CLEAR_VALID_HOST = 49, /* Failover cleared valid host io */
-	FC_ERROR_EXCH_ID_NOT_MATCH = 50, /* SCQ TYPE: xid not match */
-	FC_ERROR_ABORT_FAIL = 51, /* SCQ TYPE: abort fail */
-	FC_ERROR_SHARD_TABLE_OP_FAIL = 52, /* SCQ TYPE: shard table OP fail */
-	FC_ERROR_E0E1_FAIL = 53,
-	FC_INSERT_EXCH_ID_HASH_FAILED = 54, /* ucode INSERT EXCH ID HASH failed */
-	FC_ERROR_CODE_FCP_RSP_UPDMA_FAILED = 55, /* UP DMA req failed while fcp rsp is being received */
-	FC_ERROR_CODE_SID_DID_NOT_MATCH = 56, /* sid or did not match */
-	FC_ERROR_DATA_NOT_REL_OFF = 57, /* data not rel off */
-	FC_ERROR_CODE_EXCH_ID_TIMEOUT = 58, /* exch id timeout */
-	FC_ERROR_PARENT_CHECK_FAIL = 59,
-	FC_ERROR_RECV_REC_REJECT = 60, /* RECV REC RSP REJECT */
-	FC_ERROR_RECV_SRR_REJECT = 61, /* RECV SRR RSP REJECT */
-	FC_ERROR_REC_NOT_FIND_EXID_INVALID = 62,
-	FC_ERROR_RECV_REC_NO_ERR = 63,
-	FC_ERROR_PARENT_CTX_ERR = 64
-};
-
-/* AEQ EVENT TYPE */
-enum spfc_aeq_evt_type {
-	/* SCQ and SRQ not enough; HOST will initiate an operation on the associated SCQ/SRQ */
-	FC_AEQ_EVENT_QUEUE_ERROR = 48,
-	FC_AEQ_EVENT_WQE_FATAL_ERROR = 49, /* WQE MSN check error, HOST will reset port */
-	FC_AEQ_EVENT_CTX_FATAL_ERROR = 50, /* serious chip error, HOST will reset chip */
-	FC_AEQ_EVENT_OFFLOAD_ERROR = 51,
-	FC_FC_AEQ_EVENT_TYPE_LAST
-};
-
-enum spfc_protocol_class {
-	FC_PROTOCOL_CLASS_3 = 0x0,
-	FC_PROTOCOL_CLASS_2 = 0x1,
-	FC_PROTOCOL_CLASS_1 = 0x2,
-	FC_PROTOCOL_CLASS_F = 0x3,
-	FC_PROTOCOL_CLASS_OTHER = 0x4
-};
-
-enum spfc_aeq_evt_err_code {
-	/* detail type of resource lack */
-	FC_SCQ_IS_FULL_ERR = 0,
-	FC_SRQ_IS_FULL_ERR,
-
-	/* detail type of FC_AEQ_EVENT_WQE_FATAL_ERROR */
-	FC_SQE_CHILD_SETUP_WQE_MSN_ERR = 2,
-	FC_SQE_CHILD_SETUP_WQE_GPA_ERR,
-	FC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_1,
-	FC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_2,
-	FC_CLEAEQ_WQE_ERR,
-	FC_WQEFETCH_WQE_MSN_ERR,
-	FC_WQEFETCH_QUINFO_ERR,
-
-	/* detail type of FC_AEQ_EVENT_CTX_FATAL_ERROR */
-	FC_SCQE_ERR_BIT_ERR = 9,
-	FC_UPDMA_ADDR_REQ_SRQ_ERR,
-	FC_SOLICHILDDMA_ADDR_REQ_ERR,
-	FC_UNSOLICHILDDMA_ADDR_REQ_ERR,
-	FC_SQE_CHILD_SETUP_QINFO_ERR_1,
-	FC_SQE_CHILD_SETUP_QINFO_ERR_2,
-	FC_CMDPKT_CHILD_SETUP_QINFO_ERR_1,
-	FC_CMDPKT_CHILD_SETUP_QINFO_ERR_2,
-	FC_CMDPKT_CHILD_SETUP_PMSN_ERR,
-	FC_CLEAEQ_CTX_ERR,
-	FC_WQEFETCH_CTX_ERR,
-	FC_FLUSH_QPC_ERR_LQP,
-	FC_FLUSH_QPC_ERR_SMF,
-	FC_PREFETCH_QPC_ERR_PCM_MHIT_LQP,
-	FC_PREFETCH_QPC_ERR_PCM_MHIT_FQG,
-	FC_PREFETCH_QPC_ERR_PCM_ABM_FQG,
-	FC_PREFETCH_QPC_ERR_MAP_FQG,
-	FC_PREFETCH_QPC_ERR_MAP_LQP,
-	FC_PREFETCH_QPC_ERR_SMF_RTN,
-	FC_PREFETCH_QPC_ERR_CFG,
-	FC_PREFETCH_QPC_ERR_FLSH_HIT,
-	FC_PREFETCH_QPC_ERR_FLSH_ACT,
-	FC_PREFETCH_QPC_ERR_ABM_W_RSC,
-	FC_PREFETCH_QPC_ERR_RW_ABM,
-	FC_PREFETCH_QPC_ERR_DEFAULT,
-	FC_CHILDHASH_INSERT_SW_ERR,
-	FC_CHILDHASH_LOOKUP_SW_ERR,
-	FC_CHILDHASH_DEL_SW_ERR,
-	FC_EXCH_ID_FREE_SW_ERR,
-	FC_FLOWHASH_INSERT_SW_ERR,
-	FC_FLOWHASH_LOOKUP_SW_ERR,
-	FC_FLOWHASH_DEL_SW_ERR,
-	FC_FLUSH_QPC_ERR_USED,
-	FC_FLUSH_QPC_ERR_OUTER_LOCK,
-	FC_SETUP_SESSION_ERR,
-
-	FC_AEQ_EVT_ERR_CODE_BUTT
-
-};
-
-/* AEQ data structure */
-struct spfc_aqe_data {
-	union {
-		struct {
-			u32 conn_id : 16;
-			u32 rsvd : 8;
-			u32 evt_code : 8;
-		} wd0;
-
-		u32 data0;
-	};
-
-	union {
-		struct {
-			u32 xid : 20;
-			u32 rsvd : 12;
-		} wd1;
-
-		u32 data1;
-	};
-};
-
-/* Control Section: Common Header */
-struct spfc_wqe_ctrl_ch {
-	union {
-		struct {
-			u32 bdsl : 8;
-			u32 drv_sl : 2;
-			u32 rsvd0 : 4;
-			u32 wf : 1;
-			u32 cf : 1;
-			u32 tsl : 5;
-			u32 va : 1;
-			u32 df : 1;
-			u32 cr : 1;
-			u32 dif_sl : 3;
-			u32 csl : 2;
-			u32 ctrl_sl : 2;
-			u32 owner : 1;
-		} wd0;
-
-		u32 ctrl_ch_val;
-	};
-};
-
-/* Control Section: Queue Specific Field */
-struct spfc_wqe_ctrl_qsf {
-	u32 wqe_sn : 16;
-	u32 dump_wqe_sn : 16;
-};
-
-/* DIF info definition in WQE */
-struct spfc_fc_dif_info {
-	struct {
-		u32 app_tag_ctrl : 3; /* DIF/DIX APP TAG Control */
-		/* Bit 0: scenario of the reference tag verify mode.
-		 * Bit 1: scenario of the reference tag insert/replace mode.
-		 */
-		u32 ref_tag_mode : 2;
-		/* 0: fixed; 1: incrementing; */
-		u32 ref_tag_ctrl : 3; /* The DIF/DIX Reference tag control */
-		u32 grd_agm_ini_ctrl : 3;
-		u32 grd_agm_ctrl : 2; /* Bit 0: DIF/DIX guard verify algorithm control */
-		/* Bit 1: DIF/DIX guard replace or insert algorithm control */
-		u32 grd_ctrl : 3; /* The DIF/DIX Guard control */
-		u32 dif_verify_type : 2; /* verify type */
-		u32 difx_ref_esc : 1; /* Check blocks whose reference tag contains 0xFFFF flag */
-		u32 difx_app_esc : 1; /* Check blocks whose application tag contains 0xFFFF flag */
-		u32 rsvd : 8;
-		u32 sct_size : 1; /* Sector size, 1: 4K; 0: 512 */
-		u32 smd_tp : 2;
-		u32 difx_en : 1;
-	} wd0;
-
-	struct {
-		u32 cmp_app_tag_msk : 16;
-		u32 rsvd : 7;
-		u32 lun_qos_en : 2;
-		u32 vpid : 7;
-	} wd1;
-
-	u16 cmp_app_tag;
-	u16 rep_app_tag;
-
-	u32 cmp_ref_tag;
-	u32 rep_ref_tag;
-};
-
-/* Task Section: TMF SQE for INI */
-struct spfc_tmf_info {
-	union {
-		struct {
-			u32 reset_exch_end : 16;
-			u32 reset_exch_start : 16;
-		} bs;
-		u32 value;
-	} w0;
-
-	union {
-		struct {
-			u32 reset_did : 24;
-			u32 reset_type : 2;
-			u32 marker_sts : 1;
-			u32 rsvd0 : 5;
-		} bs;
-		u32 value;
-	} w1;
-
-	union {
-		struct {
-			u32 reset_sid : 24;
-			u32 rsvd0 : 8;
-		} bs;
-		u32 value;
-	} w2;
-
-	u8 reset_lun[8];
-};
-
-/* Task Section: CMND SQE for INI */
-struct spfc_sqe_icmnd {
-	u8 fcp_cmnd_iu[FC_SCSI_CMDIU_LEN];
-	union {
-		struct spfc_fc_dif_info dif_info;
-		struct spfc_tmf_info tmf;
-	} info;
-};
-
-/* Task Section: ABTS SQE */
-struct spfc_sqe_abts {
-	u32 fh_parm_abts;
-	u32 hotpooltag;
-	u32 release_timer;
-};
-
-struct spfc_keys {
-	struct {
-		u32 smac1 : 8;
-		u32 smac0 : 8;
-		u32 rsv : 16;
-	} wd0;
-
-	u8 smac[4];
-
-	u8 dmac[6];
-	u8 sid[3];
-	u8 did[3];
-
-	struct {
-		u32 port_id : 3;
-		u32 host_id : 2;
-		u32 rsvd : 27;
-	} wd5;
-	u32 rsvd;
-};
-
-/* BDSL: Session Enable WQE. The keys field only uses 26 bytes of room */
-struct spfc_cmdqe_sess_en {
-	struct {
-		u32 rx_id : 16;
-		u32 port_id : 8;
-		u32 task_type : 8;
-	} wd0;
-
-	struct {
-		u32 cid : 20;
-		u32 rsvd1 : 12;
-	} wd1;
-
-	struct {
-		u32 conn_id : 16;
-		u32 scqn : 16;
-	} wd2;
-
-	struct {
-		u32 xid_p : 20;
-		u32 rsvd3 : 12;
-	} wd3;
-
-	u32 context_gpa_hi;
-	u32 context_gpa_lo;
-	struct spfc_keys keys;
-	u32 context[64];
-};
-
-/* Control Section */
-struct spfc_wqe_ctrl {
-	struct spfc_wqe_ctrl_ch ch;
-	struct spfc_wqe_ctrl_qsf qsf;
-};
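For context, the guard/app/ref tag controls in spfc_fc_dif_info above operate on the standard 8-byte T10 DIF protection tuple appended to each data block. The struct below is textbook T10 background, not part of the driver:

```c
#include <linux/types.h>

struct t10_dif_tuple {
	__be16 guard_tag;	/* CRC16 over the data block */
	__be16 app_tag;		/* application tag */
	__be32 ref_tag;		/* typically the low 32 bits of the LBA */
};
```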
-
-struct spfc_sqe_els_rsp {
-	struct {
-		u32 echo_flag : 16;
-		u32 data_len : 16;
-	} wd0;
-
-	struct {
-		u32 rsvd1 : 27;
-		u32 offload_flag : 1;
-		u32 lp_bflag : 1;
-		u32 clr_io : 1;
-		u32 para_update : 2;
-	} wd1;
-
-	struct {
-		u32 seq_cnt : 1;
-		u32 e_d_tov : 1;
-		u32 rsvd2 : 6;
-		u32 class_mode : 8; /* 0: class3, 1: class2 */
-		u32 tx_mfs : 16;
-	} wd2;
-
-	u32 e_d_tov_timer_val;
-
-	struct {
-		u32 conf : 1;
-		u32 rec : 1;
-		u32 xfer_dis : 1;
-		u32 immi_taskid_cnt : 13;
-		u32 immi_taskid_start : 16;
-	} wd4;
-
-	u32 first_burst_len;
-
-	struct {
-		u32 reset_exch_end : 16;
-		u32 reset_exch_start : 16;
-	} wd6;
-
-	struct {
-		u32 scqn : 16;
-		u32 hotpooltag : 16;
-	} wd7;
-
-	u32 magic_local;
-	u32 magic_remote;
-	u32 ts_rcv_echo_req;
-	u32 sid;
-	u32 did;
-	u32 context_gpa_hi;
-	u32 context_gpa_lo;
-};
-
-struct spfc_sqe_reset_session {
-	struct {
-		u32 reset_exch_end : 16;
-		u32 reset_exch_start : 16;
-	} wd0;
-
-	struct {
-		u32 reset_did : 24;
-		u32 mode : 2;
-		u32 rsvd : 6;
-	} wd1;
-
-	struct {
-		u32 reset_sid : 24;
-		u32 rsvd : 8;
-	} wd2;
-
-	struct {
-		u32 scqn : 16;
-		u32 rsvd : 16;
-	} wd3;
-};
-
-struct spfc_sqe_nop_sq {
-	struct {
-		u32 scqn : 16;
-		u32 rsvd : 16;
-	} wd0;
-	u32 magic_num;
-};
-
-struct spfc_sqe_t_els_gs {
-	u16 echo_flag;
-	u16 data_len;
-
-	struct {
-		u32 rsvd1 : 9;
-		u32 offload_flag : 1;
-		u32 origin_hottag : 16;
-		u32 rec_flag : 1;
-		u32 rec_support : 1;
-		u32 lp_bflag : 1;
-		u32 clr_io : 1;
-		u32 para_update : 2;
-	} wd4;
-
-	struct {
-		u32 seq_cnt : 1;
-		u32 e_d_tov : 1;
-		u32 rsvd2 : 14;
-		u32 tx_mfs : 16;
-	} wd5;
-
-	u32 e_d_tov_timer_val;
-
-	struct {
-		u32 reset_exch_end : 16;
-		u32 reset_exch_start : 16;
-	} wd6;
-
-	struct {
-		u32 scqn : 16;
-		u32 hotpooltag : 16; /* used for sending ELS rsp */
-	} wd7;
-
-	u32 sid;
-	u32 did;
-	u32 context_gpa_hi;
-	u32 context_gpa_lo;
-	u32 origin_magicnum;
-};
-
-struct spfc_sqe_els_gs_elsrsp_comm {
-	u16 rsvd;
-	u16 data_len;
-};
-
-struct spfc_sqe_lpb_msg {
-	struct {
-		u32 reset_exch_end : 16;
-		u32 reset_exch_start : 16;
-	} w0;
-
-	struct {
-		u32 reset_did : 24;
-		u32 reset_type : 2;
-		u32 rsvd0 : 6;
-	} w1;
-
-	struct {
-		u32 reset_sid : 24;
-		u32 rsvd0 : 8;
-	} w2;
-
-	u16 tmf_exch_id;
-	u16 rsvd1;
-
-	u8 reset_lun[8];
-};
-
-/* SQE Task Section's Contents except Common Header */
-union spfc_sqe_ts_cont {
-	struct spfc_sqe_icmnd icmnd;
-	struct spfc_sqe_abts abts;
-	struct spfc_sqe_els_rsp els_rsp;
-	struct spfc_sqe_t_els_gs t_els_gs;
-	struct spfc_sqe_els_gs_elsrsp_comm els_gs_elsrsp_comm;
-	struct spfc_sqe_reset_session reset_session;
-	struct spfc_sqe_lpb_msg lpb_msg;
-	struct spfc_sqe_nop_sq nop_sq;
-	u32 value[17];
-};
-
-struct spfc_sqe_nvme_icmnd_part2 {
-	u8 nvme_cmnd_iu_part2_data[FC_NVME_CMDIU_LEN - FC_SCSI_CMDIU_LEN];
-};
-
-union spfc_sqe_ts_ex {
-	struct spfc_sqe_nvme_icmnd_part2 nvme_icmnd_part2;
-	u32 value[12];
-};
-
-struct spfc_sqe_ts {
-	/* SQE Task Section's Common Header */
-	u32 local_xid : 16; /* local exch_id; icmnd/els send uses it for hotpooltag */
-	u32 crc_inj : 1;
-	u32 immi_std : 1;
-	u32 cdb_type : 1; /* cdb_type = 0: CDB_LEN = 16B, cdb_type = 1: CDB_LEN = 32B */
-	u32 rsvd : 5; /* used for loopback saving bdsl's num */
-	u32 task_type : 8;
-
-	struct {
-		u16 conn_id;
-		u16 remote_xid;
-	} wd0;
-
-	u32 xid : 20;
-	u32 sqn : 12;
-	u32 cid;
-	u32 magic_num;
-	union spfc_sqe_ts_cont cont;
-};
-
-struct spfc_constant_sge {
-	u32 buf_addr_hi;
-	u32 buf_addr_lo;
-};
-
-struct spfc_variable_sge {
-	u32 buf_addr_hi;
-	u32 buf_addr_lo;
-
-	struct {
-		u32 buf_len : 31;
-		u32 r_flag : 1;
-	} wd0;
-
-	struct {
-		u32 buf_addr_gpa : 16;
-		u32 xid : 14;
-		u32 extension_flag : 1;
-		u32 last_flag : 1;
-	} wd1;
-};
-
-#define FC_WQE_SIZE 256
-/* SQE, should not be over 256B */
-struct spfc_sqe {
-	struct spfc_wqe_ctrl ctrl_sl;
-	u32 sid;
-	u32 did;
-	u64 wqe_gpa; /* GPA, shifted 6 bits to the right */
-	u64 db_val;
-	union spfc_sqe_ts_ex ts_ex;
-	struct spfc_variable_sge esge[3];
-	struct spfc_wqe_ctrl ectrl_sl;
-	struct spfc_sqe_ts ts_sl;
-	struct spfc_variable_sge sge[2];
-};
-
-struct spfc_rqe_ctrl {
-	struct spfc_wqe_ctrl_ch ch;
-
-	struct {
-		u16 wqe_msn;
-		u16 dump_wqe_msn;
-	} wd0;
-};
-
-struct spfc_rqe_drv {
-	struct {
-		u32 rsvd0 : 16;
-		u32 user_id : 16;
-	} wd0;
-
-	u32 rsvd1;
-};
-
-/* RQE, should not be over 32B */
-struct spfc_rqe {
-	struct spfc_rqe_ctrl ctrl_sl;
-	u32 cqe_gpa_h;
-	u32 cqe_gpa_l;
-	struct spfc_constant_sge bds_sl;
-	struct spfc_rqe_drv drv_sl;
-};
-
-struct spfc_cmdqe_abort {
-	struct {
-		u32 rx_id : 16;
-		u32 rsvd0 : 8;
-		u32 task_type : 8;
-	} wd0;
-
-	struct {
-		u32 ox_id : 16;
-		u32 rsvd1 : 12;
-		u32 trsp_send : 1;
-		u32 tcmd_send : 1;
-		u32 immi : 1;
-		u32 reply_sts : 1;
-	} wd1;
-
-	struct {
-		u32 conn_id : 16;
-		u32 scqn : 16;
-	} wd2;
-
-	struct {
-		u32 xid : 20;
-		u32 rsvd : 12;
-	} wd3;
-
-	struct {
-		u32 cid : 20;
-		u32 rsvd : 12;
-	} wd4;
-	struct {
-		u32 hotpooltag : 16;
-		u32 rsvd : 16;
-	} wd5; /* v6 new define */
-	/* Abort timeout. Used when the abort and the io cmd reach ucode via
-	 * different paths and the io cmd will not arrive.
-	 */
-	u32 time_out;
-	u32 magic_num;
-};
-
-struct spfc_cmdqe_abts_rsp {
-	struct {
-		u32 rx_id : 16;
-		u32 rsvd0 : 8;
-		u32 task_type : 8;
-	} wd0;
-
-	struct {
-		u32 ox_id : 16;
-		u32 rsvd1 : 4;
-		u32 port_id : 4;
-		u32 payload_len : 7;
-		u32 rsp_type : 1;
-	} wd1;
-
-	struct {
-		u32 conn_id : 16;
-		u32 scqn : 16;
-	} wd2;
-
-	struct {
-		u32 xid : 20;
-		u32 rsvd : 12;
-	} wd3;
-
-	struct {
-		u32 cid : 20;
-		u32 rsvd : 12;
-	} wd4;
-
-	struct {
-		u32 req_rx_id : 16;
-		u32 hotpooltag : 16;
-	} wd5;
-
-	/* payload length is according to rsp_type: 1 DWORD or 3 DWORDs */
-	u32 payload[3];
-};
-
-struct spfc_cmdqe_buffer_clear {
-	struct {
-		u32 rsvd1 : 16;
-		u32 rsvd0 : 8;
-		u32 wqe_type : 8;
-	} wd0;
-
-	struct {
-		u32 rx_id_end : 16;
-		u32 rx_id_start : 16;
-	} wd1;
-
-	u32 scqn;
-	u32 wd3;
-};
-
-struct spfc_cmdqe_flush_sq {
-	struct {
-		u32 entry_count : 16;
-		u32 rsvd : 8;
-		u32 wqe_type : 8;
-	} wd0;
-
-	struct {
-		u32 scqn : 16;
-		u32 port_id : 4;
-		u32 pos : 11;
-		u32 last_wqe : 1;
-	} wd1;
-
-	struct {
-		u32 rsvd : 4;
-		u32 clr_pos : 12;
-		u32 pkt_ptr : 16;
-	} wd2;
-
-	struct {
-		u32 first_sq_xid : 24;
-		u32 sqqid_start_per_session : 4;
-		u32 sqcnt_per_session : 4;
-	} wd3;
-};
-
-struct spfc_cmdqe_dump_exch {
-	struct {
-		u32 rsvd1 : 16;
-		u32 rsvd0 : 8;
-		u32 task_type : 8;
-	} wd0;
-
-	u16 oqid_wr;
-	u16 oqid_rd;
-
-	u32 host_id;
-	u32 func_id;
-	u32 cache_id;
-	u32 exch_id;
-};
-
-struct spfc_cmdqe_creat_srqc {
-	struct {
-		u32 rsvd1 : 16;
-		u32 rsvd0 : 8;
-		u32 task_type : 8;
-	} wd0;
-
-	u32 srqc_gpa_h;
-	u32 srqc_gpa_l;
-
-	u32 srqc[16]; /* srqc_size=64B */
-};
-
-struct spfc_cmdqe_delete_srqc {
-	struct {
-		u32 rsvd1 : 16;
-		u32 rsvd0 : 8;
-		u32 task_type : 8;
-	} wd0;
-
-	u32 srqc_gpa_h;
-	u32 srqc_gpa_l;
-};
-
-struct spfc_cmdqe_clr_srq {
-	struct {
-		u32 rsvd1 : 16;
-		u32 rsvd0 : 8;
-		u32 task_type : 8;
-	} wd0;
-
-	struct {
-		u32 scqn : 16;
-		u32 srq_type : 16;
-	} wd1;
-
-	u32 srqc_gpa_h;
-	u32 srqc_gpa_l;
-};
-
-struct spfc_cmdqe_creat_scqc {
-	struct {
-		u32 rsvd1 : 16;
-		u32 rsvd0 : 8;
-		u32 task_type : 8;
-	} wd0;
-
-	struct {
-		u32 scqn : 16;
-		u32 rsvd2 : 16;
-	} wd1;
-
-	u32 scqc[16]; /* scqc_size=64B */
-};
-
-struct spfc_cmdqe_delete_scqc {
-	struct {
-		u32 rsvd1 : 16;
-		u32 rsvd0 : 8;
-		u32 task_type : 8;
-	} wd0;
-
-	struct {
-		u32 scqn : 16;
-		u32 rsvd2 : 16;
-	} wd1;
-}; - -struct spfc_cmdqe_creat_ssqc { - struct { - u32 rsvd1 : 4; - u32 xid : 20; - u32 task_type : 8; - } wd0; - - struct { - u32 scqn : 16; - u32 rsvd2 : 16; - } wd1; - u32 context_gpa_hi; - u32 context_gpa_lo; - - u32 ssqc[64]; /* ssqc_size=256B */ -}; - -struct spfc_cmdqe_delete_ssqc { - struct { - u32 entry_count : 4; - u32 xid : 20; - u32 task_type : 8; - } wd0; - - struct { - u32 scqn : 16; - u32 rsvd2 : 16; - } wd1; - u32 context_gpa_hi; - u32 context_gpa_lo; -}; - -/* add xid free via cmdq */ -struct spfc_cmdqe_exch_id_free { - struct { - u32 task_id : 16; - u32 port_id : 8; - u32 rsvd0 : 8; - } wd0; - - u32 magic_num; - - struct { - u32 scqn : 16; - u32 hotpool_tag : 16; - } wd2; - struct { - u32 rsvd1 : 31; - u32 clear_abort_flag : 1; - } wd3; - u32 sid; - u32 did; - u32 type; /* ELS/ELS RSP/IO */ -}; - -struct spfc_cmdqe_cmdqe_dfx { - struct { - u32 rsvd1 : 4; - u32 xid : 20; - u32 task_type : 8; - } wd0; - - struct { - u32 qid_crclen : 12; - u32 cid : 20; - } wd1; - u32 context_gpa_hi; - u32 context_gpa_lo; - u32 dfx_type; - - u32 rsv[16]; -}; - -struct spfc_sqe_t_rsp { - struct { - u32 rsvd1 : 16; - u32 fcp_rsp_len : 8; - u32 busy_rsp : 3; - u32 immi : 1; - u32 mode : 1; - u32 conf : 1; - u32 fill : 2; - } wd0; - - u32 hotpooltag; - - union { - struct { - u32 addr_h; - u32 addr_l; - } gpa; - - struct { - u32 data[23]; /* FCP_RESP payload buf, 92B rsvd */ - } buf; - } payload; -}; - -struct spfc_sqe_tmf_t_rsp { - struct { - u32 scqn : 16; - u32 fcp_rsp_len : 8; - u32 pkt_nosnd_flag : 3; /* tmf rsp snd flag, 0:snd, 1: not snd, Driver ignore */ - u32 reset_type : 2; - u32 conf : 1; - u32 fill : 2; - } wd0; - - struct { - u32 reset_exch_end : 16; - u32 reset_exch_start : 16; - } wd1; - - struct { - u16 hotpooltag; /*tmf rsp hotpooltag, Driver ignore */ - u16 rsvd; - } wd2; - - u8 lun[8]; /* Lun ID */ - u32 data[20]; /* FCP_RESP payload buf, 80B rsvd */ -}; - -struct spfc_sqe_tresp_ts { - /* SQE Task Section's Common Header */ - u16 local_xid; - u8 rsvd0; - u8 task_type; - - struct { - u16 conn_id; - u16 remote_xid; - } wd0; - - u32 xid : 20; - u32 sqn : 12; - u32 cid; - u32 magic_num; - struct spfc_sqe_t_rsp t_rsp; -}; - -struct spfc_sqe_tmf_resp_ts { - /* SQE Task Section's Common Header */ - u16 local_xid; - u8 rsvd0; - u8 task_type; - - struct { - u16 conn_id; - u16 remote_xid; - } wd0; - - u32 xid : 20; - u32 sqn : 12; - u32 cid; - u32 magic_num; /* magic num */ - struct spfc_sqe_tmf_t_rsp tmf_rsp; -}; - -/* SQE for fcp response, max TSL is 120B */ -struct spfc_sqe_tresp { - struct spfc_wqe_ctrl ctrl_sl; - u64 taskrsvd; - u64 wqe_gpa; - u64 db_val; - union spfc_sqe_ts_ex ts_ex; - struct spfc_variable_sge esge[3]; - struct spfc_wqe_ctrl ectrl_sl; - struct spfc_sqe_tresp_ts ts_sl; -}; - -/* SQE for tmf response, max TSL is 120B */ -struct spfc_sqe_tmf_rsp { - struct spfc_wqe_ctrl ctrl_sl; - u64 taskrsvd; - u64 wqe_gpa; - u64 db_val; - union spfc_sqe_ts_ex ts_ex; - struct spfc_variable_sge esge[3]; - struct spfc_wqe_ctrl ectrl_sl; - struct spfc_sqe_tmf_resp_ts ts_sl; -}; - -/* SCQE Common Header */ -struct spfc_scqe_ch { - struct { - u32 task_type : 8; - u32 sqn : 13; - u32 cqe_remain_cnt : 3; - u32 err_code : 7; - u32 owner : 1; - } wd0; -}; - -struct spfc_scqe_type { - struct spfc_scqe_ch ch; - - u32 rsvd0; - - u16 conn_id; - u16 rsvd4; - - u32 rsvd1[12]; - - struct { - u32 done : 1; - u32 rsvd : 23; - u32 dif_vry_rst : 8; - } wd0; -}; - -struct spfc_scqe_sess_sts { - struct spfc_scqe_ch ch; - - struct { - u32 xid_qpn : 20; - u32 rsvd1 : 12; - } wd0; - - struct { - u32 conn_id 
: 16;
-		u32 rsvd3 : 16;
-	} wd1;
-
-	struct {
-		u32 cid : 20;
-		u32 rsvd2 : 12;
-	} wd2;
-
-	u64 rsvd3;
-};
-
-struct spfc_scqe_comm_rsp_sts {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd0;
-
-	struct {
-		u32 conn_id : 16;
-		u32 hotpooltag : 16; /* ucode returns hotpooltag to drv */
-	} wd1;
-
-	u32 magic_num;
-};
-
-struct spfc_scqe_iresp {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd0;
-
-	struct {
-		u32 conn_id : 16;
-		u32 rsvd0 : 3;
-		u32 user_id_num : 8;
-		u32 dif_info : 5;
-	} wd1;
-
-	struct {
-		u32 scsi_status : 8;
-		u32 fcp_flag : 8;
-		u32 hotpooltag : 16; /* ucode returns hotpooltag to drv */
-	} wd2;
-
-	u32 fcp_resid;
-	u32 fcp_sns_len;
-	u32 fcp_rsp_len;
-	u32 magic_num;
-	u16 user_id[FC_SENSEDATA_USERID_CNT_MAX];
-	u32 rsv1;
-};
-
-struct spfc_scqe_nvme_iresp {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd0;
-
-	struct {
-		u32 conn_id : 16;
-		u32 eresp_flag : 8;
-		u32 user_id_num : 8;
-	} wd1;
-
-	struct {
-		u32 scsi_status : 8;
-		u32 fcp_flag : 8;
-		u32 hotpooltag : 16; /* ucode returns hotpooltag to drv */
-	} wd2;
-	u32 magic_num;
-	u32 eresp[8];
-};
-
-#pragma pack(1)
-struct spfc_dif_result {
-	u8 vrd_rpt;
-	u16 pad;
-	u8 rcv_pi_vb;
-	u32 rcv_pi_h;
-	u32 rcv_pi_l;
-	u16 vrf_agm_imm;
-	u16 ri_agm_imm;
-};
-
-#pragma pack()
-
-struct spfc_scqe_dif_result {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd0;
-
-	struct {
-		u32 conn_id : 16;
-		u32 rsvd0 : 11;
-		u32 dif_info : 5;
-	} wd1;
-
-	struct {
-		u32 scsi_status : 8;
-		u32 fcp_flag : 8;
-		u32 hotpooltag : 16; /* ucode returns hotpooltag to drv */
-	} wd2;
-
-	u32 fcp_resid;
-	u32 fcp_sns_len;
-	u32 fcp_rsp_len;
-	u32 magic_num;
-
-	u32 rsv1[3];
-	struct spfc_dif_result difinfo;
-};
-
-struct spfc_scqe_rcv_abts_rsp {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd0;
-
-	struct {
-		u32 conn_id : 16;
-		u32 hotpooltag : 16;
-	} wd1;
-
-	struct {
-		u32 fh_rctrl : 8;
-		u32 rsvd0 : 24;
-	} wd2;
-
-	struct {
-		u32 did : 24;
-		u32 rsvd1 : 8;
-	} wd3;
-
-	struct {
-		u32 sid : 24;
-		u32 rsvd2 : 8;
-	} wd4;
-
-	/* payload length is according to fh_rctrl: 1 DWORD or 3 DWORDs */
-	u32 payload[3];
-	u32 magic_num;
-};
-
-struct spfc_scqe_fcp_rsp_sts {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd0;
-
-	struct {
-		u32 conn_id : 16;
-		u32 rsvd0 : 10;
-		u32 immi : 1;
-		u32 dif_info : 5;
-	} wd1;
-
-	u32 magic_num;
-	u32 hotpooltag;
-	u32 xfer_rsp;
-	u32 rsvd[5];
-
-	u32 dif_tmp[4]; /* HW will overwrite it */
-};
-
-struct spfc_scqe_rcv_els_cmd {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 did : 24;
-		u32 class_mode : 8; /* 0: class3, 1: class2 */
-	} wd0;
-
-	struct {
-		u32 sid : 24;
-		u32 rsvd1 : 8;
-	} wd1;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd2;
-
-	struct {
-		u32 user_id_num : 16;
-		u32 data_len : 16;
-	} wd3;
-	/* User ID of SRQ SGE, used for driver buffer release */
-	u16 user_id[FC_LS_GS_USERID_CNT_MAX];
-	u32 ts;
-};
-
-struct spfc_scqe_param_check_scq {
-	struct spfc_scqe_ch ch;
-
-	u8 rsvd0[3];
-	u8 port_id;
-
-	u16 scqn;
-	u16 check_item;
-
-	u16 exch_id_load;
-	u16 exch_id;
-
-	u16 historty_type;
-	u16 entry_count;
-
-	u32 xid;
-
-	u32 gpa_h;
-	u32 gpa_l;
-
-	u32 magic_num;
-	u32 hotpool_tag;
-
-	u32 payload_len;
-	u32 sub_err;
-
-	u32 rsvd2[3];
-};
-
-struct spfc_scqe_rcv_abts_cmd {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 did : 24;
-		u32 rsvd0 : 8;
-	} wd0;
-
-	struct {
-		u32 sid : 24;
-		u32 rsvd1 : 8;
-	} wd1;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd2;
-};
-
-struct spfc_scqe_rcv_els_gs_rsp {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd1;
-
-	struct {
-		u32 conn_id : 16;
-		u32 data_len : 16; /* ELS/GS RSP Payload length */
-	} wd2;
-
-	struct {
-		u32 did : 24;
-		u32 rsvd : 6;
-		u32 echo_rsp : 1;
-		u32 end_rsp : 1;
-	} wd3;
-
-	struct {
-		u32 sid : 24;
-		u32 user_id_num : 8;
-	} wd4;
-
-	struct {
-		u32 rsvd : 16;
-		u32 hotpooltag : 16;
-	} wd5;
-
-	u32 magic_num;
-	u16 user_id[FC_LS_GS_USERID_CNT_MAX];
-};
-
-struct spfc_scqe_rcv_flush_sts {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rsvd0 : 4;
-		u32 clr_pos : 12;
-		u32 port_id : 8;
-		u32 last_flush : 8;
-	} wd0;
-};
-
-struct spfc_scqe_rcv_clear_buf_sts {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rsvd0 : 24;
-		u32 port_id : 8;
-	} wd0;
-};
-
-struct spfc_scqe_clr_srq_rsp {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 srq_type : 16;
-		u32 cur_wqe_msn : 16;
-	} wd0;
-};
-
-struct spfc_scqe_itmf_marker_sts {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd1;
-
-	struct {
-		u32 did : 24;
-		u32 end_rsp : 8;
-	} wd2;
-
-	struct {
-		u32 sid : 24;
-		u32 rsvd1 : 8;
-	} wd3;
-
-	struct {
-		u32 hotpooltag : 16;
-		u32 rsvd : 16;
-	} wd4;
-
-	u32 magic_num;
-};
-
-struct spfc_scqe_abts_marker_sts {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd1;
-
-	struct {
-		u32 did : 24;
-		u32 end_rsp : 8;
-	} wd2;
-
-	struct {
-		u32 sid : 24;
-		u32 io_state : 8;
-	} wd3;
-
-	struct {
-		u32 hotpooltag : 16;
-		u32 rsvd : 16;
-	} wd4;
-
-	u32 magic_num;
-};
-
-struct spfc_scqe_ini_abort_sts {
-	struct spfc_scqe_ch ch;
-
-	struct {
-		u32 rx_id : 16;
-		u32 ox_id : 16;
-	} wd1;
-
-	struct {
-		u32 did : 24;
-		u32 rsvd : 8;
-	} wd2;
-
-	struct {
-		u32 sid : 24;
-		u32 io_state : 8;
-	} wd3;
-
-	struct {
-		u32 hotpooltag : 16;
-		u32 rsvd : 16;
-	} wd4;
-
-	u32 magic_num;
-};
-
-struct spfc_scqe_sq_nop_sts {
-	struct spfc_scqe_ch ch;
-	struct {
-		u32 rsvd : 16;
-		u32 sqn : 16;
-	} wd0;
-	struct {
-		u32 rsvd : 16;
-		u32 conn_id : 16;
-	} wd1;
-	u32 magic_num;
-};
-
-/* SCQE, should not be over 64B */
-#define FC_SCQE_SIZE 64
-union spfc_scqe {
-	struct spfc_scqe_type common;
-	struct spfc_scqe_sess_sts sess_sts; /* session enable/disable/delete sts */
-	struct spfc_scqe_comm_rsp_sts comm_sts; /* aborts/abts_rsp/els rsp sts */
-	struct spfc_scqe_rcv_clear_buf_sts clear_sts; /* clear buffer sts */
-	struct spfc_scqe_rcv_flush_sts flush_sts; /* flush sq sts */
-	struct spfc_scqe_iresp iresp;
-	struct spfc_scqe_rcv_abts_rsp rcv_abts_rsp; /* recv abts rsp */
-	struct spfc_scqe_fcp_rsp_sts fcp_rsp_sts; /* Read/Write/Rsp sts */
-	struct spfc_scqe_rcv_els_cmd rcv_els_cmd; /* recv els cmd */
-	struct spfc_scqe_rcv_abts_cmd rcv_abts_cmd; /* recv abts cmd */
-	struct spfc_scqe_rcv_els_gs_rsp rcv_els_gs_rsp; /* recv els/gs rsp */
-	struct spfc_scqe_clr_srq_rsp clr_srq_sts;
-	struct spfc_scqe_itmf_marker_sts itmf_marker_sts; /* tmf marker */
-	struct spfc_scqe_abts_marker_sts abts_marker_sts; /* abts marker */
-	struct spfc_scqe_dif_result dif_result;
-	struct spfc_scqe_param_check_scq param_check_sts;
-	struct spfc_scqe_nvme_iresp nvme_iresp;
-	struct spfc_scqe_ini_abort_sts ini_abort_sts;
-	struct spfc_scqe_sq_nop_sts sq_nop_sts;
-};
-
-struct spfc_cmdqe_type {
-	struct {
-		u32 rx_id : 16;
-		u32 rsvd0 : 8;
-		u32 task_type : 8;
-	} wd0;
-};
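The header caps SCQEs at FC_SCQE_SIZE (64B) and SQEs at FC_WQE_SIZE (256B) by comment only. A compile-time guard one could add with the kernel's BUILD_BUG_ON(); this check is not present in the original header:

```c
#include <linux/build_bug.h>

static inline void example_check_wqe_sizes(void)
{
	BUILD_BUG_ON(sizeof(union spfc_scqe) > FC_SCQE_SIZE);
	BUILD_BUG_ON(sizeof(struct spfc_sqe) > FC_WQE_SIZE);
}
```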
} wd0; - - u32 xid; - u32 cid; -}; - -struct spfc_cmdqe_send_aeq_err { - struct { - u32 errorevent : 8; - u32 errortype : 8; - u32 portid : 8; - u32 task_type : 8; - } wd0; -}; - -/* CMDQE, variable length */ -union spfc_cmdqe { - struct spfc_cmdqe_type common; - struct spfc_cmdqe_sess_en session_enable; - struct spfc_cmdqe_abts_rsp snd_abts_rsp; - struct spfc_cmdqe_abort snd_abort; - struct spfc_cmdqe_buffer_clear buffer_clear; - struct spfc_cmdqe_flush_sq flush_sq; - struct spfc_cmdqe_dump_exch dump_exch; - struct spfc_cmdqe_creat_srqc create_srqc; - struct spfc_cmdqe_delete_srqc delete_srqc; - struct spfc_cmdqe_clr_srq clear_srq; - struct spfc_cmdqe_creat_scqc create_scqc; - struct spfc_cmdqe_delete_scqc delete_scqc; - struct spfc_cmdqe_send_ack send_ack; - struct spfc_cmdqe_send_aeq_err send_aeqerr; - struct spfc_cmdqe_creat_ssqc createssqc; - struct spfc_cmdqe_delete_ssqc deletessqc; - struct spfc_cmdqe_cmdqe_dfx dfx_info; - struct spfc_cmdqe_exch_id_free xid_free; -}; - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_io.c b/drivers/scsi/spfc/hw/spfc_io.c deleted file mode 100644 index 2b1d1c607b132c91bc09a7a2c4767c0929986779..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_io.c +++ /dev/null @@ -1,1193 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_io.h" -#include "spfc_module.h" -#include "spfc_service.h" - -#define SPFC_SGE_WD1_XID_MASK 0x3fff - -u32 dif_protect_opcode = INVALID_VALUE32; -u32 dif_app_esc_check = SPFC_DIF_APP_REF_ESC_CHECK; -u32 dif_ref_esc_check = SPFC_DIF_APP_REF_ESC_CHECK; -u32 grd_agm_ini_ctrl = SPFC_DIF_CRC_CS_INITIAL_CONFIG_BY_BIT0_1; -u32 ref_tag_no_increase; -u32 dix_flag; -u32 grd_ctrl; -u32 grd_agm_ctrl = SPFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_T10_CRC16; -u32 cmp_app_tag_mask = 0xffff; -u32 app_tag_ctrl; -u32 ref_tag_ctrl; -u32 ref_tag_mod = INVALID_VALUE32; -u32 rep_ref_tag; -u32 rx_rep_ref_tag; -u16 cmp_app_tag; -u16 rep_app_tag; - -static void spfc_dif_err_count(struct spfc_hba_info *hba, u8 info) -{ - u8 dif_info = info; - - if (dif_info & SPFC_TX_DIF_ERROR_FLAG) { - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_SEND_DIFERR_ALL); - if (dif_info & SPFC_DIF_ERROR_CODE_CRC) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_SEND_DIFERR_CRC); - - if (dif_info & SPFC_DIF_ERROR_CODE_APP) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_SEND_DIFERR_APP); - - if (dif_info & SPFC_DIF_ERROR_CODE_REF) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_SEND_DIFERR_REF); - } else { - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_RECV_DIFERR_ALL); - if (dif_info & SPFC_DIF_ERROR_CODE_CRC) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_RECV_DIFERR_CRC); - - if (dif_info & SPFC_DIF_ERROR_CODE_APP) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_RECV_DIFERR_APP); - - if (dif_info & SPFC_DIF_ERROR_CODE_REF) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_RECV_DIFERR_REF); - } -} - -void spfc_build_no_dif_control(struct unf_frame_pkg *pkg, - struct spfc_fc_dif_info *info) -{ - struct spfc_fc_dif_info *dif_info = info; - - /* dif enable or disable */ - dif_info->wd0.difx_en = SPFC_DIF_DISABLE; - - dif_info->wd1.vpid = pkg->qos_level; - dif_info->wd1.lun_qos_en = 1; -} - -void spfc_dif_action_forward(struct spfc_fc_dif_info *dif_info_l1, - struct unf_dif_control_info *dif_ctrl_u1) -{ - dif_info_l1->wd0.grd_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_CRC_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.grd_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_REPLACE_CRC_MASK) - ? 
SPFC_DIF_GARD_REF_APP_CTRL_REPLACE - : SPFC_DIF_GARD_REF_APP_CTRL_FORWARD; - - dif_info_l1->wd0.ref_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_LBA_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.ref_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_REPLACE_LBA_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_REPLACE - : SPFC_DIF_GARD_REF_APP_CTRL_FORWARD; - - dif_info_l1->wd0.app_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_APP_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.app_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_REPLACE_APP_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_REPLACE - : SPFC_DIF_GARD_REF_APP_CTRL_FORWARD; -} - -void spfc_dif_action_delete(struct spfc_fc_dif_info *dif_info_l1, - struct unf_dif_control_info *dif_ctrl_u1) -{ - dif_info_l1->wd0.grd_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_CRC_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.grd_ctrl |= SPFC_DIF_GARD_REF_APP_CTRL_DELETE; - - dif_info_l1->wd0.ref_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_LBA_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.ref_tag_ctrl |= SPFC_DIF_GARD_REF_APP_CTRL_DELETE; - - dif_info_l1->wd0.app_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_APP_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.app_tag_ctrl |= SPFC_DIF_GARD_REF_APP_CTRL_DELETE; -} - -static void spfc_convert_dif_action(struct unf_dif_control_info *dif_ctrl, - struct spfc_fc_dif_info *dif_info) -{ - struct spfc_fc_dif_info *dif_info_l1 = NULL; - struct unf_dif_control_info *dif_ctrl_u1 = NULL; - - dif_info_l1 = dif_info; - dif_ctrl_u1 = dif_ctrl; - - switch (UNF_DIF_ACTION_MASK & dif_ctrl_u1->protect_opcode) { - case UNF_DIF_ACTION_VERIFY_AND_REPLACE: - case UNF_DIF_ACTION_VERIFY_AND_FORWARD: - spfc_dif_action_forward(dif_info_l1, dif_ctrl_u1); - break; - - case UNF_DIF_ACTION_INSERT: - dif_info_l1->wd0.grd_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.grd_ctrl |= SPFC_DIF_GARD_REF_APP_CTRL_INSERT; - dif_info_l1->wd0.ref_tag_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.ref_tag_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_INSERT; - dif_info_l1->wd0.app_tag_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.app_tag_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_INSERT; - break; - - case UNF_DIF_ACTION_VERIFY_AND_DELETE: - spfc_dif_action_delete(dif_info_l1, dif_ctrl_u1); - break; - - default: - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "Unknown dif protect opcode 0x%x", - dif_ctrl_u1->protect_opcode); - break; - } -} - -void spfc_get_dif_info_l1(struct spfc_fc_dif_info *dif_info_l1, - struct unf_dif_control_info *dif_ctrl_u1) -{ - dif_info_l1->wd1.cmp_app_tag_msk = cmp_app_tag_mask; - - dif_info_l1->rep_app_tag = dif_ctrl_u1->app_tag; - dif_info_l1->rep_ref_tag = dif_ctrl_u1->start_lba; - - dif_info_l1->cmp_app_tag = dif_ctrl_u1->app_tag; - dif_info_l1->cmp_ref_tag = dif_ctrl_u1->start_lba; - - if (cmp_app_tag != 0) - dif_info_l1->cmp_app_tag = cmp_app_tag; - - if (rep_app_tag != 0) - dif_info_l1->rep_app_tag = rep_app_tag; - - if (rep_ref_tag != 0) - dif_info_l1->rep_ref_tag = rep_ref_tag; -} - -void spfc_build_dif_control(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, - struct spfc_fc_dif_info *dif_info) -{ - struct spfc_fc_dif_info *dif_info_l1 = 
NULL; - struct unf_dif_control_info *dif_ctrl_u1 = NULL; - - dif_info_l1 = dif_info; - dif_ctrl_u1 = &pkg->dif_control; - - /* dif enable or disable */ - dif_info_l1->wd0.difx_en = SPFC_DIF_ENABLE; - - dif_info_l1->wd1.vpid = pkg->qos_level; - dif_info_l1->wd1.lun_qos_en = 1; - - /* 512B + 8 size mode */ - dif_info_l1->wd0.sct_size = (dif_ctrl_u1->flags & UNF_DIF_SECTSIZE_4KB) - ? SPFC_DIF_SECTOR_4KB_MODE - : SPFC_DIF_SECTOR_512B_MODE; - - /* dif type 1 */ - dif_info_l1->wd0.dif_verify_type = dif_type; - - /* Check whether the 0xffff app or ref domain is isolated */ - /* If all ff messages are displayed in type1 app, checkcheck sector - * dif_info_l1->wd0.difx_app_esc = SPFC_DIF_APP_REF_ESC_CHECK - */ - - dif_info_l1->wd0.difx_app_esc = dif_app_esc_check; - - /* type1 ref tag If all ff is displayed, check sector is required */ - dif_info_l1->wd0.difx_ref_esc = dif_ref_esc_check; - - /* Currently, only t10 crc is supported */ - dif_info_l1->wd0.grd_agm_ctrl = 0; - - /* Set this parameter based on the values of bit zero and bit one. - * The initial value is 0, and the value is UNF_DEFAULT_CRC_GUARD_SEED - */ - dif_info_l1->wd0.grd_agm_ini_ctrl = grd_agm_ini_ctrl; - dif_info_l1->wd0.app_tag_ctrl = 0; - dif_info_l1->wd0.grd_ctrl = 0; - dif_info_l1->wd0.ref_tag_ctrl = 0; - - /* Convert the verify operation, replace, forward, insert, - * and delete operations based on the actual operation code of the upper - * layer - */ - if (dif_protect_opcode != INVALID_VALUE32) { - dif_ctrl_u1->protect_opcode = - dif_protect_opcode | - (dif_ctrl_u1->protect_opcode & UNF_DIF_ACTION_MASK); - } - - spfc_convert_dif_action(dif_ctrl_u1, dif_info_l1); - dif_info_l1->wd0.app_tag_ctrl |= app_tag_ctrl; - - /* Address self-increase mode */ - dif_info_l1->wd0.ref_tag_mode = - (dif_ctrl_u1->protect_opcode & UNF_DIF_ACTION_NO_INCREASE_REFTAG) - ? (BOTH_NONE) - : (BOTH_INCREASE); - - if (ref_tag_mod != INVALID_VALUE32) - dif_info_l1->wd0.ref_tag_mode = ref_tag_mod; - - /* This parameter is used only when type 3 is set to 0xffff. 
*/ - spfc_get_dif_info_l1(dif_info_l1, dif_ctrl_u1); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) sid_did(0x%x_0x%x) package type(0x%x) apptag(0x%x) flag(0x%x) opcode(0x%x) fcpdl(0x%x) statlba(0x%x)", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did, pkg->type, pkg->dif_control.app_tag, - pkg->dif_control.flags, pkg->dif_control.protect_opcode, - pkg->dif_control.fcp_dl, pkg->dif_control.start_lba); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) cover dif control info, app:cmp_tag(0x%x) cmp_tag_mask(0x%x) rep_tag(0x%x), ref:tag_mode(0x%x) cmp_tag(0x%x) rep_tag(0x%x).", - hba->port_cfg.port_id, dif_info_l1->cmp_app_tag, - dif_info_l1->wd1.cmp_app_tag_msk, dif_info_l1->rep_app_tag, - dif_info_l1->wd0.ref_tag_mode, dif_info_l1->cmp_ref_tag, - dif_info_l1->rep_ref_tag); - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) cover dif control info, ctrl:grd(0x%x) ref(0x%x) app(0x%x).", - hba->port_cfg.port_id, dif_info_l1->wd0.grd_ctrl, - dif_info_l1->wd0.ref_tag_ctrl, - dif_info_l1->wd0.app_tag_ctrl); -} - -static u32 spfc_fill_external_sgl_page(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, - struct unf_esgl_page *esgl_page, - u32 sge_num, int direction, - u32 context_id, u32 dif_flag) -{ - u32 ret = UNF_RETURN_ERROR; - u32 index = 0; - u32 sge_num_per_page = 0; - u32 buffer_addr = 0; - u32 buf_len = 0; - char *buf = NULL; - ulong phys = 0; - struct unf_esgl_page *unf_esgl_page = NULL; - struct spfc_variable_sge *sge = NULL; - - unf_esgl_page = esgl_page; - while (sge_num > 0) { - /* Obtains the initial address of the sge page */ - sge = (struct spfc_variable_sge *)unf_esgl_page->page_address; - - /* Calculate the number of sge on each page */ - sge_num_per_page = (unf_esgl_page->page_size) / sizeof(struct spfc_variable_sge); - - /* Fill in sgl page. The last sge of each page is link sge by - * default - */ - for (index = 0; index < (sge_num_per_page - 1); index++) { - UNF_GET_SGL_ENTRY(ret, (void *)pkg, &buf, &buf_len, dif_flag); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - phys = (ulong)buf; - sge[index].buf_addr_hi = UNF_DMA_HI32(phys); - sge[index].buf_addr_lo = UNF_DMA_LO32(phys); - sge[index].wd0.buf_len = buf_len; - sge[index].wd0.r_flag = 0; - sge[index].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sge[index].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - /* Parity bit */ - sge[index].wd1.buf_addr_gpa = (sge[index].buf_addr_lo >> UNF_SHIFT_16); - sge[index].wd1.xid = (context_id & SPFC_SGE_WD1_XID_MASK); - - spfc_cpu_to_big32(&sge[index], sizeof(struct spfc_variable_sge)); - - sge_num--; - if (sge_num == 0) - break; - } - - /* sge Set the end flag on the last sge of the page if all the - * pages have been filled. - */ - if (sge_num == 0) { - sge[index].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sge[index].wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - - /* Parity bit */ - buffer_addr = be32_to_cpu(sge[index].buf_addr_lo); - sge[index].wd1.buf_addr_gpa = (buffer_addr >> UNF_SHIFT_16); - sge[index].wd1.xid = (context_id & SPFC_SGE_WD1_XID_MASK); - - spfc_cpu_to_big32(&sge[index].wd1, SPFC_DWORD_BYTE); - } - /* If only one sge is left empty, the sge reserved on the page - * is used for filling. 
- */ - else if (sge_num == 1) { - UNF_GET_SGL_ENTRY(ret, (void *)pkg, &buf, &buf_len, - dif_flag); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - phys = (ulong)buf; - sge[index].buf_addr_hi = UNF_DMA_HI32(phys); - sge[index].buf_addr_lo = UNF_DMA_LO32(phys); - sge[index].wd0.buf_len = buf_len; - sge[index].wd0.r_flag = 0; - sge[index].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sge[index].wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - - /* Parity bit */ - sge[index].wd1.buf_addr_gpa = (sge[index].buf_addr_lo >> UNF_SHIFT_16); - sge[index].wd1.xid = (context_id & SPFC_SGE_WD1_XID_MASK); - - spfc_cpu_to_big32(&sge[index], sizeof(struct spfc_variable_sge)); - - sge_num--; - } else { - /* Apply for a new sgl page and fill in link sge */ - UNF_GET_FREE_ESGL_PAGE(unf_esgl_page, hba->lport, pkg); - if (!unf_esgl_page) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Get free esgl page failed."); - return UNF_RETURN_ERROR; - } - phys = unf_esgl_page->esgl_phy_addr; - sge[index].buf_addr_hi = UNF_DMA_HI32(phys); - sge[index].buf_addr_lo = UNF_DMA_LO32(phys); - - /* For the cascaded wqe, you only need to enter the - * cascading buffer address and extension flag, and do - * not need to fill in other fields - */ - sge[index].wd0.buf_len = 0; - sge[index].wd0.r_flag = 0; - sge[index].wd1.extension_flag = SPFC_WQE_SGE_EXTEND_FLAG; - sge[index].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - /* parity bit */ - sge[index].wd1.buf_addr_gpa = (sge[index].buf_addr_lo >> UNF_SHIFT_16); - sge[index].wd1.xid = (context_id & SPFC_SGE_WD1_XID_MASK); - - spfc_cpu_to_big32(&sge[index], sizeof(struct spfc_variable_sge)); - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]Port(0x%x) SID(0x%x) DID(0x%x) RXID(0x%x) build esgl left sge num: %u.", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did, - pkg->frame_head.oxid_rxid, sge_num); - } - - return RETURN_OK; -} - -static u32 spfc_build_local_dif_sgl(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, struct spfc_sqe *sqe, - int direction, u32 bd_sge_num) -{ - u32 ret = UNF_RETURN_ERROR; - char *buf = NULL; - u32 buf_len = 0; - ulong phys = 0; - u32 dif_sge_place = 0; - - /* DIF SGE must be followed by BD SGE */ - dif_sge_place = ((bd_sge_num <= pkg->entry_count) ? bd_sge_num : pkg->entry_count); - - /* The entry_count= 0 needs to be specially processed and does not need - * to be mounted. As long as len is set to zero, Last-bit is set to one, - * and E-bit is set to 0. - */ - if (pkg->dif_control.dif_sge_count == 0) { - sqe->sge[dif_sge_place].buf_addr_hi = 0; - sqe->sge[dif_sge_place].buf_addr_lo = 0; - sqe->sge[dif_sge_place].wd0.buf_len = 0; - } else { - UNF_CM_GET_DIF_SGL_ENTRY(ret, (void *)pkg, &buf, &buf_len); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "DOUBLE DIF Get Dif Buf Fail."); - return UNF_RETURN_ERROR; - } - phys = (ulong)buf; - sqe->sge[dif_sge_place].buf_addr_hi = UNF_DMA_HI32(phys); - sqe->sge[dif_sge_place].buf_addr_lo = UNF_DMA_LO32(phys); - sqe->sge[dif_sge_place].wd0.buf_len = buf_len; - } - - /* rdma flag. If the fc is not used, enter 0. */ - sqe->sge[dif_sge_place].wd0.r_flag = 0; - - /* parity bit */ - sqe->sge[dif_sge_place].wd1.buf_addr_gpa = 0; - sqe->sge[dif_sge_place].wd1.xid = 0; - - /* The local sgl does not use the cascading SGE. Therefore, the value of - * this field is always 0. 
- */ - sqe->sge[dif_sge_place].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sqe->sge[dif_sge_place].wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - - spfc_cpu_to_big32(&sqe->sge[dif_sge_place], sizeof(struct spfc_variable_sge)); - - return RETURN_OK; -} - -static u32 spfc_build_external_dif_sgl(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, - struct spfc_sqe *sqe, int direction, - u32 bd_sge_num) -{ - u32 ret = UNF_RETURN_ERROR; - struct unf_esgl_page *esgl_page = NULL; - ulong phys = 0; - u32 left_sge_num = 0; - u32 dif_sge_place = 0; - struct spfc_parent_ssq_info *ssq = NULL; - u32 ssqn = 0; - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - ssq = &hba->parent_queue_mgr->shared_queue[ssqn].parent_ssq_info; - - /* DIF SGE must be followed by BD SGE */ - dif_sge_place = ((bd_sge_num <= pkg->entry_count) ? bd_sge_num : pkg->entry_count); - - /* Allocate the first page first */ - UNF_GET_FREE_ESGL_PAGE(esgl_page, hba->lport, pkg); - if (!esgl_page) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "DOUBLE DIF Get External Page Fail."); - return UNF_RETURN_ERROR; - } - - phys = esgl_page->esgl_phy_addr; - - /* Configuring the Address of the Cascading Page */ - sqe->sge[dif_sge_place].buf_addr_hi = UNF_DMA_HI32(phys); - sqe->sge[dif_sge_place].buf_addr_lo = UNF_DMA_LO32(phys); - - /* Configuring Control Information About the Cascading Page */ - sqe->sge[dif_sge_place].wd0.buf_len = 0; - sqe->sge[dif_sge_place].wd0.r_flag = 0; - sqe->sge[dif_sge_place].wd1.extension_flag = SPFC_WQE_SGE_EXTEND_FLAG; - sqe->sge[dif_sge_place].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - /* parity bit */ - sqe->sge[dif_sge_place].wd1.buf_addr_gpa = 0; - sqe->sge[dif_sge_place].wd1.xid = 0; - - spfc_cpu_to_big32(&sqe->sge[dif_sge_place], sizeof(struct spfc_variable_sge)); - - /* Fill in the sge information on the cascading page */ - left_sge_num = pkg->dif_control.dif_sge_count; - ret = spfc_fill_external_sgl_page(hba, pkg, esgl_page, left_sge_num, - direction, ssq->context_id, true); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - - return RETURN_OK; -} - -static u32 spfc_build_local_sgl(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, struct spfc_sqe *sqe, - int direction) -{ - u32 ret = UNF_RETURN_ERROR; - char *buf = NULL; - u32 buf_len = 0; - u32 index = 0; - ulong phys = 0; - - for (index = 0; index < pkg->entry_count; index++) { - UNF_CM_GET_SGL_ENTRY(ret, (void *)pkg, &buf, &buf_len); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - - phys = (ulong)buf; - sqe->sge[index].buf_addr_hi = UNF_DMA_HI32(phys); - sqe->sge[index].buf_addr_lo = UNF_DMA_LO32(phys); - sqe->sge[index].wd0.buf_len = buf_len; - - /* rdma flag. If the fc is not used, enter 0. */ - sqe->sge[index].wd0.r_flag = 0; - - /* parity bit */ - sqe->sge[index].wd1.buf_addr_gpa = SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE; - sqe->sge[index].wd1.xid = 0; - - /* The local sgl does not use the cascading SGE. Therefore, the - * value of this field is always 0. - */ - sqe->sge[index].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sqe->sge[index].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - if (index == (pkg->entry_count - 1)) { - /* Sets the last WQE end flag 1 */ - sqe->sge[index].wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - } - - spfc_cpu_to_big32(&sqe->sge[index], sizeof(struct spfc_variable_sge)); - } - - /* Adjust the length of the BDSL field in the CTRL domain. 
*/ - SPFC_ADJUST_DATA(sqe->ctrl_sl.ch.wd0.bdsl, - SPFC_BYTES_TO_QW_NUM((pkg->entry_count * - sizeof(struct spfc_variable_sge)))); - - /* The entry_count= 0 needs to be specially processed and does not need - * to be mounted. As long as len is set to zero, Last-bit is set to one, - * and E-bit is set to 0. - */ - if (pkg->entry_count == 0) { - sqe->sge[ARRAY_INDEX_0].buf_addr_hi = 0; - sqe->sge[ARRAY_INDEX_0].buf_addr_lo = 0; - sqe->sge[ARRAY_INDEX_0].wd0.buf_len = 0; - - /* rdma flag. This field is not used in fc. Set it to 0. */ - sqe->sge[ARRAY_INDEX_0].wd0.r_flag = 0; - - /* parity bit */ - sqe->sge[ARRAY_INDEX_0].wd1.buf_addr_gpa = SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE; - sqe->sge[ARRAY_INDEX_0].wd1.xid = 0; - - /* The local sgl does not use the cascading SGE. Therefore, the - * value of this field is always 0. - */ - sqe->sge[ARRAY_INDEX_0].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sqe->sge[ARRAY_INDEX_0].wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - - spfc_cpu_to_big32(&sqe->sge[ARRAY_INDEX_0], sizeof(struct spfc_variable_sge)); - - /* Adjust the length of the BDSL field in the CTRL domain. */ - SPFC_ADJUST_DATA(sqe->ctrl_sl.ch.wd0.bdsl, - SPFC_BYTES_TO_QW_NUM(sizeof(struct spfc_variable_sge))); - } - - return RETURN_OK; -} - -static u32 spfc_build_external_sgl(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, struct spfc_sqe *sqe, - int direction, u32 bd_sge_num) -{ - u32 ret = UNF_RETURN_ERROR; - char *buf = NULL; - struct unf_esgl_page *esgl_page = NULL; - ulong phys = 0; - u32 buf_len = 0; - u32 index = 0; - u32 left_sge_num = 0; - u32 local_sge_num = 0; - struct spfc_parent_ssq_info *ssq = NULL; - u16 ssqn = 0; - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - ssq = &hba->parent_queue_mgr->shared_queue[ssqn].parent_ssq_info; - - /* Ensure that the value of bd_sge_num is greater than or equal to one - */ - local_sge_num = bd_sge_num - 1; - - for (index = 0; index < local_sge_num; index++) { - UNF_CM_GET_SGL_ENTRY(ret, (void *)pkg, &buf, &buf_len); - if (unlikely(ret != RETURN_OK)) - return UNF_RETURN_ERROR; - - phys = (ulong)buf; - - sqe->sge[index].buf_addr_hi = UNF_DMA_HI32(phys); - sqe->sge[index].buf_addr_lo = UNF_DMA_LO32(phys); - sqe->sge[index].wd0.buf_len = buf_len; - - /* RDMA flag, which is not used by FC. */ - sqe->sge[index].wd0.r_flag = 0; - sqe->sge[index].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sqe->sge[index].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - /* parity bit */ - sqe->sge[index].wd1.buf_addr_gpa = SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE; - sqe->sge[index].wd1.xid = 0; - - spfc_cpu_to_big32(&sqe->sge[index], sizeof(struct spfc_variable_sge)); - } - - /* Calculate the number of remaining sge. */ - left_sge_num = pkg->entry_count - local_sge_num; - /* Adjust the length of the BDSL field in the CTRL domain. 
*/ - SPFC_ADJUST_DATA(sqe->ctrl_sl.ch.wd0.bdsl, - SPFC_BYTES_TO_QW_NUM((bd_sge_num * sizeof(struct spfc_variable_sge)))); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "alloc extended sgl page,leftsge:%d", left_sge_num); - /* Allocating the first cascading page */ - UNF_GET_FREE_ESGL_PAGE(esgl_page, hba->lport, pkg); - if (unlikely(!esgl_page)) - return UNF_RETURN_ERROR; - - phys = esgl_page->esgl_phy_addr; - - /* Configuring the Address of the Cascading Page */ - sqe->sge[index].buf_addr_hi = (u32)UNF_DMA_HI32(phys); - sqe->sge[index].buf_addr_lo = (u32)UNF_DMA_LO32(phys); - - /* Configuring Control Information About the Cascading Page */ - sqe->sge[index].wd0.buf_len = 0; - sqe->sge[index].wd0.r_flag = 0; - sqe->sge[index].wd1.extension_flag = SPFC_WQE_SGE_EXTEND_FLAG; - sqe->sge[index].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - /* parity bit */ - sqe->sge[index].wd1.buf_addr_gpa = SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE; - sqe->sge[index].wd1.xid = 0; - - spfc_cpu_to_big32(&sqe->sge[index], sizeof(struct spfc_variable_sge)); - - /* Fill in the sge information on the cascading page. */ - ret = spfc_fill_external_sgl_page(hba, pkg, esgl_page, left_sge_num, - direction, ssq->context_id, false); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - /* Copy the extended data sge to the extended sge of the extended wqe.*/ - if (left_sge_num > 0) { - memcpy(sqe->esge, (void *)esgl_page->page_address, - SPFC_WQE_MAX_ESGE_NUM * sizeof(struct spfc_variable_sge)); - } - - return RETURN_OK; -} - -u32 spfc_build_sgl_by_local_sge_num(struct unf_frame_pkg *pkg, - struct spfc_hba_info *hba, struct spfc_sqe *sqe, - int direction, u32 bd_sge_num) -{ - u32 ret = RETURN_OK; - - if (pkg->entry_count <= bd_sge_num) - ret = spfc_build_local_sgl(hba, pkg, sqe, direction); - else - ret = spfc_build_external_sgl(hba, pkg, sqe, direction, bd_sge_num); - - return ret; -} - -u32 spfc_conf_dual_sgl_info(struct unf_frame_pkg *pkg, - struct spfc_hba_info *hba, struct spfc_sqe *sqe, - int direction, u32 bd_sge_num, bool double_sgl) -{ - u32 ret = RETURN_OK; - - if (double_sgl) { - /* Adjust the length of the DIF_SL field in the CTRL domain */ - SPFC_ADJUST_DATA(sqe->ctrl_sl.ch.wd0.dif_sl, - SPFC_BYTES_TO_QW_NUM(sizeof(struct spfc_variable_sge))); - - if (pkg->dif_control.dif_sge_count <= SPFC_WQE_SGE_DIF_ENTRY_NUM) - ret = spfc_build_local_dif_sgl(hba, pkg, sqe, direction, bd_sge_num); - else - ret = spfc_build_external_dif_sgl(hba, pkg, sqe, direction, bd_sge_num); - } - - return ret; -} - -u32 spfc_build_sgl(struct spfc_hba_info *hba, struct unf_frame_pkg *pkg, - struct spfc_sqe *sqe, int direction, u32 dif_flag) -{ -#define SPFC_ESGE_CNT 3 - u32 ret = RETURN_OK; - u32 bd_sge_num = SPFC_WQE_SGE_ENTRY_NUM; - bool double_sgl = false; - - if (dif_flag != 0 && (pkg->dif_control.flags & UNF_DIF_DOUBLE_SGL)) { - bd_sge_num = SPFC_WQE_SGE_ENTRY_NUM - SPFC_WQE_SGE_DIF_ENTRY_NUM; - double_sgl = true; - } - - /* Only one wqe local sge can be loaded. 
If more than one wqe local sge - * is used, use the esgl - */ - ret = spfc_build_sgl_by_local_sge_num(pkg, hba, sqe, direction, bd_sge_num); - - if (unlikely(ret != RETURN_OK)) - return ret; - - /* Configuring Dual SGL Information for DIF */ - ret = spfc_conf_dual_sgl_info(pkg, hba, sqe, direction, bd_sge_num, double_sgl); - - return ret; -} - -void spfc_adjust_dix(struct unf_frame_pkg *pkg, struct spfc_fc_dif_info *dif_info, - u8 task_type) -{ - u8 tasktype = task_type; - struct spfc_fc_dif_info *dif_info_l1 = NULL; - - dif_info_l1 = dif_info; - - if (dix_flag == 1) { - if (tasktype == SPFC_SQE_FCP_IWRITE || - tasktype == SPFC_SQE_FCP_TRD) { - if ((UNF_DIF_ACTION_MASK & pkg->dif_control.protect_opcode) == - UNF_DIF_ACTION_VERIFY_AND_FORWARD) { - dif_info_l1->wd0.grd_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_REPLACE; - dif_info_l1->wd0.grd_agm_ctrl = - SPFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16; - } - - if ((UNF_DIF_ACTION_MASK & pkg->dif_control.protect_opcode) == - UNF_DIF_ACTION_VERIFY_AND_DELETE) { - dif_info_l1->wd0.grd_agm_ctrl = - SPFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16; - } - } - - if (tasktype == SPFC_SQE_FCP_IREAD || - tasktype == SPFC_SQE_FCP_TWR) { - if ((UNF_DIF_ACTION_MASK & - pkg->dif_control.protect_opcode) == - UNF_DIF_ACTION_VERIFY_AND_FORWARD) { - dif_info_l1->wd0.grd_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_REPLACE; - dif_info_l1->wd0.grd_agm_ctrl = - SPFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM; - } - - if ((UNF_DIF_ACTION_MASK & - pkg->dif_control.protect_opcode) == - UNF_DIF_ACTION_INSERT) { - dif_info_l1->wd0.grd_agm_ctrl = - SPFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM; - } - } - } - - if (grd_agm_ctrl != 0) - dif_info_l1->wd0.grd_agm_ctrl = grd_agm_ctrl; - - if (grd_ctrl != 0) - dif_info_l1->wd0.grd_ctrl = grd_ctrl; -} - -void spfc_get_dma_direction_by_fcp_cmnd(const struct unf_fcp_cmnd *fcp_cmnd, - int *dma_direction, u8 *task_type) -{ - if (UNF_FCP_WR_DATA & fcp_cmnd->control) { - *task_type = SPFC_SQE_FCP_IWRITE; - *dma_direction = DMA_TO_DEVICE; - } else if (UNF_GET_TASK_MGMT_FLAGS(fcp_cmnd->control) != 0) { - *task_type = SPFC_SQE_FCP_ITMF; - *dma_direction = DMA_FROM_DEVICE; - } else { - *task_type = SPFC_SQE_FCP_IREAD; - *dma_direction = DMA_FROM_DEVICE; - } -} - -static inline u32 spfc_build_icmnd_wqe(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, - struct spfc_sqe *sge) -{ - u32 ret = RETURN_OK; - int direction = 0; - u8 tasktype = 0; - struct unf_fcp_cmnd *fcp_cmnd = NULL; - struct spfc_sqe *sqe = sge; - u32 dif_flag = 0; - - fcp_cmnd = pkg->fcp_cmnd; - if (unlikely(!fcp_cmnd)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Package's FCP commond pointer is NULL."); - - return UNF_RETURN_ERROR; - } - - spfc_get_dma_direction_by_fcp_cmnd(fcp_cmnd, &direction, &tasktype); - - spfc_build_icmnd_wqe_ts_header(pkg, sqe, tasktype, hba->exi_base, hba->port_index); - - spfc_build_icmnd_wqe_ctrls(pkg, sqe); - - spfc_build_icmnd_wqe_ts(hba, pkg, &sqe->ts_sl, &sqe->ts_ex); - - if (sqe->ts_sl.task_type != SPFC_SQE_FCP_ITMF) { - if (pkg->dif_control.protect_opcode == UNF_DIF_ACTION_NONE) { - dif_flag = 0; - spfc_build_no_dif_control(pkg, &sqe->ts_sl.cont.icmnd.info.dif_info); - } else { - dif_flag = 1; - spfc_build_dif_control(hba, pkg, &sqe->ts_sl.cont.icmnd.info.dif_info); - spfc_adjust_dix(pkg, - &sqe->ts_sl.cont.icmnd.info.dif_info, - tasktype); - } - } - - ret = spfc_build_sgl(hba, pkg, sqe, direction, dif_flag); - - sqe->sid = UNF_GET_SID(pkg); - sqe->did = UNF_GET_DID(pkg); - - return ret; -} - -u32 spfc_send_scsi_cmnd(void *hba, struct 
unf_frame_pkg *pkg) -{ - struct spfc_hba_info *spfc_hba = NULL; - struct spfc_parent_sq_info *parent_sq = NULL; - u32 ret = UNF_RETURN_ERROR; - struct spfc_sqe sqe; - u16 ssqn; - struct spfc_parent_queue_info *parent_queue = NULL; - - /* input param check */ - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - SPFC_CHECK_PKG_ALLOCTIME(pkg); - memset(&sqe, 0, sizeof(struct spfc_sqe)); - spfc_hba = hba; - - /* 1. find parent sq for scsi_cmnd(pkg) */ - parent_sq = spfc_find_parent_sq_by_pkg(spfc_hba, pkg); - if (unlikely(!parent_sq)) { - /* Do not need to print info */ - return UNF_RETURN_ERROR; - } - - pkg->qos_level += spfc_hba->vpid_start; - - /* 2. build cmnd wqe (to sqe) for scsi_cmnd(pkg) */ - ret = spfc_build_icmnd_wqe(spfc_hba, pkg, &sqe); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[fail]Port(0x%x) Build WQE failed, SID(0x%x) DID(0x%x) pkg type(0x%x) hottag(0x%x).", - spfc_hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did, pkg->type, UNF_GET_XCHG_TAG(pkg)); - - return ret; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) RPort(0x%x) send FCP_CMND TYPE(0x%x) Local_Xid(0x%x) hottag(0x%x) LBA(0x%llx)", - spfc_hba->port_cfg.port_id, parent_sq->rport_index, - sqe.ts_sl.task_type, sqe.ts_sl.local_xid, - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX], - *((u64 *)pkg->fcp_cmnd->cdb)); - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - if (sqe.ts_sl.task_type == SPFC_SQE_FCP_ITMF) { - parent_queue = container_of(parent_sq, struct spfc_parent_queue_info, - parent_sq_info); - ret = spfc_suspend_sqe_and_send_nop(spfc_hba, parent_queue, &sqe, pkg); - return ret; - } - /* 3. En-Queue Parent SQ for scsi_cmnd(pkg) sqe */ - ret = spfc_parent_sq_enqueue(parent_sq, &sqe, ssqn); - - return ret; -} - -static void spfc_ini_status_default_handler(struct spfc_scqe_iresp *iresp, - struct unf_frame_pkg *pkg) -{ - u8 control = 0; - u16 com_err_code = 0; - - control = iresp->wd2.fcp_flag & SPFC_CTRL_MASK; - - if (iresp->fcp_resid != 0) { - com_err_code = UNF_IO_FAILED; - pkg->residus_len = iresp->fcp_resid; - } else { - com_err_code = UNF_IO_SUCCESS; - pkg->residus_len = 0; - } - - pkg->status = spfc_fill_pkg_status(com_err_code, control, iresp->wd2.scsi_status); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]Fill package with status: 0x%x, residus len: 0x%x", - pkg->status, pkg->residus_len); -} - -static void spfc_check_fcp_rsp_iu(struct spfc_scqe_iresp *iresp, - struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg) -{ - u8 scsi_status = 0; - u8 control = 0; - - control = (u8)iresp->wd2.fcp_flag; - scsi_status = (u8)iresp->wd2.scsi_status; - - /* FcpRspIU with Little End from IOB WQE to COM's pkg also */ - if (control & FCP_RESID_UNDER_MASK) { - /* under flow: usually occurs in inquiry */ - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]I_STS IOB posts under flow with residus len: %u, FCP residue: %u.", - pkg->residus_len, iresp->fcp_resid); - - if (pkg->residus_len != iresp->fcp_resid) - pkg->status = spfc_fill_pkg_status(UNF_IO_FAILED, control, scsi_status); - else - pkg->status = spfc_fill_pkg_status(UNF_IO_UNDER_FLOW, control, scsi_status); - } - - if (control & FCP_RESID_OVER_MASK) { - /* over flow: error happened */ - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]I_STS IOB posts over flow with residus len: %u, FCP residue: %u.", - pkg->residus_len, iresp->fcp_resid); - - if (pkg->residus_len != iresp->fcp_resid) - pkg->status = 
spfc_fill_pkg_status(UNF_IO_FAILED, control, scsi_status); - else - pkg->status = spfc_fill_pkg_status(UNF_IO_OVER_FLOW, control, scsi_status); - } - - pkg->unf_rsp_pload_bl.length = 0; - pkg->unf_sense_pload_bl.length = 0; - - if (control & FCP_RSP_LEN_VALID_MASK) { - /* dma by chip */ - pkg->unf_rsp_pload_bl.buffer_ptr = NULL; - - pkg->unf_rsp_pload_bl.length = iresp->fcp_rsp_len; - pkg->byte_orders |= UNF_BIT_3; - } - - if (control & FCP_SNS_LEN_VALID_MASK) { - /* dma by chip */ - pkg->unf_sense_pload_bl.buffer_ptr = NULL; - - pkg->unf_sense_pload_bl.length = iresp->fcp_sns_len; - pkg->byte_orders |= UNF_BIT_4; - } - - if (iresp->wd1.user_id_num == 1 && - (pkg->unf_sense_pload_bl.length + pkg->unf_rsp_pload_bl.length > 0)) { - pkg->unf_rsp_pload_bl.buffer_ptr = - (u8 *)spfc_get_els_buf_by_user_id(hba, (u16)iresp->user_id[ARRAY_INDEX_0]); - } else if (iresp->wd1.user_id_num > 1) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]receive buff num 0x%x > 1 0x%x", - iresp->wd1.user_id_num, control); - } -} - -u16 spfc_get_com_err_code(struct unf_frame_pkg *pkg) -{ - u16 com_err_code = UNF_IO_FAILED; - u32 status_subcode = 0; - - status_subcode = pkg->status_sub_code; - - if (likely(status_subcode == 0)) - com_err_code = 0; - else if (status_subcode == UNF_DIF_CRC_ERR) - com_err_code = UNF_IO_DIF_ERROR; - else if (status_subcode == UNF_DIF_LBA_ERR) - com_err_code = UNF_IO_DIF_REF_ERROR; - else if (status_subcode == UNF_DIF_APP_ERR) - com_err_code = UNF_IO_DIF_GEN_ERROR; - - return com_err_code; -} - -void spfc_process_ini_fail_io(struct spfc_hba_info *hba, union spfc_scqe *iresp, - struct unf_frame_pkg *pkg) -{ - u16 com_err_code = UNF_IO_FAILED; - - /* 1. error stats process */ - if (SPFC_GET_SCQE_STATUS((union spfc_scqe *)(void *)iresp) != 0) { - switch (SPFC_GET_SCQE_STATUS((union spfc_scqe *)(void *)iresp)) { - /* I/O not complete: 1.session reset; 2.clear buffer */ - case FC_CQE_BUFFER_CLEAR_IO_COMPLETED: - case FC_CQE_SESSION_RST_CLEAR_IO_COMPLETED: - case FC_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED: - case FC_CQE_WQE_FLUSH_IO_COMPLETED: - com_err_code = UNF_IO_CLEAN_UP; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[warn]Port(0x%x) INI IO not complete, OX_ID(0x%x) RX_ID(0x%x) status(0x%x)", - hba->port_cfg.port_id, - ((struct spfc_scqe_iresp *)iresp)->wd0.ox_id, - ((struct spfc_scqe_iresp *)iresp)->wd0.rx_id, - com_err_code); - - break; - /* Allocate task id(oxid) fail */ - case FC_ERROR_INVALID_TASK_ID: - com_err_code = UNF_IO_NO_XCHG; - break; - case FC_ALLOC_EXCH_ID_FAILED: - com_err_code = UNF_IO_NO_XCHG; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[warn]Port(0x%x) INI IO, tag 0x%x alloc oxid fail.", - hba->port_cfg.port_id, - ((struct spfc_scqe_iresp *)iresp)->wd2.hotpooltag); - break; - case FC_ERROR_CODE_DATA_DIFX_FAILED: - com_err_code = pkg->status >> UNF_SHIFT_16; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[warn]Port(0x%x) INI IO, tag 0x%x tx dif error.", - hba->port_cfg.port_id, - ((struct spfc_scqe_iresp *)iresp)->wd2.hotpooltag); - break; - /* any other: I/O failed --->>> DID error */ - default: - com_err_code = UNF_IO_FAILED; - break; - } - - /* fill pkg status & return directly */ - pkg->status = - spfc_fill_pkg_status(com_err_code, - ((struct spfc_scqe_iresp *)iresp)->wd2.fcp_flag, - ((struct spfc_scqe_iresp *)iresp)->wd2.scsi_status); - - return; - } - - /* 2. default stats process */ - spfc_ini_status_default_handler((struct spfc_scqe_iresp *)iresp, pkg); - - /* 3. 
FCP RSP IU check */ - spfc_check_fcp_rsp_iu((struct spfc_scqe_iresp *)iresp, hba, pkg); -} - -void spfc_process_dif_result(struct spfc_hba_info *hba, union spfc_scqe *wqe, - struct unf_frame_pkg *pkg) -{ - u16 com_err_code = UNF_IO_FAILED; - u8 dif_info = 0; - - dif_info = wqe->common.wd0.dif_vry_rst; - if (dif_info == SPFC_TX_DIF_ERROR_FLAG) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[error]Port(0x%x) TGT recv tx dif result abnormal.", - hba->port_cfg.port_id); - } - - pkg->status_sub_code = - (dif_info & SPFC_DIF_ERROR_CODE_CRC) - ? UNF_DIF_CRC_ERR - : ((dif_info & SPFC_DIF_ERROR_CODE_REF) - ? UNF_DIF_LBA_ERR - : ((dif_info & SPFC_DIF_ERROR_CODE_APP) ? UNF_DIF_APP_ERR : 0)); - com_err_code = spfc_get_com_err_code(pkg); - pkg->status = (u32)(com_err_code) << UNF_SHIFT_16; - - if (unlikely(com_err_code != 0)) { - spfc_dif_err_count(hba, dif_info); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[error]Port(0x%x) INI io status with dif result(0x%x),subcode(0x%x) pkg->status(0x%x)", - hba->port_cfg.port_id, dif_info, - pkg->status_sub_code, pkg->status); - } -} - -u32 spfc_scq_recv_iresp(struct spfc_hba_info *hba, union spfc_scqe *wqe) -{ -#define SPFC_IRSP_USERID_LEN ((FC_SENSEDATA_USERID_CNT_MAX + 1) / 2) - struct spfc_scqe_iresp *iresp = NULL; - struct unf_frame_pkg pkg; - u32 ret = RETURN_OK; - u16 hot_tag; - - FC_CHECK_RETURN_VALUE((hba), UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE((wqe), UNF_RETURN_ERROR); - - iresp = (struct spfc_scqe_iresp *)(void *)wqe; - - /* 1. Constraints: I_STS remain cnt must be zero */ - if (unlikely(SPFC_GET_SCQE_REMAIN_CNT(wqe) != 0)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) ini_wqe(OX_ID:0x%x RX_ID:0x%x) HotTag(0x%x) remain_cnt(0x%x) abnormal, status(0x%x)", - hba->port_cfg.port_id, iresp->wd0.ox_id, - iresp->wd0.rx_id, iresp->wd2.hotpooltag, - SPFC_GET_SCQE_REMAIN_CNT(wqe), - SPFC_GET_SCQE_STATUS(wqe)); - - UNF_PRINT_SFS_LIMIT(UNF_MAJOR, hba->port_cfg.port_id, wqe, sizeof(union spfc_scqe)); - - /* return directly */ - return UNF_RETURN_ERROR; - } - - spfc_swap_16_in_32((u32 *)iresp->user_id, SPFC_IRSP_USERID_LEN); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = iresp->magic_num; - pkg.frame_head.oxid_rxid = (((iresp->wd0.ox_id) << UNF_SHIFT_16) | (iresp->wd0.rx_id)); - - hot_tag = (u16)iresp->wd2.hotpooltag & UNF_ORIGIN_HOTTAG_MASK; - /* 2. HotTag validity check */ - if (likely(hot_tag >= hba->exi_base && (hot_tag < hba->exi_base + hba->exi_count))) { - pkg.status = UNF_IO_SUCCESS; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = - hot_tag - hba->exi_base; - } else { - /* OX_ID error: return by COM */ - pkg.status = UNF_IO_FAILED; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = INVALID_VALUE16; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) ini_cmnd_wqe(OX_ID:0x%x RX_ID:0x%x) ox_id invalid, status(0x%x)", - hba->port_cfg.port_id, iresp->wd0.ox_id, iresp->wd0.rx_id, - SPFC_GET_SCQE_STATUS(wqe)); - - UNF_PRINT_SFS_LIMIT(UNF_MAJOR, hba->port_cfg.port_id, wqe, - sizeof(union spfc_scqe)); - } - - /* process dif result */ - spfc_process_dif_result(hba, wqe, &pkg); - - /* 3. 
status check */ - if (unlikely(SPFC_GET_SCQE_STATUS(wqe) || - iresp->wd2.scsi_status != 0 || iresp->fcp_resid != 0 || - ((iresp->wd2.fcp_flag & SPFC_CTRL_MASK) != 0))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[warn]Port(0x%x) scq_status(0x%x) scsi_status(0x%x) fcp_resid(0x%x) fcp_flag(0x%x)", - hba->port_cfg.port_id, SPFC_GET_SCQE_STATUS(wqe), - iresp->wd2.scsi_status, iresp->fcp_resid, - iresp->wd2.fcp_flag); - - /* set pkg status & check fcp_rsp IU */ - spfc_process_ini_fail_io(hba, (union spfc_scqe *)iresp, &pkg); - } - - /* 4. LL_Driver ---to--->>> COM_Driver */ - UNF_LOWLEVEL_SCSI_COMPLETED(ret, hba->lport, &pkg); - if (iresp->wd1.user_id_num == 1 && - (pkg.unf_sense_pload_bl.length + pkg.unf_rsp_pload_bl.length > 0)) { - spfc_post_els_srq_wqe(&hba->els_srq_info, (u16)iresp->user_id[ARRAY_INDEX_0]); - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) rport(0x%x) recv(%s) hottag(0x%x) OX_ID(0x%x) RX_ID(0x%x) return(%s)", - hba->port_cfg.port_id, iresp->wd1.conn_id, - (SPFC_SCQE_FCP_IRSP == (SPFC_GET_SCQE_TYPE(wqe)) ? "IRESP" : "ITMF_RSP"), - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX], iresp->wd0.ox_id, - iresp->wd0.rx_id, (ret == RETURN_OK) ? "OK" : "ERROR"); - - return ret; -} diff --git a/drivers/scsi/spfc/hw/spfc_io.h b/drivers/scsi/spfc/hw/spfc_io.h deleted file mode 100644 index 26d10a51bbe492b66e08ef13d4d59dcb2687da03..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_io.h +++ /dev/null @@ -1,138 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_IO_H -#define SPFC_IO_H - -#include "unf_type.h" -#include "unf_common.h" -#include "spfc_hba.h" - -#ifdef __cplusplus -#if __cplusplus -extern "C" { -#endif -#endif /* __cplusplus */ - -#define BYTE_PER_DWORD 4 -#define SPFC_TRESP_DIRECT_CARRY_LEN (23 * 4) -#define FCP_RESP_IU_LEN_BYTE_GOOD_STATUS 24 -#define SPFC_TRSP_IU_CONTROL_OFFSET 2 -#define SPFC_TRSP_IU_FCP_CONF_REP (1 << 12) - -struct spfc_dif_io_param { - u32 all_len; - u32 buf_len; - char **buf; - char *in_buf; - int drect; -}; - -enum dif_mode_type { - DIF_MODE_NONE = 0x0, - DIF_MODE_INSERT = 0x1, - DIF_MODE_REMOVE = 0x2, - DIF_MODE_FORWARD_OR_REPLACE = 0x3 -}; - -enum ref_tag_mode_type { - BOTH_NONE = 0x0, - RECEIVE_INCREASE = 0x1, - REPLACE_INCREASE = 0x2, - BOTH_INCREASE = 0x3 -}; - -#define SPFC_DIF_DISABLE 0 -#define SPFC_DIF_ENABLE 1 -#define SPFC_DIF_SINGLE_SGL 0 -#define SPFC_DIF_DOUBLE_SGL 1 -#define SPFC_DIF_SECTOR_512B_MODE 0 -#define SPFC_DIF_SECTOR_4KB_MODE 1 -#define SPFC_DIF_TYPE1 0x01 -#define SPFC_DIF_TYPE3 0x03 -#define SPFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_T10_CRC16 0x0 -#define SPFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM 0x1 -#define SPFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16 0x2 -#define SPFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_IP_CHECKSUM 0x3 -#define SPFC_DIF_CRC16_INITIAL_SELECTOR_DEFAUL 0 -#define SPFC_DIF_CRC_CS_INITIAL_CONFIG_BY_REGISTER 0 -#define SPFC_DIF_CRC_CS_INITIAL_CONFIG_BY_BIT0_1 0x4 - -#define SPFC_DIF_GARD_REF_APP_CTRL_VERIFY 0x4 -#define SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY 0x0 -#define SPFC_DIF_GARD_REF_APP_CTRL_INSERT 0x0 -#define SPFC_DIF_GARD_REF_APP_CTRL_DELETE 0x1 -#define SPFC_DIF_GARD_REF_APP_CTRL_FORWARD 0x2 -#define SPFC_DIF_GARD_REF_APP_CTRL_REPLACE 0x3 - -#define SPFC_BUILD_RESPONSE_INFO_NON_GAP_MODE0 0 -#define SPFC_BUILD_RESPONSE_INFO_GPA_MODE1 1 -#define SPFC_CONF_SUPPORT 1 -#define SPFC_CONF_NOT_SUPPORT 0 -#define SPFC_XID_INTERVAL 2048 - -#define SPFC_DIF_ERROR_CODE_MASK 0xe -#define 
SPFC_DIF_ERROR_CODE_CRC 0x2 -#define SPFC_DIF_ERROR_CODE_REF 0x4 -#define SPFC_DIF_ERROR_CODE_APP 0x8 -#define SPFC_TX_DIF_ERROR_FLAG (1 << 7) - -#define SPFC_DIF_PAYLOAD_TYPE (1 << 0) -#define SPFC_DIF_CRC_TYPE (1 << 1) -#define SPFC_DIF_APP_TYPE (1 << 2) -#define SPFC_DIF_REF_TYPE (1 << 3) - -#define SPFC_DIF_SEND_DIFERR_ALL (0) -#define SPFC_DIF_SEND_DIFERR_CRC (1) -#define SPFC_DIF_SEND_DIFERR_APP (2) -#define SPFC_DIF_SEND_DIFERR_REF (3) -#define SPFC_DIF_RECV_DIFERR_ALL (4) -#define SPFC_DIF_RECV_DIFERR_CRC (5) -#define SPFC_DIF_RECV_DIFERR_APP (6) -#define SPFC_DIF_RECV_DIFERR_REF (7) -#define SPFC_DIF_ERR_ENABLE (382855) -#define SPFC_DIF_ERR_DISABLE (0) - -#define SPFC_DIF_LENGTH (8) -#define SPFC_SECT_SIZE_512 (512) -#define SPFC_SECT_SIZE_4096 (4096) -#define SPFC_SECT_SIZE_512_8 (520) -#define SPFC_SECT_SIZE_4096_8 (4104) -#define SPFC_DIF_SECT_SIZE_APP_OFFSET (2) -#define SPFC_DIF_SECT_SIZE_LBA_OFFSET (4) - -#define SPFC_MAX_IO_TAG (2048) -#define SPFC_PRINT_WORD (8) - -extern u32 dif_protect_opcode; -extern u32 dif_sect_size; -extern u32 no_dif_sect_size; -extern u32 grd_agm_ini_ctrl; -extern u32 ref_tag_mod; -extern u32 grd_ctrl; -extern u32 grd_agm_ctrl; -extern u32 cmp_app_tag_mask; -extern u32 app_tag_ctrl; -extern u32 ref_tag_ctrl; -extern u32 rep_ref_tag; -extern u32 rx_rep_ref_tag; -extern u16 cmp_app_tag; -extern u16 rep_app_tag; - -#define spfc_fill_pkg_status(com_err_code, control, scsi_status) \ - (((u32)(com_err_code) << 16) | ((u32)(control) << 8) | \ - (u32)(scsi_status)) -#define SPFC_CTRL_MASK 0x1f - -u32 spfc_send_scsi_cmnd(void *hba, struct unf_frame_pkg *pkg); -u32 spfc_scq_recv_iresp(struct spfc_hba_info *hba, union spfc_scqe *wqe); -void spfc_process_dif_result(struct spfc_hba_info *hba, union spfc_scqe *wqe, - struct unf_frame_pkg *pkg); - -#ifdef __cplusplus -#if __cplusplus -} -#endif -#endif /* __cplusplus */ - -#endif /* __SPFC_IO_H__ */ diff --git a/drivers/scsi/spfc/hw/spfc_lld.c b/drivers/scsi/spfc/hw/spfc_lld.c deleted file mode 100644 index a35484f1c9173dee71c900351914c2afd2ae4863..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_lld.c +++ /dev/null @@ -1,997 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "spfc_lld.h" -#include "sphw_hw.h" -#include "sphw_mt.h" -#include "sphw_hw_cfg.h" -#include "sphw_hw_comm.h" -#include "sphw_common.h" -#include "spfc_cqm_main.h" -#include "spfc_module.h" - -#define SPFC_DRV_NAME "spfc" -#define SPFC_CHIP_NAME "spfc" - -#define PCI_VENDOR_ID_RAMAXEL 0x1E81 -#define SPFC_DEV_ID_PF_STD 0x9010 -#define SPFC_DEV_ID_VF 0x9008 - -#define SPFC_VF_PCI_CFG_REG_BAR 0 -#define SPFC_PF_PCI_CFG_REG_BAR 1 - -#define SPFC_PCI_INTR_REG_BAR 2 -#define SPFC_PCI_MGMT_REG_BAR 3 -#define SPFC_PCI_DB_BAR 4 - -#define SPFC_SECOND_BASE (1000) -#define SPFC_SYNC_YEAR_OFFSET (1900) -#define SPFC_SYNC_MONTH_OFFSET (1) -#define SPFC_MINUTE_BASE (60) -#define SPFC_WAIT_TOOL_CNT_TIMEOUT 10000 - -#define SPFC_MIN_TIME_IN_USECS 900 -#define SPFC_MAX_TIME_IN_USECS 1000 -#define SPFC_MAX_LOOP_TIMES 10000 - -#define SPFC_TOOL_MIN_TIME_IN_USECS 9900 -#define SPFC_TOOL_MAX_TIME_IN_USECS 10000 - -#define SPFC_EVENT_PROCESS_TIMEOUT 10000 - -#define FIND_BIT(num, n) (((num) & (1UL << (n))) ? 
1 : 0) -#define SET_BIT(num, n) ((num) | (1UL << (n))) -#define CLEAR_BIT(num, n) ((num) & (~(1UL << (n)))) - -#define MAX_CARD_ID 64 -static unsigned long card_bit_map; -LIST_HEAD(g_spfc_chip_list); -struct spfc_uld_info g_uld_info[SERVICE_T_MAX] = { {0} }; - -struct unf_cm_handle_op spfc_cm_op_handle = {0}; - -u32 allowed_probe_num = SPFC_MAX_PORT_NUM; -u32 dif_sgl_mode; -u32 max_speed = SPFC_SPEED_32G; -u32 accum_db_num = 1; -u32 dif_type = 0x1; -u32 wqe_page_size = 4096; -u32 wqe_pre_load = 6; -u32 combo_length = 128; -u32 cos_bit_map = 0x1f; -u32 spfc_dif_type; -u32 spfc_dif_enable; -u8 spfc_guard; -int link_lose_tmo = 30; - -u32 exit_count = 4096; -u32 exit_stride = 4096; -u32 exit_base; - -/* dfx counter */ -atomic64_t rx_tx_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t rx_tx_err[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t scq_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t aeq_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t dif_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t mail_box_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t up_err_event_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -u64 link_event_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_LINK_EVENT_CNT]; -u64 link_reason_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_LINK_REASON_CNT]; -u64 hba_stat[SPFC_MAX_PORT_NUM][SPFC_HBA_STAT_BUTT]; -atomic64_t com_up_event_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; - -#ifndef MAX_SIZE -#define MAX_SIZE (16) -#endif - -struct spfc_lld_lock g_lld_lock; - -/* g_device_mutex */ -struct mutex g_device_mutex; - -/* pci device initialize lock */ -struct mutex g_pci_init_mutex; - -#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10minutes */ -#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10minutes */ -#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2minutes */ - -void lld_dev_cnt_init(struct spfc_pcidev *pci_adapter) -{ - atomic_set(&pci_adapter->ref_cnt, 0); -} - -void lld_dev_hold(struct spfc_lld_dev *dev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); - - atomic_inc(&pci_adapter->ref_cnt); -} - -void lld_dev_put(struct spfc_lld_dev *dev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); - - atomic_dec(&pci_adapter->ref_cnt); -} - -static void spfc_sync_time_to_fmw(struct spfc_pcidev *pdev_pri) -{ - struct tm tm = {0}; - u64 tv_msec; - int err; - - tv_msec = ktime_to_ms(ktime_get_real()); - err = sphw_sync_time(pdev_pri->hwdev, tv_msec); - if (err) { - sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n", - err); - } else { - time64_to_tm(tv_msec / MSEC_PER_SEC, 0, &tm); - sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. 
UTC time %ld-%02d-%02d %02d:%02d:%02d.\n", - tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, - tm.tm_min, tm.tm_sec); - } -} - -void wait_lld_dev_unused(struct spfc_pcidev *pci_adapter) -{ - u32 loop_cnt = 0; - - while (loop_cnt < SPFC_WAIT_TOOL_CNT_TIMEOUT) { - if (!atomic_read(&pci_adapter->ref_cnt)) - return; - - usleep_range(SPFC_TOOL_MIN_TIME_IN_USECS, SPFC_TOOL_MAX_TIME_IN_USECS); - loop_cnt++; - } -} - -static void lld_lock_chip_node(void) -{ - u32 loop_cnt; - - mutex_lock(&g_lld_lock.lld_mutex); - - loop_cnt = 0; - while (loop_cnt < WAIT_LLD_DEV_NODE_CHANGED) { - if (!test_and_set_bit(SPFC_NODE_CHANGE, &g_lld_lock.status)) - break; - - loop_cnt++; - - if (loop_cnt % SPFC_MAX_LOOP_TIMES == 0) - pr_warn("[warn]Wait for lld node change complete for %us", - loop_cnt / UNF_S_TO_MS); - - usleep_range(SPFC_MIN_TIME_IN_USECS, SPFC_MAX_TIME_IN_USECS); - } - - if (loop_cnt == WAIT_LLD_DEV_NODE_CHANGED) - pr_warn("[warn]Wait for lld node change complete timeout when try to get lld lock"); - - loop_cnt = 0; - while (loop_cnt < WAIT_LLD_DEV_REF_CNT_EMPTY) { - if (!atomic_read(&g_lld_lock.dev_ref_cnt)) - break; - - loop_cnt++; - - if (loop_cnt % SPFC_MAX_LOOP_TIMES == 0) - pr_warn("[warn]Wait for lld dev unused for %us, reference count: %d", - loop_cnt / UNF_S_TO_MS, atomic_read(&g_lld_lock.dev_ref_cnt)); - - usleep_range(SPFC_MIN_TIME_IN_USECS, SPFC_MAX_TIME_IN_USECS); - } - - if (loop_cnt == WAIT_LLD_DEV_REF_CNT_EMPTY) - pr_warn("[warn]Wait for lld dev unused timeout"); - - mutex_unlock(&g_lld_lock.lld_mutex); -} - -static void lld_unlock_chip_node(void) -{ - clear_bit(SPFC_NODE_CHANGE, &g_lld_lock.status); -} - -void lld_hold(void) -{ - u32 loop_cnt = 0; - - /* ensure there have not any chip node in changing */ - mutex_lock(&g_lld_lock.lld_mutex); - - while (loop_cnt < WAIT_LLD_DEV_HOLD_TIMEOUT) { - if (!test_bit(SPFC_NODE_CHANGE, &g_lld_lock.status)) - break; - - loop_cnt++; - - if (loop_cnt % SPFC_MAX_LOOP_TIMES == 0) - pr_warn("[warn]Wait lld node change complete for %u", - loop_cnt / UNF_S_TO_MS); - - usleep_range(SPFC_MIN_TIME_IN_USECS, SPFC_MAX_TIME_IN_USECS); - } - - if (loop_cnt == WAIT_LLD_DEV_HOLD_TIMEOUT) - pr_warn("[warn]Wait lld node change complete timeout when try to hode lld dev %u", - loop_cnt / UNF_S_TO_MS); - - atomic_inc(&g_lld_lock.dev_ref_cnt); - - mutex_unlock(&g_lld_lock.lld_mutex); -} - -void lld_put(void) -{ - atomic_dec(&g_lld_lock.dev_ref_cnt); -} - -static void spfc_lld_lock_init(void) -{ - mutex_init(&g_lld_lock.lld_mutex); - atomic_set(&g_lld_lock.dev_ref_cnt, 0); -} - -static void spfc_realease_cmo_op_handle(void) -{ - memset(&spfc_cm_op_handle, 0, sizeof(struct unf_cm_handle_op)); -} - -static void spfc_check_module_para(void) -{ - if (spfc_dif_enable) { - dif_sgl_mode = true; - spfc_dif_type = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION; - dix_flag = 1; - } - - if (dif_sgl_mode != 0) - dif_sgl_mode = 1; -} - -void spfc_event_process(void *adapter, struct sphw_event_info *event) -{ - struct spfc_pcidev *dev = adapter; - - if (test_and_set_bit(SERVICE_T_FC, &dev->state)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[WARN]Event: fc is in detach"); - return; - } - - if (g_uld_info[SERVICE_T_FC].event) - g_uld_info[SERVICE_T_FC].event(&dev->lld_dev, dev->uld_dev[SERVICE_T_FC], event); - - clear_bit(SERVICE_T_FC, &dev->state); -} - -int spfc_stateful_init(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - int stateful_en; - int err; - - if (!dev) - return -EINVAL; - - if (dev->statufull_ref_cnt++) - return 0; - - stateful_en = 
IS_FT_TYPE(dev) | IS_RDMA_TYPE(dev); - if (stateful_en && SPHW_IS_PPF(dev)) { - err = sphw_ppf_ext_db_init(dev); - if (err) - goto out; - } - - err = cqm3_init(dev); - if (err) { - sdk_err(dev->dev_hdl, "Failed to init cqm, err: %d\n", err); - goto init_cqm_err; - } - - sdk_info(dev->dev_hdl, "Initialize statefull resource success\n"); - - return 0; - -init_cqm_err: - if (stateful_en) - sphw_ppf_ext_db_deinit(dev); - -out: - dev->statufull_ref_cnt--; - - return err; -} - -void spfc_stateful_deinit(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - u32 stateful_en; - - if (!dev || !dev->statufull_ref_cnt) - return; - - if (--dev->statufull_ref_cnt) - return; - - cqm3_uninit(hwdev); - - stateful_en = IS_FT_TYPE(dev) | IS_RDMA_TYPE(dev); - if (stateful_en) - sphw_ppf_ext_db_deinit(hwdev); - - sdk_info(dev->dev_hdl, "Clear statefull resource success\n"); -} - -static int attach_uld(struct spfc_pcidev *dev, struct spfc_uld_info *uld_info) -{ - void *uld_dev = NULL; - int err; - - mutex_lock(&dev->pdev_mutex); - if (dev->uld_dev[SERVICE_T_FC]) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]fc driver has attached to pcie device"); - err = 0; - goto out_unlock; - } - - err = spfc_stateful_init(dev->hwdev); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to initialize statefull resources"); - goto out_unlock; - } - - err = uld_info->probe(&dev->lld_dev, &uld_dev, - dev->uld_dev_name[SERVICE_T_FC]); - if (err || !uld_dev) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to add object for fc driver to pcie device"); - goto probe_failed; - } - - dev->uld_dev[SERVICE_T_FC] = uld_dev; - mutex_unlock(&dev->pdev_mutex); - - return RETURN_OK; - -probe_failed: - spfc_stateful_deinit(dev->hwdev); - -out_unlock: - mutex_unlock(&dev->pdev_mutex); - - return err; -} - -static void detach_uld(struct spfc_pcidev *dev) -{ - struct spfc_uld_info *uld_info = &g_uld_info[SERVICE_T_FC]; - u32 cnt = 0; - - mutex_lock(&dev->pdev_mutex); - if (!dev->uld_dev[SERVICE_T_FC]) { - mutex_unlock(&dev->pdev_mutex); - return; - } - - while (cnt < SPFC_EVENT_PROCESS_TIMEOUT) { - if (!test_and_set_bit(SERVICE_T_FC, &dev->state)) - break; - usleep_range(900, 1000); - cnt++; - } - - uld_info->remove(&dev->lld_dev, dev->uld_dev[SERVICE_T_FC]); - dev->uld_dev[SERVICE_T_FC] = NULL; - spfc_stateful_deinit(dev->hwdev); - if (cnt < SPFC_EVENT_PROCESS_TIMEOUT) - clear_bit(SERVICE_T_FC, &dev->state); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "Detach fc driver from pcie device succeed"); - mutex_unlock(&dev->pdev_mutex); -} - -int spfc_register_uld(struct spfc_uld_info *uld_info) -{ - memset(g_uld_info, 0, sizeof(g_uld_info)); - spfc_lld_lock_init(); - mutex_init(&g_device_mutex); - mutex_init(&g_pci_init_mutex); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Module Init Success, wait for pci init and probe"); - - if (!uld_info || !uld_info->probe || !uld_info->remove) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Invalid information of fc driver to register"); - return -EINVAL; - } - - lld_hold(); - - if (g_uld_info[SERVICE_T_FC].probe) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]fc driver has registered"); - lld_put(); - return -EINVAL; - } - - memcpy(&g_uld_info[SERVICE_T_FC], uld_info, sizeof(*uld_info)); - - lld_put(); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[KEVENT]Register spfc driver succeed"); - return RETURN_OK; -} - -void spfc_unregister_uld(void) -{ - struct spfc_uld_info *uld_info = NULL; - - lld_hold(); - uld_info = &g_uld_info[SERVICE_T_FC]; - 
memset(uld_info, 0, sizeof(*uld_info)); - lld_put(); -} - -static int spfc_pci_init(struct pci_dev *pdev) -{ - struct spfc_pcidev *pci_adapter = NULL; - int err = 0; - - pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL); - if (!pci_adapter) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to alloc pci device adapter"); - return -ENOMEM; - } - pci_adapter->pcidev = pdev; - mutex_init(&pci_adapter->pdev_mutex); - - pci_set_drvdata(pdev, pci_adapter); - - err = pci_enable_device(pdev); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to enable PCI device"); - goto pci_enable_err; - } - - err = pci_request_regions(pdev, SPFC_DRV_NAME); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to request regions"); - goto pci_regions_err; - } - - pci_enable_pcie_error_reporting(pdev); - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Couldn't set 64-bit DMA mask"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, "[err]Failed to set DMA mask"); - goto dma_mask_err; - } - } - - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Couldn't set 64-bit coherent DMA mask"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, - "[err]Failed to set coherent DMA mask"); - goto dma_consistnet_mask_err; - } - } - - return 0; - -dma_consistnet_mask_err: -dma_mask_err: - pci_clear_master(pdev); - pci_release_regions(pdev); - -pci_regions_err: - pci_disable_device(pdev); - -pci_enable_err: - pci_set_drvdata(pdev, NULL); - kfree(pci_adapter); - - return err; -} - -static void spfc_pci_deinit(struct pci_dev *pdev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(pdev); - - pci_clear_master(pdev); - pci_release_regions(pdev); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - kfree(pci_adapter); -} - -static int alloc_chip_node(struct spfc_pcidev *pci_adapter) -{ - struct card_node *chip_node = NULL; - unsigned char i; - unsigned char bus_number = 0; - - if (!pci_is_root_bus(pci_adapter->pcidev->bus)) - bus_number = pci_adapter->pcidev->bus->number; - - if (bus_number != 0) { - list_for_each_entry(chip_node, &g_spfc_chip_list, node) { - if (chip_node->bus_num == bus_number) { - pci_adapter->chip_node = chip_node; - return 0; - } - } - } else if (pci_adapter->pcidev->device == SPFC_DEV_ID_VF) { - list_for_each_entry(chip_node, &g_spfc_chip_list, node) { - if (chip_node) { - pci_adapter->chip_node = chip_node; - return 0; - } - } - } - - for (i = 0; i < MAX_CARD_ID; i++) { - if (!test_and_set_bit(i, &card_bit_map)) - break; - } - - if (i == MAX_CARD_ID) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to alloc card id"); - return -EFAULT; - } - - chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); - if (!chip_node) { - clear_bit(i, &card_bit_map); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to alloc chip node"); - return -ENOMEM; - } - - /* bus number */ - chip_node->bus_num = bus_number; - - snprintf(chip_node->chip_name, IFNAMSIZ, "%s%d", SPFC_CHIP_NAME, i); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[INFO]Add new chip %s to global list succeed", - chip_node->chip_name); - - list_add_tail(&chip_node->node, &g_spfc_chip_list); - - INIT_LIST_HEAD(&chip_node->func_list); - pci_adapter->chip_node = 
chip_node; - - return 0; -} - -#ifdef CONFIG_X86 -void cfg_order_reg(struct spfc_pcidev *pci_adapter) -{ - u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56}; - struct cpuinfo_x86 *cpuinfo = NULL; - u32 i; - - if (sphw_func_type(pci_adapter->hwdev) == TYPE_VF) - return; - - cpuinfo = &cpu_data(0); - - for (i = 0; i < sizeof(cpu_model); i++) { - if (cpu_model[i] == cpuinfo->x86_model) - sphw_set_pcie_order_cfg(pci_adapter->hwdev); - } -} -#endif - -static int mapping_bar(struct pci_dev *pdev, struct spfc_pcidev *pci_adapter) -{ - int cfg_bar; - - cfg_bar = pdev->is_virtfn ? SPFC_VF_PCI_CFG_REG_BAR : SPFC_PF_PCI_CFG_REG_BAR; - - pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, cfg_bar); - if (!pci_adapter->cfg_reg_base) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Failed to map configuration regs"); - return -ENOMEM; - } - - pci_adapter->intr_reg_base = pci_ioremap_bar(pdev, SPFC_PCI_INTR_REG_BAR); - if (!pci_adapter->intr_reg_base) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Failed to map interrupt regs"); - goto map_intr_bar_err; - } - - if (!pdev->is_virtfn) { - pci_adapter->mgmt_reg_base = pci_ioremap_bar(pdev, SPFC_PCI_MGMT_REG_BAR); - if (!pci_adapter->mgmt_reg_base) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, "Failed to map mgmt regs"); - goto map_mgmt_bar_err; - } - } - - pci_adapter->db_base_phy = pci_resource_start(pdev, SPFC_PCI_DB_BAR); - pci_adapter->db_dwqe_len = pci_resource_len(pdev, SPFC_PCI_DB_BAR); - pci_adapter->db_base = pci_ioremap_bar(pdev, SPFC_PCI_DB_BAR); - if (!pci_adapter->db_base) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Failed to map doorbell regs"); - goto map_db_err; - } - - return 0; - -map_db_err: - if (!pdev->is_virtfn) - iounmap(pci_adapter->mgmt_reg_base); - -map_mgmt_bar_err: - iounmap(pci_adapter->intr_reg_base); - -map_intr_bar_err: - iounmap(pci_adapter->cfg_reg_base); - - return -ENOMEM; -} - -static void unmapping_bar(struct spfc_pcidev *pci_adapter) -{ - iounmap(pci_adapter->db_base); - - if (!pci_adapter->pcidev->is_virtfn) - iounmap(pci_adapter->mgmt_reg_base); - - iounmap(pci_adapter->intr_reg_base); - iounmap(pci_adapter->cfg_reg_base); -} - -static int spfc_func_init(struct pci_dev *pdev, struct spfc_pcidev *pci_adapter) -{ - struct sphw_init_para init_para = {0}; - int err; - - init_para.adapter_hdl = pci_adapter; - init_para.pcidev_hdl = pdev; - init_para.dev_hdl = &pdev->dev; - init_para.cfg_reg_base = pci_adapter->cfg_reg_base; - init_para.intr_reg_base = pci_adapter->intr_reg_base; - init_para.mgmt_reg_base = pci_adapter->mgmt_reg_base; - init_para.db_base = pci_adapter->db_base; - init_para.db_base_phy = pci_adapter->db_base_phy; - init_para.db_dwqe_len = pci_adapter->db_dwqe_len; - init_para.hwdev = &pci_adapter->hwdev; - init_para.chip_node = pci_adapter->chip_node; - err = sphw_init_hwdev(&init_para); - if (err) { - pci_adapter->hwdev = NULL; - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to initialize hardware device"); - return -EFAULT; - } - - pci_adapter->lld_dev.pdev = pdev; - pci_adapter->lld_dev.hwdev = pci_adapter->hwdev; - - sphw_event_register(pci_adapter->hwdev, pci_adapter, spfc_event_process); - - if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) - spfc_sync_time_to_fmw(pci_adapter); - lld_lock_chip_node(); - list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list); - lld_unlock_chip_node(); - err = attach_uld(pci_adapter, &g_uld_info[SERVICE_T_FC]); - - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Spfc3 attach uld fail"); - goto attach_fc_err; - } - -#ifdef 
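/*
 * alloc_chip_node() above hands out card IDs from the shared card_bit_map
 * with test_and_set_bit(), which finds and claims a slot in one atomic
 * step; free_chip_node() later returns the ID with clear_bit(). The
 * allocator core, reduced to its essentials (MY_MAX_ID and my_bitmap are
 * hypothetical; MY_MAX_ID must not exceed BITS_PER_LONG for this form):
 */
#include <linux/bitops.h>
#include <linux/errno.h>

#define MY_MAX_ID 32

static unsigned long my_bitmap;         /* one bit per allocatable ID */

static int my_alloc_id(void)
{
        int i;

        for (i = 0; i < MY_MAX_ID; i++)
                if (!test_and_set_bit(i, &my_bitmap))
                        return i;       /* bit was clear, ID i is now ours */

        return -ENOSPC;                 /* every ID is in use */
}

static void my_free_id(int i)
{
        clear_bit(i, &my_bitmap);
}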
CONFIG_X86 - cfg_order_reg(pci_adapter); -#endif - - return 0; - -attach_fc_err: - lld_lock_chip_node(); - list_del(&pci_adapter->node); - lld_unlock_chip_node(); - wait_lld_dev_unused(pci_adapter); - - return err; -} - -static void spfc_func_deinit(struct pci_dev *pdev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(pdev); - - lld_lock_chip_node(); - list_del(&pci_adapter->node); - lld_unlock_chip_node(); - wait_lld_dev_unused(pci_adapter); - - detach_uld(pci_adapter); - sphw_disable_mgmt_msg_report(pci_adapter->hwdev); - sphw_flush_mgmt_workq(pci_adapter->hwdev); - sphw_event_unregister(pci_adapter->hwdev); - sphw_free_hwdev(pci_adapter->hwdev); -} - -static void free_chip_node(struct spfc_pcidev *pci_adapter) -{ - struct card_node *chip_node = pci_adapter->chip_node; - int id, err; - - if (list_empty(&chip_node->func_list)) { - list_del(&chip_node->node); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[INFO]Delete chip %s from global list succeed", - chip_node->chip_name); - err = sscanf(chip_node->chip_name, SPFC_CHIP_NAME "%d", &id); - if (err < 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, "[err]Failed to get spfc id"); - } - - clear_bit(id, &card_bit_map); - - kfree(chip_node); - } -} - -static int spfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct spfc_pcidev *pci_adapter = NULL; - int err; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Spfc3 Pcie device probe begin"); - - mutex_lock(&g_pci_init_mutex); - err = spfc_pci_init(pdev); - if (err) { - mutex_unlock(&g_pci_init_mutex); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]pci init fail, return %d", err); - return err; - } - pci_adapter = pci_get_drvdata(pdev); - err = mapping_bar(pdev, pci_adapter); - if (err) { - mutex_unlock(&g_pci_init_mutex); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to map bar"); - goto map_bar_failed; - } - mutex_unlock(&g_pci_init_mutex); - pci_adapter->id = *id; - lld_dev_cnt_init(pci_adapter); - - /* if chip information of pcie function exist, add the function into chip */ - lld_lock_chip_node(); - err = alloc_chip_node(pci_adapter); - if (err) { - lld_unlock_chip_node(); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to add new chip node to global list"); - goto alloc_chip_node_fail; - } - - lld_unlock_chip_node(); - err = spfc_func_init(pdev, pci_adapter); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]spfc func init fail"); - goto func_init_err; - } - - return 0; - -func_init_err: - lld_lock_chip_node(); - free_chip_node(pci_adapter); - lld_unlock_chip_node(); - -alloc_chip_node_fail: - unmapping_bar(pci_adapter); - -map_bar_failed: - spfc_pci_deinit(pdev); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Pcie device probe failed"); - return err; -} - -static void spfc_remove(struct pci_dev *pdev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(pdev); - - if (!pci_adapter) - return; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[INFO]Pcie device remove begin"); - sphw_detect_hw_present(pci_adapter->hwdev); - spfc_func_deinit(pdev); - lld_lock_chip_node(); - free_chip_node(pci_adapter); - lld_unlock_chip_node(); - unmapping_bar(pci_adapter); - mutex_lock(&g_pci_init_mutex); - spfc_pci_deinit(pdev); - mutex_unlock(&g_pci_init_mutex); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[INFO]Pcie device removed"); -} - -static void spfc_shutdown(struct pci_dev *pdev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(pdev); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Shutdown device"); - - if (pci_adapter) - 
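/*
 * spfc_probe() above is a textbook "goto unwind ladder": each failure
 * label undoes exactly the steps that succeeded before it, in reverse
 * order, and spfc_remove() repeats the same teardown for the fully
 * initialized case. A skeleton of the pattern (the step_*() and undo_*()
 * helpers are hypothetical stand-ins for the pci/chip-node/func stages):
 */
int step_a(struct pci_dev *pdev);
int step_b(struct pci_dev *pdev);
int step_c(struct pci_dev *pdev);
void undo_step_a(struct pci_dev *pdev);
void undo_step_b(struct pci_dev *pdev);

static int my_probe(struct pci_dev *pdev)
{
        int err;

        err = step_a(pdev);             /* e.g. PCI enable + BAR mapping */
        if (err)
                return err;

        err = step_b(pdev);             /* e.g. chip-node bookkeeping */
        if (err)
                goto undo_a;

        err = step_c(pdev);             /* e.g. hwdev init + ULD attach */
        if (err)
                goto undo_b;

        return 0;

undo_b:
        undo_step_b(pdev);
undo_a:
        undo_step_a(pdev);
        return err;
}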
sphw_shutdown_hwdev(pci_adapter->hwdev); - - pci_disable_device(pdev); -} - -static pci_ers_result_t spfc_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct spfc_pcidev *pci_adapter = NULL; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Uncorrectable error detected, log and cleanup error status: 0x%08x", - state); - - pci_aer_clear_nonfatal_status(pdev); - pci_adapter = pci_get_drvdata(pdev); - - if (pci_adapter) - sphw_record_pcie_error(pci_adapter->hwdev); - - return PCI_ERS_RESULT_CAN_RECOVER; -} - -static int unf_global_value_init(void) -{ - memset(rx_tx_stat, 0, sizeof(rx_tx_stat)); - memset(rx_tx_err, 0, sizeof(rx_tx_err)); - memset(scq_err_stat, 0, sizeof(scq_err_stat)); - memset(aeq_err_stat, 0, sizeof(aeq_err_stat)); - memset(dif_err_stat, 0, sizeof(dif_err_stat)); - memset(link_event_stat, 0, sizeof(link_event_stat)); - memset(link_reason_stat, 0, sizeof(link_reason_stat)); - memset(hba_stat, 0, sizeof(hba_stat)); - memset(&spfc_cm_op_handle, 0, sizeof(struct unf_cm_handle_op)); - memset(up_err_event_stat, 0, sizeof(up_err_event_stat)); - memset(mail_box_stat, 0, sizeof(mail_box_stat)); - memset(spfc_hba, 0, sizeof(spfc_hba)); - - spin_lock_init(&probe_spin_lock); - - /* 4. Get COM Handlers used for low_level */ - if (unf_get_cm_handle_ops(&spfc_cm_op_handle) != RETURN_OK) { - spfc_realease_cmo_op_handle(); - return RETURN_ERROR_S32; - } - - return RETURN_OK; -} - -static const struct pci_device_id spfc_pci_table[] = { - {PCI_VDEVICE(RAMAXEL, SPFC_DEV_ID_PF_STD), 0}, - {0, 0} -}; - -MODULE_DEVICE_TABLE(pci, spfc_pci_table); - -static struct pci_error_handlers spfc_err_handler = { - .error_detected = spfc_io_error_detected, -}; - -static struct pci_driver spfc_driver = {.name = SPFC_DRV_NAME, - .id_table = spfc_pci_table, - .probe = spfc_probe, - .remove = spfc_remove, - .shutdown = spfc_shutdown, - .err_handler = &spfc_err_handler}; - -static __init int spfc_lld_init(void) -{ - if (unf_common_init() != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]UNF_Common_init failed"); - return RETURN_ERROR_S32; - } - - spfc_check_module_para(); - - if (unf_global_value_init() != RETURN_OK) - return RETURN_ERROR_S32; - - spfc_register_uld(&fc_uld_info); - return pci_register_driver(&spfc_driver); -} - -static __exit void spfc_lld_exit(void) -{ - pci_unregister_driver(&spfc_driver); - spfc_unregister_uld(); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[event]SPFC module removing..."); - - spfc_realease_cmo_op_handle(); - - /* 2. 
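/*
 * spfc_io_error_detected() above is plugged into the PCI core through
 * struct pci_error_handlers (spfc_err_handler, also above). A minimal AER
 * hook that records the error and lets recovery continue without a slot
 * reset looks like this (my_record_error is a hypothetical helper):
 */
#include <linux/pci.h>

void my_record_error(void *drvdata, pci_channel_state_t state);

static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
                                          pci_channel_state_t state)
{
        my_record_error(pci_get_drvdata(pdev), state);

        /* CAN_RECOVER: MMIO is assumed usable and no reset is requested. */
        return PCI_ERS_RESULT_CAN_RECOVER;
}

static const struct pci_error_handlers my_err_handler = {
        .error_detected = my_error_detected,
};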
Unregister FC COM module(level) */ - unf_common_exit(); -} - -module_init(spfc_lld_init); -module_exit(spfc_lld_exit); - -MODULE_AUTHOR("Ramaxel Memory Technology, Ltd"); -MODULE_DESCRIPTION(SPFC_DRV_DESC); -MODULE_VERSION(SPFC_DRV_VERSION); -MODULE_LICENSE("GPL"); - -module_param(allowed_probe_num, uint, 0444); -module_param(dif_sgl_mode, uint, 0444); -module_param(max_speed, uint, 0444); -module_param(wqe_page_size, uint, 0444); -module_param(combo_length, uint, 0444); -module_param(cos_bit_map, uint, 0444); -module_param(spfc_dif_enable, uint, 0444); -MODULE_PARM_DESC(spfc_dif_enable, "set dif enable/disable(1/0), default is 0(disable)."); -module_param(link_lose_tmo, uint, 0444); -MODULE_PARM_DESC(link_lose_tmo, "set link time out, default is 30s."); diff --git a/drivers/scsi/spfc/hw/spfc_lld.h b/drivers/scsi/spfc/hw/spfc_lld.h deleted file mode 100644 index f7b4a5e5ce07db8e83153248e98486936c075462..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_lld.h +++ /dev/null @@ -1,76 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_LLD_H -#define SPFC_LLD_H - -#include "sphw_crm.h" - -struct spfc_lld_dev { - struct pci_dev *pdev; - void *hwdev; -}; - -struct spfc_uld_info { - /* uld_dev: should not return null even the function capability - * is not support the up layer driver - * uld_dev_name: NIC driver should copy net device name. - * FC driver could copy fc device name. - * other up layer driver don`t need copy anything - */ - int (*probe)(struct spfc_lld_dev *lld_dev, void **uld_dev, - char *uld_dev_name); - void (*remove)(struct spfc_lld_dev *lld_dev, void *uld_dev); - int (*suspend)(struct spfc_lld_dev *lld_dev, void *uld_dev, - pm_message_t state); - int (*resume)(struct spfc_lld_dev *lld_dev, void *uld_dev); - void (*event)(struct spfc_lld_dev *lld_dev, void *uld_dev, - struct sphw_event_info *event); - int (*ioctl)(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size, - void *buf_out, u32 *out_size); -}; - -/* Structure pcidev private */ -struct spfc_pcidev { - struct pci_dev *pcidev; - void *hwdev; - struct card_node *chip_node; - struct spfc_lld_dev lld_dev; - /* such as fc_dev */ - void *uld_dev[SERVICE_T_MAX]; - /* Record the service object name */ - char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ]; - /* It is a the global variable for driver to manage - * all function device linked list - */ - struct list_head node; - void __iomem *cfg_reg_base; - void __iomem *intr_reg_base; - void __iomem *mgmt_reg_base; - u64 db_dwqe_len; - u64 db_base_phy; - void __iomem *db_base; - /* lock for attach/detach uld */ - struct mutex pdev_mutex; - /* setted when uld driver processing event */ - unsigned long state; - struct pci_device_id id; - atomic_t ref_cnt; -}; - -enum spfc_lld_status { - SPFC_NODE_CHANGE = BIT(0), -}; - -struct spfc_lld_lock { - /* lock for chip list */ - struct mutex lld_mutex; - unsigned long status; - atomic_t dev_ref_cnt; -}; - -#ifndef MAX_SIZE -#define MAX_SIZE (16) -#endif - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_module.h b/drivers/scsi/spfc/hw/spfc_module.h deleted file mode 100644 index 153d59955339431e21075c480826c924315c45a4..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_module.h +++ /dev/null @@ -1,297 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_MODULE_H -#define SPFC_MODULE_H -#include "unf_type.h" -#include "unf_log.h" -#include "unf_common.h" -#include 
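/*
 * struct spfc_uld_info in spfc_lld.h above is the upper-layer-driver ops
 * table. spfc_register_uld() only insists on .probe and .remove; the
 * remaining hooks are optional. A minimal sketch of filling in and
 * registering the table (the my_fc_* callbacks are hypothetical):
 */
static int my_fc_probe(struct spfc_lld_dev *lld_dev, void **uld_dev,
                       char *uld_dev_name)
{
        /* Allocate the FC-level device and publish it through *uld_dev. */
        return 0;
}

static void my_fc_remove(struct spfc_lld_dev *lld_dev, void *uld_dev)
{
        /* Release whatever my_fc_probe() allocated. */
}

static struct spfc_uld_info my_fc_uld_info = {
        .probe  = my_fc_probe,
        .remove = my_fc_remove,
        /* .suspend, .resume, .event and .ioctl may stay NULL */
};

/* At module init: spfc_register_uld(&my_fc_uld_info); */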
"spfc_utils.h" -#include "spfc_hba.h" - -#define SPFC_FT_ENABLE (1) -#define SPFC_FC_DISABLE (0) - -#define SPFC_P2P_DIRECT (0) -#define SPFC_P2P_FABRIC (1) -#define SPFC_LOOP (2) -#define SPFC_ATUOSPEED (1) -#define SPFC_FIXEDSPEED (0) -#define SPFC_AUTOTOPO (0) -#define SPFC_P2PTOPO (0x2) -#define SPFC_LOOPTOPO (0x1) -#define SPFC_SPEED_2G (0x2) -#define SPFC_SPEED_4G (0x4) -#define SPFC_SPEED_8G (0x8) -#define SPFC_SPEED_16G (0x10) -#define SPFC_SPEED_32G (0x20) - -#define SPFC_MAX_PORT_NUM SPFC_MAX_PROBE_PORT_NUM -#define SPFC_MAX_PORT_TASK_TYPE_STAT_NUM (128) -#define SPFC_MAX_LINK_EVENT_CNT (4) -#define SPFC_MAX_LINK_REASON_CNT (256) - -#define SPFC_MML_LOGCTRO_NUM (14) - -#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */ - -/* - * Define the data type - */ -struct spfc_log_ctrl { - char *log_option; - u32 state; -}; - -/* - * Declare the global function. - */ -extern struct unf_cm_handle_op spfc_cm_op_handle; -extern struct spfc_uld_info fc_uld_info; -extern u32 allowed_probe_num; -extern u32 max_speed; -extern u32 accum_db_num; -extern u32 wqe_page_size; -extern u32 dif_type; -extern u32 wqe_pre_load; -extern u32 combo_length; -extern u32 cos_bit_map; -extern u32 exit_count; -extern u32 exit_stride; -extern u32 exit_base; - -extern atomic64_t rx_tx_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t rx_tx_err[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t scq_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t aeq_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t dif_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t mail_box_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t com_up_event_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern u64 link_event_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_LINK_EVENT_CNT]; -extern u64 link_reason_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_LINK_REASON_CNT]; -extern atomic64_t up_err_event_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern u64 hba_stat[SPFC_MAX_PORT_NUM][SPFC_HBA_STAT_BUTT]; -#define SPFC_LINK_EVENT_STAT(hba, link_ent) \ - (link_event_stat[(hba)->probe_index][link_ent]++) -#define SPFC_LINK_REASON_STAT(hba, link_rsn) \ - (link_reason_stat[(hba)->probe_index][link_rsn]++) -#define SPFC_HBA_STAT(hba, hba_stat_type) \ - (hba_stat[(hba)->probe_index][hba_stat_type]++) - -#define SPFC_UP_ERR_EVENT_STAT(hba, err_type) \ - (atomic64_inc(&up_err_event_stat[(hba)->probe_index][err_type])) -#define SPFC_UP_ERR_EVENT_STAT_READ(probe_index, io_type) \ - (atomic64_read(&up_err_event_stat[probe_index][io_type])) -#define SPFC_DIF_ERR_STAT(hba, dif_err) \ - (atomic64_inc(&dif_err_stat[(hba)->probe_index][dif_err])) -#define SPFC_DIF_ERR_STAT_READ(probe_index, dif_err) \ - (atomic64_read(&dif_err_stat[probe_index][dif_err])) - -#define SPFC_IO_STAT(hba, io_type) \ - (atomic64_inc(&rx_tx_stat[(hba)->probe_index][io_type])) -#define SPFC_IO_STAT_READ(probe_index, io_type) \ - (atomic64_read(&rx_tx_stat[probe_index][io_type])) - -#define SPFC_ERR_IO_STAT(hba, io_type) \ - (atomic64_inc(&rx_tx_err[(hba)->probe_index][io_type])) -#define SPFC_ERR_IO_STAT_READ(probe_index, io_type) \ - (atomic64_read(&rx_tx_err[probe_index][io_type])) - -#define SPFC_SCQ_ERR_TYPE_STAT(hba, err_type) \ - (atomic64_inc(&scq_err_stat[(hba)->probe_index][err_type])) -#define SPFC_SCQ_ERR_TYPE_STAT_READ(probe_index, io_type) \ - (atomic64_read(&scq_err_stat[probe_index][io_type])) 
-#define SPFC_AEQ_ERR_TYPE_STAT(hba, err_type) \ - (atomic64_inc(&aeq_err_stat[(hba)->probe_index][err_type])) -#define SPFC_AEQ_ERR_TYPE_STAT_READ(probe_index, io_type) \ - (atomic64_read(&aeq_err_stat[probe_index][io_type])) - -#define SPFC_MAILBOX_STAT(hba, io_type) \ - (atomic64_inc(&mail_box_stat[(hba)->probe_index][io_type])) -#define SPFC_MAILBOX_STAT_READ(probe_index, io_type) \ - (atomic64_read(&mail_box_stat[probe_index][io_type])) - -#define SPFC_COM_UP_ERR_EVENT_STAT(hba, err_type) \ - (atomic64_inc( \ - &com_up_event_err_stat[(hba)->probe_index][err_type])) -#define SPFC_COM_UP_ERR_EVENT_STAT_READ(probe_index, err_type) \ - (atomic64_read(&com_up_event_err_stat[probe_index][err_type])) - -#define UNF_LOWLEVEL_ALLOC_LPORT(lport, fc_port, low_level) \ - do { \ - if (spfc_cm_op_handle.unf_alloc_local_port) { \ - lport = spfc_cm_op_handle.unf_alloc_local_port( \ - (fc_port), (low_level)); \ - } else { \ - lport = NULL; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RECEIVE_LS_GS_PKG(ret, fc_port, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_receive_ls_gs_pkg) { \ - ret = spfc_cm_op_handle.unf_receive_ls_gs_pkg( \ - (fc_port), (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_SEND_ELS_DONE(ret, fc_port, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_send_els_done) { \ - ret = spfc_cm_op_handle.unf_send_els_done((fc_port), \ - (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_GET_CFG_PARMS(ret, section_name, cfg_parm, cfg_value, \ - item_num) \ - do { \ - if (spfc_cm_op_handle.unf_get_cfg_parms) { \ - ret = (u32)spfc_cm_op_handle.unf_get_cfg_parms( \ - (section_name), (cfg_parm), (cfg_value), \ - (item_num)); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, \ - "Get config parameter function is NULL."); \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RELEASE_LOCAL_PORT(ret, lport) \ - do { \ - if (unlikely(!spfc_cm_op_handle.unf_release_local_port)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = \ - spfc_cm_op_handle.unf_release_local_port(lport); \ - } \ - } while (0) - -#define UNF_CM_GET_SGL_ENTRY(ret, pkg, buf, buf_len) \ - do { \ - if (unlikely(!spfc_cm_op_handle.unf_cm_get_sgl_entry)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = spfc_cm_op_handle.unf_cm_get_sgl_entry( \ - pkg, buf, buf_len); \ - } \ - } while (0) - -#define UNF_CM_GET_DIF_SGL_ENTRY(ret, pkg, buf, buf_len) \ - do { \ - if (unlikely(!spfc_cm_op_handle.unf_cm_get_dif_sgl_entry)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = spfc_cm_op_handle.unf_cm_get_dif_sgl_entry( \ - pkg, buf, buf_len); \ - } \ - } while (0) - -#define UNF_GET_SGL_ENTRY(ret, pkg, buf, buf_len, dif_flag) \ - do { \ - if (dif_flag) { \ - UNF_CM_GET_DIF_SGL_ENTRY(ret, pkg, buf, buf_len); \ - } else { \ - UNF_CM_GET_SGL_ENTRY(ret, pkg, buf, buf_len); \ - } \ - } while (0) - -#define UNF_GET_FREE_ESGL_PAGE(ret, lport, pkg) \ - do { \ - if (unlikely( \ - !spfc_cm_op_handle.unf_get_one_free_esgl_page)) { \ - ret = NULL; \ - } else { \ - ret = \ - spfc_cm_op_handle.unf_get_one_free_esgl_page( \ - lport, pkg); \ - } \ - } while (0) - -#define UNF_LOWLEVEL_FCP_CMND_RECEIVED(ret, lport, pkg) \ - do { \ - if (unlikely(!spfc_cm_op_handle.unf_process_fcp_cmnd)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = spfc_cm_op_handle.unf_process_fcp_cmnd(lport, \ - pkg); \ - } \ - } while (0) - -#define UNF_LOWLEVEL_SCSI_COMPLETED(ret, lport, pkg) \ - do { \ - if (unlikely(NULL == \ - 
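/*
 * Every UNF_LOWLEVEL_* wrapper above follows one do { ... } while (0)
 * template: if the optional callback in the shared ops table is non-NULL,
 * call it and capture its result, otherwise fail with UNF_RETURN_ERROR.
 * The do/while keeps the macro a single statement even after an unbraced
 * if. The generic shape, as a sketch:
 */
#define GUARDED_CALL(ret, ops, hook, ...)                       \
        do {                                                    \
                if ((ops).hook)                                 \
                        ret = (ops).hook(__VA_ARGS__);          \
                else                                            \
                        ret = UNF_RETURN_ERROR;                 \
        } while (0)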
spfc_cm_op_handle.unf_receive_ini_response)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = spfc_cm_op_handle.unf_receive_ini_response( \ - lport, pkg); \ - } \ - } while (0) - -#define UNF_LOWLEVEL_PORT_EVENT(ret, lport, event, input) \ - do { \ - if (unlikely(!spfc_cm_op_handle.unf_fc_port_event)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = spfc_cm_op_handle.unf_fc_port_event( \ - lport, event, input); \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RECEIVE_FC4LS_PKG(ret, fc_port, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_receive_fc4ls_pkg) { \ - ret = spfc_cm_op_handle.unf_receive_fc4ls_pkg( \ - (fc_port), (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_SEND_FC4LS_DONE(ret, lport, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_send_fc4ls_done) { \ - ret = spfc_cm_op_handle.unf_send_fc4ls_done((lport), \ - (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RECEIVE_BLS_PKG(ret, lport, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_receive_bls_pkg) { \ - ret = spfc_cm_op_handle.unf_receive_bls_pkg((lport), \ - (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RECEIVE_MARKER_STS(ret, lport, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_receive_marker_status) { \ - ret = spfc_cm_op_handle.unf_receive_marker_status( \ - (lport), (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RECEIVE_ABTS_MARKER_STS(ret, lport, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_receive_abts_marker_status) { \ - ret = \ - spfc_cm_op_handle.unf_receive_abts_marker_status( \ - (lport), (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_parent_context.h b/drivers/scsi/spfc/hw/spfc_parent_context.h deleted file mode 100644 index dc4baffe5c44fb0d145fe97b6a671b52821645c5..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_parent_context.h +++ /dev/null @@ -1,269 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_PARENT_CONTEXT_H -#define SPFC_PARENT_CONTEXT_H - -enum fc_parent_status { - FC_PARENT_STATUS_INVALID = 0, - FC_PARENT_STATUS_NORMAL, - FC_PARENT_STATUS_CLOSING -}; - -#define SPFC_PARENT_CONTEXT_KEY_ALIGN_SIZE (48) - -#define SPFC_PARENT_CONTEXT_TIMER_SIZE (32) /* 24+2*N,N=timer count */ - -#define FC_CALC_CID(_xid) \ - (((((_xid) >> 5) & 0x1ff) << 11) | ((((_xid) >> 5) & 0x1ff) << 2) | \ - (((_xid) >> 3) & 0x3)) - -#define MAX_PKT_SIZE_PER_DISPATCH (fc_child_ctx_ex->per_xmit_data_size) - -/* immediate data DIF info definition in parent context */ -struct immi_dif_info { - union { - u32 value; - struct { - u32 app_tag_ctrl : 3; /* DIF/DIX APP TAG Control */ - u32 ref_tag_mode : 2; /* Bit 0: scenario of the reference tag verify mode */ - /* Bit 1: scenario of the reference tag insert/replace - * mode 0: fixed; 1: increasement; - */ - u32 ref_tag_ctrl : 3; /* The DIF/DIX Reference tag control */ - u32 grd_agm_ini_ctrl : 3; - u32 grd_agm_ctrl : 2; /* Bit 0: DIF/DIX guard verify algorithm control */ - /* Bit 1: DIF/DIX guard replace or insert algorithm control */ - u32 grd_ctrl : 3; /* The DIF/DIX Guard control */ - u32 dif_verify_type : 2; /* verify type */ - /* Check blocks whose reference tag contains 0xFFFF flag */ - u32 difx_ref_esc : 1; - /* Check blocks whose application tag contains 0xFFFF flag */ - u32 difx_app_esc : 1; - u32 rsvd : 8; - u32 sct_size : 1; /* Sector 
size, 1: 4K; 0: 512 */ - u32 smd_tp : 2; - u32 difx_en : 1; - } info; - } dif_dw3; - - u32 cmp_app_tag : 16; - u32 rep_app_tag : 16; - /* The ref tag value for verify compare, do not support replace or insert ref tag */ - u32 cmp_ref_tag; - u32 rep_ref_tag; - - u32 rsv1 : 16; - u32 cmp_app_tag_msk : 16; -}; - -/* parent context SW section definition: SW(80B) */ -struct spfc_sw_section { - u16 scq_num_rcv_cmd; - u16 scq_num_max_scqn; - - struct { - u32 xid : 13; - u32 vport : 7; - u32 csctrl : 8; - u32 rsvd0 : 4; - } sw_ctxt_vport_xid; - - u32 scq_num_scqn_mask : 12; - u32 cid : 20; /* ucode init */ - - u16 conn_id; - u16 immi_rq_page_size; - - u16 immi_taskid_min; - u16 immi_taskid_max; - - union { - u32 pctxt_val0; - struct { - u32 srv_type : 5; /* driver init */ - u32 srr_support : 2; /* sequence retransmition support flag */ - u32 rsvd1 : 5; - u32 port_id : 4; /* driver init */ - u32 vlan_id : 16; /* driver init */ - } dw; - } sw_ctxt_misc; - - u32 rsvd2; - u32 per_xmit_data_size; - - /* RW fields */ - u32 cmd_scq_gpa_h; - u32 cmd_scq_gpa_l; - u32 e_d_tov_timer_val; /* E_D_TOV timer value: value should be set on ms by driver */ - u16 mfs_unaligned_bytes; /* mfs unalined bytes of per 64KB dispatch*/ - u16 tx_mfs; /* remote port max receive fc payload length */ - u32 xfer_rdy_dis_max_len_remote; /* max data len allowed in xfer_rdy dis scenario */ - u32 xfer_rdy_dis_max_len_local; - - union { - struct { - u32 priority : 3; /* vlan priority */ - u32 rsvd4 : 2; - u32 status : 8; /* status of flow */ - u32 cos : 3; /* doorbell cos value */ - u32 oq_cos_data : 3; /* esch oq cos for data */ - u32 oq_cos_cmd : 3; /* esch oq cos for cmd/xferrdy/rsp */ - /* used for parent context cache Consistency judgment,1: done */ - u32 flush_done : 1; - u32 work_mode : 2; /* 0:Target, 1:Initiator, 2:Target&Initiator */ - u32 seq_cnt : 1; /* seq_cnt */ - u32 e_d_tov : 1; /* E_D_TOV resolution */ - u32 vlan_enable : 1; /* Vlan enable flag */ - u32 conf_support : 1; /* Response confirm support flag */ - u32 rec_support : 1; /* REC support flag */ - u32 write_xfer_rdy : 1; /* WRITE Xfer_Rdy disable or enable */ - u32 sgl_num : 1; /* Double or single SGL, 1: double; 0: single */ - } dw; - u32 pctxt_val1; - } sw_ctxt_config; - struct immi_dif_info immi_dif_info; /* immediate data dif control info(20B) */ -}; - -struct spfc_hw_rsvd_queue { - /* bitmap[0]:255-192 */ - /* bitmap[1]:191-128 */ - /* bitmap[2]:127-64 */ - /* bitmap[3]:63-0 */ - u64 seq_id_bitmap[4]; - struct { - u64 last_req_seq_id : 8; - u64 xid : 20; - u64 rsvd0 : 36; - } wd0; -}; - -struct spfc_sq_qinfo { - u64 rsvd_0 : 10; - u64 pmsn_type : 1; /* 0: get pmsn from queue header; 1: get pmsn from ucode */ - u64 rsvd_1 : 4; - u64 cur_wqe_o : 1; /* should be opposite from loop_o */ - u64 rsvd_2 : 48; - - u64 cur_sqe_gpa; - u64 pmsn_gpa; /* sq's queue header gpa */ - - u64 sqe_dmaattr_idx : 6; - u64 sq_so_ro : 2; - u64 rsvd_3 : 2; - u64 ring : 1; /* 0: link; 1: ring */ - u64 loop_o : 1; /* init to be the first round o-bit */ - u64 rsvd_4 : 4; - u64 zerocopy_dmaattr_idx : 6; - u64 zerocopy_so_ro : 2; - u64 parity : 8; - u64 r : 1; - u64 s : 1; - u64 enable_256 : 1; - u64 rsvd_5 : 23; - u64 pcie_template : 6; -}; - -struct spfc_cq_qinfo { - u64 pcie_template_hi : 3; - u64 parity_2 : 1; - u64 cur_cqe_gpa : 60; - - u64 pi : 15; - u64 pi_o : 1; - u64 ci : 15; - u64 ci_o : 1; - u64 c_eqn_msi_x : 10; /* if init_mode = 2, is msi/msi-x; other the low-5-bit means c_eqn */ - u64 parity_1 : 1; - u64 ci_type : 1; /* 0: get ci from queue header; 1: get ci from ucode */ 
- u64 cq_depth : 3; /* valid when ring = 1 */ - u64 armq : 1; /* 0: IDLE state; 1: NEXT state */ - u64 cur_cqe_cnt : 8; - u64 cqe_max_cnt : 8; - - u64 cqe_dmaattr_idx : 6; - u64 cq_so_ro : 2; - u64 init_mode : 2; /* 1: armQ; 2: msi/msi-x; others: rsvd */ - u64 next_o : 1; /* next pate valid o-bit */ - u64 loop_o : 1; /* init to be the first round o-bit */ - u64 next_cq_wqe_page_gpa : 52; - - u64 pcie_template_lo : 3; - u64 parity_0 : 1; - u64 ci_gpa : 60; /* cq's queue header gpa */ -}; - -struct spfc_scq_qinfo { - union { - struct { - u64 scq_n : 20; /* scq number */ - u64 sq_min_preld_cache_num : 4; - u64 sq_th0_preld_cache_num : 5; - u64 sq_th1_preld_cache_num : 5; - u64 sq_th2_preld_cache_num : 5; - u64 rq_min_preld_cache_num : 4; - u64 rq_th0_preld_cache_num : 5; - u64 rq_th1_preld_cache_num : 5; - u64 rq_th2_preld_cache_num : 5; - u64 parity : 6; - } info; - - u64 pctxt_val1; - } hw_scqc_config; -}; - -struct spfc_srq_qinfo { - u64 parity : 4; - u64 srqc_gpa : 60; -}; - -/* here is the layout of service type 12/13 */ -struct spfc_parent_context { - u8 key[SPFC_PARENT_CONTEXT_KEY_ALIGN_SIZE]; - struct spfc_scq_qinfo resp_scq_qinfo; - struct spfc_srq_qinfo imm_srq_info; - struct spfc_sq_qinfo sq_qinfo; - u8 timer_section[SPFC_PARENT_CONTEXT_TIMER_SIZE]; - struct spfc_hw_rsvd_queue hw_rsvdq; - struct spfc_srq_qinfo els_srq_info; - struct spfc_sw_section sw_section; -}; - -/* here is the layout of service type 13 */ -struct spfc_ssq_parent_context { - u8 rsvd0[64]; - struct spfc_sq_qinfo sq1_qinfo; - u8 rsvd1[32]; - struct spfc_sq_qinfo sq2_qinfo; - u8 rsvd2[32]; - struct spfc_sq_qinfo sq3_qinfo; - struct spfc_scq_qinfo sq_pretchinfo; - u8 rsvd3[24]; -}; - -/* FC Key Section */ -struct spfc_fc_key_section { - u32 xid_h : 4; - u32 key_size : 2; - u32 rsvd1 : 1; - u32 srv_type : 5; - u32 csize : 2; - u32 rsvd0 : 17; - u32 v : 1; - - u32 tag_fp_h : 4; - u32 rsvd2 : 12; - u32 xid_l : 16; - - u16 tag_fp_l; - u8 smac[6]; /* Source MAC */ - u8 dmac[6]; /* Dest MAC */ - u8 sid[3]; /* Source FC ID */ - u8 did[3]; /* Dest FC ID */ - u8 svlan[4]; /* Svlan */ - u8 cvlan[4]; /* Cvlan */ - - u32 next_ptr_h; -}; - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_queue.c b/drivers/scsi/spfc/hw/spfc_queue.c deleted file mode 100644 index abcf1ff3f49f466a5bc0cf4fe298ec35d5c78245..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_queue.c +++ /dev/null @@ -1,4857 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_queue.h" -#include "unf_log.h" -#include "unf_lport.h" -#include "spfc_module.h" -#include "spfc_utils.h" -#include "spfc_service.h" -#include "spfc_chipitf.h" -#include "spfc_parent_context.h" -#include "sphw_hw.h" -#include "sphw_crm.h" - -#define SPFC_UCODE_CMD_MODIFY_QUEUE_CONTEXT 0 - -#define SPFC_DONE_MASK (0x00000001) -#define SPFC_OWNER_MASK (0x80000000) - -#define SPFC_SQ_LINK_PRE (1 << 2) - -#define SPFC_SQ_HEADER_ADDR_ALIGN_SIZE (64) -#define SPFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK (SPFC_SQ_HEADER_ADDR_ALIGN_SIZE - 1) - -#define SPFC_ADDR_64_ALIGN(addr) \ - (((addr) + (SPFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK)) & \ - ~(SPFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK)) - -u32 spfc_get_parity_value(u64 *src_data, u32 row, u32 col) -{ - u32 i = 0; - u32 j = 0; - u32 offset = 0; - u32 group = 0; - u32 bit_offset = 0; - u32 bit_val = 0; - u32 tmp_val = 0; - u32 dest_data = 0; - - for (i = 0; i < row; i++) { - for (j = 0; j < col; j++) { - offset = (row * j + i); - group = offset / (sizeof(src_data[ARRAY_INDEX_0]) * 
UNF_BITS_PER_BYTE); - bit_offset = offset % (sizeof(src_data[ARRAY_INDEX_0]) * UNF_BITS_PER_BYTE); - tmp_val = (src_data[group] >> bit_offset) & SPFC_PARITY_MASK; - - if (j == 0) { - bit_val = tmp_val; - continue; - } - - bit_val ^= tmp_val; - } - - bit_val = (~bit_val) & SPFC_PARITY_MASK; - - dest_data |= (bit_val << i); - } - - return dest_data; -} - -static void spfc_update_producer_info(u16 q_depth, u16 *pus_pi, u16 *pus_owner) -{ - u16 current_pi = 0; - u16 next_pi = 0; - u16 owner = 0; - - current_pi = *pus_pi; - next_pi = current_pi + 1; - - if (next_pi < q_depth) { - *pus_pi = next_pi; - } else { - /* PI reversal */ - *pus_pi = 0; - - /* obit reversal */ - owner = *pus_owner; - *pus_owner = !owner; - } -} - -static void spfc_update_consumer_info(u16 q_depth, u16 *pus_ci, u16 *pus_owner) -{ - u16 current_ci = 0; - u16 next_ci = 0; - u16 owner = 0; - - current_ci = *pus_ci; - next_ci = current_ci + 1; - - if (next_ci < q_depth) { - *pus_ci = next_ci; - } else { - /* CI reversal */ - *pus_ci = 0; - - /* obit reversal */ - owner = *pus_owner; - *pus_owner = !owner; - } -} - -static inline void spfc_update_cq_header(struct ci_record *ci_record, u16 ci, - u16 owner) -{ - u32 size = 0; - struct ci_record record = {0}; - - size = sizeof(struct ci_record); - memcpy(&record, ci_record, size); - spfc_big_to_cpu64(&record, size); - record.cmsn = ci + (u16)(owner << SPFC_CQ_HEADER_OWNER_SHIFT); - record.dump_cmsn = record.cmsn; - spfc_cpu_to_big64(&record, size); - - wmb(); - memcpy(ci_record, &record, size); -} - -static void spfc_update_srq_header(struct db_record *pmsn_record, u16 pmsn) -{ - u32 size = 0; - struct db_record record = {0}; - - size = sizeof(struct db_record); - memcpy(&record, pmsn_record, size); - spfc_big_to_cpu64(&record, size); - record.pmsn = pmsn; - record.dump_pmsn = record.pmsn; - spfc_cpu_to_big64(&record, sizeof(struct db_record)); - - wmb(); - memcpy(pmsn_record, &record, size); -} - -static void spfc_set_srq_wqe_owner_be(struct spfc_wqe_ctrl *sqe_ctrl_in_wp, - u32 owner) -{ - struct spfc_wqe_ctrl_ch wqe_ctrl_ch; - - mb(); - - wqe_ctrl_ch.ctrl_ch_val = be32_to_cpu(sqe_ctrl_in_wp->ch.ctrl_ch_val); - wqe_ctrl_ch.wd0.owner = owner; - sqe_ctrl_in_wp->ch.ctrl_ch_val = cpu_to_be32(wqe_ctrl_ch.ctrl_ch_val); - - mb(); -} - -static inline void spfc_set_sq_wqe_owner_be(void *sqe) -{ - u32 *sqe_dw = (u32 *)sqe; - u32 *e_sqe_dw = (u32 *)((u8 *)sqe + SPFC_EXTEND_WQE_OFFSET); - - /* Ensure that the write of WQE is complete */ - mb(); - e_sqe_dw[SPFC_SQE_SECOND_OBIT_DW_POS] |= SPFC_SQE_OBIT_SET_MASK_BE; - e_sqe_dw[SPFC_SQE_FIRST_OBIT_DW_POS] |= SPFC_SQE_OBIT_SET_MASK_BE; - sqe_dw[SPFC_SQE_SECOND_OBIT_DW_POS] |= SPFC_SQE_OBIT_SET_MASK_BE; - sqe_dw[SPFC_SQE_FIRST_OBIT_DW_POS] |= SPFC_SQE_OBIT_SET_MASK_BE; - mb(); -} - -void spfc_clear_sq_wqe_owner_be(struct spfc_sqe *sqe) -{ - u32 *sqe_dw = (u32 *)sqe; - u32 *e_sqe_dw = (u32 *)((u8 *)sqe + SPFC_EXTEND_WQE_OFFSET); - - mb(); - sqe_dw[SPFC_SQE_SECOND_OBIT_DW_POS] &= SPFC_SQE_OBIT_CLEAR_MASK_BE; - mb(); - sqe_dw[SPFC_SQE_FIRST_OBIT_DW_POS] &= SPFC_SQE_OBIT_CLEAR_MASK_BE; - e_sqe_dw[SPFC_SQE_SECOND_OBIT_DW_POS] &= SPFC_SQE_OBIT_CLEAR_MASK_BE; - e_sqe_dw[SPFC_SQE_FIRST_OBIT_DW_POS] &= SPFC_SQE_OBIT_CLEAR_MASK_BE; -} - -static void spfc_set_direct_wqe_owner_be(void *sqe, u16 owner) -{ - if (owner) - spfc_set_sq_wqe_owner_be(sqe); - else - spfc_clear_sq_wqe_owner_be(sqe); -} - -static void spfc_set_srq_link_wqe_owner_be(struct spfc_linkwqe *link_wqe, - u32 owner, u16 pmsn) -{ - struct spfc_linkwqe local_lw; - - mb(); - local_lw.val_wd1 = 
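/*
 * spfc_update_producer_info() and spfc_update_consumer_info() above
 * implement the classic owner-bit (o-bit) ring: the index wraps at the
 * queue depth and the expected owner flips on every wrap, so a stale
 * entry left over from the previous lap can never be mistaken for a
 * fresh one. Condensed to its core:
 */
#include <linux/types.h>

static void ring_advance(u16 depth, u16 *idx, u16 *owner)
{
        if (++(*idx) >= depth) {
                *idx = 0;               /* wrap to the start of the ring */
                *owner = !*owner;       /* new lap: the expected o-bit flips */
        }
}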
be32_to_cpu(link_wqe->val_wd1); - local_lw.wd1.msn = pmsn; - local_lw.wd1.dump_msn = (local_lw.wd1.msn & SPFC_LOCAL_LW_WD1_DUMP_MSN_MASK); - link_wqe->val_wd1 = cpu_to_be32(local_lw.val_wd1); - - local_lw.val_wd0 = be32_to_cpu(link_wqe->val_wd0); - local_lw.wd0.o = owner; - link_wqe->val_wd0 = cpu_to_be32(local_lw.val_wd0); - mb(); -} - -static inline bool spfc_is_scq_link_wqe(struct spfc_scq_info *scq_info) -{ - u16 custom_scqe_num = 0; - - custom_scqe_num = scq_info->ci + 1; - - if ((custom_scqe_num % scq_info->wqe_num_per_buf == 0) || - scq_info->valid_wqe_num == custom_scqe_num) - return true; - else - return false; -} - -static struct spfc_wqe_page * -spfc_add_tail_wqe_page(struct spfc_parent_ssq_info *ssq) -{ - struct spfc_hba_info *hba = NULL; - struct spfc_wqe_page *esgl = NULL; - struct list_head *free_list_head = NULL; - ulong flag = 0; - - hba = (struct spfc_hba_info *)ssq->hba; - - spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); - - /* Get a WqePage from hba->sq_wpg_pool.list_free_wpg_pool, and add to - * sq.list_SqTailWqePage - */ - if (!list_empty(&hba->sq_wpg_pool.list_free_wpg_pool)) { - free_list_head = UNF_OS_LIST_NEXT(&hba->sq_wpg_pool.list_free_wpg_pool); - list_del(free_list_head); - list_add_tail(free_list_head, &ssq->list_linked_list_sq); - esgl = list_entry(free_list_head, struct spfc_wqe_page, entry_wpg); - - /* WqePage Pool counter */ - atomic_inc(&hba->sq_wpg_pool.wpg_in_use); - } else { - esgl = NULL; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]SQ pool is empty when SQ(0x%x) try to get wqe page", - ssq->sqn); - SPFC_HBA_STAT(hba, SPFC_STAT_SQ_POOL_EMPTY); - } - - spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); - - return esgl; -} - -static inline struct spfc_sqe *spfc_get_wqe_page_entry(struct spfc_wqe_page *wpg, - u32 wqe_offset) -{ - struct spfc_sqe *sqe_wpg = NULL; - - sqe_wpg = (struct spfc_sqe *)(wpg->wpg_addr); - sqe_wpg += wqe_offset; - - return sqe_wpg; -} - -static void spfc_free_head_wqe_page(struct spfc_parent_ssq_info *ssq) -{ - struct spfc_hba_info *hba = NULL; - struct spfc_wqe_page *sq_wpg = NULL; - struct list_head *entry_head_wqe_page = NULL; - ulong flag = 0; - - atomic_dec(&ssq->wqe_page_cnt); - - hba = (struct spfc_hba_info *)ssq->hba; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "Port(0x%x) free wqe page nowpagecnt:%d", - hba->port_cfg.port_id, - atomic_read(&ssq->wqe_page_cnt)); - sq_wpg = SPFC_GET_SQ_HEAD(ssq); - - memset((void *)sq_wpg->wpg_addr, WQE_MARKER_0, hba->sq_wpg_pool.wpg_size); - - spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); - entry_head_wqe_page = &sq_wpg->entry_wpg; - list_del(entry_head_wqe_page); - list_add_tail(entry_head_wqe_page, &hba->sq_wpg_pool.list_free_wpg_pool); - - /* WqePage Pool counter */ - atomic_dec(&hba->sq_wpg_pool.wpg_in_use); - spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); -} - -static void spfc_free_link_list_wpg(struct spfc_parent_ssq_info *ssq) -{ - ulong flag = 0; - struct spfc_hba_info *hba = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct list_head *entry_head_wqe_page = NULL; - struct spfc_wqe_page *sq_wpg = NULL; - - hba = (struct spfc_hba_info *)ssq->hba; - - list_for_each_safe(node, next_node, &ssq->list_linked_list_sq) { - sq_wpg = list_entry(node, struct spfc_wqe_page, entry_wpg); - memset((void *)sq_wpg->wpg_addr, WQE_MARKER_0, hba->sq_wpg_pool.wpg_size); - - spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); - entry_head_wqe_page = &sq_wpg->entry_wpg; - list_del(entry_head_wqe_page); - 
list_add_tail(entry_head_wqe_page, &hba->sq_wpg_pool.list_free_wpg_pool); - - /* WqePage Pool counter */ - atomic_dec(&ssq->wqe_page_cnt); - atomic_dec(&hba->sq_wpg_pool.wpg_in_use); - - spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]Port(0x%x) RPort(0x%x) Sq(0x%x) link list destroyed, Sq.WqePageCnt=0x%x, SqWpgPool.wpg_in_use=0x%x", - hba->port_cfg.port_id, ssq->sqn, ssq->context_id, - atomic_read(&ssq->wqe_page_cnt), atomic_read(&hba->sq_wpg_pool.wpg_in_use)); -} - -struct spfc_wqe_page * -spfc_add_one_wqe_page(struct spfc_parent_ssq_info *ssq) -{ - u32 wqe_inx = 0; - struct spfc_wqe_page *wqe_page = NULL; - struct spfc_sqe *sqe_in_wp = NULL; - struct spfc_linkwqe *link_wqe_in_wpg = NULL; - struct spfc_linkwqe link_wqe; - - /* Add a new Wqe Page */ - wqe_page = spfc_add_tail_wqe_page(ssq); - - if (!wqe_page) - return NULL; - - for (wqe_inx = 0; wqe_inx <= ssq->wqe_num_per_buf; wqe_inx++) { - sqe_in_wp = spfc_get_wqe_page_entry(wqe_page, wqe_inx); - sqe_in_wp->ctrl_sl.ch.ctrl_ch_val = 0; - sqe_in_wp->ectrl_sl.ch.ctrl_ch_val = 0; - } - - /* Set last WqePage as linkwqe */ - link_wqe_in_wpg = (struct spfc_linkwqe *)spfc_get_wqe_page_entry(wqe_page, - ssq->wqe_num_per_buf); - link_wqe.val_wd0 = 0; - link_wqe.val_wd1 = 0; - link_wqe.next_page_addr_hi = (ssq->queue_style == SPFC_QUEUE_RING_STYLE) - ? SPFC_MSD(wqe_page->wpg_phy_addr) - : 0; - link_wqe.next_page_addr_lo = (ssq->queue_style == SPFC_QUEUE_RING_STYLE) - ? SPFC_LSD(wqe_page->wpg_phy_addr) - : 0; - link_wqe.wd0.wf = CQM_WQE_WF_LINK; - link_wqe.wd0.ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; - link_wqe.wd0.o = !(ssq->last_pi_owner); - link_wqe.wd1.lp = (ssq->queue_style == SPFC_QUEUE_RING_STYLE) - ? CQM_LINK_WQE_LP_VALID - : CQM_LINK_WQE_LP_INVALID; - spfc_cpu_to_big32(&link_wqe, sizeof(struct spfc_linkwqe)); - memcpy(link_wqe_in_wpg, &link_wqe, sizeof(struct spfc_linkwqe)); - memcpy((u8 *)link_wqe_in_wpg + SPFC_EXTEND_WQE_OFFSET, - &link_wqe, sizeof(struct spfc_linkwqe)); - - return wqe_page; -} - -static inline struct spfc_scqe_type * -spfc_get_scq_entry(struct spfc_scq_info *scq_info) -{ - u32 buf_id = 0; - u16 buf_offset = 0; - u16 ci = 0; - struct cqm_buf_list *buf = NULL; - - FC_CHECK_RETURN_VALUE(scq_info, NULL); - - ci = scq_info->ci; - buf_id = ci / scq_info->wqe_num_per_buf; - buf = &scq_info->cqm_scq_info->q_room_buf_1.buf_list[buf_id]; - buf_offset = (u16)(ci % scq_info->wqe_num_per_buf); - - return (struct spfc_scqe_type *)(buf->va) + buf_offset; -} - -static inline bool spfc_is_cqe_done(u32 *done, u32 *owner, u16 driver_owner) -{ - return ((((u16)(!!(*done & SPFC_DONE_MASK)) == driver_owner) && - ((u16)(!!(*owner & SPFC_OWNER_MASK)) == driver_owner)) ? 
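/*
 * The WQE-page helpers above manage a preallocated pool as a free list
 * under a spinlock taken with irqsave, since pages are recycled from
 * completion paths as well. The core "get one page" step, reduced to a
 * sketch (struct my_pool is hypothetical and mirrors the sq_wpg_pool
 * fields used above):
 */
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_pool {
        spinlock_t lock;
        struct list_head free_list;
        atomic_t in_use;
};

static struct spfc_wqe_page *my_pool_get(struct my_pool *pool,
                                         struct list_head *dst)
{
        struct spfc_wqe_page *wpg = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        if (!list_empty(&pool->free_list)) {
                wpg = list_first_entry(&pool->free_list,
                                       struct spfc_wqe_page, entry_wpg);
                list_move_tail(&wpg->entry_wpg, dst);   /* claim the page */
                atomic_inc(&pool->in_use);
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        return wpg;     /* NULL means the pool is exhausted */
}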
true : false); -} - -u32 spfc_process_scq_cqe_entity(ulong info, u32 proc_cnt) -{ - u32 ret = UNF_RETURN_ERROR; - u32 index = 0; - struct wq_header *queue_header = NULL; - struct spfc_scqe_type *scqe = NULL; - struct spfc_scqe_type tmp_scqe; - struct spfc_scq_info *scq_info = (struct spfc_scq_info *)info; - - FC_CHECK_RETURN_VALUE(scq_info, ret); - SPFC_FUNCTION_ENTER; - - queue_header = (struct wq_header *)(void *)(scq_info->cqm_scq_info->q_header_vaddr); - - for (index = 0; index < proc_cnt;) { - /* If linked wqe, then update CI */ - if (spfc_is_scq_link_wqe(scq_info)) { - spfc_update_consumer_info(scq_info->valid_wqe_num, - &scq_info->ci, - &scq_info->ci_owner); - spfc_update_cq_header(&queue_header->ci_record, - scq_info->ci, scq_info->ci_owner); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_INFO, - "[info]Current wqe is a linked wqe"); - continue; - } - - /* Get SCQE and then check obit & donebit whether been set */ - scqe = spfc_get_scq_entry(scq_info); - if (unlikely(!scqe)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Scqe is NULL"); - break; - } - - if (!spfc_is_cqe_done((u32 *)(void *)&scqe->wd0, - (u32 *)(void *)&scqe->ch.wd0, - scq_info->ci_owner)) { - atomic_set(&scq_info->flush_stat, SPFC_QUEUE_FLUSH_DONE); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_INFO, "[info]Now has no valid scqe"); - break; - } - - /* rmb & do memory copy */ - rmb(); - memcpy(&tmp_scqe, scqe, sizeof(struct spfc_scqe_type)); - /* process SCQ entry */ - ret = spfc_rcv_scq_entry_from_scq(scq_info->hba, (void *)&tmp_scqe, - scq_info->queue_id); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]QueueId(0x%x) scqn(0x%x) scqe process error at CI(0x%x)", - scq_info->queue_id, scq_info->scqn, scq_info->ci); - } - - /* Update Driver's CI & Obit */ - spfc_update_consumer_info(scq_info->valid_wqe_num, - &scq_info->ci, &scq_info->ci_owner); - spfc_update_cq_header(&queue_header->ci_record, scq_info->ci, - scq_info->ci_owner); - index++; - } - - /* Re-schedule again if necessary */ - if (index == proc_cnt) - tasklet_schedule(&scq_info->tasklet); - - SPFC_FUNCTION_RETURN; - - return index; -} - -void spfc_set_scq_irg_cfg(struct spfc_hba_info *hba, u32 mode, u16 msix_index) -{ -#define SPFC_POLLING_MODE_ITERRUPT_PENDING_CNT 5 -#define SPFC_POLLING_MODE_ITERRUPT_COALESC_TIMER_CFG 10 - u8 pending_limt = 0; - u8 coalesc_timer_cfg = 0; - - struct interrupt_info info = {0}; - - if (mode != SPFC_SCQ_INTR_LOW_LATENCY_MODE) { - pending_limt = SPFC_POLLING_MODE_ITERRUPT_PENDING_CNT; - coalesc_timer_cfg = - SPFC_POLLING_MODE_ITERRUPT_COALESC_TIMER_CFG; - } - - memset(&info, 0, sizeof(info)); - info.interrupt_coalesc_set = 1; - info.lli_set = 0; - info.pending_limt = pending_limt; - info.coalesc_timer_cfg = coalesc_timer_cfg; - info.resend_timer_cfg = 0; - info.msix_index = msix_index; - - sphw_set_interrupt_cfg(hba->dev_handle, info, SPHW_CHANNEL_FC); -} - -void spfc_process_scq_cqe(ulong info) -{ - struct spfc_scq_info *scq_info = (struct spfc_scq_info *)info; - - FC_CHECK_RETURN_VOID(scq_info); - - spfc_process_scq_cqe_entity(info, SPFC_CQE_MAX_PROCESS_NUM_PER_INTR); -} - -irqreturn_t spfc_scq_irq(int irq, void *scq_info) -{ - SPFC_FUNCTION_ENTER; - - FC_CHECK_RETURN_VALUE(scq_info, IRQ_NONE); - - tasklet_schedule(&((struct spfc_scq_info *)scq_info)->tasklet); - - SPFC_FUNCTION_RETURN; - - return IRQ_HANDLED; -} - -static u32 spfc_alloc_scq_int(struct spfc_scq_info *scq_info) -{ - int ret = UNF_RETURN_ERROR_S32; - u16 act_num = 0; - struct irq_info irq_info; - struct spfc_hba_info *hba = NULL; 
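/*
 * spfc_process_scq_cqe_entity() above polls at most proc_cnt CQEs per
 * tasklet run and re-schedules itself only when it used the whole budget,
 * which bounds softirq latency while still draining a busy queue. The
 * shape of that loop, as a sketch (struct my_cq, MY_CQ_BUDGET and
 * poll_one() are hypothetical):
 */
#include <linux/interrupt.h>
#include <linux/types.h>

#define MY_CQ_BUDGET 128

struct my_cq {
        struct tasklet_struct tasklet;
};

bool poll_one(struct my_cq *cq);

static void my_cq_tasklet(unsigned long data)
{
        struct my_cq *cq = (struct my_cq *)data;
        unsigned int n;

        for (n = 0; n < MY_CQ_BUDGET; n++)
                if (!poll_one(cq))      /* done/owner bits not ours yet */
                        break;

        if (n == MY_CQ_BUDGET)
                tasklet_schedule(&cq->tasklet); /* more work likely pending */
}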
- - FC_CHECK_RETURN_VALUE(scq_info, UNF_RETURN_ERROR); - - /* 1. Alloc & check SCQ IRQ */ - hba = (struct spfc_hba_info *)(scq_info->hba); - ret = sphw_alloc_irqs(hba->dev_handle, SERVICE_T_FC, SPFC_INT_NUM_PER_QUEUE, - &irq_info, &act_num); - if (ret != RETURN_OK || act_num != SPFC_INT_NUM_PER_QUEUE) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate scq irq failed, return %d", ret); - return UNF_RETURN_ERROR; - } - - if (irq_info.msix_entry_idx >= SPFC_SCQ_INT_ID_MAX) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SCQ irq id exceed %d, msix_entry_idx %d", - SPFC_SCQ_INT_ID_MAX, irq_info.msix_entry_idx); - sphw_free_irq(hba->dev_handle, SERVICE_T_FC, irq_info.irq_id); - return UNF_RETURN_ERROR; - } - - scq_info->irq_id = (u32)(irq_info.irq_id); - scq_info->msix_entry_idx = (u16)(irq_info.msix_entry_idx); - - snprintf(scq_info->irq_name, SPFC_IRQ_NAME_MAX, "fc_scq%u_%x_msix%u", - scq_info->queue_id, hba->port_cfg.port_id, scq_info->msix_entry_idx); - - /* 2. SCQ IRQ tasklet init */ - tasklet_init(&scq_info->tasklet, spfc_process_scq_cqe, (ulong)(uintptr_t)scq_info); - - /* 3. Request IRQ for SCQ */ - ret = request_irq(scq_info->irq_id, spfc_scq_irq, 0, scq_info->irq_name, scq_info); - - sphw_set_msix_state(hba->dev_handle, scq_info->msix_entry_idx, SPHW_MSIX_ENABLE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Request SCQ irq failed, SCQ Index = %u, return %d", - scq_info->queue_id, ret); - sphw_free_irq(hba->dev_handle, SERVICE_T_FC, scq_info->irq_id); - memset(scq_info->irq_name, 0, SPFC_IRQ_NAME_MAX); - scq_info->irq_id = 0; - scq_info->msix_entry_idx = 0; - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -static void spfc_free_scq_int(struct spfc_scq_info *scq_info) -{ - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID(scq_info); - - hba = (struct spfc_hba_info *)(scq_info->hba); - sphw_set_msix_state(hba->dev_handle, scq_info->msix_entry_idx, SPHW_MSIX_DISABLE); - free_irq(scq_info->irq_id, scq_info); - tasklet_kill(&scq_info->tasklet); - sphw_free_irq(hba->dev_handle, SERVICE_T_FC, scq_info->irq_id); - memset(scq_info->irq_name, 0, SPFC_IRQ_NAME_MAX); - scq_info->irq_id = 0; - scq_info->msix_entry_idx = 0; -} - -static void spfc_init_scq_info(struct spfc_hba_info *hba, struct cqm_queue *cqm_scq, - u32 queue_id, struct spfc_scq_info **scq_info) -{ - FC_CHECK_RETURN_VOID(hba); - FC_CHECK_RETURN_VOID(cqm_scq); - FC_CHECK_RETURN_VOID(scq_info); - - *scq_info = &hba->scq_info[queue_id]; - (*scq_info)->queue_id = queue_id; - (*scq_info)->scqn = cqm_scq->index; - (*scq_info)->hba = (void *)hba; - - (*scq_info)->cqm_scq_info = cqm_scq; - (*scq_info)->wqe_num_per_buf = - cqm_scq->q_room_buf_1.buf_size / SPFC_SCQE_SIZE; - (*scq_info)->wqe_size = SPFC_SCQE_SIZE; - (*scq_info)->valid_wqe_num = (SPFC_SCQ_IS_STS(queue_id) ? SPFC_STS_SCQ_DEPTH - : SPFC_CMD_SCQ_DEPTH); - (*scq_info)->scqc_cq_depth = (SPFC_SCQ_IS_STS(queue_id) ? 
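/*
 * spfc_alloc_scq_int() above initializes the tasklet before request_irq()
 * so the hard-IRQ handler can safely schedule it from the very first
 * interrupt, and spfc_free_scq_int() reverses the steps: disable the
 * MSI-X entry, free_irq(), then tasklet_kill(). The ordering, sketched
 * with the hypothetical struct my_cq and my_cq_tasklet() from the
 * previous example:
 */
#include <linux/interrupt.h>

irqreturn_t my_irq_handler(int irq, void *data);

static int my_irq_setup(struct my_cq *cq, unsigned int irq, const char *name)
{
        /* The tasklet must be ready before the first interrupt can fire. */
        tasklet_init(&cq->tasklet, my_cq_tasklet, (unsigned long)cq);

        return request_irq(irq, my_irq_handler, 0, name, cq);
}

static void my_irq_teardown(struct my_cq *cq, unsigned int irq)
{
        free_irq(irq, cq);              /* no new hard IRQs after this */
        tasklet_kill(&cq->tasklet);     /* wait out any scheduled run */
}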
SPFC_STS_SCQC_CQ_DEPTH - : SPFC_CMD_SCQC_CQ_DEPTH); - (*scq_info)->scqc_ci_type = SPFC_STS_SCQ_CI_TYPE; - (*scq_info)->ci = 0; - (*scq_info)->ci_owner = 1; -} - -static void spfc_init_scq_header(struct wq_header *queue_header) -{ - FC_CHECK_RETURN_VOID(queue_header); - - memset(queue_header, 0, sizeof(struct wq_header)); - - /* Obit default is 1 */ - queue_header->db_record.pmsn = 1 << UNF_SHIFT_15; - queue_header->db_record.dump_pmsn = queue_header->db_record.pmsn; - queue_header->ci_record.cmsn = 1 << UNF_SHIFT_15; - queue_header->ci_record.dump_cmsn = queue_header->ci_record.cmsn; - - /* Big endian convert */ - spfc_cpu_to_big64((void *)queue_header, sizeof(struct wq_header)); -} - -static void spfc_cfg_scq_ctx(struct spfc_scq_info *scq_info, - struct spfc_cq_qinfo *scq_ctx) -{ - struct cqm_queue *cqm_scq_info = NULL; - struct spfc_queue_info_bus queue_bus; - u64 parity = 0; - - FC_CHECK_RETURN_VOID(scq_info); - - cqm_scq_info = scq_info->cqm_scq_info; - - scq_ctx->pcie_template_hi = 0; - scq_ctx->cur_cqe_gpa = cqm_scq_info->q_room_buf_1.buf_list->pa >> SPFC_CQE_GPA_SHIFT; - scq_ctx->pi = 0; - scq_ctx->pi_o = 1; - scq_ctx->ci = scq_info->ci; - scq_ctx->ci_o = scq_info->ci_owner; - scq_ctx->c_eqn_msi_x = scq_info->msix_entry_idx; - scq_ctx->ci_type = scq_info->scqc_ci_type; - scq_ctx->cq_depth = scq_info->scqc_cq_depth; - scq_ctx->armq = SPFC_ARMQ_IDLE; - scq_ctx->cur_cqe_cnt = 0; - scq_ctx->cqe_max_cnt = 0; - scq_ctx->cqe_dmaattr_idx = 0; - scq_ctx->cq_so_ro = 0; - scq_ctx->init_mode = SPFC_CQ_INT_MODE; - scq_ctx->next_o = 1; - scq_ctx->loop_o = 1; - scq_ctx->next_cq_wqe_page_gpa = cqm_scq_info->q_room_buf_1.buf_list[ARRAY_INDEX_1].pa >> - SPFC_NEXT_CQE_GPA_SHIFT; - scq_ctx->pcie_template_lo = 0; - - scq_ctx->ci_gpa = (cqm_scq_info->q_header_paddr + offsetof(struct wq_header, ci_record)) >> - SPFC_CQE_GPA_SHIFT; - - memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus)); - queue_bus.bus[ARRAY_INDEX_0] |= ((u64)(scq_info->scqn & SPFC_SCQN_MASK)); /* bits 20 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->pcie_template_lo)) << UNF_SHIFT_20); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->ci_gpa & SPFC_SCQ_CTX_CI_GPA_MASK)) << - UNF_SHIFT_23); /* bits 28 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->cqe_dmaattr_idx)) << UNF_SHIFT_51); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->cq_so_ro)) << UNF_SHIFT_57); /* bits 2 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->init_mode)) << UNF_SHIFT_59); /* bits 2 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->c_eqn_msi_x & - SPFC_SCQ_CTX_C_EQN_MSI_X_MASK)) << UNF_SHIFT_61); - queue_bus.bus[ARRAY_INDEX_1] |= ((u64)(scq_ctx->c_eqn_msi_x >> UNF_SHIFT_3)); /* bits 7 */ - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(scq_ctx->ci_type)) << UNF_SHIFT_7); /* bits 1 */ - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(scq_ctx->cq_depth)) << UNF_SHIFT_8); /* bits 3 */ - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(scq_ctx->cqe_max_cnt)) << UNF_SHIFT_11); - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(scq_ctx->pcie_template_hi)) << UNF_SHIFT_19); - - parity = spfc_get_parity_value(queue_bus.bus, SPFC_SCQC_BUS_ROW, SPFC_SCQC_BUS_COL); - scq_ctx->parity_0 = parity & SPFC_PARITY_MASK; - scq_ctx->parity_1 = (parity >> UNF_SHIFT_1) & SPFC_PARITY_MASK; - scq_ctx->parity_2 = (parity >> UNF_SHIFT_2) & SPFC_PARITY_MASK; - - spfc_cpu_to_big64((void *)scq_ctx, sizeof(struct spfc_cq_qinfo)); -} - -static u32 spfc_creat_scqc_via_cmdq_sync(struct spfc_hba_info *hba, - struct spfc_cq_qinfo *scqc, u32 scqn) -{ -#define SPFC_INIT_SCQC_TIMEOUT 3000 - int ret; - 
u32 covrt_size; - struct spfc_cmdqe_creat_scqc init_scqc_cmd; - struct sphw_cmd_buf *cmdq_in_buf; - - cmdq_in_buf = sphw_alloc_cmd_buf(hba->dev_handle); - if (!cmdq_in_buf) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]cmdq in_cmd_buf alloc failed"); - - SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_INIT_SCQC); - return UNF_RETURN_ERROR; - } - - memset(&init_scqc_cmd, 0, sizeof(init_scqc_cmd)); - init_scqc_cmd.wd0.task_type = SPFC_TASK_T_INIT_SCQC; - init_scqc_cmd.wd1.scqn = SPFC_LSW(scqn); - covrt_size = sizeof(init_scqc_cmd) - sizeof(init_scqc_cmd.scqc); - spfc_cpu_to_big32(&init_scqc_cmd, covrt_size); - - /* scqc is already big endian */ - memcpy(init_scqc_cmd.scqc, scqc, sizeof(*scqc)); - memcpy(cmdq_in_buf->buf, &init_scqc_cmd, sizeof(init_scqc_cmd)); - cmdq_in_buf->size = sizeof(init_scqc_cmd); - - ret = sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0, - cmdq_in_buf, NULL, NULL, - SPFC_INIT_SCQC_TIMEOUT, SPHW_CHANNEL_FC); - sphw_free_cmd_buf(hba->dev_handle, cmdq_in_buf); - if (ret) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Send creat scqc via cmdq failed, ret=%d", - ret); - - SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_INIT_SCQC); - return UNF_RETURN_ERROR; - } - - SPFC_IO_STAT(hba, SPFC_TASK_T_INIT_SCQC); - - return RETURN_OK; -} - -static u32 spfc_delete_ssqc_via_cmdq_sync(struct spfc_hba_info *hba, u32 xid, - u64 context_gpa, u32 entry_count) -{ -#define SPFC_DELETE_SSQC_TIMEOUT 3000 - int ret = RETURN_OK; - struct spfc_cmdqe_delete_ssqc delete_ssqc_cmd; - struct sphw_cmd_buf *cmdq_in_buf = NULL; - - cmdq_in_buf = sphw_alloc_cmd_buf(hba->dev_handle); - if (!cmdq_in_buf) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]cmdq in_cmd_buf alloc failed"); - return UNF_RETURN_ERROR; - } - - memset(&delete_ssqc_cmd, 0, sizeof(delete_ssqc_cmd)); - delete_ssqc_cmd.wd0.task_type = SPFC_TASK_T_CLEAR_SSQ_CONTEXT; - delete_ssqc_cmd.wd0.xid = xid; - delete_ssqc_cmd.wd0.entry_count = entry_count; - delete_ssqc_cmd.wd1.scqn = SPFC_LSW(0); - delete_ssqc_cmd.context_gpa_hi = SPFC_HIGH_32_BITS(context_gpa); - delete_ssqc_cmd.context_gpa_lo = SPFC_LOW_32_BITS(context_gpa); - spfc_cpu_to_big32(&delete_ssqc_cmd, sizeof(delete_ssqc_cmd)); - memcpy(cmdq_in_buf->buf, &delete_ssqc_cmd, sizeof(delete_ssqc_cmd)); - cmdq_in_buf->size = sizeof(delete_ssqc_cmd); - - ret = sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0, - cmdq_in_buf, NULL, NULL, - SPFC_DELETE_SSQC_TIMEOUT, - SPHW_CHANNEL_FC); - - sphw_free_cmd_buf(hba->dev_handle, cmdq_in_buf); - - return ret; -} - -static void spfc_free_ssq_qpc(struct spfc_hba_info *hba, u32 free_sq_num) -{ - u32 global_sq_index = 0; - u32 qid = 0; - struct spfc_parent_shared_queue_info *ssq_info = NULL; - - SPFC_FUNCTION_ENTER; - for (global_sq_index = 0; global_sq_index < free_sq_num;) { - for (qid = 1; qid <= SPFC_SQ_NUM_PER_QPC; qid++) { - ssq_info = &hba->parent_queue_mgr->shared_queue[global_sq_index]; - if (qid == SPFC_SQ_NUM_PER_QPC || - global_sq_index == free_sq_num - 1) { - if (ssq_info->parent_ctx.cqm_parent_ctx_obj) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[INFO]qid 0x%x, global_sq_index 0x%x, free_sq_num 0x%x", - qid, global_sq_index, free_sq_num); - cqm3_object_delete(&ssq_info->parent_ctx - .cqm_parent_ctx_obj->object); - ssq_info->parent_ctx.cqm_parent_ctx_obj = NULL; - } - } - global_sq_index++; - if (global_sq_index >= free_sq_num) - break; - } - } -} - -void spfc_free_ssq(void *handle, u32 free_sq_num) -{ -#define SPFC_FREE_SSQ_WAIT_MS 1000 - u32 global_sq_index = 0; - u32 qid = 0; - struct spfc_parent_shared_queue_info *ssq_info = NULL; - struct 
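/*
 * Each *_via_cmdq_sync() helper above runs the same fixed sequence:
 * allocate a command buffer, build the command in host order, byte-swap
 * the header to big endian (the context payload is already big endian),
 * copy it in, issue it synchronously with a timeout, and free the buffer
 * whatever the outcome. The shared skeleton, as a sketch that reuses the
 * sphw_* calls with the signatures seen above:
 */
static int my_cmdq_sync(void *dev_handle, const void *cmd, u32 cmd_len,
                        u32 timeout_ms)
{
        struct sphw_cmd_buf *buf = sphw_alloc_cmd_buf(dev_handle);
        int ret;

        if (!buf)
                return -ENOMEM;

        memcpy(buf->buf, cmd, cmd_len); /* caller already fixed endianness */
        buf->size = cmd_len;

        ret = sphw_cmdq_detail_resp(dev_handle, COMM_MOD_FC, 0, buf,
                                    NULL, NULL, timeout_ms, SPHW_CHANNEL_FC);
        sphw_free_cmd_buf(dev_handle, buf);     /* always release the buffer */

        return ret;
}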
spfc_parent_ssq_info *sq_ctrl = NULL; - struct cqm_qpc_mpt *prnt_ctx = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 entry_count = 0; - struct spfc_hba_info *hba = NULL; - - SPFC_FUNCTION_ENTER; - - hba = (struct spfc_hba_info *)handle; - for (global_sq_index = 0; global_sq_index < free_sq_num;) { - for (qid = 1; qid <= SPFC_SQ_NUM_PER_QPC; qid++) { - ssq_info = &hba->parent_queue_mgr->shared_queue[global_sq_index]; - sq_ctrl = &ssq_info->parent_ssq_info; - /* Free data cos */ - spfc_free_link_list_wpg(sq_ctrl); - if (sq_ctrl->queue_head_original) { - pci_unmap_single(hba->pci_dev, - sq_ctrl->queue_hdr_phy_addr_original, - sizeof(struct spfc_queue_header) + - SPFC_SQ_HEADER_ADDR_ALIGN_SIZE, - DMA_BIDIRECTIONAL); - kfree(sq_ctrl->queue_head_original); - sq_ctrl->queue_head_original = NULL; - } - if (qid == SPFC_SQ_NUM_PER_QPC || global_sq_index == free_sq_num - 1) { - if (ssq_info->parent_ctx.cqm_parent_ctx_obj) { - prnt_ctx = ssq_info->parent_ctx.cqm_parent_ctx_obj; - entry_count = (qid == SPFC_SQ_NUM_PER_QPC ? - SPFC_SQ_NUM_PER_QPC : - free_sq_num - global_sq_index); - ret = spfc_delete_ssqc_via_cmdq_sync(hba, prnt_ctx->xid, - prnt_ctx->paddr, - entry_count); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]ucode delete ssq fail, glbindex 0x%x, qid 0x%x, glsqindex 0x%x", - global_sq_index, qid, free_sq_num); - } - } - } - global_sq_index++; - if (global_sq_index >= free_sq_num) - break; - } - } - - msleep(SPFC_FREE_SSQ_WAIT_MS); - - spfc_free_ssq_qpc(hba, free_sq_num); -} - -u32 spfc_creat_ssqc_via_cmdq_sync(struct spfc_hba_info *hba, - struct spfc_ssq_parent_context *ssqc, - u32 xid, u64 context_gpa) -{ -#define SPFC_INIT_SSQC_TIMEOUT 3000 - int ret; - u32 covrt_size; - struct spfc_cmdqe_creat_ssqc create_ssqc_cmd; - struct sphw_cmd_buf *cmdq_in_buf = NULL; - - cmdq_in_buf = sphw_alloc_cmd_buf(hba->dev_handle); - if (!cmdq_in_buf) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]cmdq in_cmd_buf alloc failed"); - return UNF_RETURN_ERROR; - } - - memset(&create_ssqc_cmd, 0, sizeof(create_ssqc_cmd)); - create_ssqc_cmd.wd0.task_type = SPFC_TASK_T_CREATE_SSQ_CONTEXT; - create_ssqc_cmd.wd0.xid = xid; - create_ssqc_cmd.wd1.scqn = SPFC_LSW(0); - create_ssqc_cmd.context_gpa_hi = SPFC_HIGH_32_BITS(context_gpa); - create_ssqc_cmd.context_gpa_lo = SPFC_LOW_32_BITS(context_gpa); - covrt_size = sizeof(create_ssqc_cmd) - sizeof(create_ssqc_cmd.ssqc); - spfc_cpu_to_big32(&create_ssqc_cmd, covrt_size); - - /* scqc is already big endian */ - memcpy(create_ssqc_cmd.ssqc, ssqc, sizeof(*ssqc)); - memcpy(cmdq_in_buf->buf, &create_ssqc_cmd, sizeof(create_ssqc_cmd)); - cmdq_in_buf->size = sizeof(create_ssqc_cmd); - ret = sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0, - cmdq_in_buf, NULL, NULL, - SPFC_INIT_SSQC_TIMEOUT, SPHW_CHANNEL_FC); - sphw_free_cmd_buf(hba->dev_handle, cmdq_in_buf); - if (ret) - return UNF_RETURN_ERROR; - return RETURN_OK; -} - -void spfc_init_sq_prnt_ctxt_sq_qinfo(struct spfc_sq_qinfo *sq_info, - struct spfc_parent_ssq_info *ssq) -{ - struct spfc_wqe_page *head_wqe_page = NULL; - struct spfc_sq_qinfo *prnt_sq_ctx = NULL; - struct spfc_queue_info_bus queue_bus; - - SPFC_FUNCTION_ENTER; - - /* Obtains the Parent Context address */ - head_wqe_page = SPFC_GET_SQ_HEAD(ssq); - - prnt_sq_ctx = sq_info; - - /* The PMSN is updated by the host driver */ - prnt_sq_ctx->pmsn_type = SPFC_PMSN_CI_TYPE_FROM_HOST; - - /* Indicates the value of O of the valid SQE in the current round of SQ. - * * The value of Linked List SQ is always one, and the value of 0 is - * invalid. 
- */ - prnt_sq_ctx->loop_o = - SPFC_OWNER_DRIVER_PRODUCT; /* current valid o-bit */ - - /* should be opposite from loop_o */ - prnt_sq_ctx->cur_wqe_o = ~(prnt_sq_ctx->loop_o); - - /* the first sqe's gpa */ - prnt_sq_ctx->cur_sqe_gpa = head_wqe_page->wpg_phy_addr; - - /* Indicates the GPA of the Queue header that is initialized to the SQ - * in * the Host memory. The value must be 16-byte aligned. - */ - prnt_sq_ctx->pmsn_gpa = ssq->queue_hdr_phy_addr; - if (wqe_pre_load != 0) - prnt_sq_ctx->pmsn_gpa |= SPFC_SQ_LINK_PRE; - - /* This field is used to fill in the dmaattr_idx field of the ComboDMA. - * The default value is 0 - */ - prnt_sq_ctx->sqe_dmaattr_idx = SPFC_DMA_ATTR_OFST; - - /* This field is filled using the value of RO_SO in the SGL0 of the - * ComboDMA - */ - prnt_sq_ctx->sq_so_ro = SPFC_PCIE_RELAXED_ORDERING; - - prnt_sq_ctx->ring = ssq->queue_style; - - /* This field is used to set the SGL0 field of the Child solicDMA */ - prnt_sq_ctx->zerocopy_dmaattr_idx = SPFC_DMA_ATTR_OFST; - - prnt_sq_ctx->zerocopy_so_ro = SPFC_PCIE_RELAXED_ORDERING; - prnt_sq_ctx->enable_256 = SPFC_256BWQE_ENABLE; - - /* PCIe attribute information */ - prnt_sq_ctx->pcie_template = SPFC_PCIE_TEMPLATE; - - memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus)); - queue_bus.bus[ARRAY_INDEX_0] |= ((u64)(ssq->context_id & SPFC_SSQ_CTX_MASK)); /* bits 20 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->sqe_dmaattr_idx)) << UNF_SHIFT_20); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->sq_so_ro)) << UNF_SHIFT_26); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->ring)) << UNF_SHIFT_28); /* bits 1 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->zerocopy_dmaattr_idx)) - << UNF_SHIFT_29); /* bits 6 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->zerocopy_so_ro)) << UNF_SHIFT_35); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->pcie_template)) << UNF_SHIFT_37); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->pmsn_gpa >> UNF_SHIFT_4)) - << UNF_SHIFT_43); /* bits 21 */ - queue_bus.bus[ARRAY_INDEX_1] |= ((u64)(prnt_sq_ctx->pmsn_gpa >> UNF_SHIFT_25)); - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(prnt_sq_ctx->pmsn_type)) << UNF_SHIFT_39); - prnt_sq_ctx->parity = spfc_get_parity_value(queue_bus.bus, SPFC_SQC_BUS_ROW, - SPFC_SQC_BUS_COL); - spfc_cpu_to_big64(prnt_sq_ctx, sizeof(struct spfc_sq_qinfo)); - - SPFC_FUNCTION_RETURN; -} - -u32 spfc_create_ssq(void *handle) -{ - u32 ret = RETURN_OK; - u32 global_sq_index = 0; - u32 qid = 0; - struct cqm_qpc_mpt *prnt_ctx = NULL; - struct spfc_parent_shared_queue_info *ssq_info = NULL; - struct spfc_parent_ssq_info *sq_ctrl = NULL; - u32 queue_header_alloc_size = 0; - struct spfc_wqe_page *head_wpg = NULL; - struct spfc_ssq_parent_context prnt_ctx_info; - struct spfc_sq_qinfo *sq_info = NULL; - struct spfc_scq_qinfo *psq_pretchinfo = NULL; - struct spfc_queue_info_bus queue_bus; - struct spfc_fc_key_section *keysection = NULL; - struct spfc_hba_info *hba = NULL; - dma_addr_t origin_addr; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - hba = (struct spfc_hba_info *)handle; - for (global_sq_index = 0; global_sq_index < SPFC_MAX_SSQ_NUM;) { - qid = 0; - prnt_ctx = cqm3_object_qpc_mpt_create(hba->dev_handle, SERVICE_T_FC, - CQM_OBJECT_SERVICE_CTX, - SPFC_CNTX_SIZE_256B, NULL, - CQM_INDEX_INVALID); - if (!prnt_ctx) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create ssq context failed, CQM_INDEX is 0x%x", - CQM_INDEX_INVALID); - goto ssq_ctx_create_fail; - } - memset(&prnt_ctx_info, 0, sizeof(prnt_ctx_info)); - 
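For reference, the key-section setup that follows splits the exchange context id (xid) into a high and a low field before the section is byte-swapped to big endian. A minimal, self-contained sketch of that split; the mask values here are hypothetical stand-ins for SPFC_KEYSECTION_XID_H_MASK and SPFC_KEYSECTION_XID_L_MASK, and UNF_SHIFT_16 is assumed to be 16:

#include <stdint.h>
#include <stdio.h>

#define XID_H_MASK 0xfu     /* assumed: selects the bits above the low 16 */
#define XID_L_MASK 0xffffu  /* assumed: selects the low 16 bits */

int main(void)
{
	uint32_t xid = 0x12abc;	/* sample exchange context id */
	uint32_t xid_h = (xid >> 16) & XID_H_MASK;
	uint32_t xid_l = xid & XID_L_MASK;

	/* prints xid_h=0x1 xid_l=0x2abc */
	printf("xid_h=0x%x xid_l=0x%x\n", xid_h, xid_l);
	return 0;
}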
keysection = (struct spfc_fc_key_section *)&prnt_ctx_info; - keysection->xid_h = (prnt_ctx->xid >> UNF_SHIFT_16) & SPFC_KEYSECTION_XID_H_MASK; - keysection->xid_l = prnt_ctx->xid & SPFC_KEYSECTION_XID_L_MASK; - spfc_cpu_to_big32(keysection, sizeof(struct spfc_fc_key_section)); - for (qid = 0; qid < SPFC_SQ_NUM_PER_QPC; qid++) { - sq_info = (struct spfc_sq_qinfo *)((u8 *)(&prnt_ctx_info) + ((qid + 1) * - SPFC_SQ_SPACE_OFFSET)); - ssq_info = &hba->parent_queue_mgr->shared_queue[global_sq_index]; - ssq_info->parent_ctx.cqm_parent_ctx_obj = prnt_ctx; - /* Initialize struct spfc_parent_sq_info */ - sq_ctrl = &ssq_info->parent_ssq_info; - sq_ctrl->hba = (void *)hba; - sq_ctrl->context_id = prnt_ctx->xid; - sq_ctrl->sq_queue_id = qid + SPFC_SQ_QID_START_PER_QPC; - sq_ctrl->cache_id = FC_CALC_CID(prnt_ctx->xid); - sq_ctrl->sqn = global_sq_index; - sq_ctrl->max_sqe_num = hba->exi_count; - /* Reduce one Link Wqe */ - sq_ctrl->wqe_num_per_buf = hba->sq_wpg_pool.wqe_per_wpg - 1; - sq_ctrl->wqe_size = SPFC_SQE_SIZE; - sq_ctrl->wqe_offset = 0; - sq_ctrl->head_start_cmsn = 0; - sq_ctrl->head_end_cmsn = SPFC_GET_WP_END_CMSN(0, sq_ctrl->wqe_num_per_buf); - sq_ctrl->last_pmsn = 0; - /* Linked List SQ Owner Bit 1 valid,0 invalid */ - sq_ctrl->last_pi_owner = 1; - atomic_set(&sq_ctrl->sq_valid, true); - sq_ctrl->accum_wqe_cnt = 0; - sq_ctrl->service_type = SPFC_SERVICE_TYPE_FC_SQ; - sq_ctrl->queue_style = (global_sq_index == SPFC_DIRECTWQE_SQ_INDEX) ? - SPFC_QUEUE_RING_STYLE : SPFC_QUEUE_LINK_STYLE; - INIT_LIST_HEAD(&sq_ctrl->list_linked_list_sq); - atomic_set(&sq_ctrl->wqe_page_cnt, 0); - atomic_set(&sq_ctrl->sq_db_cnt, 0); - atomic_set(&sq_ctrl->sqe_minus_cqe_cnt, 1); - atomic_set(&sq_ctrl->sq_wqe_cnt, 0); - atomic_set(&sq_ctrl->sq_cqe_cnt, 0); - spin_lock_init(&sq_ctrl->parent_sq_enqueue_lock); - memset(sq_ctrl->io_stat, 0, sizeof(sq_ctrl->io_stat)); - - /* Allocate and initialize the Queue Header space. 64B - * alignment is required. * Additional 64B is applied - * for alignment - */ - queue_header_alloc_size = sizeof(struct spfc_queue_header) + - SPFC_SQ_HEADER_ADDR_ALIGN_SIZE; - sq_ctrl->queue_head_original = kmalloc(queue_header_alloc_size, GFP_ATOMIC); - if (!sq_ctrl->queue_head_original) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SQ(0x%x) create SQ queue header failed", - global_sq_index); - goto ssq_qheader_create_fail; - } - - memset((u8 *)sq_ctrl->queue_head_original, 0, queue_header_alloc_size); - - sq_ctrl->queue_hdr_phy_addr_original = - pci_map_single(hba->pci_dev, sq_ctrl->queue_head_original, - queue_header_alloc_size, DMA_BIDIRECTIONAL); - origin_addr = sq_ctrl->queue_hdr_phy_addr_original; - if (pci_dma_mapping_error(hba->pci_dev, origin_addr)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SQ(0x%x) SQ queue header DMA mapping failed", - global_sq_index); - goto ssq_qheader_dma_map_fail; - } - - /* Obtains the 64B alignment address */ - sq_ctrl->queue_header = (struct spfc_queue_header *)(uintptr_t) - SPFC_ADDR_64_ALIGN((u64)((uintptr_t)(sq_ctrl->queue_head_original))); - sq_ctrl->queue_hdr_phy_addr = SPFC_ADDR_64_ALIGN(origin_addr); - - /* Each SQ is allocated with a Wqe Page by default. 
The - * WqePageCnt is incremented by one - */ - head_wpg = spfc_add_one_wqe_page(sq_ctrl); - if (!head_wpg) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]SQ(0x%x) create SQ first wqe page failed", - global_sq_index); - goto ssq_headwpg_create_fail; - } - - atomic_inc(&sq_ctrl->wqe_page_cnt); - spfc_init_sq_prnt_ctxt_sq_qinfo(sq_info, sq_ctrl); - global_sq_index++; - if (global_sq_index == SPFC_MAX_SSQ_NUM) - break; - } - psq_pretchinfo = &prnt_ctx_info.sq_pretchinfo; - psq_pretchinfo->hw_scqc_config.info.rq_th2_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.rq_th1_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.rq_th0_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.rq_min_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.sq_th2_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.sq_th1_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.sq_th0_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.sq_min_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.scq_n = (u64)0; - psq_pretchinfo->hw_scqc_config.info.parity = 0; - - memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus)); - queue_bus.bus[ARRAY_INDEX_0] = psq_pretchinfo->hw_scqc_config.pctxt_val1; - psq_pretchinfo->hw_scqc_config.info.parity = - spfc_get_parity_value(queue_bus.bus, SPFC_HW_SCQC_BUS_ROW, - SPFC_HW_SCQC_BUS_COL); - spfc_cpu_to_big64(psq_pretchinfo, sizeof(struct spfc_scq_qinfo)); - ret = spfc_creat_ssqc_via_cmdq_sync(hba, &prnt_ctx_info, - prnt_ctx->xid, prnt_ctx->paddr); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]SQ(0x%x) create ssqc failed.", - global_sq_index); - goto ssq_cmdqsync_fail; - } - } - - return RETURN_OK; - -ssq_headwpg_create_fail: - pci_unmap_single(hba->pci_dev, sq_ctrl->queue_hdr_phy_addr_original, - queue_header_alloc_size, DMA_BIDIRECTIONAL); - -ssq_qheader_dma_map_fail: - kfree(sq_ctrl->queue_head_original); - sq_ctrl->queue_head_original = NULL; - -ssq_qheader_create_fail: - cqm3_object_delete(&prnt_ctx->object); - ssq_info->parent_ctx.cqm_parent_ctx_obj = NULL; - if (qid > 0) { - while (qid--) { - ssq_info = &hba->parent_queue_mgr->shared_queue[global_sq_index - qid]; - ssq_info->parent_ctx.cqm_parent_ctx_obj = NULL; - } - } - -ssq_ctx_create_fail: -ssq_cmdqsync_fail: - if (global_sq_index > 0) - spfc_free_ssq(hba, global_sq_index); - - return UNF_RETURN_ERROR; -} - -static u32 spfc_create_scq(struct spfc_hba_info *hba) -{ - u32 ret = UNF_RETURN_ERROR; - u32 scq_index = 0; - u32 scq_cfg_num = 0; - struct cqm_queue *cqm_scq = NULL; - void *handle = NULL; - struct spfc_scq_info *scq_info = NULL; - struct spfc_cq_qinfo cq_qinfo; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - handle = hba->dev_handle; - /* Create SCQ by CQM interface */ - for (scq_index = 0; scq_index < SPFC_TOTAL_SCQ_NUM; scq_index++) { - /* - * 1. Create/Allocate SCQ - * * - * Notice: SCQ[0, 2, 4 ...]--->CMD SCQ, - * SCQ[1, 3, 5 ...]--->STS SCQ, - * SCQ[SPFC_TOTAL_SCQ_NUM-1]--->Defaul SCQ - */ - cqm_scq = cqm3_object_nonrdma_queue_create(handle, SERVICE_T_FC, - CQM_OBJECT_NONRDMA_SCQ, - SPFC_SCQ_IS_STS(scq_index) ? - SPFC_STS_SCQ_DEPTH : - SPFC_CMD_SCQ_DEPTH, - SPFC_SCQE_SIZE, hba); - if (!cqm_scq) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_WARN, "[err]Create scq failed"); - - goto free_scq; - } - - /* 2. Initialize SCQ (info) */ - spfc_init_scq_info(hba, cqm_scq, scq_index, &scq_info); - - /* 3. 
Allocate & Initialize SCQ interrupt */ - ret = spfc_alloc_scq_int(scq_info); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate scq interrupt failed"); - - cqm3_object_delete(&cqm_scq->object); - memset(scq_info, 0, sizeof(struct spfc_scq_info)); - goto free_scq; - } - - /* 4. Initialize SCQ queue header */ - spfc_init_scq_header((struct wq_header *)(void *)cqm_scq->q_header_vaddr); - - /* 5. Initialize & Create SCQ CTX */ - memset(&cq_qinfo, 0, sizeof(cq_qinfo)); - spfc_cfg_scq_ctx(scq_info, &cq_qinfo); - ret = spfc_creat_scqc_via_cmdq_sync(hba, &cq_qinfo, scq_info->scqn); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Create scq context failed"); - - cqm3_object_delete(&cqm_scq->object); - memset(scq_info, 0, sizeof(struct spfc_scq_info)); - goto free_scq; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Create SCQ[%u] Scqn=%u WqeNum=%u WqeSize=%u WqePerBuf=%u CqDepth=%u CiType=%u irq=%u msix=%u", - scq_info->queue_id, scq_info->scqn, - scq_info->valid_wqe_num, scq_info->wqe_size, - scq_info->wqe_num_per_buf, scq_info->scqc_cq_depth, - scq_info->scqc_ci_type, scq_info->irq_id, - scq_info->msix_entry_idx); - } - - /* Last SCQ is used to handle SCQE delivery access when clearing buffer - */ - hba->default_scqn = scq_info->scqn; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Default Scqn=%u CqmScqIndex=%u", hba->default_scqn, - cqm_scq->index); - - return RETURN_OK; - -free_scq: - spfc_flush_scq_ctx(hba); - - scq_cfg_num = scq_index; - for (scq_index = 0; scq_index < scq_cfg_num; scq_index++) { - scq_info = &hba->scq_info[scq_index]; - spfc_free_scq_int(scq_info); - cqm_scq = scq_info->cqm_scq_info; - cqm3_object_delete(&cqm_scq->object); - memset(scq_info, 0, sizeof(struct spfc_scq_info)); - } - - return UNF_RETURN_ERROR; -} - -static void spfc_destroy_scq(struct spfc_hba_info *hba) -{ - u32 scq_index = 0; - struct cqm_queue *cqm_scq = NULL; - struct spfc_scq_info *scq_info = NULL; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Start destroy total %d SCQ", SPFC_TOTAL_SCQ_NUM); - - FC_CHECK_RETURN_VOID(hba); - - /* Use CQM to delete SCQ */ - for (scq_index = 0; scq_index < SPFC_TOTAL_SCQ_NUM; scq_index++) { - scq_info = &hba->scq_info[scq_index]; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ALL, - "[info]Destroy SCQ%u, Scqn=%u, Irq=%u, msix=%u, name=%s", - scq_index, scq_info->scqn, scq_info->irq_id, - scq_info->msix_entry_idx, scq_info->irq_name); - - spfc_free_scq_int(scq_info); - cqm_scq = scq_info->cqm_scq_info; - cqm3_object_delete(&cqm_scq->object); - memset(scq_info, 0, sizeof(struct spfc_scq_info)); - } -} - -static void spfc_init_srq_info(struct spfc_hba_info *hba, struct cqm_queue *cqm_srq, - struct spfc_srq_info *srq_info) -{ - FC_CHECK_RETURN_VOID(hba); - FC_CHECK_RETURN_VOID(cqm_srq); - FC_CHECK_RETURN_VOID(srq_info); - - srq_info->hba = (void *)hba; - - srq_info->cqm_srq_info = cqm_srq; - srq_info->wqe_num_per_buf = cqm_srq->q_room_buf_1.buf_size / SPFC_SRQE_SIZE - 1; - srq_info->wqe_size = SPFC_SRQE_SIZE; - srq_info->valid_wqe_num = cqm_srq->valid_wqe_num; - srq_info->pi = 0; - srq_info->pi_owner = SPFC_SRQ_INIT_LOOP_O; - srq_info->pmsn = 0; - srq_info->srqn = cqm_srq->index; - srq_info->first_rqe_recv_dma = 0; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Init srq info(srq index 0x%x) valid wqe num 0x%x, buffer size 0x%x, wqe num per buf 0x%x", - cqm_srq->index, srq_info->valid_wqe_num, - cqm_srq->q_room_buf_1.buf_size, srq_info->wqe_num_per_buf); -} - -static void 
spfc_init_srq_header(struct wq_header *queue_header) -{ - FC_CHECK_RETURN_VOID(queue_header); - - memset(queue_header, 0, sizeof(struct wq_header)); -} - -/* - *Function Name : spfc_get_srq_entry - *Function Description: Obtain RQE in SRQ via PI. - *Input Parameters : *srq_info, - * **linked_rqe, - * position - *Output Parameters : N/A - *Return Type : struct spfc_rqe* - */ -static struct spfc_rqe *spfc_get_srq_entry(struct spfc_srq_info *srq_info, - struct spfc_rqe **linked_rqe, u16 position) -{ - u32 buf_id = 0; - u32 wqe_num_per_buf = 0; - u16 buf_offset = 0; - struct cqm_buf_list *buf = NULL; - - FC_CHECK_RETURN_VALUE(srq_info, NULL); - - wqe_num_per_buf = srq_info->wqe_num_per_buf; - - buf_id = position / wqe_num_per_buf; - buf = &srq_info->cqm_srq_info->q_room_buf_1.buf_list[buf_id]; - buf_offset = position % ((u16)wqe_num_per_buf); - - if (buf_offset + 1 == wqe_num_per_buf) - *linked_rqe = (struct spfc_rqe *)(buf->va) + wqe_num_per_buf; - else - *linked_rqe = NULL; - - return (struct spfc_rqe *)(buf->va) + buf_offset; -} - -void spfc_post_els_srq_wqe(struct spfc_srq_info *srq_info, u16 buf_id) -{ - struct spfc_rqe *rqe = NULL; - struct spfc_rqe tmp_rqe; - struct spfc_rqe *linked_rqe = NULL; - struct wq_header *wq_header = NULL; - struct spfc_drq_buff_entry *buff_entry = NULL; - - FC_CHECK_RETURN_VOID(srq_info); - FC_CHECK_RETURN_VOID(buf_id < srq_info->valid_wqe_num); - - buff_entry = srq_info->els_buff_entry_head + buf_id; - - spin_lock(&srq_info->srq_spin_lock); - - /* Obtain RQE, not include link wqe */ - rqe = spfc_get_srq_entry(srq_info, &linked_rqe, srq_info->pi); - if (!rqe) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]post els srq,get srqe failed, valid wqe num 0x%x, pi 0x%x, pmsn 0x%x", - srq_info->valid_wqe_num, srq_info->pi, - srq_info->pmsn); - - spin_unlock(&srq_info->srq_spin_lock); - return; - } - - /* Initialize RQE */ - /* cs section is not used */ - memset(&tmp_rqe, 0, sizeof(struct spfc_rqe)); - - /* default Obit is invalid, and set valid finally */ - spfc_build_srq_wqe_ctrls(&tmp_rqe, !srq_info->pi_owner, srq_info->pmsn + 1); - - tmp_rqe.bds_sl.buf_addr_hi = SPFC_HIGH_32_BITS(buff_entry->buff_dma); - tmp_rqe.bds_sl.buf_addr_lo = SPFC_LOW_32_BITS(buff_entry->buff_dma); - tmp_rqe.drv_sl.wd0.user_id = buf_id; - - /* convert to big endian */ - spfc_cpu_to_big32(&tmp_rqe, sizeof(struct spfc_rqe)); - - memcpy(rqe, &tmp_rqe, sizeof(struct spfc_rqe)); - - /* reset Obit */ - spfc_set_srq_wqe_owner_be((struct spfc_wqe_ctrl *)(void *)(&rqe->ctrl_sl), - srq_info->pi_owner); - - if (linked_rqe) { - /* Update Obit in linked WQE */ - spfc_set_srq_link_wqe_owner_be((struct spfc_linkwqe *)(void *)linked_rqe, - srq_info->pi_owner, srq_info->pmsn + 1); - } - - /* Update PI and PMSN */ - spfc_update_producer_info((u16)(srq_info->valid_wqe_num), - &srq_info->pi, &srq_info->pi_owner); - - /* pmsn is 16bit. The value is added to the maximum value and is - * automatically reversed - */ - srq_info->pmsn++; - - /* Update pmsn in queue header */ - wq_header = (struct wq_header *)(void *)srq_info->cqm_srq_info->q_header_vaddr; - spfc_update_srq_header(&wq_header->db_record, srq_info->pmsn); - - spin_unlock(&srq_info->srq_spin_lock); -} - -/* - *Function Name : spfc_cfg_srq_ctx - *Function Description: Initialize the CTX of the SRQ that receives the - * immediate data. The RQE of the SRQ - * needs to be - *initialized when the RQE is filled. 
Input Parameters : *srq_info, *srq_ctx, - * sge_size, - * rqe_gpa - *Output Parameters : N/A - *Return Type : void - */ -static void spfc_cfg_srq_ctx(struct spfc_srq_info *srq_info, - struct spfc_srq_ctx *ctx, u32 sge_size, - u64 rqe_gpa) -{ - struct spfc_srq_ctx *srq_ctx = NULL; - struct cqm_queue *cqm_srq_info = NULL; - struct spfc_queue_info_bus queue_bus; - - FC_CHECK_RETURN_VOID(srq_info); - FC_CHECK_RETURN_VOID(ctx); - - cqm_srq_info = srq_info->cqm_srq_info; - srq_ctx = ctx; - srq_ctx->last_rq_pmsn = 0; - srq_ctx->cur_rqe_msn = 0; - srq_ctx->pcie_template = 0; - /* The value of CTX needs to be updated - *when RQE is configured - */ - srq_ctx->cur_rqe_gpa = rqe_gpa; - srq_ctx->cur_sge_v = 0; - srq_ctx->cur_sge_l = 0; - /* The information received by the SRQ is reported through the - *SCQ. The interrupt and ArmCQ are disabled. - */ - srq_ctx->int_mode = 0; - srq_ctx->ceqn_msix = 0; - srq_ctx->cur_sge_remain_len = 0; - srq_ctx->cur_sge_id = 0; - srq_ctx->consant_sge_len = sge_size; - srq_ctx->cur_wqe_o = 0; - srq_ctx->pmsn_type = SPFC_PMSN_CI_TYPE_FROM_HOST; - srq_ctx->bdsl = 0; - srq_ctx->cr = 0; - srq_ctx->csl = 0; - srq_ctx->cf = 0; - srq_ctx->ctrl_sl = 0; - srq_ctx->cur_sge_gpa = 0; - srq_ctx->cur_pmsn_gpa = cqm_srq_info->q_header_paddr; - srq_ctx->prefetch_max_masn = 0; - srq_ctx->cqe_max_cnt = 0; - srq_ctx->cur_cqe_cnt = 0; - srq_ctx->arm_q = 0; - srq_ctx->cq_so_ro = 0; - srq_ctx->cqe_dma_attr_idx = 0; - srq_ctx->rq_so_ro = 0; - srq_ctx->rqe_dma_attr_idx = 0; - srq_ctx->loop_o = SPFC_SRQ_INIT_LOOP_O; - srq_ctx->ring = SPFC_QUEUE_RING; - - memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus)); - queue_bus.bus[ARRAY_INDEX_0] |= ((u64)(cqm_srq_info->q_ctx_paddr >> UNF_SHIFT_4)); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(srq_ctx->rqe_dma_attr_idx & - SPFC_SRQ_CTX_rqe_dma_attr_idx_MASK)) - << UNF_SHIFT_60); /* bits 4 */ - - queue_bus.bus[ARRAY_INDEX_1] |= ((u64)(srq_ctx->rqe_dma_attr_idx >> UNF_SHIFT_4)); - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(srq_ctx->rq_so_ro)) << UNF_SHIFT_2); /* bits 2 */ - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(srq_ctx->cur_pmsn_gpa >> UNF_SHIFT_4)) - << UNF_SHIFT_4); /* bits 60 */ - - queue_bus.bus[ARRAY_INDEX_2] |= ((u64)(srq_ctx->consant_sge_len)); /* bits 17 */ - queue_bus.bus[ARRAY_INDEX_2] |= (((u64)(srq_ctx->pcie_template)) << UNF_SHIFT_17); - - srq_ctx->parity = spfc_get_parity_value((void *)queue_bus.bus, SPFC_SRQC_BUS_ROW, - SPFC_SRQC_BUS_COL); - - spfc_cpu_to_big64((void *)srq_ctx, sizeof(struct spfc_srq_ctx)); -} - -static u32 spfc_creat_srqc_via_cmdq_sync(struct spfc_hba_info *hba, - struct spfc_srq_ctx *srqc, - u64 ctx_gpa) -{ -#define SPFC_INIT_SRQC_TIMEOUT 3000 - - int ret; - u32 covrt_size; - struct spfc_cmdqe_creat_srqc init_srq_cmd; - struct sphw_cmd_buf *cmdq_in_buf; - - cmdq_in_buf = sphw_alloc_cmd_buf(hba->dev_handle); - if (!cmdq_in_buf) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]cmdq in_cmd_buf alloc failed"); - - SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_INIT_SRQC); - return UNF_RETURN_ERROR; - } - - memset(&init_srq_cmd, 0, sizeof(init_srq_cmd)); - init_srq_cmd.wd0.task_type = SPFC_TASK_T_INIT_SRQC; - init_srq_cmd.srqc_gpa_h = SPFC_HIGH_32_BITS(ctx_gpa); - init_srq_cmd.srqc_gpa_l = SPFC_LOW_32_BITS(ctx_gpa); - covrt_size = sizeof(init_srq_cmd) - sizeof(init_srq_cmd.srqc); - spfc_cpu_to_big32(&init_srq_cmd, covrt_size); - - /* srqc is already big-endian */ - memcpy(init_srq_cmd.srqc, srqc, sizeof(*srqc)); - memcpy(cmdq_in_buf->buf, &init_srq_cmd, sizeof(init_srq_cmd)); - cmdq_in_buf->size = sizeof(init_srq_cmd); - - ret = 
sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0, - cmdq_in_buf, NULL, NULL, - SPFC_INIT_SRQC_TIMEOUT, SPHW_CHANNEL_FC); - - sphw_free_cmd_buf(hba->dev_handle, cmdq_in_buf); - - if (ret) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Send creat srqc via cmdq failed, ret=%d", - ret); - - SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_INIT_SRQC); - return UNF_RETURN_ERROR; - } - - SPFC_IO_STAT(hba, SPFC_TASK_T_INIT_SRQC); - - return RETURN_OK; -} - -static void spfc_init_els_srq_wqe(struct spfc_srq_info *srq_info) -{ - u32 rqe_index = 0; - struct spfc_drq_buff_entry *buf_entry = NULL; - - FC_CHECK_RETURN_VOID(srq_info); - - for (rqe_index = 0; rqe_index < srq_info->valid_wqe_num - 1; rqe_index++) { - buf_entry = srq_info->els_buff_entry_head + rqe_index; - spfc_post_els_srq_wqe(srq_info, buf_entry->buff_id); - } -} - -static void spfc_free_els_srq_buff(struct spfc_hba_info *hba, u32 srq_valid_wqe) -{ - u32 buff_index = 0; - struct spfc_srq_info *srq_info = NULL; - struct spfc_drq_buff_entry *buff_entry = NULL; - - FC_CHECK_RETURN_VOID(hba); - - srq_info = &hba->els_srq_info; - - if (!srq_info->els_buff_entry_head) - return; - - for (buff_index = 0; buff_index < srq_valid_wqe; buff_index++) { - buff_entry = &srq_info->els_buff_entry_head[buff_index]; - buff_entry->buff_addr = NULL; - } - - if (srq_info->buf_list.buflist) { - for (buff_index = 0; buff_index < srq_info->buf_list.buf_num; - buff_index++) { - if (srq_info->buf_list.buflist[buff_index].paddr != 0) { - pci_unmap_single(hba->pci_dev, - srq_info->buf_list.buflist[buff_index].paddr, - srq_info->buf_list.buf_size, - DMA_FROM_DEVICE); - srq_info->buf_list.buflist[buff_index].paddr = 0; - } - kfree(srq_info->buf_list.buflist[buff_index].vaddr); - srq_info->buf_list.buflist[buff_index].vaddr = NULL; - } - - kfree(srq_info->buf_list.buflist); - srq_info->buf_list.buflist = NULL; - } - - kfree(srq_info->els_buff_entry_head); - srq_info->els_buff_entry_head = NULL; -} - -static u32 spfc_alloc_els_srq_buff(struct spfc_hba_info *hba, u32 srq_valid_wqe) -{ - u32 req_buff_size = 0; - u32 buff_index = 0; - struct spfc_srq_info *srq_info = NULL; - struct spfc_drq_buff_entry *buff_entry = NULL; - u32 buf_total_size; - u32 buf_num; - u32 alloc_idx; - u32 cur_buf_idx = 0; - u32 cur_buf_offset = 0; - u32 buf_cnt_perhugebuf; - - srq_info = &hba->els_srq_info; - - /* Apply for entry buffer */ - req_buff_size = (u32)(srq_valid_wqe * sizeof(struct spfc_drq_buff_entry)); - srq_info->els_buff_entry_head = (struct spfc_drq_buff_entry *)kmalloc(req_buff_size, - GFP_KERNEL); - if (!srq_info->els_buff_entry_head) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate ELS Srq receive buffer entries failed"); - - return UNF_RETURN_ERROR; - } - memset(srq_info->els_buff_entry_head, 0, req_buff_size); - - buf_total_size = SPFC_SRQ_ELS_SGE_LEN * srq_valid_wqe; - - srq_info->buf_list.buf_size = buf_total_size > BUF_LIST_PAGE_SIZE - ? BUF_LIST_PAGE_SIZE - : buf_total_size; - buf_cnt_perhugebuf = srq_info->buf_list.buf_size / SPFC_SRQ_ELS_SGE_LEN; - buf_num = srq_valid_wqe % buf_cnt_perhugebuf ? 
- srq_valid_wqe / buf_cnt_perhugebuf + 1 : - srq_valid_wqe / buf_cnt_perhugebuf; - srq_info->buf_list.buflist = (struct buff_list *)kmalloc(buf_num * sizeof(struct buff_list), - GFP_KERNEL); - srq_info->buf_list.buf_num = buf_num; - - if (!srq_info->buf_list.buflist) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate ELS buf list failed out of memory"); - goto free_buff; - } - memset(srq_info->buf_list.buflist, 0, buf_num * sizeof(struct buff_list)); - - for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { - srq_info->buf_list.buflist[alloc_idx].vaddr = kmalloc(srq_info->buf_list.buf_size, - GFP_KERNEL); - if (!srq_info->buf_list.buflist[alloc_idx].vaddr) - goto free_buff; - - memset(srq_info->buf_list.buflist[alloc_idx].vaddr, 0, srq_info->buf_list.buf_size); - - srq_info->buf_list.buflist[alloc_idx].paddr = - pci_map_single(hba->pci_dev, srq_info->buf_list.buflist[alloc_idx].vaddr, - srq_info->buf_list.buf_size, DMA_FROM_DEVICE); - if (pci_dma_mapping_error(hba->pci_dev, - srq_info->buf_list.buflist[alloc_idx].paddr)) { - srq_info->buf_list.buflist[alloc_idx].paddr = 0; - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Map els srq buffer failed"); - - goto free_buff; - } - } - - /* Apply for receiving buffer and attach it to the free linked list */ - for (buff_index = 0; buff_index < srq_valid_wqe; buff_index++) { - buff_entry = &srq_info->els_buff_entry_head[buff_index]; - cur_buf_idx = buff_index / buf_cnt_perhugebuf; - cur_buf_offset = SPFC_SRQ_ELS_SGE_LEN * (buff_index % buf_cnt_perhugebuf); - buff_entry->buff_addr = srq_info->buf_list.buflist[cur_buf_idx].vaddr + - cur_buf_offset; - buff_entry->buff_dma = srq_info->buf_list.buflist[cur_buf_idx].paddr + - cur_buf_offset; - buff_entry->buff_id = (u16)buff_index; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[EVENT]Allocate bufnum:%u,buf_total_size:%u", buf_num, - buf_total_size); - - return RETURN_OK; - -free_buff: - spfc_free_els_srq_buff(hba, srq_valid_wqe); - return UNF_RETURN_ERROR; -} - -void spfc_send_clear_srq_cmd(struct spfc_hba_info *hba, - struct spfc_srq_info *srq_info) -{ - union spfc_cmdqe cmdqe; - struct cqm_queue *cqm_fcp_srq = NULL; - ulong flag = 0; - - memset(&cmdqe, 0, sizeof(union spfc_cmdqe)); - - spin_lock_irqsave(&srq_info->srq_spin_lock, flag); - cqm_fcp_srq = srq_info->cqm_srq_info; - if (!cqm_fcp_srq) { - srq_info->state = SPFC_CLEAN_DONE; - spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); - return; - } - - cmdqe.clear_srq.wd0.task_type = SPFC_TASK_T_CLEAR_SRQ; - cmdqe.clear_srq.wd1.scqn = SPFC_LSW(hba->default_scqn); - cmdqe.clear_srq.wd1.srq_type = srq_info->srq_type; - cmdqe.clear_srq.srqc_gpa_h = SPFC_HIGH_32_BITS(cqm_fcp_srq->q_ctx_paddr); - cmdqe.clear_srq.srqc_gpa_l = SPFC_LOW_32_BITS(cqm_fcp_srq->q_ctx_paddr); - - (void)queue_delayed_work(hba->work_queue, &srq_info->del_work, - (ulong)msecs_to_jiffies(SPFC_SRQ_DEL_STAGE_TIMEOUT_MS)); - spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port 0x%x begin to clear srq 0x%x(0x%x,0x%llx)", - hba->port_cfg.port_id, srq_info->srq_type, - SPFC_LSW(hba->default_scqn), - (u64)cqm_fcp_srq->q_ctx_paddr); - - /* Run the ROOT CMDQ command to issue the clear srq command. If the - * command fails to be delivered, retry upon timeout. - */ - (void)spfc_root_cmdq_enqueue(hba, &cmdqe, sizeof(cmdqe.clear_srq)); -} - -/* - *Function Name : spfc_srq_clr_timeout - *Function Description: Delete srq when timeout. 
- *Input Parameters : *work - *Output Parameters : N/A - *Return Type : void - */ -static void spfc_srq_clr_timeout(struct work_struct *work) -{ -#define SPFC_MAX_DEL_SRQ_RETRY_TIMES 2 - struct spfc_srq_info *srq = NULL; - struct spfc_hba_info *hba = NULL; - struct cqm_queue *cqm_fcp_imm_srq = NULL; - ulong flag = 0; - - srq = container_of(work, struct spfc_srq_info, del_work.work); - - spin_lock_irqsave(&srq->srq_spin_lock, flag); - hba = srq->hba; - cqm_fcp_imm_srq = srq->cqm_srq_info; - spin_unlock_irqrestore(&srq->srq_spin_lock, flag); - - if (hba && cqm_fcp_imm_srq) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port 0x%x clear srq 0x%x stat 0x%x timeout", - hba->port_cfg.port_id, srq->srq_type, srq->state); - - /* If the delivery fails or the execution times out after the - * delivery, try again once - */ - srq->del_retry_time++; - if (srq->del_retry_time < SPFC_MAX_DEL_SRQ_RETRY_TIMES) - spfc_send_clear_srq_cmd(hba, srq); - else - srq->del_retry_time = 0; - } -} - -static u32 spfc_create_els_srq(struct spfc_hba_info *hba) -{ - u32 ret = UNF_RETURN_ERROR; - struct cqm_queue *cqm_srq = NULL; - struct wq_header *wq_header = NULL; - struct spfc_srq_info *srq_info = NULL; - struct spfc_srq_ctx srq_ctx = {0}; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - cqm_srq = cqm3_object_fc_srq_create(hba->dev_handle, SERVICE_T_FC, - CQM_OBJECT_NONRDMA_SRQ, SPFC_SRQ_ELS_DATA_DEPTH, - SPFC_SRQE_SIZE, hba); - if (!cqm_srq) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Create Els Srq failed"); - - return UNF_RETURN_ERROR; - } - - /* Initialize SRQ */ - srq_info = &hba->els_srq_info; - spfc_init_srq_info(hba, cqm_srq, srq_info); - srq_info->srq_type = SPFC_SRQ_ELS; - srq_info->enable = true; - srq_info->state = SPFC_CLEAN_DONE; - srq_info->del_retry_time = 0; - - /* The srq lock is initialized and can be created repeatedly */ - spin_lock_init(&srq_info->srq_spin_lock); - srq_info->spin_lock_init = true; - - /* Initialize queue header */ - wq_header = (struct wq_header *)(void *)cqm_srq->q_header_vaddr; - spfc_init_srq_header(wq_header); - INIT_DELAYED_WORK(&srq_info->del_work, spfc_srq_clr_timeout); - - /* Apply for RQ buffer */ - ret = spfc_alloc_els_srq_buff(hba, srq_info->valid_wqe_num); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate Els Srq buffer failed"); - - cqm3_object_delete(&cqm_srq->object); - memset(srq_info, 0, sizeof(struct spfc_srq_info)); - return UNF_RETURN_ERROR; - } - - /* Fill RQE, update queue header */ - spfc_init_els_srq_wqe(srq_info); - - /* Fill SRQ CTX */ - memset(&srq_ctx, 0, sizeof(srq_ctx)); - spfc_cfg_srq_ctx(srq_info, &srq_ctx, SPFC_SRQ_ELS_SGE_LEN, - srq_info->cqm_srq_info->q_room_buf_1.buf_list->pa); - - ret = spfc_creat_srqc_via_cmdq_sync(hba, &srq_ctx, srq_info->cqm_srq_info->q_ctx_paddr); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Creat Els Srqc failed"); - - spfc_free_els_srq_buff(hba, srq_info->valid_wqe_num); - cqm3_object_delete(&cqm_srq->object); - memset(srq_info, 0, sizeof(struct spfc_srq_info)); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -void spfc_wq_destroy_els_srq(struct work_struct *work) -{ - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID(work); - hba = - container_of(work, struct spfc_hba_info, els_srq_clear_work); - spfc_destroy_els_srq(hba); -} - -void spfc_destroy_els_srq(void *handle) -{ - /* - * Receive clear els srq sts - * ---then--->>> destroy els srq - */ - struct spfc_srq_info *srq_info = NULL; - struct spfc_hba_info *hba = 
NULL;
-
-	FC_CHECK_RETURN_VOID(handle);
-
-	hba = (struct spfc_hba_info *)handle;
-	srq_info = &hba->els_srq_info;
-
-	/* release receive buffer */
-	spfc_free_els_srq_buff(hba, srq_info->valid_wqe_num);
-
-	/* release srq info */
-	if (srq_info->cqm_srq_info) {
-		cqm3_object_delete(&srq_info->cqm_srq_info->object);
-		srq_info->cqm_srq_info = NULL;
-	}
-	if (srq_info->spin_lock_init)
-		srq_info->spin_lock_init = false;
-	srq_info->hba = NULL;
-	srq_info->enable = false;
-	srq_info->state = SPFC_CLEAN_DONE;
-}
-
-/*
- *Function Name       : spfc_create_srq
- *Function Description: Create SRQ, which contains four SRQ for receiving
- *                      immediate data and a SRQ for receiving ELS data.
- *Input Parameters    : *hba
- *Output Parameters   : N/A
- *Return Type         : u32
- */
-static u32 spfc_create_srq(struct spfc_hba_info *hba)
-{
-	u32 ret = UNF_RETURN_ERROR;
-
-	FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR);
-
-	/* Create ELS SRQ */
-	ret = spfc_create_els_srq(hba);
-	if (ret != RETURN_OK) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN,
-			     "[err]Create Els Srq failed");
-		return UNF_RETURN_ERROR;
-	}
-
-	return RETURN_OK;
-}
-
-/*
- *Function Name       : spfc_destroy_srq
- *Function Description: Release the SRQ resource, including the SRQ for
- *                      receiving the immediate data and the SRQ for
- *                      receiving the ELS data.
- *Input Parameters    : *hba
- *Output Parameters   : N/A
- *Return Type         : void
- */
-static void spfc_destroy_srq(struct spfc_hba_info *hba)
-{
-	FC_CHECK_RETURN_VOID(hba);
-
-	spfc_destroy_els_srq(hba);
-}
-
-u32 spfc_create_common_share_queues(void *handle)
-{
-	u32 ret = UNF_RETURN_ERROR;
-	struct spfc_hba_info *hba = NULL;
-
-	FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR);
-	hba = (struct spfc_hba_info *)handle;
-	/* Create & Init 8 pairs SCQ */
-	ret = spfc_create_scq(hba);
-	if (ret != RETURN_OK) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN,
-			     "[err]Create scq failed");
-
-		return UNF_RETURN_ERROR;
-	}
-
-	/* Alloc SRQ resource for SIRT & ELS */
-	ret = spfc_create_srq(hba);
-	if (ret != RETURN_OK) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN,
-			     "[err]Create srq failed");
-
-		spfc_flush_scq_ctx(hba);
-		spfc_destroy_scq(hba);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	return RETURN_OK;
-}
-
-void spfc_destroy_common_share_queues(void *hba)
-{
-	FC_CHECK_RETURN_VOID(hba);
-
-	spfc_destroy_scq((struct spfc_hba_info *)hba);
-	spfc_destroy_srq((struct spfc_hba_info *)hba);
-}
-
-static u8 spfc_map_fcp_data_cos(struct spfc_hba_info *hba)
-{
-	u8 i = 0;
-	u8 min_cnt_index = SPFC_PACKET_COS_FC_DATA;
-	bool get_init_index = false;
-
-	for (i = 0; i < SPFC_MAX_COS_NUM; i++) {
-		/* Check whether the CoS is valid for the FC and cannot be
-		 * occupied by the CMD
-		 */
-		if ((!(hba->cos_bitmap & ((u32)1 << i))) || i == SPFC_PACKET_COS_FC_CMD)
-			continue;
-
-		if (!get_init_index) {
-			min_cnt_index = i;
-			get_init_index = true;
-			continue;
-		}
-
-		if (atomic_read(&hba->cos_rport_cnt[i]) <
-		    atomic_read(&hba->cos_rport_cnt[min_cnt_index]))
-			min_cnt_index = i;
-	}
-
-	atomic_inc(&hba->cos_rport_cnt[min_cnt_index]);
-
-	return min_cnt_index;
-}
-
-static void spfc_update_cos_rport_cnt(struct spfc_hba_info *hba, u8 cos_index)
-{
-	if (cos_index >= SPFC_MAX_COS_NUM ||
-	    cos_index == SPFC_PACKET_COS_FC_CMD ||
-	    (!(hba->cos_bitmap & ((u32)1 << cos_index))) ||
-	    (atomic_read(&hba->cos_rport_cnt[cos_index]) == 0))
-		return;
-
-	atomic_dec(&hba->cos_rport_cnt[cos_index]);
-}
-
-void spfc_invalid_parent_sq(struct spfc_parent_sq_info *sq_info)
-{
-	sq_info->rport_index = INVALID_VALUE32;
-	sq_info->context_id = INVALID_VALUE32;
-
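For illustration, the CoS selection in spfc_map_fcp_data_cos() above is a least-loaded scan over the CoS values enabled in cos_bitmap, skipping the CoS reserved for commands. A user-space sketch of the same idea, with plain ints standing in for the kernel atomics and MAX_COS/RESERVED_CMD_COS as assumed stand-ins for SPFC_MAX_COS_NUM and SPFC_PACKET_COS_FC_CMD:

#include <stdint.h>

#define MAX_COS 8
#define RESERVED_CMD_COS 0

/* Return the eligible CoS with the fewest bound rports, or -1. */
static int pick_least_loaded_cos(uint32_t valid_bitmap,
				 const int rport_cnt[MAX_COS])
{
	int best = -1;

	for (int i = 0; i < MAX_COS; i++) {
		if (!(valid_bitmap & (1u << i)) || i == RESERVED_CMD_COS)
			continue;	/* unusable for FC data */
		if (best < 0 || rport_cnt[i] < rport_cnt[best])
			best = i;
	}
	return best;
}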
sq_info->sq_queue_id = INVALID_VALUE32; - sq_info->cache_id = INVALID_VALUE32; - sq_info->local_port_id = INVALID_VALUE32; - sq_info->remote_port_id = INVALID_VALUE32; - sq_info->hba = NULL; - sq_info->del_start_jiff = INVALID_VALUE64; - sq_info->port_in_flush = false; - sq_info->sq_in_sess_rst = false; - sq_info->oqid_rd = INVALID_VALUE16; - sq_info->oqid_wr = INVALID_VALUE16; - sq_info->srq_ctx_addr = 0; - sq_info->sqn_base = 0; - atomic_set(&sq_info->sq_cached, false); - sq_info->vport_id = 0; - sq_info->sirt_dif_control.protect_opcode = UNF_DIF_ACTION_NONE; - sq_info->need_offloaded = INVALID_VALUE8; - atomic_set(&sq_info->sq_valid, false); - atomic_set(&sq_info->flush_done_wait_cnt, 0); - memset(&sq_info->delay_sqe, 0, sizeof(struct spfc_delay_sqe_ctrl_info)); - memset(sq_info->io_stat, 0, sizeof(sq_info->io_stat)); -} - -static void spfc_parent_sq_opreate_timeout(struct work_struct *work) -{ - ulong flag = 0; - struct spfc_parent_sq_info *parent_sq = NULL; - struct spfc_parent_queue_info *parent_queue = NULL; - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID(work); - - parent_sq = container_of(work, struct spfc_parent_sq_info, del_work.work); - parent_queue = container_of(parent_sq, struct spfc_parent_queue_info, parent_sq_info); - hba = (struct spfc_hba_info *)parent_sq->hba; - FC_CHECK_RETURN_VOID(hba); - - spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); - if (parent_queue->offload_state == SPFC_QUEUE_STATE_DESTROYING) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "Port(0x%x) sq rport index(0x%x) local nportid(0x%x),remote nportid(0x%x) reset timeout.", - hba->port_cfg.port_id, parent_sq->rport_index, - parent_sq->local_port_id, - parent_sq->remote_port_id); - } - spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, flag); -} - -static void spfc_parent_sq_wait_flush_done_timeout(struct work_struct *work) -{ - ulong flag = 0; - struct spfc_parent_sq_info *parent_sq = NULL; - struct spfc_parent_queue_info *parent_queue = NULL; - struct spfc_hba_info *hba = NULL; - u32 ctx_flush_done; - u32 *ctx_dw = NULL; - int ret; - int sq_state = SPFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK; - spinlock_t *prtq_state_lock = NULL; - - FC_CHECK_RETURN_VOID(work); - - parent_sq = container_of(work, struct spfc_parent_sq_info, flush_done_timeout_work.work); - - FC_CHECK_RETURN_VOID(parent_sq); - - parent_queue = container_of(parent_sq, struct spfc_parent_queue_info, parent_sq_info); - prtq_state_lock = &parent_queue->parent_queue_state_lock; - hba = (struct spfc_hba_info *)parent_sq->hba; - FC_CHECK_RETURN_VOID(hba); - FC_CHECK_RETURN_VOID(parent_queue); - - spin_lock_irqsave(prtq_state_lock, flag); - if (parent_queue->offload_state != SPFC_QUEUE_STATE_DESTROYING) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) sq rport index(0x%x) is not destroying status,offloadsts is %d", - hba->port_cfg.port_id, parent_sq->rport_index, - parent_queue->offload_state); - spin_unlock_irqrestore(prtq_state_lock, flag); - return; - } - - if (parent_queue->parent_ctx.cqm_parent_ctx_obj) { - ctx_dw = (u32 *)((void *)(parent_queue->parent_ctx.cqm_parent_ctx_obj->vaddr)); - ctx_flush_done = ctx_dw[SPFC_CTXT_FLUSH_DONE_DW_POS] & SPFC_CTXT_FLUSH_DONE_MASK_BE; - if (ctx_flush_done == 0) { - spin_unlock_irqrestore(prtq_state_lock, flag); - - if (atomic_read(&parent_queue->parent_sq_info.flush_done_wait_cnt) < - SPFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[info]Port(0x%x) sq rport index(0x%x) wait flush done timeout %d times", - 
hba->port_cfg.port_id, parent_sq->rport_index,
-					     atomic_read(&(parent_queue->parent_sq_info
-					     .flush_done_wait_cnt)));
-
-				atomic_inc(&parent_queue->parent_sq_info.flush_done_wait_cnt);
-
-				/* Delay Free Sq info */
-				ret = queue_delayed_work(hba->work_queue,
-							 &(parent_queue->parent_sq_info
-							 .flush_done_timeout_work),
-							 (ulong)msecs_to_jiffies((u32)
-							 SPFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS));
-				if (!ret) {
-					FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-						     "[err]Port(0x%x) rport(0x%x) queue delayed work failed ret:%d",
-						     hba->port_cfg.port_id,
-						     parent_sq->rport_index, ret);
-					SPFC_HBA_STAT(hba, sq_state);
-				}
-
-				return;
-			}
-			FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-				     "[err]Port(0x%x) sq rport index(0x%x) has wait flush done %d times,do not free sq",
-				     hba->port_cfg.port_id,
-				     parent_sq->rport_index,
-				     atomic_read(&(parent_queue->parent_sq_info
-				     .flush_done_wait_cnt)));
-
-			SPFC_HBA_STAT(hba, SPFC_STAT_CTXT_FLUSH_DONE);
-			return;
-		}
-	}
-
-	spin_unlock_irqrestore(prtq_state_lock, flag);
-
-	FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-		     "[info]Port(0x%x) sq rport index(0x%x) flush done bit is ok,free sq now",
-		     hba->port_cfg.port_id, parent_sq->rport_index);
-
-	spfc_free_parent_queue_info(hba, parent_queue);
-}
-
-static void spfc_free_parent_sq(struct spfc_hba_info *hba,
-				struct spfc_parent_queue_info *parq_info)
-{
-#define SPFC_WAIT_PRT_CTX_FUSH_DONE_LOOP_TIMES 100
-	u32 ctx_flush_done = 0;
-	u32 *ctx_dw = NULL;
-	struct spfc_parent_sq_info *sq_info = NULL;
-	u32 uidelaycnt = 0;
-	struct list_head *list = NULL;
-	struct spfc_suspend_sqe_info *suspend_sqe = NULL;
-	ulong flag = 0;
-
-	sq_info = &parq_info->parent_sq_info;
-
-	spin_lock_irqsave(&parq_info->parent_queue_state_lock, flag);
-	while (!list_empty(&sq_info->suspend_sqe_list)) {
-		list = UNF_OS_LIST_NEXT(&sq_info->suspend_sqe_list);
-		list_del(list);
-		suspend_sqe = list_entry(list, struct spfc_suspend_sqe_info, list_sqe_entry);
-		if (suspend_sqe) {
-			if (!cancel_delayed_work(&suspend_sqe->timeout_work)) {
-				FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-					     "[warn]reset worker timer maybe timeout");
-			}
-
-			kfree(suspend_sqe);
-		}
-	}
-	spin_unlock_irqrestore(&parq_info->parent_queue_state_lock, flag);
-
-	/* Free data cos */
-	spfc_update_cos_rport_cnt(hba, parq_info->queue_data_cos);
-
-	if (parq_info->parent_ctx.cqm_parent_ctx_obj) {
-		ctx_dw = (u32 *)((void *)(parq_info->parent_ctx.cqm_parent_ctx_obj->vaddr));
-		ctx_flush_done = ctx_dw[SPFC_CTXT_FLUSH_DONE_DW_POS] & SPFC_CTXT_FLUSH_DONE_MASK_BE;
-		mb();
-		if (parq_info->offload_state == SPFC_QUEUE_STATE_DESTROYING &&
-		    ctx_flush_done == 0) {
-			do {
-				ctx_flush_done = ctx_dw[SPFC_CTXT_FLUSH_DONE_DW_POS] &
-						 SPFC_CTXT_FLUSH_DONE_MASK_BE;
-				mb();
-				if (ctx_flush_done != 0)
-					break;
-				uidelaycnt++;
-			} while (uidelaycnt < SPFC_WAIT_PRT_CTX_FUSH_DONE_LOOP_TIMES);
-
-			if (ctx_flush_done == 0) {
-				FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN,
-					     "[warn]Port(0x%x) Rport(0x%x) flush done is not set",
-					     hba->port_cfg.port_id,
-					     sq_info->rport_index);
-			}
-		}
-
-		cqm3_object_delete(&parq_info->parent_ctx.cqm_parent_ctx_obj->object);
-		parq_info->parent_ctx.cqm_parent_ctx_obj = NULL;
-	}
-
-	spfc_invalid_parent_sq(sq_info);
-}
-
-u32 spfc_alloc_parent_sq(struct spfc_hba_info *hba,
-			 struct spfc_parent_queue_info *parq_info,
-			 struct unf_port_info *rport_info)
-{
-	struct spfc_parent_sq_info *sq_ctrl = NULL;
-	struct cqm_qpc_mpt *prnt_ctx = NULL;
-	ulong flag = 0;
-
-	/* Create parent context via CQM */
-	prnt_ctx = cqm3_object_qpc_mpt_create(hba->dev_handle, SERVICE_T_FC,
-					      CQM_OBJECT_SERVICE_CTX,
SPFC_CNTX_SIZE_256B, - parq_info, CQM_INDEX_INVALID); - if (!prnt_ctx) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create parent context failed, CQM_INDEX is 0x%x", - CQM_INDEX_INVALID); - goto parent_create_fail; - } - - parq_info->parent_ctx.cqm_parent_ctx_obj = prnt_ctx; - /* Initialize struct spfc_parent_sq_info */ - sq_ctrl = &parq_info->parent_sq_info; - sq_ctrl->hba = (void *)hba; - sq_ctrl->rport_index = rport_info->rport_index; - sq_ctrl->sqn_base = rport_info->sqn_base; - sq_ctrl->context_id = prnt_ctx->xid; - sq_ctrl->sq_queue_id = SPFC_QID_SQ; - sq_ctrl->cache_id = INVALID_VALUE32; - sq_ctrl->local_port_id = INVALID_VALUE32; - sq_ctrl->remote_port_id = INVALID_VALUE32; - sq_ctrl->sq_in_sess_rst = false; - atomic_set(&sq_ctrl->sq_valid, true); - sq_ctrl->del_start_jiff = INVALID_VALUE64; - sq_ctrl->service_type = SPFC_SERVICE_TYPE_FC; - sq_ctrl->vport_id = (u8)rport_info->qos_level; - sq_ctrl->cs_ctrl = (u8)rport_info->cs_ctrl; - sq_ctrl->sirt_dif_control.protect_opcode = UNF_DIF_ACTION_NONE; - sq_ctrl->need_offloaded = INVALID_VALUE8; - atomic_set(&sq_ctrl->flush_done_wait_cnt, 0); - - /* Check whether the HBA is in the Linkdown state. Note that - * offload_state must be in the non-FREE state. - */ - spin_lock_irqsave(&hba->flush_state_lock, flag); - sq_ctrl->port_in_flush = hba->in_flushing; - spin_unlock_irqrestore(&hba->flush_state_lock, flag); - memset(sq_ctrl->io_stat, 0, sizeof(sq_ctrl->io_stat)); - - INIT_DELAYED_WORK(&sq_ctrl->del_work, spfc_parent_sq_opreate_timeout); - INIT_DELAYED_WORK(&sq_ctrl->flush_done_timeout_work, - spfc_parent_sq_wait_flush_done_timeout); - INIT_LIST_HEAD(&sq_ctrl->suspend_sqe_list); - - memset(&sq_ctrl->delay_sqe, 0, sizeof(struct spfc_delay_sqe_ctrl_info)); - - return RETURN_OK; - -parent_create_fail: - parq_info->parent_ctx.cqm_parent_ctx_obj = NULL; - - return UNF_RETURN_ERROR; -} - -static void -spfc_init_prnt_ctxt_scq_qinfo(void *hba, - struct spfc_parent_queue_info *prnt_qinfo) -{ - u32 resp_scqn = 0; - struct spfc_parent_context *ctx = NULL; - struct spfc_scq_qinfo *resp_prnt_scq_ctxt = NULL; - struct spfc_queue_info_bus queue_bus; - - /* Obtains the queue id of the scq returned by the CQM when the SCQ is - * created - */ - resp_scqn = prnt_qinfo->parent_sts_scq_info.cqm_queue_id; - - /* Obtains the Parent Context address */ - ctx = (struct spfc_parent_context *)(prnt_qinfo->parent_ctx.parent_ctx); - - resp_prnt_scq_ctxt = &ctx->resp_scq_qinfo; - resp_prnt_scq_ctxt->hw_scqc_config.info.rq_th2_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.rq_th1_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.rq_th0_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.rq_min_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.sq_th2_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.sq_th1_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.sq_th0_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.sq_min_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.scq_n = (u64)resp_scqn; - resp_prnt_scq_ctxt->hw_scqc_config.info.parity = 0; - - memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus)); - queue_bus.bus[ARRAY_INDEX_0] = resp_prnt_scq_ctxt->hw_scqc_config.pctxt_val1; - resp_prnt_scq_ctxt->hw_scqc_config.info.parity = spfc_get_parity_value(queue_bus.bus, - SPFC_HW_SCQC_BUS_ROW, - SPFC_HW_SCQC_BUS_COL - ); - spfc_cpu_to_big64(resp_prnt_scq_ctxt, sizeof(struct 
spfc_scq_qinfo)); -} - -static void -spfc_init_prnt_ctxt_srq_qinfo(void *handle, struct spfc_parent_queue_info *prnt_qinfo) -{ - struct spfc_parent_context *ctx = NULL; - struct cqm_queue *cqm_els_srq = NULL; - struct spfc_parent_sq_info *sq = NULL; - struct spfc_queue_info_bus queue_bus; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - /* Obtains the SQ address */ - sq = &prnt_qinfo->parent_sq_info; - - /* Obtains the Parent Context address */ - ctx = (struct spfc_parent_context *)(prnt_qinfo->parent_ctx.parent_ctx); - - cqm_els_srq = hba->els_srq_info.cqm_srq_info; - - /* Initialize the Parent SRQ INFO used when the ELS is received */ - ctx->els_srq_info.srqc_gpa = cqm_els_srq->q_ctx_paddr >> UNF_SHIFT_4; - - memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus)); - queue_bus.bus[ARRAY_INDEX_0] = ctx->els_srq_info.srqc_gpa; - ctx->els_srq_info.parity = spfc_get_parity_value(queue_bus.bus, SPFC_HW_SRQC_BUS_ROW, - SPFC_HW_SRQC_BUS_COL); - spfc_cpu_to_big64(&ctx->els_srq_info, sizeof(struct spfc_srq_qinfo)); - - ctx->imm_srq_info.srqc_gpa = 0; - sq->srq_ctx_addr = 0; -} - -static u16 spfc_get_max_sequence_id(void) -{ - return SPFC_HRQI_SEQ_ID_MAX; -} - -static void spfc_init_prnt_rsvd_qinfo(struct spfc_parent_queue_info *prnt_qinfo) -{ - struct spfc_parent_context *ctx = NULL; - struct spfc_hw_rsvd_queue *hw_rsvd_qinfo = NULL; - u16 max_seq = 0; - u32 each = 0, seq_index = 0; - - /* Obtains the Parent Context address */ - ctx = (struct spfc_parent_context *)(prnt_qinfo->parent_ctx.parent_ctx); - hw_rsvd_qinfo = (struct spfc_hw_rsvd_queue *)&ctx->hw_rsvdq; - memset(hw_rsvd_qinfo->seq_id_bitmap, 0, sizeof(hw_rsvd_qinfo->seq_id_bitmap)); - - max_seq = spfc_get_max_sequence_id(); - - /* special set for sequence id 0, which is always kept by ucode for - * sending fcp-cmd - */ - hw_rsvd_qinfo->seq_id_bitmap[SPFC_HRQI_SEQ_SEPCIAL_ID] = 1; - seq_index = SPFC_HRQI_SEQ_SEPCIAL_ID - (max_seq >> SPFC_HRQI_SEQ_INDEX_SHIFT); - - /* Set the unavailable mask to start from max + 1 */ - for (each = (max_seq % SPFC_HRQI_SEQ_INDEX_MAX) + 1; - each < SPFC_HRQI_SEQ_INDEX_MAX; each++) { - hw_rsvd_qinfo->seq_id_bitmap[seq_index] |= ((u64)0x1) << each; - } - - hw_rsvd_qinfo->seq_id_bitmap[seq_index] = - cpu_to_be64(hw_rsvd_qinfo->seq_id_bitmap[seq_index]); - - /* sepcial set for sequence id 0 */ - if (seq_index != SPFC_HRQI_SEQ_SEPCIAL_ID) - hw_rsvd_qinfo->seq_id_bitmap[SPFC_HRQI_SEQ_SEPCIAL_ID] = - cpu_to_be64(hw_rsvd_qinfo->seq_id_bitmap[SPFC_HRQI_SEQ_SEPCIAL_ID]); - - for (each = 0; each < seq_index; each++) - hw_rsvd_qinfo->seq_id_bitmap[each] = SPFC_HRQI_SEQ_INVALID_ID; - - /* no matter what the range of seq id, last_req_seq_id is fixed value - * 0xff - */ - hw_rsvd_qinfo->wd0.last_req_seq_id = SPFC_HRQI_SEQ_ID_MAX; - hw_rsvd_qinfo->wd0.xid = prnt_qinfo->parent_sq_info.context_id; - - *(u64 *)&hw_rsvd_qinfo->wd0 = - cpu_to_be64(*(u64 *)&hw_rsvd_qinfo->wd0); -} - -/* - *Function Name : spfc_init_prnt_sw_section_info - *Function Description: Initialize the SW Section area that can be accessed by - * the Parent Context uCode. 
- *Input Parameters : *hba, - * *prnt_qinfo - *Output Parameters : N/A - *Return Type : void - */ -static void spfc_init_prnt_sw_section_info(struct spfc_hba_info *hba, - struct spfc_parent_queue_info *prnt_qinfo) -{ -#define SPFC_VLAN_ENABLE (1) -#define SPFC_MB_PER_KB 1024 - u16 rport_index; - struct spfc_parent_context *ctx = NULL; - struct spfc_sw_section *sw_setion = NULL; - u16 total_scq_num = SPFC_TOTAL_SCQ_NUM; - u32 queue_id; - dma_addr_t queue_hdr_paddr; - - /* Obtains the Parent Context address */ - ctx = (struct spfc_parent_context *)(prnt_qinfo->parent_ctx.parent_ctx); - sw_setion = &ctx->sw_section; - - /* xid+vPortId */ - sw_setion->sw_ctxt_vport_xid.xid = prnt_qinfo->parent_sq_info.context_id; - spfc_cpu_to_big32(&sw_setion->sw_ctxt_vport_xid, sizeof(sw_setion->sw_ctxt_vport_xid)); - - /* conn_id */ - rport_index = SPFC_LSW(prnt_qinfo->parent_sq_info.rport_index); - sw_setion->conn_id = cpu_to_be16(rport_index); - - /* Immediate parameters */ - sw_setion->immi_rq_page_size = 0; - - /* Parent SCQ INFO used for sending packets to the Cmnd */ - sw_setion->scq_num_rcv_cmd = cpu_to_be16((u16)prnt_qinfo->parent_cmd_scq_info.cqm_queue_id); - sw_setion->scq_num_max_scqn = cpu_to_be16(total_scq_num); - - /* sw_ctxt_misc */ - sw_setion->sw_ctxt_misc.dw.srv_type = prnt_qinfo->parent_sq_info.service_type; - sw_setion->sw_ctxt_misc.dw.port_id = hba->port_index; - - /* only the VN2VF mode is supported */ - sw_setion->sw_ctxt_misc.dw.vlan_id = 0; - spfc_cpu_to_big32(&sw_setion->sw_ctxt_misc.pctxt_val0, - sizeof(sw_setion->sw_ctxt_misc.pctxt_val0)); - - /* Configuring the combo length */ - sw_setion->per_xmit_data_size = cpu_to_be32(combo_length * SPFC_MB_PER_KB); - sw_setion->sw_ctxt_config.dw.work_mode = SPFC_PORT_MODE_INI; - sw_setion->sw_ctxt_config.dw.status = FC_PARENT_STATUS_INVALID; - sw_setion->sw_ctxt_config.dw.cos = 0; - sw_setion->sw_ctxt_config.dw.oq_cos_cmd = SPFC_PACKET_COS_FC_CMD; - sw_setion->sw_ctxt_config.dw.oq_cos_data = prnt_qinfo->queue_data_cos; - sw_setion->sw_ctxt_config.dw.priority = 0; - sw_setion->sw_ctxt_config.dw.vlan_enable = SPFC_VLAN_ENABLE; - sw_setion->sw_ctxt_config.dw.sgl_num = dif_sgl_mode; - spfc_cpu_to_big32(&sw_setion->sw_ctxt_config.pctxt_val1, - sizeof(sw_setion->sw_ctxt_config.pctxt_val1)); - spfc_cpu_to_big32(&sw_setion->immi_dif_info, sizeof(sw_setion->immi_dif_info)); - - queue_id = prnt_qinfo->parent_cmd_scq_info.local_queue_id; - queue_hdr_paddr = hba->scq_info[queue_id].cqm_scq_info->q_header_paddr; - sw_setion->cmd_scq_gpa_h = SPFC_HIGH_32_BITS(queue_hdr_paddr); - sw_setion->cmd_scq_gpa_l = SPFC_LOW_32_BITS(queue_hdr_paddr); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) RPort(0x%x) CmdLocalScqn(0x%x) QheaderGpaH(0x%x) QheaderGpaL(0x%x)", - hba->port_cfg.port_id, prnt_qinfo->parent_sq_info.rport_index, queue_id, - sw_setion->cmd_scq_gpa_h, sw_setion->cmd_scq_gpa_l); - - spfc_cpu_to_big32(&sw_setion->cmd_scq_gpa_h, sizeof(sw_setion->cmd_scq_gpa_h)); - spfc_cpu_to_big32(&sw_setion->cmd_scq_gpa_l, sizeof(sw_setion->cmd_scq_gpa_l)); -} - -static void spfc_init_parent_context(void *hba, struct spfc_parent_queue_info *prnt_qinfo) -{ - struct spfc_parent_context *ctx = NULL; - - ctx = (struct spfc_parent_context *)(prnt_qinfo->parent_ctx.parent_ctx); - - /* Initialize Parent Context */ - memset(ctx, 0, SPFC_CNTX_SIZE_256B); - - /* Initialize the Queue Info hardware area */ - spfc_init_prnt_ctxt_scq_qinfo(hba, prnt_qinfo); - spfc_init_prnt_ctxt_srq_qinfo(hba, prnt_qinfo); - spfc_init_prnt_rsvd_qinfo(prnt_qinfo); - - /* Initialize 
Software Section */
-	spfc_init_prnt_sw_section_info(hba, prnt_qinfo);
-}
-
-void spfc_map_shared_queue_qid(struct spfc_hba_info *hba,
-			       struct spfc_parent_queue_info *parent_queue_info,
-			       u32 rport_index)
-{
-	u32 cmd_scqn_local = 0;
-	u32 sts_scqn_local = 0;
-
-	/* The SCQ is used for each connection based on the balanced
-	 * distribution of commands and responses
-	 */
-	cmd_scqn_local = SPFC_RPORTID_TO_CMD_SCQN(rport_index);
-	sts_scqn_local = SPFC_RPORTID_TO_STS_SCQN(rport_index);
-	parent_queue_info->parent_cmd_scq_info.local_queue_id = cmd_scqn_local;
-	parent_queue_info->parent_sts_scq_info.local_queue_id = sts_scqn_local;
-	parent_queue_info->parent_cmd_scq_info.cqm_queue_id =
-	    hba->scq_info[cmd_scqn_local].scqn;
-	parent_queue_info->parent_sts_scq_info.cqm_queue_id =
-	    hba->scq_info[sts_scqn_local].scqn;
-
-	/* Each session shares the immediate SRQ and the ELS SRQ */
-	parent_queue_info->parent_els_srq_info.local_queue_id = 0;
-	parent_queue_info->parent_els_srq_info.cqm_queue_id = hba->els_srq_info.srqn;
-
-	/* Allocate fcp data cos value */
-	parent_queue_info->queue_data_cos = spfc_map_fcp_data_cos(hba);
-
-	/* Allocate Parent SQ vPort */
-	parent_queue_info->parent_sq_info.vport_id += parent_queue_info->queue_vport_id;
-}
-
-u32 spfc_send_session_enable(struct spfc_hba_info *hba, struct unf_port_info *rport_info)
-{
-	struct spfc_parent_queue_info *parent_queue_info = NULL;
-	dma_addr_t ctx_phy_addr = 0;
-	void *ctx_addr = NULL;
-	union spfc_cmdqe session_enable;
-	u32 ret = UNF_RETURN_ERROR;
-	struct spfc_parent_context *ctx = NULL;
-	struct spfc_sw_section *sw_setion = NULL;
-	struct spfc_host_keys key;
-	u32 tx_mfs = 2048;
-	u32 edtov_timer = 2000;
-	ulong flag = 0;
-	spinlock_t *prtq_state_lock = NULL;
-	u32 index;
-
-	memset(&session_enable, 0, sizeof(union spfc_cmdqe));
-	memset(&key, 0, sizeof(struct spfc_host_keys));
-	index = rport_info->rport_index;
-	parent_queue_info = &hba->parent_queue_mgr->parent_queue[index];
-	prtq_state_lock = &parent_queue_info->parent_queue_state_lock;
-	spin_lock_irqsave(prtq_state_lock, flag);
-
-	ctx = (struct spfc_parent_context *)(parent_queue_info->parent_ctx.parent_ctx);
-	sw_setion = &ctx->sw_section;
-
-	sw_setion->tx_mfs = cpu_to_be16((u16)(tx_mfs));
-	sw_setion->e_d_tov_timer_val = cpu_to_be32(edtov_timer);
-
-	spfc_big_to_cpu32(&sw_setion->sw_ctxt_misc.pctxt_val0,
-			  sizeof(sw_setion->sw_ctxt_misc.pctxt_val0));
-	sw_setion->sw_ctxt_misc.dw.port_id = SPFC_GET_NETWORK_PORT_ID(hba);
-	spfc_cpu_to_big32(&sw_setion->sw_ctxt_misc.pctxt_val0,
-			  sizeof(sw_setion->sw_ctxt_misc.pctxt_val0));
-
-	spfc_big_to_cpu32(&sw_setion->sw_ctxt_config.pctxt_val1,
-			  sizeof(sw_setion->sw_ctxt_config.pctxt_val1));
-	spfc_cpu_to_big32(&sw_setion->sw_ctxt_config.pctxt_val1,
-			  sizeof(sw_setion->sw_ctxt_config.pctxt_val1));
-
-	parent_queue_info->parent_sq_info.rport_index = rport_info->rport_index;
-	parent_queue_info->parent_sq_info.local_port_id = rport_info->local_nport_id;
-	parent_queue_info->parent_sq_info.remote_port_id = rport_info->nport_id;
-	parent_queue_info->parent_sq_info.context_id =
-	    parent_queue_info->parent_ctx.cqm_parent_ctx_obj->xid;
-
-	/* Fill in context to the chip */
-	ctx_phy_addr = parent_queue_info->parent_ctx.cqm_parent_ctx_obj->paddr;
-	ctx_addr = parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr;
-	memcpy(ctx_addr, parent_queue_info->parent_ctx.parent_ctx,
-	       sizeof(struct spfc_parent_context));
-	session_enable.session_enable.wd0.task_type = SPFC_TASK_T_SESS_EN;
-	session_enable.session_enable.wd2.conn_id =
rport_info->rport_index; - session_enable.session_enable.wd2.scqn = hba->default_scqn; - session_enable.session_enable.wd3.xid_p = - parent_queue_info->parent_ctx.cqm_parent_ctx_obj->xid; - session_enable.session_enable.context_gpa_hi = SPFC_HIGH_32_BITS(ctx_phy_addr); - session_enable.session_enable.context_gpa_lo = SPFC_LOW_32_BITS(ctx_phy_addr); - - spin_unlock_irqrestore(prtq_state_lock, flag); - - key.wd3.sid_2 = (rport_info->local_nport_id & SPFC_KEY_WD3_SID_2_MASK) >> UNF_SHIFT_16; - key.wd3.sid_1 = (rport_info->local_nport_id & SPFC_KEY_WD3_SID_1_MASK) >> UNF_SHIFT_8; - key.wd4.sid_0 = rport_info->local_nport_id & SPFC_KEY_WD3_SID_0_MASK; - key.wd4.did_0 = rport_info->nport_id & SPFC_KEY_WD4_DID_0_MASK; - key.wd4.did_1 = (rport_info->nport_id & SPFC_KEY_WD4_DID_1_MASK) >> UNF_SHIFT_8; - key.wd4.did_2 = (rport_info->nport_id & SPFC_KEY_WD4_DID_2_MASK) >> UNF_SHIFT_16; - key.wd5.host_id = 0; - key.wd5.port_id = hba->port_index; - - memcpy(&session_enable.session_enable.keys, &key, sizeof(struct spfc_host_keys)); - - memcpy((void *)(uintptr_t)session_enable.session_enable.context, - parent_queue_info->parent_ctx.parent_ctx, - sizeof(struct spfc_parent_context)); - spfc_big_to_cpu32((void *)(uintptr_t)session_enable.session_enable.context, - sizeof(struct spfc_parent_context)); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info] xid:0x%x, sid:0x%x,did:0x%x parentcontext:", - parent_queue_info->parent_ctx.cqm_parent_ctx_obj->xid, - rport_info->local_nport_id, rport_info->nport_id); - - ret = spfc_root_cmdq_enqueue(hba, &session_enable, sizeof(session_enable.session_enable)); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]RootCMDQEnqueue Error, free default session parent resource"); - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) send default session enable success,rport index(0x%x),context id(0x%x) SID=(0x%x), DID=(0x%x)", - hba->port_cfg.port_id, rport_info->rport_index, - parent_queue_info->parent_sq_info.context_id, - rport_info->local_nport_id, rport_info->nport_id); - - return RETURN_OK; -} - -u32 spfc_alloc_parent_resource(void *handle, struct unf_port_info *rport_info) -{ - u32 ret = UNF_RETURN_ERROR; - struct spfc_hba_info *hba = NULL; - struct spfc_parent_queue_info *parent_queue_info = NULL; - ulong flag = 0; - spinlock_t *prtq_state_lock = NULL; - u32 index; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport_info, UNF_RETURN_ERROR); - - hba = (struct spfc_hba_info *)handle; - if (!hba->parent_queue_mgr) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) cannot find parent queue pool", - hba->port_cfg.port_id); - - return UNF_RETURN_ERROR; - } - - index = rport_info->rport_index; - if (index >= UNF_SPFC_MAXRPORT_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) allocate parent resource failed, invlaid rport index(0x%x),rport nportid(0x%x)", - hba->port_cfg.port_id, index, - rport_info->nport_id); - - return UNF_RETURN_ERROR; - } - - parent_queue_info = &hba->parent_queue_mgr->parent_queue[index]; - prtq_state_lock = &parent_queue_info->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - if (parent_queue_info->offload_state != SPFC_QUEUE_STATE_FREE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) allocate parent resource failed, invlaid rport index(0x%x),rport nportid(0x%x), offload state(0x%x)", - hba->port_cfg.port_id, index, rport_info->nport_id, - parent_queue_info->offload_state); - - 
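The state checks in this function and in spfc_free_parent_resource() below imply a simple life cycle for each parent queue: allocation may only leave the FREE state, and teardown behavior depends on how far offload progressed. A hedged sketch of that implied state machine (names mirror the driver's SPFC_QUEUE_STATE_* constants):

enum parent_queue_state {
	QUEUE_FREE,		/* no resources bound */
	QUEUE_INITIALIZED,	/* context + SQ allocated, not offloaded */
	QUEUE_OFFLOADING,	/* offload command pushed to ucode */
	QUEUE_OFFLOADED,	/* session live in hardware */
	QUEUE_DESTROYING,	/* reset/flush in progress */
};

/* Allocation is only legal from FREE ... */
static int can_alloc(enum parent_queue_state s)
{
	return s == QUEUE_FREE;
}

/* ... and only an OFFLOADED session needs a ucode-side reset first. */
static int needs_session_reset(enum parent_queue_state s)
{
	return s == QUEUE_OFFLOADED;
}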
spin_unlock_irqrestore(prtq_state_lock, flag);
-		return UNF_RETURN_ERROR;
-	}
-
-	parent_queue_info->offload_state = SPFC_QUEUE_STATE_INITIALIZED;
-	/* Create Parent Context and Link List SQ */
-	ret = spfc_alloc_parent_sq(hba, parent_queue_info, rport_info);
-	if (ret != RETURN_OK) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-			     "Port(0x%x) alloc session resource failed. rport index(0x%x), rport nportid(0x%x).",
-			     hba->port_cfg.port_id, index,
-			     rport_info->nport_id);
-
-		parent_queue_info->offload_state = SPFC_QUEUE_STATE_FREE;
-		spfc_invalid_parent_sq(&parent_queue_info->parent_sq_info);
-		spin_unlock_irqrestore(prtq_state_lock, flag);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	/* Allocate the corresponding queue xid to each parent */
-	spfc_map_shared_queue_qid(hba, parent_queue_info, rport_info->rport_index);
-
-	/* Initialize Parent Context, including hardware area and ucode area */
-	spfc_init_parent_context(hba, parent_queue_info);
-
-	spin_unlock_irqrestore(prtq_state_lock, flag);
-
-	/* Only the default session is enabled explicitly; all others are enabled implicitly */
-	if (unlikely(rport_info->rport_index == SPFC_DEFAULT_RPORT_INDEX))
-		return spfc_send_session_enable(handle, rport_info);
-
-	parent_queue_info->parent_sq_info.local_port_id = rport_info->local_nport_id;
-	parent_queue_info->parent_sq_info.remote_port_id = rport_info->nport_id;
-	FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-		     "[info]Port(0x%x) allocate parent sq success, rport index(0x%x), rport nportid(0x%x), context id(0x%x)",
-		     hba->port_cfg.port_id, rport_info->rport_index,
-		     rport_info->nport_id,
-		     parent_queue_info->parent_sq_info.context_id);
-
-	return ret;
-}
-
-u32 spfc_free_parent_resource(void *handle, struct unf_port_info *rport_info)
-{
-	struct spfc_parent_queue_info *parent_queue_info = NULL;
-	ulong flag = 0;
-	ulong rst_flag = 0;
-	u32 ret = UNF_RETURN_ERROR;
-	enum spfc_session_reset_mode mode = SPFC_SESS_RST_DELETE_IO_CONN_BOTH;
-	struct spfc_hba_info *hba = NULL;
-	spinlock_t *prtq_state_lock = NULL;
-	spinlock_t *sq_enq_lock = NULL;
-	u32 index;
-
-	FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR);
-	FC_CHECK_RETURN_VALUE(rport_info, UNF_RETURN_ERROR);
-
-	hba = (struct spfc_hba_info *)handle;
-	if (!hba->parent_queue_mgr) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-			     "[warn]Port(0x%x) cannot find parent queue pool",
-			     hba->port_cfg.port_id);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	/* get parent queue info (by rport index) */
-	if (rport_info->rport_index >= UNF_SPFC_MAXRPORT_NUM) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-			     "[warn]Port(0x%x) free parent resource failed, invalid rport_index(%u) rport_nport_id(0x%x)",
-			     hba->port_cfg.port_id, rport_info->rport_index, rport_info->nport_id);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	index = rport_info->rport_index;
-	parent_queue_info = &hba->parent_queue_mgr->parent_queue[index];
-	prtq_state_lock = &parent_queue_info->parent_queue_state_lock;
-	sq_enq_lock = &parent_queue_info->parent_sq_info.parent_sq_enqueue_lock;
-
-	spin_lock_irqsave(prtq_state_lock, flag);
-	/* 1. The session has already been offloaded */
-	if (parent_queue_info->offload_state == SPFC_QUEUE_STATE_OFFLOADED) {
-		parent_queue_info->offload_state = SPFC_QUEUE_STATE_DESTROYING;
-		spin_unlock_irqrestore(prtq_state_lock, flag);
-
-		/* Set the reset state to prevent new I/O from entering the SQ */
-		spin_lock_irqsave(sq_enq_lock, rst_flag);
-		parent_queue_info->parent_sq_info.sq_in_sess_rst = true;
-		spin_unlock_irqrestore(sq_enq_lock, rst_flag);
-
-		/* check pcie device state */
-		if (!hba->dev_present) {
-			FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-				     "[info]Port(0x%x) hba is not present, free directly. rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)",
-				     hba->port_cfg.port_id, rport_info->rport_index,
-				     parent_queue_info->parent_sq_info.rport_index,
-				     parent_queue_info->parent_sq_info.local_port_id,
-				     rport_info->nport_id,
-				     parent_queue_info->parent_sq_info.remote_port_id);
-
-			spfc_free_parent_queue_info(hba, parent_queue_info);
-			return RETURN_OK;
-		}
-
-		parent_queue_info->parent_sq_info.del_start_jiff = jiffies;
-		(void)queue_delayed_work(hba->work_queue,
-					 &parent_queue_info->parent_sq_info.del_work,
-					 (ulong)msecs_to_jiffies((u32)
-					 SPFC_SQ_DEL_STAGE_TIMEOUT_MS));
-
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-			     "[info]Port(0x%x) begin to reset parent session, rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)",
-			     hba->port_cfg.port_id, rport_info->rport_index,
-			     parent_queue_info->parent_sq_info.rport_index,
-			     parent_queue_info->parent_sq_info.local_port_id,
-			     rport_info->nport_id,
-			     parent_queue_info->parent_sq_info.remote_port_id);
-		/* Forcibly set both mode */
-		mode = SPFC_SESS_RST_DELETE_IO_CONN_BOTH;
-		ret = spfc_send_session_rst_cmd(hba, parent_queue_info, mode);
-
-		return ret;
-	} else if (parent_queue_info->offload_state == SPFC_QUEUE_STATE_INITIALIZED) {
-		/* 2. Resources have been allocated but not yet offloaded */
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-			     "[info]Port(0x%x) parent sq is not offloaded, free directly. rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)",
-			     hba->port_cfg.port_id, rport_info->rport_index,
-			     parent_queue_info->parent_sq_info.rport_index,
-			     parent_queue_info->parent_sq_info.local_port_id,
-			     rport_info->nport_id,
-			     parent_queue_info->parent_sq_info.remote_port_id);
-
-		spin_unlock_irqrestore(prtq_state_lock, flag);
-		spfc_free_parent_queue_info(hba, parent_queue_info);
-
-		return RETURN_OK;
-	} else if (parent_queue_info->offload_state ==
-		   SPFC_QUEUE_STATE_OFFLOADING) {
-		/* 3. The offload command has been sent to the ucode and is still in flight */
-		spfc_push_destroy_parent_queue_sqe(hba, parent_queue_info, rport_info);
-		spin_unlock_irqrestore(prtq_state_lock, flag);
-
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR,
-			     "[info]Port(0x%x) parent sq is offloading, push to delay free. 
rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", - hba->port_cfg.port_id, rport_info->rport_index, - parent_queue_info->parent_sq_info.rport_index, - parent_queue_info->parent_sq_info.local_port_id, - rport_info->nport_id, - parent_queue_info->parent_sq_info.remote_port_id); - - return RETURN_OK; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) parent sq is not created, do not need free state(0x%x) rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", - hba->port_cfg.port_id, parent_queue_info->offload_state, - rport_info->rport_index, - parent_queue_info->parent_sq_info.rport_index, - parent_queue_info->parent_sq_info.local_port_id, - rport_info->nport_id, - parent_queue_info->parent_sq_info.remote_port_id); - - spin_unlock_irqrestore(prtq_state_lock, flag); - - return RETURN_OK; -} - -void spfc_free_parent_queue_mgr(void *handle) -{ - u32 index = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID(handle); - - hba = (struct spfc_hba_info *)handle; - if (!hba->parent_queue_mgr) - return; - parent_queue_mgr = hba->parent_queue_mgr; - - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - if (parent_queue_mgr->parent_queue[index].parent_ctx.parent_ctx) - parent_queue_mgr->parent_queue[index].parent_ctx.parent_ctx = NULL; - } - - if (parent_queue_mgr->parent_sq_buf_list.buflist) { - for (index = 0; index < parent_queue_mgr->parent_sq_buf_list.buf_num; index++) { - if (parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr != 0) { - pci_unmap_single(hba->pci_dev, - parent_queue_mgr->parent_sq_buf_list - .buflist[index].paddr, - parent_queue_mgr->parent_sq_buf_list.buf_size, - DMA_BIDIRECTIONAL); - parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr = 0; - } - kfree(parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr); - parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr = NULL; - } - - kfree(parent_queue_mgr->parent_sq_buf_list.buflist); - parent_queue_mgr->parent_sq_buf_list.buflist = NULL; - } - - vfree(parent_queue_mgr); - hba->parent_queue_mgr = NULL; -} - -void spfc_free_parent_queues(void *handle) -{ - u32 index = 0; - ulong flag = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - struct spfc_hba_info *hba = NULL; - spinlock_t *prtq_state_lock = NULL; - - FC_CHECK_RETURN_VOID(handle); - - hba = (struct spfc_hba_info *)handle; - parent_queue_mgr = hba->parent_queue_mgr; - - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - prtq_state_lock = &parent_queue_mgr->parent_queue[index].parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - if (SPFC_QUEUE_STATE_DESTROYING == - parent_queue_mgr->parent_queue[index].offload_state) { - spin_unlock_irqrestore(prtq_state_lock, flag); - - (void)cancel_delayed_work_sync(&parent_queue_mgr->parent_queue[index] - .parent_sq_info.del_work); - (void)cancel_delayed_work_sync(&parent_queue_mgr->parent_queue[index] - .parent_sq_info.flush_done_timeout_work); - - /* free parent queue */ - spfc_free_parent_queue_info(hba, &parent_queue_mgr->parent_queue[index]); - continue; - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - } -} - -/* - *Function Name : spfc_alloc_parent_queue_mgr - *Function Description: Allocate and initialize parent queue manager. 
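- *                      Illustrative note (values assumed, not from this
- *                      driver): contexts are carved from "huge buffers" of
- *                      at most BUF_LIST_PAGE_SIZE bytes. If prt_ctx_size
- *                      were 256B, BUF_LIST_PAGE_SIZE 4KB and
- *                      UNF_SPFC_MAXRPORT_NUM 2048, each buffer would hold
- *                      4096 / 256 = 16 contexts, and buf_num =
- *                      ceil(2048 / 16) = 128 buffers would be allocated
- *                      and DMA-mapped below.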
- *Input Parameters    : *handle
- *Output Parameters   : N/A
- *Return Type         : u32
- */
-u32 spfc_alloc_parent_queue_mgr(void *handle)
-{
-	u32 index = 0;
-	struct spfc_parent_queue_mgr *parent_queue_mgr = NULL;
-	u32 buf_total_size;
-	u32 buf_num;
-	u32 alloc_idx;
-	u32 cur_buf_idx = 0;
-	u32 cur_buf_offset = 0;
-	u32 prt_ctx_size = sizeof(struct spfc_parent_context);
-	u32 buf_cnt_perhugebuf;
-	struct spfc_hba_info *hba = NULL;
-	u32 init_val = INVALID_VALUE32;
-	dma_addr_t paddr;
-
-	FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR);
-
-	hba = (struct spfc_hba_info *)handle;
-	parent_queue_mgr = (struct spfc_parent_queue_mgr *)vmalloc(sizeof
-				(struct spfc_parent_queue_mgr));
-	if (!parent_queue_mgr) {
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR,
-			     "[err]Port(0x%x) cannot allocate queue manager",
-			     hba->port_cfg.port_id);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	hba->parent_queue_mgr = parent_queue_mgr;
-	memset(parent_queue_mgr, 0, sizeof(struct spfc_parent_queue_mgr));
-
-	for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) {
-		spin_lock_init(&parent_queue_mgr->parent_queue[index].parent_queue_state_lock);
-		parent_queue_mgr->parent_queue[index].offload_state = SPFC_QUEUE_STATE_FREE;
-		spin_lock_init(&(parent_queue_mgr->parent_queue[index]
-				.parent_sq_info.parent_sq_enqueue_lock));
-		parent_queue_mgr->parent_queue[index].parent_cmd_scq_info.cqm_queue_id = init_val;
-		parent_queue_mgr->parent_queue[index].parent_sts_scq_info.cqm_queue_id = init_val;
-		parent_queue_mgr->parent_queue[index].parent_els_srq_info.cqm_queue_id = init_val;
-		parent_queue_mgr->parent_queue[index].parent_sq_info.del_start_jiff = init_val;
-		parent_queue_mgr->parent_queue[index].queue_vport_id = hba->vpid_start;
-	}
-
-	buf_total_size = prt_ctx_size * UNF_SPFC_MAXRPORT_NUM;
-	parent_queue_mgr->parent_sq_buf_list.buf_size = buf_total_size > BUF_LIST_PAGE_SIZE ?
-							BUF_LIST_PAGE_SIZE : buf_total_size;
-	buf_cnt_perhugebuf = parent_queue_mgr->parent_sq_buf_list.buf_size / prt_ctx_size;
-	buf_num = UNF_SPFC_MAXRPORT_NUM % buf_cnt_perhugebuf ?
- UNF_SPFC_MAXRPORT_NUM / buf_cnt_perhugebuf + 1 : - UNF_SPFC_MAXRPORT_NUM / buf_cnt_perhugebuf; - parent_queue_mgr->parent_sq_buf_list.buflist = - (struct buff_list *)kmalloc(buf_num * sizeof(struct buff_list), GFP_KERNEL); - parent_queue_mgr->parent_sq_buf_list.buf_num = buf_num; - - if (!parent_queue_mgr->parent_sq_buf_list.buflist) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate QueuMgr buf list failed out of memory"); - goto free_parent_queue; - } - memset(parent_queue_mgr->parent_sq_buf_list.buflist, 0, buf_num * sizeof(struct buff_list)); - - for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { - parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr = - kmalloc(parent_queue_mgr->parent_sq_buf_list.buf_size, GFP_KERNEL); - if (!parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr) - goto free_parent_queue; - - memset(parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr, 0, - parent_queue_mgr->parent_sq_buf_list.buf_size); - - parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr = - pci_map_single(hba->pci_dev, - parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr, - parent_queue_mgr->parent_sq_buf_list.buf_size, - DMA_BIDIRECTIONAL); - paddr = parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr; - if (pci_dma_mapping_error(hba->pci_dev, paddr)) { - parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr = 0; - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Map QueuMgr address failed"); - - goto free_parent_queue; - } - } - - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - cur_buf_idx = index / buf_cnt_perhugebuf; - cur_buf_offset = prt_ctx_size * (index % buf_cnt_perhugebuf); - - parent_queue_mgr->parent_queue[index].parent_ctx.parent_ctx = - parent_queue_mgr->parent_sq_buf_list.buflist[cur_buf_idx].vaddr + - cur_buf_offset; - parent_queue_mgr->parent_queue[index].parent_ctx.parent_ctx_addr = - parent_queue_mgr->parent_sq_buf_list.buflist[cur_buf_idx].paddr + - cur_buf_offset; - } - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[EVENT]Allocate bufnum:%u,buf_total_size:%u", buf_num, buf_total_size); - - return RETURN_OK; - -free_parent_queue: - spfc_free_parent_queue_mgr(hba); - return UNF_RETURN_ERROR; -} - -static void spfc_rlease_all_wqe_pages(struct spfc_hba_info *hba) -{ - u32 index; - struct spfc_wqe_page *wpg = NULL; - - FC_CHECK_RETURN_VOID((hba)); - - wpg = hba->sq_wpg_pool.wpg_pool_addr; - - for (index = 0; index < hba->sq_wpg_pool.wpg_cnt; index++) { - if (wpg->wpg_addr) { - dma_pool_free(hba->sq_wpg_pool.wpg_dma_pool, - wpg->wpg_addr, wpg->wpg_phy_addr); - wpg->wpg_addr = NULL; - wpg->wpg_phy_addr = 0; - } - - wpg++; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port[%u] free total %u wqepages", hba->port_index, - index); -} - -u32 spfc_alloc_parent_sq_wqe_page_pool(void *handle) -{ - u32 index = 0; - struct spfc_sq_wqepage_pool *wpg_pool = NULL; - struct spfc_wqe_page *wpg = NULL; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - wpg_pool = &hba->sq_wpg_pool; - - INIT_LIST_HEAD(&wpg_pool->list_free_wpg_pool); - spin_lock_init(&wpg_pool->wpg_pool_lock); - atomic_set(&wpg_pool->wpg_in_use, 0); - - /* Calculate the number of Wqe Page required in the pool */ - wpg_pool->wpg_size = wqe_page_size; - wpg_pool->wpg_cnt = SPFC_MIN_WP_NUM * SPFC_MAX_SSQ_NUM + - ((hba->exi_count * SPFC_SQE_SIZE) / wpg_pool->wpg_size); - wpg_pool->wqe_per_wpg = wpg_pool->wpg_size / SPFC_SQE_SIZE; - - /* Craete DMA POOL */ - wpg_pool->wpg_dma_pool = 
dma_pool_create("spfc_wpg_pool", - &hba->pci_dev->dev, - wpg_pool->wpg_size, - SPFC_SQE_SIZE, 0); - if (!wpg_pool->wpg_dma_pool) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Cannot allocate SQ WqePage DMA pool"); - - goto out_create_dma_pool_err; - } - - /* Allocate arrays to record all WqePage addresses */ - wpg_pool->wpg_pool_addr = (struct spfc_wqe_page *)vmalloc(wpg_pool->wpg_cnt * - sizeof(struct spfc_wqe_page)); - if (!wpg_pool->wpg_pool_addr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Allocate SQ WqePageAddr array failed"); - - goto out_alloc_wpg_array_err; - } - wpg = wpg_pool->wpg_pool_addr; - memset(wpg, 0, wpg_pool->wpg_cnt * sizeof(struct spfc_wqe_page)); - - for (index = 0; index < wpg_pool->wpg_cnt; index++) { - wpg->wpg_addr = dma_pool_alloc(wpg_pool->wpg_dma_pool, GFP_KERNEL, - (u64 *)&wpg->wpg_phy_addr); - if (!wpg->wpg_addr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, "[err]Dma pool allocated failed"); - break; - } - - /* To ensure security, clear the memory */ - memset(wpg->wpg_addr, 0, wpg_pool->wpg_size); - - /* Add to the idle linked list */ - INIT_LIST_HEAD(&wpg->entry_wpg); - list_add_tail(&wpg->entry_wpg, &wpg_pool->list_free_wpg_pool); - - wpg++; - } - /* ALL allocated successfully */ - if (wpg_pool->wpg_cnt == index) - return RETURN_OK; - - spfc_rlease_all_wqe_pages(hba); - vfree(wpg_pool->wpg_pool_addr); - wpg_pool->wpg_pool_addr = NULL; - -out_alloc_wpg_array_err: - dma_pool_destroy(wpg_pool->wpg_dma_pool); - wpg_pool->wpg_dma_pool = NULL; - -out_create_dma_pool_err: - return UNF_RETURN_ERROR; -} - -void spfc_free_parent_sq_wqe_page_pool(void *handle) -{ - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID((handle)); - hba = (struct spfc_hba_info *)handle; - spfc_rlease_all_wqe_pages(hba); - hba->sq_wpg_pool.wpg_cnt = 0; - - if (hba->sq_wpg_pool.wpg_pool_addr) { - vfree(hba->sq_wpg_pool.wpg_pool_addr); - hba->sq_wpg_pool.wpg_pool_addr = NULL; - } - - dma_pool_destroy(hba->sq_wpg_pool.wpg_dma_pool); - hba->sq_wpg_pool.wpg_dma_pool = NULL; -} - -static u32 spfc_parent_sq_ring_direct_wqe_doorbell(struct spfc_parent_ssq_info *sq, u8 *direct_wqe) -{ - u32 ret = RETURN_OK; - int ravl; - u16 pmsn; - u64 queue_hdr_db_val; - struct spfc_hba_info *hba; - - hba = (struct spfc_hba_info *)sq->hba; - pmsn = sq->last_pmsn; - - if (sq->cache_id == INVALID_VALUE32) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]SQ(0x%x) invalid cid", sq->context_id); - return RETURN_ERROR; - } - /* Fill Doorbell Record */ - queue_hdr_db_val = sq->queue_header->door_bell_record; - queue_hdr_db_val &= (u64)(~(0xFFFFFFFF)); - queue_hdr_db_val |= (u64)((u64)pmsn << UNF_SHIFT_16 | pmsn); - sq->queue_header->door_bell_record = - cpu_to_be64(queue_hdr_db_val); - - ravl = cqm_ring_direct_wqe_db_fc(hba->dev_handle, SERVICE_T_FC, direct_wqe); - if (unlikely(ravl != CQM_SUCCESS)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]SQ(0x%x) send DB failed", sq->context_id); - - ret = RETURN_ERROR; - } - - atomic_inc(&sq->sq_db_cnt); - - return ret; -} - -u32 spfc_parent_sq_ring_doorbell(struct spfc_parent_ssq_info *sq, u8 qos_level, u32 c) -{ - u32 ret = RETURN_OK; - int ravl; - u16 pmsn; - u8 pmsn_lo; - u8 pmsn_hi; - u64 db_val_qw; - struct spfc_hba_info *hba; - struct spfc_parent_sq_db door_bell; - - hba = (struct spfc_hba_info *)sq->hba; - pmsn = sq->last_pmsn; - /* Obtain the low 8 Bit of PMSN */ - pmsn_lo = (u8)(pmsn & SPFC_PMSN_MASK); - /* Obtain the high 8 Bit of PMSN */ - pmsn_hi = (u8)((pmsn >> UNF_SHIFT_8) & SPFC_PMSN_MASK); - door_bell.wd0.service_type = 
SPFC_LSW(sq->service_type); - door_bell.wd0.cos = 0; - /* c = 0 data type, c = 1 control type, two type are different in mqm */ - door_bell.wd0.c = c; - door_bell.wd0.arm = SPFC_DB_ARM_DISABLE; - door_bell.wd0.cntx_size = SPFC_CNTX_SIZE_T_256B; - door_bell.wd0.xid = sq->context_id; - door_bell.wd1.sm_data = sq->cache_id; - door_bell.wd1.qid = sq->sq_queue_id; - door_bell.wd1.pi_hi = (u32)pmsn_hi; - - if (sq->cache_id == INVALID_VALUE32) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]SQ(0x%x) invalid cid", sq->context_id); - return UNF_RETURN_ERROR; - } - /* Fill Doorbell Record */ - db_val_qw = sq->queue_header->door_bell_record; - db_val_qw &= (u64)(~(SPFC_DB_VAL_MASK)); - db_val_qw |= (u64)((u64)pmsn << UNF_SHIFT_16 | pmsn); - sq->queue_header->door_bell_record = cpu_to_be64(db_val_qw); - - /* ring doorbell */ - db_val_qw = *(u64 *)&door_bell; - ravl = cqm3_ring_hardware_db_fc(hba->dev_handle, SERVICE_T_FC, pmsn_lo, - (qos_level & SPFC_QOS_LEVEL_MASK), - db_val_qw); - if (unlikely(ravl != CQM_SUCCESS)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]SQ(0x%x) send DB(0x%llx) failed", - sq->context_id, db_val_qw); - - ret = UNF_RETURN_ERROR; - } - - /* Doorbell success counter */ - atomic_inc(&sq->sq_db_cnt); - - return ret; -} - -u32 spfc_direct_sq_enqueue(struct spfc_parent_ssq_info *ssq, struct spfc_sqe *io_sqe, u8 wqe_type) -{ - u32 ret = RETURN_OK; - u32 msn_wd = INVALID_VALUE32; - u16 link_wqe_msn = 0; - ulong flag = 0; - struct spfc_wqe_page *tail_wpg = NULL; - struct spfc_sqe *sqe_in_wp = NULL; - struct spfc_linkwqe *link_wqe = NULL; - struct spfc_linkwqe *link_wqe_last_part = NULL; - u64 wqe_gpa; - struct spfc_direct_wqe_db dre_door_bell; - - spin_lock_irqsave(&ssq->parent_sq_enqueue_lock, flag); - tail_wpg = SPFC_GET_SQ_TAIL(ssq); - if (ssq->wqe_offset == ssq->wqe_num_per_buf) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_INFO, - "[info]Ssq(0x%x), xid(0x%x) qid(0x%x) add wqepage at Pmsn(0x%x), sqe_minus_cqe_cnt(0x%x)", - ssq->sqn, ssq->context_id, ssq->sq_queue_id, - ssq->last_pmsn, - atomic_read(&ssq->sqe_minus_cqe_cnt)); - - link_wqe_msn = SPFC_MSN_DEC(ssq->last_pmsn); - link_wqe = (struct spfc_linkwqe *)spfc_get_wqe_page_entry(tail_wpg, - ssq->wqe_offset); - msn_wd = be32_to_cpu(link_wqe->val_wd1); - msn_wd |= ((u32)(link_wqe_msn & SPFC_MSNWD_L_MASK)); - msn_wd |= (((u32)(link_wqe_msn & SPFC_MSNWD_H_MASK)) << UNF_SHIFT_16); - link_wqe->val_wd1 = cpu_to_be32(msn_wd); - link_wqe_last_part = (struct spfc_linkwqe *)((u8 *)link_wqe + - SPFC_EXTEND_WQE_OFFSET); - link_wqe_last_part->val_wd1 = link_wqe->val_wd1; - spfc_set_direct_wqe_owner_be(link_wqe, ssq->last_pi_owner); - ssq->wqe_offset = 0; - ssq->last_pi_owner = !ssq->last_pi_owner; - } - sqe_in_wp = - (struct spfc_sqe *)spfc_get_wqe_page_entry(tail_wpg, ssq->wqe_offset); - spfc_build_wqe_owner_pmsn(io_sqe, (ssq->last_pi_owner), ssq->last_pmsn); - SPFC_IO_STAT((struct spfc_hba_info *)ssq->hba, wqe_type); - - wqe_gpa = tail_wpg->wpg_phy_addr + (ssq->wqe_offset * sizeof(struct spfc_sqe)); - io_sqe->wqe_gpa = (wqe_gpa >> UNF_SHIFT_6); - - dre_door_bell.wd0.ddb = IWARP_FC_DDB_TYPE; - dre_door_bell.wd0.cos = 0; - dre_door_bell.wd0.c = 0; - dre_door_bell.wd0.pi_hi = - (u32)(ssq->last_pmsn >> UNF_SHIFT_12) & SPFC_DB_WD0_PI_H_MASK; - dre_door_bell.wd0.cntx_size = SPFC_CNTX_SIZE_T_256B; - dre_door_bell.wd0.xid = ssq->context_id; - dre_door_bell.wd1.sm_data = ssq->cache_id; - dre_door_bell.wd1.pi_lo = (u32)(ssq->last_pmsn & SPFC_DB_WD0_PI_L_MASK); - io_sqe->db_val = *(u64 *)&dre_door_bell; - - spfc_convert_parent_wqe_to_big_endian(io_sqe); - 
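-	/*
-	 * Illustrative note (pmsn value assumed): the doorbell record packs
-	 * the producer MSN twice into its low 32 bits, i.e.
-	 * (pmsn << 16) | pmsn; with pmsn = 0x0102 that yields 0x01020102
-	 * before the cpu_to_be64(). Both doorbell helpers above use the
-	 * same encoding.
-	 */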
memcpy(sqe_in_wp, io_sqe, sizeof(struct spfc_sqe)); - spfc_set_direct_wqe_owner_be(sqe_in_wp, ssq->last_pi_owner); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_INFO, - "[INFO]Ssq(0x%x) xid:0x%x,qid:0x%x wqegpa:0x%llx,o:0x%x,outstandind:0x%x,pmsn:0x%x,cmsn:0x%x", - ssq->sqn, ssq->context_id, ssq->sq_queue_id, wqe_gpa, - ssq->last_pi_owner, atomic_read(&ssq->sqe_minus_cqe_cnt), - ssq->last_pmsn, SPFC_GET_QUEUE_CMSN(ssq)); - - ssq->accum_wqe_cnt++; - if (ssq->accum_wqe_cnt == accum_db_num) { - ret = spfc_parent_sq_ring_direct_wqe_doorbell(ssq, (void *)sqe_in_wp); - if (unlikely(ret != RETURN_OK)) - SPFC_ERR_IO_STAT((struct spfc_hba_info *)ssq->hba, wqe_type); - ssq->accum_wqe_cnt = 0; - } - - ssq->wqe_offset += 1; - ssq->last_pmsn = SPFC_MSN_INC(ssq->last_pmsn); - atomic_inc(&ssq->sq_wqe_cnt); - atomic_inc(&ssq->sqe_minus_cqe_cnt); - SPFC_SQ_IO_STAT(ssq, wqe_type); - spin_unlock_irqrestore(&ssq->parent_sq_enqueue_lock, flag); - return ret; -} - -u32 spfc_parent_ssq_enqueue(struct spfc_parent_ssq_info *ssq, struct spfc_sqe *io_sqe, u8 wqe_type) -{ - u32 ret = RETURN_OK; - u32 addr_wd = INVALID_VALUE32; - u32 msn_wd = INVALID_VALUE32; - u16 link_wqe_msn = 0; - ulong flag = 0; - struct spfc_wqe_page *new_wqe_page = NULL; - struct spfc_wqe_page *tail_wpg = NULL; - struct spfc_sqe *sqe_in_wp = NULL; - struct spfc_linkwqe *link_wqe = NULL; - struct spfc_linkwqe *link_wqe_last_part = NULL; - u32 cur_cmsn = 0; - u8 qos_level = (u8)io_sqe->ts_sl.cont.icmnd.info.dif_info.wd1.vpid; - u32 c = SPFC_DB_C_BIT_CONTROL_TYPE; - - if (ssq->queue_style == SPFC_QUEUE_RING_STYLE) - return spfc_direct_sq_enqueue(ssq, io_sqe, wqe_type); - - spin_lock_irqsave(&ssq->parent_sq_enqueue_lock, flag); - tail_wpg = SPFC_GET_SQ_TAIL(ssq); - if (ssq->wqe_offset == ssq->wqe_num_per_buf) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_INFO, - "[info]Ssq(0x%x), xid(0x%x) qid(0x%x) add wqepage at Pmsn(0x%x), WpgCnt(0x%x)", - ssq->sqn, ssq->context_id, ssq->sq_queue_id, - ssq->last_pmsn, - atomic_read(&ssq->wqe_page_cnt)); - cur_cmsn = SPFC_GET_QUEUE_CMSN(ssq); - spfc_free_sq_wqe_page(ssq, cur_cmsn); - new_wqe_page = spfc_add_one_wqe_page(ssq); - if (unlikely(!new_wqe_page)) { - SPFC_ERR_IO_STAT((struct spfc_hba_info *)ssq->hba, wqe_type); - spin_unlock_irqrestore(&ssq->parent_sq_enqueue_lock, flag); - return UNF_RETURN_ERROR; - } - link_wqe = (struct spfc_linkwqe *)spfc_get_wqe_page_entry(tail_wpg, - ssq->wqe_offset); - addr_wd = SPFC_MSD(new_wqe_page->wpg_phy_addr); - link_wqe->next_page_addr_hi = cpu_to_be32(addr_wd); - addr_wd = SPFC_LSD(new_wqe_page->wpg_phy_addr); - link_wqe->next_page_addr_lo = cpu_to_be32(addr_wd); - link_wqe_msn = SPFC_MSN_DEC(ssq->last_pmsn); - msn_wd = be32_to_cpu(link_wqe->val_wd1); - msn_wd |= ((u32)(link_wqe_msn & SPFC_MSNWD_L_MASK)); - msn_wd |= (((u32)(link_wqe_msn & SPFC_MSNWD_H_MASK)) << UNF_SHIFT_16); - link_wqe->val_wd1 = cpu_to_be32(msn_wd); - link_wqe_last_part = (struct spfc_linkwqe *)((u8 *)link_wqe + - SPFC_EXTEND_WQE_OFFSET); - link_wqe_last_part->next_page_addr_hi = link_wqe->next_page_addr_hi; - link_wqe_last_part->next_page_addr_lo = link_wqe->next_page_addr_lo; - link_wqe_last_part->val_wd1 = link_wqe->val_wd1; - spfc_set_sq_wqe_owner_be(link_wqe); - ssq->wqe_offset = 0; - tail_wpg = SPFC_GET_SQ_TAIL(ssq); - atomic_inc(&ssq->wqe_page_cnt); - } - - spfc_build_wqe_owner_pmsn(io_sqe, !(ssq->last_pi_owner), ssq->last_pmsn); - SPFC_IO_STAT((struct spfc_hba_info *)ssq->hba, wqe_type); - spfc_convert_parent_wqe_to_big_endian(io_sqe); - sqe_in_wp = (struct spfc_sqe *)spfc_get_wqe_page_entry(tail_wpg, 
ssq->wqe_offset); - memcpy(sqe_in_wp, io_sqe, sizeof(struct spfc_sqe)); - spfc_set_sq_wqe_owner_be(sqe_in_wp); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_INFO, - "[INFO]Ssq(0x%x) xid:0x%x,qid:0x%x wqegpa:0x%llx, qos_level:0x%x, c:0x%x", - ssq->sqn, ssq->context_id, ssq->sq_queue_id, - virt_to_phys(sqe_in_wp), qos_level, c); - - ssq->accum_wqe_cnt++; - if (ssq->accum_wqe_cnt == accum_db_num) { - ret = spfc_parent_sq_ring_doorbell(ssq, qos_level, c); - if (unlikely(ret != RETURN_OK)) - SPFC_ERR_IO_STAT((struct spfc_hba_info *)ssq->hba, wqe_type); - ssq->accum_wqe_cnt = 0; - } - ssq->wqe_offset += 1; - ssq->last_pmsn = SPFC_MSN_INC(ssq->last_pmsn); - atomic_inc(&ssq->sq_wqe_cnt); - atomic_inc(&ssq->sqe_minus_cqe_cnt); - SPFC_SQ_IO_STAT(ssq, wqe_type); - spin_unlock_irqrestore(&ssq->parent_sq_enqueue_lock, flag); - return ret; -} - -u32 spfc_parent_sq_enqueue(struct spfc_parent_sq_info *sq, struct spfc_sqe *io_sqe, u16 ssqn) -{ - u8 wqe_type = 0; - struct spfc_hba_info *hba = (struct spfc_hba_info *)sq->hba; - struct spfc_parent_ssq_info *ssq = NULL; - - if (unlikely(ssqn >= SPFC_MAX_SSQ_NUM)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Ssqn 0x%x is invalid.", ssqn); - - return UNF_RETURN_ERROR; - } - - wqe_type = (u8)SPFC_GET_WQE_TYPE(io_sqe); - - /* Serial enqueue */ - io_sqe->ts_sl.xid = sq->context_id; - io_sqe->ts_sl.cid = sq->cache_id; - io_sqe->ts_sl.sqn = ssqn; - - /* Choose SSQ */ - ssq = &hba->parent_queue_mgr->shared_queue[ssqn].parent_ssq_info; - - /* If the SQ is invalid, the wqe is discarded */ - if (unlikely(!atomic_read(&sq->sq_valid))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]SQ is invalid, reject wqe(0x%x)", wqe_type); - - return UNF_RETURN_ERROR; - } - - /* The heartbeat detection status is 0, which allows control sessions - * enqueuing - */ - if (unlikely(!hba->heart_status && SPFC_WQE_IS_IO(io_sqe))) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]Heart status is false"); - - return UNF_RETURN_ERROR; - } - - if (sq->need_offloaded != SPFC_NEED_DO_OFFLOAD) { - /* Ensure to be offloaded */ - if (unlikely(!atomic_read(&sq->sq_cached))) { - SPFC_ERR_IO_STAT((struct spfc_hba_info *)sq->hba, wqe_type); - SPFC_HBA_STAT((struct spfc_hba_info *)sq->hba, - SPFC_STAT_PARENT_SQ_NOT_OFFLOADED); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]RPort(0x%x) Session(0x%x) is not offloaded, reject wqe(0x%x)", - sq->rport_index, sq->context_id, wqe_type); - - return UNF_RETURN_ERROR; - } - } - - /* Whether the SQ is in the flush state. Temporarily allow the control - * sessions to enqueue. 
- */
-	if (unlikely(sq->port_in_flush && SPFC_WQE_IS_IO(io_sqe))) {
-		SPFC_ERR_IO_STAT((struct spfc_hba_info *)sq->hba, wqe_type);
-		SPFC_HBA_STAT((struct spfc_hba_info *)sq->hba, SPFC_STAT_PARENT_IO_FLUSHED);
-
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Session(0x%x) in flush, Sqn(0x%x) cmsn(0x%x), reject wqe(0x%x)",
-			     sq->context_id, ssqn, SPFC_GET_QUEUE_CMSN(ssq),
-			     wqe_type);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	/* If the SQ is in the session deletion state and the WQE belongs to
-	 * the I/O path, the I/O is failed back directly
-	 */
-	if (unlikely(sq->sq_in_sess_rst && SPFC_WQE_IS_IO(io_sqe))) {
-		SPFC_ERR_IO_STAT((struct spfc_hba_info *)sq->hba, wqe_type);
-		SPFC_HBA_STAT((struct spfc_hba_info *)sq->hba, SPFC_STAT_PARENT_IO_FLUSHED);
-
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR,
-			     "[err]Session(0x%x) in session reset, reject wqe(0x%x)",
-			     sq->context_id, wqe_type);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	return spfc_parent_ssq_enqueue(ssq, io_sqe, wqe_type);
-}
-
-static bool spfc_msn_in_wqe_page(u32 start_msn, u32 end_msn, u32 cur_cmsn)
-{
-	bool ret = true;
-
-	if (end_msn >= start_msn) {
-		if (cur_cmsn < start_msn || cur_cmsn > end_msn)
-			ret = false;
-		else
-			ret = true;
-	} else {
-		if (cur_cmsn > end_msn && cur_cmsn < start_msn)
-			ret = false;
-		else
-			ret = true;
-	}
-
-	return ret;
-}
-
-void spfc_free_sq_wqe_page(struct spfc_parent_ssq_info *ssq, u32 cur_cmsn)
-{
-	u16 wpg_start_cmsn = 0;
-	u16 wpg_end_cmsn = 0;
-	bool wqe_page_in_use = false;
-
-	/* If there is only zero or one Wqe Page, no release is required */
-	if (atomic_read(&ssq->wqe_page_cnt) <= SPFC_MIN_WP_NUM)
-		return;
-
-	/* Check whether the current MSN is within the MSN range covered by the
-	 * WqePage
-	 */
-	wpg_start_cmsn = ssq->head_start_cmsn;
-	wpg_end_cmsn = ssq->head_end_cmsn;
-	wqe_page_in_use = spfc_msn_in_wqe_page(wpg_start_cmsn, wpg_end_cmsn, cur_cmsn);
-
-	/* If the value of CMSN is within the current Wqe Page, no release is
-	 * required
-	 */
-	if (wqe_page_in_use)
-		return;
-
-	/* If the next WqePage is available and the CMSN is not in the current
-	 * WqePage, the current WqePage is released
-	 */
-	while (!wqe_page_in_use &&
-	       (atomic_read(&ssq->wqe_page_cnt) > SPFC_MIN_WP_NUM)) {
-		/* Free WqePage */
-		spfc_free_head_wqe_page(ssq);
-
-		/* Obtain the start MSN of the next WqePage */
-		wpg_start_cmsn = SPFC_MSN_INC(wpg_end_cmsn);
-
-		/* Obtain the end MSN of the next WqePage */
-		wpg_end_cmsn =
-		    SPFC_GET_WP_END_CMSN(wpg_start_cmsn, ssq->wqe_num_per_buf);
-
-		/* Set new MSN range */
-		ssq->head_start_cmsn = wpg_start_cmsn;
-		ssq->head_end_cmsn = wpg_end_cmsn;
-		cur_cmsn = SPFC_GET_QUEUE_CMSN(ssq);
-		/* Check whether the current MSN is within the MSN range covered
-		 * by the WqePage
-		 */
-		wqe_page_in_use = spfc_msn_in_wqe_page(wpg_start_cmsn, wpg_end_cmsn, cur_cmsn);
-	}
-}
-
-/*
- *Function Name       : spfc_update_sq_wqe_completion_stat
- *Function Description: Update the completion statistics of the CQEs
- *corresponding to the WQEs on the connection SQ.
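- *                      (Inferred from this file: sqe_minus_cqe_cnt counts
- *                      WQEs still awaiting completion; it is incremented on
- *                      every enqueue path above and decremented here when
- *                      the matching CQE arrives, so a steady value of 0
- *                      means the SQ has drained.)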
- *Input Parameters    : *ssq, *scqe
- *Output Parameters   : N/A
- *Return Type         : void
- */
-static void spfc_update_sq_wqe_completion_stat(struct spfc_parent_ssq_info *ssq,
-					       union spfc_scqe *scqe)
-{
-	struct spfc_scqe_rcv_els_gs_rsp *els_gs_rsp = NULL;
-
-	els_gs_rsp = (struct spfc_scqe_rcv_els_gs_rsp *)scqe;
-
-	/* For ELS/GS RSP intermediate frames, and for CQEs other than
-	 * ELS_GS_RSP_EXCH_CHECK_FAIL, no statistics are required
-	 */
-	if (unlikely(SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_ELS_RSP ||
-		     SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_GS_RSP)) {
-		if (!els_gs_rsp->wd3.end_rsp || !SPFC_SCQE_ERR_TO_CM(scqe))
-			return;
-	}
-
-	/* When the SQ statistics are updated, a PlogiAcc or PlogiAccSts that
-	 * is implicitly unloaded will enter here, and one more CQE count is
-	 * added
-	 */
-	atomic_inc(&ssq->sq_cqe_cnt);
-	atomic_dec(&ssq->sqe_minus_cqe_cnt);
-	SPFC_SQ_IO_STAT(ssq, SPFC_GET_SCQE_TYPE(scqe));
-}
-
-/*
- *Function Name       : spfc_reclaim_sq_wqe_page
- *Function Description: Reclaim the Wqe Pages that have been used up in the
- *                      Linked List SQ.
- *Input Parameters    : *handle,
- *                      *scqe
- *Output Parameters   : N/A
- *Return Type         : u32
- */
-u32 spfc_reclaim_sq_wqe_page(void *handle, union spfc_scqe *scqe)
-{
-	u32 ret = RETURN_OK;
-	u32 cur_cmsn = 0;
-	u32 sqn = INVALID_VALUE32;
-	struct spfc_parent_ssq_info *ssq = NULL;
-	struct spfc_parent_shared_queue_info *parent_queue_info = NULL;
-	struct spfc_hba_info *hba = NULL;
-	ulong flag = 0;
-
-	hba = (struct spfc_hba_info *)handle;
-	sqn = SPFC_GET_SCQE_SQN(scqe);
-	if (sqn >= SPFC_MAX_SSQ_NUM) {
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR,
-			     "[err]Port(0x%x) does not have sqn: 0x%x",
-			     hba->port_cfg.port_id, sqn);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	parent_queue_info = &hba->parent_queue_mgr->shared_queue[sqn];
-	ssq = &parent_queue_info->parent_ssq_info;
-	/* If there is only zero or one Wqe Page, no release is required */
-	if (atomic_read(&ssq->wqe_page_cnt) <= SPFC_MIN_WP_NUM) {
-		spfc_update_sq_wqe_completion_stat(ssq, scqe);
-		return RETURN_OK;
-	}
-
-	spin_lock_irqsave(&ssq->parent_sq_enqueue_lock, flag);
-	cur_cmsn = SPFC_GET_QUEUE_CMSN(ssq);
-	spfc_free_sq_wqe_page(ssq, cur_cmsn);
-	spin_unlock_irqrestore(&ssq->parent_sq_enqueue_lock, flag);
-
-	spfc_update_sq_wqe_completion_stat(ssq, scqe);
-
-	return ret;
-}
-
-u32 spfc_root_cmdq_enqueue(void *handle, union spfc_cmdqe *cmdqe, u16 cmd_len)
-{
-#define SPFC_ROOTCMDQ_TIMEOUT_MS 3000
-	u8 wqe_type = 0;
-	int cmq_ret = 0;
-	struct sphw_cmd_buf *cmd_buf = NULL;
-	struct spfc_hba_info *hba = NULL;
-
-	hba = (struct spfc_hba_info *)handle;
-	wqe_type = (u8)cmdqe->common.wd0.task_type;
-	SPFC_IO_STAT(hba, wqe_type);
-
-	cmd_buf = sphw_alloc_cmd_buf(hba->dev_handle);
-	if (!cmd_buf) {
-		SPFC_ERR_IO_STAT(hba, wqe_type);
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) CqmHandle(0x%p) allocate cmdq buffer failed",
-			     hba->port_cfg.port_id, hba->dev_handle);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	memcpy(cmd_buf->buf, cmdqe, cmd_len);
-	spfc_cpu_to_big32(cmd_buf->buf, cmd_len);
-	cmd_buf->size = cmd_len;
-
-	cmq_ret = sphw_cmdq_async(hba->dev_handle, COMM_MOD_FC, 0, cmd_buf, SPHW_CHANNEL_FC);
-
-	if (cmq_ret != RETURN_OK) {
-		sphw_free_cmd_buf(hba->dev_handle, cmd_buf);
-		SPFC_ERR_IO_STAT(hba, wqe_type);
-
-		FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN,
-			     "[warn]Port(0x%x) CqmHandle(0x%p) send buff clear cmnd failed(0x%x)",
-			     hba->port_cfg.port_id, hba->dev_handle, cmq_ret);
-		return UNF_RETURN_ERROR;
-	}
-
-	return RETURN_OK;
-}
-
-struct spfc_parent_queue_info *
-spfc_find_parent_queue_info_by_pkg(void *handle, struct unf_frame_pkg *pkg) -{ - u32 rport_index = 0; - struct spfc_parent_queue_info *parent_queue_info = NULL; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - rport_index = pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]; - - if (unlikely(rport_index >= UNF_SPFC_MAXRPORT_NUM)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[warn]Port(0x%x) send pkg sid_did(0x%x_0x%x), but uplevel allocate invalid rport index: 0x%x", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did, rport_index); - - return NULL; - } - - /* parent -->> session */ - parent_queue_info = &hba->parent_queue_mgr->parent_queue[rport_index]; - - return parent_queue_info; -} - -struct spfc_parent_queue_info *spfc_find_parent_queue_info_by_id(struct spfc_hba_info *hba, - u32 local_id, u32 remote_id) -{ - u32 index = 0; - ulong flag = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - struct spfc_parent_queue_info *parent_queue_info = NULL; - spinlock_t *prtq_state_lock = NULL; - u32 lport_id; - u32 rport_id; - - parent_queue_mgr = hba->parent_queue_mgr; - if (!parent_queue_mgr) - return NULL; - - /* rport_number -->> parent_number -->> session_number */ - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - prtq_state_lock = &parent_queue_mgr->parent_queue[index].parent_queue_state_lock; - lport_id = parent_queue_mgr->parent_queue[index].parent_sq_info.local_port_id; - rport_id = parent_queue_mgr->parent_queue[index].parent_sq_info.remote_port_id; - spin_lock_irqsave(prtq_state_lock, flag); - - /* local_id & remote_id & offload */ - if (local_id == lport_id && remote_id == rport_id && - parent_queue_mgr->parent_queue[index].offload_state == - SPFC_QUEUE_STATE_OFFLOADED) { - parent_queue_info = &parent_queue_mgr->parent_queue[index]; - spin_unlock_irqrestore(prtq_state_lock, flag); - - return parent_queue_info; - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - } - - return NULL; -} - -struct spfc_parent_queue_info *spfc_find_offload_parent_queue(void *handle, u32 local_id, - u32 remote_id, u32 rport_index) -{ - u32 index = 0; - ulong flag = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - struct spfc_parent_queue_info *parent_queue_info = NULL; - struct spfc_hba_info *hba = NULL; - spinlock_t *prtq_state_lock = NULL; - - hba = (struct spfc_hba_info *)handle; - parent_queue_mgr = hba->parent_queue_mgr; - if (!parent_queue_mgr) - return NULL; - - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - if (rport_index == index) - continue; - prtq_state_lock = &parent_queue_mgr->parent_queue[index].parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - if (local_id == parent_queue_mgr->parent_queue[index] - .parent_sq_info.local_port_id && - remote_id == parent_queue_mgr->parent_queue[index] - .parent_sq_info.remote_port_id && - parent_queue_mgr->parent_queue[index].offload_state != - SPFC_QUEUE_STATE_FREE && - parent_queue_mgr->parent_queue[index].offload_state != - SPFC_QUEUE_STATE_INITIALIZED) { - parent_queue_info = &parent_queue_mgr->parent_queue[index]; - spin_unlock_irqrestore(prtq_state_lock, flag); - - return parent_queue_info; - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - } - - return NULL; -} - -struct spfc_parent_sq_info *spfc_find_parent_sq_by_pkg(void *handle, struct unf_frame_pkg *pkg) -{ - struct spfc_parent_queue_info *parent_queue_info = NULL; - struct cqm_qpc_mpt *cqm_parent_ctxt_obj = NULL; - struct spfc_hba_info *hba = NULL; - - hba = 
(struct spfc_hba_info *)handle; - parent_queue_info = spfc_find_parent_queue_info_by_pkg(hba, pkg); - if (unlikely(!parent_queue_info)) { - parent_queue_info = spfc_find_parent_queue_info_by_id(hba, - pkg->frame_head.csctl_sid & - UNF_NPORTID_MASK, - pkg->frame_head.rctl_did & - UNF_NPORTID_MASK); - if (!parent_queue_info) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), get a null parent queue information", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return NULL; - } - } - - cqm_parent_ctxt_obj = (parent_queue_info->parent_ctx.cqm_parent_ctx_obj); - if (unlikely(!cqm_parent_ctxt_obj)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x) with this rport has not alloc parent sq information", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return NULL; - } - - return &parent_queue_info->parent_sq_info; -} - -u32 spfc_check_all_parent_queue_free(struct spfc_hba_info *hba) -{ - u32 index = 0; - ulong flag = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - spinlock_t *prtq_state_lock = NULL; - - parent_queue_mgr = hba->parent_queue_mgr; - if (!parent_queue_mgr) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[err]Port(0x%x) get a null parent queue mgr", - hba->port_cfg.port_id); - - return UNF_RETURN_ERROR; - } - - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - prtq_state_lock = &parent_queue_mgr->parent_queue[index].parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - if (parent_queue_mgr->parent_queue[index].offload_state != SPFC_QUEUE_STATE_FREE) { - spin_unlock_irqrestore(prtq_state_lock, flag); - return UNF_RETURN_ERROR; - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - } - - return RETURN_OK; -} - -void spfc_flush_specific_scq(struct spfc_hba_info *hba, u32 index) -{ - /* The software interrupt is scheduled and processed during the second - * timeout period - */ - struct spfc_scq_info *scq_info = NULL; - u32 flush_done_time = 0; - - scq_info = &hba->scq_info[index]; - atomic_set(&scq_info->flush_stat, SPFC_QUEUE_FLUSH_DOING); - tasklet_schedule(&scq_info->tasklet); - - /* Wait for a maximum of 2 seconds. 
If the SCQ soft interrupt is not - * scheduled * within 2 seconds, only timeout is returned - */ - while ((atomic_read(&scq_info->flush_stat) != SPFC_QUEUE_FLUSH_DONE) && - (flush_done_time < SPFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { - msleep(SPFC_QUEUE_FLUSH_WAIT_MS); - flush_done_time += SPFC_QUEUE_FLUSH_WAIT_MS; - tasklet_schedule(&scq_info->tasklet); - } - - if (atomic_read(&scq_info->flush_stat) != SPFC_QUEUE_FLUSH_DONE) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]Port(0x%x) special scq(0x%x) flush timeout", - hba->port_cfg.port_id, index); - } -} - -static void spfc_flush_cmd_scq(struct spfc_hba_info *hba) -{ - u32 index = 0; - - for (index = SPFC_CMD_SCQN_START; index < SPFC_SESSION_SCQ_NUM; - index += SPFC_SCQS_PER_SESSION) { - spfc_flush_specific_scq(hba, index); - } -} - -static void spfc_flush_sts_scq(struct spfc_hba_info *hba) -{ - u32 index = 0; - - /* for each STS SCQ */ - for (index = SPFC_STS_SCQN_START; index < SPFC_SESSION_SCQ_NUM; - index += SPFC_SCQS_PER_SESSION) { - spfc_flush_specific_scq(hba, index); - } -} - -static void spfc_flush_all_scq(struct spfc_hba_info *hba) -{ - spfc_flush_cmd_scq(hba); - spfc_flush_sts_scq(hba); - /* Flush Default SCQ */ - spfc_flush_specific_scq(hba, SPFC_SESSION_SCQ_NUM); -} - -void spfc_wait_all_queues_empty(struct spfc_hba_info *hba) -{ - spfc_flush_all_scq(hba); -} - -void spfc_set_rport_flush_state(void *handle, bool in_flush) -{ - u32 index = 0; - ulong flag = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - parent_queue_mgr = hba->parent_queue_mgr; - if (!parent_queue_mgr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) parent queue manager is empty", - hba->port_cfg.port_id); - return; - } - - /* - * for each HBA's R_Port(SQ), - * set state with been flushing or flush done - */ - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - spin_lock_irqsave(&parent_queue_mgr->parent_queue[index] - .parent_sq_info.parent_sq_enqueue_lock, flag); - if (parent_queue_mgr->parent_queue[index].offload_state != SPFC_QUEUE_STATE_FREE) { - parent_queue_mgr->parent_queue[index] - .parent_sq_info.port_in_flush = in_flush; - } - spin_unlock_irqrestore(&parent_queue_mgr->parent_queue[index] - .parent_sq_info.parent_sq_enqueue_lock, flag); - } -} - -u32 spfc_clear_fetched_sq_wqe(void *handle) -{ - u32 ret = UNF_RETURN_ERROR; - union spfc_cmdqe cmdqe; - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - - hba = (struct spfc_hba_info *)handle; - /* - * The ROOT SQ cannot control the WQE in the empty queue of the ROOT SQ. - * Therefore, the ROOT SQ does not enqueue the WQE after the hardware - * obtains the. Link down after the wait mode is used. Therefore, the - * WQE of the hardware driver needs to enter the WQE of the queue after - * the Link down of the Link down is reported. 
- */ - memset(&cmdqe, 0, sizeof(union spfc_cmdqe)); - spfc_build_cmdqe_common(&cmdqe, SPFC_TASK_T_BUFFER_CLEAR, 0); - cmdqe.buffer_clear.wd1.rx_id_start = hba->exi_base; - cmdqe.buffer_clear.wd1.rx_id_end = hba->exi_base + hba->exi_count - 1; - cmdqe.buffer_clear.scqn = hba->default_scqn; - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "[info]Port(0x%x) start clear all fetched wqe in start(0x%x) - end(0x%x) scqn(0x%x) stage(0x%x)", - hba->port_cfg.port_id, cmdqe.buffer_clear.wd1.rx_id_start, - cmdqe.buffer_clear.wd1.rx_id_end, cmdqe.buffer_clear.scqn, - hba->queue_set_stage); - - /* Send BUFFER_CLEAR command via ROOT CMDQ */ - ret = spfc_root_cmdq_enqueue(hba, &cmdqe, sizeof(cmdqe.buffer_clear)); - - return ret; -} - -u32 spfc_clear_pending_sq_wqe(void *handle) -{ - u32 ret = UNF_RETURN_ERROR; - u32 cmdqe_len = 0; - ulong flag = 0; - struct spfc_parent_ssq_info *ssq_info = NULL; - union spfc_cmdqe cmdqe; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - memset(&cmdqe, 0, sizeof(union spfc_cmdqe)); - spfc_build_cmdqe_common(&cmdqe, SPFC_TASK_T_FLUSH_SQ, 0); - cmdqe.flush_sq.wd0.wqe_type = SPFC_TASK_T_FLUSH_SQ; - cmdqe.flush_sq.wd1.scqn = SPFC_LSW(hba->default_scqn); - cmdqe.flush_sq.wd1.port_id = hba->port_index; - - ssq_info = &hba->parent_queue_mgr->shared_queue[ARRAY_INDEX_0].parent_ssq_info; - - spin_lock_irqsave(&ssq_info->parent_sq_enqueue_lock, flag); - cmdqe.flush_sq.wd3.first_sq_xid = ssq_info->context_id; - spin_unlock_irqrestore(&ssq_info->parent_sq_enqueue_lock, flag); - cmdqe.flush_sq.wd0.entry_count = SPFC_MAX_SSQ_NUM; - cmdqe.flush_sq.wd3.sqqid_start_per_session = SPFC_SQ_QID_START_PER_QPC; - cmdqe.flush_sq.wd3.sqcnt_per_session = SPFC_SQ_NUM_PER_QPC; - cmdqe.flush_sq.wd1.last_wqe = 1; - - /* Clear pending Queue */ - cmdqe_len = (u32)(sizeof(cmdqe.flush_sq)); - ret = spfc_root_cmdq_enqueue(hba, &cmdqe, (u16)cmdqe_len); - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "[info]Port(0x%x) clear total 0x%x SQ in this CMDQE(last=%u), stage (0x%x)", - hba->port_cfg.port_id, SPFC_MAX_SSQ_NUM, - cmdqe.flush_sq.wd1.last_wqe, hba->queue_set_stage); - - return ret; -} - -u32 spfc_wait_queue_set_flush_done(struct spfc_hba_info *hba) -{ - u32 flush_done_time = 0; - u32 ret = RETURN_OK; - - while ((hba->queue_set_stage != SPFC_QUEUE_SET_STAGE_FLUSHDONE) && - (flush_done_time < SPFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { - msleep(SPFC_QUEUE_FLUSH_WAIT_MS); - flush_done_time += SPFC_QUEUE_FLUSH_WAIT_MS; - } - - if (hba->queue_set_stage != SPFC_QUEUE_SET_STAGE_FLUSHDONE) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]Port(0x%x) queue sets flush timeout with stage(0x%x)", - hba->port_cfg.port_id, hba->queue_set_stage); - - ret = UNF_RETURN_ERROR; - } - - return ret; -} - -void spfc_disable_all_scq_schedule(struct spfc_hba_info *hba) -{ - struct spfc_scq_info *scq_info = NULL; - u32 index = 0; - - for (index = 0; index < SPFC_TOTAL_SCQ_NUM; index++) { - scq_info = &hba->scq_info[index]; - tasklet_disable(&scq_info->tasklet); - } -} - -void spfc_disable_queues_dispatch(struct spfc_hba_info *hba) -{ - spfc_disable_all_scq_schedule(hba); -} - -void spfc_enable_all_scq_schedule(struct spfc_hba_info *hba) -{ - struct spfc_scq_info *scq_info = NULL; - u32 index = 0; - - for (index = 0; index < SPFC_TOTAL_SCQ_NUM; index++) { - scq_info = &hba->scq_info[index]; - tasklet_enable(&scq_info->tasklet); - } -} - -void spfc_enalbe_queues_dispatch(void *handle) -{ - spfc_enable_all_scq_schedule((struct spfc_hba_info *)handle); -} - -/* - *Function Name : spfc_clear_els_srq - *Function 
Description: When the port is used as the remove, the resources - *related to the els srq are deleted. - *Input Parameters : *hba Output Parameters - *Return Type : void - */ -void spfc_clear_els_srq(struct spfc_hba_info *hba) -{ -#define SPFC_WAIT_CLR_SRQ_CTX_MS 500 -#define SPFC_WAIT_CLR_SRQ_CTX_LOOP_TIMES 60 - - u32 index = 0; - ulong flag = 0; - struct spfc_srq_info *srq_info = NULL; - - srq_info = &hba->els_srq_info; - - spin_lock_irqsave(&srq_info->srq_spin_lock, flag); - if (!srq_info->enable || srq_info->state == SPFC_CLEAN_DOING) { - spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); - - return; - } - srq_info->enable = false; - srq_info->state = SPFC_CLEAN_DOING; - spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); - - spfc_send_clear_srq_cmd(hba, &hba->els_srq_info); - - /* wait for uCode to clear SRQ context, the timer is 30S */ - while ((srq_info->state != SPFC_CLEAN_DONE) && - (index < SPFC_WAIT_CLR_SRQ_CTX_LOOP_TIMES)) { - msleep(SPFC_WAIT_CLR_SRQ_CTX_MS); - index++; - } - - if (srq_info->state != SPFC_CLEAN_DONE) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]SPFC Port(0x%x) clear els srq timeout", - hba->port_cfg.port_id); - } -} - -u32 spfc_wait_all_parent_queue_free(struct spfc_hba_info *hba) -{ -#define SPFC_MAX_LOOP_TIMES 6000 -#define SPFC_WAIT_ONE_TIME_MS 5 - u32 index = 0; - u32 ret = UNF_RETURN_ERROR; - - do { - ret = spfc_check_all_parent_queue_free(hba); - if (ret == RETURN_OK) - break; - - index++; - msleep(SPFC_WAIT_ONE_TIME_MS); - } while (index < SPFC_MAX_LOOP_TIMES); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[warn]Port(0x%x) wait all parent queue state free timeout", - hba->port_cfg.port_id); - } - - return ret; -} - -/* - *Function Name : spfc_queue_pre_process - *Function Description: When the port functions as the remove, the queue needs - * to be preprocessed. - *Input Parameters : *handle, - * clean - *Output Parameters : N/A - *Return Type : void - */ -void spfc_queue_pre_process(void *handle, bool clean) -{ -#define SPFC_WAIT_LINKDOWN_EVENT_MS 500 - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - /* From port reset & port remove */ - /* 1. Wait for 2s and wait for QUEUE to be FLUSH Done. */ - if (spfc_wait_queue_set_flush_done(hba) != RETURN_OK) { - /* - * During the process of removing the card, if the port is - * disabled and the flush done is not available, the chip is - * powered off or the pcie link is disconnected. In this case, - * you can proceed with the next step. - */ - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]SPFC Port(0x%x) clean queue sets timeout", - hba->port_cfg.port_id); - } - - /* - * 2. Port remove: - * 2.1 free parent queue - * 2.2 clear & destroy ELS/SIRT SRQ - */ - if (clean) { - if (spfc_wait_all_parent_queue_free(hba) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_WARN, - "[warn]SPFC Port(0x%x) free all parent queue timeout", - hba->port_cfg.port_id); - } - - /* clear & than destroy ELS/SIRT SRQ */ - spfc_clear_els_srq(hba); - } - - msleep(SPFC_WAIT_LINKDOWN_EVENT_MS); - - /* - * 3. The internal resources of the port chip are flush done. However, - * there may be residual scqe or rq in the queue. The scheduling is - * forcibly refreshed once. - */ - spfc_wait_all_queues_empty(hba); - - /* 4. 
Disable tasklet scheduling for upstream queues on the software
-	 * layer
-	 */
-	spfc_disable_queues_dispatch(hba);
-}
-
-void spfc_queue_post_process(void *hba)
-{
-	spfc_enalbe_queues_dispatch((struct spfc_hba_info *)hba);
-}
-
-/*
- *Function Name       : spfc_push_delay_sqe
- *Function Description: Check whether the sq is being deleted. If it is,
- *                      stash the sqe in the delay-sqe slot until the
- *                      deletion completes.
- *Input Parameters    : *hba,
- *                      *offload_parent_queue,
- *                      *sqe,
- *                      *pkg
- *Output Parameters   : N/A
- *Return Type         : u32
- */
-u32 spfc_push_delay_sqe(void *hba,
-			struct spfc_parent_queue_info *offload_parent_queue,
-			struct spfc_sqe *sqe, struct unf_frame_pkg *pkg)
-{
-	ulong flag = 0;
-	spinlock_t *prtq_state_lock = NULL;
-
-	prtq_state_lock = &offload_parent_queue->parent_queue_state_lock;
-	spin_lock_irqsave(prtq_state_lock, flag);
-
-	if (offload_parent_queue->offload_state != SPFC_QUEUE_STATE_INITIALIZED &&
-	    offload_parent_queue->offload_state != SPFC_QUEUE_STATE_FREE) {
-		memcpy(&offload_parent_queue->parent_sq_info.delay_sqe.sqe,
-		       sqe, sizeof(struct spfc_sqe));
-		offload_parent_queue->parent_sq_info.delay_sqe.start_jiff = jiffies;
-		offload_parent_queue->parent_sq_info.delay_sqe.time_out =
-		    pkg->private_data[PKG_PRIVATE_XCHG_TIMEER];
-		offload_parent_queue->parent_sq_info.delay_sqe.valid = true;
-		offload_parent_queue->parent_sq_info.delay_sqe.rport_index =
-		    pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX];
-		offload_parent_queue->parent_sq_info.delay_sqe.sid =
-		    pkg->frame_head.csctl_sid & UNF_NPORTID_MASK;
-		offload_parent_queue->parent_sq_info.delay_sqe.did =
-		    pkg->frame_head.rctl_did & UNF_NPORTID_MASK;
-		offload_parent_queue->parent_sq_info.delay_sqe.xid =
-		    sqe->ts_sl.xid;
-		offload_parent_queue->parent_sq_info.delay_sqe.ssqn =
-		    (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX];
-
-		spin_unlock_irqrestore(prtq_state_lock, flag);
-
-		FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR,
-			     "[info]Port(0x%x) RPort(0x%x) delay send ELS, OXID(0x%x), RXID(0x%x)",
-			     ((struct spfc_hba_info *)hba)->port_cfg.port_id,
-			     pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX],
-			     UNF_GET_OXID(pkg), UNF_GET_RXID(pkg));
-
-		return RETURN_OK;
-	}
-
-	spin_unlock_irqrestore(prtq_state_lock, flag);
-
-	return UNF_RETURN_ERROR;
-}
-
-static u32 spfc_pop_session_valid_check(struct spfc_hba_info *hba,
-					struct spfc_delay_sqe_ctrl_info *sqe_info, u32 rport_index)
-{
-	if (!sqe_info->valid)
-		return UNF_RETURN_ERROR;
-
-	if (jiffies_to_msecs(jiffies - sqe_info->start_jiff) >= sqe_info->time_out) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-			     "[err]Port(0x%x) pop delay enable session failed, start time 0x%llx, timeout value 0x%x",
-			     hba->port_cfg.port_id, sqe_info->start_jiff,
-			     sqe_info->time_out);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	if (rport_index >= UNF_SPFC_MAXRPORT_NUM) {
-		FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR,
-			     "[err]Port(0x%x) pop delay enable session failed, rport index(0x%x) is invalid",
-			     hba->port_cfg.port_id, rport_index);
-
-		return UNF_RETURN_ERROR;
-	}
-
-	return RETURN_OK;
-}
-
-/*
- *Function Name       : spfc_pop_delay_sqe
- *Function Description: The sqe that was delayed because the old connection
- *                      was being deleted is sent to the root sq for
- *                      processing.
- *
Input Parameters : *hba, *sqe_info Output Parameters : N/A - *Return Type : void - */ -static void spfc_pop_delay_sqe(struct spfc_hba_info *hba, - struct spfc_delay_sqe_ctrl_info *sqe_info) -{ - ulong flag; - u32 delay_rport_index = INVALID_VALUE32; - struct spfc_parent_queue_info *parent_queue = NULL; - enum spfc_parent_queue_state offload_state = - SPFC_QUEUE_STATE_DESTROYING; - struct spfc_delay_destroy_ctrl_info destroy_sqe_info; - u32 ret = UNF_RETURN_ERROR; - struct spfc_parent_sq_info *sq_info = NULL; - spinlock_t *prtq_state_lock = NULL; - - memset(&destroy_sqe_info, 0, sizeof(struct spfc_delay_destroy_ctrl_info)); - delay_rport_index = sqe_info->rport_index; - - /* According to the sequence, the rport index id is reported and then - * the sqe of the new link setup request is delivered. - */ - ret = spfc_pop_session_valid_check(hba, sqe_info, delay_rport_index); - - if (ret != RETURN_OK) - return; - - parent_queue = &hba->parent_queue_mgr->parent_queue[delay_rport_index]; - sq_info = &parent_queue->parent_sq_info; - prtq_state_lock = &parent_queue->parent_queue_state_lock; - /* Before the root sq is delivered, check the status again to - * ensure that the initialization status is not uninstalled. Other - * states are not processed and are discarded directly. - */ - spin_lock_irqsave(prtq_state_lock, flag); - offload_state = parent_queue->offload_state; - - /* Before re-enqueuing the rootsq, check whether the offload status and - * connection information is consistent to prevent the old request from - * being sent after the connection status is changed. - */ - if (offload_state == SPFC_QUEUE_STATE_INITIALIZED && - parent_queue->parent_sq_info.local_port_id == sqe_info->sid && - parent_queue->parent_sq_info.remote_port_id == sqe_info->did && - SPFC_CHECK_XID_MATCHED(parent_queue->parent_sq_info.context_id, - sqe_info->sqe.ts_sl.xid)) { - parent_queue->offload_state = SPFC_QUEUE_STATE_OFFLOADING; - spin_unlock_irqrestore(prtq_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) pop up delay session enable, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", - hba->port_cfg.port_id, sqe_info->start_jiff, - sqe_info->time_out, delay_rport_index, offload_state); - - if (spfc_parent_sq_enqueue(sq_info, &sqe_info->sqe, sqe_info->ssqn) != RETURN_OK) { - spin_lock_irqsave(prtq_state_lock, flag); - - if (parent_queue->offload_state == SPFC_QUEUE_STATE_OFFLOADING) - parent_queue->offload_state = offload_state; - - if (parent_queue->parent_sq_info.destroy_sqe.valid) { - memcpy(&destroy_sqe_info, - &parent_queue->parent_sq_info.destroy_sqe, - sizeof(struct spfc_delay_destroy_ctrl_info)); - - parent_queue->parent_sq_info.destroy_sqe.valid = false; - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - - spfc_pop_destroy_parent_queue_sqe((void *)hba, &destroy_sqe_info); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) pop up delay session enable fail, recover offload state 0x%x", - hba->port_cfg.port_id, parent_queue->offload_state); - return; - } - } else { - spin_unlock_irqrestore(prtq_state_lock, flag); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port 0x%x pop delay session enable failed, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", - hba->port_cfg.port_id, sqe_info->start_jiff, - sqe_info->time_out, delay_rport_index, - offload_state); - } -} - -void spfc_push_destroy_parent_queue_sqe(void *hba, - struct spfc_parent_queue_info *offloading_parent_queue, - struct 
unf_port_info *rport_info)
-{
-	offloading_parent_queue->parent_sq_info.destroy_sqe.valid = true;
-	offloading_parent_queue->parent_sq_info.destroy_sqe.rport_index = rport_info->rport_index;
-	offloading_parent_queue->parent_sq_info.destroy_sqe.time_out =
-	    SPFC_SQ_DEL_STAGE_TIMEOUT_MS;
-	offloading_parent_queue->parent_sq_info.destroy_sqe.start_jiff = jiffies;
-	offloading_parent_queue->parent_sq_info.destroy_sqe.rport_info.nport_id =
-	    rport_info->nport_id;
-	offloading_parent_queue->parent_sq_info.destroy_sqe.rport_info.rport_index =
-	    rport_info->rport_index;
-	offloading_parent_queue->parent_sq_info.destroy_sqe.rport_info.port_name =
-	    rport_info->port_name;
-}
-
-/*
- *Function Name       : spfc_pop_destroy_parent_queue_sqe
- *Function Description: The delete-connection sqe that was delayed while the
- *                      connection was being unloaded is sent to the parent
- *                      sq for processing.
- *Input Parameters    : *handle, *destroy_sqe_info
- *Output Parameters   : N/A
- *Return Type         : void
- */
-void spfc_pop_destroy_parent_queue_sqe(void *handle,
-				       struct spfc_delay_destroy_ctrl_info *destroy_sqe_info)
-{
-	u32 ret = UNF_RETURN_ERROR;
-	ulong flag;
-	u32 index = INVALID_VALUE32;
-	struct spfc_parent_queue_info *parent_queue = NULL;
-	enum spfc_parent_queue_state offload_state =
-	    SPFC_QUEUE_STATE_DESTROYING;
-	struct spfc_hba_info *hba = NULL;
-	spinlock_t *prtq_state_lock = NULL;
-
-	hba = (struct spfc_hba_info *)handle;
-	if (!destroy_sqe_info->valid)
-		return;
-
-	if (jiffies_to_msecs(jiffies - destroy_sqe_info->start_jiff) < destroy_sqe_info->time_out) {
-		index = destroy_sqe_info->rport_index;
-		parent_queue = &hba->parent_queue_mgr->parent_queue[index];
-		prtq_state_lock = &parent_queue->parent_queue_state_lock;
-		/* Before delivery, check the state again: only a session that
-		 * is still initialized or offloaded is processed; any other
-		 * state is discarded directly.
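-		 * (Inferred note: the delayed destroy is honored only within
-		 * time_out ms of start_jiff, i.e. SPFC_SQ_DEL_STAGE_TIMEOUT_MS
-		 * as set by spfc_push_destroy_parent_queue_sqe(); once that
-		 * window has passed, only the error log below fires.)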
- */ - spin_lock_irqsave(prtq_state_lock, flag); - - offload_state = parent_queue->offload_state; - if (offload_state == SPFC_QUEUE_STATE_OFFLOADED || - offload_state == SPFC_QUEUE_STATE_INITIALIZED) { - spin_unlock_irqrestore(prtq_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port 0x%x pop up delay destroy parent sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", - hba->port_cfg.port_id, - destroy_sqe_info->start_jiff, - destroy_sqe_info->time_out, - index, offload_state); - ret = spfc_free_parent_resource(hba, &destroy_sqe_info->rport_info); - } else { - ret = UNF_RETURN_ERROR; - spin_unlock_irqrestore(prtq_state_lock, flag); - } - } - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port 0x%x pop delay destroy parent sq failed, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, rport nport id 0x%x, offload state 0x%x", - hba->port_cfg.port_id, destroy_sqe_info->start_jiff, - destroy_sqe_info->time_out, index, - destroy_sqe_info->rport_info.nport_id, offload_state); - } -} - -void spfc_free_parent_queue_info(void *handle, struct spfc_parent_queue_info *parent_queue_info) -{ - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - u32 rport_index = INVALID_VALUE32; - struct spfc_hba_info *hba = NULL; - struct spfc_delay_sqe_ctrl_info sqe_info; - spinlock_t *prtq_state_lock = NULL; - - memset(&sqe_info, 0, sizeof(struct spfc_delay_sqe_ctrl_info)); - hba = (struct spfc_hba_info *)handle; - prtq_state_lock = &parent_queue_info->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) begin to free parent sq, rport_index(0x%x)", - hba->port_cfg.port_id, parent_queue_info->parent_sq_info.rport_index); - - if (parent_queue_info->offload_state == SPFC_QUEUE_STATE_FREE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[info]Port(0x%x) duplicate free parent sq, rport_index(0x%x)", - hba->port_cfg.port_id, - parent_queue_info->parent_sq_info.rport_index); - - spin_unlock_irqrestore(prtq_state_lock, flag); - return; - } - - if (parent_queue_info->parent_sq_info.delay_sqe.valid) { - memcpy(&sqe_info, &parent_queue_info->parent_sq_info.delay_sqe, - sizeof(struct spfc_delay_sqe_ctrl_info)); - } - - rport_index = parent_queue_info->parent_sq_info.rport_index; - - /* The Parent Context and SQ information is released.
After - * initialization, the Parent Context and SQ information is associated - * with the sq in the queue of the parent - */ - - spin_unlock_irqrestore(prtq_state_lock, flag); - spfc_free_parent_sq(hba, parent_queue_info); - spin_lock_irqsave(prtq_state_lock, flag); - - /* Initialize all queue ids to the invalid value */ - parent_queue_info->parent_cmd_scq_info.cqm_queue_id = INVALID_VALUE32; - parent_queue_info->parent_sts_scq_info.cqm_queue_id = INVALID_VALUE32; - parent_queue_info->parent_els_srq_info.cqm_queue_id = INVALID_VALUE32; - parent_queue_info->offload_state = SPFC_QUEUE_STATE_FREE; - - spin_unlock_irqrestore(prtq_state_lock, flag); - - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RELEASE_RPORT_INDEX, - (void *)&rport_index); - - spfc_pop_delay_sqe(hba, &sqe_info); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Port(0x%x) free parent sq with rport_index(0x%x) failed", - hba->port_cfg.port_id, rport_index); - } -} - -static void spfc_do_port_reset(struct work_struct *work) -{ - struct spfc_suspend_sqe_info *suspend_sqe = NULL; - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID(work); - - suspend_sqe = container_of(work, struct spfc_suspend_sqe_info, - timeout_work.work); - hba = (struct spfc_hba_info *)suspend_sqe->hba; - FC_CHECK_RETURN_VOID(hba); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) magic num (0x%x) do port reset.", - hba->port_cfg.port_id, suspend_sqe->magic_num); - - spfc_port_reset(hba); -} - -static void -spfc_push_sqe_suspend(void *hba, struct spfc_parent_queue_info *parent_queue, - struct spfc_sqe *sqe, struct unf_frame_pkg *pkg, u32 magic_num) -{ -#define SPFC_SQ_NOP_TIMEOUT_MS 1000 - ulong flag = 0; - u32 sqn_base; - struct spfc_parent_sq_info *sq = NULL; - struct spfc_suspend_sqe_info *suspend_sqe = NULL; - - sq = &parent_queue->parent_sq_info; - suspend_sqe = - kmalloc(sizeof(struct spfc_suspend_sqe_info), GFP_ATOMIC); - if (!suspend_sqe) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[err]alloc suspend sqe memory failed"); - return; - } - memset(suspend_sqe, 0, sizeof(struct spfc_suspend_sqe_info)); - memcpy(&suspend_sqe->sqe, sqe, sizeof(struct spfc_sqe)); - suspend_sqe->magic_num = magic_num; - suspend_sqe->old_offload_sts = sq->need_offloaded; - suspend_sqe->hba = sq->hba; - - if (pkg) { - memcpy(&suspend_sqe->pkg, pkg, sizeof(struct unf_frame_pkg)); - } else { - sqn_base = sq->sqn_base; - suspend_sqe->pkg.private_data[PKG_PRIVATE_XCHG_SSQ_INDEX] = - sqn_base; - } - - INIT_DELAYED_WORK(&suspend_sqe->timeout_work, spfc_do_port_reset); - INIT_LIST_HEAD(&suspend_sqe->list_sqe_entry); - - spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); - list_add_tail(&suspend_sqe->list_sqe_entry, &sq->suspend_sqe_list); - spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, flag); - - (void)queue_delayed_work(((struct spfc_hba_info *)hba)->work_queue, - &suspend_sqe->timeout_work, - (ulong)msecs_to_jiffies((u32)SPFC_SQ_NOP_TIMEOUT_MS)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) magic num(0x%x) suspend sqe", - ((struct spfc_hba_info *)hba)->port_cfg.port_id, magic_num); -} - -u32 spfc_pop_suspend_sqe(void *handle, struct spfc_parent_queue_info *parent_queue, - struct spfc_suspend_sqe_info *suspen_sqe) -{ - ulong flag; - u32 ret = UNF_RETURN_ERROR; - struct spfc_parent_sq_info *sq = NULL; - u16 ssqn; - struct unf_frame_pkg *pkg = NULL; - struct spfc_hba_info *hba = (struct spfc_hba_info *)handle; - u8 task_type; - spinlock_t *prtq_state_lock = NULL; - - sq =
&parent_queue->parent_sq_info; - task_type = suspen_sqe->sqe.ts_sl.task_type; - pkg = &suspen_sqe->pkg; - if (!pkg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "[error]pkg is null."); - return UNF_RETURN_ERROR; - } - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) pop up suspend wqe sqn (0x%x) TaskType(0x%x)", - hba->port_cfg.port_id, ssqn, task_type); - - prtq_state_lock = &parent_queue->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - if (SPFC_RPORT_NOT_OFFLOADED(parent_queue) && - (task_type == SPFC_SQE_ELS_RSP || - task_type == SPFC_TASK_T_ELS)) { - spin_unlock_irqrestore(prtq_state_lock, flag); - /* Send PLOGI or PLOGI ACC or SCR if session not offload */ - ret = spfc_send_els_via_default_session(hba, &suspen_sqe->sqe, pkg, parent_queue); - } else { - spin_unlock_irqrestore(prtq_state_lock, flag); - ret = spfc_parent_sq_enqueue(sq, &suspen_sqe->sqe, ssqn); - } - return ret; -} - -static void spfc_build_nop_sqe(struct spfc_hba_info *hba, struct spfc_parent_sq_info *sq, - struct spfc_sqe *sqe, u32 magic_num, u32 scqn) -{ - sqe->ts_sl.task_type = SPFC_SQE_NOP; - sqe->ts_sl.wd0.conn_id = (u16)(sq->rport_index); - sqe->ts_sl.cont.nop_sq.wd0.scqn = scqn; - sqe->ts_sl.cont.nop_sq.magic_num = magic_num; - spfc_build_common_wqe_ctrls(&sqe->ctrl_sl, - sizeof(struct spfc_sqe_ts) / SPFC_WQE_SECTION_CHUNK_SIZE); -} - -u32 spfc_send_nop_cmd(void *handle, struct spfc_parent_sq_info *parent_sq_info, - u32 magic_num, u16 sqn) -{ - struct spfc_sqe empty_sq_sqe; - struct spfc_hba_info *hba = (struct spfc_hba_info *)handle; - u32 ret; - - memset(&empty_sq_sqe, 0, sizeof(struct spfc_sqe)); - - spfc_build_nop_sqe(hba, parent_sq_info, &empty_sq_sqe, magic_num, hba->default_scqn); - ret = spfc_parent_sq_enqueue(parent_sq_info, &empty_sq_sqe, sqn); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]send nop cmd scqn(0x%x) sq(0x%x).", - hba->default_scqn, sqn); - return ret; -} - -u32 spfc_suspend_sqe_and_send_nop(void *handle, - struct spfc_parent_queue_info *parent_queue, - struct spfc_sqe *sqe, struct unf_frame_pkg *pkg) -{ - u32 ret = UNF_RETURN_ERROR; - u32 magic_num; - struct spfc_hba_info *hba = (struct spfc_hba_info *)handle; - struct spfc_parent_sq_info *parent_sq = &parent_queue->parent_sq_info; - struct unf_lport *lport = (struct unf_lport *)hba->lport; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (pkg) { - magic_num = pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - } else { - magic_num = (u32)atomic64_inc_return(&((struct unf_lport *) - lport->root_lport)->exchg_index); - } - - spfc_push_sqe_suspend(hba, parent_queue, sqe, pkg, magic_num); - if (SPFC_RPORT_NOT_OFFLOADED(parent_queue)) - parent_sq->need_offloaded = SPFC_NEED_DO_OFFLOAD; - - ret = spfc_send_nop_cmd(hba, parent_sq, magic_num, - (u16)parent_sq->sqn_base); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[err]Port(0x%x) rport_index(0x%x) send sq empty failed.", - hba->port_cfg.port_id, parent_sq->rport_index); - } - return ret; -} - -void spfc_build_session_rst_wqe(void *handle, struct spfc_parent_sq_info *sq, - struct spfc_sqe *sqe, enum spfc_session_reset_mode mode, u32 scqn) -{ - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - /* The reset session command does not occupy xid. Therefore, - * 0xffff can be used to align with the microcode.
- */ - sqe->ts_sl.task_type = SPFC_SQE_SESS_RST; - sqe->ts_sl.local_xid = 0xffff; - sqe->ts_sl.wd0.conn_id = (u16)(sq->rport_index); - sqe->ts_sl.wd0.remote_xid = 0xffff; - sqe->ts_sl.cont.reset_session.wd0.reset_exch_start = hba->exi_base; - sqe->ts_sl.cont.reset_session.wd0.reset_exch_end = hba->exi_base + (hba->exi_count - 1); - sqe->ts_sl.cont.reset_session.wd1.reset_did = sq->remote_port_id; - sqe->ts_sl.cont.reset_session.wd1.mode = mode; - sqe->ts_sl.cont.reset_session.wd2.reset_sid = sq->local_port_id; - sqe->ts_sl.cont.reset_session.wd3.scqn = scqn; - - spfc_build_common_wqe_ctrls(&sqe->ctrl_sl, - sizeof(struct spfc_sqe_ts) / SPFC_WQE_SECTION_CHUNK_SIZE); -} - -u32 spfc_send_session_rst_cmd(void *handle, - struct spfc_parent_queue_info *parent_queue_info, - enum spfc_session_reset_mode mode) -{ - struct spfc_parent_sq_info *sq = NULL; - struct spfc_sqe rst_sess_sqe; - u32 ret = UNF_RETURN_ERROR; - u32 sts_scqn = 0; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - memset(&rst_sess_sqe, 0, sizeof(struct spfc_sqe)); - sq = &parent_queue_info->parent_sq_info; - sts_scqn = hba->default_scqn; - - spfc_build_session_rst_wqe(hba, sq, &rst_sess_sqe, mode, sts_scqn); - ret = spfc_suspend_sqe_and_send_nop(hba, parent_queue_info, &rst_sess_sqe, NULL); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]RPort(0x%x) send SESS_RST(%d) start_exch_id(0x%x) end_exch_id(0x%x), scqn(0x%x) ctx_id(0x%x) cid(0x%x)", - sq->rport_index, mode, - rst_sess_sqe.ts_sl.cont.reset_session.wd0.reset_exch_start, - rst_sess_sqe.ts_sl.cont.reset_session.wd0.reset_exch_end, - rst_sess_sqe.ts_sl.cont.reset_session.wd3.scqn, - sq->context_id, sq->cache_id); - return ret; -} - -void spfc_rcvd_els_from_srq_timeout(struct work_struct *work) -{ - struct spfc_hba_info *hba = NULL; - - hba = container_of(work, struct spfc_hba_info, srq_delay_info.del_work.work); - - /* If the frame is not processed, the frame is pushed to the CM layer: - * The frame may have been processed when the root rq receives data. - */ - if (hba->srq_delay_info.srq_delay_flag) { - spfc_recv_els_cmnd(hba, &hba->srq_delay_info.frame_pkg, - hba->srq_delay_info.frame_pkg.unf_cmnd_pload_bl.buffer_ptr, - 0, false); - hba->srq_delay_info.srq_delay_flag = 0; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) srq delay work timeout, send saved plogi to CM", - hba->port_cfg.port_id); - } -} - -u32 spfc_flush_ini_resp_queue(void *handle) -{ - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - hba = (struct spfc_hba_info *)handle; - - spfc_flush_sts_scq(hba); - - return RETURN_OK; -} - -static void spfc_handle_aeq_queue_error(struct spfc_hba_info *hba, - struct spfc_aqe_data *aeq_msg) -{ - u32 sts_scqn_local = 0; - u32 full_ci = INVALID_VALUE32; - u32 full_ci_owner = INVALID_VALUE32; - struct spfc_scq_info *scq_info = NULL; - - sts_scqn_local = SPFC_RPORTID_TO_STS_SCQN(aeq_msg->wd0.conn_id); - scq_info = &hba->scq_info[sts_scqn_local]; - full_ci = scq_info->ci; - full_ci_owner = scq_info->ci_owner; - - /* Currently, Flush is forcibly set to StsScq.
No matter whether scq is - * processed, AEQE is returned - */ - tasklet_schedule(&scq_info->tasklet); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%x) LocalScqn(0x%x) CqmScqn(0x%x) is full, force flush CI from (%u|0x%x) to (%u|0x%x)", - hba->port_cfg.port_id, aeq_msg->wd0.conn_id, - sts_scqn_local, scq_info->scqn, full_ci_owner, full_ci, - scq_info->ci_owner, scq_info->ci); -} - -void spfc_process_aeqe(void *handle, u8 event_type, u8 *val) -{ - u32 ret = RETURN_OK; - struct spfc_hba_info *hba = (struct spfc_hba_info *)handle; - struct spfc_aqe_data aeq_msg; - u8 event_code = INVALID_VALUE8; - u64 event_val = *((u64 *)val); - - FC_CHECK_RETURN_VOID(hba); - - memcpy(&aeq_msg, (struct spfc_aqe_data *)&event_val, sizeof(struct spfc_aqe_data)); - event_code = (u8)aeq_msg.wd0.evt_code; - - switch (event_type) { - case FC_AEQ_EVENT_QUEUE_ERROR: - spfc_handle_aeq_queue_error(hba, &aeq_msg); - break; - - case FC_AEQ_EVENT_WQE_FATAL_ERROR: - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, - UNF_PORT_ABNORMAL_RESET, NULL); - break; - - case FC_AEQ_EVENT_CTX_FATAL_ERROR: - break; - - case FC_AEQ_EVENT_OFFLOAD_ERROR: - ret = spfc_handle_aeq_off_load_err(hba, &aeq_msg); - break; - - default: - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[warn]Port(0x%x) receive an unsupported AEQ EventType(0x%x) EventVal(0x%llx).", - hba->port_cfg.port_id, event_type, (u64)event_val); - return; - } - - if (event_code < FC_AEQ_EVT_ERR_CODE_BUTT) - SPFC_AEQ_ERR_TYPE_STAT(hba, aeq_msg.wd0.evt_code); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[info]Port(0x%x) receive AEQ EventType(0x%x) EventVal(0x%llx) EvtCode(0x%x) Conn_id(0x%x) Xid(0x%x) %s", - hba->port_cfg.port_id, event_type, (u64)event_val, event_code, - aeq_msg.wd0.conn_id, aeq_msg.wd1.xid, - (ret == UNF_RETURN_ERROR) ?
"ERROR" : "OK"); -} - -void spfc_sess_resource_free_sync(void *handle, - struct unf_port_info *rport_info) -{ - struct spfc_parent_queue_info *parent_queue_info = NULL; - ulong flag = 0; - u32 wait_sq_cnt = 0; - struct spfc_hba_info *hba = NULL; - spinlock_t *prtq_state_lock = NULL; - u32 index = SPFC_DEFAULT_RPORT_INDEX; - - FC_CHECK_RETURN_VOID(handle); - FC_CHECK_RETURN_VOID(rport_info); - - hba = (struct spfc_hba_info *)handle; - parent_queue_info = &hba->parent_queue_mgr->parent_queue[index]; - prtq_state_lock = &parent_queue_info->parent_queue_state_lock; - (void)spfc_free_parent_resource((void *)hba, rport_info); - - for (;;) { - spin_lock_irqsave(prtq_state_lock, flag); - if (parent_queue_info->offload_state == SPFC_QUEUE_STATE_FREE) { - spin_unlock_irqrestore(prtq_state_lock, flag); - break; - } - spin_unlock_irqrestore(prtq_state_lock, flag); - msleep(SPFC_WAIT_SESS_FREE_ONE_TIME_MS); - wait_sq_cnt++; - if (wait_sq_cnt >= SPFC_MAX_WAIT_LOOP_TIMES) - break; - } -} diff --git a/drivers/scsi/spfc/hw/spfc_queue.h b/drivers/scsi/spfc/hw/spfc_queue.h deleted file mode 100644 index c09f098e73242d439f15633ccc679867809b962e..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_queue.h +++ /dev/null @@ -1,711 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_QUEUE_H -#define SPFC_QUEUE_H - -#include "unf_type.h" -#include "spfc_wqe.h" -#include "spfc_cqm_main.h" -#define SPFC_MIN_WP_NUM (2) -#define SPFC_EXTEND_WQE_OFFSET (128) -#define SPFC_SQE_SIZE (256) -#define WQE_MARKER_0 (0x0) -#define WQE_MARKER_6B (0x6b) - -/* PARENT SQ & Context defines */ -#define SPFC_MAX_MSN (65535) -#define SPFC_MSN_MASK (0xffff000000000000LL) -#define SPFC_SQE_TS_SIZE (72) -#define SPFC_SQE_FIRST_OBIT_DW_POS (0) -#define SPFC_SQE_SECOND_OBIT_DW_POS (30) -#define SPFC_SQE_OBIT_SET_MASK_BE (0x80) -#define SPFC_SQE_OBIT_CLEAR_MASK_BE (0xffffff7f) -#define SPFC_MAX_SQ_TASK_TYPE_CNT (128) -#define SPFC_SQ_NUM_PER_QPC (3) -#define SPFC_SQ_QID_START_PER_QPC 0 -#define SPFC_SQ_SPACE_OFFSET (64) -#define SPFC_MAX_SSQ_NUM (SPFC_SQ_NUM_PER_QPC * 63 + 1) /* must be a multiple of 3 */ -#define SPFC_DIRECTWQE_SQ_INDEX (SPFC_MAX_SSQ_NUM - 1) - -/* Note: if the location of flush done bit changes, the definition must be - * modifyed again - */ -#define SPFC_CTXT_FLUSH_DONE_DW_POS (58) -#define SPFC_CTXT_FLUSH_DONE_MASK_BE (0x4000) -#define SPFC_CTXT_FLUSH_DONE_MASK_LE (0x400000) - -#define SPFC_PCIE_TEMPLATE (0) -#define SPFC_DMA_ATTR_OFST (0) - -/* - *When driver assembles WQE SGE, the GPA parity bit is multiplexed as follows: - * {rsvd'2,zerocopysoro'2,zerocopy_dmaattr_idx'6,pcie_template'6} - */ -#define SPFC_PCIE_TEMPLATE_OFFSET 0 -#define SPFC_PCIE_ZEROCOPY_DMAATTR_IDX_OFFSET 6 -#define SPFC_PCIE_ZEROCOPY_SO_RO_OFFSET 12 -#define SPFC_PCIE_RELAXED_ORDERING (1) -#define SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE \ - (SPFC_PCIE_RELAXED_ORDERING << SPFC_PCIE_ZEROCOPY_SO_RO_OFFSET | \ - SPFC_DMA_ATTR_OFST << SPFC_PCIE_ZEROCOPY_DMAATTR_IDX_OFFSET | \ - SPFC_PCIE_TEMPLATE) - -#define SPFC_GET_SQ_HEAD(sq) \ - list_entry(UNF_OS_LIST_NEXT(&(sq)->list_linked_list_sq), \ - struct spfc_wqe_page, entry_wpg) -#define SPFC_GET_SQ_TAIL(sq) \ - list_entry(UNF_OS_LIST_PREV(&(sq)->list_linked_list_sq), \ - struct spfc_wqe_page, entry_wpg) -#define SPFC_SQ_IO_STAT(ssq, io_type) \ - (atomic_inc(&(ssq)->io_stat[io_type])) -#define SPFC_SQ_IO_STAT_READ(ssq, io_type) \ - (atomic_read(&(ssq)->io_stat[io_type])) -#define SPFC_GET_QUEUE_CMSN(ssq) \ - 
((u32)(be64_to_cpu(((((ssq)->queue_header)->ci_record) & SPFC_MSN_MASK)))) -#define SPFC_GET_WP_END_CMSN(head_start_cmsn, wqe_num_per_buf) \ - ((u16)(((u32)(head_start_cmsn) + (u32)(wqe_num_per_buf) - 1) % (SPFC_MAX_MSN + 1))) -#define SPFC_MSN_INC(msn) (((SPFC_MAX_MSN) == (msn)) ? 0 : ((msn) + 1)) -#define SPFC_MSN_DEC(msn) (((msn) == 0) ? (SPFC_MAX_MSN) : ((msn) - 1)) -#define SPFC_QUEUE_MSN_OFFSET(start_cmsn, end_cmsn) \ - ((u32)((((u32)(end_cmsn) + (SPFC_MAX_MSN)) - (u32)(start_cmsn)) % (SPFC_MAX_MSN + 1))) -#define SPFC_MSN32_ADD(msn, inc) (((msn) + (inc)) % (SPFC_MAX_MSN + 1)) - -/* - *SCQ defines - */ -#define SPFC_INT_NUM_PER_QUEUE (1) -#define SPFC_SCQ_INT_ID_MAX (2048) /* 11BIT */ -#define SPFC_SCQE_SIZE (64) -#define SPFC_CQE_GPA_SHIFT (4) -#define SPFC_NEXT_CQE_GPA_SHIFT (12) -/* 1-Update Ci by Tile, 0-Update Ci by Hardware */ -#define SPFC_PMSN_CI_TYPE_FROM_HOST (0) -#define SPFC_PMSN_CI_TYPE_FROM_UCODE (1) -#define SPFC_ARMQ_IDLE (0) -#define SPFC_CQ_INT_MODE (2) -#define SPFC_CQ_HEADER_OWNER_SHIFT (15) - -/* SCQC_CQ_DEPTH 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k, 7-32k. - * include LinkWqe - */ -#define SPFC_CMD_SCQ_DEPTH (4096) -#define SPFC_STS_SCQ_DEPTH (8192) - -#define SPFC_CMD_SCQC_CQ_DEPTH (spfc_log2n(SPFC_CMD_SCQ_DEPTH >> 8)) -#define SPFC_STS_SCQC_CQ_DEPTH (spfc_log2n(SPFC_STS_SCQ_DEPTH >> 8)) -#define SPFC_STS_SCQ_CI_TYPE SPFC_PMSN_CI_TYPE_FROM_HOST - -#define SPFC_CMD_SCQ_CI_TYPE SPFC_PMSN_CI_TYPE_FROM_UCODE - -#define SPFC_SCQ_INTR_LOW_LATENCY_MODE 0 -#define SPFC_SCQ_INTR_POLLING_MODE 1 -#define SPFC_SCQ_PROC_CNT_PER_SECOND_THRESHOLD (30000) - -#define SPFC_CQE_MAX_PROCESS_NUM_PER_INTR (128) -#define SPFC_SESSION_SCQ_NUM (16) - -/* SCQ[0, 2, 4 ...]CMD SCQ,SCQ[1, 3, 5 ...]STS - * SCQ,SCQ[SPFC_TOTAL_SCQ_NUM-1]Defaul SCQ - */ -#define SPFC_CMD_SCQN_START (0) -#define SPFC_STS_SCQN_START (1) -#define SPFC_SCQS_PER_SESSION (2) - -#define SPFC_TOTAL_SCQ_NUM (SPFC_SESSION_SCQ_NUM + 1) - -#define SPFC_SCQ_IS_STS(scq_index) \ - (((scq_index) % SPFC_SCQS_PER_SESSION) || ((scq_index) == SPFC_SESSION_SCQ_NUM)) -#define SPFC_SCQ_IS_CMD(scq_index) (!SPFC_SCQ_IS_STS(scq_index)) -#define SPFC_RPORTID_TO_CMD_SCQN(rport_index) \ - (((rport_index) * SPFC_SCQS_PER_SESSION) % SPFC_SESSION_SCQ_NUM) -#define SPFC_RPORTID_TO_STS_SCQN(rport_index) \ - ((((rport_index) * SPFC_SCQS_PER_SESSION) + 1) % SPFC_SESSION_SCQ_NUM) - -/* - *SRQ defines - */ -#define SPFC_SRQE_SIZE (32) -#define SPFC_SRQ_INIT_LOOP_O (1) -#define SPFC_QUEUE_RING (1) -#define SPFC_SRQ_ELS_DATA_NUM (1) -#define SPFC_SRQ_ELS_SGE_LEN (256) -#define SPFC_SRQ_ELS_DATA_DEPTH (31750) /* depth should Divide 127 */ - -#define SPFC_IRQ_NAME_MAX (30) - -/* Support 2048 sessions(xid) */ -#define SPFC_CQM_XID_MASK (0x7ff) - -#define SPFC_QUEUE_FLUSH_DOING (0) -#define SPFC_QUEUE_FLUSH_DONE (1) -#define SPFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS (2000) -#define SPFC_QUEUE_FLUSH_WAIT_MS (2) - -/* - *RPort defines - */ -#define SPFC_RPORT_OFFLOADED(prnt_qinfo) \ - ((prnt_qinfo)->offload_state == SPFC_QUEUE_STATE_OFFLOADED) -#define SPFC_RPORT_NOT_OFFLOADED(prnt_qinfo) \ - ((prnt_qinfo)->offload_state != SPFC_QUEUE_STATE_OFFLOADED) -#define SPFC_RPORT_FLUSH_NOT_NEEDED(prnt_qinfo) \ - (((prnt_qinfo)->offload_state == SPFC_QUEUE_STATE_INITIALIZED) || \ - ((prnt_qinfo)->offload_state == SPFC_QUEUE_STATE_OFFLOADING) || \ - ((prnt_qinfo)->offload_state == SPFC_QUEUE_STATE_FREE)) -#define SPFC_CHECK_XID_MATCHED(sq_xid, sqe_xid) \ - (((sq_xid) & SPFC_CQM_XID_MASK) == ((sqe_xid) & SPFC_CQM_XID_MASK)) -#define SPFC_PORT_MODE_TGT (0) /* Port mode */ 
-#define SPFC_PORT_MODE_INI (1) -#define SPFC_PORT_MODE_BOTH (2) - -/* - *Hardware Reserved Queue Info defines - */ -#define SPFC_HRQI_SEQ_ID_MAX (255) -#define SPFC_HRQI_SEQ_INDEX_MAX (64) -#define SPFC_HRQI_SEQ_INDEX_SHIFT (6) -#define SPFC_HRQI_SEQ_SEPCIAL_ID (3) -#define SPFC_HRQI_SEQ_INVALID_ID (~0LL) - -enum spfc_session_reset_mode { - SPFC_SESS_RST_DELETE_IO_ONLY = 1, - SPFC_SESS_RST_DELETE_CONN_ONLY = 2, - SPFC_SESS_RST_DELETE_IO_CONN_BOTH = 3, - SPFC_SESS_RST_MODE_BUTT -}; - -/* linkwqe */ -#define CQM_LINK_WQE_CTRLSL_VALUE 2 -#define CQM_LINK_WQE_LP_VALID 1 -#define CQM_LINK_WQE_LP_INVALID 0 - -/* bit mask */ -#define SPFC_SCQN_MASK 0xfffff -#define SPFC_SCQ_CTX_CI_GPA_MASK 0xfffffff -#define SPFC_SCQ_CTX_C_EQN_MSI_X_MASK 0x7 -#define SPFC_PARITY_MASK 0x1 -#define SPFC_KEYSECTION_XID_H_MASK 0xf -#define SPFC_KEYSECTION_XID_L_MASK 0xffff -#define SPFC_SRQ_CTX_rqe_dma_attr_idx_MASK 0xf -#define SPFC_SSQ_CTX_MASK 0xfffff -#define SPFC_KEY_WD3_SID_2_MASK 0x00ff0000 -#define SPFC_KEY_WD3_SID_1_MASK 0x00ff00 -#define SPFC_KEY_WD3_SID_0_MASK 0x0000ff -#define SPFC_KEY_WD4_DID_2_MASK 0x00ff0000 -#define SPFC_KEY_WD4_DID_1_MASK 0x00ff00 -#define SPFC_KEY_WD4_DID_0_MASK 0x0000ff -#define SPFC_LOCAL_LW_WD1_DUMP_MSN_MASK 0x7fff -#define SPFC_PMSN_MASK 0xff -#define SPFC_QOS_LEVEL_MASK 0x3 -#define SPFC_DB_VAL_MASK 0xFFFFFFFF -#define SPFC_MSNWD_L_MASK 0xffff -#define SPFC_MSNWD_H_MASK 0x7fff -#define SPFC_DB_WD0_PI_H_MASK 0xf -#define SPFC_DB_WD0_PI_L_MASK 0xfff - -#define SPFC_DB_C_BIT_DATA_TYPE 0 -#define SPFC_DB_C_BIT_CONTROL_TYPE 1 - -#define SPFC_OWNER_DRIVER_PRODUCT (1) - -#define SPFC_256BWQE_ENABLE (1) -#define SPFC_DB_ARM_DISABLE (0) - -#define SPFC_CNTX_SIZE_T_256B (0) -#define SPFC_CNTX_SIZE_256B (256) - -#define SPFC_SERVICE_TYPE_FC (12) -#define SPFC_SERVICE_TYPE_FC_SQ (13) - -#define SPFC_PACKET_COS_FC_CMD (0) -#define SPFC_PACKET_COS_FC_DATA (1) - -#define SPFC_QUEUE_LINK_STYLE (0) -#define SPFC_QUEUE_RING_STYLE (1) - -#define SPFC_NEED_DO_OFFLOAD (1) -#define SPFC_QID_SQ (0) - -/* - *SCQ defines - */ -struct spfc_scq_info { - struct cqm_queue *cqm_scq_info; - u32 wqe_num_per_buf; - u32 wqe_size; - u32 scqc_cq_depth; /* 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k, 7-32k */ - u16 scqc_ci_type; - u16 valid_wqe_num; /* ScQ depth include link wqe */ - u16 ci; - u16 ci_owner; - u32 queue_id; - u32 scqn; - char irq_name[SPFC_IRQ_NAME_MAX]; - u16 msix_entry_idx; - u32 irq_id; - struct tasklet_struct tasklet; - atomic_t flush_stat; - void *hba; - u32 reserved; - struct task_struct *delay_task; - bool task_exit; - u32 intr_mode; -}; - -struct spfc_srq_ctx { - /* DW0 */ - u64 pcie_template : 6; - u64 rsvd0 : 2; - u64 parity : 8; - u64 cur_rqe_usr_id : 16; - u64 cur_rqe_msn : 16; - u64 last_rq_pmsn : 16; - - /* DW1 */ - u64 cur_rqe_gpa; - - /* DW2 */ - u64 ctrl_sl : 1; - u64 cf : 1; - u64 csl : 2; - u64 cr : 1; - u64 bdsl : 4; - u64 pmsn_type : 1; - u64 cur_wqe_o : 1; - u64 consant_sge_len : 17; - u64 cur_sge_id : 4; - u64 cur_sge_remain_len : 17; - u64 ceqn_msix : 11; - u64 int_mode : 2; - u64 cur_sge_l : 1; - u64 cur_sge_v : 1; - - /* DW3 */ - u64 cur_sge_gpa; - - /* DW4 */ - u64 cur_pmsn_gpa; - - /* DW5 */ - u64 rsvd3 : 5; - u64 ring : 1; - u64 loop_o : 1; - u64 rsvd2 : 1; - u64 rqe_dma_attr_idx : 6; - u64 rq_so_ro : 2; - u64 cqe_dma_attr_idx : 6; - u64 cq_so_ro : 2; - u64 rsvd1 : 7; - u64 arm_q : 1; - u64 cur_cqe_cnt : 8; - u64 cqe_max_cnt : 8; - u64 prefetch_max_masn : 16; - - /* DW6~DW7 */ - u64 rsvd4; - u64 rsvd5; -}; - -struct spfc_drq_buff_entry { - u16 buff_id; - void 
*buff_addr; - dma_addr_t buff_dma; -}; - -enum spfc_clean_state { SPFC_CLEAN_DONE, SPFC_CLEAN_DOING, SPFC_CLEAN_BUTT }; -enum spfc_srq_type { SPFC_SRQ_ELS = 1, SPFC_SRQ_IMMI, SPFC_SRQ_BUTT }; - -struct spfc_srq_info { - enum spfc_srq_type srq_type; - - struct cqm_queue *cqm_srq_info; - u32 wqe_num_per_buf; /* Wqe number per buf, don't include link wqe */ - u32 wqe_size; - u32 valid_wqe_num; /* valid wqe number, don't include link wqe */ - u16 pi; - u16 pi_owner; - u16 pmsn; - u16 ci; - u16 cmsn; - u32 srqn; - - dma_addr_t first_rqe_recv_dma; - - struct spfc_drq_buff_entry *els_buff_entry_head; - struct buf_describe buf_list; - spinlock_t srq_spin_lock; - bool spin_lock_init; - bool enable; - enum spfc_clean_state state; - - atomic_t ref; - - struct delayed_work del_work; - u32 del_retry_time; - void *hba; -}; - -/* - * The doorbell record keeps PI of WQE, which will be produced next time. - * The PI is 15 bits wide with 1 o-bit - */ -struct db_record { - u64 pmsn : 16; - u64 dump_pmsn : 16; - u64 rsvd0 : 32; -}; - -/* - * The ci record keeps CI of WQE, which will be consumed next time. - * The CI is 15 bits wide with 1 o-bit - */ -struct ci_record { - u64 cmsn : 16; - u64 dump_cmsn : 16; - u64 rsvd0 : 32; -}; - -/* The accumulate data in WQ header */ -struct accumulate { - u64 data_2_uc; - u64 data_2_drv; -}; - -/* The WQ header structure */ -struct wq_header { - struct db_record db_record; - struct ci_record ci_record; - struct accumulate soft_data; -}; - -/* Link list Sq WqePage Pool */ -/* queue header struct */ -struct spfc_queue_header { - u64 door_bell_record; - u64 ci_record; - u64 rsv1; - u64 rsv2; -}; - -/* WPG-WQEPAGE, LLSQ-LINKED LIST SQ */ -struct spfc_wqe_page { - struct list_head entry_wpg; - - /* Wqe Page virtual addr */ - void *wpg_addr; - - /* Wqe Page physical addr */ - u64 wpg_phy_addr; -}; - -struct spfc_sq_wqepage_pool { - u32 wpg_cnt; - u32 wpg_size; - u32 wqe_per_wpg; - - /* PCI DMA Pool */ - struct dma_pool *wpg_dma_pool; - struct spfc_wqe_page *wpg_pool_addr; - struct list_head list_free_wpg_pool; - spinlock_t wpg_pool_lock; - atomic_t wpg_in_use; -}; - -#define SPFC_SQ_DEL_STAGE_TIMEOUT_MS (3 * 1000) -#define SPFC_SRQ_DEL_STAGE_TIMEOUT_MS (10 * 1000) -#define SPFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS (10 * 1000) -#define SPFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT (3) - -#define SPFC_SRQ_PROCESS_DELAY_MS (20) - -/* PLOGI parameters */ -struct spfc_plogi_copram { - u32 seq_cnt : 1; - u32 ed_tov : 1; - u32 rsvd : 14; - u32 tx_mfs : 16; - u32 ed_tov_time; -}; - -struct spfc_delay_sqe_ctrl_info { - bool valid; - u32 rport_index; - u32 time_out; - u64 start_jiff; - u32 sid; - u32 did; - u32 xid; - u16 ssqn; - struct spfc_sqe sqe; -}; - -struct spfc_suspend_sqe_info { - void *hba; - u32 magic_num; - u8 old_offload_sts; - struct unf_frame_pkg pkg; - struct spfc_sqe sqe; - struct delayed_work timeout_work; - struct list_head list_sqe_entry; -}; - -struct spfc_delay_destroy_ctrl_info { - bool valid; - u32 rport_index; - u32 time_out; - u64 start_jiff; - struct unf_port_info rport_info; -}; - -/* PARENT SQ Info */ -struct spfc_parent_sq_info { - void *hba; - spinlock_t parent_sq_enqueue_lock; - u32 rport_index; - u32 context_id; - /* Fixed value, used for Doorbell */ - u32 sq_queue_id; - /* When a session is offloaded, tile will return the CacheId to the - * driver, which is used for Doorbell - */ - u32 cache_id; - /* service type, fc or fc */ - u32 service_type; - /* OQID */ - u16 oqid_rd; - u16 oqid_wr; - u32 local_port_id; - u32 remote_port_id; - u32 sqn_base; - bool port_in_flush; - bool
sq_in_sess_rst; - atomic_t sq_valid; - /* Used by NPIV QoS */ - u8 vport_id; - /* Used by NPIV QoS */ - u8 cs_ctrl; - struct delayed_work del_work; - struct delayed_work flush_done_timeout_work; - u64 del_start_jiff; - dma_addr_t srq_ctx_addr; - atomic_t sq_cached; - atomic_t flush_done_wait_cnt; - struct spfc_plogi_copram plogi_co_parms; - /* dif control info for immi */ - struct unf_dif_control_info sirt_dif_control; - struct spfc_delay_sqe_ctrl_info delay_sqe; - struct spfc_delay_destroy_ctrl_info destroy_sqe; - struct list_head suspend_sqe_list; - atomic_t io_stat[SPFC_MAX_SQ_TASK_TYPE_CNT]; - u8 need_offloaded; -}; - -/* parent context doorbell */ -struct spfc_parent_sq_db { - struct { - u32 xid : 20; - u32 cntx_size : 2; - u32 arm : 1; - u32 c : 1; - u32 cos : 3; - u32 service_type : 5; - } wd0; - - struct { - u32 pi_hi : 8; - u32 sm_data : 20; - u32 qid : 4; - } wd1; -}; - -#define IWARP_FC_DDB_TYPE 3 - -/* direct wqe doorbell */ -struct spfc_direct_wqe_db { - struct { - u32 xid : 20; - u32 cntx_size : 2; - u32 pi_hi : 4; - u32 c : 1; - u32 cos : 3; - u32 ddb : 2; - } wd0; - - struct { - u32 pi_lo : 12; - u32 sm_data : 20; - } wd1; -}; - -struct spfc_parent_cmd_scq_info { - u32 cqm_queue_id; - u32 local_queue_id; -}; - -struct spfc_parent_st_scq_info { - u32 cqm_queue_id; - u32 local_queue_id; -}; - -struct spfc_parent_els_srq_info { - u32 cqm_queue_id; - u32 local_queue_id; -}; - -enum spfc_parent_queue_state { - SPFC_QUEUE_STATE_INITIALIZED = 0, - SPFC_QUEUE_STATE_OFFLOADING = 1, - SPFC_QUEUE_STATE_OFFLOADED = 2, - SPFC_QUEUE_STATE_DESTROYING = 3, - SPFC_QUEUE_STATE_FREE = 4, - SPFC_QUEUE_STATE_BUTT -}; - -struct spfc_parent_ctx { - dma_addr_t parent_ctx_addr; - void *parent_ctx; - struct cqm_qpc_mpt *cqm_parent_ctx_obj; -}; - -struct spfc_parent_queue_info { - spinlock_t parent_queue_state_lock; - struct spfc_parent_ctx parent_ctx; - enum spfc_parent_queue_state offload_state; - struct spfc_parent_sq_info parent_sq_info; - struct spfc_parent_cmd_scq_info parent_cmd_scq_info; - struct spfc_parent_st_scq_info - parent_sts_scq_info; - struct spfc_parent_els_srq_info parent_els_srq_info; - u8 queue_vport_id; - u8 queue_data_cos; -}; - -struct spfc_parent_ssq_info { - void *hba; - spinlock_t parent_sq_enqueue_lock; - atomic_t wqe_page_cnt; - u32 context_id; - u32 cache_id; - u32 sq_queue_id; - u32 sqn; - u32 service_type; - u32 max_sqe_num; /* SQ depth */ - u32 wqe_num_per_buf; - u32 wqe_size; - u32 accum_wqe_cnt; - u32 wqe_offset; - u16 head_start_cmsn; - u16 head_end_cmsn; - u16 last_pmsn; - u16 last_pi_owner; - u32 queue_style; - atomic_t sq_valid; - void *queue_head_original; - struct spfc_queue_header *queue_header; - dma_addr_t queue_hdr_phy_addr_original; - dma_addr_t queue_hdr_phy_addr; - struct list_head list_linked_list_sq; - atomic_t sq_db_cnt; - atomic_t sq_wqe_cnt; - atomic_t sq_cqe_cnt; - atomic_t sqe_minus_cqe_cnt; - atomic_t io_stat[SPFC_MAX_SQ_TASK_TYPE_CNT]; -}; - -struct spfc_parent_shared_queue_info { - struct spfc_parent_ctx parent_ctx; - struct spfc_parent_ssq_info parent_ssq_info; -}; - -struct spfc_parent_queue_mgr { - struct spfc_parent_queue_info parent_queue[UNF_SPFC_MAXRPORT_NUM]; - struct spfc_parent_shared_queue_info shared_queue[SPFC_MAX_SSQ_NUM]; - struct buf_describe parent_sq_buf_list; -}; - -#define SPFC_SRQC_BUS_ROW 8 -#define SPFC_SRQC_BUS_COL 19 -#define SPFC_SQC_BUS_ROW 8 -#define SPFC_SQC_BUS_COL 13 -#define SPFC_HW_SCQC_BUS_ROW 6 -#define SPFC_HW_SCQC_BUS_COL 10 -#define SPFC_HW_SRQC_BUS_ROW 4 -#define SPFC_HW_SRQC_BUS_COL 15 -#define 
SPFC_SCQC_BUS_ROW 3 -#define SPFC_SCQC_BUS_COL 29 - -#define SPFC_QUEUE_INFO_BUS_NUM 4 -struct spfc_queue_info_bus { - u64 bus[SPFC_QUEUE_INFO_BUS_NUM]; -}; - -u32 spfc_free_parent_resource(void *handle, struct unf_port_info *rport_info); -u32 spfc_alloc_parent_resource(void *handle, struct unf_port_info *rport_info); -u32 spfc_alloc_parent_queue_mgr(void *handle); -void spfc_free_parent_queue_mgr(void *handle); -u32 spfc_create_common_share_queues(void *handle); -u32 spfc_create_ssq(void *handle); -void spfc_destroy_common_share_queues(void *v_pstHba); -u32 spfc_alloc_parent_sq_wqe_page_pool(void *handle); -void spfc_free_parent_sq_wqe_page_pool(void *handle); -struct spfc_parent_queue_info * -spfc_find_parent_queue_info_by_pkg(void *handle, struct unf_frame_pkg *pkg); -struct spfc_parent_sq_info * -spfc_find_parent_sq_by_pkg(void *handle, struct unf_frame_pkg *pkg); -u32 spfc_root_cmdq_enqueue(void *handle, union spfc_cmdqe *cmdqe, u16 cmd_len); -void spfc_process_scq_cqe(ulong scq_info); -u32 spfc_process_scq_cqe_entity(ulong scq_info, u32 proc_cnt); -void spfc_post_els_srq_wqe(struct spfc_srq_info *srq_info, u16 buf_id); -void spfc_process_aeqe(void *handle, u8 event_type, u8 *event_val); -u32 spfc_parent_sq_enqueue(struct spfc_parent_sq_info *sq, struct spfc_sqe *io_sqe, - u16 ssqn); -u32 spfc_parent_ssq_enqueue(struct spfc_parent_ssq_info *ssq, - struct spfc_sqe *io_sqe, u8 wqe_type); -void spfc_free_sq_wqe_page(struct spfc_parent_ssq_info *ssq, u32 cur_cmsn); -u32 spfc_reclaim_sq_wqe_page(void *handle, union spfc_scqe *scqe); -void spfc_set_rport_flush_state(void *handle, bool in_flush); -u32 spfc_clear_fetched_sq_wqe(void *handle); -u32 spfc_clear_pending_sq_wqe(void *handle); -void spfc_free_parent_queues(void *handle); -void spfc_free_ssq(void *handle, u32 free_sq_num); -void spfc_enalbe_queues_dispatch(void *handle); -void spfc_queue_pre_process(void *handle, bool clean); -void spfc_queue_post_process(void *handle); -void spfc_free_parent_queue_info(void *handle, struct spfc_parent_queue_info *parent_queue_info); -u32 spfc_send_session_rst_cmd(void *handle, - struct spfc_parent_queue_info *parent_queue_info, - enum spfc_session_reset_mode mode); -u32 spfc_send_nop_cmd(void *handle, struct spfc_parent_sq_info *parent_sq_info, - u32 magic_num, u16 sqn); -void spfc_build_session_rst_wqe(void *handle, struct spfc_parent_sq_info *sq, - struct spfc_sqe *sqe, - enum spfc_session_reset_mode mode, u32 scqn); -void spfc_wq_destroy_els_srq(struct work_struct *work); -void spfc_destroy_els_srq(void *handle); -u32 spfc_push_delay_sqe(void *hba, - struct spfc_parent_queue_info *offload_parent_queue, - struct spfc_sqe *sqe, struct unf_frame_pkg *pkg); -void spfc_push_destroy_parent_queue_sqe(void *hba, - struct spfc_parent_queue_info *offloading_parent_queue, - struct unf_port_info *rport_info); -void spfc_pop_destroy_parent_queue_sqe(void *handle, - struct spfc_delay_destroy_ctrl_info *destroy_sqe_info); -struct spfc_parent_queue_info *spfc_find_offload_parent_queue(void *handle, - u32 local_id, - u32 remote_id, - u32 rport_index); -u32 spfc_flush_ini_resp_queue(void *handle); -void spfc_rcvd_els_from_srq_timeout(struct work_struct *work); -u32 spfc_send_aeq_info_via_cmdq(void *hba, u32 aeq_error_type); -u32 spfc_parent_sq_ring_doorbell(struct spfc_parent_ssq_info *sq, u8 qos_level, - u32 c); -void spfc_sess_resource_free_sync(void *handle, - struct unf_port_info *rport_info); -u32 spfc_suspend_sqe_and_send_nop(void *handle, - struct spfc_parent_queue_info *parent_queue, - struct spfc_sqe 
*sqe, struct unf_frame_pkg *pkg); -u32 spfc_pop_suspend_sqe(void *handle, - struct spfc_parent_queue_info *parent_queue, - struct spfc_suspend_sqe_info *suspen_sqe); -#endif diff --git a/drivers/scsi/spfc/hw/spfc_service.c b/drivers/scsi/spfc/hw/spfc_service.c deleted file mode 100644 index e99802df50a2caf955b1639e5d07fced2d1fa91c..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_service.c +++ /dev/null @@ -1,2168 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_service.h" -#include "unf_log.h" -#include "spfc_io.h" -#include "spfc_chipitf.h" - -#define SPFC_ELS_SRQ_BUF_NUM (0x9) -#define SPFC_LS_GS_USERID_LEN ((FC_LS_GS_USERID_CNT_MAX + 1) / 2) - -struct unf_scqe_handle_table { - u32 scqe_type; /* ELS type */ - bool reclaim_sq_wpg; - u32 (*scqe_handle_func)(struct spfc_hba_info *hba, union spfc_scqe *scqe); -}; - -static u32 spfc_get_els_rsp_pld_len(u16 els_type, u16 els_cmnd, - u32 *els_acc_pld_len) -{ - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(els_acc_pld_len, UNF_RETURN_ERROR); - - /* RJT */ - if (els_type == ELS_RJT) { - *els_acc_pld_len = UNF_ELS_ACC_RJT_LEN; - return RETURN_OK; - } - - /* ACC */ - switch (els_cmnd) { - /* uses the same PAYLOAD length as PLOGI. */ - case ELS_FLOGI: - case ELS_PDISC: - case ELS_PLOGI: - *els_acc_pld_len = UNF_PLOGI_ACC_PAYLOAD_LEN; - break; - - case ELS_PRLI: - /* If sirt is enabled, The PRLI ACC payload extends 12 bytes */ - *els_acc_pld_len = (UNF_PRLI_ACC_PAYLOAD_LEN - UNF_PRLI_SIRT_EXTRA_SIZE); - - break; - - case ELS_LOGO: - *els_acc_pld_len = UNF_LOGO_ACC_PAYLOAD_LEN; - break; - - case ELS_PRLO: - *els_acc_pld_len = UNF_PRLO_ACC_PAYLOAD_LEN; - break; - - case ELS_RSCN: - *els_acc_pld_len = UNF_RSCN_ACC_PAYLOAD_LEN; - break; - - case ELS_ADISC: - *els_acc_pld_len = UNF_ADISC_ACC_PAYLOAD_LEN; - break; - - case ELS_RRQ: - *els_acc_pld_len = UNF_RRQ_ACC_PAYLOAD_LEN; - break; - - case ELS_SCR: - *els_acc_pld_len = UNF_SCR_RSP_PAYLOAD_LEN; - break; - - case ELS_ECHO: - *els_acc_pld_len = UNF_ECHO_ACC_PAYLOAD_LEN; - break; - - case ELS_REC: - *els_acc_pld_len = UNF_REC_ACC_PAYLOAD_LEN; - break; - - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Unknown ELS command(0x%x)", - els_cmnd); - ret = UNF_RETURN_ERROR; - break; - } - - return ret; -} - -struct unf_els_cmd_paylod_table { - u16 els_cmnd; /* ELS type */ - u32 els_req_pld_len; - u32 els_rsp_pld_len; -}; - -static const struct unf_els_cmd_paylod_table els_pld_table_map[] = { - {ELS_FDISC, UNF_FDISC_PAYLOAD_LEN, UNF_FDISC_ACC_PAYLOAD_LEN}, - {ELS_FLOGI, UNF_FLOGI_PAYLOAD_LEN, UNF_FLOGI_ACC_PAYLOAD_LEN}, - {ELS_PLOGI, UNF_PLOGI_PAYLOAD_LEN, UNF_PLOGI_ACC_PAYLOAD_LEN}, - {ELS_SCR, UNF_SCR_PAYLOAD_LEN, UNF_SCR_RSP_PAYLOAD_LEN}, - {ELS_PDISC, UNF_PDISC_PAYLOAD_LEN, UNF_PDISC_ACC_PAYLOAD_LEN}, - {ELS_LOGO, UNF_LOGO_PAYLOAD_LEN, UNF_LOGO_ACC_PAYLOAD_LEN}, - {ELS_PRLO, UNF_PRLO_PAYLOAD_LEN, UNF_PRLO_ACC_PAYLOAD_LEN}, - {ELS_ADISC, UNF_ADISC_PAYLOAD_LEN, UNF_ADISC_ACC_PAYLOAD_LEN}, - {ELS_RRQ, UNF_RRQ_PAYLOAD_LEN, UNF_RRQ_ACC_PAYLOAD_LEN}, - {ELS_RSCN, 0, UNF_RSCN_ACC_PAYLOAD_LEN}, - {ELS_ECHO, UNF_ECHO_PAYLOAD_LEN, UNF_ECHO_ACC_PAYLOAD_LEN}, - {ELS_REC, UNF_REC_PAYLOAD_LEN, UNF_REC_ACC_PAYLOAD_LEN} -}; - -static u32 spfc_get_els_req_acc_pld_len(u16 els_cmnd, u32 *req_pld_len, u32 *rsp_pld_len) -{ - u32 ret = RETURN_OK; - u32 i; - - FC_CHECK_RETURN_VALUE(req_pld_len, UNF_RETURN_ERROR); - - for (i = 0; i < (sizeof(els_pld_table_map) / - sizeof(struct unf_els_cmd_paylod_table)); - i++) { - if 
(els_pld_table_map[i].els_cmnd == els_cmnd) { - *req_pld_len = els_pld_table_map[i].els_req_pld_len; - *rsp_pld_len = els_pld_table_map[i].els_rsp_pld_len; - return ret; - } - } - - switch (els_cmnd) { - case ELS_PRLI: - /* If sirt is enabled, The PRLI ACC payload extends 12 bytes */ - *req_pld_len = SPFC_GET_PRLI_PAYLOAD_LEN; - *rsp_pld_len = SPFC_GET_PRLI_PAYLOAD_LEN; - - break; - - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Unknown ELS_CMD(0x%x)", els_cmnd); - ret = UNF_RETURN_ERROR; - break; - } - - return ret; -} - -static u32 spfc_check_parent_qinfo_valid(struct spfc_hba_info *hba, struct unf_frame_pkg *pkg, - struct spfc_parent_queue_info **prt_qinfo) -{ - if (!*prt_qinfo) { - if (pkg->type == UNF_PKG_ELS_REQ || pkg->type == UNF_PKG_ELS_REPLY) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send LS SID(0x%x) DID(0x%x) with null prtqinfo", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = SPFC_DEFAULT_RPORT_INDEX; - *prt_qinfo = spfc_find_parent_queue_info_by_pkg(hba, pkg); - if (!*prt_qinfo) - return UNF_RETURN_ERROR; - } else { - return UNF_RETURN_ERROR; - } - } - - if (pkg->type == UNF_PKG_GS_REQ && SPFC_RPORT_NOT_OFFLOADED(*prt_qinfo)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Port(0x%x) send GS SID(0x%x) DID(0x%x), send GS Request before PLOGI", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - return UNF_RETURN_ERROR; - } - return RETURN_OK; -} - -static void spfc_get_pkt_cmnd_type_code(struct unf_frame_pkg *pkg, - u16 *ls_gs_cmnd_code, - u16 *ls_gs_cmnd_type) -{ - *ls_gs_cmnd_type = SPFC_GET_LS_GS_CMND_CODE(pkg->cmnd); - if (SPFC_PKG_IS_ELS_RSP(*ls_gs_cmnd_type)) { - *ls_gs_cmnd_code = SPFC_GET_ELS_RSP_CODE(pkg->cmnd); - } else if (pkg->type == UNF_PKG_GS_REQ) { - *ls_gs_cmnd_code = *ls_gs_cmnd_type; - } else { - *ls_gs_cmnd_code = *ls_gs_cmnd_type; - *ls_gs_cmnd_type = ELS_CMND; - } -} - -static u32 spfc_get_gs_req_rsp_pld_len(u16 cmnd_code, u32 *gs_pld_len, u32 *gs_rsp_pld_len) -{ - FC_CHECK_RETURN_VALUE(gs_pld_len, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(gs_rsp_pld_len, UNF_RETURN_ERROR); - - switch (cmnd_code) { - case NS_GPN_ID: - *gs_pld_len = UNF_GPNID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GPNID_RSP_PAYLOAD_LEN; - break; - - case NS_GNN_ID: - *gs_pld_len = UNF_GNNID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GNNID_RSP_PAYLOAD_LEN; - break; - - case NS_GFF_ID: - *gs_pld_len = UNF_GFFID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GFFID_RSP_PAYLOAD_LEN; - break; - - case NS_GID_FT: - case NS_GID_PT: - *gs_pld_len = UNF_GID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; - break; - - case NS_RFT_ID: - *gs_pld_len = UNF_RFTID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_RFTID_RSP_PAYLOAD_LEN; - break; - - case NS_RFF_ID: - *gs_pld_len = UNF_RFFID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_RFFID_RSP_PAYLOAD_LEN; - break; - case NS_GA_NXT: - *gs_pld_len = UNF_GID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; - break; - - case NS_GIEL: - *gs_pld_len = UNF_RFTID_RSP_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; - break; - - default: - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Unknown GS command type(0x%x)", cmnd_code); - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -static void *spfc_get_els_frame_addr(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, - u16 els_cmnd_code, u16 els_cmnd_type, - u64 *phy_addr) -{ - void *frame_pld_addr = NULL; - dma_addr_t els_frame_addr = 0; - - if
(els_cmnd_code == ELS_ECHO) { - frame_pld_addr = (void *)UNF_GET_ECHO_PAYLOAD(pkg); - els_frame_addr = UNF_GET_ECHO_PAYLOAD_PHYADDR(pkg); - } else if (els_cmnd_code == ELS_RSCN) { - if (els_cmnd_type == ELS_CMND) { - /* Not Support */ - frame_pld_addr = NULL; - els_frame_addr = 0; - } else { - frame_pld_addr = (void *)UNF_GET_RSCN_ACC_PAYLOAD(pkg); - els_frame_addr = pkg->unf_cmnd_pload_bl.buf_dma_addr + - sizeof(struct unf_fc_head); - } - } else { - frame_pld_addr = (void *)SPFC_GET_CMND_PAYLOAD_ADDR(pkg); - els_frame_addr = pkg->unf_cmnd_pload_bl.buf_dma_addr + - sizeof(struct unf_fc_head); - } - *phy_addr = els_frame_addr; - return frame_pld_addr; -} - -static u32 spfc_get_frame_info(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, void **frame_pld_addr, - u32 *frame_pld_len, u64 *frame_phy_addr, - u32 *acc_pld_len) -{ - u32 ret = RETURN_OK; - u16 ls_gs_cmnd_code = SPFC_ZERO; - u16 ls_gs_cmnd_type = SPFC_ZERO; - - spfc_get_pkt_cmnd_type_code(pkg, &ls_gs_cmnd_code, &ls_gs_cmnd_type); - - if (pkg->type == UNF_PKG_GS_REQ) { - ret = spfc_get_gs_req_rsp_pld_len(ls_gs_cmnd_code, - frame_pld_len, acc_pld_len); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) send GS SID(0x%x) DID(0x%x), get error GS request and response payload length", - hba->port_cfg.port_id, - pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return ret; - } - *frame_pld_addr = (void *)(SPFC_GET_CMND_PAYLOAD_ADDR(pkg)); - *frame_phy_addr = pkg->unf_cmnd_pload_bl.buf_dma_addr + sizeof(struct unf_fc_head); - if (ls_gs_cmnd_code == NS_GID_FT || ls_gs_cmnd_code == NS_GID_PT) - *frame_pld_addr = (void *)(UNF_GET_GID_PAYLOAD(pkg)); - } else { - *frame_pld_addr = spfc_get_els_frame_addr(hba, pkg, ls_gs_cmnd_code, - ls_gs_cmnd_type, frame_phy_addr); - if (SPFC_PKG_IS_ELS_RSP(ls_gs_cmnd_type)) { - ret = spfc_get_els_rsp_pld_len(ls_gs_cmnd_type, ls_gs_cmnd_code, - frame_pld_len); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) get els cmd (0x%x) rsp len failed.", - hba->port_cfg.port_id, - ls_gs_cmnd_code); - return ret; - } - } else { - ret = spfc_get_els_req_acc_pld_len(ls_gs_cmnd_code, frame_pld_len, - acc_pld_len); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) get els cmd (0x%x) req and acc len failed.", - hba->port_cfg.port_id, - ls_gs_cmnd_code); - return ret; - } - } - } - return ret; -} - -static u32 -spfc_send_ls_gs_via_parent(struct spfc_hba_info *hba, struct unf_frame_pkg *pkg, - struct spfc_parent_queue_info *prt_queue_info) -{ - u32 ret = UNF_RETURN_ERROR; - u16 ls_gs_cmnd_code = SPFC_ZERO; - u16 ls_gs_cmnd_type = SPFC_ZERO; - u16 remote_exid = 0; - u16 hot_tag = 0; - struct spfc_parent_sq_info *parent_sq_info = NULL; - struct spfc_sqe tmp_sqe; - struct spfc_sqe *sqe = NULL; - void *frame_pld_addr = NULL; - u32 frame_pld_len = 0; - u32 acc_pld_len = 0; - u64 frame_pa = 0; - ulong flags = 0; - u16 ssqn = 0; - spinlock_t *prtq_state_lock = NULL; - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - sqe = &tmp_sqe; - memset(sqe, 0, sizeof(struct spfc_sqe)); - - parent_sq_info = &prt_queue_info->parent_sq_info; - hot_tag = (u16)UNF_GET_HOTPOOL_TAG(pkg) + hba->exi_base; - - spfc_get_pkt_cmnd_type_code(pkg, &ls_gs_cmnd_code, &ls_gs_cmnd_type); - - ret = spfc_get_frame_info(hba, pkg, &frame_pld_addr, &frame_pld_len, - &frame_pa, &acc_pld_len); - if (ret != RETURN_OK) - return ret; - - if (SPFC_PKG_IS_ELS_RSP(ls_gs_cmnd_type)) { - remote_exid = UNF_GET_OXID(pkg); - 
spfc_build_els_wqe_ts_rsp(sqe, prt_queue_info, pkg, - frame_pld_addr, ls_gs_cmnd_type, - ls_gs_cmnd_code); - - /* Assemble the SQE Task Section Els Common part */ - spfc_build_service_wqe_ts_common(&sqe->ts_sl, parent_sq_info->rport_index, - UNF_GET_RXID(pkg), remote_exid, - SPFC_LSW(frame_pld_len)); - } else { - remote_exid = UNF_GET_RXID(pkg); - /* send els req ,only use local_xid for hotpooltag */ - spfc_build_els_wqe_ts_req(sqe, parent_sq_info, - prt_queue_info->parent_sts_scq_info.cqm_queue_id, - frame_pld_addr, pkg); - spfc_build_service_wqe_ts_common(&sqe->ts_sl, parent_sq_info->rport_index, hot_tag, - remote_exid, SPFC_LSW(frame_pld_len)); - } - /* Assemble the SQE Control Section part */ - spfc_build_service_wqe_ctrl_section(&sqe->ctrl_sl, SPFC_BYTES_TO_QW_NUM(SPFC_SQE_TS_SIZE), - SPFC_BYTES_TO_QW_NUM(sizeof(struct spfc_variable_sge))); - - /* Build SGE */ - spfc_build_els_gs_wqe_sge(sqe, frame_pld_addr, frame_pa, frame_pld_len, - parent_sq_info->context_id, hba); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%x) send ELS/GS Type(0x%x) Code(0x%x) HotTag(0x%x)", - hba->port_cfg.port_id, parent_sq_info->rport_index, ls_gs_cmnd_type, - ls_gs_cmnd_code, hot_tag); - if (ls_gs_cmnd_code == ELS_PLOGI || ls_gs_cmnd_code == ELS_LOGO) { - ret = spfc_suspend_sqe_and_send_nop(hba, prt_queue_info, sqe, pkg); - return ret; - } - prtq_state_lock = &prt_queue_info->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flags); - if (SPFC_RPORT_NOT_OFFLOADED(prt_queue_info)) { - spin_unlock_irqrestore(prtq_state_lock, flags); - /* Send PLOGI or PLOGI ACC or SCR if session not offload */ - ret = spfc_send_els_via_default_session(hba, sqe, pkg, prt_queue_info); - } else { - spin_unlock_irqrestore(prtq_state_lock, flags); - ret = spfc_parent_sq_enqueue(parent_sq_info, sqe, ssqn); - } - - return ret; -} - -u32 spfc_send_ls_gs_cmnd(void *handle, struct unf_frame_pkg *pkg) -{ - u32 ret = UNF_RETURN_ERROR; - struct spfc_hba_info *hba = NULL; - struct spfc_parent_queue_info *prt_qinfo = NULL; - u16 ls_gs_cmnd_code = SPFC_ZERO; - union unf_sfs_u *sfs_entry = NULL; - struct unf_rrq *rrq_pld = NULL; - u16 ox_id = 0; - u16 rx_id = 0; - - /* Check Parameters */ - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(UNF_GET_SFS_ENTRY(pkg), UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(SPFC_GET_CMND_PAYLOAD_ADDR(pkg), UNF_RETURN_ERROR); - - SPFC_CHECK_PKG_ALLOCTIME(pkg); - hba = (struct spfc_hba_info *)handle; - ls_gs_cmnd_code = SPFC_GET_LS_GS_CMND_CODE(pkg->cmnd); - - /* If RRQ Req, Special processing */ - if (ls_gs_cmnd_code == ELS_RRQ) { - sfs_entry = UNF_GET_SFS_ENTRY(pkg); - rrq_pld = &sfs_entry->rrq; - ox_id = (u16)(rrq_pld->oxid_rxid >> UNF_SHIFT_16); - rx_id = (u16)(rrq_pld->oxid_rxid & SPFC_RXID_MASK); - rrq_pld->oxid_rxid = (u32)ox_id << UNF_SHIFT_16 | rx_id; - } - - prt_qinfo = spfc_find_parent_queue_info_by_pkg(hba, pkg); - ret = spfc_check_parent_qinfo_valid(hba, pkg, &prt_qinfo); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[error]Port(0x%x) send ELS/GS SID(0x%x) DID(0x%x) check qinfo invalid", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - return UNF_RETURN_ERROR; - } - - ret = spfc_send_ls_gs_via_parent(hba, pkg, prt_qinfo); - - return ret; -} - -void spfc_save_login_parms_in_sq_info(struct spfc_hba_info *hba, - struct unf_port_login_parms *login_params) -{ - u32 rport_index = login_params->rport_index; - struct spfc_parent_sq_info 
*parent_sq_info = NULL; - - if (rport_index >= UNF_SPFC_MAXRPORT_NUM) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) save login parms,but uplevel alloc invalid rport index: 0x%x", - hba->port_cfg.port_id, rport_index); - - return; - } - - parent_sq_info = &hba->parent_queue_mgr->parent_queue[rport_index].parent_sq_info; - - parent_sq_info->plogi_co_parms.seq_cnt = login_params->seq_cnt; - parent_sq_info->plogi_co_parms.ed_tov = login_params->ed_tov; - parent_sq_info->plogi_co_parms.tx_mfs = (login_params->tx_mfs < - SPFC_DEFAULT_TX_MAX_FREAM_SIZE) ? - SPFC_DEFAULT_TX_MAX_FREAM_SIZE : - login_params->tx_mfs; - parent_sq_info->plogi_co_parms.ed_tov_time = login_params->ed_tov_timer_val; -} - -static void -spfc_recover_offloading_state(struct spfc_parent_queue_info *prt_queue_info, - enum spfc_parent_queue_state offload_state) -{ - ulong flags = 0; - - spin_lock_irqsave(&prt_queue_info->parent_queue_state_lock, flags); - - if (prt_queue_info->offload_state == SPFC_QUEUE_STATE_OFFLOADING) - prt_queue_info->offload_state = offload_state; - - spin_unlock_irqrestore(&prt_queue_info->parent_queue_state_lock, flags); -} - -static bool spfc_check_need_delay_offload(void *hba, struct unf_frame_pkg *pkg, u32 rport_index, - struct spfc_parent_queue_info *cur_prt_queue_info, - struct spfc_parent_queue_info **offload_prt_queue_info) -{ - ulong flags = 0; - struct spfc_parent_queue_info *prt_queue_info = NULL; - spinlock_t *prtq_state_lock = NULL; - - prtq_state_lock = &cur_prt_queue_info->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flags); - - if (cur_prt_queue_info->offload_state == SPFC_QUEUE_STATE_OFFLOADING) { - spin_unlock_irqrestore(prtq_state_lock, flags); - - prt_queue_info = spfc_find_offload_parent_queue(hba, pkg->frame_head.csctl_sid & - UNF_NPORTID_MASK, - pkg->frame_head.rctl_did & - UNF_NPORTID_MASK, rport_index); - if (prt_queue_info) { - *offload_prt_queue_info = prt_queue_info; - return true; - } - } else { - spin_unlock_irqrestore(prtq_state_lock, flags); - } - - return false; -} - -static u16 spfc_build_wqe_with_offload(struct spfc_hba_info *hba, struct spfc_sqe *sqe, - struct spfc_parent_queue_info *prt_queue_info, - struct unf_frame_pkg *pkg, - enum spfc_parent_queue_state last_offload_state) -{ - u32 tx_mfs = 2048; - u32 edtov_timer = 2000; - dma_addr_t ctx_pa = 0; - u16 els_cmnd_type = SPFC_ZERO; - u16 els_cmnd_code = SPFC_ZERO; - void *ctx_va = NULL; - struct spfc_parent_context *parent_ctx_info = NULL; - struct spfc_sw_section *sw_setction = NULL; - struct spfc_parent_sq_info *parent_sq_info = &prt_queue_info->parent_sq_info; - u16 offload_flag = 0; - - els_cmnd_type = SPFC_GET_ELS_RSP_TYPE(pkg->cmnd); - if (SPFC_PKG_IS_ELS_RSP(els_cmnd_type)) { - els_cmnd_code = SPFC_GET_ELS_RSP_CODE(pkg->cmnd); - } else { - els_cmnd_code = els_cmnd_type; - els_cmnd_type = ELS_CMND; - } - - offload_flag = SPFC_CHECK_NEED_OFFLOAD(els_cmnd_code, els_cmnd_type, last_offload_state); - - parent_ctx_info = (struct spfc_parent_context *)(prt_queue_info->parent_ctx.parent_ctx); - sw_setction = &parent_ctx_info->sw_section; - - sw_setction->tx_mfs = cpu_to_be16((u16)(tx_mfs)); - sw_setction->e_d_tov_timer_val = cpu_to_be32(edtov_timer); - - spfc_big_to_cpu32(&sw_setction->sw_ctxt_misc.pctxt_val0, - sizeof(sw_setction->sw_ctxt_misc.pctxt_val0)); - sw_setction->sw_ctxt_misc.dw.port_id = SPFC_GET_NETWORK_PORT_ID(hba); - spfc_cpu_to_big32(&sw_setction->sw_ctxt_misc.pctxt_val0, - sizeof(sw_setction->sw_ctxt_misc.pctxt_val0)); - - 
spfc_big_to_cpu32(&sw_setction->sw_ctxt_config.pctxt_val1, - sizeof(sw_setction->sw_ctxt_config.pctxt_val1)); - spfc_cpu_to_big32(&sw_setction->sw_ctxt_config.pctxt_val1, - sizeof(sw_setction->sw_ctxt_config.pctxt_val1)); - - /* Fill in context to the chip */ - ctx_pa = prt_queue_info->parent_ctx.cqm_parent_ctx_obj->paddr; - ctx_va = prt_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr; - - /* No need to write the key and no need to do BIG TO CPU32 */ - memcpy(ctx_va, prt_queue_info->parent_ctx.parent_ctx, sizeof(struct spfc_parent_context)); - - if (SPFC_PKG_IS_ELS_RSP(els_cmnd_type)) { - sqe->ts_sl.cont.els_rsp.context_gpa_hi = SPFC_HIGH_32_BITS(ctx_pa); - sqe->ts_sl.cont.els_rsp.context_gpa_lo = SPFC_LOW_32_BITS(ctx_pa); - sqe->ts_sl.cont.els_rsp.wd1.offload_flag = offload_flag; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]sid 0x%x, did 0x%x, GPA HIGH 0x%x, GPA LOW 0x%x, scq 0x%x, offload flag 0x%x", - parent_sq_info->local_port_id, - parent_sq_info->remote_port_id, - sqe->ts_sl.cont.els_rsp.context_gpa_hi, - sqe->ts_sl.cont.els_rsp.context_gpa_lo, - prt_queue_info->parent_sts_scq_info.cqm_queue_id, - offload_flag); - } else { - sqe->ts_sl.cont.t_els_gs.context_gpa_hi = SPFC_HIGH_32_BITS(ctx_pa); - sqe->ts_sl.cont.t_els_gs.context_gpa_lo = SPFC_LOW_32_BITS(ctx_pa); - sqe->ts_sl.cont.t_els_gs.wd4.offload_flag = offload_flag; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]sid 0x%x, did 0x%x, GPA HIGH 0x%x, GPA LOW 0x%x, scq 0x%x, offload flag 0x%x", - parent_sq_info->local_port_id, - parent_sq_info->remote_port_id, - sqe->ts_sl.cont.t_els_gs.context_gpa_hi, - sqe->ts_sl.cont.t_els_gs.context_gpa_lo, - prt_queue_info->parent_sts_scq_info.cqm_queue_id, - offload_flag); - } - - if (offload_flag) { - prt_queue_info->offload_state = SPFC_QUEUE_STATE_OFFLOADING; - parent_sq_info->need_offloaded = SPFC_NEED_DO_OFFLOAD; - } - - return offload_flag; -} - -u32 spfc_send_els_via_default_session(struct spfc_hba_info *hba, struct spfc_sqe *io_sqe, - struct unf_frame_pkg *pkg, - struct spfc_parent_queue_info *prt_queue_info) -{ - ulong flags = 0; - bool sqe_delay = false; - u32 ret = UNF_RETURN_ERROR; - u16 els_cmnd_code = SPFC_ZERO; - u16 els_cmnd_type = SPFC_ZERO; - u16 ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - u32 rport_index = pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]; - struct spfc_sqe *sqe = io_sqe; - struct spfc_parent_queue_info *default_prt_queue_info = NULL; - struct spfc_parent_sq_info *parent_sq_info = &prt_queue_info->parent_sq_info; - struct spfc_parent_queue_info *offload_queue_info = NULL; - enum spfc_parent_queue_state last_offload_state = SPFC_QUEUE_STATE_INITIALIZED; - struct spfc_delay_destroy_ctrl_info delay_ctl_info; - u16 offload_flag = 0; - u32 default_index = SPFC_DEFAULT_RPORT_INDEX; - - memset(&delay_ctl_info, 0, sizeof(struct spfc_delay_destroy_ctrl_info)); - /* Determine the ELS type in pkg */ - els_cmnd_type = SPFC_GET_LS_GS_CMND_CODE(pkg->cmnd); - - if (SPFC_PKG_IS_ELS_RSP(els_cmnd_type)) { - els_cmnd_code = SPFC_GET_ELS_RSP_CODE(pkg->cmnd); - } else { - els_cmnd_code = els_cmnd_type; - els_cmnd_type = ELS_CMND; - } - - spin_lock_irqsave(&prt_queue_info->parent_queue_state_lock, flags); - - last_offload_state = prt_queue_info->offload_state; - - offload_flag = spfc_build_wqe_with_offload(hba, sqe, prt_queue_info, - pkg, last_offload_state); - - spin_unlock_irqrestore(&prt_queue_info->parent_queue_state_lock, flags); - - if (!offload_flag) { - default_prt_queue_info = &hba->parent_queue_mgr->parent_queue[default_index]; - if (!default_prt_queue_info) { -
FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[ERR]cmd(0x%x), type(0x%x) send fail, default session null", - els_cmnd_code, els_cmnd_type); - return UNF_RETURN_ERROR; - } - parent_sq_info = &default_prt_queue_info->parent_sq_info; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]cmd(0x%x), type(0x%x) send via default session", - els_cmnd_code, els_cmnd_type); - } else { - /* Need this xid to judge delay offload, when Sqe Enqueue will - * write again - */ - sqe->ts_sl.xid = parent_sq_info->context_id; - sqe_delay = spfc_check_need_delay_offload(hba, pkg, rport_index, prt_queue_info, - &offload_queue_info); - - if (sqe_delay) { - ret = spfc_push_delay_sqe(hba, offload_queue_info, sqe, pkg); - if (ret == RETURN_OK) { - spfc_recover_offloading_state(prt_queue_info, last_offload_state); - return ret; - } - } - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]cmd(0x%x), type(0x%x) do secretly offload", - els_cmnd_code, els_cmnd_type); - } - - ret = spfc_parent_sq_enqueue(parent_sq_info, sqe, ssqn); - - if (ret != RETURN_OK) { - spfc_recover_offloading_state(prt_queue_info, last_offload_state); - - spin_lock_irqsave(&prt_queue_info->parent_queue_state_lock, - flags); - - if (prt_queue_info->parent_sq_info.destroy_sqe.valid) { - memcpy(&delay_ctl_info, &prt_queue_info->parent_sq_info.destroy_sqe, - sizeof(struct spfc_delay_destroy_ctrl_info)); - - prt_queue_info->parent_sq_info.destroy_sqe.valid = false; - } - - spin_unlock_irqrestore(&prt_queue_info->parent_queue_state_lock, flags); - - spfc_pop_destroy_parent_queue_sqe((void *)hba, &delay_ctl_info); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) fail,recover offloadstatus(%u)", - hba->port_cfg.port_id, rport_index, els_cmnd_type, - els_cmnd_code, prt_queue_info->offload_state); - } - - return ret; -} - -static u32 spfc_rcv_ls_gs_rsp_payload(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag, - u8 *els_pld_buf, u32 pld_len) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - if (pkg->type == UNF_PKG_GS_REQ_DONE) - spfc_big_to_cpu32(els_pld_buf, pld_len); - else - pkg->byte_orders |= SPFC_BIT_2; - - pkg->unf_cmnd_pload_bl.buffer_ptr = els_pld_buf; - pkg->unf_cmnd_pload_bl.length = pld_len; - - pkg->last_pkg_flag = UNF_PKG_NOT_LAST_RESPONSE; - - UNF_LOWLEVEL_RECEIVE_LS_GS_PKG(ret, hba->lport, pkg); - - return ret; -} - -u32 spfc_scq_recv_abts_rsp(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - /* Default path, which is sent from SCQ to the driver */ - u8 status = 0; - u32 ret = UNF_RETURN_ERROR; - u32 ox_id = INVALID_VALUE32; - u32 hot_tag = INVALID_VALUE32; - struct unf_frame_pkg pkg = {0}; - struct spfc_scqe_rcv_abts_rsp *abts_rsp = NULL; - - abts_rsp = &scqe->rcv_abts_rsp; - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = abts_rsp->magic_num; - - ox_id = (u32)(abts_rsp->wd0.ox_id); - - hot_tag = abts_rsp->wd1.hotpooltag & UNF_ORIGIN_HOTTAG_MASK; - if (unlikely(hot_tag < (u32)hba->exi_base || - hot_tag >= (u32)(hba->exi_base + hba->exi_count))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) has bad HotTag(0x%x) for bls_rsp", - hba->port_cfg.port_id, hot_tag); - - status = UNF_IO_FAILED; - hot_tag = INVALID_VALUE32; - } else { - hot_tag -= hba->exi_base; - if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) BLS response has error code(0x%x) tag(0x%x)", - hba->port_cfg.port_id, - SPFC_GET_SCQE_STATUS(scqe), (u32)hot_tag); - - status = UNF_IO_FAILED; - 
-u32 spfc_scq_recv_abts_rsp(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - /* Default path, which is sent from SCQ to the driver */ - u8 status = 0; - u32 ret = UNF_RETURN_ERROR; - u32 ox_id = INVALID_VALUE32; - u32 hot_tag = INVALID_VALUE32; - struct unf_frame_pkg pkg = {0}; - struct spfc_scqe_rcv_abts_rsp *abts_rsp = NULL; - - abts_rsp = &scqe->rcv_abts_rsp; - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = abts_rsp->magic_num; - - ox_id = (u32)(abts_rsp->wd0.ox_id); - - hot_tag = abts_rsp->wd1.hotpooltag & UNF_ORIGIN_HOTTAG_MASK; - if (unlikely(hot_tag < (u32)hba->exi_base || - hot_tag >= (u32)(hba->exi_base + hba->exi_count))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) has bad HotTag(0x%x) for bls_rsp", - hba->port_cfg.port_id, hot_tag); - - status = UNF_IO_FAILED; - hot_tag = INVALID_VALUE32; - } else { - hot_tag -= hba->exi_base; - if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) BLS response has error code(0x%x) tag(0x%x)", - hba->port_cfg.port_id, - SPFC_GET_SCQE_STATUS(scqe), (u32)hot_tag); - - status = UNF_IO_FAILED; - } else { - pkg.frame_head.rctl_did = abts_rsp->wd3.did; - pkg.frame_head.csctl_sid = abts_rsp->wd4.sid; - pkg.frame_head.oxid_rxid = (u32)(abts_rsp->wd0.rx_id) | ox_id << - UNF_SHIFT_16; - - /* BLS_ACC/BLS_RJT: IO_succeed */ - if (abts_rsp->wd2.fh_rctrl == SPFC_RCTL_BLS_ACC) { - status = UNF_IO_SUCCESS; - } else if (abts_rsp->wd2.fh_rctrl == SPFC_RCTL_BLS_RJT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) ABTS RJT: %08x-%08x-%08x", - hba->port_cfg.port_id, - abts_rsp->payload[ARRAY_INDEX_0], - abts_rsp->payload[ARRAY_INDEX_1], - abts_rsp->payload[ARRAY_INDEX_2]); - - status = UNF_IO_SUCCESS; - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) BLS response RCTL is invalid", - hba->port_cfg.port_id); - SPFC_ERR_IO_STAT(hba, SPFC_SCQE_ABTS_RSP); - status = UNF_IO_FAILED; - } - } - } - - /* Set PKG/exchange status & Process BLS_RSP */ - pkg.status = status; - ret = spfc_rcv_bls_rsp(hba, &pkg, hot_tag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) recv ABTS rsp OX_ID(0x%x) RX_ID(0x%x) HotTag(0x%x) SID(0x%x) DID(0x%x) %s", - hba->port_cfg.port_id, ox_id, abts_rsp->wd0.rx_id, hot_tag, - abts_rsp->wd4.sid, abts_rsp->wd3.did, - (ret == RETURN_OK) ? "OK" : "ERROR"); - - return ret; -} -
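- /* Unsolicited ELS command path: the payload is byte-swapped to CPU order before delivery to the CM layer; if delivery fails, the exchange id is freed back to hardware. */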
-u32 spfc_recv_els_cmnd(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u8 *els_pld, u32 pld_len, - bool first) -{ - u32 ret = UNF_RETURN_ERROR; - - /* Convert Payload to little endian */ - spfc_big_to_cpu32(els_pld, pld_len); - - pkg->type = UNF_PKG_ELS_REQ; - - pkg->unf_cmnd_pload_bl.buffer_ptr = els_pld; - - /* Payload length */ - pkg->unf_cmnd_pload_bl.length = pld_len; - - /* Obtain the Cmnd type from the Payload. The Cmnd is in little endian */ - if (first) - pkg->cmnd = UNF_GET_FC_PAYLOAD_ELS_CMND(pkg->unf_cmnd_pload_bl.buffer_ptr); - - /* Errors have been processed in SPFC_RecvElsError */ - pkg->status = UNF_IO_SUCCESS; - - /* Send PKG to the CM layer */ - UNF_LOWLEVEL_RECEIVE_LS_GS_PKG(ret, hba->lport, pkg); - - if (ret != RETURN_OK) { - pkg->rx_or_ox_id = UNF_PKG_FREE_RXID; - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = INVALID_VALUE32; - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = INVALID_VALUE32; - ret = spfc_free_xid((void *)hba, pkg); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) recv %s ox_id(0x%x) RXID(0x%x) PldLen(0x%x) failed, Free xid %s", - hba->port_cfg.port_id, - UNF_GET_FC_HEADER_RCTL(&pkg->frame_head) == SPFC_FC_RCTL_ELS_REQ ? - "ELS REQ" : "ELS RSP", - UNF_GET_OXID(pkg), UNF_GET_RXID(pkg), pld_len, - (ret == RETURN_OK) ? "OK" : "ERROR"); - } - - return ret; -} - -u32 spfc_rcv_ls_gs_rsp(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - if (pkg->type == UNF_PKG_ELS_REQ_DONE) - pkg->byte_orders |= SPFC_BIT_2; - - pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE; - - UNF_LOWLEVEL_RECEIVE_LS_GS_PKG(ret, hba->lport, pkg); - - return ret; -} - -u32 spfc_rcv_els_rsp_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->type = UNF_PKG_ELS_REPLY_DONE; - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - - UNF_LOWLEVEL_SEND_ELS_DONE(ret, hba->lport, pkg); - - return ret; -} -
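- /* The following completion notifiers wrap the parsed SCQE fields in a unf_frame_pkg and forward it to the CM layer through the UNF_LOWLEVEL_* hooks. */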
-u32 spfc_rcv_bls_rsp(const struct spfc_hba_info *hba, struct unf_frame_pkg *pkg, - u32 hot_tag) -{ - /* - * 1. from SCQ (normal) - * 2. from Root RQ (parent does not exist) - * - * single frame, single sequence - */ - u32 ret = UNF_RETURN_ERROR; - - pkg->type = UNF_PKG_BLS_REQ_DONE; - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE; - - UNF_LOWLEVEL_RECEIVE_BLS_PKG(ret, hba->lport, pkg); - - return ret; -} - -u32 spfc_rsv_bls_rsp_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 rx_id) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->type = UNF_PKG_BLS_REPLY_DONE; - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = rx_id; - - UNF_LOWLEVEL_RECEIVE_BLS_PKG(ret, hba->lport, pkg); - - return ret; -} - -u32 spfc_rcv_tmf_marker_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - - /* Send PKG info to COM */ - UNF_LOWLEVEL_RECEIVE_MARKER_STS(ret, hba->lport, pkg); - - return ret; -} - -u32 spfc_rcv_abts_marker_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - - UNF_LOWLEVEL_RECEIVE_ABTS_MARKER_STS(ret, hba->lport, pkg); - - return ret; -} - -static void spfc_scqe_error_pre_proc(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - /* Currently, only printing and statistics collection are performed */ - SPFC_ERR_IO_STAT(hba, SPFC_GET_SCQE_TYPE(scqe)); - SPFC_SCQ_ERR_TYPE_STAT(hba, SPFC_GET_SCQE_STATUS(scqe)); - - FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_WARN, - "[warn]Port(0x%x)-Task_type(%u) SCQE contains error code(%u), additional info(0x%x)", - hba->port_cfg.port_id, scqe->common.ch.wd0.task_type, - scqe->common.ch.wd0.err_code, scqe->common.conn_id); -} - -void *spfc_get_els_buf_by_user_id(struct spfc_hba_info *hba, u16 user_id) -{ - struct spfc_drq_buff_entry *srq_buf_entry = NULL; - struct spfc_srq_info *srq_info = NULL; - - FC_CHECK_RETURN_VALUE(hba, NULL); - - srq_info = &hba->els_srq_info; - FC_CHECK_RETURN_VALUE(user_id < srq_info->valid_wqe_num, NULL); - - srq_buf_entry = &srq_info->els_buff_entry_head[user_id]; - - return srq_buf_entry->buff_addr; -} - -static u32 spfc_check_srq_buf_valid(struct spfc_hba_info *hba, - u16 *buf_id_array, u32 buf_num) -{ - u32 index = 0; - u32 buf_id = 0; - void *srq_buf = NULL; - - for (index = 0; index < buf_num; index++) { - buf_id = buf_id_array[index]; - - if (buf_id < hba->els_srq_info.valid_wqe_num) - srq_buf = spfc_get_els_buf_by_user_id(hba, (u16)buf_id); - else - srq_buf = NULL; - - if (!srq_buf) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) get srq buffer user id(0x%x) is null", - hba->port_cfg.port_id, buf_id); - - return UNF_RETURN_ERROR; - } - } - - return RETURN_OK; -} - -static void spfc_reclaim_srq_buf(struct spfc_hba_info *hba, u16 *buf_id_array, - u32 buf_num) -{ - u32 index = 0; - u32 buf_id = 0; - void *srq_buf = NULL; - - for (index = 0; index < buf_num; index++) { - buf_id = buf_id_array[index]; - if (buf_id < hba->els_srq_info.valid_wqe_num) - srq_buf = spfc_get_els_buf_by_user_id(hba, (u16)buf_id); - else - srq_buf = NULL; - - /* A NULL buffer is invalid; in this case, exit directly. - */ - if (!srq_buf) - break; - - spfc_post_els_srq_wqe(&hba->els_srq_info, (u16)buf_id); - } -} -
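- /* Validity checks for LS/GS SCQEs: frame length, SCQE error code and SRQ buffer ids are verified before any payload is pushed upwards; bad buffers are reclaimed to the SRQ. */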
-static u32 spfc_check_ls_gs_valid(struct spfc_hba_info *hba, union spfc_scqe *scqe, - struct unf_frame_pkg *pkg, u16 *buf_id_array, - u32 buf_num, u32 frame_len) -{ - u32 hot_tag; - - hot_tag = UNF_GET_HOTPOOL_TAG(pkg); - - /* If the ELS CMD returns an error code, discard it directly */ - if ((sizeof(struct spfc_fc_frame_header) > frame_len) || - (SPFC_SCQE_HAS_ERRCODE(scqe)) || buf_num > SPFC_ELS_SRQ_BUF_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) get scqe type(0x%x) payload len(0x%x), scq status(0x%x), user id num(0x%x) abnormal", - hba->port_cfg.port_id, SPFC_GET_SCQE_TYPE(scqe), frame_len, - SPFC_GET_SCQE_STATUS(scqe), buf_num); - - /* ELS RSP Special Processing */ - if (SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_ELS_RSP || - SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_GS_RSP) { - if (SPFC_SCQE_ERR_TO_CM(scqe)) { - pkg->status = UNF_IO_FAILED; - (void)spfc_rcv_ls_gs_rsp(hba, pkg, hot_tag); - } else { - if (SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_ELS_RSP) - SPFC_HBA_STAT(hba, SPFC_STAT_ELS_RSP_EXCH_REUSE); - else - SPFC_HBA_STAT(hba, SPFC_STAT_GS_RSP_EXCH_REUSE); - } - } - - /* Reclaim srq */ - if (buf_num <= SPFC_ELS_SRQ_BUF_NUM) - spfc_reclaim_srq_buf(hba, buf_id_array, buf_num); - - return UNF_RETURN_ERROR; - } - - /* ELS CMD Check the validity of the buffer sent by the ucode */ - if (SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_ELS_CMND) { - if (spfc_check_srq_buf_valid(hba, buf_id_array, buf_num) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) get els cmnd scqe user id num(0x%x) abnormal, as some srq buff is null", - hba->port_cfg.port_id, buf_num); - - spfc_reclaim_srq_buf(hba, buf_id_array, buf_num); - - return UNF_RETURN_ERROR; - } - } - - return RETURN_OK; -} - -u32 spfc_scq_recv_els_cmnd(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 ret = RETURN_OK; - u32 pld_len = 0; - u32 header_len = 0; - u32 frame_len = 0; - u32 rcv_data_len = 0; - u32 max_buf_num = 0; - u16 buf_id = 0; - u32 index = 0; - u8 *pld_addr = NULL; - struct unf_frame_pkg pkg = {0}; - struct spfc_scqe_rcv_els_cmd *els_cmd = NULL; - struct spfc_fc_frame_header *els_frame = NULL; - struct spfc_fc_frame_header tmp_frame = {0}; - void *els_buf = NULL; - bool first = false; - - els_cmd = &scqe->rcv_els_cmd; - frame_len = els_cmd->wd3.data_len; - max_buf_num = els_cmd->wd3.user_id_num; - spfc_swap_16_in_32((u32 *)els_cmd->user_id, SPFC_LS_GS_USERID_LEN); - - pkg.xchg_contex = NULL; - pkg.status = UNF_IO_SUCCESS; - - /* Check the validity of error codes and buff. If an exception occurs, - * discard the error code - */ - ret = spfc_check_ls_gs_valid(hba, scqe, &pkg, els_cmd->user_id, - max_buf_num, frame_len); - if (ret != RETURN_OK) { - pkg.rx_or_ox_id = UNF_PKG_FREE_RXID; - pkg.frame_head.oxid_rxid = - (u32)(els_cmd->wd2.rx_id) | (u32)(els_cmd->wd2.ox_id) << UNF_SHIFT_16; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = INVALID_VALUE32; - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = INVALID_VALUE32; - pkg.frame_head.csctl_sid = els_cmd->wd1.sid; - pkg.frame_head.rctl_did = els_cmd->wd0.did; - spfc_free_xid((void *)hba, &pkg); - return RETURN_OK; - } - - /* Send data to COM cyclically */ - for (index = 0; index < max_buf_num; index++) { - /* Exception record, which is not processed currently */ - if (rcv_data_len >= frame_len) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) get els cmd data len(0x%x) is bigger than frame len(0x%x)", - hba->port_cfg.port_id, rcv_data_len, frame_len); - } - - buf_id = (u16)els_cmd->user_id[index]; - els_buf = spfc_get_els_buf_by_user_id(hba, buf_id); - - /* Obtain payload address */ - pld_addr = (u8 *)(els_buf); - header_len = 0; - first = false; - if (index == 0) { - els_frame = (struct spfc_fc_frame_header *)els_buf; - pld_addr = (u8 *)(els_frame + 1); - - header_len = sizeof(struct spfc_fc_frame_header); - first = true; - - memcpy(&tmp_frame, els_frame, sizeof(struct spfc_fc_frame_header)); - spfc_big_to_cpu32(&tmp_frame, sizeof(struct spfc_fc_frame_header)); - memcpy(&pkg.frame_head, &tmp_frame, sizeof(pkg.frame_head)); - pkg.frame_head.oxid_rxid = (u32)((pkg.frame_head.oxid_rxid & - SPFC_OXID_MASK) | (els_cmd->wd2.rx_id)); - } - - /* Calculate the payload length */ - pkg.last_pkg_flag = 0; - pld_len = SPFC_SRQ_ELS_SGE_LEN; - - if ((rcv_data_len + SPFC_SRQ_ELS_SGE_LEN) >= frame_len) { - pkg.last_pkg_flag = 1; - pld_len = frame_len - rcv_data_len; - } - - pkg.class_mode = els_cmd->wd0.class_mode; - - /* Push data to COM */ - if (ret == RETURN_OK) { - ret = spfc_recv_els_cmnd(hba, &pkg, pld_addr, - (pld_len - header_len), first); - } - - /* Reclaim srq buffer */ - spfc_post_els_srq_wqe(&hba->els_srq_info, buf_id); - - rcv_data_len += pld_len; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) recv ELS Type(0x%x) Cmnd(0x%x) ox_id(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) %u", - hba->port_cfg.port_id, pkg.type, pkg.cmnd, els_cmd->wd2.ox_id, - els_cmd->wd2.rx_id, els_cmd->wd1.sid, els_cmd->wd0.did, ret); - - return ret; -} - -static u32 spfc_get_ls_gs_pld_len(struct spfc_hba_info *hba, u32 rcv_data_len, u32 frame_len) -{ - u32 pld_len; - - /* Exception record, which is not processed currently */ - if (rcv_data_len >= frame_len) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) get els rsp data len(0x%x) is bigger than frame len(0x%x)", - hba->port_cfg.port_id, rcv_data_len, frame_len); - } - - pld_len = SPFC_SRQ_ELS_SGE_LEN; - if ((rcv_data_len + SPFC_SRQ_ELS_SGE_LEN) >= frame_len) - pld_len = frame_len - rcv_data_len; - - return pld_len; -} -
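- /* LS/GS response receive path: the first SRQ buffer carries the FC frame header, later buffers carry payload only; each buffer is returned to the ELS SRQ after use. */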
-u32 spfc_scq_recv_ls_gs_rsp(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 ret = RETURN_OK; - u32 pld_len = 0; - u32 header_len = 0; - u32 frame_len = 0; - u32 rcv_data_len = 0; - u32 max_buf_num = 0; - u16 buf_id = 0; - u32 hot_tag = INVALID_VALUE32; - u32 index = 0; - u32 ox_id = (~0); - struct unf_frame_pkg pkg = {0}; - struct spfc_scqe_rcv_els_gs_rsp *ls_gs_rsp_scqe = NULL; - struct spfc_fc_frame_header *els_frame = NULL; - void *ls_gs_buf = NULL; - u8 *pld_addr = NULL; - u8 task_type; - - ls_gs_rsp_scqe = &scqe->rcv_els_gs_rsp; - frame_len = ls_gs_rsp_scqe->wd2.data_len; - max_buf_num = ls_gs_rsp_scqe->wd4.user_id_num; - spfc_swap_16_in_32((u32 *)ls_gs_rsp_scqe->user_id, SPFC_LS_GS_USERID_LEN); - - ox_id = ls_gs_rsp_scqe->wd1.ox_id; - hot_tag = ((u16)(ls_gs_rsp_scqe->wd5.hotpooltag) & UNF_ORIGIN_HOTTAG_MASK) - hba->exi_base; - pkg.frame_head.oxid_rxid = (u32)(ls_gs_rsp_scqe->wd1.rx_id) | ox_id << UNF_SHIFT_16; - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = ls_gs_rsp_scqe->magic_num; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - pkg.frame_head.csctl_sid = ls_gs_rsp_scqe->wd4.sid; - pkg.frame_head.rctl_did = ls_gs_rsp_scqe->wd3.did; - pkg.status = UNF_IO_SUCCESS; - pkg.type = UNF_PKG_ELS_REQ_DONE; - - task_type = SPFC_GET_SCQE_TYPE(scqe); - if (task_type == SPFC_SCQE_GS_RSP) { - if (ls_gs_rsp_scqe->wd3.end_rsp) - SPFC_HBA_STAT(hba, SPFC_STAT_LAST_GS_SCQE); - pkg.type = UNF_PKG_GS_REQ_DONE; - } - - /* Handle the exception first. The LS/GS RSP returns the error code. - * Only the ox_id can submit the error code to the CM layer. - */ - ret = spfc_check_ls_gs_valid(hba, scqe, &pkg, ls_gs_rsp_scqe->user_id, - max_buf_num, frame_len); - if (ret != RETURN_OK) - return RETURN_OK; - - if (ls_gs_rsp_scqe->wd3.echo_rsp) { - pkg.private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME] = - ls_gs_rsp_scqe->user_id[ARRAY_INDEX_5]; - pkg.private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME] = - ls_gs_rsp_scqe->user_id[ARRAY_INDEX_6]; - pkg.private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME] = - ls_gs_rsp_scqe->user_id[ARRAY_INDEX_7]; - pkg.private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME] = - ls_gs_rsp_scqe->user_id[ARRAY_INDEX_8]; - } - - /* Send data to COM cyclically */ - for (index = 0; index < max_buf_num; index++) { - /* Obtain buffer address */ - ls_gs_buf = NULL; - buf_id = (u16)ls_gs_rsp_scqe->user_id[index]; - ls_gs_buf = spfc_get_els_buf_by_user_id(hba, buf_id); - - if (unlikely(!ls_gs_buf)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) ox_id(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) Index(0x%x) get els rsp buff user id(0x%x) abnormal", - hba->port_cfg.port_id, ox_id, - ls_gs_rsp_scqe->wd1.rx_id, ls_gs_rsp_scqe->wd4.sid, - ls_gs_rsp_scqe->wd3.did, index, buf_id); - - if (index == 0) { - pkg.status = UNF_IO_FAILED; - ret = spfc_rcv_ls_gs_rsp(hba, &pkg, hot_tag); - } - - return ret; - } - - header_len = 0; - pld_addr = (u8 *)(ls_gs_buf); - if (index == 0) { - header_len = sizeof(struct spfc_fc_frame_header); - els_frame = (struct spfc_fc_frame_header *)ls_gs_buf; - pld_addr = (u8 *)(els_frame + 1); - } - - /* Calculate the payload length */ - pld_len = spfc_get_ls_gs_pld_len(hba, rcv_data_len, frame_len); - - /* Push data to COM */ - if (ret == RETURN_OK) { - ret = spfc_rcv_ls_gs_rsp_payload(hba, &pkg, hot_tag, pld_addr, - (pld_len - header_len)); - } - - /* Reclaim srq buffer */ - spfc_post_els_srq_wqe(&hba->els_srq_info, buf_id); - - rcv_data_len += pld_len; - } - - if (ls_gs_rsp_scqe->wd3.end_rsp && ret == RETURN_OK) - ret = spfc_rcv_ls_gs_rsp(hba, &pkg, hot_tag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) receive LS/GS RSP ox_id(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) end_rsp(0x%x) user_num(0x%x)", - hba->port_cfg.port_id, ox_id, ls_gs_rsp_scqe->wd1.rx_id, - ls_gs_rsp_scqe->wd4.sid, ls_gs_rsp_scqe->wd3.did, - ls_gs_rsp_scqe->wd3.end_rsp, - ls_gs_rsp_scqe->wd4.user_id_num); - - return ret; -} - -u32 spfc_scq_recv_els_rsp_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 ret = UNF_RETURN_ERROR; - u32 rx_id = INVALID_VALUE32; - u32 hot_tag = INVALID_VALUE32; - struct
unf_frame_pkg pkg = {0}; - struct spfc_scqe_comm_rsp_sts *els_rsp_sts_scqe = NULL; - - els_rsp_sts_scqe = &scqe->comm_sts; - rx_id = (u32)els_rsp_sts_scqe->wd0.rx_id; - - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - els_rsp_sts_scqe->magic_num; - pkg.frame_head.oxid_rxid = rx_id | (u32)(els_rsp_sts_scqe->wd0.ox_id) << UNF_SHIFT_16; - hot_tag = (u32)((els_rsp_sts_scqe->wd1.hotpooltag & UNF_ORIGIN_HOTTAG_MASK) - - hba->exi_base); - - if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe))) - pkg.status = UNF_IO_FAILED; - else - pkg.status = UNF_IO_SUCCESS; - - ret = spfc_rcv_els_rsp_sts(hba, &pkg, hot_tag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) recv ELS RSP STS ox_id(0x%x) RXID(0x%x) HotTag(0x%x) %s", - hba->port_cfg.port_id, els_rsp_sts_scqe->wd0.ox_id, rx_id, - hot_tag, (ret == RETURN_OK) ? "OK" : "ERROR"); - - return ret; -} - -static u32 spfc_check_rport_valid(const struct spfc_parent_queue_info *prt_queue_info, u32 scqe_xid) -{ - if (prt_queue_info->parent_ctx.cqm_parent_ctx_obj) { - if ((prt_queue_info->parent_sq_info.context_id & SPFC_CQM_XID_MASK) == - (scqe_xid & SPFC_CQM_XID_MASK)) { - return RETURN_OK; - } - } - - return UNF_RETURN_ERROR; -} - -u32 spfc_scq_recv_offload_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 valid = UNF_RETURN_ERROR; - u32 rport_index = 0; - u32 cid = 0; - u32 xid = 0; - ulong flags = 0; - struct spfc_parent_queue_info *prt_qinfo = NULL; - struct spfc_parent_sq_info *parent_sq_info = NULL; - struct spfc_scqe_sess_sts *offload_sts_scqe = NULL; - struct spfc_delay_destroy_ctrl_info delay_ctl_info; - - memset(&delay_ctl_info, 0, sizeof(struct spfc_delay_destroy_ctrl_info)); - offload_sts_scqe = &scqe->sess_sts; - rport_index = offload_sts_scqe->wd1.conn_id; - cid = offload_sts_scqe->wd2.cid; - xid = offload_sts_scqe->wd0.xid_qpn; - - if (rport_index >= UNF_SPFC_MAXRPORT_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) receive an error offload status: rport(0x%x) is invalid, cacheid(0x%x)", - hba->port_cfg.port_id, rport_index, cid); - - return UNF_RETURN_ERROR; - } - - if (rport_index == SPFC_DEFAULT_RPORT_INDEX && - hba->default_sq_info.default_sq_flag == 0xF) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) default session timeout: rport(0x%x) cacheid(0x%x)", - hba->port_cfg.port_id, rport_index, cid); - return UNF_RETURN_ERROR; - } - - prt_qinfo = &hba->parent_queue_mgr->parent_queue[rport_index]; - parent_sq_info = &prt_qinfo->parent_sq_info; - - valid = spfc_check_rport_valid(prt_qinfo, xid); - if (valid != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) receive an error offload status: rport(0x%x), context id(0x%x) is invalid", - hba->port_cfg.port_id, rport_index, xid); - - return UNF_RETURN_ERROR; - } - - /* Offload failed */ - if (SPFC_GET_SCQE_STATUS(scqe) != SPFC_COMPLETION_STATUS_SUCCESS) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x), rport(0x%x), context id(0x%x), cache id(0x%x), offload failed", - hba->port_cfg.port_id, rport_index, xid, cid); - - spin_lock_irqsave(&prt_qinfo->parent_queue_state_lock, flags); - if (prt_qinfo->offload_state != SPFC_QUEUE_STATE_OFFLOADED) { - prt_qinfo->offload_state = SPFC_QUEUE_STATE_INITIALIZED; - parent_sq_info->need_offloaded = INVALID_VALUE8; - } - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, - flags); - - return UNF_RETURN_ERROR; - } - - spin_lock_irqsave(&prt_qinfo->parent_queue_state_lock, flags); - prt_qinfo->parent_sq_info.cache_id = cid; - prt_qinfo->offload_state = 
SPFC_QUEUE_STATE_OFFLOADED; - parent_sq_info->need_offloaded = SPFC_HAVE_OFFLOAD; - atomic_set(&prt_qinfo->parent_sq_info.sq_cached, true); - - if (prt_qinfo->parent_sq_info.destroy_sqe.valid) { - delay_ctl_info.valid = prt_qinfo->parent_sq_info.destroy_sqe.valid; - delay_ctl_info.rport_index = prt_qinfo->parent_sq_info.destroy_sqe.rport_index; - delay_ctl_info.time_out = prt_qinfo->parent_sq_info.destroy_sqe.time_out; - delay_ctl_info.start_jiff = prt_qinfo->parent_sq_info.destroy_sqe.start_jiff; - delay_ctl_info.rport_info.nport_id = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id; - delay_ctl_info.rport_info.rport_index = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index; - delay_ctl_info.rport_info.port_name = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name; - prt_qinfo->parent_sq_info.destroy_sqe.valid = false; - } - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, flags); - - if (rport_index == SPFC_DEFAULT_RPORT_INDEX) { - hba->default_sq_info.sq_cid = cid; - hba->default_sq_info.sq_xid = xid; - hba->default_sq_info.default_sq_flag = 1; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "[info]Receive default Session info"); - } - - spfc_pop_destroy_parent_queue_sqe((void *)hba, &delay_ctl_info); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) offload success: rport index(0x%x),rport nportid(0x%x),context id(0x%x),cache id(0x%x).", - hba->port_cfg.port_id, rport_index, - prt_qinfo->parent_sq_info.remote_port_id, xid, cid); - - return RETURN_OK; -} - -static u32 spfc_send_bls_via_parent(struct spfc_hba_info *hba, struct unf_frame_pkg *pkg) -{ - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = INVALID_VALUE16; - u16 rx_id = INVALID_VALUE16; - struct spfc_sqe tmp_sqe; - struct spfc_sqe *sqe = NULL; - struct spfc_parent_sq_info *parent_sq_info = NULL; - struct spfc_parent_queue_info *prt_qinfo = NULL; - u16 ssqn; - - FC_CHECK_RETURN_VALUE((pkg->type == UNF_PKG_BLS_REQ), UNF_RETURN_ERROR); - - sqe = &tmp_sqe; - memset(sqe, 0, sizeof(struct spfc_sqe)); - - prt_qinfo = spfc_find_parent_queue_info_by_pkg(hba, pkg); - if (!prt_qinfo) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return ret; - } - - parent_sq_info = spfc_find_parent_sq_by_pkg(hba, pkg); - if (!parent_sq_info) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send ABTS SID_DID(0x%x_0x%x) with null parent queue information", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return ret; - } - - rx_id = UNF_GET_RXID(pkg); - ox_id = UNF_GET_OXID(pkg); - - /* Assemble the SQE Control Section part. The ABTS does not have - * Payload. bdsl=0 - */ - spfc_build_service_wqe_ctrl_section(&sqe->ctrl_sl, SPFC_BYTES_TO_QW_NUM(SPFC_SQE_TS_SIZE), - 0); - - /* Assemble the SQE Task Section BLS Common part. 
DW2 of the - * BLS WQE is reserved and set to 0 - */ - spfc_build_service_wqe_ts_common(&sqe->ts_sl, parent_sq_info->rport_index, ox_id, rx_id, 0); - - /* Assemble the special part of the ABTS */ - spfc_build_bls_wqe_ts_req(sqe, pkg, hba); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%x) send ABTS_REQ ox_id(0x%x) RXID(0x%x), HotTag(0x%x)", - hba->port_cfg.port_id, parent_sq_info->rport_index, ox_id, - rx_id, (u16)(UNF_GET_HOTPOOL_TAG(pkg) + hba->exi_base)); - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - ret = spfc_parent_sq_enqueue(parent_sq_info, sqe, ssqn); - - return ret; -} - -u32 spfc_send_bls_cmnd(void *handle, struct unf_frame_pkg *pkg) -{ - u32 ret = UNF_RETURN_ERROR; - struct spfc_hba_info *hba = NULL; - ulong flags = 0; - struct spfc_parent_queue_info *prt_qinfo = NULL; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg->type == UNF_PKG_BLS_REQ || pkg->type == UNF_PKG_BLS_REPLY, - UNF_RETURN_ERROR); - - SPFC_CHECK_PKG_ALLOCTIME(pkg); - hba = (struct spfc_hba_info *)handle; - - prt_qinfo = spfc_find_parent_queue_info_by_pkg(hba, pkg); - if (!prt_qinfo) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return ret; - } - - spin_lock_irqsave(&prt_qinfo->parent_queue_state_lock, flags); - - if (SPFC_RPORT_OFFLOADED(prt_qinfo)) { - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, flags); - ret = spfc_send_bls_via_parent(hba, pkg); - } else { - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, flags); - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) while not offloaded, do nothing", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - } - - return ret; -} - -static u32 spfc_scq_rcv_flush_sq_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - /* - * RCVD sq flush sts - * --->>> continue flush or clear done - */ - u32 ret = UNF_RETURN_ERROR; - - if (scqe->flush_sts.wd0.port_id != hba->port_index) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_CRITICAL, - "[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)", - hba->port_cfg.port_id, scqe->clear_sts.wd0.port_id, - hba->port_index, hba->queue_set_stage); - - return UNF_RETURN_ERROR; - } - - if (scqe->flush_sts.wd0.last_flush) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_INFO, - "[info]Port(0x%x) flush sq(0x%x) done, stage(0x%x)", - hba->port_cfg.port_id, hba->next_clear_sq, hba->queue_set_stage); - - /* If the Flush STS is last one, send cmd done */ - ret = spfc_clear_sq_wqe_done(hba); - } else { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "[info]Port(0x%x) continue flush sq(0x%x), stage(0x%x)", - hba->port_cfg.port_id, hba->next_clear_sq, hba->queue_set_stage); - - ret = spfc_clear_pending_sq_wqe(hba); - } - - return ret; -} -
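- /* Buffer clear status: all fetched WQEs have been cleared, so mark the HBA I/O-cleared state and start flushing the pending SQ WQEs. */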
-static u32 spfc_scq_rcv_buf_clear_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - /* - * clear: fetched sq wqe - * ---to--->>> pending sq wqe - */ - u32 ret = UNF_RETURN_ERROR; - - if (scqe->clear_sts.wd0.port_id != hba->port_index) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_CRITICAL, - "[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)", - hba->port_cfg.port_id, scqe->clear_sts.wd0.port_id, - hba->port_index, hba->queue_set_stage); - - return UNF_RETURN_ERROR; - } - - /* set port with I/O cleared state */ - spfc_set_hba_clear_state(hba, true); - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_KEVENT, - "[info]Port(0x%x) cleared all fetched wqe, start clear sq pending wqe, stage(0x%x)", - hba->port_cfg.port_id, hba->queue_set_stage); - - hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_FLUSHING; - ret = spfc_clear_pending_sq_wqe(hba); - - return ret; -} - -u32 spfc_scq_recv_sess_rst_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 rport_index = INVALID_VALUE32; - ulong flags = 0; - struct spfc_parent_queue_info *parent_queue_info = NULL; - struct spfc_scqe_sess_sts *sess_sts_scqe = (struct spfc_scqe_sess_sts *)(void *)scqe; - u32 flush_done; - u32 *ctx_array = NULL; - int ret; - spinlock_t *prtq_state_lock = NULL; - - rport_index = sess_sts_scqe->wd1.conn_id; - if (rport_index >= UNF_SPFC_MAXRPORT_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) receive reset session cmd sts failed, invalid rport(0x%x) status_code(0x%x) remain_cnt(0x%x)", - hba->port_cfg.port_id, rport_index, - sess_sts_scqe->ch.wd0.err_code, - sess_sts_scqe->ch.wd0.cqe_remain_cnt); - - return UNF_RETURN_ERROR; - } - - parent_queue_info = &hba->parent_queue_mgr->parent_queue[rport_index]; - prtq_state_lock = &parent_queue_info->parent_queue_state_lock; - /* - * If only session reset is used, the offload status of sq remains - * unchanged. If a link is deleted, the offload status is set to - * destroying and is irreversible. - */ - spin_lock_irqsave(prtq_state_lock, flags); - - /* - * Fault tolerance: the sts may arrive after the connection deletion - * has timed out. cancel_delayed_work() returns 1 if the timer was - * cancelled successfully and 0 if the timer is already running. - */ - if (!cancel_delayed_work(&parent_queue_info->parent_sq_info.del_work)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) rport_index(0x%x) delete rport timer maybe timeout", - hba->port_cfg.port_id, rport_index); - } - - /* - * If the SessRstSts is returned too late and the Parent Queue Info - * resource is released, OK is returned.
- */ - if (parent_queue_info->offload_state != SPFC_QUEUE_STATE_DESTROYING) { - spin_unlock_irqrestore(prtq_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[info]Port(0x%x) reset session cmd complete, no need to free parent qinfo, rport(0x%x) status_code(0x%x) remain_cnt(0x%x)", - hba->port_cfg.port_id, rport_index, - sess_sts_scqe->ch.wd0.err_code, - sess_sts_scqe->ch.wd0.cqe_remain_cnt); - - return RETURN_OK; - } - - if (parent_queue_info->parent_ctx.cqm_parent_ctx_obj) { - ctx_array = (u32 *)((void *)(parent_queue_info->parent_ctx - .cqm_parent_ctx_obj->vaddr)); - flush_done = ctx_array[SPFC_CTXT_FLUSH_DONE_DW_POS] & SPFC_CTXT_FLUSH_DONE_MASK_BE; - mb(); - if (flush_done == 0) { - spin_unlock_irqrestore(prtq_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) rport(0x%x) flushdone is not set, delay to free parent session", - hba->port_cfg.port_id, rport_index); - - /* If flushdone bit is not set,delay free Sq info */ - ret = queue_delayed_work(hba->work_queue, - &(parent_queue_info->parent_sq_info - .flush_done_timeout_work), - (ulong)msecs_to_jiffies((u32) - SPFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS)); - if (!ret) { - SPFC_HBA_STAT(hba, SPFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) rport(0x%x) queue delayed work failed ret:%d", - hba->port_cfg.port_id, rport_index, - ret); - } - - return RETURN_OK; - } - } - - spin_unlock_irqrestore(prtq_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to free parent session with rport(0x%x)", - hba->port_cfg.port_id, rport_index); - - spfc_free_parent_queue_info(hba, parent_queue_info); - - return RETURN_OK; -} - -static u32 spfc_scq_rcv_clear_srq_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - /* - * clear ELS/Immi SRQ - * ---then--->>> Destroy SRQ - */ - struct spfc_srq_info *srq_info = NULL; - - if (SPFC_GET_SCQE_STATUS(scqe) != 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) clear srq failed, status(0x%x)", - hba->port_cfg.port_id, SPFC_GET_SCQE_STATUS(scqe)); - - return RETURN_OK; - } - - srq_info = &hba->els_srq_info; - - /* - * 1: cancel timer succeed - * 0: the timer is being processed, the SQ is released when the timer - * times out - */ - if (cancel_delayed_work(&srq_info->del_work)) - queue_work(hba->work_queue, &hba->els_srq_clear_work); - - return RETURN_OK; -} - -u32 spfc_scq_recv_marker_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 ret = UNF_RETURN_ERROR; - u32 ox_id = INVALID_VALUE32; - u32 rx_id = INVALID_VALUE32; - u32 hot_tag = INVALID_VALUE32; - struct unf_frame_pkg pkg = {0}; - struct spfc_scqe_itmf_marker_sts *tmf_marker_sts_scqe = NULL; - - tmf_marker_sts_scqe = &scqe->itmf_marker_sts; - ox_id = (u32)tmf_marker_sts_scqe->wd1.ox_id; - rx_id = (u32)tmf_marker_sts_scqe->wd1.rx_id; - hot_tag = (tmf_marker_sts_scqe->wd4.hotpooltag & UNF_ORIGIN_HOTTAG_MASK) - hba->exi_base; - pkg.frame_head.oxid_rxid = rx_id | (u32)(ox_id) << UNF_SHIFT_16; - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = tmf_marker_sts_scqe->magic_num; - pkg.frame_head.csctl_sid = tmf_marker_sts_scqe->wd3.sid; - pkg.frame_head.rctl_did = tmf_marker_sts_scqe->wd2.did; - - /* 1. 
set pkg status */ - if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe))) - pkg.status = UNF_IO_FAILED; - else - pkg.status = UNF_IO_SUCCESS; - - /* 2 .process rcvd marker STS: set exchange state */ - ret = spfc_rcv_tmf_marker_sts(hba, &pkg, hot_tag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[event]Port(0x%x) recv marker STS OX_ID(0x%x) RX_ID(0x%x) HotTag(0x%x) result %s", - hba->port_cfg.port_id, ox_id, rx_id, hot_tag, - (ret == RETURN_OK) ? "succeed" : "failed"); - - return ret; -} - -u32 spfc_scq_recv_abts_marker_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 ret = UNF_RETURN_ERROR; - u32 ox_id = INVALID_VALUE32; - u32 rx_id = INVALID_VALUE32; - u32 hot_tag = INVALID_VALUE32; - struct unf_frame_pkg pkg = {0}; - struct spfc_scqe_abts_marker_sts *abts_marker_sts_scqe = NULL; - - abts_marker_sts_scqe = &scqe->abts_marker_sts; - if (!abts_marker_sts_scqe) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]ABTS marker STS is NULL"); - return ret; - } - - ox_id = (u32)abts_marker_sts_scqe->wd1.ox_id; - rx_id = (u32)abts_marker_sts_scqe->wd1.rx_id; - hot_tag = (abts_marker_sts_scqe->wd4.hotpooltag & UNF_ORIGIN_HOTTAG_MASK) - hba->exi_base; - pkg.frame_head.oxid_rxid = rx_id | (u32)(ox_id) << UNF_SHIFT_16; - pkg.frame_head.csctl_sid = abts_marker_sts_scqe->wd3.sid; - pkg.frame_head.rctl_did = abts_marker_sts_scqe->wd2.did; - pkg.abts_maker_status = (u32)abts_marker_sts_scqe->wd3.io_state; - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = abts_marker_sts_scqe->magic_num; - - if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe))) - pkg.status = UNF_IO_FAILED; - else - pkg.status = UNF_IO_SUCCESS; - - ret = spfc_rcv_abts_marker_sts(hba, &pkg, hot_tag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) recv abts marker STS ox_id(0x%x) RXID(0x%x) HotTag(0x%x) %s", - hba->port_cfg.port_id, ox_id, rx_id, hot_tag, - (ret == RETURN_OK) ? "SUCCEED" : "FAILED"); - - return ret; -} - -u32 spfc_handle_aeq_off_load_err(struct spfc_hba_info *hba, struct spfc_aqe_data *aeq_msg) -{ - u32 ret = RETURN_OK; - u32 rport_index = 0; - u32 xid = 0; - struct spfc_parent_queue_info *prt_qinfo = NULL; - struct spfc_delay_destroy_ctrl_info delay_ctl_info; - ulong flags = 0; - - memset(&delay_ctl_info, 0, sizeof(struct spfc_delay_destroy_ctrl_info)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) receive Offload Err Event, EvtCode(0x%x) Conn_id(0x%x) Xid(0x%x)", - hba->port_cfg.port_id, aeq_msg->wd0.evt_code, - aeq_msg->wd0.conn_id, aeq_msg->wd1.xid); - - /* Currently, only the offload failure caused by insufficient scqe is - * processed. Other errors are not processed temporarily. 
- */ - if (unlikely(aeq_msg->wd0.evt_code != FC_ERROR_OFFLOAD_LACKOF_SCQE_FAIL)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) receive an unsupported error code of AEQ Event,EvtCode(0x%x) Conn_id(0x%x)", - hba->port_cfg.port_id, aeq_msg->wd0.evt_code, - aeq_msg->wd0.conn_id); - - return UNF_RETURN_ERROR; - } - SPFC_SCQ_ERR_TYPE_STAT(hba, FC_ERROR_OFFLOAD_LACKOF_SCQE_FAIL); - - rport_index = aeq_msg->wd0.conn_id; - xid = aeq_msg->wd1.xid; - - if (rport_index >= UNF_SPFC_MAXRPORT_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) receive an error offload status: rport(0x%x) is invalid, Xid(0x%x)", - hba->port_cfg.port_id, rport_index, aeq_msg->wd1.xid); - - return UNF_RETURN_ERROR; - } - - prt_qinfo = &hba->parent_queue_mgr->parent_queue[rport_index]; - if (spfc_check_rport_valid(prt_qinfo, xid) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) receive an error offload status: rport(0x%x), context id(0x%x) is invalid", - hba->port_cfg.port_id, rport_index, xid); - - return UNF_RETURN_ERROR; - } - - /* The offload status is restored only when the offload status is offloading */ - spin_lock_irqsave(&prt_qinfo->parent_queue_state_lock, flags); - if (prt_qinfo->offload_state == SPFC_QUEUE_STATE_OFFLOADING) - prt_qinfo->offload_state = SPFC_QUEUE_STATE_INITIALIZED; - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, flags); - - if (prt_qinfo->parent_sq_info.destroy_sqe.valid) { - delay_ctl_info.valid = prt_qinfo->parent_sq_info.destroy_sqe.valid; - delay_ctl_info.rport_index = prt_qinfo->parent_sq_info.destroy_sqe.rport_index; - delay_ctl_info.time_out = prt_qinfo->parent_sq_info.destroy_sqe.time_out; - delay_ctl_info.start_jiff = prt_qinfo->parent_sq_info.destroy_sqe.start_jiff; - delay_ctl_info.rport_info.nport_id = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id; - delay_ctl_info.rport_info.rport_index = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index; - delay_ctl_info.rport_info.port_name = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name; - prt_qinfo->parent_sq_info.destroy_sqe.valid = false; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) pop up delay sqe, start:0x%llx, timeout:0x%x, rport:0x%x, offload state:0x%x", - hba->port_cfg.port_id, delay_ctl_info.start_jiff, - delay_ctl_info.time_out, - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index, - SPFC_QUEUE_STATE_INITIALIZED); - - ret = spfc_free_parent_resource(hba, &delay_ctl_info.rport_info); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) pop delay destroy parent sq failed, rport(0x%x), rport nport id 0x%x", - hba->port_cfg.port_id, - delay_ctl_info.rport_info.rport_index, - delay_ctl_info.rport_info.nport_id); - } - } - - return ret; -} - -u32 spfc_free_xid(void *handle, struct unf_frame_pkg *pkg) -{ - u32 ret = RETURN_ERROR; - u16 rx_id = INVALID_VALUE16; - u16 ox_id = INVALID_VALUE16; - u16 hot_tag = INVALID_VALUE16; - struct spfc_hba_info *hba = (struct spfc_hba_info *)handle; - union spfc_cmdqe tmp_cmd_wqe; - union spfc_cmdqe *cmd_wqe = NULL; - - FC_CHECK_RETURN_VALUE(hba, RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, RETURN_ERROR); - SPFC_CHECK_PKG_ALLOCTIME(pkg); - - cmd_wqe = &tmp_cmd_wqe; - memset(cmd_wqe, 0, sizeof(union spfc_cmdqe)); - - rx_id = UNF_GET_RXID(pkg); - ox_id = UNF_GET_OXID(pkg); - if (UNF_GET_HOTPOOL_TAG(pkg) != INVALID_VALUE32) - hot_tag = (u16)UNF_GET_HOTPOOL_TAG(pkg) + hba->exi_base; - - spfc_build_cmdqe_common(cmd_wqe, 
SPFC_TASK_T_EXCH_ID_FREE, rx_id); - cmd_wqe->xid_free.wd2.hotpool_tag = hot_tag; - cmd_wqe->xid_free.magic_num = UNF_GETXCHGALLOCTIME(pkg); - cmd_wqe->xid_free.sid = pkg->frame_head.csctl_sid; - cmd_wqe->xid_free.did = pkg->frame_head.rctl_did; - cmd_wqe->xid_free.type = pkg->type; - - if (pkg->rx_or_ox_id == UNF_PKG_FREE_OXID) - cmd_wqe->xid_free.wd0.task_id = ox_id; - else - cmd_wqe->xid_free.wd0.task_id = rx_id; - - cmd_wqe->xid_free.wd0.port_id = hba->port_index; - cmd_wqe->xid_free.wd2.scqn = hba->default_scqn; - ret = spfc_root_cmdq_enqueue(hba, cmd_wqe, sizeof(cmd_wqe->xid_free)); - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "[info]Port(0x%x) ox_id(0x%x) RXID(0x%x) hottag(0x%x) magic_num(0x%x) Sid(0x%x) Did(0x%x), send free xid %s", - hba->port_cfg.port_id, ox_id, rx_id, hot_tag, - cmd_wqe->xid_free.magic_num, cmd_wqe->xid_free.sid, - cmd_wqe->xid_free.did, - (ret == RETURN_OK) ? "OK" : "ERROR"); - - return ret; -} - -u32 spfc_scq_free_xid_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 hot_tag = INVALID_VALUE32; - u32 magic_num = INVALID_VALUE32; - u32 ox_id = INVALID_VALUE32; - u32 rx_id = INVALID_VALUE32; - struct spfc_scqe_comm_rsp_sts *free_xid_sts_scqe = NULL; - - free_xid_sts_scqe = &scqe->comm_sts; - magic_num = free_xid_sts_scqe->magic_num; - ox_id = (u32)free_xid_sts_scqe->wd0.ox_id; - rx_id = (u32)free_xid_sts_scqe->wd0.rx_id; - - if (free_xid_sts_scqe->wd1.hotpooltag != INVALID_VALUE16) { - hot_tag = (free_xid_sts_scqe->wd1.hotpooltag & - UNF_ORIGIN_HOTTAG_MASK) - hba->exi_base; - } - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "Port(0x%x) hottag(0x%x) magicnum(0x%x) ox_id(0x%x) rxid(0x%x) sts(%d)", - hba->port_cfg.port_id, hot_tag, magic_num, ox_id, rx_id, - SPFC_GET_SCQE_STATUS(scqe)); - - return RETURN_OK; -} - -u32 spfc_scq_exchg_timeout_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 hot_tag = INVALID_VALUE32; - u32 magic_num = INVALID_VALUE32; - u32 ox_id = INVALID_VALUE32; - u32 rx_id = INVALID_VALUE32; - struct spfc_scqe_comm_rsp_sts *time_out_scqe = NULL; - - time_out_scqe = &scqe->comm_sts; - magic_num = time_out_scqe->magic_num; - ox_id = (u32)time_out_scqe->wd0.ox_id; - rx_id = (u32)time_out_scqe->wd0.rx_id; - - if (time_out_scqe->wd1.hotpooltag != INVALID_VALUE16) - hot_tag = (time_out_scqe->wd1.hotpooltag & UNF_ORIGIN_HOTTAG_MASK) - hba->exi_base; - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "Port(0x%x) recv timer time out sts hotpooltag(0x%x) magicnum(0x%x) ox_id(0x%x) rxid(0x%x) sts(%d)", - hba->port_cfg.port_id, hot_tag, magic_num, ox_id, rx_id, - SPFC_GET_SCQE_STATUS(scqe)); - - return RETURN_OK; -} - -u32 spfc_scq_rcv_sq_nop_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - struct spfc_scqe_sq_nop_sts *sq_nop_scqe = NULL; - struct spfc_parent_queue_info *prt_qinfo = NULL; - struct spfc_parent_sq_info *parent_sq_info = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct spfc_suspend_sqe_info *suspend_sqe = NULL; - struct spfc_suspend_sqe_info *sqe = NULL; - u32 rport_index = 0; - u32 magic_num; - u16 sqn; - u32 sqn_base; - u32 sqn_max; - u32 ret = RETURN_OK; - ulong flags = 0; - - sq_nop_scqe = &scqe->sq_nop_sts; - rport_index = sq_nop_scqe->wd1.conn_id; - magic_num = sq_nop_scqe->magic_num; - sqn = sq_nop_scqe->wd0.sqn; - prt_qinfo = &hba->parent_queue_mgr->parent_queue[rport_index]; - parent_sq_info = &prt_qinfo->parent_sq_info; - sqn_base = parent_sq_info->sqn_base; - sqn_max = sqn_base + UNF_SQ_NUM_PER_SESSION - 1; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) rport(0x%x), magic_num(0x%x) receive nop sq sts from sq(0x%x)", - hba->port_cfg.port_id, rport_index, magic_num, sqn); - - spin_lock_irqsave(&prt_qinfo->parent_queue_state_lock, flags); - list_for_each_safe(node, next_node, &parent_sq_info->suspend_sqe_list) { - sqe = list_entry(node, struct spfc_suspend_sqe_info, list_sqe_entry); - if (sqe->magic_num != magic_num) - continue; - suspend_sqe = sqe; - if (sqn == sqn_max) - list_del(node); - break; - } - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, flags); - - if (suspend_sqe) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) rport_index(0x%x) find suspend sqe.", - hba->port_cfg.port_id, rport_index); - if (sqn < sqn_max) { - ret = spfc_send_nop_cmd(hba, parent_sq_info, magic_num, sqn + 1); - } else if (sqn == sqn_max) { - if (!cancel_delayed_work(&suspend_sqe->timeout_work)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[warn]Port(0x%x) rport(0x%x) reset worker timer maybe timeout", - hba->port_cfg.port_id, rport_index); - } - parent_sq_info->need_offloaded = suspend_sqe->old_offload_sts; - ret = spfc_pop_suspend_sqe(hba, prt_qinfo, suspend_sqe); - kfree(suspend_sqe); - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) rport(0x%x) magicnum(0x%x) can't find suspend sqe", - hba->port_cfg.port_id, rport_index, magic_num); - } - return ret; -} - -static const struct unf_scqe_handle_table scqe_handle_table[] = { - {/* INI rcvd FCP RSP */ - SPFC_SCQE_FCP_IRSP, true, spfc_scq_recv_iresp}, - {/* INI/TGT rcvd ELS_CMND */ - SPFC_SCQE_ELS_CMND, false, spfc_scq_recv_els_cmnd}, - {/* INI/TGT rcvd ELS_RSP */ - SPFC_SCQE_ELS_RSP, true, spfc_scq_recv_ls_gs_rsp}, - {/* INI/TGT rcvd GS_RSP */ - SPFC_SCQE_GS_RSP, true, spfc_scq_recv_ls_gs_rsp}, - {/* INI rcvd BLS_RSP */ - SPFC_SCQE_ABTS_RSP, true, spfc_scq_recv_abts_rsp}, - {/* INI/TGT rcvd ELS_RSP STS(Done) */ - SPFC_SCQE_ELS_RSP_STS, true, spfc_scq_recv_els_rsp_sts}, - {/* INI or TGT rcvd Session enable STS */ - SPFC_SCQE_SESS_EN_STS, false, spfc_scq_recv_offload_sts}, - {/* INI or TGT rcvd flush (pending) SQ STS */ - SPFC_SCQE_FLUSH_SQ_STS, false, spfc_scq_rcv_flush_sq_sts}, - {/* INI or TGT rcvd Buffer clear STS */ - SPFC_SCQE_BUF_CLEAR_STS, false, spfc_scq_rcv_buf_clear_sts}, - {/* INI or TGT rcvd session reset STS */ - SPFC_SCQE_SESS_RST_STS, false, spfc_scq_recv_sess_rst_sts}, - {/* ELS/IMMI SRQ */ - SPFC_SCQE_CLEAR_SRQ_STS, false, spfc_scq_rcv_clear_srq_sts}, - {/* INI rcvd TMF RSP */ - SPFC_SCQE_FCP_ITMF_RSP, true, spfc_scq_recv_iresp}, - {/* INI rcvd TMF Marker STS */ - SPFC_SCQE_ITMF_MARKER_STS, false, spfc_scq_recv_marker_sts}, - {/* INI rcvd ABTS Marker STS */ - SPFC_SCQE_ABTS_MARKER_STS, false, spfc_scq_recv_abts_marker_sts}, - {SPFC_SCQE_XID_FREE_ABORT_STS, false, spfc_scq_free_xid_sts}, - {SPFC_SCQE_EXCHID_TIMEOUT_STS, false, spfc_scq_exchg_timeout_sts}, - {SPFC_SQE_NOP_STS, true, spfc_scq_rcv_sq_nop_sts}, -}; -
-u32 spfc_rcv_scq_entry_from_scq(struct spfc_hba_info *hba, union spfc_scqe *scqe, u32 scqn) -{ - u32 ret = UNF_RETURN_ERROR; - bool reclaim = false; - u32 index = 0; - u32 total = 0; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(scqe, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(scqn < SPFC_TOTAL_SCQ_NUM, UNF_RETURN_ERROR); - - SPFC_IO_STAT(hba, SPFC_GET_SCQE_TYPE(scqe)); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) receive scqe type %d from SCQ[%u]", - hba->port_cfg.port_id, SPFC_GET_SCQE_TYPE(scqe), scqn); - - /* 1. error code checking */ - if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe))) { - /* So far, just print & counter */ - spfc_scqe_error_pre_proc(hba, scqe); - } - - /* 2. Process SCQE by the corresponding processor */ - total = sizeof(scqe_handle_table) / sizeof(struct unf_scqe_handle_table); - while (index < total) { - if (SPFC_GET_SCQE_TYPE(scqe) == scqe_handle_table[index].scqe_type) { - ret = scqe_handle_table[index].scqe_handle_func(hba, scqe); - reclaim = scqe_handle_table[index].reclaim_sq_wpg; - - break; - } - - index++; - } - - /* 3. SCQE type check */ - if (unlikely(total == index)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Unknown SCQE type %d", - SPFC_GET_SCQE_TYPE(scqe)); - - UNF_PRINT_SFS_LIMIT(UNF_ERR, hba->port_cfg.port_id, scqe, sizeof(union spfc_scqe)); - } - - /* 4. If the SCQE is for an SQ-WQE, reclaim the Link List SQ free page */ - if (reclaim) { - if (SPFC_GET_SCQE_SQN(scqe) < SPFC_MAX_SSQ_NUM) { - ret = spfc_reclaim_sq_wqe_page(hba, scqe); - } else { - /* NOTE: for buffer clear, the SCQE conn_id is 0xFFFF, count with HBA */ - SPFC_HBA_STAT((struct spfc_hba_info *)hba, SPFC_STAT_SQ_IO_BUFFER_CLEARED); - } - } - - return ret; -} diff --git a/drivers/scsi/spfc/hw/spfc_service.h b/drivers/scsi/spfc/hw/spfc_service.h deleted file mode 100644 index e2555c55f4d1183812d0105ba8c43fa2150a420d..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_service.h +++ /dev/null @@ -1,282 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_SERVICE_H -#define SPFC_SERVICE_H - -#include "unf_type.h" -#include "unf_common.h" -#include "unf_scsi_common.h" -#include "spfc_hba.h" - -#define SPFC_HAVE_OFFLOAD (0) - -/* FC txmfs */ -#define SPFC_DEFAULT_TX_MAX_FREAM_SIZE (256) - -#define SPFC_GET_NETWORK_PORT_ID(hba) \ - (((hba)->port_index > 1) ?
((hba)->port_index + 2) : (hba)->port_index) - -#define SPFC_GET_PRLI_PAYLOAD_LEN \ - (UNF_PRLI_PAYLOAD_LEN - UNF_PRLI_SIRT_EXTRA_SIZE) -/* Start addr of the header/payloed of the cmnd buffer in the pkg */ -#define SPFC_FC_HEAD_LEN (sizeof(struct unf_fc_head)) -#define SPFC_PAYLOAD_OFFSET (sizeof(struct unf_fc_head)) -#define SPFC_GET_CMND_PAYLOAD_ADDR(pkg) UNF_GET_FLOGI_PAYLOAD(pkg) -#define SPFC_GET_CMND_HEADER_ADDR(pkg) \ - ((pkg)->unf_cmnd_pload_bl.buffer_ptr) -#define SPFC_GET_RSP_HEADER_ADDR(pkg) \ - ((pkg)->unf_rsp_pload_bl.buffer_ptr) -#define SPFC_GET_RSP_PAYLOAD_ADDR(pkg) \ - ((pkg)->unf_rsp_pload_bl.buffer_ptr + SPFC_PAYLOAD_OFFSET) -#define SPFC_GET_CMND_FC_HEADER(pkg) \ - (&(UNF_GET_SFS_ENTRY(pkg)->sfs_common.frame_head)) -#define SPFC_PKG_IS_ELS_RSP(cmd_type) \ - (((cmd_type) == ELS_ACC) || ((cmd_type) == ELS_RJT)) -#define SPFC_XID_IS_VALID(exid, base, exi_count) \ - (((exid) >= (base)) && ((exid) < ((base) + (exi_count)))) -#define SPFC_CHECK_NEED_OFFLOAD(cmd_code, cmd_type, offload_state) \ - (((cmd_code) == ELS_PLOGI) && ((cmd_type) != ELS_RJT) && \ - ((offload_state) == SPFC_QUEUE_STATE_INITIALIZED)) - -#define UNF_FC_PAYLOAD_ELS_MASK (0xFF000000) -#define UNF_FC_PAYLOAD_ELS_SHIFT (24) -#define UNF_FC_PAYLOAD_ELS_DWORD (0) - -/* Note: this pfcpayload is little endian */ -#define UNF_GET_FC_PAYLOAD_ELS_CMND(pfcpayload) \ - UNF_GET_SHIFTMASK(((u32 *)(void *)(pfcpayload))[UNF_FC_PAYLOAD_ELS_DWORD], \ - UNF_FC_PAYLOAD_ELS_SHIFT, UNF_FC_PAYLOAD_ELS_MASK) - -/* Note: this pfcpayload is big endian */ -#define SPFC_GET_FC_PAYLOAD_ELS_CMND(pfcpayload) \ - UNF_GET_SHIFTMASK(be32_to_cpu(((u32 *)(void *)(pfcpayload))[UNF_FC_PAYLOAD_ELS_DWORD]), \ - UNF_FC_PAYLOAD_ELS_SHIFT, UNF_FC_PAYLOAD_ELS_MASK) - -#define UNF_FC_PAYLOAD_RX_SZ_MASK (0x00000FFF) -#define UNF_FC_PAYLOAD_RX_SZ_SHIFT (16) -#define UNF_FC_PAYLOAD_RX_SZ_DWORD (2) - -/* Note: this pfcpayload is little endian */ -#define UNF_GET_FC_PAYLOAD_RX_SZ(pfcpayload) \ - ((u16)(((u32 *)(void *)(pfcpayload))[UNF_FC_PAYLOAD_RX_SZ_DWORD] & \ - UNF_FC_PAYLOAD_RX_SZ_MASK)) - -/* Note: this pfcpayload is big endian */ -#define SPFC_GET_FC_PAYLOAD_RX_SZ(pfcpayload) \ - (be32_to_cpu((u16)(((u32 *)(void *)(pfcpayload))[UNF_FC_PAYLOAD_RX_SZ_DWORD]) & \ - UNF_FC_PAYLOAD_RX_SZ_MASK)) - -#define SPFC_GET_RA_TOV_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.r_a_tov) -#define SPFC_GET_RT_TOV_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.r_t_tov) -#define SPFC_GET_E_D_TOV_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.e_d_tov) -#define SPFC_GET_E_D_TOV_RESOLUTION_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.e_d_tov_resolution) -#define SPFC_GET_BB_SC_N_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.bbscn) -#define SPFC_GET_BB_CREDIT_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.bb_credit) - -#define SPFC_GET_RA_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.r_a_tov) -#define SPFC_GET_RT_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.r_t_tov) -#define SPFC_GET_E_D_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.e_d_tov) -#define SPFC_GET_E_D_TOV_RESOLUTION_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm 
*)(pfcparams))->co_parms.e_d_tov_resolution) -#define SPFC_GET_BB_SC_N_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.bbscn) -#define SPFC_GET_BB_CREDIT_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.bb_credit) -#define SPFC_CHECK_NPORT_FPORT_BIT(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.nport) - -#define UNF_FC_RCTL_BLS_MASK (0x80) -#define SPFC_UNSOLICITED_FRAME_IS_BLS(hdr) (UNF_GET_FC_HEADER_RCTL(hdr) & UNF_FC_RCTL_BLS_MASK) - -#define SPFC_LOW_SEQ_CNT (0) -#define SPFC_HIGH_SEQ_CNT (0xFFFF) - -/* struct unf_frame_pkg.cmnd meaning: - * The least significant 16 bits indicate whether to send ELS CMND or ELS RSP - * (ACC or RJT). The most significant 16 bits indicate the corresponding ELS - * CMND when the lower 16 bits are ELS RSP. - */ -#define SPFC_ELS_CMND_MASK (0xffff) -#define SPFC_ELS_CMND__RELEVANT_SHIFT (16UL) -#define SPFC_GET_LS_GS_CMND_CODE(cmnd) ((u16)((cmnd) & SPFC_ELS_CMND_MASK)) -#define SPFC_GET_ELS_RSP_TYPE(cmnd) ((u16)((cmnd) & SPFC_ELS_CMND_MASK)) -#define SPFC_GET_ELS_RSP_CODE(cmnd) \ - ((u16)((cmnd) >> SPFC_ELS_CMND__RELEVANT_SHIFT & SPFC_ELS_CMND_MASK)) - -/* ELS CMND Request */ -#define ELS_CMND (0) - -/* fh_f_ctl - Frame control flags. */ -#define SPFC_FC_EX_CTX BIT(23) /* sent by responder to exchange */ -#define SPFC_FC_SEQ_CTX BIT(22) /* sent by responder to sequence */ -#define SPFC_FC_FIRST_SEQ BIT(21) /* first sequence of this exchange */ -#define SPFC_FC_LAST_SEQ BIT(20) /* last sequence of this exchange */ -#define SPFC_FC_END_SEQ BIT(19) /* last frame of sequence */ -#define SPFC_FC_END_CONN BIT(18) /* end of class 1 connection pending */ -#define SPFC_FC_RES_B17 BIT(17) /* reserved */ -#define SPFC_FC_SEQ_INIT BIT(16) /* transfer of sequence initiative */ -#define SPFC_FC_X_ID_REASS BIT(15) /* exchange ID has been changed */ -#define SPFC_FC_X_ID_INVAL BIT(14) /* exchange ID invalidated */ -#define SPFC_FC_ACK_1 BIT(12) /* 13:12 = 1: ACK_1 expected */ -#define SPFC_FC_ACK_N (2 << 12) /* 13:12 = 2: ACK_N expected */ -#define SPFC_FC_ACK_0 (3 << 12) /* 13:12 = 3: ACK_0 expected */ -#define SPFC_FC_RES_B11 BIT(11) /* reserved */ -#define SPFC_FC_RES_B10 BIT(10) /* reserved */ -#define SPFC_FC_RETX_SEQ BIT(9) /* retransmitted sequence */ -#define SPFC_FC_UNI_TX BIT(8) /* unidirectional transmit (class 1) */ -#define SPFC_FC_CONT_SEQ(i) ((i) << 6) -#define SPFC_FC_ABT_SEQ(i) ((i) << 4) -#define SPFC_FC_REL_OFF BIT(3) /* parameter is relative offset */ -#define SPFC_FC_RES2 BIT(2) /* reserved */ -#define SPFC_FC_FILL(i) ((i) & 3) /* 1:0: bytes of trailing fill */ - -#define SPFC_FCTL_REQ (SPFC_FC_FIRST_SEQ | SPFC_FC_END_SEQ | SPFC_FC_SEQ_INIT) -#define SPFC_FCTL_RESP \ - (SPFC_FC_EX_CTX | SPFC_FC_LAST_SEQ | SPFC_FC_END_SEQ | SPFC_FC_SEQ_INIT) -#define SPFC_RCTL_BLS_REQ (0x81) -#define SPFC_RCTL_BLS_ACC (0x84) -#define SPFC_RCTL_BLS_RJT (0x85) - -#define PHY_PORT_TYPE_FC 0x1 /* Physical port type of FC */ -#define PHY_PORT_TYPE_FCOE 0x2 /* Physical port type of FCoE */ -#define SPFC_FC_COS_VALUE (0X4) - -#define SPFC_CDB16_LBA_MASK 0xffff -#define SPFC_CDB16_TRANSFERLEN_MASK 0xff -#define SPFC_RXID_MASK 0xffff -#define SPFC_OXID_MASK 0xffff0000 - -enum spfc_fc_fh_type { - SPFC_FC_TYPE_BLS = 0x00, /* basic link service */ - SPFC_FC_TYPE_ELS = 0x01, /* extended link service */ - SPFC_FC_TYPE_IP = 0x05, /* IP over FC, RFC 4338 */ - SPFC_FC_TYPE_FCP = 0x08, /* SCSI FCP */ - SPFC_FC_TYPE_CT = 0x20, /* Fibre Channel Services (FC-CT) */ - SPFC_FC_TYPE_ILS = 0x22 /* internal link 
service */ -}; - -enum spfc_fc_fh_rctl { - SPFC_FC_RCTL_DD_UNCAT = 0x00, /* uncategorized information */ - SPFC_FC_RCTL_DD_SOL_DATA = 0x01, /* solicited data */ - SPFC_FC_RCTL_DD_UNSOL_CTL = 0x02, /* unsolicited control */ - SPFC_FC_RCTL_DD_SOL_CTL = 0x03, /* solicited control or reply */ - SPFC_FC_RCTL_DD_UNSOL_DATA = 0x04, /* unsolicited data */ - SPFC_FC_RCTL_DD_DATA_DESC = 0x05, /* data descriptor */ - SPFC_FC_RCTL_DD_UNSOL_CMD = 0x06, /* unsolicited command */ - SPFC_FC_RCTL_DD_CMD_STATUS = 0x07, /* command status */ - -#define SPFC_FC_RCTL_ILS_REQ SPFC_FC_RCTL_DD_UNSOL_CTL /* ILS request */ -#define SPFC_FC_RCTL_ILS_REP SPFC_FC_RCTL_DD_SOL_CTL /* ILS reply */ - - /* - * Extended Link_Data - */ - SPFC_FC_RCTL_ELS_REQ = 0x22, /* extended link services request */ - SPFC_FC_RCTL_ELS_RSP = 0x23, /* extended link services reply */ - SPFC_FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */ - SPFC_FC_RCTL_ELS4_RSP = 0x33, /* FC-4 ELS reply */ - /* - * Optional Extended Headers - */ - SPFC_FC_RCTL_VFTH = 0x50, /* virtual fabric tagging header */ - SPFC_FC_RCTL_IFRH = 0x51, /* inter-fabric routing header */ - SPFC_FC_RCTL_ENCH = 0x52, /* encapsulation header */ - /* - * Basic Link Services fh_r_ctl values. - */ - SPFC_FC_RCTL_BA_NOP = 0x80, /* basic link service NOP */ - SPFC_FC_RCTL_BA_ABTS = 0x81, /* basic link service abort */ - SPFC_FC_RCTL_BA_RMC = 0x82, /* remove connection */ - SPFC_FC_RCTL_BA_ACC = 0x84, /* basic accept */ - SPFC_FC_RCTL_BA_RJT = 0x85, /* basic reject */ - SPFC_FC_RCTL_BA_PRMT = 0x86, /* dedicated connection preempted */ - /* - * Link Control Information. - */ - SPFC_FC_RCTL_ACK_1 = 0xc0, /* acknowledge_1 */ - SPFC_FC_RCTL_ACK_0 = 0xc1, /* acknowledge_0 */ - SPFC_FC_RCTL_P_RJT = 0xc2, /* port reject */ - SPFC_FC_RCTL_F_RJT = 0xc3, /* fabric reject */ - SPFC_FC_RCTL_P_BSY = 0xc4, /* port busy */ - SPFC_FC_RCTL_F_BSY = 0xc5, /* fabric busy to data frame */ - SPFC_FC_RCTL_F_BSYL = 0xc6, /* fabric busy to link control frame */ - SPFC_FC_RCTL_LCR = 0xc7, /* link credit reset */ - SPFC_FC_RCTL_END = 0xc9 /* end */ -}; - -struct spfc_fc_frame_header { - u8 rctl; /* routing control */ - u8 did[ARRAY_INDEX_3]; /* Destination ID */ - - u8 cs_ctrl; /* class of service control / pri */ - u8 sid[ARRAY_INDEX_3]; /* Source ID */ - - u8 type; /* see enum fc_fh_type below */ - u8 frame_ctrl[ARRAY_INDEX_3]; /* frame control */ - - u8 seq_id; /* sequence ID */ - u8 df_ctrl; /* data field control */ - u16 seq_cnt; /* sequence count */ - - u16 oxid; /* originator exchange ID */ - u16 rxid; /* responder exchange ID */ - u32 param_offset; /* parameter or relative offset */ -}; - -u32 spfc_recv_els_cmnd(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u8 *els_pld, u32 pld_len, - bool first); -u32 spfc_rcv_ls_gs_rsp(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag); -u32 spfc_rcv_els_rsp_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag); -u32 spfc_rcv_bls_rsp(const struct spfc_hba_info *hba, struct unf_frame_pkg *pkg, - u32 hot_tag); -u32 spfc_rsv_bls_rsp_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 rx_id); -void spfc_save_login_parms_in_sq_info(struct spfc_hba_info *hba, - struct unf_port_login_parms *login_params); -u32 spfc_handle_aeq_off_load_err(struct spfc_hba_info *hba, - struct spfc_aqe_data *aeq_msg); -u32 spfc_free_xid(void *handle, struct unf_frame_pkg *pkg); -u32 spfc_scq_free_xid_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe); -u32 spfc_scq_exchg_timeout_sts(struct spfc_hba_info *hba, 
union spfc_scqe *scqe); -u32 spfc_scq_rcv_sq_nop_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe); -u32 spfc_send_els_via_default_session(struct spfc_hba_info *hba, struct spfc_sqe *io_sqe, - struct unf_frame_pkg *pkg, - struct spfc_parent_queue_info *prt_queue_info); -u32 spfc_send_ls_gs_cmnd(void *handle, struct unf_frame_pkg *pkg); -u32 spfc_send_bls_cmnd(void *handle, struct unf_frame_pkg *pkg); - -/* Receive Frame from SCQ */ -u32 spfc_rcv_scq_entry_from_scq(struct spfc_hba_info *hba, - union spfc_scqe *scqe, u32 scqn); -void *spfc_get_els_buf_by_user_id(struct spfc_hba_info *hba, u16 user_id); - -#define SPFC_CHECK_PKG_ALLOCTIME(pkg) \ - do { \ - if (unlikely(UNF_GETXCHGALLOCTIME(pkg) == 0)) { \ - FC_DRV_PRINT(UNF_LOG_NORMAL, \ - UNF_WARN, \ - "[warn]Invalid MagicNum,S_ID(0x%x) " \ - "D_ID(0x%x) OXID(0x%x) " \ - "RX_ID(0x%x) Pkg type(0x%x) hot " \ - "pooltag(0x%x)", \ - UNF_GET_SID(pkg), UNF_GET_DID(pkg), \ - UNF_GET_OXID(pkg), UNF_GET_RXID(pkg), \ - ((struct unf_frame_pkg *)(pkg))->type, \ - UNF_GET_XCHG_TAG(pkg)); \ - } \ - } while (0) - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_utils.c b/drivers/scsi/spfc/hw/spfc_utils.c deleted file mode 100644 index 328c388c95fe84fb10dab442fd25685e8963b310..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_utils.c +++ /dev/null @@ -1,102 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_utils.h" -#include "unf_log.h" -#include "unf_common.h" - -void spfc_cpu_to_big64(void *addr, u32 size) -{ - u32 index = 0; - u32 cnt = 0; - u64 *temp = NULL; - - FC_CHECK_VALID(addr, dump_stack(); return); - FC_CHECK_VALID((size % SPFC_QWORD_BYTE) == 0, dump_stack(); return); - - temp = (u64 *)addr; - cnt = SPFC_SHIFT_TO_U64(size); - - for (index = 0; index < cnt; index++) { - *temp = cpu_to_be64(*temp); - temp++; - } -} - -void spfc_big_to_cpu64(void *addr, u32 size) -{ - u32 index = 0; - u32 cnt = 0; - u64 *temp = NULL; - - FC_CHECK_VALID(addr, dump_stack(); return); - FC_CHECK_VALID((size % SPFC_QWORD_BYTE) == 0, dump_stack(); return); - - temp = (u64 *)addr; - cnt = SPFC_SHIFT_TO_U64(size); - - for (index = 0; index < cnt; index++) { - *temp = be64_to_cpu(*temp); - temp++; - } -} - -void spfc_cpu_to_big32(void *addr, u32 size) -{ - unf_cpu_to_big_end(addr, size); -} - -void spfc_big_to_cpu32(void *addr, u32 size) -{ - if (size % UNF_BYTES_OF_DWORD) - dump_stack(); - - unf_big_end_to_cpu(addr, size); -} - -void spfc_cpu_to_be24(u8 *data, u32 value) -{ - data[ARRAY_INDEX_0] = (value >> UNF_SHIFT_16) & UNF_MASK_BIT_7_0; - data[ARRAY_INDEX_1] = (value >> UNF_SHIFT_8) & UNF_MASK_BIT_7_0; - data[ARRAY_INDEX_2] = value & UNF_MASK_BIT_7_0; -} - -u32 spfc_big_to_cpu24(u8 *data) -{ - return (data[ARRAY_INDEX_0] << UNF_SHIFT_16) | - (data[ARRAY_INDEX_1] << UNF_SHIFT_8) | data[ARRAY_INDEX_2]; -} - -void spfc_print_buff(u32 dbg_level, void *buff, u32 size) -{ - u32 *spfc_buff = NULL; - u32 loop = 0; - u32 index = 0; - - FC_CHECK_VALID(buff, dump_stack(); return); - FC_CHECK_VALID(0 == (size % SPFC_DWORD_BYTE), dump_stack(); return); - - if ((dbg_level) <= unf_dgb_level) { - spfc_buff = (u32 *)buff; - loop = size / SPFC_DWORD_BYTE; - - for (index = 0; index < loop; index++) { - spfc_buff = (u32 *)buff + index; - FC_DRV_PRINT(UNF_LOG_NORMAL, - UNF_MAJOR, "Buff DW%u 0x%08x.", index, *spfc_buff); - } - } -} - -u32 spfc_log2n(u32 val) -{ - u32 result = 0; - u32 logn = (val >> UNF_SHIFT_1); - - while (logn) { - logn >>= UNF_SHIFT_1; - result++; - } - - return result; -} diff 
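The two removed files above lean on a couple of small encodings worth seeing in isolation: struct unf_frame_pkg.cmnd packs the ELS command being answered into the upper 16 bits and the command/response type into the lower 16, and FC port IDs travel as 3-byte big-endian fields (spfc_cpu_to_be24()/spfc_big_to_cpu24()). A standalone sketch of both follows; only the masks and the byte layout are taken from the driver, every other name is illustrative.

#include <stdint.h>
#include <stdio.h>

/* pkg->cmnd packing, as documented in spfc_service.h: the low 16 bits
 * are the ELS command (or, for responses, the ACC/RJT type), and the
 * high 16 bits carry the ELS command the response answers. */
#define ELS_CMND_MASK  0xffffu
#define ELS_CMND_SHIFT 16

static uint16_t get_rsp_type(uint32_t cmnd)
{
        return (uint16_t)(cmnd & ELS_CMND_MASK);
}

static uint16_t get_rsp_code(uint32_t cmnd)
{
        return (uint16_t)((cmnd >> ELS_CMND_SHIFT) & ELS_CMND_MASK);
}

/* 3-byte big-endian FC port IDs, mirroring spfc_cpu_to_be24() and
 * spfc_big_to_cpu24() from the spfc_utils.c just removed. */
static void cpu_to_be24(uint8_t *data, uint32_t value)
{
        data[0] = (value >> 16) & 0xff;
        data[1] = (value >> 8) & 0xff;
        data[2] = value & 0xff;
}

static uint32_t be24_to_cpu(const uint8_t *data)
{
        return (data[0] << 16) | (data[1] << 8) | data[2];
}

int main(void)
{
        /* Hypothetical "response 0x02 answering command 0x03" word; the
         * values are placeholders, not real ELS opcodes. */
        uint32_t cmnd = (0x03u << ELS_CMND_SHIFT) | 0x02u;
        uint8_t did[3];

        cpu_to_be24(did, 0x123456);     /* illustrative 24-bit port ID */
        printf("rsp type 0x%x answers cmd 0x%x; did=0x%06x\n",
               get_rsp_type(cmnd), get_rsp_code(cmnd), be24_to_cpu(did));
        return 0;
}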
--git a/drivers/scsi/spfc/hw/spfc_utils.h b/drivers/scsi/spfc/hw/spfc_utils.h deleted file mode 100644 index 6b4330da3f1d75c89417e005beed200ea54e0323..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_utils.h +++ /dev/null @@ -1,202 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_UTILS_H -#define SPFC_UTILS_H - -#include "unf_type.h" -#include "unf_log.h" - -#define SPFC_ZERO (0) - -#define SPFC_BIT(n) (0x1UL << (n)) -#define SPFC_BIT_0 SPFC_BIT(0) -#define SPFC_BIT_1 SPFC_BIT(1) -#define SPFC_BIT_2 SPFC_BIT(2) -#define SPFC_BIT_3 SPFC_BIT(3) -#define SPFC_BIT_4 SPFC_BIT(4) -#define SPFC_BIT_5 SPFC_BIT(5) -#define SPFC_BIT_6 SPFC_BIT(6) -#define SPFC_BIT_7 SPFC_BIT(7) -#define SPFC_BIT_8 SPFC_BIT(8) -#define SPFC_BIT_9 SPFC_BIT(9) -#define SPFC_BIT_10 SPFC_BIT(10) -#define SPFC_BIT_11 SPFC_BIT(11) -#define SPFC_BIT_12 SPFC_BIT(12) -#define SPFC_BIT_13 SPFC_BIT(13) -#define SPFC_BIT_14 SPFC_BIT(14) -#define SPFC_BIT_15 SPFC_BIT(15) -#define SPFC_BIT_16 SPFC_BIT(16) -#define SPFC_BIT_17 SPFC_BIT(17) -#define SPFC_BIT_18 SPFC_BIT(18) -#define SPFC_BIT_19 SPFC_BIT(19) -#define SPFC_BIT_20 SPFC_BIT(20) -#define SPFC_BIT_21 SPFC_BIT(21) -#define SPFC_BIT_22 SPFC_BIT(22) -#define SPFC_BIT_23 SPFC_BIT(23) -#define SPFC_BIT_24 SPFC_BIT(24) -#define SPFC_BIT_25 SPFC_BIT(25) -#define SPFC_BIT_26 SPFC_BIT(26) -#define SPFC_BIT_27 SPFC_BIT(27) -#define SPFC_BIT_28 SPFC_BIT(28) -#define SPFC_BIT_29 SPFC_BIT(29) -#define SPFC_BIT_30 SPFC_BIT(30) -#define SPFC_BIT_31 SPFC_BIT(31) - -#define SPFC_GET_BITS(data, mask) ((data) & (mask)) /* Obtains the bit */ -#define SPFC_SET_BITS(data, mask) ((data) |= (mask)) /* set the bit */ -#define SPFC_CLR_BITS(data, mask) ((data) &= ~(mask)) /* clear the bit */ - -#define SPFC_LSB(x) ((u8)(x)) -#define SPFC_MSB(x) ((u8)((u16)(x) >> 8)) - -#define SPFC_LSW(x) ((u16)(x)) -#define SPFC_MSW(x) ((u16)((u32)(x) >> 16)) - -#define SPFC_LSD(x) ((u32)((u64)(x))) -#define SPFC_MSD(x) ((u32)((((u64)(x)) >> 16) >> 16)) - -#define SPFC_BYTES_TO_QW_NUM(x) ((x) >> 3) -#define SPFC_BYTES_TO_DW_NUM(x) ((x) >> 2) - -#define UNF_GET_SHIFTMASK(__src, __shift, __mask) (((__src) & (__mask)) >> (__shift)) -#define UNF_FC_SET_SHIFTMASK(__des, __val, __shift, __mask) \ - ((__des) = (((__des) & ~(__mask)) | (((__val) << (__shift)) & (__mask)))) - -/* R_CTL */ -#define UNF_FC_HEADER_RCTL_MASK (0xFF000000) -#define UNF_FC_HEADER_RCTL_SHIFT (24) -#define UNF_FC_HEADER_RCTL_DWORD (0) -#define UNF_GET_FC_HEADER_RCTL(__pfcheader) \ - UNF_GET_SHIFTMASK(((u32 *)(void *)(__pfcheader))[UNF_FC_HEADER_RCTL_DWORD], \ - UNF_FC_HEADER_RCTL_SHIFT, UNF_FC_HEADER_RCTL_MASK) - -#define UNF_SET_FC_HEADER_RCTL(__pfcheader, __val) \ - do { \ - UNF_FC_SET_SHIFTMASK(((u32 *)(void *)(__pfcheader)[UNF_FC_HEADER_RCTL_DWORD], \ - __val, UNF_FC_HEADER_RCTL_SHIFT, UNF_FC_HEADER_RCTL_MASK) \ - } while (0) - -/* PRLI PARAM 3 */ -#define SPFC_PRLI_PARAM_WXFER_ENABLE_MASK (0x00000001) -#define SPFC_PRLI_PARAM_WXFER_ENABLE_SHIFT (0) -#define SPFC_PRLI_PARAM_WXFER_DWORD (3) -#define SPFC_GET_PRLI_PARAM_WXFER(__pfcheader) \ - UNF_GET_SHIFTMASK(((u32 *)(void *)(__pfcheader))[SPFC_PRLI_PARAM_WXFER_DWORD], \ - SPFC_PRLI_PARAM_WXFER_ENABLE_SHIFT, \ - SPFC_PRLI_PARAM_WXFER_ENABLE_MASK) - -#define SPFC_PRLI_PARAM_CONF_ENABLE_MASK (0x00000080) -#define SPFC_PRLI_PARAM_CONF_ENABLE_SHIFT (7) -#define SPFC_PRLI_PARAM_CONF_DWORD (3) -#define SPFC_GET_PRLI_PARAM_CONF(__pfcheader) \ - UNF_GET_SHIFTMASK(((u32 *)(void 
*)(__pfcheader))[SPFC_PRLI_PARAM_CONF_DWORD], \ - SPFC_PRLI_PARAM_CONF_ENABLE_SHIFT, \ - SPFC_PRLI_PARAM_CONF_ENABLE_MASK) - -#define SPFC_PRLI_PARAM_REC_ENABLE_MASK (0x00000400) -#define SPFC_PRLI_PARAM_REC_ENABLE_SHIFT (10) -#define SPFC_PRLI_PARAM_CONF_REC (3) -#define SPFC_GET_PRLI_PARAM_REC(__pfcheader) \ - UNF_GET_SHIFTMASK(((u32 *)(void *)(__pfcheader))[SPFC_PRLI_PARAM_CONF_REC], \ - SPFC_PRLI_PARAM_REC_ENABLE_SHIFT, SPFC_PRLI_PARAM_REC_ENABLE_MASK) - -#define SPFC_FUNCTION_ENTER \ - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ALL, \ - "%s Enter.", __func__) -#define SPFC_FUNCTION_RETURN \ - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ALL, \ - "%s Return.", __func__) - -#define SPFC_SPIN_LOCK_IRQSAVE(interrupt, hw_adapt_lock, flags) \ - do { \ - if ((interrupt) == false) { \ - spin_lock_irqsave(&(hw_adapt_lock), flags); \ - } \ - } while (0) - -#define SPFC_SPIN_UNLOCK_IRQRESTORE(interrupt, hw_adapt_lock, flags) \ - do { \ - if ((interrupt) == false) { \ - spin_unlock_irqrestore(&(hw_adapt_lock), flags); \ - } \ - } while (0) - -#define FC_CHECK_VALID(condition, fail_do) \ - do { \ - if (unlikely(!(condition))) { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, \ - UNF_ERR, "Para check(%s) invalid", \ - #condition); \ - fail_do; \ - } \ - } while (0) - -#define RETURN_ERROR_S32 (-1) -#define UNF_RETURN_ERROR_S32 (-1) - -enum SPFC_LOG_CTRL_E { - SPFC_LOG_ALL = 0, - SPFC_LOG_SCQE_RX, - SPFC_LOG_ELS_TX, - SPFC_LOG_ELS_RX, - SPFC_LOG_GS_TX, - SPFC_LOG_GS_RX, - SPFC_LOG_BLS_TX, - SPFC_LOG_BLS_RX, - SPFC_LOG_FCP_TX, - SPFC_LOG_FCP_RX, - SPFC_LOG_SESS_TX, - SPFC_LOG_SESS_RX, - SPFC_LOG_DIF_TX, - SPFC_LOG_DIF_RX -}; - -extern u32 spfc_log_en; -#define SPFC_LOG_EN(hba, log_ctrl) (spfc_log_en + (log_ctrl)) - -enum SPFC_HBA_ERR_STAT_E { - SPFC_STAT_CTXT_FLUSH_DONE = 0, - SPFC_STAT_SQ_WAIT_EMPTY, - SPFC_STAT_LAST_GS_SCQE, - SPFC_STAT_SQ_POOL_EMPTY, - SPFC_STAT_PARENT_IO_FLUSHED, - SPFC_STAT_ROOT_IO_FLUSHED, /* 5 */ - SPFC_STAT_ROOT_SQ_FULL, - SPFC_STAT_ELS_RSP_EXCH_REUSE, - SPFC_STAT_GS_RSP_EXCH_REUSE, - SPFC_STAT_SQ_IO_BUFFER_CLEARED, - SPFC_STAT_PARENT_SQ_NOT_OFFLOADED, /* 10 */ - SPFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK, - SPFC_STAT_PARENT_SQ_INVALID_CACHED_ID, - SPFC_HBA_STAT_BUTT -}; - -#define SPFC_DWORD_BYTE (4) -#define SPFC_QWORD_BYTE (8) -#define SPFC_SHIFT_TO_U64(x) ((x) >> 3) -#define SPFC_SHIFT_TO_U32(x) ((x) >> 2) - -void spfc_cpu_to_big64(void *addr, u32 size); -void spfc_big_to_cpu64(void *addr, u32 size); -void spfc_cpu_to_big32(void *addr, u32 size); -void spfc_big_to_cpu32(void *addr, u32 size); -void spfc_cpu_to_be24(u8 *data, u32 value); -u32 spfc_big_to_cpu24(u8 *data); - -void spfc_print_buff(u32 dbg_level, void *buff, u32 size); - -u32 spfc_log2n(u32 val); - -static inline void spfc_swap_16_in_32(u32 *paddr, u32 length) -{ - u32 i; - - for (i = 0; i < length; i++) { - paddr[i] = - ((((paddr[i]) & UNF_MASK_BIT_31_16) >> UNF_SHIFT_16) | - (((paddr[i]) & UNF_MASK_BIT_15_0) << UNF_SHIFT_16)); - } -} - -#endif /* __SPFC_UTILS_H__ */ diff --git a/drivers/scsi/spfc/hw/spfc_wqe.c b/drivers/scsi/spfc/hw/spfc_wqe.c deleted file mode 100644 index 61909c51bc8cd2b2902dae5803685ec465abeb45..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_wqe.c +++ /dev/null @@ -1,646 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_wqe.h" -#include "spfc_module.h" -#include "spfc_service.h" - -void spfc_build_tmf_rsp_wqe_ts_header(struct unf_frame_pkg *pkg, - struct spfc_sqe_tmf_rsp *sqe, u16 exi_base, - u32 scqn) -{ - sqe->ts_sl.task_type = 
SPFC_SQE_FCP_TMF_TRSP; - sqe->ts_sl.wd0.conn_id = - (u16)(pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]); - - if (UNF_GET_RXID(pkg) == INVALID_VALUE16) - sqe->ts_sl.local_xid = INVALID_VALUE16; - else - sqe->ts_sl.local_xid = UNF_GET_RXID(pkg) + exi_base; - - sqe->ts_sl.tmf_rsp.wd0.scqn = scqn; - sqe->ts_sl.magic_num = UNF_GETXCHGALLOCTIME(pkg); -} - -void spfc_build_common_wqe_ctrls(struct spfc_wqe_ctrl *ctrl_sl, u8 task_len) -{ - /* "BDSL" field of CtrlS - defines the size of BDS, which varies from 0 - * to 2040 bytes (8 bits of 8 bytes' chunk) - */ - ctrl_sl->ch.wd0.bdsl = 0; - - /* "DrvSL" field of CtrlS - defines the size of DrvS, which varies from - * 0 to 24 bytes - */ - ctrl_sl->ch.wd0.drv_sl = 0; - - /* a. - * b1 - linking WQE, which will be only used in linked page architecture - * instead of ring, it's a special control WQE which does not contain - * any buffer or inline data information, and will only be consumed by - * hardware. The size is aligned to WQEBB/WQE b0 - normal WQE, either - * normal SEG WQE or inline data WQE - */ - ctrl_sl->ch.wd0.wf = 0; - - /* - * "CF" field of CtrlS - Completion Format - defines the format of CS. - * a. b0 - Status information is embedded inside of Completion Section - * b. b1 - Completion Section keeps SGL, where Status information - * should be written. (For the definition of SGLs see ?4.1 - * .) - */ - ctrl_sl->ch.wd0.cf = 0; - - /* "TSL" field of CtrlS - defines the size of TS, which varies from 0 to - * 248 bytes - */ - ctrl_sl->ch.wd0.tsl = task_len; - - /* - * Variable length SGE (vSGE). The size of SGE is 16 bytes. The vSGE - * format is of two types, which are defined by "VA " field of CtrlS. - * "VA" stands for Virtual Address: o b0. SGE comprises 64-bits - * buffer's pointer and 31-bits Length, each SGE can only support up to - * 2G-1B, it can guarantee each single SGE length can not exceed 2GB by - * nature, A byte count value of zero means a 0byte data transfer. o b1. - * SGE comprises 64-bits buffer's pointer, 31-bits Length and 30-bits - * Key of the Translation table , each SGE can only support up to 2G-1B, - * it can guarantee each single SGE length can not exceed 2GB by nature, - * A byte count value of zero means a 0byte data transfer - */ - ctrl_sl->ch.wd0.va = 0; - - /* - * "DF" field of CtrlS - Data Format - defines the format of BDS - * a. b0 - BDS carries the list of SGEs (SGL) - * b. b1 - BDS carries the inline data - */ - ctrl_sl->ch.wd0.df = 0; - - /* "CR" - Completion is Required - marks CQE generation request per WQE - */ - ctrl_sl->ch.wd0.cr = 1; - - /* "DIFSL" field of CtrlS - defines the size of DIFS, which varies from - * 0 to 56 bytes - */ - ctrl_sl->ch.wd0.dif_sl = 0; - - /* "CSL" field of CtrlS - defines the size of CS, which varies from 0 to - * 24 bytes - */ - ctrl_sl->ch.wd0.csl = 0; - - /* CtrlSL describes the size of CtrlS in 8 bytes chunks. 
The - * value Zero is not valid - */ - ctrl_sl->ch.wd0.ctrl_sl = 1; - - /* "O" - Owner - marks ownership of WQE */ - ctrl_sl->ch.wd0.owner = 0; -} - -void spfc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe) -{ - /* "BDSL" field of CtrlS - defines the size of BDS, which varies from 0 - * to 2040 bytes (8 bits of 8 bytes' chunk) - */ - /* TrdWqe carry 2 SGE defaultly, 4DW per SGE, the value is 4 because - * unit is 2DW, in double SGL mode, bdsl is 2 - */ - sqe->ctrl_sl.ch.wd0.bdsl = SPFC_T_RD_WR_WQE_CTR_BDSL_SIZE; - - /* "DrvSL" field of CtrlS - defines the size of DrvS, which varies from - * 0 to 24 bytes - */ - /* DrvSL = 0 */ - sqe->ctrl_sl.ch.wd0.drv_sl = 0; - - /* a. - * b1 - linking WQE, which will be only used in linked page architecture - * instead of ring, it's a special control WQE which does not contain - * any buffer or inline data information, and will only be consumed by - * hardware. The size is aligned to WQEBB/WQE b0 - normal WQE, either - * normal SEG WQE or inline data WQE - */ - /* normal wqe */ - sqe->ctrl_sl.ch.wd0.wf = 0; - - /* - * "CF" field of CtrlS - Completion Format - defines the format of CS. - * a. b0 - Status information is embedded inside of Completion Section - * b. b1 - Completion Section keeps SGL, where Status information - * should be written. (For the definition of SGLs see ?4.1) - */ - /* by SCQE mode, the value is ignored */ - sqe->ctrl_sl.ch.wd0.cf = 0; - - /* "TSL" field of CtrlS - defines the size of TS, which varies from 0 to - * 248 bytes - */ - /* TSL is configured by 56 bytes */ - sqe->ctrl_sl.ch.wd0.tsl = - sizeof(struct spfc_sqe_ts) / SPFC_WQE_SECTION_CHUNK_SIZE; - - /* - * Variable length SGE (vSGE). The size of SGE is 16 bytes. The vSGE - * format is of two types, which are defined by "VA " field of CtrlS. - * "VA" stands for Virtual Address: o b0. SGE comprises 64-bits buffer's - * pointer and 31-bits Length, each SGE can only support up to 2G-1B, it - * can guarantee each single SGE length can not exceed 2GB by nature, A - * byte count value of zero means a 0byte data transfer. o b1. SGE - * comprises 64-bits buffer's pointer, 31-bits Length and 30-bits Key of - * the Translation table , each SGE can only support up to 2G-1B, it can - * guarantee each single SGE length can not exceed 2GB by nature, A byte - * count value of zero means a 0byte data transfer - */ - sqe->ctrl_sl.ch.wd0.va = 0; - - /* - * "DF" field of CtrlS - Data Format - defines the format of BDS - * a. b0 - BDS carries the list of SGEs (SGL) - * b. b1 - BDS carries the inline data - */ - sqe->ctrl_sl.ch.wd0.df = 0; - - /* "CR" - Completion is Required - marks CQE generation request per WQE - */ - /* by SCQE mode, this value is ignored */ - sqe->ctrl_sl.ch.wd0.cr = 1; - - /* "DIFSL" field of CtrlS - defines the size of DIFS, which varies from - * 0 to 56 bytes. - */ - sqe->ctrl_sl.ch.wd0.dif_sl = 0; - - /* "CSL" field of CtrlS - defines the size of CS, which varies from 0 to - * 24 bytes - */ - sqe->ctrl_sl.ch.wd0.csl = 0; - - /* CtrlSL describes the size of CtrlS in 8 bytes chunks. The - * value Zero is not valid. - */ - sqe->ctrl_sl.ch.wd0.ctrl_sl = SPFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE; - - /* "O" - Owner - marks ownership of WQE */ - sqe->ctrl_sl.ch.wd0.owner = 0; -} - -/* **************************************************************************** - * Function Name : spfc_build_service_wqe_ts_common - * Function Description : Construct the DW1~DW3 field in the Parent SQ WQE - * request of the ELS and ELS_RSP requests. 
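Every size field walked through above (BDSL, TSL, CSL, CtrlSL, DIFSL) counts 8-byte chunks rather than bytes, which is why the TRD/TWR path sets tsl to sizeof(struct spfc_sqe_ts) divided by the chunk size. A minimal sketch of that convention, using a stand-in struct since the real task section lives in the removed headers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WQE_SECTION_CHUNK_SIZE 8        /* all *SL fields count 8-byte chunks */

/* Stand-in for the 56-byte task section mentioned above ("TSL is
 * configured by 56 bytes"), i.e. 7 chunks. */
struct demo_task_section {
        uint32_t words[14];
};

int main(void)
{
        /* A section must be a whole number of chunks, otherwise its
         * size cannot be expressed in the control header at all. */
        assert(sizeof(struct demo_task_section) % WQE_SECTION_CHUNK_SIZE == 0);

        printf("tsl = %zu chunks\n",
               sizeof(struct demo_task_section) / WQE_SECTION_CHUNK_SIZE);
        return 0;
}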
- * Input Parameters : struct spfc_sqe_ts *sqe_ts u32 rport_index u16 local_xid - * u16 remote_xid u16 data_len - * Output Parameters : N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_service_wqe_ts_common(struct spfc_sqe_ts *sqe_ts, u32 rport_index, - u16 local_xid, u16 remote_xid, u16 data_len) -{ - sqe_ts->local_xid = local_xid; - - sqe_ts->wd0.conn_id = (u16)rport_index; - sqe_ts->wd0.remote_xid = remote_xid; - - sqe_ts->cont.els_gs_elsrsp_comm.data_len = data_len; -} - -/* **************************************************************************** - * Function Name : spfc_build_els_gs_wqe_sge - * Function Description : Construct the SGE field of the ELS and ELS_RSP WQE. - * The SGE and frame content have been converted to large ends in this - * function. - * Input Parameters: struct spfc_sqe *sqe void *buf_addr u32 buf_len u32 xid - * Output Parameters : N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_els_gs_wqe_sge(struct spfc_sqe *sqe, void *buf_addr, u64 phy_addr, - u32 buf_len, u32 xid, void *handle) -{ - u64 els_rsp_phy_addr; - struct spfc_variable_sge *sge = NULL; - - /* Fill in SGE and convert it to big-endian. */ - sge = &sqe->sge[ARRAY_INDEX_0]; - els_rsp_phy_addr = phy_addr; - sge->buf_addr_hi = SPFC_HIGH_32_BITS(els_rsp_phy_addr); - sge->buf_addr_lo = SPFC_LOW_32_BITS(els_rsp_phy_addr); - sge->wd0.buf_len = buf_len; - sge->wd0.r_flag = 0; - sge->wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sge->wd1.buf_addr_gpa = SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE; - sge->wd1.xid = 0; - sge->wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - spfc_cpu_to_big32(sge, sizeof(*sge)); - - /* Converts the payload of an FC frame into a big end. */ - if (buf_addr) - spfc_cpu_to_big32(buf_addr, buf_len); -} - -/* **************************************************************************** - * Function Name : spfc_build_els_wqe_ts_rsp - * Function Description : Construct the DW2~DW6 field in the Parent SQ WQE - * of the ELS_RSP request. - * Input Parameters : struct spfc_sqe *sqe void *sq_info void *frame_pld - * u16 type u16 cmnd u32 scqn - * Output Parameters: N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_els_wqe_ts_rsp(struct spfc_sqe *sqe, void *info, - struct unf_frame_pkg *pkg, void *frame_pld, - u16 type, u16 cmnd) -{ - struct unf_prli_payload *prli_acc_pld = NULL; - struct spfc_sqe_els_rsp *els_rsp = NULL; - struct spfc_sqe_ts *sqe_ts = NULL; - struct spfc_parent_sq_info *sq_info = NULL; - struct spfc_hba_info *hba = NULL; - struct unf_fc_head *pkg_fc_hdr_info = NULL; - struct spfc_parent_queue_info *prnt_q_info = (struct spfc_parent_queue_info *)info; - - FC_CHECK_RETURN_VOID(sqe); - FC_CHECK_RETURN_VOID(frame_pld); - - sqe_ts = &sqe->ts_sl; - els_rsp = &sqe_ts->cont.els_rsp; - sqe_ts->task_type = SPFC_SQE_ELS_RSP; - - /* The default chip does not need to update parameters. 
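spfc_build_els_gs_wqe_sge() above does two mechanical things: split the payload's 64-bit DMA address into the SGE's hi/lo words, then byte-swap the SGE (and the payload itself) to big-endian before the hardware sees them. The address split in isolation, with the two macros copied from spfc_wqe.h and everything else illustrative:

#include <stdint.h>
#include <stdio.h>

/* Same definitions as in spfc_wqe.h. */
#define SPFC_LOW_32_BITS(__addr)  ((uint32_t)((uint64_t)(__addr) & 0xffffffff))
#define SPFC_HIGH_32_BITS(__addr) ((uint32_t)(((uint64_t)(__addr) >> 32) & 0xffffffff))

struct demo_sge {               /* reduced stand-in for spfc_variable_sge */
        uint32_t buf_addr_hi;
        uint32_t buf_addr_lo;
        uint32_t buf_len;
};

int main(void)
{
        uint64_t dma = 0x0000123456789abcULL;   /* hypothetical bus address */
        struct demo_sge sge = {
                .buf_addr_hi = SPFC_HIGH_32_BITS(dma),
                .buf_addr_lo = SPFC_LOW_32_BITS(dma),
                .buf_len = 256,
        };

        /* The driver then runs spfc_cpu_to_big32() over the SGE so the
         * hardware always sees big-endian words, whatever the host is. */
        printf("hi=0x%08x lo=0x%08x len=%u\n",
               sge.buf_addr_hi, sge.buf_addr_lo, sge.buf_len);
        return 0;
}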
*/ - els_rsp->wd1.para_update = 0x0; - - sq_info = &prnt_q_info->parent_sq_info; - hba = (struct spfc_hba_info *)sq_info->hba; - - pkg_fc_hdr_info = &pkg->frame_head; - els_rsp->sid = pkg_fc_hdr_info->csctl_sid; - els_rsp->did = pkg_fc_hdr_info->rctl_did; - els_rsp->wd7.hotpooltag = UNF_GET_HOTPOOL_TAG(pkg) + hba->exi_base; - els_rsp->wd2.class_mode = FC_PROTOCOL_CLASS_3; - - if (type == ELS_RJT) - els_rsp->wd2.class_mode = pkg->class_mode; - - /* When the PLOGI request is sent, the microcode needs to be instructed - * to clear the I/O related to the link to avoid data inconsistency - * caused by the disorder of the IO. - */ - if ((cmnd == ELS_LOGO || cmnd == ELS_PLOGI)) { - els_rsp->wd1.clr_io = 1; - els_rsp->wd6.reset_exch_start = hba->exi_base; - els_rsp->wd6.reset_exch_end = - hba->exi_base + (hba->exi_count - 1); - els_rsp->wd7.scqn = - prnt_q_info->parent_sts_scq_info.cqm_queue_id; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) send cmd(0x%x) to RPort(0x%x),rport index(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.", - sq_info->local_port_id, cmnd, sq_info->remote_port_id, - sq_info->rport_index, els_rsp->wd6.reset_exch_start, - els_rsp->wd6.reset_exch_end, els_rsp->wd7.scqn); - - return; - } - - if (type == ELS_RJT) - return; - - /* Enter WQE in the PrliAcc negotiation parameter, and fill in the - * Update flag in WQE. - */ - if (cmnd == ELS_PRLI) { - /* The chip updates the PLOGI ACC negotiation parameters. */ - els_rsp->wd2.seq_cnt = sq_info->plogi_co_parms.seq_cnt; - els_rsp->wd2.e_d_tov = sq_info->plogi_co_parms.ed_tov; - els_rsp->wd2.tx_mfs = sq_info->plogi_co_parms.tx_mfs; - els_rsp->e_d_tov_timer_val = sq_info->plogi_co_parms.ed_tov_time; - - /* The chip updates the PRLI ACC parameter. */ - prli_acc_pld = (struct unf_prli_payload *)frame_pld; - els_rsp->wd4.xfer_dis = SPFC_GET_PRLI_PARAM_WXFER(prli_acc_pld->parms); - els_rsp->wd4.conf = SPFC_GET_PRLI_PARAM_CONF(prli_acc_pld->parms); - els_rsp->wd4.rec = SPFC_GET_PRLI_PARAM_REC(prli_acc_pld->parms); - - els_rsp->wd1.para_update = 0x03; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x,e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x, xfer_dis:0x%x,conf:0x%x,rec:0x%x.", - sq_info->local_port_id, sq_info->rport_index, - els_rsp->wd2.seq_cnt, els_rsp->wd2.e_d_tov, - els_rsp->wd2.tx_mfs, els_rsp->e_d_tov_timer_val, - els_rsp->wd4.xfer_dis, els_rsp->wd4.conf, els_rsp->wd4.rec); - } -} - -/* **************************************************************************** - * Function Name : spfc_build_els_wqe_ts_req - * Function Description: Construct the DW2~DW4 field in the Parent SQ WQE - * of the ELS request. 
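The PLOGI/LOGO branch above (and its twin in the request path that follows) always programs the same "clean I/O" window for the microcode: every exchange ID this HBA owns, from exi_base through exi_base + exi_count - 1. Reduced to a function, with hypothetical values:

#include <stdint.h>
#include <stdio.h>

struct demo_hba {
        uint16_t exi_base;      /* first exchange ID owned by this HBA */
        uint16_t exi_count;     /* number of exchange IDs it owns */
};

/* For PLOGI/LOGO the WQE tells the microcode to flush every exchange
 * in the HBA's window, exactly as wd6.reset_exch_start/_end are set
 * in spfc_build_els_wqe_ts_rsp() and _req(). */
static void demo_clean_io_window(const struct demo_hba *hba,
                                 uint16_t *start, uint16_t *end)
{
        *start = hba->exi_base;
        *end = hba->exi_base + (hba->exi_count - 1);
}

int main(void)
{
        struct demo_hba hba = { .exi_base = 0x1000, .exi_count = 2048 };
        uint16_t start, end;

        demo_clean_io_window(&hba, &start, &end);
        printf("clean io window: 0x%04x..0x%04x\n", start, end);
        return 0;
}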
- * Input Parameters: struct spfc_sqe *sqe void *sq_info u16 cmnd u32 scqn - * Output Parameters: N/A - * Return Type: void - **************************************************************************** - */ -void spfc_build_els_wqe_ts_req(struct spfc_sqe *sqe, void *info, u32 scqn, - void *frame_pld, struct unf_frame_pkg *pkg) -{ - struct spfc_sqe_ts *sqe_ts = NULL; - struct spfc_sqe_t_els_gs *els_req = NULL; - struct spfc_parent_sq_info *sq_info = NULL; - struct spfc_hba_info *hba = NULL; - struct unf_fc_head *pkg_fc_hdr_info = NULL; - u16 cmnd; - - cmnd = SPFC_GET_LS_GS_CMND_CODE(pkg->cmnd); - - sqe_ts = &sqe->ts_sl; - if (pkg->type == UNF_PKG_GS_REQ) - sqe_ts->task_type = SPFC_SQE_GS_CMND; - else - sqe_ts->task_type = SPFC_SQE_ELS_CMND; - - sqe_ts->magic_num = UNF_GETXCHGALLOCTIME(pkg); - - els_req = &sqe_ts->cont.t_els_gs; - pkg_fc_hdr_info = &pkg->frame_head; - - sq_info = (struct spfc_parent_sq_info *)info; - hba = (struct spfc_hba_info *)sq_info->hba; - els_req->sid = pkg_fc_hdr_info->csctl_sid; - els_req->did = pkg_fc_hdr_info->rctl_did; - - /* When the PLOGI request is sent, the microcode needs to be instructed - * to clear the I/O related to the link to avoid data inconsistency - * caused by the disorder of the IO. - */ - if ((cmnd == ELS_LOGO || cmnd == ELS_PLOGI) && hba) { - els_req->wd4.clr_io = 1; - els_req->wd6.reset_exch_start = hba->exi_base; - els_req->wd6.reset_exch_end = hba->exi_base + (hba->exi_count - 1); - els_req->wd7.scqn = scqn; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) Rport(0x%x) SID(0x%x) send %s to DID(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.", - hba->port_cfg.port_id, sq_info->rport_index, - sq_info->local_port_id, (cmnd == ELS_PLOGI) ? "PLOGI" : "LOGO", - sq_info->remote_port_id, els_req->wd6.reset_exch_start, - els_req->wd6.reset_exch_end, scqn); - - return; - } - - /* The chip updates the PLOGI ACC negotiation parameters. */ - if (cmnd == ELS_PRLI) { - els_req->wd5.seq_cnt = sq_info->plogi_co_parms.seq_cnt; - els_req->wd5.e_d_tov = sq_info->plogi_co_parms.ed_tov; - els_req->wd5.tx_mfs = sq_info->plogi_co_parms.tx_mfs; - els_req->e_d_tov_timer_val = sq_info->plogi_co_parms.ed_tov_time; - - els_req->wd4.rec_support = hba->port_cfg.tape_support ? 1 : 0; - els_req->wd4.para_update = 0x01; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, - "Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x,e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x.", - sq_info->local_port_id, sq_info->rport_index, - els_req->wd5.seq_cnt, els_req->wd5.e_d_tov, - els_req->wd5.tx_mfs, els_req->e_d_tov_timer_val); - } - - if (cmnd == ELS_ECHO) - els_req->echo_flag = true; - - if (cmnd == ELS_REC) { - els_req->wd4.rec_flag = 1; - els_req->wd4.origin_hottag = pkg->origin_hottag + hba->exi_base; - els_req->origin_magicnum = pkg->origin_magicnum; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) Rport(0x%x) SID(0x%x) send Rec to DID(0x%x), origin_hottag 0x%x", - hba->port_cfg.port_id, sq_info->rport_index, - sq_info->local_port_id, sq_info->remote_port_id, - els_req->wd4.origin_hottag); - } -} - -/* **************************************************************************** - * Function Name : spfc_build_bls_wqe_ts_req - * Function Description: Construct the DW2 field in the Parent SQ WQE of - * the ELS request, that is, ABTS parameter. 
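Both PRLI branches above feed the chip from three capability bits in service-parameter word 3, extracted by the SPFC_GET_PRLI_PARAM_* macros defined in the removed spfc_utils.h. The same extraction as a standalone sketch; the masks are the driver's, the sample word is made up:

#include <stdint.h>
#include <stdio.h>

/* Bit layout of PRLI service-parameter word 3, per spfc_utils.h. */
#define PRLI_WXFER_MASK 0x00000001u     /* source bit for wd4.xfer_dis */
#define PRLI_CONF_MASK  0x00000080u     /* source bit for wd4.conf */
#define PRLI_REC_MASK   0x00000400u     /* source bit for wd4.rec */

int main(void)
{
        uint32_t parm_word3 = 0x00000481u;      /* hypothetical PRLI ACC word */

        printf("xfer_dis=%u conf=%u rec=%u\n",
               !!(parm_word3 & PRLI_WXFER_MASK),
               !!(parm_word3 & PRLI_CONF_MASK),
               !!(parm_word3 & PRLI_REC_MASK));
        return 0;
}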
- * Input Parameters:struct unf_frame_pkg *pkg void *hba - * Output Parameters: N/A - * Return Type: void - **************************************************************************** - */ -void spfc_build_bls_wqe_ts_req(struct spfc_sqe *sqe, struct unf_frame_pkg *pkg, void *handle) -{ - struct spfc_sqe_abts *abts; - - sqe->ts_sl.task_type = SPFC_SQE_BLS_CMND; - sqe->ts_sl.magic_num = UNF_GETXCHGALLOCTIME(pkg); - - abts = &sqe->ts_sl.cont.abts; - abts->fh_parm_abts = pkg->frame_head.parameter; - abts->hotpooltag = UNF_GET_HOTPOOL_TAG(pkg) + - ((struct spfc_hba_info *)handle)->exi_base; - abts->release_timer = UNF_GET_XID_RELEASE_TIMER(pkg); -} - -/* **************************************************************************** - * Function Name : spfc_build_service_wqe_ctrl_section - * Function Description: fill Parent SQ WQE and Root SQ WQE's Control Section - * Input Parameters : struct spfc_wqe_ctrl *wqe_cs u32 ts_size u32 bdsl - * Output Parameters : N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_service_wqe_ctrl_section(struct spfc_wqe_ctrl *wqe_cs, u32 ts_size, - u32 bdsl) -{ - wqe_cs->ch.wd0.bdsl = bdsl; - wqe_cs->ch.wd0.drv_sl = 0; - wqe_cs->ch.wd0.rsvd0 = 0; - wqe_cs->ch.wd0.wf = 0; - wqe_cs->ch.wd0.cf = 0; - wqe_cs->ch.wd0.tsl = ts_size; - wqe_cs->ch.wd0.va = 0; - wqe_cs->ch.wd0.df = 0; - wqe_cs->ch.wd0.cr = 1; - wqe_cs->ch.wd0.dif_sl = 0; - wqe_cs->ch.wd0.csl = 0; - wqe_cs->ch.wd0.ctrl_sl = SPFC_BYTES_TO_QW_NUM(sizeof(*wqe_cs)); /* divided by 8 */ - wqe_cs->ch.wd0.owner = 0; -} - -/* **************************************************************************** - * Function Name : spfc_build_wqe_owner_pmsn - * Function Description: This field is filled using the value of Control - * Section of Parent SQ WQE. - * Input Parameters: struct spfc_wqe_ctrl *wqe_cs u16 owner u16 pmsn - * Output Parameters : N/A - * Return Type: void - **************************************************************************** - */ -void spfc_build_wqe_owner_pmsn(struct spfc_sqe *io_sqe, u16 owner, u16 pmsn) -{ - struct spfc_wqe_ctrl *wqe_cs = &io_sqe->ctrl_sl; - struct spfc_wqe_ctrl *wqee_cs = &io_sqe->ectrl_sl; - - wqe_cs->qsf.wqe_sn = pmsn; - wqe_cs->qsf.dump_wqe_sn = wqe_cs->qsf.wqe_sn; - wqe_cs->ch.wd0.owner = (u32)owner; - wqee_cs->ch.ctrl_ch_val = wqe_cs->ch.ctrl_ch_val; - wqee_cs->qsf.wqe_sn = wqe_cs->qsf.wqe_sn; - wqee_cs->qsf.dump_wqe_sn = wqe_cs->qsf.dump_wqe_sn; -} - -/* **************************************************************************** - * Function Name : spfc_convert_parent_wqe_to_big_endian - * Function Description: Set the Done field of Parent SQ WQE and convert - * Control Section and Task Section to big-endian. - * Input Parameters:struct spfc_sqe *sqe - * Output Parameters : N/A - * Return Type : void - **************************************************************************** - */ -void spfc_convert_parent_wqe_to_big_endian(struct spfc_sqe *sqe) -{ - if (likely(sqe->ts_sl.task_type != SPFC_TASK_T_TRESP && - sqe->ts_sl.task_type != SPFC_TASK_T_TMF_RESP)) { - /* Convert Control Secton and Task Section to big-endian. Before - * the SGE enters the queue, the upper-layer driver converts the - * SGE and Task Section to the big-endian mode. 
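spfc_build_wqe_owner_pmsn() above stamps a sequence number (PMSN) and an owner bit into both the control section and its extended copy. The owner bit is the usual ring-buffer validity marker: the producer flips it on every lap so the consumer can tell a fresh entry from a stale one. A generic sketch of that idea, not the driver's exact layout:

#include <stdint.h>
#include <stdio.h>

#define RING_DEPTH 4                    /* power of two keeps the math simple */

struct demo_wqe {
        uint16_t sn;                    /* producer sequence number (PMSN) */
        uint8_t owner;                  /* flips each pass over the ring */
};

/* The first lap writes owner=1 so entries are distinguishable from a
 * zero-initialized ring; each wrap inverts the expected value. */
static void demo_post(struct demo_wqe *ring, uint16_t sn)
{
        struct demo_wqe *wqe = &ring[sn % RING_DEPTH];

        wqe->sn = sn;
        wqe->owner = !((sn / RING_DEPTH) & 1);
}

int main(void)
{
        struct demo_wqe ring[RING_DEPTH] = { 0 };

        for (uint16_t sn = 0; sn < 6; sn++)
                demo_post(ring, sn);

        for (unsigned int i = 0; i < RING_DEPTH; i++)
                printf("slot %u: sn=%u owner=%u\n", i, ring[i].sn, ring[i].owner);
        return 0;
}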
- */ - spfc_cpu_to_big32(&sqe->ctrl_sl, sizeof(sqe->ctrl_sl)); - spfc_cpu_to_big32(&sqe->ts_sl, sizeof(sqe->ts_sl)); - spfc_cpu_to_big32(&sqe->ectrl_sl, sizeof(sqe->ectrl_sl)); - spfc_cpu_to_big32(&sqe->sid, sizeof(sqe->sid)); - spfc_cpu_to_big32(&sqe->did, sizeof(sqe->did)); - spfc_cpu_to_big32(&sqe->wqe_gpa, sizeof(sqe->wqe_gpa)); - spfc_cpu_to_big32(&sqe->db_val, sizeof(sqe->db_val)); - } else { - /* The SPFC_TASK_T_TRESP may use the SGE as the Task Section to - * convert the entire SQE into a large end. - */ - spfc_cpu_to_big32(sqe, sizeof(struct spfc_sqe_tresp)); - } -} - -/* **************************************************************************** - * Function Name : spfc_build_cmdqe_common - * Function Description : Assemble the Cmdqe Common part. - * Input Parameters: union spfc_cmdqe *cmd_qe enum spfc_task_type task_type u16 rxid - * Output Parameters : N/A - * Return Type: void - **************************************************************************** - */ -void spfc_build_cmdqe_common(union spfc_cmdqe *cmd_qe, enum spfc_task_type task_type, - u16 rxid) -{ - cmd_qe->common.wd0.task_type = task_type; - cmd_qe->common.wd0.rx_id = rxid; - cmd_qe->common.wd0.rsvd0 = 0; -} - -#define SPFC_STANDARD_SIRT_ENABLE (1) -#define SPFC_STANDARD_SIRT_DISABLE (0) -#define SPFC_UNKNOWN_ID (0xFFFF) - -void spfc_build_icmnd_wqe_ts_header(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe, - u8 task_type, u16 exi_base, u8 port_idx) -{ - sqe->ts_sl.local_xid = (u16)UNF_GET_HOTPOOL_TAG(pkg) + exi_base; - sqe->ts_sl.task_type = task_type; - sqe->ts_sl.wd0.conn_id = - (u16)(pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]); - - sqe->ts_sl.wd0.remote_xid = SPFC_UNKNOWN_ID; - sqe->ts_sl.magic_num = UNF_GETXCHGALLOCTIME(pkg); -} - -/* **************************************************************************** - * Function Name : spfc_build_icmnd_wqe_ts - * Function Description : Constructing the TS Domain of the ICmnd - * Input Parameters: void *hba struct unf_frame_pkg *pkg - * struct spfc_sqe_ts *sqe_ts - * Output Parameters :N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_icmnd_wqe_ts(void *handle, struct unf_frame_pkg *pkg, - struct spfc_sqe_ts *sqe_ts, union spfc_sqe_ts_ex *sqe_tsex) -{ - struct spfc_sqe_icmnd *icmnd = &sqe_ts->cont.icmnd; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - - sqe_ts->cdb_type = 0; - memcpy(icmnd->fcp_cmnd_iu, pkg->fcp_cmnd, sizeof(struct unf_fcp_cmnd)); - - if (sqe_ts->task_type == SPFC_SQE_FCP_ITMF) { - icmnd->info.tmf.w0.bs.reset_exch_start = hba->exi_base; - icmnd->info.tmf.w0.bs.reset_exch_end = hba->exi_base + hba->exi_count - 1; - - icmnd->info.tmf.w1.bs.reset_did = UNF_GET_DID(pkg); - /* delivers the marker status flag to the microcode. */ - icmnd->info.tmf.w1.bs.marker_sts = 1; - SPFC_GET_RESET_TYPE(UNF_GET_TASK_MGMT_FLAGS(pkg->fcp_cmnd->control), - icmnd->info.tmf.w1.bs.reset_type); - - icmnd->info.tmf.w2.bs.reset_sid = UNF_GET_SID(pkg); - - memcpy(icmnd->info.tmf.reset_lun, pkg->fcp_cmnd->lun, - sizeof(icmnd->info.tmf.reset_lun)); - } -} - -/* **************************************************************************** - * Function Name : spfc_build_icmnd_wqe_ctrls - * Function Description : The CtrlS domain of the ICmnd is constructed. The - * analysis result is the same as that of the TWTR. 
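spfc_build_icmnd_wqe_ts() above maps the FCP task-management flags onto the firmware's reset types via SPFC_GET_RESET_TYPE() (the macro appears further down, in the removed spfc_wqe.h). The same mapping as a plain function; the DEMO_TM_* values are stand-ins because the UNF_FCP_TM_* constants are defined outside these files:

#include <stdio.h>

/* Firmware-side reset types, as in spfc_wqe.h. */
enum demo_tmf_type {
        DEMO_TMF_PORT_RESET = 0,
        DEMO_TMF_LUN_RESET = 1,
        DEMO_TMF_TGT_RESET = 2,
        DEMO_TMF_RSVD = 3,
};

/* Hypothetical stand-ins for the UNF_FCP_TM_* flag values. */
enum demo_tm_flag {
        DEMO_TM_ABORT_TASK_SET,
        DEMO_TM_LOGICAL_UNIT_RESET,
        DEMO_TM_TARGET_RESET,
        DEMO_TM_CLEAR_TASK_SET,
};

/* Function form of the SPFC_GET_RESET_TYPE() macro: abort-task-set and
 * LU reset collapse to a LUN reset, clear-task-set to a port reset,
 * anything unrecognized to the reserved value. */
static enum demo_tmf_type demo_reset_type(enum demo_tm_flag tmf)
{
        switch (tmf) {
        case DEMO_TM_ABORT_TASK_SET:
        case DEMO_TM_LOGICAL_UNIT_RESET:
                return DEMO_TMF_LUN_RESET;
        case DEMO_TM_TARGET_RESET:
                return DEMO_TMF_TGT_RESET;
        case DEMO_TM_CLEAR_TASK_SET:
                return DEMO_TMF_PORT_RESET;
        default:
                return DEMO_TMF_RSVD;
        }
}

int main(void)
{
        printf("target reset -> %d\n", demo_reset_type(DEMO_TM_TARGET_RESET));
        return 0;
}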
- * Input Parameters: struct unf_frame_pkg *pkg struct spfc_sqe *sqe - * Output Parameters: N/A - * Return Type: void - **************************************************************************** - */ -void spfc_build_icmnd_wqe_ctrls(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe) -{ - spfc_build_trd_twr_wqe_ctrls(pkg, sqe); -} - -/* **************************************************************************** - * Function Name : spfc_build_srq_wqe_ctrls - * Function Description : Construct the CtrlS domain of the ICmnd. The analysis - * result is the same as that of the TWTR. - * Input Parameters : struct spfc_rqe *rqe u16 owner u16 pmsn - * Output Parameters : N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_srq_wqe_ctrls(struct spfc_rqe *rqe, u16 owner, u16 pmsn) -{ - struct spfc_wqe_ctrl_ch *wqe_ctrls = NULL; - - wqe_ctrls = &rqe->ctrl_sl.ch; - wqe_ctrls->wd0.owner = owner; - wqe_ctrls->wd0.ctrl_sl = sizeof(struct spfc_wqe_ctrl) >> UNF_SHIFT_3; - wqe_ctrls->wd0.csl = 1; - wqe_ctrls->wd0.dif_sl = 0; - wqe_ctrls->wd0.cr = 1; - wqe_ctrls->wd0.df = 0; - wqe_ctrls->wd0.va = 0; - wqe_ctrls->wd0.tsl = 0; - wqe_ctrls->wd0.cf = 0; - wqe_ctrls->wd0.wf = 0; - wqe_ctrls->wd0.drv_sl = sizeof(struct spfc_rqe_drv) >> UNF_SHIFT_3; - wqe_ctrls->wd0.bdsl = sizeof(struct spfc_constant_sge) >> UNF_SHIFT_3; - - rqe->ctrl_sl.wd0.wqe_msn = pmsn; - rqe->ctrl_sl.wd0.dump_wqe_msn = rqe->ctrl_sl.wd0.wqe_msn; -} diff --git a/drivers/scsi/spfc/hw/spfc_wqe.h b/drivers/scsi/spfc/hw/spfc_wqe.h deleted file mode 100644 index ec6d7bbdf8f9c0519a963d6fa8a871382fa2254d..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/hw/spfc_wqe.h +++ /dev/null @@ -1,239 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_WQE_H -#define SPFC_WQE_H - -#include "unf_type.h" -#include "unf_common.h" -#include "spfc_hw_wqe.h" -#include "spfc_parent_context.h" - -/* TGT WQE type */ -/* DRV->uCode via Parent SQ */ -#define SPFC_SQE_FCP_TRD SPFC_TASK_T_TREAD -#define SPFC_SQE_FCP_TWR SPFC_TASK_T_TWRITE -#define SPFC_SQE_FCP_TRSP SPFC_TASK_T_TRESP -#define SPFC_SQE_FCP_TACK SPFC_TASK_T_TACK -#define SPFC_SQE_ELS_CMND SPFC_TASK_T_ELS -#define SPFC_SQE_ELS_RSP SPFC_TASK_T_ELS_RSP -#define SPFC_SQE_GS_CMND SPFC_TASK_T_GS -#define SPFC_SQE_BLS_CMND SPFC_TASK_T_ABTS -#define SPFC_SQE_FCP_IREAD SPFC_TASK_T_IREAD -#define SPFC_SQE_FCP_IWRITE SPFC_TASK_T_IWRITE -#define SPFC_SQE_FCP_ITMF SPFC_TASK_T_ITMF -#define SPFC_SQE_SESS_RST SPFC_TASK_T_SESS_RESET -#define SPFC_SQE_FCP_TMF_TRSP SPFC_TASK_T_TMF_RESP -#define SPFC_SQE_NOP SPFC_TASK_T_NOP -/* DRV->uCode Via CMDQ */ -#define SPFC_CMDQE_ABTS_RSP SPFC_TASK_T_ABTS_RSP -#define SPFC_CMDQE_ABORT SPFC_TASK_T_ABORT -#define SPFC_CMDQE_SESS_DIS SPFC_TASK_T_SESS_DIS -#define SPFC_CMDQE_SESS_DEL SPFC_TASK_T_SESS_DEL - -/* uCode->Drv Via CMD SCQ */ -#define SPFC_SCQE_FCP_TCMND SPFC_TASK_T_RCV_TCMND -#define SPFC_SCQE_ELS_CMND SPFC_TASK_T_RCV_ELS_CMD -#define SPFC_SCQE_ABTS_CMD SPFC_TASK_T_RCV_ABTS_CMD -#define SPFC_SCQE_FCP_IRSP SPFC_TASK_T_IRESP -#define SPFC_SCQE_FCP_ITMF_RSP SPFC_TASK_T_ITMF_RESP - -/* uCode->Drv Via STS SCQ */ -#define SPFC_SCQE_FCP_TSTS SPFC_TASK_T_TSTS -#define SPFC_SCQE_GS_RSP SPFC_TASK_T_RCV_GS_RSP -#define SPFC_SCQE_ELS_RSP SPFC_TASK_T_RCV_ELS_RSP -#define SPFC_SCQE_ABTS_RSP SPFC_TASK_T_RCV_ABTS_RSP -#define SPFC_SCQE_ELS_RSP_STS SPFC_TASK_T_ELS_RSP_STS -#define SPFC_SCQE_ABORT_STS SPFC_TASK_T_ABORT_STS -#define 
SPFC_SCQE_SESS_EN_STS SPFC_TASK_T_SESS_EN_STS -#define SPFC_SCQE_SESS_DIS_STS SPFC_TASK_T_SESS_DIS_STS -#define SPFC_SCQE_SESS_DEL_STS SPFC_TASK_T_SESS_DEL_STS -#define SPFC_SCQE_SESS_RST_STS SPFC_TASK_T_SESS_RESET_STS -#define SPFC_SCQE_ITMF_MARKER_STS SPFC_TASK_T_ITMF_MARKER_STS -#define SPFC_SCQE_ABTS_MARKER_STS SPFC_TASK_T_ABTS_MARKER_STS -#define SPFC_SCQE_FLUSH_SQ_STS SPFC_TASK_T_FLUSH_SQ_STS -#define SPFC_SCQE_BUF_CLEAR_STS SPFC_TASK_T_BUFFER_CLEAR_STS -#define SPFC_SCQE_CLEAR_SRQ_STS SPFC_TASK_T_CLEAR_SRQ_STS -#define SPFC_SCQE_DIFX_RESULT_STS SPFC_TASK_T_DIFX_RESULT_STS -#define SPFC_SCQE_XID_FREE_ABORT_STS SPFC_TASK_T_EXCH_ID_FREE_ABORT_STS -#define SPFC_SCQE_EXCHID_TIMEOUT_STS SPFC_TASK_T_EXCHID_TIMEOUT_STS -#define SPFC_SQE_NOP_STS SPFC_TASK_T_NOP_STS - -#define SPFC_LOW_32_BITS(__addr) ((u32)((u64)(__addr) & 0xffffffff)) -#define SPFC_HIGH_32_BITS(__addr) ((u32)(((u64)(__addr) >> 32) & 0xffffffff)) - -/* Error Code from SCQ */ -#define SPFC_COMPLETION_STATUS_SUCCESS FC_CQE_COMPLETED -#define SPFC_COMPLETION_STATUS_ABORTED_SETUP_FAIL FC_IMMI_CMDPKT_SETUP_FAIL - -#define SPFC_COMPLETION_STATUS_TIMEOUT FC_ERROR_CODE_E_D_TIMER_EXPIRE -#define SPFC_COMPLETION_STATUS_DIF_ERROR FC_ERROR_CODE_DATA_DIFX_FAILED -#define SPFC_COMPLETION_STATUS_DATA_OOO FC_ERROR_CODE_DATA_OOO_RO -#define SPFC_COMPLETION_STATUS_DATA_OVERFLOW \ - FC_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS - -#define SPFC_SCQE_INVALID_CONN_ID (0xffff) -#define SPFC_GET_SCQE_TYPE(scqe) ((scqe)->common.ch.wd0.task_type) -#define SPFC_GET_SCQE_STATUS(scqe) ((scqe)->common.ch.wd0.err_code) -#define SPFC_GET_SCQE_REMAIN_CNT(scqe) ((scqe)->common.ch.wd0.cqe_remain_cnt) -#define SPFC_GET_SCQE_CONN_ID(scqe) ((scqe)->common.conn_id) -#define SPFC_GET_SCQE_SQN(scqe) ((scqe)->common.ch.wd0.sqn) -#define SPFC_GET_WQE_TYPE(wqe) ((wqe)->ts_sl.task_type) - -#define SPFC_WQE_IS_IO(wqe) \ - ((SPFC_GET_WQE_TYPE(wqe) != SPFC_SQE_SESS_RST) && \ - (SPFC_GET_WQE_TYPE(wqe) != SPFC_SQE_NOP)) -#define SPFC_SCQE_HAS_ERRCODE(scqe) \ - (SPFC_GET_SCQE_STATUS(scqe) != SPFC_COMPLETION_STATUS_SUCCESS) -#define SPFC_SCQE_ERR_TO_CM(scqe) \ - (SPFC_GET_SCQE_STATUS(scqe) != FC_ELS_GS_RSP_EXCH_CHECK_FAIL) -#define SPFC_SCQE_EXCH_ABORTED(scqe) \ - ((SPFC_GET_SCQE_STATUS(scqe) >= \ - FC_CQE_BUFFER_CLEAR_IO_COMPLETED) && \ - (SPFC_GET_SCQE_STATUS(scqe) <= FC_CQE_WQE_FLUSH_IO_COMPLETED)) -#define SPFC_SCQE_CONN_ID_VALID(scqe) \ - (SPFC_GET_SCQE_CONN_ID(scqe) != SPFC_SCQE_INVALID_CONN_ID) - -/* - * checksum error bitmap define - */ -#define NIC_RX_CSUM_HW_BYPASS_ERR (1) -#define NIC_RX_CSUM_IP_CSUM_ERR (1 << 1) -#define NIC_RX_CSUM_TCP_CSUM_ERR (1 << 2) -#define NIC_RX_CSUM_UDP_CSUM_ERR (1 << 3) -#define NIC_RX_CSUM_SCTP_CRC_ERR (1 << 4) - -#define SPFC_WQE_SECTION_CHUNK_SIZE 8 /* 8 bytes' chunk */ -#define SPFC_T_RESP_WQE_CTR_TSL_SIZE 15 /* 8 bytes' chunk */ -#define SPFC_T_RD_WR_WQE_CTR_TSL_SIZE 9 /* 8 bytes' chunk */ -#define SPFC_T_RD_WR_WQE_CTR_BDSL_SIZE 4 /* 8 bytes' chunk */ -#define SPFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE 1 /* 8 bytes' chunk */ - -#define SPFC_WQE_MAX_ESGE_NUM 3 /* 3 ESGE In Extended wqe */ -#define SPFC_WQE_SGE_ENTRY_NUM 2 /* BD SGE and DIF SGE count */ -#define SPFC_WQE_SGE_DIF_ENTRY_NUM 1 /* DIF SGE count */ -#define SPFC_WQE_SGE_LAST_FLAG 1 -#define SPFC_WQE_SGE_NOT_LAST_FLAG 0 -#define SPFC_WQE_SGE_EXTEND_FLAG 1 -#define SPFC_WQE_SGE_NOT_EXTEND_FLAG 0 - -#define SPFC_FCP_TMF_PORT_RESET (0) -#define SPFC_FCP_TMF_LUN_RESET (1) -#define SPFC_FCP_TMF_TGT_RESET (2) -#define SPFC_FCP_TMF_RSVD (3) - -#define SPFC_ADJUST_DATA(old_va, new_va) \ - { \ - 
(old_va) = new_va; \ - } - -#define SPFC_GET_RESET_TYPE(tmf_flag, reset_flag) \ - { \ - switch (tmf_flag) { \ - case UNF_FCP_TM_ABORT_TASK_SET: \ - case UNF_FCP_TM_LOGICAL_UNIT_RESET: \ - (reset_flag) = SPFC_FCP_TMF_LUN_RESET; \ - break; \ - case UNF_FCP_TM_TARGET_RESET: \ - (reset_flag) = SPFC_FCP_TMF_TGT_RESET; \ - break; \ - case UNF_FCP_TM_CLEAR_TASK_SET: \ - (reset_flag) = SPFC_FCP_TMF_PORT_RESET; \ - break; \ - default: \ - (reset_flag) = SPFC_FCP_TMF_RSVD; \ - } \ - } - -/* Link WQE structure */ -struct spfc_linkwqe { - union { - struct { - u32 rsv1 : 14; - u32 wf : 1; - u32 rsv2 : 14; - u32 ctrlsl : 2; - u32 o : 1; - } wd0; - - u32 val_wd0; - }; - - union { - struct { - u32 msn : 16; - u32 dump_msn : 15; - u32 lp : 1; /* lp means whether O bit is overturn */ - } wd1; - - u32 val_wd1; - }; - - u32 next_page_addr_hi; - u32 next_page_addr_lo; -}; - -/* Session Enable */ -struct spfc_host_keys { - struct { - u32 smac1 : 8; - u32 smac0 : 8; - u32 rsv : 16; - } wd0; - - u8 smac[ARRAY_INDEX_4]; - - u8 dmac[ARRAY_INDEX_4]; - struct { - u8 sid_1; - u8 sid_2; - u8 dmac_rvd[ARRAY_INDEX_2]; - } wd3; - struct { - u8 did_0; - u8 did_1; - u8 did_2; - u8 sid_0; - } wd4; - - struct { - u32 port_id : 3; - u32 host_id : 2; - u32 rsvd : 27; - } wd5; - u32 rsvd; -}; - -/* Parent SQ WQE Related function */ -void spfc_build_service_wqe_ctrl_section(struct spfc_wqe_ctrl *wqe_cs, u32 ts_size, - u32 bdsl); -void spfc_build_service_wqe_ts_common(struct spfc_sqe_ts *sqe_ts, u32 rport_index, - u16 local_xid, u16 remote_xid, - u16 data_len); -void spfc_build_els_gs_wqe_sge(struct spfc_sqe *sqe, void *buf_addr, u64 phy_addr, - u32 buf_len, u32 xid, void *handle); -void spfc_build_els_wqe_ts_req(struct spfc_sqe *sqe, void *info, u32 scqn, - void *frame_pld, struct unf_frame_pkg *pkg); -void spfc_build_els_wqe_ts_rsp(struct spfc_sqe *sqe, void *info, - struct unf_frame_pkg *pkg, void *frame_pld, - u16 type, u16 cmnd); -void spfc_build_bls_wqe_ts_req(struct spfc_sqe *sqe, struct unf_frame_pkg *pkg, - void *handle); -void spfc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe); -void spfc_build_wqe_owner_pmsn(struct spfc_sqe *io_sqe, u16 owner, u16 pmsn); -void spfc_convert_parent_wqe_to_big_endian(struct spfc_sqe *sqe); -void spfc_build_icmnd_wqe_ctrls(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe); -void spfc_build_icmnd_wqe_ts(void *handle, struct unf_frame_pkg *pkg, - struct spfc_sqe_ts *sqe_ts, union spfc_sqe_ts_ex *sqe_tsex); -void spfc_build_icmnd_wqe_ts_header(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe, - u8 task_type, u16 exi_base, u8 port_idx); - -void spfc_build_cmdqe_common(union spfc_cmdqe *cmd_qe, enum spfc_task_type task_type, - u16 rxid); -void spfc_build_srq_wqe_ctrls(struct spfc_rqe *rqe, u16 owner, u16 pmsn); -void spfc_build_common_wqe_ctrls(struct spfc_wqe_ctrl *ctrl_sl, u8 task_len); -void spfc_build_tmf_rsp_wqe_ts_header(struct unf_frame_pkg *pkg, - struct spfc_sqe_tmf_rsp *sqe, u16 exi_base, - u32 scqn); - -#endif diff --git a/drivers/scsi/spfc/sphw_api_cmd.c b/drivers/scsi/spfc/sphw_api_cmd.c deleted file mode 120000 index 27c7c0770fa3d910d7df2ec5e366d6a272dfd0de..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/sphw_api_cmd.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_cmdq.c b/drivers/scsi/spfc/sphw_cmdq.c deleted file mode 120000 index 5ac779ba274bc9d006cf19a8b9b5f47ed253e38b..0000000000000000000000000000000000000000 --- 
a/drivers/scsi/spfc/sphw_cmdq.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_common.c b/drivers/scsi/spfc/sphw_common.c deleted file mode 120000 index a1a30a4840e1e0465a8ca430f70e2047b0c7988a..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/sphw_common.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_common.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_eqs.c b/drivers/scsi/spfc/sphw_eqs.c deleted file mode 120000 index 74430dcb9dc5a74cb3bd4618627c25c5a331cf9e..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/sphw_eqs.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_eqs.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_hw_cfg.c b/drivers/scsi/spfc/sphw_hw_cfg.c deleted file mode 120000 index 4f43d68624c1bd7fdddb4f731f048d7fc271632d..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/sphw_hw_cfg.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_hw_comm.c b/drivers/scsi/spfc/sphw_hw_comm.c deleted file mode 120000 index c943b3b2933aa07251a4711d8cb37f91c8a59169..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/sphw_hw_comm.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_hwdev.c b/drivers/scsi/spfc/sphw_hwdev.c deleted file mode 120000 index b7279f17eaa21edb7344f51b19d850e37344b23c..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/sphw_hwdev.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_hwif.c b/drivers/scsi/spfc/sphw_hwif.c deleted file mode 120000 index d40ef71f903330184222b0ea8d855ff83782a939..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/sphw_hwif.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_hwif.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_mbox.c b/drivers/scsi/spfc/sphw_mbox.c deleted file mode 120000 index 1b00fe7289ccaa5cc64769f8fd30311f807b7bd3..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/sphw_mbox.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_mbox.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_mgmt.c b/drivers/scsi/spfc/sphw_mgmt.c deleted file mode 120000 index fd18a73e9d3ac72aee2e1d412dcb1b67b3fbf09b..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/sphw_mgmt.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_prof_adap.c b/drivers/scsi/spfc/sphw_prof_adap.c deleted file mode 120000 index fbc7db05dd27d5749edc848b91efab3e7b107407..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/sphw_prof_adap.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_wq.c b/drivers/scsi/spfc/sphw_wq.c deleted file mode 120000 index cdfcb3a610c0b27ffd3c0c2788e724ef0dc27e91..0000000000000000000000000000000000000000 --- a/drivers/scsi/spfc/sphw_wq.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_wq.c \ No newline at end of file diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 
4cb4ab9c6137ee7d4c5ba509b37438c76c644aef..464418413ced0ec7b6ecabcbd4f0a8448017a0cb 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -917,7 +917,7 @@ static void get_capabilities(struct scsi_cd *cd) /* allocate transfer buffer */ - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) { sr_printk(KERN_ERR, cd, "out of memory.\n"); return; diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c index 1f988a1b9166f49f83384e7525b35f37861580a4..a61635326ae0ac07e7a042b18ceb708ebd243de6 100644 --- a/drivers/scsi/sr_vendor.c +++ b/drivers/scsi/sr_vendor.c @@ -131,7 +131,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength) if (cd->vendor == VENDOR_TOSHIBA) density = (blocklength > 2048) ? 0x81 : 0x83; - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -179,7 +179,7 @@ int sr_cd_check(struct cdrom_device_info *cdi) if (cd->cdi.mask & CDC_MULTI_SESSION) return 0; - buffer = kmalloc(512, GFP_KERNEL | GFP_DMA); + buffer = kmalloc(512, GFP_KERNEL); if (!buffer) return -ENOMEM; diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c index 67a6a61154b7184771bbe2447f19a84f69ac4b18..4e471484539d24abd57030bf2e8799c4d546f9e9 100644 --- a/drivers/scsi/ufs/tc-dwc-g210-pci.c +++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c @@ -135,7 +135,6 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - pci_set_drvdata(pdev, hba); pm_runtime_put_noidle(&pdev->dev); pm_runtime_allow(&pdev->dev); diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index fadd566025b86ab4ffe18aefd366823ea6f1ae98..4bf8ec88676ee54a08e65c3408c66b0a051f951e 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -347,8 +347,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return err; } - pci_set_drvdata(pdev, hba); - hba->vops = (struct ufs_hba_variant_ops *)id->driver_data; err = ufshcd_init(hba, mmio_base, pdev->irq); diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 8c92d1bde64be902b23cb1fb03049fec56f70dde..0f2430fb398db6b780b70c87e03c66ca20c17174 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -92,6 +92,11 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba) clki->min_freq = clkfreq[i]; clki->max_freq = clkfreq[i+1]; clki->name = devm_kstrdup(dev, name, GFP_KERNEL); + if (!clki->name) { + ret = -ENOMEM; + goto out; + } + if (!strcmp(name, "ref_clk")) clki->keep_link_active = true; dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz", @@ -128,6 +133,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, return -ENOMEM; vreg->name = devm_kstrdup(dev, name, GFP_KERNEL); + if (!vreg->name) + return -ENOMEM; snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name); if (of_property_read_u32(np, prop_name, &vreg->max_uA)) { @@ -412,8 +419,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, goto dealloc_host; } - platform_set_drvdata(pdev, hba); - pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index fd184ecaec16b7b3fcb21d215bad2ad7902906b7..39aa9db4b1aa2dcb79343116c56e7c5734d249b9 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -9085,6 +9085,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) struct device *dev = hba->dev; char 
eh_wq_name[sizeof("ufs_eh_wq_00")]; + /* + * dev_set_drvdata() must be called before any callbacks are registered + * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon, + * sysfs). + */ + dev_set_drvdata(dev, hba); + if (!mmio_base) { dev_err(hba->dev, "Invalid memory reference for mmio_base is NULL\n"); diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 6795e1f0e8f8c50aec6d0bf26d5b9bc1b64d576b..1d999228efc85104127f6d56b4cd6b1cc8f684e0 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -138,7 +138,8 @@ enum { #define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\ CONTROLLER_FATAL_ERROR |\ SYSTEM_BUS_FATAL_ERROR |\ - CRYPTO_ENGINE_FATAL_ERROR) + CRYPTO_ENGINE_FATAL_ERROR |\ + UIC_LINK_LOST) /* HCS - Host Controller Status 30h */ #define DEVICE_PRESENT 0x1 diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index 259fc248d06cffa7278592d79cff2b6207987bda..a25c9386fdf785b3061a1c4a7550507c1010e076 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -233,12 +233,11 @@ static void scsifront_gnttab_done(struct vscsifrnt_info *info, return; for (i = 0; i < shadow->nr_grants; i++) { - if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) { + if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) { shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME "grant still in use by backend\n"); BUG(); } - gnttab_end_foreign_access(shadow->gref[i], 0, 0UL); } kfree(shadow->sg); diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c index 27b9e2baab1a61c2ca62b7caf0f9ced69ee0025c..7acf9193a9e800519f6381b8ef27b201bf34650d 100644 --- a/drivers/scsi/zorro7xx.c +++ b/drivers/scsi/zorro7xx.c @@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z) scsi_remove_host(host); NCR_700_release(host); + if (host->base > 0x01000000) + iounmap(hostdata->base); kfree(hostdata); free_irq(host->irq, host); zorro_release_device(z); diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c index 040c7dc1d479242d6843e6eb56fa0901b297c82c..71b555c715d2e68d8f08bafa2daddb870fdbc5e5 100644 --- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c +++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c @@ -251,10 +251,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev) } lpc_ctrl->clk = devm_clk_get(dev, NULL); - if (IS_ERR(lpc_ctrl->clk)) { - dev_err(dev, "couldn't get clock\n"); - return PTR_ERR(lpc_ctrl->clk); - } + if (IS_ERR(lpc_ctrl->clk)) + return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk), + "couldn't get clock\n"); rc = clk_prepare_enable(lpc_ctrl->clk); if (rc) { dev_err(dev, "couldn't enable clock\n"); diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index 34810f9bb2ee781b3dbf96aa73326b2b5552a57f..091e94c04f3095ceb57a9c48b4c03960d1793850 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c @@ -28,7 +28,6 @@ struct fsl_soc_die_attr { static struct guts *guts; static struct soc_device_attribute soc_dev_attr; static struct soc_device *soc_dev; -static struct device_node *root; /* SoC die attribute definition for QorIQ platform */ @@ -138,7 +137,7 @@ static u32 fsl_guts_get_svr(void) static int fsl_guts_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; + struct device_node *root, *np = pdev->dev.of_node; struct device *dev = &pdev->dev; struct resource *res; const struct fsl_soc_die_attr *soc_die; @@ -161,8 +160,14 @@ static int fsl_guts_probe(struct platform_device *pdev) root = of_find_node_by_path("/"); if 
(of_property_read_string(root, "model", &machine)) of_property_read_string_index(root, "compatible", 0, &machine); - if (machine) - soc_dev_attr.machine = machine; + if (machine) { + soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); + if (!soc_dev_attr.machine) { + of_node_put(root); + return -ENOMEM; + } + } + of_node_put(root); svr = fsl_guts_get_svr(); soc_die = fsl_soc_die_match(svr, fsl_soc_die); @@ -197,7 +202,6 @@ static int fsl_guts_probe(struct platform_device *pdev) static int fsl_guts_remove(struct platform_device *dev) { soc_device_unregister(soc_dev); - of_node_put(root); return 0; } diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c index 11ea08e97db75e0e2d06a5504c661a5ace8bcab0..1bb46d955d52576bf0cfc7cf4ffe72e200df2915 100644 --- a/drivers/soc/fsl/qe/qe_io.c +++ b/drivers/soc/fsl/qe/qe_io.c @@ -35,6 +35,8 @@ int par_io_init(struct device_node *np) if (ret) return ret; par_io = ioremap(res.start, resource_size(&res)); + if (!par_io) + return -ENOMEM; if (!of_property_read_u32(np, "num-ports", &num_ports)) num_par_io_ports = num_ports; diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c index b24cc77d1889fb75a404cd100975ac97164f7750..6298561bc29c9dfd30739416c73e87585ae5d611 100644 --- a/drivers/soc/qcom/cpr.c +++ b/drivers/soc/qcom/cpr.c @@ -1043,7 +1043,7 @@ static int cpr_interpolate(const struct corner *corner, int step_volt, return corner->uV; temp = f_diff * (uV_high - uV_low); - do_div(temp, f_high - f_low); + temp = div64_ul(temp, f_high - f_low); /* * max_volt_scale has units of uV/MHz while freq values diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c index f1875dc31ae2cb8991e98a3a28e6b52fcd5405a9..85f82e195ef8b7115536ea852a7fafd94cd1a3d2 100644 --- a/drivers/soc/qcom/ocmem.c +++ b/drivers/soc/qcom/ocmem.c @@ -206,6 +206,7 @@ struct ocmem *of_get_ocmem(struct device *dev) ocmem = platform_get_drvdata(pdev); if (!ocmem) { dev_err(dev, "Cannot get ocmem\n"); + put_device(&pdev->dev); return ERR_PTR(-ENODEV); } return ocmem; diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index 4fe88d4690e2b259de1279de7a3af46e76af533c..941499b117580fb8e6d49874030272a9c68f50f4 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -548,7 +548,7 @@ static int qmp_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT, + ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0, "aoss-qmp", qmp); if (ret < 0) { dev_err(&pdev->dev, "failed to request interrupt\n"); diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c index f2168e4259b231485a91ade30f3515e48d8732ba..c6084c0d35302ca2b94943d07d2bdc41dcb1a58f 100644 --- a/drivers/soc/qcom/rpmpd.c +++ b/drivers/soc/qcom/rpmpd.c @@ -387,6 +387,9 @@ static int rpmpd_probe(struct platform_device *pdev) data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains), GFP_KERNEL); + if (!data->domains) + return -ENOMEM; + data->num_domains = num; for (i = 0; i < num; i++) { diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c index cc0b4ad7a3d34fb06654f0f04c2ef37c12d496a7..30695172a508f610d9571f841aa0461701f905db 100644 --- a/drivers/soc/ti/pruss.c +++ b/drivers/soc/ti/pruss.c @@ -131,7 +131,7 @@ static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node) clks_np = of_get_child_by_name(cfg_node, "clocks"); if (!clks_np) { - dev_err(dev, "%pOF is missing its 'clocks' node\n", clks_np); + dev_err(dev, "%pOF is missing its 'clocks' 
node\n", cfg_node); return -ENODEV; } diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c index e9ece45d7a3334e7612cc4f61c616873472e312c..ef3f95fefab582d264e8fbbb5fe1d3ec45147cb9 100644 --- a/drivers/soc/ti/wkup_m3_ipc.c +++ b/drivers/soc/ti/wkup_m3_ipc.c @@ -447,9 +447,9 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (!irq) { + if (irq < 0) { dev_err(&pdev->dev, "no irq resource\n"); - return -ENXIO; + return irq; } ret = devm_request_irq(dev, irq, wkup_m3_txev_handler, diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index dad4326a2a714219577d31eda64f22a28592e7aa..824d9f900aca74e41e4b62a04420e7a0c8c951ec 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -521,8 +521,8 @@ static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable) /* Clear wake status */ wake_sts = intel_readw(shim, SDW_SHIM_WAKESTS); - wake_sts |= (SDW_SHIM_WAKEEN_ENABLE << link_id); - intel_writew(shim, SDW_SHIM_WAKESTS_STATUS, wake_sts); + wake_sts |= (SDW_SHIM_WAKESTS_STATUS << link_id); + intel_writew(shim, SDW_SHIM_WAKESTS, wake_sts); } mutex_unlock(sdw->link_res->shim_lock); } diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 3c0ae6dbc43e212b451fc670e88a739b12963eba..766b00350e39103eb5964752bfba9ef783588dec 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -551,7 +551,7 @@ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs) u32 rd = 0; u32 wr = 0; - if (qspi->base[CHIP_SELECT]) { + if (cs >= 0 && qspi->base[CHIP_SELECT]) { rd = bcm_qspi_read(qspi, CHIP_SELECT, 0); wr = (rd & ~0xff) | (1 << cs); if (rd == wr) @@ -1032,7 +1032,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem, addr = op->addr.val; len = op->data.nbytes; - if (bcm_qspi_bspi_ver_three(qspi) == true) { + if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) { /* * The address coming into this function is a raw flash offset. 
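The spi-bcm-qspi hunks on either side of this comment share one invariant: every path that touches the BSPI block is now gated on has_bspi(), so controllers without a BSPI engine always fall back to bcm_qspi_mspi_exec_mem_op() instead of touching registers they do not implement. A hedged sketch of the resulting dispatch, with helper names modelled on the driver's and use_mspi_for() standing in for the real mspi_read heuristics (address width, transfer length, alignment):

	static int exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
	{
		struct spi_device *spi = mem->spi;
		struct bcm_qspi *qspi = spi_master_get_devdata(spi->master);

		/* no BSPI block, or a transfer BSPI cannot do: use MSPI */
		if (!has_bspi(qspi) || use_mspi_for(op))
			return bcm_qspi_mspi_exec_mem_op(spi, op);

		/* BSPI <= V3 additionally needs the raw flash offset remapped */
		return bcm_qspi_bspi_exec_mem_op(spi, op);
	}
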
* But for BSPI <= V3, we need to convert it to a remapped BSPI @@ -1051,7 +1051,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem, len < 4) mspi_read = true; - if (mspi_read) + if (!has_bspi(qspi) || mspi_read) return bcm_qspi_mspi_exec_mem_op(spi, op); ret = bcm_qspi_bspi_set_mode(qspi, op, 0); diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c index 3f986ba1c328c5d7a6679198b7ac704ac66d2c20..525cc0143a3050ec7a592aea4ea1eb75097277ad 100644 --- a/drivers/spi/spi-hisi-kunpeng.c +++ b/drivers/spi/spi-hisi-kunpeng.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -133,8 +134,52 @@ struct hisi_spi { void *rx; unsigned int rx_len; u8 n_bytes; /* current is a 1/2/4 bytes op */ + + struct dentry *debugfs; + struct debugfs_regset32 regset; +}; + +#define HISI_SPI_DBGFS_REG(_name, _off) \ +{ \ + .name = _name, \ + .offset = _off, \ +} + +static const struct debugfs_reg32 hisi_spi_regs[] = { + HISI_SPI_DBGFS_REG("CSCR", HISI_SPI_CSCR), + HISI_SPI_DBGFS_REG("CR", HISI_SPI_CR), + HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR), + HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC), + HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR), + HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN), + HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT), + HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR), + HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR), + HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR), + HISI_SPI_DBGFS_REG("ICR", HISI_SPI_ICR), + HISI_SPI_DBGFS_REG("VERSION", HISI_SPI_VERSION), }; +static int hisi_spi_debugfs_init(struct hisi_spi *hs) +{ + char name[32]; + + struct spi_controller *master; + + master = container_of(hs->dev, struct spi_controller, dev); + snprintf(name, 32, "hisi_spi%d", master->bus_num); + hs->debugfs = debugfs_create_dir(name, NULL); + if (!hs->debugfs) + return -ENOMEM; + + hs->regset.regs = hisi_spi_regs; + hs->regset.nregs = ARRAY_SIZE(hisi_spi_regs); + hs->regset.base = hs->regs; + debugfs_create_regset32("registers", 0400, hs->debugfs, &hs->regset); + + return 0; +} + static u32 hisi_spi_busy(struct hisi_spi *hs) { return readl(hs->regs + HISI_SPI_SR) & SR_BUSY; @@ -468,6 +513,9 @@ static int hisi_spi_probe(struct platform_device *pdev) return ret; } + if (hisi_spi_debugfs_init(hs)) + dev_info(dev, "failed to create debugfs dir\n"); + dev_info(dev, "hw version:0x%x max-freq:%u kHz\n", readl(hs->regs + HISI_SPI_VERSION), master->max_speed_hz / 1000); @@ -478,7 +526,9 @@ static int hisi_spi_probe(struct platform_device *pdev) static int hisi_spi_remove(struct platform_device *pdev) { struct spi_controller *master = platform_get_drvdata(pdev); + struct hisi_spi *hs = spi_controller_get_devdata(master); + debugfs_remove_recursive(hs->debugfs); spi_unregister_controller(master); return 0; diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index c208efeadd1847a4be807ce4dc886851f031dc35..0bc7daa7afc83d0d20424800845bb16f1c2c473f 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -693,6 +693,11 @@ static int meson_spicc_probe(struct platform_device *pdev) writel_relaxed(0, spicc->base + SPICC_INTREG); irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto out_master; + } + ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq, 0, NULL, spicc); if (ret) { diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c index 8eca6f24cb799b461c6e63f3e190b87f2c600c56..c8ed7815c4ba6d84b18da6adc73111d44ce0f5f2 100644 --- a/drivers/spi/spi-meson-spifc.c +++ b/drivers/spi/spi-meson-spifc.c @@ -349,6 +349,7 @@ static int 
meson_spifc_probe(struct platform_device *pdev) return 0; out_clk: clk_disable_unprepare(spifc->clk); + pm_runtime_disable(spifc->dev); out_err: spi_master_put(master); return ret; diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 83e56ee62649d445ea08b05b99a54059e17264f7..92a09dfb99a8e7a124d8be3a0823af4da977cb5f 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -540,7 +540,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) else mdata->state = MTK_SPI_IDLE; - if (!master->can_dma(master, master->cur_msg->spi, trans)) { + if (!master->can_dma(master, NULL, trans)) { if (trans->rx_buf) { cnt = mdata->xfer_len / 4; ioread32_rep(mdata->base + SPI_RX_DATA_REG, diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index 96b418293bf2a493dca92f016e7dc342120db141..4fb19e6f94b05256400c0d27b621418d10a63684 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -304,25 +304,21 @@ static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf, writel(data, mxic->regs + TXD(nbytes % 4)); + ret = readl_poll_timeout(mxic->regs + INT_STS, sts, + sts & INT_TX_EMPTY, 0, USEC_PER_SEC); + if (ret) + return ret; + + ret = readl_poll_timeout(mxic->regs + INT_STS, sts, + sts & INT_RX_NOT_EMPTY, 0, + USEC_PER_SEC); + if (ret) + return ret; + + data = readl(mxic->regs + RXD); if (rxbuf) { - ret = readl_poll_timeout(mxic->regs + INT_STS, sts, - sts & INT_TX_EMPTY, 0, - USEC_PER_SEC); - if (ret) - return ret; - - ret = readl_poll_timeout(mxic->regs + INT_STS, sts, - sts & INT_RX_NOT_EMPTY, 0, - USEC_PER_SEC); - if (ret) - return ret; - - data = readl(mxic->regs + RXD); data >>= (8 * (4 - nbytes)); memcpy(rxbuf + pos, &data, nbytes); - WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY); - } else { - readl(mxic->regs + RXD); } WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY); diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index e4ee8b084799365eb7620f02d1f8c57379de893c..f7603c209e9d5fd89ce96d1006836e3d1d066813 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -2315,13 +2315,13 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) return status; } -static int +static void pl022_remove(struct amba_device *adev) { struct pl022 *pl022 = amba_get_drvdata(adev); if (!pl022) - return 0; + return; /* * undo pm_runtime_put() in probe. 
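This pl022 hunk is part of the tree-wide conversion of struct amba_driver's remove() callback from int to void: the AMBA bus core never acted on the return value, so returning an error from remove() could not keep the device bound and only hid cleanup bugs. A minimal sketch of the post-conversion shape, with foo_* as placeholder names:

	static void foo_remove(struct amba_device *adev)
	{
		struct foo *priv = amba_get_drvdata(adev);

		if (!priv)
			return;	/* probe never finished, nothing to undo */

		/* unwind probe in reverse order; errors cannot be reported */
		clk_disable_unprepare(priv->clk);
		amba_release_regions(adev);
	}
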
I assume that we're not @@ -2336,7 +2336,6 @@ pl022_remove(struct amba_device *adev) clk_disable_unprepare(pl022->clk); amba_release_regions(adev); tasklet_disable(&pl022->pump_transfers); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index aafac128bb5f1816b092eff0a020c0b34ec9760f..4eb979a096c78a49a9f3c272653a2766bdeece29 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -74,14 +74,23 @@ static bool lpss_dma_filter(struct dma_chan *chan, void *param) return true; } +static void lpss_dma_put_device(void *dma_dev) +{ + pci_dev_put(dma_dev); +} + static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) { struct pci_dev *dma_dev; + int ret; c->num_chipselect = 1; c->max_clk_rate = 50000000; dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); + ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev); + if (ret) + return ret; if (c->tx_param) { struct dw_dma_slave *slave = c->tx_param; @@ -105,8 +114,9 @@ static int lpss_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) { - struct pci_dev *dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0)); struct dw_dma_slave *tx, *rx; + struct pci_dev *dma_dev; + int ret; switch (PCI_FUNC(dev->devfn)) { case 0: @@ -131,6 +141,11 @@ static int mrfld_spi_setup(struct pci_dev *dev, struct pxa_spi_info *c) return -ENODEV; } + dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(21, 0)); + ret = devm_add_action_or_reset(&dev->dev, lpss_dma_put_device, dma_dev); + if (ret) + return ret; + tx = c->tx_param; tx->dma_dev = &dma_dev->dev; diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 624273d0e727fa564a507572cb500e0f579c23de..a9f97023d5a00da66064abdfbc2d3abaaf4db0d7 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -567,6 +567,12 @@ static int rockchip_spi_slave_abort(struct spi_controller *ctlr) { struct rockchip_spi *rs = spi_controller_get_devdata(ctlr); + if (atomic_read(&rs->state) & RXDMA) + dmaengine_terminate_sync(ctlr->dma_rx); + if (atomic_read(&rs->state) & TXDMA) + dmaengine_terminate_sync(ctlr->dma_tx); + atomic_set(&rs->state, 0); + spi_enable_chip(rs, false); rs->slave_abort = true; complete(&ctlr->xfer_completion); @@ -636,7 +642,7 @@ static int rockchip_spi_probe(struct platform_device *pdev) struct spi_controller *ctlr; struct resource *mem; struct device_node *np = pdev->dev.of_node; - u32 rsd_nsecs; + u32 rsd_nsecs, num_cs; bool slave_mode; slave_mode = of_property_read_bool(np, "spi-slave"); @@ -744,8 +750,9 @@ static int rockchip_spi_probe(struct platform_device *pdev) * rk spi0 has two native cs, spi1..5 one cs only * if num-cs is missing in the dts, default to 1 */ - if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect)) - ctlr->num_chipselect = 1; + if (of_property_read_u32(np, "num-cs", &num_cs)) + num_cs = 1; + ctlr->num_chipselect = num_cs; ctlr->use_gpio_descriptors = true; } ctlr->dev.of_node = pdev->dev.of_node; diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index a2e5907276e7fd6ce045ece59ceb8b0064d143b3..ed42665b12241b803a2dc7e2e6071b01bbda043d 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -1353,6 +1353,10 @@ static int tegra_spi_probe(struct platform_device *pdev) tspi->phys = r->start; spi_irq = platform_get_irq(pdev, 0); + if (spi_irq < 0) { + ret = spi_irq; + goto exit_free_master; + } tspi->irq = spi_irq; tspi->clk = 
devm_clk_get(&pdev->dev, "spi"); diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 669fc4286231f508743c50ffce21e86c0cb0b6d5..9e2b812b9025f69a668f157decf4a4a36b281848 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1006,14 +1006,8 @@ static int tegra_slink_probe(struct platform_device *pdev) struct resource *r; int ret, spi_irq; const struct tegra_slink_chip_data *cdata = NULL; - const struct of_device_id *match; - match = of_match_device(tegra_slink_of_match, &pdev->dev); - if (!match) { - dev_err(&pdev->dev, "Error: No device match found\n"); - return -ENODEV; - } - cdata = match->data; + cdata = of_device_get_match_data(&pdev->dev); master = spi_alloc_master(&pdev->dev, sizeof(*tspi)); if (!master) { diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index 6a9ef8ee3cc9028f73c3bcf0a1f4ab3a8252373e..ad0088e3947237a8c70c37c9de385b67b8e2febb 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -726,7 +726,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n", ret); - goto out_disable_clk; + goto out_release_dma; } dma_tx_burst = caps.max_burst; } @@ -735,7 +735,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (IS_ERR_OR_NULL(master->dma_rx)) { if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; - goto out_disable_clk; + goto out_release_dma; } master->dma_rx = NULL; dma_rx_burst = INT_MAX; @@ -744,7 +744,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n", ret); - goto out_disable_clk; + goto out_release_dma; } dma_rx_burst = caps.max_burst; } @@ -753,10 +753,20 @@ static int uniphier_spi_probe(struct platform_device *pdev) ret = devm_spi_register_master(&pdev->dev, master); if (ret) - goto out_disable_clk; + goto out_release_dma; return 0; +out_release_dma: + if (!IS_ERR_OR_NULL(master->dma_rx)) { + dma_release_channel(master->dma_rx); + master->dma_rx = NULL; + } + if (!IS_ERR_OR_NULL(master->dma_tx)) { + dma_release_channel(master->dma_tx); + master->dma_tx = NULL; + } + out_disable_clk: clk_disable_unprepare(priv->clk); @@ -767,12 +777,13 @@ static int uniphier_spi_probe(struct platform_device *pdev) static int uniphier_spi_remove(struct platform_device *pdev) { - struct uniphier_spi_priv *priv = platform_get_drvdata(pdev); + struct spi_master *master = platform_get_drvdata(pdev); + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); - if (priv->master->dma_tx) - dma_release_channel(priv->master->dma_tx); - if (priv->master->dma_rx) - dma_release_channel(priv->master->dma_rx); + if (master->dma_tx) + dma_release_channel(master->dma_tx); + if (master->dma_rx) + dma_release_channel(master->dma_rx); clk_disable_unprepare(priv->clk); diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index b635835729d66401501b99a31782760ddee54763..13c0b15fe17649490b7075c0d5119e4d012b7413 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem, if (op->dummy.nbytes) { tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + memset(tmpbuf, 0xff, op->dummy.nbytes); reinit_completion(&xqspi->data_completion); xqspi->txbuf = tmpbuf; diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 
1dd2af9cc237441effef5a5ca9d55f54e454e24a..3d3ac48243ebd6ae8813c527210ee87d98246afe 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -1165,7 +1165,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) goto clk_dis_all; } - dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(44)); + if (ret) + goto clk_dis_all; + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS; ctlr->mem_ops = &zynqmp_qspi_mem_ops; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8c261eac2cee5b64148e9fd3f319f003f5b8ba97..6ea7b286c80c250fc1649b710f0e5056be1617bf 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -881,10 +881,10 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev, int i, ret; if (vmalloced_buf || kmap_buf) { - desc_len = min_t(int, max_seg_size, PAGE_SIZE); + desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE); sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); } else if (virt_addr_valid(buf)) { - desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); + desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len); sgs = DIV_ROUND_UP(len, desc_len); } else { return -EINVAL; diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c index 3a280cc1892ca027062006f5e4b6450123f29543..0a2dbed9ffc74f3399fa0fc6e1de2afb648d3de8 100644 --- a/drivers/staging/fbtft/fb_st7789v.c +++ b/drivers/staging/fbtft/fb_st7789v.c @@ -82,6 +82,8 @@ enum st7789v_command { */ static int init_display(struct fbtft_par *par) { + par->fbtftops.reset(par); + /* turn off sleep mode */ write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE); mdelay(120); diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index 76f8c090a83702b56b2399194898d002b1151af1..06afaa9d505bac250b5f3b7a215307dfbaf05977 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -332,7 +332,10 @@ static int __init fbtft_driver_module_init(void) \ ret = spi_register_driver(&fbtft_driver_spi_driver); \ if (ret < 0) \ return ret; \ - return platform_driver_register(&fbtft_driver_platform_driver); \ + ret = platform_driver_register(&fbtft_driver_platform_driver); \ + if (ret < 0) \ + spi_unregister_driver(&fbtft_driver_spi_driver); \ + return ret; \ } \ \ static void __exit fbtft_driver_module_exit(void) \ diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index bd5f874334043217014a2c59c5167d82c838be80..de30262c3fae023e2e34d58f21aa6444370fa706 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -76,14 +76,15 @@ static void tx_complete(void *arg) static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type) { - int ret; + int ret, len; + len = skb->len + ETH_HLEN; ret = netif_rx_ni(skb); if (ret == NET_RX_DROP) { nic->stats.rx_dropped++; } else { nic->stats.rx_packets++; - nic->stats.rx_bytes += skb->len + ETH_HLEN; + nic->stats.rx_bytes += len; } return 0; diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c index 662e3e8b4b634d038b4ecc83e7a9829d3dc1d1ee..e1579f356af5c1f49cadc9f2a5790ed9f49de1d7 100644 --- a/drivers/staging/greybus/audio_topology.c +++ b/drivers/staging/greybus/audio_topology.c @@ -147,6 +147,9 @@ static const char **gb_generate_enum_strings(struct gbaudio_module_info *gb, items = le32_to_cpu(gbenum->items); strings = devm_kcalloc(gb->dev, items, sizeof(char *), GFP_KERNEL); + if (!strings) + return NULL; + data = 
gbenum->names; for (i = 0; i < items; i++) { @@ -655,6 +658,8 @@ static int gbaudio_tplg_create_enum_kctl(struct gbaudio_module_info *gb, /* since count=1, and reg is dummy */ gbe->items = le32_to_cpu(gb_enum->items); gbe->texts = gb_generate_enum_strings(gb, gb_enum); + if (!gbe->texts) + return -ENOMEM; /* debug enum info */ dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gbe->items, @@ -862,6 +867,8 @@ static int gbaudio_tplg_create_enum_ctl(struct gbaudio_module_info *gb, /* since count=1, and reg is dummy */ gbe->items = le32_to_cpu(gb_enum->items); gbe->texts = gb_generate_enum_strings(gb, gb_enum); + if (!gbe->texts) + return -ENOMEM; /* debug enum info */ dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gbe->items, @@ -974,6 +981,44 @@ static int gbaudio_widget_event(struct snd_soc_dapm_widget *w, return ret; } +static const struct snd_soc_dapm_widget gbaudio_widgets[] = { + [snd_soc_dapm_spk] = SND_SOC_DAPM_SPK(NULL, gbcodec_event_spk), + [snd_soc_dapm_hp] = SND_SOC_DAPM_HP(NULL, gbcodec_event_hp), + [snd_soc_dapm_mic] = SND_SOC_DAPM_MIC(NULL, gbcodec_event_int_mic), + [snd_soc_dapm_output] = SND_SOC_DAPM_OUTPUT(NULL), + [snd_soc_dapm_input] = SND_SOC_DAPM_INPUT(NULL), + [snd_soc_dapm_switch] = SND_SOC_DAPM_SWITCH_E(NULL, SND_SOC_NOPM, + 0, 0, NULL, + gbaudio_widget_event, + SND_SOC_DAPM_PRE_PMU | + SND_SOC_DAPM_POST_PMD), + [snd_soc_dapm_pga] = SND_SOC_DAPM_PGA_E(NULL, SND_SOC_NOPM, + 0, 0, NULL, 0, + gbaudio_widget_event, + SND_SOC_DAPM_PRE_PMU | + SND_SOC_DAPM_POST_PMD), + [snd_soc_dapm_mixer] = SND_SOC_DAPM_MIXER_E(NULL, SND_SOC_NOPM, + 0, 0, NULL, 0, + gbaudio_widget_event, + SND_SOC_DAPM_PRE_PMU | + SND_SOC_DAPM_POST_PMD), + [snd_soc_dapm_mux] = SND_SOC_DAPM_MUX_E(NULL, SND_SOC_NOPM, + 0, 0, NULL, + gbaudio_widget_event, + SND_SOC_DAPM_PRE_PMU | + SND_SOC_DAPM_POST_PMD), + [snd_soc_dapm_aif_in] = SND_SOC_DAPM_AIF_IN_E(NULL, NULL, 0, + SND_SOC_NOPM, 0, 0, + gbaudio_widget_event, + SND_SOC_DAPM_PRE_PMU | + SND_SOC_DAPM_POST_PMD), + [snd_soc_dapm_aif_out] = SND_SOC_DAPM_AIF_OUT_E(NULL, NULL, 0, + SND_SOC_NOPM, 0, 0, + gbaudio_widget_event, + SND_SOC_DAPM_PRE_PMU | + SND_SOC_DAPM_POST_PMD), +}; + static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module, struct snd_soc_dapm_widget *dw, struct gb_audio_widget *w, int *w_size) @@ -1034,6 +1079,10 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module, csize += le16_to_cpu(gbenum->names_length); control->texts = (const char * const *) gb_generate_enum_strings(module, gbenum); + if (!control->texts) { + ret = -ENOMEM; + goto error; + } control->items = le32_to_cpu(gbenum->items); } else { csize = sizeof(struct gb_audio_control); @@ -1052,77 +1101,37 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module, switch (w->type) { case snd_soc_dapm_spk: - *dw = (struct snd_soc_dapm_widget) - SND_SOC_DAPM_SPK(w->name, gbcodec_event_spk); + *dw = gbaudio_widgets[w->type]; module->op_devices |= GBAUDIO_DEVICE_OUT_SPEAKER; break; case snd_soc_dapm_hp: - *dw = (struct snd_soc_dapm_widget) - SND_SOC_DAPM_HP(w->name, gbcodec_event_hp); + *dw = gbaudio_widgets[w->type]; module->op_devices |= (GBAUDIO_DEVICE_OUT_WIRED_HEADSET | GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE); module->ip_devices |= GBAUDIO_DEVICE_IN_WIRED_HEADSET; break; case snd_soc_dapm_mic: - *dw = (struct snd_soc_dapm_widget) - SND_SOC_DAPM_MIC(w->name, gbcodec_event_int_mic); + *dw = gbaudio_widgets[w->type]; module->ip_devices |= GBAUDIO_DEVICE_IN_BUILTIN_MIC; break; case snd_soc_dapm_output: - *dw = (struct 
snd_soc_dapm_widget)SND_SOC_DAPM_OUTPUT(w->name); - break; case snd_soc_dapm_input: - *dw = (struct snd_soc_dapm_widget)SND_SOC_DAPM_INPUT(w->name); - break; case snd_soc_dapm_switch: - *dw = (struct snd_soc_dapm_widget) - SND_SOC_DAPM_SWITCH_E(w->name, SND_SOC_NOPM, 0, 0, - widget_kctls, - gbaudio_widget_event, - SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD); - break; case snd_soc_dapm_pga: - *dw = (struct snd_soc_dapm_widget) - SND_SOC_DAPM_PGA_E(w->name, SND_SOC_NOPM, 0, 0, NULL, 0, - gbaudio_widget_event, - SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD); - break; case snd_soc_dapm_mixer: - *dw = (struct snd_soc_dapm_widget) - SND_SOC_DAPM_MIXER_E(w->name, SND_SOC_NOPM, 0, 0, NULL, - 0, gbaudio_widget_event, - SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD); - break; case snd_soc_dapm_mux: - *dw = (struct snd_soc_dapm_widget) - SND_SOC_DAPM_MUX_E(w->name, SND_SOC_NOPM, 0, 0, - widget_kctls, gbaudio_widget_event, - SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD); + *dw = gbaudio_widgets[w->type]; break; case snd_soc_dapm_aif_in: - *dw = (struct snd_soc_dapm_widget) - SND_SOC_DAPM_AIF_IN_E(w->name, w->sname, 0, - SND_SOC_NOPM, - 0, 0, gbaudio_widget_event, - SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD); - break; case snd_soc_dapm_aif_out: - *dw = (struct snd_soc_dapm_widget) - SND_SOC_DAPM_AIF_OUT_E(w->name, w->sname, 0, - SND_SOC_NOPM, - 0, 0, gbaudio_widget_event, - SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD); + *dw = gbaudio_widgets[w->type]; + dw->sname = w->sname; break; default: ret = -EINVAL; goto error; } + dw->name = w->name; dev_dbg(module->dev, "%s: widget of type %d created\n", dw->name, dw->id); @@ -1183,6 +1192,10 @@ static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module, csize += le16_to_cpu(gbenum->names_length); control->texts = (const char * const *) gb_generate_enum_strings(module, gbenum); + if (!control->texts) { + ret = -ENOMEM; + goto error; + } control->items = le32_to_cpu(gbenum->items); } else { csize = sizeof(struct gb_audio_control); diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c index fef0055b89909fc0454870e602cecad237554873..20183b2ea127962f347a9af6a7a4ea1a37eb5926 100644 --- a/drivers/staging/iio/adc/ad7280a.c +++ b/drivers/staging/iio/adc/ad7280a.c @@ -107,9 +107,9 @@ static unsigned int ad7280a_devaddr(unsigned int addr) { return ((addr & 0x1) << 4) | - ((addr & 0x2) << 3) | + ((addr & 0x2) << 2) | (addr & 0x4) | - ((addr & 0x8) >> 3) | + ((addr & 0x8) >> 2) | ((addr & 0x10) >> 4); } diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h index 49920245e0647ba231fafc9206720c6441c8fede..cafb798a71abeb76d784bb93c9d09a4f0c0cca08 100644 --- a/drivers/staging/media/atomisp/i2c/ov2680.h +++ b/drivers/staging/media/atomisp/i2c/ov2680.h @@ -289,8 +289,6 @@ static struct ov2680_reg const ov2680_global_setting[] = { */ static struct ov2680_reg const ov2680_QCIF_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x24}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -334,8 +332,6 @@ static struct ov2680_reg const ov2680_QCIF_30fps[] = { */ static struct ov2680_reg const ov2680_CIF_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x24}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -377,8 +373,6 @@ static struct ov2680_reg const ov2680_CIF_30fps[] = { */ static struct ov2680_reg const ov2680_QVGA_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x24}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -420,8 +414,6 @@ static 
struct ov2680_reg const ov2680_QVGA_30fps[] = { */ static struct ov2680_reg const ov2680_656x496_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x24}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -463,8 +455,6 @@ static struct ov2680_reg const ov2680_656x496_30fps[] = { */ static struct ov2680_reg const ov2680_720x592_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x26}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0x00}, // X_ADDR_START; {0x3802, 0x00}, @@ -508,8 +498,6 @@ static struct ov2680_reg const ov2680_720x592_30fps[] = { */ static struct ov2680_reg const ov2680_800x600_30fps[] = { {0x3086, 0x01}, - {0x3501, 0x26}, - {0x3502, 0x40}, {0x370a, 0x23}, {0x3801, 0x00}, {0x3802, 0x00}, @@ -551,8 +539,6 @@ static struct ov2680_reg const ov2680_800x600_30fps[] = { */ static struct ov2680_reg const ov2680_720p_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -594,8 +580,6 @@ static struct ov2680_reg const ov2680_720p_30fps[] = { */ static struct ov2680_reg const ov2680_1296x976_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0xa0}, {0x3802, 0x00}, @@ -637,8 +621,6 @@ static struct ov2680_reg const ov2680_1296x976_30fps[] = { */ static struct ov2680_reg const ov2680_1456x1096_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0x90}, {0x3802, 0x00}, @@ -682,8 +664,6 @@ static struct ov2680_reg const ov2680_1456x1096_30fps[] = { static struct ov2680_reg const ov2680_1616x916_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0x00}, {0x3802, 0x00}, @@ -726,8 +706,6 @@ static struct ov2680_reg const ov2680_1616x916_30fps[] = { #if 0 static struct ov2680_reg const ov2680_1616x1082_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0x00}, {0x3802, 0x00}, @@ -769,8 +747,6 @@ static struct ov2680_reg const ov2680_1616x1082_30fps[] = { */ static struct ov2680_reg const ov2680_1616x1216_30fps[] = { {0x3086, 0x00}, - {0x3501, 0x48}, - {0x3502, 0xe0}, {0x370a, 0x21}, {0x3801, 0x00}, {0x3802, 0x00}, diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c index f638d0bd09fe6f4d3c956e17f79c050ac56de31d..b1614cce2dfb05dda05aac6d3d860caaceac1c2e 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_acc.c +++ b/drivers/staging/media/atomisp/pci/atomisp_acc.c @@ -439,6 +439,18 @@ int atomisp_acc_s_mapped_arg(struct atomisp_sub_device *asd, return 0; } +static void atomisp_acc_unload_some_extensions(struct atomisp_sub_device *asd, + int i, + struct atomisp_acc_fw *acc_fw) +{ + while (--i >= 0) { + if (acc_fw->flags & acc_flag_to_pipe[i].flag) { + atomisp_css_unload_acc_extension(asd, acc_fw->fw, + acc_flag_to_pipe[i].pipe_id); + } + } +} + /* * Appends the loaded acceleration binary extensions to the * current ISP mode. Must be called just before sh_css_start(). 
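The hunk below rewires the error handling of atomisp_acc_load_extensions() around the atomisp_acc_unload_some_extensions() helper introduced above: each failure site now unwinds exactly the pipeline extensions already bound for the current firmware, and the late stream-update failure performs a full atomisp_acc_unload_extensions() rather than reusing the partial unwind at the error label. The underlying pattern, as an illustrative sketch with hypothetical load_one()/unload_one() helpers:

	/* load n stages; on failure, undo only the i already loaded */
	static int load_all(struct stage *s, int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			ret = load_one(&s[i]);
			if (ret) {
				while (--i >= 0)
					unload_one(&s[i]);
				return ret;
			}
		}
		return 0;
	}
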
@@ -477,16 +489,20 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd) acc_fw->fw, acc_flag_to_pipe[i].pipe_id, acc_fw->type); - if (ret) + if (ret) { + atomisp_acc_unload_some_extensions(asd, i, acc_fw); goto error; + } ext_loaded = true; } } ret = atomisp_css_set_acc_parameters(acc_fw); - if (ret < 0) + if (ret < 0) { + atomisp_acc_unload_some_extensions(asd, i, acc_fw); goto error; + } } if (!ext_loaded) @@ -495,6 +511,7 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd) ret = atomisp_css_update_stream(asd); if (ret) { dev_err(isp->dev, "%s: update stream failed.\n", __func__); + atomisp_acc_unload_extensions(asd); goto error; } @@ -502,13 +519,6 @@ int atomisp_acc_load_extensions(struct atomisp_sub_device *asd) return 0; error: - while (--i >= 0) { - if (acc_fw->flags & acc_flag_to_pipe[i].flag) { - atomisp_css_unload_acc_extension(asd, acc_fw->fw, - acc_flag_to_pipe[i].pipe_id); - } - } - list_for_each_entry_continue_reverse(acc_fw, &asd->acc.fw, list) { if (acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_OUTPUT && acc_fw->type != ATOMISP_ACC_FW_LOAD_TYPE_VIEWFINDER) diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c index 592ea990d4ca4014b72a7666d5be077e7b888280..90d50a693ce57243784d3c9f1354473d3978945e 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -1138,9 +1138,10 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, asd->frame_status[vb->i] = ATOMISP_FRAME_STATUS_OK; } - } else + } else { asd->frame_status[vb->i] = ATOMISP_FRAME_STATUS_OK; + } } else { asd->frame_status[vb->i] = ATOMISP_FRAME_STATUS_OK; } @@ -1714,6 +1715,12 @@ void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe, { unsigned long next; + if (!pipe->asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, pipe->vdev.name); + return; + } + if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY) pipe->wdt_duration = delay; @@ -1776,6 +1783,12 @@ void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay) /* ISP2401 */ void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync) { + if (!pipe->asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, pipe->vdev.name); + return; + } + if (!atomisp_is_wdt_running(pipe)) return; @@ -4108,6 +4121,12 @@ void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe) unsigned long irqflags; bool need_to_enqueue_buffer = false; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, pipe->vdev.name); + return; + } + if (atomisp_is_vf_pipe(pipe)) return; @@ -4195,6 +4214,12 @@ int atomisp_set_parameters(struct video_device *vdev, struct atomisp_css_params *css_param = &asd->params.css_param; int ret; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) { dev_err(asd->isp->dev, "%s: internal error!\n", __func__); return -EINVAL; @@ -4855,6 +4880,12 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_format *f, int source_pad = atomisp_subdev_source_pad(vdev); int ret; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (!isp->inputs[asd->input_curr].camera) return -EINVAL; @@ -4945,9 +4976,9 @@ atomisp_try_fmt_file(struct atomisp_device *isp, struct v4l2_format *f) depth = 
get_pixel_depth(pixelformat); - if (field == V4L2_FIELD_ANY) + if (field == V4L2_FIELD_ANY) { field = V4L2_FIELD_NONE; - else if (field != V4L2_FIELD_NONE) { + } else if (field != V4L2_FIELD_NONE) { dev_err(isp->dev, "Wrong output field\n"); return -EINVAL; } @@ -5201,6 +5232,12 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, const struct atomisp_in_fmt_conv *fc; int ret, i; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + v4l2_fh_init(&fh.vfh, vdev); isp_sink_crop = atomisp_subdev_get_rect( @@ -5512,6 +5549,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, unsigned int dvs_env_w, unsigned int dvs_env_h) { struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; + struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); const struct atomisp_format_bridge *format; struct v4l2_subdev_pad_config pad_cfg; struct v4l2_subdev_format vformat = { @@ -5527,6 +5565,12 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, struct v4l2_subdev_fh fh; int ret; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + v4l2_fh_init(&fh.vfh, vdev); stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); @@ -5617,6 +5661,12 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f) struct v4l2_subdev_fh fh; int ret; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (source_pad >= ATOMISP_SUBDEV_PADS_NUM) return -EINVAL; @@ -6050,6 +6100,12 @@ int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f) struct v4l2_subdev_fh fh; int ret; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + v4l2_fh_init(&fh.vfh, vdev); dev_dbg(isp->dev, "setting fmt %ux%u 0x%x for file inject\n", @@ -6374,6 +6430,12 @@ bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe) { struct atomisp_sub_device *asd = pipe->asd; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, pipe->vdev.name); + return false; + } + if (pipe == &asd->video_out_vf) return true; @@ -6587,17 +6649,23 @@ static int atomisp_get_pipe_id(struct atomisp_video_pipe *pipe) { struct atomisp_sub_device *asd = pipe->asd; - if (ATOMISP_USE_YUVPP(asd)) + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, pipe->vdev.name); + return -EINVAL; + } + + if (ATOMISP_USE_YUVPP(asd)) { return IA_CSS_PIPE_ID_YUVPP; - else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) + } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) { return IA_CSS_PIPE_ID_VIDEO; - else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) + } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) { return IA_CSS_PIPE_ID_CAPTURE; - else if (pipe == &asd->video_out_video_capture) + } else if (pipe == &asd->video_out_video_capture) { return IA_CSS_PIPE_ID_VIDEO; - else if (pipe == &asd->video_out_vf) + } else if (pipe == &asd->video_out_vf) { return IA_CSS_PIPE_ID_CAPTURE; - else if (pipe == &asd->video_out_preview) { + } else if (pipe == &asd->video_out_preview) { if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) return IA_CSS_PIPE_ID_VIDEO; else @@ -6624,6 +6692,12 @@ int atomisp_get_invalid_frame_num(struct video_device *vdev, struct ia_css_pipe_info p_info; int ret; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, 
vdev->name); + return -EINVAL; + } + if (asd->isp->inputs[asd->input_curr].camera_caps-> sensor[asd->sensor_curr].stream_num > 1) { /* External ISP */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c index f1e6b25978534143b450da34c817f5c6bb866bac..b751df31cc24cd4181f0750ccbe97155779c5ef7 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_fops.c +++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c @@ -877,6 +877,11 @@ static int atomisp_open(struct file *file) else pipe->users++; rt_mutex_unlock(&isp->mutex); + + /* Ensure that a mode is set */ + if (asd) + v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode); + return 0; css_error: @@ -1171,6 +1176,12 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma) u32 origin_size, new_size; int ret; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (!(vma->vm_flags & (VM_WRITE | VM_READ))) return -EACCES; diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c index 135994d44802c70b3b6415cf38cd787261888f5e..c9ee85037644fdd893ca94f3889d16515ef6967a 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c +++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c @@ -481,7 +481,7 @@ static int atomisp_get_acpi_power(struct device *dev) static u8 gmin_get_pmic_id_and_addr(struct device *dev) { - struct i2c_client *power; + struct i2c_client *power = NULL; static u8 pmic_i2c_addr; if (pmic_id) @@ -729,6 +729,21 @@ static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs, return 0; } +/* + * Some boards contain a hw-bug where turning eldo2 back on after having turned + * it off causes the CPLM3218 ambient-light-sensor on the image-sensor's I2C bus + * to crash, hanging the bus. Do not turn eldo2 off on these systems. 
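dmi_check_system() is the standard vehicle for board quirks like this one: it walks a zero-terminated table of struct dmi_system_id entries and returns the number of entries whose DMI_MATCH fields all match the running firmware's DMI strings. A minimal sketch, with placeholder identity strings:

	#include <linux/dmi.h>

	static const struct dmi_system_id quirk_ids[] = {
		{
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Some Board"),
			},
		},
		{ }	/* zero entry terminates the table */
	};

	/* in the power-off path: honour the quirk */
	if (dmi_check_system(quirk_ids))
		return 0;	/* leave the rail on */
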
+ */ +static const struct dmi_system_id axp_leave_eldo2_on_ids[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"), + DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"), + }, + }, + { } +}; + static int axp_v1p8_on(struct device *dev, struct gmin_subdev *gs) { int ret; @@ -763,6 +778,9 @@ static int axp_v1p8_off(struct device *dev, struct gmin_subdev *gs) if (ret) return ret; + if (dmi_check_system(axp_leave_eldo2_on_ids)) + return 0; + ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v, ELDO_CTRL_REG, gs->eldo2_ctrl_shift, false); return ret; diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c index 9da82855552dec0c65b80c1aeb35e12940b5c4a9..8a0648fd7c8135bbb371dc7f1c3b236548ce3e03 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c @@ -646,6 +646,12 @@ static int atomisp_g_input(struct file *file, void *fh, unsigned int *input) struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + rt_mutex_lock(&isp->mutex); *input = asd->input_curr; rt_mutex_unlock(&isp->mutex); @@ -665,6 +671,12 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) struct v4l2_subdev *motor; int ret; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + rt_mutex_lock(&isp->mutex); if (input >= ATOM_ISP_MAX_INPUTS || input >= isp->input_cnt) { dev_dbg(isp->dev, "input_cnt: %d\n", isp->input_cnt); @@ -761,18 +773,33 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - struct v4l2_subdev_mbus_code_enum code = { 0 }; + struct v4l2_subdev_mbus_code_enum code = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + }; + struct v4l2_subdev *camera; unsigned int i, fi = 0; int rval; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + + camera = isp->inputs[asd->input_curr].camera; + if(!camera) { + dev_err(isp->dev, "%s(): camera is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + rt_mutex_lock(&isp->mutex); - rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, pad, - enum_mbus_code, NULL, &code); + + rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code); if (rval == -ENOIOCTLCMD) { dev_warn(isp->dev, - "enum_mbus_code pad op not supported. Please fix your sensor driver!\n"); - // rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - // video, enum_mbus_fmt, 0, &code.code); + "enum_mbus_code pad op not supported by %s. 
Please fix your sensor driver!\n", + camera->name); } rt_mutex_unlock(&isp->mutex); @@ -802,6 +829,8 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, f->pixelformat = format->pixelformat; return 0; } + dev_err(isp->dev, "%s(): format for code %x not found.\n", + __func__, code.code); return -EINVAL; } @@ -834,6 +863,72 @@ static int atomisp_g_fmt_file(struct file *file, void *fh, return 0; } +static int atomisp_adjust_fmt(struct v4l2_format *f) +{ + const struct atomisp_format_bridge *format_bridge; + u32 padded_width; + + format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat); + + padded_width = f->fmt.pix.width + pad_w; + + if (format_bridge->planar) { + f->fmt.pix.bytesperline = padded_width; + f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * + DIV_ROUND_UP(format_bridge->depth * + padded_width, 8)); + } else { + f->fmt.pix.bytesperline = DIV_ROUND_UP(format_bridge->depth * + padded_width, 8); + f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * f->fmt.pix.bytesperline); + } + + if (f->fmt.pix.field == V4L2_FIELD_ANY) + f->fmt.pix.field = V4L2_FIELD_NONE; + + format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat); + if (!format_bridge) + return -EINVAL; + + /* Currently, raw formats are broken!!! */ + if (format_bridge->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) { + f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420; + + format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat); + if (!format_bridge) + return -EINVAL; + } + + padded_width = f->fmt.pix.width + pad_w; + + if (format_bridge->planar) { + f->fmt.pix.bytesperline = padded_width; + f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * + DIV_ROUND_UP(format_bridge->depth * + padded_width, 8)); + } else { + f->fmt.pix.bytesperline = DIV_ROUND_UP(format_bridge->depth * + padded_width, 8); + f->fmt.pix.sizeimage = PAGE_ALIGN(f->fmt.pix.height * f->fmt.pix.bytesperline); + } + + if (f->fmt.pix.field == V4L2_FIELD_ANY) + f->fmt.pix.field = V4L2_FIELD_NONE; + + /* + * FIXME: do we need to setup this differently, depending on the + * sensor or the pipeline? + */ + f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709; + f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_709; + f->fmt.pix.xfer_func = V4L2_XFER_FUNC_709; + + f->fmt.pix.width -= pad_w; + f->fmt.pix.height -= pad_h; + + return 0; +} + /* This function looks up the closest available resolution. 
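The atomisp_adjust_fmt() helper added above derives the user-visible stride and buffer size from a padded width: planar formats keep bytesperline equal to the padded width and fold the per-plane bit depth into sizeimage, while packed formats fold the full bit depth into bytesperline and derive sizeimage from it. Reduced to the arithmetic, with depth in bits per pixel:

	padded = width + pad_w;
	if (planar) {
		bytesperline = padded;
		sizeimage = PAGE_ALIGN(height * DIV_ROUND_UP(depth * padded, 8));
	} else {
		bytesperline = DIV_ROUND_UP(depth * padded, 8);
		sizeimage = PAGE_ALIGN(height * bytesperline);
	}

The computation runs twice in the hunk because raw pixel formats are rejected and replaced with V4L2_PIX_FMT_YUV420, after which the stride and size must be recomputed for the substituted format.
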
*/ static int atomisp_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f) @@ -845,7 +940,11 @@ static int atomisp_try_fmt_cap(struct file *file, void *fh, rt_mutex_lock(&isp->mutex); ret = atomisp_try_fmt(vdev, f, NULL); rt_mutex_unlock(&isp->mutex); - return ret; + + if (ret) + return ret; + + return atomisp_adjust_fmt(f); } static int atomisp_s_fmt_cap(struct file *file, void *fh, @@ -1027,6 +1126,12 @@ int __atomisp_reqbufs(struct file *file, void *fh, u16 stream_id = atomisp_source_pad_to_stream_id(asd, source_pad); int ret = 0, i = 0; + if (!asd) { + dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (req->count == 0) { mutex_lock(&pipe->capq.vb_lock); if (!list_empty(&pipe->capq.stream)) @@ -1154,6 +1259,12 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) u32 pgnr; int ret = 0; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + rt_mutex_lock(&isp->mutex); if (isp->isp_fatal_error) { ret = -EIO; @@ -1389,6 +1500,12 @@ static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) struct atomisp_device *isp = video_get_drvdata(vdev); int ret = 0; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + rt_mutex_lock(&isp->mutex); if (isp->isp_fatal_error) { @@ -1640,6 +1757,12 @@ static int atomisp_streamon(struct file *file, void *fh, int ret = 0; unsigned long irqflags; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + dev_dbg(isp->dev, "Start stream on pad %d for asd%d\n", atomisp_subdev_source_pad(vdev), asd->index); @@ -1901,6 +2024,12 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) unsigned long flags; bool first_streamoff = false; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + dev_dbg(isp->dev, "Stop stream on pad %d for asd%d\n", atomisp_subdev_source_pad(vdev), asd->index); @@ -2150,6 +2279,12 @@ static int atomisp_g_ctrl(struct file *file, void *fh, struct atomisp_device *isp = video_get_drvdata(vdev); int i, ret = -EINVAL; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + for (i = 0; i < ctrls_num; i++) { if (ci_v4l2_controls[i].id == control->id) { ret = 0; @@ -2229,6 +2364,12 @@ static int atomisp_s_ctrl(struct file *file, void *fh, struct atomisp_device *isp = video_get_drvdata(vdev); int i, ret = -EINVAL; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + for (i = 0; i < ctrls_num; i++) { if (ci_v4l2_controls[i].id == control->id) { ret = 0; @@ -2310,6 +2451,12 @@ static int atomisp_queryctl(struct file *file, void *fh, struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; struct atomisp_device *isp = video_get_drvdata(vdev); + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + switch (qc->id) { case V4L2_CID_FOCUS_ABSOLUTE: case V4L2_CID_FOCUS_RELATIVE: @@ -2355,6 +2502,12 @@ static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh, int i; int ret = 0; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (!IS_ISP2401) motor = 
isp->inputs[asd->input_curr].motor; else @@ -2466,6 +2619,12 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh, int i; int ret = 0; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (!IS_ISP2401) motor = isp->inputs[asd->input_curr].motor; else @@ -2591,6 +2750,12 @@ static int atomisp_g_parm(struct file *file, void *fh, struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; struct atomisp_device *isp = video_get_drvdata(vdev); + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dev_err(isp->dev, "unsupported v4l2 buf type\n"); return -EINVAL; @@ -2613,6 +2778,12 @@ static int atomisp_s_parm(struct file *file, void *fh, int rval; int fps; + if (!asd) { + dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", + __func__, vdev->name); + return -EINVAL; + } + if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dev_err(isp->dev, "unsupported v4l2 buf type\n"); return -EINVAL; diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c index dcc2dd981ca6077c5fcbafaa41d0e8944c8711f1..628e85799274d6cf50c5787e92c00a543cbe384a 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c +++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c @@ -1178,23 +1178,28 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd) atomisp_init_acc_pipe(asd, &asd->video_acc); - ret = atomisp_video_init(&asd->video_in, "MEMORY"); + ret = atomisp_video_init(&asd->video_in, "MEMORY", + ATOMISP_RUN_MODE_SDV); if (ret < 0) return ret; - ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE"); + ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE", + ATOMISP_RUN_MODE_STILL_CAPTURE); if (ret < 0) return ret; - ret = atomisp_video_init(&asd->video_out_vf, "VIEWFINDER"); + ret = atomisp_video_init(&asd->video_out_vf, "VIEWFINDER", + ATOMISP_RUN_MODE_CONTINUOUS_CAPTURE); if (ret < 0) return ret; - ret = atomisp_video_init(&asd->video_out_preview, "PREVIEW"); + ret = atomisp_video_init(&asd->video_out_preview, "PREVIEW", + ATOMISP_RUN_MODE_PREVIEW); if (ret < 0) return ret; - ret = atomisp_video_init(&asd->video_out_video_capture, "VIDEO"); + ret = atomisp_video_init(&asd->video_out_video_capture, "VIDEO", + ATOMISP_RUN_MODE_VIDEO); if (ret < 0) return ret; diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h index 330a77eed8aa65501491c2bd63e6720a4ac2b66a..12215d7406169066e3d3b232dd4752f4216142f1 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_subdev.h +++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h @@ -81,6 +81,9 @@ struct atomisp_video_pipe { /* the link list to store per_frame parameters */ struct list_head per_frame_params; + /* Store here the initial run mode */ + unsigned int default_run_mode; + unsigned int buffers_in_css; /* irq_lock is used to protect video buffer state change operations and diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c index fa1bd99cd6f17758ac3626378aecc6ac9a54f4f2..8aeea74cfd06bb0244c45ae9c726a79bb35024d5 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c @@ -447,7 +447,8 @@ const struct atomisp_dfs_config dfs_config_cht_soc = { .dfs_table_size = ARRAY_SIZE(dfs_rules_cht_soc), }; -int 
atomisp_video_init(struct atomisp_video_pipe *video, const char *name) +int atomisp_video_init(struct atomisp_video_pipe *video, const char *name, + unsigned int run_mode) { int ret; const char *direction; @@ -478,6 +479,7 @@ int atomisp_video_init(struct atomisp_video_pipe *video, const char *name) "ATOMISP ISP %s %s", name, direction); video->vdev.release = video_device_release_empty; video_set_drvdata(&video->vdev, video->isp); + video->default_run_mode = run_mode; return 0; } @@ -711,15 +713,15 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable) dev_dbg(isp->dev, "IUNIT power-%s.\n", enable ? "on" : "off"); - /*WA:Enable DVFS*/ + /* WA for P-Unit, if DVFS enabled, ISP timeout observed */ if (IS_CHT && enable) - punit_ddr_dvfs_enable(true); + punit_ddr_dvfs_enable(false); /* * FIXME:WA for ECS28A, with this sleep, CTS * android.hardware.camera2.cts.CameraDeviceTest#testCameraDeviceAbort * PASS, no impact on other platforms - */ + */ if (IS_BYT && enable) msleep(10); @@ -727,7 +729,7 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable) iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, val, MRFLD_ISPSSPM0_ISPSSC_MASK); - /*WA:Enable DVFS*/ + /* WA:Enable DVFS */ if (IS_CHT && !enable) punit_ddr_dvfs_enable(true); @@ -1182,6 +1184,7 @@ static void atomisp_unregister_entities(struct atomisp_device *isp) v4l2_device_unregister(&isp->v4l2_dev); media_device_unregister(&isp->media_dev); + media_device_cleanup(&isp->media_dev); } static int atomisp_register_entities(struct atomisp_device *isp) diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h index 81bb356b81720a359c8996acfce0f42a89b1abc1..72611b8286a4aec66728b175447bb7673be57cb3 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h @@ -27,7 +27,8 @@ struct v4l2_device; struct atomisp_device; struct firmware; -int atomisp_video_init(struct atomisp_video_pipe *video, const char *name); +int atomisp_video_init(struct atomisp_video_pipe *video, const char *name, + unsigned int run_mode); void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name); void atomisp_video_unregister(struct atomisp_video_pipe *video); void atomisp_acc_unregister(struct atomisp_acc_pipe *video); diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c index 6a5ee46070898b3b15cd44b1cf4ea594a5e3f307..c1cda16f2dc018d10ca9014511dca2c3688ec80b 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c @@ -39,7 +39,7 @@ struct hmm_bo_device bo_device; struct hmm_pool dynamic_pool; struct hmm_pool reserved_pool; -static ia_css_ptr dummy_ptr; +static ia_css_ptr dummy_ptr = mmgr_EXCEPTION; static bool hmm_initialized; struct _hmm_mem_stat hmm_mem_stat; @@ -209,7 +209,7 @@ int hmm_init(void) void hmm_cleanup(void) { - if (!dummy_ptr) + if (dummy_ptr == mmgr_EXCEPTION) return; sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group); @@ -288,7 +288,8 @@ void hmm_free(ia_css_ptr virt) dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt); - WARN_ON(!virt); + if (WARN_ON(virt == mmgr_EXCEPTION)) + return; bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt); diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c index ddee04c8248d0433552b0c41cdce24b1cb84393c..54a18921fbd15d8dd0a6ae41c0d09f2cf81c0e4a 100644 --- 
a/drivers/staging/media/atomisp/pci/sh_css.c +++ b/drivers/staging/media/atomisp/pci/sh_css.c @@ -527,6 +527,7 @@ ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream) return bpp; } +/* TODO: move define to proper file in tools */ #define GP_ISEL_TPG_MODE 0x90058 #if !defined(ISP2401) @@ -579,12 +580,8 @@ sh_css_config_input_network(struct ia_css_stream *stream) { vblank_cycles = vblank_lines * (width + hblank_cycles); sh_css_sp_configure_sync_gen(width, height, hblank_cycles, vblank_cycles); - if (!IS_ISP2401) { - if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG) { - /* TODO: move define to proper file in tools */ - ia_css_device_store_uint32(GP_ISEL_TPG_MODE, 0); - } - } + if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG) + ia_css_device_store_uint32(GP_ISEL_TPG_MODE, 0); } ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_config_input_network() leave:\n"); @@ -1019,16 +1016,14 @@ static bool sh_css_translate_stream_cfg_to_isys_stream_descr( * ia_css_isys_stream_capture_indication() instead of * ia_css_pipeline_sp_wait_for_isys_stream_N() as isp processing of * capture takes longer than getting an ISYS frame - * - * Only 2401 relevant ?? */ -#if 0 // FIXME: NOT USED on Yocto Aero - isys_stream_descr->polling_mode - = early_polling ? INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST - : INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME; - ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, - "sh_css_translate_stream_cfg_to_isys_stream_descr() leave:\n"); -#endif + if (IS_ISP2401) { + isys_stream_descr->polling_mode + = early_polling ? INPUT_SYSTEM_POLL_ON_CAPTURE_REQUEST + : INPUT_SYSTEM_POLL_ON_WAIT_FOR_FRAME; + ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, + "sh_css_translate_stream_cfg_to_isys_stream_descr() leave:\n"); + } return rc; } @@ -1451,7 +1446,7 @@ static void start_pipe( assert(me); /* all callers are in this file and call with non null argument */ - if (!IS_ISP2401) { + if (IS_ISP2401) { coord = &me->config.internal_frame_origin_bqs_on_sctbl; params = me->stream->isp_params_configs; } diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c index d5ae7f0b5864bb7acde86e6b8adb62c660960f5f..651eda0469b23eed8d090752f819d7c7657e19dc 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_mipi.c +++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c @@ -389,17 +389,17 @@ static bool buffers_needed(struct ia_css_pipe *pipe) { if (!IS_ISP2401) { if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) - return false; - else return true; + else + return false; } if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR || pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG || pipe->stream->config.mode == IA_CSS_INPUT_MODE_PRBS) - return false; + return true; - return true; + return false; } int @@ -439,14 +439,17 @@ allocate_mipi_frames(struct ia_css_pipe *pipe, return 0; /* AM TODO: Check */ } - if (!IS_ISP2401) + if (!IS_ISP2401) { port = (unsigned int)pipe->stream->config.source.port.port; - else - err = ia_css_mipi_is_source_port_valid(pipe, &port); + } else { + /* Returns true if port is valid. 
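The same invert-the-bool fix repeats across the three sh_css_mipi.c hunks (allocate, free and send): ia_css_mipi_is_source_port_valid() returns true on success, but the old code stored that result directly in err, so on ISP2401 a perfectly valid port made the port >= N_CSI_PORTS || err check fire. The corrected idiom, condensed:

	if (!IS_ISP2401) {
		port = (unsigned int)pipe->stream->config.source.port.port;
	} else {
		/* helper returns true when the port is valid: invert for err */
		err = !ia_css_mipi_is_source_port_valid(pipe, &port);
	}

	if ((!IS_ISP2401 && port >= N_CSI_PORTS) || (IS_ISP2401 && err))
		return -EINVAL;	/* each caller uses its own error path */
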
So, invert it */ + err = !ia_css_mipi_is_source_port_valid(pipe, &port); + } assert(port < N_CSI_PORTS); - if (port >= N_CSI_PORTS || err) { + if ((!IS_ISP2401 && port >= N_CSI_PORTS) || + (IS_ISP2401 && err)) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "allocate_mipi_frames(%p) exit: error: port is not correct (port=%d).\n", pipe, port); @@ -571,14 +574,17 @@ free_mipi_frames(struct ia_css_pipe *pipe) { return err; } - if (!IS_ISP2401) + if (!IS_ISP2401) { port = (unsigned int)pipe->stream->config.source.port.port; - else - err = ia_css_mipi_is_source_port_valid(pipe, &port); + } else { + /* Returns true if port is valid. So, invert it */ + err = !ia_css_mipi_is_source_port_valid(pipe, &port); + } assert(port < N_CSI_PORTS); - if (port >= N_CSI_PORTS || err) { + if ((!IS_ISP2401 && port >= N_CSI_PORTS) || + (IS_ISP2401 && err)) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "free_mipi_frames(%p, %d) exit: error: pipe port is not correct.\n", pipe, port); @@ -683,14 +689,17 @@ send_mipi_frames(struct ia_css_pipe *pipe) { /* TODO: AM: maybe this should be returning an error. */ } - if (!IS_ISP2401) + if (!IS_ISP2401) { port = (unsigned int)pipe->stream->config.source.port.port; - else - err = ia_css_mipi_is_source_port_valid(pipe, &port); + } else { + /* Returns true if port is valid. So, invert it */ + err = !ia_css_mipi_is_source_port_valid(pipe, &port); + } assert(port < N_CSI_PORTS); - if (port >= N_CSI_PORTS || err) { + if ((!IS_ISP2401 && port >= N_CSI_PORTS) || + (IS_ISP2401 && err)) { IA_CSS_ERROR("send_mipi_frames(%p) exit: invalid port specified (port=%d).\n", pipe, port); return err; diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c index 24fc497bd4915eb2e138f839dcb4d25bfa348c02..8d6514c45eeb63ea8e5cfea0e9dd0234a9462a09 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_params.c +++ b/drivers/staging/media/atomisp/pci/sh_css_params.c @@ -2437,7 +2437,7 @@ sh_css_create_isp_params(struct ia_css_stream *stream, unsigned int i; struct sh_css_ddr_address_map *ddr_ptrs; struct sh_css_ddr_address_map_size *ddr_ptrs_size; - int err = 0; + int err; size_t params_size; struct ia_css_isp_parameters *params = kvmalloc(sizeof(struct ia_css_isp_parameters), GFP_KERNEL); @@ -2482,7 +2482,11 @@ sh_css_create_isp_params(struct ia_css_stream *stream, succ &= (ddr_ptrs->macc_tbl != mmgr_NULL); *isp_params_out = params; - return err; + + if (!succ) + return -ENOMEM; + + return 0; } static bool diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c index 7749ca9a8ebbfe7a432bdf4a9b4976b0eebf4a80..bc97ec0a7e4af193a22e4fd5b0855552e79b8666 100644 --- a/drivers/staging/media/hantro/hantro_drv.c +++ b/drivers/staging/media/hantro/hantro_drv.c @@ -829,7 +829,7 @@ static int hantro_probe(struct platform_device *pdev) ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks); if (ret) { dev_err(&pdev->dev, "Failed to prepare clocks\n"); - return ret; + goto err_pm_disable; } ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev); @@ -885,6 +885,7 @@ static int hantro_probe(struct platform_device *pdev) v4l2_device_unregister(&vpu->v4l2_dev); err_clk_unprepare: clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks); +err_pm_disable: pm_runtime_dont_use_autosuspend(vpu->dev); pm_runtime_disable(vpu->dev); return ret; diff --git a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c index 
b88dc4ed06db710806a1d647bf5624e582dca395..ed244aee196c3d6d631a7f64905679050de6cf08 100644 --- a/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c +++ b/drivers/staging/media/hantro/hantro_h1_jpeg_enc.c @@ -23,7 +23,7 @@ static void hantro_h1_set_src_img_ctrl(struct hantro_dev *vpu, reg = H1_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width) | H1_REG_IN_IMG_CTRL_OVRFLR_D4(0) - | H1_REG_IN_IMG_CTRL_OVRFLB_D4(0) + | H1_REG_IN_IMG_CTRL_OVRFLB(0) | H1_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt); vepu_write_relaxed(vpu, reg, H1_REG_IN_IMG_CTRL); } diff --git a/drivers/staging/media/hantro/hantro_h1_regs.h b/drivers/staging/media/hantro/hantro_h1_regs.h index d6e9825bb5c7be3ca3e336c3649c5bfc7b697b78..30e7e7b920b553aeca0c3fc1ca4be04712d8ef42 100644 --- a/drivers/staging/media/hantro/hantro_h1_regs.h +++ b/drivers/staging/media/hantro/hantro_h1_regs.h @@ -47,7 +47,7 @@ #define H1_REG_IN_IMG_CTRL 0x03c #define H1_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12) #define H1_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10) -#define H1_REG_IN_IMG_CTRL_OVRFLB_D4(x) ((x) << 6) +#define H1_REG_IN_IMG_CTRL_OVRFLB(x) ((x) << 6) #define H1_REG_IN_IMG_CTRL_FMT(x) ((x) << 2) #define H1_REG_ENC_CTRL0 0x040 #define H1_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26) diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c index db7022707ff8dd4ce5cfd9f89203c2e46885da3b..86ccc8937afcaf0a89c96cfc92d3a13de3ee2121 100644 --- a/drivers/staging/media/meson/vdec/esparser.c +++ b/drivers/staging/media/meson/vdec/esparser.c @@ -328,7 +328,12 @@ esparser_queue(struct amvdec_session *sess, struct vb2_v4l2_buffer *vbuf) offset = esparser_get_offset(sess); - amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags); + ret = amvdec_add_ts(sess, vb->timestamp, vbuf->timecode, offset, vbuf->flags); + if (ret) { + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); + return ret; + } + dev_dbg(core->dev, "esparser: ts = %llu pld_size = %u offset = %08X flags = %08X\n", vb->timestamp, payload_size, offset, vbuf->flags); diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.c b/drivers/staging/media/meson/vdec/vdec_helpers.c index 7f07a9175815f025a918e0ecb33788e516e2735c..db4a854e59a38df94fc72084c38e8fc9174e7783 100644 --- a/drivers/staging/media/meson/vdec/vdec_helpers.c +++ b/drivers/staging/media/meson/vdec/vdec_helpers.c @@ -227,13 +227,16 @@ int amvdec_set_canvases(struct amvdec_session *sess, } EXPORT_SYMBOL_GPL(amvdec_set_canvases); -void amvdec_add_ts(struct amvdec_session *sess, u64 ts, - struct v4l2_timecode tc, u32 offset, u32 vbuf_flags) +int amvdec_add_ts(struct amvdec_session *sess, u64 ts, + struct v4l2_timecode tc, u32 offset, u32 vbuf_flags) { struct amvdec_timestamp *new_ts; unsigned long flags; new_ts = kzalloc(sizeof(*new_ts), GFP_KERNEL); + if (!new_ts) + return -ENOMEM; + new_ts->ts = ts; new_ts->tc = tc; new_ts->offset = offset; @@ -242,6 +245,7 @@ void amvdec_add_ts(struct amvdec_session *sess, u64 ts, spin_lock_irqsave(&sess->ts_spinlock, flags); list_add_tail(&new_ts->list, &sess->timestamps); spin_unlock_irqrestore(&sess->ts_spinlock, flags); + return 0; } EXPORT_SYMBOL_GPL(amvdec_add_ts); diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h index cfaed52ab526577e17d2d50659a9761a498a9235..798e5a8a9b3f19b83f4d56779bbb3e2b358b0d40 100644 --- a/drivers/staging/media/meson/vdec/vdec_helpers.h +++ b/drivers/staging/media/meson/vdec/vdec_helpers.h @@ -55,8 +55,8 @@ void amvdec_dst_buf_done_offset(struct amvdec_session *sess, * @offset: 
offset in the VIFIFO where the associated packet was written * @flags the vb2_v4l2_buffer flags */ -void amvdec_add_ts(struct amvdec_session *sess, u64 ts, - struct v4l2_timecode tc, u32 offset, u32 flags); +int amvdec_add_ts(struct amvdec_session *sess, u64 ts, + struct v4l2_timecode tc, u32 offset, u32 flags); void amvdec_remove_ts(struct amvdec_session *sess, u64 ts); /** diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c index de7442d4834dcab1237dc7f9558021c6871c1551..d3e26bfe6c90b21c9a222263c483a2c69f35f694 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c @@ -38,7 +38,7 @@ struct cedrus_h264_sram_ref_pic { #define CEDRUS_H264_FRAME_NUM 18 -#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (16 * SZ_1K) +#define CEDRUS_NEIGHBOR_INFO_BUF_SIZE (32 * SZ_1K) #define CEDRUS_MIN_PIC_INFO_BUF_SIZE (130 * SZ_1K) static void cedrus_h264_write_sram(struct cedrus_dev *dev, diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c index 10744fab7ceaa1e11471d9b4ace77794d45e2bd7..368439cf5e1744229dda9b6fb16eb7ee4267d551 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c @@ -23,7 +23,7 @@ * Subsequent BSP implementations seem to double the neighbor info buffer size * for the H6 SoC, which may be related to 10 bit H265 support. */ -#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (397 * SZ_1K) +#define CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE (794 * SZ_1K) #define CEDRUS_H265_ENTRY_POINTS_BUF_SIZE (4 * SZ_1K) #define CEDRUS_H265_MV_COL_BUF_UNIT_CTB_SIZE 160 diff --git a/drivers/staging/media/zoran/zoran.h b/drivers/staging/media/zoran/zoran.h index e7fe8da7732c7845189fe9b0fb52bde7db288829..3f223e5b1872ba2e687721639a0793ba8a7ae364 100644 --- a/drivers/staging/media/zoran/zoran.h +++ b/drivers/staging/media/zoran/zoran.h @@ -314,6 +314,6 @@ static inline struct zoran *to_zoran(struct v4l2_device *v4l2_dev) #endif -int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq); +int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir); void zoran_queue_exit(struct zoran *zr); int zr_set_buf(struct zoran *zr); diff --git a/drivers/staging/media/zoran/zoran_card.c b/drivers/staging/media/zoran/zoran_card.c index dfc60e2e9dd7ab355cffd7731fa4b1a9e290aa39..fe0cca12119c7510c6041f2fc5800fe1f26a52c9 100644 --- a/drivers/staging/media/zoran/zoran_card.c +++ b/drivers/staging/media/zoran/zoran_card.c @@ -802,6 +802,52 @@ int zoran_check_jpg_settings(struct zoran *zr, return 0; } +static int zoran_init_video_device(struct zoran *zr, struct video_device *video_dev, int dir) +{ + int err; + + /* Now add the template and register the device unit. */ + *video_dev = zoran_template; + video_dev->v4l2_dev = &zr->v4l2_dev; + video_dev->lock = &zr->lock; + video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | dir; + + strscpy(video_dev->name, ZR_DEVNAME(zr), sizeof(video_dev->name)); + /* + * It's not a mem2mem device, but you can both capture and output from one and the same + * device. This should really be split up into two device nodes, but that's a job for + * another day. 
+ */ + video_dev->vfl_dir = VFL_DIR_M2M; + zoran_queue_init(zr, &zr->vq, V4L2_BUF_TYPE_VIDEO_CAPTURE); + + err = video_register_device(video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]); + if (err < 0) + return err; + video_set_drvdata(video_dev, zr); + return 0; +} + +static void zoran_exit_video_devices(struct zoran *zr) +{ + video_unregister_device(zr->video_dev); + kfree(zr->video_dev); +} + +static int zoran_init_video_devices(struct zoran *zr) +{ + int err; + + zr->video_dev = video_device_alloc(); + if (!zr->video_dev) + return -ENOMEM; + + err = zoran_init_video_device(zr, zr->video_dev, V4L2_CAP_VIDEO_CAPTURE); + if (err) + kfree(zr->video_dev); + return err; +} + void zoran_open_init_params(struct zoran *zr) { int i; @@ -873,17 +919,11 @@ static int zr36057_init(struct zoran *zr) zoran_open_init_params(zr); /* allocate memory *before* doing anything to the hardware in case allocation fails */ - zr->video_dev = video_device_alloc(); - if (!zr->video_dev) { - err = -ENOMEM; - goto exit; - } zr->stat_com = dma_alloc_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), &zr->p_sc, GFP_KERNEL); if (!zr->stat_com) { - err = -ENOMEM; - goto exit_video; + return -ENOMEM; } for (j = 0; j < BUZ_NUM_STAT_COM; j++) zr->stat_com[j] = cpu_to_le32(1); /* mark as unavailable to zr36057 */ @@ -896,26 +936,9 @@ static int zr36057_init(struct zoran *zr) goto exit_statcom; } - /* Now add the template and register the device unit. */ - *zr->video_dev = zoran_template; - zr->video_dev->v4l2_dev = &zr->v4l2_dev; - zr->video_dev->lock = &zr->lock; - zr->video_dev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE; - - strscpy(zr->video_dev->name, ZR_DEVNAME(zr), sizeof(zr->video_dev->name)); - /* - * It's not a mem2mem device, but you can both capture and output from one and the same - * device. This should really be split up into two device nodes, but that's a job for - * another day. 
- */ - zr->video_dev->vfl_dir = VFL_DIR_M2M; - - zoran_queue_init(zr, &zr->vq); - - err = video_register_device(zr->video_dev, VFL_TYPE_VIDEO, video_nr[zr->id]); - if (err < 0) + err = zoran_init_video_devices(zr); + if (err) goto exit_statcomb; - video_set_drvdata(zr->video_dev, zr); zoran_init_hardware(zr); if (!pass_through) { @@ -930,9 +953,6 @@ static int zr36057_init(struct zoran *zr) dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb); exit_statcom: dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32), zr->stat_com, zr->p_sc); -exit_video: - kfree(zr->video_dev); -exit: return err; } @@ -964,7 +984,7 @@ static void zoran_remove(struct pci_dev *pdev) dma_free_coherent(&zr->pci_dev->dev, BUZ_NUM_STAT_COM * sizeof(u32) * 2, zr->stat_comb, zr->p_scb); pci_release_regions(pdev); pci_disable_device(zr->pci_dev); - video_unregister_device(zr->video_dev); + zoran_exit_video_devices(zr); exit_free: v4l2_ctrl_handler_free(&zr->hdl); v4l2_device_unregister(&zr->v4l2_dev); @@ -1068,8 +1088,10 @@ static int zoran_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) - return -ENODEV; - vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32)); + return err; + err = vb2_dma_contig_set_max_seg_size(&pdev->dev, U32_MAX); + if (err) + return err; nr = zoran_num++; if (nr >= BUZ_MAX) { diff --git a/drivers/staging/media/zoran/zoran_device.c b/drivers/staging/media/zoran/zoran_device.c index e569a1341d0103d2b329841ba81b228fa64c443d..913f5a3c5bfce083389678c797ed4c999b461ab4 100644 --- a/drivers/staging/media/zoran/zoran_device.c +++ b/drivers/staging/media/zoran/zoran_device.c @@ -879,7 +879,7 @@ static void zoran_reap_stat_com(struct zoran *zr) if (zr->jpg_settings.tmp_dcm == 1) i = (zr->jpg_dma_tail - zr->jpg_err_shift) & BUZ_MASK_STAT_COM; else - i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2 + 1; + i = ((zr->jpg_dma_tail - zr->jpg_err_shift) & 1) * 2; stat_com = le32_to_cpu(zr->stat_com[i]); if ((stat_com & 1) == 0) { @@ -891,6 +891,11 @@ static void zoran_reap_stat_com(struct zoran *zr) size = (stat_com & GENMASK(22, 1)) >> 1; buf = zr->inuse[i]; + if (!buf) { + spin_unlock_irqrestore(&zr->queued_bufs_lock, flags); + pci_err(zr->pci_dev, "No buffer at slot %d\n", i); + return; + } buf->vbuf.vb2_buf.timestamp = ktime_get_ns(); if (zr->codec_mode == BUZ_MODE_MOTION_COMPRESS) { diff --git a/drivers/staging/media/zoran/zoran_driver.c b/drivers/staging/media/zoran/zoran_driver.c index 808196ea5b81b14e8e6ae46657a95c7c19fbad81..ea04f6c732b212bf61f17e444012244a0f1719cf 100644 --- a/drivers/staging/media/zoran/zoran_driver.c +++ b/drivers/staging/media/zoran/zoran_driver.c @@ -255,8 +255,6 @@ static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability strscpy(cap->card, ZR_DEVNAME(zr), sizeof(cap->card)); strscpy(cap->driver, "zoran", sizeof(cap->driver)); snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", pci_name(zr->pci_dev)); - cap->device_caps = zr->video_dev->device_caps; - cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } @@ -582,6 +580,9 @@ static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std) struct zoran *zr = video_drvdata(file); int res = 0; + if (zr->norm == std) + return 0; + if (zr->running != ZORAN_MAP_MODE_NONE) return -EBUSY; @@ -737,6 +738,7 @@ static int zoran_g_parm(struct file *file, void *priv, struct v4l2_streamparm *p if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) 
return -EINVAL; + parm->parm.capture.readbuffers = 9; return 0; } @@ -867,6 +869,10 @@ int zr_set_buf(struct zoran *zr) vbuf = &buf->vbuf; buf->vbuf.field = V4L2_FIELD_INTERLACED; + if (BUZ_MAX_HEIGHT < (zr->v4l_settings.height * 2)) + buf->vbuf.field = V4L2_FIELD_INTERLACED; + else + buf->vbuf.field = V4L2_FIELD_TOP; vb2_set_plane_payload(&buf->vbuf.vb2_buf, 0, zr->buffer_size); vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_DONE); zr->inuse[0] = NULL; @@ -926,6 +932,7 @@ static int zr_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) zr->stat_com[j] = cpu_to_le32(1); zr->inuse[j] = NULL; } + zr->vbseq = 0; if (zr->map_mode != ZORAN_MAP_MODE_RAW) { pci_info(zr->pci_dev, "START JPG\n"); @@ -1006,7 +1013,7 @@ static const struct vb2_ops zr_video_qops = { .wait_finish = vb2_ops_wait_finish, }; -int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq) +int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq, int dir) { int err; @@ -1014,8 +1021,9 @@ int zoran_queue_init(struct zoran *zr, struct vb2_queue *vq) INIT_LIST_HEAD(&zr->queued_bufs); vq->dev = &zr->pci_dev->dev; - vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - vq->io_modes = VB2_USERPTR | VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE; + vq->type = dir; + + vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ | VB2_WRITE; vq->drv_priv = zr; vq->buf_struct_size = sizeof(struct zr_buffer); vq->ops = &zr_video_qops; diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts index a7c0d3115d7264f36b25c563a9ccee03c9923e2d..d48ca5a25c2c4f635b176ba74c26a68485b48d95 100644 --- a/drivers/staging/mt7621-dts/gbpc1.dts +++ b/drivers/staging/mt7621-dts/gbpc1.dts @@ -11,7 +11,8 @@ memory@0 { device_type = "memory"; - reg = <0x0 0x1c000000>, <0x20000000 0x4000000>; + reg = <0x00000000 0x1c000000>, + <0x20000000 0x04000000>; }; chosen { @@ -37,24 +38,16 @@ gpio-leds { compatible = "gpio-leds"; - system { - label = "gb-pc1:green:system"; + power { + label = "green:power"; gpios = <&gpio 6 GPIO_ACTIVE_LOW>; + linux,default-trigger = "default-on"; }; - status { - label = "gb-pc1:green:status"; + system { + label = "green:system"; gpios = <&gpio 8 GPIO_ACTIVE_LOW>; - }; - - lan1 { - label = "gb-pc1:green:lan1"; - gpios = <&gpio 24 GPIO_ACTIVE_LOW>; - }; - - lan2 { - label = "gb-pc1:green:lan2"; - gpios = <&gpio 25 GPIO_ACTIVE_LOW>; + linux,default-trigger = "disk-activity"; }; }; }; @@ -94,9 +87,8 @@ partition@50000 { label = "firmware"; - reg = <0x50000 0x1FB0000>; + reg = <0x50000 0x1fb0000>; }; - }; }; @@ -122,9 +114,12 @@ }; &pinctrl { - state_default: pinctrl0 { - default_gpio: gpio { - groups = "wdt", "rgmii2", "uart3"; + pinctrl-names = "default"; + pinctrl-0 = <&state_default>; + + state_default: state-default { + gpio-pinmux { + groups = "rgmii2", "uart3", "wdt"; function = "gpio"; }; }; @@ -133,12 +128,13 @@ &switch0 { ports { port@0 { + status = "okay"; label = "ethblack"; - status = "ok"; }; + port@4 { + status = "okay"; label = "ethblue"; - status = "ok"; }; }; }; diff --git a/drivers/staging/mt7621-dts/gbpc2.dts b/drivers/staging/mt7621-dts/gbpc2.dts index 52760e7351f6c855ed6d9b2fe0521c067461f705..6f6fed071dda01de8bded044ecf49b1698fd2c5f 100644 --- a/drivers/staging/mt7621-dts/gbpc2.dts +++ b/drivers/staging/mt7621-dts/gbpc2.dts @@ -1,21 +1,121 @@ /dts-v1/; -#include "gbpc1.dts" +#include "mt7621.dtsi" + +#include +#include / { compatible = "gnubee,gb-pc2", "mediatek,mt7621-soc"; model = "GB-PC2"; + + memory@0 { + device_type = "memory"; + reg = <0x00000000 0x1c000000>, + <0x20000000 0x04000000>; + 
}; + + chosen { + bootargs = "console=ttyS0,57600"; + }; + + palmbus: palmbus@1e000000 { + i2c@900 { + status = "okay"; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + + reset { + label = "reset"; + gpios = <&gpio 18 GPIO_ACTIVE_HIGH>; + linux,code = ; + }; + }; +}; + +&sdhci { + status = "okay"; +}; + +&spi0 { + status = "okay"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <50000000>; + broken-flash-reset; + + partition@0 { + label = "u-boot"; + reg = <0x0 0x30000>; + read-only; + }; + + partition@30000 { + label = "u-boot-env"; + reg = <0x30000 0x10000>; + read-only; + }; + + factory: partition@40000 { + label = "factory"; + reg = <0x40000 0x10000>; + read-only; + }; + + partition@50000 { + label = "firmware"; + reg = <0x50000 0x1fb0000>; + }; + }; }; -&default_gpio { - groups = "wdt", "uart3"; - function = "gpio"; +&pcie { + status = "okay"; }; -&gmac1 { - status = "ok"; +&pinctrl { + pinctrl-names = "default"; + pinctrl-0 = <&state_default>; + + state_default: state-default { + gpio-pinmux { + groups = "wdt"; + function = "gpio"; + }; + }; }; -&phy_external { - status = "ok"; +ðernet { + gmac1: mac@1 { + status = "okay"; + phy-handle = <ðphy7>; + }; + + mdio-bus { + ethphy7: ethernet-phy@7 { + reg = <7>; + phy-mode = "rgmii-rxid"; + }; + }; +}; + +&switch0 { + ports { + port@0 { + status = "okay"; + label = "ethblack"; + }; + + port@4 { + status = "okay"; + label = "ethblue"; + }; + }; }; diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi index 27222f7b246fd970d982be1c483a87aa6838b7ac..91a7fa74829643d898dcb515ce1caeceabf7b411 100644 --- a/drivers/staging/mt7621-dts/mt7621.dtsi +++ b/drivers/staging/mt7621-dts/mt7621.dtsi @@ -56,9 +56,9 @@ regulator-max-microvolt = <3300000>; enable-active-high; regulator-always-on; - }; + }; - mmc_fixed_1v8_io: fixedregulator@1 { + mmc_fixed_1v8_io: fixedregulator@1 { compatible = "regulator-fixed"; regulator-name = "mmc_io"; regulator-min-microvolt = <1800000>; @@ -412,37 +412,32 @@ mediatek,ethsys = <ðsys>; + pinctrl-names = "default"; + pinctrl-0 = <&mdio_pins>, <&rgmii1_pins>, <&rgmii2_pins>; gmac0: mac@0 { compatible = "mediatek,eth-mac"; reg = <0>; phy-mode = "rgmii"; + fixed-link { speed = <1000>; full-duplex; pause; }; }; + gmac1: mac@1 { compatible = "mediatek,eth-mac"; reg = <1>; status = "off"; phy-mode = "rgmii-rxid"; - phy-handle = <&phy_external>; }; + mdio-bus { #address-cells = <1>; #size-cells = <0>; - phy_external: ethernet-phy@5 { - status = "off"; - reg = <5>; - phy-mode = "rgmii-rxid"; - - pinctrl-names = "default"; - pinctrl-0 = <&rgmii2_pins>; - }; - switch0: switch0@0 { compatible = "mediatek,mt7621"; #address-cells = <1>; @@ -456,36 +451,43 @@ #address-cells = <1>; #size-cells = <0>; reg = <0>; + port@0 { status = "off"; reg = <0>; label = "lan0"; }; + port@1 { status = "off"; reg = <1>; label = "lan1"; }; + port@2 { status = "off"; reg = <2>; label = "lan2"; }; + port@3 { status = "off"; reg = <3>; label = "lan3"; }; + port@4 { status = "off"; reg = <4>; label = "lan4"; }; + port@6 { reg = <6>; label = "cpu"; ethernet = <&gmac0>; phy-mode = "trgmii"; + fixed-link { speed = <1000>; full-duplex; diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h index 4cabaf21c1ca051b28296b27f1aff8322d0955cc..367db4acc78523ae8b198346a9ff626cee0ead5c 100644 --- a/drivers/staging/rtl8192e/rtllib.h +++ b/drivers/staging/rtl8192e/rtllib.h @@ -1982,7 +1982,7 @@ void 
rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee); void rtllib_stop_send_beacons(struct rtllib_device *ieee); void notify_wx_assoc_event(struct rtllib_device *ieee); void rtllib_start_ibss(struct rtllib_device *ieee); -void rtllib_softmac_init(struct rtllib_device *ieee); +int rtllib_softmac_init(struct rtllib_device *ieee); void rtllib_softmac_free(struct rtllib_device *ieee); void rtllib_disassociate(struct rtllib_device *ieee); void rtllib_stop_scan(struct rtllib_device *ieee); diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c index 64d9feee1f392d33fb575336289570e565caa300..f00ac94b2639b4ca467c693cfe5be87771c5813b 100644 --- a/drivers/staging/rtl8192e/rtllib_module.c +++ b/drivers/staging/rtl8192e/rtllib_module.c @@ -88,7 +88,7 @@ struct net_device *alloc_rtllib(int sizeof_priv) err = rtllib_networks_allocate(ieee); if (err) { pr_err("Unable to allocate beacon storage: %d\n", err); - goto failed; + goto free_netdev; } rtllib_networks_initialize(ieee); @@ -121,11 +121,13 @@ struct net_device *alloc_rtllib(int sizeof_priv) ieee->hwsec_active = 0; memset(ieee->swcamtable, 0, sizeof(struct sw_cam_table) * 32); - rtllib_softmac_init(ieee); + err = rtllib_softmac_init(ieee); + if (err) + goto free_crypt_info; ieee->pHTInfo = kzalloc(sizeof(struct rt_hi_throughput), GFP_KERNEL); if (!ieee->pHTInfo) - return NULL; + goto free_softmac; HTUpdateDefaultSetting(ieee); HTInitializeHTInfo(ieee); @@ -141,8 +143,14 @@ struct net_device *alloc_rtllib(int sizeof_priv) return dev; - failed: +free_softmac: + rtllib_softmac_free(ieee); +free_crypt_info: + lib80211_crypt_info_free(&ieee->crypt_info); + rtllib_networks_free(ieee); +free_netdev: free_netdev(dev); + return NULL; } EXPORT_SYMBOL(alloc_rtllib); diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c index 2c752ba5a802a275fe36e1be366eb9974ac86a18..e8e72f79ca00790671ef024b59ab359f1db6620b 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac.c +++ b/drivers/staging/rtl8192e/rtllib_softmac.c @@ -2953,7 +2953,7 @@ void rtllib_start_protocol(struct rtllib_device *ieee) } } -void rtllib_softmac_init(struct rtllib_device *ieee) +int rtllib_softmac_init(struct rtllib_device *ieee) { int i; @@ -2964,7 +2964,8 @@ void rtllib_softmac_init(struct rtllib_device *ieee) ieee->seq_ctrl[i] = 0; ieee->dot11d_info = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC); if (!ieee->dot11d_info) - netdev_err(ieee->dev, "Can't alloc memory for DOT11D\n"); + return -ENOMEM; + ieee->LinkDetectInfo.SlotIndex = 0; ieee->LinkDetectInfo.SlotNum = 2; ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0; @@ -3030,6 +3031,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee) tasklet_setup(&ieee->ps_task, rtllib_sta_ps); + return 0; } void rtllib_softmac_free(struct rtllib_device *ieee) diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c index 4df6d04315e39d3f456fee1e6e6c7c8412f1eed1..b912ad2f4b720f64091e2a7e1da32d913baab95c 100644 --- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c @@ -6679,6 +6679,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) struct sta_info *psta_bmc; struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct sta_priv *pstapriv = &padapter->stapriv; /* for BC/MC Frames */ @@ -6689,7 +6690,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter 
*padapter, unsigned char *pbuf) if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) { msleep(10);/* 10ms, ATIM(HIQ) Windows */ - spin_lock_bh(&psta_bmc->sleep_q.lock); + /* spin_lock_bh(&psta_bmc->sleep_q.lock); */ + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta_bmc->sleep_q); xmitframe_plist = get_next(xmitframe_phead); @@ -6715,7 +6717,8 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf) rtw_hal_xmitframe_enqueue(padapter, pxmitframe); } - spin_unlock_bh(&psta_bmc->sleep_q.lock); + /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); /* check hi queue and bmc_sleepq */ rtw_chk_hi_queue_cmd(padapter); diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c index 0d47e6e121777c72185d423209b6c51624e0de77..6979f8dbccb84920b85fa199110438cd5b09660a 100644 --- a/drivers/staging/rtl8723bs/core/rtw_recv.c +++ b/drivers/staging/rtl8723bs/core/rtw_recv.c @@ -1144,8 +1144,10 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_ if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) { struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - spin_lock_bh(&psta->sleep_q.lock); + /* spin_lock_bh(&psta->sleep_q.lock); */ + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = get_next(xmitframe_phead); @@ -1180,10 +1182,12 @@ sint validate_recv_ctrl_frame(struct adapter *padapter, union recv_frame *precv_ update_beacon(padapter, _TIM_IE_, NULL, true); } - spin_unlock_bh(&psta->sleep_q.lock); + /* spin_unlock_bh(&psta->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); } else { - spin_unlock_bh(&psta->sleep_q.lock); + /* spin_unlock_bh(&psta->sleep_q.lock); */ + spin_unlock_bh(&pxmitpriv->lock); /* DBG_871X("no buffered packets to xmit\n"); */ if (pstapriv->tim_bitmap&BIT(psta->aid)) { diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c index b1784b4e466f3a44b06234468a5a8f97a3eb3ec8..e3f56c6cc882e816a660c4ba914a8a83d3d5b8e2 100644 --- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c +++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c @@ -330,48 +330,46 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta) /* list_del_init(&psta->wakeup_list); */ - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); + rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q); psta->sleepq_len = 0; - spin_unlock_bh(&psta->sleep_q.lock); - - spin_lock_bh(&pxmitpriv->lock); /* vo */ - spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending); list_del_init(&(pstaxmitpriv->vo_q.tx_pending)); phwxmit = pxmitpriv->hwxmits; phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt; pstaxmitpriv->vo_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */ /* vi */ - spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending); list_del_init(&(pstaxmitpriv->vi_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+1; phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt; pstaxmitpriv->vi_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock); + /* 
spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */ /* be */ - spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending); list_del_init(&(pstaxmitpriv->be_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+2; phwxmit->accnt -= pstaxmitpriv->be_q.qcnt; pstaxmitpriv->be_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */ /* bk */ - spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); + /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending); list_del_init(&(pstaxmitpriv->bk_q.tx_pending)); phwxmit = pxmitpriv->hwxmits+3; phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt; pstaxmitpriv->bk_q.qcnt = 0; - spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock); + /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */ spin_unlock_bh(&pxmitpriv->lock); diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c index d78cff7ed6a01a321c85a799c8d9dcc8dbdd1587..6ecaff9728fd4831001d9327c32f6ac917bc13f3 100644 --- a/drivers/staging/rtl8723bs/core/rtw_xmit.c +++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c @@ -1871,6 +1871,8 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram struct list_head *plist, *phead; struct xmit_frame *pxmitframe; + spin_lock_bh(&pframequeue->lock); + phead = get_list_head(pframequeue); plist = get_next(phead); @@ -1881,6 +1883,7 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram rtw_free_xmitframe(pxmitpriv, pxmitframe); } + spin_unlock_bh(&pframequeue->lock); } s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe) @@ -1943,7 +1946,6 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) struct sta_info *psta; struct tx_servq *ptxservq; struct pkt_attrib *pattrib = &pxmitframe->attrib; - struct xmit_priv *xmit_priv = &padapter->xmitpriv; struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits; sint res = _SUCCESS; @@ -1972,14 +1974,12 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe) ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index)); - spin_lock_bh(&xmit_priv->lock); if (list_empty(&ptxservq->tx_pending)) list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue)); list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending)); ptxservq->qcnt++; phwxmits[ac_index].accnt++; - spin_unlock_bh(&xmit_priv->lock); exit: @@ -2397,10 +2397,11 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) struct list_head *xmitframe_plist, *xmitframe_phead; struct xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; psta_bmc = rtw_get_bcmc_stainfo(padapter); - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = get_next(xmitframe_phead); @@ -2508,7 +2509,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta) _exit: - spin_unlock_bh(&psta->sleep_q.lock); + spin_unlock_bh(&pxmitpriv->lock); if (update_mask) update_beacon(padapter, _TIM_IE_, NULL, true); @@ -2520,8 +2521,9 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst struct list_head *xmitframe_plist, *xmitframe_phead; struct 
xmit_frame *pxmitframe = NULL; struct sta_priv *pstapriv = &padapter->stapriv; + struct xmit_priv *pxmitpriv = &padapter->xmitpriv; - spin_lock_bh(&psta->sleep_q.lock); + spin_lock_bh(&pxmitpriv->lock); xmitframe_phead = get_list_head(&psta->sleep_q); xmitframe_plist = get_next(xmitframe_phead); @@ -2577,7 +2579,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst } } - spin_unlock_bh(&psta->sleep_q.lock); + spin_unlock_bh(&pxmitpriv->lock); } void enqueue_pending_xmitbuf( diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c index ce5bf2861d0c14b7c25fe7711a487da6f5c89bd4..44799c4a9f35b95b13bab83264f330c69d4e15f3 100644 --- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c +++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c @@ -572,7 +572,9 @@ s32 rtl8723bs_hal_xmit( rtw_issue_addbareq_cmd(padapter, pxmitframe); } + spin_lock_bh(&pxmitpriv->lock); err = rtw_xmitframe_enqueue(padapter, pxmitframe); + spin_unlock_bh(&pxmitpriv->lock); if (err != _SUCCESS) { RT_TRACE(_module_hal_xmit_c_, _drv_err_, ("rtl8723bs_hal_xmit: enqueue xmitframe fail\n")); rtw_free_xmitframe(pxmitpriv, pxmitframe); diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c index 38b10fd5d992134e58ee9f5fccf96c52c095076e..95b91fe45cb38afeb9c5c1a8d914963c02fe8e75 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c @@ -2280,6 +2280,9 @@ void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header) struct vchiq_service *service = find_service_by_handle(handle); int pos; + if (!service) + return; + while (service->msg_queue_write == service->msg_queue_read + VCHIQ_MAX_SLOTS) { if (wait_for_completion_interruptible(&service->msg_queue_pop)) @@ -2299,6 +2302,9 @@ struct vchiq_header *vchiq_msg_hold(unsigned int handle) struct vchiq_header *header; int pos; + if (!service) + return NULL; + if (service->msg_queue_write == service->msg_queue_read) return NULL; diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c index e7bc1988124a828cb8049a03e538069275a83efb..d5dacd5583c6efa2bfb2936f5f45a2d1632bd8f7 100644 --- a/drivers/staging/wfx/main.c +++ b/drivers/staging/wfx/main.c @@ -309,7 +309,8 @@ struct wfx_dev *wfx_init_common(struct device *dev, wdev->pdata.gpio_wakeup = devm_gpiod_get_optional(dev, "wakeup", GPIOD_OUT_LOW); if (IS_ERR(wdev->pdata.gpio_wakeup)) - return NULL; + goto err; + if (wdev->pdata.gpio_wakeup) gpiod_set_consumer_name(wdev->pdata.gpio_wakeup, "wfx wakeup"); @@ -328,6 +329,10 @@ struct wfx_dev *wfx_init_common(struct device *dev, return NULL; return wdev; + +err: + ieee80211_free_hw(hw); + return NULL; } int wfx_probe(struct wfx_dev *wdev) diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c index f2a0e16b0318c2ae7dc4275e07147b993208def2..fac3f34d4a1f595d8be8b8100f664861a02526a1 100644 --- a/drivers/staging/wlan-ng/hfa384x_usb.c +++ b/drivers/staging/wlan-ng/hfa384x_usb.c @@ -3779,18 +3779,18 @@ static void hfa384x_usb_throttlefn(struct timer_list *t) spin_lock_irqsave(&hw->ctlxq.lock, flags); - /* - * We need to check BOTH the RX and the TX throttle controls, - * so we use the bitwise OR instead of the logical OR. 
- */ pr_debug("flags=0x%lx\n", hw->usb_flags); - if (!hw->wlandev->hwremoved && - ((test_and_clear_bit(THROTTLE_RX, &hw->usb_flags) && - !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags)) | - (test_and_clear_bit(THROTTLE_TX, &hw->usb_flags) && - !test_and_set_bit(WORK_TX_RESUME, &hw->usb_flags)) - )) { - schedule_work(&hw->usb_work); + if (!hw->wlandev->hwremoved) { + bool rx_throttle = test_and_clear_bit(THROTTLE_RX, &hw->usb_flags) && + !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags); + bool tx_throttle = test_and_clear_bit(THROTTLE_TX, &hw->usb_flags) && + !test_and_set_bit(WORK_TX_RESUME, &hw->usb_flags); + /* + * We need to check BOTH the RX and the TX throttle controls, + * so we use the bitwise OR instead of the logical OR. + */ + if (rx_throttle | tx_throttle) + schedule_work(&hw->usb_work); } spin_unlock_irqrestore(&hw->ctlxq.lock, flags); diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 8075f60fd02c337a5fcb0efc8ad74a066737cfb3..2d5cf1714ae05de28eb17ba66db9b5be12338e98 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -443,6 +443,9 @@ static bool iscsit_tpg_check_network_portal( break; } spin_unlock(&tpg->tpg_np_lock); + + if (match) + break; } spin_unlock(&tiqn->tiqn_tpg_lock); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index c6950f157b99f8527b5f5f982577b53502e45cc9..c283e45ac300bd3a5f0692e5fd47d2d11d62738b 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1676,6 +1676,7 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi) mutex_lock(&udev->cmdr_lock); page = tcmu_get_block_page(udev, dbi); if (likely(page)) { + get_page(page); mutex_unlock(&udev->cmdr_lock); return page; } @@ -1714,6 +1715,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) /* For the vmalloc()ed cmd area pages */ addr = (void *)(unsigned long)info->mem[mi].addr + offset; page = vmalloc_to_page(addr); + get_page(page); } else { uint32_t dbi; @@ -1724,7 +1726,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } - get_page(page); vmf->page = page; return 0; } diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index f255a96ae5a48ca6323b0d932348cdb86fe6379f..6ea80add7378fce0de69adecad0b611d38ce0247 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -588,6 +588,7 @@ static int optee_remove(struct platform_device *pdev) /* Unregister OP-TEE specific client devices on TEE bus */ optee_unregister_devices(); + teedev_close_context(optee->ctx); /* * Ask OP-TEE to free all cached shared memory objects to decrease * reference counters and also avoid wild pointers in secure world @@ -633,6 +634,7 @@ static int optee_probe(struct platform_device *pdev) struct optee *optee = NULL; void *memremaped_shm = NULL; struct tee_device *teedev; + struct tee_context *ctx; u32 sec_caps; int rc; @@ -719,6 +721,12 @@ static int optee_probe(struct platform_device *pdev) optee_supp_init(&optee->supp); optee->memremaped_shm = memremaped_shm; optee->pool = pool; + ctx = teedev_open(optee->teedev); + if (IS_ERR(ctx)) { + rc = PTR_ERR(ctx); + goto err; + } + optee->ctx = ctx; /* * Ensure that there are no pre-existing shm objects before enabling diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index f6bb4a763ba94e80f0463cc42957702ef47f1b93..ea09533e30cdef5b214043e801241618084cc38f 100644 --- 
a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -70,6 +70,7 @@ struct optee_supp { * struct optee - main service struct * @supp_teedev: supplicant device * @teedev: client device + * @ctx: driver internal TEE context * @invoke_fn: function to issue smc or hvc * @call_queue: queue of threads waiting to call @invoke_fn * @wait_queue: queue of threads from secure world waiting for a @@ -87,6 +88,7 @@ struct optee { struct tee_device *supp_teedev; struct tee_device *teedev; optee_invoke_fn *invoke_fn; + struct tee_context *ctx; struct optee_call_queue call_queue; struct optee_wait_queue wait_queue; struct optee_supp supp; diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c index 9dbdd783d6f2d3b15c4bff071bc153f147ebcd58..f1e0332b0f6e8609d024f535fffb19252b8a2777 100644 --- a/drivers/tee/optee/rpc.c +++ b/drivers/tee/optee/rpc.c @@ -284,6 +284,7 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz) } static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, + struct optee *optee, struct optee_msg_arg *arg, struct optee_call_ctx *call_ctx) { @@ -313,7 +314,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx, shm = cmd_alloc_suppl(ctx, sz); break; case OPTEE_MSG_RPC_SHM_TYPE_KERNEL: - shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV); + shm = tee_shm_alloc(optee->ctx, sz, + TEE_SHM_MAPPED | TEE_SHM_PRIV); break; default: arg->ret = TEEC_ERROR_BAD_PARAMETERS; @@ -470,7 +472,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, break; case OPTEE_MSG_RPC_CMD_SHM_ALLOC: free_pages_list(call_ctx); - handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx); + handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx); break; case OPTEE_MSG_RPC_CMD_SHM_FREE: handle_rpc_func_cmd_shm_free(ctx, arg); @@ -501,7 +503,7 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param, switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { case OPTEE_SMC_RPC_FUNC_ALLOC: - shm = tee_shm_alloc(ctx, param->a1, + shm = tee_shm_alloc(optee->ctx, param->a1, TEE_SHM_MAPPED | TEE_SHM_PRIV); if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { reg_pair_from_64(¶m->a1, ¶m->a2, pa); diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index 480d294a23ab0f5ea27a1d0667435d8e6c1c641c..b615aca0023ed317c15cb1a116ca5c15d864b56b 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(driver_lock); static struct class *tee_class; static dev_t tee_devt; -static struct tee_context *teedev_open(struct tee_device *teedev) +struct tee_context *teedev_open(struct tee_device *teedev) { int rc; struct tee_context *ctx; @@ -70,6 +70,7 @@ static struct tee_context *teedev_open(struct tee_device *teedev) return ERR_PTR(rc); } +EXPORT_SYMBOL_GPL(teedev_open); void teedev_ctx_get(struct tee_context *ctx) { @@ -96,11 +97,14 @@ void teedev_ctx_put(struct tee_context *ctx) kref_put(&ctx->refcount, teedev_ctx_release); } -static void teedev_close_context(struct tee_context *ctx) +void teedev_close_context(struct tee_context *ctx) { - tee_device_put(ctx->teedev); + struct tee_device *teedev = ctx->teedev; + teedev_ctx_put(ctx); + tee_device_put(teedev); } +EXPORT_SYMBOL_GPL(teedev_close_context); static int tee_open(struct inode *inode, struct file *filp) { diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c index a1e4f9bb4cb01e7eb17538d1597759c8ec1ae557..0f4cabd2a8c6247f6ccd152cbec2f306d13efcb7 100644 --- 
a/drivers/thermal/imx8mm_thermal.c +++ b/drivers/thermal/imx8mm_thermal.c @@ -21,6 +21,7 @@ #define TPS 0x4 #define TRITSR 0x20 /* TMU immediate temp */ +#define TER_ADC_PD BIT(30) #define TER_EN BIT(31) #define TRITSR_TEMP0_VAL_MASK 0xff #define TRITSR_TEMP1_VAL_MASK 0xff0000 @@ -113,6 +114,8 @@ static void imx8mm_tmu_enable(struct imx8mm_tmu *tmu, bool enable) val = readl_relaxed(tmu->base + TER); val = enable ? (val | TER_EN) : (val & ~TER_EN); + if (tmu->socdata->version == TMU_VER2) + val = enable ? (val & ~TER_ADC_PD) : (val | TER_ADC_PD); writel_relaxed(val, tmu->base + TER); } diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index 2c7473d86a59b013b43872777dfa5d7011152e6c..16663373b6829999971f5210f27193131fbf3a37 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -15,6 +15,7 @@ #include #include #include +#include #define REG_SET 0x4 #define REG_CLR 0x8 @@ -194,6 +195,7 @@ static struct thermal_soc_data thermal_imx7d_data = { }; struct imx_thermal_data { + struct device *dev; struct cpufreq_policy *policy; struct thermal_zone_device *tz; struct thermal_cooling_device *cdev; @@ -252,44 +254,15 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp) const struct thermal_soc_data *soc_data = data->socdata; struct regmap *map = data->tempmon; unsigned int n_meas; - bool wait, run_measurement; u32 val; + int ret; - run_measurement = !data->irq_enabled; - if (!run_measurement) { - /* Check if a measurement is currently in progress */ - regmap_read(map, soc_data->temp_data, &val); - wait = !(val & soc_data->temp_valid_mask); - } else { - /* - * Every time we measure the temperature, we will power on the - * temperature sensor, enable measurements, take a reading, - * disable measurements, power off the temperature sensor. - */ - regmap_write(map, soc_data->sensor_ctrl + REG_CLR, - soc_data->power_down_mask); - regmap_write(map, soc_data->sensor_ctrl + REG_SET, - soc_data->measure_temp_mask); - - wait = true; - } - - /* - * According to the temp sensor designers, it may require up to ~17us - * to complete a measurement. 
- */ - if (wait) - usleep_range(20, 50); + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) + return ret; regmap_read(map, soc_data->temp_data, &val); - if (run_measurement) { - regmap_write(map, soc_data->sensor_ctrl + REG_CLR, - soc_data->measure_temp_mask); - regmap_write(map, soc_data->sensor_ctrl + REG_SET, - soc_data->power_down_mask); - } - if ((val & soc_data->temp_valid_mask) == 0) { dev_dbg(&tz->device, "temp measurement never finished\n"); return -EAGAIN; @@ -328,6 +301,8 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp) enable_irq(data->irq); } + pm_runtime_put(data->dev); + return 0; } @@ -335,24 +310,16 @@ static int imx_change_mode(struct thermal_zone_device *tz, enum thermal_device_mode mode) { struct imx_thermal_data *data = tz->devdata; - struct regmap *map = data->tempmon; - const struct thermal_soc_data *soc_data = data->socdata; if (mode == THERMAL_DEVICE_ENABLED) { - regmap_write(map, soc_data->sensor_ctrl + REG_CLR, - soc_data->power_down_mask); - regmap_write(map, soc_data->sensor_ctrl + REG_SET, - soc_data->measure_temp_mask); + pm_runtime_get(data->dev); if (!data->irq_enabled) { data->irq_enabled = true; enable_irq(data->irq); } } else { - regmap_write(map, soc_data->sensor_ctrl + REG_CLR, - soc_data->measure_temp_mask); - regmap_write(map, soc_data->sensor_ctrl + REG_SET, - soc_data->power_down_mask); + pm_runtime_put(data->dev); if (data->irq_enabled) { disable_irq(data->irq); @@ -393,6 +360,11 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip, int temp) { struct imx_thermal_data *data = tz->devdata; + int ret; + + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) + return ret; /* do not allow changing critical threshold */ if (trip == IMX_TRIP_CRITICAL) @@ -406,6 +378,8 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip, imx_set_alarm_temp(data, temp); + pm_runtime_put(data->dev); + return 0; } @@ -681,6 +655,8 @@ static int imx_thermal_probe(struct platform_device *pdev) if (!data) return -ENOMEM; + data->dev = &pdev->dev; + map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "fsl,tempmon"); if (IS_ERR(map)) { ret = PTR_ERR(map); @@ -800,6 +776,16 @@ static int imx_thermal_probe(struct platform_device *pdev) data->socdata->power_down_mask); regmap_write(map, data->socdata->sensor_ctrl + REG_SET, data->socdata->measure_temp_mask); + /* After power up, we need a delay before first access can be done. 
*/ + usleep_range(20, 50); + + /* the core was configured and enabled just before */ + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(data->dev); + + ret = pm_runtime_resume_and_get(data->dev); + if (ret < 0) + goto disable_runtime_pm; data->irq_enabled = true; ret = thermal_zone_device_enable(data->tz); @@ -814,10 +800,15 @@ static int imx_thermal_probe(struct platform_device *pdev) goto thermal_zone_unregister; } + pm_runtime_put(data->dev); + return 0; thermal_zone_unregister: thermal_zone_device_unregister(data->tz); +disable_runtime_pm: + pm_runtime_put_noidle(data->dev); + pm_runtime_disable(data->dev); clk_disable: clk_disable_unprepare(data->thermal_clk); legacy_cleanup: @@ -829,13 +820,9 @@ static int imx_thermal_probe(struct platform_device *pdev) static int imx_thermal_remove(struct platform_device *pdev) { struct imx_thermal_data *data = platform_get_drvdata(pdev); - struct regmap *map = data->tempmon; - /* Disable measurements */ - regmap_write(map, data->socdata->sensor_ctrl + REG_SET, - data->socdata->power_down_mask); - if (!IS_ERR(data->thermal_clk)) - clk_disable_unprepare(data->thermal_clk); + pm_runtime_put_noidle(data->dev); + pm_runtime_disable(data->dev); thermal_zone_device_unregister(data->tz); imx_thermal_unregister_legacy_cooling(data); @@ -858,29 +845,79 @@ static int __maybe_unused imx_thermal_suspend(struct device *dev) ret = thermal_zone_device_disable(data->tz); if (ret) return ret; + + return pm_runtime_force_suspend(data->dev); +} + +static int __maybe_unused imx_thermal_resume(struct device *dev) +{ + struct imx_thermal_data *data = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_force_resume(data->dev); + if (ret) + return ret; + /* Enabled thermal sensor after resume */ + return thermal_zone_device_enable(data->tz); +} + +static int __maybe_unused imx_thermal_runtime_suspend(struct device *dev) +{ + struct imx_thermal_data *data = dev_get_drvdata(dev); + const struct thermal_soc_data *socdata = data->socdata; + struct regmap *map = data->tempmon; + int ret; + + ret = regmap_write(map, socdata->sensor_ctrl + REG_CLR, + socdata->measure_temp_mask); + if (ret) + return ret; + + ret = regmap_write(map, socdata->sensor_ctrl + REG_SET, + socdata->power_down_mask); + if (ret) + return ret; + clk_disable_unprepare(data->thermal_clk); return 0; } -static int __maybe_unused imx_thermal_resume(struct device *dev) +static int __maybe_unused imx_thermal_runtime_resume(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); + const struct thermal_soc_data *socdata = data->socdata; + struct regmap *map = data->tempmon; int ret; ret = clk_prepare_enable(data->thermal_clk); if (ret) return ret; - /* Enabled thermal sensor after resume */ - ret = thermal_zone_device_enable(data->tz); + + ret = regmap_write(map, socdata->sensor_ctrl + REG_CLR, + socdata->power_down_mask); + if (ret) + return ret; + + ret = regmap_write(map, socdata->sensor_ctrl + REG_SET, + socdata->measure_temp_mask); if (ret) return ret; + /* + * According to the temp sensor designers, it may require up to ~17us + * to complete a measurement. 
+ */ + usleep_range(20, 50); + return 0; } -static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops, - imx_thermal_suspend, imx_thermal_resume); +static const struct dev_pm_ops imx_thermal_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(imx_thermal_suspend, imx_thermal_resume) + SET_RUNTIME_PM_OPS(imx_thermal_runtime_suspend, + imx_thermal_runtime_resume, NULL) +}; static struct platform_driver imx_thermal = { .driver = { diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig index 8025b21f43fa541bad16f989611b1ca9f738c89e..876869d5731373d021a8f4bc7e49284ddfcd7ae2 100644 --- a/drivers/thermal/intel/Kconfig +++ b/drivers/thermal/intel/Kconfig @@ -8,6 +8,10 @@ config INTEL_POWERCLAMP enforce idle time which results in more package C-state residency. The user interface is exposed via generic thermal framework. +config X86_THERMAL_VECTOR + def_bool y + depends on X86 && CPU_SUP_INTEL && X86_LOCAL_APIC + config X86_PKG_TEMP_THERMAL tristate "X86 package temperature thermal driver" depends on X86_THERMAL_VECTOR @@ -75,3 +79,17 @@ config INTEL_PCH_THERMAL Enable this to support thermal reporting on certain intel PCHs. Thermal reporting device will provide temperature reading, programmable trip points and other information. + +config INTEL_HFI_THERMAL + bool "Intel Hardware Feedback Interface" + depends on NET + depends on CPU_SUP_INTEL + depends on X86_THERMAL_VECTOR + select THERMAL_NETLINK + help + Select this option to enable the Hardware Feedback Interface. If + selected, hardware provides guidance to the operating system on + the performance and energy efficiency capabilities of each CPU. + These capabilities may change as a result of changes in the operating + conditions of the system such power and thermal limits. If selected, + the kernel relays updates in CPUs' capabilities to userspace. 
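/*
 * A minimal sketch, not taken from the patch above: the imx_thermal
 * conversion replaces open-coded sensor power sequencing with runtime PM.
 * Reduced to its core, the pattern is the one below -- every hardware
 * access is bracketed by pm_runtime_resume_and_get()/pm_runtime_put(),
 * while the actual power-up/power-down register writes live only in the
 * SET_RUNTIME_PM_OPS callbacks. "example_sensor" and its fields are
 * hypothetical; the pm_runtime_* calls are the real kernel API.
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct example_sensor {
	struct device *dev;
	int last_temp;
};

static int example_sensor_get_temp(struct example_sensor *s, int *temp)
{
	int ret;

	/* Runs the runtime_resume callback first if the device is suspended. */
	ret = pm_runtime_resume_and_get(s->dev);
	if (ret < 0)
		return ret;

	*temp = s->last_temp;	/* stand-in for the real register read */

	/* Drop the usage count so the device may runtime-suspend again. */
	pm_runtime_put(s->dev);
	return 0;
}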
diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile index 0d9736ced5d4eb649abba63f2aecb9e6d6f572ba..ece3888f30afcf528187fa3375b6e0953e6842e8 100644 --- a/drivers/thermal/intel/Makefile +++ b/drivers/thermal/intel/Makefile @@ -10,3 +10,5 @@ obj-$(CONFIG_INTEL_QUARK_DTS_THERMAL) += intel_quark_dts_thermal.o obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/ obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o +obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o +obj-$(CONFIG_INTEL_HFI_THERMAL) += intel_hfi.o diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 0966551cbaaa0aad9259c39ee6a91b745a7af406..72a26867c2092fff21799ca3c89b4bd2633d7775 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -53,7 +53,7 @@ struct int3400_thermal_priv { struct art *arts; int trt_count; struct trt *trts; - u8 uuid_bitmap; + u32 uuid_bitmap; int rel_misc_dev_res; int current_uuid_index; char *data_vault; @@ -402,6 +402,10 @@ static void int3400_notify(acpi_handle handle, thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event); thermal_prop[4] = NULL; kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop); + kfree(thermal_prop[0]); + kfree(thermal_prop[1]); + kfree(thermal_prop[2]); + kfree(thermal_prop[3]); } static int int3400_thermal_get_temp(struct thermal_zone_device *thermal, @@ -462,6 +466,11 @@ static void int3400_setup_gddv(struct int3400_thermal_priv *priv) priv->data_vault = kmemdup(obj->package.elements[0].buffer.pointer, obj->package.elements[0].buffer.length, GFP_KERNEL); + if (!priv->data_vault) { + kfree(buffer.pointer); + return; + } + bin_attr_data_vault.private = priv->data_vault; bin_attr_data_vault.size = obj->package.elements[0].buffer.length; kfree(buffer.pointer); diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c new file mode 100644 index 0000000000000000000000000000000000000000..158a2b7c6b6f591adb9e555e9b9ed3fd5b981352 --- /dev/null +++ b/drivers/thermal/intel/intel_hfi.c @@ -0,0 +1,568 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hardware Feedback Interface Driver + * + * Copyright (c) 2021, Intel Corporation. + * + * Authors: Aubrey Li + * Ricardo Neri + * + * + * The Hardware Feedback Interface provides a performance and energy efficiency + * capability information for each CPU in the system. Depending on the processor + * model, hardware may periodically update these capabilities as a result of + * changes in the operating conditions (e.g., power limits or thermal + * constraints). On other processor models, there is a single HFI update + * at boot. + * + * This file provides functionality to process HFI updates and relay these + * updates to userspace. 
+ */ + +#define pr_fmt(fmt) "intel-hfi: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "../thermal_core.h" +#include "intel_hfi.h" + +#define THERM_STATUS_CLEAR_PKG_MASK (BIT(1) | BIT(3) | BIT(5) | BIT(7) | \ + BIT(9) | BIT(11) | BIT(26)) + +/* Hardware Feedback Interface MSR configuration bits */ +#define HW_FEEDBACK_PTR_VALID_BIT BIT(0) +#define HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT BIT(0) + +/* CPUID detection and enumeration definitions for HFI */ + +#define CPUID_HFI_LEAF 6 + +union hfi_capabilities { + struct { + u8 performance:1; + u8 energy_efficiency:1; + u8 __reserved:6; + } split; + u8 bits; +}; + +union cpuid6_edx { + struct { + union hfi_capabilities capabilities; + u32 table_pages:4; + u32 __reserved:4; + s32 index:16; + } split; + u32 full; +}; + +/** + * struct hfi_cpu_data - HFI capabilities per CPU + * @perf_cap: Performance capability + * @ee_cap: Energy efficiency capability + * + * Capabilities of a logical processor in the HFI table. These capabilities are + * unitless. + */ +struct hfi_cpu_data { + u8 perf_cap; + u8 ee_cap; +} __packed; + +/** + * struct hfi_hdr - Header of the HFI table + * @perf_updated: Hardware updated performance capabilities + * @ee_updated: Hardware updated energy efficiency capabilities + * + * Properties of the data in an HFI table. + */ +struct hfi_hdr { + u8 perf_updated; + u8 ee_updated; +} __packed; + +/** + * struct hfi_instance - Representation of an HFI instance (i.e., a table) + * @local_table: Base of the local copy of the HFI table + * @timestamp: Timestamp of the last update of the local table. + * Located at the base of the local table. + * @hdr: Base address of the header of the local table + * @data: Base address of the data of the local table + * @cpus: CPUs represented in this HFI table instance + * @hw_table: Pointer to the HFI table of this instance + * @update_work: Delayed work to process HFI updates + * @table_lock: Lock to protect acceses to the table of this instance + * @event_lock: Lock to process HFI interrupts + * + * A set of parameters to parse and navigate a specific HFI table. + */ +struct hfi_instance { + union { + void *local_table; + u64 *timestamp; + }; + void *hdr; + void *data; + cpumask_var_t cpus; + void *hw_table; + struct delayed_work update_work; + raw_spinlock_t table_lock; + raw_spinlock_t event_lock; +}; + +/** + * struct hfi_features - Supported HFI features + * @nr_table_pages: Size of the HFI table in 4KB pages + * @cpu_stride: Stride size to locate the capability data of a logical + * processor within the table (i.e., row stride) + * @hdr_size: Size of the table header + * + * Parameters and supported features that are common to all HFI instances + */ +struct hfi_features { + unsigned int nr_table_pages; + unsigned int cpu_stride; + unsigned int hdr_size; +}; + +/** + * struct hfi_cpu_info - Per-CPU attributes to consume HFI data + * @index: Row of this CPU in its HFI table + * @hfi_instance: Attributes of the HFI table to which this CPU belongs + * + * Parameters to link a logical processor to an HFI table and a row within it. 
+ */ +struct hfi_cpu_info { + s16 index; + struct hfi_instance *hfi_instance; +}; + +static DEFINE_PER_CPU(struct hfi_cpu_info, hfi_cpu_info) = { .index = -1 }; + +static int max_hfi_instances; +static struct hfi_instance *hfi_instances; + +static struct hfi_features hfi_features; +static DEFINE_MUTEX(hfi_instance_lock); + +static struct workqueue_struct *hfi_updates_wq; +#define HFI_UPDATE_INTERVAL HZ +#define HFI_MAX_THERM_NOTIFY_COUNT 16 + +static void get_hfi_caps(struct hfi_instance *hfi_instance, + struct thermal_genl_cpu_caps *cpu_caps) +{ + int cpu, i = 0; + + raw_spin_lock_irq(&hfi_instance->table_lock); + for_each_cpu(cpu, hfi_instance->cpus) { + struct hfi_cpu_data *caps; + s16 index; + + index = per_cpu(hfi_cpu_info, cpu).index; + caps = hfi_instance->data + index * hfi_features.cpu_stride; + cpu_caps[i].cpu = cpu; + + /* + * Scale performance and energy efficiency to + * the [0, 1023] interval that thermal netlink uses. + */ + cpu_caps[i].performance = caps->perf_cap << 2; + cpu_caps[i].efficiency = caps->ee_cap << 2; + + ++i; + } + raw_spin_unlock_irq(&hfi_instance->table_lock); +} + +/* + * Call update_capabilities() when there are changes in the HFI table. + */ +static void update_capabilities(struct hfi_instance *hfi_instance) +{ + struct thermal_genl_cpu_caps *cpu_caps; + int i = 0, cpu_count; + + /* CPUs may come online/offline while processing an HFI update. */ + mutex_lock(&hfi_instance_lock); + + cpu_count = cpumask_weight(hfi_instance->cpus); + + /* No CPUs to report in this hfi_instance. */ + if (!cpu_count) + goto out; + + cpu_caps = kcalloc(cpu_count, sizeof(*cpu_caps), GFP_KERNEL); + if (!cpu_caps) + goto out; + + get_hfi_caps(hfi_instance, cpu_caps); + + if (cpu_count < HFI_MAX_THERM_NOTIFY_COUNT) + goto last_cmd; + + /* Process complete chunks of HFI_MAX_THERM_NOTIFY_COUNT capabilities. */ + for (i = 0; + (i + HFI_MAX_THERM_NOTIFY_COUNT) <= cpu_count; + i += HFI_MAX_THERM_NOTIFY_COUNT) + thermal_genl_cpu_capability_event(HFI_MAX_THERM_NOTIFY_COUNT, + &cpu_caps[i]); + + cpu_count = cpu_count - i; + +last_cmd: + /* Process the remaining capabilities if any. */ + if (cpu_count) + thermal_genl_cpu_capability_event(cpu_count, &cpu_caps[i]); + + kfree(cpu_caps); +out: + mutex_unlock(&hfi_instance_lock); +} + +static void hfi_update_work_fn(struct work_struct *work) +{ + struct hfi_instance *hfi_instance; + + hfi_instance = container_of(to_delayed_work(work), struct hfi_instance, + update_work); + if (!hfi_instance) + return; + + update_capabilities(hfi_instance); +} + +void intel_hfi_process_event(__u64 pkg_therm_status_msr_val) +{ + struct hfi_instance *hfi_instance; + int cpu = smp_processor_id(); + struct hfi_cpu_info *info; + u64 new_timestamp; + + if (!pkg_therm_status_msr_val) + return; + + info = &per_cpu(hfi_cpu_info, cpu); + if (!info) + return; + + /* + * A CPU is linked to its HFI instance before the thermal vector in the + * local APIC is unmasked. Hence, info->hfi_instance cannot be NULL + * when receiving an HFI event. + */ + hfi_instance = info->hfi_instance; + if (unlikely(!hfi_instance)) { + pr_debug("Received event on CPU %d but instance was null", cpu); + return; + } + + /* + * On most systems, all CPUs in the package receive a package-level + * thermal interrupt when there is an HFI update. It is sufficient to + * let a single CPU to acknowledge the update and queue work to + * process it. The remaining CPUs can resume their work. + */ + if (!raw_spin_trylock(&hfi_instance->event_lock)) + return; + + /* Skip duplicated updates. 
+	/* Skip duplicated updates. */
+	new_timestamp = *(u64 *)hfi_instance->hw_table;
+	if (*hfi_instance->timestamp == new_timestamp) {
+		raw_spin_unlock(&hfi_instance->event_lock);
+		return;
+	}
+
+	raw_spin_lock(&hfi_instance->table_lock);
+
+	/*
+	 * Copy the updated table into our local copy. This includes the new
+	 * timestamp.
+	 */
+	memcpy(hfi_instance->local_table, hfi_instance->hw_table,
+	       hfi_features.nr_table_pages << PAGE_SHIFT);
+
+	raw_spin_unlock(&hfi_instance->table_lock);
+	raw_spin_unlock(&hfi_instance->event_lock);
+
+	/*
+	 * Let hardware know that we are done reading the HFI table and it is
+	 * free to update it again.
+	 */
+	pkg_therm_status_msr_val &= THERM_STATUS_CLEAR_PKG_MASK &
+				    ~PACKAGE_THERM_STATUS_HFI_UPDATED;
+	wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS, pkg_therm_status_msr_val);
+
+	queue_delayed_work(hfi_updates_wq, &hfi_instance->update_work,
+			   HFI_UPDATE_INTERVAL);
+}
+
+static void init_hfi_cpu_index(struct hfi_cpu_info *info)
+{
+	union cpuid6_edx edx;
+
+	/* Do not re-read @cpu's index if it has already been initialized. */
+	if (info->index > -1)
+		return;
+
+	edx.full = cpuid_edx(CPUID_HFI_LEAF);
+	info->index = edx.split.index;
+}
+
+/*
+ * The format of the HFI table depends on the number of capabilities that the
+ * hardware supports. Keep a data structure to navigate the table.
+ */
+static void init_hfi_instance(struct hfi_instance *hfi_instance)
+{
+	/* The HFI header is below the time-stamp. */
+	hfi_instance->hdr = hfi_instance->local_table +
+			    sizeof(*hfi_instance->timestamp);
+
+	/* The HFI data starts below the header. */
+	hfi_instance->data = hfi_instance->hdr + hfi_features.hdr_size;
+}
+
+/**
+ * intel_hfi_online() - Enable HFI on @cpu
+ * @cpu:	CPU in which the HFI will be enabled
+ *
+ * Enable the HFI to be used in @cpu. The HFI is enabled at the die/package
+ * level. The first CPU in the die/package to come online does the full HFI
+ * initialization. Subsequent CPUs will just link themselves to the HFI
+ * instance of their die/package.
+ *
+ * This function is called before enabling the thermal vector in the local APIC
+ * in order to ensure that @cpu has an associated HFI instance when it receives
+ * an HFI event.
+ */
+void intel_hfi_online(unsigned int cpu)
+{
+	struct hfi_instance *hfi_instance;
+	struct hfi_cpu_info *info;
+	phys_addr_t hw_table_pa;
+	u64 msr_val;
+	u16 die_id;
+
+	/* Nothing to do if hfi_instances are missing. */
+	if (!hfi_instances)
+		return;
+
+	/*
+	 * Link @cpu to the HFI instance of its package/die. It does not
+	 * matter whether the instance has been initialized.
+	 */
+	info = &per_cpu(hfi_cpu_info, cpu);
+	die_id = topology_logical_die_id(cpu);
+	hfi_instance = info->hfi_instance;
+	if (!hfi_instance) {
+		if (die_id < 0 || die_id >= max_hfi_instances)
+			return;
+
+		hfi_instance = &hfi_instances[die_id];
+		info->hfi_instance = hfi_instance;
+	}
+
+	init_hfi_cpu_index(info);
+
+	/*
+	 * Now check if the HFI instance of the package/die of @cpu has been
+	 * initialized (by checking its header). In such case, all we have to
+	 * do is to add @cpu to this instance's cpumask.
+	 */
+	mutex_lock(&hfi_instance_lock);
+	if (hfi_instance->hdr) {
+		cpumask_set_cpu(cpu, hfi_instance->cpus);
+		goto unlock;
+	}
+
+	/*
+	 * Hardware is programmed with the physical address of the first page
+	 * frame of the table. Hence, the allocated memory must be page-aligned.
+	 */
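As an aside, the layout that init_hfi_instance() above navigates is: a 64-bit timestamp at offset 0, then hfi_features.hdr_size bytes of change-indication header, then one row of hfi_features.cpu_stride bytes per logical processor. A hypothetical sketch of the pointer math, with illustrative sizes where the driver computes them from CPUID at boot:

	#include <stdint.h>

	#define HDR_SIZE	8	/* hfi_features.hdr_size */
	#define CPU_STRIDE	8	/* hfi_features.cpu_stride */

	struct hfi_cpu_data {		/* one table row, as in the patch */
		uint8_t perf_cap;
		uint8_t ee_cap;
	} __attribute__((packed));

	/* Mirrors init_hfi_instance() plus the row lookup in get_hfi_caps() */
	static struct hfi_cpu_data *cpu_row(char *local_table, int16_t index)
	{
		char *hdr = local_table + sizeof(uint64_t);	/* skip timestamp */
		char *data = hdr + HDR_SIZE;			/* skip header */

		return (struct hfi_cpu_data *)(data + index * CPU_STRIDE);
	}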
+	hfi_instance->hw_table = alloc_pages_exact(hfi_features.nr_table_pages,
+						   GFP_KERNEL | __GFP_ZERO);
+	if (!hfi_instance->hw_table)
+		goto unlock;
+
+	hw_table_pa = virt_to_phys(hfi_instance->hw_table);
+
+	/*
+	 * Allocate memory to keep a local copy of the table that
+	 * hardware generates.
+	 */
+	hfi_instance->local_table = kzalloc(hfi_features.nr_table_pages << PAGE_SHIFT,
+					    GFP_KERNEL);
+	if (!hfi_instance->local_table)
+		goto free_hw_table;
+
+	/*
+	 * Program the address of the feedback table of this die/package. On
+	 * some processors, hardware remembers the old address of the HFI table
+	 * even after having been reprogrammed and re-enabled. Thus, do not free
+	 * the pages allocated for the table or reprogram the hardware with a
+	 * new base address. Namely, program the hardware only once.
+	 */
+	msr_val = hw_table_pa | HW_FEEDBACK_PTR_VALID_BIT;
+	wrmsrl(MSR_IA32_HW_FEEDBACK_PTR, msr_val);
+
+	init_hfi_instance(hfi_instance);
+
+	INIT_DELAYED_WORK(&hfi_instance->update_work, hfi_update_work_fn);
+	raw_spin_lock_init(&hfi_instance->table_lock);
+	raw_spin_lock_init(&hfi_instance->event_lock);
+
+	cpumask_set_cpu(cpu, hfi_instance->cpus);
+
+	/*
+	 * Enable the hardware feedback interface and never disable it. See
+	 * comment on programming the address of the table.
+	 */
+	rdmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+	msr_val |= HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT;
+	wrmsrl(MSR_IA32_HW_FEEDBACK_CONFIG, msr_val);
+
+unlock:
+	mutex_unlock(&hfi_instance_lock);
+	return;
+
+free_hw_table:
+	free_pages_exact(hfi_instance->hw_table, hfi_features.nr_table_pages);
+	goto unlock;
+}
+
+/**
+ * intel_hfi_offline() - Disable HFI on @cpu
+ * @cpu:	CPU in which the HFI will be disabled
+ *
+ * Remove @cpu from those covered by its HFI instance.
+ *
+ * On some processors, hardware remembers previous programming settings even
+ * after being reprogrammed. Thus, keep HFI enabled even if all CPUs in the
+ * die/package of @cpu are offline. See note in intel_hfi_online().
+ */
+void intel_hfi_offline(unsigned int cpu)
+{
+	struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, cpu);
+	struct hfi_instance *hfi_instance;
+
+	/*
+	 * Check if @cpu has an associated, initialized HFI instance (i.e., one
+	 * with a non-NULL header). Also, HFI instances are only initialized if
+	 * X86_FEATURE_HFI is present.
+	 */
+	hfi_instance = info->hfi_instance;
+	if (!hfi_instance)
+		return;
+
+	if (!hfi_instance->hdr)
+		return;
+
+	mutex_lock(&hfi_instance_lock);
+	cpumask_clear_cpu(cpu, hfi_instance->cpus);
+	mutex_unlock(&hfi_instance_lock);
+}
+
+static __init int hfi_parse_features(void)
+{
+	unsigned int nr_capabilities;
+	union cpuid6_edx edx;
+
+	if (!boot_cpu_has(X86_FEATURE_HFI))
+		return -ENODEV;
+
+	/*
+	 * If we are here we know that CPUID_HFI_LEAF exists. Parse the
+	 * supported capabilities and the size of the HFI table.
+	 */
+	edx.full = cpuid_edx(CPUID_HFI_LEAF);
+
+	if (!edx.split.capabilities.split.performance) {
+		pr_debug("Performance reporting not supported! Not using HFI\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * The number of supported capabilities determines the number of
+	 * columns in the HFI table. Exclude the reserved bits.
+	 */
+	edx.split.capabilities.split.__reserved = 0;
+	nr_capabilities = hweight8(edx.split.capabilities.bits);
+
+	/* The number of 4KB pages required by the table */
+	hfi_features.nr_table_pages = edx.split.table_pages + 1;
+
+	/*
+	 * The header contains change indications for each supported feature.
+	 * The size of the table header is rounded up to be a multiple of 8
+	 * bytes.
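A worked check of this rounding, which is reused for cpu_stride just below: DIV_ROUND_UP(n, 8) * 8 rounds a byte count up to the next multiple of 8.

	#include <assert.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		/* 2 capabilities -> 2 change-indication bytes, padded to 8 */
		assert(DIV_ROUND_UP(2, 8) * 8 == 8);
		/* a hypothetical 9 capabilities would pad out to 16 */
		assert(DIV_ROUND_UP(9, 8) * 8 == 16);
		return 0;
	}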
+ */ + hfi_features.hdr_size = DIV_ROUND_UP(nr_capabilities, 8) * 8; + + /* + * Data of each logical processor is also rounded up to be a multiple + * of 8 bytes. + */ + hfi_features.cpu_stride = DIV_ROUND_UP(nr_capabilities, 8) * 8; + + return 0; +} + +void __init intel_hfi_init(void) +{ + struct hfi_instance *hfi_instance; + int i, j; + + if (hfi_parse_features()) + return; + + /* There is one HFI instance per die/package. */ + max_hfi_instances = topology_max_packages() * + topology_max_die_per_package(); + + /* + * This allocation may fail. CPU hotplug callbacks must check + * for a null pointer. + */ + hfi_instances = kcalloc(max_hfi_instances, sizeof(*hfi_instances), + GFP_KERNEL); + if (!hfi_instances) + return; + + for (i = 0; i < max_hfi_instances; i++) { + hfi_instance = &hfi_instances[i]; + if (!zalloc_cpumask_var(&hfi_instance->cpus, GFP_KERNEL)) + goto err_nomem; + } + + hfi_updates_wq = create_singlethread_workqueue("hfi-updates"); + if (!hfi_updates_wq) + goto err_nomem; + + return; + +err_nomem: + for (j = 0; j < i; ++j) { + hfi_instance = &hfi_instances[j]; + free_cpumask_var(hfi_instance->cpus); + } + + kfree(hfi_instances); + hfi_instances = NULL; +} diff --git a/drivers/thermal/intel/intel_hfi.h b/drivers/thermal/intel/intel_hfi.h new file mode 100644 index 0000000000000000000000000000000000000000..325aa78b745cf9d233ac888548cd3e6aa3bbfe47 --- /dev/null +++ b/drivers/thermal/intel/intel_hfi.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _INTEL_HFI_H +#define _INTEL_HFI_H + +#if defined(CONFIG_INTEL_HFI_THERMAL) +void __init intel_hfi_init(void); +void intel_hfi_online(unsigned int cpu); +void intel_hfi_offline(unsigned int cpu); +void intel_hfi_process_event(__u64 pkg_therm_status_msr_val); +#else +static inline void intel_hfi_init(void) { } +static inline void intel_hfi_online(unsigned int cpu) { } +static inline void intel_hfi_offline(unsigned int cpu) { } +static inline void intel_hfi_process_event(__u64 pkg_therm_status_msr_val) { } +#endif /* CONFIG_INTEL_HFI_THERMAL */ + +#endif /* _INTEL_HFI_H */ diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/drivers/thermal/intel/therm_throt.c similarity index 96% rename from arch/x86/kernel/cpu/mce/therm_throt.c rename to drivers/thermal/intel/therm_throt.c index a7cd2d203ceda64ebd711be273b97249921f1edf..8cc35a9f3862d8fb187c5530bfc43792eabfa8f9 100644 --- a/arch/x86/kernel/cpu/mce/therm_throt.c +++ b/drivers/thermal/intel/therm_throt.c @@ -26,13 +26,14 @@ #include #include +#include #include #include -#include +#include #include -#include -#include "internal.h" +#include "intel_hfi.h" +#include "thermal_interrupt.h" /* How long to wait between reporting thermal events */ #define CHECK_INTERVAL (300 * HZ) @@ -475,6 +476,13 @@ static int thermal_throttle_online(unsigned int cpu) INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work); INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work); + /* + * The first CPU coming online will enable the HFI. Usually this causes + * hardware to issue an HFI thermal interrupt. Such interrupt will reach + * the CPU once we enable the thermal vector in the local APIC. + */ + intel_hfi_online(cpu); + /* Unmask the thermal vector after the above workqueues are initialized. 
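The intel_hfi_online()/intel_hfi_offline() pair rides on the existing thermal-throttle hotplug callbacks, which are registered further down via cpuhp_setup_state(). For readers unfamiliar with that API, a minimal sketch of the registration pattern, with hypothetical callback names:

	#include <linux/cpuhotplug.h>
	#include <linux/init.h>

	static int my_online(unsigned int cpu)
	{
		/* per-CPU bring-up, e.g. what intel_hfi_online() does */
		return 0;
	}

	static int my_offline(unsigned int cpu)
	{
		/* per-CPU tear-down, e.g. what intel_hfi_offline() does */
		return 0;
	}

	static int __init my_init(void)
	{
		int ret;

		/*
		 * A dynamic AP state: the online callback also runs for CPUs
		 * that are already up at registration time.
		 */
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/therm:online",
					my_online, my_offline);
		return ret < 0 ? ret : 0;
	}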
*/ l = apic_read(APIC_LVTTHMR); apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); @@ -492,6 +500,8 @@ static int thermal_throttle_offline(unsigned int cpu) l = apic_read(APIC_LVTTHMR); apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED); + intel_hfi_offline(cpu); + cancel_delayed_work_sync(&state->package_throttle.therm_work); cancel_delayed_work_sync(&state->core_throttle.therm_work); @@ -509,6 +519,8 @@ static __init int thermal_throttle_init_device(void) if (!atomic_read(&therm_throt_en)) return 0; + intel_hfi_init(); + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/therm:online", thermal_throttle_online, thermal_throttle_offline); @@ -570,7 +582,7 @@ static void notify_thresholds(__u64 msr_val) } /* Thermal transition interrupt handler */ -static void intel_thermal_interrupt(void) +void intel_thermal_interrupt(void) { __u64 msr_val; @@ -603,24 +615,11 @@ static void intel_thermal_interrupt(void) PACKAGE_THERM_STATUS_POWER_LIMIT, POWER_LIMIT_EVENT, PACKAGE_LEVEL); - } -} - -static void unexpected_thermal_interrupt(void) -{ - pr_err("CPU%d: Unexpected LVT thermal interrupt!\n", - smp_processor_id()); -} - -static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; -DEFINE_IDTENTRY_SYSVEC(sysvec_thermal) -{ - trace_thermal_apic_entry(THERMAL_APIC_VECTOR); - inc_irq_stat(irq_thermal_count); - smp_thermal_vector(); - trace_thermal_apic_exit(THERMAL_APIC_VECTOR); - ack_APIC_irq(); + if (this_cpu_has(X86_FEATURE_HFI)) + intel_hfi_process_event(msr_val & + PACKAGE_THERM_STATUS_HFI_UPDATED); + } } /* Thermal monitoring depends on APIC, ACPI and clock modulation */ @@ -633,15 +632,9 @@ static int intel_thermal_supported(struct cpuinfo_x86 *c) return 1; } -void __init mcheck_intel_therm_init(void) +bool x86_thermal_enabled(void) { - /* - * This function is only called on boot CPU. Save the init thermal - * LVT value on BSP and use that value to restore APs' thermal LVT - * entry BIOS programmed later - */ - if (intel_thermal_supported(&boot_cpu_data)) - lvtthmr_init = apic_read(APIC_LVTTHMR); + return atomic_read(&therm_throt_en); } void intel_init_thermal(struct cpuinfo_x86 *c) @@ -653,6 +646,10 @@ void intel_init_thermal(struct cpuinfo_x86 *c) if (!intel_thermal_supported(c)) return; + /* On the BSP? 
*/ + if (c == &boot_cpu_data) + lvtthmr_init = apic_read(APIC_LVTTHMR); + /* * First check if its enabled already, in which case there might * be some SMM goo which handles it, so we can't even put a handler @@ -724,9 +721,13 @@ void intel_init_thermal(struct cpuinfo_x86 *c) wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l | (PACKAGE_THERM_INT_LOW_ENABLE | PACKAGE_THERM_INT_HIGH_ENABLE), h); - } - smp_thermal_vector = intel_thermal_interrupt; + if (cpu_has(c, X86_FEATURE_HFI)) { + rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h); + wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, + l | PACKAGE_THERM_INT_HFI_ENABLE, h); + } + } rdmsr(MSR_IA32_MISC_ENABLE, l, h); wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h); diff --git a/drivers/thermal/intel/thermal_interrupt.h b/drivers/thermal/intel/thermal_interrupt.h new file mode 100644 index 0000000000000000000000000000000000000000..53f427bb58dcea9b0c51bc872e16b21b45040ee8 --- /dev/null +++ b/drivers/thermal/intel/thermal_interrupt.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _INTEL_THERMAL_INTERRUPT_H +#define _INTEL_THERMAL_INTERRUPT_H + +/* Interrupt Handler for package thermal thresholds */ +extern int (*platform_thermal_package_notify)(__u64 msr_val); + +/* Interrupt Handler for core thermal thresholds */ +extern int (*platform_thermal_notify)(__u64 msr_val); + +/* Callback support of rate control, return true, if + * callback has rate control */ +extern bool (*platform_thermal_package_rate_control)(void); + +#endif /* _INTEL_THERMAL_INTERRUPT_H */ diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index 4f5d97329ee3299d0a29b7c827d6c003dea82b1e..e734929b2cd0958e47f2c92c84df8e8109d703c9 100644 --- a/drivers/thermal/intel/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c @@ -17,9 +17,9 @@ #include #include #include -#include -#include +#include +#include "thermal_interrupt.h" /* * Rate control delay: Idea is to introduce denounce effect * This should be long enough to avoid reduce events, when diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c index 1234dbe95895112ccba9364512df7679c4cff499..dc535831b6609204f00b909c4a08a3338aba4a8b 100644 --- a/drivers/thermal/thermal_netlink.c +++ b/drivers/thermal/thermal_netlink.c @@ -43,6 +43,11 @@ static const struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] = [THERMAL_GENL_ATTR_CDEV_MAX_STATE] = { .type = NLA_U32 }, [THERMAL_GENL_ATTR_CDEV_NAME] = { .type = NLA_STRING, .len = THERMAL_NAME_LENGTH }, + /* CPU capabilities */ + [THERMAL_GENL_ATTR_CPU_CAPABILITY] = { .type = NLA_NESTED }, + [THERMAL_GENL_ATTR_CPU_CAPABILITY_ID] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE] = { .type = NLA_U32 }, + [THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY] = { .type = NLA_U32 }, }; struct param { @@ -58,6 +63,8 @@ struct param { int temp; int cdev_state; int cdev_max_state; + struct thermal_genl_cpu_caps *cpu_capabilities; + int cpu_capabilities_count; }; typedef int (*cb_t)(struct param *); @@ -189,6 +196,42 @@ static int thermal_genl_event_gov_change(struct param *p) return 0; } +static int thermal_genl_event_cpu_capability_change(struct param *p) +{ + struct thermal_genl_cpu_caps *cpu_cap = p->cpu_capabilities; + struct sk_buff *msg = p->msg; + struct nlattr *start_cap; + int i; + + start_cap = nla_nest_start(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY); + if (!start_cap) + return -EMSGSIZE; + + for (i = 0; i < p->cpu_capabilities_count; ++i) { + if 
(nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_ID, + cpu_cap->cpu)) + goto out_cancel_nest; + + if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE, + cpu_cap->performance)) + goto out_cancel_nest; + + if (nla_put_u32(msg, THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY, + cpu_cap->efficiency)) + goto out_cancel_nest; + + ++cpu_cap; + } + + nla_nest_end(msg, start_cap); + + return 0; +out_cancel_nest: + nla_nest_cancel(msg, start_cap); + + return -EMSGSIZE; +} + int thermal_genl_event_tz_delete(struct param *p) __attribute__((alias("thermal_genl_event_tz"))); @@ -218,6 +261,7 @@ static cb_t event_cb[] = { [THERMAL_GENL_EVENT_CDEV_DELETE] = thermal_genl_event_cdev_delete, [THERMAL_GENL_EVENT_CDEV_STATE_UPDATE] = thermal_genl_event_cdev_state_update, [THERMAL_GENL_EVENT_TZ_GOV_CHANGE] = thermal_genl_event_gov_change, + [THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE] = thermal_genl_event_cpu_capability_change, }; /* @@ -355,6 +399,15 @@ int thermal_notify_tz_gov_change(int tz_id, const char *name) return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_GOV_CHANGE, &p); } +int thermal_genl_cpu_capability_event(int count, + struct thermal_genl_cpu_caps *caps) +{ + struct param p = { .cpu_capabilities_count = count, .cpu_capabilities = caps }; + + return thermal_genl_send_event(THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, &p); +} +EXPORT_SYMBOL_GPL(thermal_genl_cpu_capability_event); + /*************************** Command encoding ********************************/ static int __thermal_genl_cmd_tz_get_id(struct thermal_zone_device *tz, @@ -418,11 +471,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) for (i = 0; i < tz->trips; i++) { enum thermal_trip_type type; - int temp, hyst; + int temp, hyst = 0; tz->ops->get_trip_type(tz, i, &type); tz->ops->get_trip_temp(tz, i, &temp); - tz->ops->get_trip_hyst(tz, i, &hyst); + if (tz->ops->get_trip_hyst) + tz->ops->get_trip_hyst(tz, i, &hyst); if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) || nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) || diff --git a/drivers/thermal/thermal_netlink.h b/drivers/thermal/thermal_netlink.h index 828d1dddfa98b5677628748542bea40c7d7e71bd..fd4188470d3f84b210cc8498cc6d99cce82e4e14 100644 --- a/drivers/thermal/thermal_netlink.h +++ b/drivers/thermal/thermal_netlink.h @@ -4,6 +4,12 @@ * Author: Daniel Lezcano */ +struct thermal_genl_cpu_caps { + int cpu; + int performance; + int efficiency; +}; + /* Netlink notification function */ #ifdef CONFIG_THERMAL_NETLINK int __init thermal_netlink_init(void); @@ -23,6 +29,8 @@ int thermal_notify_cdev_add(int cdev_id, const char *name, int max_state); int thermal_notify_cdev_delete(int cdev_id); int thermal_notify_tz_gov_change(int tz_id, const char *name); int thermal_genl_sampling_temp(int id, int temp); +int thermal_genl_cpu_capability_event(int count, + struct thermal_genl_cpu_caps *caps); #else static inline int thermal_netlink_init(void) { @@ -101,4 +109,10 @@ static inline int thermal_genl_sampling_temp(int id, int temp) { return 0; } + +static inline int thermal_genl_cpu_capability_event(int count, struct thermal_genl_cpu_caps *caps) +{ + return 0; +} + #endif /* CONFIG_THERMAL_NETLINK */ diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c index b5442f979b4d0170950d9ded3d3dbe9e3066d6da..6355fdf7d71a3eef91b5422401bbe176bfb8e4e0 100644 --- a/drivers/thunderbolt/acpi.c +++ b/drivers/thunderbolt/acpi.c @@ -7,6 +7,7 @@ */ #include +#include #include "tb.h" @@ -74,8 +75,18 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 
level, void *data, pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) { const struct device_link *link; + /* + * Make them both active first to make sure the NHI does + * not runtime suspend before the consumer. The + * pm_runtime_put() below then allows the consumer to + * runtime suspend again (which then allows NHI runtime + * suspend too now that the device link is established). + */ + pm_runtime_get_sync(&pdev->dev); + link = device_link_add(&pdev->dev, &nhi->pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER | + DL_FLAG_RPM_ACTIVE | DL_FLAG_PM_RUNTIME); if (link) { dev_dbg(&nhi->pdev->dev, "created link from %s\n", @@ -84,6 +95,8 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", dev_name(&pdev->dev)); } + + pm_runtime_put(&pdev->dev); } out_put: diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c index 2af1e5751bd6302463a397cedd68bdccb23d8313..796fbff623f6e9fbe0c27290922cd0bebd689ca5 100644 --- a/drivers/tty/hvc/hvc_iucv.c +++ b/drivers/tty/hvc/hvc_iucv.c @@ -1470,7 +1470,9 @@ static int __init hvc_iucv_init(void) */ static int __init hvc_iucv_config(char *val) { - return kstrtoul(val, 10, &hvc_iucv_devices); + if (kstrtoul(val, 10, &hvc_iucv_devices)) + pr_warn("hvc_iucv= invalid parameter value '%s'\n", val); + return 1; } diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 3703987c46661396bbf3c276a4ce295e3157ca0b..8344265a1948bf78acc66f836f79e75cd7d33f93 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c @@ -858,6 +858,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) struct mxser_port *info = container_of(port, struct mxser_port, port); unsigned long page; unsigned long flags; + int ret; page = __get_free_page(GFP_KERNEL); if (!page) @@ -867,9 +868,9 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) if (!info->ioaddr || !info->type) { set_bit(TTY_IO_ERROR, &tty->flags); - free_page(page); spin_unlock_irqrestore(&info->slock, flags); - return 0; + ret = 0; + goto err_free_xmit; } info->port.xmit_buf = (unsigned char *) page; @@ -895,8 +896,10 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) if (capable(CAP_SYS_ADMIN)) { set_bit(TTY_IO_ERROR, &tty->flags); return 0; - } else - return -ENODEV; + } + + ret = -ENODEV; + goto err_free_xmit; } /* @@ -941,6 +944,10 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) spin_unlock_irqrestore(&info->slock, flags); return 0; +err_free_xmit: + free_page(page); + info->port.xmit_buf = NULL; + return ret; } /* diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index d76880ae68c83be740ee77277955ed43986df9af..05562b3cca451a9bf5549a9edb6c3e9530653797 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -317,6 +317,7 @@ static struct tty_driver *gsm_tty_driver; #define GSM1_ESCAPE_BITS 0x20 #define XON 0x11 #define XOFF 0x13 +#define ISO_IEC_646_MASK 0x7F static const struct tty_port_operations gsm_port_ops; @@ -433,7 +434,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) modembits |= MDM_RTR; if (dlci->modem_tx & TIOCM_RI) modembits |= MDM_IC; - if (dlci->modem_tx & TIOCM_CD) + if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) modembits |= MDM_DV; return modembits; } @@ -526,7 +527,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len) int olen = 0; while (len--) { if (*input == GSM1_SOF || *input == GSM1_ESCAPE - || *input == XON || *input == XOFF) { + || (*input & ISO_IEC_646_MASK) 
== XON + || (*input & ISO_IEC_646_MASK) == XOFF) { *output++ = GSM1_ESCAPE; *output++ = *input++ ^ GSM1_ESCAPE_BITS; olen++; @@ -1424,6 +1426,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci) if (dlci->addr != 0) { tty_port_tty_hangup(&dlci->port, false); kfifo_reset(&dlci->fifo); + /* Ensure that gsmtty_open() can return. */ + tty_port_set_initialized(&dlci->port, 0); + wake_up_interruptible(&dlci->port.open_wait); } else dlci->gsm->dead = true; wake_up(&dlci->gsm->event); @@ -1483,7 +1488,7 @@ static void gsm_dlci_t1(struct timer_list *t) dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { - gsm_dlci_close(dlci); + gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; @@ -1717,7 +1722,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci) gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); - tty_hangup(tty); + /* We cannot use tty_hangup() because in tty_kref_put() the tty + * driver assumes that the hangup queue is free and reuses it to + * queue release_one_tty() -> NULL pointer panic in + * process_one_work(). + */ + tty_vhangup(tty); tty_port_tty_set(&dlci->port, NULL); tty_kref_put(tty); @@ -3171,9 +3181,9 @@ static void gsmtty_throttle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx &= ~TIOCM_DTR; + dlci->modem_tx &= ~TIOCM_RTS; dlci->throttled = true; - /* Send an MSC with DTR cleared */ + /* Send an MSC with RTS cleared */ gsmtty_modem_update(dlci, 0); } @@ -3183,9 +3193,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty) if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) - dlci->modem_tx |= TIOCM_DTR; + dlci->modem_tx |= TIOCM_RTS; dlci->throttled = false; - /* Send an MSC with DTR set */ + /* Send an MSC with RTS set */ gsmtty_modem_update(dlci, 0); } diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index e4f4b2186bcecce1467d37c1221ac1353610a882..58190135efb7dd3088f9895cdceb1e01a646d7c8 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1372,7 +1372,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); return 0; } } @@ -1653,7 +1653,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if (read_cnt(ldata)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); } } @@ -2024,7 +2024,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty, return false; canon_head = smp_load_acquire(&ldata->canon_head); - n = min(*nr + 1, canon_head - ldata->read_tail); + n = min(*nr, canon_head - ldata->read_tail); tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); @@ -2046,10 +2046,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty, n += N_TTY_BUF_SIZE; c = n + found; - if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) { - c = min(*nr, c); + if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) n = c; - } n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n", __func__, eol, found, n, c, tail, more); diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index 890fa7ddaa7f36650a1b69a8554e5567fd674f84..b3c3f7e5851aba3a17ccb7f80f18586c5aac4e4c 100644 --- 
a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -64,10 +64,19 @@ int serial8250_tx_dma(struct uart_8250_port *p) struct uart_8250_dma *dma = p->dma; struct circ_buf *xmit = &p->port.state->xmit; struct dma_async_tx_descriptor *desc; + struct uart_port *up = &p->port; int ret; - if (dma->tx_running) + if (dma->tx_running) { + if (up->x_char) { + dmaengine_pause(dma->txchan); + uart_xchar_out(up, UART_TX); + dmaengine_resume(dma->txchan); + } return 0; + } else if (up->x_char) { + uart_xchar_out(up, UART_TX); + } if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) { /* We have been called from __dma_tx_complete() */ diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 673cda3d011d0c64c0c71a19dfa3052447d21c4f..948d0a1c6ae8edc91eefaadb5037f4a311d12509 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c @@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev) unsigned long address; int err; -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC) if (!dev->irq && (dev->id.sversion == 0xad)) dev->irq = iosapic_serial_irq(dev); #endif diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c index 4dee8a9e0c9512ae65f39e8b44e3bf10e861db29..dfb730b7ea2ae7b20ab0e42ff49d4ff78cb302b8 100644 --- a/drivers/tty/serial/8250/8250_lpss.c +++ b/drivers/tty/serial/8250/8250_lpss.c @@ -121,8 +121,7 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) { struct dw_dma_slave *param = &lpss->dma_param; struct pci_dev *pdev = to_pci_dev(port->dev); - unsigned int dma_devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); - struct pci_dev *dma_dev = pci_get_slot(pdev->bus, dma_devfn); + struct pci_dev *dma_dev; switch (pdev->device) { case PCI_DEVICE_ID_INTEL_BYT_UART1: @@ -141,6 +140,8 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) return -EINVAL; } + dma_dev = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0)); + param->dma_dev = &dma_dev->dev; param->m_master = 0; param->p_master = 1; @@ -156,11 +157,26 @@ static int byt_serial_setup(struct lpss8250 *lpss, struct uart_port *port) return 0; } +static void byt_serial_exit(struct lpss8250 *lpss) +{ + struct dw_dma_slave *param = &lpss->dma_param; + + /* Paired with pci_get_slot() in the byt_serial_setup() above */ + put_device(param->dma_dev); +} + static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port) { return 0; } +static void ehl_serial_exit(struct lpss8250 *lpss) +{ + struct uart_8250_port *up = serial8250_get_port(lpss->data.line); + + up->dma = NULL; +} + #ifdef CONFIG_SERIAL_8250_DMA static const struct dw_dma_platform_data qrk_serial_dma_pdata = { .nr_channels = 2, @@ -335,8 +351,7 @@ static int lpss8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_exit: - if (lpss->board->exit) - lpss->board->exit(lpss); + lpss->board->exit(lpss); pci_free_irq_vectors(pdev); return ret; } @@ -347,8 +362,7 @@ static void lpss8250_remove(struct pci_dev *pdev) serial8250_unregister_port(lpss->data.line); - if (lpss->board->exit) - lpss->board->exit(lpss); + lpss->board->exit(lpss); pci_free_irq_vectors(pdev); } @@ -356,12 +370,14 @@ static const struct lpss8250_board byt_board = { .freq = 100000000, .base_baud = 2764800, .setup = byt_serial_setup, + .exit = byt_serial_exit, }; static const struct lpss8250_board ehl_board = { .freq = 200000000, .base_baud = 12500000, .setup = ehl_serial_setup, + 
.exit = ehl_serial_exit, }; static const struct lpss8250_board qrk_board = { diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c index efa0515139f8ec052486a414f614519780ff96d3..e6c1791609ddf339427212a857eddcb1f98d8a10 100644 --- a/drivers/tty/serial/8250/8250_mid.c +++ b/drivers/tty/serial/8250/8250_mid.c @@ -73,6 +73,11 @@ static int pnw_setup(struct mid8250 *mid, struct uart_port *p) return 0; } +static void pnw_exit(struct mid8250 *mid) +{ + pci_dev_put(mid->dma_dev); +} + static int tng_handle_irq(struct uart_port *p) { struct mid8250 *mid = p->private_data; @@ -124,6 +129,11 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p) return 0; } +static void tng_exit(struct mid8250 *mid) +{ + pci_dev_put(mid->dma_dev); +} + static int dnv_handle_irq(struct uart_port *p) { struct mid8250 *mid = p->private_data; @@ -330,9 +340,9 @@ static int mid8250_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, mid); return 0; + err: - if (mid->board->exit) - mid->board->exit(mid); + mid->board->exit(mid); return ret; } @@ -342,8 +352,7 @@ static void mid8250_remove(struct pci_dev *pdev) serial8250_unregister_port(mid->line); - if (mid->board->exit) - mid->board->exit(mid); + mid->board->exit(mid); } static const struct mid8250_board pnw_board = { @@ -351,6 +360,7 @@ static const struct mid8250_board pnw_board = { .freq = 50000000, .base_baud = 115200, .setup = pnw_setup, + .exit = pnw_exit, }; static const struct mid8250_board tng_board = { @@ -358,6 +368,7 @@ static const struct mid8250_board tng_board = { .freq = 38400000, .base_baud = 1843200, .setup = tng_setup, + .exit = tng_exit, }; static const struct mid8250_board dnv_board = { diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c index 65e9045dafe6d16a38260a4ee4dc23dc1026ab28..5595c63c46eaf33bc4fd4d0ef0360a8f0b3b6ea7 100644 --- a/drivers/tty/serial/8250/8250_of.c +++ b/drivers/tty/serial/8250/8250_of.c @@ -83,8 +83,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev, port->mapsize = resource_size(&resource); /* Check for shifted address mapping */ - if (of_property_read_u32(np, "reg-offset", &prop) == 0) + if (of_property_read_u32(np, "reg-offset", &prop) == 0) { + if (prop >= port->mapsize) { + dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n", + prop, &port->mapsize); + ret = -EINVAL; + goto err_unprepare; + } + port->mapbase += prop; + port->mapsize -= prop; + } port->iotype = UPIO_MEM; if (of_property_read_u32(np, "reg-io-width", &prop) == 0) { diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 019328d644d8b5852b05bb9c8a6c0efc204cf6e3..3a985e953b8e904f69da97f163af86010e85c544 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -5171,8 +5171,30 @@ static const struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ pbn_b2_4_115200 }, + /* Brainboxes Devices */ /* - * BrainBoxes UC-260 + * Brainboxes UC-101 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0BA1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-235/246 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0AA1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_1_115200 }, + /* + * Brainboxes UC-257 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0861, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-260/271/701/756 */ { PCI_VENDOR_ID_INTASHIELD, 0x0D21, 
PCI_ANY_ID, PCI_ANY_ID, @@ -5180,7 +5202,81 @@ static const struct pci_device_id serial_pci_tbl[] = { pbn_b2_4_115200 }, { PCI_VENDOR_ID_INTASHIELD, 0x0E34, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-268 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0841, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-275/279 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0881, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_8_115200 }, + /* + * Brainboxes UC-302 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x08E1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-310 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x08C1, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-313 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x08A3, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-320/324 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0A61, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_1_115200 }, + /* + * Brainboxes UC-346 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0B02, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-357 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0A81, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0A83, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + /* + * Brainboxes UC-368 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0C41, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + /* + * Brainboxes UC-420/431 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0921, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_b2_4_115200 }, /* * Perle PCI-RAS cards diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 7c07ebb37b1b9c07ff15a6f5f303ac493f7421dc..3055353514e1dd891f3cbe5278513d77311624bb 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1620,6 +1620,18 @@ static inline void start_tx_rs485(struct uart_port *port) struct uart_8250_port *up = up_to_u8250p(port); struct uart_8250_em485 *em485 = up->em485; + /* + * While serial8250_em485_handle_stop_tx() is a noop if + * em485->active_timer != &em485->stop_tx_timer, it might happen that + * the timer is still armed and triggers only after the current bunch of + * chars is send and em485->active_timer == &em485->stop_tx_timer again. + * So cancel the timer. There is still a theoretical race condition if + * the timer is already running and only comes around to check for + * em485->active_timer when &em485->stop_tx_timer is armed again. 
+ */ + if (em485->active_timer == &em485->stop_tx_timer) + hrtimer_try_to_cancel(&em485->stop_tx_timer); + em485->active_timer = NULL; if (em485->tx_stopped) { @@ -1805,9 +1817,7 @@ void serial8250_tx_chars(struct uart_8250_port *up) int count; if (port->x_char) { - serial_out(up, UART_TX, port->x_char); - port->icount.tx++; - port->x_char = 0; + uart_xchar_out(port, UART_TX); return; } if (uart_tx_stopped(port)) { diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c index 3284f34e9dfe145036d60a16ba63351eb84ed15f..e538d6d75155e4840c420f5bf2a2007dcd8e8891 100644 --- a/drivers/tty/serial/amba-pl010.c +++ b/drivers/tty/serial/amba-pl010.c @@ -448,14 +448,11 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios, if ((termios->c_cflag & CREAD) == 0) uap->port.ignore_status_mask |= UART_DUMMY_RSR_RX; - /* first, disable everything */ old_cr = readb(uap->port.membase + UART010_CR) & ~UART010_CR_MSIE; if (UART_ENABLE_MS(port, termios->c_cflag)) old_cr |= UART010_CR_MSIE; - writel(0, uap->port.membase + UART010_CR); - /* Set baud rate */ quot -= 1; writel((quot & 0xf00) >> 8, uap->port.membase + UART010_LCRM); @@ -754,7 +751,7 @@ static int pl010_probe(struct amba_device *dev, const struct amba_id *id) return ret; } -static int pl010_remove(struct amba_device *dev) +static void pl010_remove(struct amba_device *dev) { struct uart_amba_port *uap = amba_get_drvdata(dev); int i; @@ -770,8 +767,6 @@ static int pl010_remove(struct amba_device *dev) if (!busy) uart_unregister_driver(&amba_reg); - - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 4f2c9378931e8b358c575273e87510231904c529..90b370ee9a34ea8fc7f60d1f6a16d55b6ef35316 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2152,32 +2152,13 @@ static const char *pl011_type(struct uart_port *port) return uap->port.type == PORT_AMBA ? uap->type : NULL; } -/* - * Release the memory region(s) being used by 'port' - */ -static void pl011_release_port(struct uart_port *port) -{ - release_mem_region(port->mapbase, SZ_4K); -} - -/* - * Request the memory region(s) being used by 'port' - */ -static int pl011_request_port(struct uart_port *port) -{ - return request_mem_region(port->mapbase, SZ_4K, "uart-pl011") - != NULL ? 0 : -EBUSY; -} - /* * Configure/autoconfigure the port. 
*/ static void pl011_config_port(struct uart_port *port, int flags) { - if (flags & UART_CONFIG_TYPE) { + if (flags & UART_CONFIG_TYPE) port->type = PORT_AMBA; - pl011_request_port(port); - } } /* @@ -2192,6 +2173,8 @@ static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser) ret = -EINVAL; if (ser->baud_base < 9600) ret = -EINVAL; + if (port->mapbase != (unsigned long) ser->iomem_base) + ret = -EINVAL; return ret; } @@ -2209,8 +2192,6 @@ static const struct uart_ops amba_pl011_pops = { .flush_buffer = pl011_dma_flush_buffer, .set_termios = pl011_set_termios, .type = pl011_type, - .release_port = pl011_release_port, - .request_port = pl011_request_port, .config_port = pl011_config_port, .verify_port = pl011_verify_port, #ifdef CONFIG_CONSOLE_POLL @@ -2240,8 +2221,6 @@ static const struct uart_ops sbsa_uart_pops = { .shutdown = sbsa_uart_shutdown, .set_termios = sbsa_uart_set_termios, .type = pl011_type, - .release_port = pl011_release_port, - .request_port = pl011_request_port, .config_port = pl011_config_port, .verify_port = pl011_verify_port, #ifdef CONFIG_CONSOLE_POLL @@ -2757,13 +2736,12 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id) return pl011_register_port(uap); } -static int pl011_remove(struct amba_device *dev) +static void pl011_remove(struct amba_device *dev) { struct uart_amba_port *uap = amba_get_drvdata(dev); uart_remove_one_port(&amba_reg, &uap->port); pl011_unregister_port(uap); - return 0; } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index a24e5c2b30bc956ab1f828b8fb702125e3259cd5..602065bfc9bb8766e9c16038dd10fabc90f5ae89 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1004,6 +1004,13 @@ static void atmel_tx_dma(struct uart_port *port) desc->callback = atmel_complete_tx_dma; desc->callback_param = atmel_port; atmel_port->cookie_tx = dmaengine_submit(desc); + if (dma_submit_error(atmel_port->cookie_tx)) { + dev_err(port->dev, "dma_submit_error %d\n", + atmel_port->cookie_tx); + return; + } + + dma_async_issue_pending(chan); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) @@ -1264,6 +1271,13 @@ static int atmel_prepare_rx_dma(struct uart_port *port) desc->callback_param = port; atmel_port->desc_rx = desc; atmel_port->cookie_rx = dmaengine_submit(desc); + if (dma_submit_error(atmel_port->cookie_rx)) { + dev_err(port->dev, "dma_submit_error %d\n", + atmel_port->cookie_rx); + goto chan_err; + } + + dma_async_issue_pending(atmel_port->chan_rx); return 0; diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index e40f647985b6240706dd8858b8898c6a3d05ce23..a95ec61f8266f2c5b6442b15e21c1fb1912d14aa 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -508,18 +508,21 @@ static void imx_uart_stop_tx(struct uart_port *port) static void imx_uart_stop_rx(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; - u32 ucr1, ucr2; + u32 ucr1, ucr2, ucr4; ucr1 = imx_uart_readl(sport, UCR1); ucr2 = imx_uart_readl(sport, UCR2); + ucr4 = imx_uart_readl(sport, UCR4); if (sport->dma_is_enabled) { ucr1 &= ~(UCR1_RXDMAEN | UCR1_ATDMAEN); } else { ucr1 &= ~UCR1_RRDYEN; ucr2 &= ~UCR2_ATEN; + ucr4 &= ~UCR4_OREN; } imx_uart_writel(sport, ucr1, UCR1); + imx_uart_writel(sport, ucr4, UCR4); ucr2 &= ~UCR2_RXEN; imx_uart_writel(sport, ucr2, UCR2); @@ -1576,7 +1579,7 @@ static void imx_uart_shutdown(struct uart_port *port) imx_uart_writel(sport, ucr1, UCR1); ucr4 = imx_uart_readl(sport, UCR4); - ucr4 
&= ~(UCR4_OREN | UCR4_TCEN); + ucr4 &= ~UCR4_TCEN; imx_uart_writel(sport, ucr4, UCR4); spin_unlock_irqrestore(&sport->port.lock, flags); diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index 49d0c7f2b29b8f927915327025a84a555a73a7bd..79b7db8580e05c9240e447b83d42137fc7f3a8a9 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -403,16 +403,16 @@ static int kgdboc_option_setup(char *opt) { if (!opt) { pr_err("config string not provided\n"); - return -EINVAL; + return 1; } if (strlen(opt) >= MAX_CONFIG_LEN) { pr_err("config string too long\n"); - return -ENOSPC; + return 1; } strcpy(config, opt); - return 0; + return 1; } __setup("kgdboc=", kgdboc_option_setup); diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c index 8ae3e03fbd8ce625742782950af00eebbf698c64..81faead3c4f80623d0d2a5728695983c4346222f 100644 --- a/drivers/tty/serial/samsung_tty.c +++ b/drivers/tty/serial/samsung_tty.c @@ -883,11 +883,8 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id) goto out; } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) { - spin_unlock(&port->lock); + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); - spin_lock(&port->lock); - } if (uart_circ_empty(xmit)) s3c24xx_serial_stop_tx(port); diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 9adb8362578c5d8c54a5c1a34061e5688eaf727d..04b4ed5d06341f76e96c6f6a7cf7ac2b4f4130ed 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id) static void sc16is7xx_tx_proc(struct kthread_work *ws) { struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port); + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); if ((port->rs485.flags & SER_RS485_ENABLED) && (port->rs485.delay_rts_before_send > 0)) msleep(port->rs485.delay_rts_before_send); + mutex_lock(&s->efr_lock); sc16is7xx_handle_tx(port); + mutex_unlock(&s->efr_lock); } static void sc16is7xx_reconf_rs485(struct uart_port *port) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 046bedca7b8f550f79bca2010bc6768aae6186d6..19f0c5db11e3364c2716c6158f724cf8c2013e05 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -162,7 +162,7 @@ static void uart_port_dtr_rts(struct uart_port *uport, int raise) int RTS_after_send = !!(uport->rs485.flags & SER_RS485_RTS_AFTER_SEND); if (raise) { - if (rs485_on && !RTS_after_send) { + if (rs485_on && RTS_after_send) { uart_set_mctrl(uport, TIOCM_DTR); uart_clear_mctrl(uport, TIOCM_RTS); } else { @@ -171,7 +171,7 @@ static void uart_port_dtr_rts(struct uart_port *uport, int raise) } else { unsigned int clear = TIOCM_DTR; - clear |= (!rs485_on || !RTS_after_send) ? TIOCM_RTS : 0; + clear |= (!rs485_on || RTS_after_send) ? TIOCM_RTS : 0; uart_clear_mctrl(uport, clear); } } @@ -676,6 +676,20 @@ static void uart_flush_buffer(struct tty_struct *tty) tty_port_tty_wakeup(&state->port); } +/* + * This function performs low-level write of high-priority XON/XOFF + * character and accounting for it. + * + * Requires uart_port to implement .serial_out(). 
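The point of hoisting this helper into serial_core is visible in the 8250 hunks above and below: the DMA and PIO transmit paths now share one implementation. A condensed sketch of a driver transmit routine using it (sketch only; UART_TX is the 8250 transmit-holding register offset):

	static void my_tx_chars(struct uart_port *port)
	{
		if (port->x_char) {
			/* writes x_char, increments icount.tx, clears x_char */
			uart_xchar_out(port, UART_TX);
			return;
		}

		/* ...normal circ-buffer draining continues here... */
	}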
+ */ +void uart_xchar_out(struct uart_port *uport, int offset) +{ + serial_port_out(uport, offset, uport->x_char); + uport->icount.tx++; + uport->x_char = 0; +} +EXPORT_SYMBOL_GPL(uart_xchar_out); + /* * This function is used to send a high-priority XON/XOFF character to * the device @@ -2414,7 +2428,8 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, * We probably don't need a spinlock around this, but */ spin_lock_irqsave(&port->lock, flags); - port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR); + port->mctrl &= TIOCM_DTR; + port->ops->set_mctrl(port, port->mctrl); spin_unlock_irqrestore(&port->lock, flags); /* diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 844059861f9e18057001610d9b16df591189ac52..6afae051ba8d117cc5170c03b556db514d60a4a2 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -420,10 +420,22 @@ static void stm32_usart_transmit_chars(struct uart_port *port) struct stm32_port *stm32_port = to_stm32_port(port); const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; struct circ_buf *xmit = &port->state->xmit; + u32 isr; + int ret; if (port->x_char) { if (stm32_port->tx_dma_busy) stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); + + /* Check that TDR is empty before filling FIFO */ + ret = + readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, + isr, + (isr & USART_SR_TXE), + 10, 1000); + if (ret) + dev_warn(port->dev, "1 character may be erased\n"); + writel_relaxed(port->x_char, port->membase + ofs->tdr); port->x_char = 0; port->icount.tx++; @@ -574,7 +586,7 @@ static void stm32_usart_start_tx(struct uart_port *port) struct serial_rs485 *rs485conf = &port->rs485; struct circ_buf *xmit = &port->state->xmit; - if (uart_circ_empty(xmit)) + if (uart_circ_empty(xmit) && !port->x_char) return; if (rs485conf->flags & SER_RS485_ENABLED) { diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index 7081ab322b4028e1284d11391c88741f30470650..48923cd8c07d14d9de69e1ff88476423ea0607f8 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -615,7 +615,7 @@ static struct uart_driver ulite_uart_driver = { * * Returns: 0 on success, <0 otherwise */ -static int ulite_assign(struct device *dev, int id, u32 base, int irq, +static int ulite_assign(struct device *dev, int id, phys_addr_t base, int irq, struct uartlite_data *pdata) { struct uart_port *port; diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index a7ee1171eeb3e2bb56508979dbe9a0bf67d38b36..0a6336d54a650a932b55d5019f27ca1e55fa91a5 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -4625,16 +4625,8 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op) if (op->data && font.charcount > op->charcount) rc = -ENOSPC; - if (!(op->flags & KD_FONT_FLAG_OLD)) { - if (font.width > op->width || font.height > op->height) - rc = -ENOSPC; - } else { - if (font.width != 8) - rc = -EIO; - else if ((op->height && font.height > op->height) || - font.height > 32) - rc = -ENOSPC; - } + if (font.width > op->width || font.height > op->height) + rc = -ENOSPC; if (rc) goto out; @@ -4662,7 +4654,7 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) return -EINVAL; if (op->charcount > 512) return -EINVAL; - if (op->width <= 0 || op->width > 32 || op->height > 32) + if (op->width <= 0 || op->width > 32 || !op->height || op->height > 32) return -EINVAL; size = (op->width+7)/8 * 32 * op->charcount; if (size > max_font_size) @@ -4672,31 
+4664,6 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op) if (IS_ERR(font.data)) return PTR_ERR(font.data); - if (!op->height) { /* Need to guess font height [compat] */ - int h, i; - u8 *charmap = font.data; - - /* - * If from KDFONTOP ioctl, don't allow things which can be done - * in userland,so that we can get rid of this soon - */ - if (!(op->flags & KD_FONT_FLAG_OLD)) { - kfree(font.data); - return -EINVAL; - } - - for (h = 32; h > 0; h--) - for (i = 0; i < op->charcount; i++) - if (charmap[32*i+h-1]) - goto nonzero; - - kfree(font.data); - return -EINVAL; - - nonzero: - op->height = h; - } - font.charcount = op->charcount; font.width = op->width; font.height = op->height; diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 90e4fcd3dc39a932888fa18497b184c2fdc6c0d1..b10b86e2c17e92ad484b95df1a8299ca872f46f1 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -486,70 +486,6 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd, return 0; } -static inline int do_fontx_ioctl(struct vc_data *vc, int cmd, - struct consolefontdesc __user *user_cfd, - struct console_font_op *op) -{ - struct consolefontdesc cfdarg; - int i; - - if (copy_from_user(&cfdarg, user_cfd, sizeof(struct consolefontdesc))) - return -EFAULT; - - switch (cmd) { - case PIO_FONTX: - op->op = KD_FONT_OP_SET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = cfdarg.chardata; - return con_font_op(vc, op); - - case GIO_FONTX: - op->op = KD_FONT_OP_GET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = cfdarg.chardata; - i = con_font_op(vc, op); - if (i) - return i; - cfdarg.charheight = op->height; - cfdarg.charcount = op->charcount; - if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc))) - return -EFAULT; - return 0; - } - return -EINVAL; -} - -static int vt_io_fontreset(struct vc_data *vc, struct console_font_op *op) -{ - int ret; - - if (__is_defined(BROKEN_GRAPHICS_PROGRAMS)) { - /* - * With BROKEN_GRAPHICS_PROGRAMS defined, the default font is - * not saved. 
- */ - return -ENOSYS; - } - - op->op = KD_FONT_OP_SET_DEFAULT; - op->data = NULL; - ret = con_font_op(vc, op); - if (ret) - return ret; - - console_lock(); - con_set_default_unimap(vc); - console_unlock(); - - return 0; -} - static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, bool perm, struct vc_data *vc) { @@ -574,29 +510,7 @@ static inline int do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up, bool perm) { - struct console_font_op op; /* used in multiple places here */ - switch (cmd) { - case PIO_FONT: - if (!perm) - return -EPERM; - op.op = KD_FONT_OP_SET; - op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */ - op.width = 8; - op.height = 0; - op.charcount = 256; - op.data = up; - return con_font_op(vc, &op); - - case GIO_FONT: - op.op = KD_FONT_OP_GET; - op.flags = KD_FONT_FLAG_OLD; - op.width = 8; - op.height = 32; - op.charcount = 256; - op.data = up; - return con_font_op(vc, &op); - case PIO_CMAP: if (!perm) return -EPERM; @@ -605,20 +519,6 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up, case GIO_CMAP: return con_get_cmap(up); - case PIO_FONTX: - if (!perm) - return -EPERM; - - fallthrough; - case GIO_FONTX: - return do_fontx_ioctl(vc, cmd, up, &op); - - case PIO_FONTRESET: - if (!perm) - return -EPERM; - - return vt_io_fontreset(vc, &op); - case PIO_SCRNMAP: if (!perm) return -EPERM; @@ -699,8 +599,8 @@ static int vt_setactivate(struct vt_setactivate __user *sa) if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) return -ENXIO; - vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES + 1); vsa.console--; + vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES); console_lock(); ret = vc_allocate(vsa.console); if (ret) { @@ -945,6 +845,7 @@ int vt_ioctl(struct tty_struct *tty, return -ENXIO; arg--; + arg = array_index_nospec(arg, MAX_NR_CONSOLES); console_lock(); ret = vc_allocate(arg); console_unlock(); @@ -1098,54 +999,6 @@ void vc_SAK(struct work_struct *work) #ifdef CONFIG_COMPAT -struct compat_consolefontdesc { - unsigned short charcount; /* characters in font (256 or 512) */ - unsigned short charheight; /* scan lines per character (1-32) */ - compat_caddr_t chardata; /* font data in expanded form */ -}; - -static inline int -compat_fontx_ioctl(struct vc_data *vc, int cmd, - struct compat_consolefontdesc __user *user_cfd, - int perm, struct console_font_op *op) -{ - struct compat_consolefontdesc cfdarg; - int i; - - if (copy_from_user(&cfdarg, user_cfd, sizeof(struct compat_consolefontdesc))) - return -EFAULT; - - switch (cmd) { - case PIO_FONTX: - if (!perm) - return -EPERM; - op->op = KD_FONT_OP_SET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = compat_ptr(cfdarg.chardata); - return con_font_op(vc, op); - - case GIO_FONTX: - op->op = KD_FONT_OP_GET; - op->flags = KD_FONT_FLAG_OLD; - op->width = 8; - op->height = cfdarg.charheight; - op->charcount = cfdarg.charcount; - op->data = compat_ptr(cfdarg.chardata); - i = con_font_op(vc, op); - if (i) - return i; - cfdarg.charheight = op->height; - cfdarg.charcount = op->charcount; - if (copy_to_user(user_cfd, &cfdarg, sizeof(struct compat_consolefontdesc))) - return -EFAULT; - return 0; - } - return -EINVAL; -} - struct compat_console_font_op { compat_uint_t op; /* operation code KD_FONT_OP_* */ compat_uint_t flags; /* KD_FONT_FLAG_* */ @@ -1222,9 +1075,6 @@ long 
vt_compat_ioctl(struct tty_struct *tty, /* * these need special handlers for incompatible data structures */ - case PIO_FONTX: - case GIO_FONTX: - return compat_fontx_ioctl(vc, cmd, up, perm, &op); case KDFONTOP: return compat_kdfontop_ioctl(up, perm, &op, vc); diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 58274c507353120062531efac0f1b174219e9fb1..49f59d53b4b26d213b21efa62eee468c79a20e0c 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1889,6 +1889,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, struct usbtmc_ctrlrequest request; u8 *buffer = NULL; int rv; + unsigned int is_in, pipe; unsigned long res; res = copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest)); @@ -1898,12 +1899,14 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, if (request.req.wLength > USBTMC_BUFSIZE) return -EMSGSIZE; + is_in = request.req.bRequestType & USB_DIR_IN; + if (request.req.wLength) { buffer = kmalloc(request.req.wLength, GFP_KERNEL); if (!buffer) return -ENOMEM; - if ((request.req.bRequestType & USB_DIR_IN) == 0) { + if (!is_in) { /* Send control data to device */ res = copy_from_user(buffer, request.data, request.req.wLength); @@ -1914,8 +1917,12 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, } } + if (is_in) + pipe = usb_rcvctrlpipe(data->usb_dev, 0); + else + pipe = usb_sndctrlpipe(data->usb_dev, 0); rv = usb_control_msg(data->usb_dev, - usb_rcvctrlpipe(data->usb_dev, 0), + pipe, request.req.bRequest, request.req.bRequestType, request.req.wValue, @@ -1927,7 +1934,7 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data, goto exit; } - if (rv && (request.req.bRequestType & USB_DIR_IN)) { + if (rv && is_in) { /* Read control data from device */ res = copy_to_user(request.data, buffer, rv); if (res) diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index a18d7c4222ddfedb62c3e1605190239601c1a1d5..3c705f1bead8c17acec460896f48f6cf13011274 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver) struct ulpi *ulpi = to_ulpi_dev(dev); const struct ulpi_device_id *id; - /* Some ULPI devices don't have a vendor id so rely on OF match */ - if (ulpi->id.vendor == 0) + /* + * Some ULPI devices don't have a vendor id + * or provide an id_table so rely on OF match. 
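Related, the ulpi_register() hunk a little further below switches the device_register() failure path to put_device(). That follows the driver-core rule: once device_register() (or device_initialize()) has run, the embedded struct device is refcounted and must be released through its release() callback, never kfree()'d directly. The pattern in isolation, with a hypothetical wrapper type:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct my_phy {
		struct device dev;
	};

	static void my_phy_release(struct device *dev)
	{
		kfree(container_of(dev, struct my_phy, dev));
	}

	static int my_phy_register(struct my_phy *phy)
	{
		int ret;

		phy->dev.release = my_phy_release;
		dev_set_name(&phy->dev, "my_phy0");

		ret = device_register(&phy->dev);
		if (ret) {
			/* drops the last ref; my_phy_release() frees phy */
			put_device(&phy->dev);
			return ret;
		}
		return 0;
	}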
+ */ + if (ulpi->id.vendor == 0 || !drv->id_table) return of_driver_match_device(dev, driver); for (id = drv->id_table; id->vendor; id++) @@ -129,6 +132,7 @@ static const struct attribute_group *ulpi_dev_attr_groups[] = { static void ulpi_dev_release(struct device *dev) { + of_node_put(dev->of_node); kfree(to_ulpi_dev(dev)); } @@ -246,12 +250,16 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi) return ret; ret = ulpi_read_id(ulpi); - if (ret) + if (ret) { + of_node_put(ulpi->dev.of_node); return ret; + } ret = device_register(&ulpi->dev); - if (ret) + if (ret) { + put_device(&ulpi->dev); return ret; + } dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n", ulpi->id.vendor, ulpi->id.product); @@ -298,7 +306,6 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface); */ void ulpi_unregister_interface(struct ulpi *ulpi) { - of_node_put(ulpi->dev.of_node); device_unregister(&ulpi->dev); } EXPORT_SYMBOL_GPL(ulpi_unregister_interface); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 99908d8d2dd36d5e9018257883e7d7969385d9ee..ddd1d3eef912b89f8f803c2f94a9a0df74f0f1de 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -754,6 +754,7 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd) { struct urb *urb; int length; + int status; unsigned long flags; char buffer[6]; /* Any root hubs with > 31 ports? */ @@ -771,11 +772,17 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd) if (urb) { clear_bit(HCD_FLAG_POLL_PENDING, &hcd->flags); hcd->status_urb = NULL; + if (urb->transfer_buffer_length >= length) { + status = 0; + } else { + status = -EOVERFLOW; + length = urb->transfer_buffer_length; + } urb->actual_length = length; memcpy(urb->transfer_buffer, buffer, length); usb_hcd_unlink_urb_from_ep(hcd, urb); - usb_hcd_giveback_urb(hcd, urb, 0); + usb_hcd_giveback_urb(hcd, urb, status); } else { length = 0; set_bit(HCD_FLAG_POLL_PENDING, &hcd->flags); @@ -1555,6 +1562,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) urb->hcpriv = NULL; INIT_LIST_HEAD(&urb->urb_list); atomic_dec(&urb->use_count); + /* + * Order the write of urb->use_count above before the read + * of urb->reject below. Pairs with the memory barriers in + * usb_kill_urb() and usb_poison_urb(). + */ + smp_mb__after_atomic(); + atomic_dec(&urb->dev->urbnum); if (atomic_read(&urb->reject)) wake_up(&usb_kill_urb_queue); @@ -1659,6 +1673,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb) usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); + /* + * Order the write of urb->use_count above before the read + * of urb->reject below. Pairs with the memory barriers in + * usb_kill_urb() and usb_poison_urb().
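+ * (Editorial note: together the two sides form the classic + * store-buffering pattern — here we store use_count then load reject, + * while usb_kill_urb()/usb_poison_urb() store reject then load + * use_count; with a full barrier on each side at least one party must + * observe the other's store, so the wakeup cannot be lost by both.)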
+ */ + smp_mb__after_atomic(); + if (unlikely(atomic_read(&urb->reject))) wake_up(&usb_kill_urb_queue); usb_put_urb(urb); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 3f406519da58d883f504163672fd43975e2e019b..18ee3914b46866690ea25bf6e9d1c79f1f8bab25 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1109,7 +1109,10 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) } else { hub_power_on(hub, true); } - } + /* Give some time on remote wakeup to let links transition to U0 */ + } else if (hub_is_superspeed(hub->hdev)) + msleep(20); + init2: /* @@ -1224,7 +1227,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) */ if (portchange || (hub_is_superspeed(hub->hdev) && port_resumed)) - set_bit(port1, hub->change_bits); + set_bit(port1, hub->event_bits); } else if (udev->persist_enabled) { #ifdef CONFIG_PM diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 357b149b20d3a1cafdbb1d36aba1a157984166f0..9c285026f827675d099e7f3c38a39b1d6b07e47e 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -706,6 +706,12 @@ void usb_kill_urb(struct urb *urb) if (!(urb && urb->dev && urb->ep)) return; atomic_inc(&urb->reject); + /* + * Order the write of urb->reject above before the read + * of urb->use_count below. Pairs with the barriers in + * __usb_hcd_giveback_urb() and usb_hcd_submit_urb(). + */ + smp_mb__after_atomic(); usb_hcd_unlink_urb(urb, -ENOENT); wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0); @@ -747,6 +753,12 @@ void usb_poison_urb(struct urb *urb) if (!urb) return; atomic_inc(&urb->reject); + /* + * Order the write of urb->reject above before the read + * of urb->use_count below. Pairs with the barriers in + * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
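+ * (Editorial note: without this barrier the use_count load in the + * wait below could be satisfied before the reject increment is + * visible; a concurrent giveback could then miss reject, skip the + * wake_up(), and leave this thread sleeping on a stale use_count.)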
+ */ + smp_mb__after_atomic(); if (!urb->dev || !urb->ep) return; diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 641e4251cb7f10d501e059532092918686eaf108..03d16a08261d88e9f1ad91660cb363ed11155b65 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -1406,6 +1406,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); #define dwc2_is_device_connected(hsotg) (hsotg->connected) +#define dwc2_is_device_enabled(hsotg) (hsotg->enabled) int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg); int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup); int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg); @@ -1434,6 +1435,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode) { return 0; } #define dwc2_is_device_connected(hsotg) (0) +#define dwc2_is_device_enabled(hsotg) (0) static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) { return 0; } static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c index aa6eb76f64ddc0ea78b39bf53451005fa7bc87a1..36f2c38416e5ec4c2b942c0a49db3571ee9d04fd 100644 --- a/drivers/usb/dwc2/drd.c +++ b/drivers/usb/dwc2/drd.c @@ -109,8 +109,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) already = dwc2_ovr_avalid(hsotg, true); } else if (role == USB_ROLE_DEVICE) { already = dwc2_ovr_bvalid(hsotg, true); - /* This clear DCTL.SFTDISCON bit */ - dwc2_hsotg_core_connect(hsotg); + if (dwc2_is_device_enabled(hsotg)) { + /* This clears the DCTL.SFTDISCON bit */ + dwc2_hsotg_core_connect(hsotg); + } } else { if (dwc2_is_device_mode(hsotg)) { if (!dwc2_ovr_bvalid(hsotg, false)) diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 449f19c3633c23cde59fcb2d55739f34ccf9b17a..ec54971063f8f499d77fe5e1c842b268a68d6f0a 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -5032,7 +5032,7 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg) hsotg->gadget.speed = USB_SPEED_UNKNOWN; spin_unlock_irqrestore(&hsotg->lock, flags); - for (ep = 0; ep < hsotg->num_of_eps; ep++) { + for (ep = 1; ep < hsotg->num_of_eps; ep++) { if (hsotg->eps_in[ep]) dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep); if (hsotg->eps_out[ep]) diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index e196673f5c647cb03150c6007aa10db8ad0408bb..efaf0db595f4619ae4f32237967963626c4ca44b 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -242,7 +242,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap, break; case OMAP_DWC3_ID_FLOAT: - if (omap->vbus_reg) + if (omap->vbus_reg && regulator_is_enabled(omap->vbus_reg)) regulator_disable(omap->vbus_reg); val = dwc3_omap_read_utmi_ctrl(omap); val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG; diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 17117870f6cea1989f9df85b841f5f8b2c671e4d..98df8d52c765c9b4147a3d572907b12d40678bb3 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -81,8 +81,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = { static struct gpiod_lookup_table platform_bytcr_gpios = { .dev_id = "0000:00:16.0", .table = { - GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:00", 54,
"cs", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH), {} }, }; diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 2a29e2f681fe6f0e8285362d41db0ac3cb034927..504f8af4d0f80ba319276d9c8347a6c0d5b2f38c 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -764,9 +764,12 @@ static int dwc3_qcom_probe(struct platform_device *pdev) if (qcom->acpi_pdata->is_urs) { qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev); - if (!qcom->urs_usb) { + if (IS_ERR_OR_NULL(qcom->urs_usb)) { dev_err(dev, "failed to create URS USB platdev\n"); - return -ENODEV; + if (!qcom->urs_usb) + return -ENODEV; + else + return PTR_ERR(qcom->urs_usb); } } } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index e9a87e1f495081f12ef6263c38c53a0bd3c25f27..b68fe48ac57923a66598b39ad0392344a75fbec6 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1072,6 +1072,19 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id); + /* + * As per data book 4.2.3.2TRB Control Bit Rules section + * + * The controller autonomously checks the HWO field of a TRB to determine if the + * entire TRB is valid. Therefore, software must ensure that the rest of the TRB + * is valid before setting the HWO field to '1'. In most systems, this means that + * software must update the fourth DWORD of a TRB last. + * + * However there is a possibility of CPU re-ordering here which can cause + * controller to observe the HWO bit set prematurely. + * Add a write memory barrier to prevent CPU re-ordering. + */ + wmb(); trb->ctrl |= DWC3_TRB_CTRL_HWO; dwc3_ep_inc_enq(dep); @@ -3762,9 +3775,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt) unsigned long flags; irqreturn_t ret = IRQ_NONE; + local_bh_disable(); spin_lock_irqsave(&dwc->lock, flags); ret = dwc3_process_event_buf(evt); spin_unlock_irqrestore(&dwc->lock, flags); + local_bh_enable(); return ret; } diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 8bec0cbf844ed944b90f3338f937cd07400c2208..a980799900e71c69545b799ce143719344b0de94 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1944,6 +1944,9 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) if (w_index != 0x5 || (w_value >> 8)) break; interface = w_value & 0xFF; + if (interface >= MAX_CONFIG_INTERFACES || + !os_desc_cfg->interface[interface]) + break; buf[6] = w_index; count = count_ext_prop(os_desc_cfg, interface); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index cbb7947f366f939b900fc1918622f530933cb3e1..bb0d92837f6770f804310b25faaa2bd7d5453c4a 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -614,7 +614,7 @@ static int ffs_ep0_open(struct inode *inode, struct file *file) file->private_data = ffs; ffs_data_opened(ffs); - return 0; + return stream_open(inode, file); } static int ffs_ep0_release(struct inode *inode, struct file *file) @@ -1152,7 +1152,7 @@ ffs_epfile_open(struct inode *inode, struct file *file) file->private_data = epfile; ffs_data_opened(epfile->ffs); - return 0; + return stream_open(inode, file); } static int ffs_aio_cancel(struct kiocb *kiocb) @@ -1710,16 +1710,24 @@ static void ffs_data_put(struct ffs_data *ffs) static void ffs_data_closed(struct ffs_data *ffs) { + 
struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); if (atomic_dec_and_test(&ffs->opened)) { if (ffs->no_disconnect) { ffs->state = FFS_DEACTIVATED; - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, - ffs->eps_count); - ffs->epfiles = NULL; - } + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, + flags); + + if (epfiles) + ffs_epfiles_destroy(epfiles, + ffs->eps_count); + if (ffs->setup_state == FFS_SETUP_PENDING) __ffs_ep0_stall(ffs); } else { @@ -1766,14 +1774,27 @@ static struct ffs_data *ffs_data_new(const char *dev_name) static void ffs_data_clear(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); ffs_closed(ffs); BUG_ON(ffs->gadget); - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, flags); + + /* + * A race is possible between ffs_func_eps_disable() + * and ffs_epfile_release(); keeping a local copy of + * epfiles protects us from that use-after-free. + */ + if (epfiles) { + ffs_epfiles_destroy(epfiles, ffs->eps_count); ffs->epfiles = NULL; } @@ -1921,12 +1942,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) static void ffs_func_eps_disable(struct ffs_function *func) { - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = func->ffs->epfiles; - unsigned count = func->ffs->eps_count; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); + count = func->ffs->eps_count; + epfile = func->ffs->epfiles; + ep = func->eps; while (count--) { /* pending requests get nuked */ if (likely(ep->ep)) @@ -1944,14 +1968,18 @@ static void ffs_func_eps_disable(struct ffs_function *func) static int ffs_func_eps_enable(struct ffs_function *func) { - struct ffs_data *ffs = func->ffs; - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = ffs->epfiles; - unsigned count = ffs->eps_count; + struct ffs_data *ffs; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); + ffs = func->ffs; + ep = func->eps; + epfile = ffs->epfiles; + count = ffs->eps_count; while(count--) { ep->ep->driver_data = ep; diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 282737e4609ce85cc3ee16b491de22f99f5b65dd..2c65a9bb3c81bc52a5cd073b758b90deba5c714f 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -583,6 +583,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, if (is_iso) { switch (speed) { + case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: size = ss->isoc_maxpacket * (ss->isoc_mult + 1) * diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index dd960cea642f3ae0fc5b5080734138ba481762cf..11cc6056b5902ddbdd28bfc670d176138f5c0177 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -176,7 +176,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = { .bDescriptorSubtype = UAC_INPUT_TERMINAL, /* .bTerminalID = DYNAMIC */ - .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED), + .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE), .bAssocTerminal = 0, /* .bCSourceID
= DYNAMIC */ .iChannelNames = 0, @@ -204,7 +204,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = { .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, /* .bTerminalID = DYNAMIC */ - .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED), + .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER), .bAssocTerminal = 0, /* .bSourceID = DYNAMIC */ /* .bCSourceID = DYNAMIC */ diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 9ea94215e113ade290e0c2720ea709cd2a4a16ab..eef71a12f6f23cec2d41de35b26f659d0548c11b 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c @@ -655,14 +655,18 @@ static int rndis_set_response(struct rndis_params *params, rndis_set_cmplt_type *resp; rndis_resp_t *r; + BufLength = le32_to_cpu(buf->InformationBufferLength); + BufOffset = le32_to_cpu(buf->InformationBufferOffset); + if ((BufLength > RNDIS_MAX_TOTAL_SIZE) || + (BufOffset > RNDIS_MAX_TOTAL_SIZE) || + (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE)) + return -EINVAL; + r = rndis_add_response(params, sizeof(rndis_set_cmplt_type)); if (!r) return -ENOMEM; resp = (rndis_set_cmplt_type *)r->buf; - BufLength = le32_to_cpu(buf->InformationBufferLength); - BufOffset = le32_to_cpu(buf->InformationBufferOffset); - #ifdef VERBOSE_DEBUG pr_debug("%s: Length: %d\n", __func__, BufLength); pr_debug("%s: Offset: %d\n", __func__, BufOffset); @@ -948,6 +952,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v) params->resp_avail = resp_avail; params->v = v; INIT_LIST_HEAD(&params->resp_queue); + spin_lock_init(&params->resp_lock); pr_debug("%s: configNr = %d\n", __func__, i); return params; @@ -1041,12 +1046,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf) { rndis_resp_t *r, *n; + spin_lock(&params->resp_lock); list_for_each_entry_safe(r, n, &params->resp_queue, list) { if (r->buf == buf) { list_del(&r->list); kfree(r); } } + spin_unlock(&params->resp_lock); } EXPORT_SYMBOL_GPL(rndis_free_response); @@ -1056,14 +1063,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length) if (!length) return NULL; + spin_lock(&params->resp_lock); list_for_each_entry_safe(r, n, &params->resp_queue, list) { if (!r->send) { r->send = 1; *length = r->length; + spin_unlock(&params->resp_lock); return r->buf; } } + spin_unlock(&params->resp_lock); return NULL; } EXPORT_SYMBOL_GPL(rndis_get_next_response); @@ -1080,7 +1090,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length) r->length = length; r->send = 0; + spin_lock(&params->resp_lock); list_add_tail(&r->list, &params->resp_queue); + spin_unlock(&params->resp_lock); return r; } diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h index f6167f7fea82b59da4b0eb40c0fd7effc06bec5c..6206b8b7490f64b0e11dcc71aa2d99691d5d1ac3 100644 --- a/drivers/usb/gadget/function/rndis.h +++ b/drivers/usb/gadget/function/rndis.h @@ -174,6 +174,7 @@ typedef struct rndis_params { void (*resp_avail)(void *v); void *v; struct list_head resp_queue; + spinlock_t resp_lock; } rndis_params; /* RNDIS Message parser and other useless functions */ diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 217d2b66fa514ac09488478608b4bf0965ed614d..454860d52ce77f96b3ea047d37e90da6dc289fe0 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1828,8 +1828,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) spin_lock_irq (&dev->lock); value = -EINVAL; if (dev->buf) { +
spin_unlock_irq(&dev->lock); kfree(kbuf); - goto fail; + return value; } dev->buf = kbuf; @@ -1876,8 +1877,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) value = usb_gadget_probe_driver(&gadgetfs_driver); if (value != 0) { - kfree (dev->buf); - dev->buf = NULL; + spin_lock_irq(&dev->lock); + goto fail; } else { /* at this point "good" hardware has for the first time * let the USB the host see us. alternatively, if users @@ -1894,6 +1895,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) return value; fail: + dev->config = NULL; + dev->hs_config = NULL; + dev->dev = NULL; spin_unlock_irq (&dev->lock); pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev); kfree (dev->buf); diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 062dfac3039968d3f09868141aed177f91d8e5d4..33efa6915b91dc0745f1e0d12816c935c2f72822 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -1003,7 +1003,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io, ret = -EBUSY; goto out_unlock; } - if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) { + if (in != usb_endpoint_dir_in(ep->ep->desc)) { dev_dbg(&dev->gadget->dev, "fail, wrong direction\n"); ret = -EINVAL; goto out_unlock; diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index da691a69fec10c0bdf9bbab58656e263cbe32a8f..3a3b5a03dda75a297588642cdb5e9889af387922 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1343,7 +1343,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) usb_gadget_udc_stop(udc); udc->driver = NULL; - udc->dev.driver = NULL; udc->gadget->dev.driver = NULL; } @@ -1405,7 +1404,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri driver->function); udc->driver = driver; - udc->dev.driver = &driver->driver; udc->gadget->dev.driver = &driver->driver; usb_gadget_udc_set_speed(udc, driver->max_speed); @@ -1427,7 +1425,6 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri dev_err(&udc->dev, "failed to start %s: %d\n", udc->driver->function, ret); udc->driver = NULL; - udc->dev.driver = NULL; udc->gadget->dev.driver = NULL; return ret; } diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 57d417a7c3e0a687e9fbc11b604b7bdb93aedc8f..601829a6b4badd7f0fd98805edcb0659f28f4a0f 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -2378,6 +2378,8 @@ static void handle_ext_role_switch_states(struct device *dev, switch (role) { case USB_ROLE_NONE: usb3->connection_state = USB_ROLE_NONE; + if (cur_role == USB_ROLE_HOST) + device_release_driver(host); if (usb3->driver) usb3_disconnect(usb3); usb3_vbus_out(usb3, false); diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index 57ee72fead45a771518233bfbe0e7704f335e9cd..de178bf264c218393399d057242499ebe38dc912 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -32,9 +32,6 @@ #include /* XUSB_DEV registers */ -#define SPARAM 0x000 -#define SPARAM_ERSTMAX_MASK GENMASK(20, 16) -#define SPARAM_ERSTMAX(x) (((x) << 16) & SPARAM_ERSTMAX_MASK) #define DB 0x004 #define DB_TARGET_MASK GENMASK(15, 8) #define DB_TARGET(x) (((x) << 8) & DB_TARGET_MASK) @@ -275,8 +272,10 @@ BUILD_EP_CONTEXT_RW(deq_hi, deq_hi, 0, 0xffffffff) 
BUILD_EP_CONTEXT_RW(avg_trb_len, tx_info, 0, 0xffff) BUILD_EP_CONTEXT_RW(max_esit_payload, tx_info, 16, 0xffff) BUILD_EP_CONTEXT_RW(edtla, rsvd[0], 0, 0xffffff) -BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 24, 0xff) +BUILD_EP_CONTEXT_RW(rsvd, rsvd[0], 24, 0x1) BUILD_EP_CONTEXT_RW(partial_td, rsvd[0], 25, 0x1) +BUILD_EP_CONTEXT_RW(splitxstate, rsvd[0], 26, 0x1) +BUILD_EP_CONTEXT_RW(seq_num, rsvd[0], 27, 0x1f) BUILD_EP_CONTEXT_RW(cerrcnt, rsvd[1], 18, 0x3) BUILD_EP_CONTEXT_RW(data_offset, rsvd[2], 0, 0x1ffff) BUILD_EP_CONTEXT_RW(numtrbs, rsvd[2], 22, 0x1f) @@ -1557,6 +1556,9 @@ static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt) ep_reload(xudc, ep->index); ep_ctx_write_state(ep->context, EP_STATE_RUNNING); + ep_ctx_write_rsvd(ep->context, 0); + ep_ctx_write_partial_td(ep->context, 0); + ep_ctx_write_splitxstate(ep->context, 0); ep_ctx_write_seq_num(ep->context, 0); ep_reload(xudc, ep->index); @@ -2812,7 +2814,10 @@ static void tegra_xudc_reset(struct tegra_xudc *xudc) xudc->setup_seq_num = 0; xudc->queued_setup_packet = false; - ep_ctx_write_seq_num(ep0->context, xudc->setup_seq_num); + ep_ctx_write_rsvd(ep0->context, 0); + ep_ctx_write_partial_td(ep0->context, 0); + ep_ctx_write_splitxstate(ep0->context, 0); + ep_ctx_write_seq_num(ep0->context, 0); deq_ptr = trb_virt_to_phys(ep0, &ep0->transfer_ring[ep0->deq_ptr]); @@ -3295,11 +3300,6 @@ static void tegra_xudc_init_event_ring(struct tegra_xudc *xudc) unsigned int i; u32 val; - val = xudc_readl(xudc, SPARAM); - val &= ~(SPARAM_ERSTMAX_MASK); - val |= SPARAM_ERSTMAX(XUDC_NR_EVENT_RINGS); - xudc_writel(xudc, val, SPARAM); - for (i = 0; i < ARRAY_SIZE(xudc->event_ring); i++) { memset(xudc->event_ring[i], 0, XUDC_EVENT_RING_SIZE * sizeof(*xudc->event_ring[i])); diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c index d5e9d20c097d205054378fb79796296b969d4043..096f56a09e6a2ae53d1ac8d525574766608adee6 100644 --- a/drivers/usb/gadget/udc/udc-xilinx.c +++ b/drivers/usb/gadget/udc/udc-xilinx.c @@ -1612,6 +1612,8 @@ static void xudc_getstatus(struct xusb_udc *udc) break; case USB_RECIP_ENDPOINT: epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (epnum >= XUSB_MAX_ENDPOINTS) + goto stall; target_ep = &udc->ep[epnum]; epcfgreg = udc->read_fn(udc->addr + target_ep->offset); halt = epcfgreg & XUSB_EP_CFG_STALL_MASK; @@ -1679,6 +1681,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc) case USB_RECIP_ENDPOINT: if (!udc->setup.wValue) { endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK; + if (endpoint >= XUSB_MAX_ENDPOINTS) { + xudc_ep0_stall(udc); + return; + } target_ep = &udc->ep[endpoint]; outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK; outinbit = outinbit >> 7; diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index a5e27deda83a793656587a0bf3f8aa50c21acc65..d0d07d30e9128a476003eca5002ba0e1c0706cd0 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -21,6 +21,9 @@ static const char hcd_name[] = "ehci-pci"; /* defined here to avoid adding to pci_ids.h for single instance use */ #define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70 +#define PCI_VENDOR_ID_ASPEED 0x1a03 +#define PCI_DEVICE_ID_ASPEED_EHCI 0x2603 + /*-------------------------------------------------------------------------*/ #define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939 static inline bool is_intel_quark_x1000(struct pci_dev *pdev) @@ -226,6 +229,12 @@ static int ehci_pci_setup(struct usb_hcd *hcd) if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x90) 
ehci->zx_wakeup_clear = 1; break; + case PCI_VENDOR_ID_ASPEED: + if (pdev->device == PCI_DEVICE_ID_ASPEED_EHCI) { + ehci_info(ehci, "applying Aspeed HC workaround\n"); + ehci->is_aspeed = 1; + } + break; } /* optional debug port, normally in the first BAR */ diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c index 70dbd95c3f063a43db2035df59b152e66ea45984..be9e9db7cad1048f75aab2c01eb994a5171a6577 100644 --- a/drivers/usb/host/uhci-platform.c +++ b/drivers/usb/host/uhci-platform.c @@ -113,7 +113,8 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) num_ports); } if (of_device_is_compatible(np, "aspeed,ast2400-uhci") || - of_device_is_compatible(np, "aspeed,ast2500-uhci")) { + of_device_is_compatible(np, "aspeed,ast2500-uhci") || + of_device_is_compatible(np, "aspeed,ast2600-uhci")) { uhci->is_aspeed = 1; dev_info(&pdev->dev, "Enabled Aspeed implementation workarounds\n"); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 71b018e9a5735bbc45ab89c683297f2a163fabe3..460a8a86e3111ffe2d6d5e621c58e80d47759c9f 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -676,7 +676,7 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci) } pm_runtime_allow(xhci_to_hcd(xhci)->self.controller); xhci->test_mode = 0; - return xhci_reset(xhci); + return xhci_reset(xhci, XHCI_RESET_SHORT_USEC); } void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port, @@ -1002,6 +1002,9 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status, if (link_state == XDEV_U2) *status |= USB_PORT_STAT_L1; if (link_state == XDEV_U0) { + if (bus_state->resume_done[portnum]) + usb_hcd_end_port_resume(&port->rhub->hcd->self, + portnum); bus_state->resume_done[portnum] = 0; clear_bit(portnum, &bus_state->resuming_ports); if (bus_state->suspended_ports & (1 << portnum)) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 93f429436e454de07dc99598b8b3a5591132f252..4aab93d5b6514165a8b9df5861a223f33c3d1118 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2603,7 +2603,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) fail: xhci_halt(xhci); - xhci_reset(xhci); + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); xhci_mem_cleanup(xhci); return -ENOMEM; } diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index c1edcc9b13cece07c9dc0e00c27a7f0848f4875c..dc570ce4e8319ad09d6088e3dc2ee557ccba5c78 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -437,6 +437,9 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) struct xhci_hcd *xhci = hcd_to_xhci(hcd); int ret; + if (pm_runtime_suspended(dev)) + pm_runtime_resume(dev); + ret = xhci_priv_suspend_quirk(hcd); if (ret) return ret; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 7f1e5296d0f68aec1731b88d8069d23ae56720fe..cc21f5f9d0ad4295d3ccd43eeaa891ee630d9b23 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -66,7 +66,7 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring) * handshake done). There are two failure modes: "usec" have passed (major * hardware flakeout), or the register reads as all-ones (hardware removed). 
*/ -int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) +int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us) { u32 result; int ret; @@ -74,7 +74,7 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec) ret = readl_poll_timeout_atomic(ptr, result, (result & mask) == done || result == U32_MAX, - 1, usec); + 1, timeout_us); if (result == U32_MAX) /* card removed */ return -ENODEV; @@ -163,7 +163,7 @@ int xhci_start(struct xhci_hcd *xhci) * Transactions will be terminated immediately, and operational registers * will be set to their defaults. */ -int xhci_reset(struct xhci_hcd *xhci) +int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us) { u32 command; u32 state; @@ -196,8 +196,7 @@ int xhci_reset(struct xhci_hcd *xhci) if (xhci->quirks & XHCI_INTEL_HOST) udelay(1000); - ret = xhci_handshake(&xhci->op_regs->command, - CMD_RESET, 0, 10 * 1000 * 1000); + ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us); if (ret) return ret; @@ -210,8 +209,7 @@ int xhci_reset(struct xhci_hcd *xhci) * xHCI cannot write to any doorbells or operational registers other * than status until the "Controller Not Ready" flag is cleared. */ - ret = xhci_handshake(&xhci->op_regs->status, - STS_CNR, 0, 10 * 1000 * 1000); + ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us); xhci->usb2_rhub.bus_state.port_c_suspend = 0; xhci->usb2_rhub.bus_state.suspended_ports = 0; @@ -732,7 +730,7 @@ static void xhci_stop(struct usb_hcd *hcd) xhci->xhc_state |= XHCI_STATE_HALTED; xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; xhci_halt(xhci); - xhci_reset(xhci); + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); spin_unlock_irq(&xhci->lock); xhci_cleanup_msix(xhci); @@ -785,7 +783,7 @@ void xhci_shutdown(struct usb_hcd *hcd) xhci_halt(xhci); /* Workaround for spurious wakeups at shutdown with HSW */ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) - xhci_reset(xhci); + xhci_reset(xhci, XHCI_RESET_SHORT_USEC); spin_unlock_irq(&xhci->lock); xhci_cleanup_msix(xhci); @@ -1091,6 +1089,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) int retval = 0; bool comp_timer_running = false; bool pending_portevent = false; + bool reinit_xhc = false; if (!hcd->state) return 0; @@ -1107,10 +1106,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); spin_lock_irq(&xhci->lock); - if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend) - hibernated = true; - if (!hibernated) { + if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend) + reinit_xhc = true; + + if (!reinit_xhc) { /* * Some controllers might lose power during suspend, so wait * for controller not ready bit to clear, just as in xHC init. 
@@ -1143,12 +1143,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; } - temp = readl(&xhci->op_regs->status); } - /* If restore operation fails, re-initialize the HC during resume */ - if ((temp & STS_SRE) || hibernated) { + temp = readl(&xhci->op_regs->status); + + /* re-initialize the HC on Restore Error, or Host Controller Error */ + if (temp & (STS_SRE | STS_HCE)) { + reinit_xhc = true; + xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp); + } + if (reinit_xhc) { if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !(xhci_all_ports_seen_u0(xhci))) { del_timer_sync(&xhci->comp_mode_recovery_timer); @@ -1163,7 +1168,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) xhci_dbg(xhci, "Stop HCD\n"); xhci_halt(xhci); xhci_zero_64b_regs(xhci); - retval = xhci_reset(xhci); + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); spin_unlock_irq(&xhci->lock); if (retval) return retval; @@ -1480,9 +1485,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag struct urb_priv *urb_priv; int num_tds; - if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, - true, true, __func__) <= 0) + if (!urb) return -EINVAL; + ret = xhci_check_args(hcd, urb->dev, urb->ep, + true, true, __func__); + if (ret <= 0) + return ret ? ret : -EINVAL; slot_id = urb->dev->slot_id; ep_index = xhci_get_endpoint_index(&urb->ep->desc); @@ -3282,7 +3290,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, return -EINVAL; ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); if (ret <= 0) - return -EINVAL; + return ret ? ret : -EINVAL; if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" " descriptor for ep 0x%x does not support streams\n", @@ -5261,10 +5269,10 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) if (XHCI_EXT_PORT_PSIV(xhci->port_caps[j].psi[i]) >= 5) minor_rev = 1; } - if (minor_rev != 1) { - hcd->speed = HCD_USB3; - hcd->self.root_hub->speed = USB_SPEED_SUPER; - } + } + if (minor_rev != 1) { + hcd->speed = HCD_USB3; + hcd->self.root_hub->speed = USB_SPEED_SUPER; } } @@ -5315,7 +5323,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) xhci_dbg(xhci, "Resetting HCD\n"); /* Reset the internal HC memory state and registers. */ - retval = xhci_reset(xhci); + retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); if (retval) return retval; xhci_dbg(xhci, "Reset complete\n"); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 679ef073d99dcee21f6e9160f11635d213808658..1c84d73250d0981814eac93445a7e0bf62985212 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -229,6 +229,9 @@ struct xhci_op_regs { #define CMD_ETE (1 << 14) /* bits 15:31 are reserved (and should be preserved on writes). 
*/ +#define XHCI_RESET_LONG_USEC (10 * 1000 * 1000) +#define XHCI_RESET_SHORT_USEC (250 * 1000) + /* IMAN - Interrupt Management Register */ #define IMAN_IE (1 << 1) #define IMAN_IP (1 << 0) @@ -2070,11 +2073,11 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci, /* xHCI host controller glue */ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); -int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec); +int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us); void xhci_quiesce(struct xhci_hcd *xhci); int xhci_halt(struct xhci_hcd *xhci); int xhci_start(struct xhci_hcd *xhci); -int xhci_reset(struct xhci_hcd *xhci); +int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us); int xhci_run(struct usb_hcd *hcd); int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); void xhci_shutdown(struct usb_hcd *hcd); @@ -2457,6 +2460,8 @@ static inline const char *xhci_decode_ctrl_ctx(char *str, unsigned int bit; int ret = 0; + str[0] = '\0'; + if (drop) { ret = sprintf(str, "Drop:"); for_each_set_bit(bit, &drop, 32) @@ -2614,8 +2619,11 @@ static inline const char *xhci_decode_usbsts(char *str, u32 usbsts) { int ret = 0; + ret = sprintf(str, " 0x%08x", usbsts); + if (usbsts == ~(u32)0) - return " 0xffffffff"; + return str; + if (usbsts & STS_HALT) ret += sprintf(str + ret, " HCHalted"); if (usbsts & STS_FATAL) diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index 8a3d9c0c8d8bc54304b5d46dc801ea1476af64f1..157b31d354ac23b14576f7f00cdb07d5d8827671 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c @@ -202,6 +202,7 @@ static void ftdi_elan_delete(struct kref *kref) mutex_unlock(&ftdi_module_lock); kfree(ftdi->bulk_in_buffer); ftdi->bulk_in_buffer = NULL; + kfree(ftdi); } static void ftdi_elan_put_kref(struct usb_ftdi *ftdi) diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index 4007fa25a8ffae389570b32c470189ef716c8f4c..169251ec8353e592b628840b3dd8d155aad2f65b 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -66,6 +66,7 @@ config USB_SERIAL_SIMPLE - Libtransistor USB console - a number of Motorola phones - Motorola Tetra devices + - Nokia mobile phones - Novatel Wireless GPS receivers - Siemens USB/MPI adapter. - ViVOtech ViVOpay USB device. 
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index f26861246f653073448b9ccf311834d2f0611fe7..a2a38fc76ca53c602b94bba123fd9776d03a7a57 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -81,10 +81,10 @@ #define CH341_QUIRK_SIMULATE_BREAK BIT(1) static const struct usb_device_id id_table[] = { - { USB_DEVICE(0x1a86, 0x5512) }, { USB_DEVICE(0x1a86, 0x5523) }, { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, + { USB_DEVICE(0x2184, 0x0057) }, { USB_DEVICE(0x4348, 0x5523) }, { USB_DEVICE(0x9986, 0x7523) }, { }, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f906c1308f9f9c136599d041b0f6d711b8f844ee..7ac668023da872f844a2e6764aee50f4187c9865 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -55,6 +55,7 @@ static void cp210x_enable_event_mode(struct usb_serial_port *port); static void cp210x_disable_event_mode(struct usb_serial_port *port); static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */ { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */ { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ @@ -72,6 +73,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */ + { USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */ { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index dfcf79bdfddce2effb15b979cbfd5102a270ccba..b74621dc2a65888024a3502515baae552109e59c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -969,6 +969,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) }, @@ -977,12 +978,14 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 
755858ca20bacf7702efc2d42c68b135cbc984f3..d1a9564697a4becf6f32b52461eef0fe36c1bbc0 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1506,6 +1506,9 @@ #define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */ #define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */ #define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */ +#define BRAINBOXES_US_159_PID 0x1021 /* US-159 1xRS232 */ +#define BRAINBOXES_US_235_PID 0x1017 /* US-235 1xRS232 */ +#define BRAINBOXES_US_320_PID 0x1019 /* US-320 1xRS422/485 */ #define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */ #define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */ #define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 21b1488fe446128b56ee835735b230e11c97ac18..b878f4c87fee8e5d1eafc3cf5e46ce785a71e753 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5821E 0x81d7 #define DELL_PRODUCT_5821E_ESIM 0x81e0 +#define DELL_PRODUCT_5829E_ESIM 0x81e4 +#define DELL_PRODUCT_5829E 0x81e6 #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM), .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E), + .driver_info = RSVD(0) | RSVD(6) }, + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM), + .driver_info = RSVD(0) | RSVD(6) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */ + .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */ + .driver_info = NCTRL(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */ + .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), .driver_info = RSVD(1) }, @@ -1649,6 +1661,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff), /* ZTE MF286D */ + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 1bbe18f3f9f11eb21c76da0740d4f17ef8f2b7ec..d736822e95e184485485fd057966a7c114027d64 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -116,6 +116,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) }, { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) }, + { USB_DEVICE(IBM_VENDOR_ID, IBM_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 6097ee8fccb25cdbf6feaa28d49f784c913ca492..c5406452b774ef9dc6cdc986f0aa4ce6ead1b866 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -35,6 +35,9 @@ #define ATEN_PRODUCT_UC232B 0x2022 #define ATEN_PRODUCT_ID2 0x2118 +#define IBM_VENDOR_ID 0x04b3 +#define IBM_PRODUCT_ID 0x4016 + #define IODATA_VENDOR_ID 0x04bb #define IODATA_PRODUCT_ID 0x0a03 #define IODATA_PRODUCT_ID_RSAQ5 0x0a0e diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index bd23a7cb1be2bceaa8422e88bc3cdb5255dd566c..4c6747889a194664a04f90824e417bedec625b3f 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -91,6 +91,11 @@ DEVICE(moto_modem, MOTO_IDS); { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); +/* Nokia mobile phone driver */ +#define NOKIA_IDS() \ + { USB_DEVICE(0x0421, 0x069a) } /* Nokia 130 (RM-1035) */ +DEVICE(nokia, NOKIA_IDS); + /* Novatel Wireless GPS driver */ #define NOVATEL_IDS() \ { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ @@ -123,6 +128,7 @@ static struct usb_serial_driver * const serial_drivers[] = { &vivopay_device, &moto_modem_device, &motorola_tetra_device, + &nokia_device, &novatel_gps_device, &hp4x_device, &suunto_device, @@ -140,6 +146,7 @@ static const struct usb_device_id id_table[] = { VIVOPAY_IDS(), MOTO_IDS(), MOTOROLA_TETRA_IDS(), + NOKIA_IDS(), NOVATEL_IDS(), HP4X_IDS(), SUUNTO_IDS(), diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 98c1aa594e6c4d58e568366b106746c4dbf81599..c9ce1c25c80cc59b61701c868d0d8ff8206e993c 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c @@ -237,36 +237,33 @@ static struct us_unusual_dev ene_ub6250_unusual_dev_list[] = { #define memstick_logaddr(logadr1, logadr0) ((((u16)(logadr1)) << 8) | (logadr0)) -struct SD_STATUS { - u8 Insert:1; - u8 Ready:1; - u8 MediaChange:1; - u8 IsMMC:1; - u8 HiCapacity:1; - u8 HiSpeed:1; - u8 WtP:1; - u8 Reserved:1; -}; - -struct MS_STATUS { - u8 Insert:1; - u8 Ready:1; - u8 MediaChange:1; - u8 IsMSPro:1; - u8 IsMSPHG:1; - u8 Reserved1:1; - u8 WtP:1; - u8 Reserved2:1; -}; - -struct SM_STATUS { - u8 Insert:1; - u8 Ready:1; - u8 MediaChange:1; - u8 Reserved:3; - u8 WtP:1; - u8 IsMS:1; -}; +/* SD_STATUS bits */ +#define SD_Insert BIT(0) +#define SD_Ready BIT(1) +#define SD_MediaChange BIT(2) +#define SD_IsMMC BIT(3) +#define SD_HiCapacity BIT(4) +#define SD_HiSpeed BIT(5) +#define SD_WtP BIT(6) + /* Bit 7 reserved */ + +/* MS_STATUS bits */ +#define MS_Insert BIT(0) +#define MS_Ready BIT(1) +#define MS_MediaChange BIT(2) +#define MS_IsMSPro BIT(3) +#define MS_IsMSPHG BIT(4) + /* Bit 5 reserved */ +#define MS_WtP BIT(6) + /* Bit 7 reserved */ + +/* SM_STATUS bits */ +#define SM_Insert BIT(0) +#define SM_Ready BIT(1) 
+#define SM_MediaChange BIT(2) + /* Bits 3-5 reserved */ +#define SM_WtP BIT(6) +#define SM_IsMS BIT(7) struct ms_bootblock_cis { u8 bCistplDEVICE[6]; /* 0 */ @@ -437,9 +434,9 @@ struct ene_ub6250_info { u8 *bbuf; /* for 6250 code */ - struct SD_STATUS SD_Status; - struct MS_STATUS MS_Status; - struct SM_STATUS SM_Status; + u8 SD_Status; + u8 MS_Status; + u8 SM_Status; /* ----- SD Control Data ---------------- */ /*SD_REGISTER SD_Regs; */ @@ -602,7 +599,7 @@ static int sd_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb) { struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; - if (info->SD_Status.Insert && info->SD_Status.Ready) + if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready)) return USB_STOR_TRANSPORT_GOOD; else { ene_sd_init(us); @@ -622,7 +619,7 @@ static int sd_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb) 0x0b, 0x00, 0x80, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 }; - if (info->SD_Status.WtP) + if (info->SD_Status & SD_WtP) usb_stor_set_xfer_buf(mediaWP, 12, srb); else usb_stor_set_xfer_buf(mediaNoWP, 12, srb); @@ -641,9 +638,9 @@ static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb) struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; usb_stor_dbg(us, "sd_scsi_read_capacity\n"); - if (info->SD_Status.HiCapacity) { + if (info->SD_Status & SD_HiCapacity) { bl_len = 0x200; - if (info->SD_Status.IsMMC) + if (info->SD_Status & SD_IsMMC) bl_num = info->HC_C_SIZE-1; else bl_num = (info->HC_C_SIZE + 1) * 1024 - 1; @@ -693,7 +690,7 @@ static int sd_scsi_read(struct us_data *us, struct scsi_cmnd *srb) return USB_STOR_TRANSPORT_ERROR; } - if (info->SD_Status.HiCapacity) + if (info->SD_Status & SD_HiCapacity) bnByte = bn; /* set up the command wrapper */ @@ -733,7 +730,7 @@ static int sd_scsi_write(struct us_data *us, struct scsi_cmnd *srb) return USB_STOR_TRANSPORT_ERROR; } - if (info->SD_Status.HiCapacity) + if (info->SD_Status & SD_HiCapacity) bnByte = bn; /* set up the command wrapper */ @@ -1455,7 +1452,7 @@ static int ms_scsi_test_unit_ready(struct us_data *us, struct scsi_cmnd *srb) struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); /* pr_info("MS_SCSI_Test_Unit_Ready\n"); */ - if (info->MS_Status.Insert && info->MS_Status.Ready) { + if ((info->MS_Status & MS_Insert) && (info->MS_Status & MS_Ready)) { return USB_STOR_TRANSPORT_GOOD; } else { ene_ms_init(us); @@ -1475,7 +1472,7 @@ static int ms_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb) 0x0b, 0x00, 0x80, 0x08, 0x00, 0x00, 0x71, 0xc0, 0x00, 0x00, 0x02, 0x00 }; - if (info->MS_Status.WtP) + if (info->MS_Status & MS_WtP) usb_stor_set_xfer_buf(mediaWP, 12, srb); else usb_stor_set_xfer_buf(mediaNoWP, 12, srb); @@ -1494,7 +1491,7 @@ static int ms_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb) usb_stor_dbg(us, "ms_scsi_read_capacity\n"); bl_len = 0x200; - if (info->MS_Status.IsMSPro) + if (info->MS_Status & MS_IsMSPro) bl_num = info->MSP_TotalBlock - 1; else bl_num = info->MS_Lib.NumberOfLogBlock * info->MS_Lib.blockSize * 2 - 1; @@ -1649,7 +1646,7 @@ static int ms_scsi_read(struct us_data *us, struct scsi_cmnd *srb) if (bn > info->bl_num) return USB_STOR_TRANSPORT_ERROR; - if (info->MS_Status.IsMSPro) { + if (info->MS_Status & MS_IsMSPro) { result = ene_load_bincode(us, MSP_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { usb_stor_dbg(us, "Load MPS RW pattern Fail !!\n"); @@ -1750,7 +1747,7 @@ static int ms_scsi_write(struct us_data *us, struct scsi_cmnd *srb) if (bn > info->bl_num) 
return USB_STOR_TRANSPORT_ERROR; - if (info->MS_Status.IsMSPro) { + if (info->MS_Status & MS_IsMSPro) { result = ene_load_bincode(us, MSP_RW_PATTERN); if (result != USB_STOR_XFER_GOOD) { pr_info("Load MSP RW pattern Fail !!\n"); @@ -1858,12 +1855,12 @@ static int ene_get_card_status(struct us_data *us, u8 *buf) tmpreg = (u16) reg4b; reg4b = *(u32 *)(&buf[0x14]); - if (info->SD_Status.HiCapacity && !info->SD_Status.IsMMC) + if ((info->SD_Status & SD_HiCapacity) && !(info->SD_Status & SD_IsMMC)) info->HC_C_SIZE = (reg4b >> 8) & 0x3fffff; info->SD_C_SIZE = ((tmpreg & 0x03) << 10) | (u16)(reg4b >> 22); info->SD_C_SIZE_MULT = (u8)(reg4b >> 7) & 0x07; - if (info->SD_Status.HiCapacity && info->SD_Status.IsMMC) + if ((info->SD_Status & SD_HiCapacity) && (info->SD_Status & SD_IsMMC)) info->HC_C_SIZE = *(u32 *)(&buf[0x100]); if (info->SD_READ_BL_LEN > SD_BLOCK_LEN) { @@ -2075,6 +2072,7 @@ static int ene_ms_init(struct us_data *us) u16 MSP_BlockSize, MSP_UserAreaBlocks; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; u8 *bbuf = info->bbuf; + unsigned int s; printk(KERN_INFO "transport --- ENE_MSInit\n"); @@ -2099,15 +2097,16 @@ static int ene_ms_init(struct us_data *us) return USB_STOR_TRANSPORT_ERROR; } /* the same part to test ENE */ - info->MS_Status = *(struct MS_STATUS *) bbuf; - - if (info->MS_Status.Insert && info->MS_Status.Ready) { - printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert); - printk(KERN_INFO "Ready = %x\n", info->MS_Status.Ready); - printk(KERN_INFO "IsMSPro = %x\n", info->MS_Status.IsMSPro); - printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG); - printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP); - if (info->MS_Status.IsMSPro) { + info->MS_Status = bbuf[0]; + + s = info->MS_Status; + if ((s & MS_Insert) && (s & MS_Ready)) { + printk(KERN_INFO "Insert = %x\n", !!(s & MS_Insert)); + printk(KERN_INFO "Ready = %x\n", !!(s & MS_Ready)); + printk(KERN_INFO "IsMSPro = %x\n", !!(s & MS_IsMSPro)); + printk(KERN_INFO "IsMSPHG = %x\n", !!(s & MS_IsMSPHG)); + printk(KERN_INFO "WtP= %x\n", !!(s & MS_WtP)); + if (s & MS_IsMSPro) { MSP_BlockSize = (bbuf[6] << 8) | bbuf[7]; MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11]; info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; @@ -2168,17 +2167,17 @@ static int ene_sd_init(struct us_data *us) return USB_STOR_TRANSPORT_ERROR; } - info->SD_Status = *(struct SD_STATUS *) bbuf; - if (info->SD_Status.Insert && info->SD_Status.Ready) { - struct SD_STATUS *s = &info->SD_Status; + info->SD_Status = bbuf[0]; + if ((info->SD_Status & SD_Insert) && (info->SD_Status & SD_Ready)) { + unsigned int s = info->SD_Status; ene_get_card_status(us, bbuf); - usb_stor_dbg(us, "Insert = %x\n", s->Insert); - usb_stor_dbg(us, "Ready = %x\n", s->Ready); - usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC); - usb_stor_dbg(us, "HiCapacity = %x\n", s->HiCapacity); - usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed); - usb_stor_dbg(us, "WtP = %x\n", s->WtP); + usb_stor_dbg(us, "Insert = %x\n", !!(s & SD_Insert)); + usb_stor_dbg(us, "Ready = %x\n", !!(s & SD_Ready)); + usb_stor_dbg(us, "IsMMC = %x\n", !!(s & SD_IsMMC)); + usb_stor_dbg(us, "HiCapacity = %x\n", !!(s & SD_HiCapacity)); + usb_stor_dbg(us, "HiSpeed = %x\n", !!(s & SD_HiSpeed)); + usb_stor_dbg(us, "WtP = %x\n", !!(s & SD_WtP)); } else { usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]); return USB_STOR_TRANSPORT_ERROR; @@ -2200,14 +2199,14 @@ static int ene_init(struct us_data *us) misc_reg03 = bbuf[0]; if (misc_reg03 & 0x01) { - if (!info->SD_Status.Ready) { + if 
(!(info->SD_Status & SD_Ready)) { result = ene_sd_init(us); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; } } if (misc_reg03 & 0x02) { - if (!info->MS_Status.Ready) { + if (!(info->MS_Status & MS_Ready)) { result = ene_ms_init(us); if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; @@ -2306,14 +2305,14 @@ static int ene_transport(struct scsi_cmnd *srb, struct us_data *us) /*US_DEBUG(usb_stor_show_command(us, srb)); */ scsi_set_resid(srb, 0); - if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) + if (unlikely(!(info->SD_Status & SD_Ready) || (info->MS_Status & MS_Ready))) result = ene_init(us); if (result == USB_STOR_XFER_GOOD) { result = USB_STOR_TRANSPORT_ERROR; - if (info->SD_Status.Ready) + if (info->SD_Status & SD_Ready) result = sd_scsi_irp(us, srb); - if (info->MS_Status.Ready) + if (info->MS_Status & MS_Ready) result = ms_scsi_irp(us, srb); } return result; @@ -2377,7 +2376,6 @@ static int ene_ub6250_probe(struct usb_interface *intf, static int ene_ub6250_resume(struct usb_interface *iface) { - u8 tmp = 0; struct us_data *us = usb_get_intfdata(iface); struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); @@ -2389,17 +2387,16 @@ static int ene_ub6250_resume(struct usb_interface *iface) mutex_unlock(&us->dev_mutex); info->Power_IsResum = true; - /*info->SD_Status.Ready = 0; */ - info->SD_Status = *(struct SD_STATUS *)&tmp; - info->MS_Status = *(struct MS_STATUS *)&tmp; - info->SM_Status = *(struct SM_STATUS *)&tmp; + /* info->SD_Status &= ~SD_Ready; */ + info->SD_Status = 0; + info->MS_Status = 0; + info->SM_Status = 0; return 0; } static int ene_ub6250_reset_resume(struct usb_interface *iface) { - u8 tmp = 0; struct us_data *us = usb_get_intfdata(iface); struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); @@ -2411,10 +2408,10 @@ static int ene_ub6250_reset_resume(struct usb_interface *iface) * the device */ info->Power_IsResum = true; - /*info->SD_Status.Ready = 0; */ - info->SD_Status = *(struct SD_STATUS *)&tmp; - info->MS_Status = *(struct MS_STATUS *)&tmp; - info->SM_Status = *(struct SM_STATUS *)&tmp; + /* info->SD_Status &= ~SD_Ready; */ + info->SD_Status = 0; + info->MS_Status = 0; + info->SM_Status = 0; return 0; } diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c index 3789698d9d3c6431096f0320b280014b6ca4477b..0c423916d7bfa4e325243ea4b0e0b9ce8854a535 100644 --- a/drivers/usb/storage/realtek_cr.c +++ b/drivers/usb/storage/realtek_cr.c @@ -365,7 +365,7 @@ static int rts51x_read_mem(struct us_data *us, u16 addr, u8 *data, u16 len) buf = kmalloc(len, GFP_NOIO); if (buf == NULL) - return USB_STOR_TRANSPORT_ERROR; + return -ENOMEM; usb_stor_dbg(us, "addr = 0x%x, len = %d\n", addr, len); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 29191d33c0e3edf43c4a1be70adc03e902b3ce53..1a05e3dcfec8a10652ec437bc0d80ce38c401d50 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2301,6 +2301,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init, US_FL_SCM_MULT_TARG ), +/* + * Reported by DocMAX + * and Thomas Weißschuh + */ +UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999, + "VIA Labs, Inc.", + "VL817 SATA Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, "ST", "2A", diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 
721d9c4ddc81f9df7de71c93b1309614aac0d3c7..8333c80b5f7c119628c36e11575b3882e52d7507 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -4164,7 +4164,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) case SNK_TRYWAIT_DEBOUNCE: break; case SNK_ATTACH_WAIT: - tcpm_set_state(port, SNK_UNATTACHED, 0); + case SNK_DEBOUNCED: + /* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */ break; case SNK_NEGOTIATE_CAPABILITIES: diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index bff96d64dddffee6857125ad6040f5d67a873847..6db7c8ddd51cd0aac16d89b86e83963fe39af70b 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -325,7 +325,7 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc) if (status < 0) return status; - if (!data) + if (!(data & DEV_INT)) return 0; status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data)); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index fbdc9468818d32a5a44783e4f57804ece5e9be32..577ff786f11b1295b2715685f82b5e4395da0dc4 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -812,8 +812,6 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id); MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size); MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); - if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type)) - MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1); err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); if (err) @@ -1484,11 +1482,25 @@ static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev) return ndev->mvdev.mlx_features; } -static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features) +static int verify_driver_features(struct mlx5_vdpa_dev *mvdev, u64 features) { + /* Minimum features to expect */ if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM))) return -EOPNOTSUPP; + /* Double-check the feature combination sent down by the driver. + * Fail combinations that are missing a feature they depend on. + * + * Per the VIRTIO v1.1 specification, section 5.1.3.1 Feature bit + * requirements: "VIRTIO_NET_F_MQ Requires VIRTIO_NET_F_CTRL_VQ". + * By failing invalid feature sets sent down by untrusted drivers, + * we ensure the assumptions made in is_index_valid() and + * is_ctrl_vq_idx() are not compromised.
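+ * + * For illustration: a driver that acks VIRTIO_NET_F_MQ without + * VIRTIO_NET_F_CTRL_VQ yields + * + * (features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) + * == BIT_ULL(VIRTIO_NET_F_MQ) + * + * which is exactly the combination rejected below.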
+ */ + if ((features & (BIT_ULL(VIRTIO_NET_F_MQ) | BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) == + BIT_ULL(VIRTIO_NET_F_MQ)) + return -EINVAL; + return 0; } @@ -1546,7 +1558,7 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features) print_features(mvdev, features, true); - err = verify_min_features(mvdev, features); + err = verify_driver_features(mvdev, features); if (err) return err; diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c index 9636a2afaecd1b4599473debb5ee7dc7908055b0..3626c21501017e95163572fe8ee5cdb8a40bc75b 100644 --- a/drivers/vfio/platform/vfio_amba.c +++ b/drivers/vfio/platform/vfio_amba.c @@ -71,18 +71,13 @@ static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int vfio_amba_remove(struct amba_device *adev) +static void vfio_amba_remove(struct amba_device *adev) { - struct vfio_platform_device *vdev; - - vdev = vfio_platform_remove_common(&adev->dev); - if (vdev) { - kfree(vdev->name); - kfree(vdev); - return 0; - } + struct vfio_platform_device *vdev = + vfio_platform_remove_common(&adev->dev); - return -EINVAL; + kfree(vdev->name); + kfree(vdev); } static const struct amba_id pl330_ids[] = { diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 5daceec4881110f6c873aaa20dd7a10c692490ce..1422cbb37013713e152301907695a4871a1606fd 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -1096,7 +1096,7 @@ static int vfio_iova_dirty_log_clear(u64 __user *bitmap, unsigned long bitmap_size; unsigned long *bitmap_buffer = NULL; bool clear_valid; - int rs, re, start, end, dma_offset; + unsigned int rs, re, start, end, dma_offset; int ret = 0; bitmap_size = DIRTY_BITMAP_BYTES(size >> pgshift); @@ -1128,7 +1128,7 @@ static int vfio_iova_dirty_log_clear(u64 __user *bitmap, end = (end_iova - iova) >> pgshift; bitmap_for_each_set_region(bitmap_buffer, rs, re, start, end) { clear_valid = true; - riova = iova + (rs << pgshift); + riova = iova + ((unsigned long)rs << pgshift); dma_offset = (riova - dma->iova) >> pgshift; bitmap_clear(dma->bitmap, dma_offset, re - rs); } @@ -2420,7 +2420,7 @@ static void vfio_iommu_update_hwdbm(struct vfio_iommu *iommu, bool num_non_hwdbm_zeroed = false; bool log_enabled, should_enable; - if (old_hwdbm && !new_hwdbm && attach) { + if ((old_hwdbm || singular) && !new_hwdbm && attach) { iommu->num_non_hwdbm_domains++; } else if (!old_hwdbm && new_hwdbm && !attach) { iommu->num_non_hwdbm_domains--; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 666c2d5b667375701b80b118b87bfaa9a1bac454..eba91da505e03757c610e6524d7d85367cbe9c15 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -472,6 +472,7 @@ static void vhost_tx_batch(struct vhost_net *net, goto signal_used; msghdr->msg_control = &ctl; + msghdr->msg_controllen = sizeof(ctl); err = sock->ops->sendmsg(sock, msghdr, 0); if (unlikely(err < 0)) { vq_err(&nvq->vq, "Fail to batch sending packets\n"); @@ -651,8 +652,6 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len) !vhost_vq_avail_empty(vq->dev, vq); } -#define SKB_FRAG_PAGE_ORDER get_order(32768) - static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz, struct page_frag *pfrag, gfp_t gfp) { diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 5cd1ee66d23266931202d3229720905b83a2d318..5d2d6ce7ff41340f2d4ec7d07601d2b6f3fbea84 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -573,16 +573,18 @@ static int 
vhost_vsock_start(struct vhost_vsock *vsock) return ret; } -static int vhost_vsock_stop(struct vhost_vsock *vsock) +static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner) { size_t i; - int ret; + int ret = 0; mutex_lock(&vsock->dev.mutex); - ret = vhost_dev_check_owner(&vsock->dev); - if (ret) - goto err; + if (check_owner) { + ret = vhost_dev_check_owner(&vsock->dev); + if (ret) + goto err; + } for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { struct vhost_virtqueue *vq = &vsock->vqs[i]; @@ -695,9 +697,15 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file) /* Iterating over all connections for all CIDs to find orphans is * inefficient. Room for improvement here. */ - vsock_for_each_connected_socket(vhost_vsock_reset_orphans); + vsock_for_each_connected_socket(&vhost_transport.transport, + vhost_vsock_reset_orphans); - vhost_vsock_stop(vsock); + /* Don't check the owner, because we are in the release path, so we + * need to stop the vsock device in any case. + * vhost_vsock_stop() can not fail in this case, so we don't need to + * check the return code. + */ + vhost_vsock_stop(vsock, false); vhost_vsock_flush(vsock); vhost_dev_stop(&vsock->dev); @@ -801,7 +809,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl, if (start) return vhost_vsock_start(vsock); else - return vhost_vsock_stop(vsock); + return vhost_vsock_stop(vsock, true); case VHOST_GET_FEATURES: features = VHOST_VSOCK_FEATURES; if (copy_to_user(argp, &features, sizeof(features))) diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c index cd11c5776438184c6bc466b1a0632d623940acc4..486d35da0150724e124d477b31153da2911c2fd9 100644 --- a/drivers/video/backlight/qcom-wled.c +++ b/drivers/video/backlight/qcom-wled.c @@ -231,14 +231,14 @@ struct wled { static int wled3_set_brightness(struct wled *wled, u16 brightness) { int rc, i; - u8 v[2]; + __le16 v; - v[0] = brightness & 0xff; - v[1] = (brightness >> 8) & 0xf; + v = cpu_to_le16(brightness & WLED3_SINK_REG_BRIGHT_MAX); for (i = 0; i < wled->cfg.num_strings; ++i) { rc = regmap_bulk_write(wled->regmap, wled->ctrl_addr + - WLED3_SINK_REG_BRIGHT(i), v, 2); + WLED3_SINK_REG_BRIGHT(wled->cfg.enabled_strings[i]), + &v, sizeof(v)); if (rc < 0) return rc; } @@ -250,18 +250,18 @@ static int wled4_set_brightness(struct wled *wled, u16 brightness) { int rc, i; u16 low_limit = wled->max_brightness * 4 / 1000; - u8 v[2]; + __le16 v; /* WLED4's lower limit of operation is 0.4% */ if (brightness > 0 && brightness < low_limit) brightness = low_limit; - v[0] = brightness & 0xff; - v[1] = (brightness >> 8) & 0xf; + v = cpu_to_le16(brightness & WLED3_SINK_REG_BRIGHT_MAX); for (i = 0; i < wled->cfg.num_strings; ++i) { rc = regmap_bulk_write(wled->regmap, wled->sink_addr + - WLED4_SINK_REG_BRIGHT(i), v, 2); + WLED4_SINK_REG_BRIGHT(wled->cfg.enabled_strings[i]), + &v, sizeof(v)); if (rc < 0) return rc; } @@ -273,21 +273,20 @@ static int wled5_set_brightness(struct wled *wled, u16 brightness) { int rc, offset; u16 low_limit = wled->max_brightness * 1 / 1000; - u8 v[2]; + __le16 v; /* WLED5's lower limit is 0.1% */ if (brightness < low_limit) brightness = low_limit; - v[0] = brightness & 0xff; - v[1] = (brightness >> 8) & 0x7f; + v = cpu_to_le16(brightness & WLED5_SINK_REG_BRIGHT_MAX_15B); offset = (wled->cfg.mod_sel == MOD_A) ? 
WLED5_SINK_REG_MOD_A_BRIGHTNESS_LSB : WLED5_SINK_REG_MOD_B_BRIGHTNESS_LSB; rc = regmap_bulk_write(wled->regmap, wled->sink_addr + offset, - v, 2); + &v, sizeof(v)); return rc; } @@ -572,7 +571,7 @@ static irqreturn_t wled_short_irq_handler(int irq, void *_wled) static void wled_auto_string_detection(struct wled *wled) { - int rc = 0, i, delay_time_us; + int rc = 0, i, j, delay_time_us; u32 sink_config = 0; u8 sink_test = 0, sink_valid = 0, val; bool fault_set; @@ -619,14 +618,15 @@ static void wled_auto_string_detection(struct wled *wled) /* Iterate through the strings one by one */ for (i = 0; i < wled->cfg.num_strings; i++) { - sink_test = BIT((WLED4_SINK_REG_CURR_SINK_SHFT + i)); + j = wled->cfg.enabled_strings[i]; + sink_test = BIT((WLED4_SINK_REG_CURR_SINK_SHFT + j)); /* Enable feedback control */ rc = regmap_write(wled->regmap, wled->ctrl_addr + - WLED3_CTRL_REG_FEEDBACK_CONTROL, i + 1); + WLED3_CTRL_REG_FEEDBACK_CONTROL, j + 1); if (rc < 0) { dev_err(wled->dev, "Failed to enable feedback for SINK %d rc = %d\n", - i + 1, rc); + j + 1, rc); goto failed_detect; } @@ -635,7 +635,7 @@ static void wled_auto_string_detection(struct wled *wled) WLED4_SINK_REG_CURR_SINK, sink_test); if (rc < 0) { dev_err(wled->dev, "Failed to configure SINK %d rc=%d\n", - i + 1, rc); + j + 1, rc); goto failed_detect; } @@ -662,7 +662,7 @@ static void wled_auto_string_detection(struct wled *wled) if (fault_set) dev_dbg(wled->dev, "WLED OVP fault detected with SINK %d\n", - i + 1); + j + 1); else sink_valid |= sink_test; @@ -702,15 +702,16 @@ static void wled_auto_string_detection(struct wled *wled) /* Enable valid sinks */ if (wled->version == 4) { for (i = 0; i < wled->cfg.num_strings; i++) { + j = wled->cfg.enabled_strings[i]; if (sink_config & - BIT(WLED4_SINK_REG_CURR_SINK_SHFT + i)) + BIT(WLED4_SINK_REG_CURR_SINK_SHFT + j)) val = WLED4_SINK_REG_STR_MOD_MASK; else /* Disable modulator_en for unused sink */ val = 0; rc = regmap_write(wled->regmap, wled->sink_addr + - WLED4_SINK_REG_STR_MOD_EN(i), val); + WLED4_SINK_REG_STR_MOD_EN(j), val); if (rc < 0) { dev_err(wled->dev, "Failed to configure MODULATOR_EN rc=%d\n", rc); @@ -1256,21 +1257,6 @@ static const struct wled_var_cfg wled5_ovp_cfg = { .size = 16, }; -static u32 wled3_num_strings_values_fn(u32 idx) -{ - return idx + 1; -} - -static const struct wled_var_cfg wled3_num_strings_cfg = { - .fn = wled3_num_strings_values_fn, - .size = 3, -}; - -static const struct wled_var_cfg wled4_num_strings_cfg = { - .fn = wled3_num_strings_values_fn, - .size = 4, -}; - static u32 wled3_switch_freq_values_fn(u32 idx) { return 19200 / (2 * (1 + idx)); @@ -1344,11 +1330,6 @@ static int wled_configure(struct wled *wled) .val_ptr = &cfg->switch_freq, .cfg = &wled3_switch_freq_cfg, }, - { - .name = "qcom,num-strings", - .val_ptr = &cfg->num_strings, - .cfg = &wled3_num_strings_cfg, - }, }; const struct wled_u32_opts wled4_opts[] = { @@ -1372,11 +1353,6 @@ static int wled_configure(struct wled *wled) .val_ptr = &cfg->switch_freq, .cfg = &wled3_switch_freq_cfg, }, - { - .name = "qcom,num-strings", - .val_ptr = &cfg->num_strings, - .cfg = &wled4_num_strings_cfg, - }, }; const struct wled_u32_opts wled5_opts[] = { @@ -1400,11 +1376,6 @@ static int wled_configure(struct wled *wled) .val_ptr = &cfg->switch_freq, .cfg = &wled3_switch_freq_cfg, }, - { - .name = "qcom,num-strings", - .val_ptr = &cfg->num_strings, - .cfg = &wled4_num_strings_cfg, - }, { .name = "qcom,modulator-sel", .val_ptr = &cfg->mod_sel, @@ -1523,16 +1494,57 @@ static int wled_configure(struct wled *wled) 
*bool_opts[i].val_ptr = true; } - cfg->num_strings = cfg->num_strings + 1; - string_len = of_property_count_elems_of_size(dev->of_node, "qcom,enabled-strings", sizeof(u32)); - if (string_len > 0) - of_property_read_u32_array(dev->of_node, + if (string_len > 0) { + if (string_len > wled->max_string_count) { + dev_err(dev, "Cannot have more than %d strings\n", + wled->max_string_count); + return -EINVAL; + } + + rc = of_property_read_u32_array(dev->of_node, "qcom,enabled-strings", wled->cfg.enabled_strings, - sizeof(u32)); + string_len); + if (rc) { + dev_err(dev, "Failed to read %d elements from qcom,enabled-strings: %d\n", + string_len, rc); + return rc; + } + + for (i = 0; i < string_len; ++i) { + if (wled->cfg.enabled_strings[i] >= wled->max_string_count) { + dev_err(dev, + "qcom,enabled-strings index %d at %d is out of bounds\n", + wled->cfg.enabled_strings[i], i); + return -EINVAL; + } + } + + cfg->num_strings = string_len; + } + + rc = of_property_read_u32(dev->of_node, "qcom,num-strings", &val); + if (!rc) { + if (val < 1 || val > wled->max_string_count) { + dev_err(dev, "qcom,num-strings must be between 1 and %d\n", + wled->max_string_count); + return -EINVAL; + } + + if (string_len > 0) { + dev_warn(dev, "Only one of qcom,num-strings or qcom,enabled-strings" " should be set\n"); + if (val > string_len) { + dev_err(dev, "qcom,num-strings exceeds qcom,enabled-strings\n"); + return -EINVAL; + } + } + + cfg->num_strings = val; + } return 0; } diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index ee33b8ec62bb259b2368fa97dc7294c751fbe059..47c4939577725059a905459d818f9c4bad9c0203 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -78,6 +78,26 @@ config FRAMEBUFFER_CONSOLE help Low-level framebuffer-based console driver. +config FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + bool "Enable legacy fbcon hardware acceleration code" + depends on FRAMEBUFFER_CONSOLE + default y if PARISC + default n + help + This option enables the fbcon (framebuffer text-based) hardware + acceleration for graphics drivers which were written for the fbdev + graphics interface. + + On modern, mainstream machines (like x86-64) and on modern Linux + distributions, those fbdev drivers usually aren't used, so enabling + this option has no effect; such machines should leave it disabled. + + If you compile this kernel for older machines that still require the + fbdev drivers, you may want to say Y. + + If unsure, select n.
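The code this option gates appears in the fbcon.c hunks further down; at its core, fbcon picks a scroll method from the driver's FBINFO capability flags. A condensed sketch of that decision (pick_scrollmode() is a hypothetical helper name; FBINFO_* and SCROLL_* are the real fbcon constants used in the hunks below):

	static u16 pick_scrollmode(int cap)
	{
		/* use the hardware blitter only when the driver advertises
		 * copyarea acceleration and it has not been disabled */
		if ((cap & FBINFO_HWACCEL_COPYAREA) &&
		    !(cap & FBINFO_HWACCEL_DISABLED))
			return SCROLL_MOVE;
		return SCROLL_REDRAW;	/* safe software fallback */
	}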
+ config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY bool "Map the console to the primary display device" depends on FRAMEBUFFER_CONSOLE diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index b7682de412d83fd0e64152c5e301c57d26e5a19a..33595cc4778e9f18c83e2a482b55665f1609eaf8 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -925,7 +925,7 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id) return ret; } -static int clcdfb_remove(struct amba_device *dev) +static void clcdfb_remove(struct amba_device *dev) { struct clcd_fb *fb = amba_get_drvdata(dev); @@ -942,8 +942,6 @@ static int clcdfb_remove(struct amba_device *dev) kfree(fb); amba_release_regions(dev); - - return 0; } static const struct amba_id clcdfb_id_table[] = { diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c index f253daa05d9d3872777077692c51e0aa2d9fbcdb..a7a1739cff1bd4fd78585b7548961983be20f093 100644 --- a/drivers/video/fbdev/atafb.c +++ b/drivers/video/fbdev/atafb.c @@ -1691,9 +1691,9 @@ static int falcon_setcolreg(unsigned int regno, unsigned int red, ((blue & 0xfc00) >> 8)); if (regno < 16) { shifter_tt.color_reg[regno] = - (((red & 0xe000) >> 13) | ((red & 0x1000) >> 12) << 8) | - (((green & 0xe000) >> 13) | ((green & 0x1000) >> 12) << 4) | - ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12); + ((((red & 0xe000) >> 13) | ((red & 0x1000) >> 12)) << 8) | + ((((green & 0xe000) >> 13) | ((green & 0x1000) >> 12)) << 4) | + ((blue & 0xe000) >> 13) | ((blue & 0x1000) >> 12); ((u32 *)info->pseudo_palette)[regno] = ((red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11)); @@ -1979,9 +1979,9 @@ static int stste_setcolreg(unsigned int regno, unsigned int red, green >>= 12; if (ATARIHW_PRESENT(EXTD_SHIFTER)) shifter_tt.color_reg[regno] = - (((red & 0xe) >> 1) | ((red & 1) << 3) << 8) | - (((green & 0xe) >> 1) | ((green & 1) << 3) << 4) | - ((blue & 0xe) >> 1) | ((blue & 1) << 3); + ((((red & 0xe) >> 1) | ((red & 1) << 3)) << 8) | + ((((green & 0xe) >> 1) | ((green & 1) << 3)) << 4) | + ((blue & 0xe) >> 1) | ((blue & 1) << 3); else shifter_tt.color_reg[regno] = ((red & 0xe) << 7) | diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 355b6120dc4f0dcd77433ac4c8753ffa5561e8af..1fc8de4ecbebf9e5e2974c3c97cba94d536d73c4 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -1062,15 +1062,16 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev) INIT_LIST_HEAD(&info->modelist); - if (pdev->dev.of_node) { - ret = atmel_lcdfb_of_init(sinfo); - if (ret) - goto free_info; - } else { + if (!pdev->dev.of_node) { dev_err(dev, "cannot get default configuration\n"); goto free_info; } + ret = atmel_lcdfb_of_init(sinfo); + if (ret) + goto free_info; + + ret = -ENODEV; if (!sinfo->config) goto free_info; diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c index 15a9ee7cd734d3fa1147a9cacbe65cb06bf5b3e4..b4980bc2985e3c9cf971b2c1bb0516f84c77e5f2 100644 --- a/drivers/video/fbdev/cirrusfb.c +++ b/drivers/video/fbdev/cirrusfb.c @@ -469,7 +469,7 @@ static int cirrusfb_check_mclk(struct fb_info *info, long freq) return 0; } -static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var, +static int cirrusfb_check_pixclock(struct fb_var_screeninfo *var, struct fb_info *info) { long freq; @@ -478,9 +478,7 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var, unsigned maxclockidx = var->bits_per_pixel >> 3; /* 
convert from ps to kHz */ - freq = PICOS2KHZ(var->pixclock); - - dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq); + freq = PICOS2KHZ(var->pixclock ? : 1); maxclock = cirrusfb_board_info[cinfo->btype].maxclock[maxclockidx]; cinfo->multiplexing = 0; @@ -488,11 +486,13 @@ static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var, /* If the frequency is greater than we can support, we might be able * to use multiplexing for the video mode */ if (freq > maxclock) { - dev_err(info->device, - "Frequency greater than maxclock (%ld kHz)\n", - maxclock); - return -EINVAL; + var->pixclock = KHZ2PICOS(maxclock); + + while ((freq = PICOS2KHZ(var->pixclock)) > maxclock) + var->pixclock++; } + dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq); + /* * Additional constraint: 8bpp uses DAC clock doubling to allow maximum * pixel clock diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c index 2df56bd303d25d125c6a7813ed0bf2787abc1ec9..bd59e7b11ed5305d922b42d33d0dd01c98edb09f 100644 --- a/drivers/video/fbdev/controlfb.c +++ b/drivers/video/fbdev/controlfb.c @@ -64,10 +64,12 @@ #undef in_le32 #undef out_le32 #define in_8(addr) 0 -#define out_8(addr, val) +#define out_8(addr, val) (void)(val) #define in_le32(addr) 0 -#define out_le32(addr, val) +#define out_le32(addr, val) (void)(val) +#ifndef pgprot_cached_wthru #define pgprot_cached_wthru(prot) (prot) +#endif #else static void invalid_vram_cache(void __force *addr) { diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 42c72d051158fa47451187d0450c30a1113c5017..f102519ccefb43be0fb7806e7d0c716c51d1fdb1 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1033,7 +1033,7 @@ static void fbcon_init(struct vc_data *vc, int init) struct vc_data *svc = *default_mode; struct fbcon_display *t, *p = &fb_display[vc->vc_num]; int logo = 1, new_rows, new_cols, rows, cols, charcnt = 256; - int ret; + int cap, ret; if (WARN_ON(info_idx == -1)) return; @@ -1042,6 +1042,7 @@ static void fbcon_init(struct vc_data *vc, int init) con2fb_map[vc->vc_num] = info_idx; info = registered_fb[con2fb_map[vc->vc_num]]; + cap = info->flags; if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET) logo_shown = FBCON_LOGO_DONTSHOW; @@ -1146,13 +1147,13 @@ static void fbcon_init(struct vc_data *vc, int init) ops->graphics = 0; - /* - * No more hw acceleration for fbcon. - * - * FIXME: Garbage collect all the now dead code after sufficient time - * has passed. 
- */ - p->scrollmode = SCROLL_REDRAW; +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + if ((cap & FBINFO_HWACCEL_COPYAREA) && + !(cap & FBINFO_HWACCEL_DISABLED)) + p->scrollmode = SCROLL_MOVE; + else /* default to something safe */ + p->scrollmode = SCROLL_REDRAW; +#endif /* * ++guenther: console.c:vc_allocate() relies on initializing @@ -1718,7 +1719,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, count = vc->vc_rows; if (logo_shown >= 0) goto redraw_up; - switch (p->scrollmode) { + switch (fb_scrollmode(p)) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, t, b - t - count, count); @@ -1808,7 +1809,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, count = vc->vc_rows; if (logo_shown >= 0) goto redraw_down; - switch (p->scrollmode) { + switch (fb_scrollmode(p)) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, -count); @@ -1959,6 +1960,48 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, height, width); } +static void updatescrollmode_accel(struct fbcon_display *p, + struct fb_info *info, + struct vc_data *vc) +{ +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + struct fbcon_ops *ops = info->fbcon_par; + int cap = info->flags; + u16 t = 0; + int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep, + info->fix.xpanstep); + int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t); + int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); + int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, + info->var.xres_virtual); + int good_pan = (cap & FBINFO_HWACCEL_YPAN) && + divides(ypan, vc->vc_font.height) && vyres > yres; + int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) && + divides(ywrap, vc->vc_font.height) && + divides(vc->vc_font.height, vyres) && + divides(vc->vc_font.height, yres); + int reading_fast = cap & FBINFO_READS_FAST; + int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) && + !(cap & FBINFO_HWACCEL_DISABLED); + int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) && + !(cap & FBINFO_HWACCEL_DISABLED); + + if (good_wrap || good_pan) { + if (reading_fast || fast_copyarea) + p->scrollmode = good_wrap ? + SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE; + else + p->scrollmode = good_wrap ? 
SCROLL_REDRAW : + SCROLL_PAN_REDRAW; + } else { + if (reading_fast || (fast_copyarea && !fast_imageblit)) + p->scrollmode = SCROLL_MOVE; + else + p->scrollmode = SCROLL_REDRAW; + } +#endif +} + static void updatescrollmode(struct fbcon_display *p, struct fb_info *info, struct vc_data *vc) @@ -1974,6 +2017,9 @@ static void updatescrollmode(struct fbcon_display *p, p->vrows -= (yres - (fh * vc->vc_rows)) / fh; if ((yres % fh) && (vyres % fh < yres % fh)) p->vrows--; + + /* update scrollmode in case hardware acceleration is used */ + updatescrollmode_accel(p, info, vc); } #define PITCH(w) (((w) + 7) >> 3) @@ -2134,7 +2180,7 @@ static int fbcon_switch(struct vc_data *vc) updatescrollmode(p, info, vc); - switch (p->scrollmode) { + switch (fb_scrollmode(p)) { case SCROLL_WRAP_MOVE: scrollback_phys_max = p->vrows - vc->vc_rows; break; diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 75ef09f52814be5b9258744a8c597fb445ef2d81..d77fb304510051723e9ae35f3737e780cedc0f02 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -29,7 +29,9 @@ struct fbcon_display { /* Filled in by the low-level console driver */ const u_char *fontdata; int userfont; /* != 0 if fontdata kmalloc()ed */ - u_short scrollmode; /* Scroll Method */ +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + u_short scrollmode; /* Scroll Method, use fb_scrollmode() */ +#endif u_short inverse; /* != 0 text black on white as default */ short yscroll; /* Hardware scrolling */ int vrows; /* number of virtual rows */ @@ -208,6 +210,17 @@ static inline int attr_col_ec(int shift, struct vc_data *vc, #define SCROLL_REDRAW 0x004 #define SCROLL_PAN_REDRAW 0x005 +static inline u_short fb_scrollmode(struct fbcon_display *fb) +{ +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + return fb->scrollmode; +#else + /* hardcoded to SCROLL_REDRAW if acceleration was disabled. */ + return SCROLL_REDRAW; +#endif +} + + #ifdef CONFIG_FB_TILEBLITTING extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info); #endif diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index bbd869efd03bc94ae9387d4355eee28a280fb9c2..f75b24c32d497e27fdb6c56b512b68e92349b9a2 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -65,7 +65,7 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); area.sx = sy * vc->vc_font.height; area.sy = vyres - ((sx + width) * vc->vc_font.width); @@ -83,7 +83,7 @@ static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = sy * vc->vc_font.height; @@ -140,7 +140,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); if (!ops->fontbuffer) return; @@ -229,7 +229,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); if (!ops->fontbuffer) return; @@ -387,7 +387,7 @@ static int ccw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; u32 yoffset; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); int err; yoffset = (vyres - info->var.yres) - ops->var.xoffset; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index a34cbe8e98744b12ffee9e45fa834f2d7aab8a4f..cf03dc62f35d37869fd6aac1bb679285527a9f97 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -50,7 +50,7 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); area.sx = vxres - ((sy + height) * vc->vc_font.height); area.sy = sx * vc->vc_font.width; @@ -68,7 +68,7 @@ static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = vxres - ((sy + height) * vc->vc_font.height); @@ -125,7 +125,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -212,7 +212,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -369,7 +369,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, static int cw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); u32 xoffset; int err; diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h index e233444cda664783d055b372929cafd2f4ac2e89..01cbe303b8a29592d3236add1f8b46db85d2b74f 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.h +++ b/drivers/video/fbdev/core/fbcon_rotate.h @@ -12,11 +12,11 @@ #define _FBCON_ROTATE_H #define GETVYRES(s,i) ({ \ - (s == SCROLL_REDRAW || s == SCROLL_MOVE) ? \ + (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE) ? 
\ (i)->var.yres : (i)->var.yres_virtual; }) #define GETVXRES(s,i) ({ \ - (s == SCROLL_REDRAW || s == SCROLL_MOVE || !(i)->fix.xpanstep) ? \ + (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE || !(i)->fix.xpanstep) ? \ (i)->var.xres : (i)->var.xres_virtual; }) diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 199cbc7abe35315d5adb9c79aaa29ca20eedcd32..c5d2da731d68629b2a2fc4ce15fcdc5d654ced21 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -50,8 +50,8 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); area.sy = vyres - ((sy + height) * vc->vc_font.height); area.sx = vxres - ((sx + width) * vc->vc_font.width); @@ -69,8 +69,8 @@ static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dy = vyres - ((sy + height) * vc->vc_font.height); @@ -162,8 +162,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, u32 mod = vc->vc_font.width % 8, cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -259,8 +259,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -410,8 +410,8 @@ static int ud_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; int xoffset, yoffset; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); int err; xoffset = vxres - info->var.xres - ops->var.xoffset; diff --git a/drivers/video/fbdev/core/fbcvt.c b/drivers/video/fbdev/core/fbcvt.c index 55d2bd0ce5c0229cddf48cb0960a59e31d604060..64843464c66135b4c5c083418c8c89ae237f052a 100644 --- a/drivers/video/fbdev/core/fbcvt.c +++ b/drivers/video/fbdev/core/fbcvt.c @@ -214,9 +214,11 @@ static u32 fb_cvt_aspect_ratio(struct fb_cvt_data *cvt) static void fb_cvt_print_name(struct fb_cvt_data *cvt) { u32 pixcount, pixcount_mod; - int cnt = 255, offset = 0, read = 0; - u8 *buf = kzalloc(256, GFP_KERNEL); + int size = 256; + int off = 0; + u8 *buf; + buf = kzalloc(size, GFP_KERNEL); if (!buf) return; @@ -224,43 +226,30 @@ static void fb_cvt_print_name(struct fb_cvt_data *cvt) pixcount_mod = (cvt->xres * (cvt->yres/cvt->interlace)) % 1000000; pixcount_mod /= 1000; - read = snprintf(buf+offset, cnt, "fbcvt: %dx%d@%d: CVT Name - ", - cvt->xres, cvt->yres, cvt->refresh); - offset += read; - cnt -= read; + off += 
scnprintf(buf + off, size - off, "fbcvt: %dx%d@%d: CVT Name - ", + cvt->xres, cvt->yres, cvt->refresh); - if (cvt->status) - snprintf(buf+offset, cnt, "Not a CVT standard - %d.%03d Mega " - "Pixel Image\n", pixcount, pixcount_mod); - else { - if (pixcount) { - read = snprintf(buf+offset, cnt, "%d", pixcount); - cnt -= read; - offset += read; - } + if (cvt->status) { + off += scnprintf(buf + off, size - off, + "Not a CVT standard - %d.%03d Mega Pixel Image\n", + pixcount, pixcount_mod); + } else { + if (pixcount) + off += scnprintf(buf + off, size - off, "%d", pixcount); - read = snprintf(buf+offset, cnt, ".%03dM", pixcount_mod); - cnt -= read; - offset += read; + off += scnprintf(buf + off, size - off, ".%03dM", pixcount_mod); if (cvt->aspect_ratio == 0) - read = snprintf(buf+offset, cnt, "3"); + off += scnprintf(buf + off, size - off, "3"); else if (cvt->aspect_ratio == 3) - read = snprintf(buf+offset, cnt, "4"); + off += scnprintf(buf + off, size - off, "4"); else if (cvt->aspect_ratio == 1 || cvt->aspect_ratio == 4) - read = snprintf(buf+offset, cnt, "9"); + off += scnprintf(buf + off, size - off, "9"); else if (cvt->aspect_ratio == 2) - read = snprintf(buf+offset, cnt, "A"); - else - read = 0; - cnt -= read; - offset += read; - - if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) { - read = snprintf(buf+offset, cnt, "-R"); - cnt -= read; - offset += read; - } + off += scnprintf(buf + off, size - off, "A"); + + if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) + off += scnprintf(buf + off, size - off, "-R"); } printk(KERN_INFO "%s\n", buf); diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 4dc9077dd2ac04d07401e210b13fcb052b0f6ab1..3c309ab20887473d3f7f1b97fc1fbf62d9349856 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -286,8 +286,6 @@ struct hvfb_par { static uint screen_width = HVFB_WIDTH; static uint screen_height = HVFB_HEIGHT; -static uint screen_width_max = HVFB_WIDTH; -static uint screen_height_max = HVFB_HEIGHT; static uint screen_depth; static uint screen_fb_size; static uint dio_fb_size; /* FB size for deferred IO */ @@ -581,7 +579,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev) int ret = 0; unsigned long t; u8 index; - int i; memset(msg, 0, sizeof(struct synthvid_msg)); msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST; @@ -612,13 +609,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev) goto out; } - for (i = 0; i < msg->resolution_resp.resolution_count; i++) { - screen_width_max = max_t(unsigned int, screen_width_max, - msg->resolution_resp.supported_resolution[i].width); - screen_height_max = max_t(unsigned int, screen_height_max, - msg->resolution_resp.supported_resolution[i].height); - } - screen_width = msg->resolution_resp.supported_resolution[index].width; screen_height = @@ -940,7 +930,7 @@ static void hvfb_get_option(struct fb_info *info) if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN || (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) && - (x > screen_width_max || y > screen_height_max)) || + (x * y * screen_depth / 8 > screen_fb_size)) || (par->synthvid_version == SYNTHVID_VERSION_WIN8 && x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) || (par->synthvid_version == SYNTHVID_VERSION_WIN7 && @@ -1193,8 +1183,8 @@ static int hvfb_probe(struct hv_device *hdev, } hvfb_get_option(info); - pr_info("Screen resolution: %dx%d, Color depth: %d\n", - screen_width, screen_height, screen_depth); + pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer 
size: %d\n", + screen_width, screen_height, screen_depth, screen_fb_size); ret = hvfb_getmem(hdev, info); if (ret) { diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index 570439b326552a674e89a59cce9abce202f7856a..daaa99818d3b7532e8d31c8cce299907dcfd9dba 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -1377,7 +1377,7 @@ static struct video_board vbG200 = { .lowlevel = &matrox_G100 }; static struct video_board vbG200eW = { - .maxvram = 0x800000, + .maxvram = 0x100000, .maxdisplayable = 0x800000, .accelID = FB_ACCEL_MATROX_MGAG200, .lowlevel = &matrox_G100 diff --git a/drivers/video/fbdev/nvidia/nv_i2c.c b/drivers/video/fbdev/nvidia/nv_i2c.c index d7994a1732459d831775374d61f135ecaf5e06c5..0b48965a6420c2a2a27928a55a22aee3cc1d57e2 100644 --- a/drivers/video/fbdev/nvidia/nv_i2c.c +++ b/drivers/video/fbdev/nvidia/nv_i2c.c @@ -86,7 +86,7 @@ static int nvidia_setup_i2c_bus(struct nvidia_i2c_chan *chan, const char *name, { int rc; - strcpy(chan->adapter.name, name); + strscpy(chan->adapter.name, name, sizeof(chan->adapter.name)); chan->adapter.owner = THIS_MODULE; chan->adapter.class = i2c_class; chan->adapter.algo_data = &chan->algo; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c index b4a1aefff7661da1650c9ba230c4dcb5dd96dc89..777f6d66c28c3d5ba69113740fad7723e6a53a07 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c @@ -251,6 +251,7 @@ static int dvic_probe_of(struct platform_device *pdev) adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); if (adapter_node) { adapter = of_get_i2c_adapter_by_node(adapter_node); + of_node_put(adapter_node); if (adapter == NULL) { dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); omap_dss_put_device(ddata->in); diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 4b0793abdd84b4509afea88329230c56261e1d80..a2c7c5cb1523460508b439dbfde2e0efa170d158 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -409,7 +409,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%d\n", errors); + return sysfs_emit(buf, "%d\n", errors); } static ssize_t dsicm_hw_revision_show(struct device *dev, @@ -439,7 +439,7 @@ static ssize_t dsicm_hw_revision_show(struct device *dev, if (r) return r; - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3); + return sysfs_emit(buf, "%02x.%02x.%02x\n", id1, id2, id3); } static ssize_t dsicm_store_ulps(struct device *dev, @@ -487,7 +487,7 @@ static ssize_t dsicm_show_ulps(struct device *dev, t = ddata->ulps_enabled; mutex_unlock(&ddata->lock); - return snprintf(buf, PAGE_SIZE, "%u\n", t); + return sysfs_emit(buf, "%u\n", t); } static ssize_t dsicm_store_ulps_timeout(struct device *dev, @@ -532,7 +532,7 @@ static ssize_t dsicm_show_ulps_timeout(struct device *dev, t = ddata->ulps_timeout; mutex_unlock(&ddata->lock); - return snprintf(buf, PAGE_SIZE, "%u\n", t); + return sysfs_emit(buf, "%u\n", t); } static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL); diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c index 
1293515e4b1692e1917411c5eaf6159581454825..0cbc5b9183f895c4a1d994cc2ca35731b8d364fe 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c @@ -476,7 +476,7 @@ static ssize_t show_cabc_available_modes(struct device *dev, int i; if (!ddata->has_cabc) - return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]); + return sysfs_emit(buf, "%s\n", cabc_modes[0]); for (i = 0, len = 0; len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++) diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c index bb85b21f072487239f87005191f8c3fa117a38ac..9f6ef9e04d9ce41a57b2e1791183390a008599a0 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c @@ -169,7 +169,7 @@ static ssize_t tpo_td043_vmirror_show(struct device *dev, { struct panel_drv_data *ddata = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", ddata->vmirror); + return sysfs_emit(buf, "%d\n", ddata->vmirror); } static ssize_t tpo_td043_vmirror_store(struct device *dev, @@ -199,7 +199,7 @@ static ssize_t tpo_td043_mode_show(struct device *dev, { struct panel_drv_data *ddata = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", ddata->mode); + return sysfs_emit(buf, "%d\n", ddata->mode); } static ssize_t tpo_td043_mode_store(struct device *dev, diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c index 0dbc6bf8268acf5fa0528f28f047372cbd4fd5e8..092a1caa1208e191b016a0ec2097208e5e2d960f 100644 --- a/drivers/video/fbdev/sm712fb.c +++ b/drivers/video/fbdev/sm712fb.c @@ -1047,7 +1047,7 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, if (count + p > total_size) count = total_size - p; - buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL); + buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -1059,25 +1059,14 @@ static ssize_t smtcfb_read(struct fb_info *info, char __user *buf, while (count) { c = (count > PAGE_SIZE) ? PAGE_SIZE : count; dst = buffer; - for (i = c >> 2; i--;) { - *dst = fb_readl(src++); - *dst = big_swap(*dst); + for (i = (c + 3) >> 2; i--;) { + u32 val; + + val = fb_readl(src); + *dst = big_swap(val); + src++; dst++; } - if (c & 3) { - u8 *dst8 = (u8 *)dst; - u8 __iomem *src8 = (u8 __iomem *)src; - - for (i = c & 3; i--;) { - if (i & 1) { - *dst8++ = fb_readb(++src8); - } else { - *dst8++ = fb_readb(--src8); - src8 += 2; - } - } - src = (u32 __iomem *)src8; - } if (copy_to_user(buf, buffer, c)) { err = -EFAULT; @@ -1130,7 +1119,7 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf, count = total_size - p; } - buffer = kmalloc((count > PAGE_SIZE) ? 
PAGE_SIZE : count, GFP_KERNEL); + buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buffer) return -ENOMEM; @@ -1148,24 +1137,11 @@ static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf, break; } - for (i = c >> 2; i--;) { - fb_writel(big_swap(*src), dst++); + for (i = (c + 3) >> 2; i--;) { + fb_writel(big_swap(*src), dst); + dst++; src++; } - if (c & 3) { - u8 *src8 = (u8 *)src; - u8 __iomem *dst8 = (u8 __iomem *)dst; - - for (i = c & 3; i--;) { - if (i & 1) { - fb_writeb(*src8++, ++dst8); - } else { - fb_writeb(*src8++, --dst8); - dst8 += 2; - } - } - dst = (u32 __iomem *)dst8; - } *ppos += c; buf += c; diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index bfac3ee4a64228160adc5d11e85e66e7b7bcf9a8..28768c272b73d3e57ff4212dfe44f9c81d4af56a 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c @@ -1656,6 +1656,7 @@ static int ufx_usb_probe(struct usb_interface *interface, info->par = dev; info->pseudo_palette = dev->pseudo_palette; info->fbops = &ufx_ops; + INIT_LIST_HEAD(&info->modelist); retval = fb_alloc_cmap(&info->cmap, 256, 0); if (retval < 0) { @@ -1666,8 +1667,6 @@ static int ufx_usb_probe(struct usb_interface *interface, INIT_DELAYED_WORK(&dev->free_framebuffer_work, ufx_free_framebuffer_work); - INIT_LIST_HEAD(&info->modelist); - retval = ufx_reg_read(dev, 0x3000, &id_rev); check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval); dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev); diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index b9cdd02c100095d99ef929571115f82c28290f29..90f48b71fd8f790c23a0f7d109a286a8e515b542 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1426,7 +1426,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev, struct device_attribute *a, char *buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->bytes_rendered)); } @@ -1434,7 +1434,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev, struct device_attribute *a, char *buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->bytes_identical)); } @@ -1442,7 +1442,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev, struct device_attribute *a, char *buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->bytes_sent)); } @@ -1450,7 +1450,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev, struct device_attribute *a, char *buf) { struct fb_info *fb_info = dev_get_drvdata(fbdev); struct dlfb_data *dlfb = fb_info->par; - return snprintf(buf, PAGE_SIZE, "%u\n", + return sysfs_emit(buf, "%u\n", atomic_read(&dlfb->cpu_kcycles_used)); } diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c index 1e8a38a7967d890a9e44fb245e2c6ce11f7b0f5c..5c6e9dc88060ba30187bb5727a1c519c32d8d09e 100644 --- a/drivers/video/fbdev/vga16fb.c +++ b/drivers/video/fbdev/vga16fb.c @@ -184,6 +184,25 @@ static inline void setindex(int index) vga_io_w(VGA_GFX_I, index); } +/* Check if the video mode is supported by the driver */ +static inline int check_mode_supported(void) +{ + /* non-x86 architectures treat 
orig_video_isVGA as a boolean flag */ +#if defined(CONFIG_X86) + /* only EGA and VGA in 16 color graphic mode are supported */ + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EGAC && + screen_info.orig_video_isVGA != VIDEO_TYPE_VGAC) + return -ENODEV; + + if (screen_info.orig_video_mode != 0x0D && /* 320x200/4 (EGA) */ + screen_info.orig_video_mode != 0x0E && /* 640x200/4 (EGA) */ + screen_info.orig_video_mode != 0x10 && /* 640x350/4 (EGA) */ + screen_info.orig_video_mode != 0x12) /* 640x480/4 (VGA) */ + return -ENODEV; +#endif + return 0; +} + static void vga16fb_pan_var(struct fb_info *info, struct fb_var_screeninfo *var) { @@ -1422,6 +1441,11 @@ static int __init vga16fb_init(void) vga16fb_setup(option); #endif + + ret = check_mode_supported(); + if (ret) + return ret; + ret = platform_driver_register(&vga16fb_driver); if (!ret) { diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c index d96ab28f8ce4ae54b384c3a4d8f2b9bfa085187e..4e641a780726e784892110c72b71a379fd94637a 100644 --- a/drivers/video/fbdev/w100fb.c +++ b/drivers/video/fbdev/w100fb.c @@ -770,12 +770,18 @@ static int w100fb_probe(struct platform_device *pdev) fb_dealloc_cmap(&info->cmap); kfree(info->pseudo_palette); } - if (remapped_fbuf != NULL) + if (remapped_fbuf != NULL) { iounmap(remapped_fbuf); - if (remapped_regs != NULL) + remapped_fbuf = NULL; + } + if (remapped_regs != NULL) { iounmap(remapped_regs); - if (remapped_base != NULL) + remapped_regs = NULL; + } + if (remapped_base != NULL) { iounmap(remapped_base); + remapped_base = NULL; + } if (info) framebuffer_release(info); return err; @@ -795,8 +801,11 @@ static int w100fb_remove(struct platform_device *pdev) fb_dealloc_cmap(&info->cmap); iounmap(remapped_base); + remapped_base = NULL; iounmap(remapped_regs); + remapped_regs = NULL; iounmap(remapped_fbuf); + remapped_fbuf = NULL; framebuffer_release(info); diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 5c53098755a3594c2e429054632d2a8080faf3d6..441bc057896f5fd6521791467f2c152385a1a5b0 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -167,14 +167,13 @@ void virtio_add_status(struct virtio_device *dev, unsigned int status) } EXPORT_SYMBOL_GPL(virtio_add_status); -int virtio_finalize_features(struct virtio_device *dev) +/* Do some validation, then set FEATURES_OK */ +static int virtio_features_ok(struct virtio_device *dev) { - int ret = dev->config->finalize_features(dev); unsigned status; + int ret; might_sleep(); - if (ret) - return ret; ret = arch_has_restricted_virtio_memory_access(); if (ret) { @@ -203,7 +202,6 @@ int virtio_finalize_features(struct virtio_device *dev) } return 0; } -EXPORT_SYMBOL_GPL(virtio_finalize_features); static int virtio_dev_probe(struct device *_d) { @@ -240,17 +238,6 @@ static int virtio_dev_probe(struct device *_d) driver_features_legacy = driver_features; } - /* - * Some devices detect legacy solely via F_VERSION_1. Write - * F_VERSION_1 to force LE config space accesses before FEATURES_OK for - * these when needed. 
- */ - if (drv->validate && !virtio_legacy_is_little_endian() - && device_features & BIT_ULL(VIRTIO_F_VERSION_1)) { - dev->features = BIT_ULL(VIRTIO_F_VERSION_1); - dev->config->finalize_features(dev); - } - if (device_features & (1ULL << VIRTIO_F_VERSION_1)) dev->features = driver_features & device_features; else @@ -261,13 +248,26 @@ static int virtio_dev_probe(struct device *_d) if (device_features & (1ULL << i)) __virtio_set_bit(dev, i); + err = dev->config->finalize_features(dev); + if (err) + goto err; + if (drv->validate) { + u64 features = dev->features; + err = drv->validate(dev); if (err) goto err; + + /* Did validation change any features? Then write them again. */ + if (features != dev->features) { + err = dev->config->finalize_features(dev); + if (err) + goto err; + } } - err = virtio_finalize_features(dev); + err = virtio_features_ok(dev); if (err) goto err; @@ -438,7 +438,11 @@ int virtio_device_restore(struct virtio_device *dev) /* We have a driver! */ virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); - ret = virtio_finalize_features(dev); + ret = dev->config->finalize_features(dev); + if (ret) + goto err; + + ret = virtio_features_ok(dev); if (ret) goto err; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index cce75d3b3ba05da8a9dba8c1208a2eaf86dce261..3cc2a4ee7152c6fb280f21b848707743581e5042 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -1124,8 +1124,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq, if (virtqueue_use_indirect(_vq, total_sg)) { err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); - if (err != -ENOMEM) + if (err != -ENOMEM) { + END_USE(vq); return err; + } /* fall back on direct */ } diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c index e4f336111edc6feb51cf0c602b19585bed1b87a5..6cef6e2edb892d710f58b5ee8c606b28554dde7f 100644 --- a/drivers/w1/slaves/w1_ds28e04.c +++ b/drivers/w1/slaves/w1_ds28e04.c @@ -32,7 +32,7 @@ static int w1_strong_pullup = 1; module_param_named(strong_pullup, w1_strong_pullup, int, 0); /* enable/disable CRC checking on DS28E04-100 memory accesses */ -static char w1_enable_crccheck = 1; +static bool w1_enable_crccheck = true; #define W1_EEPROM_SIZE 512 #define W1_PAGE_COUNT 16 @@ -339,32 +339,18 @@ static BIN_ATTR_RW(pio, 1); static ssize_t crccheck_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (put_user(w1_enable_crccheck + 0x30, buf)) - return -EFAULT; - - return sizeof(w1_enable_crccheck); + return sysfs_emit(buf, "%d\n", w1_enable_crccheck); } static ssize_t crccheck_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - char val; - - if (count != 1 || !buf) - return -EINVAL; + int err = kstrtobool(buf, &w1_enable_crccheck); - if (get_user(val, buf)) - return -EFAULT; + if (err) + return err; - /* convert to decimal */ - val = val - 0x30; - if (val != 0 && val != 1) - return -EINVAL; - - /* set the new value */ - w1_enable_crccheck = val; - - return sizeof(w1_enable_crccheck); + return count; } static DEVICE_ATTR_RW(crccheck); diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index 974d02bb3a45cc87c26e4f6c8244f72d75fc0909..6546d029c7fd6fd8cf1fd1da26ef53e0255f76f5 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c @@ -2092,16 +2092,20 @@ static ssize_t w1_seq_show(struct device *device, if (sl->reg_num.id == reg_num->id) seq = i; + if (w1_reset_bus(sl->master)) + goto error; + + /* Put the 
device into chain DONE state */ + w1_write_8(sl->master, W1_MATCH_ROM); + w1_write_block(sl->master, (u8 *)&rn, 8); w1_write_8(sl->master, W1_42_CHAIN); w1_write_8(sl->master, W1_42_CHAIN_DONE); w1_write_8(sl->master, W1_42_CHAIN_DONE_INV); - w1_read_block(sl->master, &ack, sizeof(ack)); /* check for acknowledgment */ ack = w1_read_8(sl->master); if (ack != W1_42_SUCCESS_CONFIRM_BYTE) goto error; - } /* Exit from CHAIN state */ diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c index 359302f71f7efef34ccc419842a0079be2e364b4..ae7f9357bb871aa44911609e60529116565c98d5 100644 --- a/drivers/watchdog/rti_wdt.c +++ b/drivers/watchdog/rti_wdt.c @@ -229,6 +229,7 @@ static int rti_wdt_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(dev); if (ret) { pm_runtime_put_noidle(dev); + pm_runtime_disable(&pdev->dev); return dev_err_probe(dev, ret, "runtime pm failed\n"); } diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index 190d26e2e75f9e7a8c5bf2fd35fe4c202d9380f3..2815f78d22bb354afc6d7a2f73547d607c7b0406 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -304,14 +304,12 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id) return ret; } -static int sp805_wdt_remove(struct amba_device *adev) +static void sp805_wdt_remove(struct amba_device *adev) { struct sp805_wdt *wdt = amba_get_drvdata(adev); watchdog_unregister_device(&wdt->wdd); watchdog_set_drvdata(&wdt->wdd, NULL); - - return 0; } static int __maybe_unused sp805_wdt_suspend(struct device *dev) diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index 3fa40c723e8e95cb85d8c4ee5449ac716cb43e63..edb0acd0b8323cad2165d23fb4764358af8a45b8 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c @@ -169,20 +169,14 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, __del_gref(gref); } - /* It's possible for the target domain to map the just-allocated grant - * references by blindly guessing their IDs; if this is done, then - * __del_gref will leave them in the queue_gref list. They need to be - * added to the global list so that we can free them when they are no - * longer referenced. 
- */ - if (unlikely(!list_empty(&queue_gref))) - list_splice_tail(&queue_gref, &gref_list); mutex_unlock(&gref_mutex); return rc; } static void __del_gref(struct gntalloc_gref *gref) { + unsigned long addr; + if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { uint8_t *tmp = kmap(gref->page); tmp[gref->notify.pgoff] = 0; @@ -196,21 +190,16 @@ static void __del_gref(struct gntalloc_gref *gref) gref->notify.flags = 0; if (gref->gref_id) { - if (gnttab_query_foreign_access(gref->gref_id)) - return; - - if (!gnttab_end_foreign_access_ref(gref->gref_id, 0)) - return; - - gnttab_free_grant_reference(gref->gref_id); + if (gref->page) { + addr = (unsigned long)page_to_virt(gref->page); + gnttab_end_foreign_access(gref->gref_id, 0, addr); + } else + gnttab_free_grant_reference(gref->gref_id); } gref_size--; list_del(&gref->next_gref); - if (gref->page) - __free_page(gref->page); - kfree(gref); } diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index b9651f797676c47b036c6f7c173ee9130383bcc4..54778aadf618da4a5612404bd4f45851b47aa2df 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -240,13 +240,13 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) if (!refcount_dec_and_test(&map->users)) return; + if (map->pages && !use_ptemod) + unmap_grant_pages(map, 0, map->count); + if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { notify_remote_via_evtchn(map->notify.event); evtchn_put(map->notify.event); } - - if (map->pages && !use_ptemod) - unmap_grant_pages(map, 0, map->count); gntdev_free_map(map); } diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 3729bea0c98956ac6124c01260dc4d7423445dca..5c83d41766c8522cb0fbf65f28469b818f7b1ed0 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -134,12 +134,9 @@ struct gnttab_ops { */ unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref); /* - * Query the status of a grant entry. Ref parameter is reference of - * queried grant entry, return value is the status of queried entry. - * Detailed status(writing/reading) can be gotten from the return value - * by bit operations. + * Read the frame number related to a given grant reference. 
*/ - int (*query_foreign_access)(grant_ref_t ref); + unsigned long (*read_frame)(grant_ref_t ref); }; struct unmap_refs_callback_data { @@ -284,22 +281,6 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, } EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); -static int gnttab_query_foreign_access_v1(grant_ref_t ref) -{ - return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing); -} - -static int gnttab_query_foreign_access_v2(grant_ref_t ref) -{ - return grstatus[ref] & (GTF_reading|GTF_writing); -} - -int gnttab_query_foreign_access(grant_ref_t ref) -{ - return gnttab_interface->query_foreign_access(ref); -} -EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); - static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly) { u16 flags, nflags; @@ -353,6 +334,16 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) } EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); +static unsigned long gnttab_read_frame_v1(grant_ref_t ref) +{ + return gnttab_shared.v1[ref].frame; +} + +static unsigned long gnttab_read_frame_v2(grant_ref_t ref) +{ + return gnttab_shared.v2[ref].full_page.frame; +} + struct deferred_entry { struct list_head list; grant_ref_t ref; @@ -382,12 +373,9 @@ static void gnttab_handle_deferred(struct timer_list *unused) spin_unlock_irqrestore(&gnttab_list_lock, flags); if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) { put_free_entry(entry->ref); - if (entry->page) { - pr_debug("freeing g.e. %#x (pfn %#lx)\n", - entry->ref, page_to_pfn(entry->page)); - put_page(entry->page); - } else - pr_info("freeing g.e. %#x\n", entry->ref); + pr_debug("freeing g.e. %#x (pfn %#lx)\n", + entry->ref, page_to_pfn(entry->page)); + put_page(entry->page); kfree(entry); entry = NULL; } else { @@ -412,9 +400,18 @@ static void gnttab_handle_deferred(struct timer_list *unused) static void gnttab_add_deferred(grant_ref_t ref, bool readonly, struct page *page) { - struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC); + struct deferred_entry *entry; + gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL; const char *what = KERN_WARNING "leaking"; + entry = kmalloc(sizeof(*entry), gfp); + if (!page) { + unsigned long gfn = gnttab_interface->read_frame(ref); + + page = pfn_to_page(gfn_to_pfn(gfn)); + get_page(page); + } + if (entry) { unsigned long flags; @@ -435,11 +432,21 @@ static void gnttab_add_deferred(grant_ref_t ref, bool readonly, what, ref, page ? 
page_to_pfn(page) : -1); } +int gnttab_try_end_foreign_access(grant_ref_t ref) +{ + int ret = _gnttab_end_foreign_access_ref(ref, 0); + + if (ret) + put_free_entry(ref); + + return ret; +} +EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access); + void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page) { - if (gnttab_end_foreign_access_ref(ref, readonly)) { - put_free_entry(ref); + if (gnttab_try_end_foreign_access(ref)) { if (page != 0) put_page(virt_to_page(page)); } else @@ -1417,7 +1424,7 @@ static const struct gnttab_ops gnttab_v1_ops = { .update_entry = gnttab_update_entry_v1, .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1, .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1, - .query_foreign_access = gnttab_query_foreign_access_v1, + .read_frame = gnttab_read_frame_v1, }; static const struct gnttab_ops gnttab_v2_ops = { @@ -1429,7 +1436,7 @@ static const struct gnttab_ops gnttab_v2_ops = { .update_entry = gnttab_update_entry_v2, .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2, .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2, - .query_foreign_access = gnttab_query_foreign_access_v2, + .read_frame = gnttab_read_frame_v2, }; static bool gnttab_need_v2(void) diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index 7984645b59563b7fc974ec4ecfcc10fb2dbf3e39..bbe337dc296e3d6c78ed8afcb48c3ac308b92ea2 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -337,8 +337,8 @@ static void free_active_ring(struct sock_mapping *map) if (!map->active.ring) return; - free_pages((unsigned long)map->active.data.in, - map->active.ring->ring_order); + free_pages_exact(map->active.data.in, + PAGE_SIZE << map->active.ring->ring_order); free_page((unsigned long)map->active.ring); } @@ -352,8 +352,8 @@ static int alloc_active_ring(struct sock_mapping *map) goto out; map->active.ring->ring_order = PVCALLS_RING_ORDER; - bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - PVCALLS_RING_ORDER); + bytes = alloc_pages_exact(PAGE_SIZE << PVCALLS_RING_ORDER, + GFP_KERNEL | __GFP_ZERO); if (!bytes) goto out; diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 0cd728961fce9eb004bb77cf71aecfa0637933c0..16cfef09932953167de76fff884a24be4e4cdd1f 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -379,7 +379,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, unsigned int nr_pages, grant_ref_t *grefs) { int err; - int i, j; + unsigned int i; + grant_ref_t gref_head; + + err = gnttab_alloc_grant_references(nr_pages, &gref_head); + if (err) { + xenbus_dev_fatal(dev, err, "granting access to ring page"); + return err; + } for (i = 0; i < nr_pages; i++) { unsigned long gfn; @@ -389,23 +396,14 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, else gfn = virt_to_gfn(vaddr); - err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0); - if (err < 0) { - xenbus_dev_fatal(dev, err, - "granting access to ring page"); - goto fail; - } - grefs[i] = err; + grefs[i] = gnttab_claim_grant_reference(&gref_head); + gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id, + gfn, 0); vaddr = vaddr + XEN_PAGE_SIZE; } return 0; - -fail: - for (j = 0; j < i; j++) - gnttab_end_foreign_access_ref(grefs[j], 0); - return err; } EXPORT_SYMBOL_GPL(xenbus_grant_ring); diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 72b67d810b8c202ac8323c700361920c96dd193c..a13ef836fe4e1b13b7162b0ce6fc5c630e048c70 100644 
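Before moving on to fs/9p: the xenbus_grant_ring() rework that just ended replaces per-page granting, which could fail midway and needed rollback, with a batch reservation that succeeds or fails as a whole. A condensed sketch of that claim-then-grant pattern, assuming a directly mapped vaddr (the real function also handles vmalloc'ed ring buffers; grant_ring_sketch() is a hypothetical name):

#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>

static int grant_ring_sketch(struct xenbus_device *dev, void *vaddr,
			     unsigned int nr_pages, grant_ref_t *grefs)
{
	grant_ref_t gref_head;
	unsigned int i;
	int err;

	/* Reserve every reference up front: no partial failure to unwind. */
	err = gnttab_alloc_grant_references(nr_pages, &gref_head);
	if (err)
		return err;

	for (i = 0; i < nr_pages; i++) {
		/* Claiming from the reserved batch cannot fail. */
		grefs[i] = gnttab_claim_grant_reference(&gref_head);
		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
						virt_to_gfn(vaddr), 0);
		vaddr += XEN_PAGE_SIZE;
	}
	return 0;
}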
--- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -541,7 +541,10 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) { int retval; struct p9_fid *fid = NULL; - struct p9_iattr_dotl p9attr; + struct p9_iattr_dotl p9attr = { + .uid = INVALID_UID, + .gid = INVALID_GID, + }; struct inode *inode = d_inode(dentry); p9_debug(P9_DEBUG_VFS, "\n"); @@ -551,14 +554,22 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) return retval; p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid); - p9attr.mode = iattr->ia_mode; - p9attr.uid = iattr->ia_uid; - p9attr.gid = iattr->ia_gid; - p9attr.size = iattr->ia_size; - p9attr.atime_sec = iattr->ia_atime.tv_sec; - p9attr.atime_nsec = iattr->ia_atime.tv_nsec; - p9attr.mtime_sec = iattr->ia_mtime.tv_sec; - p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; + if (iattr->ia_valid & ATTR_MODE) + p9attr.mode = iattr->ia_mode; + if (iattr->ia_valid & ATTR_UID) + p9attr.uid = iattr->ia_uid; + if (iattr->ia_valid & ATTR_GID) + p9attr.gid = iattr->ia_gid; + if (iattr->ia_valid & ATTR_SIZE) + p9attr.size = iattr->ia_size; + if (iattr->ia_valid & ATTR_ATIME_SET) { + p9attr.atime_sec = iattr->ia_atime.tv_sec; + p9attr.atime_nsec = iattr->ia_atime.tv_nsec; + } + if (iattr->ia_valid & ATTR_MTIME_SET) { + p9attr.mtime_sec = iattr->ia_mtime.tv_sec; + p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; + } if (iattr->ia_valid & ATTR_FILE) { fid = iattr->ia_file->private_data; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 04c4aa7a1df2c59716abaa12aefdf29fd7d4630c..ed507d27034b1dc804e6482d7403bd6bcc0025cd 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -170,8 +170,8 @@ static int padzero(unsigned long elf_bss) static int create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, - unsigned long load_addr, unsigned long interp_load_addr, - unsigned long e_entry) + unsigned long interp_load_addr, + unsigned long e_entry, unsigned long phdr_addr) { struct mm_struct *mm = current->mm; unsigned long p = bprm->p; @@ -256,7 +256,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP); NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE); NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC); - NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff); + NEW_AUX_ENT(AT_PHDR, phdr_addr); NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr)); NEW_AUX_ENT(AT_PHNUM, exec->e_phnum); NEW_AUX_ENT(AT_BASE, interp_load_addr); @@ -820,7 +820,7 @@ static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr, static int load_elf_binary(struct linux_binprm *bprm) { struct file *interpreter = NULL; /* to shut gcc up */ - unsigned long load_addr = 0, load_bias = 0; + unsigned long load_addr, load_bias = 0, phdr_addr = 0; int load_addr_set = 0; unsigned long error; struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL; @@ -1153,6 +1153,17 @@ static int load_elf_binary(struct linux_binprm *bprm) reloc_func_desc = load_bias; } } + + /* + * Figure out which segment in the file contains the Program + * Header table, and map to the associated memory address. 
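The computation this comment introduces is plain offset translation: AT_PHDR must point at the program headers as mapped in memory, so the file offset e_phoff is rebased into the virtual range of whichever segment covers it. As a worked example with illustrative numbers, e_phoff = 0x40 inside a segment with p_offset = 0 and p_vaddr = 0x400000 yields 0x400040, and load_bias is folded in later. A sketch of the rule (phdr_vaddr_sketch() is a hypothetical helper, not in the patch):

/*
 * Valid only when p_offset <= e_phoff < p_offset + p_filesz, which is
 * exactly what the hunk below checks before using the result.
 */
static unsigned long phdr_vaddr_sketch(unsigned long e_phoff,
				       unsigned long p_offset,
				       unsigned long p_vaddr,
				       unsigned long load_bias)
{
	return e_phoff - p_offset + p_vaddr + load_bias;
}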
+ */ + if (elf_ppnt->p_offset <= elf_ex->e_phoff && + elf_ex->e_phoff < elf_ppnt->p_offset + elf_ppnt->p_filesz) { + phdr_addr = elf_ex->e_phoff - elf_ppnt->p_offset + + elf_ppnt->p_vaddr; + } + k = elf_ppnt->p_vaddr; if ((elf_ppnt->p_flags & PF_X) && k < start_code) start_code = k; @@ -1188,6 +1199,7 @@ static int load_elf_binary(struct linux_binprm *bprm) } e_entry = elf_ex->e_entry + load_bias; + phdr_addr += load_bias; elf_bss += load_bias; elf_brk += load_bias; start_code += load_bias; @@ -1251,8 +1263,8 @@ static int load_elf_binary(struct linux_binprm *bprm) goto out; #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ - retval = create_elf_tables(bprm, elf_ex, - load_addr, interp_load_addr, e_entry); + retval = create_elf_tables(bprm, elf_ex, interp_load_addr, + e_entry, phdr_addr); if (retval < 0) goto out; diff --git a/fs/block_dev.c b/fs/block_dev.c index 46801789f2dc30bedfa2c210a4834a4487bf4df3..c8a3c93cc2560b0f0b7a041ce92e4892ab1c4eab 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1172,7 +1172,6 @@ static void bd_clear_claiming(struct block_device *whole, void *holder) static void bd_finish_claiming(struct block_device *bdev, struct block_device *whole, void *holder) { - spin_lock(&bdev_lock); BUG_ON(!bd_may_claim(bdev, whole, holder)); /* * Note that for a whole device bd_holders will be incremented twice, @@ -1183,7 +1182,6 @@ static void bd_finish_claiming(struct block_device *bdev, bdev->bd_holders++; bdev->bd_holder = holder; bd_clear_claiming(whole, holder); - spin_unlock(&bdev_lock); } /** @@ -1481,6 +1479,39 @@ int bdev_disk_changed(struct block_device *bdev, bool invalidate) */ EXPORT_SYMBOL_GPL(bdev_disk_changed); +static void blkdev_dump_conflict_opener(struct block_device *bdev, char *msg) +{ + char name[BDEVNAME_SIZE]; + struct task_struct *p = NULL; + char comm_buf[TASK_COMM_LEN]; + pid_t p_pid; + + rcu_read_lock(); + p = rcu_dereference(current->real_parent); + task_lock(p); + strncpy(comm_buf, p->comm, TASK_COMM_LEN); + p_pid = p->pid; + task_unlock(p); + rcu_read_unlock(); + + pr_info_ratelimited("%s %s. current [%d %s]. parent [%d %s]\n", + msg, bdevname(bdev, name), + current->pid, current->comm, p_pid, comm_buf); +} + +static bool is_conflict_excl_open(struct block_device *bdev, struct block_device *whole, fmode_t mode) +{ + if (bdev->bd_holders) + return false; + + if (bdev->bd_write_openers > ((mode & FMODE_WRITE) ? 1 : 0)) + return true; + + if (bdev == whole) + return !!bdev->bd_part_write_openers; + + return !!whole->bd_write_openers; +} /* * bd_mutex locking: * @@ -1599,8 +1630,40 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, void *holder, bdev->bd_openers++; if (for_part) bdev->bd_part_count++; - if (claiming) + + if (!for_part && (mode & FMODE_WRITE)) { + spin_lock(&bdev_lock); + bdev->bd_write_openers++; + if (bdev->bd_contains != bdev) + bdev->bd_contains->bd_part_write_openers++; + spin_unlock(&bdev_lock); + } + + if (claiming) { + spin_lock(&bdev_lock); + /* + * Exclusively opening a block device that is already + * open for write may let the existing writer corrupt + * the device (e.g. a mounted file system), so give a + * hint here.
+ */ + if (is_conflict_excl_open(bdev, claiming, mode)) + blkdev_dump_conflict_opener(bdev, "VFS: Open a write-opened " + "block device exclusively"); bd_finish_claiming(bdev, claiming, holder); + spin_unlock(&bdev_lock); + } else if (!for_part && (mode & FMODE_WRITE)) { + spin_lock(&bdev_lock); + /* + * Opening an exclusively-opened device for write may + * corrupt the device (e.g. a mounted file system), + * so give a hint here. + */ + if (bdev->bd_holders || + (whole && (whole->bd_holder != NULL) && (whole->bd_holder != bd_may_claim))) + blkdev_dump_conflict_opener(bdev, "VFS: Open an exclusively-opened " + "block device for write"); + spin_unlock(&bdev_lock); + } /* * Block event polling for write claims if requested. Any write holder @@ -1818,6 +1881,14 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) if (for_part) bdev->bd_part_count--; + if (!for_part && (mode & FMODE_WRITE)) { + spin_lock(&bdev_lock); + bdev->bd_write_openers--; + if (bdev->bd_contains != bdev) + bdev->bd_contains->bd_part_write_openers--; + spin_unlock(&bdev_lock); + } + if (!--bdev->bd_openers) { WARN_ON_ONCE(bdev->bd_holders); sync_blockdev(bdev); diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 6e447bdaf9ec8b381465dfbaeb73a240b6af4416..baff31a147e7dfa802c6a8823dce91837443daf5 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1213,7 +1213,12 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0); if (ret < 0) goto out; - BUG_ON(ret == 0); + if (ret == 0) { + /* This shouldn't happen, indicates a bug or fs corruption. */ + ASSERT(ret != 0); + ret = -EUCLEAN; + goto out; + } #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS if (trans && likely(trans->type != __TRANS_DUMMY) && @@ -1361,10 +1366,18 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans, goto out; if (!ret && extent_item_pos) { /* - * we've recorded that parent, so we must extend - * its inode list here + * We've recorded that parent, so we must extend + * its inode list here. + * + * However, if there was corruption we may not + * have found an eie; return an error in this + * case.
*/ - BUG_ON(!eie); + ASSERT(eie); + if (!eie) { + ret = -EUCLEAN; + goto out; + } while (eie->next) eie = eie->next; eie->next = ref->inode_list; diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index c99e293b50f5456669bdd75735b2ce5f5b11340b..e351f531995054abff9942499ed17fb8f96be981 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -2570,7 +2570,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) struct btrfs_path *path = NULL; LIST_HEAD(dirty); struct list_head *io = &cur_trans->io_bgs; - int num_started = 0; int loops = 0; spin_lock(&cur_trans->dirty_bgs_lock); @@ -2636,7 +2635,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) cache->io_ctl.inode = NULL; ret = btrfs_write_out_cache(trans, cache, path); if (ret == 0 && cache->io_ctl.inode) { - num_started++; should_put = 0; /* @@ -2737,7 +2735,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) int should_put; struct btrfs_path *path; struct list_head *io = &cur_trans->io_bgs; - int num_started = 0; path = btrfs_alloc_path(); if (!path) @@ -2795,7 +2792,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) cache->io_ctl.inode = NULL; ret = btrfs_write_out_cache(trans, cache, path); if (ret == 0 && cache->io_ctl.inode) { - num_started++; should_put = 0; list_add_tail(&cache->io_list, io); } else { diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 519cf145f9bd176596bef40a03a17a6b3e81b07e..5addd1e36a8ee1699b5f8be4a4b48c36aeabe282 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2589,12 +2589,9 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, { struct btrfs_fs_info *fs_info = root->fs_info; struct extent_buffer *b; - int root_lock; + int root_lock = 0; int level = 0; - /* We try very hard to do read locks on the root */ - root_lock = BTRFS_READ_LOCK; - if (p->search_commit_root) { /* * The commit roots are read only so we always do read locks, @@ -2632,6 +2629,9 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, goto out; } + /* We try very hard to do read locks on the root */ + root_lock = BTRFS_READ_LOCK; + /* * If the level is set to maximum, we can skip trying to get the read * lock. @@ -2658,6 +2658,17 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root, level = btrfs_header_level(b); out: + /* + * The root may have failed to write out at some point, and thus is no + * longer valid, return an error in this case. 
+ */ + if (!extent_buffer_uptodate(b)) { + if (root_lock) + btrfs_tree_unlock_rw(b, root_lock); + free_extent_buffer(b); + return ERR_PTR(-EIO); + } + p->nodes[level] = b; if (!p->skip_locking) p->locks[level] = root_lock; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a5bcad0278835789a251bb2e74fb43ca58b8f578..87e55b024ac2eb8191234d0ab0c14b2dfad33468 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1596,9 +1596,10 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info, ret = btrfs_insert_fs_root(fs_info, root); if (ret) { - btrfs_put_root(root); - if (ret == -EEXIST) + if (ret == -EEXIST) { + btrfs_put_root(root); goto again; + } goto fail; } return root; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index f39d02e7f7efe1d191f48bc6452b55ca2c5b58a4..16f44bc481ab442740090bcc296e284b3492a0b8 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -121,7 +121,7 @@ struct extent_buffer { */ struct extent_changeset { /* How many bytes are set/cleared in this operation */ - unsigned int bytes_changed; + u64 bytes_changed; /* Changed ranges */ struct ulist range_changed; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f59ec55e5feb232bbf210837843bd1f87b8a5e4e..416a1b753ff628425f27c636d1177b0caa832dc6 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2833,8 +2833,9 @@ int btrfs_replace_file_extents(struct inode *inode, struct btrfs_path *path, return ret; } -static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) +static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len) { + struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_state *cached_state = NULL; @@ -2866,6 +2867,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) goto out_only_mutex; } + ret = file_modified(file); + if (ret) + goto out_only_mutex; + lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode))); lockend = round_down(offset + len, btrfs_inode_sectorsize(BTRFS_I(inode))) - 1; @@ -3301,7 +3306,7 @@ static long btrfs_fallocate(struct file *file, int mode, return -EOPNOTSUPP; if (mode & FALLOC_FL_PUNCH_HOLE) - return btrfs_punch_hole(inode, offset, len); + return btrfs_punch_hole(file, offset, len); /* * Only trigger disk allocation, don't trigger qgroup reserve @@ -3323,6 +3328,10 @@ static long btrfs_fallocate(struct file *file, int mode, goto out; } + ret = file_modified(file); + if (ret) + goto out; + /* * TODO: Move these two operations after we have checked * accurate reserved space, or fallocate can still fail but diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ff3f0638cdb907af56c04cc32a710ff01da00cd2..4a5248097d7aae5a5c1a87cc506fd3baa68ffec1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -995,7 +995,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode, int ret = 0; if (btrfs_is_free_space_inode(inode)) { - WARN_ON_ONCE(1); ret = -EINVAL; goto out_unlock; } @@ -4023,6 +4022,13 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) dest->root_key.objectid); return -EPERM; } + if (atomic_read(&dest->nr_swapfiles)) { + spin_unlock(&dest->root_item_lock); + btrfs_warn(fs_info, + "attempt to delete subvolume %llu with active swapfile", + root->root_key.objectid); + return -EPERM; + } root_flags = btrfs_root_flags(&dest->root_item); btrfs_set_root_flags(&dest->root_item, root_flags | BTRFS_ROOT_SUBVOL_DEAD); @@ -10094,9 +10100,19 @@ 
static int btrfs_add_swap_extent(struct swap_info_struct *sis, struct btrfs_swap_info *bsi) { unsigned long nr_pages; + unsigned long max_pages; u64 first_ppage, first_ppage_reported, next_ppage; int ret; + /* + * Our swapfile may have had its size extended after the swap header was + * written. In that case activating the swapfile should not go beyond + * the max size set in the swap header. + */ + if (bsi->nr_pages >= sis->max) + return 0; + + max_pages = sis->max - bsi->nr_pages; first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT; next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len, PAGE_SIZE) >> PAGE_SHIFT; @@ -10104,6 +10120,7 @@ static int btrfs_add_swap_extent(struct swap_info_struct *sis, if (first_ppage >= next_ppage) return 0; nr_pages = next_ppage - first_ppage; + nr_pages = min(nr_pages, max_pages); first_ppage_reported = first_ppage; if (bsi->start == 0) @@ -10204,8 +10221,23 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, * set. We use this counter to prevent snapshots. We must increment it * before walking the extents because we don't want a concurrent * snapshot to run after we've already checked the extents. + * + * It is possible that the subvolume has been marked for deletion but + * not yet removed. To prevent this race, we check the root status + * before activating the swapfile. */ + spin_lock(&root->root_item_lock); + if (btrfs_root_dead(root)) { + spin_unlock(&root->root_item_lock); + + btrfs_exclop_finish(fs_info); + btrfs_warn(fs_info, + "cannot activate swapfile because subvolume %llu is being deleted", + root->root_key.objectid); + return -EPERM; + } atomic_inc(&root->nr_swapfiles); + spin_unlock(&root->root_item_lock); isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 040db0dfba264315572a4abe3933e4213e4137e8..b5e9bfe884c4b7eec01517fdb708eb2fa13636b7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3103,10 +3103,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, inode_lock(inode); err = btrfs_delete_subvolume(dir, dentry); inode_unlock(inode); - if (!err) { - fsnotify_rmdir(dir, dentry); - d_delete(dentry); - } + if (!err) + d_delete_notify(dir, dentry); out_dput: dput(dentry); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 4bac32a274ceb0b6d4602574e512170be06b1f94..a02e38fb696c17f62d81a9ef0358bf376d411c48 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -941,6 +941,14 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info) int ret = 0; int slot; + /* + * We need to have subvol_sem write locked, to prevent races between + * concurrent tasks trying to enable quotas, because we will unlock + * and relock qgroup_ioctl_lock before setting fs_info->quota_root + * and before setting BTRFS_FS_QUOTA_ENABLED. + */ + lockdep_assert_held_write(&fs_info->subvol_sem); + mutex_lock(&fs_info->qgroup_ioctl_lock); if (fs_info->quota_root) goto out; @@ -1118,8 +1126,19 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info) goto out_free_path; } + mutex_unlock(&fs_info->qgroup_ioctl_lock); + /* + * Commit the transaction while not holding qgroup_ioctl_lock, to avoid + * a deadlock with tasks concurrently doing other qgroup operations, such + * as adding/removing qgroups or adding/deleting qgroup relations, + * because all qgroup operations first start or join a transaction and then + * lock the qgroup_ioctl_lock mutex.
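Stepping back to the btrfs_add_swap_extent() hunk earlier in this patch: the clamp it adds is a standalone arithmetic rule, worth seeing in isolation. A sketch under the same assumptions (header_max plays the role of sis->max, the page count recorded in the swap header; added_so_far plays bsi->nr_pages; the helper name is hypothetical):

#include <linux/minmax.h>

/* Never activate more swap pages than the swap header advertised. */
static unsigned long clamp_swap_pages(unsigned long nr_pages,
				      unsigned long added_so_far,
				      unsigned long header_max)
{
	if (added_so_far >= header_max)
		return 0;	/* already at the size from the header */

	return min(nr_pages, header_max - added_so_far);
}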
+ * We are safe from a concurrent task trying to enable quotas, by calling + * this function, since we are serialized by fs_info->subvol_sem. + */ ret = btrfs_commit_transaction(trans); trans = NULL; + mutex_lock(&fs_info->qgroup_ioctl_lock); if (ret) goto out_free_path; @@ -1167,11 +1186,33 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) struct btrfs_trans_handle *trans = NULL; int ret = 0; + /* + * We need to have subvol_sem write locked, to prevent races between + * concurrent tasks trying to disable quotas, because we will unlock + * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes. + */ + lockdep_assert_held_write(&fs_info->subvol_sem); + mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) goto out; + + /* + * Unlock the qgroup_ioctl_lock mutex before waiting for the rescan worker to + * complete. Otherwise we can deadlock because btrfs_remove_qgroup() needs + * to lock that mutex while holding a transaction handle and the rescan + * worker needs to commit a transaction. + */ mutex_unlock(&fs_info->qgroup_ioctl_lock); + /* + * Request qgroup rescan worker to complete and wait for it. This wait + * must be done before transaction start for quota disable since it may + * deadlock with transaction by the qgroup rescan worker. + */ + clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); + btrfs_qgroup_wait_for_completion(fs_info, false); + /* * 1 For the root item * @@ -1187,14 +1228,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) if (IS_ERR(trans)) { ret = PTR_ERR(trans); trans = NULL; + set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); goto out; } if (!fs_info->quota_root) goto out; - clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); - btrfs_qgroup_wait_for_completion(fs_info, false); spin_lock(&fs_info->qgroup_lock); quota_root = fs_info->quota_root; fs_info->quota_root = NULL; @@ -3371,6 +3411,9 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, btrfs_warn(fs_info, "qgroup rescan init failed, qgroup is not enabled"); ret = -EINVAL; + } else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { + /* Quota disable is in progress */ + ret = -EBUSY; } if (ret) { diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c index 3a3102bc15a057291154e3a44c1e369fd822bda3..4b3ae0faf548e5df94ff2e38b3d483fd42b2d106 100644 --- a/fs/btrfs/reflink.c +++ b/fs/btrfs/reflink.c @@ -503,8 +503,11 @@ static int btrfs_clone(struct inode *src, struct inode *inode, */ ASSERT(key.offset == 0); ASSERT(datal <= fs_info->sectorsize); - if (key.offset != 0 || datal > fs_info->sectorsize) - return -EUCLEAN; + if (WARN_ON(key.offset != 0) || + WARN_ON(datal > fs_info->sectorsize)) { + ret = -EUCLEAN; + goto out; + } ret = clone_copy_inline_extent(inode, path, &new_key, drop_start, datal, size, diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 10f020ab1186f1fd049ed9cf80060bf33801ca1e..6b80dee17f49d99219bdd86f5fc130b1bb098a42 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5006,6 +5006,10 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len) lock_page(page); if (!PageUptodate(page)) { unlock_page(page); + btrfs_err(fs_info, + "send: IO error at offset %llu for inode %llu root %llu", + page_offset(page), sctx->cur_ino, + sctx->send_root->root_key.objectid); put_page(page); ret = -EIO; break; diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index d4a3a56726aa8cba77739a45e8f591d420370838..32f1b15b25dcc34b027b3f4883e36b12e4793e4c 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ 
-947,6 +947,7 @@ static int check_dev_item(struct extent_buffer *leaf, struct btrfs_key *key, int slot) { struct btrfs_dev_item *ditem; + const u32 item_size = btrfs_item_size_nr(leaf, slot); if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) { dev_item_err(leaf, slot, @@ -954,6 +955,13 @@ static int check_dev_item(struct extent_buffer *leaf, key->objectid, BTRFS_DEV_ITEMS_OBJECTID); return -EUCLEAN; } + + if (unlikely(item_size != sizeof(*ditem))) { + dev_item_err(leaf, slot, "invalid item size: has %u expect %zu", + item_size, sizeof(*ditem)); + return -EUCLEAN; + } + ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); if (btrfs_device_id(leaf, ditem) != key->offset) { dev_item_err(leaf, slot, @@ -989,6 +997,7 @@ static int check_inode_item(struct extent_buffer *leaf, struct btrfs_inode_item *iitem; u64 super_gen = btrfs_super_generation(fs_info->super_copy); u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777); + const u32 item_size = btrfs_item_size_nr(leaf, slot); u32 mode; int ret; @@ -996,6 +1005,12 @@ static int check_inode_item(struct extent_buffer *leaf, if (ret < 0) return ret; + if (unlikely(item_size != sizeof(*iitem))) { + generic_err(leaf, slot, "invalid item size: has %u expect %zu", + item_size, sizeof(*iitem)); + return -EUCLEAN; + } + iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item); /* Here we use super block generation + 1 to handle log tree */ diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 09ef6419e890a6af419c4ada7cc64ac2c16ead48..62784b99a80741bfecc9ae40d7fffc27cd34f099 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1286,6 +1286,15 @@ static int unlink_old_inode_refs(struct btrfs_trans_handle *trans, inode, name, namelen); kfree(name); iput(dir); + /* + * Whenever we need to check if a name exists or not, we + * check the subvolume tree. So after an unlink we must + * run delayed items, so that future checks for a name + * during log replay see that the name does not exist + * anymore. + */ + if (!ret) + ret = btrfs_run_delayed_items(trans); if (ret) goto out; goto again; @@ -1537,6 +1546,15 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, */ if (!ret && inode->i_nlink == 0) inc_nlink(inode); + /* + * Whenever we need to check if a name exists or + * not, we check the subvolume tree. So after an + * unlink we must run delayed items, so that future + * checks for a name during log replay see that the + * name does not exist anymore. + */ + if (!ret) + ret = btrfs_run_delayed_items(trans); } if (ret < 0) goto out; @@ -4297,7 +4315,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans, /* * Log all prealloc extents beyond the inode's i_size to make sure we do not - * lose them after doing a fast fsync and replaying the log. We scan the + * lose them after doing a full/fast fsync and replaying the log. We scan the * subvolume's root instead of iterating the inode's extent map tree because * otherwise we can log incorrect extent items based on extent map conversion.
* That can happen due to the fact that extent maps are merged when they @@ -5084,6 +5102,7 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, struct btrfs_log_ctx *ctx, bool *need_log_inode_item) { + const u64 i_size = i_size_read(&inode->vfs_inode); struct btrfs_root *root = inode->root; int ins_start_slot = 0; int ins_nr = 0; @@ -5104,13 +5123,21 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, if (min_key->type > max_key->type) break; - if (min_key->type == BTRFS_INODE_ITEM_KEY) + if (min_key->type == BTRFS_INODE_ITEM_KEY) { *need_log_inode_item = false; - - if ((min_key->type == BTRFS_INODE_REF_KEY || - min_key->type == BTRFS_INODE_EXTREF_KEY) && - inode->generation == trans->transid && - !recursive_logging) { + } else if (min_key->type == BTRFS_EXTENT_DATA_KEY && + min_key->offset >= i_size) { + /* + * Extents at and beyond eof are logged with + * btrfs_log_prealloc_extents(). + * Only regular files have BTRFS_EXTENT_DATA_KEY keys, + * and no keys greater than that, so bail out. + */ + break; + } else if ((min_key->type == BTRFS_INODE_REF_KEY || + min_key->type == BTRFS_INODE_EXTREF_KEY) && + inode->generation == trans->transid && + !recursive_logging) { u64 other_ino = 0; u64 other_parent = 0; @@ -5141,10 +5168,8 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, btrfs_release_path(path); goto next_key; } - } - - /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ - if (min_key->type == BTRFS_XATTR_ITEM_KEY) { + } else if (min_key->type == BTRFS_XATTR_ITEM_KEY) { + /* Skip xattrs, logged later with btrfs_log_all_xattrs() */ if (ins_nr == 0) goto next_slot; ret = copy_items(trans, inode, dst_path, path, @@ -5197,9 +5222,21 @@ static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, break; } } - if (ins_nr) + if (ins_nr) { ret = copy_items(trans, inode, dst_path, path, ins_start_slot, ins_nr, inode_only, logged_isize); + if (ret) + return ret; + } + + if (inode_only == LOG_INODE_ALL && S_ISREG(inode->vfs_inode.i_mode)) { + /* + * Release the path because otherwise we might attempt to double + * lock the same leaf with btrfs_log_prealloc_extents() below. 
+ */ + btrfs_release_path(path); + ret = btrfs_log_prealloc_extents(trans, inode, dst_path); + } return ret; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e462de9917237bc5b558aa4365bb11293a71c8f8..366d047638646b21d10d8fde46b63e64c7e30b95 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4220,10 +4220,12 @@ static int balance_kthread(void *data) struct btrfs_fs_info *fs_info = data; int ret = 0; + sb_start_write(fs_info->sb); mutex_lock(&fs_info->balance_mutex); if (fs_info->balance_ctl) ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL); mutex_unlock(&fs_info->balance_mutex); + sb_end_write(fs_info->sb); return ret; } diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index f63c1a090139ce967062d663b53ccc4f43824dd2..1fddb9cd3e88e282e390ff4deddc520c3a73cab5 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -478,8 +478,11 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) 2 : (fpos_off(rde->offset) + 1); err = note_last_dentry(dfi, rde->name, rde->name_len, next_offset); - if (err) + if (err) { + ceph_mdsc_put_request(dfi->last_readdir); + dfi->last_readdir = NULL; return err; + } } else if (req->r_reply_info.dir_end) { dfi->next_offset = 2; /* keep last name */ @@ -520,6 +523,12 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) if (!dir_emit(ctx, rde->name, rde->name_len, ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)), le32_to_cpu(rde->inode.in->mode) >> 12)) { + /* + * NOTE: no need to put 'dfi->last_readdir' here, because + * when dir_emit stops us it most likely doesn't have + * enough memory, etc. The next readdir will then + * continue from it. + */ dout("filldir stopping us...\n"); return 0; } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 8ed881fd7440d13292cbd17dee2fa688b68e2d3e..450050801f3b6ef3a1fd094aaeddd0d5d11d8e06 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -577,6 +577,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry, struct ceph_inode_info *ci = ceph_inode(dir); struct inode *inode; struct timespec64 now; + struct ceph_string *pool_ns; struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb); struct ceph_vino vino = { .ino = req->r_deleg_ino, .snap = CEPH_NOSNAP }; @@ -626,6 +627,12 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry, in.max_size = cpu_to_le64(lo->stripe_unit); ceph_file_layout_to_legacy(lo, &in.layout); + /* lo is private, so pool_ns can't change */ + pool_ns = rcu_dereference_raw(lo->pool_ns); + if (pool_ns) { + iinfo.pool_ns_len = pool_ns->len; + iinfo.pool_ns_data = pool_ns->str; + } down_read(&mdsc->snap_rwsem); ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session, @@ -743,8 +750,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, restore_deleg_ino(dir, req->r_deleg_ino); ceph_mdsc_put_request(req); try_async = false; + ceph_put_string(rcu_dereference_raw(lo.pool_ns)); goto retry; } + ceph_put_string(rcu_dereference_raw(lo.pool_ns)); goto out_req; } } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f0ed29a9a6f11a3e923d4eb61574324cc4482747..aa5a4d759ca236a10df969bed3f0080cb48e582e 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -864,6 +864,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type, out_super: deactivate_locked_super(sb); + return root; out: cifs_cleanup_volume_info(volume_info); return root; diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 94dab4309fbb41ca6acd8e0ee66726e0d3227c81..85d30fef98a2903671bf9bb25155be98fe8a57eb 100644 ---
a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -97,6 +97,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len, if (rc != 1) return -EINVAL; + if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN) + return -EINVAL; + rc = symlink_hash(link_len, link_str, md5_hash); if (rc) { cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc); diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index fdb1d660bd136ed2587746c860926c1ad5dbe321..0e8f484031da981c64df21bfd720b7f109d83a69 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1526,6 +1526,7 @@ smb2_ioctl_query_info(const unsigned int xid, unsigned int size[2]; void *data[2]; int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR; + void (*free_req1_func)(struct smb_rqst *r); vars = kzalloc(sizeof(*vars), GFP_ATOMIC); if (vars == NULL) @@ -1535,27 +1536,29 @@ smb2_ioctl_query_info(const unsigned int xid, resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; - if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) - goto e_fault; - + if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) { + rc = -EFAULT; + goto free_vars; + } if (qi.output_buffer_length > 1024) { - kfree(vars); - return -EINVAL; + rc = -EINVAL; + goto free_vars; } if (!ses || !server) { - kfree(vars); - return -EIO; + rc = -EIO; + goto free_vars; } if (smb3_encryption_required(tcon)) flags |= CIFS_TRANSFORM_REQ; - buffer = memdup_user(arg + sizeof(struct smb_query_info), - qi.output_buffer_length); - if (IS_ERR(buffer)) { - kfree(vars); - return PTR_ERR(buffer); + if (qi.output_buffer_length) { + buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length); + if (IS_ERR(buffer)) { + rc = PTR_ERR(buffer); + goto free_vars; + } } /* Open */ @@ -1593,45 +1596,45 @@ smb2_ioctl_query_info(const unsigned int xid, rc = SMB2_open_init(tcon, server, &rqst[0], &oplock, &oparms, path); if (rc) - goto iqinf_exit; + goto free_output_buffer; smb2_set_next_command(tcon, &rqst[0]); /* Query */ if (qi.flags & PASSTHRU_FSCTL) { /* Can eventually relax perm check since server enforces too */ - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_ADMIN)) { rc = -EPERM; - else { - rqst[1].rq_iov = &vars->io_iov[0]; - rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; - - rc = SMB2_ioctl_init(tcon, server, - &rqst[1], - COMPOUND_FID, COMPOUND_FID, - qi.info_type, true, buffer, - qi.output_buffer_length, - CIFSMaxBufSize - - MAX_SMB2_CREATE_RESPONSE_SIZE - - MAX_SMB2_CLOSE_RESPONSE_SIZE); + goto free_open_req; } + rqst[1].rq_iov = &vars->io_iov[0]; + rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; + + rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, + qi.info_type, true, buffer, qi.output_buffer_length, + CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - + MAX_SMB2_CLOSE_RESPONSE_SIZE); + free_req1_func = SMB2_ioctl_free; } else if (qi.flags == PASSTHRU_SET_INFO) { /* Can eventually relax perm check since server enforces too */ - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_ADMIN)) { rc = -EPERM; - else { - rqst[1].rq_iov = &vars->si_iov[0]; - rqst[1].rq_nvec = 1; - - size[0] = 8; - data[0] = buffer; - - rc = SMB2_set_info_init(tcon, server, - &rqst[1], - COMPOUND_FID, COMPOUND_FID, - current->tgid, - FILE_END_OF_FILE_INFORMATION, - SMB2_O_INFO_FILE, 0, data, size); + goto free_open_req; } + if (qi.output_buffer_length < 8) { + rc = -EINVAL; + goto free_open_req; + } + rqst[1].rq_iov = &vars->si_iov[0]; + rqst[1].rq_nvec = 1; + + /* MS-FSCC 2.4.13 FileEndOfFileInformation */ + size[0] = 8; + data[0] = buffer; + + rc 
= SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, + current->tgid, FILE_END_OF_FILE_INFORMATION, + SMB2_O_INFO_FILE, 0, data, size); + free_req1_func = SMB2_set_info_free; } else if (qi.flags == PASSTHRU_QUERY_INFO) { rqst[1].rq_iov = &vars->qi_iov[0]; rqst[1].rq_nvec = 1; @@ -1642,6 +1645,7 @@ smb2_ioctl_query_info(const unsigned int xid, qi.info_type, qi.additional_information, qi.input_buffer_length, qi.output_buffer_length, buffer); + free_req1_func = SMB2_query_info_free; } else { /* unknown flags */ cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n", qi.flags); @@ -1649,7 +1653,7 @@ smb2_ioctl_query_info(const unsigned int xid, } if (rc) - goto iqinf_exit; + goto free_open_req; smb2_set_next_command(tcon, &rqst[1]); smb2_set_related(&rqst[1]); @@ -1660,14 +1664,14 @@ smb2_ioctl_query_info(const unsigned int xid, rc = SMB2_close_init(tcon, server, &rqst[2], COMPOUND_FID, COMPOUND_FID, false); if (rc) - goto iqinf_exit; + goto free_req_1; smb2_set_related(&rqst[2]); rc = compound_send_recv(xid, ses, server, flags, 3, rqst, resp_buftype, rsp_iov); if (rc) - goto iqinf_exit; + goto out; /* No need to bump num_remote_opens since handle immediately closed */ if (qi.flags & PASSTHRU_FSCTL) { @@ -1677,18 +1681,22 @@ smb2_ioctl_query_info(const unsigned int xid, qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount); if (qi.input_buffer_length > 0 && le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length - > rsp_iov[1].iov_len) - goto e_fault; + > rsp_iov[1].iov_len) { + rc = -EFAULT; + goto out; + } if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, - sizeof(qi.input_buffer_length))) - goto e_fault; + sizeof(qi.input_buffer_length))) { + rc = -EFAULT; + goto out; + } if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info), (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset), qi.input_buffer_length)) - goto e_fault; + rc = -EFAULT; } else { pqi = (struct smb_query_info __user *)arg; qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; @@ -1696,28 +1704,30 @@ smb2_ioctl_query_info(const unsigned int xid, qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength); if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length, - sizeof(qi.input_buffer_length))) - goto e_fault; + sizeof(qi.input_buffer_length))) { + rc = -EFAULT; + goto out; + } if (copy_to_user(pqi + 1, qi_rsp->Buffer, qi.input_buffer_length)) - goto e_fault; + rc = -EFAULT; } - iqinf_exit: - cifs_small_buf_release(rqst[0].rq_iov[0].iov_base); - cifs_small_buf_release(rqst[1].rq_iov[0].iov_base); - cifs_small_buf_release(rqst[2].rq_iov[0].iov_base); +out: free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base); - kfree(vars); + SMB2_close_free(&rqst[2]); +free_req_1: + free_req1_func(&rqst[1]); +free_open_req: + SMB2_open_free(&rqst[0]); +free_output_buffer: kfree(buffer); +free_vars: + kfree(vars); return rc; - -e_fault: - rc = -EFAULT; - goto iqinf_exit; } static ssize_t diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index a9cc6d2d727c7afeb88fbc0ce4c822c677057064..5ad27e484014fdf2cce3df1895089ae4955fe269 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1813,8 +1813,8 @@ void configfs_unregister_group(struct config_group *group) configfs_detach_group(&group->cg_item); d_inode(dentry)->i_flags |= S_DEAD; dont_mount(dentry); + d_drop(dentry); fsnotify_rmdir(d_inode(parent), dentry); - d_delete(dentry); 
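Returning to the smb2_ioctl_query_info() rework above: the single catch-all exit label is replaced by a ladder of labels, each releasing exactly what had been set up, with a function pointer recording which cleanup matches the second request that was actually built. The shape, reduced to a skeleton (every helper below is hypothetical, standing in for the SMB2_*_init/_free calls and compound_send_recv()):

#include <linux/types.h>

extern int build_first_request(void);
extern int build_second_request_a(void);
extern int build_second_request_b(void);
extern int send_compound_and_copy_results(void);
extern void free_first_request(void);
extern void free_second_request_a(void);
extern void free_second_request_b(void);

static int staged_cleanup_sketch(bool flavor_a)
{
	void (*free_second)(void);
	int rc;

	rc = build_first_request();
	if (rc)
		return rc;

	if (flavor_a) {
		rc = build_second_request_a();
		free_second = free_second_request_a;
	} else {
		rc = build_second_request_b();
		free_second = free_second_request_b;
	}
	if (rc)
		goto out_first;	/* second request not built; skip its free */

	rc = send_compound_and_copy_results();

	free_second();		/* success or send failure: unwind both */
out_first:
	free_first_request();
	return rc;
}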
inode_unlock(d_inode(parent)); dput(dentry); @@ -1959,10 +1959,10 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys) configfs_detach_group(&group->cg_item); d_inode(dentry)->i_flags |= S_DEAD; dont_mount(dentry); - fsnotify_rmdir(d_inode(root), dentry); inode_unlock(d_inode(dentry)); - d_delete(dentry); + d_drop(dentry); + fsnotify_rmdir(d_inode(root), dentry); inode_unlock(d_inode(root)); diff --git a/fs/coredump.c b/fs/coredump.c index 335c98787e668640f075728e33f104117c5f6dbf..42c9c3dde764d7163eb951619d07fbbe3fd5f219 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -969,6 +970,8 @@ static bool always_dump_vma(struct vm_area_struct *vma) return false; } +#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1 + /* * Decide how much of @vma's contents should be included in a core dump. */ @@ -1028,9 +1031,20 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma, * dump the first page to aid in determining what was mapped here. */ if (FILTER(ELF_HEADERS) && - vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ) && - (READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0) - return PAGE_SIZE; + vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) { + if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0) + return PAGE_SIZE; + + /* + * ELF libraries aren't always executable. + * We'll want to check whether the mapping starts with the ELF + * magic, but not now - we're holding the mmap lock, + * so copy_from_user() doesn't work here. + * Use a placeholder instead, and fix it up later in + * dump_vma_snapshot(). + */ + return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER; + } #undef FILTER @@ -1105,8 +1119,6 @@ int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, m->end = vma->vm_end; m->flags = vma->vm_flags; m->dump_size = vma_dump_size(vma, cprm->mm_flags); - - vma_data_size += m->dump_size; } mmap_write_unlock(mm); @@ -1116,6 +1128,23 @@ int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, return -EFAULT; } + for (i = 0; i < *vma_count; i++) { + struct core_vma_metadata *m = (*vma_meta) + i; + + if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) { + char elfmag[SELFMAG]; + + if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) || + memcmp(elfmag, ELFMAG, SELFMAG) != 0) { + m->dump_size = 0; + } else { + m->dump_size = PAGE_SIZE; + } + } + + vma_data_size += m->dump_size; + } + *vma_data_size_ptr = vma_data_size; return 0; } diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 3aa5eb9ce498e46b89ef40e7cc900378de3bb29a..96059af28f5084827dceda7aaac2bb8094fe36cc 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -147,7 +147,7 @@ static int debugfs_locked_down(struct inode *inode, struct file *filp, const struct file_operations *real_fops) { - if ((inode->i_mode & 07777) == 0444 && + if ((inode->i_mode & 07777 & ~0444) == 0 && !(filp->f_mode & FMODE_WRITE) && !real_fops->unlocked_ioctl && !real_fops->compat_ioctl && diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 42e5a766d33c7d1fea3ca9c05be73639c38c421e..4f25015aa5342a332c76ad7d931a507784eea764 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -621,8 +621,8 @@ void devpts_pty_kill(struct dentry *dentry) dentry->d_fsdata = NULL; drop_nlink(dentry->d_inode); - fsnotify_unlink(d_inode(dentry->d_parent), dentry); d_drop(dentry); + fsnotify_unlink(d_inode(dentry->d_parent), dentry); dput(dentry); /* d_alloc_name() in devpts_pty_new() */ } diff --git a/fs/direct-io.c b/fs/direct-io.c index 
c64d4eb38995a7639eec0756111071d8a861830f..9dafbb07dd6a6f387512eb78dcccdef4fa834789 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -426,6 +426,8 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) unsigned long flags; bio->bi_private = dio; + /* don't account direct I/O as memory stall */ + bio_clear_flag(bio, BIO_WORKINGSET); spin_lock_irqsave(&dio->bio_lock, flags); dio->refcount++; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 002123efc6b052346ee8c7093789fbbb08929bd1..1e9d8999b9390a32c4dcbf217a5b1be93d183885 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -3975,6 +3975,14 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms) int from = ms->m_header.h_nodeid; int error = 0; + /* currently mixing user/kernel locks is not supported */ + if (ms->m_flags & DLM_IFL_USER && ~lkb->lkb_flags & DLM_IFL_USER) { + log_error(lkb->lkb_resource->res_ls, + "got user dlm message for a kernel lock"); + error = -EINVAL; + goto out; + } + switch (ms->m_type) { case DLM_MSG_CONVERT: case DLM_MSG_UNLOCK: @@ -4003,6 +4011,7 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms) error = -EINVAL; } +out: if (error) log_error(lkb->lkb_resource->res_ls, "ignore invalid message %d from %d %x %x %x %d", diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 0c78fdfb1f6faaec81253a6535055cef23a0acc8..68b765369c928fcda16b0a92f61910a96d9b6dd1 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -471,8 +471,8 @@ int dlm_lowcomms_connect_node(int nodeid) static void lowcomms_error_report(struct sock *sk) { struct connection *con; - struct sockaddr_storage saddr; void (*orig_report)(struct sock *) = NULL; + struct inet_sock *inet; read_lock_bh(&sk->sk_callback_lock); con = sock2con(sk); @@ -480,34 +480,33 @@ static void lowcomms_error_report(struct sock *sk) goto out; orig_report = listen_sock.sk_error_report; - if (con->sock == NULL || - kernel_getpeername(con->sock, (struct sockaddr *)&saddr) < 0) { - printk_ratelimited(KERN_ERR "dlm: node %d: socket error " - "sending to node %d, port %d, " - "sk_err=%d/%d\n", dlm_our_nodeid(), - con->nodeid, dlm_config.ci_tcp_port, - sk->sk_err, sk->sk_err_soft); - } else if (saddr.ss_family == AF_INET) { - struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr; + inet = inet_sk(sk); + switch (sk->sk_family) { + case AF_INET: printk_ratelimited(KERN_ERR "dlm: node %d: socket error " - "sending to node %d at %pI4, port %d, " + "sending to node %d at %pI4, dport %d, " "sk_err=%d/%d\n", dlm_our_nodeid(), - con->nodeid, &sin4->sin_addr.s_addr, - dlm_config.ci_tcp_port, sk->sk_err, + con->nodeid, &inet->inet_daddr, + ntohs(inet->inet_dport), sk->sk_err, sk->sk_err_soft); - } else { - struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr; - + break; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: printk_ratelimited(KERN_ERR "dlm: node %d: socket error " - "sending to node %d at %u.%u.%u.%u, " - "port %d, sk_err=%d/%d\n", dlm_our_nodeid(), - con->nodeid, sin6->sin6_addr.s6_addr32[0], - sin6->sin6_addr.s6_addr32[1], - sin6->sin6_addr.s6_addr32[2], - sin6->sin6_addr.s6_addr32[3], - dlm_config.ci_tcp_port, sk->sk_err, + "sending to node %d at %pI6c, " + "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(), + con->nodeid, &sk->sk_v6_daddr, + ntohs(inet->inet_dport), sk->sk_err, sk->sk_err_soft); + break; +#endif + default: + printk_ratelimited(KERN_ERR "dlm: node %d: socket error " + "invalid socket family %d set, " + "sk_err=%d/%d\n", dlm_our_nodeid(), + sk->sk_family, sk->sk_err, sk->sk_err_soft); + goto out;
} out: read_unlock_bh(&sk->sk_callback_lock); diff --git a/fs/eulerfs/dax.c b/fs/eulerfs/dax.c index 9ec8ad713fd9c2f474f935745eaa134612f4b3eb..131d08f1c6c18a91db97310e06685ca2e9b8b64b 100644 --- a/fs/eulerfs/dax.c +++ b/fs/eulerfs/dax.c @@ -1172,8 +1172,8 @@ static ssize_t do_mapping_read(struct address_space *mapping, goto out; } - copied += (nr - left); - offset += (nr - left); + copied += nr; + offset += nr; index += offset >> PAGE_SHIFT; offset &= ~PAGE_MASK; } while (copied < len); diff --git a/fs/eulerfs/dep.c b/fs/eulerfs/dep.c index ec014bbf3700223d2b8a809063a819e04ca1338e..a41471c5f2ec33132e888d45a54dca0afa2f57e4 100644 --- a/fs/eulerfs/dep.c +++ b/fs/eulerfs/dep.c @@ -718,7 +718,7 @@ int dep_init(struct super_block *sb) for_each_possible_cpu(cpu) init_llist_head(per_cpu_ptr(sbi->persistee_list, cpu)); - sbi->persisters = kmalloc(sizeof(struct task_struct *) * + sbi->persisters = kzalloc(sizeof(struct task_struct *) * persisters_per_socket * num_sockets, GFP_KERNEL); if (!sbi->persisters) { diff --git a/fs/exec.c b/fs/exec.c index 72f8763b3ce9cd154b274b6afdd9be624213cec5..4c2d18061633247d926e80b4176135c77145b23f 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -143,16 +143,6 @@ SYSCALL_DEFINE1(uselib, const char __user *, library) if (IS_ERR(file)) goto out; - /* - * may_open() has already checked for this, so it should be - * impossible to trip now. But we need to be extra cautious - * and check again at the very end too. - */ - error = -EACCES; - if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) || - path_noexec(&file->f_path))) - goto exit; - fsnotify_open(file); error = -ENOEXEC; @@ -171,7 +161,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library) break; } read_unlock(&binfmt_lock); -exit: + fput(file); out: return error; @@ -494,8 +484,14 @@ static int bprm_stack_limits(struct linux_binprm *bprm) * the stack. They aren't stored until much later when we can't * signal to the parent that the child has run out of stack space. * Instead, calculate it here so it's possible to fail gracefully. + * + * In the case of argc = 0, make sure there is space for adding an + * empty string (which will bump argc to 1), to ensure confused + * userspace programs don't start processing from argv[1], thinking + * argc can never be 0, to keep them from walking envp by accident. + * See do_execveat_common(). */ - ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); + ptr_size = (max(bprm->argc, 1) + bprm->envc) * sizeof(void *); if (limit <= ptr_size) return -E2BIG; limit -= ptr_size; @@ -913,16 +909,6 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags) if (IS_ERR(file)) goto out; - /* - * may_open() has already checked for this, so it should be - * impossible to trip now. But we need to be extra cautious - * and check again at the very end too.
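On the execve changes above, the argc == 0 defense has two halves that must agree across hunks: bprm_stack_limits() sizes the pointer table as if argc were at least 1, and do_execveat_common() later injects the empty string that fills the reserved slot. The sizing rule in isolation (stack_table_fits() is a hypothetical distillation, not a kernel function):

#include <linux/errno.h>
#include <linux/minmax.h>

static int stack_table_fits(int argc, int envc, unsigned long limit)
{
	/* argc == 0 still costs one slot, since "" is injected later. */
	unsigned long ptr_size = (max(argc, 1) + envc) * sizeof(void *);

	if (limit <= ptr_size)
		return -E2BIG;

	return 0;
}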
- */ - err = -EACCES; - if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) || - path_noexec(&file->f_path))) - goto exit; - err = deny_write_access(file); if (err) goto exit; @@ -1886,6 +1872,9 @@ static int do_execveat_common(int fd, struct filename *filename, } retval = count(argv, MAX_ARG_STRINGS); + if (retval == 0) + pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n", + current->comm, bprm->filename); if (retval < 0) goto out_free; bprm->argc = retval; @@ -1912,6 +1901,19 @@ static int do_execveat_common(int fd, struct filename *filename, if (retval < 0) goto out_free; + /* + * When argv is empty, add an empty string ("") as argv[0] to + * ensure confused userspace programs that start processing + * from argv[1] won't end up walking envp. See also + * bprm_stack_limits(). + */ + if (bprm->argc == 0) { + retval = copy_string_kernel("", bprm); + if (retval < 0) + goto out_free; + bprm->argc = 1; + } + retval = bprm_execve(bprm, fd, filename, flags); out_free: free_bprm(bprm); @@ -1940,6 +1942,8 @@ int kernel_execve(const char *kernel_filename, } retval = count_strings_kernel(argv); + if (WARN_ON_ONCE(retval == 0)) + retval = -EINVAL; if (retval < 0) goto out_free; bprm->argc = retval; diff --git a/fs/exfat/file.c b/fs/exfat/file.c index a92478eabfa4e43f2198ccf9b62b8632b697c8dc..c819e8427ea577f1734c783986fbddc19445b721 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -109,8 +109,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) exfat_set_volume_dirty(sb); num_clusters_new = EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi); - num_clusters_phys = - EXFAT_B_TO_CLU_ROUND_UP(EXFAT_I(inode)->i_size_ondisk, sbi); + num_clusters_phys = EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi); exfat_chain_set(&clu, ei->start_clu, num_clusters_phys, ei->flags); @@ -227,12 +226,13 @@ void exfat_truncate(struct inode *inode, loff_t size) { struct super_block *sb = inode->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); + struct exfat_inode_info *ei = EXFAT_I(inode); unsigned int blocksize = i_blocksize(inode); loff_t aligned_size; int err; mutex_lock(&sbi->s_lock); - if (EXFAT_I(inode)->start_clu == 0) { + if (ei->start_clu == 0) { /* * Empty start_clu != ~0 (not allocated) */ @@ -250,8 +250,8 @@ void exfat_truncate(struct inode *inode, loff_t size) else mark_inode_dirty(inode); - inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) & - ~(sbi->cluster_size - 1)) >> inode->i_blkbits; + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> + inode->i_blkbits; write_size: aligned_size = i_size_read(inode); if (aligned_size & (blocksize - 1)) { @@ -259,11 +259,11 @@ void exfat_truncate(struct inode *inode, loff_t size) aligned_size++; } - if (EXFAT_I(inode)->i_size_ondisk > i_size_read(inode)) - EXFAT_I(inode)->i_size_ondisk = aligned_size; + if (ei->i_size_ondisk > i_size_read(inode)) + ei->i_size_ondisk = aligned_size; - if (EXFAT_I(inode)->i_size_aligned > i_size_read(inode)) - EXFAT_I(inode)->i_size_aligned = aligned_size; + if (ei->i_size_aligned > i_size_read(inode)) + ei->i_size_aligned = aligned_size; mutex_unlock(&sbi->s_lock); } diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index 8b0288f70e93db1ba12c0493c52e661836435056..2a9f6a80584ee3d63d9325b3f540d5457fa9a217 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -114,10 +114,9 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset, unsigned int local_clu_offset = clu_offset; unsigned int num_to_be_allocated = 0, num_clusters = 0; - if 
(EXFAT_I(inode)->i_size_ondisk > 0) + if (ei->i_size_ondisk > 0) num_clusters = - EXFAT_B_TO_CLU_ROUND_UP(EXFAT_I(inode)->i_size_ondisk, - sbi); + EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi); if (clu_offset >= num_clusters) num_to_be_allocated = clu_offset - num_clusters + 1; @@ -415,10 +414,10 @@ static int exfat_write_end(struct file *file, struct address_space *mapping, err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); - if (EXFAT_I(inode)->i_size_aligned < i_size_read(inode)) { + if (ei->i_size_aligned < i_size_read(inode)) { exfat_fs_error(inode->i_sb, "invalid size(size(%llu) > aligned(%llu)\n", - i_size_read(inode), EXFAT_I(inode)->i_size_aligned); + i_size_read(inode), ei->i_size_aligned); return -EIO; } @@ -601,8 +600,8 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info) exfat_save_attr(inode, info->attr); - inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) & - ~((loff_t)sbi->cluster_size - 1)) >> inode->i_blkbits; + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> + inode->i_blkbits; inode->i_mtime = info->mtime; inode->i_ctime = info->mtime; ei->i_crtime = info->crtime; diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 2932b23a3b6c36ebfb82e5a5e7621c07fbb2822d..935f6005090091342c109456c50c0e5a27b8bc5b 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -395,9 +395,9 @@ static int exfat_find_empty_entry(struct inode *inode, /* directory inode should be updated in here */ i_size_write(inode, size); - EXFAT_I(inode)->i_size_ondisk += sbi->cluster_size; - EXFAT_I(inode)->i_size_aligned += sbi->cluster_size; - EXFAT_I(inode)->flags = p_dir->flags; + ei->i_size_ondisk += sbi->cluster_size; + ei->i_size_aligned += sbi->cluster_size; + ei->flags = p_dir->flags; inode->i_blocks += 1 << sbi->sect_per_clus_bits; } diff --git a/fs/exfat/super.c b/fs/exfat/super.c index c6d8d2e534865236d16386575627e0a442a8db52..ba70ed1c980490cf593232722fe1e579459e0e7d 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -364,11 +364,11 @@ static int exfat_read_root(struct inode *inode) inode->i_op = &exfat_dir_inode_operations; inode->i_fop = &exfat_dir_operations; - inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) - & ~(sbi->cluster_size - 1)) >> inode->i_blkbits; - EXFAT_I(inode)->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff; - EXFAT_I(inode)->i_size_aligned = i_size_read(inode); - EXFAT_I(inode)->i_size_ondisk = i_size_read(inode); + inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> + inode->i_blkbits; + ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff; + ei->i_size_aligned = i_size_read(inode); + ei->i_size_ondisk = i_size_read(inode); exfat_save_attr(inode, ATTR_SUBDIR); inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime = @@ -690,7 +690,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) if (!sb->s_root) { exfat_err(sb, "failed to get the root dentry"); err = -ENOMEM; - goto put_inode; + goto free_table; } return 0; diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 09f1fe67697278d4963ab036ea75fdc0dd234488..74e11028941328a2363e33a1d820a88c2e59f2b6 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -756,8 +756,12 @@ static loff_t ext2_max_size(int bits) res += 1LL << (bits-2); res += 1LL << (2*(bits-2)); res += 1LL << (3*(bits-2)); + /* Compute how many metadata blocks are needed */ + meta_blocks = 1; + meta_blocks += 1 + ppb; + meta_blocks += 1 + ppb + ppb * ppb; /* Does block tree limit file size? 
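To make the new meta_blocks accounting concrete before the comparison below: with 4 KiB blocks (bits = 12), a block holds ppb = 2^(bits-2) = 1024 pointers, so the indirect tree costs 1 block for the indirect level, 1 + 1024 for the double-indirect level, and 1 + 1024 + 1024^2 for the triple-indirect level, roughly 1.05 million metadata blocks alongside about 2^30 addressable data blocks. A sketch of the count (hypothetical helper; ppb is computed as in ext2_max_size()):

/* Metadata blocks needed to address the full indirect tree. */
static unsigned long long meta_blocks_sketch(unsigned long long ppb)
{
	unsigned long long meta = 1;		/* the indirect block */

	meta += 1 + ppb;			/* dind block + its ppb ind blocks */
	meta += 1 + ppb + ppb * ppb;		/* tind + ppb dinds + ppb^2 inds */
	return meta;
}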
*/ - if (res < upper_limit) + if (res + meta_blocks <= upper_limit) goto check_lfs; res = upper_limit; @@ -1399,7 +1403,6 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf) struct super_block *sb = dentry->d_sb; struct ext2_sb_info *sbi = EXT2_SB(sb); struct ext2_super_block *es = sbi->s_es; - u64 fsid; spin_lock(&sbi->s_lock); @@ -1453,9 +1456,7 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf) buf->f_ffree = ext2_count_free_inodes(sb); es->s_free_inodes_count = cpu_to_le32(buf->f_ffree); buf->f_namelen = EXT2_NAME_LEN; - fsid = le64_to_cpup((void *)es->s_uuid) ^ - le64_to_cpup((void *)es->s_uuid + sizeof(u64)); - buf->f_fsid = u64_to_fsid(fsid); + buf->f_fsid = uuid_to_fsid(es->s_uuid); spin_unlock(&sbi->s_lock); return 0; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 85dae812c93333b8da9b45167e2694eee46f8714..277f89d5de038ddb29d08167fa4159f86f714da1 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -17,6 +17,7 @@ #ifndef _EXT4_H #define _EXT4_H +#include #include #include #include @@ -235,7 +236,7 @@ typedef struct ext4_io_end { struct bio *bio; /* Linked list of completed * bios covering the extent */ unsigned int flag; /* unwritten or not */ - atomic_t count; /* reference counter */ + refcount_t count; /* reference counter */ struct list_head list_vec; /* list of ext4_io_end_vec */ } ext4_io_end_t; @@ -1164,6 +1165,9 @@ struct ext4_inode_info { __u32 i_csum_seed; kprojid_t i_projid; + + /* Protect concurrent add cluster delayed block and remove block */ + struct mutex i_clu_lock; }; /* @@ -2189,6 +2193,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi) * Structure of a directory entry */ #define EXT4_NAME_LEN 255 +/* + * Base length of the ext4 directory entry excluding the name length + */ +#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN) struct ext4_dir_entry { __le32 inode; /* Inode number */ @@ -2808,6 +2816,10 @@ bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t block); void ext4_fc_replay_cleanup(struct super_block *sb); int ext4_fc_commit(journal_t *journal, tid_t commit_tid); int __init ext4_fc_init_dentry_cache(void); +void ext4_fc_destroy_dentry_cache(void); +int ext4_fc_record_regions(struct super_block *sb, int ino, + ext4_lblk_t lblk, ext4_fsblk_t pblk, + int len, int replay); /* mballoc.c */ extern const struct seq_operations ext4_mb_seq_groups_ops; diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 398df993ccd88a63f18dbfd9073d18198c974c73..4323186bae78b3c96399a2b23fec0e20e451c963 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -371,7 +371,7 @@ static int ext4_valid_extent_entries(struct inode *inode, { unsigned short entries; ext4_lblk_t lblock = 0; - ext4_lblk_t prev = 0; + ext4_lblk_t cur = 0; if (eh->eh_entries == 0) return 1; @@ -395,11 +395,11 @@ static int ext4_valid_extent_entries(struct inode *inode, /* Check for overlapping extents */ lblock = le32_to_cpu(ext->ee_block); - if ((lblock <= prev) && prev) { + if (lblock < cur) { *pblk = ext4_ext_pblock(ext); return 0; } - prev = lblock + ext4_ext_get_actual_len(ext) - 1; + cur = lblock + ext4_ext_get_actual_len(ext); ext++; entries--; } @@ -419,13 +419,13 @@ static int ext4_valid_extent_entries(struct inode *inode, /* Check for overlapping index extents */ lblock = le32_to_cpu(ext_idx->ei_block); - if ((lblock <= prev) && prev) { + if (lblock < cur) { *pblk = ext4_idx_pblock(ext_idx); return 0; } ext_idx++; entries--; - prev = lblock; + cur = lblock + 1; } } return 1; @@ -4650,8 
+4650,6 @@ static long ext4_zero_range(struct file *file, loff_t offset, ret = ext4_mark_inode_dirty(handle, inode); if (unlikely(ret)) goto out_handle; - ext4_fc_track_range(handle, inode, offset >> inode->i_sb->s_blocksize_bits, - (offset + len - 1) >> inode->i_sb->s_blocksize_bits); /* Zero out partial block at the edges of the range */ ret = ext4_zero_partial_blocks(handle, inode, offset, len); if (ret >= 0) @@ -4702,15 +4700,17 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) ext4_fc_start_update(inode); + inode_lock(inode); + ret = ext4_convert_inline_data(inode); + inode_unlock(inode); + if (ret) + goto exit; + if (mode & FALLOC_FL_PUNCH_HOLE) { ret = ext4_punch_hole(inode, offset, len); goto exit; } - ret = ext4_convert_inline_data(inode); - if (ret) - goto exit; - if (mode & FALLOC_FL_COLLAPSE_RANGE) { ret = ext4_collapse_range(inode, offset, len); goto exit; @@ -6102,11 +6102,15 @@ int ext4_ext_clear_bb(struct inode *inode) ext4_mb_mark_bb(inode->i_sb, path[j].p_block, 1, 0); + ext4_fc_record_regions(inode->i_sb, inode->i_ino, + 0, path[j].p_block, 1, 1); } ext4_ext_drop_refs(path); kfree(path); } ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0); + ext4_fc_record_regions(inode->i_sb, inode->i_ino, + map.m_lblk, map.m_pblk, map.m_len, 1); } cur = cur + map.m_len; } diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 9a3a8996aacf7356131e3ea6a12e096f3a711695..dd679014db980a1643b43a08ac31642815d4240a 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c @@ -1433,6 +1433,7 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len) { + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t end; int err = 0; int reserved = 0; @@ -1455,9 +1456,13 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, * so that we are sure __es_shrink() is done with the inode before it * is reclaimed. 
*/ + if (sbi->s_cluster_ratio != 1) + mutex_lock(&EXT4_I(inode)->i_clu_lock); write_lock(&EXT4_I(inode)->i_es_lock); err = __es_remove_extent(inode, lblk, end, &reserved); write_unlock(&EXT4_I(inode)->i_es_lock); + if (sbi->s_cluster_ratio != 1) + mutex_unlock(&EXT4_I(inode)->i_clu_lock); ext4_es_print_tree(inode); ext4_da_release_space(inode, reserved); return err; } diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 08ca690f928bd3857ed8fb7e617dbe014c6ea8ac..501e60713010e57a0f84ad3b4335b9a43d23d497 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -1388,14 +1388,15 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino) if (state->fc_modified_inodes[i] == ino) return 0; if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) { - state->fc_modified_inodes_size += - EXT4_FC_REPLAY_REALLOC_INCREMENT; state->fc_modified_inodes = krealloc( - state->fc_modified_inodes, sizeof(int) * - state->fc_modified_inodes_size, - GFP_KERNEL); + state->fc_modified_inodes, + sizeof(int) * (state->fc_modified_inodes_size + + EXT4_FC_REPLAY_REALLOC_INCREMENT), + GFP_KERNEL); if (!state->fc_modified_inodes) return -ENOMEM; + state->fc_modified_inodes_size += + EXT4_FC_REPLAY_REALLOC_INCREMENT; } state->fc_modified_inodes[state->fc_modified_inodes_used++] = ino; return 0; @@ -1427,7 +1428,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl, } inode = NULL; - ext4_fc_record_modified_inode(sb, ino); + ret = ext4_fc_record_modified_inode(sb, ino); + if (ret) + goto out; raw_fc_inode = (struct ext4_inode *) (val + offsetof(struct ext4_fc_inode, fc_raw_inode)); @@ -1558,16 +1561,23 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl, } /* - * Record physical disk regions which are in use as per fast commit area. Our - * simple replay phase allocator excludes these regions from allocation. + * Record physical disk regions which are in use, both as per the fast + * commit area and as used by inodes during the replay phase. Our simple + * replay-phase allocator excludes these regions from allocation. */ -static int ext4_fc_record_regions(struct super_block *sb, int ino, - ext4_lblk_t lblk, ext4_fsblk_t pblk, int len) +int ext4_fc_record_regions(struct super_block *sb, int ino, + ext4_lblk_t lblk, ext4_fsblk_t pblk, int len, int replay) { struct ext4_fc_replay_state *state; struct ext4_fc_alloc_region *region; state = &EXT4_SB(sb)->s_fc_replay_state; + /* + * During the replay phase, fc_regions_valid may not match + * fc_regions_used; bring them back in sync when recording new regions.
+ */ + if (replay && state->fc_regions_used != state->fc_regions_valid) + state->fc_regions_used = state->fc_regions_valid; if (state->fc_regions_used == state->fc_regions_size) { state->fc_regions_size += EXT4_FC_REPLAY_REALLOC_INCREMENT; @@ -1585,6 +1595,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino, region->pblk = pblk; region->len = len; + if (replay) + state->fc_regions_valid++; + return 0; } @@ -1616,6 +1629,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb, } ret = ext4_fc_record_modified_inode(sb, inode->i_ino); + if (ret) + goto out; start = le32_to_cpu(ex->ee_block); start_pblk = ext4_ext_pblock(ex); @@ -1633,18 +1648,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb, map.m_pblk = 0; ret = ext4_map_blocks(NULL, inode, &map, 0); - if (ret < 0) { - iput(inode); - return 0; - } + if (ret < 0) + goto out; if (ret == 0) { /* Range is not mapped */ path = ext4_find_extent(inode, cur, NULL, 0); - if (IS_ERR(path)) { - iput(inode); - return 0; - } + if (IS_ERR(path)) + goto out; memset(&newex, 0, sizeof(newex)); newex.ee_block = cpu_to_le32(cur); ext4_ext_store_pblock( @@ -1658,10 +1669,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb, up_write((&EXT4_I(inode)->i_data_sem)); ext4_ext_drop_refs(path); kfree(path); - if (ret) { - iput(inode); - return 0; - } + if (ret) + goto out; goto next; } @@ -1674,10 +1683,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb, ret = ext4_ext_replay_update_ex(inode, cur, map.m_len, ext4_ext_is_unwritten(ex), start_pblk + cur - start); - if (ret) { - iput(inode); - return 0; - } + if (ret) + goto out; /* * Mark the old blocks as free since they aren't used * anymore. We maintain an array of all the modified @@ -1697,10 +1704,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb, ext4_ext_is_unwritten(ex), map.m_pblk); ret = ext4_ext_replay_update_ex(inode, cur, map.m_len, ext4_ext_is_unwritten(ex), map.m_pblk); - if (ret) { - iput(inode); - return 0; - } + if (ret) + goto out; /* * We may have split the extent tree while toggling the state. * Try to shrink the extent tree now. 
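The ext4_fc_record_modified_inode() hunk earlier in this fast_commit.c diff reorders the krealloc() bookkeeping so that fc_modified_inodes_size only grows once the allocation has succeeded; a failed krealloc() now leaves a consistent {array, size} pair behind. A minimal userspace sketch of the same pattern follows, with realloc() standing in for krealloc() and all names (int_list, GROW_INCREMENT) invented for illustration:

#include <stdlib.h>

#define GROW_INCREMENT 16	/* stand-in for EXT4_FC_REPLAY_REALLOC_INCREMENT */

struct int_list {
	int *vals;
	size_t used;
	size_t size;
};

static int int_list_append(struct int_list *l, int v)
{
	if (l->used == l->size) {
		/* Compute the grown size locally; commit it only on success. */
		int *grown = realloc(l->vals,
				     sizeof(*grown) * (l->size + GROW_INCREMENT));

		if (!grown)
			return -1;	/* l->vals and l->size are still coherent */
		l->vals = grown;
		l->size += GROW_INCREMENT;
	}
	l->vals[l->used++] = v;
	return 0;
}

The temporary pointer additionally keeps the old buffer reachable on failure; the kernel hunk tolerates losing it, presumably because replay aborts on -ENOMEM anyway.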
@@ -1712,6 +1717,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb, } ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >> sb->s_blocksize_bits); +out: iput(inode); return 0; } @@ -1741,6 +1747,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl, } ret = ext4_fc_record_modified_inode(sb, inode->i_ino); + if (ret) + goto out; jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n", inode->i_ino, le32_to_cpu(lrange.fc_lblk), @@ -1750,10 +1758,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl, map.m_len = remaining; ret = ext4_map_blocks(NULL, inode, &map, 0); - if (ret < 0) { - iput(inode); - return 0; - } + if (ret < 0) + goto out; if (ret > 0) { remaining -= ret; cur += ret; @@ -1764,16 +1770,18 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl, } } - ret = ext4_punch_hole(inode, - le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits, - le32_to_cpu(lrange.fc_len) << sb->s_blocksize_bits); + down_write(&EXT4_I(inode)->i_data_sem); + ret = ext4_ext_remove_space(inode, le32_to_cpu(lrange.fc_lblk), + le32_to_cpu(lrange.fc_lblk) + + le32_to_cpu(lrange.fc_len) - 1); + up_write(&EXT4_I(inode)->i_data_sem); if (ret) - jbd_debug(1, "ext4_punch_hole returned %d", ret); + goto out; ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >> sb->s_blocksize_bits); ext4_mark_inode_dirty(NULL, inode); +out: iput(inode); - return 0; } @@ -1951,7 +1959,7 @@ static int ext4_fc_replay_scan(journal_t *journal, ret = ext4_fc_record_regions(sb, le32_to_cpu(ext.fc_ino), le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex), - ext4_ext_get_actual_len(ex)); + ext4_ext_get_actual_len(ex), 0); if (ret < 0) break; ret = JBD2_FC_REPLAY_CONTINUE; @@ -2166,3 +2174,8 @@ int __init ext4_fc_init_dentry_cache(void) return 0; } + +void ext4_fc_destroy_dentry_cache(void) +{ + kmem_cache_destroy(ext4_fc_dentry_cachep); +} diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 3b09ddbe897078100d4d8e043e45262702f8445b..6f78f7fbf419fb9958ee71b86a767cb447702d79 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" @@ -128,6 +129,7 @@ static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to) if (iocb->ki_flags & IOCB_DIRECT) return ext4_dio_read_iter(iocb, to); + fs_file_read_do_trace(iocb); return generic_file_read_iter(iocb, to); } @@ -138,6 +140,8 @@ static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to) */ static int ext4_release_file(struct inode *inode, struct file *filp) { + trace_fs_file_release(inode, filp); + if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) { ext4_alloc_da_blocks(inode); ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index cb42b2245c21cd1736e9378fa184d5b025f31f51..c2c688cb45005616a9c6f852411f6bb2017c5f40 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -34,6 +34,9 @@ static int get_max_inline_xattr_value_size(struct inode *inode, struct ext4_inode *raw_inode; int free, min_offs; + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) + return 0; + min_offs = EXT4_SB(inode->i_sb)->s_inode_size - EXT4_GOOD_OLD_INODE_SIZE - EXT4_I(inode)->i_extra_isize - @@ -1126,7 +1129,15 @@ static void ext4_restore_inline_data(handle_t *handle, struct inode *inode, struct ext4_iloc *iloc, void *buf, int inline_size) { - ext4_create_inline_data(handle, inode, inline_size); + int ret; + + ret = ext4_create_inline_data(handle, inode, 
inline_size); + if (ret) { + ext4_msg(inode->i_sb, KERN_EMERG, + "error restoring inline_data for inode -- potential data loss! (inode %lu, error %d)", + inode->i_ino, ret); + return; + } ext4_write_inline_data(inode, iloc, buf, 0, inline_size); ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); } @@ -1766,19 +1777,20 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) void *inline_pos; unsigned int offset; struct ext4_dir_entry_2 *de; - bool ret = true; + bool ret = false; err = ext4_get_inode_loc(dir, &iloc); if (err) { EXT4_ERROR_INODE_ERR(dir, -err, "error %d getting inode %lu block", err, dir->i_ino); - return true; + return false; } down_read(&EXT4_I(dir)->xattr_sem); if (!ext4_has_inline_data(dir)) { *has_inline_data = 0; + ret = true; goto out; } @@ -1787,7 +1799,6 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) ext4_warning(dir->i_sb, "bad inline directory (dir #%lu) - no `..'", dir->i_ino); - ret = true; goto out; } @@ -1806,16 +1817,15 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data) dir->i_ino, le32_to_cpu(de->inode), le16_to_cpu(de->rec_len), de->name_len, inline_size); - ret = true; goto out; } if (le32_to_cpu(de->inode)) { - ret = false; goto out; } offset += ext4_rec_len_from_disk(de->rec_len, inline_size); } + ret = true; out: up_read(&EXT4_I(dir)->xattr_sem); brelse(iloc.bh); @@ -1973,6 +1983,18 @@ int ext4_convert_inline_data(struct inode *inode) if (!ext4_has_inline_data(inode)) { ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); return 0; + } else if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { + /* + * Inode has inline data but EXT4_STATE_MAY_INLINE_DATA is + * cleared. This means we are in the middle of moving inline + * data to delayed-allocation blocks. Just force writeout here + * to finish the conversion.
+ */ + error = filemap_flush(inode->i_mapping); + if (error) + return error; + if (!ext4_has_inline_data(inode)) + return 0; } needed_blocks = ext4_writepage_trans_blocks(inode); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 74f2504b939bab8d39daffc466e5c3b5c24a8e2b..e85c238edd8545fbac91d1c49d583bf84a75a240 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -744,10 +744,11 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, if (ret) return ret; } - ext4_fc_track_range(handle, inode, map->m_lblk, - map->m_lblk + map->m_len - 1); } - + if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN || + map->m_flags & EXT4_MAP_MAPPED)) + ext4_fc_track_range(handle, inode, map->m_lblk, + map->m_lblk + map->m_len - 1); if (retval < 0) ext_debug(inode, "failed with err %d\n", retval); return retval; @@ -1647,17 +1648,22 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk) goto errout; reserved = true; } else { /* bigalloc */ + mutex_lock(&EXT4_I(inode)->i_clu_lock); if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) { if (!ext4_es_scan_clu(inode, &ext4_es_is_mapped, lblk)) { ret = ext4_clu_mapped(inode, EXT4_B2C(sbi, lblk)); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&EXT4_I(inode)->i_clu_lock); goto errout; + } if (ret == 0) { ret = ext4_da_reserve_space(inode); - if (ret != 0) /* ENOSPC */ + if (ret != 0) { /* ENOSPC */ + mutex_unlock(&EXT4_I(inode)->i_clu_lock); goto errout; + } reserved = true; } else { allocated = true; @@ -1669,6 +1675,8 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk) } ret = ext4_es_insert_delayed_block(inode, lblk, allocated); + if (sbi->s_cluster_ratio != 1) + mutex_unlock(&EXT4_I(inode)->i_clu_lock); if (ret && reserved) ext4_da_release_space(inode, 1); @@ -2011,6 +2019,15 @@ static int ext4_writepage(struct page *page, else len = PAGE_SIZE; + /* Should never happen but for bugs in other kernel subsystems */ + if (!page_has_buffers(page)) { + ext4_warning_inode(inode, + "page %lu does not have buffers attached", page->index); + ClearPageDirty(page); + unlock_page(page); + return 0; + } + page_bufs = page_buffers(page); /* * We cannot do block allocation or other extent handling in this @@ -2620,6 +2637,22 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) wait_on_page_writeback(page); BUG_ON(PageWriteback(page)); + /* + * Should never happen but for buggy code in + * other subsystems that call + * set_page_dirty() without properly warning + * the file system first. See [1] for more + * information. + * + * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz + */ + if (!page_has_buffers(page)) { + ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index); + ClearPageDirty(page); + unlock_page(page); + continue; + } + if (mpd->map.m_len == 0) mpd->first_page = page->index; mpd->next_page = page->index + 1; @@ -3951,15 +3984,6 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) trace_ext4_punch_hole(inode, offset, length, 0); - ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); - if (ext4_has_inline_data(inode)) { - down_write(&EXT4_I(inode)->i_mmap_sem); - ret = ext4_convert_inline_data(inode); - up_write(&EXT4_I(inode)->i_mmap_sem); - if (ret) - return ret; - } - /* * Write out all dirty pages to avoid race conditions * Then release them. 
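The i_clu_lock hunks above (ext4.h, extents_status.c, inode.c, and later super.c) close a bigalloc accounting race by holding one mutex across the whole scan-then-reserve sequence: checking shared state and acting on the result must form a single critical section, or a concurrent remover can invalidate the check between the two steps. A contrived userspace sketch of that check-then-act discipline, with a pthread mutex standing in for i_clu_lock and a boolean standing in for the extent-status scan (all names illustrative, not ext4 structures):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t clu_lock = PTHREAD_MUTEX_INITIALIZER;
static bool cluster_delayed;	/* "does this cluster have a delayed extent?" */
static long reserved_clusters;	/* per-cluster reservation counter */

/* Loosely mirrors ext4_insert_delayed_block(): scan + reserve + insert. */
static void add_delayed_block(void)
{
	pthread_mutex_lock(&clu_lock);
	if (!cluster_delayed) {		/* the "scan" step */
		reserved_clusters++;	/* the "reserve" step */
		cluster_delayed = true;	/* the "insert" step */
	}
	pthread_mutex_unlock(&clu_lock);
}

/* Loosely mirrors ext4_es_remove_extent(): removal takes the same lock. */
static void remove_delayed_block(void)
{
	pthread_mutex_lock(&clu_lock);
	if (cluster_delayed) {
		cluster_delayed = false;
		reserved_clusters--;
	}
	pthread_mutex_unlock(&clu_lock);
}

Dropping clu_lock between the check and the increment would let two racing writers both observe cluster_delayed == false and double-reserve, which is the kind of per-cluster miscount the patch guards against.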
@@ -4532,7 +4556,7 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino, static int __ext4_get_inode_loc_noinmem(struct inode *inode, struct ext4_iloc *iloc) { - ext4_fsblk_t err_blk; + ext4_fsblk_t err_blk = 0; int ret; ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc, @@ -4547,7 +4571,7 @@ static int __ext4_get_inode_loc_noinmem(struct inode *inode, int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) { - ext4_fsblk_t err_blk; + ext4_fsblk_t err_blk = 0; int ret; ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc, @@ -4655,8 +4679,7 @@ static inline int ext4_iget_extra_inode(struct inode *inode, __le32 *magic = (void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize; - if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <= - EXT4_INODE_SIZE(inode->i_sb) && + if (EXT4_INODE_HAS_XATTR_SPACE(inode) && *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { ext4_set_inode_state(inode, EXT4_STATE_XATTR); return ext4_find_inline_data_nolock(inode); @@ -5369,6 +5392,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) if (attr->ia_valid & ATTR_SIZE) { handle_t *handle; loff_t oldsize = inode->i_size; + loff_t old_disksize; int shrink = (attr->ia_size < inode->i_size); if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { @@ -5432,8 +5456,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) ext4_fc_track_range(handle, inode, (attr->ia_size > 0 ? attr->ia_size - 1 : 0) >> inode->i_sb->s_blocksize_bits, - (oldsize > 0 ? oldsize - 1 : 0) >> - inode->i_sb->s_blocksize_bits); + EXT_MAX_BLOCKS - 1); else ext4_fc_track_range( handle, inode, @@ -5443,6 +5466,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) inode->i_sb->s_blocksize_bits); down_write(&EXT4_I(inode)->i_data_sem); + old_disksize = EXT4_I(inode)->i_disksize; EXT4_I(inode)->i_disksize = attr->ia_size; rc = ext4_mark_inode_dirty(handle, inode); if (!error) @@ -5454,6 +5478,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) */ if (!error) i_size_write(inode, attr->ia_size); + else + EXT4_I(inode)->i_disksize = old_disksize; up_write(&EXT4_I(inode)->i_data_sem); ext4_journal_stop(handle); if (error) diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index cb54ea6461fd87a8950986924a296752b7cd01ee..413bf3d2f784437945dec5e36ad9649feda031da 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -1123,8 +1123,6 @@ static long __ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) sizeof(range))) return -EFAULT; - range.minlen = max((unsigned int)range.minlen, - q->limits.discard_granularity); ret = ext4_trim_fs(sb, &range); if (ret < 0) return ret; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index d7cb7d719ee5845924146bd92c5837b4042de266..19108d6bb566c19d17b4977fe0f409b27dec44bb 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3320,69 +3320,95 @@ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_group_t group; ext4_grpblk_t blkoff; - int i, clen, err; + int i, err; int already; + unsigned int clen, clen_changed, thisgrp_len; - clen = EXT4_B2C(sbi, len); + while (len > 0) { + ext4_get_group_no_and_offset(sb, block, &group, &blkoff); - ext4_get_group_no_and_offset(sb, block, &group, &blkoff); - bitmap_bh = ext4_read_block_bitmap(sb, group); - if (IS_ERR(bitmap_bh)) { - err = PTR_ERR(bitmap_bh); - bitmap_bh = NULL; - goto out_err; - } + /* + * Check to see if we are freeing blocks across a group + * boundary. 
+ * In case of flex_bg, (block, len) may span more than one + * group. In that case we need to fetch the metadata for each + * group in turn, which is what the surrounding loop does. + */ + thisgrp_len = min_t(unsigned int, (unsigned int)len, + EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff)); + clen = EXT4_NUM_B2C(sbi, thisgrp_len); - err = -EIO; - gdp = ext4_get_group_desc(sb, group, &gdp_bh); - if (!gdp) - goto out_err; + bitmap_bh = ext4_read_block_bitmap(sb, group); + if (IS_ERR(bitmap_bh)) { + err = PTR_ERR(bitmap_bh); + bitmap_bh = NULL; + break; + } - ext4_lock_group(sb, group); - already = 0; - for (i = 0; i < clen; i++) - if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == !state) - already++; + err = -EIO; + gdp = ext4_get_group_desc(sb, group, &gdp_bh); + if (!gdp) + break; - if (state) - ext4_set_bits(bitmap_bh->b_data, blkoff, clen); - else - mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen); - if (ext4_has_group_desc_csum(sb) && - (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { - gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); - ext4_free_group_clusters_set(sb, gdp, - ext4_free_clusters_after_init(sb, - group, gdp)); - } - if (state) - clen = ext4_free_group_clusters(sb, gdp) - clen + already; - else - clen = ext4_free_group_clusters(sb, gdp) + clen - already; + ext4_lock_group(sb, group); + already = 0; + for (i = 0; i < clen; i++) + if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == + !state) + already++; + + clen_changed = clen - already; + if (state) + ext4_set_bits(bitmap_bh->b_data, blkoff, clen); + else + mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen); + if (ext4_has_group_desc_csum(sb) && + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); + ext4_free_group_clusters_set(sb, gdp, + ext4_free_clusters_after_init(sb, group, gdp)); + } + if (state) + clen = ext4_free_group_clusters(sb, gdp) - clen_changed; + else + clen = ext4_free_group_clusters(sb, gdp) + clen_changed; - ext4_free_group_clusters_set(sb, gdp, clen); - ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); - ext4_group_desc_csum_set(sb, group, gdp); + ext4_free_group_clusters_set(sb, gdp, clen); + ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); + ext4_group_desc_csum_set(sb, group, gdp); - ext4_unlock_group(sb, group); + ext4_unlock_group(sb, group); - if (sbi->s_log_groups_per_flex) { - ext4_group_t flex_group = ext4_flex_group(sbi, group); + if (sbi->s_log_groups_per_flex) { + ext4_group_t flex_group = ext4_flex_group(sbi, group); + struct flex_groups *fg = sbi_array_rcu_deref(sbi, + s_flex_groups, flex_group); - atomic64_sub(len, - &sbi_array_rcu_deref(sbi, s_flex_groups, - flex_group)->free_clusters); + if (state) + atomic64_sub(clen_changed, &fg->free_clusters); + else + atomic64_add(clen_changed, &fg->free_clusters); + + } + + err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); + if (err) + break; + sync_dirty_buffer(bitmap_bh); + err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); + sync_dirty_buffer(gdp_bh); + if (err) + break; + + block += thisgrp_len; + len -= thisgrp_len; + brelse(bitmap_bh); + BUG_ON(len < 0); } - err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); if (err) - goto out_err; - sync_dirty_buffer(bitmap_bh); - err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); - sync_dirty_buffer(gdp_bh); - -out_err: - brelse(bitmap_bh); + brelse(bitmap_bh); } /* @@ -3494,6 +3520,15 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, size =
size >> bsbits; start = start_off >> bsbits; + /* + * For tiny groups (smaller than 8MB) the chosen allocation + * alignment may be larger than group size. Make sure the + * alignment does not move allocation to a different group which + * makes mballoc fail assertions later. + */ + start = max(start, rounddown(ac->ac_o_ex.fe_logical, + (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); + /* don't cover already allocated blocks in selected range */ if (ar->pleft && start <= ar->lleft) { size -= ar->lleft + 1 - start; @@ -3566,7 +3601,22 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, } rcu_read_unlock(); - if (start + size <= ac->ac_o_ex.fe_logical && + /* + * In this function "start" and "size" are normalized for better + * alignment and length such that we could preallocate more blocks. + * This normalization is done such that the original request of + * ac->ac_o_ex.fe_logical & fe_len always lies within the "start" and + * "size" boundaries. + * (Note that fe_len can be relaxed, since the FS block allocation API + * does not guarantee the number of contiguous blocks allocated; that + * depends upon the free space left, etc.) + * In case of an inode pa, we later use the allocated blocks + * [pa_start + fe_logical - pa_lstart, fe_len/size] from the preallocated + * range of goal/best blocks [start, size] to put it at the + * ac_o_ex.fe_logical extent of this inode. + * (See ext4_mb_use_inode_pa() for more details.) + */ + if (start + size <= ac->ac_o_ex.fe_logical || start > ac->ac_o_ex.fe_logical) { ext4_msg(ac->ac_sb, KERN_ERR, "start %lu, size %lu, fe_logical %lu", @@ -4234,7 +4284,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, */ static noinline_for_stack int ext4_mb_discard_group_preallocations(struct super_block *sb, - ext4_group_t group, int needed) + ext4_group_t group, int *busy) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); struct buffer_head *bitmap_bh = NULL; struct list_head list; struct ext4_buddy e4b; int err; - int busy = 0; - int free, free_total = 0; + int free = 0; mb_debug(sb, "discard preallocation for group %u\n", group); if (list_empty(&grp->bb_prealloc_list)) @@ -4266,19 +4315,14 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, goto out_dbg; } - if (needed == 0) - needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; - INIT_LIST_HEAD(&list); -repeat: - free = 0; ext4_lock_group(sb, group); list_for_each_entry_safe(pa, tmp, &grp->bb_prealloc_list, pa_group_list) { spin_lock(&pa->pa_lock); if (atomic_read(&pa->pa_count)) { spin_unlock(&pa->pa_lock); - busy = 1; + *busy = 1; continue; } if (pa->pa_deleted) { @@ -4318,22 +4362,13 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } - free_total += free; - - /* if we still need more blocks and some PAs were used, try again */ - if (free_total < needed && busy) { - ext4_unlock_group(sb, group); - cond_resched(); - busy = 0; - goto repeat; - } ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); put_bh(bitmap_bh); out_dbg: mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", - free_total, group, grp->bb_free); - return free_total; + free, group, grp->bb_free); + return free; } /* @@ -4875,13 +4910,24 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) { ext4_group_t i, ngroups = ext4_get_groups_count(sb); int ret; - int freed = 0; + int freed = 0, busy = 0; + int retry = 0;
trace_ext4_mb_discard_preallocations(sb, needed); + + if (needed == 0) + needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; + repeat: for (i = 0; i < ngroups && needed > 0; i++) { - ret = ext4_mb_discard_group_preallocations(sb, i, needed); + ret = ext4_mb_discard_group_preallocations(sb, i, &busy); freed += ret; needed -= ret; + cond_resched(); + } + + if (needed > 0 && busy && ++retry < 3) { + busy = 0; + goto repeat; } return freed; @@ -5177,7 +5223,8 @@ static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, struct super_block *sb = ar->inode->i_sb; ext4_group_t group; ext4_grpblk_t blkoff; - int i = sb->s_blocksize; + ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); + ext4_grpblk_t i = 0; ext4_fsblk_t goal, block; struct ext4_super_block *es = EXT4_SB(sb)->s_es; @@ -5199,19 +5246,26 @@ static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, ext4_get_group_no_and_offset(sb, max(ext4_group_first_block_no(sb, group), goal), NULL, &blkoff); - i = mb_find_next_zero_bit(bitmap_bh->b_data, sb->s_blocksize, + while (1) { + i = mb_find_next_zero_bit(bitmap_bh->b_data, max, blkoff); + if (i >= max) + break; + if (ext4_fc_replay_check_excluded(sb, + ext4_group_first_block_no(sb, group) + i)) { + blkoff = i + 1; + } else + break; + } brelse(bitmap_bh); - if (i >= sb->s_blocksize) - continue; - if (ext4_fc_replay_check_excluded(sb, - ext4_group_first_block_no(sb, group) + i)) - continue; - break; + if (i < max) + break; } - if (group >= ext4_get_groups_count(sb) && i >= sb->s_blocksize) + if (group >= ext4_get_groups_count(sb) || i >= max) { + *errp = -ENOSPC; return 0; + } block = ext4_group_first_block_no(sb, group) + i; ext4_mb_mark_bb(sb, block, 1, 1); @@ -5815,6 +5869,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group, */ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) { + struct request_queue *q = bdev_get_queue(sb->s_bdev); struct ext4_group_info *grp; ext4_group_t group, first_group, last_group; ext4_grpblk_t cnt = 0, first_cluster, last_cluster; @@ -5833,6 +5888,13 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) start >= max_blks || range->len < sb->s_blocksize) return -EINVAL; + /* No point in trying to trim less than the discard granularity */ + if (range->minlen < q->limits.discard_granularity) { + minlen = EXT4_NUM_B2C(EXT4_SB(sb), + q->limits.discard_granularity >> sb->s_blocksize_bits); + if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) + goto out; + } if (end >= max_blks) end = max_blks - 1; if (end <= first_data_blk) diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index c5e3fc998211acfe9449e8be42ef2c6cd96542d7..04320715d61f1bbfad7f41c0fc5161f0cb975a86 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -417,7 +417,7 @@ int ext4_ext_migrate(struct inode *inode) struct inode *tmp_inode = NULL; struct migrate_struct lb; unsigned long max_entries; - __u32 goal; + __u32 goal, tmp_csum_seed; uid_t owner[2]; /* @@ -437,12 +437,12 @@ int ext4_ext_migrate(struct inode *inode) percpu_down_write(&sbi->s_writepages_rwsem); /* - * Worst case we can touch the allocation bitmaps, a bgd - * block, and a block to link in the orphan list. We do need - * need to worry about credits for modifying the quota inode. + * Worst case we can touch the allocation bitmaps and a block + * group descriptor block. We do need to worry about + * credits for modifying the quota inode.
*/ handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, - 4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb)); + 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb)); if (IS_ERR(handle)) { retval = PTR_ERR(handle); @@ -459,6 +459,14 @@ int ext4_ext_migrate(struct inode *inode) ext4_journal_stop(handle); goto out_unlock; } + /* + * Use the correct seed for checksum (i.e. the seed from 'inode'). This + * is so that the metadata blocks will have the correct checksum after + * the migration. + */ + ei = EXT4_I(inode); + tmp_csum_seed = EXT4_I(tmp_inode)->i_csum_seed; + EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed; i_size_write(tmp_inode, i_size_read(inode)); /* * Set the i_nlink to zero so it will be deleted later @@ -467,7 +475,6 @@ int ext4_ext_migrate(struct inode *inode) clear_nlink(tmp_inode); ext4_ext_tree_init(handle, tmp_inode); - ext4_orphan_add(handle, tmp_inode); ext4_journal_stop(handle); /* @@ -492,17 +499,10 @@ int ext4_ext_migrate(struct inode *inode) handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1); if (IS_ERR(handle)) { - /* - * It is impossible to update on-disk structures without - * a handle, so just rollback in-core changes and live other - * work to orphan_list_cleanup() - */ - ext4_orphan_del(NULL, tmp_inode); retval = PTR_ERR(handle); goto out_tmp_inode; } - ei = EXT4_I(inode); i_data = ei->i_data; memset(&lb, 0, sizeof(lb)); @@ -576,6 +576,7 @@ int ext4_ext_migrate(struct inode *inode) * the inode is not visible to user space. */ tmp_inode->i_blocks = 0; + EXT4_I(tmp_inode)->i_csum_seed = tmp_csum_seed; /* Reset the extent details */ ext4_ext_tree_init(handle, tmp_inode); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 526960e343862a40125d2fedde5e2c0cc6da7dab..0076f0ba3d3cedc3278b4662313636c82f6c7915 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1388,10 +1388,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, de = (struct ext4_dir_entry_2 *)search_buf; dlimit = search_buf + buf_size; - while ((char *) de < dlimit) { + while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) { /* this code is executed quadratically often */ /* do minimal checking `by hand' */ - if ((char *) de + de->name_len <= dlimit && + if (de->name + de->name_len <= dlimit && ext4_match(dir, fname, de)) { /* found a match - just to be sure, do * a full check */ @@ -2888,14 +2888,14 @@ bool ext4_empty_dir(struct inode *inode) sb = inode->i_sb; if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) { EXT4_ERROR_INODE(inode, "invalid size"); - return true; + return false; } /* The first directory block must not be a hole, * so treat it as DIRENT_HTREE */ bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE); if (IS_ERR(bh)) - return true; + return false; de = (struct ext4_dir_entry_2 *) bh->b_data; if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, @@ -2903,7 +2903,7 @@ bool ext4_empty_dir(struct inode *inode) le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) { ext4_warning_inode(inode, "directory missing '.'"); brelse(bh); - return true; + return false; } offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); de = ext4_next_entry(de, sb->s_blocksize); @@ -2912,7 +2912,7 @@ bool ext4_empty_dir(struct inode *inode) le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) { ext4_warning_inode(inode, "directory missing '..'"); brelse(bh); - return true; + return false; } offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); while (offset < inode->i_size) { @@ -2926,7 +2926,7 @@ bool ext4_empty_dir(struct inode *inode) 
continue; } if (IS_ERR(bh)) - return true; + return false; } de = (struct ext4_dir_entry_2 *) (bh->b_data + (offset & (sb->s_blocksize - 1))); @@ -3526,6 +3526,9 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle, struct buffer_head *bh; if (!ext4_has_inline_data(inode)) { + struct ext4_dir_entry_2 *de; + unsigned int offset; + /* The first directory block must not be a hole, so * treat it as DIRENT_HTREE */ @@ -3534,9 +3537,30 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle, *retval = PTR_ERR(bh); return NULL; } - *parent_de = ext4_next_entry( - (struct ext4_dir_entry_2 *)bh->b_data, - inode->i_sb->s_blocksize); + + de = (struct ext4_dir_entry_2 *) bh->b_data; + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, + bh->b_size, 0) || + le32_to_cpu(de->inode) != inode->i_ino || + strcmp(".", de->name)) { + EXT4_ERROR_INODE(inode, "directory missing '.'"); + brelse(bh); + *retval = -EFSCORRUPTED; + return NULL; + } + offset = ext4_rec_len_from_disk(de->rec_len, + inode->i_sb->s_blocksize); + de = ext4_next_entry(de, inode->i_sb->s_blocksize); + if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, + bh->b_size, offset) || + le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) { + EXT4_ERROR_INODE(inode, "directory missing '..'"); + brelse(bh); + *retval = -EFSCORRUPTED; + return NULL; + } + *parent_de = de; + return bh; } diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index defd2e10dfd10bf9e71e09650de369dbaaf15920..b076fabb72e29be8591c1f540e5a443207e13ed8 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -137,8 +137,10 @@ static void ext4_finish_bio(struct bio *bio) continue; } clear_buffer_async_write(bh); - if (bio->bi_status) + if (bio->bi_status) { + set_buffer_write_io_error(bh); buffer_io_error(bh); + } } while ((bh = bh->b_this_page) != head); spin_unlock_irqrestore(&head->b_uptodate_lock, flags); if (!under_io) { @@ -282,14 +284,14 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags) io_end->inode = inode; INIT_LIST_HEAD(&io_end->list); INIT_LIST_HEAD(&io_end->list_vec); - atomic_set(&io_end->count, 1); + refcount_set(&io_end->count, 1); } return io_end; } void ext4_put_io_end_defer(ext4_io_end_t *io_end) { - if (atomic_dec_and_test(&io_end->count)) { + if (refcount_dec_and_test(&io_end->count)) { if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || list_empty(&io_end->list_vec)) { ext4_release_io_end(io_end); @@ -303,7 +305,7 @@ int ext4_put_io_end(ext4_io_end_t *io_end) { int err = 0; - if (atomic_dec_and_test(&io_end->count)) { + if (refcount_dec_and_test(&io_end->count)) { if (io_end->flag & EXT4_IO_END_UNWRITTEN) { err = ext4_convert_unwritten_io_end_vec(io_end->handle, io_end); @@ -317,7 +319,7 @@ int ext4_put_io_end(ext4_io_end_t *io_end) ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end) { - atomic_inc(&io_end->count); + refcount_inc(&io_end->count); return io_end; } diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index bd0d185654f3357cdc7a5826a6d3afa87c052dff..ebb6affb26c2033be54a4460a6844579323c9d8f 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -74,6 +74,11 @@ int ext4_resize_begin(struct super_block *sb) return -EPERM; } + if (ext4_has_feature_sparse_super2(sb)) { + ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2"); + return -EOPNOTSUPP; + } + if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags)) ret = -EBUSY; @@ -2006,6 +2011,9 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) ext4_warning(sb, "Error opening 
resize inode"); return PTR_ERR(resize_inode); } + } else if (es->s_reserved_gdt_blocks) { + ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero"); + return -EFSCORRUPTED; } if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) { diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f1a089ebe848a75e6565e58e15fcc59fe2517060..26c9383608950d5d19c1e8193f0d3af7a27eab3f 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1216,19 +1216,24 @@ static void ext4_put_super(struct super_block *sb) int aborted = 0; int i, err; - ext4_unregister_li_request(sb); - ext4_quota_off_umount(sb); - - flush_work(&sbi->s_error_work); - destroy_workqueue(sbi->rsv_conversion_wq); - /* * Unregister sysfs before destroying jbd2 journal. * Since we could still access attr_journal_task attribute via sysfs * path which could have sbi->s_journal->j_task as NULL + * Unregister sysfs before flush sbi->s_error_work. + * Since user may read /proc/fs/ext4/xx/mb_groups during umount, If + * read metadata verify failed then will queue error work. + * flush_stashed_error_work will call start_this_handle may trigger + * BUG_ON. */ ext4_unregister_sysfs(sb); + ext4_unregister_li_request(sb); + ext4_quota_off_umount(sb); + + flush_work(&sbi->s_error_work); + destroy_workqueue(sbi->rsv_conversion_wq); + if (sbi->s_journal) { aborted = is_journal_aborted(sbi->s_journal); err = jbd2_journal_destroy(sbi->s_journal); @@ -1362,6 +1367,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work); ext4_fc_init_inode(&ei->vfs_inode); mutex_init(&ei->i_fc_lock); + mutex_init(&ei->i_clu_lock); return &ei->vfs_inode; } @@ -5633,7 +5639,6 @@ static void ext4_update_super(struct super_block *sb) static int ext4_commit_super(struct super_block *sb) { struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; - int error = 0; if (!sbh) return -EINVAL; @@ -5642,6 +5647,13 @@ static int ext4_commit_super(struct super_block *sb) ext4_update_super(sb); + lock_buffer(sbh); + /* Buffer got discarded which means block device got invalidated */ + if (!buffer_mapped(sbh)) { + unlock_buffer(sbh); + return -EIO; + } + if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) { /* * Oh, dear. A previous attempt to write the @@ -5656,17 +5668,20 @@ static int ext4_commit_super(struct super_block *sb) clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } - BUFFER_TRACE(sbh, "marking dirty"); - mark_buffer_dirty(sbh); - error = __sync_dirty_buffer(sbh, - REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0)); + get_bh(sbh); + sbh->b_end_io = end_buffer_write_sync; + clear_buffer_dirty(sbh); + submit_bh(REQ_OP_WRITE, + REQ_SYNC | (test_opt(sb, BARRIER) ? 
REQ_FUA : 0), sbh); + wait_on_buffer(sbh); if (buffer_write_io_error(sbh)) { ext4_msg(sb, KERN_ERR, "I/O error while writing " "superblock"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); + return -EIO; } - return error; + return 0; } /* @@ -6231,7 +6246,6 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_fsblk_t overhead = 0, resv_blocks; - u64 fsid; s64 bfree; resv_blocks = EXT4_C2B(sbi, atomic64_read(&sbi->s_resv_clusters)); @@ -6252,9 +6266,7 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); buf->f_namelen = EXT4_NAME_LEN; - fsid = le64_to_cpup((void *)es->s_uuid) ^ - le64_to_cpup((void *)es->s_uuid + sizeof(u64)); - buf->f_fsid = u64_to_fsid(fsid); + buf->f_fsid = uuid_to_fsid(es->s_uuid); #ifdef CONFIG_QUOTA if (ext4_test_inode_flag(dentry->d_inode, EXT4_INODE_PROJINHERIT) && @@ -6438,10 +6450,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA); err = dquot_quota_on(sb, type, format_id, path); - if (err) { - lockdep_set_quota_inode(path->dentry->d_inode, - I_DATA_SEM_NORMAL); - } else { + if (!err) { struct inode *inode = d_inode(path->dentry); handle_t *handle; @@ -6461,7 +6470,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, ext4_journal_stop(handle); unlock_inode: inode_unlock(inode); + if (err) + dquot_quota_off(sb, type); } + if (err) + lockdep_set_quota_inode(path->dentry->d_inode, + I_DATA_SEM_NORMAL); return err; } @@ -6524,8 +6538,19 @@ static int ext4_enable_quotas(struct super_block *sb) "Failed to enable quota tracking " "(type=%d, err=%d). 
Please run " "e2fsck to fix.", type, err); - for (type--; type >= 0; type--) + for (type--; type >= 0; type--) { + struct inode *inode; + + inode = sb_dqopt(sb)->files[type]; + if (inode) + inode = igrab(inode); dquot_quota_off(sb, type); + if (inode) { + lockdep_set_quota_inode(inode, + I_DATA_SEM_NORMAL); + iput(inode); + } + } return err; } @@ -6817,6 +6842,7 @@ static int __init ext4_init_fs(void) out: unregister_as_ext2(); unregister_as_ext3(); + ext4_fc_destroy_dentry_cache(); out05: destroy_inodecache(); out1: @@ -6843,6 +6869,7 @@ static void __exit ext4_exit_fs(void) unregister_as_ext2(); unregister_as_ext3(); unregister_filesystem(&ext4_fs_type); + ext4_fc_destroy_dentry_cache(); destroy_inodecache(); ext4_exit_mballoc(); ext4_exit_sysfs(); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 2f93e8b90492e5ebbb884cf485afdf278058764c..b5016eb7b37323e1999c3e72765f257f018dc5f8 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -2170,8 +2170,9 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, struct ext4_inode *raw_inode; int error; - if (EXT4_I(inode)->i_extra_isize == 0) + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) return 0; + raw_inode = ext4_raw_inode(&is->iloc); header = IHDR(inode, raw_inode); is->s.base = is->s.first = IFIRST(header); @@ -2199,8 +2200,9 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode, struct ext4_xattr_search *s = &is->s; int error; - if (EXT4_I(inode)->i_extra_isize == 0) + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) return -ENOSPC; + error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); if (error) return error; @@ -2223,8 +2225,9 @@ static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, struct ext4_xattr_search *s = &is->s; int error; - if (EXT4_I(inode)->i_extra_isize == 0) + if (!EXT4_INODE_HAS_XATTR_SPACE(inode)) return -ENOSPC; + error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */); if (error) return error; diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h index 730b91fa0dd70231c1c3300650146c2e5ea8902b..87e5863bb4931be9b633e95ba043edc8f048020f 100644 --- a/fs/ext4/xattr.h +++ b/fs/ext4/xattr.h @@ -95,6 +95,19 @@ struct ext4_xattr_entry { #define EXT4_ZERO_XATTR_VALUE ((void *)-1) +/* + * If we want to add an xattr to the inode, we should make sure that + * i_extra_isize is not 0 and that the inode size is not less than + * EXT4_GOOD_OLD_INODE_SIZE + extra_isize + pad. 
+ * EXT4_GOOD_OLD_INODE_SIZE extra_isize header entry pad data + * |--------------------------|------------|------|---------|---|-------| + */ +#define EXT4_INODE_HAS_XATTR_SPACE(inode) \ + ((EXT4_I(inode)->i_extra_isize != 0) && \ + (EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize + \ + sizeof(struct ext4_xattr_ibody_header) + EXT4_XATTR_PAD <= \ + EXT4_INODE_SIZE((inode)->i_sb))) + struct ext4_xattr_info { const char *name; const void *value; diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 9bcd77db980dfe2d22499a829d774c36da4f4fd4..77f30320f8628cec3620da426f492f670871be9f 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -851,6 +851,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, struct page *cp_page_1 = NULL, *cp_page_2 = NULL; struct f2fs_checkpoint *cp_block = NULL; unsigned long long cur_version = 0, pre_version = 0; + unsigned int cp_blocks; int err; err = get_checkpoint_version(sbi, cp_addr, &cp_block, @@ -858,15 +859,16 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, if (err) return NULL; - if (le32_to_cpu(cp_block->cp_pack_total_block_count) > - sbi->blocks_per_seg) { + cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count); + + if (cp_blocks > sbi->blocks_per_seg || cp_blocks <= F2FS_CP_PACKS) { f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u", le32_to_cpu(cp_block->cp_pack_total_block_count)); goto invalid_cp; } pre_version = *version; - cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1; + cp_addr += cp_blocks - 1; err = get_checkpoint_version(sbi, cp_addr, &cp_block, &cp_page_2, version); if (err) diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 30987ea011f1a57c4c1c1dabd7fad7b4d95587c0..1541da5ace85ef47e420850708847a18fc960f71 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -286,10 +286,9 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic) } if (ret != PAGE_SIZE << dic->log_cluster_size) { - printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, " + printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, " "expected:%lu\n", KERN_ERR, - F2FS_I_SB(dic->inode)->sb->s_id, - dic->rlen, + F2FS_I_SB(dic->inode)->sb->s_id, ret, PAGE_SIZE << dic->log_cluster_size); return -EIO; } @@ -1362,25 +1361,38 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc, enum iostat_type io_type) { struct address_space *mapping = cc->inode->i_mapping; - int _submitted, compr_blocks, ret; - int i = -1, err = 0; + int _submitted, compr_blocks, ret, i; compr_blocks = f2fs_compressed_blocks(cc); - if (compr_blocks < 0) { - err = compr_blocks; - goto out_err; + + for (i = 0; i < cc->cluster_size; i++) { + if (!cc->rpages[i]) + continue; + + redirty_page_for_writepage(wbc, cc->rpages[i]); + unlock_page(cc->rpages[i]); } + if (compr_blocks < 0) + return compr_blocks; + for (i = 0; i < cc->cluster_size; i++) { if (!cc->rpages[i]) continue; retry_write: + lock_page(cc->rpages[i]); + if (cc->rpages[i]->mapping != mapping) { +continue_unlock: unlock_page(cc->rpages[i]); continue; } - BUG_ON(!PageLocked(cc->rpages[i])); + if (!PageDirty(cc->rpages[i])) + goto continue_unlock; + + if (!clear_page_dirty_for_io(cc->rpages[i])) + goto continue_unlock; ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted, NULL, NULL, wbc, io_type, @@ -1395,26 +1407,15 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc, * avoid deadlock caused by cluster update race * from foreground operation. 
*/ - if (IS_NOQUOTA(cc->inode)) { - err = 0; - goto out_err; - } + if (IS_NOQUOTA(cc->inode)) + return 0; ret = 0; cond_resched(); congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT); - lock_page(cc->rpages[i]); - - if (!PageDirty(cc->rpages[i])) { - unlock_page(cc->rpages[i]); - continue; - } - - clear_page_dirty_for_io(cc->rpages[i]); goto retry_write; } - err = ret; - goto out_err; + return ret; } *submitted += _submitted; @@ -1423,14 +1424,6 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc, f2fs_balance_fs(F2FS_M_SB(mapping), true); return 0; -out_err: - for (++i; i < cc->cluster_size; i++) { - if (!cc->rpages[i]) - continue; - redirty_page_for_writepage(wbc, cc->rpages[i]); - unlock_page(cc->rpages[i]); - } - return err; } int f2fs_write_multi_pages(struct compress_ctx *cc, diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 1b11a42847c48e92b8a3af471c5793c724ffcc66..b2016fd3a7ca301f714f7792b6307d64b520b350 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -3264,8 +3264,12 @@ static int __f2fs_write_data_pages(struct address_space *mapping, /* to avoid spliting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */ if (wbc->sync_mode == WB_SYNC_ALL) atomic_inc(&sbi->wb_sync_req[DATA]); - else if (atomic_read(&sbi->wb_sync_req[DATA])) + else if (atomic_read(&sbi->wb_sync_req[DATA])) { + /* to avoid potential deadlock */ + if (current->plug) + blk_finish_plug(current->plug); goto skip_write; + } if (__should_serialize_io(inode, wbc)) { mutex_lock(&sbi->writepages); @@ -3457,6 +3461,9 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, *fsdata = NULL; + if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode))) + goto repeat; + ret = f2fs_prepare_compress_overwrite(inode, pagep, index, fsdata); if (ret < 0) { diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index bc488a7d01903180a723f4a7f5fa8abf532fe004..6c4bf22a3e83e6c1e7e9763144370ac6be0b04a1 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -955,6 +955,7 @@ struct f2fs_sm_info { unsigned int segment_count; /* total # of segments */ unsigned int main_segments; /* # of segments in main area */ unsigned int reserved_segments; /* # of reserved segments */ + unsigned int additional_reserved_segments;/* reserved segs for IO align feature */ unsigned int ovp_segments; /* # of overprovision segments */ /* a threshold to reclaim prefree segments */ @@ -1984,6 +1985,11 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, if (!__allow_reserved_blocks(sbi, inode, true)) avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; + + if (F2FS_IO_ALIGNED(sbi)) + avail_user_block_count -= sbi->blocks_per_seg * + SM_I(sbi)->additional_reserved_segments; + if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { if (avail_user_block_count > sbi->unusable_block_count) avail_user_block_count -= sbi->unusable_block_count; @@ -2229,6 +2235,11 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, if (!__allow_reserved_blocks(sbi, inode, false)) valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; + + if (F2FS_IO_ALIGNED(sbi)) + valid_block_count += sbi->blocks_per_seg * + SM_I(sbi)->additional_reserved_segments; + user_block_count = sbi->user_block_count; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) user_block_count -= sbi->unusable_block_count; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 1fbaab1f7aba8666da7f07193ffc4c179b55da57..792f9059d897c3b21cb387c7a229216196cd9fae 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -2035,7 +2035,10 @@ static int 
f2fs_ioc_start_atomic_write(struct file *filp) inode_lock(inode); - f2fs_disable_compressed_file(inode); + if (!f2fs_disable_compressed_file(inode)) { + ret = -EINVAL; + goto out; + } if (f2fs_is_atomic_file(inode)) { if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 6153b29de331bf9b861e868fea8a236f6f002259..827b5a6175ecf19487895c4b29a2a053e2303589 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -998,8 +998,10 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, set_sbi_flag(sbi, SBI_NEED_FSCK); } - if (f2fs_check_nid_range(sbi, dni->ino)) + if (f2fs_check_nid_range(sbi, dni->ino)) { + f2fs_put_page(node_page, 1); return false; + } *nofs = ofs_of_node(node_page); source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index a35fcf43ad5a3346399b05ed624f98c12e6f017d..98483f50e5e9218dd9f35a660cd2fed3f236d406 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -848,6 +848,7 @@ void f2fs_handle_failed_inode(struct inode *inode) err = f2fs_get_node_info(sbi, inode->i_ino, &ni); if (err) { set_sbi_flag(sbi, SBI_NEED_FSCK); + set_inode_flag(inode, FI_FREE_NID); f2fs_warn(sbi, "May loss orphan inode, run fsck to fix."); goto out; } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 7e625806bd4a2f33b383579487fa37f7a7bfef7f..5fa10d0b006837e397c4f60dc8ef5f3540c6b6ec 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -2055,8 +2055,12 @@ static int f2fs_write_node_pages(struct address_space *mapping, if (wbc->sync_mode == WB_SYNC_ALL) atomic_inc(&sbi->wb_sync_req[NODE]); - else if (atomic_read(&sbi->wb_sync_req[NODE])) + else if (atomic_read(&sbi->wb_sync_req[NODE])) { + /* to avoid potential deadlock */ + if (current->plug) + blk_finish_plug(current->plug); goto skip_write; + } trace_f2fs_writepages(mapping->host, wbc, NODE); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index d04b449978aa8e4c8146527508bb2d6fcd60096b..49f5cb532738d99c5a1207d4e790442e8a479283 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -4650,6 +4650,13 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi) sanity_check_seg_type(sbi, curseg->seg_type); + if (curseg->alloc_type != LFS && curseg->alloc_type != SSR) { + f2fs_err(sbi, + "Current segment has invalid alloc_type:%d", + curseg->alloc_type); + return -EFSCORRUPTED; + } + if (f2fs_test_bit(blkofs, se->cur_valid_map)) goto out; diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 1bf33fc27b8f83b69630c1266c67e36a45318627..beef833a6960489fe5e4be6f166cd6df65ac31c4 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -539,7 +539,8 @@ static inline unsigned int free_segments(struct f2fs_sb_info *sbi) static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi) { - return SM_I(sbi)->reserved_segments; + return SM_I(sbi)->reserved_segments + + SM_I(sbi)->additional_reserved_segments; } static inline unsigned int free_sections(struct f2fs_sb_info *sbi) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index b7287b722e9e113c27c4b7797ec522dc6782a61b..78ee14f6e939eb620f00e18249cd90ed43829be6 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -289,6 +289,46 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).s_resgid)); } +static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi) +{ + unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec; + unsigned int avg_vblocks; + unsigned int wanted_reserved_segments; + block_t avail_user_block_count; + + if (!F2FS_IO_ALIGNED(sbi)) + 
return 0; + + /* average valid block count in section in worst case */ + avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi); + + /* + * we need enough free space when migrating one section in worst case + */ + wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) * + reserved_segments(sbi); + wanted_reserved_segments -= reserved_segments(sbi); + + avail_user_block_count = sbi->user_block_count - + sbi->current_reserved_blocks - + F2FS_OPTION(sbi).root_reserved_blocks; + + if (wanted_reserved_segments * sbi->blocks_per_seg > + avail_user_block_count) { + f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u", + wanted_reserved_segments, + avail_user_block_count >> sbi->log_blocks_per_seg); + return -ENOSPC; + } + + SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments; + + f2fs_info(sbi, "IO align feature needs additional reserved segment: %u", + wanted_reserved_segments); + + return 0; +} + static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi) { if (!F2FS_OPTION(sbi).unusable_cap_perc) @@ -2238,7 +2278,7 @@ int f2fs_quota_sync(struct super_block *sb, int type) struct f2fs_sb_info *sbi = F2FS_SB(sb); struct quota_info *dqopt = sb_dqopt(sb); int cnt; - int ret; + int ret = 0; /* * Now when everything is written we can discard the pagecache so @@ -2249,8 +2289,8 @@ int f2fs_quota_sync(struct super_block *sb, int type) if (type != -1 && cnt != type) continue; - if (!sb_has_quota_active(sb, type)) - return 0; + if (!sb_has_quota_active(sb, cnt)) + continue; inode_lock(dqopt->files[cnt]); @@ -3736,6 +3776,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) goto free_nm; } + err = adjust_reserved_segment(sbi); + if (err) + goto free_nm; + /* For write statistics */ if (sb->s_bdev->bd_part) sbi->sectors_written_start = diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index b8850c81068a0e861413041c0c78482f35dcd839..a7e7d68256e004f2212de2deb9cb984c8a153559 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -330,7 +330,9 @@ static ssize_t __sbi_store(struct f2fs_attr *a, if (a->struct_type == RESERVED_BLOCKS) { spin_lock(&sbi->stat_lock); if (t > (unsigned long)(sbi->user_block_count - - F2FS_OPTION(sbi).root_reserved_blocks)) { + F2FS_OPTION(sbi).root_reserved_blocks - + sbi->blocks_per_seg * + SM_I(sbi)->additional_reserved_segments)) { spin_unlock(&sbi->stat_lock); return -EINVAL; } @@ -384,7 +386,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a, } else if (t == GC_IDLE_AT) { if (!sbi->am.atgc_enabled) return -EINVAL; - sbi->gc_mode = GC_AT; + sbi->gc_mode = GC_IDLE_AT; } else { sbi->gc_mode = GC_NORMAL; } diff --git a/fs/file.c b/fs/file.c index 0aa251ca02a6e4d1cae56dd1be1acb8af3b2c9d8..cf236025e7984ee53175cea67e041353b8c7018d 100644 --- a/fs/file.c +++ b/fs/file.c @@ -86,6 +86,21 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt) copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds); } +/* + * Note how the fdtable bitmap allocations very much have to be a multiple of + * BITS_PER_LONG. This is not only because we walk those things in chunks of + * 'unsigned long' in some places, but simply because that is how the Linux + * kernel bitmaps are defined to work: they are not "bits in an array of bytes", + * they are very much "bits in an array of unsigned long". + * + * The ALIGN(nr, BITS_PER_LONG) here is for clarity: since we just multiplied + * by that "1024/sizeof(ptr)" before, we already know there are sufficient + * clear low bits. 
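
The BITS_PER_LONG rounding that this comment justifies is easy to sanity-check outside the kernel. A minimal userspace sketch (ALIGN is re-derived here from its usual kernel definition; nothing below is taken from the patch itself):

#include <stdio.h>

/* Round x up to a multiple of a, where a is a power of two. */
#define ALIGN(x, a)   (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define BITS_PER_LONG (8 * (unsigned long)sizeof(long))

int main(void)
{
    /* fd counts such as close_range() may hand to sane_fdtable_size() */
    unsigned long nr[] = { 100, 256, 1000 };

    for (int i = 0; i < 3; i++)
        printf("%lu -> %lu\n", nr[i], ALIGN(nr[i], BITS_PER_LONG));
    return 0; /* on LP64 prints 128, 256 and 1024 */
}

Every result is a whole number of bitmap words, which is the invariant the fdtable hunks around this comment enforce.
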
Clang seems to realize that, gcc ends up being confused. + * + * On a 128-bit machine, the ALIGN() would actually matter. In the meantime, + * let's consider it documentation (and maybe a test-case for gcc to improve + * its code generation ;) + */ static struct fdtable * alloc_fdtable(unsigned int nr) { struct fdtable *fdt; @@ -101,6 +116,7 @@ static struct fdtable * alloc_fdtable(unsigned int nr) nr /= (1024 / sizeof(struct file *)); nr = roundup_pow_of_two(nr + 1); nr *= (1024 / sizeof(struct file *)); + nr = ALIGN(nr, BITS_PER_LONG); /* * Note that this can drive nr *below* what we had passed if sysctl_nr_open * had been set lower between the check in expand_files() and here. Deal @@ -268,6 +284,19 @@ static unsigned int count_open_files(struct fdtable *fdt) return i; } +/* + * Note that a sane fdtable size always has to be a multiple of + * BITS_PER_LONG, since we have bitmaps that are sized by this. + * + * 'max_fds' will normally already be properly aligned, but it + * turns out that in the close_range() -> __close_range() -> + * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end + * up having a 'max_fds' value that isn't already aligned. + * + * Rather than make close_range() have to worry about this, + * just make that BITS_PER_LONG alignment be part of a sane + * fdtable size. Because that's really what it is. + */ static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds) { unsigned int count; @@ -275,7 +304,7 @@ static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds) count = count_open_files(fdt); if (max_fds < NR_OPEN_DEFAULT) max_fds = NR_OPEN_DEFAULT; - return min(count, max_fds); + return ALIGN(min(count, max_fds), BITS_PER_LONG); } /* diff --git a/fs/file_table.c b/fs/file_table.c index 709ada3151da5303c8c2fa46d0c0d7a7b7274cff..7a3b4a7f68086d2f8ace0ac9fe25ddb534967c31 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -376,6 +376,7 @@ void __fput_sync(struct file *file) } EXPORT_SYMBOL(fput); +EXPORT_SYMBOL(__fput_sync); void __init files_init(void) { diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 050d40c465bcb3cf5d504b279379e7dcdab66e39..2011199476ea6b5a9c23309aad61f644e0e163d3 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1650,11 +1650,12 @@ static long writeback_sb_inodes(struct super_block *sb, }; unsigned long start_time = jiffies; long write_chunk; - long wrote = 0; /* count both pages and inodes */ + long total_wrote = 0; /* count both pages and inodes */ while (!list_empty(&wb->b_io)) { struct inode *inode = wb_inode(wb->b_io.prev); struct bdi_writeback *tmp_wb; + long wrote; if (inode->i_sb != sb) { if (work->sb) { @@ -1730,7 +1731,9 @@ static long writeback_sb_inodes(struct super_block *sb, wbc_detach_inode(&wbc); work->nr_pages -= write_chunk - wbc.nr_to_write; - wrote += write_chunk - wbc.nr_to_write; + wrote = write_chunk - wbc.nr_to_write - wbc.pages_skipped; + wrote = wrote < 0 ? 0 : wrote; + total_wrote += wrote; if (need_resched()) { /* @@ -1752,7 +1755,7 @@ static long writeback_sb_inodes(struct super_block *sb, tmp_wb = inode_to_wb_and_lock_list(inode); spin_lock(&inode->i_lock); if (!(inode->i_state & I_DIRTY_ALL)) - wrote++; + total_wrote++; requeue_inode(inode, tmp_wb, &wbc); inode_sync_complete(inode); spin_unlock(&inode->i_lock); @@ -1766,14 +1769,14 @@ static long writeback_sb_inodes(struct super_block *sb, * bail out to wb_writeback() often enough to check * background threshold and other termination conditions.
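
The wrote -> total_wrote split in this function can be shown in isolation. A sketch of the accounting idea (names are mine, not the kernel's): wbc.pages_skipped can exceed what a single pass actually wrote, and without the clamp one uncooperative inode could report negative progress and mask work done on others:

#include <stdio.h>

/* Stand-in for one iteration of the writeback_sb_inodes() loop. */
static long inode_progress(long write_chunk, long nr_to_write_left,
                           long pages_skipped)
{
    long wrote = write_chunk - nr_to_write_left - pages_skipped;

    return wrote < 0 ? 0 : wrote; /* never report negative progress */
}

int main(void)
{
    long total_wrote = 0;

    total_wrote += inode_progress(16, 0, 0);  /* clean inode: wrote all 16 */
    total_wrote += inode_progress(16, 12, 8); /* skipped more than written */
    printf("total_wrote = %ld\n", total_wrote); /* 16, not 12 */
    return 0;
}
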
*/ - if (wrote) { + if (total_wrote) { if (time_is_before_jiffies(start_time + HZ / 10UL)) break; if (work->nr_pages <= 0) break; } } - return wrote; + return total_wrote; } static long __writeback_inodes_wb(struct bdi_writeback *wb, diff --git a/fs/fs_context.c b/fs/fs_context.c index b7e43a780a625bca1b0faeba53e2702463ad0496..24ce12f0db32e5779d389ebcc8c49e9b35ecb9e0 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -548,7 +548,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param) param->key); } - if (len > PAGE_SIZE - 2 - size) + if (size + len + 2 > PAGE_SIZE) return invalf(fc, "VFS: Legacy: Cumulative options too large"); if (strchr(param->key, ',') || (param->type == fs_value_is_string && diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index d100b5dfedbd2f1990b5aa7736f8124df87646ed..8ac91ba05d6de8d67ba715db4230aa60bd646da2 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -945,7 +945,17 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep, while (count) { if (cs->write && cs->pipebufs && page) { - return fuse_ref_page(cs, page, offset, count); + /* + * Can't control lifetime of pipe buffers, so always + * copy user pages. + */ + if (cs->req->args->user_pages) { + err = fuse_copy_fill(cs); + if (err) + return err; + } else { + return fuse_ref_page(cs, page, offset, count); + } } else if (!cs->len) { if (cs->move_pages && page && offset == 0 && count == PAGE_SIZE) { diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 8e95a75a4559c6435a52a9e352854ec05c639a24..6ab3798da0796320c8fb29a2ae8012f51723e0be 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -560,6 +560,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, inarg.flags = flags; inarg.mode = mode; inarg.umask = current_umask(); + + if (fm->fc->handle_killpriv_v2 && (flags & O_TRUNC) && + !(flags & O_EXCL) && !capable(CAP_FSETID)) { + inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID; + } + args.opcode = FUSE_CREATE; args.nodeid = get_node_id(dir); args.in_numargs = 2; @@ -1693,10 +1699,20 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, inarg.valid |= FATTR_FH; inarg.fh = ff->fh; } + + /* Kill suid/sgid for non-directory chown unconditionally */ + if (fc->handle_killpriv_v2 && !S_ISDIR(inode->i_mode) && + attr->ia_valid & (ATTR_UID | ATTR_GID)) + inarg.valid |= FATTR_KILL_SUIDGID; + if (attr->ia_valid & ATTR_SIZE) { /* For mandatory locking in truncate */ inarg.valid |= FATTR_LOCKOWNER; inarg.lock_owner = fuse_lock_owner_id(fc, current->files); + + /* Kill suid/sgid for truncate only if no CAP_FSETID */ + if (fc->handle_killpriv_v2 && !capable(CAP_FSETID)) + inarg.valid |= FATTR_KILL_SUIDGID; } fuse_setattr_fill(fc, &args, inode, &inarg, &outarg); err = fuse_simple_request(fm, &args); @@ -1787,7 +1803,7 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr) * * This should be done on write(), truncate() and chown(). */ - if (!fc->handle_killpriv) { + if (!fc->handle_killpriv && !fc->handle_killpriv_v2) { /* * ia_mode calculation may have used stale i_mode. * Refresh and recalculate. 
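
The condition added to fuse_create_open() above is compact enough to lift out. An illustrative predicate (the helper name is hypothetical): an O_EXCL create produces a brand-new inode with nothing to kill, while a truncating open of an existing file must ask the server to drop suid/sgid unless the caller holds CAP_FSETID:

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the fuse_create_open() test. */
static bool open_kills_suidgid(int flags, bool killpriv_v2, bool cap_fsetid)
{
    return killpriv_v2 && (flags & O_TRUNC) &&
           !(flags & O_EXCL) && !cap_fsetid;
}

int main(void)
{
    printf("%d\n", open_kills_suidgid(O_CREAT | O_TRUNC, true, false)); /* 1 */
    printf("%d\n", open_kills_suidgid(O_CREAT | O_EXCL, true, false));  /* 0 */
    printf("%d\n", open_kills_suidgid(O_TRUNC, true, true));            /* 0 */
    return 0;
}
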
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 4dd70b53df81a922c536fd926328f952a0ba8dff..86f2e1c2ad0fd7fa256e18ef5cce2dd148634805 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -42,6 +42,12 @@ static int fuse_send_open(struct fuse_mount *fm, u64 nodeid, struct file *file, inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY); if (!fm->fc->atomic_o_trunc) inarg.flags &= ~O_TRUNC; + + if (fm->fc->handle_killpriv_v2 && + (inarg.flags & O_TRUNC) && !capable(CAP_FSETID)) { + inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID; + } + args.opcode = opcode; args.nodeid = nodeid; args.in_numargs = 1; @@ -1104,6 +1110,8 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, fuse_write_args_fill(ia, ff, pos, count); ia->write.in.flags = fuse_write_flags(iocb); + if (fm->fc->handle_killpriv_v2 && !capable(CAP_FSETID)) + ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID; err = fuse_simple_request(fm, &ap->args); if (!err && ia->write.out.size > count) @@ -1283,17 +1291,24 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) ssize_t written_buffered = 0; struct inode *inode = mapping->host; ssize_t err; + struct fuse_conn *fc = get_fuse_conn(inode); loff_t endbyte = 0; - if (get_fuse_conn(inode)->writeback_cache) { + if (fc->writeback_cache) { /* Update size (EOF optimization) and mode (SUID clearing) */ err = fuse_update_attributes(mapping->host, file); if (err) return err; + if (fc->handle_killpriv_v2 && + should_remove_suid(file_dentry(file))) { + goto writethrough; + } + return generic_file_write_iter(iocb, from); } +writethrough: inode_lock(inode); /* We can write back this queue in page reclaim */ @@ -1418,6 +1433,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, (PAGE_SIZE - ret) & (PAGE_SIZE - 1); } + ap->args.user_pages = true; if (write) ap->args.in_pages = true; else @@ -1474,7 +1490,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, if (write) { if (!capable(CAP_FSETID)) - ia->write.in.write_flags |= FUSE_WRITE_KILL_PRIV; + ia->write.in.write_flags |= FUSE_WRITE_KILL_SUIDGID; nres = fuse_send_write(ia, pos, nbytes, owner); } else { @@ -3251,7 +3267,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end) { - int err = filemap_write_and_wait_range(inode->i_mapping, start, -1); + int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX); if (!err) fuse_sync_writes(inode); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ed71ef6fbc9da40925d8a86651b11bb5e4f3eeba..029f8e382c97722121c7686a3c9fb0de43909420 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -270,6 +270,9 @@ struct fuse_args { bool page_zeroing:1; bool page_replace:1; bool may_block:1; +#ifndef __GENKSYMS__ + bool user_pages:1; +#endif struct fuse_in_arg in_args[3]; struct fuse_arg out_args[2]; void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error); @@ -642,6 +645,14 @@ struct fuse_conn { /* show legacy mount options */ unsigned int legacy_opts_show:1; + /* + * fs kills suid/sgid/cap on write/chown/trunc. suid is killed on + * write/trunc only if caller did not have CAP_FSETID. sgid is killed + * on write/truncate only if caller did not have CAP_FSETID as well as + * file has group execute permission. 
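
The rules in this comment can be read as a small decision function. A sketch of the policy only (my reading of the comment, not code from the patch; mode bits from <sys/stat.h>): suid is always dropped for an unprivileged writer, sgid only when the group-execute bit makes it a real setgid file rather than a mandatory-locking marker:

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

/* Which mode bits a killpriv_v2 write/truncate would clear (sketch). */
static mode_t bits_to_kill(mode_t mode, bool cap_fsetid)
{
    mode_t kill = 0;

    if (cap_fsetid)
        return 0; /* privileged caller keeps suid and sgid */
    if (mode & S_ISUID)
        kill |= S_ISUID;
    if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
        kill |= S_ISGID;
    return kill;
}

int main(void)
{
    printf("%o\n", bits_to_kill(S_ISUID | S_ISGID | S_IXGRP | 0644, false)); /* 6000 */
    printf("%o\n", bits_to_kill(S_ISGID | 0644, false)); /* 0: no group exec */
    return 0;
}
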
+ */ + unsigned handle_killpriv_v2:1; + /* * The following bitfields are only for optimization purposes * and hence races in setting them will not cause malfunction diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 5e484676343eba9cbacfcfe56edf8023c4e0ac42..5e7bb8826f3fb27d1fbdaf68dcabe8551acdc095 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -207,6 +207,16 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, inode->i_mode &= ~S_ISVTX; fi->orig_ino = attr->ino; + + /* + * We are refreshing inode data and it is possible that another + * client set suid/sgid or security.capability xattr. So clear + * S_NOSEC. Ideally, we could have cleared it only if suid/sgid + * was set or if security.capability xattr was set. But we don't + * know if security.capability has been set or not. So clear it + * anyway. It's less efficient but should be safe. + */ + inode->i_flags &= ~S_NOSEC; } void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, @@ -1058,6 +1068,10 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, !fuse_dax_check_alignment(fc, arg->map_alignment)) { ok = false; } + if (arg->flags & FUSE_HANDLE_KILLPRIV_V2) { + fc->handle_killpriv_v2 = 1; + fm->sb->s_flags |= SB_NOSEC; + } } else { ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; @@ -1100,7 +1114,8 @@ void fuse_send_init(struct fuse_mount *fm) FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT | FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL | FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS | - FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA; + FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA | + FUSE_HANDLE_KILLPRIV_V2; #ifdef CONFIG_FUSE_DAX if (fm->fc->dax) ia->in.flags |= FUSE_MAP_ALIGNMENT; diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index b34c02985d9d2ea671f31afb6ebda29196f9fb0e..6c047570d6a948c3479b3cd52f6e78dbcb6941a4 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -2200,7 +2200,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize) ret = do_shrink(inode, newsize); out: - gfs2_rs_delete(ip, NULL); + gfs2_rs_delete(ip); gfs2_qa_put(ip); return ret; } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index cfd9d03f604fee4a79cd6ad5a0bd01c8de6f6a78..2e6f622ed4283ef7602accd3334446467cb18c5c 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -717,7 +717,8 @@ static int gfs2_release(struct inode *inode, struct file *file) file->private_data = NULL; if (file->f_mode & FMODE_WRITE) { - gfs2_rs_delete(ip, &inode->i_writecount); + if (gfs2_rs_active(&ip->i_res)) + gfs2_rs_delete(ip); gfs2_qa_put(ip); } return 0; } diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 65ae4fc28ede49a7354a18c6928141a09d5acbfc..74a6b0800e059d925ace05aba56b8c6499fefa82 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -811,7 +811,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, if (free_vfs_inode) /* else evict will do the put for us */ gfs2_glock_put(ip->i_gl); } - gfs2_rs_delete(ip, NULL); + gfs2_rs_deltree(&ip->i_res); gfs2_qa_put(ip); fail_free_acls: posix_acl_release(default_acl); diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 5e8eef9990e326428d9c5e44ee19a5233029425f..dc55b029afaa4c3494b6e09770b53f7bbf97bde5 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -664,13 +664,14 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs) /** * gfs2_rs_delete - delete a multi-block reservation * @ip: The inode for this reservation - * @wcount: The inode's write count, or NULL * */ -void gfs2_rs_delete(struct gfs2_inode *ip,
atomic_t *wcount) +void gfs2_rs_delete(struct gfs2_inode *ip) { + struct inode *inode = &ip->i_inode; + down_write(&ip->i_rw_mutex); - if ((wcount == NULL) || (atomic_read(wcount) <= 1)) + if (atomic_read(&inode->i_writecount) <= 1) gfs2_rs_deltree(&ip->i_res); up_write(&ip->i_rw_mutex); } @@ -1389,7 +1390,8 @@ int gfs2_fitrim(struct file *filp, void __user *argp) start = r.start >> bs_shift; end = start + (r.len >> bs_shift); - minlen = max_t(u64, r.minlen, + minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize); + minlen = max_t(u64, minlen, q->limits.discard_granularity) >> bs_shift; if (end <= start || minlen > sdp->sd_max_rg_data) diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h index 9a587ada51edaf2c16cc71af4367ab4fd0de5c7c..2d3c150c55bd5c8ecd8d7c58682d97ea5f4e9296 100644 --- a/fs/gfs2/rgrp.h +++ b/fs/gfs2/rgrp.h @@ -45,7 +45,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n, bool dinode, u64 *generation); extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs); -extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount); +extern void gfs2_rs_delete(struct gfs2_inode *ip); extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, u64 bstart, u32 blen, int meta); extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index d2b7ecbd1b1503a89b0116e66cd08986e0da258d..d14b98aa1c3ebeae654d669d736137908cce98c4 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1434,7 +1434,7 @@ static void gfs2_evict_inode(struct inode *inode) truncate_inode_pages_final(&inode->i_data); if (ip->i_qadata) gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0); - gfs2_rs_delete(ip, NULL); + gfs2_rs_deltree(&ip->i_res); gfs2_ordered_del_inode(ip); clear_inode(inode); gfs2_dir_hash_inval(ip); diff --git a/fs/io_uring.c b/fs/io_uring.c index 600dd0898d7ead73b4ddb3eb92b5fa61e98d4520..3da9c6f457aecdfdae1206858c9a44581a6a49e5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1156,7 +1156,7 @@ static inline void __io_req_init_async(struct io_kiocb *req) */ static inline void io_req_init_async(struct io_kiocb *req) { - struct io_uring_task *tctx = current->io_uring; + struct io_uring_task *tctx = req->task->io_uring; if (req->flags & REQ_F_WORK_INITIALIZED) return; @@ -1556,6 +1556,7 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx) static void io_flush_timeouts(struct io_ring_ctx *ctx) { + struct io_kiocb *req, *tmp; u32 seq; if (list_empty(&ctx->timeout_list)) @@ -1563,10 +1564,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx) seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts); - do { + list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { u32 events_needed, events_got; - struct io_kiocb *req = list_first_entry(&ctx->timeout_list, - struct io_kiocb, timeout.list); if (io_is_timeout_noseq(req)) break; @@ -1583,9 +1582,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx) if (events_got < events_needed) break; - list_del_init(&req->timeout.list); io_kill_timeout(req, 0); - } while (!list_empty(&ctx->timeout_list)); + } ctx->cq_last_tm_flush = seq; } @@ -1713,18 +1711,22 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, return cqe != NULL; } -static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, +static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, struct task_struct *tsk, struct files_struct *files) { + bool ret = true; + if (test_bit(0, &ctx->cq_check_overflow)) { /* iopoll syncs 
against uring_lock, not completion_lock */ if (ctx->flags & IORING_SETUP_IOPOLL) mutex_lock(&ctx->uring_lock); - __io_cqring_overflow_flush(ctx, force, tsk, files); + ret = __io_cqring_overflow_flush(ctx, force, tsk, files); if (ctx->flags & IORING_SETUP_IOPOLL) mutex_unlock(&ctx->uring_lock); } + + return ret; } static void __io_cqring_fill_event(struct io_kiocb *req, long res, @@ -2580,45 +2582,6 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res, #ifdef CONFIG_BLOCK static bool io_resubmit_prep(struct io_kiocb *req, int error) { - struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; - ssize_t ret = -ECANCELED; - struct iov_iter iter; - int rw; - - if (error) { - ret = error; - goto end_req; - } - - switch (req->opcode) { - case IORING_OP_READV: - case IORING_OP_READ_FIXED: - case IORING_OP_READ: - rw = READ; - break; - case IORING_OP_WRITEV: - case IORING_OP_WRITE_FIXED: - case IORING_OP_WRITE: - rw = WRITE; - break; - default: - printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n", - req->opcode); - goto end_req; - } - - if (!req->async_data) { - ret = io_import_iovec(rw, req, &iovec, &iter, false); - if (ret < 0) - goto end_req; - ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false); - if (!ret) - return true; - kfree(iovec); - } else { - return true; - } -end_req: req_set_fail_links(req); return false; } @@ -3219,13 +3182,15 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) ret = nr; break; } + ret += nr; if (!iov_iter_is_bvec(iter)) { iov_iter_advance(iter, nr); } else { - req->rw.len -= nr; req->rw.addr += nr; + req->rw.len -= nr; + if (!req->rw.len) + break; } - ret += nr; if (nr != iovec.iov_len) break; } @@ -3420,6 +3385,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; + struct iov_iter iter_cp; struct io_async_rw *rw = req->async_data; ssize_t io_size, ret, ret2; bool no_async; @@ -3430,6 +3396,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iter_cp = *iter; io_size = iov_iter_count(iter); req->result = io_size; ret = 0; @@ -3465,7 +3432,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, if (req->file->f_flags & O_NONBLOCK) goto done; /* some cases will consume bytes even on error returns */ - iov_iter_revert(iter, io_size - iov_iter_count(iter)); + *iter = iter_cp; ret = 0; goto copy_iov; } else if (ret < 0) { @@ -3548,6 +3515,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; + struct iov_iter iter_cp; struct io_async_rw *rw = req->async_data; ssize_t ret, ret2, io_size; @@ -3557,6 +3525,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iter_cp = *iter; io_size = iov_iter_count(iter); req->result = io_size; @@ -3618,7 +3587,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock, } else { copy_iov: /* some cases will consume bytes even on error returns */ - iov_iter_revert(iter, io_size - iov_iter_count(iter)); + *iter = iter_cp; ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); if (!ret) return -EAGAIN; @@ -4063,6 +4032,7 @@ 
static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head) } else { list_add_tail(&buf->list, &(*head)->list); } + cond_resched(); } return i ? i : -ENOMEM; @@ -5625,6 +5595,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, else data->mode = HRTIMER_MODE_REL; + INIT_LIST_HEAD(&req->timeout.list); hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode); return 0; } @@ -6277,6 +6248,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) prev = NULL; } + list_del(&req->timeout.list); spin_unlock_irqrestore(&ctx->completion_lock, flags); if (prev) { @@ -6721,6 +6693,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) err = io_submit_sqe(req, sqe, &link, &state.comp); if (err) goto fail_req; + /* to avoid doing too much in one submit round */ + if (submitted > IORING_MAX_ENTRIES / 2) + cond_resched(); } if (unlikely(submitted != nr)) { @@ -7051,7 +7026,11 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); trace_io_uring_cqring_wait(ctx, min_events); do { - io_cqring_overflow_flush(ctx, false, NULL, NULL); + /* if we can't even flush overflow, don't wait for more */ + if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) { + ret = -EBUSY; + break; + } prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, TASK_INTERRUPTIBLE); /* make sure we run task_work before checking for signals */ @@ -7335,10 +7314,15 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) refcount_add(skb->truesize, &sk->sk_wmem_alloc); skb_queue_head(&sk->sk_receive_queue, skb); - for (i = 0; i < nr_files; i++) - fput(fpl->fp[i]); + for (i = 0; i < nr; i++) { + struct file *file = io_file_from_index(ctx, i + offset); + + if (file) + fput(file); + } } else { kfree_skb(skb); + free_uid(fpl->user); kfree(fpl); } diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index b121d7d434c67510a58c7d962610abc68f3423cc..98cfa73cb165bc512987bfb9ede08ad447f082c8 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -501,7 +501,6 @@ void jbd2_journal_commit_transaction(journal_t *journal) } spin_unlock(&commit_transaction->t_handle_lock); commit_transaction->t_state = T_SWITCH; - write_unlock(&journal->j_state_lock); J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <= journal->j_max_transaction_buffers); @@ -521,6 +520,8 @@ void jbd2_journal_commit_transaction(journal_t *journal) * has reserved. This is consistent with the existing behaviour * that multiple jbd2_journal_get_write_access() calls to the same * buffer are perfectly permissible. + * We use journal->j_state_lock here to serialize processing of + * t_reserved_list with eviction of buffers from journal_unmap_buffer(). */ while (commit_transaction->t_reserved_list) { jh = commit_transaction->t_reserved_list; @@ -540,6 +541,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) jbd2_journal_refile_buffer(journal, jh); } + write_unlock(&journal->j_state_lock); /* * Now try to drop any written-back buffers from the journal's * checkpoint lists. 
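
The jbd2 change in these hunks widens an existing j_state_lock section instead of introducing a new lock. A toy pthread analogue of that pattern (all names invented; the kernel uses a rwlock_t, not pthreads): once the list is drained inside the writer section, a concurrent evictor that takes the same lock can never observe it half-processed:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t state_lock = PTHREAD_RWLOCK_INITIALIZER;
static int reserved[4] = { 1, 2, 3, 4 };
static int nreserved = 4;

static void *commit_thread(void *arg)
{
    (void)arg;
    pthread_rwlock_wrlock(&state_lock);
    while (nreserved > 0) /* drain under the lock, as the commit path now does */
        printf("refile buffer %d\n", reserved[--nreserved]);
    pthread_rwlock_unlock(&state_lock);
    return NULL;
}

static void *evict_thread(void *arg)
{
    (void)arg;
    pthread_rwlock_rdlock(&state_lock); /* the journal_unmap_buffer() side */
    printf("evictor sees %d buffers: always 0 or 4, never partial\n", nreserved);
    pthread_rwlock_unlock(&state_lock);
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, commit_thread, NULL);
    pthread_create(&b, NULL, evict_thread, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}
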
We do this *before* commit because it potentially @@ -562,13 +564,13 @@ void jbd2_journal_commit_transaction(journal_t *journal) */ jbd2_journal_switch_revoke_table(journal); + write_lock(&journal->j_state_lock); /* * Reserved credits cannot be claimed anymore, free them */ atomic_sub(atomic_read(&journal->j_reserved_credits), &commit_transaction->t_outstanding_credits); - write_lock(&journal->j_state_lock); trace_jbd2_commit_flushing(journal, commit_transaction); stats.run.rs_flushing = jiffies; stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked, diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index d6873bb36e540a6086c3d60b0a5ab8675a0e5609..aae412b0bfae17caffbb4f3758ceac60b43074e1 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -2869,6 +2869,7 @@ struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh) jbd_unlock_bh_journal_head(bh); return jh; } +EXPORT_SYMBOL(jbd2_journal_grab_journal_head); static void __journal_remove_journal_head(struct buffer_head *bh) { @@ -2921,6 +2922,7 @@ void jbd2_journal_put_journal_head(struct journal_head *jh) jbd_unlock_bh_journal_head(bh); } } +EXPORT_SYMBOL(jbd2_journal_put_journal_head); /* * Initialize jbd inode head diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 49b0637fb36eed0ed28c7756b176640b60f5808b..29671e33a1714c8ca5c1e272ac58821e00078ef1 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c @@ -136,7 +136,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) if (!s) { JFFS2_WARNING("Can't allocate memory for summary\n"); ret = -ENOMEM; - goto out; + goto out_buf; } } @@ -275,15 +275,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) } ret = 0; out: + jffs2_sum_reset_collected(s); + kfree(s); + out_buf: if (buf_size) kfree(flashbuf); #ifndef __ECOS else mtd_unpoint(c->mtd, 0, c->mtd->size); #endif - if (s->sum_list_head) - jffs2_sum_reset_collected(s); - kfree(s); return ret; } diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index b0eb9c85eea0c45480082328b1b142884005b097..980aa3300f106fae9f514fee4dec1e704dcc3dde 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -146,12 +146,13 @@ void jfs_evict_inode(struct inode *inode) dquot_initialize(inode); if (JFS_IP(inode)->fileset == FILESYSTEM_I) { + struct inode *ipimap = JFS_SBI(inode->i_sb)->ipimap; truncate_inode_pages_final(&inode->i_data); if (test_cflag(COMMIT_Freewmap, inode)) jfs_free_zero_link(inode); - if (JFS_SBI(inode->i_sb)->ipimap) + if (ipimap && JFS_IP(ipimap)->i_imap) diFree(inode); /* diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index aedad59f8a4589af32e0ee3002f33b44ba65e667..e58ae29a223d755bf58f685ae248db2909097c46 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -148,6 +148,7 @@ static const s8 budtab[256] = { * 0 - success * -ENOMEM - insufficient memory * -EIO - i/o error + * -EINVAL - wrong bmap data */ int dbMount(struct inode *ipbmap) { @@ -179,6 +180,12 @@ int dbMount(struct inode *ipbmap) bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree); bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage); bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag); + if (!bmp->db_numag) { + release_metapage(mp); + kfree(bmp); + return -EINVAL; + } + bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel); bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag); bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref); diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 34f546404aa11385388f140c7a7a22d345f5fc31..e938f5b1e4b94c6a438428178a1910a3aba9d050 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -446,7 +446,8 @@ static const struct 
address_space_operations minix_aops = { .writepage = minix_writepage, .write_begin = minix_write_begin, .write_end = generic_write_end, - .bmap = minix_bmap + .bmap = minix_bmap, + .direct_IO = noop_direct_IO }; static const struct inode_operations minix_symlink_inode_operations = { diff --git a/fs/namei.c b/fs/namei.c index c94a814e86b22802d43915e9fd0c782701e5c60a..4b55e176cbfcf76b80dc05aeb1480f9bbed608b6 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3715,13 +3715,12 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) dentry->d_inode->i_flags |= S_DEAD; dont_mount(dentry); detach_mounts(dentry); - fsnotify_rmdir(dir, dentry); out: inode_unlock(dentry->d_inode); dput(dentry); if (!error) - d_delete(dentry); + d_delete_notify(dir, dentry); return error; } EXPORT_SYMBOL(vfs_rmdir); @@ -3819,7 +3818,9 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegate return -EPERM; inode_lock(target); - if (is_local_mountpoint(dentry)) + if (IS_SWAPFILE(target)) + error = -EPERM; + else if (is_local_mountpoint(dentry)) error = -EBUSY; else { error = security_inode_unlink(dir, dentry); @@ -3831,7 +3832,6 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegate if (!error) { dont_mount(dentry); detach_mounts(dentry); - fsnotify_unlink(dir, dentry); } } } @@ -3839,9 +3839,11 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegate inode_unlock(target); /* We don't d_delete() NFS sillyrenamed files--they still exist. */ - if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { + if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) { + fsnotify_unlink(dir, dentry); + } else if (!error) { fsnotify_link_count(target); - d_delete(dentry); + d_delete_notify(dir, dentry); } return error; @@ -4282,6 +4284,10 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, else if (target) inode_lock(target); + error = -EPERM; + if (IS_SWAPFILE(source) || (target && IS_SWAPFILE(target))) + goto out; + error = -EBUSY; if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry)) goto out; diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index 6a2033131c068e82e9934c6baba20c243dfad62f..ccd4f245cae240b812c3a7e6d44e2de32de819c5 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -170,7 +170,7 @@ struct cb_devicenotifyitem { }; struct cb_devicenotifyargs { - int ndevs; + uint32_t ndevs; struct cb_devicenotifyitem *devs; }; diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index be546ece383f5af783f6bdbd5c75a696c04ec823..a5209643ac36ce4c7d04c8a7be1e6ce4db79bd45 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -353,12 +353,11 @@ __be32 nfs4_callback_devicenotify(void *argp, void *resp, struct cb_process_state *cps) { struct cb_devicenotifyargs *args = argp; - int i; + const struct pnfs_layoutdriver_type *ld = NULL; + uint32_t i; __be32 res = 0; - struct nfs_client *clp = cps->clp; - struct nfs_server *server = NULL; - if (!clp) { + if (!cps->clp) { res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION); goto out; } @@ -366,23 +365,15 @@ __be32 nfs4_callback_devicenotify(void *argp, void *resp, for (i = 0; i < args->ndevs; i++) { struct cb_devicenotifyitem *dev = &args->devs[i]; - if (!server || - server->pnfs_curr_ld->id != dev->cbd_layout_type) { - rcu_read_lock(); - list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) - if (server->pnfs_curr_ld && - server->pnfs_curr_ld->id == dev->cbd_layout_type) { - rcu_read_unlock(); - goto found; - } - rcu_read_unlock(); - continue; + if 
(!ld || ld->id != dev->cbd_layout_type) { + pnfs_put_layoutdriver(ld); + ld = pnfs_find_layoutdriver(dev->cbd_layout_type); + if (!ld) + continue; } - - found: - nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id); + nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id); } - + pnfs_put_layoutdriver(ld); out: kfree(args->devs); return res; diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 79ff172eb1c81ac0bc2b89740157b6163ca6aa8b..ca8a4aa351dc948303dafa647a50f8893caa0465 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -259,11 +259,9 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp, void *argp) { struct cb_devicenotifyargs *args = argp; + uint32_t tmp, n, i; __be32 *p; __be32 status = 0; - u32 tmp; - int n, i; - args->ndevs = 0; /* Num of device notifications */ p = xdr_inline_decode(xdr, sizeof(uint32_t)); @@ -272,12 +270,8 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp, goto out; } n = ntohl(*p++); - if (n <= 0) - goto out; - if (n > ULONG_MAX / sizeof(*args->devs)) { - status = htonl(NFS4ERR_BADXDR); + if (n == 0) goto out; - } args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL); if (!args->devs) { @@ -331,19 +325,21 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp, dev->cbd_immediate = 0; } - args->ndevs++; - dprintk("%s: type %d layout 0x%x immediate %d\n", __func__, dev->cbd_notify_type, dev->cbd_layout_type, dev->cbd_immediate); } + args->ndevs = n; + dprintk("%s: ndevs %d\n", __func__, args->ndevs); + return 0; +err: + kfree(args->devs); out: + args->devs = NULL; + args->ndevs = 0; dprintk("%s: status %d ndevs %d\n", __func__, ntohl(status), args->ndevs); return status; -err: - kfree(args->devs); - goto out; } static __be32 decode_sessionid(struct xdr_stream *xdr, diff --git a/fs/nfs/client.c b/fs/nfs/client.c index e365ef543f4ea0a73075e12ecd99ef6a6de50825..f5e261f6338afaa881d9c1c20374737098fb58da 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -177,6 +177,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init) INIT_LIST_HEAD(&clp->cl_superblocks); clp->cl_rpcclient = ERR_PTR(-EINVAL); + clp->cl_flags = cl_init->init_flags; clp->cl_proto = cl_init->proto; clp->cl_nconnect = cl_init->nconnect; clp->cl_net = get_net(cl_init->net); @@ -426,7 +427,6 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init) list_add_tail(&new->cl_share_link, &nn->nfs_client_list); spin_unlock(&nn->nfs_client_lock); - new->cl_flags = cl_init->init_flags; return rpc_ops->init_client(new, cl_init); } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 1276437b48debb5dfe7ed766b40186c42b9e3903..9f88ca7b2001567447aa47c79d6b91aba12b3246 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1628,16 +1628,6 @@ const struct dentry_operations nfs4_dentry_operations = { }; EXPORT_SYMBOL_GPL(nfs4_dentry_operations); -static fmode_t flags_to_mode(int flags) -{ - fmode_t res = (__force fmode_t)flags & FMODE_EXEC; - if ((flags & O_ACCMODE) != O_WRONLY) - res |= FMODE_READ; - if ((flags & O_ACCMODE) != O_RDONLY) - res |= FMODE_WRITE; - return res; -} - static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp) { return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp); @@ -1780,14 +1770,14 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, if (!res) { inode = d_inode(dentry); if ((lookup_flags & LOOKUP_DIRECTORY) && inode && - !S_ISDIR(inode->i_mode)) + !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) res = 
ERR_PTR(-ENOTDIR); else if (inode && S_ISREG(inode->i_mode)) res = ERR_PTR(-EOPENSTALE); } else if (!IS_ERR(res)) { inode = d_inode(res); if ((lookup_flags & LOOKUP_DIRECTORY) && inode && - !S_ISDIR(inode->i_mode)) { + !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) { dput(res); res = ERR_PTR(-ENOTDIR); } else if (inode && S_ISREG(inode->i_mode)) { @@ -2192,6 +2182,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) trace_nfs_link_enter(inode, dir, dentry); d_drop(dentry); + if (S_ISREG(inode->i_mode)) + nfs_sync_inode(inode); error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name); if (error == 0) { ihold(inode); @@ -2280,6 +2272,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, } } + if (S_ISREG(old_inode->i_mode)) + nfs_sync_inode(old_inode); task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL); if (IS_ERR(task)) { error = PTR_ERR(task); @@ -2485,7 +2479,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co return NULL; } -static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block) +static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs_access_entry *cache; @@ -2515,8 +2509,7 @@ static int nfs_access_get_cached_locked(struct inode *inode, const struct cred * spin_lock(&inode->i_lock); retry = false; } - res->cred = cache->cred; - res->mask = cache->mask; + *mask = cache->mask; list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru); err = 0; out: @@ -2528,7 +2521,7 @@ static int nfs_access_get_cached_locked(struct inode *inode, const struct cred * return -ENOENT; } -static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res) +static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, u32 *mask) { /* Only check the most recently returned cache entry, * but do it without locking. 
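
The lockless fast path this comment describes has a standard userspace shape. A sketch with C11 atomics standing in for the RCU-protected pointer (this is an analogue, not the kernel API): the cache publishes its most recently used entry, and a reader either gets a credential match or falls back to the locked slow path:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct access_entry {
    int cred_id;   /* stand-in for the struct cred pointer */
    unsigned mask; /* cached access mask */
};

static struct access_entry recent = { .cred_id = 42, .mask = 0x3f };
static _Atomic(struct access_entry *) latest = &recent;

static bool get_cached_mask(int cred_id, unsigned *mask)
{
    struct access_entry *hint =
        atomic_load_explicit(&latest, memory_order_acquire);

    if (!hint || hint->cred_id != cred_id)
        return false; /* miss: caller takes the locked lookup instead */
    *mask = hint->mask;
    return true;
}

int main(void)
{
    unsigned mask;

    if (get_cached_mask(42, &mask))
        printf("fast path hit, mask=%#x\n", mask);
    return 0;
}
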
@@ -2550,22 +2543,21 @@ static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cre goto out; if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS)) goto out; - res->cred = cache->cred; - res->mask = cache->mask; + *mask = cache->mask; err = 0; out: rcu_read_unlock(); return err; } -int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct -nfs_access_entry *res, bool may_block) +int nfs_access_get_cached(struct inode *inode, const struct cred *cred, + u32 *mask, bool may_block) { int status; - status = nfs_access_get_cached_rcu(inode, cred, res); + status = nfs_access_get_cached_rcu(inode, cred, mask); if (status != 0) - status = nfs_access_get_cached_locked(inode, cred, res, + status = nfs_access_get_cached_locked(inode, cred, mask, may_block); return status; @@ -2686,7 +2678,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) trace_nfs_access_enter(inode); - status = nfs_access_get_cached(inode, cred, &cache, may_block); + status = nfs_access_get_cached(inode, cred, &cache.mask, may_block); if (status == 0) goto out_cached; diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 3c0335c15a730054b0c09f1d9801765610a1e9b7..c220810c61d145a4b6e3f113aa0a27781b01a5c3 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -172,8 +172,8 @@ ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE); if (iov_iter_rw(iter) == READ) - return nfs_file_direct_read(iocb, iter); - return nfs_file_direct_write(iocb, iter); + return nfs_file_direct_read(iocb, iter, true); + return nfs_file_direct_write(iocb, iter, true); } static void nfs_direct_release_pages(struct page **pages, unsigned int npages) @@ -424,6 +424,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * nfs_file_direct_read - file direct read operation for NFS files * @iocb: target I/O control block * @iter: vector of user buffers into which to read data + * @swap: flag indicating this is swap IO, not O_DIRECT IO * * We use this function for direct reads instead of calling * generic_file_aio_read() in order to avoid gfar's check to see if @@ -439,7 +440,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * client must read the updated atime from the server back into its * cache. 
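
Everywhere the new swap argument lands in this file it follows the same shape, condensed below (function names invented for the sketch): swap-over-NFS requests are issued from the memory-reclaim path, so they must bypass the inode's io lock that an ordinary O_DIRECT read is allowed to sleep on:

#include <stdbool.h>
#include <stdio.h>

static void start_io(const char *who) { printf("%s: take io lock\n", who); }
static void end_io(const char *who)   { printf("%s: drop io lock\n", who); }

/* Sketch of the post-patch direct-read shape. */
static long direct_read(const char *who, long count, bool swap)
{
    if (!swap)
        start_io(who); /* reclaim context must not block here */
    printf("%s: schedule %ld bytes of reads\n", who, count);
    if (!swap)
        end_io(who);
    return count;
}

int main(void)
{
    direct_read("O_DIRECT reader", 4096, false);
    direct_read("swap-in", 4096, true);
    return 0;
}
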
*/ -ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) +ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, + bool swap) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -481,12 +483,14 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) if (iter_is_iovec(iter)) dreq->flags = NFS_ODIRECT_SHOULD_DIRTY; - nfs_start_io_direct(inode); + if (!swap) + nfs_start_io_direct(inode); NFS_I(inode)->read_io += count; requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos); - nfs_end_io_direct(inode); + if (!swap) + nfs_end_io_direct(inode); if (requested > 0) { result = nfs_direct_wait(dreq); @@ -789,7 +793,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = { */ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, struct iov_iter *iter, - loff_t pos) + loff_t pos, int ioflags) { struct nfs_pageio_descriptor desc; struct inode *inode = dreq->inode; @@ -797,7 +801,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, size_t requested_bytes = 0; size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE); - nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false, + nfs_pageio_init_write(&desc, inode, ioflags, false, &nfs_direct_write_completion_ops); desc.pg_dreq = dreq; get_dreq(dreq); @@ -875,6 +879,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * nfs_file_direct_write - file direct write operation for NFS files * @iocb: target I/O control block * @iter: vector of user buffers from which to write data + * @swap: flag indicating this is swap IO, not O_DIRECT IO * * We use this function for direct writes instead of calling * generic_file_aio_write() in order to avoid taking the inode @@ -891,7 +896,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * Note that O_APPEND is not supported for NFS direct writes, as there * is no atomic O_APPEND write facility in the NFS protocol. 
*/ -ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) +ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, + bool swap) { ssize_t result, requested; size_t count; @@ -905,7 +911,11 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n", file, iov_iter_count(iter), (long long) iocb->ki_pos); - result = generic_write_checks(iocb, iter); + if (swap) + /* bypass generic checks */ + result = iov_iter_count(iter); + else + result = generic_write_checks(iocb, iter); if (result <= 0) return result; count = result; @@ -936,16 +946,22 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) dreq->iocb = iocb; pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode); - nfs_start_io_direct(inode); + if (swap) { + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos, + FLUSH_STABLE); + } else { + nfs_start_io_direct(inode); - requested = nfs_direct_write_schedule_iovec(dreq, iter, pos); + requested = nfs_direct_write_schedule_iovec(dreq, iter, pos, + FLUSH_COND_STABLE); - if (mapping->nrpages) { - invalidate_inode_pages2_range(mapping, - pos >> PAGE_SHIFT, end); - } + if (mapping->nrpages) { + invalidate_inode_pages2_range(mapping, + pos >> PAGE_SHIFT, end); + } - nfs_end_io_direct(inode); + nfs_end_io_direct(inode); + } if (requested > 0) { result = nfs_direct_wait(dreq); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 63940a7a70be15760a8939b9a615bfe313d302bf..aff1e65af6bd3290148f5851900e7b880bc39204 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -140,7 +140,6 @@ static int nfs_file_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); - errseq_t since; dprintk("NFS: flush(%pD2)\n", file); @@ -149,9 +148,7 @@ nfs_file_flush(struct file *file, fl_owner_t id) return 0; /* Flush writes to the server and return any errors */ - since = filemap_sample_wb_err(file->f_mapping); - nfs_wb_all(inode); - return filemap_check_wb_err(file->f_mapping, since); + return nfs_wb_all(inode); } ssize_t @@ -161,7 +158,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to) ssize_t result; if (iocb->ki_flags & IOCB_DIRECT) - return nfs_file_direct_read(iocb, to); + return nfs_file_direct_read(iocb, to, false); dprintk("NFS: read(%pD2, %zu@%lu)\n", iocb->ki_filp, @@ -590,14 +587,12 @@ static const struct vm_operations_struct nfs_file_vm_ops = { .page_mkwrite = nfs_vm_page_mkwrite, }; -static int nfs_need_check_write(struct file *filp, struct inode *inode, - int error) +static int nfs_need_check_write(struct file *filp, struct inode *inode) { struct nfs_open_context *ctx; ctx = nfs_file_open_context(filp); - if (nfs_error_is_fatal_on_server(error) || - nfs_ctx_key_to_expire(ctx, inode)) + if (nfs_ctx_key_to_expire(ctx, inode)) return 1; return 0; } @@ -608,15 +603,13 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(file); unsigned long written = 0; ssize_t result; - errseq_t since; - int error; result = nfs_key_timeout_notify(file, inode); if (result) return result; if (iocb->ki_flags & IOCB_DIRECT) - return nfs_file_direct_write(iocb, from); + return nfs_file_direct_write(iocb, from, false); dprintk("NFS: write(%pD2, %zu@%Ld)\n", file, iov_iter_count(from), (long long) iocb->ki_pos); @@ -634,7 +627,6 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) if (iocb->ki_pos > i_size_read(inode)) nfs_revalidate_mapping(inode, file->f_mapping); - since = 
filemap_sample_wb_err(file->f_mapping); nfs_start_io_write(inode); result = generic_write_checks(iocb, from); if (result > 0) { @@ -653,8 +645,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) goto out; /* Return error values */ - error = filemap_check_wb_err(file->f_mapping, since); - if (nfs_need_check_write(file, inode, error)) { + if (nfs_need_check_write(file, inode)) { int err = nfs_wb_all(inode); if (err < 0) result = err; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 21addb78523d229666eff8cbd9a97247b160ba66..1adece1cff3ed9cacc449845a7927a1364328743 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -195,6 +195,18 @@ bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags) } EXPORT_SYMBOL_GPL(nfs_check_cache_invalid); +#ifdef CONFIG_NFS_V4_2 +static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi) +{ + return nfsi->xattr_cache != NULL; +} +#else +static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi) +{ + return false; +} +#endif + static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) { struct nfs_inode *nfsi = NFS_I(inode); @@ -210,6 +222,8 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) } else if (flags & NFS_INO_REVAL_PAGECACHE) flags |= NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE; + if (!nfs_has_xattr_cache(nfsi)) + flags &= ~NFS_INO_INVALID_XATTR; if (inode->i_mapping->nrpages == 0) flags &= ~(NFS_INO_INVALID_DATA|NFS_INO_DATA_INVAL_DEFER); nfsi->cache_validity |= flags; @@ -807,12 +821,9 @@ int nfs_getattr(const struct path *path, struct kstat *stat, } /* Flush out writes to the server in order to update c/mtime. */ - if ((request_mask & (STATX_CTIME|STATX_MTIME)) && - S_ISREG(inode->i_mode)) { - err = filemap_write_and_wait(inode->i_mapping); - if (err) - goto out; - } + if ((request_mask & (STATX_CTIME | STATX_MTIME)) && + S_ISREG(inode->i_mode)) + filemap_write_and_wait(inode->i_mapping); /* * We may force a getattr if the user cares about atime. @@ -1128,7 +1139,6 @@ int nfs_open(struct inode *inode, struct file *filp) nfs_fscache_open_file(inode, filp); return 0; } -EXPORT_SYMBOL_GPL(nfs_open); /* * This function is called whenever some part of NFS notices that diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 98554dd18a7157619c2b5f85c751b4dd88c7311a..7009a8dddd45bd59cf30d20c0768aa94842ba649 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -42,6 +42,16 @@ static inline bool nfs_lookup_is_soft_revalidate(const struct dentry *dentry) return true; } +static inline fmode_t flags_to_mode(int flags) +{ + fmode_t res = (__force fmode_t)flags & FMODE_EXEC; + if ((flags & O_ACCMODE) != O_WRONLY) + res |= FMODE_READ; + if ((flags & O_ACCMODE) != O_RDONLY) + res |= FMODE_WRITE; + return res; +} + /* * Note: RFC 1813 doesn't limit the number of auth flavors that * a server can return, so make something up. 
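
flags_to_mode(), which the hunk above moves into internal.h so nfs4file.c can reuse it, is a three-way mapping of O_ACCMODE. A userspace rendering (local FMODE_* constants replace the kernel's fmode_t bits, and the FMODE_EXEC passthrough is dropped for brevity):

#include <fcntl.h>
#include <stdio.h>

#define FMODE_READ  0x1
#define FMODE_WRITE 0x2

/* open(2) flags -> read/write mode bits, as in fs/nfs/internal.h. */
static unsigned flags_to_mode(int flags)
{
    unsigned res = 0;

    if ((flags & O_ACCMODE) != O_WRONLY)
        res |= FMODE_READ;
    if ((flags & O_ACCMODE) != O_RDONLY)
        res |= FMODE_WRITE;
    return res;
}

int main(void)
{
    printf("O_RDONLY  -> %u\n", flags_to_mode(O_RDONLY)); /* 1 */
    printf("O_WRONLY  -> %u\n", flags_to_mode(O_WRONLY)); /* 2 */
    printf("O_RDWR    -> %u\n", flags_to_mode(O_RDWR));   /* 3 */
    printf("accmode 3 -> %u\n", flags_to_mode(3)); /* both, as nfs4_file_open() wants */
    return 0;
}
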
@@ -578,6 +588,13 @@ nfs_write_match_verf(const struct nfs_writeverf *verf, !nfs_write_verifier_cmp(&req->wb_verf, &verf->verifier); } +static inline gfp_t nfs_io_gfp_mask(void) +{ + if (current->flags & PF_WQ_WORKER) + return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; + return GFP_KERNEL; +} + /* unlink.c */ extern struct rpc_task * nfs_async_rename(struct inode *old_dir, struct inode *new_dir, diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index f6676af37d5dbb26baee5e776f5c7c05554227ab..5e6453e9b30790bd8cb7160c14feecfc5bb71186 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -948,7 +948,7 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, error = decode_filename_inline(xdr, &entry->name, &entry->len); if (unlikely(error)) - return error; + return -EAGAIN; /* * The type (size and byte order) of nfscookie isn't defined in diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index dff6b52d26a856ab698828d32f35f56ab7a7c939..b5a9379b14504cb15172886a6c97acd7e6bdc0d7 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -1964,7 +1964,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, bool plus) { struct user_namespace *userns = rpc_userns(entry->server->client); - struct nfs_entry old = *entry; __be32 *p; int error; u64 new_cookie; @@ -1984,15 +1983,15 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, error = decode_fileid3(xdr, &entry->ino); if (unlikely(error)) - return error; + return -EAGAIN; error = decode_inline_filename3(xdr, &entry->name, &entry->len); if (unlikely(error)) - return error; + return -EAGAIN; error = decode_cookie3(xdr, &new_cookie); if (unlikely(error)) - return error; + return -EAGAIN; entry->d_type = DT_UNKNOWN; @@ -2000,7 +1999,7 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, entry->fattr->valid = 0; error = decode_post_op_attr(xdr, entry->fattr, userns); if (unlikely(error)) - return error; + return -EAGAIN; if (entry->fattr->valid & NFS_ATTR_FATTR_V3) entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); @@ -2015,11 +2014,8 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, return -EAGAIN; if (*p != xdr_zero) { error = decode_nfs_fh3(xdr, entry->fh); - if (unlikely(error)) { - if (error == -E2BIG) - goto out_truncated; - return error; - } + if (unlikely(error)) + return -EAGAIN; } else zero_nfs_fh3(entry->fh); } @@ -2028,11 +2024,6 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, entry->cookie = new_cookie; return 0; - -out_truncated: - dprintk("NFS: directory entry contains invalid file handle\n"); - *entry = old; - return -EAGAIN; } /* diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 2587b1b8e2ef760ea482065c4d1ab091d8c65969..dad32b171e677a93e6f2c88de5761445c249b7c0 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -567,8 +567,10 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst, ctx = get_nfs_open_context(nfs_file_open_context(src)); l_ctx = nfs_get_lock_context(ctx); - if (IS_ERR(l_ctx)) - return PTR_ERR(l_ctx); + if (IS_ERR(l_ctx)) { + status = PTR_ERR(l_ctx); + goto out; + } status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx, FMODE_READ); @@ -576,7 +578,7 @@ static int _nfs42_proc_copy_notify(struct file *src, struct file *dst, if (status) { if (status == -EAGAIN) status = -NFS4ERR_BAD_STATEID; - return status; + goto out; } status = nfs4_call_sync(src_server->client, src_server, &msg, @@ -584,6 +586,7 @@ static int _nfs42_proc_copy_notify(struct 
file *src, struct file *dst, if (status == -ENOTSUPP) src_server->caps &= ~NFS_CAP_COPY_NOTIFY; +out: put_nfs_open_context(nfs_file_open_context(src)); return status; } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 3e344bec3647b09889b113c6b568d6316b41140d..6d916563356ef372d594a482a4b2246888d5f090 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -281,7 +281,8 @@ struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *, int nfs4_submount(struct fs_context *, struct nfs_server *); int nfs4_replace_transport(struct nfs_server *server, const struct nfs4_fs_locations *locations); - +size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr *sa, + size_t salen, struct net *net, int port); /* nfs4proc.c */ extern int nfs4_handle_exception(struct nfs_server *, int, struct nfs4_exception *); extern int nfs4_async_handle_error(struct rpc_task *task, diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 9c81ef6e1ce731f5854f00304a1e178872cce40d..3edbfe7bfcefbaececafc4e3e3d4b5f0dfe209c4 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -1330,8 +1330,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname, } nfs_put_client(clp); - if (server->nfs_client->cl_hostname == NULL) + if (server->nfs_client->cl_hostname == NULL) { server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL); + if (server->nfs_client->cl_hostname == NULL) + return -ENOMEM; + } nfs_server_insert_lists(server); return nfs_probe_destination(server); diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index a1e5c6b85dedc5836b5ecbc407bc02a29dad3e72..f12494e08267f5c2163069b001c0c582dfec66cf 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -32,6 +32,7 @@ nfs4_file_open(struct inode *inode, struct file *filp) struct dentry *parent = NULL; struct inode *dir; unsigned openflags = filp->f_flags; + fmode_t f_mode; struct iattr attr; int err; @@ -50,8 +51,9 @@ nfs4_file_open(struct inode *inode, struct file *filp) if (err) return err; + f_mode = filp->f_mode; if ((openflags & O_ACCMODE) == 3) - return nfs_open(inode, filp); + f_mode |= flags_to_mode(openflags); /* We can't create new files here */ openflags &= ~(O_CREAT|O_EXCL); @@ -59,7 +61,7 @@ nfs4_file_open(struct inode *inode, struct file *filp) parent = dget_parent(dentry); dir = d_inode(parent); - ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp); + ctx = alloc_nfs_open_context(file_dentry(filp), f_mode, filp); err = PTR_ERR(ctx); if (IS_ERR(ctx)) goto out; @@ -111,7 +113,6 @@ static int nfs4_file_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); - errseq_t since; dprintk("NFS: flush(%pD2)\n", file); @@ -127,9 +128,7 @@ nfs4_file_flush(struct file *file, fl_owner_t id) return filemap_fdatawrite(file->f_mapping); /* Flush writes to the server and return any errors */ - since = filemap_sample_wb_err(file->f_mapping); - nfs_wb_all(inode); - return filemap_check_wb_err(file->f_mapping, since); + return nfs_wb_all(inode); } #ifdef CONFIG_NFS_V4_2 diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index 873342308dc0d9a131d93665b338e3eb1a49d6e9..3680c8da510c9787bcbae896a292a2cfec039469 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c @@ -164,16 +164,21 @@ static int nfs4_validate_fspath(struct dentry *dentry, return 0; } -static size_t nfs_parse_server_name(char *string, size_t len, - struct sockaddr *sa, size_t salen, struct net *net) +size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr *sa, + size_t 
salen, struct net *net, int port) { ssize_t ret; ret = rpc_pton(net, string, len, sa, salen); if (ret == 0) { - ret = nfs_dns_resolve_name(net, string, len, sa, salen); - if (ret < 0) - ret = 0; + ret = rpc_uaddr2sockaddr(net, string, len, sa, salen); + if (ret == 0) { + ret = nfs_dns_resolve_name(net, string, len, sa, salen); + if (ret < 0) + ret = 0; + } + } else if (port) { + rpc_set_port(sa, port); } return ret; } @@ -328,7 +333,7 @@ static int try_location(struct fs_context *fc, nfs_parse_server_name(buf->data, buf->len, &ctx->nfs_server.address, sizeof(ctx->nfs_server._address), - fc->net_ns); + fc->net_ns, 0); if (ctx->nfs_server.addrlen == 0) continue; @@ -496,7 +501,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server, continue; salen = nfs_parse_server_name(buf->data, buf->len, - sap, addr_bufsize, net); + sap, addr_bufsize, net, 0); if (salen == 0) continue; rpc_set_port(sap, NFS_PORT); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 3106bd28b1132a754e5c8daf280724817d080c0b..77199d3560429753e4b2c64a3874dbc72fad6902 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7597,7 +7597,7 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, const char *key, const void *buf, size_t buflen, int flags) { - struct nfs_access_entry cache; + u32 mask; int ret; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) @@ -7612,8 +7612,8 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, * do a cached access check for the XA* flags to possibly avoid * doing an RPC and getting EACCES back. */ - if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { - if (!(cache.mask & NFS_ACCESS_XAWRITE)) + if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { + if (!(mask & NFS_ACCESS_XAWRITE)) return -EACCES; } @@ -7634,14 +7634,14 @@ static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *key, void *buf, size_t buflen) { - struct nfs_access_entry cache; + u32 mask; ssize_t ret; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) return -EOPNOTSUPP; - if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { - if (!(cache.mask & NFS_ACCESS_XAREAD)) + if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { + if (!(mask & NFS_ACCESS_XAREAD)) return -EACCES; } @@ -7666,13 +7666,13 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) ssize_t ret, size; char *buf; size_t buflen; - struct nfs_access_entry cache; + u32 mask; if (!nfs_server_capable(inode, NFS_CAP_XATTR)) return 0; - if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { - if (!(cache.mask & NFS_ACCESS_XALIST)) + if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) { + if (!(mask & NFS_ACCESS_XALIST)) return 0; } @@ -8205,6 +8205,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) case -NFS4ERR_DEADSESSION: nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); + return; } if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && res->dir != NFS4_CDFS4_BOTH) { diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 4bf10792cb5b1049790ec0d0584f263fb3bac96c..a8fe8f84c5ae00dcc0387617314cff2d9fa6a676 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -49,6 +49,7 @@ #include #include #include +#include #include @@ -2104,6 +2105,9 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred } result = -NFS4ERR_NXIO; + if (!locations->nlocations) + goto out; + if 
(!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) { dprintk("<-- %s: No fs_locations data, migration skipped\n", __func__); @@ -2554,9 +2558,17 @@ static void nfs4_layoutreturn_any_run(struct nfs_client *clp) static void nfs4_state_manager(struct nfs_client *clp) { + unsigned int memflags; int status = 0; const char *section = "", *section_sep = ""; + /* + * State recovery can deadlock if the direct reclaim code tries + * start NFS writeback. So ensure memory allocations are all + * GFP_NOFS. + */ + memflags = memalloc_nofs_save(); + /* Ensure exclusive access to NFSv4 state */ do { trace_nfs4_state_mgr(clp); @@ -2651,6 +2663,7 @@ static void nfs4_state_manager(struct nfs_client *clp) clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); } + memalloc_nofs_restore(memflags); nfs4_end_drain_session(clp); nfs4_clear_state_manager_bit(clp); @@ -2668,6 +2681,7 @@ static void nfs4_state_manager(struct nfs_client *clp) return; if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) return; + memflags = memalloc_nofs_save(); } while (refcount_read(&clp->cl_count) > 1 && !signalled()); goto out_drain; @@ -2680,6 +2694,7 @@ static void nfs4_state_manager(struct nfs_client *clp) clp->cl_hostname, -status); ssleep(1); out_drain: + memalloc_nofs_restore(memflags); nfs4_end_drain_session(clp); nfs4_clear_state_manager_bit(clp); } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index c16b93df1bc14222d7dd4393704f7d090cbd6d74..e2f0e3446e22a99ad6d06ec9af112fe696840595 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3680,8 +3680,6 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st if (unlikely(!p)) goto out_eio; n = be32_to_cpup(p); - if (n <= 0) - goto out_eio; for (res->nlocations = 0; res->nlocations < n; res->nlocations++) { u32 m; struct nfs4_fs_location *loc; @@ -4184,10 +4182,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, } else printk(KERN_WARNING "%s: label too long (%u)!\n", __func__, len); + if (label && label->label) + dprintk("%s: label=%.*s, len=%d, PI=%d, LFS=%d\n", + __func__, label->len, (char *)label->label, + label->len, label->pi, label->lfs); } - if (label && label->label) - dprintk("%s: label=%s, len=%d, PI=%d, LFS=%d\n", __func__, - (char *)label->label, label->len, label->pi, label->lfs); return status; } diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 98b9c1ed366ee8280db2754f98383c045d0e3bf2..17fef6eb490c5ca4fd8222f7e8886ec2c788e58c 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -90,10 +90,10 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos) } } -static inline struct nfs_page * -nfs_page_alloc(void) +static inline struct nfs_page *nfs_page_alloc(void) { - struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL); + struct nfs_page *p = + kmem_cache_zalloc(nfs_page_cachep, nfs_io_gfp_mask()); if (p) INIT_LIST_HEAD(&p->wb_list); return p; @@ -901,7 +901,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, struct nfs_commit_info cinfo; struct nfs_page_array *pg_array = &hdr->page_array; unsigned int pagecount, pageused; - gfp_t gfp_flags = GFP_KERNEL; + gfp_t gfp_flags = nfs_io_gfp_mask(); pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count); pg_array->npages = pagecount; @@ -984,7 +984,7 @@ nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc, desc->pg_mirrors_dynamic = NULL; if (mirror_count == 1) return desc->pg_mirrors_static; - ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_KERNEL); + ret = 
kmalloc_array(mirror_count, sizeof(*ret), nfs_io_gfp_mask()); if (ret != NULL) { for (i = 0; i < mirror_count; i++) nfs_pageio_mirror_init(&ret[i], desc->pg_bsize); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 5370e082aded53187e7779f0832b7a2901e9701e..b3b9eff5d57275a4be84a9e394c67ea581fa90aa 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -92,6 +92,17 @@ find_pnfs_driver(u32 id) return local; } +const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id) +{ + return find_pnfs_driver(id); +} + +void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld) +{ + if (ld) + module_put(ld->owner); +} + void unset_pnfs_layoutdriver(struct nfs_server *nfss) { diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 0212fe32e63aa285f871b80595ef7a339f36114d..11d9ed9addc06ef51d6f00704c0c4080f68b86a2 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -236,6 +236,8 @@ struct pnfs_devicelist { extern int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *); extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *); +extern const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id); +extern void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld); /* nfs4proc.c */ extern size_t max_response_pages(struct nfs_server *server); diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 7b9d701bef016ebdb65ab220bb982a704ddaa95c..a2ad8bb87e2db87468ce554fa0bf6c0cea20dacb 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -419,7 +419,7 @@ static struct nfs_commit_data * pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket, struct nfs_commit_info *cinfo) { - struct nfs_commit_data *data = nfs_commitdata_alloc(false); + struct nfs_commit_data *data = nfs_commitdata_alloc(); if (!data) return NULL; @@ -515,7 +515,11 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages, unsigned int nreq = 0; if (!list_empty(mds_pages)) { - data = nfs_commitdata_alloc(true); + data = nfs_commitdata_alloc(); + if (!data) { + nfs_retry_commit(mds_pages, NULL, cinfo, -1); + return -ENOMEM; + } data->ds_commit_index = -1; list_splice_init(mds_pages, &data->pages); list_add_tail(&data->list, &list); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index bde4c362841f009cdb492d0b5787c14fbc100409..5d07799513a6595434b6ebf5e21c0174efce88f4 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -70,27 +70,17 @@ static mempool_t *nfs_wdata_mempool; static struct kmem_cache *nfs_cdata_cachep; static mempool_t *nfs_commit_mempool; -struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail) +struct nfs_commit_data *nfs_commitdata_alloc(void) { struct nfs_commit_data *p; - if (never_fail) - p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); - else { - /* It is OK to do some reclaim, not no safe to wait - * for anything to be returned to the pool. - * mempool_alloc() cannot handle that particular combination, - * so we need two separate attempts. 
- */ + p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask()); + if (!p) { p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT); - if (!p) - p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO | - __GFP_NOWARN | __GFP_NORETRY); if (!p) return NULL; + memset(p, 0, sizeof(*p)); } - - memset(p, 0, sizeof(*p)); INIT_LIST_HEAD(&p->pages); return p; } @@ -104,9 +94,15 @@ EXPORT_SYMBOL_GPL(nfs_commit_free); static struct nfs_pgio_header *nfs_writehdr_alloc(void) { - struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL); + struct nfs_pgio_header *p; - memset(p, 0, sizeof(*p)); + p = kmem_cache_zalloc(nfs_wdata_cachep, nfs_io_gfp_mask()); + if (!p) { + p = mempool_alloc(nfs_wdata_mempool, GFP_NOWAIT); + if (!p) + return NULL; + memset(p, 0, sizeof(*p)); + } p->rw_mode = FMODE_WRITE; return p; } @@ -314,7 +310,10 @@ static void nfs_mapping_set_error(struct page *page, int error) struct address_space *mapping = page_file_mapping(page); SetPageError(page); - mapping_set_error(mapping, error); + filemap_set_wb_err(mapping, error); + if (mapping->host) + errseq_set(&mapping->host->i_sb->s_wb_err, + error == -ENOSPC ? -ENOSPC : -EIO); nfs_set_pageerror(mapping); } @@ -1797,7 +1796,11 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how, if (list_empty(head)) return 0; - data = nfs_commitdata_alloc(true); + data = nfs_commitdata_alloc(); + if (!data) { + nfs_retry_commit(head, NULL, cinfo, -1); + return -ENOMEM; + } /* Set up the argument struct */ nfs_init_commit(data, head, NULL, cinfo); diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index e5aad1c10ea32dd050a2fedb504f0b33d1fb5c94..acd0898e3866d88d7d44edb7845052207707ea9a 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -641,7 +641,7 @@ nfsd_file_cache_init(void) if (!nfsd_filecache_wq) goto out; - nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE, + nfsd_file_hashtbl = kvcalloc(NFSD_FILE_HASH_SIZE, sizeof(*nfsd_file_hashtbl), GFP_KERNEL); if (!nfsd_file_hashtbl) { pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n"); @@ -708,7 +708,7 @@ nfsd_file_cache_init(void) nfsd_file_slab = NULL; kmem_cache_destroy(nfsd_file_mark_slab); nfsd_file_mark_slab = NULL; - kfree(nfsd_file_hashtbl); + kvfree(nfsd_file_hashtbl); nfsd_file_hashtbl = NULL; destroy_workqueue(nfsd_filecache_wq); nfsd_filecache_wq = NULL; @@ -854,7 +854,7 @@ nfsd_file_cache_shutdown(void) fsnotify_wait_marks_destroyed(); kmem_cache_destroy(nfsd_file_mark_slab); nfsd_file_mark_slab = NULL; - kfree(nfsd_file_hashtbl); + kvfree(nfsd_file_hashtbl); nfsd_file_hashtbl = NULL; destroy_workqueue(nfsd_filecache_wq); nfsd_filecache_wq = NULL; diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index a633044b0dc1f7b9022a627c0debca97df9e9e4d..981a4e4c9a3cf31cff0987c940cf6e6188cb7431 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -183,6 +183,11 @@ nfsd3_proc_write(struct svc_rqst *rqstp) (unsigned long long) argp->offset, argp->stable? 
" stable" : ""); + resp->status = nfserr_fbig; + if (argp->offset > (u64)OFFSET_MAX || + argp->offset + argp->len > (u64)OFFSET_MAX) + return rpc_success; + fh_copy(&resp->fh, &argp->fh); resp->committed = argp->stable; nvecs = svc_fill_write_vector(rqstp, rqstp->rq_arg.pages, diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 00440337efc1fd42f9523a6d14e7315e91ecbc32..7850d141c7621d7a6382d3b2fdca382f05b9456e 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1008,8 +1008,9 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, unsigned long cnt; int nvecs; - if (write->wr_offset >= OFFSET_MAX) - return nfserr_inval; + if (write->wr_offset > (u64)OFFSET_MAX || + write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX) + return nfserr_fbig; cnt = write->wr_buflen; trace_nfsd_write_start(rqstp, &cstate->current_fh, diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 210147960c52ecb59406fb2726e38b0667e1539f..84dd68091f42262ba2eafa95e01296b84666a3c0 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4047,8 +4047,10 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, status = nfserr_clid_inuse; if (client_has_state(old) && !same_creds(&unconf->cl_cred, - &old->cl_cred)) + &old->cl_cred)) { + old = NULL; goto out; + } status = mark_client_expired_locked(old); if (status) { old = NULL; @@ -4605,6 +4607,14 @@ nfsd_break_deleg_cb(struct file_lock *fl) return ret; } +/** + * nfsd_breaker_owns_lease - Check if lease conflict was resolved + * @fl: Lock state to check + * + * Return values: + * %true: Lease conflict was resolved + * %false: Lease conflict was not resolved. + */ static bool nfsd_breaker_owns_lease(struct file_lock *fl) { struct nfs4_delegation *dl = fl->fl_owner; @@ -4612,11 +4622,11 @@ static bool nfsd_breaker_owns_lease(struct file_lock *fl) struct nfs4_client *clp; if (!i_am_nfsd()) - return NULL; + return false; rqst = kthread_data(current); /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */ if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4) - return NULL; + return false; clp = *(rqst->rq_lease_breaker); return dl->dl_stid.sc_client == clp; } diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index a8f954bbde4f5a466b97e67d4a75fa3b7e364dc1..5b09b82a4e5934fc8784fd1ddbc218c0b5554edf 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1247,7 +1247,8 @@ static void nfsdfs_remove_file(struct inode *dir, struct dentry *dentry) clear_ncl(d_inode(dentry)); dget(dentry); ret = simple_unlink(dir, dentry); - d_delete(dentry); + d_drop(dentry); + fsnotify_unlink(dir, dentry); dput(dentry); WARN_ON_ONCE(ret); } @@ -1336,8 +1337,8 @@ void nfsd_client_rmdir(struct dentry *dentry) dget(dentry); ret = simple_rmdir(dir, dentry); WARN_ON_ONCE(ret); + d_drop(dentry); fsnotify_rmdir(dir, dentry); - d_delete(dentry); dput(dentry); inode_unlock(dir); } @@ -1539,20 +1540,20 @@ static int __init init_nfsd(void) retval = create_proc_exports_entry(); if (retval) goto out_free_lockd; - retval = register_filesystem(&nfsd_fs_type); - if (retval) - goto out_free_exports; retval = register_pernet_subsys(&nfsd_net_ops); if (retval < 0) - goto out_free_filesystem; + goto out_free_exports; retval = register_cld_notifier(); + if (retval) + goto out_free_subsys; + retval = register_filesystem(&nfsd_fs_type); if (retval) goto out_free_all; return 0; out_free_all: + unregister_cld_notifier(); +out_free_subsys: unregister_pernet_subsys(&nfsd_net_ops); -out_free_filesystem: - unregister_filesystem(&nfsd_fs_type); out_free_exports: 
remove_proc_entry("fs/nfs/exports", NULL); remove_proc_entry("fs/nfs", NULL); @@ -1569,6 +1570,7 @@ static int __init init_nfsd(void) static void __exit exit_nfsd(void) { + unregister_filesystem(&nfsd_fs_type); unregister_cld_notifier(); unregister_pernet_subsys(&nfsd_net_ops); nfsd_drc_slab_free(); @@ -1578,7 +1580,6 @@ static void __exit exit_nfsd(void) nfsd_lockd_shutdown(); nfsd4_free_slabs(); nfsd4_exit_pnfs(); - unregister_filesystem(&nfsd_fs_type); } MODULE_AUTHOR("Olaf Kirch "); diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 9c9de2b66e64169b93c21a028d3a78c113dabe6f..bbd01e8397f6e53905f4a8ba1515986a440044e0 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -223,7 +223,7 @@ nfsd_proc_write(struct svc_rqst *rqstp) unsigned long cnt = argp->len; unsigned int nvecs; - dprintk("nfsd: WRITE %s %d bytes at %d\n", + dprintk("nfsd: WRITE %s %u bytes at %d\n", SVCFH_fmt(&argp->fh), argp->len, argp->offset); diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h index c8ca73d69ad04f51bdedb325ceb4f43b44757fdb..a952f4a9b2a68012d4ff1e04b7190b946c8a2ce3 100644 --- a/fs/nfsd/trace.h +++ b/fs/nfsd/trace.h @@ -175,14 +175,14 @@ TRACE_EVENT(nfsd_export_update, DECLARE_EVENT_CLASS(nfsd_io_class, TP_PROTO(struct svc_rqst *rqstp, struct svc_fh *fhp, - loff_t offset, - unsigned long len), + u64 offset, + u32 len), TP_ARGS(rqstp, fhp, offset, len), TP_STRUCT__entry( __field(u32, xid) __field(u32, fh_hash) - __field(loff_t, offset) - __field(unsigned long, len) + __field(u64, offset) + __field(u32, len) ), TP_fast_assign( __entry->xid = be32_to_cpu(rqstp->rq_xid); @@ -190,7 +190,7 @@ DECLARE_EVENT_CLASS(nfsd_io_class, __entry->offset = offset; __entry->len = len; ), - TP_printk("xid=0x%08x fh_hash=0x%08x offset=%lld len=%lu", + TP_printk("xid=0x%08x fh_hash=0x%08x offset=%llu len=%u", __entry->xid, __entry->fh_hash, __entry->offset, __entry->len) ) @@ -199,8 +199,8 @@ DECLARE_EVENT_CLASS(nfsd_io_class, DEFINE_EVENT(nfsd_io_class, nfsd_##name, \ TP_PROTO(struct svc_rqst *rqstp, \ struct svc_fh *fhp, \ - loff_t offset, \ - unsigned long len), \ + u64 offset, \ + u32 len), \ TP_ARGS(rqstp, fhp, offset, len)) DEFINE_NFSD_IO_EVENT(read_start); diff --git a/fs/nfsd/xdr.h b/fs/nfsd/xdr.h index 0ff336b0b25f9a3b498516dab19863631e96f088..b8cc6a4b2e0ec6f272cb9f17d8c66f951d3bad53 100644 --- a/fs/nfsd/xdr.h +++ b/fs/nfsd/xdr.h @@ -33,7 +33,7 @@ struct nfsd_readargs { struct nfsd_writeargs { svc_fh fh; __u32 offset; - int len; + __u32 len; struct kvec first; }; diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 086b6bacbad176819cc0950a2f2bc0499eed49ec..18e014fa06480eae000d20fe7c16226b24b116a0 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -366,9 +366,6 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, if (fanotify_is_perm_event(event->mask)) FANOTIFY_PERM(event)->fd = fd; - if (f) - fd_install(fd, f); - /* Event info records order is: dir fid + name, child fid */ if (fanotify_event_dir_fh_len(event)) { info_type = info->name_len ? 
FAN_EVENT_INFO_TYPE_DFID_NAME : @@ -432,6 +429,9 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, count -= ret; } + if (f) + fd_install(fd, f); + return metadata.event_len; out_close_fd: diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index ea18e4a2a691df3a3679dbc56dfbf31596be21b0..cf222c9225d6d5816b7928d26a1c0eecda00f0c4 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -1881,6 +1881,10 @@ int ntfs_read_inode_mount(struct inode *vi) } /* Now allocate memory for the attribute list. */ ni->attr_list_size = (u32)ntfs_attr_size(a); + if (!ni->attr_list_size) { + ntfs_error(sb, "Attr_list_size is zero"); + goto put_err_out; + } ni->attr_list = ntfs_malloc_nofs(ni->attr_list_size); if (!ni->attr_list) { ntfs_error(sb, "Not enough memory to allocate buffer " diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c index 06492f088d6020592bd4fdb8b1a6ecd22b823324..fc36c53b865a7f654faa14872f970f9a2ab2fce7 100644 --- a/fs/ntfs3/fslog.c +++ b/fs/ntfs3/fslog.c @@ -1185,8 +1185,6 @@ static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first, if (!r_page) return -ENOMEM; - memset(info, 0, sizeof(struct restart_info)); - /* Determine which restart area we are looking for. */ if (first) { vbo = 0; @@ -3791,10 +3789,11 @@ int log_replay(struct ntfs_inode *ni, bool *initialized) if (!log) return -ENOMEM; + memset(&rst_info, 0, sizeof(struct restart_info)); + log->ni = ni; log->l_size = l_size; log->one_page_buf = kmalloc(page_size, GFP_NOFS); - if (!log->one_page_buf) { err = -ENOMEM; goto out; @@ -3842,6 +3841,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized) if (rst_info.vbo) goto check_restart_area; + memset(&rst_info2, 0, sizeof(struct restart_info)); err = log_read_rst(log, l_size, false, &rst_info2); /* Determine which restart area to use. 
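+ * Note that log_read_rst() no longer zeroes the restart_info it is
+ * given; both rst_info and rst_info2 are cleared by the memset()
+ * calls above before being filled in and compared.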
*/ diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index e7d04adb6cb874be6fed6096d8eaaed009360300..4f48003e4327138b67d30466649dc44f869ac584 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -1253,26 +1253,23 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh, { struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data; struct journal_head *jh; - int ret = 1; + int ret; if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap)) return 0; - if (!buffer_jbd(bg_bh)) + jh = jbd2_journal_grab_journal_head(bg_bh); + if (!jh) return 1; - jbd_lock_bh_journal_head(bg_bh); - if (buffer_jbd(bg_bh)) { - jh = bh2jh(bg_bh); - spin_lock(&jh->b_state_lock); - bg = (struct ocfs2_group_desc *) jh->b_committed_data; - if (bg) - ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap); - else - ret = 1; - spin_unlock(&jh->b_state_lock); - } - jbd_unlock_bh_journal_head(bg_bh); + spin_lock(&jh->b_state_lock); + bg = (struct ocfs2_group_desc *) jh->b_committed_data; + if (bg) + ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap); + else + ret = 1; + spin_unlock(&jh->b_state_lock); + jbd2_journal_put_journal_head(jh); return ret; } diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 435f82892432c6b33e7502259f3491c451301a64..477ad05a34ea2edf075fb8db225b60f608713b49 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1110,17 +1110,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) goto read_super_error; } - root = d_make_root(inode); - if (!root) { - status = -ENOMEM; - mlog_errno(status); - goto read_super_error; - } - - sb->s_root = root; - - ocfs2_complete_mount_recovery(osb); - osb->osb_dev_kset = kset_create_and_add(sb->s_id, NULL, &ocfs2_kset->kobj); if (!osb->osb_dev_kset) { @@ -1138,6 +1127,17 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) goto read_super_error; } + root = d_make_root(inode); + if (!root) { + status = -ENOMEM; + mlog_errno(status); + goto read_super_error; + } + + sb->s_root = root; + + ocfs2_complete_mount_recovery(osb); + if (ocfs2_mount_local(osb)) snprintf(nodestr, sizeof(nodestr), "local"); else diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c index 538e839590ef5c1d489aea7f6d94a7e9d1be821b..b501dc07f92224474f2cf1a27194e492429a019f 100644 --- a/fs/orangefs/orangefs-bufmap.c +++ b/fs/orangefs/orangefs-bufmap.c @@ -176,7 +176,7 @@ orangefs_bufmap_free(struct orangefs_bufmap *bufmap) { kfree(bufmap->page_array); kfree(bufmap->desc_array); - kfree(bufmap->buffer_index_array); + bitmap_free(bufmap->buffer_index_array); kfree(bufmap); } @@ -226,8 +226,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc) bufmap->desc_size = user_desc->size; bufmap->desc_shift = ilog2(bufmap->desc_size); - bufmap->buffer_index_array = - kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG), GFP_KERNEL); + bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL); if (!bufmap->buffer_index_array) goto out_free_bufmap; @@ -250,7 +249,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc) out_free_desc_array: kfree(bufmap->desc_array); out_free_index_array: - kfree(bufmap->buffer_index_array); + bitmap_free(bufmap->buffer_index_array); out_free_bufmap: kfree(bufmap); out: diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 4fadafd8bdc12927d155fc201efef88903685684..d7a410d8374391c871ec5f099497457275cb216b 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -478,7 +478,7 @@ static int 
ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { int err; - struct inode *realinode = ovl_inode_real(inode); + struct inode *realinode = ovl_inode_realdata(inode); const struct cred *old_cred; if (!realinode->i_op->fiemap) diff --git a/fs/pipe.c b/fs/pipe.c index f5ae4feb512ea62d6d9423bb3c5fda6812f1921d..2c2bacd315322c7ee788ee58b2bdc5c8c92995bd 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -252,7 +252,8 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) */ was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage); for (;;) { - unsigned int head = pipe->head; + /* Read ->head with a barrier vs post_one_notification() */ + unsigned int head = smp_load_acquire(&pipe->head); unsigned int tail = pipe->tail; unsigned int mask = pipe->ring_size - 1; @@ -832,10 +833,8 @@ void free_pipe_info(struct pipe_inode_info *pipe) int i; #ifdef CONFIG_WATCH_QUEUE - if (pipe->watch_queue) { + if (pipe->watch_queue) watch_queue_clear(pipe->watch_queue); - put_watch_queue(pipe->watch_queue); - } #endif (void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0); @@ -845,6 +844,10 @@ void free_pipe_info(struct pipe_inode_info *pipe) if (buf->ops) pipe_buf_release(pipe, buf); } +#ifdef CONFIG_WATCH_QUEUE + if (pipe->watch_queue) + put_watch_queue(pipe->watch_queue); +#endif if (pipe->tmp_page) __free_page(pipe->tmp_page); kfree(pipe->bufs); diff --git a/fs/proc/base.c b/fs/proc/base.c index 2ba1313aa444948a90e7935c49b2d9107e817532..b9052be86e8d56a7aaa83d847b30bd58ded52a7a 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1988,7 +1988,7 @@ void proc_pid_evict_inode(struct proc_inode *ei) put_pid(pid); } -struct inode *proc_pid_make_inode(struct super_block * sb, +struct inode *proc_pid_make_inode(struct super_block *sb, struct task_struct *task, umode_t mode) { struct inode * inode; @@ -2017,11 +2017,6 @@ struct inode *proc_pid_make_inode(struct super_block * sb, /* Let the pid remember us for quick removal */ ei->pid = pid; - if (S_ISDIR(mode)) { - spin_lock(&pid->lock); - hlist_add_head_rcu(&ei->sibling_inodes, &pid->inodes); - spin_unlock(&pid->lock); - } task_dump_owner(task, 0, &inode->i_uid, &inode->i_gid); security_task_to_inode(task, inode); @@ -2034,6 +2029,27 @@ struct inode *proc_pid_make_inode(struct super_block * sb, return NULL; } +static struct inode *proc_pid_make_base_inode(struct super_block *sb, + struct task_struct *task, umode_t mode) +{ + struct inode *inode; + struct proc_inode *ei; + struct pid *pid; + + inode = proc_pid_make_inode(sb, task, mode); + if (!inode) + return NULL; + + /* Let proc_flush_pid find this directory inode */ + ei = PROC_I(inode); + pid = ei->pid; + spin_lock(&pid->lock); + hlist_add_head_rcu(&ei->sibling_inodes, &pid->inodes); + spin_unlock(&pid->lock); + + return inode; +} + int pid_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { @@ -3459,7 +3475,8 @@ static struct dentry *proc_pid_instantiate(struct dentry * dentry, { struct inode *inode; - inode = proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO); + inode = proc_pid_make_base_inode(dentry->d_sb, task, + S_IFDIR | S_IRUGO | S_IXUGO); if (!inode) return ERR_PTR(-ENOENT); @@ -3765,7 +3782,8 @@ static struct dentry *proc_task_instantiate(struct dentry *dentry, struct task_struct *task, const void *ptr) { struct inode *inode; - inode = proc_pid_make_inode(dentry->d_sb, task, S_IFDIR | S_IRUGO | S_IXUGO); + inode = proc_pid_make_base_inode(dentry->d_sb, task, + S_IFDIR | S_IRUGO | S_IXUGO); if (!inode) 
return ERR_PTR(-ENOENT); diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c index ad31ec4ad6270138af7528ad2acc1f8192dee5d7..d82dae133243b1471a603d5261774c97f013b752 100644 --- a/fs/proc/bootconfig.c +++ b/fs/proc/bootconfig.c @@ -32,6 +32,8 @@ static int __init copy_xbc_key_value_list(char *dst, size_t size) int ret = 0; key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL); + if (!key) + return -ENOMEM; xbc_for_each_key_value(leaf, val) { ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX); diff --git a/fs/proc/page.c b/fs/proc/page.c index d00c23d543fe92f95d900501b08cf63d6b588fdd..4c5bef99ec1069e2a5e627efa2cebb12f21a2755 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -220,7 +220,9 @@ u64 stable_page_flags(struct page *page) #ifdef CONFIG_64BIT u |= kpf_copy_bit(k, KPF_ARCH_2, PG_arch_2); #endif +#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64) u |= kpf_copy_bit(k, KPF_POOL, PG_pool); +#endif return u; }; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index dacdd0a466afda0ccc74ab6422381ad99819f52a..7b8a513d9f69e6b66f8f4203fb64cdf15c84034d 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -431,7 +431,8 @@ static void smaps_page_accumulate(struct mem_size_stats *mss, } static void smaps_account(struct mem_size_stats *mss, struct page *page, - bool compound, bool young, bool dirty, bool locked) + bool compound, bool young, bool dirty, bool locked, + bool migration) { int i, nr = compound ? compound_nr(page) : 1; unsigned long size = nr * PAGE_SIZE; @@ -458,8 +459,15 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page, * page_count(page) == 1 guarantees the page is mapped exactly once. * If any subpage of the compound page mapped with PTE it would elevate * page_count(). + * + * The page_mapcount() is called to get a snapshot of the mapcount. + * Without holding the page lock this snapshot can be slightly wrong as + * we cannot always read the mapcount atomically. It is not safe to + * call page_mapcount() even with PTL held if the page is not mapped, + * especially for migration entries. Treat regular migration entries + * as mapcount == 1. 
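+ *
+ * For example, a page that is reached here only via a regular
+ * migration entry is charged below exactly like the exclusively
+ * mapped page_count(page) == 1 case:
+ *
+ *	smaps_page_accumulate(mss, page, size, size << PSS_SHIFT,
+ *			      dirty, locked, true);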
*/ - if (page_count(page) == 1) { + if ((page_count(page) == 1) || migration) { smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty, locked, true); return; @@ -496,6 +504,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, struct vm_area_struct *vma = walk->vma; bool locked = !!(vma->vm_flags & VM_LOCKED); struct page *page = NULL; + bool migration = false; if (pte_present(*pte)) { page = vm_normal_page(vma, addr, *pte); @@ -515,9 +524,10 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, } else { mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT; } - } else if (is_migration_entry(swpent)) + } else if (is_migration_entry(swpent)) { + migration = true; page = migration_entry_to_page(swpent); - else if (is_device_private_entry(swpent)) + } else if (is_device_private_entry(swpent)) page = device_private_entry_to_page(swpent); } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap && pte_none(*pte))) { @@ -531,7 +541,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, if (!page) return; - smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked); + smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), + locked, migration); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -542,6 +553,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, struct vm_area_struct *vma = walk->vma; bool locked = !!(vma->vm_flags & VM_LOCKED); struct page *page = NULL; + bool migration = false; if (pmd_present(*pmd)) { /* FOLL_DUMP will return -EFAULT on huge zero page */ @@ -549,8 +561,10 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) { swp_entry_t entry = pmd_to_swp_entry(*pmd); - if (is_migration_entry(entry)) + if (is_migration_entry(entry)) { + migration = true; page = migration_entry_to_page(entry); + } } if (IS_ERR_OR_NULL(page)) return; @@ -562,7 +576,9 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, /* pass */; else mss->file_thp += HPAGE_PMD_SIZE; - smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked); + + smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), + locked, migration); } #else static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, @@ -1373,6 +1389,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, { u64 frame = 0, flags = 0; struct page *page = NULL; + bool migration = false; if (pte_present(pte)) { if (pm->show_pfn) @@ -1390,8 +1407,10 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, frame = swp_type(entry) | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); flags |= PM_SWAP; - if (is_migration_entry(entry)) + if (is_migration_entry(entry)) { + migration = true; page = migration_entry_to_page(entry); + } if (is_device_private_entry(entry)) page = device_private_entry_to_page(entry); @@ -1399,7 +1418,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, if (page && !PageAnon(page)) flags |= PM_FILE; - if (page && page_mapcount(page) == 1) + if (page && !migration && page_mapcount(page) == 1) flags |= PM_MMAP_EXCLUSIVE; if (vma->vm_flags & VM_SOFTDIRTY) flags |= PM_SOFT_DIRTY; @@ -1415,8 +1434,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, spinlock_t *ptl; pte_t *pte, *orig_pte; int err = 0; - #ifdef CONFIG_TRANSPARENT_HUGEPAGE + bool migration = false; + ptl = pmd_trans_huge_lock(pmdp, vma); if (ptl) { u64 flags = 0, frame = 0; @@ -1451,11 +1471,12 @@ static int 
pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, if (pmd_swp_soft_dirty(pmd)) flags |= PM_SOFT_DIRTY; VM_BUG_ON(!is_pmd_migration_entry(pmd)); + migration = is_migration_entry(entry); page = migration_entry_to_page(entry); } #endif - if (page && page_mapcount(page) == 1) + if (page && !migration && page_mapcount(page) == 1) flags |= PM_MMAP_EXCLUSIVE; for (; addr != end; addr += PAGE_SIZE) { diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index b1ebf7b61732c396f731cb988f5e07addee065b8..ce03c3dbb5c308ea93d2a84491123e279fa31435 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -143,21 +143,22 @@ static void pstore_timer_kick(void) mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms)); } -/* - * Should pstore_dump() wait for a concurrent pstore_dump()? If - * not, the current pstore_dump() will report a failure to dump - * and return. - */ -static bool pstore_cannot_wait(enum kmsg_dump_reason reason) +static bool pstore_cannot_block_path(enum kmsg_dump_reason reason) { - /* In NMI path, pstore shouldn't block regardless of reason. */ + /* + * In case of NMI path, pstore shouldn't be blocked + * regardless of reason. + */ if (in_nmi()) return true; switch (reason) { /* In panic case, other cpus are stopped by smp_send_stop(). */ case KMSG_DUMP_PANIC: - /* Emergency restart shouldn't be blocked. */ + /* + * Emergency restart shouldn't be blocked by spinning on + * pstore_info::buf_lock. + */ case KMSG_DUMP_EMERG: return true; default: @@ -388,21 +389,19 @@ static void pstore_dump(struct kmsg_dumper *dumper, unsigned long total = 0; const char *why; unsigned int part = 1; + unsigned long flags = 0; int ret; why = kmsg_dump_reason_str(reason); - if (down_trylock(&psinfo->buf_lock)) { - /* Failed to acquire lock: give up if we cannot wait. */ - if (pstore_cannot_wait(reason)) { - pr_err("dump skipped in %s path: may corrupt error record\n", - in_nmi() ? "NMI" : why); - return; - } - if (down_interruptible(&psinfo->buf_lock)) { - pr_err("could not grab semaphore?!\n"); + if (pstore_cannot_block_path(reason)) { + if (!spin_trylock_irqsave(&psinfo->buf_lock, flags)) { + pr_err("dump skipped in %s path because of concurrent dump\n", + in_nmi() ? "NMI" : why); return; } + } else { + spin_lock_irqsave(&psinfo->buf_lock, flags); } oopscount++; @@ -464,8 +463,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, total += record.size; part++; } - - up(&psinfo->buf_lock); + spin_unlock_irqrestore(&psinfo->buf_lock, flags); } static struct kmsg_dumper pstore_dumper = { @@ -591,7 +589,7 @@ int pstore_register(struct pstore_info *psi) psi->write_user = pstore_write_user_compat; psinfo = psi; mutex_init(&psinfo->read_mutex); - sema_init(&psinfo->buf_lock, 1); + spin_lock_init(&psinfo->buf_lock); if (psi->flags & PSTORE_FLAGS_DMESG) allocate_buf_for_compression(); diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 4f13734637660056e299d4ed04ceb38fb04cb580..09fb8459bb5cea2ef07ae1f58b935c2fa06250f6 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -692,9 +692,14 @@ int dquot_quota_sync(struct super_block *sb, int type) /* This is not very clever (and fast) but currently I don't know about * any other simple way of getting quota data to disk and we must get * them there for userspace to be visible... 
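+ * Unlike before, failures from ->sync_fs() and sync_blockdev() below
+ * are now returned to the caller instead of being silently ignored.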
*/ - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - sync_blockdev(sb->s_bdev); + if (sb->s_op->sync_fs) { + ret = sb->s_op->sync_fs(sb, 1); + if (ret) + return ret; + } + ret = sync_blockdev(sb->s_bdev); + if (ret) + return ret; /* * Now when everything is written we can discard the pagecache so diff --git a/fs/read_write.c b/fs/read_write.c index 88f445da7515ba864b0e0b37931e34263b0ec91a..d175b5e8d3d3cc917102ab716deebd85b348fecd 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -24,6 +24,8 @@ #include #include +#define CREATE_TRACE_POINTS +#include const struct file_operations generic_ro_fops = { .llseek = generic_file_llseek, @@ -1679,3 +1681,39 @@ int generic_file_rw_checks(struct file *file_in, struct file *file_out) return 0; } + +#ifdef CONFIG_TRACEPOINTS +static void fs_file_read_ctx_init(struct fs_file_read_ctx *ctx, + struct file *filp, loff_t pos) +{ + memset(ctx, 0, sizeof(*ctx)); + ctx->name = file_dentry(filp)->d_name.name; + ctx->f_ctl_mode = filp->f_ctl_mode; + ctx->key = (unsigned long)filp; + ctx->i_size = file_inode(filp)->i_size; + ctx->prev_index = filp->f_ra.prev_pos >> PAGE_SHIFT; + ctx->index = pos >> PAGE_SHIFT; +} + +#define FS_FILE_READ_VERSION 1 +#define FS_FILE_READ_MODE_MASK (FMODE_CTL_RANDOM | FMODE_CTL_WILLNEED) + +void fs_file_read_update_args_by_trace(struct kiocb *iocb) +{ + struct file *filp = iocb->ki_filp; + struct fs_file_read_ctx ctx; + + fs_file_read_ctx_init(&ctx, filp, iocb->ki_pos); + trace_fs_file_read(&ctx, FS_FILE_READ_VERSION); + + if (!ctx.set_f_ctl_mode && !ctx.clr_f_ctl_mode) + return; + + filp->f_ctl_mode |= ctx.set_f_ctl_mode & FS_FILE_READ_MODE_MASK; + filp->f_ctl_mode &= ~(ctx.clr_f_ctl_mode & FS_FILE_READ_MODE_MASK); +} +EXPORT_SYMBOL_GPL(fs_file_read_update_args_by_trace); +#endif + +EXPORT_TRACEPOINT_SYMBOL_GPL(fs_file_read); +EXPORT_TRACEPOINT_SYMBOL_GPL(fs_file_release); diff --git a/fs/select.c b/fs/select.c index 945896d0ac9e7624db36d48169ebc757d4f18adb..5edffee1162c2423821e2282153a5ff1f3c985eb 100644 --- a/fs/select.c +++ b/fs/select.c @@ -458,9 +458,11 @@ static int max_select_fd(unsigned long n, fd_set_bits *fds) return max; } -#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR) -#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR) -#define POLLEX_SET (EPOLLPRI) +#define POLLIN_SET (EPOLLRDNORM | EPOLLRDBAND | EPOLLIN | EPOLLHUP | EPOLLERR |\ + EPOLLNVAL) +#define POLLOUT_SET (EPOLLWRBAND | EPOLLWRNORM | EPOLLOUT | EPOLLERR |\ + EPOLLNVAL) +#define POLLEX_SET (EPOLLPRI | EPOLLNVAL) static inline void wait_key_set(poll_table *wait, unsigned long in, unsigned long out, unsigned long bit, @@ -527,6 +529,7 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) break; if (!(bit & all_bits)) continue; + mask = EPOLLNVAL; f = fdget(i); if (f.file) { wait_key_set(wait, in, out, bit, @@ -534,34 +537,34 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) mask = vfs_poll(f.file, wait); fdput(f); - if ((mask & POLLIN_SET) && (in & bit)) { - res_in |= bit; - retval++; - wait->_qproc = NULL; - } - if ((mask & POLLOUT_SET) && (out & bit)) { - res_out |= bit; - retval++; - wait->_qproc = NULL; - } - if ((mask & POLLEX_SET) && (ex & bit)) { - res_ex |= bit; - retval++; - wait->_qproc = NULL; - } - /* got something, stop busy polling */ - if (retval) { - can_busy_loop = false; - busy_flag = 0; - - /* - * only remember a returned - * POLL_BUSY_LOOP if we asked for it - */ - } else if (busy_flag & mask) - can_busy_loop = true; - } + if ((mask & 
POLLIN_SET) && (in & bit)) { + res_in |= bit; + retval++; + wait->_qproc = NULL; + } + if ((mask & POLLOUT_SET) && (out & bit)) { + res_out |= bit; + retval++; + wait->_qproc = NULL; + } + if ((mask & POLLEX_SET) && (ex & bit)) { + res_ex |= bit; + retval++; + wait->_qproc = NULL; + } + /* got something, stop busy polling */ + if (retval) { + can_busy_loop = false; + busy_flag = 0; + + /* + * only remember a returned + * POLL_BUSY_LOOP if we asked for it + */ + } else if (busy_flag & mask) + can_busy_loop = true; + } if (res_in) *rinp = res_in; diff --git a/fs/super.c b/fs/super.c index 494bfdc6f778b3f5803adb2fc05d0c0b62295a7c..c91368fb4b96c910b686e22b2b5fbfa60f45d713 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1472,8 +1472,8 @@ struct dentry *mount_nodev(struct file_system_type *fs_type, } EXPORT_SYMBOL(mount_nodev); -static int reconfigure_single(struct super_block *s, - int flags, void *data) +int reconfigure_single(struct super_block *s, + int flags, void *data) { struct fs_context *fc; int ret; @@ -1667,11 +1667,9 @@ static void lockdep_sb_freeze_acquire(struct super_block *sb) percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_); } -static void sb_freeze_unlock(struct super_block *sb) +static void sb_freeze_unlock(struct super_block *sb, int level) { - int level; - - for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--) + for (level--; level >= 0; level--) percpu_up_write(sb->s_writers.rw_sem + level); } @@ -1744,7 +1742,14 @@ int freeze_super(struct super_block *sb) sb_wait_write(sb, SB_FREEZE_PAGEFAULT); /* All writers are done so after syncing there won't be dirty data */ - sync_filesystem(sb); + ret = sync_filesystem(sb); + if (ret) { + sb->s_writers.frozen = SB_UNFROZEN; + sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT); + wake_up(&sb->s_writers.wait_unfrozen); + deactivate_locked_super(sb); + return ret; + } /* Now wait for internal filesystem counter */ sb->s_writers.frozen = SB_FREEZE_FS; @@ -1756,7 +1761,7 @@ int freeze_super(struct super_block *sb) printk(KERN_ERR "VFS:Filesystem freeze failed\n"); sb->s_writers.frozen = SB_UNFROZEN; - sb_freeze_unlock(sb); + sb_freeze_unlock(sb, SB_FREEZE_FS); wake_up(&sb->s_writers.wait_unfrozen); deactivate_locked_super(sb); return ret; @@ -1812,7 +1817,7 @@ static int thaw_super_locked(struct super_block *sb) } sb->s_writers.frozen = SB_UNFROZEN; - sb_freeze_unlock(sb); + sb_freeze_unlock(sb, SB_FREEZE_FS); out: wake_up(&sb->s_writers.wait_unfrozen); deactivate_locked_super(sb); diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index ade05887070dd64a04403c27ade76d8b0198a19d..8b7315c22f0d1208e3b897d61c912561642c8b00 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -262,7 +262,6 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) if (!gid_valid(gid)) return -EINVAL; opts->gid = gid; - set_gid(tracefs_mount->mnt_root, gid); break; case Opt_mode: if (match_octal(&args[0], &option)) @@ -289,7 +288,9 @@ static int tracefs_apply_options(struct super_block *sb) inode->i_mode |= opts->mode; inode->i_uid = opts->uid; - inode->i_gid = opts->gid; + + /* Set all the group ids to the mount option */ + set_gid(sb->s_root, opts->gid); return 0; } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 98cbd9cac2469d08a5ab2e139f8369885b1b8652..2dd79fd123ff7b8a654c47608340bc1ea390cd08 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1853,7 +1853,6 @@ static int ubifs_remount_rw(struct ubifs_info *c) kthread_stop(c->bgt); c->bgt = NULL; } - free_wbufs(c); kfree(c->write_reserve_buf); 
c->write_reserve_buf = NULL; vfree(c->ileb_buf); diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 84ed23edebfd379fa5906860758e3fe44a09da18..87a77bf70ee19fcad4f4077b83d62c5741fb8f75 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -77,6 +77,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode) GFP_KERNEL); } if (!iinfo->i_data) { + make_bad_inode(inode); iput(inode); return ERR_PTR(-ENOMEM); } @@ -86,6 +87,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode) dinfo->i_location.partitionReferenceNum, start, &err); if (err) { + make_bad_inode(inode); iput(inode); return ERR_PTR(err); } diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h index e55378640b056122348c2594ae4039c4a5ddcaca..d03e6098ded9f9bf7b0e6548cc4b33d4dbdbe8f7 100644 --- a/fs/xfs/libxfs/xfs_dir2.h +++ b/fs/xfs/libxfs/xfs_dir2.h @@ -47,8 +47,6 @@ extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp, extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_name *name, xfs_ino_t ino, xfs_extlen_t tot); -extern bool xfs_dir2_sf_replace_needblock(struct xfs_inode *dp, - xfs_ino_t inum); extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_name *name, xfs_ino_t inum, xfs_extlen_t tot); diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c index 2463b5d73447244fa559b393ab96017610135c89..8c4f76bba88be1618e0c1267a5e2d2a69de61f23 100644 --- a/fs/xfs/libxfs/xfs_dir2_sf.c +++ b/fs/xfs/libxfs/xfs_dir2_sf.c @@ -1018,7 +1018,7 @@ xfs_dir2_sf_removename( /* * Check whether the sf dir replace operation need more blocks. */ -bool +static bool xfs_dir2_sf_replace_needblock( struct xfs_inode *dp, xfs_ino_t inum) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 5d6535370f87f5be4d74ced4218eae502e5aa20c..a3d5ecccfc2ccf72f20e50eea674f59e60b8e6c3 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -393,17 +393,8 @@ xfs_buf_item_pin( } /* - * This is called to unpin the buffer associated with the buf log - * item which was previously pinned with a call to xfs_buf_item_pin(). - * - * Also drop the reference to the buf item for the current transaction. - * If the XFS_BLI_STALE flag is set and we are the last reference, - * then free up the buf log item and unlock the buffer. - * - * If the remove flag is set we are called from uncommit in the - * forced-shutdown path. If that is true and the reference count on - * the log item is going to drop to zero we need to free the item's - * descriptor in the transaction. + * This is called to unpin the buffer associated with the buf log item which + * was previously pinned with a call to xfs_buf_item_pin(). */ STATIC void xfs_buf_item_unpin( @@ -420,38 +411,35 @@ xfs_buf_item_unpin( trace_xfs_buf_item_unpin(bip); + /* + * Drop the bli ref associated with the pin and grab the hold required + * for the I/O simulation failure in the abort case. We have to do this + * before the pin count drops because the AIL doesn't acquire a bli + * reference. Therefore if the refcount drops to zero, the bli could + * still be AIL resident and the buffer submitted for I/O (and freed on + * completion) at any point before we return. This can be removed once + * the AIL properly holds a reference on the bli. 
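+ *
+ * In other words: drop the bli reference first, take the buffer hold
+ * for the remove case while we still know whether that reference was
+ * the last one, and only then decrement b_pin_count.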
+ */ freed = atomic_dec_and_test(&bip->bli_refcount); - + if (freed && !stale && remove) + xfs_buf_hold(bp); if (atomic_dec_and_test(&bp->b_pin_count)) wake_up_all(&bp->b_waiters); - if (freed && stale) { + /* nothing to do but drop the pin count if the bli is active */ + if (!freed) + return; + + if (stale) { ASSERT(bip->bli_flags & XFS_BLI_STALE); ASSERT(xfs_buf_islocked(bp)); ASSERT(bp->b_flags & XBF_STALE); ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL); + ASSERT(list_empty(&lip->li_trans)); + ASSERT(!bp->b_transp); trace_xfs_buf_item_unpin_stale(bip); - if (remove) { - /* - * If we are in a transaction context, we have to - * remove the log item from the transaction as we are - * about to release our reference to the buffer. If we - * don't, the unlock that occurs later in - * xfs_trans_uncommit() will try to reference the - * buffer which we no longer have a hold on. - */ - if (!list_empty(&lip->li_trans)) - xfs_trans_del_item(lip); - - /* - * Since the transaction no longer refers to the buffer, - * the buffer should no longer refer to the transaction. - */ - bp->b_transp = NULL; - } - /* * If we get called here because of an IO error, we may or may * not have the item on the AIL. xfs_trans_ail_delete() will @@ -468,13 +456,13 @@ xfs_buf_item_unpin( ASSERT(bp->b_log_item == NULL); } xfs_buf_relse(bp); - } else if (freed && remove) { + } else if (remove) { /* * The buffer must be locked and held by the caller to simulate - * an async I/O failure. + * an async I/O failure. We acquired the hold for this case + * before the buffer was unpinned. */ xfs_buf_lock(bp); - xfs_buf_hold(bp); bp->b_flags |= XBF_ASYNC; xfs_buf_ioend_fail(bp); } diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 2fb8cf556f8d74b335b95f6793b4fddec39de5df..80adec66744b119c493919bc3ed7b13c77dcbc69 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -29,6 +29,7 @@ #include #include #include +#include static const struct vm_operations_struct xfs_file_vm_ops; @@ -289,6 +290,7 @@ xfs_file_buffered_aio_read( ssize_t ret; trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos); + fs_file_read_do_trace(iocb); if (iocb->ki_flags & IOCB_NOWAIT) { if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) @@ -1197,6 +1199,7 @@ xfs_file_release( struct inode *inode, struct file *filp) { + trace_fs_file_release(inode, filp); return xfs_release(XFS_I(inode)); } diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 4a3a4f38531aae7e2273e5d025bcf22069e08683..2741dbd22704568900974cb0d116fc2ded585983 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -459,6 +459,11 @@ xfs_fs_goingdown( * consistent. We don't do an unmount here; just shutdown the shop, make sure * that absolutely nothing persistent happens to this filesystem after this * point. + * + * The shutdown state change is atomic, resulting in the first and only the + * first shutdown call processing the shutdown. This means we only shutdown the + * log once as it requires, and we don't spam the logs when multiple concurrent + * shutdowns race to set the shutdown flags. */ void xfs_do_force_shutdown( @@ -467,48 +472,40 @@ xfs_do_force_shutdown( char *fname, int lnnum) { - bool logerror = flags & SHUTDOWN_LOG_IO_ERROR; + int tag; + const char *why; - /* - * No need to duplicate efforts. 
- */ - if (XFS_FORCED_SHUTDOWN(mp) && !logerror) - return; - - /* - * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't - * queue up anybody new on the log reservations, and wakes up - * everybody who's sleeping on log reservations to tell them - * the bad news. - */ - if (xfs_log_force_umount(mp, logerror)) - return; - - if (flags & SHUTDOWN_FORCE_UMOUNT) { - xfs_alert(mp, -"User initiated shutdown received. Shutting down filesystem"); + spin_lock(&mp->m_sb_lock); + if (XFS_FORCED_SHUTDOWN(mp)) { + spin_unlock(&mp->m_sb_lock); return; } + mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; + if (mp->m_sb_bp) + mp->m_sb_bp->b_flags |= XBF_DONE; + spin_unlock(&mp->m_sb_lock); + + if (flags & SHUTDOWN_FORCE_UMOUNT) + xfs_alert(mp, "User initiated shutdown received."); - xfs_notice(mp, -"%s(0x%x) called from line %d of file %s. Return address = "PTR_FMT, - __func__, flags, lnnum, fname, __return_address); - - if (flags & SHUTDOWN_CORRUPT_INCORE) { - xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT, -"Corruption of in-memory data detected. Shutting down filesystem"); - if (XFS_ERRLEVEL_HIGH <= xfs_error_level) - xfs_stack_trace(); - } else if (logerror) { - xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR, - "Log I/O Error Detected. Shutting down filesystem"); + if (xlog_force_shutdown(mp->m_log, flags)) { + tag = XFS_PTAG_SHUTDOWN_LOGERROR; + why = "Log I/O Error"; + } else if (flags & SHUTDOWN_CORRUPT_INCORE) { + tag = XFS_PTAG_SHUTDOWN_CORRUPT; + why = "Corruption of in-memory data"; } else { - xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR, - "I/O Error Detected. Shutting down filesystem"); + tag = XFS_PTAG_SHUTDOWN_IOERROR; + why = "Metadata I/O Error"; } + xfs_alert_tag(mp, tag, +"%s (0x%x) detected at %pS (%s:%d). Shutting down filesystem.", + why, flags, __return_address, fname, lnnum); xfs_alert(mp, "Please unmount the filesystem and rectify the problem(s)"); + if (xfs_error_level >= XFS_ERRLEVEL_HIGH) + xfs_stack_trace(); } /* diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index af54224ebbe728156acee391223a0206365776b8..b72dd3f67ca7bafb0105922a41ee723e1479df95 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -3212,7 +3212,7 @@ xfs_rename( struct xfs_trans *tp; struct xfs_inode *wip = NULL; /* whiteout inode */ struct xfs_inode *inodes[__XFS_SORT_INODES]; - struct xfs_buf *agibp; + int i; int num_inodes = __XFS_SORT_INODES; bool new_parent = (src_dp != target_dp); bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode); @@ -3325,6 +3325,30 @@ xfs_rename( } } + /* + * Lock the AGI buffers we need to handle bumping the nlink of the + * whiteout inode off the unlinked list and to handle dropping the + * nlink of the target inode. Per locking order rules, do this in + * increasing AG order and before directory block allocation tries to + * grab AGFs because we grab AGIs before AGFs. + * + * The (vfs) caller must ensure that if src is a directory then + * target_ip is either null or an empty directory. + */ + for (i = 0; i < num_inodes && inodes[i] != NULL; i++) { + if (inodes[i] == wip || + (inodes[i] == target_ip && + (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) { + struct xfs_buf *bp; + xfs_agnumber_t agno; + + agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino); + error = xfs_read_agi(mp, tp, agno, &bp); + if (error) + goto out_trans_cancel; + } + } + /* * Directory entry creation below may acquire the AGF. 
Remove * the whiteout from the unlinked list first to preserve correct @@ -3377,22 +3401,6 @@ xfs_rename( * In case there is already an entry with the same * name at the destination directory, remove it first. */ - - /* - * Check whether the replace operation will need to allocate - * blocks. This happens when the shortform directory lacks - * space and we have to convert it to a block format directory. - * When more blocks are necessary, we must lock the AGI first - * to preserve locking order (AGI -> AGF). - */ - if (xfs_dir2_sf_replace_needblock(target_dp, src_ip->i_ino)) { - error = xfs_read_agi(mp, tp, - XFS_INO_TO_AGNO(mp, target_ip->i_ino), - &agibp); - if (error) - goto out_trans_cancel; - } - error = xfs_dir_replace(tp, target_dp, target_name, src_ip->i_ino, spaceres); if (error) diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c index c06702c7e64b79612a54888bd57b73912f6efb72..e26aeea3f0c0ca1d948bfb978e74799d9be615d2 100644 --- a/fs/xfs/xfs_iwalk.c +++ b/fs/xfs/xfs_iwalk.c @@ -365,7 +365,7 @@ xfs_iwalk_run_callbacks( /* Delete cursor but remember the last record we cached... */ xfs_iwalk_del_inobt(iwag->tp, curpp, agi_bpp, 0); irec = &iwag->recs[iwag->nr_recs - 1]; - ASSERT(next_agino == irec->ir_startino + XFS_INODES_PER_CHUNK); + ASSERT(next_agino >= irec->ir_startino + XFS_INODES_PER_CHUNK); if (iwag->drop_trans) { xfs_trans_cancel(iwag->tp); diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index b9f2ad4e8345fcaf6f9070abefc352b45e02ed9f..607a101585fc4fbc2f9631646c869d27469900d0 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -41,6 +41,8 @@ xlog_dealloc_log( /* local state machine functions */ STATIC void xlog_state_done_syncing( struct xlog_in_core *iclog); +STATIC void xlog_state_do_callback( + struct xlog *log); STATIC int xlog_state_get_iclog_space( struct xlog *log, @@ -239,7 +241,7 @@ xlog_grant_head_wait( list_add_tail(&tic->t_queue, &head->waiters); do { - if (XLOG_FORCED_SHUTDOWN(log)) + if (xlog_is_shutdown(log)) goto shutdown; xlog_grant_push_ail(log, need_bytes); @@ -253,7 +255,7 @@ xlog_grant_head_wait( trace_xfs_log_grant_wake(log, tic); spin_lock(&head->lock); - if (XLOG_FORCED_SHUTDOWN(log)) + if (xlog_is_shutdown(log)) goto shutdown; } while (xlog_space_left(log, &head->grant) < need_bytes); @@ -291,7 +293,7 @@ xlog_grant_head_check( int free_bytes; int error = 0; - ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); + ASSERT(!xlog_in_recovery(log)); /* * If there are other waiters on the queue then give them a chance at @@ -354,7 +356,7 @@ xfs_log_regrant( int need_bytes; int error = 0; - if (XLOG_FORCED_SHUTDOWN(log)) + if (xlog_is_shutdown(log)) return -EIO; XFS_STATS_INC(mp, xs_try_logspace); @@ -422,7 +424,7 @@ xfs_log_reserve( ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); - if (XLOG_FORCED_SHUTDOWN(log)) + if (xlog_is_shutdown(log)) return -EIO; XFS_STATS_INC(mp, xs_try_logspace); @@ -458,6 +460,42 @@ xfs_log_reserve( return error; } +/* + * Run all the pending iclog callbacks and wake log force waiters and iclog + * space waiters so they can process the newly set shutdown state. We really + * don't care what order we process callbacks here because the log is shut down + * and so state cannot change on disk anymore. + * + * We avoid processing actively referenced iclogs so that we don't run callbacks + * while the iclog owner might still be preparing the iclog for IO submssion. + * These will be caught by xlog_state_iclog_release() and call this function + * again to process any callbacks that may have been added to that iclog. 
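+ *
+ * For example, xlog_state_release_iclog() calls back into this
+ * function when it drops the last reference on an iclog after the
+ * log has been shut down.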
+ */ +static void +xlog_state_shutdown_callbacks( + struct xlog *log) +{ + struct xlog_in_core *iclog; + LIST_HEAD(cb_list); + + spin_lock(&log->l_icloglock); + iclog = log->l_iclog; + do { + if (atomic_read(&iclog->ic_refcnt)) { + /* Reference holder will re-run iclog callbacks. */ + continue; + } + list_splice_init(&iclog->ic_callbacks, &cb_list); + wake_up_all(&iclog->ic_write_wait); + wake_up_all(&iclog->ic_force_wait); + } while ((iclog = iclog->ic_next) != log->l_iclog); + + wake_up_all(&log->l_flush_wait); + spin_unlock(&log->l_icloglock); + + xlog_cil_process_committed(&cb_list); +} + /* * Flush iclog to disk if this is the last reference to the given iclog and the * it is in the WANT_SYNC state. If the caller passes in a non-zero @@ -473,12 +511,11 @@ xlog_state_release_iclog( xfs_lsn_t old_tail_lsn) { xfs_lsn_t tail_lsn; + bool last_ref; + lockdep_assert_held(&log->l_icloglock); trace_xlog_iclog_release(iclog, _RET_IP_); - if (iclog->ic_state == XLOG_STATE_IOERROR) - return -EIO; - /* * Grabbing the current log tail needs to be atomic w.r.t. the writing * of the tail LSN into the iclog so we guarantee that the log tail does @@ -492,7 +529,23 @@ xlog_state_release_iclog( iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; } - if (!atomic_dec_and_test(&iclog->ic_refcnt)) + last_ref = atomic_dec_and_test(&iclog->ic_refcnt); + + if (xlog_is_shutdown(log)) { + /* + * If there are no more references to this iclog, process the + * pending iclog callbacks that were waiting on the release of + * this iclog. + */ + if (last_ref) { + spin_unlock(&log->l_icloglock); + xlog_state_shutdown_callbacks(log); + spin_lock(&log->l_icloglock); + } + return -EIO; + } + + if (!last_ref) return 0; if (iclog->ic_state != XLOG_STATE_WANT_SYNC) { @@ -528,6 +581,7 @@ xfs_log_mount( xfs_daddr_t blk_offset, int num_bblks) { + struct xlog *log; bool fatal = xfs_sb_version_hascrc(&mp->m_sb); int error = 0; int min_logfsbs; @@ -542,11 +596,12 @@ xfs_log_mount( ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); } - mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); - if (IS_ERR(mp->m_log)) { - error = PTR_ERR(mp->m_log); + log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); + if (IS_ERR(log)) { + error = PTR_ERR(log); goto out; } + mp->m_log = log; /* * Validate the given log space and drop a critical message via syslog @@ -611,7 +666,7 @@ xfs_log_mount( xfs_warn(mp, "AIL initialisation failed: error %d", error); goto out_free_log; } - mp->m_log->l_ailp = mp->m_ail; + log->l_ailp = mp->m_ail; /* * skip log recovery on a norecovery mount. pretend it all @@ -623,39 +678,39 @@ xfs_log_mount( if (readonly) mp->m_flags &= ~XFS_MOUNT_RDONLY; - error = xlog_recover(mp->m_log); + error = xlog_recover(log); if (readonly) mp->m_flags |= XFS_MOUNT_RDONLY; if (error) { xfs_warn(mp, "log mount/recovery failed: error %d", error); - xlog_recover_cancel(mp->m_log); + xlog_recover_cancel(log); goto out_destroy_ail; } } - error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj, + error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj, "log"); if (error) goto out_destroy_ail; /* Normal transactions can now occur */ - mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY; + clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); /* * Now the log has been fully initialised and we know were our * space grant counters are, we can initialise the permanent ticket * needed for delayed logging to work. 
*/ - xlog_cil_init_post_recovery(mp->m_log); + xlog_cil_init_post_recovery(log); return 0; out_destroy_ail: xfs_trans_ail_destroy(mp); out_free_log: - xlog_dealloc_log(mp->m_log); + xlog_dealloc_log(log); out: return error; } @@ -674,9 +729,9 @@ int xfs_log_mount_finish( struct xfs_mount *mp) { - int error = 0; - bool readonly = (mp->m_flags & XFS_MOUNT_RDONLY); - bool recovered = mp->m_log->l_flags & XLOG_RECOVERY_NEEDED; + struct xlog *log = mp->m_log; + bool readonly = (mp->m_flags & XFS_MOUNT_RDONLY); + int error = 0; if (mp->m_flags & XFS_MOUNT_NORECOVERY) { ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); @@ -707,7 +762,8 @@ xfs_log_mount_finish( * mount failure occurs. */ mp->m_super->s_flags |= SB_ACTIVE; - error = xlog_recover_finish(mp->m_log); + if (xlog_recovery_needed(log)) + error = xlog_recover_finish(log); if (!error) xfs_log_work_queue(mp); mp->m_super->s_flags &= ~SB_ACTIVE; @@ -722,17 +778,24 @@ xfs_log_mount_finish( * Don't push in the error case because the AIL may have pending intents * that aren't removed until recovery is cancelled. */ - if (!error && recovered) { - xfs_log_force(mp, XFS_LOG_SYNC); - xfs_ail_push_all_sync(mp->m_ail); + if (xlog_recovery_needed(log)) { + if (!error) { + xfs_log_force(mp, XFS_LOG_SYNC); + xfs_ail_push_all_sync(mp->m_ail); + } + xfs_notice(mp, "Ending recovery (logdev: %s)", + mp->m_logname ? mp->m_logname : "internal"); + } else { + xfs_info(mp, "Ending clean mount"); } xfs_wait_buftarg(mp->m_ddev_targp); + clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate); if (readonly) mp->m_flags |= XFS_MOUNT_RDONLY; /* Make sure the log is dead if we're returning failure. */ - ASSERT(!error || (mp->m_log->l_flags & XLOG_IO_ERROR)); + ASSERT(!error || xlog_is_shutdown(log)); return error; } @@ -778,7 +841,7 @@ xlog_wait_on_iclog( struct xlog *log = iclog->ic_log; trace_xlog_iclog_wait_on(iclog, _RET_IP_); - if (!XLOG_FORCED_SHUTDOWN(log) && + if (!xlog_is_shutdown(log) && iclog->ic_state != XLOG_STATE_ACTIVE && iclog->ic_state != XLOG_STATE_DIRTY) { XFS_STATS_INC(log->l_mp, xs_log_force_sleep); @@ -787,7 +850,7 @@ xlog_wait_on_iclog( spin_unlock(&log->l_icloglock); } - if (XLOG_FORCED_SHUTDOWN(log)) + if (xlog_is_shutdown(log)) return -EIO; return 0; } @@ -818,7 +881,7 @@ xlog_write_unmount_record( /* account for space used by record data */ ticket->t_curr_res -= sizeof(ulf); - return xlog_write(log, &vec, ticket, NULL, NULL, XLOG_UNMOUNT_TRANS); + return xlog_write(log, NULL, &vec, ticket, XLOG_UNMOUNT_TRANS); } /* @@ -841,7 +904,7 @@ xlog_unmount_write( error = xlog_write_unmount_record(log, tic); /* * At this point, we're umounting anyway, so there's no point in - * transitioning log state to IOERROR. Just continue... + * transitioning log state to shutdown. Just continue... 
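
The mount and recovery paths above stop open-coding l_flags masks and instead flip individual bits (XLOG_ACTIVE_RECOVERY, XLOG_RECOVERY_NEEDED) with atomic bitops. A rough userspace model of that l_opstate idiom, using C11 atomics rather than the kernel's set_bit()/clear_bit()/test_bit(); treat it as a sketch of the concept only:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { ACTIVE_RECOVERY, RECOVERY_NEEDED, IO_ERROR, TAIL_WARN };

    static atomic_ulong opstate;

    static inline void op_set(int bit)
    {
            atomic_fetch_or(&opstate, 1UL << bit);
    }

    static inline void op_clear(int bit)
    {
            atomic_fetch_and(&opstate, ~(1UL << bit));
    }

    static inline bool op_test(int bit)
    {
            return atomic_load(&opstate) & (1UL << bit);
    }

Each flag can now be changed without racing against updates to its neighbours, which is what lets the series stop serialising these state changes on broader locks.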
*/ out_err: if (error) @@ -895,7 +958,7 @@ xfs_log_unmount_write( xfs_log_force(mp, XFS_LOG_SYNC); - if (XLOG_FORCED_SHUTDOWN(log)) + if (xlog_is_shutdown(log)) return; /* @@ -995,11 +1058,11 @@ xfs_log_space_wake( struct xlog *log = mp->m_log; int free_bytes; - if (XLOG_FORCED_SHUTDOWN(log)) + if (xlog_is_shutdown(log)) return; if (!list_empty_careful(&log->l_write_head.waiters)) { - ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); + ASSERT(!xlog_in_recovery(log)); spin_lock(&log->l_write_head.lock); free_bytes = xlog_space_left(log, &log->l_write_head.grant); @@ -1008,7 +1071,7 @@ xfs_log_space_wake( } if (!list_empty_careful(&log->l_reserve_head.waiters)) { - ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); + ASSERT(!xlog_in_recovery(log)); spin_lock(&log->l_reserve_head.lock); free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); @@ -1124,16 +1187,18 @@ xlog_assign_tail_lsn( * wrap the tail, we should blow up. Rather than catch this case here, * we depend on other ASSERTions in other parts of the code. XXXmiken * - * This code also handles the case where the reservation head is behind - * the tail. The details of this case are described below, but the end - * result is that we return the size of the log as the amount of space left. + * If reservation head is behind the tail, we have a problem. Warn about it, + * but then treat it as if the log is empty. + * + * If the log is shut down, the head and tail may be invalid or out of whack, so + * shortcut invalidity asserts in this case so that we don't trigger them + * falsely. */ STATIC int xlog_space_left( struct xlog *log, atomic64_t *head) { - int free_bytes; int tail_bytes; int tail_cycle; int head_cycle; @@ -1143,29 +1208,30 @@ xlog_space_left( xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); tail_bytes = BBTOB(tail_bytes); if (tail_cycle == head_cycle && head_bytes >= tail_bytes) - free_bytes = log->l_logsize - (head_bytes - tail_bytes); - else if (tail_cycle + 1 < head_cycle) + return log->l_logsize - (head_bytes - tail_bytes); + if (tail_cycle + 1 < head_cycle) return 0; - else if (tail_cycle < head_cycle) { + + /* Ignore potential inconsistency when shutdown. */ + if (xlog_is_shutdown(log)) + return log->l_logsize; + + if (tail_cycle < head_cycle) { ASSERT(tail_cycle == (head_cycle - 1)); - free_bytes = tail_bytes - head_bytes; - } else { - /* - * The reservation head is behind the tail. - * In this case we just want to return the size of the - * log as the amount of space left. - */ - xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); - xfs_alert(log->l_mp, - " tail_cycle = %d, tail_bytes = %d", - tail_cycle, tail_bytes); - xfs_alert(log->l_mp, - " GH cycle = %d, GH bytes = %d", - head_cycle, head_bytes); - ASSERT(0); - free_bytes = log->l_logsize; + return tail_bytes - head_bytes; } - return free_bytes; + + /* + * The reservation head is behind the tail. In this case we just want to + * return the size of the log as the amount of space left. 
+ */ + xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); + xfs_alert(log->l_mp, " tail_cycle = %d, tail_bytes = %d", + tail_cycle, tail_bytes); + xfs_alert(log->l_mp, " GH cycle = %d, GH bytes = %d", + head_cycle, head_bytes); + ASSERT(0); + return log->l_logsize; } @@ -1311,7 +1377,7 @@ xlog_alloc_log( log->l_logBBstart = blk_offset; log->l_logBBsize = num_bblks; log->l_covered_state = XLOG_STATE_COVER_IDLE; - log->l_flags |= XLOG_ACTIVE_RECOVERY; + set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); log->l_prev_block = -1; @@ -1443,37 +1509,6 @@ xlog_alloc_log( return ERR_PTR(error); } /* xlog_alloc_log */ -/* - * Write out the commit record of a transaction associated with the given - * ticket to close off a running log write. Return the lsn of the commit record. - */ -int -xlog_commit_record( - struct xlog *log, - struct xlog_ticket *ticket, - struct xlog_in_core **iclog, - xfs_lsn_t *lsn) -{ - struct xfs_log_iovec reg = { - .i_addr = NULL, - .i_len = 0, - .i_type = XLOG_REG_TYPE_COMMIT, - }; - struct xfs_log_vec vec = { - .lv_niovecs = 1, - .lv_iovecp = ®, - }; - int error; - - if (XLOG_FORCED_SHUTDOWN(log)) - return -EIO; - - error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS); - if (error) - xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); - return error; -} - /* * Compute the LSN that we'd need to push the log tail towards in order to have * (a) enough on-disk log space to log the number of bytes specified, (b) at @@ -1546,7 +1581,7 @@ xlog_grant_push_ail( xfs_lsn_t threshold_lsn; threshold_lsn = xlog_grant_push_threshold(log, need_bytes); - if (threshold_lsn == NULLCOMMITLSN || XLOG_FORCED_SHUTDOWN(log)) + if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log)) return; /* @@ -1688,7 +1723,7 @@ xlog_write_iclog( * across the log IO to archieve that. */ down(&iclog->ic_sema); - if (unlikely(iclog->ic_state == XLOG_STATE_IOERROR)) { + if (xlog_is_shutdown(log)) { /* * It would seem logical to return EIO here, but we rely on * the log state machine to propagate I/O errors instead of @@ -2197,8 +2232,7 @@ xlog_write_copy_finish( int *data_cnt, int *partial_copy, int *partial_copy_len, - int log_offset, - struct xlog_in_core **commit_iclog) + int log_offset) { int error; @@ -2217,27 +2251,20 @@ xlog_write_copy_finish( *partial_copy = 0; *partial_copy_len = 0; - if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { - /* no more space in this iclog - push it. */ - spin_lock(&log->l_icloglock); - xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); - *record_cnt = 0; - *data_cnt = 0; - - if (iclog->ic_state == XLOG_STATE_ACTIVE) - xlog_state_switch_iclogs(log, iclog, 0); - else - ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC || - iclog->ic_state == XLOG_STATE_IOERROR); - if (!commit_iclog) - goto release_iclog; - spin_unlock(&log->l_icloglock); - ASSERT(flags & XLOG_COMMIT_TRANS); - *commit_iclog = iclog; - } + if (iclog->ic_size - log_offset > sizeof(xlog_op_header_t)) + return 0; - return 0; + /* no more space in this iclog - push it. 
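
The restructured xlog_space_left() above is easier to audit once the circular-log arithmetic is isolated from the locking, the BBTOB conversions and the alert plumbing. A stand-alone rendering of the same cycle/offset math; this is a hypothetical helper for illustration, not the kernel function:

    /*
     * Free bytes in a circular log of 'size' bytes.  Head and tail are
     * (cycle, bytes) pairs; the head may be at most one cycle ahead of
     * the tail.
     */
    static int log_space_left(int size, int head_cycle, int head_bytes,
                              int tail_cycle, int tail_bytes)
    {
            if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
                    return size - (head_bytes - tail_bytes); /* same lap */
            if (tail_cycle + 1 < head_cycle)
                    return 0;       /* head lapped the tail: log is full */
            if (tail_cycle < head_cycle)    /* head wrapped, tail has not */
                    return tail_bytes - head_bytes;
            return size;    /* head behind tail: corrupt, report empty */
    }

The early returns map one-to-one onto the rewritten kernel function, including the final "head behind tail" case that the patch keeps reporting as a full-size (empty) log after complaining.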
*/ + spin_lock(&log->l_icloglock); + xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); + *record_cnt = 0; + *data_cnt = 0; + if (iclog->ic_state == XLOG_STATE_ACTIVE) + xlog_state_switch_iclogs(log, iclog, 0); + else + ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC || + xlog_is_shutdown(log)); release_iclog: error = xlog_state_release_iclog(log, iclog, 0); spin_unlock(&log->l_icloglock); @@ -2287,10 +2314,9 @@ xlog_write_copy_finish( int xlog_write( struct xlog *log, + struct xfs_cil_ctx *ctx, struct xfs_log_vec *log_vector, struct xlog_ticket *ticket, - xfs_lsn_t *start_lsn, - struct xlog_in_core **commit_iclog, uint optype) { struct xlog_in_core *iclog = NULL; @@ -2320,8 +2346,6 @@ xlog_write( } len = xlog_write_calc_vec_length(ticket, log_vector, optype); - if (start_lsn) - *start_lsn = 0; while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { void *ptr; int log_offset; @@ -2334,9 +2358,15 @@ xlog_write( ASSERT(log_offset <= iclog->ic_size - 1); ptr = iclog->ic_datap + log_offset; - /* Start_lsn is the first lsn written to. */ - if (start_lsn && !*start_lsn) - *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); + /* + * If we have a context pointer, pass it the first iclog we are + * writing to so it can record state needed for iclog write + * ordering. + */ + if (ctx) { + xlog_cil_set_ctx_write_state(ctx, iclog); + ctx = NULL; + } /* * This loop writes out as many regions as can fit in the amount @@ -2415,8 +2445,7 @@ xlog_write( &record_cnt, &data_cnt, &partial_copy, &partial_copy_len, - log_offset, - commit_iclog); + log_offset); if (error) return error; @@ -2454,12 +2483,7 @@ xlog_write( spin_lock(&log->l_icloglock); xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); - if (commit_iclog) { - ASSERT(optype & XLOG_COMMIT_TRANS); - *commit_iclog = iclog; - } else { - error = xlog_state_release_iclog(log, iclog, 0); - } + error = xlog_state_release_iclog(log, iclog, 0); spin_unlock(&log->l_icloglock); return error; @@ -2644,8 +2668,7 @@ xlog_state_set_callback( static bool xlog_state_iodone_process_iclog( struct xlog *log, - struct xlog_in_core *iclog, - bool *ioerror) + struct xlog_in_core *iclog) { xfs_lsn_t lowest_lsn; xfs_lsn_t header_lsn; @@ -2657,15 +2680,6 @@ xlog_state_iodone_process_iclog( * Skip all iclogs in the ACTIVE & DIRTY states: */ return false; - case XLOG_STATE_IOERROR: - /* - * Between marking a filesystem SHUTDOWN and stopping the log, - * we do flush all iclogs to disk (if there wasn't a log I/O - * error). So, we do want things to go smoothly in case of just - * a SHUTDOWN w/o a LOG_IO_ERROR. - */ - *ioerror = true; - return false; case XLOG_STATE_DONE_SYNC: /* * Now that we have an iclog that is in the DONE_SYNC state, do @@ -2689,72 +2703,75 @@ xlog_state_iodone_process_iclog( } } -STATIC void -xlog_state_do_callback( +/* + * Loop over all the iclogs, running attached callbacks on them. Return true if + * we ran any callbacks, indicating that we dropped the icloglock. We don't need + * to handle transient shutdown state here at all because + * xlog_state_shutdown_callbacks() will be run to do the necessary shutdown + * cleanup of the callbacks. 
+ */ +static bool +xlog_state_do_iclog_callbacks( struct xlog *log) + __releases(&log->l_icloglock) + __acquires(&log->l_icloglock) { - struct xlog_in_core *iclog; - struct xlog_in_core *first_iclog; - bool cycled_icloglock; - bool ioerror; - int flushcnt = 0; - int repeats = 0; + struct xlog_in_core *first_iclog = log->l_iclog; + struct xlog_in_core *iclog = first_iclog; + bool ran_callback = false; - spin_lock(&log->l_icloglock); do { - /* - * Scan all iclogs starting with the one pointed to by the - * log. Reset this starting point each time the log is - * unlocked (during callbacks). - * - * Keep looping through iclogs until one full pass is made - * without running any callbacks. - */ - first_iclog = log->l_iclog; - iclog = log->l_iclog; - cycled_icloglock = false; - ioerror = false; - repeats++; + LIST_HEAD(cb_list); - do { - LIST_HEAD(cb_list); + if (xlog_state_iodone_process_iclog(log, iclog)) + break; + if (iclog->ic_state != XLOG_STATE_CALLBACK) { + iclog = iclog->ic_next; + continue; + } + list_splice_init(&iclog->ic_callbacks, &cb_list); + spin_unlock(&log->l_icloglock); - if (xlog_state_iodone_process_iclog(log, iclog, - &ioerror)) - break; + trace_xlog_iclog_callbacks_start(iclog, _RET_IP_); + xlog_cil_process_committed(&cb_list); + trace_xlog_iclog_callbacks_done(iclog, _RET_IP_); + ran_callback = true; - if (iclog->ic_state != XLOG_STATE_CALLBACK && - iclog->ic_state != XLOG_STATE_IOERROR) { - iclog = iclog->ic_next; - continue; - } - list_splice_init(&iclog->ic_callbacks, &cb_list); - spin_unlock(&log->l_icloglock); + spin_lock(&log->l_icloglock); + xlog_state_clean_iclog(log, iclog); + iclog = iclog->ic_next; + } while (iclog != first_iclog); - trace_xlog_iclog_callbacks_start(iclog, _RET_IP_); - xlog_cil_process_committed(&cb_list); - trace_xlog_iclog_callbacks_done(iclog, _RET_IP_); - cycled_icloglock = true; + return ran_callback; +} - spin_lock(&log->l_icloglock); - if (XLOG_FORCED_SHUTDOWN(log)) - wake_up_all(&iclog->ic_force_wait); - else - xlog_state_clean_iclog(log, iclog); - iclog = iclog->ic_next; - } while (first_iclog != iclog); - if (repeats > 5000) { +/* + * Loop running iclog completion callbacks until there are no more iclogs in a + * state that can run callbacks. + */ +STATIC void +xlog_state_do_callback( + struct xlog *log) +{ + int flushcnt = 0; + int repeats = 0; + + spin_lock(&log->l_icloglock); + while (xlog_state_do_iclog_callbacks(log)) { + if (xlog_is_shutdown(log)) + break; + + if (++repeats > 5000) { flushcnt += repeats; repeats = 0; xfs_warn(log->l_mp, "%s: possible infinite loop (%d iterations)", __func__, flushcnt); } - } while (!ioerror && cycled_icloglock); + } - if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE || - log->l_iclog->ic_state == XLOG_STATE_IOERROR) + if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE) wake_up_all(&log->l_flush_wait); spin_unlock(&log->l_icloglock); @@ -2764,13 +2781,6 @@ xlog_state_do_callback( /* * Finish transitioning this iclog to the dirty state. * - * Make sure that we completely execute this routine only when this is - * the last call to the iclog. There is a good chance that iclog flushes, - * when we reach the end of the physical log, get turned into 2 separate - * calls to bwrite. Hence, one iclog flush could generate two calls to this - * routine. By using the reference count bwritecnt, we guarantee that only - * the second completion goes through. - * * Callbacks could take time, so they are done outside the scope of the * global state machine log lock. 
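
Both xlog_state_shutdown_callbacks() earlier and the xlog_state_do_iclog_callbacks() loop above follow the same shape: detach the whole callback list while holding l_icloglock, then invoke the callbacks with the lock dropped so they are free to block or take other locks. The pattern in generic, self-contained C, with hypothetical types and a pthread mutex in place of the spinlock:

    #include <pthread.h>
    #include <stddef.h>

    struct cb {
            struct cb *next;
            void (*fn)(struct cb *);
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct cb *pending;

    static void run_pending_callbacks(void)
    {
            struct cb *list;

            pthread_mutex_lock(&list_lock);
            list = pending;         /* splice the list out under the lock */
            pending = NULL;
            pthread_mutex_unlock(&list_lock);

            while (list) {          /* run callbacks with the lock dropped */
                    struct cb *next = list->next;

                    list->fn(list);
                    list = next;
            }
    }

Because the list is emptied atomically, new arrivals queue onto a fresh list and are picked up by the next pass, which is exactly why the wrapper loop above must rescan until a pass runs nothing.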
*/ @@ -2789,7 +2799,7 @@ xlog_state_done_syncing( * split log writes, on the second, we shut down the file system and * no iclogs should ever be attempted to be written to disk again. */ - if (!XLOG_FORCED_SHUTDOWN(log)) { + if (!xlog_is_shutdown(log)) { ASSERT(iclog->ic_state == XLOG_STATE_SYNCING); iclog->ic_state = XLOG_STATE_DONE_SYNC; } @@ -2837,7 +2847,7 @@ xlog_state_get_iclog_space( restart: spin_lock(&log->l_icloglock); - if (XLOG_FORCED_SHUTDOWN(log)) { + if (xlog_is_shutdown(log)) { spin_unlock(&log->l_icloglock); return -EIO; } @@ -3130,10 +3140,10 @@ xfs_log_force( xlog_cil_force(log); spin_lock(&log->l_icloglock); - iclog = log->l_iclog; - if (iclog->ic_state == XLOG_STATE_IOERROR) + if (xlog_is_shutdown(log)) goto out_error; + iclog = log->l_iclog; trace_xlog_iclog_force(iclog, _RET_IP_); if (iclog->ic_state == XLOG_STATE_DIRTY || @@ -3213,10 +3223,10 @@ xlog_force_lsn( bool completed; spin_lock(&log->l_icloglock); - iclog = log->l_iclog; - if (iclog->ic_state == XLOG_STATE_IOERROR) + if (xlog_is_shutdown(log)) goto out_error; + iclog = log->l_iclog; while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { trace_xlog_iclog_force_lsn(iclog, _RET_IP_); iclog = iclog->ic_next; @@ -3521,17 +3531,15 @@ xlog_verify_grant_tail( xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); if (tail_cycle != cycle) { if (cycle - 1 != tail_cycle && - !(log->l_flags & XLOG_TAIL_WARN)) { + !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) { xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, "%s: cycle - 1 != tail_cycle", __func__); - log->l_flags |= XLOG_TAIL_WARN; } if (space > BBTOB(tail_blocks) && - !(log->l_flags & XLOG_TAIL_WARN)) { + !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) { xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, "%s: space > BBTOB(tail_blocks)", __func__); - log->l_flags |= XLOG_TAIL_WARN; } } } @@ -3667,105 +3675,66 @@ xlog_verify_iclog( #endif /* - * Mark all iclogs IOERROR. l_icloglock is held by the caller. - */ -STATIC int -xlog_state_ioerror( - struct xlog *log) -{ - xlog_in_core_t *iclog, *ic; - - iclog = log->l_iclog; - if (iclog->ic_state != XLOG_STATE_IOERROR) { - /* - * Mark all the incore logs IOERROR. - * From now on, no log flushes will result. - */ - ic = iclog; - do { - ic->ic_state = XLOG_STATE_IOERROR; - ic = ic->ic_next; - } while (ic != iclog); - return 0; - } - /* - * Return non-zero, if state transition has already happened. - */ - return 1; -} - -/* - * This is called from xfs_force_shutdown, when we're forcibly - * shutting down the filesystem, typically because of an IO error. + * Perform a forced shutdown on the log. This should be called once and once + * only by the high level filesystem shutdown code to shut the log subsystem + * down cleanly. + * * Our main objectives here are to make sure that: - * a. if !logerror, flush the logs to disk. Anything modified - * after this is ignored. - * b. the filesystem gets marked 'SHUTDOWN' for all interested - * parties to find out, 'atomically'. - * c. those who're sleeping on log reservations, pinned objects and - * other resources get woken up, and be told the bad news. - * d. nothing new gets queued up after (b) and (c) are done. + * a. if the shutdown was not due to a log IO error, flush the logs to + * disk. Anything modified after this is ignored. + * b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested + * parties to find out. Nothing new gets queued after this is done. + * c. Tasks sleeping on log reservations, pinned objects and + * other resources get woken up. 
* - * Note: for the !logerror case we need to flush the regions held in memory out - * to disk first. This needs to be done before the log is marked as shutdown, - * otherwise the iclog writes will fail. + * Return true if the shutdown cause was a log IO error and we actually shut the + * log down. */ -int -xfs_log_force_umount( - struct xfs_mount *mp, - int logerror) +bool +xlog_force_shutdown( + struct xlog *log, + int shutdown_flags) { - struct xlog *log; - int retval; - - log = mp->m_log; + bool log_error = (shutdown_flags & SHUTDOWN_LOG_IO_ERROR); /* - * If this happens during log recovery, don't worry about - * locking; the log isn't open for business yet. + * If this happens during log recovery then we aren't using the runtime + * log mechanisms yet so there's nothing to shut down. */ - if (!log || - log->l_flags & XLOG_ACTIVE_RECOVERY) { - mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; - if (mp->m_sb_bp) - mp->m_sb_bp->b_flags |= XBF_DONE; - return 0; - } + if (!log || xlog_in_recovery(log)) + return false; - /* - * Somebody could've already done the hard work for us. - * No need to get locks for this. - */ - if (logerror && log->l_iclog->ic_state == XLOG_STATE_IOERROR) { - ASSERT(XLOG_FORCED_SHUTDOWN(log)); - return 1; - } + ASSERT(!xlog_is_shutdown(log)); /* * Flush all the completed transactions to disk before marking the log - * being shut down. We need to do it in this order to ensure that - * completed operations are safely on disk before we shut down, and that - * we don't have to issue any buffer IO after the shutdown flags are set - * to guarantee this. + * being shut down. We need to do this first as shutting down the log + * before the force will prevent the log force from flushing the iclogs + * to disk. + * + * Re-entry due to a log IO error shutdown during the log force is + * prevented by the atomicity of higher level shutdown code. */ - if (!logerror) - xfs_log_force(mp, XFS_LOG_SYNC); + if (!log_error) + xfs_log_force(log->l_mp, XFS_LOG_SYNC); /* - * mark the filesystem and the as in a shutdown state and wake - * everybody up to tell them the bad news. + * Atomically set the shutdown state. If the shutdown state is already + * set, then someone else is performing the shutdown and so we are done + * here. This should never happen because we should only ever get called + * once by the first shutdown caller. + * + * Many of the log state machine transitions assume that shutdown state + * cannot change once they hold the log->l_icloglock. Hence we need to + * hold that lock here, even though we use the atomic test_and_set_bit() + * operation to set the shutdown state. */ spin_lock(&log->l_icloglock); - mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; - if (mp->m_sb_bp) - mp->m_sb_bp->b_flags |= XBF_DONE; - - /* - * Mark the log and the iclogs with IO error flags to prevent any - * further log IO from being issued or completed. - */ - log->l_flags |= XLOG_IO_ERROR; - retval = xlog_state_ioerror(log); + if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) { + spin_unlock(&log->l_icloglock); + ASSERT(0); + return false; + } spin_unlock(&log->l_icloglock); /* @@ -3785,12 +3754,12 @@ xfs_log_force_umount( * avoid races.
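
The heart of the new xlog_force_shutdown() above is that exactly one caller may win the XLOG_IO_ERROR transition; any other caller backs off. Stripped of the icloglock interaction, the gate reduces to an atomic test-and-set. A minimal C11 sketch of that single-winner idiom, not the kernel primitive:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_flag shutdown_won = ATOMIC_FLAG_INIT;

    /* True only for the single caller that performs the shutdown work. */
    static bool enter_shutdown(void)
    {
            /* test_and_set returns the previous value: false = we won. */
            return !atomic_flag_test_and_set(&shutdown_won);
    }

    /*
     * Usage:
     *      if (!enter_shutdown())
     *              return false;   // someone else already shut us down
     *      wake_all_waiters();
     */

The kernel version still takes l_icloglock around the test_and_set_bit() because, as the comment in the hunk says, the iclog state machine assumes shutdown state is stable while that lock is held.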
*/ spin_lock(&log->l_cilp->xc_push_lock); + wake_up_all(&log->l_cilp->xc_start_wait); wake_up_all(&log->l_cilp->xc_commit_wait); spin_unlock(&log->l_cilp->xc_push_lock); - xlog_state_do_callback(log); + xlog_state_shutdown_callbacks(log); - /* return non-zero if log IOERROR transition had already happened */ - return retval; + return log_error; } STATIC int @@ -3853,12 +3822,3 @@ xfs_log_check_lsn( return valid; } - -bool -xfs_log_in_recovery( - struct xfs_mount *mp) -{ - struct xlog *log = mp->m_log; - - return log->l_flags & XLOG_ACTIVE_RECOVERY; -} diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index 66bd673344a8eacbe0c7bd5188a1e56cc3f35ab6..764e054464b8f5edc89c7e5ec043f69b695f758c 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h @@ -126,7 +126,6 @@ int xfs_log_reserve(struct xfs_mount *mp, bool permanent); int xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic); void xfs_log_unmount(struct xfs_mount *mp); -int xfs_log_force_umount(struct xfs_mount *mp, int logerror); struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket); void xfs_log_ticket_put(struct xlog_ticket *ticket); @@ -137,8 +136,8 @@ bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip); void xfs_log_work_queue(struct xfs_mount *mp); void xfs_log_quiesce(struct xfs_mount *mp); bool xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t); -bool xfs_log_in_recovery(struct xfs_mount *); xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes); +bool xlog_force_shutdown(struct xlog *log, int shutdown_flags); #endif /* __XFS_LOG_H__ */ diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index d17e8b7e7fc6e6d5196391bfdef5f854db82b63a..ad98bb8956862f8de7393d2d6a1a30f197539783 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -612,7 +612,7 @@ xlog_cil_committed( struct xfs_cil_ctx *ctx) { struct xfs_mount *mp = ctx->cil->xc_log->l_mp; - bool abort = XLOG_FORCED_SHUTDOWN(ctx->cil->xc_log); + bool abort = xlog_is_shutdown(ctx->cil->xc_log); /* * If the I/O failed, we're aborting the commit and already shutdown. @@ -623,6 +623,7 @@ xlog_cil_committed( */ if (abort) { spin_lock(&ctx->cil->xc_push_lock); + wake_up_all(&ctx->cil->xc_start_wait); wake_up_all(&ctx->cil->xc_commit_wait); spin_unlock(&ctx->cil->xc_push_lock); } @@ -659,6 +660,180 @@ xlog_cil_process_committed( } } +/* +* Record the LSN of the iclog we were just granted space to start writing into. +* If the context doesn't have a start_lsn recorded, then this iclog will +* contain the start record for the checkpoint. Otherwise this write contains +* the commit record for the checkpoint. +*/ +void +xlog_cil_set_ctx_write_state( + struct xfs_cil_ctx *ctx, + struct xlog_in_core *iclog) +{ + struct xfs_cil *cil = ctx->cil; + xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); + + ASSERT(!ctx->commit_lsn); + if (!ctx->start_lsn) { + spin_lock(&cil->xc_push_lock); + /* + * The LSN we need to pass to the log items on transaction + * commit is the LSN reported by the first log vector write, not + * the commit lsn. If we use the commit record lsn then we can + * move the tail beyond the grant write head. + */ + ctx->start_lsn = lsn; + wake_up_all(&cil->xc_start_wait); + spin_unlock(&cil->xc_push_lock); + return; + } + + /* + * Take a reference to the iclog for the context so that we still hold + * it when xlog_write is done and has released it. This means the + * context controls when the iclog is released for IO. 
+ */ + atomic_inc(&iclog->ic_refcnt); + + /* + * xlog_state_get_iclog_space() guarantees there is enough space in the + * iclog for an entire commit record, so we can attach the context + * callbacks now. This needs to be done before we make the commit_lsn + * visible to waiters so that checkpoints with commit records in the + * same iclog order their IO completion callbacks in the same order that + * the commit records appear in the iclog. + */ + spin_lock(&cil->xc_log->l_icloglock); + list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks); + spin_unlock(&cil->xc_log->l_icloglock); + + /* + * Now we can record the commit LSN and wake anyone waiting for this + * sequence to have the ordered commit record assigned to a physical + * location in the log. + */ + spin_lock(&cil->xc_push_lock); + ctx->commit_iclog = iclog; + ctx->commit_lsn = lsn; + wake_up_all(&cil->xc_commit_wait); + spin_unlock(&cil->xc_push_lock); +} + + +/* + * Ensure that the order of log writes follows checkpoint sequence order. This + * relies on the context LSN being zero until the log write has guaranteed the + * LSN that the log write will start at via xlog_state_get_iclog_space(). + */ +enum _record_type { + _START_RECORD, + _COMMIT_RECORD, +}; + +static int +xlog_cil_order_write( + struct xfs_cil *cil, + xfs_csn_t sequence, + enum _record_type record) +{ + struct xfs_cil_ctx *ctx; + +restart: + spin_lock(&cil->xc_push_lock); + list_for_each_entry(ctx, &cil->xc_committing, committing) { + /* + * Avoid getting stuck in this loop because we were woken by the + * shutdown, but then went back to sleep once already in the + * shutdown state. + */ + if (xlog_is_shutdown(cil->xc_log)) { + spin_unlock(&cil->xc_push_lock); + return -EIO; + } + + /* + * Higher sequences will wait for this one so skip them. + * Don't wait for our own sequence, either. + */ + if (ctx->sequence >= sequence) + continue; + + /* Wait until the LSN for the record has been recorded. */ + switch (record) { + case _START_RECORD: + if (!ctx->start_lsn) { + xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock); + goto restart; + } + break; + case _COMMIT_RECORD: + if (!ctx->commit_lsn) { + xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); + goto restart; + } + break; + } + } + spin_unlock(&cil->xc_push_lock); + return 0; +} + +/* + * Write out the log vector change now attached to the CIL context. This will + * write a start record that needs to be strictly ordered in ascending CIL + * sequence order so that log recovery will always use in-order start LSNs when + * replaying checkpoints. + */ +static int +xlog_cil_write_chain( + struct xfs_cil_ctx *ctx, + struct xfs_log_vec *chain) +{ + struct xlog *log = ctx->cil->xc_log; + int error; + + error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD); + if (error) + return error; + return xlog_write(log, ctx, chain, ctx->ticket, XLOG_START_TRANS); +} + +/* + * Write out the commit record of a checkpoint transaction to close off a + * running log write. These commit records are strictly ordered in ascending CIL + * sequence order so that log recovery will always replay the checkpoints in the + * correct order. 
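
xlog_cil_set_ctx_write_state() above bumps ic_refcnt before publishing the iclog through ctx->commit_iclog, so the object cannot go away until the CIL push path explicitly drops its reference. That hand-off idiom in isolation, with C11 atomics and hypothetical names:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct obj {
            atomic_int refcnt;
            /* ... payload ... */
    };

    /* Producer: take a reference on behalf of the consumer, then publish. */
    static struct obj *handoff_get(struct obj *o)
    {
            atomic_fetch_add(&o->refcnt, 1);
            return o;
    }

    /* Consumer: drop the reference when done; the last put frees it. */
    static void handoff_put(struct obj *o)
    {
            if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                    free(o);
    }

In the patch the "put" side is xlog_state_release_iclog(), which is why the push work and the abort path must both release ctx->commit_iclog exactly once.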
+ */ +static int +xlog_cil_write_commit_record( + struct xfs_cil_ctx *ctx) +{ + struct xlog *log = ctx->cil->xc_log; + struct xfs_log_iovec reg = { + .i_addr = NULL, + .i_len = 0, + .i_type = XLOG_REG_TYPE_COMMIT, + }; + struct xfs_log_vec vec = { + .lv_niovecs = 1, + .lv_iovecp = ®, + }; + int error; + + if (xlog_is_shutdown(log)) + return -EIO; + + error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD); + if (error) + return error; + + error = xlog_write(log, ctx, &vec, ctx->ticket, XLOG_COMMIT_TRANS); + if (error) + xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); + return error; +} + /* * Push the Committed Item List to the log. * @@ -683,7 +858,6 @@ xlog_cil_push_work( struct xlog *log = cil->xc_log; struct xfs_log_vec *lv; struct xfs_cil_ctx *new_ctx; - struct xlog_in_core *commit_iclog; struct xlog_ticket *tic; int num_iovecs; int error = 0; @@ -691,7 +865,6 @@ xlog_cil_push_work( struct xfs_log_iovec lhdr; struct xfs_log_vec lvhdr = { NULL }; xfs_lsn_t preflush_tail_lsn; - xfs_lsn_t commit_lsn; xfs_csn_t push_seq; struct bio bio; DECLARE_COMPLETION_ONSTACK(bdev_flush); @@ -863,77 +1036,16 @@ xlog_cil_push_work( */ wait_for_completion(&bdev_flush); - error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, - XLOG_START_TRANS); + error = xlog_cil_write_chain(ctx, &lvhdr); if (error) goto out_abort_free_ticket; - /* - * now that we've written the checkpoint into the log, strictly - * order the commit records so replay will get them in the right order. - */ -restart: - spin_lock(&cil->xc_push_lock); - list_for_each_entry(new_ctx, &cil->xc_committing, committing) { - /* - * Avoid getting stuck in this loop because we were woken by the - * shutdown, but then went back to sleep once already in the - * shutdown state. - */ - if (XLOG_FORCED_SHUTDOWN(log)) { - spin_unlock(&cil->xc_push_lock); - goto out_abort_free_ticket; - } - - /* - * Higher sequences will wait for this one so skip them. - * Don't wait for our own sequence, either. - */ - if (new_ctx->sequence >= ctx->sequence) - continue; - if (!new_ctx->commit_lsn) { - /* - * It is still being pushed! Wait for the push to - * complete, then start again from the beginning. - */ - xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); - goto restart; - } - } - spin_unlock(&cil->xc_push_lock); - - error = xlog_commit_record(log, tic, &commit_iclog, &commit_lsn); + error = xlog_cil_write_commit_record(ctx); if (error) goto out_abort_free_ticket; xfs_log_ticket_ungrant(log, tic); - /* - * Once we attach the ctx to the iclog, a shutdown can process the - * iclog, run the callbacks and free the ctx. The only thing preventing - * this potential UAF situation here is that we are holding the - * icloglock. Hence we cannot access the ctx once we have attached the - * callbacks and dropped the icloglock. - */ - spin_lock(&log->l_icloglock); - if (commit_iclog->ic_state == XLOG_STATE_IOERROR) { - spin_unlock(&log->l_icloglock); - goto out_abort; - } - ASSERT_ALWAYS(commit_iclog->ic_state == XLOG_STATE_ACTIVE || - commit_iclog->ic_state == XLOG_STATE_WANT_SYNC); - list_add_tail(&ctx->iclog_entry, &commit_iclog->ic_callbacks); - - /* - * now the checkpoint commit is complete and we've attached the - * callbacks to the iclog we can assign the commit LSN to the context - * and wake up anyone who is waiting for the commit to complete. 
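
xlog_cil_order_write() above makes a writer sleep until every lower-numbered checkpoint has published the LSN for the record class in question, restarting its scan after each wakeup. The same gate can be modelled in userspace with a condition variable; MAX_SEQ, published[] and the helper name are inventions for the sketch, and the kernel's shutdown check is omitted:

    #include <pthread.h>
    #include <stdbool.h>

    #define MAX_SEQ 64

    static pthread_mutex_t seq_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t seq_published = PTHREAD_COND_INITIALIZER;
    static bool published[MAX_SEQ]; /* flips false -> true exactly once */

    /* Block until every sequence below 'seq' has published its LSN. */
    static void order_write(unsigned int seq)
    {
            pthread_mutex_lock(&seq_lock);
            for (unsigned int s = 0; s < seq; s++) {
                    while (!published[s])
                            pthread_cond_wait(&seq_published, &seq_lock);
            }
            pthread_mutex_unlock(&seq_lock);
    }

    /* Publisher: set published[seq] under seq_lock, then broadcast. */

The monotone false-to-true transition is what makes the wait safe; the kernel adds the xlog_is_shutdown() check so waiters cannot sleep forever once the log dies.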
- */ - spin_lock(&cil->xc_push_lock); - ctx->commit_lsn = commit_lsn; - wake_up_all(&cil->xc_commit_wait); - spin_unlock(&cil->xc_push_lock); - /* * If the checkpoint spans multiple iclogs, wait for all previous iclogs * to complete before we submit the commit_iclog. We can't use state @@ -945,21 +1057,19 @@ xlog_cil_push_work( * wakeup until this commit_iclog is written to disk. Hence we use the * iclog header lsn and compare it to the commit lsn to determine if we * need to wait on iclogs or not. - * - * NOTE: It is not safe to reference the ctx after this check as we drop - * the icloglock if we have to wait for completion of other iclogs. */ - if (ctx->start_lsn != commit_lsn) { + spin_lock(&log->l_icloglock); + if (ctx->start_lsn != ctx->commit_lsn) { xfs_lsn_t plsn; - plsn = be64_to_cpu(commit_iclog->ic_prev->ic_header.h_lsn); - if (plsn && XFS_LSN_CMP(plsn, commit_lsn) < 0) { + plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn); + if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) { /* * Waiting on ic_force_wait orders the completion of * iclogs older than ic_prev. Hence we only need to wait * on the most recent older iclog here. */ - xlog_wait_on_iclog(commit_iclog->ic_prev); + xlog_wait_on_iclog(ctx->commit_iclog->ic_prev); spin_lock(&log->l_icloglock); } @@ -967,7 +1077,7 @@ xlog_cil_push_work( * We need to issue a pre-flush so that the ordering for this * checkpoint is correctly preserved down to stable storage. */ - commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; + ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH; } /* @@ -980,11 +1090,11 @@ xlog_cil_push_work( * will be written when released, switch it's state to WANT_SYNC right * now. */ - commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA; + ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA; if (push_commit_stable && - commit_iclog->ic_state == XLOG_STATE_ACTIVE) - xlog_state_switch_iclogs(log, commit_iclog, 0); - xlog_state_release_iclog(log, commit_iclog, preflush_tail_lsn); + ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE) + xlog_state_switch_iclogs(log, ctx->commit_iclog, 0); + xlog_state_release_iclog(log, ctx->commit_iclog, preflush_tail_lsn); /* Not safe to reference ctx now! */ @@ -999,9 +1109,15 @@ xlog_cil_push_work( out_abort_free_ticket: xfs_log_ticket_ungrant(log, tic); -out_abort: - ASSERT(XLOG_FORCED_SHUTDOWN(log)); - xlog_cil_committed(ctx); + ASSERT(xlog_is_shutdown(log)); + if (!ctx->commit_iclog) { + xlog_cil_committed(ctx); + return; + } + spin_lock(&log->l_icloglock); + xlog_state_release_iclog(log, ctx->commit_iclog, 0); + /* Not safe to reference ctx now! */ + spin_unlock(&log->l_icloglock); } /* @@ -1168,7 +1284,7 @@ xlog_cil_commit( xlog_cil_insert_items(log, tp); - if (regrant && !XLOG_FORCED_SHUTDOWN(log)) + if (regrant && !xlog_is_shutdown(log)) xfs_log_ticket_regrant(log, tp->t_ticket); else xfs_log_ticket_ungrant(log, tp->t_ticket); @@ -1260,7 +1376,7 @@ xlog_cil_force_seq( * shutdown, but then went back to sleep once already in the * shutdown state. 
*/ - if (XLOG_FORCED_SHUTDOWN(log)) + if (xlog_is_shutdown(log)) goto out_shutdown; if (ctx->sequence > sequence) continue; @@ -1370,6 +1486,7 @@ xlog_cil_init( spin_lock_init(&cil->xc_push_lock); init_waitqueue_head(&cil->xc_push_wait); init_rwsem(&cil->xc_ctx_lock); + init_waitqueue_head(&cil->xc_start_wait); init_waitqueue_head(&cil->xc_commit_wait); cil->xc_log = log; log->l_cilp = cil; diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index ef96bd5e7e56d12e26fad3cf7f807f8f43772c8d..b01fb03e60d41d2055c7641c8319a9213ae34db5 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -11,15 +11,6 @@ struct xlog; struct xlog_ticket; struct xfs_mount; -/* - * Flags for log structure - */ -#define XLOG_ACTIVE_RECOVERY 0x2 /* in the middle of recovery */ -#define XLOG_RECOVERY_NEEDED 0x4 /* log was recovered */ -#define XLOG_IO_ERROR 0x8 /* log hit an I/O error, and being - shutdown */ -#define XLOG_TAIL_WARN 0x10 /* log tail verify warning issued */ - /* * get client id from packed copy. * @@ -47,7 +38,6 @@ enum xlog_iclog_state { XLOG_STATE_DONE_SYNC, /* Done syncing to disk */ XLOG_STATE_CALLBACK, /* Callback functions now */ XLOG_STATE_DIRTY, /* Dirty IC log, not ready for ACTIVE status */ - XLOG_STATE_IOERROR, /* IO error happened in sync'ing log */ }; #define XLOG_STATE_STRINGS \ @@ -56,8 +46,7 @@ enum xlog_iclog_state { { XLOG_STATE_SYNCING, "XLOG_STATE_SYNCING" }, \ { XLOG_STATE_DONE_SYNC, "XLOG_STATE_DONE_SYNC" }, \ { XLOG_STATE_CALLBACK, "XLOG_STATE_CALLBACK" }, \ - { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" }, \ - { XLOG_STATE_IOERROR, "XLOG_STATE_IOERROR" } + { XLOG_STATE_DIRTY, "XLOG_STATE_DIRTY" } /* @@ -244,6 +233,7 @@ struct xfs_cil_ctx { xfs_csn_t sequence; /* chkpt sequence # */ xfs_lsn_t start_lsn; /* first LSN of chkpt commit */ xfs_lsn_t commit_lsn; /* chkpt commit record lsn */ + struct xlog_in_core *commit_iclog; struct xlog_ticket *ticket; /* chkpt ticket */ int nvecs; /* number of regions */ int space_used; /* aggregate size of regions */ @@ -285,6 +275,7 @@ struct xfs_cil { bool xc_push_commit_stable; struct list_head xc_committing; wait_queue_head_t xc_commit_wait; + wait_queue_head_t xc_start_wait; xfs_csn_t xc_current_sequence; wait_queue_head_t xc_push_wait; /* background push throttle */ } ____cacheline_aligned_in_smp; @@ -402,7 +393,7 @@ struct xlog { struct xfs_buftarg *l_targ; /* buftarg of log */ struct workqueue_struct *l_ioend_workqueue; /* for I/O completions */ struct delayed_work l_work; /* background flush work */ - uint l_flags; + long l_opstate; /* operational state */ uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */ struct list_head *l_buf_cancel_table; int l_iclog_hsize; /* size of iclog header */ @@ -456,8 +447,32 @@ struct xlog { #define XLOG_BUF_CANCEL_BUCKET(log, blkno) \ ((log)->l_buf_cancel_table + ((uint64_t)blkno % XLOG_BC_TABLE_SIZE)) -#define XLOG_FORCED_SHUTDOWN(log) \ - (unlikely((log)->l_flags & XLOG_IO_ERROR)) +/* + * Bits for operational state + */ +#define XLOG_ACTIVE_RECOVERY 0 /* in the middle of recovery */ +#define XLOG_RECOVERY_NEEDED 1 /* log was recovered */ +#define XLOG_IO_ERROR 2 /* log hit an I/O error, and being + shutdown */ +#define XLOG_TAIL_WARN 3 /* log tail verify warning issued */ + +static inline bool +xlog_recovery_needed(struct xlog *log) +{ + return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate); +} + +static inline bool +xlog_in_recovery(struct xlog *log) +{ + return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); +} + +static inline bool +xlog_is_shutdown(struct xlog *log) +{ + return 
test_bit(XLOG_IO_ERROR, &log->l_opstate); +} /* common routines */ extern int @@ -491,11 +506,9 @@ xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes) void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket); void xlog_print_trans(struct xfs_trans *); -int xlog_write(struct xlog *log, struct xfs_log_vec *log_vector, - struct xlog_ticket *tic, xfs_lsn_t *start_lsn, - struct xlog_in_core **commit_iclog, uint optype); -int xlog_commit_record(struct xlog *log, struct xlog_ticket *ticket, - struct xlog_in_core **iclog, xfs_lsn_t *lsn); +int xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx, + struct xfs_log_vec *log_vector, struct xlog_ticket *tic, + uint optype); void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket); void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket); @@ -568,6 +581,9 @@ void xlog_cil_destroy(struct xlog *log); bool xlog_cil_empty(struct xlog *log); void xlog_cil_commit(struct xlog *log, struct xfs_trans *tp, xfs_csn_t *commit_seq, bool regrant); +void xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx, + struct xlog_in_core *iclog); + /* * CIL force routines diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 36f01c8eb57e44d0a27ce18f67032fbc849ff387..f3e7016823e8dc1db60924fc8a24e98a4f6074b4 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -143,7 +143,7 @@ xlog_do_io( error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no, BBTOB(nbblks), data, op); - if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) { + if (error && !xlog_is_shutdown(log)) { xfs_alert(log->l_mp, "log recovery %s I/O error at daddr 0x%llx len %d error %d", op == REQ_OP_WRITE ? "write" : "read", @@ -3292,10 +3292,7 @@ xlog_do_recover( if (error) return error; - /* - * If IO errors happened during recovery, bail out. - */ - if (XFS_FORCED_SHUTDOWN(mp)) + if (xlog_is_shutdown(log)) return -EIO; /* @@ -3317,7 +3314,7 @@ xlog_do_recover( xfs_buf_hold(bp); error = _xfs_buf_read(bp, XBF_READ); if (error) { - if (!XFS_FORCED_SHUTDOWN(mp)) { + if (!xlog_is_shutdown(log)) { xfs_buf_ioerror_alert(bp, __this_address); ASSERT(0); } @@ -3341,7 +3338,7 @@ xlog_do_recover( xlog_recover_check_summary(log); /* Normal transactions can now occur */ - log->l_flags &= ~XLOG_ACTIVE_RECOVERY; + clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); return 0; } @@ -3425,68 +3422,49 @@ xlog_recover( : "internal"); error = xlog_do_recover(log, head_blk, tail_blk); - log->l_flags |= XLOG_RECOVERY_NEEDED; + set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate); } return error; } /* - * In the first part of recovery we replay inodes and buffers and build - * up the list of extent free items which need to be processed. Here - * we process the extent free items and clean up the on disk unlinked - * inode lists. This is separated from the first part of recovery so - * that the root and real-time bitmap inodes can be read in from disk in - * between the two stages. This is necessary so that we can free space - * in the real-time portion of the file system. + * In the first part of recovery we replay inodes and buffers and build up the + * list of intents which need to be processed. Here we process the intents and + * clean up the on disk unlinked inode lists. This is separated from the first + * part of recovery so that the root and real-time bitmap inodes can be read in + * from disk in between the two stages. This is necessary so that we can free + * space in the real-time portion of the file system. 
*/ int xlog_recover_finish( struct xlog *log) { - /* - * Now we're ready to do the transactions needed for the - * rest of recovery. Start with completing all the extent - * free intent records and then process the unlinked inode - * lists. At this point, we essentially run in normal mode - * except that we're still performing recovery actions - * rather than accepting new requests. - */ - if (log->l_flags & XLOG_RECOVERY_NEEDED) { - int error; - error = xlog_recover_process_intents(log); - if (error) { - /* - * Cancel all the unprocessed intent items now so that - * we don't leave them pinned in the AIL. This can - * cause the AIL to livelock on the pinned item if - * anyone tries to push the AIL (inode reclaim does - * this) before we get around to xfs_log_mount_cancel. - */ - xlog_recover_cancel_intents(log); - xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); - xfs_alert(log->l_mp, "Failed to recover intents"); - return error; - } + int error; + error = xlog_recover_process_intents(log); + if (error) { /* - * Sync the log to get all the intents out of the AIL. - * This isn't absolutely necessary, but it helps in - * case the unlink transactions would have problems - * pushing the intents out of the way. + * Cancel all the unprocessed intent items now so that we don't + * leave them pinned in the AIL. This can cause the AIL to + * livelock on the pinned item if anyone tries to push the AIL + * (inode reclaim does this) before we get around to + * xfs_log_mount_cancel. */ - xfs_log_force(log->l_mp, XFS_LOG_SYNC); - - xlog_recover_process_iunlinks(log); + xlog_recover_cancel_intents(log); + xfs_alert(log->l_mp, "Failed to recover intents"); + xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); + return error; + } - xlog_recover_check_summary(log); + /* + * Sync the log to get all the intents out of the AIL. This isn't + * absolutely necessary, but it helps in case the unlink transactions + * would have problems pushing the intents out of the way. + */ + xfs_log_force(log->l_mp, XFS_LOG_SYNC); - xfs_notice(log->l_mp, "Ending recovery (logdev: %s)", - log->l_mp->m_logname ? log->l_mp->m_logname - : "internal"); - log->l_flags &= ~XLOG_RECOVERY_NEEDED; - } else { - xfs_info(log->l_mp, "Ending clean mount"); - } + xlog_recover_process_iunlinks(log); + xlog_recover_check_summary(log); return 0; } @@ -3494,7 +3472,7 @@ void xlog_recover_cancel( struct xlog *log) { - if (log->l_flags & XLOG_RECOVERY_NEEDED) + if (xlog_recovery_needed(log)) xlog_recover_cancel_intents(log); } diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 475bb2ffd4f42a6a8b4903ed18c0a9648a607553..9148170a12cbfd310e2cb03b80f24764a78db323 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -707,7 +707,7 @@ xfs_fs_drop_inode( * that. See the comment for this inode flag. */ if (ip->i_flags & XFS_IRECOVERY) { - ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED); + ASSERT(xlog_recovery_needed(ip->i_mount->m_log)); return 0; } @@ -1205,6 +1205,22 @@ suffix_kstrtoint( return ret; } +static inline void +xfs_fs_warn_deprecated( + struct fs_context *fc, + struct fs_parameter *param, + uint64_t flag, + bool value) +{ + /* Don't print the warning if reconfiguring and current mount point + * already had the flag set + */ + if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && + !!(XFS_M(fc->root->d_sb)->m_flags & flag) == value) + return; + xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); +} + /* * Set mount state from a mount option. 
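
Further down, the new xfs_fs_warn_deprecated() helper quiets the deprecation nag on remount when the option merely restates the flag's current value. The predicate boils down to "warn unless this is a reconfigure that changes nothing"; a tiny stand-alone version with plain booleans instead of mount-flag masks:

    #include <stdbool.h>
    #include <stdio.h>

    static void warn_deprecated(const char *opt, bool reconfigure,
                                bool current_value, bool requested_value)
    {
            /* On reconfigure, only warn when the option changes state. */
            if (reconfigure && current_value == requested_value)
                    return;
            fprintf(stderr, "%s mount option is deprecated.\n", opt);
    }

This is why the kernel helper takes both the flag mask and the requested boolean: it has to compare the live superblock state against what the parser is about to set.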
* @@ -1215,7 +1231,7 @@ xfs_fc_parse_param( struct fs_context *fc, struct fs_parameter *param) { - struct xfs_mount *mp = fc->s_fs_info; + struct xfs_mount *parsing_mp = fc->s_fs_info; struct fs_parse_result result; int size = 0; int opt; @@ -1226,138 +1242,138 @@ xfs_fc_parse_param( switch (opt) { case Opt_logbufs: - mp->m_logbufs = result.uint_32; + parsing_mp->m_logbufs = result.uint_32; return 0; case Opt_logbsize: - if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize)) + if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize)) return -EINVAL; return 0; case Opt_logdev: - kfree(mp->m_logname); - mp->m_logname = kstrdup(param->string, GFP_KERNEL); - if (!mp->m_logname) + kfree(parsing_mp->m_logname); + parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL); + if (!parsing_mp->m_logname) return -ENOMEM; return 0; case Opt_rtdev: - kfree(mp->m_rtname); - mp->m_rtname = kstrdup(param->string, GFP_KERNEL); - if (!mp->m_rtname) + kfree(parsing_mp->m_rtname); + parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL); + if (!parsing_mp->m_rtname) return -ENOMEM; return 0; case Opt_allocsize: if (suffix_kstrtoint(param->string, 10, &size)) return -EINVAL; - mp->m_allocsize_log = ffs(size) - 1; - mp->m_flags |= XFS_MOUNT_ALLOCSIZE; + parsing_mp->m_allocsize_log = ffs(size) - 1; + parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE; return 0; case Opt_grpid: case Opt_bsdgroups: - mp->m_flags |= XFS_MOUNT_GRPID; + parsing_mp->m_flags |= XFS_MOUNT_GRPID; return 0; case Opt_nogrpid: case Opt_sysvgroups: - mp->m_flags &= ~XFS_MOUNT_GRPID; + parsing_mp->m_flags &= ~XFS_MOUNT_GRPID; return 0; case Opt_wsync: - mp->m_flags |= XFS_MOUNT_WSYNC; + parsing_mp->m_flags |= XFS_MOUNT_WSYNC; return 0; case Opt_norecovery: - mp->m_flags |= XFS_MOUNT_NORECOVERY; + parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY; return 0; case Opt_noalign: - mp->m_flags |= XFS_MOUNT_NOALIGN; + parsing_mp->m_flags |= XFS_MOUNT_NOALIGN; return 0; case Opt_swalloc: - mp->m_flags |= XFS_MOUNT_SWALLOC; + parsing_mp->m_flags |= XFS_MOUNT_SWALLOC; return 0; case Opt_sunit: - mp->m_dalign = result.uint_32; + parsing_mp->m_dalign = result.uint_32; return 0; case Opt_swidth: - mp->m_swidth = result.uint_32; + parsing_mp->m_swidth = result.uint_32; return 0; case Opt_inode32: - mp->m_flags |= XFS_MOUNT_SMALL_INUMS; + parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS; return 0; case Opt_inode64: - mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; + parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; return 0; case Opt_nouuid: - mp->m_flags |= XFS_MOUNT_NOUUID; + parsing_mp->m_flags |= XFS_MOUNT_NOUUID; return 0; case Opt_largeio: - mp->m_flags |= XFS_MOUNT_LARGEIO; + parsing_mp->m_flags |= XFS_MOUNT_LARGEIO; return 0; case Opt_nolargeio: - mp->m_flags &= ~XFS_MOUNT_LARGEIO; + parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO; return 0; case Opt_filestreams: - mp->m_flags |= XFS_MOUNT_FILESTREAMS; + parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS; return 0; case Opt_noquota: - mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; - mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; + parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; + parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; return 0; case Opt_quota: case Opt_uquota: case Opt_usrquota: - mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD); + parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD); return 0; case Opt_qnoenforce: case Opt_uqnoenforce: - mp->m_qflags |= XFS_UQUOTA_ACCT; - mp->m_qflags &= ~XFS_UQUOTA_ENFD; + parsing_mp->m_qflags |= XFS_UQUOTA_ACCT; + parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD; return 0; case Opt_pquota: case 
Opt_prjquota: - mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD); + parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD); return 0; case Opt_pqnoenforce: - mp->m_qflags |= XFS_PQUOTA_ACCT; - mp->m_qflags &= ~XFS_PQUOTA_ENFD; + parsing_mp->m_qflags |= XFS_PQUOTA_ACCT; + parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD; return 0; case Opt_gquota: case Opt_grpquota: - mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD); + parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD); return 0; case Opt_gqnoenforce: - mp->m_qflags |= XFS_GQUOTA_ACCT; - mp->m_qflags &= ~XFS_GQUOTA_ENFD; + parsing_mp->m_qflags |= XFS_GQUOTA_ACCT; + parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD; return 0; case Opt_discard: - mp->m_flags |= XFS_MOUNT_DISCARD; + parsing_mp->m_flags |= XFS_MOUNT_DISCARD; return 0; case Opt_nodiscard: - mp->m_flags &= ~XFS_MOUNT_DISCARD; + parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD; return 0; #ifdef CONFIG_FS_DAX case Opt_dax: - xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS); + xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS); return 0; case Opt_dax_enum: - xfs_mount_set_dax_mode(mp, result.uint_32); + xfs_mount_set_dax_mode(parsing_mp, result.uint_32); return 0; #endif /* Following mount options will be removed in September 2025 */ case Opt_ikeep: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags |= XFS_MOUNT_IKEEP; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true); + parsing_mp->m_flags |= XFS_MOUNT_IKEEP; return 0; case Opt_noikeep: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags &= ~XFS_MOUNT_IKEEP; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false); + parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP; return 0; case Opt_attr2: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags |= XFS_MOUNT_ATTR2; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true); + parsing_mp->m_flags |= XFS_MOUNT_ATTR2; return 0; case Opt_noattr2: - xfs_warn(mp, "%s mount option is deprecated.", param->key); - mp->m_flags &= ~XFS_MOUNT_ATTR2; - mp->m_flags |= XFS_MOUNT_NOATTR2; + xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true); + parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2; + parsing_mp->m_flags |= XFS_MOUNT_NOATTR2; return 0; default: - xfs_warn(mp, "unknown mount option [%s].", param->key); + xfs_warn(parsing_mp, "unknown mount option [%s].", param->key); return -EINVAL; } diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 5201111d24ffcede3e09a8b84ad697fb13470e00..3f4cc7e3915c41a93039aa902cc7c2a9c660bbdb 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -4001,7 +4001,6 @@ TRACE_DEFINE_ENUM(XLOG_STATE_SYNCING); TRACE_DEFINE_ENUM(XLOG_STATE_DONE_SYNC); TRACE_DEFINE_ENUM(XLOG_STATE_CALLBACK); TRACE_DEFINE_ENUM(XLOG_STATE_DIRTY); -TRACE_DEFINE_ENUM(XLOG_STATE_IOERROR); DECLARE_EVENT_CLASS(xlog_iclog_class, TP_PROTO(struct xlog_in_core *iclog, unsigned long caller_ip), diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 3d1c4a4cb16ed1eb810b003f68733fede1a21436..e8a9967e71942a1e1a371b26ee6cb0a5b85e205e 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -919,7 +919,7 @@ __xfs_trans_commit( */ xfs_trans_unreserve_and_mod_dquots(tp); if (tp->t_ticket) { - if (regrant && !XLOG_FORCED_SHUTDOWN(mp->m_log)) + if (regrant && !xlog_is_shutdown(mp->m_log)) xfs_log_ticket_regrant(mp->m_log, tp->t_ticket); else xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket); diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index e60759d8bb5fba0438b72bef13ebfc12c2c1b6d5..2d1f5d4d12e02f28ff2b124dbfe77c619542b2e6 
100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -1169,7 +1169,6 @@ static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf) struct super_block *sb = dentry->d_sb; struct zonefs_sb_info *sbi = ZONEFS_SB(sb); enum zonefs_ztype t; - u64 fsid; buf->f_type = ZONEFS_MAGIC; buf->f_bsize = sb->s_blocksize; @@ -1192,9 +1191,7 @@ static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf) spin_unlock(&sbi->s_lock); - fsid = le64_to_cpup((void *)sbi->s_uuid.b) ^ - le64_to_cpup((void *)sbi->s_uuid.b + sizeof(u64)); - buf->f_fsid = u64_to_fsid(fsid); + buf->f_fsid = uuid_to_fsid(sbi->s_uuid.b); return 0; } diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index a072afc7609a80004984ccdcd1045105ccaf1545..b5ff52052db3809190ba7f2695132bf30461e379 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -608,9 +608,10 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state); int acpi_disable_wakeup_device_power(struct acpi_device *dev); #ifdef CONFIG_X86 -bool acpi_device_always_present(struct acpi_device *adev); +bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status); #else -static inline bool acpi_device_always_present(struct acpi_device *adev) +static inline bool acpi_device_override_status(struct acpi_device *adev, + unsigned long long *status) { return false; } diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 647cb11d0a0a3026321dddcc9a261a6b94c645ef..7334037624c5c3e493c21d7ce96dd1c893a38e04 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -536,8 +536,14 @@ typedef u64 acpi_integer; * Can be used with access_width of struct acpi_generic_address and access_size of * struct acpi_resource_generic_register. */ -#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2)) -#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1)) +#define ACPI_ACCESS_BIT_SHIFT 2 +#define ACPI_ACCESS_BYTE_SHIFT -1 +#define ACPI_ACCESS_BIT_MAX (31 - ACPI_ACCESS_BIT_SHIFT) +#define ACPI_ACCESS_BYTE_MAX (31 - ACPI_ACCESS_BYTE_SHIFT) +#define ACPI_ACCESS_BIT_DEFAULT (8 - ACPI_ACCESS_BIT_SHIFT) +#define ACPI_ACCESS_BYTE_DEFAULT (8 - ACPI_ACCESS_BYTE_SHIFT) +#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BIT_SHIFT)) +#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BYTE_SHIFT)) /******************************************************************************* * diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 6661ee1cff479ffb5297be5a001625d03b517f85..a0c4b99d28994136aed771888cdf3206cc0fa089 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -563,10 +563,14 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ do { \ unsigned long _sz = huge_page_size(h); \ - if (_sz == PMD_SIZE) \ - tlb_flush_pmd_range(tlb, address, _sz); \ - else if (_sz == PUD_SIZE) \ + if (_sz >= P4D_SIZE) \ + tlb_flush_p4d_range(tlb, address, _sz); \ + else if (_sz >= PUD_SIZE) \ tlb_flush_pud_range(tlb, address, _sz); \ + else if (_sz >= PMD_SIZE) \ + tlb_flush_pmd_range(tlb, address, _sz); \ + else \ + tlb_flush_pte_range(tlb, address, _sz); \ __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h index 42ea21289ba954fc4c279c0df31c257cb5c91459..1f021ad0533ffb18cdab0c0f42fc2e4505192458 100644 --- a/include/crypto/sm3.h +++ b/include/crypto/sm3.h @@ -1,5 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Common 
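
The actypes.h hunk above re-expresses the access-width macros in terms of named shift constants, which makes the encoding explicit: an access size code N maps to 1 << (N + shift), giving byte widths 1, 2, 4, 8 and bit widths 8, 16, 32, 64 for codes 1 through 4. A quick self-contained check of that arithmetic, with the macro names shortened for the sketch:

    #include <stdio.h>

    #define ACCESS_BIT_SHIFT        2
    #define ACCESS_BYTE_SHIFT       -1
    #define ACCESS_BIT_WIDTH(size)  (1 << ((size) + ACCESS_BIT_SHIFT))
    #define ACCESS_BYTE_WIDTH(size) (1 << ((size) + ACCESS_BYTE_SHIFT))

    int main(void)
    {
            for (int size = 1; size <= 4; size++)
                    printf("code %d: %2d bits, %d bytes\n", size,
                           ACCESS_BIT_WIDTH(size), ACCESS_BYTE_WIDTH(size));
            return 0;       /* prints 8/1, 16/2, 32/4, 64/8 */
    }

The new _MAX and _DEFAULT constants in the hunk fall out of the same formula, so callers can clamp or default a width without re-deriving the shifts.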
values for SM3 algorithm + * + * Copyright (C) 2017 ARM Limited or its affiliates. + * Copyright (C) 2017 Gilad Ben-Yossef + * Copyright (C) 2021 Tianjia Zhang */ #ifndef _CRYPTO_SM3_H @@ -30,13 +35,30 @@ struct sm3_state { u8 buffer[SM3_BLOCK_SIZE]; }; -struct shash_desc; +/* + * Stand-alone implementation of the SM3 algorithm. It is designed to + * have as little dependencies as possible so it can be used in the + * kexec_file purgatory. In other cases you should generally use the + * hash APIs from include/crypto/hash.h. Especially when hashing large + * amounts of data as those APIs may be hw-accelerated. + * + * For details see lib/crypto/sm3.c + */ -extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data, - unsigned int len); +static inline void sm3_init(struct sm3_state *sctx) +{ + sctx->state[0] = SM3_IVA; + sctx->state[1] = SM3_IVB; + sctx->state[2] = SM3_IVC; + sctx->state[3] = SM3_IVD; + sctx->state[4] = SM3_IVE; + sctx->state[5] = SM3_IVF; + sctx->state[6] = SM3_IVG; + sctx->state[7] = SM3_IVH; + sctx->count = 0; +} -extern int crypto_sm3_final(struct shash_desc *desc, u8 *out); +void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len); +void sm3_final(struct sm3_state *sctx, u8 *out); -extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *hash); #endif diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h index 709f286e7b25355e60e1aa007ad84ee5f67ac9b5..9656a9a403264cf26a7efe598b4b15ded9540fc7 100644 --- a/include/crypto/sm4.h +++ b/include/crypto/sm4.h @@ -21,6 +21,10 @@ struct sm4_ctx { u32 rkey_dec[SM4_RKEY_WORDS]; }; +extern const u32 crypto_sm4_fk[]; +extern const u32 crypto_sm4_ck[]; +extern const u8 crypto_sm4_sbox[]; + /** * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016 * @ctx: The location where the computed key will be stored. 
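The hunk above replaces the shash-based crypto_sm3_*() entry points with a stand-alone SM3 interface. As a quick orientation, the following minimal sketch (not part of the patch) hashes a buffer in one pass; it assumes only the sm3_init()/sm3_update()/sm3_final() helpers declared above and the SM3_DIGEST_SIZE constant from <crypto/sm3.h>:

#include <crypto/sm3.h>

/*
 * Usage sketch for the stand-alone SM3 API added above. It carries no
 * crypto-API state, which is what makes it usable from places like the
 * kexec_file purgatory mentioned in the header comment.
 */
static void sm3_digest_buffer(const u8 *data, unsigned int len,
			      u8 digest[SM3_DIGEST_SIZE])
{
	struct sm3_state sctx;

	sm3_init(&sctx);		/* load the eight SM3 IV words */
	sm3_update(&sctx, data, len);	/* absorb the message */
	sm3_final(&sctx, digest);	/* pad and emit the 256-bit digest */
}

Callers that previously went through crypto_sm3_update()/crypto_sm3_final() on a shash_desc can use the hash APIs from include/crypto/hash.h instead when hardware acceleration matters, as the new header comment notes.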
diff --git a/include/linux/align.h b/include/linux/align.h new file mode 100644 index 0000000000000000000000000000000000000000..2b4acec7b95a27b6768d48f46519abc584c94d5d --- /dev/null +++ b/include/linux/align.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ALIGN_H +#define _LINUX_ALIGN_H + +#include + +/* @a is a power of 2 value */ +#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a))) +#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) + +#endif /* _LINUX_ALIGN_H */ diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 0bbfd647f5c6dec3c12c52c621a94cf57975ebc5..6cc93ab5b8096d1def9ebd9256e45c08fa86d161 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -76,7 +76,7 @@ struct amba_device { struct amba_driver { struct device_driver drv; int (*probe)(struct amba_device *, const struct amba_id *); - int (*remove)(struct amba_device *); + void (*remove)(struct amba_device *); void (*shutdown)(struct amba_device *); const struct amba_id *id_table; }; diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 5491c6d46e3a3e5e92f877f09fa393087d045226..9b1ddf9905f8ab5a52742ccc3f34fef8810a483d 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -90,6 +90,11 @@ ARM_SMCCC_SMC_32, \ 0, 0x7fff) +#define ARM_SMCCC_ARCH_WORKAROUND_3 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x3fff) + #define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 /* Paravirtualised time calls (defined by ARM DEN0057A) */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 3134aaf9032aefca00757abba56ffbed98eef6c2..ac83257972a0f5794f1e309e5fb6dc8f56631aae 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -141,10 +141,6 @@ struct blk_mq_hw_ctx { * shared across request queues. */ atomic_t nr_active; - /** - * @elevator_queued: Number of queued requests on hctx. - */ - atomic_t elevator_queued; /** @cpuhp_online: List to store request if CPU is going to die */ struct hlist_node cpuhp_online; @@ -176,6 +172,12 @@ struct blk_mq_hw_ctx { */ struct list_head hctx_list; + /** + * @dtag_wait_time: record when hardware queue is pending, specifically + * when BLK_MQ_S_DTAG_WAIT is set in state. + */ + unsigned long dtag_wait_time; + KABI_RESERVE(1) KABI_RESERVE(2) KABI_RESERVE(3) @@ -270,6 +272,7 @@ struct blk_mq_tag_set { unsigned int flags; void *driver_data; atomic_t active_queues_shared_sbitmap; + atomic_t pending_queues_shared_sbitmap; struct sbitmap_queue __bitmap_tags; struct sbitmap_queue __breserved_tags; @@ -301,6 +304,15 @@ struct blk_mq_queue_data { KABI_RESERVE(1) }; +struct request_wrapper { + struct request rq; + + /* Time that I/O was counted in part_get_stat_info(). 
*/ + u64 stat_time_ns; +}; + +#define request_to_wrapper(_rq) container_of(_rq, struct request_wrapper, rq) + typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, bool); typedef bool (busy_tag_iter_fn)(struct request *, void *, bool); @@ -446,6 +458,9 @@ enum { /* hw queue is inactive after all its CPUs become offline */ BLK_MQ_S_INACTIVE = 3, + /* hw queue is waiting for driver tag */ + BLK_MQ_S_DTAG_WAIT = 4, + BLK_MQ_MAX_DEPTH = 10240, BLK_MQ_CPU_WORK_BATCH = 8, @@ -589,7 +604,7 @@ static inline bool blk_should_fake_timeout(struct request_queue *q) */ static inline struct request *blk_mq_rq_from_pdu(void *pdu) { - return pdu - sizeof(struct request); + return pdu - sizeof(struct request_wrapper); } /** @@ -603,12 +618,23 @@ static inline struct request *blk_mq_rq_from_pdu(void *pdu) */ static inline void *blk_mq_rq_to_pdu(struct request *rq) { - return rq + 1; + return request_to_wrapper(rq) + 1; +} + +static inline struct blk_mq_hw_ctx *queue_hctx(struct request_queue *q, int id) +{ + struct blk_mq_hw_ctx *hctx; + + rcu_read_lock(); + hctx = *(rcu_dereference(q->queue_hw_ctx) + id); + rcu_read_unlock(); + + return hctx; } #define queue_for_each_hw_ctx(q, hctx, i) \ for ((i) = 0; (i) < (q)->nr_hw_queues && \ - ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++) + ({ hctx = queue_hctx((q), i); 1; }); (i)++) #define hctx_for_each_ctx(hctx, ctx, i) \ for ((i) = 0; (i) < (hctx)->nr_ctx && \ diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h index b80c65aba2493aeba29c94e641bb7bd2e96ec6a4..2580e05a8ab67203efde59504ffb655480b531cb 100644 --- a/include/linux/blk-pm.h +++ b/include/linux/blk-pm.h @@ -14,7 +14,7 @@ extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); extern int blk_pre_runtime_suspend(struct request_queue *q); extern void blk_post_runtime_suspend(struct request_queue *q, int err); extern void blk_pre_runtime_resume(struct request_queue *q); -extern void blk_post_runtime_resume(struct request_queue *q, int err); +extern void blk_post_runtime_resume(struct request_queue *q); extern void blk_set_runtime_active(struct request_queue *q); #else static inline void blk_pm_runtime_init(struct request_queue *q, diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 11b9505b14c6c1190d196ca17dc92c2461bf7804..bbb62ff84601b85c0e3d9ce30d86fcc7f2115914 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -48,7 +48,11 @@ struct block_device { int bd_fsfreeze_count; /* Mutex for freeze */ struct mutex bd_fsfreeze_mutex; +#ifndef __GENKSYMS__ + KABI_USE2(1, int bd_write_openers, int bd_part_write_openers); +#else KABI_RESERVE(1) +#endif KABI_RESERVE(2) KABI_RESERVE(3) KABI_RESERVE(4) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index eae4a046037e2ee837c52cb2a7d8c6ada2788dca..49540ce9e325d825d95a7f9b014cdbbb9bdfe0de 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -27,6 +27,7 @@ #include #include #include +#include struct module; struct scsi_ioctl_command; @@ -206,7 +207,6 @@ struct request { u64 start_time_ns; /* Time that I/O was submitted to the device. 
*/ u64 io_start_time_ns; - #ifdef CONFIG_BLK_WBT unsigned short wbt_flags; #endif @@ -421,7 +421,7 @@ struct request_queue { unsigned int queue_depth; /* hw dispatch queues */ - struct blk_mq_hw_ctx **queue_hw_ctx; + struct blk_mq_hw_ctx __rcu **queue_hw_ctx; unsigned int nr_hw_queues; struct backing_dev_info *backing_dev_info; @@ -602,6 +602,7 @@ struct request_queue { #define BLK_MAX_WRITE_HINTS 5 u64 write_hints[BLK_MAX_WRITE_HINTS]; + unsigned long dtag_wait_time; KABI_RESERVE(1) KABI_RESERVE(2) KABI_RESERVE(3) @@ -640,6 +641,8 @@ struct request_queue { #define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */ #define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */ #define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */ +/*at least one blk-mq hctx can't get driver tag */ +#define QUEUE_FLAG_HCTX_WAIT 30 #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_SAME_COMP) | \ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index d526073fe354bd1d3ea4c5412f33c274349c2788..3521e557cc100caecf7b3846c8951433dc9bad7a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -254,6 +254,39 @@ bool bpf_map_meta_equal(const struct bpf_map *meta0, extern const struct bpf_map_ops bpf_map_offload_ops; +/* bpf_type_flag contains a set of flags that are applicable to the values of + * arg_type, ret_type and reg_type. For example, a pointer value may be null, + * or a memory is read-only. We classify types into two categories: base types + * and extended types. Extended types are base types combined with a type flag. + * + * Currently there are no more than 32 base types in arg_type, ret_type and + * reg_types. + */ +#define BPF_BASE_TYPE_BITS 8 + +enum bpf_type_flag { + /* PTR may be NULL. */ + PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS), + + /* MEM is read-only. When applied on bpf_arg, it indicates the arg is + * compatible with both mutable and immutable memory. + */ + MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS), + + /* MEM was "allocated" from a different helper, and cannot be mixed + * with regular non-MEM_ALLOC'ed MEM types. + */ + MEM_ALLOC = BIT(2 + BPF_BASE_TYPE_BITS), + + __BPF_TYPE_LAST_FLAG = MEM_ALLOC, +}; + +/* Max number of base types. */ +#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS) + +/* Max number of all types. */ +#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1)) + /* function argument constraints */ enum bpf_arg_type { ARG_DONTCARE = 0, /* unused argument in helper function */ @@ -265,13 +298,11 @@ enum bpf_arg_type { ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */ - ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */ /* the following constraints used to prototype bpf_memcmp() and other * functions that access data on eBPF program stack */ ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ - ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */ ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, * helper function must fill all bytes or clear * them in error case. 
@@ -281,37 +312,60 @@ enum bpf_arg_type { ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ ARG_PTR_TO_CTX, /* pointer to context */ - ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */ ARG_ANYTHING, /* any (initialized) argument is ok */ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ ARG_PTR_TO_INT, /* pointer to int */ ARG_PTR_TO_LONG, /* pointer to long */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ - ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ - ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */ ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */ __BPF_ARG_TYPE_MAX, + + /* Extended arg_types. */ + ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE, + ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM, + ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX, + ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET, + ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM, + + /* This must be the last entry. Its purpose is to ensure the enum is + * wide enough to hold the higher bits reserved for bpf_type_flag. + */ + __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT, }; +static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); /* type of values returned from helper functions */ enum bpf_return_type { RET_INTEGER, /* function returns integer */ RET_VOID, /* function doesn't return anything */ RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ - RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ - RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ - RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ - RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ - RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */ - RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */ - RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */ + RET_PTR_TO_SOCKET, /* returns a pointer to a socket */ + RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */ + RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */ + RET_PTR_TO_ALLOC_MEM, /* returns a pointer to dynamically allocated memory */ RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */ + RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */ + __BPF_RET_TYPE_MAX, + + /* Extended ret_types. */ + RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE, + RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET, + RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK, + RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON, + RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM, + RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID, + + /* This must be the last entry. Its purpose is to ensure the enum is + * wide enough to hold the higher bits reserved for bpf_type_flag. 
+ */ + __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT, }; +static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL @@ -373,18 +427,14 @@ enum bpf_reg_type { PTR_TO_CTX, /* reg points to bpf_context */ CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ PTR_TO_MAP_VALUE, /* reg points to map element value */ - PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ PTR_TO_STACK, /* reg == frame_pointer + offset */ PTR_TO_PACKET_META, /* skb->data - meta_len */ PTR_TO_PACKET, /* reg points to skb->data */ PTR_TO_PACKET_END, /* skb->data + headlen */ PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ PTR_TO_SOCKET, /* reg points to struct bpf_sock */ - PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ PTR_TO_SOCK_COMMON, /* reg points to sock_common */ - PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */ PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ - PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ /* PTR_TO_BTF_ID points to a kernel struct that does not need @@ -402,15 +452,24 @@ enum bpf_reg_type { * been checked for null. Used primarily to inform the verifier * an explicit null check is required for this struct. */ - PTR_TO_BTF_ID_OR_NULL, PTR_TO_MEM, /* reg points to valid memory region */ - PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */ - PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */ - PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */ - PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */ - PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */ + PTR_TO_BUF, /* reg points to a read/write buffer */ PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */ + __BPF_REG_TYPE_MAX, + + /* Extended reg_types. */ + PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE, + PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET, + PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON, + PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK, + PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID, + + /* This must be the last entry. Its purpose is to ensure the enum is + * wide enough to hold the higher bits reserved for bpf_type_flag. + */ + __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT, }; +static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); /* The information passed from prog-specific *_is_valid_access * back to the verifier. 
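The restructuring above is easier to read with a small sketch: every bpf_arg_type/bpf_return_type/bpf_reg_type value is now a base type held in the low BPF_BASE_TYPE_BITS bits, optionally OR-ed with bpf_type_flag modifiers such as PTR_MAYBE_NULL, so the *_OR_NULL enumerators become compositions rather than distinct values. The helper below is illustrative only and assumes the base_type()/type_flag() accessors this patch adds to include/linux/bpf_verifier.h further down:

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>

/*
 * Sketch: ARG_PTR_TO_MEM_OR_NULL is defined above as
 * PTR_MAYBE_NULL | ARG_PTR_TO_MEM, so the base type and the
 * nullability flag can now be tested independently.
 */
static bool arg_is_nullable_mem(enum bpf_arg_type type)
{
	return base_type(type) == ARG_PTR_TO_MEM &&
	       (type_flag(type) & PTR_MAYBE_NULL);
}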
@@ -1514,6 +1573,12 @@ struct bpf_prog *bpf_prog_by_id(u32 id); struct bpf_link *bpf_link_by_id(u32 id); const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); + +static inline bool unprivileged_ebpf_enabled(void) +{ + return !sysctl_unprivileged_bpf_disabled; +} + #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -1708,6 +1773,12 @@ bpf_base_func_proto(enum bpf_func_id func_id) { return NULL; } + +static inline bool unprivileged_ebpf_enabled(void) +{ + return false; +} + #endif /* CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 6e330ff2f28df0a2e729a631524ac92ffa127c71..351923aab8697c2e92ed6dccdd36c165ead29f30 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -17,6 +17,8 @@ * that converting umax_value to int cannot overflow. */ #define BPF_MAX_VAR_SIZ (1 << 29) +/* size of type_str_buf in bpf_verifier. */ +#define TYPE_STR_BUF_LEN 64 /* Liveness marks, used for registers and spilled-regs (in stack slots). * Read marks propagate upwards until they find a write mark; they record that @@ -367,6 +369,13 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) log->level == BPF_LOG_KERNEL); } +static inline bool +bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log) +{ + return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 && + log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK); +} + #define BPF_MAX_SUBPROGS 256 struct bpf_subprog_info { @@ -434,6 +443,8 @@ struct bpf_verifier_env { u32 peak_states; /* longest register parentage chain walked for liveness marking */ u32 longest_mark_read_walk; + /* buffer used in reg_type_str() to generate reg_type string */ + char type_str_buf[TYPE_STR_BUF_LEN]; }; __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, @@ -465,8 +476,8 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, void bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); -int check_ctx_reg(struct bpf_verifier_env *env, - const struct bpf_reg_state *reg, int regno); +int check_ptr_off_reg(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, int regno); /* this lives here instead of in bpf.h because it needs to dereference tgt_prog */ static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog, @@ -480,5 +491,18 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, const struct bpf_prog *tgt_prog, u32 btf_id, struct bpf_attach_target_info *tgt_info); +#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0) + +/* extract base type from bpf_{arg, return, reg}_type. */ +static inline u32 base_type(u32 type) +{ + return type & BPF_BASE_TYPE_MASK; +} + +/* extract flags from an extended type. See bpf_type_flag in bpf.h. */ +static inline u32 type_flag(u32 type) +{ + return type & ~BPF_BASE_TYPE_MASK; +} #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 648c2e1ec442b365d03f154a94fa5774c2c5f9aa..35d8ce603815eb1068f73bfc75e8e23bd3fdebb9 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -72,6 +72,9 @@ enum { /* Cgroup is frozen. */ CGRP_FROZEN, + + /* Control group has to be killed. 
*/ + CGRP_KILL, }; /* cgroup_root->flags */ @@ -484,7 +487,12 @@ struct cgroup { /* used to schedule release agent */ struct work_struct release_agent_work; - /* used to track pressure stalls */ + /* used to track pressure stalls. */ + + /* + * It is accessed only the cgroup core code and so changes made to + * the cgroup structure should not affect third-party kernel modules. + */ struct psi_group psi; /* used to store eBPF programs */ diff --git a/include/linux/cpu.h b/include/linux/cpu.h index d6428aaf67e7313be7db903dfead2b907e9d95a1..d63b8f70d1239df47eb79217779d8469c4aad473 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -65,6 +65,9 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev, extern ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, + char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/dynamic_hugetlb.h b/include/linux/dynamic_hugetlb.h index 237a7329ff64e130a44efa4758cda77df868aacf..5dcba8e8b9334832d1ffc958c5199f38f7fbf916 100644 --- a/include/linux/dynamic_hugetlb.h +++ b/include/linux/dynamic_hugetlb.h @@ -112,10 +112,12 @@ void free_huge_page_to_dhugetlb_pool(struct page *page, bool restore_reserve); struct dhugetlb_pool {}; +#ifdef CONFIG_CGROUPS static inline bool dhugetlb_hide_files(struct cftype *cft) { return false; } +#endif static inline void hugetlb_pool_inherit(struct mem_cgroup *memcg, struct mem_cgroup *parent) { } diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 820563e85c41f323892748dc6dfcafb04e438239..1363b5858486f42a4f7a87ed3ccdd6e2418e6cc1 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -188,6 +188,8 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); /* Supports zoned block devices sequential write constraint */ #define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) +/* Supports scheduling on multiple hardware queues */ +#define ELEVATOR_F_MQ_AWARE (1U << 1) #endif /* CONFIG_BLOCK */ #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index db632747781a73a9f280296bb6837778bac7266f..a7bc1eaa27eeb5b1629966a19bfaae566f1b957c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -183,6 +184,12 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* File supports async buffered reads */ #define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) +/* File mode control flag, expect random access pattern */ +#define FMODE_CTL_RANDOM ((__force fmode_t)0x1) + +/* File mode control flag, will try to read head of the file into pagecache */ +#define FMODE_CTL_WILLNEED ((__force fmode_t)0x2) + /* * Attribute flags. These should be or-ed together to figure out what * has been changed! 
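For the two FMODE_CTL_* hint bits introduced above, here is a hedged sketch of how a read path might consult them; it is not part of the patch and assumes the f_ctl_mode field that a later hunk in this file folds into struct file's first KABI reserve slot:

#include <linux/fs.h>

/*
 * Illustrative only: query the per-file read-pattern hints carried in
 * f_ctl_mode (added to struct file below in this patch).
 */
static inline bool file_expects_random_read(const struct file *filp)
{
	return filp->f_ctl_mode & FMODE_CTL_RANDOM;
}

static inline bool file_wants_head_readahead(const struct file *filp)
{
	return filp->f_ctl_mode & FMODE_CTL_WILLNEED;
}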
@@ -968,8 +975,14 @@ struct file { struct address_space *f_mapping; errseq_t f_wb_err; errseq_t f_sb_err; /* for syncfs */ - +#ifndef __GENKSYMS__ + union { + fmode_t f_ctl_mode; + u64 kabi_reserved1; + }; +#else KABI_RESERVE(1) +#endif } __randomize_layout __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ @@ -2831,6 +2844,10 @@ static inline int bmap(struct inode *inode, sector_t *block) extern int notify_change(struct dentry *, struct iattr *, struct inode **); extern int inode_permission(struct inode *, int); extern int generic_permission(struct inode *, int); +static inline int path_permission(const struct path *path, int mask) +{ + return inode_permission(d_inode(path->dentry), mask); +} extern int __check_sticky(struct inode *dir, struct inode *inode); static inline bool execute_ok(struct inode *inode) @@ -3555,4 +3572,33 @@ static inline int inode_drain_writes(struct inode *inode) return filemap_write_and_wait(inode->i_mapping); } +struct fs_file_read_ctx { + const unsigned char *name; + unsigned int f_ctl_mode; + unsigned int rsvd; + /* clear from f_ctl_mode */ + unsigned int clr_f_ctl_mode; + /* set into f_ctl_mode */ + unsigned int set_f_ctl_mode; + unsigned long key; + /* file size */ + long long i_size; + /* previous page index */ + long long prev_index; + /* current page index */ + long long index; +}; + +#ifdef CONFIG_TRACEPOINTS +DECLARE_TRACEPOINT(fs_file_read); +extern void fs_file_read_update_args_by_trace(struct kiocb *iocb); +#else +static inline void fs_file_read_update_args_by_trace(struct kiocb *iocb) {} +#endif + +static inline void fs_file_read_do_trace(struct kiocb *iocb) +{ + if (tracepoint_enabled(fs_file_read)) + fs_file_read_update_args_by_trace(iocb); +} #endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 6b54982fc5f378ec704f56def2fdb299e7d8b42d..13fa6f3df8e4651af389223e0e1b94994ea6b172 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -142,6 +142,8 @@ extern void put_fs_context(struct fs_context *fc); extern int vfs_parse_fs_param_source(struct fs_context *fc, struct fs_parameter *param); extern void fc_drop_locked(struct fs_context *fc); +int reconfigure_single(struct super_block *s, + int flags, void *data); /* * sget() wrappers to be called from the ->get_tree() op. diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index f8acddcf54fb4ce2e7b03dda41f52dd936b591e8..79add91eaa04ebf3b4355cabae415709ab3bbb6f 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -203,6 +203,42 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0); } +/* + * fsnotify_delete - @dentry was unlinked and unhashed + * + * Caller must make sure that dentry->d_name is stable. + * + * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode + * as this may be called after d_delete() and old_dentry may be negative. + */ +static inline void fsnotify_delete(struct inode *dir, struct inode *inode, + struct dentry *dentry) +{ + __u32 mask = FS_DELETE; + + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify_name(dir, mask, inode, &dentry->d_name, 0); +} + +/** + * d_delete_notify - delete a dentry and call fsnotify_delete() + * @dentry: The dentry to delete + * + * This helper is used to guaranty that the unlinked inode cannot be found + * by lookup of this name after fsnotify_delete() event has been delivered. 
+ */ +static inline void d_delete_notify(struct inode *dir, struct dentry *dentry) +{ + struct inode *inode = d_inode(dentry); + + ihold(inode); + d_delete(dentry); + fsnotify_delete(dir, inode, dentry); + iput(inode); +} + /* * fsnotify_unlink - 'name' was unlinked * @@ -210,10 +246,10 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, */ static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry) { - /* Expected to be called before d_delete() */ - WARN_ON_ONCE(d_is_negative(dentry)); + if (WARN_ON_ONCE(d_is_negative(dentry))) + return; - fsnotify_dirent(dir, dentry, FS_DELETE); + fsnotify_delete(dir, d_inode(dentry), dentry); } /* @@ -233,10 +269,10 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) */ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) { - /* Expected to be called before d_delete() */ - WARN_ON_ONCE(d_is_negative(dentry)); + if (WARN_ON_ONCE(d_is_negative(dentry))) + return; - fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR); + fsnotify_delete(dir, d_inode(dentry), dentry); } /* diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 09da27361620cd34e4407d118456825a3f16fb50..05927a1c6b5b18c8ae00626edfc65ccdca7cc998 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -76,7 +76,7 @@ struct hd_struct { #endif struct rcu_work rcu_work; - KABI_RESERVE(1) + KABI_USE(1, u64 stat_time) KABI_RESERVE(2) KABI_RESERVE(3) KABI_RESERVE(4) diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 8e144306e2622fd801886851eba895c5cf522a08..b216899b4745e7697ba9d6d781ab4acba31ff364 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -224,6 +224,15 @@ struct gpio_irq_chip { unsigned long *valid_mask, unsigned int ngpios); + /** + * @initialized: + * + * Flag to track GPIO chip irq member's initialization. + * This flag will make sure GPIO chip irq members are not used + * before they are initialized. 
+ */ + bool initialized; + /** * @valid_mask: * diff --git a/include/linux/hid.h b/include/linux/hid.h index fc56d53cc68bffc85b9e7dc014d5b2d700d8e1e1..2ba33d708942c163bb5b3d5c01c8840129f393b5 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -345,6 +345,8 @@ struct hid_item { /* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */ #define HID_QUIRK_ALWAYS_POLL BIT(10) #define HID_QUIRK_INPUT_PER_APP BIT(11) +#define HID_QUIRK_X_INVERT BIT(12) +#define HID_QUIRK_Y_INVERT BIT(13) #define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16) #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17) #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18) diff --git a/drivers/crypto/hisilicon/qm.h b/include/linux/hisi_acc_qm.h similarity index 85% rename from drivers/crypto/hisilicon/qm.h rename to include/linux/hisi_acc_qm.h index 718687dd7242764cc5b5b33cc58f204a7cd74846..7ffdee5851539c384a4a905f07b3ec4a08b50557 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/include/linux/hisi_acc_qm.h @@ -34,6 +34,40 @@ #define QM_WUSER_M_CFG_ENABLE 0x1000a8 #define WUSER_M_CFG_ENABLE 0xffffffff +/* mailbox */ +#define QM_MB_CMD_SQC 0x0 +#define QM_MB_CMD_CQC 0x1 +#define QM_MB_CMD_EQC 0x2 +#define QM_MB_CMD_AEQC 0x3 +#define QM_MB_CMD_SQC_BT 0x4 +#define QM_MB_CMD_CQC_BT 0x5 +#define QM_MB_CMD_SQC_VFT_V2 0x6 +#define QM_MB_CMD_STOP_QP 0x8 +#define QM_MB_CMD_SRC 0xc +#define QM_MB_CMD_DST 0xd + +#define QM_MB_CMD_SEND_BASE 0x300 +#define QM_MB_EVENT_SHIFT 8 +#define QM_MB_BUSY_SHIFT 13 +#define QM_MB_OP_SHIFT 14 +#define QM_MB_CMD_DATA_ADDR_L 0x304 +#define QM_MB_CMD_DATA_ADDR_H 0x308 +#define QM_MB_MAX_WAIT_CNT 6000 + +/* doorbell */ +#define QM_DOORBELL_CMD_SQ 0 +#define QM_DOORBELL_CMD_CQ 1 +#define QM_DOORBELL_CMD_EQ 2 +#define QM_DOORBELL_CMD_AEQ 3 + +#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 +#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 +#define QM_QP_MAX_NUM_SHIFT 11 +#define QM_DB_CMD_SHIFT_V2 12 +#define QM_DB_RAND_SHIFT_V2 16 +#define QM_DB_INDEX_SHIFT_V2 32 +#define QM_DB_PRIORITY_SHIFT_V2 48 + /* qm cache */ #define QM_CACHE_CTL 0x100050 #define SQC_CACHE_ENABLE BIT(0) @@ -103,6 +137,12 @@ enum qm_state { QM_STOP, }; +struct dfx_diff_registers { + u32 *regs; + u32 reg_offset; + u32 reg_len; +}; + enum qp_state { QP_INIT = 1, QP_START, @@ -157,6 +197,11 @@ struct qm_debug { struct dentry *debug_root; struct dentry *qm_d; struct debugfs_file files[DEBUG_FILE_NUM]; + unsigned int *qm_last_words; + /* ACC engines recoreding last regs */ + unsigned int *last_words; + struct dfx_diff_registers *qm_diff_regs; + struct dfx_diff_registers *acc_diff_regs; }; struct qm_shaper_factor { @@ -210,6 +255,7 @@ struct hisi_qm_err_ini { void (*open_sva_prefetch)(struct hisi_qm *qm); void (*close_sva_prefetch)(struct hisi_qm *qm); void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); + void (*show_last_dfx_regs)(struct hisi_qm *qm); void (*err_info_init)(struct hisi_qm *qm); }; @@ -400,27 +446,32 @@ int hisi_qm_init(struct hisi_qm *qm); void hisi_qm_uninit(struct hisi_qm *qm); int hisi_qm_start(struct hisi_qm *qm); int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r); -struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type); int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg); int hisi_qm_stop_qp(struct hisi_qp *qp); -void hisi_qm_release_qp(struct hisi_qp *qp); int hisi_qp_send(struct hisi_qp *qp, const void *msg); -int hisi_qm_get_free_qp_num(struct hisi_qm *qm); -int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number); void hisi_qm_debug_init(struct hisi_qm *qm); -enum 
qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); void hisi_qm_debug_regs_clear(struct hisi_qm *qm); int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs); int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen); int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs); void hisi_qm_dev_err_init(struct hisi_qm *qm); void hisi_qm_dev_err_uninit(struct hisi_qm *qm); +int hisi_qm_diff_regs_init(struct hisi_qm *qm, + struct dfx_diff_registers *dregs, int reg_len); +void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len); +void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s, + struct dfx_diff_registers *dregs, int regs_len); + pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, pci_channel_state_t state); pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev); void hisi_qm_reset_prepare(struct pci_dev *pdev); void hisi_qm_reset_done(struct pci_dev *pdev); +int hisi_qm_wait_mb_ready(struct hisi_qm *qm); +int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, + bool op); + struct hisi_acc_sgl_pool; struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool, diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index bf5c5f32c65e46cc3d58208612482c4f42594333..e147ea6794670888fbf949e9cee5dc38e16d8c3e 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -51,6 +51,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) case ARPHRD_VOID: case ARPHRD_NONE: case ARPHRD_RAWIP: + case ARPHRD_PIMREG: return false; default: return true; diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index 50b8398ffd214f189c1713a453b5595621d1c16d..acf72c018142e5d97dcc88373b9d581e5809aba5 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -58,6 +58,12 @@ enum rapl_primitives { THROTTLED_TIME, PRIORITY_LEVEL, + PSYS_POWER_LIMIT1, + PSYS_POWER_LIMIT2, + PSYS_PL1_ENABLE, + PSYS_PL2_ENABLE, + PSYS_TIME_WINDOW1, + PSYS_TIME_WINDOW2, /* below are not raw primitive data */ AVERAGE_POWER, NR_RAPL_PRIMITIVES, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 591a6c5d2ddf214632215485bbac2e61afdd0e70..092384b71ab22e6dd14c660bbbb8a1b91439a093 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -87,7 +87,6 @@ struct iommu_domain { void *handler_token; struct iommu_domain_geometry geometry; void *iova_cookie; - bool dirty_log_tracking; struct mutex switch_log_lock; KABI_RESERVE(1) KABI_RESERVE(2) @@ -394,7 +393,7 @@ struct iommu_device { struct iommu_fault_event { struct iommu_fault fault; struct list_head list; - u64 expire; + _KABI_DEPRECATE(u64, expire); }; /** @@ -409,7 +408,7 @@ struct iommu_fault_param { iommu_dev_fault_handler_t handler; void *data; struct list_head faults; - struct timer_list timer; + _KABI_DEPRECATE(struct timer_list, timer); struct mutex lock; }; diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 0e6f5d2785e9db175548c8fbec82b4601e2eef12..210402b26a20fe4944242ef0932b9bbb21edac86 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -51,7 +51,7 @@ struct ipv6_devconf { __s32 use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE - __s32 mc_forwarding; + KABI_REPLACE(__s32 mc_forwarding, atomic_t mc_forwarding) #endif __s32 disable_ipv6; __s32 drop_unicast_in_l2_multicast; diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 
f6d092fdb93dc8949ab1e25a4c2e5eb2efeda43e..380c83b7a89fe62685106acdf28a8064dbfa2e01 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -689,6 +689,11 @@ struct rdists { struct irq_domain; struct fwnode_handle; int its_cpu_init(void); +#ifdef CONFIG_ASCEND_INIT_ALL_GICR +void its_set_gicr_nr(int nr); +bool its_init_all_gicr(void); +int its_cpu_init_others(void __iomem *base, phys_addr_t phys_base, int idx); +#endif int its_init(struct fwnode_handle *handle, struct rdists *rdists, struct irq_domain *domain); int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent); diff --git a/include/linux/kabi.h b/include/linux/kabi.h index a52d9fa72cfa41b8720775cd1a975d8b0892de8a..fe3213c0f5760d4b043be300cf2f9c8c96bd8a10 100644 --- a/include/linux/kabi.h +++ b/include/linux/kabi.h @@ -393,6 +393,8 @@ # define __KABI_CHECK_SIZE(_item, _size) #endif +#define KABI_UNIQUE_ID __PASTE(kabi_hidden_, __LINE__) + # define _KABI_DEPRECATE(_type, _orig) _type kabi_reserved_##_orig # define _KABI_DEPRECATE_FN(_type, _orig, _args...) \ _type (* kabi_reserved_##_orig)(_args) @@ -402,7 +404,7 @@ _new; \ struct { \ _orig; \ - } __UNIQUE_ID(kabi_hide); \ + } KABI_UNIQUE_ID; \ __KABI_CHECK_SIZE_ALIGN(_orig, _new); \ } #else diff --git a/include/linux/kd.h b/include/linux/kd.h deleted file mode 100644 index b130a18f860f0ec95864de523186040a64d38cb8..0000000000000000000000000000000000000000 --- a/include/linux/kd.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_KD_H -#define _LINUX_KD_H - -#include - -#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */ -#endif /* _LINUX_KD_H */ diff --git a/include/linux/kfence.h b/include/linux/kfence.h index f77b0e4de937271d24844368fe61d901ade27c15..6486c3dcac36e3475c28fab5394f090dcabb34b7 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -65,6 +65,7 @@ static __always_inline bool is_kfence_address(const void *addr) return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool); } +void __init kfence_early_alloc_pool(void); /** * kfence_alloc_pool() - allocate the KFENCE pool via memblock */ @@ -214,6 +215,7 @@ bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, st #else /* CONFIG_KFENCE */ static inline bool is_kfence_address(const void *addr) { return false; } +static inline void kfence_early_alloc_pool(void) { } static inline void kfence_alloc_pool(void) { } static inline void kfence_init(void) { } static inline void kfence_shutdown_cache(struct kmem_cache *s) { } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fea02cfe65205a87912a03d2e55fcf65a4c1f52e..96f2cd2b46f7eb4b604195f91552a582b97efb82 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1480,6 +1480,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, unsigned long start, unsigned long end); +void kvm_arch_guest_memory_reclaimed(struct kvm *kvm); + #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); #else diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 5f88e6429484bb10829c1143a647a7138245681e..9301f8e9bb9081c95ae050bb7a8faa51c7cf389c 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -229,11 +229,36 @@ struct klp_func_node { struct list_head func_stack; void *old_func; struct arch_klp_data arch_data; + /* + * Used in 
breakpoint exception handling functions. + * If 'brk_func' is NULL, no breakpoint is inserted into the entry of + * the old function. + * If it is not NULL, the value is the new function that will jump to + * when the breakpoint exception is triggered. + */ + void *brk_func; }; struct klp_func_node *klp_find_func_node(const void *old_func); void klp_add_func_node(struct klp_func_node *func_node); void klp_del_func_node(struct klp_func_node *func_node); +void *klp_get_brk_func(void *addr); + +static inline +int klp_compare_address(unsigned long pc, unsigned long func_addr, + const char *func_name, unsigned long check_size) +{ + if (pc >= func_addr && pc < func_addr + check_size) { + pr_warn("func %s is in use!\n", func_name); + /* Return -EAGAIN for next retry */ + return -EAGAIN; + } + return 0; +} + +void arch_klp_init(void); +int klp_module_delete_safety_check(struct module *mod); + #endif int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, diff --git a/include/linux/memcg_memfs_info.h b/include/linux/memcg_memfs_info.h new file mode 100644 index 0000000000000000000000000000000000000000..658a91e22bd7ef6bd6fb1bf98d21b52840fbb563 --- /dev/null +++ b/include/linux/memcg_memfs_info.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _LINUX_MEMCG_MEMFS_INFO_H +#define _LINUX_MEMCG_MEMFS_INFO_H + +#include +#include + +#ifdef CONFIG_MEMCG_MEMFS_INFO +void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, struct seq_file *m); +int mem_cgroup_memfs_files_show(struct seq_file *m, void *v); +void mem_cgroup_memfs_info_init(void); +#else +static inline void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, + struct seq_file *m) +{ +} +static inline void mem_cgroup_memfs_info_init(void) +{ +} +#endif +#endif diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 7cc7cfe55d9ad9e539e50424c5eb90eaacfae74d..3a4ea9ed39fd40f749ebe99dd44c799d71767ccc 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -235,7 +235,7 @@ struct obj_cgroup { struct mem_cgroup *memcg; atomic_t nr_charged_bytes; union { - struct list_head list; + struct list_head list; /* protected by objcg_lock */ struct rcu_head rcu; }; }; @@ -344,7 +344,8 @@ struct mem_cgroup { int kmemcg_id; enum memcg_kmem_state kmem_state; struct obj_cgroup __rcu *objcg; - struct list_head objcg_list; /* list of inherited objcgs */ + /* list of inherited objcgs, protected by objcg_lock */ + struct list_head objcg_list; #endif MEMCG_PADDING(_pad2_); @@ -1197,6 +1198,18 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, rcu_read_unlock(); } +static bool memcg_event_add(struct mem_cgroup *memcg, + enum memcg_memory_event event) +{ + if (!mem_cgroup_is_root(memcg)) + return true; + + if (event == MEMCG_OOM_KILL && !cgroup_subsys_on_dfl(memory_cgrp_subsys)) + return true; + + return false; +} + static inline void memcg_memory_event(struct mem_cgroup *memcg, enum memcg_memory_event event) { @@ -1214,12 +1227,10 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg, else cgroup_file_notify(&memcg->events_file); - if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) - break; if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) break; } while ((memcg = parent_mem_cgroup(memcg)) && - !mem_cgroup_is_root(memcg)); + memcg_event_add(memcg, event)); } static inline void memcg_memory_event_mm(struct mm_struct *mm, @@ -1258,6 +1269,8 @@ static inline bool memcg_has_children(struct mem_cgroup *memcg) return ret; } +int mem_cgroup_force_empty(struct mem_cgroup 
*memcg); + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 64ab4398ba9050dde814d732bd5215671735461a..ba74e7399dc6600a3dc54ba2724d158b000d9c59 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -201,6 +201,9 @@ extern bool vma_migratable(struct vm_area_struct *vma); extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); extern void mpol_put_task_policy(struct task_struct *); +extern long __do_mbind(unsigned long start, unsigned long len, + unsigned short mode, unsigned short mode_flags, + nodemask_t *nmask, unsigned long flags, struct mm_struct *mm); #else struct mempolicy {}; @@ -301,6 +304,13 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, return -1; /* no node preference */ } +static inline long __do_mbind(unsigned long start, unsigned long len, + unsigned short mode, unsigned short mode_flags, + nodemask_t *nmask, unsigned long flags, struct mm_struct *mm) +{ + return 0; +} + static inline void mpol_put_task_policy(struct task_struct *task) { } diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f5e829e12a76deabe6bff05e9593c15d68f6e293..eba1f1cbc9fbd1b4093f2285d5fa15f4276807f7 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -9307,8 +9307,8 @@ struct mlx5_ifc_bufferx_reg_bits { u8 reserved_at_0[0x6]; u8 lossy[0x1]; u8 epsb[0x1]; - u8 reserved_at_8[0xc]; - u8 size[0xc]; + u8 reserved_at_8[0x8]; + u8 size[0x10]; u8 xoff_threshold[0x10]; u8 xon_threshold[0x10]; diff --git a/include/linux/mm.h b/include/linux/mm.h index 859d5200c57b531a98e51b37e3cd1d89848f05fd..25891b581bf4e2d0352d5754bef0071053f7b4c3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -425,6 +425,19 @@ extern unsigned int kobjsize(const void *objp); /* VMA basic access permission flags */ #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) +#ifndef arch_memory_failure +static inline int arch_memory_failure(unsigned long pfn, int flags) +{ + return -ENXIO; +} +#endif + +#ifndef arch_is_platform_page +static inline bool arch_is_platform_page(u64 paddr) +{ + return false; +} +#endif /* * Special vmas that are non-mergable, non-mlock()able. @@ -2712,17 +2725,45 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, struct vm_area_struct **pprev); -/* Look up the first VMA which intersects the interval start_addr..end_addr-1, - NULL if none. Assume start_addr < end_addr. */ -static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +/** + * find_vma_intersection() - Look up the first VMA which intersects the interval + * @mm: The process address space. + * @start_addr: The inclusive start user address. + * @end_addr: The exclusive end user address. + * + * Returns: The first VMA within the provided range, %NULL otherwise. Assumes + * start_addr < end_addr. + */ +static inline +struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, + unsigned long start_addr, + unsigned long end_addr) { - struct vm_area_struct * vma = find_vma(mm,start_addr); + struct vm_area_struct *vma = find_vma(mm, start_addr); if (vma && end_addr <= vma->vm_start) vma = NULL; return vma; } +/** + * vma_lookup() - Find a VMA at a specific address + * @mm: The process address space. + * @addr: The user address. 
+ * + * Return: The vm_area_struct at the given address, %NULL otherwise. + */ +static inline +struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma = find_vma(mm, addr); + + if (vma && addr < vma->vm_start) + vma = NULL; + + return vma; +} + static inline unsigned long vm_start_gap(struct vm_area_struct *vma) { unsigned long vm_start = vma->vm_start; @@ -3109,7 +3150,8 @@ extern int sysctl_memory_failure_recovery; extern void shake_page(struct page *p, int access); extern atomic_long_t num_poisoned_pages __read_mostly; extern int soft_offline_page(unsigned long pfn, int flags); - +extern void collect_procs(struct page *page, struct list_head *tokill, + int force_early); /* * Error handlers for various types of pages. diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 96d4a148648d145ea4c0e17debf62f63e8cf534e..13c7a048e5ab1f031d3054ac44a86b21bad59db0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -968,6 +968,15 @@ static inline int is_highmem_idx(enum zone_type idx) #endif } +#ifdef CONFIG_ZONE_DMA +bool has_managed_dma(void); +#else +static inline bool has_managed_dma(void) +{ + return false; +} +#endif + /** * is_highmem - helper function to quickly check if a struct zone is a * highmem zone or not. This is an attempt to keep references @@ -1273,13 +1282,16 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms) static inline struct mem_section *__nr_to_section(unsigned long nr) { + unsigned long root = SECTION_NR_TO_ROOT(nr); + + if (unlikely(root >= NR_SECTION_ROOTS)) + return NULL; + #ifdef CONFIG_SPARSEMEM_EXTREME - if (!mem_section) + if (!mem_section || !mem_section[root]) return NULL; #endif - if (!mem_section[SECTION_NR_TO_ROOT(nr)]) - return NULL; - return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; + return &mem_section[root][nr & SECTION_ROOT_MASK]; } extern unsigned long __section_nr(struct mem_section *ms); extern size_t mem_section_usage_size(void); diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index aac07940de09d7b503d23f74d5adfe6e59873b7b..db2eaff77f41aa10e16c619a6c2de632fa11637a 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1083,6 +1083,7 @@ struct nand_manufacturer { * @lock: Lock protecting the suspended field. Also used to serialize accesses * to the NAND device * @suspended: Set to 1 when the device is suspended, 0 when it's not + * @resume_wq: wait queue to sleep if rawnand is in suspended state. * @cur_cs: Currently selected target. -1 means no target selected, otherwise we * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets(). 
* NAND Controller drivers should not modify this value, but they're @@ -1135,6 +1136,7 @@ struct nand_chip { /* Internals */ struct mutex lock; unsigned int suspended : 1; + wait_queue_head_t resume_wq; int cur_cs; int read_retries; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 41b991fc3a4b9b449be8528642a19451a5f5cdd0..0f95a702bc222e6960988b52ee9be4f34bf632d1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2100,7 +2100,7 @@ struct net_device { struct netdev_queue *_tx ____cacheline_aligned_in_smp; unsigned int num_tx_queues; unsigned int real_num_tx_queues; - struct Qdisc *qdisc; + struct Qdisc __rcu *qdisc; unsigned int tx_queue_len; spinlock_t tx_global_lock; @@ -2595,7 +2595,7 @@ struct packet_type { void *af_packet_priv; struct list_head list; - KABI_RESERVE(1) + KABI_USE(1, struct net *af_packet_net) KABI_RESERVE(2) KABI_RESERVE(3) KABI_RESERVE(4) @@ -4033,7 +4033,8 @@ void netdev_run_todo(void); */ static inline void dev_put(struct net_device *dev) { - this_cpu_dec(*dev->pcpu_refcnt); + if (dev) + this_cpu_dec(*dev->pcpu_refcnt); } /** @@ -4044,7 +4045,8 @@ static inline void dev_put(struct net_device *dev) */ static inline void dev_hold(struct net_device *dev) { - this_cpu_inc(*dev->pcpu_refcnt); + if (dev) + this_cpu_inc(*dev->pcpu_refcnt); } /* Carrier loss detection, dial on demand. The functions netif_carrier_on diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index aff5cd382fef55d8cb5743d020e676e030176e53..e39342945a80ba88360de774cf28e9208cb77721 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -478,10 +478,10 @@ static inline const struct cred *nfs_file_cred(struct file *file) * linux/fs/nfs/direct.c */ extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *); -extern ssize_t nfs_file_direct_read(struct kiocb *iocb, - struct iov_iter *iter); -extern ssize_t nfs_file_direct_write(struct kiocb *iocb, - struct iov_iter *iter); +ssize_t nfs_file_direct_read(struct kiocb *iocb, + struct iov_iter *iter, bool swap); +ssize_t nfs_file_direct_write(struct kiocb *iocb, + struct iov_iter *iter, bool swap); /* * linux/fs/nfs/dir.c @@ -501,8 +501,8 @@ extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr, struct nfs4_label *label); extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); -extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, - bool may_block); +extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, + u32 *mask, bool may_block); /* * linux/fs/nfs/symlink.c @@ -551,7 +551,7 @@ extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_page(struct inode *inode, struct page *page); extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); -extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); +extern struct nfs_commit_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_commit_data *data); bool nfs_commit_end(struct nfs_mds_commit_info *cinfo); diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index eb2fe22bc0e9a01711f55c3235e060c7aab71d47..65e1cbe1d1ce66eaf05c6de381ca496bb3a18085 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -139,7 +139,9 @@ enum pageflags { #ifdef CONFIG_64BIT PG_arch_2, #endif +#if defined(CONFIG_X86_64) || 
defined(CONFIG_ARM64) PG_pool, /* Used to track page allocated from dynamic hugetlb pool */ +#endif #ifdef CONFIG_PIN_MEMORY PG_hotreplace, #endif @@ -148,8 +150,10 @@ enum pageflags { * flags which backported from kernel upstream, please place them * behind the reserved page flags. */ +#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64) PG_reserve_pgflag_0, PG_reserve_pgflag_1, +#endif __NR_PAGEFLAGS, @@ -474,7 +478,11 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND) /* * PagePool() is used to track page allocated from hpool. */ +#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64) PAGEFLAG(Pool, pool, PF_NO_TAIL) +#else +PAGEFLAG_FALSE(Pool) +#endif /* * On an anonymous page mapped into a user virtual memory area, diff --git a/include/linux/pci.h b/include/linux/pci.h index ff6236a3dd6b2691e05ea4d62b28649f5e1c883e..aee7a7563fc2bf91d106e071949b5f35278e18a8 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -307,6 +307,7 @@ struct pcie_link_state; struct pci_vpd; struct pci_sriov; struct pci_p2pdma; +struct rcec_ea; /* The pci_dev structure describes PCI devices */ struct pci_dev { @@ -511,8 +512,18 @@ struct pci_dev { unsigned long slot_being_removed_rescanned; struct pci_dev *rpdev; /* root port pci_dev */ +#ifndef __GENKSYMS__ +#ifdef CONFIG_PCIEPORTBUS + struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */ + struct pci_dev *rcec; /* Associated RCEC device */ +#else KABI_RESERVE(1) KABI_RESERVE(2) +#endif +#else + KABI_RESERVE(1) + KABI_RESERVE(2) +#endif KABI_RESERVE(3) KABI_RESERVE(4) KABI_RESERVE(5) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index f8da3669b6b0d5fc8d2d41417e3d4bc7460c981c..f9792f9128fb506eccc9634b8781adbba7f98eee 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -81,6 +81,7 @@ #define PCI_CLASS_SYSTEM_RTC 0x0803 #define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804 #define PCI_CLASS_SYSTEM_SDHCI 0x0805 +#define PCI_CLASS_SYSTEM_RCEC 0x0807 #define PCI_CLASS_SYSTEM_OTHER 0x0880 #define PCI_BASE_CLASS_INPUT 0x09 @@ -2569,6 +2570,9 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff #define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define PCI_DEVICE_ID_HUAWEI_ZIP_VF 0xa251 +#define PCI_DEVICE_ID_HUAWEI_SEC_VF 0xa256 +#define PCI_DEVICE_ID_HUAWEI_HPRE_VF 0xa259 /* Hisilicon PCIe NP devices */ #define PCIE_DEVICE_ID_HISI_5896 0x5896 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index a08a02c77559751c9db37d8178cc087a0de5ea8b..d4c912443ac65222f5b457ac15e76d5d30a59389 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1243,7 +1243,18 @@ extern void perf_event_bpf_event(struct bpf_prog *prog, enum perf_bpf_event_type type, u16 flags); -extern struct perf_guest_info_callbacks *perf_guest_cbs; +extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs; +static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void) +{ + /* + * Callbacks are RCU-protected and must be READ_ONCE to avoid reloading + * the callbacks between a !NULL check and dereferences, to ensure + * pending stores/changes to the callback pointers are visible before a + * non-NULL perf_guest_cbs is visible to readers, and to prevent a + * module from unloading callbacks while readers are active. 
+ */ + return rcu_dereference(perf_guest_cbs); +} extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index d4b8fa22837b19b3cf8c802e39f29b5e5a110f3d..f924468d84ec4150584c7ceb0d228842669d46fd 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -44,6 +44,7 @@ static inline unsigned long pte_index(unsigned long address) { return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); } +#define pte_index pte_index #ifndef pmd_index static inline unsigned long pmd_index(unsigned long address)
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 161acd4ede448eed9c19a02a6e3ce958b94a670d..30091ab5de287c25b6b3d6653fa2e68c39a91896 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -58,6 +58,7 @@ extern void pm_runtime_get_suppliers(struct device *dev); extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device_link *link); +extern void pm_runtime_release_supplier(struct device_link *link, bool check_idle); /** * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. @@ -279,6 +280,8 @@ static inline void pm_runtime_get_suppliers(struct device *dev) {} static inline void pm_runtime_put_suppliers(struct device *dev) {} static inline void pm_runtime_new_link(struct device *dev) {} static inline void pm_runtime_drop_link(struct device_link *link) {} +static inline void pm_runtime_release_supplier(struct device_link *link, + bool check_idle) {} #endif /* !CONFIG_PM */
diff --git a/include/linux/psi.h b/include/linux/psi.h index 8f59276b566b43006f24acfd014b6daee2ea6545..d290f0493c3335cddcc9153d4cdf36499866c0b4 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -22,7 +22,6 @@ void psi_task_change(struct task_struct *task, int clear, int set); void psi_task_switch(struct task_struct *prev, struct task_struct *next, bool sleep); -void psi_memstall_tick(struct task_struct *task, int cpu); void psi_memstall_enter(unsigned long *flags); void psi_memstall_leave(unsigned long *flags); @@ -35,7 +34,7 @@ void cgroup_move_task(struct task_struct *p, struct css_set *to); struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf, size_t nbytes, enum psi_res res); -void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t); +void psi_trigger_destroy(struct psi_trigger *t); __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file, poll_table *wait);
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index b95f3211566a22c88c53154cb8f7085d026ba98a..c17aeb774e23a2d77b5a4008c433e47f46e0d951 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -10,6 +10,11 @@ #ifdef CONFIG_PSI /* Tracked task states */ +#ifdef __GENKSYMS__ +/* + * This definition is used to keep kabi unchanged, + * and **must not be changed** + */ enum psi_task_count { NR_IOWAIT, NR_MEMSTALL, @@ -23,12 +28,41 @@ enum psi_task_count { NR_ONCPU, NR_PSI_TASK_COUNTS = 4, }; +#else +/* + * All modifications to psi_task_count should be applied here. + */ +enum psi_task_count { + NR_IOWAIT, + NR_MEMSTALL, + NR_RUNNING, + /* + * This can't have values other than 0 or 1 and could be + * implemented as a bit flag. But for now we still have room + * in the first cacheline of psi_group_cpu, and this way we + * don't have to special case any state tracking for it. + */ + NR_ONCPU, + /* + * For IO and CPU stalls the presence of running/oncpu tasks + * in the domain means a partial rather than a full stall. + * For memory it's not so simple because of page reclaimers: + * they are running/oncpu while representing a stall. To tell + * whether a domain has productivity left or not, we need to + * distinguish between regular running (i.e. productive) + * threads and memstall ones. + */ + NR_MEMSTALL_RUNNING, + NR_PSI_TASK_COUNTS = 5, +}; +#endif /* Task state bitmasks */ #define TSK_IOWAIT (1 << NR_IOWAIT) #define TSK_MEMSTALL (1 << NR_MEMSTALL) #define TSK_RUNNING (1 << NR_RUNNING) #define TSK_ONCPU (1 << NR_ONCPU) +#define TSK_MEMSTALL_RUNNING (1 << NR_MEMSTALL_RUNNING) /* Resources that workloads could be stalled on */ enum psi_res { @@ -44,6 +78,11 @@ enum psi_res { * SOME: Stalled tasks & working tasks * FULL: Stalled tasks & no working tasks */ +#ifdef __GENKSYMS__ +/* + * This definition is used to keep kabi unchanged, + * and **must not be changed** + */ enum psi_states { PSI_IO_SOME, PSI_IO_FULL, PSI_MEM_SOME, PSI_MEM_FULL, PSI_CPU_SOME, PSI_NONIDLE, NR_PSI_STATES = 6, }; +#else +/* + * All modifications to psi_states should be applied here. + */ +enum psi_states { + PSI_IO_SOME, + PSI_IO_FULL, + PSI_MEM_SOME, + PSI_MEM_FULL, + PSI_CPU_SOME, + PSI_CPU_FULL, + /* Only per-CPU, to weigh the CPU in the global average: */ + PSI_NONIDLE, + NR_PSI_STATES = 7, +}; +#endif + enum psi_aggregators { PSI_AVGS = 0, @@ -128,9 +184,6 @@ struct psi_trigger { * events to one per window */ u64 last_event_time; - - /* Refcounting to prevent premature destruction */ - struct kref refcount; }; struct psi_group {
diff --git a/include/linux/pstore.h b/include/linux/pstore.h index eb93a54cff31fa21d49b77012f14888b51e01bca..e97a8188f0fd8bcac0e0edd8a5570c7aa00edf9d 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -14,7 +14,7 @@ #include #include #include -#include <linux/semaphore.h> +#include <linux/spinlock.h> #include #include @@ -87,7 +87,7 @@ struct pstore_record { * @owner: module which is responsible for this backend driver * @name: name of the backend driver * - * @buf_lock: semaphore to serialize access to @buf + * @buf_lock: spinlock to serialize access to @buf * @buf: preallocated crash dump buffer * @bufsize: size of @buf available for crash dump bytes (must match * smallest number of bytes available for writing to a @@ -178,7 +178,7 @@ struct pstore_info { struct module *owner; const char *name; - struct semaphore buf_lock; + spinlock_t buf_lock; char *buf; size_t bufsize;
diff --git a/include/linux/sched.h b/include/linux/sched.h index edd236f98f0c7b1a3f778ae4b8a48fe3d30aa133..47f462040f4dfc4c1baed9960ece7aa2ba8e8b4a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -461,8 +461,13 @@ struct sched_statistics { u64 nr_wakeups_passive; u64 nr_wakeups_idle; +#if defined(CONFIG_QOS_SCHED_SMT_EXPELLER) && !defined(__GENKSYMS__) + u64 nr_qos_smt_send_ipi; + u64 nr_qos_smt_expelled; +#else KABI_RESERVE(1) KABI_RESERVE(2) +#endif KABI_RESERVE(3) KABI_RESERVE(4) #endif @@ -1592,7 +1597,6 @@ extern struct pid *cad_pid; #define PF_MEMALLOC 0x00000800 /* Allocating memory */ #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ -#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
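To see why the psi_types.h hunk above tracks NR_MEMSTALL_RUNNING separately, consider the memory full-stall test; this sketch is modelled on test_state() in kernel/sched/psi.c and simplified, not a verbatim copy:

        static bool psi_mem_full(const unsigned int tasks[NR_PSI_TASK_COUNTS])
        {
                /* Memory is fully stalled when somebody is stalled on memory
                 * and every runnable task in the domain is a reclaimer, i.e.
                 * no regular (productive) running tasks remain. */
                return unlikely(tasks[NR_MEMSTALL] &&
                                tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
        }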
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ #define PF_FROZEN 0x00010000 /* Frozen for system suspend */ #define PF_KSWAPD 0x00020000 /* I am kswapd */ @@ -1830,9 +1834,16 @@ extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); __get_task_comm(buf, sizeof(buf), tsk); \ }) +#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER +void qos_smt_check_need_resched(void); +#endif + #ifdef CONFIG_SMP static __always_inline void scheduler_ipi(void) { +#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER + qos_smt_check_need_resched(); +#endif /* * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting * TIF_NEED_RESCHED remotely (for the first time) will also send @@ -2165,5 +2176,4 @@ static inline int sched_qos_cpu_overload(void) return 0; } #endif - #endif diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index fa75f325dad5328784acfc902b3b320754c15a87..cea4bdfd0f05f32ea26a40c22b140e40375262aa 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -55,8 +55,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct *idle, int cpu); extern int sched_fork(unsigned long clone_flags, struct task_struct *p); -extern void sched_post_fork(struct task_struct *p, - struct kernel_clone_args *kargs); +extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs); +extern void sched_post_fork(struct task_struct *p); extern void sched_dead(struct task_struct *p); void __noreturn do_task_dead(void); diff --git a/include/linux/security.h b/include/linux/security.h index 35355429648e3bd5fbe73f7dbefb955236933769..330029ef7e894b06e8c17399f714d901bb514627 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -121,10 +121,12 @@ enum lockdown_reason { LOCKDOWN_DEBUGFS, LOCKDOWN_XMON_WR, LOCKDOWN_BPF_WRITE_USER, + LOCKDOWN_DBG_WRITE_KERNEL, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_KCORE, LOCKDOWN_KPROBES, LOCKDOWN_BPF_READ, + LOCKDOWN_DBG_READ_KERNEL, LOCKDOWN_PERF, LOCKDOWN_TRACEFS, LOCKDOWN_XMON_RW, diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index ff63c2963359d295dae0a93c19382d985056abb7..35b26743dbb283c6a0d5f9de886d101ea407a3a8 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -463,6 +463,8 @@ extern void uart_handle_cts_change(struct uart_port *uport, extern void uart_insert_char(struct uart_port *port, unsigned int status, unsigned int overrun, unsigned int ch, unsigned int flag); +void uart_xchar_out(struct uart_port *uport, int offset); + #ifdef CONFIG_MAGIC_SYSRQ_SERIAL #define SYSRQ_TIMEOUT (HZ * 5) diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h index 6f294911c6af0015041ec5e30893be09c902c56d..1911cd35843b4c38c2c401f336eeb94e15321ff6 100644 --- a/include/linux/share_pool.h +++ b/include/linux/share_pool.h @@ -10,11 +10,13 @@ #include #include #include +#include #define SP_HUGEPAGE (1 << 0) #define SP_HUGEPAGE_ONLY (1 << 1) #define SP_DVPP (1 << 2) #define SP_SPEC_NODE_ID (1 << 3) +#define SP_PROT_RO (1 << 16) #define DEVICE_ID_BITS 4UL #define DEVICE_ID_MASK ((1UL << DEVICE_ID_BITS) - 1UL) @@ -24,7 +26,7 @@ #define NODE_ID_SHIFT (DEVICE_ID_SHIFT + DEVICE_ID_BITS) #define SP_FLAG_MASK (SP_HUGEPAGE | SP_HUGEPAGE_ONLY | SP_DVPP | \ - SP_SPEC_NODE_ID | \ + SP_SPEC_NODE_ID | SP_PROT_RO | \ (DEVICE_ID_MASK << DEVICE_ID_SHIFT) | \ (NODE_ID_MASK << NODE_ID_SHIFT)) @@ -38,6 +40,11 @@ #define SPG_ID_AUTO_MIN 100000 #define SPG_ID_AUTO_MAX 199999 #define SPG_ID_AUTO 200000 /* generate group id 
automatically */ +#define SPG_ID_LOCAL_MIN 200001 +#define SPG_ID_LOCAL_MAX 299999 + +#define SPG_FLAG_NON_DVPP (1 << 0) +#define SPG_FLAG_MASK (SPG_FLAG_NON_DVPP) #define MAX_DEVID 8 /* the max num of Da-vinci devices */ @@ -100,6 +107,24 @@ struct sp_proc_stat { atomic64_t k2u_size; }; +/* + * address space management + */ +struct sp_mapping { + unsigned long flag; + atomic_t user; + unsigned long start[MAX_DEVID]; + unsigned long end[MAX_DEVID]; + struct rb_root area_root; + + struct rb_node *free_area_cache; + unsigned long cached_hole_size; + unsigned long cached_vstart; + + /* list head for all groups attached to this mapping, dvpp mapping only */ + struct list_head group_head; +}; + /* Processes in the same sp_group can share memory. * Memory layout for share pool: * @@ -124,6 +149,7 @@ struct sp_proc_stat { */ struct sp_group { int id; + unsigned long flag; struct file *file; struct file *file_hugetlb; /* number of process in this group */ @@ -141,6 +167,10 @@ struct sp_group { atomic_t use_count; /* protect the group internal elements, except spa_list */ struct rw_semaphore rw_lock; + /* list node for dvpp mapping */ + struct list_head mnode; + struct sp_mapping *dvpp; + struct sp_mapping *normal; }; /* a per-process(per mm) struct which manages a sp_group_node list */ @@ -154,6 +184,11 @@ struct sp_group_master { struct list_head node_list; struct mm_struct *mm; struct sp_proc_stat *stat; + /* + * Used to apply for the shared pool memory of the current process. + * For example, sp_alloc non-share memory or k2task. + */ + KABI_EXTEND(struct sp_group *local) }; /* @@ -178,6 +213,7 @@ struct sp_walk_data { unsigned long uva_aligned; unsigned long page_size; bool is_hugepage; + bool is_page_type_set; pmd_t *pmd; }; @@ -223,8 +259,8 @@ extern int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns, extern void *sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id); extern void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id); -extern int sp_free(unsigned long addr); -extern int mg_sp_free(unsigned long addr); +extern int sp_free(unsigned long addr, int id); +extern int mg_sp_free(unsigned long addr, int id); extern void *sp_make_share_k2u(unsigned long kva, unsigned long size, unsigned long sp_flags, int pid, int spg_id); @@ -235,7 +271,7 @@ extern void *sp_make_share_u2k(unsigned long uva, unsigned long size, int pid); extern void *mg_sp_make_share_u2k(unsigned long uva, unsigned long size, int pid); extern int sp_unshare(unsigned long va, unsigned long size, int pid, int spg_id); -extern int mg_sp_unshare(unsigned long va, unsigned long size); +extern int mg_sp_unshare(unsigned long va, unsigned long size, int id); extern int sp_walk_page_range(unsigned long uva, unsigned long size, struct task_struct *tsk, struct sp_walk_data *sp_walk_data); @@ -254,8 +290,8 @@ extern bool mg_sp_config_dvpp_range(size_t start, size_t size, int device_id, in extern bool is_sharepool_addr(unsigned long addr); extern bool mg_is_sharepool_addr(unsigned long addr); -extern int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id); -extern int sp_group_add_task(int pid, int spg_id); +extern int sp_id_of_current(void); +extern int mg_sp_id_of_current(void); extern void sp_area_drop(struct vm_area_struct *vma); extern int sp_group_exit(struct mm_struct *mm); @@ -362,12 +398,12 @@ static inline void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int return NULL; } -static inline int sp_free(unsigned long addr) +static inline int sp_free(unsigned 
long addr, int id) { return -EPERM; } -static inline int mg_sp_free(unsigned long addr) +static inline int mg_sp_free(unsigned long addr, int id) { return -EPERM; } @@ -399,11 +435,20 @@ static inline int sp_unshare(unsigned long va, unsigned long size, int pid, int return -EPERM; } -static inline int mg_sp_unshare(unsigned long va, unsigned long size) +static inline int mg_sp_unshare(unsigned long va, unsigned long size, int id) { return -EPERM; } +static inline int sp_id_of_current(void) +{ + return -EPERM; +} + +static inline int mg_sp_id_of_current(void) +{ + return -EPERM; +} static inline void sp_init_mm(struct mm_struct *mm) { @@ -468,10 +513,6 @@ static inline struct sp_proc_stat *sp_get_proc_stat_ref(struct mm_struct *mm) return NULL; } -static inline void sp_proc_stat_drop(struct sp_proc_stat *stat) -{ -} - static inline void spa_overview_show(struct seq_file *seq) { } diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index a5a5d1d4d7b1d23681a7b7f88f9049b550d52416..93240799a404f996eaf747a5f072880abb73b6c1 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -18,6 +18,7 @@ struct shmem_inode_info { unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ + pgoff_t fallocend; /* highest fallocate endindex */ struct list_head shrinklist; /* shrinkable hpage inodes */ struct list_head swaplist; /* chain of maybes on swap */ struct shared_policy policy; /* NUMA memory alloc policy */ @@ -115,6 +116,18 @@ static inline bool shmem_file(struct file *file) return shmem_mapping(file->f_mapping); } +/* + * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages + * beyond i_size's notion of EOF, which fallocate has committed to reserving: + * which split_huge_page() must therefore not delete. This use of a single + * "fallocend" per inode errs on the side of not deleting a reservation when + * in doubt: there are plenty of cases when it preserves unreserved pages. 
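For context, a sketch of how the helper defined just below is meant to be consumed on a truncation or huge-page split path (hypothetical caller, locking elided):

        pgoff_t eof = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        /* Never free pages below 'end': fallocate may have reserved them. */
        pgoff_t end = shmem_fallocend(inode, eof);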
+ */ +static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof) +{ + return max(eof, SHMEM_I(inode)->fallocend); +} + extern bool shmem_charge(struct inode *inode, long pages); extern void shmem_uncharge(struct inode *inode, long pages); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index e5f61bdd42a844f05160ecf170ea6578d7e154fb..68efccc15a879860ea158ab633fe4ed0e37c0916 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3609,6 +3609,7 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, unsigned int flags); int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int len); +int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); unsigned int skb_zerocopy_headlen(const struct sk_buff *from); int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 23d4e86a439588cb3dc1ffd8b48bb61808bbfb87..3994a69b85d2b2303c43cc28baa3623277d04b37 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -99,6 +99,7 @@ struct sk_psock { struct sk_psock_parser parser; struct sk_buff_head ingress_skb; struct list_head ingress_msg; + spinlock_t ingress_lock; unsigned long state; struct list_head link; spinlock_t link_lock; @@ -107,6 +108,7 @@ struct sk_psock { void (*saved_close)(struct sock *sk, long timeout); void (*saved_write_space)(struct sock *sk); struct proto *sk_proto; + struct mutex work_mutex; struct sk_psock_work_state work_state; struct work_struct work; union { @@ -295,10 +297,77 @@ static inline struct sk_psock *sk_psock(const struct sock *sk) return rcu_dereference_sk_user_data(sk); } +static inline void sk_psock_set_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + set_bit(bit, &psock->state); +} + +static inline void sk_psock_clear_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + clear_bit(bit, &psock->state); +} + +static inline bool sk_psock_test_state(const struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + return test_bit(bit, &psock->state); +} + +static inline void sock_drop(struct sock *sk, struct sk_buff *skb) +{ + sk_drops_add(sk, skb); + kfree_skb(skb); +} + static inline void sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { - list_add_tail(&msg->list, &psock->ingress_msg); + spin_lock_bh(&psock->ingress_lock); + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + list_add_tail(&msg->list, &psock->ingress_msg); + else { + sk_msg_free(psock->sk, msg); + kfree(msg); + } + spin_unlock_bh(&psock->ingress_lock); +} + +static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock) +{ + struct sk_msg *msg; + + spin_lock_bh(&psock->ingress_lock); + msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list); + if (msg) + list_del(&msg->list); + spin_unlock_bh(&psock->ingress_lock); + return msg; +} + +static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock) +{ + struct sk_msg *msg; + + spin_lock_bh(&psock->ingress_lock); + msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list); + spin_unlock_bh(&psock->ingress_lock); + return msg; +} + +static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock, + struct sk_msg *msg) +{ + struct sk_msg *ret; + + spin_lock_bh(&psock->ingress_lock); + if (list_is_last(&msg->list, &psock->ingress_msg)) + ret = NULL; + else + ret = list_next_entry(msg, list); + 
spin_unlock_bh(&psock->ingress_lock); + return ret; } static inline bool sk_psock_queue_empty(const struct sk_psock *psock) @@ -306,6 +375,13 @@ static inline bool sk_psock_queue_empty(const struct sk_psock *psock) return psock ? list_empty(&psock->ingress_msg) : true; } +static inline void kfree_sk_msg(struct sk_msg *msg) +{ + if (msg->skb) + consume_skb(msg->skb); + kfree(msg); +} + static inline void sk_psock_report_error(struct sk_psock *psock, int err) { struct sock *sk = psock->sk; @@ -315,6 +391,7 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err) } struct sk_psock *sk_psock_init(struct sock *sk, int node); +void sk_psock_stop(struct sk_psock *psock, bool wait); int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock); void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock); @@ -375,24 +452,6 @@ static inline void sk_psock_restore_proto(struct sock *sk, } } -static inline void sk_psock_set_state(struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - set_bit(bit, &psock->state); -} - -static inline void sk_psock_clear_state(struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - clear_bit(bit, &psock->state); -} - -static inline bool sk_psock_test_state(const struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - return test_bit(bit, &psock->state); -} - static inline struct sk_psock *sk_psock_get(struct sock *sk) { struct sk_psock *psock; @@ -405,7 +464,6 @@ static inline struct sk_psock *sk_psock_get(struct sock *sk) return psock; } -void sk_psock_stop(struct sock *sk, struct sk_psock *psock); void sk_psock_drop(struct sock *sk, struct sk_psock *psock); static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index cf27b080e148216905d65c8be80630e2fb624784..b1af87330f863e1aaa346edab61996504d601948 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -618,7 +618,7 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, static inline struct ti_sci_resource * devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, - u32 dev_id, u32 sub_type); + u32 dev_id, u32 sub_type) { return ERR_PTR(-EINVAL); } diff --git a/include/linux/statfs.h b/include/linux/statfs.h index 20f695b90aab10382fe40b919e8bcb48368d7f60..02c862686ea3f850ab1c2a8f4ef1f9e8ff2562e0 100644 --- a/include/linux/statfs.h +++ b/include/linux/statfs.h @@ -4,6 +4,7 @@ #include #include +#include struct kstatfs { long f_type; @@ -50,4 +51,11 @@ static inline __kernel_fsid_t u64_to_fsid(u64 v) return (__kernel_fsid_t){.val = {(u32)v, (u32)(v>>32)}}; } +/* Fold 16 bytes uuid to 64 bit fsid */ +static inline __kernel_fsid_t uuid_to_fsid(__u8 *uuid) +{ + return u64_to_fsid(le64_to_cpup((void *)uuid) ^ + le64_to_cpup((void *)(uuid + sizeof(u64)))); +} + #endif diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index b998e4b7369129328ad616831e1f73245ca71fef..6d9d1520612b87e07dedff9875c49f302aa70075 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -603,6 +603,8 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr, if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) return -EBADMSG; + if (len > SIZE_MAX / sizeof(*p)) + return -EBADMSG; p = xdr_inline_decode(xdr, len * sizeof(*p)); if (unlikely(!p)) return -EBADMSG; diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 
8af13ba60c7e4500153ba1612f80a96f4ddae05d..4bcd65679cee0c46d48040330572d40d26715903 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -430,15 +430,7 @@ struct platform_hibernation_ops { #ifdef CONFIG_HIBERNATION /* kernel/power/snapshot.c */ -extern void __register_nosave_region(unsigned long b, unsigned long e, int km); -static inline void __init register_nosave_region(unsigned long b, unsigned long e) -{ - __register_nosave_region(b, e, 0); -} -static inline void __init register_nosave_region_late(unsigned long b, unsigned long e) -{ - __register_nosave_region(b, e, 1); -} +extern void register_nosave_region(unsigned long b, unsigned long e); extern int swsusp_page_is_forbidden(struct page *); extern void swsusp_set_page_free(struct page *); extern void swsusp_unset_page_free(struct page *); @@ -457,7 +449,6 @@ int pfn_is_nosave(unsigned long pfn); int hibernate_quiet_exec(int (*func)(void *data), void *data); #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} -static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } static inline void swsusp_set_page_free(struct page *p) {} static inline void swsusp_unset_page_free(struct page *p) {} @@ -505,14 +496,14 @@ extern void ksys_sync_helper(void); /* drivers/base/power/wakeup.c */ extern bool events_check_enabled; -extern unsigned int pm_wakeup_irq; extern suspend_state_t pm_suspend_target_state; extern bool pm_wakeup_pending(void); extern void pm_system_wakeup(void); extern void pm_system_cancel_wakeup(void); -extern void pm_wakeup_clear(bool reset); +extern void pm_wakeup_clear(unsigned int irq_number); extern void pm_system_irq_wakeup(unsigned int irq_number); +extern unsigned int pm_wakeup_irq(void); extern bool pm_get_wakeup_count(unsigned int *count, bool block); extern bool pm_save_wakeup_count(unsigned int count); extern void pm_wakep_autosleep_enabled(bool set); diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 0c6c1de6f3b7782c833090aed78a28b4efdf7c3f..18a9949bba187e7979a4d409e0485ae3d149f7ca 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -582,4 +582,18 @@ struct tee_client_driver { #define to_tee_client_driver(d) \ container_of(d, struct tee_client_driver, driver) +/** + * teedev_open() - Open a struct tee_device + * @teedev: Device to open + * + * @return a pointer to struct tee_context on success or an ERR_PTR on failure. 
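A minimal sketch of the intended pairing of the two helpers declared below; error handling is condensed and 'teedev' is assumed to be a valid struct tee_device obtained elsewhere:

        struct tee_context *ctx;

        ctx = teedev_open(teedev);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
        /* ... issue requests through ctx ... */
        teedev_close_context(ctx);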
+ */ +struct tee_context *teedev_open(struct tee_device *teedev); + +/** + * teedev_close_context() - closes a struct tee_context + * @ctx: The struct tee_context to close + */ +void teedev_close_context(struct tee_context *ctx); + #endif /*__TEE_DRV_H*/ diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index b480e1a07ed85198e4d43dde761ae9420734dc95..5913deb26219a363a6e6cc6f2f56ab1ce625a52b 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -196,6 +196,10 @@ static inline void tracehook_notify_resume(struct pt_regs *regs) mem_cgroup_handle_over_high(); blkcg_maybe_throttle_current(); +#ifdef CONFIG_QOS_SCHED + sched_qos_offline_wait(); +#endif + } #endif /* */ diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h index 0164fed31b06cc8f5f24c8dab3651961c89528f9..b9ccaeb8a4aef14e08e092a4da405c4f22764c67 100644 --- a/include/linux/usb/role.h +++ b/include/linux/usb/role.h @@ -90,6 +90,12 @@ fwnode_usb_role_switch_get(struct fwnode_handle *node) static inline void usb_role_switch_put(struct usb_role_switch *sw) { } +static inline struct usb_role_switch * +usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode) +{ + return NULL; +} + static inline struct usb_role_switch * usb_role_switch_register(struct device *parent, const struct usb_role_switch_desc *desc) diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 8ecc2e208d6137715fb1455f4b5af1e79f9d9f7e..90c5ad556809721a37fe6512bc7265fb9e251e18 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -135,7 +135,6 @@ void virtio_break_device(struct virtio_device *dev); void virtio_config_changed(struct virtio_device *dev); void virtio_config_disable(struct virtio_device *dev); void virtio_config_enable(struct virtio_device *dev); -int virtio_finalize_features(struct virtio_device *dev); #ifdef CONFIG_PM_SLEEP int virtio_device_freeze(struct virtio_device *dev); int virtio_device_restore(struct virtio_device *dev); diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 8519b3ae5d52ef4d78763109ab7b419721289880..b341dd62aa4da9843f2af1d3132c7132dcc48341 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -62,8 +62,9 @@ struct virtio_shm_region { * Returns the first 64 feature bits (all we currently need). * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device - * This gives the final feature bits for the device: it can change + * This sends the driver feature bits to the device: it can change * the dev->feature bits if it wants. + * Note: despite the name this can be called any number of times. 
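A rough sketch of the negotiation sequence this contract implies, loosely modelled on the virtio core (not the exact upstream flow; 'driver_features' stands in for the mask the driver computed):

        vdev->features &= driver_features;      /* trim to what the driver supports */

        /* Per the note above, this may legitimately run more than once,
         * e.g. again after a device reset. */
        err = vdev->config->finalize_features(vdev);
        if (err)
                return err;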
* Returns 0 on success or error status * @bus_name: return the bus name associated with the device (optional) * vdev: the virtio_device diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h index c994d1b2cdbaa2abb313170749d0ee396a807a90..3b9a40ae8bdba76dec83554989fa67f98cb59aba 100644 --- a/include/linux/watch_queue.h +++ b/include/linux/watch_queue.h @@ -28,7 +28,8 @@ struct watch_type_filter { struct watch_filter { union { struct rcu_head rcu; - unsigned long type_filter[2]; /* Bitmask of accepted types */ + /* Bitmask of accepted types */ + DECLARE_BITMAP(type_filter, WATCH_TYPE__NR); }; u32 nr_filters; /* Number of filters */ struct watch_type_filter filters[]; diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 78ea3e332688f9985336c25d943f24345fdbc083..e7ce719838b5eca5d1824170e33fd6d176d9b983 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -6,6 +6,8 @@ #define RTR_SOLICITATION_INTERVAL (4*HZ) #define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */ +#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */ + #define TEMP_VALID_LIFETIME (7*86400) #define TEMP_PREFERRED_LIFETIME (86400) #define REGEN_MAX_RETRY (3) diff --git a/include/net/af_unix.h b/include/net/af_unix.h index f42fdddecd417dcf2660d94a79796f912f31db88..ae41d8ee970a01a2a10992b75e5aacb406f8ef81 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -20,13 +20,13 @@ struct sock *unix_peer_get(struct sock *sk); #define UNIX_HASH_BITS 8 extern unsigned int unix_tot_inflight; -extern spinlock_t unix_table_lock; +extern spinlock_t unix_table_locks[2 * UNIX_HASH_SIZE]; extern struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE]; struct unix_address { refcount_t refcnt; int len; - unsigned int hash; + unsigned int hash; /* deprecated */ struct sockaddr_un name[]; }; diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index b1c7172869939c2d0bfb2f08f11d773e1ed1028b..4d8589244dc75acb2d8efbea4521c2a67d0008c4 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -197,7 +197,8 @@ struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr); struct sock *vsock_find_connected_socket(struct sockaddr_vm *src, struct sockaddr_vm *dst); void vsock_remove_sock(struct vsock_sock *vsk); -void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)); +void vsock_for_each_connected_socket(struct vsock_transport *transport, + void (*fn)(struct sock *sk)); int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk); bool vsock_find_cid(unsigned int cid); diff --git a/include/net/arp.h b/include/net/arp.h index 4950191f6b2bf424519c7ecf3483336739fea143..4a23a97195f3357962c69d73923f5b6be0a53d2e 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -71,6 +71,7 @@ void arp_send(int type, int ptype, __be32 dest_ip, const unsigned char *src_hw, const unsigned char *th); int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir); void arp_ifdown(struct net_device *dev); +int arp_invalidate(struct net_device *dev, __be32 ip, bool force); struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, struct net_device *dev, __be32 src_ip, diff --git a/include/net/ax25.h b/include/net/ax25.h index 8b7eb46ad72d8804c1ffaa3943bb2816113239d8..aadff553e4b734aa6af206ca65f64fff5119eba9 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -236,6 +236,7 @@ typedef struct ax25_dev { #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) ax25_dama_info dama; #endif + refcount_t refcount; } ax25_dev; typedef struct ax25_cb { 
@@ -290,6 +291,17 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25) } } +static inline void ax25_dev_hold(ax25_dev *ax25_dev) +{ + refcount_inc(&ax25_dev->refcount); +} + +static inline void ax25_dev_put(ax25_dev *ax25_dev) +{ + if (refcount_dec_and_test(&ax25_dev->refcount)) { + kfree(ax25_dev); + } +} static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev) { skb->dev = dev; diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 9125effbf4483dc82e5a1749f4beff5d8baca929..3fecc4a411a13a52d24f717d053dd6f64014c370 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -180,19 +180,21 @@ void bt_err_ratelimited(const char *fmt, ...); #define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__) #endif +#define bt_dev_name(hdev) ((hdev) ? (hdev)->name : "null") + #define bt_dev_info(hdev, fmt, ...) \ - BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_INFO("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_warn(hdev, fmt, ...) \ - BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_WARN("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_err(hdev, fmt, ...) \ - BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_ERR("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_dbg(hdev, fmt, ...) \ - BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_DBG("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_warn_ratelimited(hdev, fmt, ...) \ - bt_warn_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + bt_warn_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_err_ratelimited(hdev, fmt, ...) \ - bt_err_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + bt_err_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) /* Connection and socket states */ enum { diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index c8696a230b7d922479b6e2d8368f65050d4e2cc8..1a28f299a4c6138ac3e932aa64a90d12b35a37ff 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -262,7 +262,7 @@ struct ad_system { struct ad_bond_info { struct ad_system system; /* 802.3ad system structure */ struct bond_3ad_stats stats; - u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */ + atomic_t agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */ u16 aggregator_identifier; }; diff --git a/include/net/checksum.h b/include/net/checksum.h index 0d05b9e8690b8bdf7c526f9625484d2bb54c8106..8b7d0c31598f5158fdae9a513c3cf8f01ffa9d99 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -22,7 +22,7 @@ #include #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER -static inline +static __always_inline __wsum csum_and_copy_from_user (const void __user *src, void *dst, int len) { @@ -33,7 +33,7 @@ __wsum csum_and_copy_from_user (const void __user *src, void *dst, #endif #ifndef HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user +static __always_inline __wsum csum_and_copy_to_user (const void *src, void __user *dst, int len) { __wsum sum = csum_partial(src, len, ~0U); @@ -45,7 +45,7 @@ static __inline__ __wsum csum_and_copy_to_user #endif #ifndef _HAVE_ARCH_CSUM_AND_COPY -static inline __wsum +static __always_inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) { memcpy(dst, src, len); @@ -54,7 +54,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len) #endif #ifndef HAVE_ARCH_CSUM_ADD -static inline __wsum csum_add(__wsum csum, __wsum addend) +static 
__always_inline __wsum csum_add(__wsum csum, __wsum addend) { u32 res = (__force u32)csum; res += (__force u32)addend; @@ -62,12 +62,12 @@ static inline __wsum csum_add(__wsum csum, __wsum addend) } #endif -static inline __wsum csum_sub(__wsum csum, __wsum addend) +static __always_inline __wsum csum_sub(__wsum csum, __wsum addend) { return csum_add(csum, ~addend); } -static inline __sum16 csum16_add(__sum16 csum, __be16 addend) +static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend) { u16 res = (__force u16)csum; @@ -75,12 +75,12 @@ static inline __sum16 csum16_add(__sum16 csum, __be16 addend) return (__force __sum16)(res + (res < (__force u16)addend)); } -static inline __sum16 csum16_sub(__sum16 csum, __be16 addend) +static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend) { return csum16_add(csum, ~addend); } -static inline __wsum +static __always_inline __wsum csum_block_add(__wsum csum, __wsum csum2, int offset) { u32 sum = (__force u32)csum2; @@ -92,36 +92,37 @@ csum_block_add(__wsum csum, __wsum csum2, int offset) return csum_add(csum, (__force __wsum)sum); } -static inline __wsum +static __always_inline __wsum csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) { return csum_block_add(csum, csum2, offset); } -static inline __wsum +static __always_inline __wsum csum_block_sub(__wsum csum, __wsum csum2, int offset) { return csum_block_add(csum, ~csum2, offset); } -static inline __wsum csum_unfold(__sum16 n) +static __always_inline __wsum csum_unfold(__sum16 n) { return (__force __wsum)n; } -static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) +static __always_inline +__wsum csum_partial_ext(const void *buff, int len, __wsum sum) { return csum_partial(buff, len, sum); } #define CSUM_MANGLED_0 ((__force __sum16)0xffff) -static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) +static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) { *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); } -static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) +static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) { __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); @@ -134,11 +135,16 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) * m : old value of a 16bit field * m' : new value of a 16bit field */ -static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) +static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) { *sum = ~csum16_add(csum16_sub(~(*sum), old), new); } +static inline void csum_replace(__wsum *csum, __wsum old, __wsum new) +{ + *csum = csum_add(csum_sub(*csum, old), new); +} + struct sk_buff; void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, __be32 from, __be32 to, bool pseudohdr); @@ -148,16 +154,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, __wsum diff, bool pseudohdr); -static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, - __be16 from, __be16 to, - bool pseudohdr) +static __always_inline +void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, + __be16 from, __be16 to, bool pseudohdr) { inet_proto_csum_replace4(sum, skb, (__force __be32)from, (__force __be32)to, pseudohdr); } -static inline __wsum remcsum_adjust(void *ptr, __wsum csum, - int start, int offset) +static __always_inline __wsum remcsum_adjust(void *ptr, __wsum 
csum, + int start, int offset) { __sum16 *psum = (__sum16 *)(ptr + offset); __wsum delta; @@ -173,7 +179,7 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum, return delta; } -static inline void remcsum_unadjust(__sum16 *psum, __wsum delta) +static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta) { *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum)); } diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 14efa0ded75dd93d6f63735a95feaf72de6fdef7..adab27ba1ecbf054f5284e4342b1551b104d9a2c 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -123,8 +123,20 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb) memcpy(&new_md->u.tun_info, &md_dst->u.tun_info, sizeof(struct ip_tunnel_info) + md_size); +#ifdef CONFIG_DST_CACHE + /* Unclone the dst cache if there is one */ + if (new_md->u.tun_info.dst_cache.cache) { + int ret; + + ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC); + if (ret) { + metadata_dst_free(new_md); + return ERR_PTR(ret); + } + } +#endif + skb_dst_drop(skb); - dst_hold(&new_md->dst); skb_dst_set(skb, &new_md->dst); return new_md; } diff --git a/include/net/esp.h b/include/net/esp.h index 9c5637d41d95168052686caf7b3ff51b517e6b9b..90cd02ff77ef67f7f65e2c53127c4510c23bd4a9 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -4,6 +4,8 @@ #include +#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER) + struct ip_esp_hdr; static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb) diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index bac79e817776c6e65ed37f5b5c6af61a5df59ec0..4cbd413e71a3faac2e694b4713f62b42cf9a75ed 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -116,8 +116,15 @@ int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net); static inline void fqdir_pre_exit(struct fqdir *fqdir) { - fqdir->high_thresh = 0; /* prevent creation of new frags */ - fqdir->dead = true; + /* Prevent creation of new frags. + * Pairs with READ_ONCE() in inet_frag_find(). + */ + WRITE_ONCE(fqdir->high_thresh, 0); + + /* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire() + * and ip6frag_expire_frag_queue(). 
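For the WRITE_ONCE()/READ_ONCE() pairing described above, the reader side looks roughly like this (condensed from the inet_frag_find() lookup path, not the full function):

        /* Pairs with fqdir_pre_exit(): a zero threshold means shutdown. */
        if (!READ_ONCE(fqdir->high_thresh))
                return NULL;    /* refuse to create new frag queues */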
+ */ + WRITE_ONCE(fqdir->dead, true); } void fqdir_exit(struct fqdir *fqdir); diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index ca6a3ea9057ec431b2dd30726c2e6d06be94fe72..d4d611064a76f587217ac5b51b91d5df7814758e 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -419,7 +419,7 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr) } int __inet_hash_connect(struct inet_timewait_death_row *death_row, - struct sock *sk, u32 port_offset, + struct sock *sk, u64 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **)); diff --git a/include/net/ip.h b/include/net/ip.h index 5538e54d4620c04aaab1781391b44d59a2ea894d..6b87c7ef645904225d9e80d3fe9c996c23c99d71 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -291,7 +291,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) #define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd) -u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct); +static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt) +{ + return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt); +} + unsigned long snmp_fold_field(void __percpu *mib, int offt); #if BITS_PER_LONG==32 u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct, @@ -506,19 +510,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb, { struct iphdr *iph = ip_hdr(skb); + /* We had many attacks based on IPID, use the private + * generator as much as we can. + */ + if (sk && inet_sk(sk)->inet_daddr) { + iph->id = htons(inet_sk(sk)->inet_id); + inet_sk(sk)->inet_id += segs; + return; + } if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { - /* This is only to work around buggy Windows95/2000 - * VJ compression implementations. If the ID field - * does not change, they drop every other packet in - * a TCP stream using header compression. 
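A worked example of the rewritten fast path above: for a connected socket emitting a GSO burst of three segments, the on-wire ID comes from the per-socket counter and the counter then advances by the segment count (values purely illustrative):

        /* inet_id == 0x002a before the burst, segs == 3 */
        iph->id = htons(inet_sk(sk)->inet_id);  /* wire id 0x002a */
        inet_sk(sk)->inet_id += segs;           /* next burst uses 0x002d */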
- */ - if (sk && inet_sk(sk)->inet_daddr) { - iph->id = htons(inet_sk(sk)->inet_id); - inet_sk(sk)->inet_id += segs; - } else { - iph->id = 0; - } + iph->id = 0; } else { + /* Unfortunately we need the big hammer to get a suitable IPID */ __ip_select_ident(net, iph, segs); } } diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index e6b6766d620a11b14829b1e7f570c83fa034023e..d8d79c6f98e6babfbc98592a1a4480b49f0119dd 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -289,7 +289,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i, fn = rcu_dereference(f6i->fib6_node); if (fn) { - *cookie = fn->fn_sernum; + *cookie = READ_ONCE(fn->fn_sernum); /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */ smp_rmb(); status = true; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index c0273ae502964b99aebcadff39bd15c950ff8c8a..9392a81a3ae4ca1f8a690a37c606fcbf7a35f3d3 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -390,17 +390,20 @@ static inline void txopt_put(struct ipv6_txoptions *opt) kfree_rcu(opt, rcu); } +#if IS_ENABLED(CONFIG_IPV6) struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label); extern struct static_key_false_deferred ipv6_flowlabel_exclusive; static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label) { - if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key)) + if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) && + READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl)) return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT); return NULL; } +#endif struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, struct ip6_flowlabel *fl, diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h index 851029ecff13cb617d41b4043f039ae59e832b82..0a4779175a5238d8989a777ef8417223b6157679 100644 --- a/include/net/ipv6_frag.h +++ b/include/net/ipv6_frag.h @@ -67,7 +67,8 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) struct sk_buff *head; rcu_read_lock(); - if (fq->q.fqdir->dead) + /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). 
*/ + if (READ_ONCE(fq->q.fqdir->dead)) goto out_rcu_unlock; spin_lock(&fq->q.lock); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 0acbd9c40a5f6dede3685893ba9d1c134b5144e0..2b2d9deed9071c2ed89ee958a32759cacb5c3504 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -342,4 +342,8 @@ nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info) #define MODULE_ALIAS_NFCT_HELPER(helper) \ MODULE_ALIAS("nfct-helper-" helper) +typedef int (*bpf_getorigdst_opt_func)(struct sock *sk, int optname, + void *optval, int *optlen, int dir); +extern bpf_getorigdst_opt_func bpf_getorigdst_opt; + #endif /* _NF_CONNTRACK_H */ diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index e770bba00066453885c147cfebaf2f3ed6f00124..b1d43894296a6ac35b9e69e90efa13f32ba9143e 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -37,7 +37,7 @@ void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *q void nf_unregister_queue_handler(struct net *net); void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); -void nf_queue_entry_get_refs(struct nf_queue_entry *entry); +bool nf_queue_entry_get_refs(struct nf_queue_entry *entry); void nf_queue_entry_free(struct nf_queue_entry *entry); static inline void init_hashrandom(u32 *jhash_initval) diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index ce5ed87accda51249937179c7efd7c478ea7b5a8..b2a28201f4fdea03abae09651237b644eb12b57d 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -83,6 +83,11 @@ struct netns_ipv6 { unsigned long ip6_rt_last_gc; #ifdef CONFIG_IPV6_MULTIPLE_TABLES unsigned int fib6_rules_require_fldissect; +#endif +#ifndef __GENKSYMS__ + unsigned char flowlabel_has_excl; +#endif +#ifdef CONFIG_IPV6_MULTIPLE_TABLES bool fib6_has_custom_rules; #ifdef CONFIG_IPV6_SUBTREES unsigned int fib6_routes_require_src; diff --git a/include/net/route.h b/include/net/route.h index ff021cab657e503e7db2044bc7c3309053d6c173..a07c277cd33e82b96d9075a8447438316c5ef181 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -369,7 +369,7 @@ static inline struct neighbour *ip_neigh_gw4(struct net_device *dev, { struct neighbour *neigh; - neigh = __ipv4_neigh_lookup_noref(dev, daddr); + neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr); if (unlikely(!neigh)) neigh = __neigh_create(&arp_tbl, &daddr, dev, false); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 3696246d336aa9c0f6d92dbf231f3ebae5806667..330094583c72d187bee75c098b8a1193e49539ef 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -1269,6 +1269,7 @@ struct psched_ratecfg { u64 rate_bytes_ps; /* bytes per second */ u32 mult; u16 overhead; + u16 mpu; u8 linklayer; u8 shift; }; @@ -1278,6 +1279,9 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, { len += r->overhead; + if (len < r->mpu) + len = r->mpu; + if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift; @@ -1300,6 +1304,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res, res->rate = min_t(u64, r->rate_bytes_ps, ~0U); res->overhead = r->overhead; + res->mpu = r->mpu; res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); } diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h index d7d2495f83c27cc6707fed26f8e433dd6d1eb295..dac91aa38c5af389648e84971b0ad17947ef844c 100644 --- 
a/include/net/secure_seq.h +++ b/include/net/secure_seq.h @@ -4,8 +4,8 @@ #include -u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); -u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, +u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); +u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport); u32 secure_tcp_seq(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport); diff --git a/include/net/sock.h b/include/net/sock.h index e2150b3ecd41916d1eae4ca4f80187581aa43235..7d068cf871a3a9138ce76db8e52568b17f57d07e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -303,6 +303,7 @@ struct bpf_local_storage; * @sk_ack_backlog: current listen backlog * @sk_max_ack_backlog: listen backlog set in listen() * @sk_uid: user id of owner + * @sk_gid: group id of owner * @sk_priority: %SO_PRIORITY setting * @sk_type: socket type (%SOCK_STREAM, etc) * @sk_protocol: which protocol this socket belongs in this network family @@ -428,7 +429,7 @@ struct sock { #ifdef CONFIG_XFRM struct xfrm_policy __rcu *sk_policy[2]; #endif - struct dst_entry *sk_rx_dst; + struct dst_entry __rcu *sk_rx_dst; struct dst_entry __rcu *sk_dst_cache; atomic_t sk_omem_alloc; int sk_sndbuf; @@ -527,7 +528,14 @@ struct sock { #endif struct rcu_head sk_rcu; +#ifndef __GENKSYMS__ + union { + kgid_t sk_gid; + u64 sk_gid_padding; + }; +#else KABI_RESERVE(1) +#endif KABI_RESERVE(2) KABI_RESERVE(3) KABI_RESERVE(4) @@ -1904,6 +1912,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent) parent->sk = sk; sk_set_socket(sk, parent); sk->sk_uid = SOCK_INODE(parent)->i_uid; + sk->sk_gid = SOCK_INODE(parent)->i_gid; security_sock_graft(sk, parent); write_unlock_bh(&sk->sk_callback_lock); } @@ -1916,6 +1925,11 @@ static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk) return sk ? sk->sk_uid : make_kuid(net->user_ns, 0); } +static inline kgid_t sock_net_gid(const struct net *net, const struct sock *sk) +{ + return sk ? sk->sk_gid : make_kgid(net->user_ns, 0); +} + static inline u32 net_tx_rndhash(void) { u32 v = prandom_u32(); @@ -2693,6 +2707,7 @@ extern int sysctl_optmem_max; extern __u32 sysctl_wmem_default; extern __u32 sysctl_rmem_default; +#define SKB_FRAG_PAGE_ORDER get_order(32768) DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) diff --git a/include/net/tls.h b/include/net/tls.h index fee4e55bd8d960fb2ac03f555e8ec3fde191d55e..166903b38f5ed1ef556fbed194bdc6cfed374876 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -67,7 +67,7 @@ #define MAX_IV_SIZE 16 #define TLS_MAX_REC_SEQ_SIZE 8 -/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes. +/* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes. * * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3] * @@ -75,6 +75,7 @@ * Hence b0 contains (3 - 1) = 2. 
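The same arithmetic carries over to the SM4-CCM define added below: CCM stores L' = L - 1 in the B0 flags octet, and with a 3-byte length field L = 3, so both B0 bytes are 2. A sketch of how the record layer consumes the constant, modelled on tls_sw.c and simplified:

        if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
                iv[0] = TLS_AES_CCM_IV_B0_BYTE; /* L' = 3 - 1 = 2 */
                iv_offset = 1;                  /* nonce bytes follow b0 */
        }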
*/ #define TLS_AES_CCM_IV_B0_BYTE 2 +#define TLS_SM4_CCM_IV_B0_BYTE 2 #define __TLS_INC_STATS(net, field) \ __SNMP_INC_STATS((net)->mib.tls_statistics, field) @@ -526,31 +527,30 @@ static inline void tls_advance_record_sn(struct sock *sk, tls_err_abort(sk, -EBADMSG); if (prot->version != TLS_1_3_VERSION) - tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, + tls_bigint_increment(ctx->iv + prot->salt_size, prot->iv_size); } static inline void tls_fill_prepend(struct tls_context *ctx, char *buf, size_t plaintext_len, - unsigned char record_type, - int version) + unsigned char record_type) { struct tls_prot_info *prot = &ctx->prot_info; size_t pkt_len, iv_size = prot->iv_size; pkt_len = plaintext_len + prot->tag_size; - if (version != TLS_1_3_VERSION) { + if (prot->version != TLS_1_3_VERSION) { pkt_len += iv_size; memcpy(buf + TLS_NONCE_OFFSET, - ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size); + ctx->tx.iv + prot->salt_size, iv_size); } /* we cover nonce explicit here as well, so buf should be of * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE */ - buf[0] = version == TLS_1_3_VERSION ? + buf[0] = prot->version == TLS_1_3_VERSION ? TLS_RECORD_TYPE_DATA : record_type; /* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */ buf[1] = TLS_1_2_VERSION_MINOR; @@ -563,18 +563,17 @@ static inline void tls_fill_prepend(struct tls_context *ctx, static inline void tls_make_aad(char *buf, size_t size, char *record_sequence, - int record_sequence_size, unsigned char record_type, - int version) + struct tls_prot_info *prot) { - if (version != TLS_1_3_VERSION) { - memcpy(buf, record_sequence, record_sequence_size); + if (prot->version != TLS_1_3_VERSION) { + memcpy(buf, record_sequence, prot->rec_seq_size); buf += 8; } else { - size += TLS_CIPHER_AES_GCM_128_TAG_SIZE; + size += prot->tag_size; } - buf[0] = version == TLS_1_3_VERSION ? + buf[0] = prot->version == TLS_1_3_VERSION ? 
TLS_RECORD_TYPE_DATA : record_type; buf[1] = TLS_1_2_VERSION_MAJOR; buf[2] = TLS_1_2_VERSION_MINOR; @@ -582,11 +581,11 @@ static inline void tls_make_aad, buf[4] = size & 0xFF; } -static inline void xor_iv_with_seq(int version, char *iv, char *seq) +static inline void xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq) { int i; - if (version == TLS_1_3_VERSION) { + if (prot->version == TLS_1_3_VERSION) { for (i = 0; i < 8; i++) iv[i + 4] ^= seq[i]; }
diff --git a/include/net/udp.h b/include/net/udp.h index 435cc009e6eaaa79d376d3d71156ed5d3186534b..4017f257628f34301d4a3571804b6e662453949e 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -467,6 +467,7 @@ void udp_init(void); DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key); void udp_encap_enable(void); +void udp_encap_disable(void); #if IS_ENABLED(CONFIG_IPV6) DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key); void udpv6_encap_enable(void);
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 2ea453dac87624ba1612fe008f08b1c7de9c0b1e..24ece06bad9eff61612d89f618d389ae0e2f03b8 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -177,9 +177,8 @@ static inline void udp_tunnel_encap_enable(struct socket *sock) #if IS_ENABLED(CONFIG_IPV6) if (sock->sk->sk_family == PF_INET6) ipv6_stub->udpv6_encap_enable(); - else #endif - udp_encap_enable(); + udp_encap_enable(); } #define UDP_TUNNEL_NIC_MAX_TABLES 4
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index f2875c629d06f19f2632e5cd0c779407d86e6e22..8bcf40e4e6e3c75a48101b6f7797b65c2109885c 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1552,7 +1552,6 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); int xfrm_init_replay(struct xfrm_state *x); -u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu); u32 xfrm_state_mtu(struct xfrm_state *x, int mtu); int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload); int xfrm_init_state(struct xfrm_state *x); @@ -1670,14 +1669,15 @@ int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, const struct xfrm_migrate *m, int num_bundles, const struct xfrm_kmaddress *k, const struct xfrm_encap_tmpl *encap); -struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net); +struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net, + u32 if_id); struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x, struct xfrm_migrate *m, struct xfrm_encap_tmpl *encap); int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles, struct xfrm_kmaddress *k, struct net *net, - struct xfrm_encap_tmpl *encap); + struct xfrm_encap_tmpl *encap, u32 if_id); #endif int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h index 5225a23f2d0e6ea9b931b444b5c0e6695ed24794..5679b9fb2b1eb82c0ab14e9217d5f0be03085193 100644 --- a/include/scsi/iscsi_if.h +++ b/include/scsi/iscsi_if.h @@ -761,6 +761,7 @@ enum iscsi_ping_status_code { and verification */ #define CAP_LOGIN_OFFLOAD 0x4000 /* offload session login */ +#define CAP_OPS_EXPAND 0x8000 /* iscsi_transport->ops_expand flag */ /* * These flags describe the reason for the stop_conn() call */
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index b47428d86a4bdedb5f6df48e901b04171194dfdc..881e4762d626b4f9547bb69adbf297967a1aac07 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -424,6 +424,7 @@ extern int iscsi_conn_start(struct iscsi_cls_conn *); extern void iscsi_conn_stop(struct iscsi_cls_conn *, int); extern int iscsi_conn_bind(struct iscsi_cls_session *, struct iscsi_cls_conn *, int); +extern void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active); extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err); extern void iscsi_session_failure(struct iscsi_session *session, enum iscsi_err err);
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 79e4903bd414ff13fe65fde9572467edf559ff67..698f2032807b022ad350154003ae3365565c1c33 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -356,6 +356,7 @@ enum sas_ha_state { SAS_HA_DRAINING, SAS_HA_ATA_EH_ACTIVE, SAS_HA_FROZEN, + SAS_HA_RESUMING, }; struct sas_ha_struct { @@ -660,6 +661,7 @@ extern int sas_register_ha(struct sas_ha_struct *); extern int sas_unregister_ha(struct sas_ha_struct *); extern void sas_prep_resume_ha(struct sas_ha_struct *sas_ha); extern void sas_resume_ha(struct sas_ha_struct *sas_ha); +extern void sas_resume_ha_no_sync(struct sas_ha_struct *sas_ha); extern void sas_suspend_ha(struct sas_ha_struct *sas_ha); int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates);
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index f28bb20d627134b7d072562cd4f816c824eabac7..f9297176dcb89598712a95d2e5524284a17e9c47 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -29,6 +29,15 @@ struct bsg_job; struct iscsi_bus_flash_session; struct iscsi_bus_flash_conn; +/* + * Expansion of iscsi_transport, used to add new members without breaking kabi. + */ +struct iscsi_transport_expand { + int (*tgt_dscvr)(struct Scsi_Host *shost, enum iscsi_tgt_dscvr type, + uint32_t enable, struct sockaddr *dst_addr); + void (*unbind_conn)(struct iscsi_cls_conn *conn, bool is_active); +}; + /** * struct iscsi_transport - iSCSI Transport template * @@ -123,8 +132,15 @@ struct iscsi_transport { int non_blocking); int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms); void (*ep_disconnect) (struct iscsi_endpoint *ep); +#ifdef __GENKSYMS__ int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type, uint32_t enable, struct sockaddr *dst_addr); +#else + /* + * once ops_expand is used, caps must be set to CAP_OPS_EXPAND + */ + struct iscsi_transport_expand *ops_expand; +#endif int (*set_path) (struct Scsi_Host *shost, struct iscsi_path *params); int (*set_iface_param) (struct Scsi_Host *shost, void *data, uint32_t len); @@ -196,18 +212,38 @@ enum iscsi_connection_state { ISCSI_CONN_BOUND, }; +#define ISCSI_CLS_CONN_BIT_CLEANUP 1 + struct iscsi_cls_conn { struct list_head conn_list; /* item in connlist */ - struct list_head conn_list_err; /* item in connlist_err */ + struct list_head conn_list_err; /* added back to avoid kabi breakage */ void *dd_data; /* LLD private data */ struct iscsi_transport *transport; uint32_t cid; /* connection id */ + /* + * This protects the conn startup and binding/unbinding of the ep to + * the conn. Unbinding includes ep_disconnect and stop_conn. + */ struct mutex ep_mutex; struct iscsi_endpoint *ep; struct device dev; /* sysfs transport/container device */ enum iscsi_connection_state state; }; +/* + * Wrapper around iscsi_cls_conn, used to add new members without breaking kabi.
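Tying the two kabi workarounds together: a caller is expected to reach the relocated ops only after checking the capability bit. This is a hypothetical call site, but 'caps' is the existing capability mask in struct iscsi_transport:

        if ((transport->caps & CAP_OPS_EXPAND) &&
            transport->ops_expand && transport->ops_expand->unbind_conn)
                transport->ops_expand->unbind_conn(conn, is_active);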
+ */ +struct iscsi_cls_conn_wrapper { + struct iscsi_cls_conn conn; + + /* Used when accessing flags and queueing work. */ + spinlock_t lock; + unsigned long flags; + struct work_struct cleanup_work; +}; + +#define conn_to_wrapper(ic_conn) \ + container_of(ic_conn, struct iscsi_cls_conn_wrapper, conn) #define iscsi_dev_to_conn(_dev) \ container_of(_dev, struct iscsi_cls_conn, dev) @@ -443,6 +479,7 @@ extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time); extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size); extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep); extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle); +extern void iscsi_put_endpoint(struct iscsi_endpoint *ep); extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd); extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *t, diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 2336bf9243e185cd0e8b84b4afe3add26faf59b6..5ffc2efedd9f83f9d4f2ab88f83e21a5bea6b9e4 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -398,6 +398,8 @@ struct snd_pcm_runtime { wait_queue_head_t tsleep; /* transfer sleep */ struct fasync_struct *fasync; bool stop_operating; /* sync_stop will be called */ + struct mutex buffer_mutex; /* protect for buffer changes */ + atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */ /* -- private section -- */ void *private_data; diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index cd74bffed5c69decf2e6cabb601f9b329b4ced2a..a8e97f84b65273db9250e67d0ee34a72d4888b27 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -55,8 +55,7 @@ /* tracepoints with more than 12 arguments will hit build error */ #define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__) -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +#define __BPF_DECLARE_TRACE(call, proto, args) \ static notrace void \ __bpf_trace_##call(void *__data, proto) \ { \ @@ -64,6 +63,10 @@ __bpf_trace_##call(void *__data, proto) \ CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \ } +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ + __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) + /* * This part is compiled out, it is only here as a build time check * to make sure that if the tracepoint handling changes, the @@ -90,8 +93,7 @@ __section("__bpf_raw_tp_map") = { \ #define FIRST(x, ...) 
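The iscsi_cls_conn_wrapper just introduced relies on the standard container_of() extension pattern: the exported struct stays first and unchanged, so its offsets (and the kABI checksums of its users) are preserved, while new state lives only in the enclosing wrapper. A generic sketch of the idiom, with hypothetical names:

#include <linux/kernel.h>

/* The legacy struct must remain the first member so a pointer to it
 * and a pointer to the wrapper coincide; core code that allocates the
 * wrapper can then reach the private extension via container_of().
 */
struct old_abi {
	int legacy_member;
};

struct old_abi_wrapper {
	struct old_abi base;		/* must remain the first member */
	unsigned long new_flags;	/* private extension, invisible to kABI */
};

#define to_wrapper(p) container_of(p, struct old_abi_wrapper, base)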
x -#undef DEFINE_EVENT_WRITABLE -#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \ +#define __CHECK_WRITABLE_BUF_SIZE(call, proto, args, size) \ static inline void bpf_test_buffer_##call(void) \ { \ /* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \ @@ -100,8 +102,12 @@ static inline void bpf_test_buffer_##call(void) \ */ \ FIRST(proto); \ (void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args))); \ -} \ -__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size) +} + +#undef DEFINE_EVENT_WRITABLE +#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \ + __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \ + __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size) #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ @@ -111,9 +117,22 @@ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size) #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#undef DECLARE_TRACE +#define DECLARE_TRACE(call, proto, args) \ + __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \ + __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), 0) + +#undef DECLARE_TRACE_WRITABLE +#define DECLARE_TRACE_WRITABLE(call, proto, args, size) \ + __CHECK_WRITABLE_BUF_SIZE(call, PARAMS(proto), PARAMS(args), size) \ + __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) \ + __DEFINE_EVENT(call, call, PARAMS(proto), PARAMS(args), size) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef DECLARE_TRACE_WRITABLE #undef DEFINE_EVENT_WRITABLE +#undef __CHECK_WRITABLE_BUF_SIZE #undef __DEFINE_EVENT #undef FIRST diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h index 7f42a3de59e6b46ff313738371439bf532a88a79..dd7d7c9efecdf33d7c28c89d769e7d00e70cae2d 100644 --- a/include/trace/events/cgroup.h +++ b/include/trace/events/cgroup.h @@ -59,8 +59,8 @@ DECLARE_EVENT_CLASS(cgroup, TP_STRUCT__entry( __field( int, root ) - __field( int, id ) __field( int, level ) + __field( u64, id ) __string( path, path ) ), @@ -71,7 +71,7 @@ DECLARE_EVENT_CLASS(cgroup, __assign_str(path, path); ), - TP_printk("root=%d id=%d level=%d path=%s", + TP_printk("root=%d id=%llu level=%d path=%s", __entry->root, __entry->id, __entry->level, __get_str(path)) ); @@ -126,8 +126,8 @@ DECLARE_EVENT_CLASS(cgroup_migrate, TP_STRUCT__entry( __field( int, dst_root ) - __field( int, dst_id ) __field( int, dst_level ) + __field( u64, dst_id ) __field( int, pid ) __string( dst_path, path ) __string( comm, task->comm ) @@ -142,7 +142,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate, __assign_str(comm, task->comm); ), - TP_printk("dst_root=%d dst_id=%d dst_level=%d dst_path=%s pid=%d comm=%s", + TP_printk("dst_root=%d dst_id=%llu dst_level=%d dst_path=%s pid=%d comm=%s", __entry->dst_root, __entry->dst_id, __entry->dst_level, __get_str(dst_path), __entry->pid, __get_str(comm)) ); @@ -171,8 +171,8 @@ DECLARE_EVENT_CLASS(cgroup_event, TP_STRUCT__entry( __field( int, root ) - __field( int, id ) __field( int, level ) + __field( u64, id ) __string( path, path ) __field( int, val ) ), @@ -185,7 +185,7 @@ DECLARE_EVENT_CLASS(cgroup_event, __entry->val = val; ), - TP_printk("root=%d id=%d level=%d path=%s val=%d", + TP_printk("root=%d id=%llu level=%d path=%s val=%d", __entry->root, __entry->id, __entry->level, __get_str(path), __entry->val) ); diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 
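To make the bpf_probe.h refactor above concrete, this is roughly what the reworked macros emit for a hypothetical two-argument tracepoint; the expansion is hand-written for illustration and elides the __bpf_raw_tp_map section plumbing:

/* DECLARE_TRACE(foo, TP_PROTO(int a, long b), TP_ARGS(a, b)) now also
 * generates a BPF shim via __BPF_DECLARE_TRACE(): COUNT_ARGS selects
 * bpf_trace_run2() and each argument is cast up to u64.
 */
static notrace void __bpf_trace_foo(void *__data, int a, long b)
{
	struct bpf_prog *prog = __data;

	bpf_trace_run2(prog, (u64)a, (u64)b);
}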
70ae5497b73a6d3da4ec9543aa34c6cebf073a2e..4973265655a7fb324d20943fef497c046457c252 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -95,6 +95,17 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B); { FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \ { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"}) +TRACE_DEFINE_ENUM(EXT4_FC_REASON_XATTR); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_CROSS_RENAME); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_NOMEM); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_SWAP_BOOT); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX); + #define show_fc_reason(reason) \ __print_symbolic(reason, \ { EXT4_FC_REASON_XATTR, "XATTR"}, \ @@ -2899,41 +2910,50 @@ TRACE_EVENT(ext4_fc_commit_stop, #define FC_REASON_NAME_STAT(reason) \ show_fc_reason(reason), \ - __entry->sbi->s_fc_stats.fc_ineligible_reason_count[reason] + __entry->fc_ineligible_rc[reason] TRACE_EVENT(ext4_fc_stats, - TP_PROTO(struct super_block *sb), - - TP_ARGS(sb), + TP_PROTO(struct super_block *sb), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(struct ext4_sb_info *, sbi) - __field(int, count) - ), + TP_ARGS(sb), - TP_fast_assign( - __entry->dev = sb->s_dev; - __entry->sbi = EXT4_SB(sb); - ), + TP_STRUCT__entry( + __field(dev_t, dev) + __array(unsigned int, fc_ineligible_rc, EXT4_FC_REASON_MAX) + __field(unsigned long, fc_commits) + __field(unsigned long, fc_ineligible_commits) + __field(unsigned long, fc_numblks) + ), - TP_printk("dev %d:%d fc ineligible reasons:\n" - "%s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d; " - "num_commits:%ld, ineligible: %ld, numblks: %ld", - MAJOR(__entry->dev), MINOR(__entry->dev), - FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR), - FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME), - FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE), - FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM), - FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT), - FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE), - FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR), - FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE), - FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA), - __entry->sbi->s_fc_stats.fc_num_commits, - __entry->sbi->s_fc_stats.fc_ineligible_commits, - __entry->sbi->s_fc_stats.fc_numblks) + TP_fast_assign( + int i; + __entry->dev = sb->s_dev; + for (i = 0; i < EXT4_FC_REASON_MAX; i++) { + __entry->fc_ineligible_rc[i] = + EXT4_SB(sb)->s_fc_stats.fc_ineligible_reason_count[i]; + } + __entry->fc_commits = EXT4_SB(sb)->s_fc_stats.fc_num_commits; + __entry->fc_ineligible_commits = + EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits; + __entry->fc_numblks = EXT4_SB(sb)->s_fc_stats.fc_numblks; + ), + + TP_printk("dev %d,%d fc ineligible reasons:\n" + "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u " + "num_commits:%lu, ineligible: %lu, numblks: %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR), + FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME), + FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM), + FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT), + FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR), + FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA), + __entry->fc_commits, 
__entry->fc_ineligible_commits, + __entry->fc_numblks) ); #define DEFINE_TRACE_DENTRY_EVENT(__type) \ diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h new file mode 100644 index 0000000000000000000000000000000000000000..ee82dad9d9dadc1f7d9bfe715a1e489ed5a2bd9c --- /dev/null +++ b/include/trace/events/fs.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fs + +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FS_H + +#include <linux/types.h> +#include <linux/fs.h> +#include <linux/tracepoint.h> + +#undef FS_DECLARE_TRACE +#ifdef DECLARE_TRACE_WRITABLE +#define FS_DECLARE_TRACE(call, proto, args, size) \ + DECLARE_TRACE_WRITABLE(call, PARAMS(proto), PARAMS(args), size) +#else +#define FS_DECLARE_TRACE(call, proto, args, size) \ + DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) +#endif + +FS_DECLARE_TRACE(fs_file_read, + TP_PROTO(struct fs_file_read_ctx *ctx, int version), + TP_ARGS(ctx, version), + sizeof(struct fs_file_read_ctx)); + +DECLARE_TRACE(fs_file_release, + TP_PROTO(struct inode *inode, struct file *filp), + TP_ARGS(inode, filp)); + +#endif /* _TRACE_FS_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index dc1805fbf893452fb1560fc5faab83047e716703..366d972ce735530781635d075e77c4d6e0d0ba39 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -87,6 +87,16 @@ #define IF_HAVE_PG_ARCH_2(flag,string) #endif +#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64) +#define IF_HAVE_PG_POOL(flag,string) ,{1UL << flag, string} +#define IF_HAVE_PG_RESERVE0(flag,string) ,{1UL << flag, string} +#define IF_HAVE_PG_RESERVE1(flag,string) ,{1UL << flag, string} +#else +#define IF_HAVE_PG_POOL(flag,string) +#define IF_HAVE_PG_RESERVE0(flag,string) +#define IF_HAVE_PG_RESERVE1(flag,string) +#endif + #ifdef CONFIG_PIN_MEMORY #define IF_HAVE_PG_HOTREPLACE(flag, string) ,{1UL << flag, string} #else @@ -114,17 +124,17 @@ {1UL << PG_mappedtodisk, "mappedtodisk" }, \ {1UL << PG_reclaim, "reclaim" }, \ {1UL << PG_swapbacked, "swapbacked" }, \ - {1UL << PG_unevictable, "unevictable" }, \ - {1UL << PG_pool, "pool" } \ + {1UL << PG_unevictable, "unevictable" } \ IF_HAVE_PG_MLOCK(PG_mlocked, "mlocked" ) \ IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \ IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \ IF_HAVE_PG_IDLE(PG_young, "young" ) \ IF_HAVE_PG_IDLE(PG_idle, "idle" ) \ IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) \ -IF_HAVE_PG_HOTREPLACE(PG_hotreplace, "hotreplace" ), \ - {1UL << PG_reserve_pgflag_0, "reserve_pgflag_0"}, \ - {1UL << PG_reserve_pgflag_1, "reserve_pgflag_1"} +IF_HAVE_PG_POOL(PG_pool, "pool" ) \ +IF_HAVE_PG_HOTREPLACE(PG_hotreplace, "hotreplace" ) \ +IF_HAVE_PG_RESERVE0(PG_reserve_pgflag_0,"reserve_pgflag_0") \ +IF_HAVE_PG_RESERVE1(PG_reserve_pgflag_1,"reserve_pgflag_1") #define show_page_flags(flags) \ (flags) ?
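The IF_HAVE_PG_* churn in mmflags.h above exists so the flag table stays syntactically valid whichever optional flags a configuration compiles in. The trick, sketched with a hypothetical flag name:

/* When the flag exists, the macro contributes a table entry with its
 * own leading comma; otherwise it expands to nothing. The base table
 * therefore ends without a trailing comma, and the optional entries
 * chain on cleanly in any combination.
 */
#ifdef CONFIG_EXAMPLE_FLAG
#define IF_HAVE_PG_EXAMPLE(flag, string) , {1UL << (flag), string}
#else
#define IF_HAVE_PG_EXAMPLE(flag, string)
#endif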
__print_flags(flags, "|", \ diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index e70c90116edaed0f14d3df2c7fa4cb98cec4eb22..4a3ab0ed6e0629d48c61c50b49cbdcb5d4638dea 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -83,12 +83,15 @@ enum rxrpc_call_trace { rxrpc_call_error, rxrpc_call_got, rxrpc_call_got_kernel, + rxrpc_call_got_timer, rxrpc_call_got_userid, rxrpc_call_new_client, rxrpc_call_new_service, rxrpc_call_put, rxrpc_call_put_kernel, rxrpc_call_put_noqueue, + rxrpc_call_put_notimer, + rxrpc_call_put_timer, rxrpc_call_put_userid, rxrpc_call_queued, rxrpc_call_queued_ref, @@ -278,12 +281,15 @@ enum rxrpc_tx_point { EM(rxrpc_call_error, "*E*") \ EM(rxrpc_call_got, "GOT") \ EM(rxrpc_call_got_kernel, "Gke") \ + EM(rxrpc_call_got_timer, "GTM") \ EM(rxrpc_call_got_userid, "Gus") \ EM(rxrpc_call_new_client, "NWc") \ EM(rxrpc_call_new_service, "NWs") \ EM(rxrpc_call_put, "PUT") \ EM(rxrpc_call_put_kernel, "Pke") \ - EM(rxrpc_call_put_noqueue, "PNQ") \ + EM(rxrpc_call_put_noqueue, "PnQ") \ + EM(rxrpc_call_put_notimer, "PnT") \ + EM(rxrpc_call_put_timer, "PTM") \ EM(rxrpc_call_put_userid, "Pus") \ EM(rxrpc_call_queued, "QUE") \ EM(rxrpc_call_queued_ref, "QUR") \ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index c96a4337afe6c562e0b59fee69072de6424f3d72..eb5ec1fb66b4b365c62ab7a8f8431420b960910f 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -183,6 +183,61 @@ TRACE_EVENT(sched_switch, __entry->next_comm, __entry->next_pid, __entry->next_prio) ); +#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER +/* + * Tracepoint for an offline task being resched: + */ +TRACE_EVENT(sched_qos_smt_expel, + + TP_PROTO(struct task_struct *sibling_p, int qos_smt_status), + + TP_ARGS(sibling_p, qos_smt_status), + + TP_STRUCT__entry( + __array( char, sibling_comm, TASK_COMM_LEN ) + __field( pid_t, sibling_pid ) + __field( int, sibling_qos_status ) + __field( int, sibling_cpu ) + ), + + TP_fast_assign( + memcpy(__entry->sibling_comm, sibling_p->comm, TASK_COMM_LEN); + __entry->sibling_pid = sibling_p->pid; + __entry->sibling_qos_status = qos_smt_status; + __entry->sibling_cpu = task_cpu(sibling_p); + ), + + TP_printk("sibling_comm=%s sibling_pid=%d sibling_qos_status=%d sibling_cpu=%d", + __entry->sibling_comm, __entry->sibling_pid, __entry->sibling_qos_status, + __entry->sibling_cpu) +); + +/* + * Tracepoint for an offline task being expelled: + */ +TRACE_EVENT(sched_qos_smt_expelled, + + TP_PROTO(struct task_struct *p, int qos_smt_status), + + TP_ARGS(p, qos_smt_status), + + TP_STRUCT__entry( + __array( char, comm, TASK_COMM_LEN ) + __field( pid_t, pid ) + __field( int, qos_status ) + ), + + TP_fast_assign( + memcpy(__entry->comm, p->comm, TASK_COMM_LEN); + __entry->pid = p->pid; + __entry->qos_status = qos_smt_status; + ), + + TP_printk("comm=%s pid=%d qos_status=%d", + __entry->comm, __entry->pid, __entry->qos_status) +); +#endif + /* * Tracepoint for a task being migrated: */ @@ -650,6 +705,33 @@ DECLARE_TRACE(sched_update_nr_running_tp, TP_PROTO(struct rq *rq, int change), TP_ARGS(rq, change)); +DECLARE_EVENT_CLASS(psi_memstall_template, + + TP_PROTO(unsigned long function), + + TP_ARGS(function), + + TP_STRUCT__entry( + __field(unsigned long, function) + ), + + TP_fast_assign( + __entry->function = function; + ), + + TP_printk("%ps", (void *)__entry->function) +); + +DEFINE_EVENT(psi_memstall_template, psi_memstall_enter, + TP_PROTO(unsigned long function), + TP_ARGS(function) +);
+DEFINE_EVENT(psi_memstall_template, psi_memstall_leave, + TP_PROTO(unsigned long function), + TP_ARGS(function) +); + #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 23db248a7fdbea047d317a82ecf269501fb2761c..8220369ee6105858c3d01ecc7e7ee9acececbe44 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1006,7 +1006,6 @@ DEFINE_RPC_XPRT_LIFETIME_EVENT(connect); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done); DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force); -DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup); DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy); DECLARE_EVENT_CLASS(rpc_xprt_event, @@ -1874,17 +1873,18 @@ DECLARE_EVENT_CLASS(svc_deferred_event, TP_STRUCT__entry( __field(const void *, dr) __field(u32, xid) - __string(addr, dr->xprt->xpt_remotebuf) + __array(__u8, addr, INET6_ADDRSTRLEN + 10) ), TP_fast_assign( __entry->dr = dr; __entry->xid = be32_to_cpu(*(__be32 *)(dr->args + (dr->xprt_hlen>>2))); - __assign_str(addr, dr->xprt->xpt_remotebuf); + snprintf(__entry->addr, sizeof(__entry->addr) - 1, + "%pISpc", (struct sockaddr *)&dr->addr); ), - TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr, + TP_printk("addr=%s dr=%p xid=0x%08x", __entry->addr, __entry->dr, __entry->xid) ); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 00afbbc130eea02e65ede4da81e31de5dceb9a70..8fae845d80e260f59a1729c8b1d4364a0fdc80fc 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1490,8 +1490,8 @@ union bpf_attr { * Return * The return value depends on the result of the test, and can be: * - * * 0, if current task belongs to the cgroup2. - * * 1, if current task does not belong to the cgroup2. + * * 1, if current task belongs to the cgroup2. + * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) @@ -2163,8 +2163,8 @@ union bpf_attr { * * # sysctl kernel.perf_event_max_stack= * Return - * A non-negative value equal to or less than *size* on success, - * or a negative error in case of failure. + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure. * * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description @@ -3448,8 +3448,8 @@ union bpf_attr { * * # sysctl kernel.perf_event_max_stack= * Return - * A non-negative value equal to or less than *size* on success, - * or a negative error in case of failure. + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure. * * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags) * Description @@ -3742,6 +3742,19 @@ union bpf_attr { * Return * The helper returns **TC_ACT_REDIRECT** on success or * **TC_ACT_SHOT** on error. + * + * u64 bpf_get_sockops_uid_gid(void *sockops) + * Description + * Get the sock's uid and gid + * Return + * A 64-bit integer containing the current GID and UID, and + * created as such: *current_gid* **<< 32 \|** *current_uid*. + * + * int bpf_sk_original_addr(void *bpf_socket, int optname, char *optval, int optlen) + * Description + * Get IPv4 origdst or replysrc. Works with IPv4 only. + * Return + * 0 on success, or a negative error in case of failure.
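Given the documented encoding, a sockops program would unpack the return value of this non-upstream helper along the following lines (a sketch; the context variable name is illustrative):

/* bpf_get_sockops_uid_gid() packs gid << 32 | uid into one u64. */
__u64 ugid = bpf_get_sockops_uid_gid(skops);
__u32 uid = (__u32)ugid;		/* low 32 bits */
__u32 gid = (__u32)(ugid >> 32);	/* high 32 bits */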
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3900,6 +3913,8 @@ union bpf_attr { FN(per_cpu_ptr), \ FN(this_cpu_ptr), \ FN(redirect_peer), \ + FN(get_sockops_uid_gid), \ + FN(sk_original_addr), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -4180,7 +4195,8 @@ struct bpf_sock { __u32 src_ip4; __u32 src_ip6[4]; __u32 src_port; /* host byte order */ - __u32 dst_port; /* network byte order */ + __be16 dst_port; /* network byte order */ + __u16 :16; /* zero padding */ __u32 dst_ip4; __u32 dst_ip6[4]; __u32 state; diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h index 7793b26aa154d68f5fb2b074007afdfa0ccd3a44..590f8aea2b6d25eebceb77264bcc7513280e55f0 100644 --- a/include/uapi/linux/can/isotp.h +++ b/include/uapi/linux/can/isotp.h @@ -135,22 +135,18 @@ struct can_isotp_ll_options { #define CAN_ISOTP_FORCE_RXSTMIN 0x100 /* ignore CFs depending on rx stmin */ #define CAN_ISOTP_RX_EXT_ADDR 0x200 /* different rx extended addressing */ #define CAN_ISOTP_WAIT_TX_DONE 0x400 /* wait for tx completion */ +#define CAN_ISOTP_SF_BROADCAST 0x800 /* 1-to-N functional addressing */ - -/* default values */ +/* protocol machine default values */ #define CAN_ISOTP_DEFAULT_FLAGS 0 #define CAN_ISOTP_DEFAULT_EXT_ADDRESS 0x00 #define CAN_ISOTP_DEFAULT_PAD_CONTENT 0xCC /* prevent bit-stuffing */ -#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 0 +#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 50000 /* 50 microseconds */ #define CAN_ISOTP_DEFAULT_RECV_BS 0 #define CAN_ISOTP_DEFAULT_RECV_STMIN 0x00 #define CAN_ISOTP_DEFAULT_RECV_WFTMAX 0 -#define CAN_ISOTP_DEFAULT_LL_MTU CAN_MTU -#define CAN_ISOTP_DEFAULT_LL_TX_DL CAN_MAX_DLEN -#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS 0 - /* * Remark on CAN_ISOTP_DEFAULT_RECV_* values: * @@ -162,4 +158,24 @@ struct can_isotp_ll_options { * consistency and copied directly into the flow control (FC) frame. */ +/* link layer default values => make use of Classical CAN frames */ + +#define CAN_ISOTP_DEFAULT_LL_MTU CAN_MTU +#define CAN_ISOTP_DEFAULT_LL_TX_DL CAN_MAX_DLEN +#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS 0 + +/* + * The CAN_ISOTP_DEFAULT_FRAME_TXTIME has become a non-zero value as + * it only makes sense for isotp implementation tests to run without + * an N_As value. As user space applications usually do not set the + * frame_txtime element of struct can_isotp_options the new in-kernel + * default is very likely overwritten with zero when the sockopt() + * CAN_ISOTP_OPTS is invoked. + * To make sure that an N_As value of zero is only set intentionally the + * value '0' is now interpreted as 'do not change the current value'. + * When a frame_txtime of zero is required for testing purposes this + * CAN_ISOTP_FRAME_TXTIME_ZERO u32 value has to be set in frame_txtime.
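A minimal user-space sketch of the new frame_txtime semantics just described (socket setup and error handling omitted; note that the other zeroed option fields would also be applied, so a real caller should read-modify-write the options):

#include <sys/socket.h>
#include <linux/can/isotp.h>

/* Plain 0 now means "keep the kernel's current N_As value", so a test
 * that really wants zero must pass CAN_ISOTP_FRAME_TXTIME_ZERO.
 */
static int isotp_force_zero_txtime(int sock)
{
	struct can_isotp_options opts = {
		.frame_txtime = CAN_ISOTP_FRAME_TXTIME_ZERO,
	};

	return setsockopt(sock, SOL_CAN_ISOTP, CAN_ISOTP_OPTS,
			  &opts, sizeof(opts));
}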
+ */ +#define CAN_ISOTP_FRAME_TXTIME_ZERO 0xFFFFFFFF + #endif /* !_UAPI_CAN_ISOTP_H */ diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 7233502ea991fa9e0342d154d86fdd0068debfe0..98ca64d1beb6fb1d540475fe20ec67a73b81b846 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -175,6 +175,10 @@ * * 7.32 * - add flags to fuse_attr, add FUSE_ATTR_SUBMOUNT, add FUSE_SUBMOUNTS + * + * 7.33 + * - add FUSE_HANDLE_KILLPRIV_V2, FUSE_WRITE_KILL_SUIDGID, FATTR_KILL_SUIDGID + * - add FUSE_OPEN_KILL_SUIDGID */ #ifndef _LINUX_FUSE_H @@ -210,7 +214,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 32 +#define FUSE_KERNEL_MINOR_VERSION 33 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -271,6 +275,7 @@ struct fuse_file_lock { #define FATTR_MTIME_NOW (1 << 8) #define FATTR_LOCKOWNER (1 << 9) #define FATTR_CTIME (1 << 10) +#define FATTR_KILL_SUIDGID (1 << 11) /** * Flags returned by the OPEN request @@ -320,6 +325,11 @@ struct fuse_file_lock { * foffset and moffset fields in struct * fuse_setupmapping_out and fuse_removemapping_one. * FUSE_SUBMOUNTS: kernel supports auto-mounting directory submounts + * FUSE_HANDLE_KILLPRIV_V2: fs kills suid/sgid/cap on write/chown/trunc. + * Upon write/truncate suid/sgid is only killed if caller + * does not have CAP_FSETID. Additionally upon + * write/truncate sgid is killed only if file has group + * execute permission. (Same as Linux VFS behavior). */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -349,6 +359,7 @@ struct fuse_file_lock { #define FUSE_EXPLICIT_INVAL_DATA (1 << 25) #define FUSE_MAP_ALIGNMENT (1 << 26) #define FUSE_SUBMOUNTS (1 << 27) +#define FUSE_HANDLE_KILLPRIV_V2 (1 << 28) /** * CUSE INIT request/reply flags @@ -378,11 +389,14 @@ struct fuse_file_lock { * * FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed * FUSE_WRITE_LOCKOWNER: lock_owner field is valid - * FUSE_WRITE_KILL_PRIV: kill suid and sgid bits + * FUSE_WRITE_KILL_SUIDGID: kill suid and sgid bits */ #define FUSE_WRITE_CACHE (1 << 0) #define FUSE_WRITE_LOCKOWNER (1 << 1) -#define FUSE_WRITE_KILL_PRIV (1 << 2) +#define FUSE_WRITE_KILL_SUIDGID (1 << 2) + +/* Obsolete alias; this flag implies killing suid/sgid only. */ +#define FUSE_WRITE_KILL_PRIV FUSE_WRITE_KILL_SUIDGID /** * Read flags @@ -431,6 +445,12 @@ struct fuse_file_lock { */ #define FUSE_ATTR_SUBMOUNT (1 << 0) +/** + * Open flags + * FUSE_OPEN_KILL_SUIDGID: Kill suid and sgid if executable + */ +#define FUSE_OPEN_KILL_SUIDGID (1 << 0) + enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, /* no reply */ @@ -592,14 +612,14 @@ struct fuse_setattr_in { struct fuse_open_in { uint32_t flags; - uint32_t unused; + uint32_t open_flags; /* FUSE_OPEN_... */ }; struct fuse_create_in { uint32_t flags; uint32_t mode; uint32_t umask; - uint32_t padding; + uint32_t open_flags; /* FUSE_OPEN_... 
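A hedged sketch of the server side of the new FUSE open_flags field (a hypothetical low-level request handler, not a real libfuse API):

/* A filesystem that negotiated FUSE_HANDLE_KILLPRIV_V2 during INIT is
 * responsible for clearing suid/sgid itself when the kernel marks an
 * open with FUSE_OPEN_KILL_SUIDGID (e.g. truncating open by a caller
 * without CAP_FSETID).
 */
static void example_handle_open(const struct fuse_open_in *in, mode_t *mode)
{
	if (in->open_flags & FUSE_OPEN_KILL_SUIDGID)
		*mode &= ~(S_ISUID | S_ISGID);
}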
*/ }; struct fuse_open_out { diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 50d4705e1cbcffa634101054f82346c70fe60a07..0507ecc7275a73f05b95b37e0a6e3f8a00de848d 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -689,8 +689,8 @@ enum { enum ipvlan_mode { IPVLAN_MODE_L2 = 0, IPVLAN_MODE_L3, - IPVLAN_MODE_L3S, IPVLAN_MODE_L2E, + IPVLAN_MODE_L3S, IPVLAN_MODE_MAX }; diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 225ec87d4f2283c7e2aacba55fbf3d405753b570..7989d9483ea75e2bbaaf78c1fd3d3bca741678ff 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -278,7 +278,8 @@ #define KEY_PAUSECD 201 #define KEY_PROG3 202 #define KEY_PROG4 203 -#define KEY_DASHBOARD 204 /* AL Dashboard */ +#define KEY_ALL_APPLICATIONS 204 /* AC Desktop Show All Applications */ +#define KEY_DASHBOARD KEY_ALL_APPLICATIONS #define KEY_SUSPEND 205 #define KEY_CLOSE 206 /* AC Close */ #define KEY_PLAY 207 @@ -612,6 +613,7 @@ #define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */ #define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */ #define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */ +#define KEY_DICTATE 0x24a /* Start or Stop Voice Dictation Session (HUTRR99) */ #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 3af8b0164f1e6b2fa07469778aa31f47d1e39645..4e0ebd66368fbeb804aa74fd3d2a015b4c770f29 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1061,6 +1061,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_X86_USER_SPACE_MSR 188 #define KVM_CAP_X86_MSR_FILTER 189 #define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190 +#define KVM_CAP_SGX_ATTRIBUTE 196 #define KVM_CAP_ARM_CPU_FEATURE 555 diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index 4b3395082d15c95acc2afaeb7f15b32baf65ef53..26071021e986f6e3f6f71cf12fba8bd11a2390bd 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -106,7 +106,7 @@ enum ip_conntrack_status { IPS_NAT_CLASH = IPS_UNTRACKED, #endif - /* Conntrack got a helper explicitly attached via CT target. */ + /* Conntrack got a helper explicitly attached (ruleset, ctnetlink). */ IPS_HELPER_BIT = 13, IPS_HELPER = (1 << IPS_HELPER_BIT), diff --git a/include/uapi/linux/netfilter_ipv4.h b/include/uapi/linux/netfilter_ipv4.h index 155e77d6a42d0d60f75a4e9bc1fa7e7c2f074fdf..00e78cc2782ba3c3dc0cac222cf9abbbf25eee38 100644 --- a/include/uapi/linux/netfilter_ipv4.h +++ b/include/uapi/linux/netfilter_ipv4.h @@ -50,6 +50,8 @@ enum nf_ip_hook_priorities { /* 2.2 firewalling (+ masq) went from 64 through 76 */ /* 2.4 firewalling went 64 through 67. 
*/ #define SO_ORIGINAL_DST 80 +#define BPF_SO_ORIGINAL_DST 800 +#define BPF_SO_REPLY_SRC 801 #endif /* _UAPI__LINUX_IP_NETFILTER_H */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 142b184eca8b4cbba4321d9a4e0a5b5b164fa08e..9e257fe9efa536e05044608d5718580be730d80b 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -729,6 +729,7 @@ #define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */ #define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */ #define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */ +#define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */ #define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */ #define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */ #define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_16GT @@ -837,6 +838,13 @@ #define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */ #define PCI_EXT_CAP_PWR_SIZEOF 16 +/* Root Complex Event Collector Endpoint Association */ +#define PCI_RCEC_RCIEP_BITMAP 4 /* Associated Bitmap for RCiEPs */ +#define PCI_RCEC_BUSN 8 /* RCEC Associated Bus Numbers */ +#define PCI_RCEC_BUSN_REG_VER 0x02 /* Least version with BUSN present */ +#define PCI_RCEC_BUSN_NEXT(x) (((x) >> 8) & 0xff) +#define PCI_RCEC_BUSN_LAST(x) (((x) >> 16) & 0xff) + /* Vendor-Specific (VSEC, PCI_EXT_CAP_ID_VNDR) */ #define PCI_VNDR_HEADER 4 /* Vendor-Specific Header */ #define PCI_VNDR_HEADER_ID(x) ((x) & 0xffff) @@ -1072,6 +1080,10 @@ #define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */ #define PCI_L1SS_CTL2 0x0c /* Control 2 Register */ +/* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */ +#define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */ +#define PCI_DVSEC_HEADER2 0x8 /* Designated Vendor-Specific Header2 */ + /* Data Link Feature */ #define PCI_DLF_CAP 0x04 /* Capabilities Register */ #define PCI_DLF_EXCHANGE_ENABLE 0x80000000 /* Data Link Feature Exchange Enable */ diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h index 83ee45fa634b953d9afed56dbeeb281d09dd146c..3747bf816f9a6b7c660e2871abdb0b6c79cdb9c2 100644 --- a/include/uapi/linux/ptrace.h +++ b/include/uapi/linux/ptrace.h @@ -102,6 +102,16 @@ struct ptrace_syscall_info { }; }; +#define PTRACE_GET_RSEQ_CONFIGURATION 0x420f + +struct ptrace_rseq_configuration { + __u64 rseq_abi_pointer; + __u32 rseq_abi_size; + __u32 signature; + __u32 flags; + __u32 pad; +}; + /* * These values are stored in task->ptrace_message * by tracehook_report_syscall_* to describe the current syscall-stop. 
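For reference, a tracer would consume the new ptrace request roughly as follows; addr carries the caller's buffer size and data the buffer pointer, per the usual ptrace(2) convention (a sketch, error handling reduced to a bail-out):

#include <stdio.h>
#include <sys/ptrace.h>

static void dump_rseq_config(pid_t pid)
{
	struct ptrace_rseq_configuration conf;

	if (ptrace(PTRACE_GET_RSEQ_CONFIGURATION, pid,
		   (void *)sizeof(conf), &conf) < 0)
		return;

	printf("rseq abi @ %#llx, size %u, signature %#x\n",
	       (unsigned long long)conf.rseq_abi_pointer,
	       conf.rseq_abi_size, conf.signature);
}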
diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h index c105054cbb57acb9dbef29790822c9f6830a0969..06a8cea36fe3b8d03c9895d8cece7332247b5a92 100644 --- a/include/uapi/linux/thermal.h +++ b/include/uapi/linux/thermal.h @@ -44,7 +44,10 @@ enum thermal_genl_attr { THERMAL_GENL_ATTR_CDEV_MAX_STATE, THERMAL_GENL_ATTR_CDEV_NAME, THERMAL_GENL_ATTR_GOV_NAME, - + THERMAL_GENL_ATTR_CPU_CAPABILITY, + THERMAL_GENL_ATTR_CPU_CAPABILITY_ID, + THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE, + THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY, __THERMAL_GENL_ATTR_MAX, }; #define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) @@ -71,6 +74,7 @@ enum thermal_genl_event { THERMAL_GENL_EVENT_CDEV_DELETE, /* Cdev unbound */ THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, /* Cdev state updated */ THERMAL_GENL_EVENT_TZ_GOV_CHANGE, /* Governor policy changed */ + THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, /* CPU capability changed */ __THERMAL_GENL_EVENT_MAX, }; #define THERMAL_GENL_EVENT_MAX (__THERMAL_GENL_EVENT_MAX - 1) diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index bcd2869ed472f8ddff64c9c584de44ff4cf90826..acd81e4220081f46e07b78b11efad4565311d27e 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -77,6 +77,20 @@ #define TLS_CIPHER_AES_CCM_128_TAG_SIZE 16 #define TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE 8 +#define TLS_CIPHER_SM4_GCM 55 +#define TLS_CIPHER_SM4_GCM_IV_SIZE 8 +#define TLS_CIPHER_SM4_GCM_KEY_SIZE 16 +#define TLS_CIPHER_SM4_GCM_SALT_SIZE 4 +#define TLS_CIPHER_SM4_GCM_TAG_SIZE 16 +#define TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE 8 + +#define TLS_CIPHER_SM4_CCM 56 +#define TLS_CIPHER_SM4_CCM_IV_SIZE 8 +#define TLS_CIPHER_SM4_CCM_KEY_SIZE 16 +#define TLS_CIPHER_SM4_CCM_SALT_SIZE 4 +#define TLS_CIPHER_SM4_CCM_TAG_SIZE 16 +#define TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE 8 + #define TLS_SET_RECORD_TYPE 1 #define TLS_GET_RECORD_TYPE 2 @@ -109,6 +123,22 @@ struct tls12_crypto_info_aes_ccm_128 { unsigned char rec_seq[TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE]; }; +struct tls12_crypto_info_sm4_gcm { + struct tls_crypto_info info; + unsigned char iv[TLS_CIPHER_SM4_GCM_IV_SIZE]; + unsigned char key[TLS_CIPHER_SM4_GCM_KEY_SIZE]; + unsigned char salt[TLS_CIPHER_SM4_GCM_SALT_SIZE]; + unsigned char rec_seq[TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE]; +}; + +struct tls12_crypto_info_sm4_ccm { + struct tls_crypto_info info; + unsigned char iv[TLS_CIPHER_SM4_CCM_IV_SIZE]; + unsigned char key[TLS_CIPHER_SM4_CCM_KEY_SIZE]; + unsigned char salt[TLS_CIPHER_SM4_CCM_SALT_SIZE]; + unsigned char rec_seq[TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE]; +}; + enum { TLS_INFO_UNSPEC, TLS_INFO_VERSION, diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index ffc6a5391bb7bb8f7543cb36fb16487596fd3255..66073c082a06004f0387cce30bd85b4d2ddbaf65 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -505,6 +505,12 @@ struct xfrm_user_offload { int ifindex; __u8 flags; }; +/* This flag was exposed without any kernel code supporting it. + * Unfortunately, strongswan has code that sets this flag, + * which makes it impossible to reuse this bit. + * + * So leave it here to make sure that it won't be reused by mistake.
+ */ #define XFRM_OFFLOAD_IPV6 1 #define XFRM_OFFLOAD_INBOUND 2 diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 0b1182a3cf4128b4502b7e3db59adeb8522daea5..57b4ae6a4a186e4823877b2d78d7f97e021ac7d0 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -97,17 +97,32 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly); * access has been ended, free the given page too. Access will be ended * immediately iff the grant entry is not in use, otherwise it will happen * some time later. page may be 0, in which case no freeing will occur. + * Note that the granted page might still be accessed (read or write) by the + * other side after gnttab_end_foreign_access() returns, so even if page was + * specified as 0 it is not allowed to just reuse the page for other + * purposes immediately. gnttab_end_foreign_access() will take an additional + * reference to the granted page in this case, which is dropped only after + * the grant is no longer in use. + * This requires that multi page allocations for areas subject to + * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing + * via free_pages_exact()) in order to avoid high order pages. */ void gnttab_end_foreign_access(grant_ref_t ref, int readonly, unsigned long page); +/* + * End access through the given grant reference, iff the grant entry is + * no longer in use. In case of success ending foreign access, the + * grant reference is deallocated. + * Return 1 if the grant entry was freed, 0 if it is still in use. + */ +int gnttab_try_end_foreign_access(grant_ref_t ref); + int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn); unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref); unsigned long gnttab_end_foreign_transfer(grant_ref_t ref); -int gnttab_query_foreign_access(grant_ref_t ref); - /* * operations on reserved batches of grant references */ diff --git a/init/Kconfig b/init/Kconfig index 17533f1f19d4f99999e7db58449fef17a24c51e2..27c5ed16fef173303a64de3c228e002df858defa 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -913,6 +913,16 @@ config MEMCG_KMEM depends on MEMCG && !SLOB default y +config MEMCG_MEMFS_INFO + bool "Show memfs files that have pages charged in given memory cgroup" + depends on MEMCG + default n + help + Support printing the rootfs and tmpfs files that have pages + charged in a given memory cgroup. The file information can be printed + through the "memory.memfs_files_info" interface or when an OOM is + triggered. + config BLK_CGROUP bool "IO controller" depends on BLOCK @@ -956,6 +966,15 @@ config QOS_SCHED default n +config QOS_SCHED_SMT_EXPELLER + bool "Qos smt expeller" + depends on SCHED_SMT + depends on QOS_SCHED + default n + help + This feature enables online tasks to expel offline tasks + from the SMT sibling CPUs and to exclusively occupy CPU resources.
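Going back to the grant-table change above: gnttab_query_foreign_access() is removed, and callers are expected to use the race-free gnttab_try_end_foreign_access() instead. A sketch of the intended calling pattern in a frontend teardown path (the deferral policy here is illustrative, not the in-tree logic):

/* Only reclaim the page once the remote side has really dropped the
 * grant; if the entry is still in use, keep the page alive and retry
 * later rather than handing memory back while it may still be mapped.
 */
static bool try_reclaim_grant(grant_ref_t ref, struct page *page)
{
	if (gnttab_try_end_foreign_access(ref)) {
		__free_page(page);	/* grant freed, page is ours again */
		return true;
	}
	return false;			/* still in use: defer and retry */
}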
+ config FAIR_GROUP_SCHED bool "Group scheduling for SCHED_OTHER" depends on CGROUP_SCHED diff --git a/init/main.c b/init/main.c index 11c96633c3f7b4ffbab661396326b421d15e7f38..41a9ce782acc9319cbfd8cd736069f0646b3bf46 100644 --- a/init/main.c +++ b/init/main.c @@ -1110,7 +1110,7 @@ static int __init initcall_blacklist(char *str) } } while (str_entry); - return 0; + return 1; } static bool __init_or_module initcall_blacklisted(initcall_t fn) @@ -1373,7 +1373,9 @@ static noinline void __init kernel_init_freeable(void); bool rodata_enabled __ro_after_init = true; static int __init set_debug_rodata(char *str) { - return strtobool(str, &rodata_enabled); + if (strtobool(str, &rodata_enabled)) + pr_warn("Invalid option string for rodata: '%s'\n", str); + return 1; } __setup("rodata=", set_debug_rodata); #endif diff --git a/kernel/async.c b/kernel/async.c index 33258e6e20f83c319649c266142180926cba689c..1746cd65e271bcd7e41f3eb571d314c8ea4d8fa7 100644 --- a/kernel/async.c +++ b/kernel/async.c @@ -205,9 +205,6 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data, atomic_inc(&entry_count); spin_unlock_irqrestore(&async_lock, flags); - /* mark that this task has queued an async job, used by module init */ - current->flags |= PF_USED_ASYNC; - /* schedule for execution */ queue_work_node(node, system_unbound_wq, &entry->work); diff --git a/kernel/audit.c b/kernel/audit.c index 21be62bc8205fa0e856aa061a74eff73c99233bd..aeec86ed470883f28acde2f3b73011252f4d4d66 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -541,20 +541,22 @@ static void kauditd_printk_skb(struct sk_buff *skb) /** * kauditd_rehold_skb - Handle a audit record send failure in the hold queue * @skb: audit record + * @error: error code (unused) * * Description: * This should only be used by the kauditd_thread when it fails to flush the * hold queue. */ -static void kauditd_rehold_skb(struct sk_buff *skb) +static void kauditd_rehold_skb(struct sk_buff *skb, __always_unused int error) { - /* put the record back in the queue at the same place */ - skb_queue_head(&audit_hold_queue, skb); + /* put the record back in the queue */ + skb_queue_tail(&audit_hold_queue, skb); } /** * kauditd_hold_skb - Queue an audit record, waiting for auditd * @skb: audit record + * @error: error code * * Description: * Queue the audit record, waiting for an instance of auditd. When this @@ -564,19 +566,31 @@ static void kauditd_rehold_skb(struct sk_buff *skb) * and queue it, if we have room. If we want to hold on to the record, but we * don't have room, record a record lost message. */ -static void kauditd_hold_skb(struct sk_buff *skb) +static void kauditd_hold_skb(struct sk_buff *skb, int error) { /* at this point it is uncertain if we will ever send this to auditd so * try to send the message via printk before we go any further */ kauditd_printk_skb(skb); /* can we just silently drop the message? 
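Both init/main.c hunks above restore the same __setup() convention: a handler returns 1 once it has consumed the parameter (warning on malformed input instead of failing), so the option is neither reported as an unknown bootparam nor passed on to init. A sketch with a hypothetical option name:

static bool example_enabled __initdata;

static int __init example_setup(char *str)
{
	if (strtobool(str, &example_enabled))
		pr_warn("Invalid option string for example: '%s'\n", str);
	return 1;	/* always consumed, mirroring rodata= above */
}
__setup("example=", example_setup);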
*/ - if (!audit_default) { - kfree_skb(skb); - return; + if (!audit_default) + goto drop; + + /* the hold queue is only for when the daemon goes away completely, + * not -EAGAIN failures; if we are in a -EAGAIN state requeue the + * record on the retry queue unless it's full, in which case drop it + */ + if (error == -EAGAIN) { + if (!audit_backlog_limit || + skb_queue_len(&audit_retry_queue) < audit_backlog_limit) { + skb_queue_tail(&audit_retry_queue, skb); + return; + } + audit_log_lost("kauditd retry queue overflow"); + goto drop; } - /* if we have room, queue the message */ + /* if we have room in the hold queue, queue the message */ if (!audit_backlog_limit || skb_queue_len(&audit_hold_queue) < audit_backlog_limit) { skb_queue_tail(&audit_hold_queue, skb); @@ -585,24 +599,32 @@ static void kauditd_hold_skb(struct sk_buff *skb) /* we have no other options - drop the message */ audit_log_lost("kauditd hold queue overflow"); +drop: kfree_skb(skb); } /** * kauditd_retry_skb - Queue an audit record, attempt to send again to auditd * @skb: audit record + * @error: error code (unused) * * Description: * Not as serious as kauditd_hold_skb() as we still have a connected auditd, * but for some reason we are having problems sending it audit records so * queue the given record and attempt to resend. */ -static void kauditd_retry_skb(struct sk_buff *skb) +static void kauditd_retry_skb(struct sk_buff *skb, __always_unused int error) { - /* NOTE: because records should only live in the retry queue for a - * short period of time, before either being sent or moved to the hold - * queue, we don't currently enforce a limit on this queue */ - skb_queue_tail(&audit_retry_queue, skb); + if (!audit_backlog_limit || + skb_queue_len(&audit_retry_queue) < audit_backlog_limit) { + skb_queue_tail(&audit_retry_queue, skb); + return; + } + + /* we have to drop the record, send it via printk as a last effort */ + kauditd_printk_skb(skb); + audit_log_lost("kauditd retry queue overflow"); + kfree_skb(skb); } /** @@ -640,7 +662,7 @@ static void auditd_reset(const struct auditd_connection *ac) /* flush the retry queue to the hold queue, but don't touch the main * queue since we need to process that normally for multicast */ while ((skb = skb_dequeue(&audit_retry_queue))) - kauditd_hold_skb(skb); + kauditd_hold_skb(skb, -ECONNREFUSED); } /** @@ -714,16 +736,18 @@ static int kauditd_send_queue(struct sock *sk, u32 portid, struct sk_buff_head *queue, unsigned int retry_limit, void (*skb_hook)(struct sk_buff *skb), - void (*err_hook)(struct sk_buff *skb)) + void (*err_hook)(struct sk_buff *skb, int error)) { int rc = 0; - struct sk_buff *skb; + struct sk_buff *skb = NULL; + struct sk_buff *skb_tail; unsigned int failed = 0; /* NOTE: kauditd_thread takes care of all our locking, we just use * the netlink info passed to us (e.g. sk and portid) */ - while ((skb = skb_dequeue(queue))) { + skb_tail = skb_peek_tail(queue); + while ((skb != skb_tail) && (skb = skb_dequeue(queue))) { /* call the skb_hook for each skb we touch */ if (skb_hook) (*skb_hook)(skb); @@ -731,9 +755,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid, /* can we send to anyone via unicast? 
*/ if (!sk) { if (err_hook) - (*err_hook)(skb); - if (queue == &audit_hold_queue) - goto out; + (*err_hook)(skb, -ECONNREFUSED); continue; } @@ -747,11 +769,9 @@ static int kauditd_send_queue(struct sock *sk, u32 portid, rc == -ECONNREFUSED || rc == -EPERM) { sk = NULL; if (err_hook) - (*err_hook)(skb); + (*err_hook)(skb, rc); if (rc == -EAGAIN) rc = 0; - if (queue == &audit_hold_queue) - goto out; /* continue to drain the queue */ continue; } else @@ -763,7 +783,6 @@ static int kauditd_send_queue(struct sock *sk, u32 portid, } } -out: return (rc >= 0 ? 0 : rc); } diff --git a/kernel/audit.h b/kernel/audit.h index 3b9c0945225a121e7031cfe29299925bb0b42516..1918019e6aaf7b8c54a248fcc593d22e3ac772c8 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -191,6 +191,10 @@ struct audit_context { struct { char *name; } module; + struct { + struct audit_ntp_data ntp_data; + struct timespec64 tk_injoffset; + } time; }; int fds[2]; struct audit_proctitle proctitle; diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 638f424859edc384aa22b8908b1dc5a784d6bd97..07e2788bbbf124ea6b7a7f6077a4eed2d3669d8b 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1214,6 +1214,53 @@ static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name) from_kuid(&init_user_ns, name->fcap.rootid)); } +static void audit_log_time(struct audit_context *context, struct audit_buffer **ab) +{ + const struct audit_ntp_data *ntp = &context->time.ntp_data; + const struct timespec64 *tk = &context->time.tk_injoffset; + static const char * const ntp_name[] = { + "offset", + "freq", + "status", + "tai", + "tick", + "adjust", + }; + int type; + + if (context->type == AUDIT_TIME_ADJNTPVAL) { + for (type = 0; type < AUDIT_NTP_NVALS; type++) { + if (ntp->vals[type].newval != ntp->vals[type].oldval) { + if (!*ab) { + *ab = audit_log_start(context, + GFP_KERNEL, + AUDIT_TIME_ADJNTPVAL); + if (!*ab) + return; + } + audit_log_format(*ab, "op=%s old=%lli new=%lli", + ntp_name[type], + ntp->vals[type].oldval, + ntp->vals[type].newval); + audit_log_end(*ab); + *ab = NULL; + } + } + } + if (tk->tv_sec != 0 || tk->tv_nsec != 0) { + if (!*ab) { + *ab = audit_log_start(context, GFP_KERNEL, + AUDIT_TIME_INJOFFSET); + if (!*ab) + return; + } + audit_log_format(*ab, "sec=%lli nsec=%li", + (long long)tk->tv_sec, tk->tv_nsec); + audit_log_end(*ab); + *ab = NULL; + } +} + static void show_special(struct audit_context *context, int *call_panic) { struct audit_buffer *ab; @@ -1319,6 +1366,11 @@ static void show_special(struct audit_context *context, int *call_panic) audit_log_format(ab, "(null)"); break; + case AUDIT_TIME_ADJNTPVAL: + case AUDIT_TIME_INJOFFSET: + /* this call deviates from the rest, eating the buffer */ + audit_log_time(context, &ab); + break; } audit_log_end(ab); } @@ -2560,31 +2612,26 @@ void __audit_fanotify(unsigned int response) void __audit_tk_injoffset(struct timespec64 offset) { - audit_log(audit_context(), GFP_KERNEL, AUDIT_TIME_INJOFFSET, - "sec=%lli nsec=%li", - (long long)offset.tv_sec, offset.tv_nsec); -} - -static void audit_log_ntp_val(const struct audit_ntp_data *ad, - const char *op, enum audit_ntp_type type) -{ - const struct audit_ntp_val *val = &ad->vals[type]; - - if (val->newval == val->oldval) - return; + struct audit_context *context = audit_context(); - audit_log(audit_context(), GFP_KERNEL, AUDIT_TIME_ADJNTPVAL, - "op=%s old=%lli new=%lli", op, val->oldval, val->newval); + /* only set type if not already set by NTP */ + if (!context->type) + context->type = AUDIT_TIME_INJOFFSET; + 
memcpy(&context->time.tk_injoffset, &offset, sizeof(offset)); } void __audit_ntp_log(const struct audit_ntp_data *ad) { - audit_log_ntp_val(ad, "offset", AUDIT_NTP_OFFSET); - audit_log_ntp_val(ad, "freq", AUDIT_NTP_FREQ); - audit_log_ntp_val(ad, "status", AUDIT_NTP_STATUS); - audit_log_ntp_val(ad, "tai", AUDIT_NTP_TAI); - audit_log_ntp_val(ad, "tick", AUDIT_NTP_TICK); - audit_log_ntp_val(ad, "adjust", AUDIT_NTP_ADJUST); + struct audit_context *context = audit_context(); + int type; + + for (type = 0; type < AUDIT_NTP_NVALS; type++) + if (ad->vals[type].newval != ad->vals[type].oldval) { + /* unconditionally set type, overwriting TK */ + context->type = AUDIT_TIME_ADJNTPVAL; + memcpy(&context->time.ntp_data, ad, sizeof(*ad)); + break; + } } void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index aaf2fbaa0cc76e4a8615ac961a16da6042c76ec3..fba28f17e61aa7b353cb8d42b3fef9eae740c0d3 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -4135,8 +4135,7 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size, log->len_total = log_size; /* log attributes have to be sane */ - if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || - !log->level || !log->ubuf) { + if (!bpf_verifier_log_attr_valid(log)) { err = -EINVAL; goto errout; } @@ -4525,10 +4524,12 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; + u32 type, flag; - if (ctx_arg_info->offset == off && - (ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL || - ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) { + type = base_type(ctx_arg_info->reg_type); + flag = type_flag(ctx_arg_info->reg_type); + if (ctx_arg_info->offset == off && type == PTR_TO_BUF && + (flag & PTR_MAYBE_NULL)) { info->reg_type = ctx_arg_info->reg_type; return true; } @@ -5205,7 +5206,7 @@ int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, i, btf_kind_str[BTF_INFO_KIND(t->info)]); goto out; } - if (check_ctx_reg(env, ®[i + 1], i + 1)) + if (check_ptr_off_reg(env, ®[i + 1], i + 1)) goto out; continue; } diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 8f53be0558c1b4ceb867683c35014aa4b846a35d..e1d0c6248aae763501fce3e58025494e8e78877d 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -667,6 +667,60 @@ static struct bpf_prog_list *find_detach_entry(struct list_head *progs, return ERR_PTR(-ENOENT); } +/** + * purge_effective_progs() - After compute_effective_progs fails to alloc new + * cgrp->bpf.inactive table we can recover by + * recomputing the array in place. 
+ * + * @cgrp: The cgroup whose descendants to traverse + * @prog: A program to detach or NULL + * @link: A link to detach or NULL + * @atype: Type of detach operation + */ +static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog, + struct bpf_cgroup_link *link, + enum cgroup_bpf_attach_type atype) +{ + struct cgroup_subsys_state *css; + struct bpf_prog_array *progs; + struct bpf_prog_list *pl; + struct list_head *head; + struct cgroup *cg; + int pos; + + /* recompute effective prog array in place */ + css_for_each_descendant_pre(css, &cgrp->self) { + struct cgroup *desc = container_of(css, struct cgroup, self); + + if (percpu_ref_is_zero(&desc->bpf.refcnt)) + continue; + + /* find position of link or prog in effective progs array */ + for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) { + if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI)) + continue; + + head = &cg->bpf.progs[atype]; + list_for_each_entry(pl, head, node) { + if (!prog_list_prog(pl)) + continue; + if (pl->prog == prog && pl->link == link) + goto found; + pos++; + } + } +found: + BUG_ON(!cg); + progs = rcu_dereference_protected( + desc->bpf.effective[atype], + lockdep_is_held(&cgroup_mutex)); + + /* Remove the program from the array */ + WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos), + "Failed to purge a prog from array at index %d", pos); + } +} + /** * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and * propagate the change to descendants @@ -686,7 +740,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, struct bpf_prog_list *pl; struct list_head *progs; u32 flags; - int err; atype = to_cgroup_bpf_attach_type(type); if (atype < 0) @@ -708,9 +761,12 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, pl->prog = NULL; pl->link = NULL; - err = update_effective_progs(cgrp, atype); - if (err) - goto cleanup; + if (update_effective_progs(cgrp, atype)) { + /* if updating the effective array failed, replace the prog with a dummy prog */ + pl->prog = old_prog; + pl->link = link; + purge_effective_progs(cgrp, old_prog, link, atype); + } /* now can actually delete it from this cgroup list */ list_del(&pl->node); @@ -722,12 +778,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, bpf_prog_put(old_prog); static_branch_dec(&cgroup_bpf_enabled_key[atype]); return 0; - -cleanup: - /* restore back prog or link */ - pl->prog = old_prog; - pl->link = link; - return err; } /* Must be called with cgroup_mutex held to avoid races.
*/ @@ -1703,7 +1753,7 @@ static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, }; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index bb4350de9f110e51bda6c97ad6c4df69869d07b0..4bb5921a7d2177b3c11883a15bee72f13207f0fe 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -499,7 +499,7 @@ const struct bpf_func_proto bpf_strtol_proto = { .func = bpf_strtol, .gpl_only = false, .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_MEM, + .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_LONG, @@ -527,7 +527,7 @@ const struct bpf_func_proto bpf_strtoul_proto = { .func = bpf_strtoul, .gpl_only = false, .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_MEM, + .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_LONG, @@ -599,7 +599,7 @@ const struct bpf_func_proto bpf_event_output_data_proto = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; @@ -636,7 +636,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu) const struct bpf_func_proto bpf_per_cpu_ptr_proto = { .func = bpf_per_cpu_ptr, .gpl_only = false, - .ret_type = RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, + .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY, .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, .arg2_type = ARG_ANYTHING, }; @@ -649,7 +649,7 @@ BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr) const struct bpf_func_proto bpf_this_cpu_ptr_proto = { .func = bpf_this_cpu_ptr, .gpl_only = false, - .ret_type = RET_PTR_TO_MEM_OR_BTF_ID, + .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY, .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, }; diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index 6a9542af4212a12e70c7c045a2c54025b3dce30c..b0fa190b097903f570c47e5af9636f93172c14eb 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -174,9 +174,9 @@ static const struct bpf_iter_reg bpf_map_elem_reg_info = { .ctx_arg_info_size = 2, .ctx_arg_info = { { offsetof(struct bpf_iter__bpf_map_elem, key), - PTR_TO_RDONLY_BUF_OR_NULL }, + PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY }, { offsetof(struct bpf_iter__bpf_map_elem, value), - PTR_TO_RDWR_BUF_OR_NULL }, + PTR_TO_BUF | PTR_MAYBE_NULL }, }, }; diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index f9913bc65ef8d7a34feea4261a1cab351ce66a26..d6fbe17432ae5f74872c2d2f88394063197277b1 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -108,7 +108,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node) } rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, - VM_ALLOC | VM_USERMAP, PAGE_KERNEL); + VM_MAP | VM_USERMAP, PAGE_KERNEL); if (rb) { kmemleak_not_leak(pages); rb->pages = pages; @@ -463,7 +463,7 @@ const struct bpf_func_proto bpf_ringbuf_output_proto = { .func = bpf_ringbuf_output, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, }; diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 92b38a5da61ab9696bbc6c573d17f557e95a54bf..22ef49037579131735974fda3d5eedf22c5de1fc 100644 
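The composed register types used throughout these hunks (e.g. PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY, replacing the old *_OR_NULL enum variants) decompose via base_type()/type_flag(). A schematic of that split; the bit budget is stated as an assumption here, not quoted from the verifier headers:

/* Flag bits live above the base-type bits, so a single bitwise AND
 * recovers each half. The 8-bit budget mirrors the upstream layout
 * but is illustrative in this sketch.
 */
#define EX_BASE_TYPE_BITS	8
#define EX_BASE_TYPE_MASK	((1U << EX_BASE_TYPE_BITS) - 1)

static inline unsigned int ex_base_type(unsigned int type)
{
	return type & EX_BASE_TYPE_MASK;	/* e.g. PTR_TO_BUF */
}

static inline unsigned int ex_type_flag(unsigned int type)
{
	return type & ~EX_BASE_TYPE_MASK;	/* e.g. PTR_MAYBE_NULL | MEM_RDONLY */
}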
--- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -364,7 +364,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, } static struct perf_callchain_entry * -get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) +get_callchain_entry_for_task(struct task_struct *task, u32 max_depth) { #ifdef CONFIG_STACKTRACE struct perf_callchain_entry *entry; @@ -375,9 +375,8 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) if (!entry) return NULL; - entry->nr = init_nr + - stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr), - sysctl_perf_event_max_stack - init_nr, 0); + entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip, + max_depth, 0); /* stack_trace_save_tsk() works on unsigned long array, while * perf_callchain_entry uses u64 array. For 32-bit systems, it is @@ -389,7 +388,7 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) int i; /* copy data from the end to avoid using extra buffer */ - for (i = entry->nr - 1; i >= (int)init_nr; i--) + for (i = entry->nr - 1; i >= 0; i--) to[i] = (u64)(from[i]); } @@ -406,27 +405,19 @@ static long __bpf_get_stackid(struct bpf_map *map, { struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); struct stack_map_bucket *bucket, *new_bucket, *old_bucket; - u32 max_depth = map->value_size / stack_map_data_size(map); - /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ - u32 init_nr = sysctl_perf_event_max_stack - max_depth; u32 skip = flags & BPF_F_SKIP_FIELD_MASK; u32 hash, id, trace_nr, trace_len; bool user = flags & BPF_F_USER_STACK; u64 *ips; bool hash_matches; - /* get_perf_callchain() guarantees that trace->nr >= init_nr - * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth - */ - trace_nr = trace->nr - init_nr; - - if (trace_nr <= skip) + if (trace->nr <= skip) /* skipping more than usable stack trace */ return -EFAULT; - trace_nr -= skip; + trace_nr = trace->nr - skip; trace_len = trace_nr * sizeof(u64); - ips = trace->ip + skip + init_nr; + ips = trace->ip + skip; hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0); id = hash & (smap->n_buckets - 1); bucket = READ_ONCE(smap->buckets[id]); @@ -483,8 +474,7 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, u64, flags) { u32 max_depth = map->value_size / stack_map_data_size(map); - /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ - u32 init_nr = sysctl_perf_event_max_stack - max_depth; + u32 skip = flags & BPF_F_SKIP_FIELD_MASK; bool user = flags & BPF_F_USER_STACK; struct perf_callchain_entry *trace; bool kernel = !user; @@ -493,8 +483,12 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) return -EINVAL; - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, false, false); + max_depth += skip; + if (max_depth > sysctl_perf_event_max_stack) + max_depth = sysctl_perf_event_max_stack; + + trace = get_perf_callchain(regs, 0, kernel, user, max_depth, + false, false); if (unlikely(!trace)) /* couldn't fetch the stack trace */ @@ -585,7 +579,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, struct perf_callchain_entry *trace_in, void *buf, u32 size, u64 flags) { - u32 init_nr, trace_nr, copy_len, elem_size, num_elem; + u32 trace_nr, copy_len, elem_size, num_elem, max_depth; bool user_build_id = flags & BPF_F_USER_BUILD_ID; u32 skip = flags & 
BPF_F_SKIP_FIELD_MASK; bool user = flags & BPF_F_USER_STACK; @@ -610,30 +604,28 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, goto err_fault; num_elem = size / elem_size; - if (sysctl_perf_event_max_stack < num_elem) - init_nr = 0; - else - init_nr = sysctl_perf_event_max_stack - num_elem; + max_depth = num_elem + skip; + if (sysctl_perf_event_max_stack < max_depth) + max_depth = sysctl_perf_event_max_stack; if (trace_in) trace = trace_in; else if (kernel && task) - trace = get_callchain_entry_for_task(task, init_nr); + trace = get_callchain_entry_for_task(task, max_depth); else - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, + trace = get_perf_callchain(regs, 0, kernel, user, max_depth, false, false); if (unlikely(!trace)) goto err_fault; - trace_nr = trace->nr - init_nr; - if (trace_nr < skip) + if (trace->nr < skip) goto err_fault; - trace_nr -= skip; + trace_nr = trace->nr - skip; trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem; copy_len = trace_nr * elem_size; - ips = trace->ip + skip + init_nr; + + ips = trace->ip + skip; if (user && user_build_id) stack_map_get_build_id_offset(buf, ips, trace_nr, user); else @@ -670,13 +662,14 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, u32, size, u64, flags) { struct pt_regs *regs; - long res; + long res = -EINVAL; if (!try_get_task_stack(task)) return -EFAULT; regs = task_pt_regs(task); - res = __bpf_get_stack(regs, task, NULL, buf, size, flags); + if (regs) + res = __bpf_get_stack(regs, task, NULL, buf, size, flags); put_task_stack(task); return res; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 209e6567cdab051df2d3640cac5b83ce577866eb..419dbc3d060ee1fea6835a59291bf36b9c570d7e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1308,6 +1308,7 @@ int generic_map_delete_batch(struct bpf_map *map, maybe_wait_bpf_programs(map); if (err) break; + cond_resched(); } if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) err = -EFAULT; @@ -1365,6 +1366,7 @@ int generic_map_update_batch(struct bpf_map *map, if (err) break; + cond_resched(); } if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp))) @@ -1462,6 +1464,7 @@ int generic_map_lookup_batch(struct bpf_map *map, swap(prev_key, key); retry = MAP_LOOKUP_RETRIES; cp++; + cond_resched(); } if (err == -EFAULT) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 4e28961cfa53e932987d23d88816c2baddd45eb0..6423f1714a2f456b17c017217e63218611f653cb 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -404,18 +404,6 @@ static bool reg_type_not_null(enum bpf_reg_type type) type == PTR_TO_SOCK_COMMON; } -static bool reg_type_may_be_null(enum bpf_reg_type type) -{ - return type == PTR_TO_MAP_VALUE_OR_NULL || - type == PTR_TO_SOCKET_OR_NULL || - type == PTR_TO_SOCK_COMMON_OR_NULL || - type == PTR_TO_TCP_SOCK_OR_NULL || - type == PTR_TO_BTF_ID_OR_NULL || - type == PTR_TO_MEM_OR_NULL || - type == PTR_TO_RDONLY_BUF_OR_NULL || - type == PTR_TO_RDWR_BUF_OR_NULL; -} - static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) { return reg->type == PTR_TO_MAP_VALUE && @@ -424,12 +412,14 @@ static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type) { - return type == PTR_TO_SOCKET || - type == PTR_TO_SOCKET_OR_NULL || - type == PTR_TO_TCP_SOCK || - type == PTR_TO_TCP_SOCK_OR_NULL || - type == PTR_TO_MEM || - type == PTR_TO_MEM_OR_NULL; + return base_type(type) 
== PTR_TO_SOCKET || + base_type(type) == PTR_TO_TCP_SOCK || + base_type(type) == PTR_TO_MEM; +} + +static bool type_is_rdonly_mem(u32 type) +{ + return type & MEM_RDONLY; } static bool arg_type_may_be_refcounted(enum bpf_arg_type type) @@ -437,13 +427,9 @@ static bool arg_type_may_be_refcounted(enum bpf_arg_type type) return type == ARG_PTR_TO_SOCK_COMMON; } -static bool arg_type_may_be_null(enum bpf_arg_type type) +static bool type_may_be_null(u32 type) { - return type == ARG_PTR_TO_MAP_VALUE_OR_NULL || - type == ARG_PTR_TO_MEM_OR_NULL || - type == ARG_PTR_TO_CTX_OR_NULL || - type == ARG_PTR_TO_SOCKET_OR_NULL || - type == ARG_PTR_TO_ALLOC_MEM_OR_NULL; + return type & PTR_MAYBE_NULL; } /* Determine whether the function releases some resources allocated by another @@ -496,37 +482,54 @@ static bool is_ptr_cast_function(enum bpf_func_id func_id) func_id == BPF_FUNC_skc_to_tcp_request_sock; } -/* string representation of 'enum bpf_reg_type' */ -static const char * const reg_type_str[] = { - [NOT_INIT] = "?", - [SCALAR_VALUE] = "inv", - [PTR_TO_CTX] = "ctx", - [CONST_PTR_TO_MAP] = "map_ptr", - [PTR_TO_MAP_VALUE] = "map_value", - [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", - [PTR_TO_STACK] = "fp", - [PTR_TO_PACKET] = "pkt", - [PTR_TO_PACKET_META] = "pkt_meta", - [PTR_TO_PACKET_END] = "pkt_end", - [PTR_TO_FLOW_KEYS] = "flow_keys", - [PTR_TO_SOCKET] = "sock", - [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", - [PTR_TO_SOCK_COMMON] = "sock_common", - [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null", - [PTR_TO_TCP_SOCK] = "tcp_sock", - [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null", - [PTR_TO_TP_BUFFER] = "tp_buffer", - [PTR_TO_XDP_SOCK] = "xdp_sock", - [PTR_TO_BTF_ID] = "ptr_", - [PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_", - [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_", - [PTR_TO_MEM] = "mem", - [PTR_TO_MEM_OR_NULL] = "mem_or_null", - [PTR_TO_RDONLY_BUF] = "rdonly_buf", - [PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null", - [PTR_TO_RDWR_BUF] = "rdwr_buf", - [PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null", -}; +/* string representation of 'enum bpf_reg_type' + * + * Note that reg_type_str() can not appear more than once in a single verbose() + * statement. 
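The constraint documented above exists because reg_type_str() formats into a single buffer owned by the verifier env, so two calls evaluated for the same verbose() statement would both return pointers to whichever string was written last. A small userspace reproduction of the pitfall, assuming only standard C:

#include <stdio.h>

/* Models reg_type_str(): the result lives in one shared buffer. */
static const char *type_str(int t)
{
	static char buf[32];

	snprintf(buf, sizeof(buf), "type%d", t);
	return buf;
}

int main(void)
{
	/* Both %s arguments alias the same buffer, so the same string is
	 * printed twice (which one depends on evaluation order). */
	printf("%s %s\n", type_str(1), type_str(2));

	/* The safe pattern mirrors the verifier: one call per statement. */
	printf("%s ", type_str(1));
	printf("%s\n", type_str(2));
	return 0;
}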
+ */ +static const char *reg_type_str(struct bpf_verifier_env *env, + enum bpf_reg_type type) +{ + char postfix[16] = {0}, prefix[16] = {0}; + static const char * const str[] = { + [NOT_INIT] = "?", + [SCALAR_VALUE] = "inv", + [PTR_TO_CTX] = "ctx", + [CONST_PTR_TO_MAP] = "map_ptr", + [PTR_TO_MAP_VALUE] = "map_value", + [PTR_TO_STACK] = "fp", + [PTR_TO_PACKET] = "pkt", + [PTR_TO_PACKET_META] = "pkt_meta", + [PTR_TO_PACKET_END] = "pkt_end", + [PTR_TO_FLOW_KEYS] = "flow_keys", + [PTR_TO_SOCKET] = "sock", + [PTR_TO_SOCK_COMMON] = "sock_common", + [PTR_TO_TCP_SOCK] = "tcp_sock", + [PTR_TO_TP_BUFFER] = "tp_buffer", + [PTR_TO_XDP_SOCK] = "xdp_sock", + [PTR_TO_BTF_ID] = "ptr_", + [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_", + [PTR_TO_MEM] = "mem", + [PTR_TO_BUF] = "buf", + }; + + if (type & PTR_MAYBE_NULL) { + if (base_type(type) == PTR_TO_BTF_ID || + base_type(type) == PTR_TO_PERCPU_BTF_ID) + strncpy(postfix, "or_null_", 16); + else + strncpy(postfix, "_or_null", 16); + } + + if (type & MEM_RDONLY) + strncpy(prefix, "rdonly_", 16); + if (type & MEM_ALLOC) + strncpy(prefix, "alloc_", 16); + + snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s", + prefix, str[base_type(type)], postfix); + return env->type_str_buf; +} static char slot_type_char[] = { [STACK_INVALID] = '?', @@ -578,7 +581,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, continue; verbose(env, " R%d", i); print_liveness(env, reg->live); - verbose(env, "=%s", reg_type_str[t]); + verbose(env, "=%s", reg_type_str(env, t)); if (t == SCALAR_VALUE && reg->precise) verbose(env, "P"); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && @@ -586,9 +589,8 @@ static void print_verifier_state(struct bpf_verifier_env *env, /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); } else { - if (t == PTR_TO_BTF_ID || - t == PTR_TO_BTF_ID_OR_NULL || - t == PTR_TO_PERCPU_BTF_ID) + if (base_type(t) == PTR_TO_BTF_ID || + base_type(t) == PTR_TO_PERCPU_BTF_ID) verbose(env, "%s", kernel_type_name(reg->btf_id)); verbose(env, "(id=%d", reg->id); if (reg_type_may_be_refcounted_or_null(t)) @@ -597,9 +599,8 @@ static void print_verifier_state(struct bpf_verifier_env *env, verbose(env, ",off=%d", reg->off); if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); - else if (t == CONST_PTR_TO_MAP || - t == PTR_TO_MAP_VALUE || - t == PTR_TO_MAP_VALUE_OR_NULL) + else if (base_type(t) == CONST_PTR_TO_MAP || + base_type(t) == PTR_TO_MAP_VALUE) verbose(env, ",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); @@ -669,7 +670,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, if (state->stack[i].slot_type[0] == STACK_SPILL) { reg = &state->stack[i].spilled_ptr; t = reg->type; - verbose(env, "=%s", reg_type_str[t]); + verbose(env, "=%s", reg_type_str(env, t)); if (t == SCALAR_VALUE && reg->precise) verbose(env, "P"); if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) @@ -1568,7 +1569,7 @@ static int mark_reg_read(struct bpf_verifier_env *env, break; if (parent->live & REG_LIVE_DONE) { verbose(env, "verifier BUG type %s var_off %lld off %d\n", - reg_type_str[parent->type], + reg_type_str(env, parent->type), parent->var_off.value, parent->off); return -EFAULT; } @@ -2194,9 +2195,8 @@ static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) static bool is_spillable_regtype(enum bpf_reg_type type) { - switch (type) { + switch (base_type(type)) { case PTR_TO_MAP_VALUE: - case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: @@ 
-2205,21 +2205,13 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_FLOW_KEYS: case CONST_PTR_TO_MAP: case PTR_TO_SOCKET: - case PTR_TO_SOCKET_OR_NULL: case PTR_TO_SOCK_COMMON: - case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: - case PTR_TO_TCP_SOCK_OR_NULL: case PTR_TO_XDP_SOCK: case PTR_TO_BTF_ID: - case PTR_TO_BTF_ID_OR_NULL: - case PTR_TO_RDONLY_BUF: - case PTR_TO_RDONLY_BUF_OR_NULL: - case PTR_TO_RDWR_BUF: - case PTR_TO_RDWR_BUF_OR_NULL: + case PTR_TO_BUF: case PTR_TO_PERCPU_BTF_ID: case PTR_TO_MEM: - case PTR_TO_MEM_OR_NULL: return true; default: return false; @@ -3020,7 +3012,7 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, */ *reg_type = info.reg_type; - if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) + if (base_type(*reg_type) == PTR_TO_BTF_ID) *btf_id = info.btf_id; else env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; @@ -3086,7 +3078,7 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, } verbose(env, "R%d invalid %s access off=%d size=%d\n", - regno, reg_type_str[reg->type], off, size); + regno, reg_type_str(env, reg->type), off, size); return -EACCES; } @@ -3369,16 +3361,17 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env, } #endif -int check_ctx_reg(struct bpf_verifier_env *env, - const struct bpf_reg_state *reg, int regno) +static int __check_ptr_off_reg(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, int regno, + bool fixed_off_ok) { - /* Access to ctx or passing it to a helper is only allowed in - * its original, unmodified form. + /* Access to this pointer-typed register or passing it to a helper + * is only allowed in its original, unmodified form. */ - if (reg->off) { - verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", - regno, reg->off); + if (!fixed_off_ok && reg->off) { + verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n", + reg_type_str(env, reg->type), regno, reg->off); return -EACCES; } @@ -3386,13 +3379,20 @@ int check_ctx_reg(struct bpf_verifier_env *env, char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); - verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); + verbose(env, "variable %s access var_off=%s disallowed\n", + reg_type_str(env, reg->type), tn_buf); return -EACCES; } return 0; } +int check_ptr_off_reg(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, int regno) +{ + return __check_ptr_off_reg(env, reg, regno, false); +} + static int __check_buffer_access(struct bpf_verifier_env *env, const char *buf_info, const struct bpf_reg_state *reg, @@ -3789,15 +3789,30 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn mark_reg_unknown(env, regs, value_regno); } } - } else if (reg->type == PTR_TO_MEM) { + } else if (base_type(reg->type) == PTR_TO_MEM) { + bool rdonly_mem = type_is_rdonly_mem(reg->type); + + if (type_may_be_null(reg->type)) { + verbose(env, "R%d invalid mem access '%s'\n", regno, + reg_type_str(env, reg->type)); + return -EACCES; + } + + if (t == BPF_WRITE && rdonly_mem) { + verbose(env, "R%d cannot write into %s\n", + regno, reg_type_str(env, reg->type)); + return -EACCES; + } + if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into mem\n", value_regno); return -EACCES; } + err = check_mem_region_access(env, regno, off, size, reg->mem_size, false); - if (!err && t == BPF_READ && value_regno >= 0) + if (!err 
&& value_regno >= 0 && (t == BPF_READ || rdonly_mem)) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; @@ -3809,7 +3824,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn return -EACCES; } - err = check_ctx_reg(env, reg, regno); + err = check_ptr_off_reg(env, reg, regno); if (err < 0) return err; @@ -3826,7 +3841,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } else { mark_reg_known_zero(env, regs, value_regno); - if (reg_type_may_be_null(reg_type)) + if (type_may_be_null(reg_type)) regs[value_regno].id = ++env->id_gen; /* A load of ctx field could have different * actual load size with the one encoded in the @@ -3834,8 +3849,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn * a sub-register. */ regs[value_regno].subreg_def = DEF_NOT_SUBREG; - if (reg_type == PTR_TO_BTF_ID || - reg_type == PTR_TO_BTF_ID_OR_NULL) + if (base_type(reg_type) == PTR_TO_BTF_ID) regs[value_regno].btf_id = btf_id; } regs[value_regno].type = reg_type; @@ -3886,7 +3900,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } else if (type_is_sk_pointer(reg->type)) { if (t == BPF_WRITE) { verbose(env, "R%d cannot write into %s\n", - regno, reg_type_str[reg->type]); + regno, reg_type_str(env, reg->type)); return -EACCES; } err = check_sock_access(env, insn_idx, regno, off, size, t); @@ -3902,26 +3916,31 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } else if (reg->type == CONST_PTR_TO_MAP) { err = check_ptr_to_map_access(env, regs, regno, off, size, t, value_regno); - } else if (reg->type == PTR_TO_RDONLY_BUF) { - if (t == BPF_WRITE) { - verbose(env, "R%d cannot write into %s\n", - regno, reg_type_str[reg->type]); - return -EACCES; + } else if (base_type(reg->type) == PTR_TO_BUF) { + bool rdonly_mem = type_is_rdonly_mem(reg->type); + const char *buf_info; + u32 *max_access; + + if (rdonly_mem) { + if (t == BPF_WRITE) { + verbose(env, "R%d cannot write into %s\n", + regno, reg_type_str(env, reg->type)); + return -EACCES; + } + buf_info = "rdonly"; + max_access = &env->prog->aux->max_rdonly_access; + } else { + buf_info = "rdwr"; + max_access = &env->prog->aux->max_rdwr_access; } + err = check_buffer_access(env, reg, regno, off, size, false, - "rdonly", - &env->prog->aux->max_rdonly_access); - if (!err && value_regno >= 0) - mark_reg_unknown(env, regs, value_regno); - } else if (reg->type == PTR_TO_RDWR_BUF) { - err = check_buffer_access(env, reg, regno, off, size, false, - "rdwr", - &env->prog->aux->max_rdwr_access); - if (!err && t == BPF_READ && value_regno >= 0) + buf_info, max_access); + if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ)) mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, - reg_type_str[reg->type]); + reg_type_str(env, reg->type)); return -EACCES; } @@ -3964,7 +3983,7 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins is_sk_reg(env, insn->dst_reg)) { verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", insn->dst_reg, - reg_type_str[reg_state(env, insn->dst_reg)->type]); + reg_type_str(env, reg_state(env, insn->dst_reg)->type)); return -EACCES; } @@ -4120,8 +4139,10 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; + const 
char *buf_info; + u32 *max_access; - switch (reg->type) { + switch (base_type(reg->type)) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, @@ -4137,18 +4158,20 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, return check_mem_region_access(env, regno, reg->off, access_size, reg->mem_size, zero_size_allowed); - case PTR_TO_RDONLY_BUF: - if (meta && meta->raw_mode) - return -EACCES; - return check_buffer_access(env, reg, regno, reg->off, - access_size, zero_size_allowed, - "rdonly", - &env->prog->aux->max_rdonly_access); - case PTR_TO_RDWR_BUF: + case PTR_TO_BUF: + if (type_is_rdonly_mem(reg->type)) { + if (meta && meta->raw_mode) + return -EACCES; + + buf_info = "rdonly"; + max_access = &env->prog->aux->max_rdonly_access; + } else { + buf_info = "rdwr"; + max_access = &env->prog->aux->max_rdwr_access; + } return check_buffer_access(env, reg, regno, reg->off, access_size, zero_size_allowed, - "rdwr", - &env->prog->aux->max_rdwr_access); + buf_info, max_access); case PTR_TO_STACK: return check_stack_range_initialized( env, @@ -4160,9 +4183,9 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, register_is_null(reg)) return 0; - verbose(env, "R%d type=%s expected=%s\n", regno, - reg_type_str[reg->type], - reg_type_str[PTR_TO_STACK]); + verbose(env, "R%d type=%s ", regno, + reg_type_str(env, reg->type)); + verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK)); return -EACCES; } } @@ -4250,9 +4273,8 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno, static bool arg_type_is_mem_ptr(enum bpf_arg_type type) { - return type == ARG_PTR_TO_MEM || - type == ARG_PTR_TO_MEM_OR_NULL || - type == ARG_PTR_TO_UNINIT_MEM; + return base_type(type) == ARG_PTR_TO_MEM || + base_type(type) == ARG_PTR_TO_UNINIT_MEM; } static bool arg_type_is_mem_size(enum bpf_arg_type type) @@ -4352,8 +4374,8 @@ static const struct bpf_reg_types mem_types = { PTR_TO_PACKET_META, PTR_TO_MAP_VALUE, PTR_TO_MEM, - PTR_TO_RDONLY_BUF, - PTR_TO_RDWR_BUF, + PTR_TO_MEM | MEM_ALLOC, + PTR_TO_BUF, }, }; @@ -4369,7 +4391,7 @@ static const struct bpf_reg_types int_ptr_types = { static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } }; static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } }; static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } }; -static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } }; +static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM | MEM_ALLOC } }; static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } }; static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } }; static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } }; @@ -4379,26 +4401,21 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_MAP_KEY] = &map_key_value_types, [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types, [ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types, - [ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types, [ARG_CONST_SIZE] = &scalar_types, [ARG_CONST_SIZE_OR_ZERO] = &scalar_types, [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types, [ARG_CONST_MAP_PTR] = &const_map_ptr_types, [ARG_PTR_TO_CTX] = &context_types, - [ARG_PTR_TO_CTX_OR_NULL] = &context_types, [ARG_PTR_TO_SOCK_COMMON] = &sock_types, #ifdef CONFIG_NET [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = 
&btf_id_sock_common_types, #endif [ARG_PTR_TO_SOCKET] = &fullsock_types, - [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types, [ARG_PTR_TO_BTF_ID] = &btf_ptr_types, [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, [ARG_PTR_TO_MEM] = &mem_types, - [ARG_PTR_TO_MEM_OR_NULL] = &mem_types, [ARG_PTR_TO_UNINIT_MEM] = &mem_types, [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types, - [ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types, [ARG_PTR_TO_INT] = &int_ptr_types, [ARG_PTR_TO_LONG] = &int_ptr_types, [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, @@ -4413,12 +4430,27 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno, const struct bpf_reg_types *compatible; int i, j; - compatible = compatible_reg_types[arg_type]; + compatible = compatible_reg_types[base_type(arg_type)]; if (!compatible) { verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type); return -EFAULT; } + /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY, + * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY + * + * Same for MAYBE_NULL: + * + * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL, + * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL + * + * Therefore we fold these flags depending on the arg_type before comparison. + */ + if (arg_type & MEM_RDONLY) + type &= ~MEM_RDONLY; + if (arg_type & PTR_MAYBE_NULL) + type &= ~PTR_MAYBE_NULL; + for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { expected = compatible->types[i]; if (expected == NOT_INIT) @@ -4428,14 +4460,14 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno, goto found; } - verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]); + verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type)); for (j = 0; j + 1 < i; j++) - verbose(env, "%s, ", reg_type_str[compatible->types[j]]); - verbose(env, "%s\n", reg_type_str[compatible->types[j]]); + verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); + verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); return -EACCES; found: - if (type == PTR_TO_BTF_ID) { + if (reg->type == PTR_TO_BTF_ID) { if (!arg_btf_id) { if (!compatible->btf_id) { verbose(env, "verifier internal error: missing arg compatible BTF ID\n"); @@ -4451,12 +4483,6 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno, kernel_type_name(*arg_btf_id)); return -EACCES; } - - if (!tnum_is_const(reg->var_off) || reg->var_off.value) { - verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n", - regno); - return -EACCES; - } } return 0; @@ -4494,15 +4520,14 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, return -EACCES; } - if (arg_type == ARG_PTR_TO_MAP_VALUE || - arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || - arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) { + if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE || + base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) { err = resolve_map_arg_type(env, meta, &arg_type); if (err) return err; } - if (register_is_null(reg) && arg_type_may_be_null(arg_type)) + if (register_is_null(reg) && type_may_be_null(arg_type)) /* A NULL register has a SCALAR_VALUE type, so skip * type checking. 
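The folding comment above boils down to one rule: a modifier flag on the argument type widens what a helper accepts, while the same flag on the register type narrows what the register may be used for, so the register's flag may be stripped before comparison only when the argument carries it too. An illustrative standalone check (the bit values are assumptions, not the kernel's):

#include <stdio.h>

#define MEM_RDONLY     (1u << 8)
#define PTR_MAYBE_NULL (1u << 9)

static int arg_compatible(unsigned arg_type, unsigned reg_type,
			  const unsigned *accepted, unsigned n)
{
	unsigned i;

	/* An arg that tolerates read-only memory also takes writable
	 * memory, so drop the register's flag only in that case. */
	if (arg_type & MEM_RDONLY)
		reg_type &= ~MEM_RDONLY;
	if (arg_type & PTR_MAYBE_NULL)
		reg_type &= ~PTR_MAYBE_NULL;

	for (i = 0; i < n; i++)
		if (reg_type == accepted[i])
			return 1;
	return 0;
}

int main(void)
{
	enum { PTR_TO_MEM = 1 };
	const unsigned accepted[] = { PTR_TO_MEM };

	/* read-only reg vs. an arg that may write: rejected */
	printf("%d\n", arg_compatible(PTR_TO_MEM,
				      PTR_TO_MEM | MEM_RDONLY, accepted, 1));
	/* read-only reg vs. an arg marked MEM_RDONLY: accepted */
	printf("%d\n", arg_compatible(PTR_TO_MEM | MEM_RDONLY,
				      PTR_TO_MEM | MEM_RDONLY, accepted, 1));
	return 0;
}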
*/ @@ -4512,10 +4537,32 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, if (err) return err; - if (type == PTR_TO_CTX) { - err = check_ctx_reg(env, reg, regno); + switch ((u32)type) { + case SCALAR_VALUE: + /* Pointer types where reg offset is explicitly allowed: */ + case PTR_TO_PACKET: + case PTR_TO_PACKET_META: + case PTR_TO_MAP_VALUE: + case PTR_TO_MEM: + case PTR_TO_MEM | MEM_RDONLY: + case PTR_TO_MEM | MEM_ALLOC: + case PTR_TO_BUF: + case PTR_TO_BUF | MEM_RDONLY: + case PTR_TO_STACK: + /* Some of the argument types nevertheless require a + * zero register offset. + */ + if (arg_type == ARG_PTR_TO_ALLOC_MEM) + goto force_off_check; + break; + /* All the rest must be rejected: */ + default: +force_off_check: + err = __check_ptr_off_reg(env, reg, regno, + type == PTR_TO_BTF_ID); if (err < 0) return err; + break; } skip_type_check: @@ -4549,10 +4596,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, err = check_helper_mem_access(env, regno, meta->map_ptr->key_size, false, NULL); - } else if (arg_type == ARG_PTR_TO_MAP_VALUE || - (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL && - !register_is_null(reg)) || - arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { + } else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE || + base_type(arg_type) == ARG_PTR_TO_UNINIT_MAP_VALUE) { + if (type_may_be_null(arg_type) && register_is_null(reg)) + return 0; + /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ @@ -5332,6 +5380,8 @@ static int check_reference_leak(struct bpf_verifier_env *env) static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; + enum bpf_return_type ret_type; + enum bpf_type_flag ret_flag; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; @@ -5443,13 +5493,14 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; /* update return register (already marked as written above) */ - if (fn->ret_type == RET_INTEGER) { + ret_type = fn->ret_type; + ret_flag = type_flag(fn->ret_type); + if (ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(env, regs, BPF_REG_0); - } else if (fn->ret_type == RET_VOID) { + } else if (ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; - } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || - fn->ret_type == RET_PTR_TO_MAP_VALUE) { + } else if (base_type(ret_type) == RET_PTR_TO_MAP_VALUE) { /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); /* remember map_ptr, so that check_map_access() @@ -5462,28 +5513,25 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; - if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { - regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; - if (map_value_has_spin_lock(meta.map_ptr)) - regs[BPF_REG_0].id = ++env->id_gen; - } else { - regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; + regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag; + if (!type_may_be_null(ret_type) && + map_value_has_spin_lock(meta.map_ptr)) { + regs[BPF_REG_0].id = ++env->id_gen; } - } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { + } else if (base_type(ret_type) == RET_PTR_TO_SOCKET) { mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; - } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) { + regs[BPF_REG_0].type = 
PTR_TO_SOCKET | ret_flag; + } else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) { mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL; - } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { + regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag; + } else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) { mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; - } else if (fn->ret_type == RET_PTR_TO_ALLOC_MEM_OR_NULL) { + regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag; + } else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) { mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; + regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; regs[BPF_REG_0].mem_size = meta.mem_size; - } else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL || - fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) { + } else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) { const struct btf_type *t; mark_reg_known_zero(env, regs, BPF_REG_0); @@ -5501,35 +5549,39 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn tname, PTR_ERR(ret)); return -EINVAL; } - regs[BPF_REG_0].type = - fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ? - PTR_TO_MEM : PTR_TO_MEM_OR_NULL; + regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; regs[BPF_REG_0].mem_size = tsize; } else { - regs[BPF_REG_0].type = - fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ? - PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL; + /* MEM_RDONLY may be carried from ret_flag, but it + * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise + * it will confuse the check of PTR_TO_BTF_ID in + * check_mem_access(). + */ + ret_flag &= ~MEM_RDONLY; + + regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; regs[BPF_REG_0].btf_id = meta.ret_btf_id; } - } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) { + } else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) { int ret_btf_id; mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL; + regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; ret_btf_id = *fn->ret_btf_id; if (ret_btf_id == 0) { - verbose(env, "invalid return type %d of func %s#%d\n", - fn->ret_type, func_id_name(func_id), func_id); + verbose(env, "invalid return type %u of func %s#%d\n", + base_type(ret_type), func_id_name(func_id), + func_id); return -EINVAL; } regs[BPF_REG_0].btf_id = ret_btf_id; } else { - verbose(env, "unknown return type %d of func %s#%d\n", - fn->ret_type, func_id_name(func_id), func_id); + verbose(env, "unknown return type %u of func %s#%d\n", + base_type(ret_type), func_id_name(func_id), func_id); return -EINVAL; } - if (reg_type_may_be_null(regs[BPF_REG_0].type)) + if (type_may_be_null(regs[BPF_REG_0].type)) regs[BPF_REG_0].id = ++env->id_gen; if (is_ptr_cast_function(func_id)) { @@ -5630,25 +5682,25 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env, if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { verbose(env, "math between %s pointer and %lld is not allowed\n", - reg_type_str[type], val); + reg_type_str(env, type), val); return false; } if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { verbose(env, "%s pointer offset %d is not allowed\n", - reg_type_str[type], reg->off); + reg_type_str(env, type), reg->off); return false; } if (smin == S64_MIN) { verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", - reg_type_str[type]); + reg_type_str(env, type)); return false; } if (smin >= BPF_MAX_VAR_OFF || 
smin <= -BPF_MAX_VAR_OFF) { verbose(env, "value %lld makes %s pointer be out of bounds\n", - smin, reg_type_str[type]); + smin, reg_type_str(env, type)); return false; } @@ -6025,11 +6077,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, return -EACCES; } - switch (ptr_reg->type) { - case PTR_TO_MAP_VALUE_OR_NULL: + if (ptr_reg->type & PTR_MAYBE_NULL) { verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", - dst, reg_type_str[ptr_reg->type]); + dst, reg_type_str(env, ptr_reg->type)); return -EACCES; + } + + switch (base_type(ptr_reg->type)) { case CONST_PTR_TO_MAP: /* smin_val represents the known value */ if (known && smin_val == 0 && opcode == BPF_ADD) @@ -6037,16 +6091,16 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, fallthrough; case PTR_TO_PACKET_END: case PTR_TO_SOCKET: - case PTR_TO_SOCKET_OR_NULL: case PTR_TO_SOCK_COMMON: - case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: - case PTR_TO_TCP_SOCK_OR_NULL: case PTR_TO_XDP_SOCK: +reject: verbose(env, "R%d pointer arithmetic on %s prohibited\n", - dst, reg_type_str[ptr_reg->type]); + dst, reg_type_str(env, ptr_reg->type)); return -EACCES; default: + if (type_may_be_null(ptr_reg->type)) + goto reject; break; } @@ -7723,21 +7777,21 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, struct bpf_reg_state *reg, u32 id, bool is_null) { - if (reg_type_may_be_null(reg->type) && reg->id == id && + if (type_may_be_null(reg->type) && reg->id == id && !WARN_ON_ONCE(!reg->id)) { - /* Old offset (both fixed and variable parts) should - * have been known-zero, because we don't allow pointer - * arithmetic on pointers that might be NULL. - */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { - __mark_reg_known_zero(reg); - reg->off = 0; + /* Old offset (both fixed and variable parts) should + * have been known-zero, because we don't allow pointer + * arithmetic on pointers that might be NULL. If we + * see this happening, don't convert the register. 
+ */ + return; } if (is_null) { reg->type = SCALAR_VALUE; - } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { + } else if (base_type(reg->type) == PTR_TO_MAP_VALUE) { const struct bpf_map *map = reg->map_ptr; if (map->inner_map_meta) { @@ -7751,21 +7805,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, } else { reg->type = PTR_TO_MAP_VALUE; } - } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { - reg->type = PTR_TO_SOCKET; - } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) { - reg->type = PTR_TO_SOCK_COMMON; - } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { - reg->type = PTR_TO_TCP_SOCK; - } else if (reg->type == PTR_TO_BTF_ID_OR_NULL) { - reg->type = PTR_TO_BTF_ID; - } else if (reg->type == PTR_TO_MEM_OR_NULL) { - reg->type = PTR_TO_MEM; - } else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) { - reg->type = PTR_TO_RDONLY_BUF; - } else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) { - reg->type = PTR_TO_RDWR_BUF; + } else { + reg->type &= ~PTR_MAYBE_NULL; } + if (is_null) { /* We don't need id and ref_obj_id from this point * onwards anymore, thus we should better reset it, @@ -8112,7 +8155,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, */ if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && - reg_type_may_be_null(dst_reg->type)) { + type_may_be_null(dst_reg->type)) { /* Mark all identical registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ @@ -8163,11 +8206,15 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) return 0; } - if (insn->src_reg == BPF_PSEUDO_BTF_ID) { - mark_reg_known_zero(env, regs, insn->dst_reg); + /* All special src_reg cases are listed below. From this point onwards + * we either succeed and assign a corresponding dst_reg->type after + * zeroing the offset, or fail and reject the program. 
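With NULL-ness expressed as a flag, the eight per-type else-if arms collapse into the single mask operation above, and the conditional-jump logic follows the same shape: an `if (p == NULL)` test produces two successor states, one where the register becomes a scalar and one where only the flag is dropped. A compact model of that split, using invented types rather than the verifier's structs:

#include <stdio.h>

#define PTR_MAYBE_NULL (1u << 9)
enum { SCALAR_VALUE = 0, PTR_TO_MAP_VALUE = 1 };

struct reg_state { unsigned type; };

/* Refine one register for a successor state of a NULL test. */
static void mark_ptr_or_null(struct reg_state *reg, int is_null)
{
	if (!(reg->type & PTR_MAYBE_NULL))
		return;
	if (is_null)
		reg->type = SCALAR_VALUE;	/* NULL is just the scalar 0 */
	else
		reg->type &= ~PTR_MAYBE_NULL;	/* base type is untouched */
}

int main(void)
{
	struct reg_state taken = { PTR_TO_MAP_VALUE | PTR_MAYBE_NULL };
	struct reg_state fallthrough = taken;

	mark_ptr_or_null(&taken, 1);		/* branch where p == NULL */
	mark_ptr_or_null(&fallthrough, 0);	/* branch where p != NULL */
	printf("taken=%u fallthrough=%u\n", taken.type, fallthrough.type);
	return 0;
}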
+ */ + mark_reg_known_zero(env, regs, insn->dst_reg); + if (insn->src_reg == BPF_PSEUDO_BTF_ID) { dst_reg->type = aux->btf_var.reg_type; - switch (dst_reg->type) { + switch (base_type(dst_reg->type)) { case PTR_TO_MEM: dst_reg->mem_size = aux->btf_var.mem_size; break; @@ -8183,7 +8230,6 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) } map = env->used_maps[aux->map_index]; - mark_reg_known_zero(env, regs, insn->dst_reg); dst_reg->map_ptr = map; if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) { @@ -8285,7 +8331,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) return err; } - err = check_ctx_reg(env, ®s[ctx_reg], ctx_reg); + err = check_ptr_off_reg(env, ®s[ctx_reg], ctx_reg); if (err < 0) return err; @@ -8341,7 +8387,7 @@ static int check_return_code(struct bpf_verifier_env *env) if (is_subprog) { if (reg->type != SCALAR_VALUE) { verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n", - reg_type_str[reg->type]); + reg_type_str(env, reg->type)); return -EINVAL; } return 0; @@ -8402,7 +8448,7 @@ static int check_return_code(struct bpf_verifier_env *env) if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", - reg_type_str[reg->type]); + reg_type_str(env, reg->type)); return -EINVAL; } @@ -9150,7 +9196,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, return true; if (rcur->type == NOT_INIT) return false; - switch (rold->type) { + switch (base_type(rold->type)) { case SCALAR_VALUE: if (env->explore_alu_limits) return false; @@ -9171,6 +9217,22 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, return false; } case PTR_TO_MAP_VALUE: + /* a PTR_TO_MAP_VALUE could be safe to use as a + * PTR_TO_MAP_VALUE_OR_NULL into the same map. + * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- + * checked, doing so could have affected others with the same + * id, and we can't check for that because we lost the id when + * we converted to a PTR_TO_MAP_VALUE. + */ + if (type_may_be_null(rold->type)) { + if (!type_may_be_null(rcur->type)) + return false; + if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) + return false; + /* Check our ids match any regs they're supposed to */ + return check_ids(rold->id, rcur->id, idmap); + } + /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * 'id' is not compared, since it's only used for maps with @@ -9182,20 +9244,6 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); - case PTR_TO_MAP_VALUE_OR_NULL: - /* a PTR_TO_MAP_VALUE could be safe to use as a - * PTR_TO_MAP_VALUE_OR_NULL into the same map. - * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- - * checked, doing so could have affected others with the same - * id, and we can't check for that because we lost the id when - * we converted to a PTR_TO_MAP_VALUE. 
- */ - if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) - return false; - if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) - return false; - /* Check our ids match any regs they're supposed to */ - return check_ids(rold->id, rcur->id, idmap); case PTR_TO_PACKET_META: case PTR_TO_PACKET: if (rcur->type != rold->type) @@ -9224,11 +9272,8 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, case PTR_TO_PACKET_END: case PTR_TO_FLOW_KEYS: case PTR_TO_SOCKET: - case PTR_TO_SOCKET_OR_NULL: case PTR_TO_SOCK_COMMON: - case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: - case PTR_TO_TCP_SOCK_OR_NULL: case PTR_TO_XDP_SOCK: /* Only valid matches are exact, which memcmp() above * would have accepted @@ -9738,17 +9783,13 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) /* Return true if it's OK to have the same insn return a different type. */ static bool reg_type_mismatch_ok(enum bpf_reg_type type) { - switch (type) { + switch (base_type(type)) { case PTR_TO_CTX: case PTR_TO_SOCKET: - case PTR_TO_SOCKET_OR_NULL: case PTR_TO_SOCK_COMMON: - case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: - case PTR_TO_TCP_SOCK_OR_NULL: case PTR_TO_XDP_SOCK: case PTR_TO_BTF_ID: - case PTR_TO_BTF_ID_OR_NULL: return false; default: return true; @@ -9966,7 +10007,7 @@ static int do_check(struct bpf_verifier_env *env) if (is_ctx_reg(env, insn->dst_reg)) { verbose(env, "BPF_ST stores into R%d %s is not allowed\n", insn->dst_reg, - reg_type_str[reg_state(env, insn->dst_reg)->type]); + reg_type_str(env, reg_state(env, insn->dst_reg)->type)); return -EACCES; } @@ -10174,7 +10215,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env, tname, PTR_ERR(ret)); return -EINVAL; } - aux->btf_var.reg_type = PTR_TO_MEM; + aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; aux->btf_var.mem_size = tsize; } else { aux->btf_var.reg_type = PTR_TO_BTF_ID; @@ -12349,11 +12390,11 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; - ret = -EINVAL; /* log attributes have to be sane */ - if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 || - !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK) + if (!bpf_verifier_log_attr_valid(log)) { + ret = -EINVAL; goto err_unlock; + } } if (IS_ERR(btf_vmlinux)) { diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index 6e36e854b5124df5f0b2cd0daefd12b264c56cc4..3f116765bb001ef612585f57584abccb3ed6e1b3 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -266,6 +266,9 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node, int __cgroup_task_count(const struct cgroup *cgrp); int cgroup_task_count(const struct cgroup *cgrp); +ssize_t cgroup_kill_write(struct kernfs_open_file *of, char *buf, size_t nbytes, + loff_t off); + /* * rstat.c */ diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 9f5221653f80c979ac77dfd57801c5b8ac4fb2f7..be884bc2ae61b6ec870728704cb485f0460d47ac 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -545,6 +545,7 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct cgroup *cgrp; + struct cgroup_file_ctx *ctx; BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); @@ -552,8 +553,9 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, * Release agent gets called with all capabilities, * 
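The bpf_check() hunk above replaces the open-coded log checks with bpf_verifier_log_attr_valid(); the conditions themselves are unchanged. A standalone restatement of them (field names and the level mask are illustrative stand-ins):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOG_LEVEL_MASK 0x7u	/* assumed stand-in for BPF_LOG_MASK */

struct vlog {
	uint32_t level;
	uint32_t len_total;
	char *ubuf;
};

/* A usable buffer, a sane length window and only known level bits. */
static bool log_attr_valid(const struct vlog *log)
{
	return log->len_total >= 128 &&
	       log->len_total <= UINT32_MAX >> 2 &&
	       log->level && !(log->level & ~LOG_LEVEL_MASK) &&
	       log->ubuf != NULL;
}

int main(void)
{
	char buf[256];
	struct vlog ok = { 1, sizeof(buf), buf };
	struct vlog bad = { 0, 16, NULL };

	printf("%d %d\n", log_attr_valid(&ok), log_attr_valid(&bad));
	return 0;
}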
require capabilities to set release agent. */ - if ((of->file->f_cred->user_ns != &init_user_ns) || - !capable(CAP_SYS_ADMIN)) + ctx = of->priv; + if ((ctx->ns->user_ns != &init_user_ns) || + !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN)) return -EPERM; cgrp = cgroup_kn_lock_live(of->kn, false); @@ -658,6 +660,11 @@ struct cftype cgroup1_base_files[] = { .write = cgroup_release_agent_write, .max_write_len = PATH_MAX - 1, }, + { + .name = "cgroup.kill", + .flags = CFTYPE_NOT_ON_ROOT, + .write = cgroup_kill_write, + }, { } /* terminate */ }; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 59cc82ef52a6b9989e4158c83911ce0870ac4f5d..57f4e19df8c6d0ccbe1ce1bdf4b6c9daa9ab3e50 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -3654,6 +3654,12 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, cgroup_get(cgrp); cgroup_kn_unlock(of->kn); + /* Allow only one trigger per file descriptor */ + if (ctx->psi.trigger) { + cgroup_put(cgrp); + return -EBUSY; + } + psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi; new = psi_trigger_create(psi, buf, nbytes, res); if (IS_ERR(new)) { @@ -3661,8 +3667,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf, return PTR_ERR(new); } - psi_trigger_replace(&ctx->psi.trigger, new); - + smp_store_release(&ctx->psi.trigger, new); cgroup_put(cgrp); return nbytes; @@ -3701,7 +3706,7 @@ static void cgroup_pressure_release(struct kernfs_open_file *of) { struct cgroup_file_ctx *ctx = of->priv; - psi_trigger_replace(&ctx->psi.trigger, NULL); + psi_trigger_destroy(ctx->psi.trigger); } struct cftype cgroup_v1_psi_files[] = { @@ -3767,6 +3772,80 @@ static ssize_t cgroup_freeze_write(struct kernfs_open_file *of, return nbytes; } +static void __cgroup_kill(struct cgroup *cgrp) +{ + struct css_task_iter it; + struct task_struct *task; + + lockdep_assert_held(&cgroup_mutex); + + spin_lock_irq(&css_set_lock); + set_bit(CGRP_KILL, &cgrp->flags); + spin_unlock_irq(&css_set_lock); + + css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it); + while ((task = css_task_iter_next(&it))) { + /* Ignore kernel threads here. */ + if (task->flags & PF_KTHREAD) + continue; + + /* Skip tasks that are already dying. */ + if (__fatal_signal_pending(task)) + continue; + + send_sig(SIGKILL, task, 0); + } + css_task_iter_end(&it); + + spin_lock_irq(&css_set_lock); + clear_bit(CGRP_KILL, &cgrp->flags); + spin_unlock_irq(&css_set_lock); +} + +static void cgroup_kill(struct cgroup *cgrp) +{ + struct cgroup_subsys_state *css; + struct cgroup *dsct; + + lockdep_assert_held(&cgroup_mutex); + + cgroup_for_each_live_descendant_pre(dsct, css, cgrp) + __cgroup_kill(dsct); +} + +ssize_t cgroup_kill_write(struct kernfs_open_file *of, char *buf, size_t nbytes, + loff_t off) +{ + ssize_t ret = 0; + int kill; + struct cgroup *cgrp; + + ret = kstrtoint(strstrip(buf), 0, &kill); + if (ret) + return ret; + + if (kill != 1) + return -ERANGE; + + cgrp = cgroup_kn_lock_live(of->kn, false); + if (!cgrp) + return -ENOENT; + + /* + * Killing is a process directed operation, i.e. the whole thread-group + * is taken down so act like we do for cgroup.procs and only make this + * writable in non-threaded cgroups. 
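From userspace, the new cgroup.kill interface is a single write. A minimal demonstration; the cgroup path is an example and must name an existing, non-threaded v2 cgroup the caller may write to:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/mygrp/cgroup.kill"; /* example */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Only "1" is accepted; other values fail with ERANGE, threaded
	 * cgroups with EOPNOTSUPP, dead cgroups with ENOENT. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

On success every process in the subtree, kernel threads excepted, receives SIGKILL.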
+ */ + if (cgroup_is_threaded(cgrp)) + ret = -EOPNOTSUPP; + else + cgroup_kill(cgrp); + + cgroup_kn_unlock(of->kn); + + return ret ?: nbytes; +} + static int cgroup_file_open(struct kernfs_open_file *of) { struct cftype *cft = of_cft(of); @@ -4988,6 +5067,11 @@ static struct cftype cgroup_base_files[] = { .seq_show = cgroup_freeze_show, .write = cgroup_freeze_write, }, + { + .name = "cgroup.kill", + .flags = CFTYPE_NOT_ON_ROOT, + .write = cgroup_kill_write, + }, { .name = "cpu.stat", .seq_show = cpu_stat_show, @@ -6227,6 +6311,8 @@ void cgroup_post_fork(struct task_struct *child, struct kernel_clone_args *kargs) __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex) { + unsigned long cgrp_flags = 0; + bool kill = false; struct cgroup_subsys *ss; struct css_set *cset; int i; @@ -6238,6 +6324,11 @@ void cgroup_post_fork(struct task_struct *child, /* init tasks are special, only link regular threads */ if (likely(child->pid)) { + if (kargs->cgrp) + cgrp_flags = kargs->cgrp->flags; + else + cgrp_flags = cset->dfl_cgrp->flags; + WARN_ON_ONCE(!list_empty(&child->cg_list)); cset->nr_tasks++; css_set_move_task(child, NULL, cset, false); @@ -6246,23 +6337,32 @@ void cgroup_post_fork(struct task_struct *child, cset = NULL; } - /* - * If the cgroup has to be frozen, the new task has too. Let's set - * the JOBCTL_TRAP_FREEZE jobctl bit to get the task into the - * frozen state. - */ - if (unlikely(cgroup_task_freeze(child))) { - spin_lock(&child->sighand->siglock); - WARN_ON_ONCE(child->frozen); - child->jobctl |= JOBCTL_TRAP_FREEZE; - spin_unlock(&child->sighand->siglock); + if (!(child->flags & PF_KTHREAD)) { + if (unlikely(test_bit(CGRP_FREEZE, &cgrp_flags))) { + /* + * If the cgroup has to be frozen, the new task has + * too. Let's set the JOBCTL_TRAP_FREEZE jobctl bit to + * get the task into the frozen state. + */ + spin_lock(&child->sighand->siglock); + WARN_ON_ONCE(child->frozen); + child->jobctl |= JOBCTL_TRAP_FREEZE; + spin_unlock(&child->sighand->siglock); + + /* + * Calling cgroup_update_frozen() isn't required here, + * because it will be called anyway a bit later from + * do_freezer_trap(). So we avoid cgroup's transient + * switch from the frozen state and back. + */ + } /* - * Calling cgroup_update_frozen() isn't required here, - * because it will be called anyway a bit later from - * do_freezer_trap(). So we avoid cgroup's transient switch - * from the frozen state and back. + * If the cgroup is to be killed notice it now and take the + * child down right after we finished preparing it for + * userspace. */ + kill = test_bit(CGRP_KILL, &cgrp_flags); } spin_unlock_irq(&css_set_lock); @@ -6285,6 +6385,10 @@ void cgroup_post_fork(struct task_struct *child, put_css_set(rcset); } + /* Cgroup has to be killed so take down child immediately. */ + if (unlikely(kill)) + do_send_sig_info(SIGKILL, SEND_SIG_NOINFO, child, PIDTYPE_TGID); + cgroup_css_set_put_fork(kargs); } diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 01966adceced1a14fbe2c675fb5246edc2643a94..b7a936e5d05bab20ae09e414783ec3db2fdc9a0e 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1489,10 +1489,15 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, struct cpuset *sibling; struct cgroup_subsys_state *pos_css; + percpu_rwsem_assert_held(&cpuset_rwsem); + /* * Check all its siblings and call update_cpumasks_hier() * if their use_parent_ecpus flag is set in order for them * to use the right effective_cpus value. 
+ * + * The update_cpumasks_hier() function may sleep. So we have to + * release the RCU read lock before calling it. */ rcu_read_lock(); cpuset_for_each_child(sibling, pos_css, parent) { @@ -1500,8 +1505,13 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, continue; if (!sibling->use_parent_ecpus) continue; + if (!css_tryget_online(&sibling->css)) + continue; + rcu_read_unlock(); update_cpumasks_hier(sibling, tmp); + rcu_read_lock(); + css_put(&sibling->css); } rcu_read_unlock(); } @@ -1574,8 +1584,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, * Make sure that subparts_cpus is a subset of cpus_allowed. */ if (cs->nr_subparts_cpus) { - cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, - cs->cpus_allowed); + cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed); cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); } spin_unlock_irq(&callback_lock); diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 88d93da963e8ea92379d13224b6aaf394eb2bc37..0865f816b57a34054ef0c258fa5261004dad11b5 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -321,6 +321,10 @@ int __init parse_crashkernel_low(char *cmdline, */ #ifdef CONFIG_ARCH_WANT_RESERVE_CRASH_KERNEL +bool crash_low_mem_page_map __initdata; +static bool crash_high_mem_reserved __initdata; +static struct resource crashk_res_high; + static int __init reserve_crashkernel_low(void) { #ifdef CONFIG_64BIT @@ -374,6 +378,68 @@ static int __init reserve_crashkernel_low(void) return 0; } +void __init reserve_crashkernel_high(void) +{ + unsigned long long crash_base, crash_size; + char *cmdline = boot_command_line; + int ret; + + if (!IS_ENABLED(CONFIG_KEXEC_CORE)) + return; + + /* crashkernel=X[@offset] */ + ret = parse_crashkernel(cmdline, memblock_phys_mem_size(), + &crash_size, &crash_base); + if (ret || !crash_size) { + ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base); + if (ret || !crash_size) + return; + } else if (!crash_base) { + crash_low_mem_page_map = true; + } + + crash_size = PAGE_ALIGN(crash_size); + + /* + * For the case crashkernel=X, the allocation may fall back to memory + * above 4G, so make the reservation here in advance. It will be + * released later if the region is successfully reserved under 4G.
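The reservation strategy described above is two-phase: when plain crashkernel=X names no base address, a region above 4G is reserved early as insurance, and it is later either adopted (if low memory turns out to be exhausted) or released (if the low allocation succeeds). The control flow, modeled with stub allocators in place of the memblock calls; everything here is illustrative:

#include <stdio.h>

/* Stand-ins for the memblock allocator; 0 models failure. */
static unsigned long long alloc_low(unsigned long long size)
{
	(void)size;
	return 0;	/* pretend low memory is exhausted */
}

static unsigned long long alloc_high(unsigned long long size)
{
	(void)size;
	return 1ULL << 32;
}

int main(void)
{
	unsigned long long size = 256ULL << 20;
	unsigned long long high, base;

	/* Phase 1, early boot: reserve high memory as insurance. */
	high = alloc_high(size);

	/* Phase 2: prefer a placement below 4G. */
	base = alloc_low(size);
	if (!base && high) {
		base = high;	/* fall back to the insurance region */
		printf("using high region at %#llx\n", base);
	} else if (base && high) {
		printf("freeing unused high region at %#llx\n", high);
	}
	return 0;
}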
+ */ + if (!crash_base) { + crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, + crash_base, CRASH_ADDR_HIGH_MAX); + if (!crash_base) + return; + + crash_high_mem_reserved = true; + } + + /* Mark the memory range that requires page-level mappings */ + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; +} + +static void __init hand_over_reserved_high_mem(void) +{ + crashk_res_high.start = crashk_res.start; + crashk_res_high.end = crashk_res.end; + + crashk_res.start = 0; + crashk_res.end = 0; +} + +static void __init take_reserved_high_mem(unsigned long long *crash_base, + unsigned long long *crash_size) +{ + *crash_base = crashk_res_high.start; + *crash_size = resource_size(&crashk_res_high); +} + +static void __init free_reserved_high_mem(void) +{ + memblock_free(crashk_res_high.start, resource_size(&crashk_res_high)); +} + /* * reserve_crashkernel() - reserves memory for crash kernel * @@ -389,6 +455,8 @@ void __init reserve_crashkernel(void) total_mem = memblock_phys_mem_size(); + hand_over_reserved_high_mem(); + /* crashkernel=XM */ ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base); if (ret != 0 || crash_size <= 0) { @@ -398,6 +466,11 @@ if (ret != 0 || crash_size <= 0) return; high = true; + + if (crash_high_mem_reserved) { + take_reserved_high_mem(&crash_base, &crash_size); + goto reserve_low; + } } /* 0 means: find the address automatically */ @@ -411,10 +484,15 @@ * So try low memory first and fall back to high memory * unless "crashkernel=size[KMG],high" is specified. */ - if (!high) + if (!high) { crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_LOW_MAX, crash_size, CRASH_ALIGN); + if (!crash_base && crash_high_mem_reserved) { + take_reserved_high_mem(&crash_base, &crash_size); + goto reserve_low; + } + } if (!crash_base) crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_HIGH_MAX, crash_size, @@ -447,9 +525,18 @@ return; } - if (crash_base >= CRASH_ADDR_LOW_MAX && reserve_crashkernel_low()) { - memblock_free(crash_base, crash_size); - return; + if ((crash_base >= CRASH_ADDR_LOW_MAX) || high) { +reserve_low: + if (reserve_crashkernel_low()) { + memblock_free(crash_base, crash_size); + return; + } + } else if (crash_high_mem_reserved) { + /* + * The crash memory is successfully allocated under 4G, and the + * previously reserved high memory is no longer required. + */ + free_reserved_high_mem(); } pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n", diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index be5b6b97adbfe1f12f95de61e510dc902fa52d19..363f781b56cad6b5cc5796c08aebfa7f3b016c55 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -56,6 +56,7 @@ #include #include #include +#include <linux/security.h> #include #include @@ -762,6 +763,29 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, continue; kgdb_connected = 0; } else { + /* + * This is a brutal way to interfere with the debugger + * and prevent gdb being used to poke at kernel memory. + * This could cause trouble if lockdown is applied when + * there is already an active gdb session. For now the + * answer is simply "don't do that". Typically lockdown + * *will* be applied before the debug core gets started + * so only developers using kgdb for fairly advanced + * early kernel debug can be bitten by this.
Hopefully + * they are sophisticated enough to take care of + * themselves, especially with help from the lockdown + * message printed on the console! + */ + if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) { + if (IS_ENABLED(CONFIG_KGDB_KDB)) { + /* Switch back to kdb if possible... */ + dbg_kdb_mode = 1; + continue; + } else { + /* ... otherwise just bail */ + break; + } + } error = gdb_serial_stub(ks); } diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 930ac1b25ec7c4377e3cfcdc63c7a558826bf60e..4e09fab52faf50beed7f4af7de696d4689c16d40 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -45,6 +45,7 @@ #include #include #include +#include <linux/security.h> #include "kdb_private.h" #undef MODULE_PARAM_PREFIX @@ -197,10 +198,62 @@ struct task_struct *kdb_curr_task(int cpu) } /* - * Check whether the flags of the current command and the permissions - * of the kdb console has allow a command to be run. + * Update the permissions flags (kdb_cmd_enabled) to match the + * current lockdown state. + * + * Within this function the calls to security_locked_down() are "lazy". We + * avoid calling them if the current value of kdb_cmd_enabled already excludes + * flags that might be subject to lockdown. Additionally we deliberately check + * the lockdown flags independently (even though read lockdown implies write + * lockdown) since that results in both simpler code and clearer messages to + * the user on first-time debugger entry. + * + * The permission masks during a read+write lockdown permit the following + * flags: INSPECT, SIGNAL, REBOOT (and ALWAYS_SAFE). + * + * The INSPECT commands are not blocked during lockdown because they are + * not arbitrary memory reads. INSPECT covers the backtrace family (sometimes + * forcing them to have no arguments) and lsmod. These commands do expose + * some kernel state but do not allow the developer seated at the console to + * choose what state is reported. SIGNAL and REBOOT should not be controversial, + * given these are allowed for root during lockdown already. + */ +static void kdb_check_for_lockdown(void) +{ + const int write_flags = KDB_ENABLE_MEM_WRITE | + KDB_ENABLE_REG_WRITE | + KDB_ENABLE_FLOW_CTRL; + const int read_flags = KDB_ENABLE_MEM_READ | + KDB_ENABLE_REG_READ; + + bool need_to_lockdown_write = false; + bool need_to_lockdown_read = false; + + if (kdb_cmd_enabled & (KDB_ENABLE_ALL | write_flags)) + need_to_lockdown_write = + security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL); + + if (kdb_cmd_enabled & (KDB_ENABLE_ALL | read_flags)) + need_to_lockdown_read = + security_locked_down(LOCKDOWN_DBG_READ_KERNEL); + + /* De-compose KDB_ENABLE_ALL if required */ + if (need_to_lockdown_write || need_to_lockdown_read) + if (kdb_cmd_enabled & KDB_ENABLE_ALL) + kdb_cmd_enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL; + + if (need_to_lockdown_write) + kdb_cmd_enabled &= ~write_flags; + + if (need_to_lockdown_read) + kdb_cmd_enabled &= ~read_flags; +} + +/* + * Check whether the flags of the current command, the permissions of the kdb + * console and the lockdown state allow a command to be run.
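kdb_check_for_lockdown() above only ever removes bits from kdb_cmd_enabled, after first decomposing the KDB_ENABLE_ALL wildcard so individual bits can be masked out. The same computation as a standalone function; the bit values below are invented, the kernel's live in kdb.h:

#include <stdio.h>

#define KDB_ENABLE_ALL        (1 << 0)	/* illustrative bit layout */
#define KDB_ENABLE_MEM_READ   (1 << 1)
#define KDB_ENABLE_MEM_WRITE  (1 << 2)
#define KDB_ENABLE_REG_READ   (1 << 3)
#define KDB_ENABLE_REG_WRITE  (1 << 4)
#define KDB_ENABLE_FLOW_CTRL  (1 << 5)
#define KDB_ENABLE_MASK       ((1 << 6) - 1)

static int apply_lockdown(int enabled, int lock_write, int lock_read)
{
	const int wr = KDB_ENABLE_MEM_WRITE | KDB_ENABLE_REG_WRITE |
		       KDB_ENABLE_FLOW_CTRL;
	const int rd = KDB_ENABLE_MEM_READ | KDB_ENABLE_REG_READ;

	/* ALL is a wildcard: decompose it before masking bits out. */
	if ((lock_write || lock_read) && (enabled & KDB_ENABLE_ALL))
		enabled = KDB_ENABLE_MASK & ~KDB_ENABLE_ALL;
	if (lock_write)
		enabled &= ~wr;
	if (lock_read)
		enabled &= ~rd;
	return enabled;
}

int main(void)
{
	/* Write lockdown: only the read/inspect bits survive. */
	printf("%#x\n", apply_lockdown(KDB_ENABLE_ALL, 1, 0));
	return 0;
}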
*/ -static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, +static bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, bool no_args) { /* permissions comes from userspace so needs massaging slightly */ @@ -1194,6 +1247,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, kdb_curr_task(raw_smp_processor_id()); KDB_DEBUG_STATE("kdb_local 1", reason); + + kdb_check_for_lockdown(); + kdb_go_count = 0; if (reason == KDB_REASON_DEBUG) { /* special case below */ diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 6226502ce04991d8ba1642ca19c97f37ad5df35f..13417f0045f028ed04b4348460b415ab31dd94c1 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -350,7 +350,7 @@ int kdb_getarea_size(void *res, unsigned long addr, size_t size) */ int kdb_putarea_size(unsigned long addr, void *res, size_t size) { - int ret = copy_from_kernel_nofault((char *)addr, (char *)res, size); + int ret = copy_to_kernel_nofault((char *)addr, (char *)res, size); if (ret) { if (!KDB_STATE(SUPPRESS)) { kdb_printf("kdb_putarea: Bad address 0x%lx\n", addr); diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 10d07ace46c15d22873ee44de06b703319123987..f8ae5467986511a5c406d192b7113c4624bfc25a 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -928,7 +928,7 @@ static __init int dma_debug_cmdline(char *str) global_disable = true; } - return 0; + return 1; } static __init int dma_debug_entries_cmdline(char *str) @@ -937,7 +937,7 @@ static __init int dma_debug_entries_cmdline(char *str) return -EINVAL; if (!get_option(&str, &nr_prealloc_entries)) nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES; - return 0; + return 1; } __setup("dma_debug=", dma_debug_cmdline); diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h index b986155787376896a36990720364c04e7084bc2f..c9d380318dd8947db5a3eb2cf36ae4b3e7dd6cc6 100644 --- a/kernel/dma/direct.h +++ b/kernel/dma/direct.h @@ -114,6 +114,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, dma_direct_sync_single_for_cpu(dev, addr, size, dir); if (unlikely(is_swiotlb_buffer(phys))) - swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs); + swiotlb_tbl_unmap_single(dev, phys, size, size, dir, + attrs | DMA_ATTR_SKIP_CPU_SYNC); } #endif /* _KERNEL_DMA_DIRECT_H */ diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c index d4637f72239b404f4fbef822e74cb4c441062034..b9082b572e0f822bb752a6924efdf1e11496b079 100644 --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -206,7 +206,7 @@ static int __init dma_atomic_pool_init(void) GFP_KERNEL); if (!atomic_pool_kernel) ret = -ENOMEM; - if (IS_ENABLED(CONFIG_ZONE_DMA)) { + if (has_managed_dma()) { atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size, GFP_KERNEL | GFP_DMA); if (!atomic_pool_dma) @@ -229,7 +229,7 @@ static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp) if (prev == NULL) { if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32)) return atomic_pool_dma32; - if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA)) + if (atomic_pool_dma && (gfp & GFP_DMA)) return atomic_pool_dma; return atomic_pool_kernel; } diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 0ed0e1f215c75ea11c17cc3ecb66c9ad2fd57e57..274587a57717f16ee686892078ef8549a0805097 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -597,9 +597,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i); 
tlb_addr = slot_addr(io_tlb_start, index) + offset; - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) - swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE); + /* + * When dir == DMA_FROM_DEVICE we could omit the copy from the orig + * to the tlb buffer, if we knew for sure the device will + * overwrite the entire current content. But we don't. Thus + * unconditional bounce may prevent leaking swiotlb content (i.e. + * kernel memory) to user-space. + */ + swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE); return tlb_addr; } diff --git a/kernel/entry/common.c b/kernel/entry/common.c index 18a29ca01bfe554d8ea9064f2f720a4242b32357..cea3957ebdbcd8f75f98ed819299788f500b48bd 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -160,10 +160,6 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs, if (ti_work & _TIF_SIGPENDING) arch_do_signal(regs); -#ifdef CONFIG_QOS_SCHED - sched_qos_offline_wait(); -#endif - if (ti_work & _TIF_NOTIFY_RESUME) { tracehook_notify_resume(regs); rseq_handle_notify_resume(NULL, regs); @@ -198,8 +194,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs) /* Flush pending rcuog wakeup before the last need_resched() check */ rcu_nocb_flush_deferred_wakeup(); - if (unlikely((ti_work & EXIT_TO_USER_MODE_WORK) || - sched_qos_cpu_overload())) + if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK)) ti_work = exit_to_user_mode_loop(regs, ti_work); arch_exit_to_user_mode_prepare(regs, ti_work); diff --git a/kernel/events/core.c b/kernel/events/core.c index 639b99a318db1fe02767f2efc883f4847226babb..4bd9dd6c3b72cc287ca4ec819e4e5b5ee8f0fabc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -266,7 +266,7 @@ static void event_function_call(struct perf_event *event, event_f func, void *da if (!event->parent) { /* * If this is a !child event, we must hold ctx::mutex to - * stabilize the the event->ctx relation. See + * stabilize the event->ctx relation. See * perf_event_ctx_lock(). */ lockdep_assert_held(&ctx->mutex); @@ -801,7 +801,7 @@ static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list); */ static void perf_cgroup_switch(struct task_struct *task, int mode) { - struct perf_cpu_context *cpuctx; + struct perf_cpu_context *cpuctx, *tmp; struct list_head *list; unsigned long flags; @@ -812,7 +812,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode) local_irq_save(flags); list = this_cpu_ptr(&cgrp_cpuctx_list); - list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) { + list_for_each_entry_safe(cpuctx, tmp, list, cgrp_cpuctx_entry) { WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); perf_ctx_lock(cpuctx, cpuctx->task_ctx); @@ -1300,7 +1300,7 @@ static void put_ctx(struct perf_event_context *ctx) * life-time rules separate them. That is an exiting task cannot fork, and a * spawning task cannot (yet) exit. * - * But remember that that these are parent<->child context relations, and + * But remember that these are parent<->child context relations, and * migration does not affect children, therefore these two orderings should not * interact. * @@ -1439,7 +1439,7 @@ static u64 primary_event_id(struct perf_event *event) /* * Get the perf_event_context for a task and lock it. * - * This has to cope with with the fact that until it is locked, + * This has to cope with the fact that until it is locked, * the context could get moved to another task.
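The comment above describes a pattern that recurs later in this patch series (see the perf_event_open() revalidation hunk): a pointer read without the lock may be stale by the time the lock is acquired, so it must be re-checked under the lock and the operation retried. A minimal pthreads sketch of that loop, with hypothetical names:

        #include <pthread.h>

        struct ctx {
                pthread_mutex_t lock;
        };

        struct task {
                struct ctx *ctx;        /* may be re-pointed by concurrent code */
        };

        /* Lock the context a task currently points at, retrying if it moved. */
        static struct ctx *lock_task_ctx(struct task *t)
        {
                struct ctx *c;

                for (;;) {
                        c = __atomic_load_n(&t->ctx, __ATOMIC_ACQUIRE);
                        pthread_mutex_lock(&c->lock);
                        /* Re-validate: did the task move while we slept on the lock? */
                        if (c == __atomic_load_n(&t->ctx, __ATOMIC_ACQUIRE))
                                return c;       /* still ours, returned locked */
                        pthread_mutex_unlock(&c->lock);
                }
        }

        int main(void)
        {
                struct ctx c = { PTHREAD_MUTEX_INITIALIZER };
                struct task t = { &c };
                struct ctx *locked = lock_task_ctx(&t);

                pthread_mutex_unlock(&locked->lock);
                return 0;
        }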
*/ static struct perf_event_context * @@ -2492,7 +2492,7 @@ static void perf_set_shadow_time(struct perf_event *event, * But this is a bit hairy. * * So instead, we have an explicit cgroup call to remain - * within the time time source all along. We believe it + * within the time source all along. We believe it * is cleaner and simpler to understand. */ if (is_cgroup_event(event)) @@ -5830,6 +5830,8 @@ static void ring_buffer_attach(struct perf_event *event, struct perf_buffer *old_rb = NULL; unsigned long flags; + WARN_ON_ONCE(event->parent); + if (event->rb) { /* * Should be impossible, we set this when removing @@ -5887,6 +5889,9 @@ static void ring_buffer_wakeup(struct perf_event *event) { struct perf_buffer *rb; + if (event->parent) + event = event->parent; + rcu_read_lock(); rb = rcu_dereference(event->rb); if (rb) { @@ -5900,6 +5905,9 @@ struct perf_buffer *ring_buffer_get(struct perf_event *event) { struct perf_buffer *rb; + if (event->parent) + event = event->parent; + rcu_read_lock(); rb = rcu_dereference(event->rb); if (rb) { @@ -6395,18 +6403,25 @@ static void perf_pending_event(struct irq_work *entry) * Later on, we might change it to a list if there is * another virtualization implementation supporting the callbacks. */ -struct perf_guest_info_callbacks *perf_guest_cbs; +struct perf_guest_info_callbacks __rcu *perf_guest_cbs; int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) { - perf_guest_cbs = cbs; + if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs))) + return -EBUSY; + + rcu_assign_pointer(perf_guest_cbs, cbs); return 0; } EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) { - perf_guest_cbs = NULL; + if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs)) + return -EINVAL; + + rcu_assign_pointer(perf_guest_cbs, NULL); + synchronize_rcu(); return 0; } EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); @@ -6565,7 +6580,7 @@ static unsigned long perf_prepare_sample_aux(struct perf_event *event, if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id())) goto out; - rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler); + rb = ring_buffer_get(sampler); if (!rb) goto out; @@ -6631,7 +6646,7 @@ static void perf_aux_sample_output(struct perf_event *event, if (WARN_ON_ONCE(!sampler || !data->aux_size)) return; - rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler); + rb = ring_buffer_get(sampler); if (!rb) return; @@ -10204,8 +10219,11 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, } /* ready to consume more filters */ + kfree(filename); + filename = NULL; state = IF_STATE_ACTION; filter = NULL; + kernel = 0; } } @@ -11882,6 +11900,9 @@ SYSCALL_DEFINE5(perf_event_open, * Do not allow to attach to a group in a different task * or CPU context. If we're moving SW events, we'll fix * this up later, so allow that. + * + * Racy, not holding group_leader->ctx->mutex, see comment with + * perf_event_ctx_lock(). */ if (!move_group && group_leader->ctx != ctx) goto err_context; @@ -11949,6 +11970,7 @@ SYSCALL_DEFINE5(perf_event_open, } else { perf_event_ctx_unlock(group_leader, gctx); move_group = 0; + goto not_move_group; } } @@ -11965,7 +11987,17 @@ SYSCALL_DEFINE5(perf_event_open, } } else { mutex_lock(&ctx->mutex); + + /* + * Now that we hold ctx->lock, (re)validate group_leader->ctx == ctx, + * see the group_leader && !move_group test earlier. 
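The perf_guest_cbs hunk above replaces a bare global pointer with an RCU-protected one so that unregistration can wait out in-flight readers before the callbacks go away. A user-space sketch of the same publish/retire protocol, assuming liburcu (userspace-rcu) is available and linked with -lurcu:

        #include <urcu.h>               /* liburcu; build with -lurcu */
        #include <stddef.h>
        #include <stdio.h>

        struct callbacks {
                void (*on_event)(void);
        };

        static struct callbacks *registered_cbs;        /* RCU-protected */

        static int register_cbs(struct callbacks *cbs)
        {
                if (registered_cbs)     /* single user, as in the hunk above */
                        return -1;
                rcu_assign_pointer(registered_cbs, cbs);
                return 0;
        }

        static void unregister_cbs(void)
        {
                rcu_assign_pointer(registered_cbs, NULL);
                /* Wait for every reader that might still see the old pointer. */
                synchronize_rcu();
                /* Only now may the callbacks structure be freed or reused. */
        }

        static void fire_event(void)
        {
                struct callbacks *cbs;

                rcu_read_lock();
                cbs = rcu_dereference(registered_cbs);
                if (cbs)
                        cbs->on_event();
                rcu_read_unlock();
        }

        static void hello(void) { puts("event"); }

        int main(void)
        {
                struct callbacks cbs = { hello };

                rcu_register_thread();
                register_cbs(&cbs);
                fire_event();
                unregister_cbs();
                fire_event();           /* safely observes NULL */
                rcu_unregister_thread();
                return 0;
        }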
+ */ + if (group_leader && group_leader->ctx != ctx) { + err = -EINVAL; + goto err_locked; + } } +not_move_group: if (ctx->task == TASK_TOMBSTONE) { err = -ESRCH; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 00b0358739ab3b6b66b780e20f3374bbef4d4d34..e1bbb3b92921d8eb084d2844cd2ae85ebaf5e14f 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1735,7 +1735,7 @@ void uprobe_free_utask(struct task_struct *t) } /* - * Allocate a uprobe_task object for the task if if necessary. + * Allocate a uprobe_task object for the task if necessary. * Called when the thread hits a breakpoint. * * Returns: diff --git a/kernel/fork.c b/kernel/fork.c index bf27ee90ad2392f32dc7df3d9b87b29caacd0695..0fb86b65ae60ca5e1fc9fdc757fa4151ec98d7e3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2233,6 +2233,17 @@ static __latent_entropy struct task_struct *copy_process( if (retval) goto bad_fork_put_pidfd; + /* + * Now that the cgroups are pinned, re-clone the parent cgroup and put + * the new task on the correct runqueue. All this *before* the task + * becomes visible. + * + * This isn't part of ->can_fork() because while the re-cloning is + * cgroup specific, it unconditionally needs to place the task on a + * runqueue. + */ + sched_cgroup_fork(p, args); + /* * From this point on we must avoid any synchronous user-space * communication until we take the tasklist-lock. In particular, we do @@ -2288,10 +2299,6 @@ static __latent_entropy struct task_struct *copy_process( goto bad_fork_cancel_cgroup; } - /* past the last point of failure */ - if (pidfile) - fd_install(pidfd, pidfile); - init_task_pid_links(p); if (likely(p->pid)) { ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); @@ -2340,8 +2347,11 @@ static __latent_entropy struct task_struct *copy_process( syscall_tracepoint_update(p); write_unlock_irq(&tasklist_lock); + if (pidfile) + fd_install(pidfd, pidfile); + proc_fork_connector(p); - sched_post_fork(p, args); + sched_post_fork(p); cgroup_post_fork(p, args); perf_event_fork(p); diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index 4d89ad4fae3bb15e5c500a6ccc2f19aa9d63367b..5fb78addff51b4fec9ef0ed530b9107b5d7099f3 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c @@ -269,8 +269,9 @@ static int __irq_build_affinity_masks(unsigned int startvec, */ if (numvecs <= nodes) { for_each_node_mask(n, nodemsk) { - cpumask_or(&masks[curvec].mask, &masks[curvec].mask, - node_to_cpumask[n]); + /* Ensure that only CPUs which are in both masks are set */ + cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); + cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk); if (++curvec == last_affv) curvec = firstvec; } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index d3033e1f9d87e1c82b28474bc18f1f7ed41a1ced..8d3d49c0483eace0c664e223f9dce11a2999ca7c 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -223,11 +223,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, { struct irq_desc *desc = irq_data_to_desc(data); struct irq_chip *chip = irq_data_get_irq_chip(data); + const struct cpumask *prog_mask; int ret; + static DEFINE_RAW_SPINLOCK(tmp_mask_lock); + static struct cpumask tmp_mask; + if (!chip || !chip->irq_set_affinity) return -EINVAL; + raw_spin_lock(&tmp_mask_lock); /* * If this is a managed interrupt and housekeeping is enabled on * it check whether the requested affinity mask intersects with @@ -249,24 +254,34 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, */ if 
(irqd_affinity_is_managed(data) && housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) { - const struct cpumask *hk_mask, *prog_mask; - - static DEFINE_RAW_SPINLOCK(tmp_mask_lock); - static struct cpumask tmp_mask; + const struct cpumask *hk_mask; hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ); - raw_spin_lock(&tmp_mask_lock); cpumask_and(&tmp_mask, mask, hk_mask); if (!cpumask_intersects(&tmp_mask, cpu_online_mask)) prog_mask = mask; else prog_mask = &tmp_mask; - ret = chip->irq_set_affinity(data, prog_mask, force); - raw_spin_unlock(&tmp_mask_lock); } else { - ret = chip->irq_set_affinity(data, mask, force); + prog_mask = mask; } + + /* + * Make sure we only provide online CPUs to the irqchip, + * unless we are being asked to force the affinity (in which + * case we do as we are told). + */ + cpumask_and(&tmp_mask, prog_mask, cpu_online_mask); + if (!force && !cpumask_empty(&tmp_mask)) + ret = chip->irq_set_affinity(data, &tmp_mask, force); + else if (force) + ret = chip->irq_set_affinity(data, mask, force); + else + ret = -EINVAL; + + raw_spin_unlock(&tmp_mask_lock); + switch (ret) { case IRQ_SET_MASK_OK: case IRQ_SET_MASK_OK_DONE: diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index d217acc9f71b6a615e5add7d1dbaa876eb180371..77722ebdf6f5fd7f8fbfd37574fd685cee1427ea 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -456,6 +456,21 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, irqd_clr_can_reserve(irq_data); if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK) irqd_set_msi_nomask_quirk(irq_data); + + /* + * If the interrupt is managed but no CPU is available to + * service it, shut it down until better times. Note that + * we only do this on the !RESERVE path as x86 (the only + * architecture using this flag) deals with this in a + * different way by using a catch-all vector. + */ + if ((info->flags & MSI_FLAG_ACTIVATE_EARLY) && + irqd_affinity_is_managed(irq_data) && + !cpumask_intersects(irq_data_get_affinity_mask(irq_data), + cpu_online_mask)) { + irqd_set_managed_shutdown(irq_data); + return 0; + } } ret = irq_domain_activate_irq(irq_data, can_reserve); if (ret) diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 2da8b922278a06da84e0769b2b44d373bfe7e1bf..780a825cee8be202f5de0f8d5a18e1b557413228 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -31,6 +31,7 @@ #include "state.h" #include "transition.h" #elif defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) +#include #include #endif @@ -57,6 +58,7 @@ static struct kobject *klp_root_kobj; struct patch_data { struct klp_patch *patch; atomic_t cpu_count; + bool rollback; }; #endif @@ -953,19 +955,6 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) int ret; #endif - if (!func->old_name) - return -EINVAL; - - /* - * NOPs get the address later. The patched module must be loaded, - * see klp_init_object_loaded(). 
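Reduced to plain bitmasks, the decision logic that the irq_do_set_affinity() hunk above converges on looks like this (a sketch with stand-in values, not the cpumask API):

        #include <assert.h>
        #include <stdbool.h>

        /* Pick the mask actually handed to the irqchip, or -1 for -EINVAL. */
        static int pick_effective_mask(int req, int online, bool force)
        {
                int eff = req & online;

                if (!force && eff)
                        return eff;     /* trim the request to online CPUs */
                if (force)
                        return req;     /* forced: do as we are told */
                return -1;              /* nothing online to target */
        }

        int main(void)
        {
                assert(pick_effective_mask(0x0f, 0x03, false) == 0x03);
                assert(pick_effective_mask(0x0c, 0x03, false) == -1);
                assert(pick_effective_mask(0x0c, 0x03, true) == 0x0c);
                return 0;
        }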
- */ - if (!func->new_func && !func->nop) - return -EINVAL; - - if (strlen(func->old_name) >= KSYM_NAME_LEN) - return -EINVAL; - INIT_LIST_HEAD(&func->stack_node); func->patched = false; @@ -1039,6 +1028,7 @@ static int klp_init_object_loaded(struct klp_patch *patch, ret = klp_apply_object_relocs(patch, obj); if (ret) { module_enable_ro(patch->mod, true); + pr_err("apply object relocations failed, ret=%d\n", ret); return ret; } } @@ -1059,6 +1049,19 @@ static int klp_init_object_loaded(struct klp_patch *patch, return -ENOENT; } +#ifdef PPC64_ELF_ABI_v1 + /* + * PPC64 big endian defaults to the 'elfv1' binary format, so the + * actual symbol name of the old function needs a '.' prefix (the + * 'function descriptor' feature); otherwise the size found by + * 'kallsyms_lookup_size_offset' may be wrong. + */ + if (func->old_name[0] != '.') { + pr_warn("old_name '%s' may miss the prefix '.', old_size=%lu\n", + func->old_name, func->old_size); + } +#endif + if (func->nop) func->new_func = func->old_func; @@ -1080,8 +1083,28 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) int ret; const char *name; - if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN) + if (klp_is_module(obj) && strnlen(obj->name, MODULE_NAME_LEN) >= MODULE_NAME_LEN) { + pr_err("obj name is too long\n"); return -EINVAL; + } + klp_for_each_func(obj, func) { + if (!func->old_name) { + pr_err("old name is invalid\n"); + return -EINVAL; + } + /* + * NOPs get the address later. The patched module must be loaded, + * see klp_init_object_loaded(). + */ + if (!func->new_func && !func->nop) { + pr_err("new_func is invalid\n"); + return -EINVAL; + } + if (strlen(func->old_name) >= KSYM_NAME_LEN) { + pr_err("function old name is too long\n"); + return -EINVAL; + } + } obj->patched = false; obj->mod = NULL; @@ -1197,6 +1220,7 @@ static int klp_init_patch(struct klp_patch *patch) ret = jump_label_register(patch->mod); if (ret) { module_enable_ro(patch->mod, true); + pr_err("register jump label failed, ret=%d\n", ret); return ret; } module_enable_ro(patch->mod, true); @@ -1251,11 +1275,16 @@ int __weak klp_check_calltrace(struct klp_patch *patch, int enable) static LIST_HEAD(klp_func_list); +/* + * The caller must either hold klp_mutex or be inside an RCU read-side + * critical section. + */ struct klp_func_node *klp_find_func_node(const void *old_func) { struct klp_func_node *func_node; - list_for_each_entry(func_node, &klp_func_list, node) { + list_for_each_entry_rcu(func_node, &klp_func_list, node, + lockdep_is_held(&klp_mutex)) { if (func_node->old_func == old_func) return func_node; } @@ -1273,6 +1302,37 @@ void klp_del_func_node(struct klp_func_node *func_node) list_del_rcu(&func_node->node); } +/* + * Called from the breakpoint exception handler function. + */ +void *klp_get_brk_func(void *addr) +{ + struct klp_func_node *func_node; + void *brk_func = NULL; + + if (!addr) + return NULL; + + rcu_read_lock(); + + func_node = klp_find_func_node(addr); + if (!func_node) + goto unlock; + + /* + * Corresponds to smp_wmb() in {add, remove}_breakpoint(). If the + * current breakpoint exception belongs to us, we have observed the + * breakpoint instruction, so brk_func must be observed. + */ + smp_rmb(); + + brk_func = func_node->brk_func; + +unlock: + rcu_read_unlock(); + return brk_func; +} + /* * This function is called from stop_machine() context.
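The smp_rmb() in klp_get_brk_func() above pairs with the smp_wmb() in add_breakpoint() further down: brk_func must be published before the breakpoint instruction becomes visible, and a handler that saw the breakpoint must also see brk_func. A single-file C11 analogue of that pairing, using fences in place of the kernel barriers (a sketch of the ordering, not the kernel primitives):

        #include <stdatomic.h>
        #include <stddef.h>

        static void *brk_func;
        static atomic_int brk_armed;

        static void arm(void *handler_target)
        {
                brk_func = handler_target;
                /* Publish brk_func before the "breakpoint" becomes visible. */
                atomic_thread_fence(memory_order_release);
                atomic_store_explicit(&brk_armed, 1, memory_order_relaxed);
        }

        static void *exception_handler(void)
        {
                if (!atomic_load_explicit(&brk_armed, memory_order_relaxed))
                        return NULL;
                /* If we saw the breakpoint, we must also see brk_func. */
                atomic_thread_fence(memory_order_acquire);
                return brk_func;
        }

        int main(void)
        {
                arm((void *)exception_handler);
                return exception_handler() == NULL;     /* single-threaded demo */
        }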
*/ @@ -1343,6 +1403,34 @@ long __weak arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_fu return -ENOSYS; } +void __weak arch_klp_init(void) +{ +} + +int __weak arch_klp_check_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + return 0; +} + +int __weak arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ + return -ENOTSUPP; +} + +void __weak arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func) +{ +} + +void __weak arch_klp_set_brk_func(struct klp_func_node *func_node, void *new_func) +{ + func_node->brk_func = new_func; +} + +int __weak arch_klp_module_check_calltrace(void *data) +{ + return 0; +} + static struct klp_func_node *func_node_alloc(struct klp_func *func) { long ret; @@ -1381,11 +1469,24 @@ static void func_node_free(struct klp_func *func) func->func_node = NULL; if (list_empty(&func_node->func_stack)) { klp_del_func_node(func_node); + synchronize_rcu(); arch_klp_mem_free(func_node); } } } +static void klp_mem_recycle(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + func_node_free(func); + } + } +} + static int klp_mem_prepare(struct klp_patch *patch) { struct klp_object *obj; @@ -1395,6 +1496,7 @@ static int klp_mem_prepare(struct klp_patch *patch) klp_for_each_func(obj, func) { func->func_node = func_node_alloc(func); if (func->func_node == NULL) { + klp_mem_recycle(patch); pr_err("alloc func_node failed\n"); return -ENOMEM; } @@ -1403,16 +1505,108 @@ static int klp_mem_prepare(struct klp_patch *patch) return 0; } -static void klp_mem_recycle(struct klp_patch *patch) +static void remove_breakpoint(struct klp_func *func, bool restore) +{ + + struct klp_func_node *func_node = klp_find_func_node(func->old_func); + struct arch_klp_data *arch_data = &func_node->arch_data; + + if (!func_node->brk_func) + return; + + if (restore) + arch_klp_remove_breakpoint(arch_data, func->old_func); + + /* Wait for all breakpoint exception handler functions to exit. */ + synchronize_rcu(); + + /* 'brk_func' cannot be set to NULL before the breakpoint is removed. */ + smp_wmb(); + + arch_klp_set_brk_func(func_node, NULL); +} + +static void __klp_breakpoint_post_process(struct klp_patch *patch, bool restore) { struct klp_object *obj; struct klp_func *func; klp_for_each_object(patch, obj) { klp_for_each_func(obj, func) { - func_node_free(func); + remove_breakpoint(func, restore); + } + } +} + +static int add_breakpoint(struct klp_func *func) +{ + struct klp_func_node *func_node = klp_find_func_node(func->old_func); + struct arch_klp_data *arch_data = &func_node->arch_data; + int ret; + + if (WARN_ON_ONCE(func_node->brk_func)) + return -EINVAL; + + ret = arch_klp_check_breakpoint(arch_data, func->old_func); + if (ret) + return ret; + + arch_klp_set_brk_func(func_node, func->new_func); + + /* + * When entering an exception, we must see 'brk_func' or the kernel + * will not be able to handle the breakpoint exception we are about + * to insert. + */ + smp_wmb(); + + ret = arch_klp_add_breakpoint(arch_data, func->old_func); + if (ret) + arch_klp_set_brk_func(func_node, NULL); + + return ret; +} + +static int klp_add_breakpoint(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + int ret; + + /* + * Ensure that the module is not uninstalled before the breakpoint is + * removed. 
After the breakpoint is removed, the breakpoint handler can no + * longer redirect execution to the new function, so the module + * reference can safely be dropped. + */ + if (!try_module_get(patch->mod)) + return -ENODEV; + + arch_klp_code_modify_prepare(); + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + ret = add_breakpoint(func); + if (ret) { + __klp_breakpoint_post_process(patch, true); + arch_klp_code_modify_post_process(); + module_put(patch->mod); + return ret; + } } } + + arch_klp_code_modify_post_process(); + + return 0; +} + +static void klp_breakpoint_post_process(struct klp_patch *patch, bool restore) +{ + arch_klp_code_modify_prepare(); + __klp_breakpoint_post_process(patch, restore); + arch_klp_code_modify_post_process(); + module_put(patch->mod); } static int __klp_disable_patch(struct klp_patch *patch) @@ -1585,7 +1779,7 @@ EXPORT_SYMBOL_GPL(klp_enable_patch); /* * This function is called from stop_machine() context. */ -static int enable_patch(struct klp_patch *patch) +static int enable_patch(struct klp_patch *patch, bool rollback) { struct klp_object *obj; int ret; @@ -1593,19 +1787,21 @@ static int enable_patch(struct klp_patch *patch) pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n"); add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK); - if (!try_module_get(patch->mod)) - return -ENODEV; + if (!patch->enabled) { + if (!try_module_get(patch->mod)) + return -ENODEV; - patch->enabled = true; + patch->enabled = true; - pr_notice("enabling patch '%s'\n", patch->mod->name); + pr_notice("enabling patch '%s'\n", patch->mod->name); + } klp_for_each_object(patch, obj) { if (!klp_is_object_loaded(obj)) continue; - ret = klp_patch_object(obj); - if (ret) { + ret = klp_patch_object(obj, rollback); + if (ret && klp_need_rollback(ret, rollback)) { pr_warn("failed to patch object '%s'\n", klp_is_module(obj) ? obj->name : "vmlinux"); goto disable; @@ -1637,7 +1833,7 @@ int klp_try_enable_patch(void *data) atomic_inc(&pd->cpu_count); return ret; } - ret = enable_patch(patch); + ret = enable_patch(patch, pd->rollback); if (ret) { atomic_inc(&pd->cpu_count); return ret; @@ -1653,12 +1849,89 @@ int klp_try_enable_patch(void *data) return ret; } +/* + * When stop_machine() is used to enable the patch and enabling fails because + * the stack check fails, a certain number of retries is allowed. The maximum + * number of retries is KLP_RETRY_COUNT. + * + * Sleep for KLP_RETRY_INTERVAL milliseconds before each retry to give tasks + * that failed the stack check a chance to run out of the instruction + * replacement area.
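The retry policy described above, and implemented by klp_breakpoint_optimize() below, reduces to: retry only on -EAGAIN, and request an immediate rollback on the final attempt so a patch that never passes the stack check is fully unwound. A stripped-down sketch of that control flow with a stubbed attempt function:

        #include <errno.h>
        #include <stdbool.h>
        #include <stdio.h>

        #define RETRY_COUNT     5

        static int try_enable(bool rollback)
        {
                static int busy = 2;    /* stub: stack check fails twice */

                if (busy-- > 0)
                        return -EAGAIN;
                (void)rollback;
                return 0;
        }

        int main(void)
        {
                int ret = -EAGAIN, i;

                for (i = 0; i < RETRY_COUNT; i++) {
                        /* Only the last attempt rolls back immediately on failure. */
                        bool rollback = (i == RETRY_COUNT - 1);

                        ret = try_enable(rollback);
                        if (!ret || ret != -EAGAIN)
                                break;
                        /* msleep(KLP_RETRY_INTERVAL) would go here in the kernel. */
                }
                printf("patching %s after %d attempt(s)\n",
                       ret ? "failed" : "succeeded", i + 1);
                return !!ret;
        }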
+ */ +#define KLP_RETRY_COUNT 5 +#define KLP_RETRY_INTERVAL 100 + +static bool klp_use_breakpoint(struct klp_patch *patch) +{ + struct klp_object *obj; + struct klp_func *func; + + klp_for_each_object(patch, obj) { + klp_for_each_func(obj, func) { + if (func->force != KLP_STACK_OPTIMIZE) + return false; + } + } + + return true; +} + +static int klp_breakpoint_optimize(struct klp_patch *patch) +{ + int ret; + int i; + int cnt = 0; + + ret = klp_add_breakpoint(patch); + if (ret) { + pr_err("failed to add breakpoints, ret=%d\n", ret); + return ret; + } + + for (i = 0; i < KLP_RETRY_COUNT; i++) { + struct patch_data patch_data = { + .patch = patch, + .cpu_count = ATOMIC_INIT(0), + .rollback = false, + }; + + if (i == KLP_RETRY_COUNT - 1) + patch_data.rollback = true; + + cnt++; + + arch_klp_code_modify_prepare(); + ret = stop_machine(klp_try_enable_patch, &patch_data, + cpu_online_mask); + arch_klp_code_modify_post_process(); + if (!ret || ret != -EAGAIN) + break; + + pr_notice("try again in %d ms.\n", KLP_RETRY_INTERVAL); + + msleep(KLP_RETRY_INTERVAL); + } + pr_notice("patching %s, tried %d times, ret=%d.\n", + ret ? "failed" : "success", cnt, ret); + + /* + * If the patch is enabled successfully, the breakpoint instruction + * has been replaced with the jump instruction. However, if the patch + * fails to be enabled, we need to delete the previously inserted + * breakpoint to restore the instruction at the old function entry. + */ + klp_breakpoint_post_process(patch, !!ret); + + return ret; +} + static int __klp_enable_patch(struct klp_patch *patch) { int ret; struct patch_data patch_data = { .patch = patch, .cpu_count = ATOMIC_INIT(0), + .rollback = true, }; if (WARN_ON(patch->enabled)) @@ -1673,16 +1946,29 @@ static int __klp_enable_patch(struct klp_patch *patch) } #endif - arch_klp_code_modify_prepare(); ret = klp_mem_prepare(patch); - if (ret == 0) - ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask); - arch_klp_code_modify_post_process(); - if (ret) { - klp_mem_recycle(patch); + if (ret) return ret; + + arch_klp_code_modify_prepare(); + ret = stop_machine(klp_try_enable_patch, &patch_data, + cpu_online_mask); + arch_klp_code_modify_post_process(); + if (!ret) + goto move_patch_to_tail; + if (ret != -EAGAIN) + goto err_out; + + if (!klp_use_breakpoint(patch)) { + pr_debug("breakpoint exception optimization is not used.\n"); + goto err_out; } + ret = klp_breakpoint_optimize(patch); + if (ret) + goto err_out; + +move_patch_to_tail: #ifndef CONFIG_LIVEPATCH_STACK /* move the enabled patch to the list tail */ list_del(&patch->list); @@ -1690,6 +1976,10 @@ static int __klp_enable_patch(struct klp_patch *patch) #endif return 0; + +err_out: + klp_mem_recycle(patch); + return ret; } /** @@ -1706,12 +1996,24 @@ int klp_register_patch(struct klp_patch *patch) int ret; struct klp_object *obj; - if (!patch || !patch->mod || !patch->objs) + if (!patch) { + pr_err("patch invalid\n"); return -EINVAL; + } + if (!patch->mod) { + pr_err("patch->mod invalid\n"); + return -EINVAL; + } + if (!patch->objs) { + pr_err("patch->objs invalid\n"); + return -EINVAL; + } klp_for_each_object_static(patch, obj) { - if (!obj->funcs) + if (!obj->funcs) { + pr_err("obj->funcs invalid\n"); return -EINVAL; + } } if (!is_livepatch_module(patch->mod)) { @@ -1720,8 +2022,10 @@ int klp_register_patch(struct klp_patch *patch) return -EINVAL; } - if (!klp_initialized()) + if (!klp_initialized()) { + pr_err("kernel live patch not available\n"); return -ENODEV; + } mutex_lock(&klp_mutex); @@ -1794,6 +2098,37 
@@ int klp_unregister_patch(struct klp_patch *patch) } EXPORT_SYMBOL_GPL(klp_unregister_patch); +/** + * klp_module_delete_safety_check() - safety check before deleting a module in a livepatch scenario + * @mod: Module to be deleted + * + * The module refcount ensures that there is no race between enable_patch and delete_module: + * 1. safety_check -> try_enable_patch -> try_release_module_ref: + * try_enable_patch increases the module refcount, which makes try_release_module_ref fail. + * 2. safety_check -> try_release_module_ref -> try_enable_patch: + * after the module reference is released, try_enable_patch fails because try_module_get fails. + * So resources cannot be released unsafely by enabling a livepatch after the safety check has + * passed during module deletion, and no complex synchronization protection is + * required. + * + * Return: 0 on success, otherwise error + */ +int klp_module_delete_safety_check(struct module *mod) +{ + int ret; + + if (!mod || !is_livepatch_module(mod)) + return 0; + + ret = stop_machine(arch_klp_module_check_calltrace, (void *)mod, NULL); + if (ret) { + pr_debug("failed to check klp module calltrace: %d\n", ret); + return ret; + } + + return 0; +} + #endif /* #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY */ /* * This function unpatches objects from the replaced livepatches. @@ -1998,6 +2333,9 @@ static int __init klp_init(void) if (!klp_root_kobj) goto error_remove; +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + arch_klp_init(); +#endif return 0; error_remove: diff --git a/kernel/livepatch/core.h b/kernel/livepatch/core.h index 9bcd139eb7d6272ca649dfa4a966a76607db6a4b..911b6452e5be972f85ec3e2e418138d2aa3b6399 100644 --- a/kernel/livepatch/core.h +++ b/kernel/livepatch/core.h @@ -57,4 +57,18 @@ static inline void klp_post_unpatch_callback(struct klp_object *obj) obj->callbacks.post_unpatch_enabled = false; } #endif /* CONFIG_LIVEPATCH_PER_TASK_CONSISTENCY */ + +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +/* + * In the enable_patch() process, we do not need to roll back the patch + * immediately if it fails to be enabled. This way, functions that were + * already patched successfully do not need to be patched again during a + * retry. However, on the last retry (rollback == true), or when the failure + * is not a stack check failure (patch_err != -EAGAIN), an immediate rollback + * is required.
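The behaviour of klp_need_rollback(), defined just below, can be enumerated directly; only a stack-check failure (-EAGAIN) on a non-final attempt defers the rollback:

        #include <assert.h>
        #include <errno.h>
        #include <stdbool.h>

        static bool klp_need_rollback(int patch_err, bool rollback)
        {
                return patch_err != -EAGAIN || rollback;
        }

        int main(void)
        {
                assert(klp_need_rollback(-EINVAL, false));  /* hard error: roll back */
                assert(klp_need_rollback(-EAGAIN, true));   /* last retry: roll back */
                assert(!klp_need_rollback(-EAGAIN, false)); /* retry later instead */
                return 0;
        }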
+ */ +static inline bool klp_need_rollback(int patch_err, bool rollback) +{ + return patch_err != -EAGAIN || rollback; +} +#endif /* CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY */ #endif /* _LIVEPATCH_CORE_H */ diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index 6515b8e9982941436c4e5d078f50ac5c9109bd4f..bea6c5d0af942e541e63a5d1232c624e2a287dfd 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c @@ -269,10 +269,10 @@ static inline int klp_patch_func(struct klp_func *func) { int ret = 0; + if (func->patched) + return 0; if (WARN_ON(!func->old_func)) return -EINVAL; - if (WARN_ON(func->patched)) - return -EINVAL; if (WARN_ON(!func->func_node)) return -EINVAL; @@ -306,6 +306,27 @@ void klp_unpatch_object(struct klp_object *obj) __klp_unpatch_object(obj, false); } +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +int klp_patch_object(struct klp_object *obj, bool rollback) +{ + struct klp_func *func; + int ret; + + if (obj->patched) + return 0; + + klp_for_each_func(obj, func) { + ret = klp_patch_func(func); + if (ret && klp_need_rollback(ret, rollback)) { + klp_unpatch_object(obj); + return ret; + } + } + obj->patched = true; + + return 0; +} +#else int klp_patch_object(struct klp_object *obj) { struct klp_func *func; @@ -325,6 +346,7 @@ int klp_patch_object(struct klp_object *obj) return 0; } +#endif static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only) { diff --git a/kernel/livepatch/patch.h b/kernel/livepatch/patch.h index c9cde47f7e979b789a2ff11a9f3f1971022fc188..9566681660e4e1d49b0bed016ef34ed54f6aff9e 100644 --- a/kernel/livepatch/patch.h +++ b/kernel/livepatch/patch.h @@ -29,7 +29,11 @@ struct klp_ops { struct klp_ops *klp_find_ops(void *old_func); +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +int klp_patch_object(struct klp_object *obj, bool rollback); +#else int klp_patch_object(struct klp_object *obj); +#endif void klp_unpatch_object(struct klp_object *obj); void klp_unpatch_objects(struct klp_patch *patch); void klp_unpatch_objects_dynamic(struct klp_patch *patch); diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 1f6a2f1226fa948c3bbfdae2b38355795b2dbeaa..b6683cefe19a4950af2a2be057401593d38c8472 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -182,11 +182,9 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES); static struct hlist_head lock_keys_hash[KEYHASH_SIZE]; unsigned long nr_lock_classes; unsigned long nr_zapped_classes; -#ifndef CONFIG_DEBUG_LOCKDEP -static -#endif +unsigned long max_lock_class_idx; struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; -static DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS); +DECLARE_BITMAP(lock_classes_in_use, MAX_LOCKDEP_KEYS); static inline struct lock_class *hlock_class(struct held_lock *hlock) { @@ -337,7 +335,7 @@ static inline void lock_release_holdtime(struct held_lock *hlock) * elements. These elements are linked together by the lock_entry member in * struct lock_class. */ -LIST_HEAD(all_lock_classes); +static LIST_HEAD(all_lock_classes); static LIST_HEAD(free_lock_classes); /** @@ -1239,6 +1237,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) struct lockdep_subclass_key *key; struct hlist_head *hash_head; struct lock_class *class; + int idx; DEBUG_LOCKS_WARN_ON(!irqs_disabled()); @@ -1304,6 +1303,9 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) * of classes. 
*/ list_move_tail(&class->lock_entry, &all_lock_classes); + idx = class - lock_classes; + if (idx > max_lock_class_idx) + max_lock_class_idx = idx; if (verbose(class)) { graph_unlock(); @@ -3387,7 +3389,7 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i) u16 chain_hlock = chain_hlocks[chain->base + i]; unsigned int class_idx = chain_hlock_class_idx(chain_hlock); - return lock_classes + class_idx - 1; + return lock_classes + class_idx; } /* @@ -3455,7 +3457,7 @@ static void print_chain_keys_chain(struct lock_chain *chain) hlock_id = chain_hlocks[chain->base + i]; chain_key = print_chain_key_iteration(hlock_id, chain_key); - print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1); + print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id)); printk("\n"); } } @@ -5919,6 +5921,8 @@ static void zap_class(struct pending_free *pf, struct lock_class *class) WRITE_ONCE(class->name, NULL); nr_lock_classes--; __clear_bit(class - lock_classes, lock_classes_in_use); + if (class - lock_classes == max_lock_class_idx) + max_lock_class_idx--; } else { WARN_ONCE(true, "%s() failed for class %s\n", __func__, class->name); @@ -6209,7 +6213,13 @@ void lockdep_reset_lock(struct lockdep_map *lock) lockdep_reset_lock_reg(lock); } -/* Unregister a dynamically allocated key. */ +/* + * Unregister a dynamically allocated key. + * + * Unlike lockdep_register_key(), a search is always done to find a matching + * key irrespective of debug_locks to avoid potential invalid access to freed + * memory in lock_class entry. + */ void lockdep_unregister_key(struct lock_class_key *key) { struct hlist_head *hash_head = keyhashentry(key); @@ -6224,10 +6234,8 @@ void lockdep_unregister_key(struct lock_class_key *key) return; raw_local_irq_save(flags); - if (!graph_lock()) - goto out_irq; + lockdep_lock(); - pf = get_pending_free(); hlist_for_each_entry_rcu(k, hash_head, hash_entry) { if (k == key) { hlist_del_rcu(&k->hash_entry); @@ -6235,11 +6243,13 @@ void lockdep_unregister_key(struct lock_class_key *key) break; } } - WARN_ON_ONCE(!found); - __lockdep_free_key_range(pf, key, 1); - call_rcu_zapped(pf); - graph_unlock(); -out_irq: + WARN_ON_ONCE(!found && debug_locks); + if (found) { + pf = get_pending_free(); + __lockdep_free_key_range(pf, key, 1); + call_rcu_zapped(pf); + } + lockdep_unlock(); raw_local_irq_restore(flags); /* Wait until is_dynamic_key() has finished accessing k->hash_entry. 
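The lockdep_proc.c hunks below stop walking all_lock_classes and instead scan the static lock_classes array, skipping slots whose bit is clear in lock_classes_in_use and bounding the scan with max_lock_class_idx. A user-space sketch of that iteration style, with a single word standing in for the real bitmap:

        #include <stdio.h>

        #define MAX_CLASSES 64

        struct lock_class { const char *name; };

        static struct lock_class classes[MAX_CLASSES];
        static unsigned long in_use;            /* one bit per classes[] slot */
        static unsigned long max_idx;

        static void add_class(unsigned long idx, const char *name)
        {
                classes[idx].name = name;
                in_use |= 1UL << idx;
                if (idx > max_idx)
                        max_idx = idx;
        }

        int main(void)
        {
                unsigned long idx;

                add_class(0, "a");
                add_class(5, "b");

                /* Safe without a list lock: the array itself never moves. */
                for (idx = 0; idx <= max_idx; idx++) {
                        if (!(in_use & (1UL << idx)))
                                continue;
                        printf("%lu: %s\n", idx, classes[idx].name);
                }
                return 0;
        }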
*/ diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index de49f9e1c11ba8be13fffcda9159c9c91db16e85..a19b016353478f844eaa758880939925e62cd82d 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -121,7 +121,6 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ = #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) -extern struct list_head all_lock_classes; extern struct lock_chain lock_chains[]; #define LOCK_USAGE_CHARS (2*XXX_LOCK_USAGE_STATES + 1) @@ -151,6 +150,10 @@ extern unsigned int nr_large_chain_blocks; extern unsigned int max_lockdep_depth; extern unsigned int max_bfs_queue_depth; +extern unsigned long max_lock_class_idx; + +extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; +extern unsigned long lock_classes_in_use[]; #ifdef CONFIG_PROVE_LOCKING extern unsigned long lockdep_count_forward_deps(struct lock_class *); @@ -205,7 +208,6 @@ struct lockdep_stats { }; DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats); -extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; #define __debug_atomic_inc(ptr) \ this_cpu_inc(lockdep_stats.ptr); diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 02ef87f50df29c2a53dbf7b27320e1001fe09e33..ccb5292d1e1944bae1150d14f4103588aea1b1f5 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -24,14 +24,33 @@ #include "lockdep_internals.h" +/* + * Since iteration of lock_classes is done without holding the lockdep lock, + * it is not safe to iterate all_lock_classes list directly as the iteration + * may branch off to free_lock_classes or the zapped list. Iteration is done + * directly on the lock_classes array by checking the lock_classes_in_use + * bitmap and max_lock_class_idx. + */ +#define iterate_lock_classes(idx, class) \ + for (idx = 0, class = lock_classes; idx <= max_lock_class_idx; \ + idx++, class++) + static void *l_next(struct seq_file *m, void *v, loff_t *pos) { - return seq_list_next(v, &all_lock_classes, pos); + struct lock_class *class = v; + + ++class; + *pos = class - lock_classes; + return (*pos > max_lock_class_idx) ? 
NULL : class; } static void *l_start(struct seq_file *m, loff_t *pos) { - return seq_list_start_head(&all_lock_classes, *pos); + unsigned long idx = *pos; + + if (idx > max_lock_class_idx) + return NULL; + return lock_classes + idx; } static void l_stop(struct seq_file *m, void *v) @@ -57,14 +76,16 @@ static void print_name(struct seq_file *m, struct lock_class *class) static int l_show(struct seq_file *m, void *v) { - struct lock_class *class = list_entry(v, struct lock_class, lock_entry); + struct lock_class *class = v; struct lock_list *entry; char usage[LOCK_USAGE_CHARS]; + int idx = class - lock_classes; - if (v == &all_lock_classes) { + if (v == lock_classes) seq_printf(m, "all lock classes:\n"); + + if (!test_bit(idx, lock_classes_in_use)) return 0; - } seq_printf(m, "%p", class->key); #ifdef CONFIG_DEBUG_LOCKDEP @@ -218,8 +239,11 @@ static int lockdep_stats_show(struct seq_file *m, void *v) #ifdef CONFIG_PROVE_LOCKING struct lock_class *class; + unsigned long idx; - list_for_each_entry(class, &all_lock_classes, lock_entry) { + iterate_lock_classes(idx, class) { + if (!test_bit(idx, lock_classes_in_use)) + continue; if (class->usage_mask == 0) nr_unused++; @@ -252,6 +276,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) sum_forward_deps += lockdep_count_forward_deps(class); } + #ifdef CONFIG_DEBUG_LOCKDEP DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused); #endif @@ -343,6 +368,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v) seq_printf(m, " max bfs queue depth: %11u\n", max_bfs_queue_depth); #endif + seq_printf(m, " max lock class index: %11lu\n", + max_lock_class_idx); lockdep_stats_debug_show(m); seq_printf(m, " debug_locks: %11u\n", debug_locks); @@ -620,12 +647,16 @@ static int lock_stat_open(struct inode *inode, struct file *file) if (!res) { struct lock_stat_data *iter = data->stats; struct seq_file *m = file->private_data; + unsigned long idx; - list_for_each_entry(class, &all_lock_classes, lock_entry) { + iterate_lock_classes(idx, class) { + if (!test_bit(idx, lock_classes_in_use)) + continue; iter->class = class; iter->stats = lock_stats(class); iter++; } + data->iter_end = iter; sort(data->stats, data->iter_end - data->stats, @@ -643,6 +674,7 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct lock_class *class; + unsigned long idx; char c; if (count) { @@ -652,8 +684,11 @@ static ssize_t lock_stat_write(struct file *file, const char __user *buf, if (c != '0') return count; - list_for_each_entry(class, &all_lock_classes, lock_entry) + iterate_lock_classes(idx, class) { + if (!test_bit(idx, lock_classes_in_use)) + continue; clear_lock_stats(class); + } } return count; } diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 2f8cd616d3b29aa93498db95e426acd4abbb153b..f00dd928fc7113c6a31aaa05d6214c08f3fee201 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1438,7 +1438,7 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, } /* - * Performs the wakeup of the the top-waiter and re-enables preemption. + * Performs the wakeup of the top-waiter and re-enables preemption. */ void rt_mutex_postunlock(struct wake_q_head *wake_q) { @@ -1832,7 +1832,7 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) * been started. * @waiter: the pre-initialized rt_mutex_waiter * - * Wait for the the lock acquisition started on our behalf by + * Wait for the lock acquisition started on our behalf by * rt_mutex_start_proxy_lock(). 
Upon failure, the caller must call * rt_mutex_cleanup_proxy_lock(). * diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index a163542d178ee11198080f6cad124c9e2c25d115..cc5cc889b5b7fb8e9d6e57863b177e38c462d059 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -1177,7 +1177,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) /* * If there were already threads queued before us and: - * 1) there are no no active locks, wake the front + * 1) there are no active locks, wake the front * queued process(es) as the handoff bit might be set. * 2) there are no active writers and some readers, the lock * must be read owned; so we try to wake any read lock diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c index d9dd94defc0a928c4697b49c2071090c6105966e..9aa855a96c4ae2e1149baafa56f1d0d6e1c8717e 100644 --- a/kernel/locking/semaphore.c +++ b/kernel/locking/semaphore.c @@ -119,7 +119,7 @@ EXPORT_SYMBOL(down_killable); * @sem: the semaphore to be acquired * * Try to acquire the semaphore atomically. Returns 0 if the semaphore has - * been acquired successfully or 1 if it it cannot be acquired. + * been acquired successfully or 1 if it cannot be acquired. * * NOTE: This return value is inverted from both spin_trylock and * mutex_trylock! Be careful about this when converting code. diff --git a/kernel/module.c b/kernel/module.c index e7b9ecc1aa3419e814acec313d670b1c89511a88..5fdfa29a0738eab5c9db875fd5e9f40b419094c4 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -57,6 +57,9 @@ #include #include #include +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY +#include +#endif #include #include "module-internal.h" @@ -1027,6 +1030,12 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user, } } +#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY + ret = klp_module_delete_safety_check(mod); + if (ret != 0) + goto out; +#endif + /* Stop the machine so refcounts can't move and disable module. */ ret = try_stop_module(mod, flags, &forced); if (ret != 0) @@ -3729,12 +3738,6 @@ static noinline int do_init_module(struct module *mod) } freeinit->module_init = mod->init_layout.base; - /* - * We want to find out whether @mod uses async during init. Clear - * PF_USED_ASYNC. async_schedule*() will set it. - */ - current->flags &= ~PF_USED_ASYNC; - do_mod_ctors(mod); /* Start the module */ if (mod->init != NULL) @@ -3760,22 +3763,13 @@ static noinline int do_init_module(struct module *mod) /* * We need to finish all async code before the module init sequence - * is done. This has potential to deadlock. For example, a newly - * detected block device can trigger request_module() of the - * default iosched from async probing task. Once userland helper - * reaches here, async_synchronize_full() will wait on the async - * task waiting on request_module() and deadlock. - * - * This deadlock is avoided by perfomring async_synchronize_full() - * iff module init queued any async jobs. This isn't a full - * solution as it will deadlock the same if module loading from - * async jobs nests more than once; however, due to the various - * constraints, this hack seems to be the best option for now. - * Please refer to the following thread for details. + * is done. This has potential to deadlock if synchronous module + * loading is requested from async (which is not allowed!). * - * http://thread.gmane.org/gmane.linux.kernel/1420814 + * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous + * request_module() from async workers") for more details. 
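For readers unfamiliar with the async API referenced in the do_init_module() comment above: work queued with async_schedule() runs in parallel with module init, and async_synchronize_full() blocks until all of it has finished. A user-space sketch of that contract with pthread stand-ins (these are local stubs, not the kernel functions, whose signatures differ):

        #include <pthread.h>
        #include <stdio.h>

        /* Local stand-ins for async_schedule()/async_synchronize_full(). */
        static pthread_t workers[8];
        static int nr_workers;

        static void async_schedule(void *(*fn)(void *), void *data)
        {
                pthread_create(&workers[nr_workers++], NULL, fn, data);
        }

        static void async_synchronize_full(void)
        {
                while (nr_workers > 0)
                        pthread_join(workers[--nr_workers], NULL);
        }

        static void *probe(void *name)
        {
                printf("probing %s\n", (const char *)name);
                return NULL;
        }

        int main(void)
        {
                /* "module init" queues async work... */
                async_schedule(probe, "dev0");
                async_schedule(probe, "dev1");
                /* ...and module load waits for it before init is declared done. */
                async_synchronize_full();
                return 0;
        }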
*/ - if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC)) + if (!mod->async_probe_requested) async_synchronize_full(); ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base + diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index bf640fd6142a00f0f1812d29506db5988cca7051..522cb1387462c981222aadbcd9cef005216cd192 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -1323,7 +1323,7 @@ static int __init resumedelay_setup(char *str) int rc = kstrtouint(str, 0, &resume_delay); if (rc) - return rc; + pr_warn("resumedelay: bad option string '%s'\n", str); return 1; } diff --git a/kernel/power/main.c b/kernel/power/main.c index 0aefd6f57e0acd0bf0fd726fcc6cd6c700ec7312..d6140ed15d0b17098e8e28ec981e33759a26790a 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -504,7 +504,10 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA; + if (!pm_wakeup_irq()) + return -ENODATA; + + return sprintf(buf, "%u\n", pm_wakeup_irq()); } power_attr_ro(pm_wakeup_irq); diff --git a/kernel/power/process.c b/kernel/power/process.c index 45b054b7b5ec81f814f6bf12c9462318424e6e01..b9faa363c46af612125629eaa5fc41f5c0579609 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -134,7 +134,7 @@ int freeze_processes(void) if (!pm_freezing) atomic_inc(&system_freezing_cnt); - pm_wakeup_clear(true); + pm_wakeup_clear(0); pr_info("Freezing user space processes ... "); pm_freezing = true; error = try_to_freeze_tasks(true); diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 46b1804c1ddf7c22e81a21bd44b299899ddc069a..1da013f50059a32dc4744a68dafde9d6927c9000 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -944,8 +944,7 @@ static void memory_bm_recycle(struct memory_bitmap *bm) * Register a range of page frames the contents of which should not be saved * during hibernation (to be used in the early initialization code). 
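Several hunks in this series (dma_debug_cmdline(), resumedelay_setup(), setup_test_suspend(), control_devkmsg()) switch early-param handlers to return 1. The convention they follow: returning 1 marks the option as consumed, while returning 0 makes the kernel treat it as an unknown argument and hand it to init. A user-space sketch of that dispatch, with a hypothetical parser rather than the kernel's:

        #include <stdio.h>
        #include <string.h>

        /* Returns 1 if consumed, 0 to pass the option on to init. */
        static int resumedelay_setup(const char *str)
        {
                unsigned int delay;

                if (sscanf(str, "%u", &delay) != 1)
                        fprintf(stderr, "resumedelay: bad option '%s'\n", str);
                return 1;       /* always consumed: never leaks into init's argv */
        }

        static void parse_param(const char *param, const char *val)
        {
                if (!strcmp(param, "resumedelay") && resumedelay_setup(val))
                        return;
                printf("passing unknown option '%s' to init\n", param);
        }

        int main(void)
        {
                parse_param("resumedelay", "5");
                parse_param("resumedelay", "junk");     /* warns, still consumed */
                parse_param("quiet", "");               /* unknown here: goes to init */
                return 0;
        }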
*/ -void __init __register_nosave_region(unsigned long start_pfn, - unsigned long end_pfn, int use_kmalloc) +void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn) { struct nosave_region *region; @@ -961,18 +960,12 @@ void __init __register_nosave_region(unsigned long start_pfn, goto Report; } } - if (use_kmalloc) { - /* During init, this shouldn't fail */ - region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL); - BUG_ON(!region); - } else { - /* This allocation cannot fail */ - region = memblock_alloc(sizeof(struct nosave_region), - SMP_CACHE_BYTES); - if (!region) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct nosave_region)); - } + /* This allocation cannot fail */ + region = memblock_alloc(sizeof(struct nosave_region), + SMP_CACHE_BYTES); + if (!region) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(struct nosave_region)); region->start_pfn = start_pfn; region->end_pfn = end_pfn; list_add_tail(®ion->list, &nosave_regions); diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 32391acc806bf6c9d19bad31e958755eb3b0ab99..4aa4d5d3947f140e3793e575bbd3519bfa678735 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -138,8 +138,6 @@ static void s2idle_loop(void) break; } - pm_wakeup_clear(false); - s2idle_enter(); } diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c index e1ed58adb69e480b8537bf4a37b2d2e1f23be9b2..be480ae5cb2aa1da01379025ff4f862bbb2aef94 100644 --- a/kernel/power/suspend_test.c +++ b/kernel/power/suspend_test.c @@ -157,22 +157,22 @@ static int __init setup_test_suspend(char *value) value++; suspend_type = strsep(&value, ","); if (!suspend_type) - return 0; + return 1; repeat = strsep(&value, ","); if (repeat) { if (kstrtou32(repeat, 0, &test_repeat_count_max)) - return 0; + return 1; } for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) if (!strcmp(pm_labels[i], suspend_type)) { test_state_label = pm_labels[i]; - return 0; + return 1; } printk(warn_bad_state, suspend_type); - return 0; + return 1; } __setup("test_suspend", setup_test_suspend); diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 105df4dfc783971acdd1d17ee2971cb892c97d02..52571dcad768b988eaadbd3ce98a4ac42dd2f7dd 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -39,23 +39,20 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active) { struct rb_node *node; struct wakelock *wl; - char *str = buf; - char *end = buf + PAGE_SIZE; + int len = 0; mutex_lock(&wakelocks_lock); for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) { wl = rb_entry(node, struct wakelock, node); if (wl->ws->active == show_active) - str += scnprintf(str, end - str, "%s ", wl->name); + len += sysfs_emit_at(buf, len, "%s ", wl->name); } - if (str > buf) - str--; - str += scnprintf(str, end - str, "\n"); + len += sysfs_emit_at(buf, len, "\n"); mutex_unlock(&wakelocks_lock); - return (str - buf); + return len; } #if CONFIG_PM_WAKELOCKS_LIMIT > 0 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 43f8f2573eac4a73ed27a4b1a95af745325a9f83..ecd28d4fa20eb5d80c3bc9b46cffa4f33a011f93 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -146,8 +146,10 @@ static int __control_devkmsg(char *str) static int __init control_devkmsg(char *str) { - if (__control_devkmsg(str) < 0) + if (__control_devkmsg(str) < 0) { + pr_warn("printk.devkmsg: bad option string '%s'\n", str); return 1; + } /* * Set sysctl string accordingly: @@ -166,7 +168,7 @@ static int __init 
control_devkmsg(char *str) */ devkmsg_log |= DEVKMSG_LOG_MASK_LOCK; - return 0; + return 1; } __setup("printk.devkmsg=", control_devkmsg); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 0087ce50d99e63130021c95ffd649520c231788a..072033f40e2300764827d4bcd2d7a0e71301bd43 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -31,6 +31,7 @@ #include #include #include +#include #include /* for syscall_get_* */ @@ -370,6 +371,26 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) return !err; } +static int check_ptrace_options(unsigned long data) +{ + if (data & ~(unsigned long)PTRACE_O_MASK) + return -EINVAL; + + if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) { + if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) || + !IS_ENABLED(CONFIG_SECCOMP)) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (seccomp_mode(¤t->seccomp) != SECCOMP_MODE_DISABLED || + current->ptrace & PT_SUSPEND_SECCOMP) + return -EPERM; + } + return 0; +} + static int ptrace_attach(struct task_struct *task, long request, unsigned long addr, unsigned long flags) @@ -381,8 +402,16 @@ static int ptrace_attach(struct task_struct *task, long request, if (seize) { if (addr != 0) goto out; + /* + * This duplicates the check in check_ptrace_options() because + * ptrace_attach() and ptrace_setoptions() have historically + * used different error codes for unknown ptrace options. + */ if (flags & ~(unsigned long)PTRACE_O_MASK) goto out; + retval = check_ptrace_options(flags); + if (retval) + return retval; flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT); } else { flags = PT_PTRACED; @@ -655,22 +684,11 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds static int ptrace_setoptions(struct task_struct *child, unsigned long data) { unsigned flags; + int ret; - if (data & ~(unsigned long)PTRACE_O_MASK) - return -EINVAL; - - if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) { - if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) || - !IS_ENABLED(CONFIG_SECCOMP)) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (seccomp_mode(¤t->seccomp) != SECCOMP_MODE_DISABLED || - current->ptrace & PT_SUSPEND_SECCOMP) - return -EPERM; - } + ret = check_ptrace_options(data); + if (ret) + return ret; /* Avoid intermediate state when all opts are cleared */ flags = child->ptrace; @@ -795,6 +813,24 @@ static int ptrace_peek_siginfo(struct task_struct *child, return ret; } +#ifdef CONFIG_RSEQ +static long ptrace_get_rseq_configuration(struct task_struct *task, + unsigned long size, void __user *data) +{ + struct ptrace_rseq_configuration conf = { + .rseq_abi_pointer = (u64)(uintptr_t)task->rseq, + .rseq_abi_size = sizeof(*task->rseq), + .signature = task->rseq_sig, + .flags = 0, + }; + + size = min_t(unsigned long, size, sizeof(conf)); + if (copy_to_user(data, &conf, size)) + return -EFAULT; + return sizeof(conf); +} +#endif + #ifdef PTRACE_SINGLESTEP #define is_singlestep(request) ((request) == PTRACE_SINGLESTEP) #else @@ -1243,6 +1279,12 @@ int ptrace_request(struct task_struct *child, long request, ret = seccomp_get_metadata(child, addr, datavp); break; +#ifdef CONFIG_RSEQ + case PTRACE_GET_RSEQ_CONFIGURATION: + ret = ptrace_get_rseq_configuration(child, addr, datavp); + break; +#endif + default: break; } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b9c45b2d76904afbd20387bc05624f5437ace135..310bcc79b07b5c5e0f0fd83392b8bad839b2f05a 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1601,10 +1601,11 @@ static void __maybe_unused 
rcu_advance_cbs_nowake(struct rcu_node *rnp, struct rcu_data *rdp) { rcu_lockdep_assert_cblist_protected(rdp); - if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || - !raw_spin_trylock_rcu_node(rnp)) + if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) || !raw_spin_trylock_rcu_node(rnp)) return; - WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); + // The grace period cannot end while we hold the rcu_node lock. + if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) + WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); raw_spin_unlock_rcu_node(rnp); } @@ -2276,8 +2277,6 @@ rcu_report_qs_rdp(struct rcu_data *rdp) unsigned long flags; unsigned long mask; bool needwake = false; - const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && - rcu_segcblist_is_offloaded(&rdp->cblist); struct rcu_node *rnp; WARN_ON_ONCE(rdp->cpu != smp_processor_id()); @@ -2301,9 +2300,13 @@ rcu_report_qs_rdp(struct rcu_data *rdp) if ((rnp->qsmask & mask) == 0) { raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } else { + const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) && + rcu_segcblist_is_offloaded(&rdp->cblist); /* * This GP can't end until cpu checks in, so all of our * callbacks can be processed during the next GP. + * + * NOCB kthreads have their own way to deal with that. */ if (!offloaded) needwake = rcu_accelerate_cbs(rnp, rdp); diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 0ffe185c1f46ade03145f3f5059b0ad637acf110..2bc4538e8a6125788a80724f66ad3dc95d7c99b6 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -387,6 +387,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp) continue; } if (get_cpu() == cpu) { + mask_ofl_test |= mask; put_cpu(); continue; } @@ -506,7 +507,10 @@ static void synchronize_rcu_expedited_wait(void) if (rdp->rcu_forced_tick_exp) continue; rdp->rcu_forced_tick_exp = true; - tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP); + preempt_disable(); + if (cpu_online(cpu)) + tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP); + preempt_enable(); } } j = READ_ONCE(jiffies_till_first_fqs); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index a222faffbafaa5d802c411a16472ee5e75f92779..87fe7f423b287eb7ad1142b87aeea368b88d8886 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -531,16 +531,17 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } - /* Unboost if we were boosted. */ - if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex) - rt_mutex_futex_unlock(&rnp->boost_mtx); - /* * If this was the last task on the expedited lists, * then we need to report up the rcu_node hierarchy. */ if (!empty_exp && empty_exp_now) rcu_report_exp_rnp(rnp, true); + + /* Unboost if we were boosted. 
*/ + if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex) + rt_mutex_futex_unlock(&rnp->boost_mtx); + } else { local_irq_restore(flags); } @@ -1646,7 +1647,11 @@ static bool wake_nocb_gp(struct rcu_data *rdp, bool force, rcu_nocb_unlock_irqrestore(rdp, flags); return false; } - del_timer(&rdp->nocb_timer); + + if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) { + WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); + del_timer(&rdp->nocb_timer); + } rcu_nocb_unlock_irqrestore(rdp, flags); raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { @@ -2167,7 +2172,6 @@ static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp) return false; } ndw = READ_ONCE(rdp->nocb_defer_wakeup); - WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); ret = wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake")); diff --git a/kernel/rseq.c b/kernel/rseq.c index 0077713bf2400126caacd3f8900143adab5edea3..1b4547e0d8414d678509015870a8cfe126ec7508 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -120,8 +120,13 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) u32 sig; int ret; +#ifdef CONFIG_64BIT + if (get_user(ptr, &t->rseq->rseq_cs.ptr64)) + return -EFAULT; +#else if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr))) return -EFAULT; +#endif if (!ptr) { memset(rseq_cs, 0, sizeof(*rseq_cs)); return 0; @@ -204,9 +209,13 @@ static int clear_rseq_cs(struct task_struct *t) * * Set rseq_cs to NULL. */ +#ifdef CONFIG_64BIT + return put_user(0UL, &t->rseq->rseq_cs.ptr64); +#else if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64))) return -EFAULT; return 0; +#endif } /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 46d219b871093ef50f416008c50c99585f2ce2ca..e00b39d4e2e267fd4ec558103398e7e5d9d54e9e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -36,6 +36,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp); +EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp); @@ -3307,6 +3308,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) init_entity_runnable_average(&p->se); + #ifdef CONFIG_SCHED_INFO if (likely(sched_info_on())) memset(&p->sched_info, 0, sizeof(p->sched_info)); @@ -3322,18 +3324,23 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) return 0; } -void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs) +void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) { unsigned long flags; -#ifdef CONFIG_CGROUP_SCHED - struct task_group *tg; -#endif + /* + * Because we're not yet on the pid-hash, p->pi_lock isn't strictly + * required yet, but lockdep gets upset if rules are violated. 
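The tg_set_cfs_bandwidth() hunk below adds two bounds for the new burst value: burst may not exceed quota, and quota + burst must stay within the scheduler's maximum runtime. A worked check of those bounds, with a stand-in constant for max_cfs_runtime:

        #include <assert.h>
        #include <stdbool.h>
        #include <stdint.h>

        #define NSEC_PER_USEC   1000ULL
        /* Stand-in for max_cfs_runtime (MAX_BW * NSEC_PER_USEC in the kernel). */
        #define MAX_RUNTIME     ((1ULL << 20) * NSEC_PER_USEC)

        static bool burst_ok(uint64_t quota, uint64_t burst)
        {
                return burst <= quota && burst + quota <= MAX_RUNTIME;
        }

        int main(void)
        {
                uint64_t quota = 100000 * NSEC_PER_USEC;        /* 100ms */

                assert(burst_ok(quota, 0));             /* burst disabled */
                assert(burst_ok(quota, quota));         /* up to 100% extra */
                assert(!burst_ok(quota, quota + 1));    /* burst > quota: rejected */
                return 0;
        }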
+ */ raw_spin_lock_irqsave(&p->pi_lock, flags); #ifdef CONFIG_CGROUP_SCHED - tg = container_of(kargs->cset->subsys[cpu_cgrp_id], - struct task_group, css); - p->sched_task_group = autogroup_task_group(p, tg); + if (1) { + struct task_group *tg; + tg = container_of(kargs->cset->subsys[cpu_cgrp_id], + struct task_group, css); + tg = autogroup_task_group(p, tg); + p->sched_task_group = tg; + } #endif rseq_migrate(p); /* @@ -3344,7 +3351,10 @@ void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs) if (p->sched_class->task_fork) p->sched_class->task_fork(p); raw_spin_unlock_irqrestore(&p->pi_lock, flags); +} +void sched_post_fork(struct task_struct *p) +{ uclamp_post_fork(p); } @@ -4039,7 +4049,6 @@ void scheduler_tick(void) update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); curr->sched_class->task_tick(rq, curr, 0); calc_global_load_tick(rq); - psi_task_tick(rq); rq_unlock(rq, &rf); @@ -8206,7 +8215,8 @@ static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); -static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) +static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, + u64 burst) { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; @@ -8236,6 +8246,10 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) if (quota != RUNTIME_INF && quota > max_cfs_runtime) return -EINVAL; + if (quota != RUNTIME_INF && (burst > quota || + burst + quota > max_cfs_runtime)) + return -EINVAL; + /* * Prevent race between setting of cfs_rq->runtime_enabled and * unthrottle_offline_cfs_rqs(). @@ -8257,6 +8271,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) raw_spin_lock_irq(&cfs_b->lock); cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; + cfs_b->burst = burst; __refill_cfs_bandwidth_runtime(cfs_b); @@ -8290,9 +8305,10 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) { - u64 quota, period; + u64 quota, period, burst; period = ktime_to_ns(tg->cfs_bandwidth.period); + burst = tg->cfs_bandwidth.burst; if (cfs_quota_us < 0) quota = RUNTIME_INF; else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) @@ -8300,7 +8316,7 @@ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) else return -EINVAL; - return tg_set_cfs_bandwidth(tg, period, quota); + return tg_set_cfs_bandwidth(tg, period, quota, burst); } static long tg_get_cfs_quota(struct task_group *tg) @@ -8318,15 +8334,16 @@ static long tg_get_cfs_quota(struct task_group *tg) static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { - u64 quota, period; + u64 quota, period, burst; if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) return -EINVAL; period = (u64)cfs_period_us * NSEC_PER_USEC; quota = tg->cfs_bandwidth.quota; + burst = tg->cfs_bandwidth.burst; - return tg_set_cfs_bandwidth(tg, period, quota); + return tg_set_cfs_bandwidth(tg, period, quota, burst); } static long tg_get_cfs_period(struct task_group *tg) @@ -8339,6 +8356,30 @@ static long tg_get_cfs_period(struct task_group *tg) return cfs_period_us; } +static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) +{ + u64 quota, period, burst; + + if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) + return -EINVAL; + + burst = (u64)cfs_burst_us * NSEC_PER_USEC; + period = 
ktime_to_ns(tg->cfs_bandwidth.period); + quota = tg->cfs_bandwidth.quota; + + return tg_set_cfs_bandwidth(tg, period, quota, burst); +} + +static long tg_get_cfs_burst(struct task_group *tg) +{ + u64 burst_us; + + burst_us = tg->cfs_bandwidth.burst; + do_div(burst_us, NSEC_PER_USEC); + + return burst_us; +} + static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { @@ -8363,6 +8404,18 @@ static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, return tg_set_cfs_period(css_tg(css), cfs_period_us); } +static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return tg_get_cfs_burst(css_tg(css)); +} + +static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, + struct cftype *cftype, u64 cfs_burst_us) +{ + return tg_set_cfs_burst(css_tg(css), cfs_burst_us); +} + struct cfs_schedulable_data { struct task_group *tg; u64 period, quota; @@ -8465,6 +8518,9 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v) seq_printf(sf, "wait_sum %llu\n", ws); } + seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); + seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); + return 0; } #endif /* CONFIG_CFS_BANDWIDTH */ @@ -8507,13 +8563,10 @@ static int tg_change_scheduler(struct task_group *tg, void *data) struct cgroup_subsys_state *css = &tg->css; tg->qos_level = qos_level; - if (qos_level == -1) { + if (qos_level == -1) policy = SCHED_IDLE; - cfs_bandwidth_usage_inc(); - } else { + else policy = SCHED_NORMAL; - cfs_bandwidth_usage_dec(); - } param.sched_priority = 0; css_task_iter_start(css, 0, &it); @@ -8541,6 +8594,13 @@ static int cpu_qos_write(struct cgroup_subsys_state *css, if (tg->qos_level == -1 && qos_level == 0) return -EINVAL; + cpus_read_lock(); + if (qos_level == -1) + cfs_bandwidth_usage_inc(); + else + cfs_bandwidth_usage_dec(); + cpus_read_unlock(); + rcu_read_lock(); walk_tg_tree_from(tg, tg_change_scheduler, tg_nop, (void *)(&qos_level)); rcu_read_unlock(); @@ -8574,6 +8634,11 @@ static struct cftype cpu_legacy_files[] = { .read_u64 = cpu_cfs_period_read_u64, .write_u64 = cpu_cfs_period_write_u64, }, + { + .name = "cfs_burst_us", + .read_u64 = cpu_cfs_burst_read_u64, + .write_u64 = cpu_cfs_burst_write_u64, + }, { .name = "stat", .seq_show = cpu_cfs_stat_show, @@ -8622,16 +8687,20 @@ static int cpu_extra_stat_show(struct seq_file *sf, { struct task_group *tg = css_tg(css); struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; - u64 throttled_usec; + u64 throttled_usec, burst_usec; throttled_usec = cfs_b->throttled_time; do_div(throttled_usec, NSEC_PER_USEC); + burst_usec = cfs_b->burst_time; + do_div(burst_usec, NSEC_PER_USEC); seq_printf(sf, "nr_periods %d\n" "nr_throttled %d\n" - "throttled_usec %llu\n", + "throttled_usec %llu\n" + "nr_bursts %d\n" + "burst_usec %llu\n", cfs_b->nr_periods, cfs_b->nr_throttled, - throttled_usec); + throttled_usec, cfs_b->nr_burst, burst_usec); } #endif return 0; @@ -8746,12 +8815,13 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, { struct task_group *tg = css_tg(of_css(of)); u64 period = tg_get_cfs_period(tg); + u64 burst = tg_get_cfs_burst(tg); u64 quota; int ret; ret = cpu_period_quota_parse(buf, &period, "a); if (!ret) - ret = tg_set_cfs_bandwidth(tg, period, quota); + ret = tg_set_cfs_bandwidth(tg, period, quota, burst); return ret ?: nbytes; } #endif @@ -8778,6 +8848,12 @@ static struct cftype cpu_files[] = { .seq_show = cpu_max_show, .write = cpu_max_write, }, + { + .name = "max.burst", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = 
cpu_cfs_burst_read_u64, + .write_u64 = cpu_cfs_burst_write_u64, + }, #endif #ifdef CONFIG_UCLAMP_TASK_GROUP { diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 5a55d230045240246e34f5a9ec099e62e1fde0fc..ca0eef7d3852b56c55641b30de5c87a3c80523d2 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -147,10 +147,10 @@ void account_guest_time(struct task_struct *p, u64 cputime) /* Add guest time to cpustat. */ if (task_nice(p) > 0) { - cpustat[CPUTIME_NICE] += cputime; + task_group_account_field(p, CPUTIME_NICE, cputime); cpustat[CPUTIME_GUEST_NICE] += cputime; } else { - cpustat[CPUTIME_USER] += cputime; + task_group_account_field(p, CPUTIME_USER, cputime); cpustat[CPUTIME_GUEST] += cputime; } } diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 70a57827243633b8cd1ebce1318844714cdac15a..a260ff7800db34cb218fbe98181e03da01f6f1d4 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -908,25 +908,15 @@ void print_numa_stats(struct seq_file *m, int node, unsigned long tsf, static void sched_show_numa(struct task_struct *p, struct seq_file *m) { #ifdef CONFIG_NUMA_BALANCING - struct mempolicy *pol; - if (p->mm) P(mm->numa_scan_seq); - task_lock(p); - pol = p->mempolicy; - if (pol && !(pol->flags & MPOL_F_MORON)) - pol = NULL; - mpol_get(pol); - task_unlock(p); - P(numa_pages_migrated); P(numa_preferred_nid); P(total_numa_faults); SEQ_printf(m, "current_node=%d, numa_group_id=%d\n", task_node(p), task_numa_group_id(p)); show_numa_stats(p, m); - mpol_put(pol); #endif } @@ -982,6 +972,10 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts); P_SCHEDSTAT(se.statistics.nr_wakeups_passive); P_SCHEDSTAT(se.statistics.nr_wakeups_idle); +#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER + P_SCHEDSTAT(se.statistics.nr_qos_smt_send_ipi); + P_SCHEDSTAT(se.statistics.nr_qos_smt_expelled); +#endif avg_atom = p->se.sum_exec_runtime; if (nr_switches) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 583b5dcbf61fbfd27b7c1f7546ed32f01f3c0a2c..50d457979db61fa44f73f64c2a082f30fbeab8ee 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -26,6 +26,7 @@ #endif #ifdef CONFIG_QOS_SCHED #include +#include #endif /* @@ -130,6 +131,10 @@ unsigned int sysctl_offline_wait_interval = 100; /* in ms */ static int unthrottle_qos_cfs_rqs(int cpu); #endif +#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER +static DEFINE_PER_CPU(int, qos_smt_status); +#endif + #ifdef CONFIG_CFS_BANDWIDTH /* * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool @@ -3397,7 +3402,6 @@ void set_task_rq_fair(struct sched_entity *se, se->avg.last_update_time = n_last_update_time; } - /* * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to * propagate its contribution. The key to this propagation is the invariant @@ -3465,7 +3469,6 @@ void set_task_rq_fair(struct sched_entity *se, * XXX: only do this for the part of runnable > running ? * */ - static inline void update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) { @@ -3694,7 +3697,19 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) r = removed_util; sub_positive(&sa->util_avg, r); - sa->util_sum = sa->util_avg * divider; + sub_positive(&sa->util_sum, r * divider); + /* + * Because of rounding, se->util_sum might ends up being +1 more than + * cfs->util_sum. 
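An illustrative aside before the comment concludes: the surrounding update_cfs_rq_load_avg() hunk stops recomputing util_sum from util_avg and instead subtracts the removed contribution, then clamps against the smallest divider PELT can ever use (PELT_MIN_DIVIDER, defined in the kernel/sched/pelt.h hunk further down). A standalone sketch of the clamp, using the kernel's LOAD_AVG_MAX constant and invented sample values:

#include <stdint.h>
#include <stdio.h>

#define LOAD_AVG_MAX		47742
#define PELT_MIN_DIVIDER	(LOAD_AVG_MAX - 1024)

/* Mirrors sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER):
 * whatever rounding did, util_sum may not fall below util_avg times the
 * smallest divider that get_pelt_divider() can return. */
static uint32_t clamp_util_sum(uint32_t util_sum, uint32_t util_avg)
{
	uint32_t floor = util_avg * PELT_MIN_DIVIDER;

	return util_sum > floor ? util_sum : floor;
}

int main(void)
{
	/* Rounding across many detaches dragged util_sum to 0 while
	 * util_avg is still 3; the clamp restores the invariant. */
	printf("clamped util_sum = %u\n", clamp_util_sum(0, 3));
	return 0;
}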
Although this is not a problem by itself, detaching + * a lot of tasks with the rounding problem between 2 updates of + * util_avg (~1ms) can make cfs->util_sum becoming null whereas + * cfs_util_avg is not. + * Check that util_sum is still above its lower bound for the new + * util_avg. Given that period_contrib might have moved since the last + * sync, we are only sure that util_sum must be above or equal to + * util_avg * minimum possible divider + */ + sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER); r = removed_runnable; sub_positive(&sa->runnable_avg, r); @@ -4723,8 +4738,20 @@ static inline u64 sched_cfs_bandwidth_slice(void) */ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) { - if (cfs_b->quota != RUNTIME_INF) - cfs_b->runtime = cfs_b->quota; + s64 runtime; + + if (unlikely(cfs_b->quota == RUNTIME_INF)) + return; + + cfs_b->runtime += cfs_b->quota; + runtime = cfs_b->runtime_snap - cfs_b->runtime; + if (runtime > 0) { + cfs_b->burst_time += runtime; + cfs_b->nr_burst++; + } + + cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst); + cfs_b->runtime_snap = cfs_b->runtime; } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) @@ -5080,6 +5107,9 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u throttled = !list_empty(&cfs_b->throttled_cfs_rq); cfs_b->nr_periods += overrun; + /* Refill extra burst quota even if cfs_b->idle */ + __refill_cfs_bandwidth_runtime(cfs_b); + /* * idle depends on !throttled (for the case of a large deficit), and if * we're going inactive then everything else can be deferred @@ -5087,8 +5117,6 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u if (cfs_b->idle && !throttled) goto out_deactivate; - __refill_cfs_bandwidth_runtime(cfs_b); - if (!throttled) { /* mark as potentially idle for the upcoming period */ cfs_b->idle = 1; @@ -5243,7 +5271,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) /* * When a group wakes up we want to make sure that its quota is not already * expired/exceeded, otherwise it may be allowed to steal additional ticks of - * runtime as update_curr() throttling can not not trigger until it's on-rq. + * runtime as update_curr() throttling can not trigger until it's on-rq. 
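A worked aside on the __refill_cfs_bandwidth_runtime() hunk above: every period adds one quota, unused runtime may accumulate up to quota + burst, and runtime_snap lets the next refill detect that more than one quota was consumed since the last one -- which is exactly the time reported by the new nr_burst/burst_time statistics. A standalone model with invented millisecond values:

#include <stdint.h>
#include <stdio.h>

struct cfs_bw {
	int64_t quota, burst;
	int64_t runtime, runtime_snap;
	int64_t burst_time;
	int nr_burst;
};

/* Period refill mirroring the patched __refill_cfs_bandwidth_runtime():
 * runtime_snap - runtime is positive exactly when more than one quota
 * was consumed since the previous refill, i.e. burst runtime was used. */
static void refill(struct cfs_bw *b)
{
	int64_t overrun;

	b->runtime += b->quota;
	overrun = b->runtime_snap - b->runtime;
	if (overrun > 0) {
		b->burst_time += overrun;
		b->nr_burst++;
	}
	if (b->runtime > b->quota + b->burst)
		b->runtime = b->quota + b->burst;
	b->runtime_snap = b->runtime;
}

int main(void)
{
	struct cfs_bw b = { .quota = 100, .burst = 50 }; /* values illustrative */

	refill(&b);		/* idle period: runtime accumulates ... */
	refill(&b);		/* ... but is capped at quota + burst (150) */
	b.runtime -= 130;	/* busy period: consumed 130 > quota */
	refill(&b);		/* detects the 30 units of burst usage */
	printf("runtime=%lld nr_burst=%d burst_time=%lld\n",
	       (long long)b.runtime, b.nr_burst, (long long)b.burst_time);
	return 0;
}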
*/ static void check_enqueue_throttle(struct cfs_rq *cfs_rq) { @@ -5341,6 +5369,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) if (new < max_cfs_quota_period) { cfs_b->period = ns_to_ktime(new); cfs_b->quota *= 2; + cfs_b->burst *= 2; pr_warn_ratelimited( "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", @@ -5372,6 +5401,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->runtime = 0; cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); + cfs_b->burst = 0; INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); @@ -5385,6 +5415,9 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) { cfs_rq->runtime_enabled = 0; INIT_LIST_HEAD(&cfs_rq->throttled_list); +#ifdef CONFIG_QOS_SCHED + INIT_LIST_HEAD(&cfs_rq->qos_throttled_list); +#endif } void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) @@ -5442,6 +5475,10 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) lockdep_assert_held(&rq->lock); +#ifdef CONFIG_QOS_SCHED + unthrottle_qos_cfs_rqs(cpu_of(rq)); +#endif + rcu_read_lock(); list_for_each_entry_rcu(tg, &task_groups, list) { struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; @@ -5464,10 +5501,6 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) unthrottle_cfs_rq(cfs_rq); } rcu_read_unlock(); - -#ifdef CONFIG_QOS_SCHED - unthrottle_qos_cfs_rqs(cpu_of(rq)); -#endif } #else /* CONFIG_CFS_BANDWIDTH */ @@ -6404,8 +6437,10 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) * pattern is IO completions. */ if (is_per_cpu_kthread(current) && + in_task() && prev == smp_processor_id() && - this_rq()->nr_running <= 1) { + this_rq()->nr_running <= 1 && + asym_fits_capacity(task_util, prev)) { SET_STAT(found_idle_cpu_easy); return prev; } @@ -7159,7 +7194,34 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ } #ifdef CONFIG_QOS_SCHED + +static inline bool is_offline_task(struct task_struct *p) +{ + return task_group(p)->qos_level == QOS_LEVEL_OFFLINE; +} + static void start_qos_hrtimer(int cpu); + +static int qos_tg_unthrottle_up(struct task_group *tg, void *data) +{ + struct rq *rq = data; + struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; + + cfs_rq->throttle_count--; + + return 0; +} + +static int qos_tg_throttle_down(struct task_group *tg, void *data) +{ + struct rq *rq = data; + struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; + + cfs_rq->throttle_count++; + + return 0; +} + static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); @@ -7171,7 +7233,7 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq) /* freeze hierarchy runnable averages while throttled */ rcu_read_lock(); - walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); + walk_tg_tree_from(cfs_rq->tg, qos_tg_throttle_down, tg_nop, (void *)rq); rcu_read_unlock(); task_delta = cfs_rq->h_nr_running; @@ -7202,15 +7264,14 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq) start_qos_hrtimer(cpu_of(rq)); cfs_rq->throttled = 1; - cfs_rq->throttled_clock = rq_clock(rq); - list_add(&cfs_rq->throttled_list, &per_cpu(qos_throttled_cfs_rq, cpu_of(rq))); + list_add(&cfs_rq->qos_throttled_list, + &per_cpu(qos_throttled_cfs_rq, cpu_of(rq))); } static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); - struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct 
sched_entity *se; int enqueue = 1; unsigned int prev_nr = cfs_rq->h_nr_running; @@ -7221,12 +7282,12 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq) cfs_rq->throttled = 0; update_rq_clock(rq); - - cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; - list_del_init(&cfs_rq->throttled_list); + list_del_init(&cfs_rq->qos_throttled_list); /* update hierarchical throttle state */ - walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); + rcu_read_lock(); + walk_tg_tree_from(cfs_rq->tg, tg_nop, qos_tg_unthrottle_up, (void *)rq); + rcu_read_unlock(); if (!cfs_rq->load.weight) return; @@ -7266,7 +7327,7 @@ static int __unthrottle_qos_cfs_rqs(int cpu) int res = 0; list_for_each_entry_safe(cfs_rq, tmp_rq, &per_cpu(qos_throttled_cfs_rq, cpu), - throttled_list) { + qos_throttled_list) { if (cfs_rq_throttled(cfs_rq)) { unthrottle_qos_cfs_rq(cfs_rq); res++; @@ -7322,15 +7383,11 @@ void sched_qos_offline_wait(void) rcu_read_lock(); qos_level = task_group(current)->qos_level; rcu_read_unlock(); - if (qos_level != -1 || signal_pending(current)) + if (qos_level != -1 || fatal_signal_pending(current)) break; - msleep_interruptible(sysctl_offline_wait_interval); - } -} -int sched_qos_cpu_overload(void) -{ - return __this_cpu_read(qos_cpu_overload); + schedule_timeout_killable(msecs_to_jiffies(sysctl_offline_wait_interval)); + } } static enum hrtimer_restart qos_overload_timer_handler(struct hrtimer *timer) @@ -7363,6 +7420,153 @@ void init_qos_hrtimer(int cpu) hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); hrtimer->function = qos_overload_timer_handler; } + +/* + * To avoid Priority inversion issues, when this cpu is qos_cpu_overload, + * we should schedule offline tasks to run so that they can leave kernel + * critical sections, and throttle them before returning to user mode. + */ +static void qos_schedule_throttle(struct task_struct *p) +{ + if (unlikely(current->flags & PF_KTHREAD)) + return; + + if (unlikely(this_cpu_read(qos_cpu_overload))) { + if (is_offline_task(p)) + set_notify_resume(p); + } +} + +#endif + +#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER +static bool qos_smt_check_siblings_status(int this_cpu) +{ + int cpu; + + if (!sched_smt_active()) + return false; + + for_each_cpu(cpu, cpu_smt_mask(this_cpu)) { + if (cpu == this_cpu) + continue; + + if (per_cpu(qos_smt_status, cpu) == QOS_LEVEL_ONLINE) + return true; + } + + return false; +} + +static bool qos_smt_expelled(int this_cpu) +{ + /* + * The qos_smt_status of siblings cpu is online, and current cpu only has + * offline tasks enqueued, there is not suitable task, + * so pick_next_task_fair return null. + */ + if (qos_smt_check_siblings_status(this_cpu) && sched_idle_cpu(this_cpu)) + return true; + + return false; +} + +static bool qos_smt_update_status(struct task_struct *p) +{ + int status = QOS_LEVEL_OFFLINE; + + if (p != NULL && task_group(p)->qos_level >= QOS_LEVEL_ONLINE) + status = QOS_LEVEL_ONLINE; + + if (__this_cpu_read(qos_smt_status) == status) + return false; + + __this_cpu_write(qos_smt_status, status); + + return true; +} + +static void qos_smt_send_ipi(int this_cpu) +{ + int cpu; + struct rq *rq = NULL; + + if (!sched_smt_active()) + return; + + for_each_cpu(cpu, cpu_smt_mask(this_cpu)) { + if (cpu == this_cpu) + continue; + + rq = cpu_rq(cpu); + + /* + * There are two cases where current don't need to send scheduler_ipi: + * a) The qos_smt_status of siblings cpu is online; + * b) The cfs.h_nr_running of siblings cpu is 0. 
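An illustrative aside on qos_smt_send_ipi() above: a sibling is only kicked when it is currently in offline mode and actually has runnable CFS tasks; online siblings and idle siblings are skipped. A standalone model of that decision for one SMT core (status encoding follows the patch's QOS_LEVEL_* convention, but everything is re-declared here for illustration):

#include <stdbool.h>
#include <stdio.h>

enum { QOS_OFFLINE = -1, QOS_ONLINE = 0 };

struct sibling {
	int qos_status;		/* models per_cpu(qos_smt_status, cpu) */
	int nr_running;		/* models rq->cfs.h_nr_running */
};

/* Mirrors the two skip conditions in qos_smt_send_ipi(): an online
 * sibling needs no expelling, an idle sibling has nothing to expel. */
static bool need_resched_ipi(const struct sibling *s)
{
	if (s->qos_status == QOS_ONLINE)
		return false;
	if (s->nr_running == 0)
		return false;
	return true;
}

int main(void)
{
	struct sibling busy_offline = { QOS_OFFLINE, 3 };
	struct sibling idle = { QOS_OFFLINE, 0 };

	printf("busy offline sibling -> IPI: %d\n", need_resched_ipi(&busy_offline));
	printf("idle sibling         -> IPI: %d\n", need_resched_ipi(&idle));
	return 0;
}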
+ */ + if (per_cpu(qos_smt_status, cpu) == QOS_LEVEL_ONLINE || + rq->cfs.h_nr_running == 0) + continue; + + schedstat_inc(current->se.statistics.nr_qos_smt_send_ipi); + smp_send_reschedule(cpu); + } +} + +static void qos_smt_expel(int this_cpu, struct task_struct *p) +{ + if (qos_smt_update_status(p)) + qos_smt_send_ipi(this_cpu); +} + +static bool _qos_smt_check_need_resched(int this_cpu, struct rq *rq) +{ + int cpu; + + if (!sched_smt_active()) + return false; + + for_each_cpu(cpu, cpu_smt_mask(this_cpu)) { + if (cpu == this_cpu) + continue; + + /* + * There are two cases rely on the set need_resched to drive away + * offline task: + * a) The qos_smt_status of siblings cpu is online, the task of current cpu is offline; + * b) The qos_smt_status of siblings cpu is offline, the task of current cpu is idle, + * and current cpu only has SCHED_IDLE tasks enqueued. + */ + if (per_cpu(qos_smt_status, cpu) == QOS_LEVEL_ONLINE && + task_group(current)->qos_level < QOS_LEVEL_ONLINE) { + trace_sched_qos_smt_expel(cpu_curr(cpu), per_cpu(qos_smt_status, cpu)); + return true; + } + + if (per_cpu(qos_smt_status, cpu) == QOS_LEVEL_OFFLINE && + rq->curr == rq->idle && sched_idle_cpu(this_cpu)) { + trace_sched_qos_smt_expel(cpu_curr(cpu), per_cpu(qos_smt_status, cpu)); + return true; + } + } + + return false; +} + +void qos_smt_check_need_resched(void) +{ + struct rq *rq = this_rq(); + int this_cpu = rq->cpu; + + if (test_tsk_need_resched(current)) + return; + + if (_qos_smt_check_need_resched(this_cpu, rq)) { + set_tsk_need_resched(current); + set_preempt_need_resched(); + } +} #endif struct task_struct * @@ -7373,14 +7577,34 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf struct task_struct *p; int new_tasks; unsigned long time; +#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER + int this_cpu = rq->cpu; +#endif again: +#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER + if (qos_smt_expelled(this_cpu)) { + __this_cpu_write(qos_smt_status, QOS_LEVEL_OFFLINE); + schedstat_inc(rq->curr->se.statistics.nr_qos_smt_expelled); + trace_sched_qos_smt_expelled(rq->curr, per_cpu(qos_smt_status, this_cpu)); + return NULL; + } +#endif + if (!sched_fair_runnable(rq)) goto idle; #ifdef CONFIG_FAIR_GROUP_SCHED - if (!prev || prev->sched_class != &fair_sched_class) - goto simple; + if (!prev || prev->sched_class != &fair_sched_class) { +#ifdef CONFIG_QOS_SCHED + if (cfs_rq->idle_h_nr_running != 0 && rq->online) + goto qos_simple; + else +#endif + goto simple; + } + + /* * Because of the set_next_buddy() in dequeue_task_fair() it is rather @@ -7464,6 +7688,34 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf } goto done; + +#ifdef CONFIG_QOS_SCHED +qos_simple: + if (prev) + put_prev_task(rq, prev); + + do { + se = pick_next_entity(cfs_rq, NULL); + if (check_qos_cfs_rq(group_cfs_rq(se))) { + cfs_rq = &rq->cfs; + if (!cfs_rq->nr_running) + goto idle; + continue; + } + + cfs_rq = group_cfs_rq(se); + } while (cfs_rq); + + p = task_of(se); + + while (se) { + set_next_entity(cfs_rq_of(se), se); + se = parent_entity(se); + } + + goto done; +#endif + simple: #endif if (prev) @@ -7492,6 +7744,14 @@ done: __maybe_unused; update_misfit_status(p, rq); +#ifdef CONFIG_QOS_SCHED + qos_schedule_throttle(p); +#endif + +#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER + qos_smt_expel(this_cpu, p); +#endif + return p; idle: @@ -7540,6 +7800,9 @@ done: __maybe_unused; */ update_idle_rq_clock_pelt(rq); +#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER + qos_smt_expel(this_cpu, NULL); +#endif return NULL; } diff --git 
a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c index 16f57e71f9c441ada23d071fabcc8c0e32a54ccc..cc7cd512e4e33833d781b5b6686fd6085fbf879f 100644 --- a/kernel/sched/membarrier.c +++ b/kernel/sched/membarrier.c @@ -19,11 +19,11 @@ #endif #ifdef CONFIG_RSEQ -#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK \ +#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \ (MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ \ - | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ_BITMASK) + | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ) #else -#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK 0 +#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK 0 #endif #define MEMBARRIER_CMD_BITMASK \ @@ -31,7 +31,8 @@ | MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \ | MEMBARRIER_CMD_PRIVATE_EXPEDITED \ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \ - | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK) + | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \ + | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK) static void ipi_mb(void *info) { @@ -315,7 +316,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm) /* * For each cpu runqueue, if the task's mm match @mm, ensure that all - * @mm's membarrier state set bits are also set in in the runqueue's + * @mm's membarrier state set bits are also set in the runqueue's * membarrier state. This ensures that a runqueue scheduling * between threads which are users of @mm has its membarrier state * updated. diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h index 0b9aeebb9c325f0b572125b0b91b70f065422771..45bf08e22207cb5d5ee1445dfee9b0e8d1421d20 100644 --- a/kernel/sched/pelt.h +++ b/kernel/sched/pelt.h @@ -37,9 +37,11 @@ update_irq_load_avg(struct rq *rq, u64 running) } #endif +#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024) + static inline u32 get_pelt_divider(struct sched_avg *avg) { - return LOAD_AVG_MAX - 1024 + avg->period_contrib; + return PELT_MIN_DIVIDER + avg->period_contrib; } static inline void cfs_se_util_change(struct sched_avg *avg) diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index 360f133820699abb42f3dba1da3c23763921d3ba..25f7d46ad7bd37214496f1a02abeb35f5e3f443e 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -34,10 +34,19 @@ * delayed on that resource such that nobody is advancing and the CPU * goes idle. This leaves both workload and CPU unproductive. * - * (Naturally, the FULL state doesn't exist for the CPU resource.) - * * SOME = nr_delayed_tasks != 0 - * FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0 + * FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0 + * + * What it means for a task to be productive is defined differently + * for each resource. For IO, productive means a running task. For + * memory, productive means a running task that isn't a reclaimer. For + * CPU, productive means an oncpu task. + * + * Naturally, the FULL state doesn't exist for the CPU resource at the + * system level, but exist at the cgroup level. At the cgroup level, + * FULL means all non-idle tasks in the cgroup are delayed on the CPU + * resource which is being used by others outside of the cgroup or + * throttled by the cgroup cpu.max configuration. 
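To make the cgroup-level PSI_CPU_FULL definition above concrete before the comment continues: a group is CPU-FULL while it has runnable tasks but none of them on a CPU, and the patched memory test counts a stalled-but-runnable task as unproductive. A standalone model of the compound-state tests from the patched test_state(), with the task counters simplified to a plain struct:

#include <stdbool.h>
#include <stdio.h>

struct tasks {
	unsigned iowait, memstall, running, oncpu, memstall_running;
};

/* One predicate per compound state, following the patched test_state(). */
static bool cpu_some(const struct tasks *t) { return t->running > t->oncpu; }
static bool cpu_full(const struct tasks *t) { return t->running && !t->oncpu; }
static bool mem_full(const struct tasks *t)
{
	/* FULL: everything that is runnable is itself stalled on memory. */
	return t->memstall && t->running == t->memstall_running;
}

int main(void)
{
	/* Two runnable tasks, zero on-CPU: the cgroup is fully stalled. */
	struct tasks t = { .running = 2, .oncpu = 0 };

	printf("CPU SOME=%d FULL=%d MEM FULL=%d\n",
	       cpu_some(&t), cpu_full(&t), mem_full(&t));
	return 0;
}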
* * The percentage of wallclock time spent in those compound stall * states gives pressure numbers between 0 and 100 for each resource, @@ -78,13 +87,13 @@ * * threads = min(nr_nonidle_tasks, nr_cpus) * SOME = min(nr_delayed_tasks / threads, 1) - * FULL = (threads - min(nr_running_tasks, threads)) / threads + * FULL = (threads - min(nr_productive_tasks, threads)) / threads * * For the 257 number crunchers on 256 CPUs, this yields: * * threads = min(257, 256) * SOME = min(1 / 256, 1) = 0.4% - * FULL = (256 - min(257, 256)) / 256 = 0% + * FULL = (256 - min(256, 256)) / 256 = 0% * * For the 1 out of 4 memory-delayed tasks, this yields: * @@ -109,7 +118,7 @@ * For each runqueue, we track: * * tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0) - * tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu]) + * tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu]) * tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0) * * and then periodically aggregate: @@ -142,6 +151,8 @@ #include #include "sched.h" +#include + static int psi_bug __read_mostly; DEFINE_STATIC_KEY_FALSE(psi_disabled); @@ -221,15 +232,18 @@ static bool test_state(unsigned int *tasks, enum psi_states state) { switch (state) { case PSI_IO_SOME: - return tasks[NR_IOWAIT]; + return unlikely(tasks[NR_IOWAIT]); case PSI_IO_FULL: - return tasks[NR_IOWAIT] && !tasks[NR_RUNNING]; + return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]); case PSI_MEM_SOME: - return tasks[NR_MEMSTALL]; + return unlikely(tasks[NR_MEMSTALL]); case PSI_MEM_FULL: - return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING]; + return unlikely(tasks[NR_MEMSTALL] && + tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]); case PSI_CPU_SOME: - return tasks[NR_RUNNING] > tasks[NR_ONCPU]; + return unlikely(tasks[NR_RUNNING] > tasks[NR_ONCPU]); + case PSI_CPU_FULL: + return unlikely(tasks[NR_RUNNING] && !tasks[NR_ONCPU]); case PSI_NONIDLE: return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || tasks[NR_RUNNING]; @@ -644,13 +658,10 @@ static void poll_timer_fn(struct timer_list *t) wake_up_interruptible(&group->poll_wait); } -static void record_times(struct psi_group_cpu *groupc, int cpu, - bool memstall_tick) +static void record_times(struct psi_group_cpu *groupc, u64 now) { u32 delta; - u64 now; - now = cpu_clock(cpu); delta = now - groupc->state_start; groupc->state_start = now; @@ -664,34 +675,20 @@ static void record_times(struct psi_group_cpu *groupc, int cpu, groupc->times[PSI_MEM_SOME] += delta; if (groupc->state_mask & (1 << PSI_MEM_FULL)) groupc->times[PSI_MEM_FULL] += delta; - else if (memstall_tick) { - u32 sample; - /* - * Since we care about lost potential, a - * memstall is FULL when there are no other - * working tasks, but also when the CPU is - * actively reclaiming and nothing productive - * could run even if it were runnable. - * - * When the timer tick sees a reclaiming CPU, - * regardless of runnable tasks, sample a FULL - * tick (or less if it hasn't been a full tick - * since the last state change). 
- */ - sample = min(delta, (u32)jiffies_to_nsecs(1)); - groupc->times[PSI_MEM_FULL] += sample; - } } - if (groupc->state_mask & (1 << PSI_CPU_SOME)) + if (groupc->state_mask & (1 << PSI_CPU_SOME)) { groupc->times[PSI_CPU_SOME] += delta; + if (groupc->state_mask & (1 << PSI_CPU_FULL)) + groupc->times[PSI_CPU_FULL] += delta; + } if (groupc->state_mask & (1 << PSI_NONIDLE)) groupc->times[PSI_NONIDLE] += delta; } static void psi_group_change(struct psi_group *group, int cpu, - unsigned int clear, unsigned int set, + unsigned int clear, unsigned int set, u64 now, bool wake_clock) { struct psi_group_cpu *groupc; @@ -711,7 +708,7 @@ static void psi_group_change(struct psi_group *group, int cpu, */ write_seqcount_begin(&groupc->seq); - record_times(groupc, cpu, false); + record_times(groupc, now); for (t = 0, m = clear; m; m &= ~(1 << t), t++) { if (!(m & (1 << t))) @@ -719,10 +716,11 @@ static void psi_group_change(struct psi_group *group, int cpu, if (groupc->tasks[t]) { groupc->tasks[t]--; } else if (!psi_bug) { - printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n", + printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u %u] clear=%x set=%x\n", cpu, t, groupc->tasks[0], groupc->tasks[1], groupc->tasks[2], - groupc->tasks[3], clear, set); + groupc->tasks[3], groupc->tasks[4], + clear, set); psi_bug = 1; } } @@ -736,6 +734,18 @@ static void psi_group_change(struct psi_group *group, int cpu, if (test_state(groupc->tasks, s)) state_mask |= (1 << s); } + + /* + * Since we care about lost potential, a memstall is FULL + * when there are no other working tasks, but also when + * the CPU is actively reclaiming and nothing productive + * could run even if it were runnable. So when the current + * task in a cgroup is in_memstall, the corresponding groupc + * on that cpu is in PSI_MEM_FULL state. + */ + if (unlikely(groupc->tasks[NR_ONCPU] && cpu_curr(cpu)->in_memstall)) + state_mask |= (1 << PSI_MEM_FULL); + groupc->state_mask = state_mask; write_seqcount_end(&groupc->seq); @@ -757,9 +767,13 @@ static struct psi_group *iterate_groups(struct task_struct *task, void **iter) cgroup = task->cgroups->dfl_cgrp; else { #ifdef CONFIG_CGROUP_CPUACCT - rcu_read_lock(); - cgroup = task_cgroup(task, cpuacct_cgrp_id); - rcu_read_unlock(); + if (!cgroup_subsys_on_dfl(cpuacct_cgrp_subsys)) { + rcu_read_lock(); + cgroup = task_cgroup(task, cpuacct_cgrp_id); + rcu_read_unlock(); + } else { + cgroup = task->cgroups->dfl_cgrp; + } #else cgroup = NULL; #endif @@ -802,12 +816,14 @@ void psi_task_change(struct task_struct *task, int clear, int set) struct psi_group *group; bool wake_clock = true; void *iter = NULL; + u64 now; if (!task->pid) return; psi_flags_change(task, clear, set); + now = cpu_clock(cpu); /* * Periodic aggregation shuts off if there is a period of no * task changes, so we wake it back up if necessary. 
However, @@ -820,7 +836,7 @@ void psi_task_change(struct task_struct *task, int clear, int set) wake_clock = false; while ((group = iterate_groups(task, &iter))) - psi_group_change(group, cpu, clear, set, wake_clock); + psi_group_change(group, cpu, clear, set, now, wake_clock); } void psi_task_switch(struct task_struct *prev, struct task_struct *next, @@ -829,56 +845,64 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next, struct psi_group *group, *common = NULL; int cpu = task_cpu(prev); void *iter; + u64 now = cpu_clock(cpu); if (next->pid) { + bool identical_state; + psi_flags_change(next, 0, TSK_ONCPU); /* - * When moving state between tasks, the group that - * contains them both does not change: we can stop - * updating the tree once we reach the first common - * ancestor. Iterate @next's ancestors until we - * encounter @prev's state. + * When switching between tasks that have an identical + * runtime state, the cgroup that contains both tasks + * does not change: we can stop updating the tree once + * we reach the first common ancestor. Iterate @next's + * ancestors only until we encounter @prev's ONCPU. */ + identical_state = prev->psi_flags == next->psi_flags; iter = NULL; while ((group = iterate_groups(next, &iter))) { - if (per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) { + if (identical_state && + per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) { common = group; break; } - psi_group_change(group, cpu, 0, TSK_ONCPU, true); + psi_group_change(group, cpu, 0, TSK_ONCPU, now, true); } } - /* - * If this is a voluntary sleep, dequeue will have taken care - * of the outgoing TSK_ONCPU alongside TSK_RUNNING already. We - * only need to deal with it during preemption. - */ - if (sleep) - return; - if (prev->pid) { - psi_flags_change(prev, TSK_ONCPU, 0); + int clear = TSK_ONCPU, set = 0; - iter = NULL; - while ((group = iterate_groups(prev, &iter)) && group != common) - psi_group_change(group, cpu, TSK_ONCPU, 0, true); - } -} + /* + * When we're going to sleep, psi_dequeue() lets us + * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and + * TSK_IOWAIT here, where we can combine it with + * TSK_ONCPU and save walking common ancestors twice. + */ + if (sleep) { + clear |= TSK_RUNNING; + if (prev->in_memstall) + clear |= TSK_MEMSTALL_RUNNING; + if (prev->in_iowait) + set |= TSK_IOWAIT; + } -void psi_memstall_tick(struct task_struct *task, int cpu) -{ - struct psi_group *group; - void *iter = NULL; + psi_flags_change(prev, clear, set); - while ((group = iterate_groups(task, &iter))) { - struct psi_group_cpu *groupc; + iter = NULL; + while ((group = iterate_groups(prev, &iter)) && group != common) + psi_group_change(group, cpu, clear, set, now, true); - groupc = per_cpu_ptr(group->pcpu, cpu); - write_seqcount_begin(&groupc->seq); - record_times(groupc, cpu, true); - write_seqcount_end(&groupc->seq); + /* + * TSK_ONCPU is handled up to the common ancestor. If we're tasked + * with dequeuing too, finish that for the rest of the hierarchy.
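An illustrative aside on the psi_task_switch() rework above: on a voluntary sleep, the dequeue-side flag changes are folded into the switch-side update so the common ancestors are only walked once. A standalone model of how the clear/set masks for @prev are built (flag values invented for the sketch):

#include <stdio.h>

#define TSK_IOWAIT		(1 << 0)
#define TSK_MEMSTALL		(1 << 1)
#define TSK_RUNNING		(1 << 2)
#define TSK_ONCPU		(1 << 3)
#define TSK_MEMSTALL_RUNNING	(1 << 4)

/* Build the masks the way the patched psi_task_switch() does for @prev:
 * TSK_ONCPU is always cleared; sleeping additionally hands the dequeue
 * flags over so psi_dequeue() can do nothing in that case. */
static void prev_masks(int sleep, int in_memstall, int in_iowait,
		       int *clear, int *set)
{
	*clear = TSK_ONCPU;
	*set = 0;
	if (sleep) {
		*clear |= TSK_RUNNING;
		if (in_memstall)
			*clear |= TSK_MEMSTALL_RUNNING;
		if (in_iowait)
			*set |= TSK_IOWAIT;
	}
}

int main(void)
{
	int clear, set;

	prev_masks(1 /* sleep */, 1 /* memstall */, 0, &clear, &set);
	printf("clear=0x%x set=0x%x\n", clear, set);
	return 0;
}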
+ */ + if (sleep) { + clear &= ~TSK_ONCPU; + for (; group; group = iterate_groups(prev, &iter)) + psi_group_change(group, cpu, clear, set, now, true); + } } } @@ -900,6 +924,8 @@ void psi_memstall_enter(unsigned long *flags) *flags = current->in_memstall; if (*flags) return; + + trace_psi_memstall_enter(_RET_IP_); /* * in_memstall setting & accounting needs to be atomic wrt * changes to the task's scheduling state, otherwise we can @@ -908,7 +934,7 @@ void psi_memstall_enter(unsigned long *flags) rq = this_rq_lock_irq(&rf); current->in_memstall = 1; - psi_task_change(current, 0, TSK_MEMSTALL); + psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING); rq_unlock_irq(rq, &rf); } @@ -929,6 +955,8 @@ void psi_memstall_leave(unsigned long *flags) if (*flags) return; + + trace_psi_memstall_leave(_RET_IP_); /* * in_memstall clearing & accounting needs to be atomic wrt * changes to the task's scheduling state, otherwise we could @@ -937,7 +965,7 @@ void psi_memstall_leave(unsigned long *flags) rq = this_rq_lock_irq(&rf); current->in_memstall = 0; - psi_task_change(current, TSK_MEMSTALL, 0); + psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0); rq_unlock_irq(rq, &rf); } @@ -980,7 +1008,7 @@ void psi_cgroup_free(struct cgroup *cgroup) */ void cgroup_move_task(struct task_struct *task, struct css_set *to) { - unsigned int task_flags = 0; + unsigned int task_flags; struct rq_flags rf; struct rq *rq; @@ -995,15 +1023,31 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to) rq = task_rq_lock(task, &rf); - if (task_on_rq_queued(task)) { - task_flags = TSK_RUNNING; - if (task_current(rq, task)) - task_flags |= TSK_ONCPU; - } else if (task->in_iowait) - task_flags = TSK_IOWAIT; - - if (task->in_memstall) - task_flags |= TSK_MEMSTALL; + /* + * We may race with schedule() dropping the rq lock between + * deactivating prev and switching to next. Because the psi + * updates from the deactivation are deferred to the switch + * callback to save cgroup tree updates, the task's scheduling + * state here is not coherent with its psi state: + * + * schedule() cgroup_move_task() + * rq_lock() + * deactivate_task() + * p->on_rq = 0 + * psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates + * pick_next_task() + * rq_unlock() + * rq_lock() + * psi_task_change() // old cgroup + * task->cgroups = to + * psi_task_change() // new cgroup + * rq_unlock() + * rq_lock() + * psi_sched_switch() // does deferred updates in new cgroup + * + * Don't rely on the scheduling state. Use psi_flags instead. + */ + task_flags = task->psi_flags; if (task_flags) psi_task_change(task, task_flags, 0); @@ -1034,15 +1078,18 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res) group->avg_next_update = update_averages(group, now); mutex_unlock(&group->avgs_lock); - for (full = 0; full < 2 - (res == PSI_CPU); full++) { - unsigned long avg[3]; - u64 total; + for (full = 0; full < 2; full++) { + unsigned long avg[3] = { 0, }; + u64 total = 0; int w; - for (w = 0; w < 3; w++) - avg[w] = group->avg[res * 2 + full][w]; - total = div_u64(group->total[PSI_AVGS][res * 2 + full], - NSEC_PER_USEC); + /* CPU FULL is undefined at the system level */ + if (!(group == &psi_system && res == PSI_CPU && full)) { + for (w = 0; w < 3; w++) + avg[w] = group->avg[res * 2 + full][w]; + total = div_u64(group->total[PSI_AVGS][res * 2 + full], + NSEC_PER_USEC); + } seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n", full ? 
"full" : "some", @@ -1127,7 +1174,6 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, t->event = 0; t->last_event_time = 0; init_waitqueue_head(&t->event_wait); - kref_init(&t->refcount); mutex_lock(&group->trigger_lock); @@ -1156,15 +1202,19 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, return t; } -static void psi_trigger_destroy(struct kref *ref) +void psi_trigger_destroy(struct psi_trigger *t) { - struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount); - struct psi_group *group = t->group; + struct psi_group *group; struct task_struct *task_to_destroy = NULL; - if (static_branch_likely(&psi_disabled)) + /* + * We do not check psi_disabled since it might have been disabled after + * the trigger got created. + */ + if (!t) return; + group = t->group; /* * Wakeup waiters to stop polling. Can happen if cgroup is deleted * from under a polling process. @@ -1200,9 +1250,9 @@ static void psi_trigger_destroy(struct kref *ref) mutex_unlock(&group->trigger_lock); /* - * Wait for both *trigger_ptr from psi_trigger_replace and - * poll_task RCUs to complete their read-side critical sections - * before destroying the trigger and optionally the poll_task + * Wait for psi_schedule_poll_work RCU to complete its read-side + * critical section before destroying the trigger and optionally the + * poll_task. */ synchronize_rcu(); /* @@ -1219,18 +1269,6 @@ static void psi_trigger_destroy(struct kref *ref) kfree(t); } -void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new) -{ - struct psi_trigger *old = *trigger_ptr; - - if (static_branch_likely(&psi_disabled)) - return; - - rcu_assign_pointer(*trigger_ptr, new); - if (old) - kref_put(&old->refcount, psi_trigger_destroy); -} - __poll_t psi_trigger_poll(void **trigger_ptr, struct file *file, poll_table *wait) { @@ -1240,24 +1278,15 @@ __poll_t psi_trigger_poll(void **trigger_ptr, if (static_branch_likely(&psi_disabled)) return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI; - rcu_read_lock(); - - t = rcu_dereference(*(void __rcu __force **)trigger_ptr); - if (!t) { - rcu_read_unlock(); + t = smp_load_acquire(trigger_ptr); + if (!t) return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI; - } - kref_get(&t->refcount); - - rcu_read_unlock(); poll_wait(file, &t->event_wait, wait); if (cmpxchg(&t->event, 1, 0) == 1) ret |= EPOLLPRI; - kref_put(&t->refcount, psi_trigger_destroy); - return ret; } @@ -1281,14 +1310,24 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf, buf[buf_size - 1] = '\0'; - new = psi_trigger_create(&psi_system, buf, nbytes, res); - if (IS_ERR(new)) - return PTR_ERR(new); - seq = file->private_data; + /* Take seq->lock to protect seq->private from concurrent writes */ mutex_lock(&seq->lock); - psi_trigger_replace(&seq->private, new); + + /* Allow only one trigger per file descriptor */ + if (seq->private) { + mutex_unlock(&seq->lock); + return -EBUSY; + } + + new = psi_trigger_create(&psi_system, buf, nbytes, res); + if (IS_ERR(new)) { + mutex_unlock(&seq->lock); + return PTR_ERR(new); + } + + smp_store_release(&seq->private, new); mutex_unlock(&seq->lock); return nbytes; @@ -1323,7 +1362,7 @@ static int psi_fop_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; - psi_trigger_replace(&seq->private, NULL); + psi_trigger_destroy(seq->private); return single_release(inode, file); } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index dae1e8eaa98329177da6affd4717042bd0826d54..59c3e20943ac42ce803d07f851944c1a5d7f629e 100644 --- 
a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -52,11 +52,8 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) rt_b->rt_period_timer.function = sched_rt_period_timer; } -static void start_rt_bandwidth(struct rt_bandwidth *rt_b) +static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b) { - if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) - return; - raw_spin_lock(&rt_b->rt_runtime_lock); if (!rt_b->rt_period_active) { rt_b->rt_period_active = 1; @@ -75,6 +72,14 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) raw_spin_unlock(&rt_b->rt_runtime_lock); } +static void start_rt_bandwidth(struct rt_bandwidth *rt_b) +{ + if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) + return; + + do_start_rt_bandwidth(rt_b); +} + void init_rt_rq(struct rt_rq *rt_rq) { struct rt_prio_array *array; @@ -944,18 +949,6 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se) return rt_task_of(rt_se)->prio; } -static inline void try_start_rt_bandwidth(struct rt_bandwidth *rt_b) -{ - raw_spin_lock(&rt_b->rt_runtime_lock); - if (!rt_b->rt_period_active) { - rt_b->rt_period_active = 1; - hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period); - hrtimer_start_expires(&rt_b->rt_period_timer, - HRTIMER_MODE_ABS_PINNED_HARD); - } - raw_spin_unlock(&rt_b->rt_runtime_lock); -} - static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) { u64 runtime = sched_rt_runtime(rt_rq); @@ -1042,7 +1035,7 @@ static void update_curr_rt(struct rq *rq) resched_curr(rq); raw_spin_unlock(&rt_rq->rt_runtime_lock); if (exceeded) - try_start_rt_bandwidth(sched_rt_bandwidth(rt_rq)); + do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq)); } } } @@ -2737,10 +2730,12 @@ static int sched_rt_global_validate(void) static void sched_rt_do_global(void) { - raw_spin_lock(&def_rt_bandwidth.rt_runtime_lock); + unsigned long flags; + + raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); def_rt_bandwidth.rt_runtime = global_rt_runtime(); def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); - raw_spin_unlock(&def_rt_bandwidth.rt_runtime_lock); + raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); } int sched_rt_handler(struct ctl_table *table, int write, void *buffer, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d122f1b8e3e67673a2f3e7896ff4ec38e5877451..e41a5207a212edf05ab3804ee1c4c20f2481f58c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -385,10 +385,17 @@ struct cfs_bandwidth { int nr_throttled; u64 throttled_time; +#if !defined(__GENKSYMS__) + u64 burst; + u64 runtime_snap; + int nr_burst; + u64 burst_time; +#else KABI_RESERVE(1) KABI_RESERVE(2) KABI_RESERVE(3) KABI_RESERVE(4) +#endif KABI_RESERVE(5) KABI_RESERVE(6) #endif @@ -626,8 +633,12 @@ struct cfs_rq { #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ +#if !defined(__GENKSYMS__) && defined(CONFIG_QOS_SCHED) + struct list_head qos_throttled_list; +#else KABI_RESERVE(1) KABI_RESERVE(2) +#endif KABI_RESERVE(3) KABI_RESERVE(4) }; @@ -1132,6 +1143,11 @@ static inline int cpu_of(struct rq *rq) } #ifdef CONFIG_QOS_SCHED +enum task_qos_level { + QOS_LEVEL_OFFLINE = -1, + QOS_LEVEL_ONLINE = 0, + QOS_LEVEL_MAX +}; void init_qos_hrtimer(int cpu); #endif diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index 06cf8202c178a059055abf0f782ca342ae83c05b..b8b4e5b2694e94017e6c4e41451ca8bbb9c0a616 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -89,6 +89,9 @@ static inline void psi_enqueue(struct task_struct *p, 
bool wakeup) if (static_branch_likely(&psi_disabled)) return; + if (p->in_memstall) + set |= TSK_MEMSTALL_RUNNING; + if (!wakeup || p->sched_psi_wake_requeue) { if (p->in_memstall) set |= TSK_MEMSTALL; @@ -104,28 +107,24 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup) static inline void psi_dequeue(struct task_struct *p, bool sleep) { - int clear = TSK_RUNNING, set = 0; + int clear = TSK_RUNNING; if (static_branch_likely(&psi_disabled)) return; - if (!sleep) { - if (p->in_memstall) - clear |= TSK_MEMSTALL; - } else { - /* - * When a task sleeps, schedule() dequeues it before - * switching to the next one. Merge the clearing of - * TSK_RUNNING and TSK_ONCPU to save an unnecessary - * psi_task_change() call in psi_sched_switch(). - */ - clear |= TSK_ONCPU; + /* + * A voluntary sleep is a dequeue followed by a task switch. To + * avoid walking all ancestors twice, psi_task_switch() handles + * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU. + * Do nothing here. + */ + if (sleep) + return; - if (p->in_iowait) - set |= TSK_IOWAIT; - } + if (p->in_memstall) + clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING); - psi_task_change(p, clear, set); + psi_task_change(p, clear, 0); } static inline void psi_ttwu_dequeue(struct task_struct *p) @@ -164,14 +163,6 @@ static inline void psi_sched_switch(struct task_struct *prev, psi_task_switch(prev, next, sleep); } -static inline void psi_task_tick(struct rq *rq) -{ - if (static_branch_likely(&psi_disabled)) - return; - - if (unlikely(rq->curr->in_memstall)) - psi_memstall_tick(rq->curr, cpu_of(rq)); -} #else /* CONFIG_PSI */ static inline void psi_enqueue(struct task_struct *p, bool wakeup) {} static inline void psi_dequeue(struct task_struct *p, bool sleep) {} @@ -179,7 +170,6 @@ static inline void psi_ttwu_dequeue(struct task_struct *p) {} static inline void psi_sched_switch(struct task_struct *prev, struct task_struct *next, bool sleep) {} -static inline void psi_task_tick(struct rq *rq) {} #endif /* CONFIG_PSI */ #ifdef CONFIG_SCHED_INFO diff --git a/kernel/smp.c b/kernel/smp.c index a5a87a51e726213ed527e852a5b96073bb53669f..114776d0d11eca6901a088966d582f6865927c86 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "smpboot.h" #include "sched/smp.h" @@ -102,6 +103,20 @@ void __init call_function_init(void) #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG +static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled); + +static int __init csdlock_debug(char *str) +{ + unsigned int val = 0; + + get_option(&str, &val); + if (val) + static_branch_enable(&csdlock_debug_enabled); + + return 1; +} +__setup("csdlock_debug=", csdlock_debug); + static DEFINE_PER_CPU(call_single_data_t *, cur_csd); static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func); static DEFINE_PER_CPU(void *, cur_csd_info); @@ -110,7 +125,7 @@ static DEFINE_PER_CPU(void *, cur_csd_info); static atomic_t csd_bug_count = ATOMIC_INIT(0); /* Record current CSD work for current CPU, NULL to erase. */ -static void csd_lock_record(struct __call_single_data *csd) +static void __csd_lock_record(struct __call_single_data *csd) { if (!csd) { smp_mb(); /* NULL cur_csd after unlock. */ @@ -125,6 +140,12 @@ static void csd_lock_record(struct __call_single_data *csd) /* Or before unlock, as the case may be. 
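An illustrative aside on the csdlock_debug= plumbing above: the helpers stay always-inline and test a static branch, so with debugging off the cost is one predicted-not-taken branch, and the out-of-line debug body is only reached when the boot parameter enabled it. A minimal userspace model of the shape of that split (a plain bool stands in for the static key):

#include <stdbool.h>
#include <stdio.h>

static bool csdlock_debug_enabled;	/* models the static key */

static void __csd_lock_record(int csd)	/* out-of-line slow path */
{
	printf("recording csd %d for debugging\n", csd);
}

/* Fast path: a single cheap test when debugging is off. */
static inline void csd_lock_record(int csd)
{
	if (csdlock_debug_enabled)
		__csd_lock_record(csd);
}

int main(void)
{
	csd_lock_record(1);		/* no-op: debugging disabled */
	csdlock_debug_enabled = true;	/* models booting with csdlock_debug=1 */
	csd_lock_record(2);
	return 0;
}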
*/ } +static __always_inline void csd_lock_record(struct __call_single_data *csd) +{ + if (static_branch_unlikely(&csdlock_debug_enabled)) + __csd_lock_record(csd); +} + static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd) { unsigned int csd_type; @@ -204,7 +225,7 @@ static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd * previous function call. For multi-cpu calls its even more interesting * as we'll have to ensure no other cpu is observing our csd. */ -static __always_inline void csd_lock_wait(struct __call_single_data *csd) +static void __csd_lock_wait(struct __call_single_data *csd) { int bug_id = 0; u64 ts0, ts1; @@ -218,6 +239,15 @@ static __always_inline void csd_lock_wait(struct __call_single_data *csd) smp_acquire__after_ctrl_dep(); } +static __always_inline void csd_lock_wait(struct __call_single_data *csd) +{ + if (static_branch_unlikely(&csdlock_debug_enabled)) { + __csd_lock_wait(csd); + return; + } + + smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK)); +} #else static void csd_lock_record(struct __call_single_data *csd) { @@ -346,7 +376,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline) /* There shouldn't be any pending callbacks on an offline CPU. */ if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) && - !warned && !llist_empty(head))) { + !warned && entry != NULL)) { warned = true; WARN(1, "IPI on offline CPU %d\n", smp_processor_id()); diff --git a/kernel/stackleak.c b/kernel/stackleak.c index ce161a8e8d97585c8b1152fec1ea088343cf15d8..dd07239ddff9f4ba562b7e46d9fcc4af0679fd7e 100644 --- a/kernel/stackleak.c +++ b/kernel/stackleak.c @@ -48,7 +48,7 @@ int stack_erasing_sysctl(struct ctl_table *table, int write, #define skip_erasing() false #endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */ -asmlinkage void notrace stackleak_erase(void) +asmlinkage void noinstr stackleak_erase(void) { /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */ unsigned long kstack_ptr = current->lowest_stack; @@ -102,9 +102,8 @@ asmlinkage void notrace stackleak_erase(void) /* Reset the 'lowest_stack' value for the next syscall */ current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64; } -NOKPROBE_SYMBOL(stackleak_erase); -void __used __no_caller_saved_registers notrace stackleak_track_stack(void) +void __used __no_caller_saved_registers noinstr stackleak_track_stack(void) { unsigned long sp = current_stack_pointer; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 89ef0c1a16429b3a0f8f30be0e50489c33bb73fb..91812d673c6ba541b1cb1fd9e81870f73c1a56bf 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -243,6 +243,10 @@ static int bpf_stats_handler(struct ctl_table *table, int write, return ret; } +void __weak unpriv_ebpf_notify(int new_state) +{ +} + static int bpf_unpriv_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { @@ -260,6 +264,9 @@ static int bpf_unpriv_handler(struct ctl_table *table, int write, return -EPERM; *(int *)table->data = unpriv_enable; } + + unpriv_ebpf_notify(unpriv_enable); + return ret; } #endif /* CONFIG_BPF_SYSCALL && CONFIG_SYSCTL */ @@ -2699,7 +2706,7 @@ static struct ctl_table kern_table[] = { .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = &one_thousand, + .extra1 = &one_hundred, .extra2 = &hundred_thousand, }, { diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 
2d7899700b7ab40c52f8dafd9d3af6e6e70b92ef..d0440f5d5b458d0cef183e04d27af7e80fe9c518 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -164,7 +164,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) */ if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) { #ifdef CONFIG_NO_HZ_FULL - WARN_ON(tick_nohz_full_running); + WARN_ON_ONCE(tick_nohz_full_running); #endif tick_do_timer_cpu = cpu; } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index cc4dc2857a8706c292949dc67541c1e6af9536fe..72cad9bf19d70e71fd6345d7a9fc4b2701ff9cef 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -46,9 +46,16 @@ DEFINE_RAW_SPINLOCK(timekeeper_lock); * cache line. */ static struct { +#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE + u64 padding[8]; +#endif seqcount_raw_spinlock_t seq; struct timekeeper timekeeper; +#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE +} tk_core ____cacheline_aligned_128 = { +#else } tk_core ____cacheline_aligned = { +#endif .seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock), }; diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 351420c230642171efbcef48aa966ac7f37f0805..f7d3a108e27c9038ebb39c726f11b399e2bb4944 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1738,11 +1738,14 @@ static inline void __run_timers(struct timer_base *base) time_after_eq(jiffies, base->next_expiry)) { levels = collect_expired_timers(base, heads); /* - * The only possible reason for not finding any expired - * timer at this clk is that all matching timers have been - * dequeued. + * The two possible reasons for not finding any expired + * timer at this clk are that all matching timers have been + * dequeued or no timer has been queued since + * base::next_expiry was set to base::clk + + * NEXT_TIMER_MAX_DELTA. 
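A small aside on the __run_timers() warning above, which now also requires base->timers_pending: finding nothing expired is legitimate when no timer has been queued since next_expiry was parked at clk + NEXT_TIMER_MAX_DELTA, so only the combination of all three facts indicates a bug. The new condition as a standalone predicate:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors WARN_ON_ONCE(!levels && !base->next_expiry_recalc && base->timers_pending):
 * with no timers queued, an empty expiry scan is expected, not a bug. */
static bool should_warn(int levels, bool next_expiry_recalc, bool timers_pending)
{
	return !levels && !next_expiry_recalc && timers_pending;
}

int main(void)
{
	printf("empty wheel:   warn=%d\n", should_warn(0, false, false));
	printf("missing timer: warn=%d\n", should_warn(0, false, true));
	return 0;
}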
*/ - WARN_ON_ONCE(!levels && !base->next_expiry_recalc); + WARN_ON_ONCE(!levels && !base->next_expiry_recalc + && base->timers_pending); base->clk++; base->next_expiry = __next_timer_interrupt(base); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 1228e56853e0c1384b2dcb20b99badf7a841adbb..81a7b622c691703e8dca0d6f5cd47ce2916e1f37 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -342,7 +342,7 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = { .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, }; @@ -545,7 +545,7 @@ static const struct bpf_func_proto bpf_trace_printk_proto = { .func = bpf_trace_printk, .gpl_only = true, .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_MEM, + .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, }; @@ -754,9 +754,9 @@ static const struct bpf_func_proto bpf_seq_printf_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_BTF_ID, .arg1_btf_id = &btf_seq_file_ids[0], - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, - .arg4_type = ARG_PTR_TO_MEM_OR_NULL, + .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; @@ -771,7 +771,7 @@ static const struct bpf_func_proto bpf_seq_write_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_BTF_ID, .arg1_btf_id = &btf_seq_file_ids[0], - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE_OR_ZERO, }; @@ -795,7 +795,7 @@ static const struct bpf_func_proto bpf_seq_printf_btf_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_BTF_ID, .arg1_btf_id = &btf_seq_file_ids[0], - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, }; @@ -956,7 +956,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; @@ -1229,7 +1229,7 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_MEM, .arg2_type = ARG_CONST_SIZE, - .arg3_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg4_type = ARG_CONST_SIZE, .arg5_type = ARG_ANYTHING, }; @@ -1404,7 +1404,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; @@ -1517,9 +1517,6 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto = { BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, void *, buf, u32, size, u64, flags) { -#ifndef CONFIG_X86 - return -ENOENT; -#else static const u32 br_entry_size = sizeof(struct perf_branch_entry); struct perf_branch_stack *br_stack = ctx->data->br_stack; u32 to_copy; @@ -1528,7 +1525,7 @@ BPF_CALL_4(bpf_read_branch_records, struct bpf_perf_event_data_kern *, ctx, return -EINVAL; if (unlikely(!br_stack)) - return -EINVAL; + return -ENOENT; if (flags & BPF_F_GET_BRANCH_RECORDS_SIZE) return br_stack->nr * br_entry_size; @@ -1540,7 +1537,6 @@ BPF_CALL_4(bpf_read_branch_records, 
struct bpf_perf_event_data_kern *, ctx, memcpy(buf, br_stack->entries, to_copy); return to_copy; -#endif } static const struct bpf_func_proto bpf_read_branch_records_proto = { @@ -1626,7 +1622,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; @@ -1680,7 +1676,7 @@ static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = { .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, }; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index aced2292db7e4fe0516d88d896fe4ecd30fc305d..b3a0ee21d31c0b47cb72a7e5e9d436b40806ccc6 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -233,7 +233,7 @@ static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; static int __init set_trace_boot_options(char *str) { strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); - return 0; + return 1; } __setup("trace_options=", set_trace_boot_options); @@ -244,12 +244,16 @@ static int __init set_trace_boot_clock(char *str) { strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); trace_boot_clock = trace_boot_clock_buf; - return 0; + return 1; } __setup("trace_clock=", set_trace_boot_clock); static int __init set_tracepoint_printk(char *str) { + /* Ignore the "tp_printk_stop_on_boot" param */ + if (*str == '_') + return 0; + if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) tracepoint_printk = 1; return 1; @@ -1480,10 +1484,12 @@ static int __init set_buf_size(char *str) if (!str) return 0; buf_size = memparse(str, &str); - /* nr_entries can not be zero */ - if (buf_size == 0) - return 0; - trace_buf_size = buf_size; + /* + * nr_entries can not be zero and the startup + * tests require some buffer space. Therefore + * ensure we have at least 4096 bytes of buffer. 
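The set_trace_boot_options() and set_trace_boot_clock() fixes above come down to __setup() return semantics: returning 1 marks the option as consumed, while returning 0 makes the kernel treat it as unrecognized and hand it on to init. A minimal sketch of the convention; set_demo_option() and demo_buf are hypothetical:

static char demo_buf[64] __initdata;

static int __init set_demo_option(char *str)
{
	if (!str)
		return 0;	/* not handled, let it fall through */
	strlcpy(demo_buf, str, sizeof(demo_buf));
	return 1;		/* consumed, keep it away from init */
}
__setup("demo_option=", set_demo_option);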
+ */ + trace_buf_size = max(4096UL, buf_size); return 1; } __setup("trace_buf_size=", set_buf_size); @@ -7358,7 +7364,8 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) err = kzalloc(sizeof(*err), GFP_KERNEL); if (!err) err = ERR_PTR(-ENOMEM); - tr->n_err_log_entries++; + else + tr->n_err_log_entries++; return err; } diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 78a678eeb140935354283529066bc75c4fd4863e..a255ffbe342f3a519a75fcbd1ffe69503db10767 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -5,6 +5,7 @@ * Copyright (C) 2009 Tom Zanussi */ +#include #include #include #include @@ -654,6 +655,52 @@ DEFINE_EQUALITY_PRED(32); DEFINE_EQUALITY_PRED(16); DEFINE_EQUALITY_PRED(8); +/* user space strings temp buffer */ +#define USTRING_BUF_SIZE 1024 + +struct ustring_buffer { + char buffer[USTRING_BUF_SIZE]; +}; + +static __percpu struct ustring_buffer *ustring_per_cpu; + +static __always_inline char *test_string(char *str) +{ + struct ustring_buffer *ubuf; + char *kstr; + + if (!ustring_per_cpu) + return NULL; + + ubuf = this_cpu_ptr(ustring_per_cpu); + kstr = ubuf->buffer; + + /* For safety, do not trust the string pointer */ + if (!strncpy_from_kernel_nofault(kstr, str, USTRING_BUF_SIZE)) + return NULL; + return kstr; +} + +static __always_inline char *test_ustring(char *str) +{ + struct ustring_buffer *ubuf; + char __user *ustr; + char *kstr; + + if (!ustring_per_cpu) + return NULL; + + ubuf = this_cpu_ptr(ustring_per_cpu); + kstr = ubuf->buffer; + + /* user space address? */ + ustr = (char __user *)str; + if (!strncpy_from_user_nofault(kstr, ustr, USTRING_BUF_SIZE)) + return NULL; + + return kstr; +} + /* Filter predicate for fixed sized arrays of characters */ static int filter_pred_string(struct filter_pred *pred, void *event) { @@ -667,19 +714,43 @@ static int filter_pred_string(struct filter_pred *pred, void *event) return match; } -/* Filter predicate for char * pointers */ -static int filter_pred_pchar(struct filter_pred *pred, void *event) +static __always_inline int filter_pchar(struct filter_pred *pred, char *str) { - char **addr = (char **)(event + pred->offset); int cmp, match; - int len = strlen(*addr) + 1; /* including tailing '\0' */ + int len; - cmp = pred->regex.match(*addr, &pred->regex, len); + len = strlen(str) + 1; /* including tailing '\0' */ + cmp = pred->regex.match(str, &pred->regex, len); match = cmp ^ pred->not; return match; } +/* Filter predicate for char * pointers */ +static int filter_pred_pchar(struct filter_pred *pred, void *event) +{ + char **addr = (char **)(event + pred->offset); + char *str; + + str = test_string(*addr); + if (!str) + return 0; + + return filter_pchar(pred, str); +} + +/* Filter predicate for char * pointers in user space*/ +static int filter_pred_pchar_user(struct filter_pred *pred, void *event) +{ + char **addr = (char **)(event + pred->offset); + char *str; + + str = test_ustring(*addr); + if (!str) + return 0; + + return filter_pchar(pred, str); +} /* * Filter predicate for dynamic sized arrays of characters. 
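test_string() and test_ustring() above bounce an untrusted string pointer through a per-CPU buffer using the _nofault copy helpers, so a bad pointer degrades into "no match" instead of a fault in the filter path. The core of the pattern as a sketch; bounce_copy() is a hypothetical stand-in, slightly stricter than the patch in also treating negative returns as failure:

static char *bounce_copy(char *unsafe)
{
	struct ustring_buffer *ubuf;

	if (!ustring_per_cpu)	/* per-CPU buffer may not be allocated */
		return NULL;

	ubuf = this_cpu_ptr(ustring_per_cpu);
	/* never dereference @unsafe directly, copy defensively */
	if (strncpy_from_kernel_nofault(ubuf->buffer, unsafe,
					USTRING_BUF_SIZE) <= 0)
		return NULL;
	return ubuf->buffer;
}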
@@ -1158,6 +1229,7 @@ static int parse_pred(const char *str, void *data, struct filter_pred *pred = NULL; char num_buf[24]; /* Big enough to hold an address */ char *field_name; + bool ustring = false; char q; u64 val; int len; @@ -1192,6 +1264,12 @@ static int parse_pred(const char *str, void *data, return -EINVAL; } + /* See if the field is a user space string */ + if ((len = str_has_prefix(str + i, ".ustring"))) { + ustring = true; + i += len; + } + while (isspace(str[i])) i++; @@ -1320,8 +1398,20 @@ static int parse_pred(const char *str, void *data, } else if (field->filter_type == FILTER_DYN_STRING) pred->fn = filter_pred_strloc; - else - pred->fn = filter_pred_pchar; + else { + + if (!ustring_per_cpu) { + /* Once allocated, keep it around for good */ + ustring_per_cpu = alloc_percpu(struct ustring_buffer); + if (!ustring_per_cpu) + goto err_mem; + } + + if (ustring) + pred->fn = filter_pred_pchar_user; + else + pred->fn = filter_pred_pchar; + } /* go past the last quote */ i++; @@ -1387,6 +1477,9 @@ static int parse_pred(const char *str, void *data, err_free: kfree(pred); return -EINVAL; +err_mem: + kfree(pred); + return -ENOMEM; } enum { diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 003e5f37861e3621b56ed552858855cabf262450..eb7200699cf664ef852cfe7257ed45f704d98b17 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1985,9 +1985,9 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, /* * For backward compatibility, if field_name * was "cpu", then we treat this the same as - * common_cpu. + * common_cpu. This also works for "CPU". */ - if (strcmp(field_name, "cpu") == 0) { + if (field && field->filter_type == FILTER_CPU) { *flags |= HIST_FIELD_FL_CPU; } else { hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, @@ -2154,6 +2154,8 @@ static struct hist_field *parse_unary(struct hist_trigger_data *hist_data, (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS); expr->fn = hist_field_unary_minus; expr->operands[0] = operand1; + expr->size = operand1->size; + expr->is_signed = operand1->is_signed; expr->operator = FIELD_OP_UNARY_MINUS; expr->name = expr_str(expr, 0); expr->type = kstrdup(operand1->type, GFP_KERNEL); @@ -2293,6 +2295,7 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data, /* The operand sizes should be the same, so just pick one */ expr->size = operand1->size; + expr->is_signed = operand1->is_signed; expr->operator = field_op; expr->name = expr_str(expr, 0); @@ -3506,6 +3509,7 @@ static int trace_action_create(struct hist_trigger_data *hist_data, var_ref_idx = find_var_ref_idx(hist_data, var_ref); if (WARN_ON(var_ref_idx < 0)) { + kfree(p); ret = var_ref_idx; goto err; } @@ -4361,7 +4365,7 @@ static int create_tracing_map_fields(struct hist_trigger_data *hist_data) if (hist_field->flags & HIST_FIELD_FL_STACKTRACE) cmp_fn = tracing_map_cmp_none; - else if (!field) + else if (!field || hist_field->flags & HIST_FIELD_FL_CPU) cmp_fn = tracing_map_cmp_num(hist_field->size, hist_field->is_signed); else if (is_string_field(field)) diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index f725802160c0bc05fab47c29f72234219b6612cc..d0309de2f84fea5bfce5cbe4a8321be92c547057 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -940,6 +940,16 @@ static void traceon_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { + struct trace_event_file *file = 
data->private_data; + + if (file) { + if (tracer_tracing_is_on(file->tr)) + return; + + tracer_tracing_on(file->tr); + return; + } + if (tracing_is_on()) return; @@ -950,8 +960,15 @@ static void traceon_count_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { - if (tracing_is_on()) - return; + struct trace_event_file *file = data->private_data; + + if (file) { + if (tracer_tracing_is_on(file->tr)) + return; + } else { + if (tracing_is_on()) + return; + } if (!data->count) return; @@ -959,13 +976,26 @@ traceon_count_trigger(struct event_trigger_data *data, void *rec, if (data->count != -1) (data->count)--; - tracing_on(); + if (file) + tracer_tracing_on(file->tr); + else + tracing_on(); } static void traceoff_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { + struct trace_event_file *file = data->private_data; + + if (file) { + if (!tracer_tracing_is_on(file->tr)) + return; + + tracer_tracing_off(file->tr); + return; + } + if (!tracing_is_on()) return; @@ -976,8 +1006,15 @@ static void traceoff_count_trigger(struct event_trigger_data *data, void *rec, struct ring_buffer_event *event) { - if (!tracing_is_on()) - return; + struct trace_event_file *file = data->private_data; + + if (file) { + if (!tracer_tracing_is_on(file->tr)) + return; + } else { + if (!tracing_is_on()) + return; + } if (!data->count) return; @@ -985,7 +1022,10 @@ traceoff_count_trigger(struct event_trigger_data *data, void *rec, if (data->count != -1) (data->count)--; - tracing_off(); + if (file) + tracer_tracing_off(file->tr); + else + tracing_off(); } static int diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d2780151f78e2d317d712beee773848d2a34da67..a15de1c183775f06a5b4003381463695ac76a805 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -31,7 +31,7 @@ static int __init set_kprobe_boot_events(char *str) strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE); disable_tracing_selftest("running kprobe events"); - return 0; + return 1; } __setup("kprobe_event=", set_kprobe_boot_events); @@ -1183,15 +1183,18 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) { struct dyn_event *ev = v; struct trace_kprobe *tk; + unsigned long nmissed; if (!is_trace_kprobe(ev)) return 0; tk = to_trace_kprobe(ev); + nmissed = trace_kprobe_is_return(tk) ? 
+ tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed; seq_printf(m, " %-44s %15lu %15lu\n", trace_probe_name(&tk->tp), trace_kprobe_nhit(tk), - tk->rp.kp.nmissed); + nmissed); return 0; } diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 257ffb993ea2358bd81412f305f5e28f6982c060..fd2f7a052fdd94d6871e13420a73d27287e22c2a 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -38,11 +38,10 @@ void bacct_add_tsk(struct user_namespace *user_ns, stats->ac_btime = clamp_t(time64_t, btime, 0, U32_MAX); stats->ac_btime64 = btime; - if (thread_group_leader(tsk)) { + if (tsk->flags & PF_EXITING) stats->ac_exitcode = tsk->exit_code; - if (tsk->flags & PF_FORKNOEXEC) - stats->ac_flag |= AFORK; - } + if (thread_group_leader(tsk) && (tsk->flags & PF_FORKNOEXEC)) + stats->ac_flag |= AFORK; if (tsk->flags & PF_SUPERPRIV) stats->ac_flag |= ASU; if (tsk->flags & PF_DUMPCORE) diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index 0ef8f65bd2d71188bbe314b11eda366fe87ff889..249ed32591449d88b59e5eea1cc321514429717c 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -54,6 +54,7 @@ static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe, bit += page->index; set_bit(bit, wqueue->notes_bitmap); + generic_pipe_buf_release(pipe, buf); } // No try_steal function => no stealing @@ -112,7 +113,7 @@ static bool post_one_notification(struct watch_queue *wqueue, buf->offset = offset; buf->len = len; buf->flags = PIPE_BUF_FLAG_WHOLE; - pipe->head = head + 1; + smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */ if (!test_and_clear_bit(note, wqueue->notes_bitmap)) { spin_unlock_irq(&pipe->rd_wait.lock); @@ -243,7 +244,8 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) goto error; } - ret = pipe_resize_ring(pipe, nr_notes); + nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE; + ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes)); if (ret < 0) goto error; @@ -268,11 +270,11 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) wqueue->notes = pages; wqueue->notes_bitmap = bitmap; wqueue->nr_pages = nr_pages; - wqueue->nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE; + wqueue->nr_notes = nr_notes; return 0; error_p: - for (i = 0; i < nr_pages; i++) + while (--i >= 0) __free_page(pages[i]); kfree(pages); error: @@ -320,7 +322,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe, tf[i].info_mask & WATCH_INFO_LENGTH) goto err_filter; /* Ignore any unknown types */ - if (tf[i].type >= sizeof(wfilter->type_filter) * 8) + if (tf[i].type >= WATCH_TYPE__NR) continue; nr_filter++; } @@ -336,7 +338,7 @@ long watch_queue_set_filter(struct pipe_inode_info *pipe, q = wfilter->filters; for (i = 0; i < filter.nr_filters; i++) { - if (tf[i].type >= sizeof(wfilter->type_filter) * BITS_PER_LONG) + if (tf[i].type >= WATCH_TYPE__NR) continue; q->type = tf[i].type; @@ -371,6 +373,8 @@ static void __put_watch_queue(struct kref *kref) for (i = 0; i < wqueue->nr_pages; i++) __free_page(wqueue->notes[i]); + kfree(wqueue->notes); + bitmap_free(wqueue->notes_bitmap); wfilter = rcu_access_pointer(wqueue->filter); if (wfilter) @@ -395,6 +399,7 @@ static void free_watch(struct rcu_head *rcu) put_watch_queue(rcu_access_pointer(watch->queue)); atomic_dec(&watch->cred->user->nr_watches); put_cred(watch->cred); + kfree(watch); } static void __put_watch(struct kref *kref) @@ -566,7 +571,7 @@ void watch_queue_clear(struct watch_queue *wqueue) rcu_read_lock(); spin_lock_bh(&wqueue->lock); - /* Prevent new additions and prevent 
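post_one_notification() in the watch_queue.c hunk above now publishes a filled slot with smp_store_release(), so every pipe_buffer field is guaranteed visible before the head index moves; the reader is expected to pair that with an acquire load, per the "vs pipe_read()" comment. The pairing reduced to a sketch (the consumer side is conceptual, not copied from fs/pipe.c):

/* producer, simplified from post_one_notification() */
buf->page   = page;
buf->offset = offset;
buf->len    = len;
smp_store_release(&pipe->head, head + 1);	/* publish after init */

/* consumer, conceptual pipe_read() side */
unsigned int head = smp_load_acquire(&pipe->head);
if (pipe->tail != head) {
	/* all buf fields written before the release are visible here */
}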
notifications from happening */ + /* Prevent new notifications from being stored. */ wqueue->defunct = true; while (!hlist_empty(&wqueue->watches)) { diff --git a/kernel/workqueue.c b/kernel/workqueue.c index af95ef023e1ab3e17c9e1cd823c62c4d77e97dd4..6bef482a152bd94cd6001ade674e94af19f85e14 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -850,8 +850,17 @@ void wq_worker_running(struct task_struct *task) if (!worker->sleeping) return; + + /* + * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check + * and the nr_running increment below, we may ruin the nr_running reset + * and leave with an unexpected pool->nr_running == 1 on the newly unbound + * pool. Protect against such race. + */ + preempt_disable(); if (!(worker->flags & WORKER_NOT_RUNNING)) atomic_inc(&worker->pool->nr_running); + preempt_enable(); worker->sleeping = 0; } diff --git a/lib/Kconfig b/lib/Kconfig index b46a9fd122c81acabd888e8b250e78be4f575ef6..9216e24e516469944e0938b62461d6174a9fdfab 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -45,7 +45,6 @@ config BITREVERSE config HAVE_ARCH_BITREVERSE bool default n - depends on BITREVERSE help This option enables the use of hardware bit-reversal instructions on architectures which support such operations. diff --git a/lib/crc64.c b/lib/crc64.c index 47cfa054827f3df027873dae9e7eb37c13ae167c..9f852a89ee2a1e4858b40d04373395e385a1d66c 100644 --- a/lib/crc64.c +++ b/lib/crc64.c @@ -37,7 +37,7 @@ MODULE_LICENSE("GPL v2"); /** * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64 * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation, - or the previous crc64 value if computing incrementally. + * or the previous crc64 value if computing incrementally. * @p: pointer to buffer over which CRC64 is run * @len: length of buffer @p */ diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 545ccbddf6a1da69f56dda720ab396012ce0a81d..14c032de276e6bf71b745ea25bb820f5c28a5580 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -128,6 +128,3 @@ config CRYPTO_LIB_CHACHA20POLY1305 config CRYPTO_LIB_SHA256 tristate - -config CRYPTO_LIB_SM4 - tristate diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 73205ed269bad635d2d057272d01abca4b2dd3f6..3a435629d9ce9c4806f75068d1891dad411556e6 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -38,9 +38,6 @@ libpoly1305-y += poly1305.o obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o libsha256-y := sha256.o -obj-$(CONFIG_CRYPTO_LIB_SM4) += libsm4.o -libsm4-y := sm4.o - ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y) libblake2s-y += blake2s-selftest.o libchacha20poly1305-y += chacha20poly1305-selftest.o diff --git a/lib/iov_iter.c b/lib/iov_iter.c index b364231b5fc8cc629f3cdf496e2b3a72ec5efbd3..1b0a349fbcd926b4e49e48728c545395e0ae6bf5 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -407,6 +407,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by return 0; buf->ops = &page_cache_pipe_buf_ops; + buf->flags = 0; get_page(page); buf->page = page; buf->offset = offset; @@ -543,6 +544,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size, break; buf->ops = &default_pipe_buf_ops; + buf->flags = 0; buf->page = page; buf->offset = 0; buf->len = min_t(ssize_t, left, PAGE_SIZE); diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c index 0dd434e40487cf9ef032932369280fc3300520ad..d7b3fe4d5f240b8d3d0fcf21663afc62848362d1 100644 --- a/lib/kunit/try-catch.c +++ b/lib/kunit/try-catch.c @@ -52,7 +52,7 @@ static unsigned long kunit_test_timeout(void) * If 
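The wq_worker_running() hunk above closes a preemption window: a worker preempted between the WORKER_NOT_RUNNING test and the increment can race with unbind_workers() resetting pool->nr_running, and the late increment then leaves a stale count behind. The interleaving being excluded, sketched from the patch's own comment:

  wq_worker_running() on CPU N          unbind_workers() for that pool
  ----------------------------------    ---------------------------------
  sees !(flags & WORKER_NOT_RUNNING)
        <preempted>                     sets WORKER_NOT_RUNNING on workers
                                        resets pool->nr_running to 0
  atomic_inc(&pool->nr_running)         pool->nr_running now stuck at 1

preempt_disable() across the check-plus-increment removes that preemption point.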
tests timeout due to exceeding sysctl_hung_task_timeout_secs, * the task will be killed and an oops generated. */ - return 300 * MSEC_PER_SEC; /* 5 min */ + return 300 * msecs_to_jiffies(MSEC_PER_SEC); /* 5 min */ } void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context) @@ -78,6 +78,7 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context) if (time_remaining == 0) { kunit_err(test, "try timed out\n"); try_catch->try_result = -ETIMEDOUT; + kthread_stop(task_struct); } exit_code = try_catch->try_result; diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 8a7724a6ce2fb451f0231f1a6ef6c05c11fa4fbf..5b6705c4b2d26321f25decbe2c5d8d91dfd94056 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c @@ -271,8 +271,12 @@ static FORCE_INLINE int LZ4_decompress_generic( ip += length; op += length; - /* Necessarily EOF, due to parsing restrictions */ - if (!partialDecoding || (cpy == oend)) + /* Necessarily EOF when !partialDecoding. + * When partialDecoding, it is EOF if we've either + * filled the output buffer or + * can't proceed with reading an offset for following match. + */ + if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2))) break; } else { /* may overwrite up to WILDCOPYLENGTH beyond cpy */ diff --git a/lib/mpi/mpi-mod.c b/lib/mpi/mpi-mod.c index 47bc59edd4ff939f961389bda33ede24917f53ec..54fcc01564d9dc6f1a1ae5b58ad820e503f64fbe 100644 --- a/lib/mpi/mpi-mod.c +++ b/lib/mpi/mpi-mod.c @@ -40,6 +40,8 @@ mpi_barrett_t mpi_barrett_init(MPI m, int copy) mpi_normalize(m); ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; if (copy) { ctx->m = mpi_copy(m); diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index a4c7cd74cff589dcedc2efc57fa6238ea5e00006..4fb7700a741bdf289eda14ff0d2a6d27c9b8ea10 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -4,6 +4,8 @@ # from userspace. # +pound := \# + CC = gcc OPTFLAGS = -O2 # Adjust as desired CFLAGS = -I.. 
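The kunit_test_timeout() fix above is a units bug: the value feeds a jiffies-based wait (roughly wait_for_completion_timeout(&try_catch->try_completion, kunit_test_timeout())), but 300 * MSEC_PER_SEC is a raw millisecond count. A quick unit check, with HZ = 250 taken purely for concreteness:

/*
 * old: 300 * MSEC_PER_SEC                   == 300000, passed as jiffies;
 *      at HZ=250 that is 300000 / 250      == 1200 s, i.e. 20 minutes
 * new: 300 * msecs_to_jiffies(MSEC_PER_SEC)
 *      == 300 * 250                         == 75000 jiffies == 300 s
 */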
-I ../../../include -g $(OPTFLAGS) @@ -42,7 +44,7 @@ else ifeq ($(HAS_NEON),yes) OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1 else - HAS_ALTIVEC := $(shell printf '\#include \nvector int a;\n' |\ + HAS_ALTIVEC := $(shell printf '$(pound)include \nvector int a;\n' |\ gcc -c -x c - >/dev/null && rm ./-.o && echo yes) ifeq ($(HAS_ALTIVEC),yes) CFLAGS += -I../../../arch/powerpc/include diff --git a/lib/raid6/test/test.c b/lib/raid6/test/test.c index a3cf071941ab42e7b9438f8c50c46a876a914008..841a55242abaaede1c37746be8754567b5d3c270 100644 --- a/lib/raid6/test/test.c +++ b/lib/raid6/test/test.c @@ -19,7 +19,6 @@ #define NDISKS 16 /* Including P and Q */ const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); -struct raid6_calls raid6_call; char *dataptrs[NDISKS]; char data[NDISKS][PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 80a78877bd939714ab56d9a155b1e490e915548f..a85613068d6019215e2ee4c262a8873589e168b8 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -965,9 +965,33 @@ static long dmirror_fops_unlocked_ioctl(struct file *filp, return 0; } +static int dmirror_fops_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned long addr; + + for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { + struct page *page; + int ret; + + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + return -ENOMEM; + + ret = vm_insert_page(vma, addr, page); + if (ret) { + __free_page(page); + return ret; + } + put_page(page); + } + + return 0; +} + static const struct file_operations dmirror_fops = { .open = dmirror_fops_open, .release = dmirror_fops_release, + .mmap = dmirror_fops_mmap, .unlocked_ioctl = dmirror_fops_unlocked_ioctl, .llseek = default_llseek, .owner = THIS_MODULE, diff --git a/lib/test_kmod.c b/lib/test_kmod.c index eab52770070d63d4266f4054ac2583e896ba9804..c637f6b5053a902650a5babcafd07f685af5ae9e 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c @@ -1155,6 +1155,7 @@ static struct kmod_test_device *register_test_dev_kmod(void) if (ret) { pr_err("could not register misc device: %d\n", ret); free_test_dev_kmod(test_dev); + test_dev = NULL; goto out; } diff --git a/lib/test_lockup.c b/lib/test_lockup.c index f1a020bcc763ed593b583229b39e42f7028e439a..78a630bbd03dfac16a45141953cba13c6a1a7e39 100644 --- a/lib/test_lockup.c +++ b/lib/test_lockup.c @@ -417,9 +417,14 @@ static bool test_kernel_ptr(unsigned long addr, int size) return false; /* should be at least readable kernel address */ - if (access_ok(ptr, 1) || - access_ok(ptr + size - 1, 1) || - get_kernel_nofault(buf, ptr) || + if (!IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) && + (access_ok((void __user *)ptr, 1) || + access_ok((void __user *)ptr + size - 1, 1))) { + pr_err("user space ptr invalid in kernel: %#lx\n", addr); + return true; + } + + if (get_kernel_nofault(buf, ptr) || get_kernel_nofault(buf, ptr + size - 1)) { pr_err("invalid kernel ptr: %#lx\n", addr); return true; diff --git a/lib/test_meminit.c b/lib/test_meminit.c index e4f706a404b3a1f45b3443b4f8de0fbdd15107f8..3ca717f1139774db6b64df470dee4938bba2d7b4 100644 --- a/lib/test_meminit.c +++ b/lib/test_meminit.c @@ -337,6 +337,7 @@ static int __init do_kmem_cache_size_bulk(int size, int *total_failures) if (num) kmem_cache_free_bulk(c, num, objects); } + kmem_cache_destroy(c); *total_failures += fail; return 1; } diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c index 
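dmirror_fops_mmap() in the test_hmm.c hunk above leans on vm_insert_page() taking its own reference on the page: once the page is in the page table, the caller's allocation reference can be dropped, and only on insertion failure does the caller still own the page outright. The refcount flow, annotated as a sketch:

page = alloc_page(GFP_KERNEL | __GFP_ZERO);	/* refcount == 1, ours */
if (!page)
	return -ENOMEM;

ret = vm_insert_page(vma, addr, page);	/* the mapping takes its own ref */
if (ret) {
	__free_page(page);		/* still the sole owner, free it */
	return ret;
}
put_page(page);				/* drop ours, the mapping's
					 * reference keeps the page alive */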
9ea10adf7a66f2cd2e2ec60de738e4fa5262cf38..b1d0a6ecfe1b8ac3f1c3072b77ea8c71f38d9c6d 100644 --- a/lib/test_ubsan.c +++ b/lib/test_ubsan.c @@ -89,16 +89,6 @@ static void test_ubsan_misaligned_access(void) *ptr = val; } -static void test_ubsan_object_size_mismatch(void) -{ - /* "((aligned(8)))" helps this not into be misaligned for ptr-access. */ - volatile int val __aligned(8) = 4; - volatile long long *ptr, val2; - - ptr = (long long *)&val; - val2 = *ptr; -} - static const test_ubsan_fp test_ubsan_array[] = { test_ubsan_add_overflow, test_ubsan_sub_overflow, @@ -110,7 +100,6 @@ static const test_ubsan_fp test_ubsan_array[] = { test_ubsan_load_invalid_value, //test_ubsan_null_ptr_deref, /* exclude it because there is a crash */ test_ubsan_misaligned_access, - test_ubsan_object_size_mismatch, }; static int __init test_ubsan_init(void) diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 8b1c318189ce801a0935133b4882556fede5bfd2..e77d4856442c3f750434e37819e13688d210048e 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1463,6 +1463,25 @@ static noinline void check_create_range_4(struct xarray *xa, XA_BUG_ON(xa, !xa_empty(xa)); } +static noinline void check_create_range_5(struct xarray *xa, + unsigned long index, unsigned int order) +{ + XA_STATE_ORDER(xas, xa, index, order); + unsigned int i; + + xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); + + for (i = 0; i < order + 10; i++) { + do { + xas_lock(&xas); + xas_create_range(&xas); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + } + + xa_destroy(xa); +} + static noinline void check_create_range(struct xarray *xa) { unsigned int order; @@ -1490,6 +1509,9 @@ static noinline void check_create_range(struct xarray *xa) check_create_range_4(xa, (3U << order) + 1, order); check_create_range_4(xa, (3U << order) - 1, order); check_create_range_4(xa, (1U << 24) + 1, order); + + check_create_range_5(xa, 0, order); + check_create_range_5(xa, (1U << order), order); } check_create_range_3(); diff --git a/lib/xarray.c b/lib/xarray.c index ed775dee1074c99e798ab55a56eaa73754783b76..75da19a7a93348ae94e1608ef75261b27024a029 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -722,6 +722,8 @@ void xas_create_range(struct xa_state *xas) for (;;) { struct xa_node *node = xas->xa_node; + if (node->shift >= shift) + break; xas->xa_node = xa_parent_locked(xas->xa, node); xas->xa_offset = node->offset - 1; if (node->offset != 0) @@ -1078,6 +1080,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) xa_mk_node(child)); if (xa_is_value(curr)) values--; + xas_update(xas, child); } else { unsigned int canon = offset - xas->xa_sibs; @@ -1092,6 +1095,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order) } while (offset-- > xas->xa_offset); node->nr_values += values; + xas_update(xas, node); } EXPORT_SYMBOL_GPL(xas_split); #endif diff --git a/mm/Makefile b/mm/Makefile index 4b3a827429f3f4609db8470997f92cb73f0161b6..d2a6a786f9153bdc5e9ddeab66ba5198e41503cc 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -129,3 +129,4 @@ obj-$(CONFIG_PIN_MEMORY) += pin_mem.o obj-$(CONFIG_SHRINK_PAGECACHE) += page_cache_limit.o obj-$(CONFIG_ASCEND_SHARE_POOL) += share_pool.o obj-$(CONFIG_MEMORY_RELIABLE) += mem_reliable.o +obj-$(CONFIG_MEMCG_MEMFS_INFO) += memcg_memfs_info.o diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c index 92bfc37300dfc2d93dd07928abe3ad2ee1c02b83..11d3b46ba18704fbb51f5301375807bf4aa1b31b 100644 --- a/mm/debug_vm_pgtable.c +++ b/mm/debug_vm_pgtable.c @@ -128,6 +128,8 @@ static void 
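check_create_range_5() in the test_xarray.c hunk above exercises the canonical xas_nomem() retry shape: the operation runs under the lock and may leave -ENOMEM in the xa_state; xas_nomem() then allocates outside the lock and asks for a retry. The general pattern as a standalone sketch (demo_xa, demo_store() and the stored entry are placeholders):

static struct xarray demo_xa;

static int demo_store(unsigned long index, void *entry)
{
	XA_STATE(xas, &demo_xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);	/* may set xas_error() = -ENOMEM */
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));	/* alloc unlocked, retry */

	return xas_error(&xas);
}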
__init pte_advanced_tests(struct mm_struct *mm, ptep_test_and_clear_young(vma, vaddr, ptep); pte = ptep_get(ptep); WARN_ON(pte_young(pte)); + + ptep_get_and_clear_full(mm, vaddr, ptep, 1); } static void __init pte_savedwrite_tests(unsigned long pfn, pgprot_t prot) diff --git a/mm/dynamic_hugetlb.c b/mm/dynamic_hugetlb.c index f20e654cc85600897fcc5816c3d128819b1319bc..eb9b528b73de1eae9504e66d252e6bde3fbd5500 100644 --- a/mm/dynamic_hugetlb.c +++ b/mm/dynamic_hugetlb.c @@ -30,15 +30,22 @@ static void add_new_page_to_pool(struct dhugetlb_pool *hpool, struct page *page, switch (hpages_pool_idx) { case HUGE_PAGES_POOL_1G: prep_compound_gigantic_page(page, PUD_SHIFT - PAGE_SHIFT); + set_page_count(page, 0); set_compound_page_dtor(page, HUGETLB_PAGE_DTOR); + hugetlb_set_page_subpool(page, NULL); set_hugetlb_cgroup(page, NULL); + set_hugetlb_cgroup_rsvd(page, NULL); break; case HUGE_PAGES_POOL_2M: - prep_compound_page(page, PMD_SHIFT - PAGE_SHIFT); + prep_new_page(page, PMD_SHIFT - PAGE_SHIFT, __GFP_COMP, 0); + set_page_count(page, 0); set_compound_page_dtor(page, HUGETLB_PAGE_DTOR); + hugetlb_set_page_subpool(page, NULL); set_hugetlb_cgroup(page, NULL); + set_hugetlb_cgroup_rsvd(page, NULL); break; } + page->mapping = NULL; list_add_tail(&page->lru, &hpages_pool->hugepage_freelists); hpages_pool->free_normal_pages++; } @@ -47,20 +54,21 @@ static void __hpool_split_gigantic_page(struct dhugetlb_pool *hpool, struct page { int nr_pages = 1 << (PUD_SHIFT - PAGE_SHIFT); int nr_blocks = 1 << (PMD_SHIFT - PAGE_SHIFT); - int i; + int i, pfn = page_to_pfn(page); lockdep_assert_held(&hpool->lock); atomic_set(compound_mapcount_ptr(page), 0); atomic_set(compound_pincount_ptr(page), 0); for (i = 1; i < nr_pages; i++) - clear_compound_head(&page[i]); + clear_compound_head(pfn_to_page(pfn + i)); set_compound_order(page, 0); page[1].compound_nr = 0; __ClearPageHead(page); for (i = 0; i < nr_pages; i+= nr_blocks) - add_new_page_to_pool(hpool, &page[i], HUGE_PAGES_POOL_2M); + add_new_page_to_pool(hpool, pfn_to_page(pfn + i), + HUGE_PAGES_POOL_2M); } static void __hpool_split_huge_page(struct dhugetlb_pool *hpool, struct page *page) @@ -74,14 +82,14 @@ static void __hpool_split_huge_page(struct dhugetlb_pool *hpool, struct page *pa __ClearPageHead(page); for (i = 0; i < nr_pages; i++) { - page[i].flags &= ~(1 << PG_locked | 1 << PG_error | - 1 << PG_referenced | 1 << PG_dirty | - 1 << PG_active | 1 << PG_private | - 1 << PG_writeback); - if (i != 0) { - page[i].mapping = NULL; + if (i != 0) clear_compound_head(&page[i]); - } + /* + * If a hugepage is mapped in private mode, the PG_uptodate bit + * will not be cleared when the hugepage freed. Clear the + * hugepage using free_pages_prepare() here. 
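The __hpool_split_gigantic_page() change above swaps &page[i] indexing for pfn_to_page(pfn + i). That matters on SPARSEMEM without VMEMMAP, where struct pages are only virtually contiguous within one memory section, so plain pointer arithmetic across a 1G range can walk off the memmap. The safe idiom, sketched (visit_subpage() is a placeholder):

unsigned long pfn = page_to_pfn(head);
unsigned long i;

for (i = 0; i < nr_pages; i++)
	visit_subpage(pfn_to_page(pfn + i));	/* valid across sections */
/* not: visit_subpage(&head[i]), which may leave the section's memmap */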
+ */ + free_pages_prepare(&page[i], 0, false); add_new_page_to_pool(hpool, &page[i], HUGE_PAGES_POOL_4K); } } @@ -201,8 +209,8 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo struct huge_pages_pool *hpages_pool, *src_hpages_pool; struct split_hugepage *split_page, *split_next; unsigned long nr_pages, block_size; - struct page *page, *next; - bool need_migrate = false; + struct page *page, *next, *p; + bool need_migrate = false, need_initial = false; int i, try; LIST_HEAD(wait_page_list); @@ -213,8 +221,9 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo switch (hpages_pool_idx) { case HUGE_PAGES_POOL_1G: - nr_pages = 1 << (PUD_SHIFT - PMD_SHIFT); + nr_pages = 1 << (PUD_SHIFT - PAGE_SHIFT); block_size = 1 << (PMD_SHIFT - PAGE_SHIFT); + need_initial = true; break; case HUGE_PAGES_POOL_2M: nr_pages = 1 << (PMD_SHIFT - PAGE_SHIFT); @@ -235,7 +244,8 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo clear_percpu_pools(hpool); page = pfn_to_page(split_page->start_pfn); for (i = 0; i < nr_pages; i+= block_size) { - if (PagePool(&page[i])) { + p = pfn_to_page(split_page->start_pfn + i); + if (PagePool(p)) { if (!need_migrate) goto next; else @@ -245,11 +255,31 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo list_del(&split_page->head_pages); hpages_pool->split_normal_pages--; - kfree(split_page); for (i = 0; i < nr_pages; i+= block_size) { - list_del(&page[i].lru); + p = pfn_to_page(split_page->start_pfn + i); + list_del(&p->lru); src_hpages_pool->free_normal_pages--; + /* + * The input of prep_compound_gigantic_page should be a + * group of pages whose ref count is 1 rather than + * compound_page. + * Initialize the pages before merge them to 1G. + */ + if (need_initial) { + int j; + + set_compound_page_dtor(p, NULL_COMPOUND_DTOR); + atomic_set(compound_mapcount_ptr(p), 0); + set_compound_order(p, 0); + __ClearPageHead(p); + set_page_count(p, 1); + for (j = 1; j < block_size; j++) { + clear_compound_head(&p[j]); + set_page_count(&p[j], 1); + } + } } + kfree(split_page); add_new_page_to_pool(hpool, page, hpages_pool_idx); trace_dynamic_hugetlb_split_merge(hpool, page, DHUGETLB_MERGE, page_size(page)); return 0; @@ -262,8 +292,9 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo /* Isolate free page first. */ INIT_LIST_HEAD(&wait_page_list); for (i = 0; i < nr_pages; i+= block_size) { - if (!PagePool(&page[i])) { - list_move(&page[i].lru, &wait_page_list); + p = pfn_to_page(split_page->start_pfn + i); + if (!PagePool(p)) { + list_move(&p->lru, &wait_page_list); src_hpages_pool->free_normal_pages--; } } @@ -271,12 +302,13 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo /* Unlock and try migration. 
*/ spin_unlock(&hpool->lock); for (i = 0; i < nr_pages; i+= block_size) { - if (PagePool(&page[i])) + p = pfn_to_page(split_page->start_pfn + i); + if (PagePool(p)) /* * TODO: fatal migration failures should bail * out */ - do_migrate_range(page_to_pfn(&page[i]), page_to_pfn(&page[i]) + block_size); + do_migrate_range(page_to_pfn(p), page_to_pfn(p) + block_size); } spin_lock(&hpool->lock); @@ -293,7 +325,8 @@ static int hugetlb_pool_merge_all_pages(struct dhugetlb_pool *hpool) { int ret = 0; - spin_lock(&hpool->lock); + lockdep_assert_held(&hpool->lock); + while (hpool->hpages_pool[HUGE_PAGES_POOL_2M].split_normal_pages) { ret = hpool_merge_page(hpool, HUGE_PAGES_POOL_2M, true); if (ret) { @@ -317,7 +350,6 @@ static int hugetlb_pool_merge_all_pages(struct dhugetlb_pool *hpool) goto out; } out: - spin_unlock(&hpool->lock); return ret; } @@ -464,20 +496,23 @@ static struct page *__alloc_page_from_dhugetlb_pool(void) */ spin_lock_irqsave(&percpu_pool->lock, flags); - if (percpu_pool->free_pages == 0) { - int ret; - - spin_lock(&hpool->lock); - ret = add_pages_to_percpu_pool(hpool, percpu_pool, - PERCPU_POOL_PAGE_BATCH); - spin_unlock(&hpool->lock); - if (ret) - goto unlock; - } + do { + page = NULL; + if (percpu_pool->free_pages == 0) { + int ret; + + spin_lock(&hpool->lock); + ret = add_pages_to_percpu_pool(hpool, percpu_pool, + PERCPU_POOL_PAGE_BATCH); + spin_unlock(&hpool->lock); + if (ret) + goto unlock; + } - page = list_entry(percpu_pool->head_page.next, struct page, lru); - list_del(&page->lru); - percpu_pool->free_pages--; + page = list_entry(percpu_pool->head_page.next, struct page, lru); + list_del(&page->lru); + percpu_pool->free_pages--; + } while (page && check_new_page(page)); percpu_pool->used_pages++; SetPagePool(page); @@ -724,6 +759,7 @@ static int alloc_hugepage_from_hugetlb(struct dhugetlb_pool *hpool, if (ret) continue; + ClearHPageFreed(page); list_move_tail(&page->lru, &hpages_pool->hugepage_freelists); h->free_huge_pages--; h->free_huge_pages_node[nid]--; @@ -749,7 +785,11 @@ static int free_hugepage_to_hugetlb(struct dhugetlb_pool *hpool) unsigned int nr_pages; int nid, ret = 0; - spin_lock(&hpool->lock); + if (!h) + return ret; + + lockdep_assert_held(&hpool->lock); + spin_lock(&hugetlb_lock); list_for_each_entry_safe(page, next, &hpages_pool->hugepage_freelists, lru) { nr_pages = 1 << huge_page_order(h); @@ -761,6 +801,7 @@ static int free_hugepage_to_hugetlb(struct dhugetlb_pool *hpool) set_compound_page_dtor(page, HUGETLB_PAGE_DTOR); nid = page_to_nid(page); + SetHPageFreed(page); list_move(&page->lru, &h->hugepage_freelists[nid]); hpool->total_huge_pages--; hpages_pool->free_normal_pages--; @@ -773,7 +814,6 @@ static int free_hugepage_to_hugetlb(struct dhugetlb_pool *hpool) break; } spin_unlock(&hugetlb_lock); - spin_unlock(&hpool->lock); return ret; } @@ -831,12 +871,21 @@ int hugetlb_pool_destroy(struct cgroup *cgrp) if (!hpool || hpool->attach_memcg != memcg) return 0; + /* + * Even if no process exists in the memory cgroup, some pages may still + * be occupied. Release these pages before merging them. 
+ */ + mem_cgroup_force_empty(hpool->attach_memcg); + + spin_lock(&hpool->lock); ret = hugetlb_pool_merge_all_pages(hpool); - if (ret) + if (ret) { + spin_unlock(&hpool->lock); return -ENOMEM; + } ret = free_hugepage_to_hugetlb(hpool); memcg->hpool = NULL; - + spin_unlock(&hpool->lock); put_hpool(hpool); return ret; } @@ -911,17 +960,20 @@ static ssize_t update_reserved_pages(struct mem_cgroup *memcg, char *buf, int hp if (hpool_split_page(hpool, hpages_pool_idx - 1)) break; } - /* - * First try to merge pages without migration, If this can not meet - * the requirements, then try to merge pages with migration. - */ - while (delta > hpages_pool->free_normal_pages) { - if (hpool_merge_page(hpool, hpages_pool_idx, false)) - break; - } - while (delta > hpages_pool->free_normal_pages) { - if (hpool_merge_page(hpool, hpages_pool_idx, true)) - break; + /* Currently, only merging 2M hugepages is supported */ + if (hpages_pool_idx == HUGE_PAGES_POOL_2M) { + /* + * First try to merge pages without migration, If this can not meet + * the requirements, then try to merge pages with migration. + */ + while (delta > hpages_pool->free_normal_pages) { + if (hpool_merge_page(hpool, hpages_pool_idx, false)) + break; + } + while (delta > hpages_pool->free_normal_pages) { + if (hpool_merge_page(hpool, hpages_pool_idx, true)) + break; + } } delta = min(nr_pages - hpages_pool->nr_huge_pages, hpages_pool->free_normal_pages); hpages_pool->nr_huge_pages += delta; @@ -1021,7 +1073,7 @@ int hugetlb_pool_info_show(struct seq_file *m, void *v) return 0; if (!hpool) { - seq_printf(m, "Curent hierarchial have not memory pool.\n"); + seq_printf(m, "Current hierarchial have not memory pool.\n"); return 0; } diff --git a/mm/filemap.c b/mm/filemap.c index 3958fc3280d847b3e026f4e03cc653bfb660634e..edb94663c5df0da41492f6a0f7061d3b8bdd4bfa 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1951,7 +1951,11 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start, rcu_read_lock(); while ((page = find_get_entry(&xas, end, XA_PRESENT))) { + unsigned long next_idx = xas.xa_index + 1; + if (!xa_is_value(page)) { + if (PageTransHuge(page)) + next_idx = page->index + thp_nr_pages(page); if (page->index < start) goto put; VM_BUG_ON_PAGE(page->index != xas.xa_index, page); @@ -1973,13 +1977,11 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start, put: put_page(page); next: - if (!xa_is_value(page) && PageTransHuge(page)) { - unsigned int nr_pages = thp_nr_pages(page); - + if (next_idx != xas.xa_index + 1) { /* Final THP may cross MAX_LFS_FILESIZE on 32-bit */ - xas_set(&xas, page->index + nr_pages); - if (xas.xa_index < nr_pages) + if (next_idx < xas.xa_index) break; + xas_set(&xas, next_idx); } } rcu_read_unlock(); @@ -2435,6 +2437,13 @@ static int generic_file_buffered_read_get_pages(struct kiocb *iocb, goto find_page; } +static inline bool pos_same_page(loff_t pos1, loff_t pos2, struct page *page) +{ + unsigned int shift = page_shift(page); + + return (pos1 >> shift == pos2 >> shift); +} + /** * generic_file_buffered_read - generic file read routine * @iocb: the iocb to read @@ -2525,11 +2534,10 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb, writably_mapped = mapping_writably_mapped(mapping); /* - * When a sequential read accesses a page several times, only + * When a read accesses a page several times, only * mark it as accessed the first time. 
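pos_same_page() in the filemap.c hunk above compares two file positions at the granularity of the given page, which for a THP is the compound size rather than PAGE_SIZE. A worked example for a 2M THP, where page_shift() is 21: 0x200000 >> 21 == 1 and 0x3fffff >> 21 == 1, so any two positions inside the THP compare equal and the large page is marked accessed once per visit. Illustrative calls:

struct page *thp;	/* assume this is a 2M compound page */

pos_same_page(0x200000, 0x3fffff, thp);	/* true:  same 2M page     */
pos_same_page(0x1fffff, 0x200000, thp);	/* false: crosses the edge */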
*/ - if (iocb->ki_pos >> PAGE_SHIFT != - ra->prev_pos >> PAGE_SHIFT) + if (pos_same_page(iocb->ki_pos, ra->prev_pos -1, pages[0])) mark_page_accessed(pages[0]); for (i = 1; i < pg_nr; i++) mark_page_accessed(pages[i]); diff --git a/mm/gup.c b/mm/gup.c index ee9c2c39c2997329ea12312c5353b0bdf5e2b75c..4e9945299fe5187abf7680cb8b354975e09fda8f 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2319,6 +2319,7 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr, { int nr_start = *nr; struct dev_pagemap *pgmap = NULL; + int ret = 1; do { struct page *page = pfn_to_page(pfn); @@ -2326,21 +2327,22 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr, pgmap = get_dev_pagemap(pfn, pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, flags, pages); - return 0; + ret = 0; + break; } SetPageReferenced(page); pages[*nr] = page; if (unlikely(!try_grab_page(page, flags))) { undo_dev_pagemap(nr, nr_start, flags, pages); - return 0; + ret = 0; + break; } (*nr)++; pfn++; } while (addr += PAGE_SIZE, addr != end); - if (pgmap) - put_dev_pagemap(pgmap); - return 1; + put_dev_pagemap(pgmap); + return ret; } static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, diff --git a/mm/hmm.c b/mm/hmm.c index fb617054f96316e79ad22007379aab8582f7d72b..cbe9d0c66650452ba3e4a57b6af6c7f8f0940f56 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -296,7 +296,8 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, * Since each architecture defines a struct page for the zero page, just * fall through and treat it like a normal page. */ - if (pte_special(pte) && !pte_devmap(pte) && + if (!vm_normal_page(walk->vma, addr, pte) && + !pte_devmap(pte) && !is_zero_pfn(pte_pfn(pte))) { if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) { pte_unmap(ptep); @@ -514,7 +515,7 @@ static int hmm_vma_walk_test(unsigned long start, unsigned long end, struct hmm_range *range = hmm_vma_walk->range; struct vm_area_struct *vma = walk->vma; - if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) && + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) && vma->vm_flags & VM_READ) return 0; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 37704a21b3dc1841bf29359281bb7071bde502b2..bfe079e294cb7cd24764f9782c4fa49d41958050 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2501,11 +2501,11 @@ static void __split_huge_page(struct page *page, struct list_head *list, for (i = nr - 1; i >= 1; i--) { __split_huge_page_tail(head, i, lruvec, list); - /* Some pages can be beyond i_size: drop them from page cache */ + /* Some pages can be beyond EOF: drop them from page cache */ if (head[i].index >= end) { ClearPageDirty(head + i); __delete_from_page_cache(head + i, NULL); - if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) + if (shmem_mapping(head->mapping)) shmem_uncharge(head->mapping->host, 1); put_page(head + i); } else if (!PageAnon(page)) { @@ -2733,6 +2733,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) * head page lock is good enough to serialize the trimming. 
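The __gup_device_huge() rework in the gup.c hunk above funnels both failure cases through a single exit and drops the "if (pgmap)" guard, relying on put_dev_pagemap() being NULL-tolerant. For reference, the helper has roughly this shape in kernels of this vintage (paraphrased from include/linux/memremap.h, not part of the patch):

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(pgmap->ref);
}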
*/ end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); + if (shmem_mapping(mapping)) + end = shmem_fallocend(mapping->host, end); } /* diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3b787cb5669907c1035332f00d70826f15c59d85..a8c815386ecc59a9a962acfaa9b0547bf4cb98b3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1784,6 +1784,33 @@ pgoff_t hugetlb_basepage_index(struct page *page) return (index << compound_order(page_head)) + compound_idx; } +#define HUGE_PAGE_BOOTMEM_ALLOC 0 +#define HUGE_PAGE_FRESH_ALLOC 1 + +static u64 normal_page_reserve_sz; + +static int __init early_normal_page_reserve(char *p) +{ + unsigned long long size; + + if (!p) + return 1; + + size = memparse(p, &p); + if (*p) { + pr_warn("HugeTLB: Invalid normal page reserved size\n"); + return 1; + } + + normal_page_reserve_sz = size & PAGE_MASK; + + pr_info("HugeTLB: Normal page reserved %lldMB\n", + normal_page_reserve_sz >> 20); + + return 0; +} +early_param("hugepage_prohibit_sz", early_normal_page_reserve); + static struct page *alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) @@ -1831,6 +1858,45 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, return page; } +static bool __ref huge_page_limit_check(int type, size_t hsize, int nid) +{ + u64 mem_usable = 0; + char *str = NULL; + char buf[32]; + + if (!normal_page_reserve_sz) + return true; + + if (system_state > SYSTEM_SCHEDULING) + return true; + + if (normal_page_reserve_sz >= memblock_phys_mem_size()) { + mem_usable = memblock_phys_mem_size(); + str = "physical memory"; + goto out; + } + + if (type == HUGE_PAGE_BOOTMEM_ALLOC) { + mem_usable = memblock_phys_mem_size() - memblock_reserved_size(); + str = "memblock usable"; + } else if (type == HUGE_PAGE_FRESH_ALLOC) { + mem_usable = nr_free_pages() << PAGE_SHIFT; + str = "free page"; + } + + if (mem_usable < normal_page_reserve_sz + hsize) + goto out; + + return true; +out: + string_get_size(hsize, 1, STRING_UNITS_2, buf, 32); + pr_info("HugeTLB: allocating(%s) + Normal pages reserved(%lldMB) node%d exceed %s size(%lldMB)\n", + buf, normal_page_reserve_sz >> 20, + nid, str, mem_usable >> 20); + + return false; +} + /* * Common helper to allocate a fresh hugetlb page. 
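early_normal_page_reserve() in the hugetlb.c hunk above accepts the usual memparse() size suffixes, so the reservation can be passed as, say, hugepage_prohibit_sz=4G on the command line. What the parse does with such a value, sketched (demo_parse_reserve() is hypothetical):

static u64 demo_parse_reserve(char *p)
{
	unsigned long long size = memparse(p, &p);	/* "4G" -> 4ULL << 30 */

	if (*p)				/* trailing junk, reject the option */
		return 0;
	return size & PAGE_MASK;	/* keep the reservation page aligned */
}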
All specific allocators * should use this function to get new hugetlb pages @@ -1843,6 +1909,9 @@ static struct page *alloc_fresh_huge_page(struct hstate *h, bool retry = false; retry: + if (!huge_page_limit_check(HUGE_PAGE_FRESH_ALLOC, huge_page_size(h), nid)) + return NULL; + if (hstate_is_gigantic(h)) page = alloc_gigantic_page(h, gfp_mask, nid, nmask); else @@ -2637,6 +2706,10 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid) if (nid != NUMA_NO_NODE && nid >= nr_online_nodes) return 0; + + if (!huge_page_limit_check(HUGE_PAGE_BOOTMEM_ALLOC, huge_page_size(h), nid)) + return 0; + /* do node specific alloc */ if (nid != NUMA_NO_NODE) { m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), @@ -3600,10 +3673,10 @@ static int __init hugepages_setup(char *s) pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); return 0; } + if (tmp >= nr_online_nodes) + goto invalid; node = tmp; p += count + 1; - if (node < 0 || node >= nr_online_nodes) - goto invalid; /* Parse hugepages */ if (sscanf(p, "%lu%n", &tmp, &count) != 1) goto invalid; diff --git a/mm/internal.h b/mm/internal.h index 31517354f3c79e45053da4f0aa64da928a373345..917b86b2870ca98a4dbb19e391cf3027b526f57e 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -195,6 +195,7 @@ extern void memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order); extern void __free_pages_core(struct page *page, unsigned int order); extern void prep_compound_page(struct page *page, unsigned int order); +extern int check_new_page(struct page *page); extern void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags); extern void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 3a559f0f282d20b124bda1835ddbd98d1dcb38ec..f67418a30282ce2cd82ccdcc2b594f0105268485 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -21,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -38,14 +40,18 @@ #define KFENCE_WARN_ON(cond) \ ({ \ const bool __cond = WARN_ON(cond); \ - if (unlikely(__cond)) \ + if (unlikely(__cond)) { \ WRITE_ONCE(kfence_enabled, false); \ + disabled_by_warn = true; \ + } \ __cond; \ }) /* === Data ================================================================= */ static bool kfence_enabled __read_mostly; +static bool disabled_by_warn __read_mostly; +static bool re_enabling __read_mostly; unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL; EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ @@ -55,20 +61,33 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ #endif #define MODULE_PARAM_PREFIX "kfence." +static int kfence_enable_late(void); static int param_set_sample_interval(const char *val, const struct kernel_param *kp) { - unsigned long num; - int ret = kstrtoul(val, 0, &num); + long num; + int ret = kstrtol(val, 0, &num); if (ret < 0) return ret; - if (!num) /* Using 0 to indicate KFENCE is disabled. */ + if (num < -1) + return -ERANGE; + /* + * For architecture that don't require early allocation, always support + * re-enabling. So only need to set num to 0 if num < 0. + */ + num = max_t(long, 0, num); + + /* Using 0 to indicate KFENCE is disabled. 
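The hugepages_setup() change above is an ordering fix: with the per-node syntax hugepages=<node>:<count>, the node number must be range-checked before it is stored and before the parse cursor advances past it. The intended shape, sketched as a fragment of that parser (the invalid: label exists in the real function):

if (sscanf(p, "%lu%n", &tmp, &count) == 1 && p[count] == ':') {
	if (tmp >= nr_online_nodes)	/* validate before any use */
		goto invalid;
	node = tmp;
	p += count + 1;			/* only now step past "<node>:" */
}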
*/ + if (!num && READ_ONCE(kfence_enabled)) { + pr_info("disabled\n"); WRITE_ONCE(kfence_enabled, false); - else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING) - return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */ + } - *((unsigned long *)kp->arg) = num; + *((unsigned long *)kp->arg) = (unsigned long)num; + + if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING) + return disabled_by_warn ? -EINVAL : kfence_enable_late(); return 0; } @@ -86,12 +105,40 @@ static const struct kernel_param_ops sample_interval_param_ops = { }; module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600); +#ifdef CONFIG_ARM64 +static int __init parse_sample_interval(char *str) +{ + long num; + + if (kstrtol(str, 0, &num) < 0) + return 0; + + if (num < -1) + return 0; + + /* Using -1 to indicate re-enabling is supported */ + if (num == -1) { + re_enabling = true; + pr_err("re-enabling is supported\n"); + } + num = max_t(long, 0, num); + + kfence_sample_interval = (unsigned long)num; + return 0; +} +early_param("kfence.sample_interval", parse_sample_interval); +#endif + /* Pool usage% threshold when currently covered allocations are skipped. */ static unsigned long kfence_skip_covered_thresh __read_mostly = 75; module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644); +/* If true, check all canary bytes on panic. */ +static bool kfence_check_on_panic __read_mostly; +module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444); + /* The pool of pages used for guard pages and objects. */ -char *__kfence_pool __ro_after_init; +char *__kfence_pool __read_mostly; EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */ #ifdef CONFIG_KFENCE_DYNAMIC_OBJECTS @@ -139,6 +186,21 @@ static const struct kernel_param_ops num_objects_param_ops = { .get = param_get_num_objects, }; module_param_cb(num_objects, &num_objects_param_ops, &kfence_num_objects, 0600); + +#ifdef CONFIG_ARM64 +static int __init parse_num_objects(char *str) +{ + unsigned long num; + + if (kstrtoul(str, 0, &num) < 0) + return 0; + if (num < MIN_KFENCE_OBJECTS || num > MAX_KFENCE_OBJECTS) + return 0; + kfence_num_objects = num; + return 0; +} +early_param("kfence.num_objects", parse_num_objects); +#endif #endif /* @@ -146,6 +208,8 @@ module_param_cb(num_objects, &num_objects_param_ops, &kfence_num_objects, 0600); * backing pages (in __kfence_pool). 
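Taken together, the parameter changes above give kfence.sample_interval a small value grammar, handled by parse_sample_interval() at boot and param_set_sample_interval() at run time. Summarized as a sketch (the sysfs path is the standard module-param location, assumed here):

/*
 * kfence.sample_interval=100   sample every 100 ms, pool set up at boot
 * kfence.sample_interval=0     start disabled
 * kfence.sample_interval=-1    arm64 boot parsing only: start disabled
 *                              but keep the early pool, so KFENCE can be
 *                              re-enabled later, e.g.:
 *                              echo 100 > /sys/module/kfence/parameters/sample_interval
 * values below -1 are rejected with -ERANGE when set at run time.
 */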
*/ #ifdef CONFIG_KFENCE_DYNAMIC_OBJECTS +#define ILOG2(x) (ilog2((x))) + struct kfence_metadata *kfence_metadata; static phys_addr_t metadata_size; @@ -155,6 +219,8 @@ static inline bool kfence_metadata_valid(void) } #else +#define ILOG2(x) (const_ilog2((x))) + static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0); struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS]; @@ -185,7 +251,7 @@ atomic_t kfence_allocation_gate = ATOMIC_INIT(1); * P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)) ^ HNUM */ #define ALLOC_COVERED_HNUM 2 -#define ALLOC_COVERED_ORDER (const_ilog2(KFENCE_NR_OBJECTS) + 2) +#define ALLOC_COVERED_ORDER (ILOG2(KFENCE_NR_OBJECTS) + 2) #define ALLOC_COVERED_SIZE (1 << ALLOC_COVERED_ORDER) #define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER) #define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1) @@ -596,17 +662,66 @@ static void rcu_guarded_free(struct rcu_head *h) kfence_guarded_free((void *)meta->addr, meta, false); } -static bool __init kfence_init_pool(void) +#ifdef CONFIG_KFENCE_DYNAMIC_OBJECTS +static int __ref kfence_dynamic_init(void) +{ + metadata_size = sizeof(struct kfence_metadata) * KFENCE_NR_OBJECTS; + if (system_state < SYSTEM_RUNNING) + kfence_metadata = memblock_alloc(metadata_size, PAGE_SIZE); + else + kfence_metadata = kzalloc(metadata_size, GFP_KERNEL); + if (!kfence_metadata) + return -ENOMEM; + + covered_size = sizeof(atomic_t) * ALLOC_COVERED_SIZE; + if (system_state < SYSTEM_RUNNING) + alloc_covered = memblock_alloc(covered_size, PAGE_SIZE); + else + alloc_covered = kzalloc(covered_size, GFP_KERNEL); + if (!alloc_covered) { + if (system_state < SYSTEM_RUNNING) + memblock_free(__pa(kfence_metadata), metadata_size); + else + kfree(kfence_metadata); + kfence_metadata = NULL; + + return -ENOMEM; + } + + return 0; +} + +static void __ref kfence_dynamic_destroy(void) +{ + if (system_state < SYSTEM_RUNNING) { + memblock_free(__pa(alloc_covered), covered_size); + memblock_free(__pa(kfence_metadata), metadata_size); + } else { + kfree(alloc_covered); + kfree(kfence_metadata); + } + alloc_covered = NULL; + kfence_metadata = NULL; +} + +#else +static int __init kfence_dynamic_init(void) { return 0; } +static void __init kfence_dynamic_destroy(void) { } +#endif + +/* + * Initialization of the KFENCE pool after its allocation. + * Returns 0 on success; otherwise returns the address up to + * which partial initialization succeeded. + */ +static unsigned long kfence_init_pool(void) { unsigned long addr = (unsigned long)__kfence_pool; struct page *pages; int i; - if (!__kfence_pool) - return false; - if (!arch_kfence_init_pool()) - goto err; + return addr; pages = virt_to_page(addr); @@ -624,9 +739,13 @@ static bool __init kfence_init_pool(void) /* Verify we do not have a compound head page. */ if (WARN_ON(compound_head(&pages[i]) != &pages[i])) - goto err; + return addr; __SetPageSlab(&pages[i]); +#ifdef CONFIG_MEMCG + pages[i].memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg | + MEMCG_DATA_OBJCGS; +#endif } /* @@ -637,7 +756,7 @@ static bool __init kfence_init_pool(void) */ for (i = 0; i < 2; i++) { if (unlikely(!kfence_protect(addr))) - goto err; + return addr; addr += PAGE_SIZE; } @@ -654,7 +773,7 @@ static bool __init kfence_init_pool(void) /* Protect the right redzone. 
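kfence_dynamic_init() above has to serve both boot-time setup and a late re-enable, hence the system_state branch: before the page and slab allocators are live only memblock can satisfy the request, and once the system is running memblock_alloc() must no longer be used. The split reduced to a sketch (size is a placeholder, both paths return zeroed memory):

size_t size = PAGE_SIZE;	/* placeholder */
void *buf;

if (system_state < SYSTEM_RUNNING)
	buf = memblock_alloc(size, PAGE_SIZE);	/* early boot path */
else
	buf = kzalloc(size, GFP_KERNEL);	/* late enable path */

/* __ref on the real function silences the section-mismatch warning
 * for referencing init-time memblock code from a possibly-late caller */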
*/ if (unlikely(!kfence_protect(addr + PAGE_SIZE))) - goto err; + return addr; addr += 2 * PAGE_SIZE; } @@ -667,9 +786,22 @@ static bool __init kfence_init_pool(void) */ kmemleak_free(__kfence_pool); - return true; + return 0; +} + +static bool __init kfence_init_pool_early(void) +{ + unsigned long addr; + char *p; + + if (!__kfence_pool) + return false; + + addr = kfence_init_pool(); + + if (!addr) + return true; -err: /* * Only release unprotected pages, and do not try to go back and change * page attributes due to risk of failing to do so as well. If changing @@ -677,8 +809,40 @@ static bool __init kfence_init_pool(void) * fails for the first page, and therefore expect addr==__kfence_pool in * most failure cases. */ + for (p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) { + struct page *page = virt_to_page(p); + + if (!page) + continue; +#ifdef CONFIG_MEMCG + page->memcg_data = 0; +#endif + __ClearPageSlab(page); + } memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); __kfence_pool = NULL; + kfence_dynamic_destroy(); + return false; +} + +static bool kfence_init_pool_late(void) +{ + unsigned long addr, free_size; + + addr = kfence_init_pool(); + + if (!addr) + return true; + + /* Same as above. */ + free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool); +#ifdef CONFIG_CONTIG_ALLOC + free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE); +#else + free_pages_exact((void *)addr, free_size); +#endif + __kfence_pool = NULL; + kfence_dynamic_destroy(); return false; } @@ -722,9 +886,14 @@ static void *next_object(struct seq_file *seq, void *v, loff_t *pos) static int show_object(struct seq_file *seq, void *v) { - struct kfence_metadata *meta = &kfence_metadata[(long)v - 1]; + struct kfence_metadata *meta; unsigned long flags; + if (!kfence_metadata_valid()) + return 0; + + meta = &kfence_metadata[(long)v - 1]; + raw_spin_lock_irqsave(&meta->lock, flags); kfence_print_object(seq, meta); raw_spin_unlock_irqrestore(&meta->lock, flags); @@ -759,14 +928,38 @@ static int __init kfence_debugfs_init(void) debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops); /* Variable kfence_metadata may fail to allocate. 
*/ - if (kfence_metadata_valid()) - debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops); + debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops); return 0; } late_initcall(kfence_debugfs_init); +/* === Panic Notifier ====================================================== */ + +static void kfence_check_all_canary(void) +{ + int i; + + for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + struct kfence_metadata *meta = &kfence_metadata[i]; + + if (meta->state == KFENCE_OBJECT_ALLOCATED) + for_each_canary(meta, check_canary_byte); + } +} + +static int kfence_check_canary_callback(struct notifier_block *nb, + unsigned long reason, void *arg) +{ + kfence_check_all_canary(); + return NOTIFY_OK; +} + +static struct notifier_block kfence_check_canary_notifier = { + .notifier_call = kfence_check_canary_callback, +}; + /* === Allocation Gate Timer ================================================ */ #ifdef CONFIG_KFENCE_STATIC_KEYS @@ -821,76 +1014,132 @@ static void toggle_allocation_gate(struct work_struct *work) } static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate); -#ifdef CONFIG_KFENCE_DYNAMIC_OBJECTS -static int __init kfence_dynamic_init(void) +/* === Public interface ===================================================== */ +void __init kfence_early_alloc_pool(void) { - metadata_size = sizeof(struct kfence_metadata) * KFENCE_NR_OBJECTS; - kfence_metadata = memblock_alloc(metadata_size, PAGE_SIZE); - if (!kfence_metadata) { - pr_err("failed to allocate metadata\n"); - return -ENOMEM; - } - - covered_size = sizeof(atomic_t) * KFENCE_NR_OBJECTS; - alloc_covered = memblock_alloc(covered_size, PAGE_SIZE); - if (!alloc_covered) { - memblock_free((phys_addr_t)kfence_metadata, metadata_size); - kfence_metadata = NULL; - pr_err("failed to allocate covered\n"); - return -ENOMEM; - } + if (!kfence_sample_interval && !re_enabling) + return; - return 0; -} + __kfence_pool = memblock_alloc_raw(KFENCE_POOL_SIZE, PAGE_SIZE); -static void __init kfence_dynamic_destroy(void) -{ - memblock_free((phys_addr_t)alloc_covered, covered_size); - alloc_covered = NULL; - memblock_free((phys_addr_t)kfence_metadata, metadata_size); - kfence_metadata = NULL; + if (!__kfence_pool) { + kfence_sample_interval = 0; + pr_err("failed to early allocate pool, disable KFENCE\n"); + } } -#else -static int __init kfence_dynamic_init(void) { return 0; } -static void __init kfence_dynamic_destroy(void) { } -#endif - -/* === Public interface ===================================================== */ void __init kfence_alloc_pool(void) { - if (!kfence_sample_interval) + if (!kfence_sample_interval && !__kfence_pool) return; - if (kfence_dynamic_init()) + if (kfence_dynamic_init()) { + if (__kfence_pool) { + memblock_free(__pa(__kfence_pool), KFENCE_POOL_SIZE); + __kfence_pool = NULL; + } return; + } - __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); + if (!__kfence_pool) + __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); if (!__kfence_pool) { pr_err("failed to allocate pool\n"); kfence_dynamic_destroy(); } } +static void kfence_init_enable(void) +{ + if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS)) + static_branch_enable(&kfence_allocation_key); + + if (kfence_check_on_panic) + atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier); + + WRITE_ONCE(kfence_enabled, true); + queue_delayed_work(system_unbound_wq, &kfence_timer, 0); + pr_info("initialized - using %lu bytes for %lu objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, + (unsigned 
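The check_on_panic support above uses the stock panic-notifier mechanism: a notifier_block hung on panic_notifier_list whose callback runs in panic context, so it must not sleep, allocate, or take contended locks. The registration shape as a standalone sketch (the demo_ names are placeholders):

static int demo_panic_cb(struct notifier_block *nb,
			 unsigned long reason, void *arg)
{
	/* panic context: inspect state, print, return */
	return NOTIFY_OK;
}

static struct notifier_block demo_panic_nb = {
	.notifier_call = demo_panic_cb,
};

/* at init time: */
atomic_notifier_chain_register(&panic_notifier_list, &demo_panic_nb);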
long)KFENCE_NR_OBJECTS, (void *)__kfence_pool, + (void *)(__kfence_pool + KFENCE_POOL_SIZE)); +} + void __init kfence_init(void) { + stack_hash_seed = (u32)random_get_entropy(); + /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */ - if (!kfence_sample_interval) + if (!kfence_sample_interval && !__kfence_pool) return; - stack_hash_seed = (u32)random_get_entropy(); - if (!kfence_init_pool()) { + if (!kfence_init_pool_early()) { pr_err("%s failed\n", __func__); return; } - if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS)) - static_branch_enable(&kfence_allocation_key); + kfence_init_enable(); + + if (!kfence_sample_interval) + WRITE_ONCE(kfence_enabled, false); +} + +static int kfence_init_late(void) +{ + const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE; + +#ifdef CONFIG_CONTIG_ALLOC + struct page *pages; +#endif + + /* + * For kfence re_enabling on ARM64, kfence_pool should be allocated + * at startup instead of here. So just return -EINVAL here which means + * re_enabling is not supported. + */ + if (IS_ENABLED(CONFIG_ARM64)) + return -EINVAL; + + if (kfence_dynamic_init()) + return -ENOMEM; + +#ifdef CONFIG_CONTIG_ALLOC + pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL); + if (!pages) { + kfence_dynamic_destroy(); + return -ENOMEM; + } + + __kfence_pool = page_to_virt(pages); +#else + if (nr_pages > MAX_ORDER_NR_PAGES) { + pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n"); + return -EINVAL; + } + __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL); + if (!__kfence_pool) { + kfence_dynamic_destroy(); + return -ENOMEM; + } +#endif + + if (!kfence_init_pool_late()) { + pr_err("%s failed\n", __func__); + return -EBUSY; + } + + kfence_init_enable(); + return 0; +} + +static int kfence_enable_late(void) +{ + if (!__kfence_pool) + return kfence_init_late(); + WRITE_ONCE(kfence_enabled, true); queue_delayed_work(system_unbound_wq, &kfence_timer, 0); - pr_info("initialized - using %lu bytes for %lu objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, - (unsigned long)KFENCE_NR_OBJECTS, (void *)__kfence_pool, - (void *)(__kfence_pool + KFENCE_POOL_SIZE)); + pr_info("re-enabled\n"); + return 0; } void kfence_shutdown_cache(struct kmem_cache *s) @@ -1047,6 +1296,9 @@ void __kfence_free(void *addr) { struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); +#ifdef CONFIG_MEMCG + KFENCE_WARN_ON(meta->objcg); +#endif /* * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing * the object, as the object page may be recycled for other-typed diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h index e5f8f857791124ce08a5c5c6c028b9027d260d4c..867e7982adb57c3454f26474ba5001a223912622 100644 --- a/mm/kfence/kfence.h +++ b/mm/kfence/kfence.h @@ -89,6 +89,9 @@ struct kfence_metadata { struct kfence_track free_track; /* For updating alloc_covered on frees. */ u32 alloc_stack_hash; +#ifdef CONFIG_MEMCG + struct obj_cgroup *objcg; +#endif }; #ifdef CONFIG_KFENCE_DYNAMIC_OBJECTS diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c index c9952fc8d596af193b6aab64a28602beea42d715..0acbc736541297e5befceec4b1544f2855caee14 100644 --- a/mm/kfence/kfence_test.c +++ b/mm/kfence/kfence_test.c @@ -621,10 +621,11 @@ static void test_gfpzero(struct kunit *test) break; test_free(buf2); - if (i == KFENCE_NR_OBJECTS) { + if (kthread_should_stop() || (i == KFENCE_NR_OBJECTS)) { kunit_warn(test, "giving up ... 
cannot get same object back\n"); return; } + cond_resched(); } for (i = 0; i < size; i++) diff --git a/mm/kmemleak.c b/mm/kmemleak.c index fe6e3ae8e8c6719f7ae6bead5954e2f43109a523..118be6533374d5b5d7b5c01ce0191636818f4f42 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -788,6 +788,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) unsigned long flags; struct kmemleak_object *object; struct kmemleak_scan_area *area = NULL; + unsigned long untagged_ptr; + unsigned long untagged_objp; object = find_and_get_object(ptr, 1); if (!object) { @@ -796,6 +798,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) return; } + untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr); + untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer); + if (scan_area_cache) area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp)); @@ -807,8 +812,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp) goto out_unlock; } if (size == SIZE_MAX) { - size = object->pointer + object->size - ptr; - } else if (ptr + size > object->pointer + object->size) { + size = untagged_objp + object->size - untagged_ptr; + } else if (untagged_ptr + size > untagged_objp + object->size) { kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr); dump_object_info(object); kmem_cache_free(scan_area_cache, area); @@ -1119,7 +1124,7 @@ EXPORT_SYMBOL(kmemleak_no_scan); void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, gfp_t gfp) { - if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) + if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) kmemleak_alloc(__va(phys), size, min_count, gfp); } EXPORT_SYMBOL(kmemleak_alloc_phys); @@ -1133,7 +1138,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys); */ void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size) { - if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) + if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) kmemleak_free_part(__va(phys), size); } EXPORT_SYMBOL(kmemleak_free_part_phys); @@ -1145,7 +1150,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys); */ void __ref kmemleak_not_leak_phys(phys_addr_t phys) { - if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) + if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) kmemleak_not_leak(__va(phys)); } EXPORT_SYMBOL(kmemleak_not_leak_phys); @@ -1157,7 +1162,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys); */ void __ref kmemleak_ignore_phys(phys_addr_t phys) { - if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn) + if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn) kmemleak_ignore(__va(phys)); } EXPORT_SYMBOL(kmemleak_ignore_phys); @@ -1402,7 +1407,8 @@ static void kmemleak_scan(void) { unsigned long flags; struct kmemleak_object *object; - int i; + struct zone *zone; + int __maybe_unused i; int new_leaks = 0; jiffies_last_scan = jiffies; @@ -1442,9 +1448,9 @@ static void kmemleak_scan(void) * Struct page scanning for each node. 
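/*
 * Sketch of why the rewritten loop below iterates zones instead of
 * nodes: within one node's pfn span, pages of several zones can
 * interleave, so each pfn has to be checked against the zone being
 * walked. A minimal model (scan_page() is a hypothetical helper):
 *
 *	for_each_populated_zone(zone)
 *		for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++) {
 *			struct page *page = pfn_to_online_page(pfn);
 *
 *			if (page && page_zone(page) == zone)
 *				scan_page(page);
 *		}
 */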
*/ get_online_mems(); - for_each_online_node(i) { - unsigned long start_pfn = node_start_pfn(i); - unsigned long end_pfn = node_end_pfn(i); + for_each_populated_zone(zone) { + unsigned long start_pfn = zone->zone_start_pfn; + unsigned long end_pfn = zone_end_pfn(zone); unsigned long pfn; for (pfn = start_pfn; pfn < end_pfn; pfn++) { @@ -1453,8 +1459,8 @@ static void kmemleak_scan(void) if (!page) continue; - /* only scan pages belonging to this node */ - if (page_to_nid(page) != i) + /* only scan pages belonging to this zone */ + if (page_zone(page) != zone) continue; /* only scan if page is in use */ if (page_count(page) == 0) diff --git a/mm/madvise.c b/mm/madvise.c index 24abc79f8914e86836381dce3e0d406ad034f127..16b1c2885b63333fe674d9a8a3a44e2753f6d0ed 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -877,7 +877,6 @@ static long madvise_remove(struct vm_area_struct *vma, static int madvise_inject_error(int behavior, unsigned long start, unsigned long end) { - struct zone *zone; unsigned long size; if (!capable(CAP_SYS_ADMIN)) @@ -915,10 +914,6 @@ static int madvise_inject_error(int behavior, return ret; } - /* Ensure that all poisoned pages are removed from per-cpu lists */ - for_each_populated_zone(zone) - drain_all_pages(zone); - return 0; } #endif @@ -1229,8 +1224,7 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, iov_iter_advance(&iter, iovec.iov_len); } - if (ret == 0) - ret = total_len - iov_iter_count(&iter); + ret = (total_len - iov_iter_count(&iter)) ? : ret; release_mm: mmput(mm); diff --git a/mm/memblock.c b/mm/memblock.c index 873625fdc504791e0808704add6f0fff5cfa53b2..b93fa16292d04c4a89b1e6b2fd5f4ab78315cee4 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -366,14 +366,20 @@ void __init memblock_discard(void) addr = __pa(memblock.reserved.regions); size = PAGE_ALIGN(sizeof(struct memblock_region) * memblock.reserved.max); - __memblock_free_late(addr, size); + if (memblock_reserved_in_slab) + kfree(memblock.reserved.regions); + else + __memblock_free_late(addr, size); } if (memblock.memory.regions != memblock_memory_init_regions) { addr = __pa(memblock.memory.regions); size = PAGE_ALIGN(sizeof(struct memblock_region) * memblock.memory.max); - __memblock_free_late(addr, size); + if (memblock_memory_in_slab) + kfree(memblock.memory.regions); + else + __memblock_free_late(addr, size); } memblock_memory = NULL; diff --git a/mm/memcg_memfs_info.c b/mm/memcg_memfs_info.c new file mode 100644 index 0000000000000000000000000000000000000000..f404367ad08c70b6763bb590f70a3a3322f963ad --- /dev/null +++ b/mm/memcg_memfs_info.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include +#include +#include +#include "../fs/mount.h" + +#define SEQ_printf(m, x...) 
\ +do { \ + if (m) \ + seq_printf(m, x); \ + else \ + pr_info(x); \ +} while (0) + +struct print_files_control { + struct mem_cgroup *memcg; + struct seq_file *m; + unsigned long size_threshold; + unsigned long max_print_files; + + char *pathbuf; + unsigned long pathbuf_size; + + const char *fs_type_name; + struct vfsmount *vfsmnt; + unsigned long total_print_files; + unsigned long total_files_size; +}; + +static bool memfs_enable; +static unsigned long memfs_size_threshold; +static unsigned long memfs_max_print_files = 500; + +static const char *const fs_type_names[] = { + "rootfs", + "tmpfs", +}; + +static struct vfsmount *memfs_get_vfsmount(struct super_block *sb) +{ + struct mount *mnt; + struct vfsmount *vfsmnt; + + lock_mount_hash(); + list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { + /* + * There may be multiple mount points for a super_block, + * just need to print one of these mount points to determine + * the file path. + */ + vfsmnt = mntget(&mnt->mnt); + unlock_mount_hash(); + return vfsmnt; + } + unlock_mount_hash(); + + return NULL; +} + +static unsigned long memfs_count_in_mem_cgroup(struct mem_cgroup *memcg, + struct address_space *mapping) +{ + XA_STATE(xas, &mapping->i_pages, 0); + unsigned long size = 0; + struct page *page, *head; + + rcu_read_lock(); + xas_for_each(&xas, page, ULONG_MAX) { + if (xas_retry(&xas, page)) + continue; + + if (xa_is_value(page)) + continue; + + head = compound_head(page); + if ((unsigned long)memcg == head->memcg_data) + size += PAGE_SIZE; + } + rcu_read_unlock(); + return size; +} + +static void memfs_show_file_in_mem_cgroup(void *data, struct inode *inode) +{ + struct print_files_control *pfc = data; + struct dentry *dentry; + unsigned long size; + struct path path; + char *filepath; + + size = memfs_count_in_mem_cgroup(pfc->memcg, inode->i_mapping); + if (!size || size < pfc->size_threshold) + return; + + dentry = d_find_alias(inode); + if (!dentry) + return; + path.mnt = pfc->vfsmnt; + path.dentry = dentry; + filepath = d_absolute_path(&path, pfc->pathbuf, pfc->pathbuf_size); + if (!filepath || IS_ERR(filepath)) + filepath = "(too long)"; + pfc->total_print_files++; + pfc->total_files_size += size; + dput(dentry); + + /* + * To prevent excessive logs, limit the amount of data + * that can be output to logs. 
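/*
 * Sketch of the dual-sink convention set up by SEQ_printf() above: the
 * same reporting code serves both the cgroup control file and the OOM
 * log, selected by whether a seq_file is supplied.
 *
 *	mem_cgroup_print_memfs_info(memcg, m);	  // memfs_files_info read
 *	mem_cgroup_print_memfs_info(memcg, NULL); // OOM path, via pr_info()
 */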
+ */ + if (!pfc->m && pfc->total_print_files > pfc->max_print_files) + return; + + SEQ_printf(pfc->m, "%lukB %llukB %s\n", + size >> 10, inode->i_size >> 10, filepath); +} + +static void memfs_show_files_in_mem_cgroup(struct super_block *sb, void *data) +{ + struct print_files_control *pfc = data; + struct inode *inode, *toput_inode = NULL; + + if (strncmp(sb->s_type->name, + pfc->fs_type_name, strlen(pfc->fs_type_name))) + return; + + pfc->vfsmnt = memfs_get_vfsmount(sb); + if (!pfc->vfsmnt) + return; + + spin_lock(&sb->s_inode_list_lock); + list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { + spin_lock(&inode->i_lock); + + if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || + (inode->i_mapping->nrpages == 0 && !need_resched())) { + spin_unlock(&inode->i_lock); + continue; + } + __iget(inode); + spin_unlock(&inode->i_lock); + spin_unlock(&sb->s_inode_list_lock); + + memfs_show_file_in_mem_cgroup(pfc, inode); + + iput(toput_inode); + toput_inode = inode; + + cond_resched(); + spin_lock(&sb->s_inode_list_lock); + } + spin_unlock(&sb->s_inode_list_lock); + iput(toput_inode); + mntput(pfc->vfsmnt); +} + +void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, struct seq_file *m) +{ + struct print_files_control pfc = { + .memcg = memcg, + .m = m, + .max_print_files = memfs_max_print_files, + .size_threshold = memfs_size_threshold, + }; + char *pathbuf; + int i; + + if (!memfs_enable || !memcg) + return; + + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); + if (!pathbuf) { + SEQ_printf(m, "Show memfs failed due to OOM\n"); + return; + } + pfc.pathbuf = pathbuf; + pfc.pathbuf_size = PATH_MAX; + + for (i = 0; i < ARRAY_SIZE(fs_type_names); i++) { + pfc.fs_type_name = fs_type_names[i]; + pfc.total_print_files = 0; + pfc.total_files_size = 0; + + SEQ_printf(m, "Show %s files (memory-size > %lukB):\n", + pfc.fs_type_name, pfc.size_threshold >> 10); + SEQ_printf(m, " \n"); + iterate_supers(memfs_show_files_in_mem_cgroup, &pfc); + + SEQ_printf(m, "total files: %lu, total memory-size: %lukB\n", + pfc.total_print_files, pfc.total_files_size >> 10); + } + + kfree(pfc.pathbuf); +} + +int mem_cgroup_memfs_files_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + + mem_cgroup_print_memfs_info(memcg, m); + return 0; +} + +static ssize_t memfs_size_threshold_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", memfs_size_threshold); +} + +static ssize_t memfs_size_threshold_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t len) +{ + unsigned long count; + int err; + + err = kstrtoul(buf, 10, &count); + if (err) + return err; + memfs_size_threshold = count; + return len; +} + +static struct kobj_attribute memfs_size_threshold_attr = { + .attr = {"size_threshold", 0644}, + .show = &memfs_size_threshold_show, + .store = &memfs_size_threshold_store, +}; + +static ssize_t memfs_max_print_files_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", memfs_max_print_files); +} + +static ssize_t memfs_max_print_files_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t len) +{ + unsigned long count; + int err; + + err = kstrtoul(buf, 10, &count); + if (err) + return err; + memfs_max_print_files = count; + return len; +} + +static struct kobj_attribute memfs_max_print_files_attr = { + .attr = {"max_print_files_in_oom", 0644}, + .show = &memfs_max_print_files_show, + .store = 
&memfs_max_print_files_store, +}; + +static ssize_t memfs_enable_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", memfs_enable); +} + +static ssize_t memfs_enable_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t len) +{ + bool enable; + int err; + + err = kstrtobool(buf, &enable); + if (err) + return err; + + memfs_enable = enable; + return len; +} + +static struct kobj_attribute memfs_enable_attr = { + .attr = {"enable", 0644}, + .show = &memfs_enable_show, + .store = &memfs_enable_store, +}; + +static struct attribute *memfs_attr[] = { + &memfs_size_threshold_attr.attr, + &memfs_max_print_files_attr.attr, + &memfs_enable_attr.attr, + NULL, +}; + +static struct attribute_group memfs_attr_group = { + .attrs = memfs_attr, +}; + +void mem_cgroup_memfs_info_init(void) +{ + struct kobject *memcg_memfs_kobj; + + if (mem_cgroup_disabled()) + return; + + memcg_memfs_kobj = kobject_create_and_add("memcg_memfs_info", mm_kobj); + if (unlikely(!memcg_memfs_kobj)) { + pr_err("failed to create memcg_memfs kobject\n"); + return; + } + + if (sysfs_create_group(memcg_memfs_kobj, &memfs_attr_group)) { + pr_err("failed to register memcg_memfs group\n"); + kobject_put(memcg_memfs_kobj); + } +} diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2804fe9d3dae26dff086c7694346ea44e07e85e7..a850d1f3fc5bda98d5f9d6436f3bee992bf4966f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -62,6 +62,7 @@ #include #include #include +#include #include "internal.h" #include #include @@ -257,7 +258,7 @@ struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr) } #ifdef CONFIG_MEMCG_KMEM -extern spinlock_t css_set_lock; +static DEFINE_SPINLOCK(objcg_lock); static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg, unsigned int nr_pages); @@ -294,13 +295,13 @@ static void obj_cgroup_release(struct percpu_ref *ref) WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); nr_pages = nr_bytes >> PAGE_SHIFT; - spin_lock_irqsave(&css_set_lock, flags); + spin_lock_irqsave(&objcg_lock, flags); memcg = obj_cgroup_memcg(objcg); if (nr_pages) obj_cgroup_uncharge_pages(objcg, nr_pages); list_del(&objcg->list); mem_cgroup_put(memcg); - spin_unlock_irqrestore(&css_set_lock, flags); + spin_unlock_irqrestore(&objcg_lock, flags); percpu_ref_exit(ref); kfree_rcu(objcg, rcu); @@ -332,7 +333,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg, objcg = rcu_replace_pointer(memcg->objcg, NULL, true); - spin_lock_irq(&css_set_lock); + spin_lock_irq(&objcg_lock); /* Move active objcg to the parent's list */ xchg(&objcg->memcg, parent); @@ -347,7 +348,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg, } list_splice(&memcg->objcg_list, &parent->objcg_list); - spin_unlock_irq(&css_set_lock); + spin_unlock_irq(&objcg_lock); percpu_ref_kill(&objcg->refcnt); } @@ -1625,6 +1626,8 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) return; pr_info("%s", buf); kfree(buf); + + mem_cgroup_print_memfs_info(memcg, NULL); } /* @@ -3407,7 +3410,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, * * Caller is responsible for holding css reference for memcg. 
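/*
 * Sketch of the locking rule introduced by the objcg hunks above: objcg
 * list and reparenting updates now serialize on a dedicated, irq-safe
 * lock instead of borrowing the global css_set_lock.
 *
 *	static DEFINE_SPINLOCK(objcg_lock);
 *
 *	spin_lock_irqsave(&objcg_lock, flags);	// obj_cgroup_release()
 *	spin_lock_irq(&objcg_lock);		// memcg_reparent_objcgs()
 */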
*/ -static int mem_cgroup_force_empty(struct mem_cgroup *memcg) +int mem_cgroup_force_empty(struct mem_cgroup *memcg) { int nr_retries = MAX_RECLAIM_RETRIES; @@ -4553,6 +4556,53 @@ static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, spin_unlock(&memcg_oom_lock); } +static const char *const memcg_flag_name[] = { + "NO_REF", + "ONLINE", + "RELEASED", + "VISIBLE", + "DYING" +}; + +static void memcg_flag_stat_get(int mem_flags, int *stat) +{ + int i; + int flags = mem_flags; + + for (i = 0; i < ARRAY_SIZE(memcg_flag_name); i++) { + if (flags & 1) + stat[i] += 1; + flags >>= 1; + } +} + +static int memcg_flag_stat_show(struct seq_file *sf, void *v) +{ + int self_flag[ARRAY_SIZE(memcg_flag_name)]; + int child_flag[ARRAY_SIZE(memcg_flag_name)]; + int iter; + struct cgroup_subsys_state *child; + struct cgroup_subsys_state *css = seq_css(sf); + + memset(self_flag, 0, sizeof(self_flag)); + memset(child_flag, 0, sizeof(child_flag)); + + memcg_flag_stat_get(css->flags, self_flag); + + rcu_read_lock(); + css_for_each_child(child, css) + memcg_flag_stat_get(child->flags, child_flag); + rcu_read_unlock(); + + for (iter = 0; iter < ARRAY_SIZE(memcg_flag_name); iter++) + seq_printf(sf, "%s %d\n", memcg_flag_name[iter], self_flag[iter]); + + for (iter = 0; iter < ARRAY_SIZE(memcg_flag_name); iter++) + seq_printf(sf, "CHILD_%s %d\n", memcg_flag_name[iter], child_flag[iter]); + + return 0; +} + static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) { struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); @@ -4561,6 +4611,9 @@ static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v) seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); seq_printf(sf, "oom_kill %lu\n", atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); + seq_printf(sf, "oom_kill_local %lu\n", + atomic_long_read(&memcg->memory_events_local[MEMCG_OOM_KILL])); + return 0; } @@ -5121,6 +5174,74 @@ static ssize_t memory_high_write(struct kernfs_open_file *of, return nbytes; } +static void __memcg_events_show(struct seq_file *m, atomic_long_t *events) +{ + seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW])); + seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH])); + seq_printf(m, "limit_in_bytes %lu\n", + atomic_long_read(&events[MEMCG_MAX])); + seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM])); +} + +static int memcg_events_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_seq(m); + + __memcg_events_show(m, memcg->memory_events); + return 0; +} + +static int memcg_events_local_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_seq(m); + + __memcg_events_show(m, memcg->memory_events_local); + return 0; +} + +static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, + size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + unsigned int nr_retries = MAX_RECLAIM_RETRIES; + unsigned long nr_to_reclaim, nr_reclaimed = 0; + int err; + + buf = strstrip(buf); + err = page_counter_memparse(buf, "", &nr_to_reclaim); + if (err) + return err; + + if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && + mem_cgroup_is_root(memcg)) + return -EINVAL; + + while (nr_reclaimed < nr_to_reclaim) { + unsigned long reclaimed; + + if (signal_pending(current)) + return -EINTR; + + /* This is the final attempt, drain percpu lru caches in the + * hope of introducing more evictable pages for + * try_to_free_mem_cgroup_pages(). 
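/*
 * Hypothetical userspace use of the memory.reclaim file added by this
 * hunk (cgroup path and amount are examples only): writing a size
 * requests proactive reclaim; -EAGAIN means the retry budget ran out
 * before the full amount could be reclaimed.
 *
 *	int fd = open("/sys/fs/cgroup/example/memory.reclaim", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		if (write(fd, "64M", 3) < 0)	// suffixes parsed by memparse()
 *			perror("memory.reclaim");
 *		close(fd);
 *	}
 */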
+ */ + if (!nr_retries) + lru_add_drain_all(); + + reclaimed = try_to_free_mem_cgroup_pages(memcg, + nr_to_reclaim - nr_reclaimed, + GFP_KERNEL, true); + + if (!reclaimed && !nr_retries--) + return -EAGAIN; + + nr_reclaimed += reclaimed; + } + + return nbytes; +} + static struct cftype mem_cgroup_legacy_files[] = { { .name = "usage_in_bytes", @@ -5185,6 +5306,10 @@ static struct cftype mem_cgroup_legacy_files[] = { .write_u64 = mem_cgroup_oom_control_write, .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), }, + { + .name = "flag_stat", + .seq_show = memcg_flag_stat_show, + }, { .name = "pressure_level", }, @@ -5219,6 +5344,12 @@ static struct cftype mem_cgroup_legacy_files[] = { .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE | CFTYPE_NOT_ON_ROOT, }, #endif +#ifdef CONFIG_MEMCG_MEMFS_INFO + { + .name = "memfs_files_info", + .seq_show = mem_cgroup_memfs_files_show, + }, +#endif #ifdef CONFIG_NUMA { .name = "numa_stat", @@ -5296,6 +5427,22 @@ static struct cftype mem_cgroup_legacy_files[] = { .seq_show = memory_high_show, .write = memory_high_write, }, + { + .name = "events", + .flags = CFTYPE_NOT_ON_ROOT, + .file_offset = offsetof(struct mem_cgroup, events_file), + .seq_show = memcg_events_show, + }, + { + .name = "events.local", + .flags = CFTYPE_NOT_ON_ROOT, + .file_offset = offsetof(struct mem_cgroup, events_local_file), + .seq_show = memcg_events_local_show, + }, + { + .name = "reclaim", + .write = memory_reclaim, + }, { }, /* terminate */ }; @@ -6720,6 +6867,11 @@ static struct cftype memory_files[] = { .seq_show = memory_oom_group_show, .write = memory_oom_group_write, }, + { + .name = "reclaim", + .flags = CFTYPE_NS_DELEGATABLE, + .write = memory_reclaim, + }, { } /* terminate */ }; @@ -7308,7 +7460,7 @@ static int __init cgroup_memory(char *s) else if (!strcmp(token, "kmem")) cgroup_memory_nokmem = false; } - return 0; + return 1; } __setup("cgroup.memory=", cgroup_memory); @@ -7358,6 +7510,8 @@ static int __init mem_cgroup_init(void) soft_limit_tree.rb_tree_per_node[node] = rtpn; } + mem_cgroup_memfs_info_init(); + return 0; } subsys_initcall(mem_cgroup_init); diff --git a/mm/memfd.c b/mm/memfd.c index 2647c898990c80491b512944a890d47c90f23aca..fae4142f7d25451e048d43a54bb8debcda4d64c4 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -31,20 +31,28 @@ static void memfd_tag_pins(struct xa_state *xas) { struct page *page; - unsigned int tagged = 0; + int latency = 0; + int cache_count; lru_add_drain(); xas_lock_irq(xas); xas_for_each(xas, page, ULONG_MAX) { - if (xa_is_value(page)) - continue; - page = find_subpage(page, xas->xa_index); - if (page_count(page) - page_mapcount(page) > 1) + cache_count = 1; + if (!xa_is_value(page) && + PageTransHuge(page) && !PageHuge(page)) + cache_count = HPAGE_PMD_NR; + + if (!xa_is_value(page) && + page_count(page) - total_mapcount(page) != cache_count) xas_set_mark(xas, MEMFD_TAG_PINNED); + if (cache_count != 1) + xas_set(xas, page->index + cache_count); - if (++tagged % XA_CHECK_SCHED) + latency += cache_count; + if (latency < XA_CHECK_SCHED) continue; + latency = 0; xas_pause(xas); xas_unlock_irq(xas); @@ -73,7 +81,8 @@ static int memfd_wait_for_pins(struct address_space *mapping) error = 0; for (scan = 0; scan <= LAST_SCAN; scan++) { - unsigned int tagged = 0; + int latency = 0; + int cache_count; if (!xas_marked(&xas, MEMFD_TAG_PINNED)) break; @@ -87,10 +96,14 @@ static int memfd_wait_for_pins(struct address_space *mapping) xas_lock_irq(&xas); xas_for_each_marked(&xas, page, ULONG_MAX, MEMFD_TAG_PINNED) { bool clear = true; - if 
(xa_is_value(page)) - continue; - page = find_subpage(page, xas.xa_index); - if (page_count(page) - page_mapcount(page) != 1) { + + cache_count = 1; + if (!xa_is_value(page) && + PageTransHuge(page) && !PageHuge(page)) + cache_count = HPAGE_PMD_NR; + + if (!xa_is_value(page) && cache_count != + page_count(page) - total_mapcount(page)) { /* * On the last scan, we clean up all those tags * we inserted; but make a note that we still @@ -103,8 +116,11 @@ static int memfd_wait_for_pins(struct address_space *mapping) } if (clear) xas_clear_mark(&xas, MEMFD_TAG_PINNED); - if (++tagged % XA_CHECK_SCHED) + + latency += cache_count; + if (latency < XA_CHECK_SCHED) continue; + latency = 0; xas_pause(&xas); xas_unlock_irq(&xas); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index fb74e61e5aa4d490283a3ba87031f9828df894ec..bfa6d1478a7588133d4eeaad749062859ce273e8 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -541,7 +541,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, /* * Collect the processes who have the corrupted page mapped to kill. */ -static void collect_procs(struct page *page, struct list_head *tokill, +void collect_procs(struct page *page, struct list_head *tokill, int force_early) { if (!page->mapping) @@ -552,6 +552,7 @@ static void collect_procs(struct page *page, struct list_head *tokill, else collect_procs_file(page, tokill, force_early); } +EXPORT_SYMBOL_GPL(collect_procs); static const char *action_name[] = { [MF_IGNORED] = "Ignored", @@ -809,7 +810,7 @@ static int me_swapcache_clean(struct page *p, unsigned long pfn) */ static int me_huge_page(struct page *p, unsigned long pfn) { - int res = 0; + int res; struct page *hpage = compound_head(p); struct address_space *mapping; @@ -820,6 +821,7 @@ static int me_huge_page(struct page *p, unsigned long pfn) if (mapping) { res = truncate_error_page(hpage, pfn, mapping); } else { + res = MF_FAILED; unlock_page(hpage); /* * migration entry prevents later access on error anonymous @@ -828,8 +830,10 @@ static int me_huge_page(struct page *p, unsigned long pfn) */ if (PageAnon(hpage)) put_page(hpage); - dissolve_free_huge_page(p); - res = MF_RECOVERED; + if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) { + page_ref_inc(p); + res = MF_RECOVERED; + } lock_page(hpage); } @@ -946,13 +950,13 @@ static int page_action(struct page_state *ps, struct page *p, } /** - * get_hwpoison_page() - Get refcount for memory error handling: + * __get_hwpoison_page() - Get refcount for memory error handling: * @page: raw error page (hit by memory error) * * Return: return 0 if failed to grab the refcount, otherwise true (some * non-zero value.) */ -static int get_hwpoison_page(struct page *page) +static int __get_hwpoison_page(struct page *page) { struct page *head = compound_head(page); @@ -982,6 +986,26 @@ static int get_hwpoison_page(struct page *page) return 0; } +static int get_hwpoison_page(struct page *p) +{ + int ret; + bool drained = false; + +retry: + ret = __get_hwpoison_page(p); + if (!ret && !is_free_buddy_page(p) && !page_count(p) && !drained) { + /* + * The page might be in a pcplist, so try to drain those + * and see if we are lucky. + */ + drain_all_pages(page_zone(p)); + drained = true; + goto retry; + } + + return ret; +} + /* * Do all that is necessary to remove user space mappings. Unmap * the pages and send SIGBUS to the processes if the data was dirty. 
@@ -1147,6 +1171,15 @@ static int try_to_split_thp_page(struct page *page, const char *msg) return 0; } +static bool hugetlb_hwpoison_full; + +static int __init enable_hugetlb_hwpoison_full(char *str) +{ + hugetlb_hwpoison_full = true; + return 0; +} +early_param("hugetlb_hwpoison_full", enable_hugetlb_hwpoison_full); + static int memory_failure_hugetlb(unsigned long pfn, int flags) { struct page *p = pfn_to_page(pfn); @@ -1171,9 +1204,13 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags) return 0; } unlock_page(head); - dissolve_free_huge_page(p); - action_result(pfn, MF_MSG_FREE_HUGE, MF_DELAYED); - return 0; + res = MF_FAILED; + if (!dissolve_free_huge_page(p) && take_page_off_buddy(p)) { + page_ref_inc(p); + res = MF_RECOVERED; + } + action_result(pfn, MF_MSG_FREE_HUGE, res); + return res == MF_RECOVERED ? 0 : -EBUSY; } lock_page(head); @@ -1196,7 +1233,8 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags) * - other mm code walking over page table is aware of pud-aligned * hwpoison entries. */ - if (huge_page_size(page_hstate(head)) > PMD_SIZE) { + if (!hugetlb_hwpoison_full && + huge_page_size(page_hstate(head)) > PMD_SIZE) { action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED); res = -EBUSY; goto out; @@ -1327,25 +1365,34 @@ int memory_failure(unsigned long pfn, int flags) int res = 0; unsigned long page_flags; static DEFINE_MUTEX(mf_mutex); + bool retry = true; if (!sysctl_memory_failure_recovery) panic("Memory failure on page %lx", pfn); + mutex_lock(&mf_mutex); + p = pfn_to_online_page(pfn); if (!p) { + res = arch_memory_failure(pfn, flags); + if (res == 0) + goto unlock_mutex; + if (pfn_valid(pfn)) { pgmap = get_dev_pagemap(pfn, NULL); - if (pgmap) - return memory_failure_dev_pagemap(pfn, flags, - pgmap); + if (pgmap) { + res = memory_failure_dev_pagemap(pfn, flags, + pgmap); + goto unlock_mutex; + } } pr_err("Memory failure: %#lx: memory outside kernel control\n", pfn); - return -ENXIO; + res = -ENXIO; + goto unlock_mutex; } - mutex_lock(&mf_mutex); - +try_again: if (PageHuge(p)) { res = memory_failure_hugetlb(pfn, flags); goto unlock_mutex; @@ -1374,7 +1421,21 @@ int memory_failure(unsigned long pfn, int flags) */ if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) { if (is_free_buddy_page(p)) { - action_result(pfn, MF_MSG_BUDDY, MF_DELAYED); + if (take_page_off_buddy(p)) { + page_ref_inc(p); + res = MF_RECOVERED; + } else { + /* We lost the race, try again */ + if (retry) { + ClearPageHWPoison(p); + num_poisoned_pages_dec(); + retry = false; + goto try_again; + } + res = MF_FAILED; + } + action_result(pfn, MF_MSG_BUDDY, res); + res = res == MF_RECOVERED ? 0 : -EBUSY; } else { action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED); res = -EBUSY; @@ -1400,14 +1461,6 @@ int memory_failure(unsigned long pfn, int flags) * walked by the page reclaim code, however that's not a big loss. */ shake_page(p, 0); - /* shake_page could have turned it free. 
*/ - if (!PageLRU(p) && is_free_buddy_page(p)) { - if (flags & MF_COUNT_INCREASED) - action_result(pfn, MF_MSG_BUDDY, MF_DELAYED); - else - action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED); - goto unlock_mutex; - } lock_page(p); diff --git a/mm/memory.c b/mm/memory.c index 58e3e276d753ade65bab0de22648478c8df25c74..6d3a07c821fe45281fd16871623a00bb1b466863 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1204,6 +1204,17 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) return ret; } +/* Whether we should zap all COWed (private) pages too */ +static inline bool should_zap_cows(struct zap_details *details) +{ + /* By default, zap all pages */ + if (!details) + return true; + + /* Or, we zap COWed pages only if the caller wants to */ + return !details->check_mapping; +} + static unsigned long zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, @@ -1295,16 +1306,18 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, continue; } - /* If details->check_mapping, we leave swap entries. */ - if (unlikely(details)) - continue; - - if (!non_swap_entry(entry)) + if (!non_swap_entry(entry)) { + /* Genuine swap entry, hence a private anon page */ + if (!should_zap_cows(details)) + continue; rss[MM_SWAPENTS]--; - else if (is_migration_entry(entry)) { + } else if (is_migration_entry(entry)) { struct page *page; page = migration_entry_to_page(entry); + if (details && details->check_mapping && + details->check_mapping != page_rmapping(page)) + continue; rss[mm_counter(page)]--; } if (unlikely(!free_swap_and_cache(entry))) @@ -3726,11 +3739,20 @@ static vm_fault_t __do_fault(struct vm_fault *vmf) return ret; if (unlikely(PageHWPoison(vmf->page))) { - if (ret & VM_FAULT_LOCKED) - unlock_page(vmf->page); - put_page(vmf->page); + struct page *page = vmf->page; + vm_fault_t poisonret = VM_FAULT_HWPOISON; + if (ret & VM_FAULT_LOCKED) { + if (page_mapped(page)) + unmap_mapping_pages(page_mapping(page), + page->index, 1, false); + /* Retry if a clean page was removed from the cache. 
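/*
 * Recovery contract assumed in the __do_fault() hunk above (a reading
 * of the code, not new behavior): a clean poisoned page can be dropped
 * from the page cache, and VM_FAULT_NOPAGE then restarts the fault
 * against a fresh copy; if invalidation fails (dirty or pinned), the
 * fault still returns VM_FAULT_HWPOISON. Roughly:
 *
 *	poisonret = invalidate_inode_page(page) ? VM_FAULT_NOPAGE
 *						: VM_FAULT_HWPOISON;
 */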
*/ + if (invalidate_inode_page(page)) + poisonret = VM_FAULT_NOPAGE; + unlock_page(page); + } + put_page(page); vmf->page = NULL; - return VM_FAULT_HWPOISON; + return poisonret; } if (unlikely(!(ret & VM_FAULT_LOCKED))) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 746383b54605cddc85e5bcade698b66e4a9a99fa..e7fdb3a749b505bc1715ae063331262be59baf04 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -841,7 +841,6 @@ static int vma_replace_policy(struct vm_area_struct *vma, static int mbind_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct mempolicy *new_pol) { - struct vm_area_struct *next; struct vm_area_struct *prev; struct vm_area_struct *vma; int err = 0; @@ -856,8 +855,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, if (start > vma->vm_start) prev = vma; - for (; vma && vma->vm_start < end; prev = vma, vma = next) { - next = vma->vm_next; + for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) { vmstart = max(start, vma->vm_start); vmend = min(end, vma->vm_end); @@ -875,10 +873,6 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, new_pol, vma->vm_userfaultfd_ctx); if (prev) { vma = prev; - next = vma->vm_next; - if (mpol_equal(vma_policy(vma), new_pol)) - continue; - /* vma_merge() joined vma && vma->next, case 8 */ goto replace; } if (vma->vm_start != vmstart) { @@ -1308,11 +1302,10 @@ static struct page *new_page(struct page *page, unsigned long start) } #endif -static long do_mbind(unsigned long start, unsigned long len, - unsigned short mode, unsigned short mode_flags, - nodemask_t *nmask, unsigned long flags) +long __do_mbind(unsigned long start, unsigned long len, + unsigned short mode, unsigned short mode_flags, + nodemask_t *nmask, unsigned long flags, struct mm_struct *mm) { - struct mm_struct *mm = current->mm; struct mempolicy *new; unsigned long end; int err; @@ -1411,6 +1404,13 @@ static long do_mbind(unsigned long start, unsigned long len, return err; } +static long do_mbind(unsigned long start, unsigned long len, + unsigned short mode, unsigned short mode_flags, + nodemask_t *nmask, unsigned long flags) +{ + return __do_mbind(start, len, mode, mode_flags, nmask, flags, current->mm); +} + /* * User space interface with variable sized bitmaps for nodelists. */ @@ -2014,17 +2014,26 @@ unsigned int mempolicy_slab_node(void) */ static unsigned offset_il_node(struct mempolicy *pol, unsigned long n) { - unsigned nnodes = nodes_weight(pol->v.nodes); - unsigned target; + nodemask_t nodemask = pol->v.nodes; + unsigned int target, nnodes; int i; int nid; + /* + * The barrier will stabilize the nodemask in a register or on + * the stack so that it will stop changing under the code. + * + * Between first_node() and next_node(), pol->nodes could be changed + * by other threads. So we put pol->nodes in a local stack. 
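/*
 * Sketch of the interleaving the snapshot below avoids (thread B
 * rebinding the policy while thread A computes an interleave node):
 *
 *	A: nnodes = nodes_weight(pol->v.nodes);	// sees, say, 4 nodes
 *	B: rebind shrinks pol->v.nodes to 2 nodes
 *	A: first_node()/next_node() walk	// disagrees with nnodes
 *
 * Copying pol->v.nodes to the stack once, behind barrier(), makes the
 * weight and the walk operate on a single consistent value.
 */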
+ */ + barrier(); + nnodes = nodes_weight(nodemask); if (!nnodes) return numa_node_id(); target = (unsigned int)n % nnodes; - nid = first_node(pol->v.nodes); + nid = first_node(nodemask); for (i = 0; i < target; i++) - nid = next_node(nid, pol->v.nodes); + nid = next_node(nid, nodemask); return nid; } @@ -2681,6 +2690,7 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start, mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL); if (!mpol_new) goto err_out; + atomic_set(&mpol_new->refcnt, 1); goto restart; } diff --git a/mm/mmap.c b/mm/mmap.c index 5c9b27aa337d82b06d59880ad226737f91fcee3a..5ad32537604a5bf17943856ef0e72d4190d4d8a8 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2873,7 +2873,7 @@ static int __init cmdline_parse_stack_guard_gap(char *p) if (!*endptr) stack_guard_gap = val << PAGE_SHIFT; - return 0; + return 1; } __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); diff --git a/mm/mprotect.c b/mm/mprotect.c index ab709023e9aafa5cb93ffd09a8fc7ec7ffddb630..4ca703549568419846cde196fcab53bce6ddf056 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -94,7 +94,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, /* Also skip shared copy-on-write pages */ if (is_cow_mapping(vma->vm_flags) && - page_mapcount(page) != 1) + page_count(page) != 1) continue; /* diff --git a/mm/mremap.c b/mm/mremap.c index ecfca97b97ae14cce76ab8858725e7296ebffa8c..2f7f3494a990bc003369e3de46c2b6c922bf5931 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -487,6 +487,9 @@ unsigned long move_page_tables(struct vm_area_struct *vma, pmd_t *old_pmd, *new_pmd; pud_t *old_pud, *new_pud; + if (!len) + return 0; + old_end = old_addr + len; flush_cache_range(vma, old_addr, old_end); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a72df34fa210ad0b4ab5def7d80ca01b73c40a27..ec73cca1726c533b8cd1af0be572586bbff8ef0c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2204,7 +2204,7 @@ static void check_new_page_bad(struct page *page) /* * This page is about to be returned from the page allocator */ -static inline int check_new_page(struct page *page) +inline int check_new_page(struct page *page) { if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) @@ -4017,7 +4017,9 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
va_list args; static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); - if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs)) + if ((gfp_mask & __GFP_NOWARN) || + !__ratelimit(&nopage_rs) || + ((gfp_mask & __GFP_DMA) && !has_managed_dma())) return; va_start(args, fmt); @@ -5893,7 +5895,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) do { zone_type--; zone = pgdat->node_zones + zone_type; - if (managed_zone(zone)) { + if (populated_zone(zone)) { zoneref_set_zone(zone, &zonerefs[nr_zones++]); check_highest_zone(zone_type); } @@ -7675,10 +7677,17 @@ static void __init find_zone_movable_pfns_for_nodes(void) out2: /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ - for (nid = 0; nid < MAX_NUMNODES; nid++) + for (nid = 0; nid < MAX_NUMNODES; nid++) { + unsigned long start_pfn, end_pfn; + zone_movable_pfn[nid] = roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); + get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); + if (zone_movable_pfn[nid] >= end_pfn) + zone_movable_pfn[nid] = 0; + } + out: /* restore the node_state */ node_states[N_MEMORY] = saved_node_state; @@ -9228,3 +9237,18 @@ enum node_type get_node_type(int nid) return nodes_type[nid]; } #endif + +#ifdef CONFIG_ZONE_DMA +bool has_managed_dma(void) +{ + struct pglist_data *pgdat; + + for_each_online_pgdat(pgdat) { + struct zone *zone = &pgdat->node_zones[ZONE_DMA]; + + if (managed_zone(zone)) + return true; + } + return false; +} +#endif /* CONFIG_ZONE_DMA */ diff --git a/mm/page_counter.c b/mm/page_counter.c index b24a60b28bb0127be5b1d6cb19325efe8dfa70f9..6d8715e3255d95c8067e74f79b20b8f44d94eb47 100644 --- a/mm/page_counter.c +++ b/mm/page_counter.c @@ -52,9 +52,13 @@ void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) long new; new = atomic_long_sub_return(nr_pages, &counter->usage); - propagate_protected_usage(counter, new); /* More uncharges than charges? */ - WARN_ON_ONCE(new < 0); + if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n", + new, nr_pages)) { + new = 0; + atomic_long_set(&counter->usage, new); + } + propagate_protected_usage(counter, new); } /** diff --git a/mm/page_io.c b/mm/page_io.c index 21f3160d39a83edf382f0583309b0146cbee75f0..ee28c39e566e48d5e1e72e723209582367790827 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -69,54 +69,6 @@ void end_swap_bio_write(struct bio *bio) bio_put(bio); } -static void swap_slot_free_notify(struct page *page) -{ - struct swap_info_struct *sis; - struct gendisk *disk; - swp_entry_t entry; - - /* - * There is no guarantee that the page is in swap cache - the software - * suspend code (at least) uses end_swap_bio_read() against a non- - * swapcache page. So we must check PG_swapcache before proceeding with - * this optimization. - */ - if (unlikely(!PageSwapCache(page))) - return; - - sis = page_swap_info(page); - if (data_race(!(sis->flags & SWP_BLKDEV))) - return; - - /* - * The swap subsystem performs lazy swap slot freeing, - * expecting that the page will be swapped out again. - * So we can avoid an unnecessary write if the page - * isn't redirtied. - * This is good for real swap storage because we can - * reduce unnecessary I/O and enhance wear-leveling - * if an SSD is used as the as swap device. - * But if in-memory swap device (eg zram) is used, - * this causes a duplicated copy between uncompressed - * data in VM-owned memory and compressed data in - * zram-owned memory. 
So let's free zram-owned memory - * and make the VM-owned decompressed page *dirty*, - * so the page should be swapped out somewhere again if - * we again wish to reclaim it. - */ - disk = sis->bdev->bd_disk; - entry.val = page_private(page); - if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) { - unsigned long offset; - - offset = swp_offset(entry); - - SetPageDirty(page); - disk->fops->swap_slot_free_notify(sis->bdev, - offset); - } -} - static void end_swap_bio_read(struct bio *bio) { struct page *page = bio_first_page_all(bio); @@ -132,7 +84,6 @@ static void end_swap_bio_read(struct bio *bio) } SetPageUptodate(page); - swap_slot_free_notify(page); out: unlock_page(page); WRITE_ONCE(bio->bi_private, NULL); @@ -411,11 +362,6 @@ int swap_readpage(struct page *page, bool synchronous) if (sis->flags & SWP_SYNCHRONOUS_IO) { ret = bdev_read_page(sis->bdev, swap_page_sector(page), page); if (!ret) { - if (trylock_page(page)) { - swap_slot_free_notify(page); - unlock_page(page); - } - count_vm_event(PSWPIN); goto out; } diff --git a/mm/readahead.c b/mm/readahead.c index c5b0457415bef29257619eefe99bcd53881fed53..ed23d5dec12387fd862caad619b9d072d44649fc 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -26,6 +26,7 @@ #include "internal.h" +#define READAHEAD_FIRST_SIZE (2 * 1024 * 1024) /* * Initialise a struct file's readahead state. Assumes that the caller has * memset *ra to zero. @@ -549,10 +550,41 @@ static void ondemand_readahead(struct readahead_control *ractl, do_page_cache_ra(ractl, ra->size, ra->async_size); } +/* + * Try to read the first @ra_size bytes from the head of the file. + */ +static bool page_cache_readahead_from_head(struct address_space *mapping, + struct file *filp, pgoff_t offset, + unsigned long req_size, + unsigned long ra_size) +{ + struct backing_dev_info *bdi = inode_to_bdi(mapping->host); + struct file_ra_state *ra = &filp->f_ra; + unsigned long size = min_t(unsigned long, ra_size, + file_inode(filp)->i_size); + unsigned long nrpages = (size + PAGE_SIZE - 1) / PAGE_SIZE; + unsigned long max_pages; + unsigned int offs = 0; + + /* Cannot read data beyond the target size, fall back to the normal path */ + if (offset + req_size > nrpages) + return false; + + max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages); + max_pages = min(max_pages, nrpages); + while (offs < nrpages) { + force_page_cache_readahead(mapping, filp, offs, max_pages); + offs += max_pages; + } + return true; +} + void page_cache_sync_ra(struct readahead_control *ractl, struct file_ra_state *ra, unsigned long req_count) { - bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM); + bool do_forced_ra = ractl->file && + ((ractl->file->f_mode & FMODE_RANDOM) || + (ractl->file->f_ctl_mode & FMODE_CTL_RANDOM)); /* * Even if read-ahead is disabled, issue this request as read-ahead @@ -567,6 +599,12 @@ void page_cache_sync_ra(struct readahead_control *ractl, do_forced_ra = true; } + /* try to read the first READAHEAD_FIRST_SIZE bytes into the page cache */ + if (ractl->file && (ractl->file->f_ctl_mode & FMODE_CTL_WILLNEED) && + page_cache_readahead_from_head(ractl->mapping, ractl->file, + ractl->_index, req_count, READAHEAD_FIRST_SIZE)) + return; + /* be dumb */ if (do_forced_ra) { force_page_cache_ra(ractl, ra, req_count); diff --git a/mm/rmap.c b/mm/rmap.c index a780862cd226301b249d2032562677ed146f6730..0dc39cf94345da8f16d51616b88b33ba60268d6b 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1657,7 +1657,30 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, /* MADV_FREE page check */ if
(!PageSwapBacked(page)) { - if (!PageDirty(page)) { + int ref_count, map_count; + + /* + * Synchronize with gup_pte_range(): + * - clear PTE; barrier; read refcount + * - inc refcount; barrier; read PTE + */ + smp_mb(); + + ref_count = page_ref_count(page); + map_count = page_mapcount(page); + + /* + * Order reads for page refcount and dirty flag + * (see comments in __remove_mapping()). + */ + smp_rmb(); + + /* + * The only page refs must be one from isolation + * plus the rmap(s) (dropped by discard:). + */ + if (ref_count == 1 + map_count && + !PageDirty(page)) { /* Invalidate as we cleared the pte */ mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE); diff --git a/mm/share_pool.c b/mm/share_pool.c index 494a829d6f3a9d1b22fe22600edc2307dbc97819..750524f1afc2ca03e53f403a488bdc34754c2d87 100644 --- a/mm/share_pool.c +++ b/mm/share_pool.c @@ -16,7 +16,6 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ - #define pr_fmt(fmt) "share pool: " fmt #include @@ -59,13 +58,15 @@ #define spg_valid(spg) ((spg)->is_alive == true) +/* Use spa va address as mmap offset. This can work because spa_file + * is setup with 64-bit address space. So va shall be well covered. + */ +#define addr_offset(spa) ((spa)->va_start) + #define byte2kb(size) ((size) >> 10) #define byte2mb(size) ((size) >> 20) #define page2kb(page_num) ((page_num) << (PAGE_SHIFT - 10)) -#define SINGLE_GROUP_MODE 1 -#define MULTI_GROUP_MODE 2 - #define MAX_GROUP_FOR_SYSTEM 50000 #define MAX_GROUP_FOR_TASK 3000 #define MAX_PROC_PER_GROUP 1024 @@ -94,8 +95,6 @@ int sysctl_share_pool_map_lock_enable; int sysctl_sp_perf_k2u; int sysctl_sp_perf_alloc; -static int share_pool_group_mode = SINGLE_GROUP_MODE; - static int system_group_count; static unsigned int sp_device_number; @@ -131,29 +130,271 @@ static DECLARE_RWSEM(sp_spg_stat_sem); /* for kthread buff_module_guard_work */ static struct sp_proc_stat kthread_stat; -/* The caller must hold sp_group_sem */ -static struct sp_group_master *sp_init_group_master_locked( - struct mm_struct *mm, bool *exist) +#define SP_MAPPING_DVPP 0x1 +#define SP_MAPPING_NORMAL 0x2 +static struct sp_mapping *sp_mapping_normal; + +static void sp_mapping_range_init(struct sp_mapping *spm) +{ + int i; + + for (i = 0; i < MAX_DEVID; i++) { + if (spm->flag & SP_MAPPING_NORMAL) { + spm->start[i] = MMAP_SHARE_POOL_START; + spm->end[i] = MMAP_SHARE_POOL_16G_START; + continue; + } + + if (!is_sp_dev_addr_enabled(i)) { + spm->start[i] = MMAP_SHARE_POOL_16G_START + + i * MMAP_SHARE_POOL_16G_SIZE; + spm->end[i] = spm->start[i] + MMAP_SHARE_POOL_16G_SIZE; + } else { + spm->start[i] = sp_dev_va_start[i]; + spm->end[i] = spm->start[i] + sp_dev_va_size[i]; + } + } +} + +static struct sp_mapping *sp_mapping_create(unsigned long flag) +{ + struct sp_mapping *spm; + + spm = kzalloc(sizeof(struct sp_mapping), GFP_KERNEL); + if (!spm) + return ERR_PTR(-ENOMEM); + + spm->flag = flag; + sp_mapping_range_init(spm); + atomic_set(&spm->user, 0); + spm->area_root = RB_ROOT; + INIT_LIST_HEAD(&spm->group_head); + + return spm; +} + +static void sp_mapping_destroy(struct sp_mapping *spm) +{ + kfree(spm); +} + +static void sp_mapping_attach(struct sp_group *spg, struct sp_mapping *spm) +{ + atomic_inc(&spm->user); + if (spm->flag & SP_MAPPING_DVPP) { + spg->dvpp = spm; + list_add_tail(&spg->mnode, &spm->group_head); + } else if (spm->flag & SP_MAPPING_NORMAL) + spg->normal = spm; +} + +static void sp_mapping_detach(struct sp_group *spg, struct sp_mapping *spm) +{ + 
if (!spm) + return; + if (spm->flag & SP_MAPPING_DVPP) + list_del(&spg->mnode); + if (atomic_dec_and_test(&spm->user)) + sp_mapping_destroy(spm); +} + +/* merge the old mapping into the new one; the old mapping is destroyed */ +static void sp_mapping_merge(struct sp_mapping *new, struct sp_mapping *old) +{ + struct sp_group *spg, *tmp; + + if (new == old) + return; + + list_for_each_entry_safe(spg, tmp, &old->group_head, mnode) { + list_move_tail(&spg->mnode, &new->group_head); + spg->dvpp = new; + } + + atomic_add(atomic_read(&old->user), &new->user); + sp_mapping_destroy(old); +} + +static bool is_mapping_empty(struct sp_mapping *spm) +{ + return RB_EMPTY_ROOT(&spm->area_root); +} + +static bool can_mappings_merge(struct sp_mapping *m1, struct sp_mapping *m2) +{ + int i; + + for (i = 0; i < sp_device_number; i++) + if (m1->start[i] != m2->start[i] || m1->end[i] != m2->end[i]) + return false; + + return true; +} + +/* + * 1. The mappings of the local group are set at creation. + * 2. This is used to set up the mappings for groups created during add_task. + * 3. The normal mapping exists for all groups. + * 4. The dvpp mappings of the new group and the local group can merge _iff_ at + * least one of the mappings is empty (see the decision sketch below). + * The caller must hold sp_group_sem. + * NOTE: undo the merging if a later step fails. + */ +static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg) { struct sp_group_master *master = mm->sp_group_master; + struct sp_group *local = master->local; - if (master) { - *exist = true; - return master; + if (!list_empty(&spg->procs) && !(spg->flag & SPG_FLAG_NON_DVPP)) { + /* + * Don't return an error when the mappings' address ranges conflict. + * As long as one mapping is unused, we can drop the empty one. + * This may implicitly change the address range for the task or group, + * so warn about it.
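/*
 * Decision sketch for the dvpp merge handled below (both groups
 * DVPP-capable, derived from is_mapping_empty()/can_mappings_merge()):
 *
 *	local mapping empty	-> adopt the group's mapping
 *	group mapping empty	-> adopt the local mapping
 *	both mappings in use	-> fail with -EINVAL
 *
 * A range conflict only produces a warning when the dropped side was
 * empty anyway.
 */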
+ */ + bool is_conflict = !can_mappings_merge(local->dvpp, spg->dvpp); + + if (is_mapping_empty(local->dvpp)) { + sp_mapping_merge(spg->dvpp, local->dvpp); + if (is_conflict) + pr_warn_ratelimited("task address space conflict, spg_id=%d\n", spg->id); + } else if (is_mapping_empty(spg->dvpp)) { + sp_mapping_merge(local->dvpp, spg->dvpp); + if (is_conflict) + pr_warn_ratelimited("group address space conflict, spg_id=%d\n", spg->id); + } else { + pr_info_ratelimited("Duplicate address space, id=%d\n", spg->id); + return -EINVAL; + } + } else { + if (!(spg->flag & SPG_FLAG_NON_DVPP)) + /* the mapping of local group is always set */ + sp_mapping_attach(spg, local->dvpp); + if (!spg->normal) + sp_mapping_attach(spg, sp_mapping_normal); } + return 0; +} + +static struct sp_group *create_spg(int spg_id, unsigned long flag); +static void free_new_spg_id(bool new, int spg_id); +static void free_sp_group_locked(struct sp_group *spg); +static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg); +static int init_local_group(struct mm_struct *mm) +{ + int spg_id, ret; + struct sp_group *spg; + struct sp_mapping *spm; + struct sp_group_master *master = mm->sp_group_master; + + spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_LOCAL_MIN, + SPG_ID_LOCAL_MAX, GFP_ATOMIC); + if (spg_id < 0) { + pr_err_ratelimited("generate local group id failed %d\n", spg_id); + return spg_id; + } + + spg = create_spg(spg_id, 0); + if (IS_ERR(spg)) { + ret = PTR_ERR(spg); + goto free_spg_id; + } + + master->local = spg; + spm = sp_mapping_create(SP_MAPPING_DVPP); + if (IS_ERR(spm)) { + ret = PTR_ERR(spm); + goto free_spg; + } + sp_mapping_attach(master->local, spm); + sp_mapping_attach(master->local, sp_mapping_normal); + + ret = local_group_add_task(mm, spg); + if (ret < 0) + /* The spm would be released while destroying the spg*/ + goto free_spg; + + return 0; + +free_spg: + free_sp_group_locked(spg); + master->local = NULL; +free_spg_id: + free_new_spg_id(true, spg_id); + + return ret; +} + +static void sp_proc_stat_drop(struct sp_proc_stat *stat); +static int sp_init_proc_stat(struct mm_struct *mm, struct task_struct *tsk); +/* The caller must hold sp_group_sem */ +static int sp_init_group_master_locked(struct task_struct *tsk, struct mm_struct *mm) +{ + int ret; + struct sp_group_master *master; + + if (mm->sp_group_master) + return 0; + master = kmalloc(sizeof(struct sp_group_master), GFP_KERNEL); - if (master == NULL) - return ERR_PTR(-ENOMEM); + if (!master) + return -ENOMEM; INIT_LIST_HEAD(&master->node_list); master->count = 0; - master->stat = NULL; master->mm = mm; mm->sp_group_master = master; - *exist = false; - return master; + ret = sp_init_proc_stat(mm, tsk); + if (ret) + goto free_master; + + ret = init_local_group(mm); + if (ret) + goto put_stat; + + return 0; + +put_stat: + sp_proc_stat_drop(master->stat); +free_master: + mm->sp_group_master = NULL; + kfree(master); + + return ret; +} + +static inline bool is_local_group(int spg_id) +{ + return spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX; +} + +static struct sp_group *sp_get_local_group(struct task_struct *tsk, struct mm_struct *mm) +{ + int ret; + struct sp_group_master *master; + + down_read(&sp_group_sem); + master = mm->sp_group_master; + if (master && master->local) { + atomic_inc(&master->local->use_count); + up_read(&sp_group_sem); + return master->local; + } + up_read(&sp_group_sem); + + down_write(&sp_group_sem); + ret = sp_init_group_master_locked(tsk, mm); + if (ret) { + up_write(&sp_group_sem); + return 
ERR_PTR(ret); + } + master = mm->sp_group_master; + atomic_inc(&master->local->use_count); + up_write(&sp_group_sem); + + return master->local; } static struct sp_proc_stat *sp_get_proc_stat(struct mm_struct *mm) @@ -192,37 +433,29 @@ static struct sp_proc_stat *create_proc_stat(struct mm_struct *mm, return stat; } -static struct sp_proc_stat *sp_init_proc_stat(struct sp_group_master *master, - struct mm_struct *mm, struct task_struct *tsk) +static int sp_init_proc_stat(struct mm_struct *mm, struct task_struct *tsk) { struct sp_proc_stat *stat; int alloc_id, tgid = tsk->tgid; - - down_write(&sp_proc_stat_sem); - stat = master->stat; - if (stat) { - up_write(&sp_proc_stat_sem); - return stat; - } + struct sp_group_master *master = mm->sp_group_master; stat = create_proc_stat(mm, tsk); - if (IS_ERR(stat)) { - up_write(&sp_proc_stat_sem); - return stat; - } + if (IS_ERR(stat)) + return PTR_ERR(stat); + down_write(&sp_proc_stat_sem); alloc_id = idr_alloc(&sp_proc_stat_idr, stat, tgid, tgid + 1, GFP_KERNEL); if (alloc_id < 0) { up_write(&sp_proc_stat_sem); pr_err_ratelimited("proc stat idr alloc failed %d\n", alloc_id); kfree(stat); - return ERR_PTR(alloc_id); + return alloc_id; } master->stat = stat; up_write(&sp_proc_stat_sem); - return stat; + return 0; } static void update_spg_stat_alloc(unsigned long size, bool inc, @@ -336,18 +569,14 @@ static struct spg_proc_stat *create_spg_proc_stat(int tgid, int spg_id) return stat; } -static struct spg_proc_stat *sp_init_spg_proc_stat( - struct sp_proc_stat *proc_stat, int tgid, struct sp_group *spg) +static struct spg_proc_stat *sp_init_spg_proc_stat(struct sp_proc_stat *proc_stat, + struct sp_group *spg) { struct spg_proc_stat *stat; int spg_id = spg->id; /* visit spg id locklessly */ struct sp_spg_stat *spg_stat = spg->stat; - stat = find_spg_proc_stat(proc_stat, tgid, spg_id); - if (stat) - return stat; - - stat = create_spg_proc_stat(tgid, spg_id); + stat = create_spg_proc_stat(proc_stat->tgid, spg_id); if (IS_ERR(stat)) return stat; @@ -364,31 +593,6 @@ static struct spg_proc_stat *sp_init_spg_proc_stat( return stat; } -/* - * The caller must - * 1. ensure no concurrency problem for task_struct and mm_struct. - * 2. hold sp_group_sem for sp_group_master (pay attention to ABBA deadlock) - */ -static struct spg_proc_stat *sp_init_process_stat(struct task_struct *tsk, - struct mm_struct *mm, struct sp_group *spg) -{ - struct sp_group_master *master; - bool exist; - struct sp_proc_stat *proc_stat; - struct spg_proc_stat *spg_proc_stat; - - master = sp_init_group_master_locked(mm, &exist); - if (IS_ERR(master)) - return (struct spg_proc_stat *)master; - - proc_stat = sp_init_proc_stat(master, mm, tsk); - if (IS_ERR(proc_stat)) - return (struct spg_proc_stat *)proc_stat; - - spg_proc_stat = sp_init_spg_proc_stat(proc_stat, tsk->tgid, spg); - return spg_proc_stat; -} - static struct sp_spg_stat *create_spg_stat(int spg_id) { struct sp_spg_stat *stat; @@ -444,12 +648,6 @@ static void free_spg_stat(int spg_id) kfree(stat); } -/* - * Group '0' for k2u_task and pass through. No process will be actually - * added to. - */ -static struct sp_group *spg_none; - /* statistics of all sp area, protected by sp_area_lock */ struct sp_spa_stat { unsigned int total_num; @@ -478,6 +676,8 @@ static struct sp_overall_stat sp_overall_stat; enum spa_type { SPA_TYPE_ALLOC = 1, + /* NOTE: reorganize after the statistical structure is reconstructed.
*/ + SPA_TYPE_ALLOC_PRIVATE = SPA_TYPE_ALLOC, SPA_TYPE_K2TASK, SPA_TYPE_K2SPG, }; @@ -507,7 +707,6 @@ struct sp_area { int device_id; }; static DEFINE_SPINLOCK(sp_area_lock); -static struct rb_root sp_area_root = RB_ROOT; static unsigned long spa_size(struct sp_area *spa) { @@ -539,7 +738,7 @@ static void spa_inc_usage(struct sp_area *spa) case SPA_TYPE_K2TASK: spa_stat.k2u_task_num += 1; spa_stat.k2u_task_size += size; - update_spg_stat_k2u(size, true, spg_none->stat); + update_spg_stat_k2u(size, true, spa->spg->stat); break; case SPA_TYPE_K2SPG: spa_stat.k2u_spg_num += 1; @@ -562,7 +761,7 @@ static void spa_inc_usage(struct sp_area *spa) spa_stat.total_num += 1; spa_stat.total_size += size; - if (spa->spg != spg_none) { + if (!is_local_group(spa->spg->id)) { atomic_inc(&sp_overall_stat.spa_total_num); atomic64_add(size, &sp_overall_stat.spa_total_size); } @@ -585,7 +784,7 @@ static void spa_dec_usage(struct sp_area *spa) case SPA_TYPE_K2TASK: spa_stat.k2u_task_num -= 1; spa_stat.k2u_task_size -= size; - update_spg_stat_k2u(size, false, spg_none->stat); + update_spg_stat_k2u(size, false, spa->spg->stat); break; case SPA_TYPE_K2SPG: spa_stat.k2u_spg_num -= 1; @@ -604,7 +803,7 @@ static void spa_dec_usage(struct sp_area *spa) spa_stat.total_num -= 1; spa_stat.total_size -= size; - if (spa->spg != spg_none) { + if (!is_local_group(spa->spg->id)) { atomic_dec(&sp_overall_stat.spa_total_num); atomic64_sub(spa->real_size, &sp_overall_stat.spa_total_size); } @@ -640,9 +839,9 @@ static void sp_update_process_stat(struct task_struct *tsk, bool inc, enum spa_type type = spa->type; down_write(&sp_group_sem); - stat = sp_init_process_stat(tsk, tsk->mm, spa->spg); + stat = find_spg_proc_stat(tsk->mm->sp_group_master->stat, tsk->tgid, spa->spg->id); up_write(&sp_group_sem); - if (unlikely(IS_ERR(stat))) + if (!stat) return; update_spg_proc_stat(size, inc, stat, type); @@ -664,15 +863,33 @@ static inline bool check_aoscore_process(struct task_struct *tsk) static unsigned long sp_mmap(struct mm_struct *mm, struct file *file, struct sp_area *spa, unsigned long *populate, - unsigned long prot); + unsigned long prot, struct vm_area_struct **pvma); static void sp_munmap(struct mm_struct *mm, unsigned long addr, unsigned long size); + +#define K2U_NORMAL 0 +#define K2U_COREDUMP 1 + +struct sp_k2u_context { + unsigned long kva; + unsigned long kva_aligned; + unsigned long size; + unsigned long size_aligned; + unsigned long sp_flags; + int state; + int spg_id; + bool to_task; + struct timespec64 start; + struct timespec64 end; +}; + static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa, - struct mm_struct *mm, unsigned long prot); + struct mm_struct *mm, unsigned long prot, struct sp_k2u_context *kc); static void free_sp_group_id(int spg_id) { /* ida operation is protected by an internal spin_lock */ - if (spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) + if ((spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) || + (spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX)) ida_free(&sp_group_id_ida, spg_id); } @@ -682,20 +899,28 @@ static void free_new_spg_id(bool new, int spg_id) free_sp_group_id(spg_id); } -static void free_sp_group(struct sp_group *spg) +static void free_sp_group_locked(struct sp_group *spg) { fput(spg->file); fput(spg->file_hugetlb); free_spg_stat(spg->id); - down_write(&sp_group_sem); idr_remove(&sp_group_idr, spg->id); - up_write(&sp_group_sem); free_sp_group_id((unsigned int)spg->id); + sp_mapping_detach(spg, spg->dvpp); + sp_mapping_detach(spg, 
spg->normal); + if (!is_local_group(spg->id)) + system_group_count--; kfree(spg); - system_group_count--; WARN(system_group_count < 0, "unexpected group count\n"); } +static void free_sp_group(struct sp_group *spg) +{ + down_write(&sp_group_sem); + free_sp_group_locked(spg); + up_write(&sp_group_sem); +} + static void sp_group_drop(struct sp_group *spg) { if (atomic_dec_and_test(&spg->use_count)) @@ -720,26 +945,6 @@ static int get_task(int pid, struct task_struct **task) return 0; } -static struct sp_group *get_first_group(struct mm_struct *mm) -{ - struct sp_group *spg = NULL; - struct sp_group_master *master = mm->sp_group_master; - - if (master && master->count >= 1) { - struct sp_group_node *spg_node = NULL; - - spg_node = list_first_entry(&master->node_list, - struct sp_group_node, group_node); - spg = spg_node->spg; - - /* don't revive a dead group */ - if (!spg || !atomic_inc_not_zero(&spg->use_count)) - spg = NULL; - } - - return spg; -} - /* * the caller must: * 1. hold spg->rw_lock @@ -764,35 +969,27 @@ static struct sp_group *__sp_find_spg_locked(int pid, int spg_id) struct task_struct *tsk = NULL; int ret = 0; - ret = get_task(pid, &tsk); - if (ret) - return NULL; - if (spg_id == SPG_ID_DEFAULT) { - /* - * Once we encounter a concurrency problem here. - * To fix it, we believe get_task_mm() and mmput() is too - * heavy because we just get the pointer of sp_group. - */ + ret = get_task(pid, &tsk); + if (ret) + return NULL; + task_lock(tsk); if (tsk->mm == NULL) spg = NULL; - else - spg = get_first_group(tsk->mm); + else if (tsk->mm->sp_group_master) spg = tsk->mm->sp_group_master->local; task_unlock(tsk); + + put_task_struct(tsk); } else { spg = idr_find(&sp_group_idr, spg_id); - /* don't revive a dead group */ - if (!spg || !atomic_inc_not_zero(&spg->use_count)) - goto fail; } - put_task_struct(tsk); - return spg; + if (!spg || !atomic_inc_not_zero(&spg->use_count)) + return NULL; -fail: - put_task_struct(tsk); - return NULL; + return spg; } static struct sp_group *__sp_find_spg(int pid, int spg_id) @@ -818,6 +1015,9 @@ int sp_group_id_by_pid(int pid) struct sp_group *spg; int spg_id = -ENODEV; + if (!sp_is_enabled()) + return -EOPNOTSUPP; + check_interrupt_context(); spg = __sp_find_spg(pid, SPG_ID_DEFAULT); @@ -848,11 +1048,14 @@ EXPORT_SYMBOL_GPL(sp_group_id_by_pid); */ int mg_sp_group_id_by_pid(int pid, int *spg_ids, int *num) { - int ret = 0; + int ret = 0, real_count; struct sp_group_node *node; struct sp_group_master *master = NULL; struct task_struct *tsk; + if (!sp_is_enabled()) + return -EOPNOTSUPP; + check_interrupt_context(); if (!spg_ids || num <= 0) @@ -873,18 +1076,28 @@ int mg_sp_group_id_by_pid(int pid, int *spg_ids, int *num) goto out_up_read; } - if (!master->count) { + /* + * There is a local group for each process which is used for + * passthrough allocation. The local group is an internal + * implementation for convenience and is not intended to bother + * the user. + */ + real_count = master->count - 1; + if (real_count <= 0) { ret = -ENODEV; goto out_up_read; } - if ((unsigned int)*num < master->count) { + if ((unsigned int)*num < real_count) { ret = -E2BIG; goto out_up_read; } - *num = master->count; + *num = real_count; - list_for_each_entry(node, &master->node_list, group_node) + list_for_each_entry(node, &master->node_list, group_node) { + if (is_local_group(node->spg->id)) + continue; *(spg_ids++) = node->spg->id; + } out_up_read: up_read(&sp_group_sem);
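The hunk above is where the local group becomes user-invisible: mg_sp_group_id_by_pid() subtracts it from the reported count and skips its ID while copying. That filtering reduces to the following standalone sketch; the SPG_ID_LOCAL_* bounds here are placeholders, not the real values:

#include <errno.h>
#include <stdbool.h>

#define SPG_ID_LOCAL_MIN 200001		/* illustrative bound */
#define SPG_ID_LOCAL_MAX 299999		/* illustrative bound */

static bool is_local_group(int id)
{
	return id >= SPG_ID_LOCAL_MIN && id <= SPG_ID_LOCAL_MAX;
}

/* copy the visible group IDs, hiding the always-present local group */
static int visible_ids(const int *all, int count, int *out, int *num)
{
	int i, real_count = count - 1;	/* one entry is the local group */

	if (real_count <= 0)
		return -ENODEV;		/* only the hidden group exists */
	if (*num < real_count)
		return -E2BIG;		/* caller's buffer is too small */
	*num = real_count;
	for (i = 0; i < count; i++)
		if (!is_local_group(all[i]))
			*out++ = all[i];
	return 0;
}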
@@ -910,23 +1123,7 @@ static bool is_device_addr(unsigned long addr) return false; } -static loff_t addr_offset(struct sp_area *spa) -{ - unsigned long addr; - - if (unlikely(!spa)) { - WARN(1, "invalid spa when calculate addr offset\n"); - return 0; - } - addr = spa->va_start; - - if (!is_device_addr(addr)) - return (loff_t)(addr - MMAP_SHARE_POOL_START); - - return (loff_t)(addr - sp_dev_va_start[spa->device_id]); -} - -static struct sp_group *create_spg(int spg_id) +static struct sp_group *create_spg(int spg_id, unsigned long flag) { int ret; struct sp_group *spg; @@ -934,11 +1131,17 @@ static struct sp_group *create_spg(int spg_id) struct user_struct *user = NULL; int hsize_log = MAP_HUGE_2MB >> MAP_HUGE_SHIFT; - if (unlikely(system_group_count + 1 == MAX_GROUP_FOR_SYSTEM)) { + if (unlikely(system_group_count + 1 == MAX_GROUP_FOR_SYSTEM && + !is_local_group(spg_id))) { pr_err_ratelimited("reach system max group num\n"); return ERR_PTR(-ENOSPC); } + if (flag & ~SPG_FLAG_MASK) { + pr_err_ratelimited("invalid flag:%#lx\n", flag); + return ERR_PTR(-EINVAL); + } + spg = kzalloc(sizeof(*spg), GFP_KERNEL); if (spg == NULL) return ERR_PTR(-ENOMEM); @@ -951,12 +1154,14 @@ static struct sp_group *create_spg(int spg_id) spg->id = spg_id; + spg->flag = flag; spg->is_alive = true; spg->proc_num = 0; spg->owner = current->group_leader; atomic_set(&spg->use_count, 1); INIT_LIST_HEAD(&spg->procs); INIT_LIST_HEAD(&spg->spa_list); + INIT_LIST_HEAD(&spg->mnode); init_rwsem(&spg->rw_lock); sprintf(name, "sp_group_%d", spg_id); @@ -981,7 +1186,8 @@ static struct sp_group *create_spg(int spg_id) if (ret < 0) goto out_fput_all; - system_group_count++; + if (!is_local_group(spg_id)) + system_group_count++; return spg; out_fput_all: @@ -996,14 +1202,14 @@ static struct sp_group *create_spg(int spg_id) } /* the caller must hold sp_group_sem */ -static struct sp_group *find_or_alloc_sp_group(int spg_id) +static struct sp_group *find_or_alloc_sp_group(int spg_id, unsigned long flag) { struct sp_group *spg; spg = __sp_find_spg_locked(current->pid, spg_id); if (!spg) { - spg = create_spg(spg_id); + spg = create_spg(spg_id, flag); } else { down_read(&spg->rw_lock); if (!spg_valid(spg)) { @@ -1053,32 +1259,27 @@ static void sp_munmap_task_areas(struct mm_struct *mm, struct sp_group *spg, str } /* the caller must hold sp_group_sem */ -static int mm_add_group_init(struct mm_struct *mm, struct sp_group *spg) +static int mm_add_group_init(struct task_struct *tsk, struct mm_struct *mm, + struct sp_group *spg) { - struct sp_group_master *master = mm->sp_group_master; - bool exist = false; - - if (share_pool_group_mode == SINGLE_GROUP_MODE && master && - master->count == 1) { - pr_err_ratelimited("at most one sp group for a task is allowed in single mode\n"); - return -EEXIST; - } - - master = sp_init_group_master_locked(mm, &exist); - if (IS_ERR(master)) - return PTR_ERR(master); - - if (!exist) - return 0; + int ret; + struct sp_group_master *master; - if (is_process_in_group(spg, mm)) { - pr_err_ratelimited("task already in 
target group, id=%d\n", spg->id); - return -EEXIST; - } + if (!mm->sp_group_master) { + ret = sp_init_group_master_locked(tsk, mm); + if (ret) + return ret; + } else { + if (is_process_in_group(spg, mm)) { + pr_err_ratelimited("task already in target group, id=%d\n", spg->id); + return -EEXIST; + } - if (master->count + 1 == MAX_GROUP_FOR_TASK) { - pr_err("task reaches max group num\n"); - return -ENOSPC; + master = mm->sp_group_master; + if (master->count == MAX_GROUP_FOR_TASK) { + pr_err("task reaches max group num\n"); + return -ENOSPC; + } } return 0; @@ -1117,6 +1318,13 @@ static int insert_spg_node(struct sp_group *spg, struct sp_group_node *node) spg->proc_num++; list_add_tail(&node->proc_node, &spg->procs); + + /* + * The only way sp_init_spg_proc_stat can fail is that there is no + * memory for sp_spg_stat. We will avoid this failure when we put sp_spg_stat + * into sp_group_node later. + */ + sp_init_spg_proc_stat(node->master->stat, spg); return 0; } @@ -1139,11 +1347,26 @@ static void free_spg_node(struct mm_struct *mm, struct sp_group *spg, kfree(spg_node); } +static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg) +{ + struct sp_group_node *node; + + node = create_spg_node(mm, PROT_READ | PROT_WRITE, spg); + if (IS_ERR(node)) + return PTR_ERR(node); + + insert_spg_node(spg, node); + mmget(mm); + + return 0; +} + /** - * sp_group_add_task() - Add a process to an share group (sp_group). + * mg_sp_group_add_task() - Add a process to a share group (sp_group). * @pid: the pid of the task to be added. * @prot: the prot of task for this spg. * @spg_id: the ID of the sp_group. + * @flag: to pass extra attributes of the group. * * A process can't be added to more than one sp_group in single group mode * and can in multiple group mode. 
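From a caller's perspective the kernel-doc above translates into a very small contract. A hedged kernel-side usage sketch; attach_worker_to_group() is hypothetical, and only the sign of the return value is relied on here since the Return: section of the kernel-doc falls outside this hunk:

static int attach_worker_to_group(pid_t pid, int spg_id)
{
	int ret = mg_sp_group_add_task(pid, PROT_READ | PROT_WRITE, spg_id);

	if (ret < 0)	/* e.g. -EEXIST if already a member, -ENOSPC at the limit */
		return ret;

	/*
	 * The group's existing areas were just mapped into the task,
	 * as the sp_area loop inside mg_sp_group_add_task() shows.
	 */
	return 0;
}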
@@ -1156,6 +1379,7 @@ static void free_spg_node(struct mm_struct *mm, struct sp_group *spg, */ int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id) { + unsigned long flag = 0; struct task_struct *tsk; struct mm_struct *mm; struct sp_group *spg; @@ -1163,7 +1387,9 @@ int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id) int ret = 0; bool id_newly_generated = false; struct sp_area *spa, *prev = NULL; - struct spg_proc_stat *stat; + + if (!sp_is_enabled()) + return -EOPNOTSUPP; check_interrupt_context(); @@ -1250,7 +1476,7 @@ int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id) goto out_put_task; } - spg = find_or_alloc_sp_group(spg_id); + spg = find_or_alloc_sp_group(spg_id, flag); if (IS_ERR(spg)) { up_write(&sp_group_sem); ret = PTR_ERR(spg); @@ -1266,25 +1492,26 @@ int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id) } } - ret = mm_add_group_init(mm, spg); - if (ret) + down_write(&spg->rw_lock); + ret = mm_add_group_init(tsk, mm, spg); + if (ret) { + up_write(&spg->rw_lock); goto out_drop_group; + } + + ret = sp_mapping_group_setup(mm, spg); + if (ret) { + up_write(&spg->rw_lock); + goto out_drop_group; + } node = create_spg_node(mm, prot, spg); if (unlikely(IS_ERR(node))) { + up_write(&spg->rw_lock); ret = PTR_ERR(node); - goto out_drop_spg_node; - } - - /* per process statistics initialization */ - stat = sp_init_process_stat(tsk, mm, spg); - if (IS_ERR(stat)) { - ret = PTR_ERR(stat); - pr_err_ratelimited("init process stat failed %lx\n", PTR_ERR(stat)); - goto out_drop_spg_node; + goto out_drop_group; } - down_write(&spg->rw_lock); ret = insert_spg_node(spg, node); if (unlikely(ret)) { up_write(&spg->rw_lock); @@ -1313,7 +1540,7 @@ int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id) spin_unlock(&sp_area_lock); if (spa->type == SPA_TYPE_K2SPG && spa->kva) { - addr = sp_remap_kva_to_vma(spa->kva, spa, mm, prot); + addr = sp_remap_kva_to_vma(spa->kva, spa, mm, prot, NULL); if (IS_ERR_VALUE(addr)) pr_warn("add group remap k2u failed %ld\n", addr); @@ -1331,7 +1558,7 @@ int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id) break; } - addr = sp_mmap(mm, file, spa, &populate, prot); + addr = sp_mmap(mm, file, spa, &populate, prot, NULL); if (IS_ERR_VALUE(addr)) { sp_munmap_task_areas(mm, spg, &spa->link); up_write(&mm->mmap_lock); @@ -1440,6 +1667,9 @@ int mg_sp_group_del_task(int pid, int spg_id) struct mm_struct *mm = NULL; bool is_alive = true; + if (!sp_is_enabled()) + return -EOPNOTSUPP; + if (spg_id < SPG_ID_MIN || spg_id > SPG_ID_AUTO) { pr_err_ratelimited("del from group failed, invalid group id %d\n", spg_id); return -EINVAL; @@ -1526,10 +1756,50 @@ int sp_group_del_task(int pid, int spg_id) } EXPORT_SYMBOL_GPL(sp_group_del_task); +int sp_id_of_current(void) +{ + int ret, spg_id; + struct sp_group_master *master; + + if (!sp_is_enabled()) + return -EOPNOTSUPP; + + if (current->flags & PF_KTHREAD || !current->mm) + return -EINVAL; + + down_read(&sp_group_sem); + master = current->mm->sp_group_master; + if (master) { + spg_id = master->local->id; + up_read(&sp_group_sem); + return spg_id; + } + up_read(&sp_group_sem); + + down_write(&sp_group_sem); + ret = sp_init_group_master_locked(current, current->mm); + if (ret) { + up_write(&sp_group_sem); + return ret; + } + master = current->mm->sp_group_master; + spg_id = master->local->id; + up_write(&sp_group_sem); + + return spg_id; +} +EXPORT_SYMBOL_GPL(sp_id_of_current); + +int mg_sp_id_of_current(void) +{ + return sp_id_of_current(); +} 
+EXPORT_SYMBOL_GPL(mg_sp_id_of_current); + /* the caller must hold sp_area_lock */ -static void __insert_sp_area(struct sp_area *spa) +static void __insert_sp_area(struct sp_mapping *spm, struct sp_area *spa) { - struct rb_node **p = &sp_area_root.rb_node; + struct rb_node **p = &spm->area_root.rb_node; struct rb_node *parent = NULL; while (*p) { @@ -1546,14 +1816,9 @@ static void __insert_sp_area(struct sp_area *spa) } rb_link_node(&spa->rb_node, parent, p); - rb_insert_color(&spa->rb_node, &sp_area_root); + rb_insert_color(&spa->rb_node, &spm->area_root); } -/* The sp_area cache globals are protected by sp_area_lock */ -static struct rb_node *free_sp_area_cache; -static unsigned long cached_hole_size; -static unsigned long cached_vstart; /* affected by SP_DVPP and sp_config_dvpp_range() */ - /** * sp_alloc_area() - Allocate a region of VA from the share pool. * @size: the size of VA to allocate. @@ -1570,11 +1835,12 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags, { struct sp_area *spa, *first, *err; struct rb_node *n; - unsigned long vstart = MMAP_SHARE_POOL_START; - unsigned long vend = MMAP_SHARE_POOL_16G_START; + unsigned long vstart; + unsigned long vend; unsigned long addr; unsigned long size_align = ALIGN(size, PMD_SIZE); /* va aligned to 2M */ int device_id, node_id; + struct sp_mapping *mapping; device_id = sp_flags_device_id(flags); node_id = flags & SP_SPEC_NODE_ID ? sp_flags_node_id(flags) : device_id; @@ -1584,17 +1850,18 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags, return ERR_PTR(-EINVAL); } - if ((flags & SP_DVPP)) { - if (!is_sp_dev_addr_enabled(device_id)) { - vstart = MMAP_SHARE_POOL_16G_START + - device_id * MMAP_SHARE_POOL_16G_SIZE; - vend = vstart + MMAP_SHARE_POOL_16G_SIZE; - } else { - vstart = sp_dev_va_start[device_id]; - vend = vstart + sp_dev_va_size[device_id]; - } + if (flags & SP_DVPP) + mapping = spg->dvpp; + else + mapping = spg->normal; + + if (!mapping) { + pr_err_ratelimited("non DVPP spg, id %d\n", spg->id); + return ERR_PTR(-EINVAL); } + vstart = mapping->start[device_id]; + vend = mapping->end[device_id]; spa = __kmalloc_node(sizeof(struct sp_area), GFP_KERNEL, node_id); if (unlikely(!spa)) return ERR_PTR(-ENOMEM); @@ -1604,24 +1871,24 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags, /* * Invalidate cache if we have more permissive parameters. * cached_hole_size notes the largest hole noticed _below_ - * the sp_area cached in free_sp_area_cache: if size fits + * the sp_area cached in free_area_cache: if size fits * into that hole, we want to scan from vstart to reuse - * the hole instead of allocating above free_sp_area_cache. - * Note that sp_free_area may update free_sp_area_cache + * the hole instead of allocating above free_area_cache. + * Note that sp_free_area may update free_area_cache * without updating cached_hole_size. 
*/ - if (!free_sp_area_cache || size_align < cached_hole_size || - vstart != cached_vstart) { - cached_hole_size = 0; - free_sp_area_cache = NULL; + if (!mapping->free_area_cache || size_align < mapping->cached_hole_size || + vstart != mapping->cached_vstart) { + mapping->cached_hole_size = 0; + mapping->free_area_cache = NULL; } /* record if we encounter less permissive parameters */ - cached_vstart = vstart; + mapping->cached_vstart = vstart; /* find starting point for our search */ - if (free_sp_area_cache) { - first = rb_entry(free_sp_area_cache, struct sp_area, rb_node); + if (mapping->free_area_cache) { + first = rb_entry(mapping->free_area_cache, struct sp_area, rb_node); addr = first->va_end; if (addr + size_align < addr) { err = ERR_PTR(-EOVERFLOW); @@ -1634,7 +1901,7 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags, goto error; } - n = sp_area_root.rb_node; + n = mapping->area_root.rb_node; first = NULL; while (n) { @@ -1656,8 +1923,8 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags, /* from the starting point, traverse areas until a suitable hole is found */ while (addr + size_align > first->va_start && addr + size_align <= vend) { - if (addr + cached_hole_size < first->va_start) - cached_hole_size = first->va_start - addr; + if (addr + mapping->cached_hole_size < first->va_start) + mapping->cached_hole_size = first->va_start - addr; addr = first->va_end; if (addr + size_align < addr) { err = ERR_PTR(-EOVERFLOW); @@ -1694,10 +1961,9 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags, spa->device_id = device_id; spa_inc_usage(spa); - __insert_sp_area(spa); - free_sp_area_cache = &spa->rb_node; - if (spa->spg != spg_none) - list_add_tail(&spa->link, &spg->spa_list); + __insert_sp_area(mapping, spa); + mapping->free_area_cache = &spa->rb_node; + list_add_tail(&spa->link, &spg->spa_list); spin_unlock(&sp_area_lock); @@ -1710,9 +1976,15 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags, } /* the caller should hold sp_area_lock */ -static struct sp_area *__find_sp_area_locked(unsigned long addr) +static struct sp_area *__find_sp_area_locked(struct sp_group *spg, + unsigned long addr) { - struct rb_node *n = sp_area_root.rb_node; + struct rb_node *n; + + if (addr >= MMAP_SHARE_POOL_START && addr < MMAP_SHARE_POOL_16G_START) + n = spg->normal->area_root.rb_node; + else + n = spg->dvpp->area_root.rb_node; while (n) { struct sp_area *spa; @@ -1730,12 +2002,12 @@ static struct sp_area *__find_sp_area_locked(unsigned long addr) return NULL; } -static struct sp_area *__find_sp_area(unsigned long addr) +static struct sp_area *__find_sp_area(struct sp_group *spg, unsigned long addr) { struct sp_area *n; spin_lock(&sp_area_lock); - n = __find_sp_area_locked(addr); + n = __find_sp_area_locked(spg, addr); if (n) atomic_inc(&n->use_count); spin_unlock(&sp_area_lock); @@ -1760,22 +2032,30 @@ static bool vmalloc_area_clr_flag(unsigned long kva, unsigned long flags) */ static void sp_free_area(struct sp_area *spa) { + unsigned long addr = spa->va_start; + struct sp_mapping *spm; + lockdep_assert_held(&sp_area_lock); - if (free_sp_area_cache) { + if (addr >= MMAP_SHARE_POOL_START && addr < MMAP_SHARE_POOL_16G_START) + spm = spa->spg->normal; + else + spm = spa->spg->dvpp; + + if (spm->free_area_cache) { struct sp_area *cache; - cache = rb_entry(free_sp_area_cache, struct sp_area, rb_node); + cache = rb_entry(spm->free_area_cache, struct sp_area, rb_node); if (spa->va_start <= 
cache->va_start) { - free_sp_area_cache = rb_prev(&spa->rb_node); + spm->free_area_cache = rb_prev(&spa->rb_node); /* * the new cache node may be changed to another region, * i.e. from DVPP region to normal region */ - if (free_sp_area_cache) { - cache = rb_entry(free_sp_area_cache, + if (spm->free_area_cache) { + cache = rb_entry(spm->free_area_cache, struct sp_area, rb_node); - cached_vstart = cache->region_vstart; + spm->cached_vstart = cache->region_vstart; } /* * We don't try to update cached_hole_size, @@ -1788,10 +2068,9 @@ static void sp_free_area(struct sp_area *spa) pr_debug("clear spa->kva %ld is not valid\n", spa->kva); spa_dec_usage(spa); - if (spa->spg != spg_none) - list_del(&spa->link); + list_del(&spa->link); - rb_erase(&spa->rb_node, &sp_area_root); + rb_erase(&spa->rb_node, &spm->area_root); RB_CLEAR_NODE(&spa->rb_node); kfree(spa); } @@ -1820,8 +2099,6 @@ static void __sp_area_drop(struct sp_area *spa) void sp_area_drop(struct vm_area_struct *vma) { - struct sp_area *spa; - if (!(vma->vm_flags & VM_SHARE_POOL)) return; @@ -1833,8 +2110,7 @@ void sp_area_drop(struct vm_area_struct *vma) * an atomic operation. */ spin_lock(&sp_area_lock); - spa = __find_sp_area_locked(vma->vm_start); - __sp_area_drop_locked(spa); + __sp_area_drop_locked(vma->vm_private_data); spin_unlock(&sp_area_lock); } @@ -1949,15 +2225,10 @@ static void sp_fallocate(struct sp_area *spa) static void sp_free_unmap_fallocate(struct sp_area *spa) { - if (spa->spg != spg_none) { - down_read(&spa->spg->rw_lock); - __sp_free(spa->spg, spa->va_start, spa_size(spa), NULL); - sp_fallocate(spa); - up_read(&spa->spg->rw_lock); - } else { - sp_munmap(current->mm, spa->va_start, spa_size(spa)); - sp_fallocate(spa); - } + down_read(&spa->spg->rw_lock); + __sp_free(spa->spg, spa->va_start, spa_size(spa), NULL); + sp_fallocate(spa); + up_read(&spa->spg->rw_lock); } static int sp_check_caller_permission(struct sp_group *spg, struct mm_struct *mm) @@ -1968,6 +2239,7 @@ static int sp_check_caller_permission(struct sp_group *spg, struct mm_struct *mm if (!is_process_in_group(spg, mm)) ret = -EPERM; up_read(&spg->rw_lock); + return ret; } @@ -1978,6 +2250,7 @@ struct sp_free_context { unsigned long addr; struct sp_area *spa; int state; + int spg_id; }; /* when success, __sp_area_drop(spa) should be used */ @@ -1986,10 +2259,18 @@ static int sp_free_get_spa(struct sp_free_context *fc) int ret = 0; unsigned long addr = fc->addr; struct sp_area *spa; + struct sp_group *spg; + + spg = __sp_find_spg(current->tgid, fc->spg_id); + if (!spg) { + pr_debug("sp free get group failed %d\n", fc->spg_id); + return -EINVAL; + } fc->state = FREE_CONT; - spa = __find_sp_area(addr); + spa = __find_sp_area(spg, addr); + sp_group_drop(spg); if (!spa) { pr_debug("sp free invalid input addr %lx\n", addr); return -EINVAL; @@ -2002,46 +2283,37 @@ static int sp_free_get_spa(struct sp_free_context *fc) } fc->spa = spa; - if (spa->spg != spg_none) { - /* - * Access control: an sp addr can only be freed by - * 1. another task in the same spg - * 2. 
a kthread - * - * a passthrough addr can only be freed by the applier process - */ - if (!current->mm) - goto check_spa; + if (!current->mm) + goto check_spa; - ret = sp_check_caller_permission(spa->spg, current->mm); - if (ret < 0) - goto drop_spa; + ret = sp_check_caller_permission(spa->spg, current->mm); + if (ret < 0) + goto drop_spa; check_spa: - down_write(&spa->spg->rw_lock); - if (!spg_valid(spa->spg)) { - fc->state = FREE_END; - up_write(&spa->spg->rw_lock); - goto drop_spa; - /* we must return success(0) in this situation */ - } - /* the life cycle of spa has a direct relation with sp group */ - if (unlikely(spa->is_dead)) { - up_write(&spa->spg->rw_lock); - pr_err_ratelimited("unexpected double sp free\n"); - dump_stack(); - ret = -EINVAL; - goto drop_spa; - } - spa->is_dead = true; - up_write(&spa->spg->rw_lock); + if (is_local_group(spa->spg->id) && (current->tgid != spa->applier)) { + ret = -EPERM; + goto drop_spa; + } - } else { - if (current->tgid != spa->applier) { - ret = -EPERM; - goto drop_spa; - } + down_write(&spa->spg->rw_lock); + if (!spg_valid(spa->spg)) { + fc->state = FREE_END; + up_write(&spa->spg->rw_lock); + goto drop_spa; + /* we must return success(0) in this situation */ + } + /* the life cycle of spa has a direct relation with sp group */ + if (unlikely(spa->is_dead)) { + up_write(&spa->spg->rw_lock); + pr_err_ratelimited("unexpected double sp free\n"); + dump_stack(); + ret = -EINVAL; + goto drop_spa; } + spa->is_dead = true; + up_write(&spa->spg->rw_lock); + return 0; drop_spa: @@ -2052,21 +2324,29 @@ static int sp_free_get_spa(struct sp_free_context *fc) /** * sp_free() - Free the memory allocated by sp_alloc(). * @addr: the starting VA of the memory. + * @id: Address space identifier, which is used to distinguish the addr. * * Return: * * 0 - success. * * -EINVAL - the memory can't be found or was not allocted by share pool. * * -EPERM - the caller has no permision to free the memory. */ -int sp_free(unsigned long addr) +int sp_free(unsigned long addr, int id) { int ret = 0; struct sp_free_context fc = { .addr = addr, + .spg_id = id, }; + if (!sp_is_enabled()) + return -EOPNOTSUPP; + check_interrupt_context(); + if (current->flags & PF_KTHREAD) + return -EINVAL; + ret = sp_free_get_spa(&fc); if (ret || fc.state == FREE_END) goto out; @@ -2087,16 +2367,16 @@ int sp_free(unsigned long addr) } EXPORT_SYMBOL_GPL(sp_free); -int mg_sp_free(unsigned long addr) +int mg_sp_free(unsigned long addr, int id) { - return sp_free(addr); + return sp_free(addr, id); } EXPORT_SYMBOL_GPL(mg_sp_free); /* wrapper of __do_mmap() and the caller must hold down_write(&mm->mmap_lock). */ static unsigned long sp_mmap(struct mm_struct *mm, struct file *file, struct sp_area *spa, unsigned long *populate, - unsigned long prot) + unsigned long prot, struct vm_area_struct **pvma) { unsigned long addr = spa->va_start; unsigned long size = spa_size(spa); @@ -2104,6 +2384,7 @@ static unsigned long sp_mmap(struct mm_struct *mm, struct file *file, MAP_SHARE_POOL; unsigned long vm_flags = VM_NORESERVE | VM_SHARE_POOL | VM_DONTCOPY; unsigned long pgoff = addr_offset(spa) >> PAGE_SHIFT; + struct vm_area_struct *vma; /* Mark the mapped region to be locked. After the MAP_LOCKED is enable, * multiple tasks will preempt resources, causing performance loss. 
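Stepping back to the sp_alloc_area() changes above: the vmalloc-style scan cache now lives per sp_mapping. free_area_cache resumes the search where the last allocation ended, and cached_hole_size remembers the largest hole skipped below that point, so a request small enough to fit in such a hole restarts from vstart instead. A compact userspace model of the strategy over a sorted array instead of the rbtree (all names illustrative):

#include <stddef.h>

struct busy { unsigned long start, end; };	/* sorted, non-overlapping */

static unsigned long free_area_cache;	/* where the last allocation ended */
static unsigned long cached_hole_size;	/* biggest hole seen below the cache */

static unsigned long first_fit(const struct busy *v, size_t n,
			       unsigned long vstart, unsigned long vend,
			       unsigned long size)
{
	unsigned long addr;
	size_t i = 0;

	/* a smaller request might fit in a hole below the cache: rescan */
	if (!free_area_cache || size < cached_hole_size) {
		cached_hole_size = 0;
		free_area_cache = 0;
	}
	addr = free_area_cache ? free_area_cache : vstart;

	while (i < n && v[i].end <= addr)
		i++;			/* skip areas wholly below the start */
	while (i < n && addr + size > v[i].start) {
		if (addr + cached_hole_size < v[i].start)
			cached_hole_size = v[i].start - addr;	/* note the hole */
		addr = v[i].end;
		i++;
	}
	if (addr + size > vend)
		return 0;		/* no hole large enough */
	free_area_cache = addr + size;
	return addr;
}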
@@ -2119,14 +2400,20 @@ static unsigned long sp_mmap(struct mm_struct *mm, struct file *file, pr_err("do_mmap fails %ld\n", addr); } else { BUG_ON(addr != spa->va_start); + vma = find_vma(mm, addr); + vma->vm_private_data = spa; + if (pvma) + *pvma = vma; } + return addr; } #define ALLOC_NORMAL 1 #define ALLOC_RETRY 2 #define ALLOC_NOMEM 3 +#define ALLOC_COREDUMP 4 struct sp_alloc_context { struct sp_group *spg; @@ -2139,6 +2426,8 @@ struct sp_alloc_context { bool need_fallocate; struct timespec64 start; struct timespec64 end; + bool have_mbind; + enum spa_type type; }; static void trace_sp_alloc_begin(struct sp_alloc_context *ac) @@ -2152,7 +2441,6 @@ static void trace_sp_alloc_begin(struct sp_alloc_context *ac) static void trace_sp_alloc_finish(struct sp_alloc_context *ac, unsigned long va) { unsigned long cost; - bool is_pass_through = ac->spg == spg_none ? true : false; if (!sysctl_sp_perf_alloc) return; @@ -2164,7 +2452,8 @@ static void trace_sp_alloc_finish(struct sp_alloc_context *ac, unsigned long va) if (cost >= (unsigned long)sysctl_sp_perf_alloc) { pr_err("Task %s(%d/%d) sp_alloc returns 0x%lx consumes %luus, size is %luKB, size_aligned is %luKB, sp_flags is %lx, pass through is %d\n", current->comm, current->tgid, current->pid, - va, cost, byte2kb(ac->size), byte2kb(ac->size_aligned), ac->sp_flags, is_pass_through); + va, cost, byte2kb(ac->size), byte2kb(ac->size_aligned), ac->sp_flags, + is_local_group(ac->spg->id)); } } @@ -2181,6 +2470,11 @@ static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags, if (enable_mdc_default_group) spg_id = mdc_default_group_id; + if (current->flags & PF_KTHREAD) { + pr_err_ratelimited("allocation failed, task is kthread\n"); + return -EINVAL; + } + if (unlikely(!size || (size >> PAGE_SHIFT) > totalram_pages())) { pr_err_ratelimited("allocation failed, invalid size %lu\n", size); return -EINVAL; @@ -2199,72 +2493,35 @@ static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags, if (sp_flags & SP_HUGEPAGE_ONLY) sp_flags |= SP_HUGEPAGE; - if (share_pool_group_mode == SINGLE_GROUP_MODE) { - spg = __sp_find_spg(current->pid, SPG_ID_DEFAULT); - if (spg) { - if (spg_id != SPG_ID_DEFAULT && spg->id != spg_id) { - sp_group_drop(spg); - return -ENODEV; - } - - /* up_read will be at the end of sp_alloc */ - down_read(&spg->rw_lock); - if (!spg_valid(spg)) { - up_read(&spg->rw_lock); - sp_group_drop(spg); - pr_err_ratelimited("allocation failed, spg is dead\n"); - return -ENODEV; - } - } else { /* alocation pass through scene */ - if (enable_mdc_default_group) { - int ret = 0; - - ret = sp_group_add_task(current->tgid, spg_id); - if (ret < 0) { - pr_err_ratelimited("add group failed in pass through\n"); - return ret; - } - - spg = __sp_find_spg(current->pid, SPG_ID_DEFAULT); - - /* up_read will be at the end of sp_alloc */ - down_read(&spg->rw_lock); - if (!spg_valid(spg)) { - up_read(&spg->rw_lock); - sp_group_drop(spg); - pr_err_ratelimited("pass through allocation failed, spg is dead\n"); - return -ENODEV; - } - } else { - spg = spg_none; - } + if (spg_id != SPG_ID_DEFAULT) { + spg = __sp_find_spg(current->pid, spg_id); + if (!spg) { + pr_err_ratelimited("allocation failed, can't find group\n"); + return -ENODEV; } - } else { - if (spg_id != SPG_ID_DEFAULT) { - spg = __sp_find_spg(current->pid, spg_id); - if (!spg) { - pr_err_ratelimited("allocation failed, can't find group\n"); - return -ENODEV; - } - /* up_read will be at the end of sp_alloc */ - down_read(&spg->rw_lock); - if (!spg_valid(spg)) { - up_read(&spg->rw_lock); - 
sp_group_drop(spg); - pr_err_ratelimited("allocation failed, spg is dead\n"); - return -ENODEV; - } + /* up_read will be at the end of sp_alloc */ + down_read(&spg->rw_lock); + if (!spg_valid(spg)) { + up_read(&spg->rw_lock); + sp_group_drop(spg); + pr_err_ratelimited("allocation failed, spg is dead\n"); + return -ENODEV; + } - if (!is_process_in_group(spg, current->mm)) { - up_read(&spg->rw_lock); - sp_group_drop(spg); - pr_err_ratelimited("allocation failed, task not in group\n"); - return -ENODEV; - } - } else { /* alocation pass through scene */ - spg = spg_none; + if (!is_process_in_group(spg, current->mm)) { + up_read(&spg->rw_lock); + sp_group_drop(spg); + pr_err_ratelimited("allocation failed, task not in group\n"); + return -ENODEV; } + ac->type = SPA_TYPE_ALLOC; + } else { /* allocation pass through scene */ + spg = sp_get_local_group(current, current->mm); + if (IS_ERR(spg)) + return PTR_ERR(spg); + down_read(&spg->rw_lock); + ac->type = SPA_TYPE_ALLOC_PRIVATE; } if (sp_flags & SP_HUGEPAGE) { @@ -2280,14 +2537,14 @@ static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags, ac->sp_flags = sp_flags; ac->state = ALLOC_NORMAL; ac->need_fallocate = false; + ac->have_mbind = false; return 0; } static void sp_alloc_unmap(struct mm_struct *mm, struct sp_area *spa, struct sp_group_node *spg_node) { - if (spa->spg != spg_none) - __sp_free(spa->spg, spa->va_start, spa->real_size, mm); + __sp_free(spa->spg, spa->va_start, spa->real_size, mm); } static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa, @@ -2297,15 +2554,13 @@ static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa, unsigned long mmap_addr; /* pass through default permission */ unsigned long prot = PROT_READ | PROT_WRITE; - unsigned long sp_addr = spa->va_start; unsigned long populate = 0; struct vm_area_struct *vma; down_write(&mm->mmap_lock); if (unlikely(mm->core_state)) { up_write(&mm->mmap_lock); - sp_alloc_unmap(mm, spa, spg_node); - ac->state = ALLOC_NOMEM; + ac->state = ALLOC_COREDUMP; pr_info("allocation encountered coredump\n"); return -EFAULT; } @@ -2313,8 +2568,11 @@ static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa, if (spg_node) prot = spg_node->prot; + if (ac->sp_flags & SP_PROT_RO) + prot = PROT_READ; + /* when success, mmap_addr == spa->va_start */ - mmap_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot); + mmap_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma); if (IS_ERR_VALUE(mmap_addr)) { up_write(&mm->mmap_lock); sp_alloc_unmap(mm, spa, spg_node); @@ -2330,13 +2588,9 @@ static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa, } ac->populate = populate; - vma = find_vma(mm, sp_addr); - if (unlikely(!vma)) { - up_write(&mm->mmap_lock); - WARN(1, "allocation failed, can't find %lx vma\n", sp_addr); - ret = -EINVAL; - goto unmap; - } + if (ac->sp_flags & SP_PROT_RO) + vma->vm_flags &= ~VM_MAYWRITE; + /* clean PTE_RDONLY flags or trigger SMMU event */ if (prot & PROT_WRITE) vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY); @@ -2345,10 +2599,7 @@ static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa, return ret; unmap: - if (spa->spg != spg_none) - sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node); - else - sp_munmap(mm, spa->va_start, spa->real_size); + sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node); return ret; } @@ -2374,7 +2625,7 @@ static void sp_alloc_fallback(struct sp_area *spa, struct sp_alloc_context *ac) } 
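After this rework sp_alloc_prepare() above has exactly two lookup paths: an explicit spg_id must name a group the caller has already joined, while SPG_ID_DEFAULT falls through to the caller's local group, creating it on first use. A hedged caller-side sketch; it assumes, as sp_free_get_spa() earlier suggests, that SPG_ID_DEFAULT resolves to the same local group on the free side, and the function name is hypothetical (SZ_2M comes from <linux/sizes.h>):

static int passthrough_alloc_example(void)
{
	/* no explicit group: the allocation lands in the local group */
	void *buf = sp_alloc(SZ_2M, SP_HUGEPAGE, SPG_ID_DEFAULT);

	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... use the buffer ... */

	return sp_free((unsigned long)buf, SPG_ID_DEFAULT);
}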
static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa, - struct sp_group_node *spg_node, struct sp_alloc_context *ac) + struct sp_alloc_context *ac) { int ret = 0; unsigned long sp_addr = spa->va_start; @@ -2406,25 +2657,20 @@ static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa, if (ret) sp_add_work_compact(); } - if (ret) { - if (spa->spg != spg_none) - sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node); - else - sp_munmap(mm, spa->va_start, spa->real_size); - - if (unlikely(fatal_signal_pending(current))) - pr_warn_ratelimited("allocation failed, current thread is killed\n"); - else - pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n", - ret); - sp_fallocate(spa); /* need this, otherwise memleak */ - sp_alloc_fallback(spa, ac); - } else { - ac->need_fallocate = true; - } return ret; } +static long sp_mbind(struct mm_struct *mm, unsigned long start, unsigned long len, + unsigned long node) +{ + nodemask_t nmask; + + nodes_clear(nmask); + node_set(node, nmask); + return __do_mbind(start, len, MPOL_BIND, MPOL_F_STATIC_NODES, + &nmask, MPOL_MF_STRICT, mm); +} + static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa, struct sp_group_node *spg_node, struct sp_alloc_context *ac) { @@ -2440,53 +2686,77 @@ static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa, return ret; } - ret = sp_alloc_populate(mm, spa, spg_node, ac); + if (!ac->have_mbind) { + ret = sp_mbind(mm, spa->va_start, spa->real_size, spa->node_id); + if (ret < 0) { + pr_err("cannot bind the memory range to specified node:%d, err:%d\n", + spa->node_id, ret); + goto err; + } + ac->have_mbind = true; + } + + ret = sp_alloc_populate(mm, spa, ac); + if (ret) { +err: + sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node); + + if (unlikely(fatal_signal_pending(current))) + pr_warn_ratelimited("allocation failed, current thread is killed\n"); + else + pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n", + ret); + sp_fallocate(spa); /* need this, otherwise memleak */ + sp_alloc_fallback(spa, ac); + } else + ac->need_fallocate = true; + return ret; } static int sp_alloc_mmap_populate(struct sp_area *spa, struct sp_alloc_context *ac) { - int ret; + int ret = -EINVAL; + int mmap_ret = 0; struct mm_struct *mm; struct sp_group_node *spg_node; - if (spa->spg == spg_none) { - ret = __sp_alloc_mmap_populate(current->mm, spa, NULL, ac); - } else { - /* create mapping for each process in the group */ - list_for_each_entry(spg_node, &spa->spg->procs, proc_node) { - mm = spg_node->master->mm; - ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac); - if (ret) - return ret; + /* create mapping for each process in the group */ + list_for_each_entry(spg_node, &spa->spg->procs, proc_node) { + mm = spg_node->master->mm; + mmap_ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac); + if (mmap_ret) { + if (ac->state != ALLOC_COREDUMP) + return mmap_ret; + ac->state = ALLOC_NORMAL; + continue; } + ret = mmap_ret; } + return ret; } /* spa maybe an error pointer, so introduce variable spg */ static void sp_alloc_finish(int result, struct sp_area *spa, - struct sp_alloc_context *ac) + struct sp_alloc_context *ac) { struct sp_group *spg = ac->spg; - bool is_pass_through = spg == spg_none ? 
true : false; - /* match sp_alloc_check_prepare */ - if (!is_pass_through) - up_read(&spg->rw_lock); + /* match sp_alloc_prepare */ + up_read(&spg->rw_lock); if (!result) sp_update_process_stat(current, true, spa); /* this will free spa if mmap failed */ - if (spa && !IS_ERR(spa)) + if (spa && !IS_ERR(spa)) { __sp_area_drop(spa); + trace_sp_alloc_finish(ac, spa->va_start); + } - if (!is_pass_through) - sp_group_drop(spg); - - trace_sp_alloc_finish(ac, spa->va_start); + sp_group_drop(spg); sp_dump_stack(); sp_try_to_compact(); } @@ -2509,13 +2779,16 @@ void *sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id) int ret = 0; struct sp_alloc_context ac; + if (!sp_is_enabled()) + return ERR_PTR(-EOPNOTSUPP); + ret = sp_alloc_prepare(size, sp_flags, spg_id, &ac); if (ret) return ERR_PTR(ret); try_again: spa = sp_alloc_area(ac.size_aligned, ac.sp_flags, ac.spg, - SPA_TYPE_ALLOC, current->tgid); + ac.type, current->tgid); if (IS_ERR(spa)) { pr_err_ratelimited("alloc spa failed in allocation(potential no enough virtual memory when -75): %ld\n", PTR_ERR(spa)); @@ -2586,7 +2859,7 @@ static unsigned long __sp_remap_get_pfn(unsigned long kva) /* when called by k2u to group, always make sure rw_lock of spg is down */ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa, - struct mm_struct *mm, unsigned long prot) + struct mm_struct *mm, unsigned long prot, struct sp_k2u_context *kc) { struct vm_area_struct *vma; unsigned long ret_addr; @@ -2598,21 +2871,26 @@ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa, if (unlikely(mm->core_state)) { pr_err("k2u mmap: encountered coredump, abort\n"); ret_addr = -EBUSY; + if (kc) + kc->state = K2U_COREDUMP; goto put_mm; } - ret_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot); + if (kc && kc->sp_flags & SP_PROT_RO) + prot = PROT_READ; + + ret_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma); if (IS_ERR_VALUE(ret_addr)) { pr_debug("k2u mmap failed %lx\n", ret_addr); goto put_mm; } - BUG_ON(ret_addr != spa->va_start); - vma = find_vma(mm, ret_addr); - BUG_ON(vma == NULL); if (prot & PROT_WRITE) vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY); + if (kc && kc->sp_flags & SP_PROT_RO) + vma->vm_flags &= ~VM_MAYWRITE; + if (is_vm_hugetlb_page(vma)) { ret = remap_vmalloc_hugepage_range(vma, (void *)kva, 0); if (ret) { @@ -2660,21 +2938,27 @@ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa, */ static void *sp_make_share_kva_to_task(unsigned long kva, unsigned long size, unsigned long sp_flags) { + int ret; void *uva; struct sp_area *spa; struct spg_proc_stat *stat; unsigned long prot = PROT_READ | PROT_WRITE; + struct sp_k2u_context kc; + struct sp_group *spg; down_write(&sp_group_sem); - stat = sp_init_process_stat(current, current->mm, spg_none); - up_write(&sp_group_sem); - if (IS_ERR(stat)) { - pr_err_ratelimited("k2u_task init process stat failed %lx\n", - PTR_ERR(stat)); - return stat; + ret = sp_init_group_master_locked(current, current->mm); + if (ret) { + up_write(&sp_group_sem); + pr_err_ratelimited("k2u_task init local mapping failed %d\n", ret); + return ERR_PTR(ret); } - spa = sp_alloc_area(size, sp_flags, spg_none, SPA_TYPE_K2TASK, current->tgid); + spg = current->mm->sp_group_master->local; + stat = find_spg_proc_stat(current->mm->sp_group_master->stat, current->tgid, spg->id); + up_write(&sp_group_sem); + + spa = sp_alloc_area(size, sp_flags, spg, SPA_TYPE_K2TASK, current->tgid); if (IS_ERR(spa)) { 
pr_err_ratelimited("alloc spa failed in k2u_task (potential no enough virtual memory when -75): %ld\n", PTR_ERR(spa)); @@ -2682,8 +2966,8 @@ static void *sp_make_share_kva_to_task(unsigned long kva, unsigned long size, un } spa->kva = kva; - - uva = (void *)sp_remap_kva_to_vma(kva, spa, current->mm, prot); + kc.sp_flags = sp_flags; + uva = (void *)sp_remap_kva_to_vma(kva, spa, current->mm, prot, &kc); __sp_area_drop(spa); if (IS_ERR(uva)) pr_err("remap k2u to task failed %ld\n", PTR_ERR(uva)); @@ -2711,6 +2995,8 @@ static void *sp_make_share_kva_to_spg(unsigned long kva, unsigned long size, struct mm_struct *mm; struct sp_group_node *spg_node; void *uva = ERR_PTR(-ENODEV); + struct sp_k2u_context kc; + unsigned long ret_addr = -ENODEV; down_read(&spg->rw_lock); spa = sp_alloc_area(size, sp_flags, spg, SPA_TYPE_K2SPG, current->tgid); @@ -2722,15 +3008,20 @@ static void *sp_make_share_kva_to_spg(unsigned long kva, unsigned long size, } spa->kva = kva; - + kc.sp_flags = sp_flags; list_for_each_entry(spg_node, &spg->procs, proc_node) { mm = spg_node->master->mm; - uva = (void *)sp_remap_kva_to_vma(kva, spa, mm, spg_node->prot); - if (IS_ERR(uva)) { + kc.state = K2U_NORMAL; + ret_addr = sp_remap_kva_to_vma(kva, spa, mm, spg_node->prot, &kc); + if (IS_ERR_VALUE(ret_addr)) { + if (kc.state == K2U_COREDUMP) + continue; + uva = (void *)ret_addr; pr_err("remap k2u to spg failed %ld\n", PTR_ERR(uva)); __sp_free(spg, spa->va_start, spa_size(spa), mm); goto out; } + uva = (void *)ret_addr; } out: @@ -2755,18 +3046,6 @@ static bool vmalloc_area_set_flag(unsigned long kva, unsigned long flags) return false; } -struct sp_k2u_context { - unsigned long kva; - unsigned long kva_aligned; - unsigned long size; - unsigned long size_aligned; - unsigned long sp_flags; - int spg_id; - bool to_task; - struct timespec64 start; - struct timespec64 end; -}; - static void trace_sp_k2u_begin(struct sp_k2u_context *kc) { if (!sysctl_sp_perf_k2u) @@ -2803,10 +3082,11 @@ static int sp_k2u_prepare(unsigned long kva, unsigned long size, trace_sp_k2u_begin(kc); - if (sp_flags & ~SP_DVPP) { + if (sp_flags & ~SP_FLAG_MASK) { pr_err_ratelimited("k2u sp_flags %lx error\n", sp_flags); return -EINVAL; } + sp_flags &= ~SP_HUGEPAGE; if (!current->mm) { pr_err_ratelimited("k2u: kthread is not allowed\n"); @@ -2839,33 +3119,12 @@ static int sp_k2u_prepare(unsigned long kva, unsigned long size, kc->size_aligned = size_aligned; kc->sp_flags = sp_flags; kc->spg_id = spg_id; - kc->to_task = false; - return 0; -} - -static int sp_check_k2task(struct sp_k2u_context *kc) -{ - int ret = 0; - int spg_id = kc->spg_id; - - if (share_pool_group_mode == SINGLE_GROUP_MODE) { - struct sp_group *spg = get_first_group(current->mm); + if (spg_id == SPG_ID_DEFAULT || spg_id == SPG_ID_NONE) + kc->to_task = true; + else + kc->to_task = false; - if (!spg) { - if (spg_id != SPG_ID_NONE && spg_id != SPG_ID_DEFAULT) - ret = -EINVAL; - else - kc->to_task = true; - } else { - if (spg_id != SPG_ID_DEFAULT && spg_id != spg->id) - ret = -EINVAL; - sp_group_drop(spg); - } - } else { - if (spg_id == SPG_ID_DEFAULT || spg_id == SPG_ID_NONE) - kc->to_task = true; - } - return ret; + return 0; } static void *sp_k2u_finish(void *uva, struct sp_k2u_context *kc) @@ -2904,18 +3163,15 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size, int ret; struct sp_k2u_context kc; + if (!sp_is_enabled()) + return ERR_PTR(-EOPNOTSUPP); + check_interrupt_context(); ret = sp_k2u_prepare(kva, size, sp_flags, spg_id, &kc); if (ret) return ERR_PTR(ret); - ret = 
sp_check_k2task(&kc); - if (ret) { - uva = ERR_PTR(ret); - goto out; - } - if (kc.to_task) uva = sp_make_share_kva_to_task(kc.kva_aligned, kc.size_aligned, kc.sp_flags); else { @@ -2950,9 +3206,40 @@ EXPORT_SYMBOL_GPL(mg_sp_make_share_k2u); static int sp_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk) { + struct page *page; struct sp_walk_data *sp_walk_data = walk->private; + /* + * There exists a case in DVPP where the page table uses huge pages but its + * vma doesn't record it, something like THP. + * So we cannot tell whether it is a hugepage map until we access the + * pmd here. If mixed page sizes appear, just return an error. + */ + if (pmd_huge(*pmd)) { + if (!sp_walk_data->is_page_type_set) { + sp_walk_data->is_page_type_set = true; + sp_walk_data->is_hugepage = true; + } else if (!sp_walk_data->is_hugepage) + return -EFAULT; + + /* To skip pte level walk */ + walk->action = ACTION_CONTINUE; + + page = pmd_page(*pmd); + get_page(page); + sp_walk_data->pages[sp_walk_data->page_count++] = page; + + return 0; + } + + if (!sp_walk_data->is_page_type_set) { + sp_walk_data->is_page_type_set = true; + sp_walk_data->is_hugepage = false; + } else if (sp_walk_data->is_hugepage) + return -EFAULT; + sp_walk_data->pmd = pmd; + return 0; } @@ -3096,6 +3383,8 @@ static int __sp_walk_page_range(unsigned long uva, unsigned long size, sp_walk.pmd_entry = sp_pmd_entry; } + sp_walk_data->is_page_type_set = false; + sp_walk_data->page_count = 0; sp_walk_data->page_size = page_size; uva_aligned = ALIGN_DOWN(uva, page_size); sp_walk_data->uva_aligned = uva_aligned; @@ -3120,8 +3409,12 @@ static int __sp_walk_page_range(unsigned long uva, unsigned long size, ret = walk_page_range(mm, uva_aligned, uva_aligned + size_aligned, &sp_walk, sp_walk_data); - if (ret) + if (ret) { + while (sp_walk_data->page_count--) + put_page(pages[sp_walk_data->page_count]); kvfree(pages); + sp_walk_data->pages = NULL; + } return ret; } @@ -3157,11 +3450,12 @@ void *sp_make_share_u2k(unsigned long uva, unsigned long size, int pid) int ret = 0; struct mm_struct *mm = current->mm; void *p = ERR_PTR(-ESRCH); - struct sp_walk_data sp_walk_data = { - .page_count = 0, - }; + struct sp_walk_data sp_walk_data; struct vm_struct *area; + if (!sp_is_enabled()) + return ERR_PTR(-EOPNOTSUPP); + check_interrupt_context(); if (mm == NULL) { @@ -3235,7 +3529,7 @@ EXPORT_SYMBOL_GPL(mg_sp_make_share_u2k); * * This also means we must trust DVPP channel destroy and guard worker code. 
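The new sp_pmd_entry() above enforces a single invariant for the whole walk: whichever page size the first entry establishes, every later entry must match, because DVPP page tables may use huge mappings that the vma does not advertise. Stripped of the page accounting, the check is this little state machine (userspace model, names illustrative):

#include <stdbool.h>

struct walk_state {
	bool is_page_type_set;	/* has the first entry fixed the type? */
	bool is_hugepage;
};

/* 0 on success; -1 (the kernel uses -EFAULT) when page sizes are mixed */
static int note_mapping(struct walk_state *s, bool entry_is_huge)
{
	if (!s->is_page_type_set) {
		s->is_page_type_set = true;	/* first entry decides */
		s->is_hugepage = entry_is_huge;
		return 0;
	}
	return s->is_hugepage == entry_is_huge ? 0 : -1;
}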
*/ -static int sp_unshare_uva(unsigned long uva, unsigned long size) +static int sp_unshare_uva(unsigned long uva, unsigned long size, int group_id) { int ret = 0; struct mm_struct *mm; @@ -3243,14 +3537,21 @@ static int sp_unshare_uva(unsigned long uva, unsigned long size) unsigned long uva_aligned; unsigned long size_aligned; unsigned int page_size; + struct sp_group *spg; + + spg = __sp_find_spg(current->tgid, group_id); + if (!spg) { + pr_debug("sp unshare find group failed %d\n", group_id); + return -EINVAL; + } /* * at first we guess it's a hugepage addr * we can tolerate at most PMD_SIZE or PAGE_SIZE which is matched in k2u */ - spa = __find_sp_area(ALIGN_DOWN(uva, PMD_SIZE)); + spa = __find_sp_area(spg, ALIGN_DOWN(uva, PMD_SIZE)); if (!spa) { - spa = __find_sp_area(ALIGN_DOWN(uva, PAGE_SIZE)); + spa = __find_sp_area(spg, ALIGN_DOWN(uva, PAGE_SIZE)); if (!spa) { ret = -EINVAL; pr_debug("invalid input uva %lx in unshare uva\n", (unsigned long)uva); @@ -3381,6 +3682,7 @@ static int sp_unshare_uva(unsigned long uva, unsigned long size) out_drop_area: __sp_area_drop(spa); out: + sp_group_drop(spg); return ret; } @@ -3442,11 +3744,17 @@ int sp_unshare(unsigned long va, unsigned long size, int pid, int spg_id) { int ret = 0; + if (!sp_is_enabled()) + return -EOPNOTSUPP; + check_interrupt_context(); + if (current->flags & PF_KTHREAD) + return -EINVAL; + if (va < TASK_SIZE) { /* user address */ - ret = sp_unshare_uva(va, size); + ret = sp_unshare_uva(va, size, spg_id); } else if (va >= PAGE_OFFSET) { /* kernel address */ ret = sp_unshare_kva(va, size); @@ -3460,9 +3768,9 @@ int sp_unshare(unsigned long va, unsigned long size, int pid, int spg_id) } EXPORT_SYMBOL_GPL(sp_unshare); -int mg_sp_unshare(unsigned long va, unsigned long size) +int mg_sp_unshare(unsigned long va, unsigned long size, int id) { - return sp_unshare(va, size, 0, 0); + return sp_unshare(va, size, 0, id); } EXPORT_SYMBOL_GPL(mg_sp_unshare); @@ -3484,6 +3792,9 @@ int sp_walk_page_range(unsigned long uva, unsigned long size, struct mm_struct *mm; int ret = 0; + if (!sp_is_enabled()) + return -EOPNOTSUPP; + check_interrupt_context(); if (unlikely(!sp_walk_data)) { @@ -3500,7 +3811,6 @@ int sp_walk_page_range(unsigned long uva, unsigned long size, return -ESRCH; } - sp_walk_data->page_count = 0; down_write(&mm->mmap_lock); if (likely(!mm->core_state)) ret = __sp_walk_page_range(uva, size, mm, sp_walk_data); @@ -3530,6 +3840,9 @@ EXPORT_SYMBOL_GPL(mg_sp_walk_page_range); */ void sp_walk_page_free(struct sp_walk_data *sp_walk_data) { + if (!sp_is_enabled()) + return; + check_interrupt_context(); if (!sp_walk_data) @@ -3571,16 +3884,53 @@ EXPORT_SYMBOL_GPL(sp_unregister_notifier); */ bool sp_config_dvpp_range(size_t start, size_t size, int device_id, int pid) { - if (pid < 0 || - size <= 0 || size > MMAP_SHARE_POOL_16G_SIZE || - device_id < 0 || device_id >= sp_device_number || - !is_online_node_id(device_id) || - is_sp_dev_addr_enabled(device_id)) + int ret; + bool err = false; + struct task_struct *tsk; + struct mm_struct *mm; + struct sp_group *spg; + struct sp_mapping *spm; + unsigned long default_start; + + if (!sp_is_enabled()) return false; - sp_dev_va_start[device_id] = start; - sp_dev_va_size[device_id] = size; - return true; + /* NOTE: check the start address */ + if (pid < 0 || size <= 0 || size > MMAP_SHARE_POOL_16G_SIZE || + device_id < 0 || device_id >= sp_device_number || !is_online_node_id(device_id)) + return false; + + ret = get_task(pid, &tsk); + if (ret) + return false; + + mm = 
get_task_mm(tsk->group_leader); + if (!mm) + goto put_task; + + spg = sp_get_local_group(tsk, mm); + if (IS_ERR(spg)) + goto put_mm; + + spm = spg->dvpp; + default_start = MMAP_SHARE_POOL_16G_START + device_id * MMAP_SHARE_POOL_16G_SIZE; + /* The dvpp range of each group can be configured only once */ + if (spm->start[device_id] != default_start) + goto put_spg; + + spm->start[device_id] = start; + spm->end[device_id] = start + size; + + err = true; + +put_spg: + sp_group_drop(spg); +put_mm: + mmput(mm); +put_task: + put_task_struct(tsk); + + return err; } EXPORT_SYMBOL_GPL(sp_config_dvpp_range); @@ -3605,7 +3955,8 @@ static bool is_sp_normal_addr(unsigned long addr) */ bool is_sharepool_addr(unsigned long addr) { - return is_sp_normal_addr(addr) || is_device_addr(addr); + return sp_is_enabled() && + (is_sp_normal_addr(addr) || is_device_addr(addr)); } EXPORT_SYMBOL_GPL(is_sharepool_addr); @@ -3623,12 +3974,9 @@ int sp_node_id(struct vm_area_struct *vma) if (!sp_is_enabled()) return node_id; - if (vma) { - spa = __find_sp_area(vma->vm_start); - if (spa) { - node_id = spa->node_id; - __sp_area_drop(spa); - } + if (vma && vma->vm_flags & VM_SHARE_POOL && vma->vm_private_data) { + spa = vma->vm_private_data; + node_id = spa->node_id; } return node_id; @@ -3648,13 +3996,6 @@ static int __init enable_share_k2u_to_group(char *s) } __setup("enable_sp_share_k2u_spg", enable_share_k2u_to_group); -static int __init enable_sp_multi_group_mode(char *s) -{ - share_pool_group_mode = MULTI_GROUP_MODE; - return 1; -} -__setup("enable_sp_multi_group_mode", enable_sp_multi_group_mode); - /*** Statistical and maintenance functions ***/ static void free_process_spg_proc_stat(struct sp_proc_stat *proc_stat) @@ -3688,7 +4029,7 @@ static void free_sp_proc_stat(struct sp_proc_stat *stat) } /* the caller make sure stat is not NULL */ -void sp_proc_stat_drop(struct sp_proc_stat *stat) +static void sp_proc_stat_drop(struct sp_proc_stat *stat) { if (atomic_dec_and_test(&stat->use_count)) free_sp_proc_stat(stat); @@ -3797,7 +4138,7 @@ static void print_process_prot(struct seq_file *seq, unsigned long prot) seq_puts(seq, "R"); else if (prot == (PROT_READ | PROT_WRITE)) seq_puts(seq, "RW"); - else /* e.g. 
spg_none */ + else seq_puts(seq, "-"); } @@ -3812,6 +4153,9 @@ int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns, unsigned long anon, file, shmem, total_rss, prot; long sp_res, sp_res_nsize, non_sp_res, non_sp_shm; + if (!sp_is_enabled()) + return 0; + if (!mm) return 0; @@ -3859,14 +4203,13 @@ int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns, return 0; } -static void rb_spa_stat_show(struct seq_file *seq) +static void spa_stat_of_mapping_show(struct seq_file *seq, struct sp_mapping *spm) { struct rb_node *node; struct sp_area *spa, *prev = NULL; spin_lock(&sp_area_lock); - - for (node = rb_first(&sp_area_root); node; node = rb_next(node)) { + for (node = rb_first(&spm->area_root); node; node = rb_next(node)) { __sp_area_drop_locked(prev); spa = rb_entry(node, struct sp_area, rb_node); @@ -3874,16 +4217,12 @@ static void rb_spa_stat_show(struct seq_file *seq) atomic_inc(&spa->use_count); spin_unlock(&sp_area_lock); - if (spa->spg == spg_none) /* k2u to task */ - seq_printf(seq, "%-10s ", "None"); - else { - down_read(&spa->spg->rw_lock); - if (spg_valid(spa->spg)) /* k2u to group */ - seq_printf(seq, "%-10d ", spa->spg->id); - else /* spg is dead */ - seq_printf(seq, "%-10s ", "Dead"); - up_read(&spa->spg->rw_lock); - } + down_read(&spa->spg->rw_lock); + if (spg_valid(spa->spg)) /* k2u to group */ + seq_printf(seq, "%-10d ", spa->spg->id); + else /* spg is dead */ + seq_printf(seq, "%-10s ", "Dead"); + up_read(&spa->spg->rw_lock); seq_printf(seq, "%2s%-14lx %2s%-14lx %-10ld ", "0x", spa->va_start, @@ -3919,6 +4258,30 @@ static void rb_spa_stat_show(struct seq_file *seq) spin_unlock(&sp_area_lock); } +static void spa_normal_stat_show(struct seq_file *seq) +{ + spa_stat_of_mapping_show(seq, sp_mapping_normal); +} + +static int idr_spg_dvpp_stat_show_cb(int id, void *p, void *data) +{ + struct sp_group *spg = p; + struct seq_file *seq = data; + + if (!is_local_group(spg->id) || atomic_read(&spg->dvpp->user) == 1) + spa_stat_of_mapping_show(seq, spg->dvpp); + + return 0; +} + +static void spa_dvpp_stat_show(struct seq_file *seq) +{ + down_read(&sp_group_sem); + idr_for_each(&sp_group_idr, idr_spg_dvpp_stat_show_cb, seq); + up_read(&sp_group_sem); +} + + void spa_overview_show(struct seq_file *seq) { unsigned int total_num, alloc_num, k2u_task_num, k2u_spg_num; @@ -3972,12 +4335,11 @@ static int idr_spg_stat_cb(int id, void *p, void *data) struct sp_spg_stat *s = p; struct seq_file *seq = data; - if (seq != NULL) { - if (id == 0) - seq_puts(seq, "Non Group "); - else - seq_printf(seq, "Group %6d ", id); + if (is_local_group(id) && atomic64_read(&s->size) == 0) + return 0; + if (seq != NULL) { + seq_printf(seq, "Group %6d ", id); seq_printf(seq, "size: %lld KB, spa num: %d, total alloc: %lld KB, normal alloc: %lld KB, huge alloc: %lld KB\n", byte2kb(atomic64_read(&s->size)), atomic_read(&s->spa_num), @@ -3985,11 +4347,7 @@ static int idr_spg_stat_cb(int id, void *p, void *data) byte2kb(atomic64_read(&s->alloc_nsize)), byte2kb(atomic64_read(&s->alloc_hsize))); } else { - if (id == 0) - pr_info("Non Group "); - else - pr_info("Group %6d ", id); - + pr_info("Group %6d ", id); pr_info("size: %lld KB, spa num: %d, total alloc: %lld KB, normal alloc: %lld KB, huge alloc: %lld KB\n", byte2kb(atomic64_read(&s->size)), atomic_read(&s->spa_num), @@ -4016,9 +4374,9 @@ void spg_overview_show(struct seq_file *seq) atomic_read(&sp_overall_stat.spa_total_num)); } - down_read(&sp_group_sem); + down_read(&sp_spg_stat_sem); idr_for_each(&sp_spg_stat_idr, idr_spg_stat_cb, 
seq); - up_read(&sp_group_sem); + up_read(&sp_spg_stat_sem); if (seq != NULL) seq_puts(seq, "\n"); @@ -4033,7 +4391,8 @@ static int spa_stat_show(struct seq_file *seq, void *offset) /* print the file header */ seq_printf(seq, "%-10s %-16s %-16s %-10s %-7s %-5s %-8s %-8s\n", "Group ID", "va_start", "va_end", "Size(KB)", "Type", "Huge", "PID", "Ref"); - rb_spa_stat_show(seq); + spa_normal_stat_show(seq); + spa_dvpp_stat_show(seq); return 0; } @@ -4057,7 +4416,6 @@ static int idr_proc_stat_cb(int id, void *p, void *data) long sp_res, sp_res_nsize, non_sp_res, non_sp_shm; /* to prevent ABBA deadlock, first hold sp_group_sem */ - down_read(&sp_group_sem); mutex_lock(&spg_stat->lock); hash_for_each(spg_stat->hash, i, spg_proc_stat, gnode) { proc_stat = spg_proc_stat->proc_stat; @@ -4071,10 +4429,7 @@ static int idr_proc_stat_cb(int id, void *p, void *data) prot = get_process_prot_locked(id, mm); seq_printf(seq, "%-8d ", tgid); - if (id == 0) - seq_printf(seq, "%-8c ", '-'); - else - seq_printf(seq, "%-8d ", id); + seq_printf(seq, "%-8d ", id); seq_printf(seq, "%-9ld %-9ld %-9ld %-10ld %-10ld %-8ld %-7ld %-7ld %-10ld ", get_spg_proc_alloc(spg_proc_stat), get_spg_proc_k2u(spg_proc_stat), @@ -4086,7 +4441,6 @@ static int idr_proc_stat_cb(int id, void *p, void *data) seq_putc(seq, '\n'); } mutex_unlock(&spg_stat->lock); - up_read(&sp_group_sem); return 0; } @@ -4104,10 +4458,16 @@ static int proc_stat_show(struct seq_file *seq, void *offset) byte2kb(atomic64_read(&kthread_stat.alloc_size)), byte2kb(atomic64_read(&kthread_stat.k2u_size))); - /* pay attention to potential ABBA deadlock */ + /* + * This ugly code is just for fixing the ABBA deadlock against + * sp_group_add_task. + */ + down_read(&sp_group_sem); down_read(&sp_spg_stat_sem); idr_for_each(&sp_spg_stat_idr, idr_proc_stat_cb, seq); up_read(&sp_spg_stat_sem); + up_read(&sp_group_sem); + return 0; } @@ -4194,13 +4554,12 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm, int node_id; struct sp_area *spa; - spa = __find_sp_area(vma->vm_start); + spa = vma->vm_private_data; if (!spa) { pr_err("share pool: vma is invalid, not from sp mmap\n"); return ret; } node_id = spa->node_id; - __sp_area_drop(spa); retry: page = find_lock_page(mapping, idx); @@ -4391,13 +4750,15 @@ void sp_group_post_exit(struct mm_struct *mm) sp_proc_stat_drop(stat); } - /* lockless traverse */ + down_write(&sp_group_sem); list_for_each_entry_safe(spg_node, tmp, &master->node_list, group_node) { spg = spg_node->spg; /* match with refcount inc in sp_group_add_task */ - sp_group_drop(spg); + if (atomic_dec_and_test(&spg->use_count)) + free_sp_group_locked(spg); kfree(spg_node); } + up_write(&sp_group_sem); kfree(master); } @@ -4427,11 +4788,13 @@ static void __init sp_device_number_detect(void) static int __init share_pool_init(void) { - /* lockless, as init kthread has no sp operation else */ - spg_none = create_spg(GROUP_NONE); - /* without free spg_none, not a serious problem */ - if (IS_ERR(spg_none) || !spg_none) + if (!sp_is_enabled()) + return 0; + + sp_mapping_normal = sp_mapping_create(SP_MAPPING_NORMAL); + if (IS_ERR(sp_mapping_normal)) goto fail; + atomic_inc(&sp_mapping_normal->user); sp_device_number_detect(); proc_sharepool_init(); diff --git a/mm/shmem.c b/mm/shmem.c index 746e48454cb8ab1094e83afac95f0861837c0f75..ad2d68150ed2f4d8f165c11e05ba0129df73ea01 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -556,7 +556,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, struct shmem_inode_info *info; struct page *page; unsigned long batch 
= sc ? sc->nr_to_scan : 128; - int removed = 0, split = 0; + int split = 0; if (list_empty(&sbinfo->shrinklist)) return SHRINK_STOP; @@ -571,7 +571,6 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, /* inode is about to be evicted */ if (!inode) { list_del_init(&info->shrinklist); - removed++; goto next; } @@ -579,12 +578,12 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, if (round_up(inode->i_size, PAGE_SIZE) == round_up(inode->i_size, HPAGE_PMD_SIZE)) { list_move(&info->shrinklist, &to_remove); - removed++; goto next; } list_move(&info->shrinklist, &list); next: + sbinfo->shrinklist_len--; if (!--batch) break; } @@ -604,7 +603,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, inode = &info->vfs_inode; if (nr_to_split && split >= nr_to_split) - goto leave; + goto move_back; page = find_get_page(inode->i_mapping, (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT); @@ -618,38 +617,44 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, } /* - * Leave the inode on the list if we failed to lock - * the page at this time. + * Move the inode on the list back to shrinklist if we failed + * to lock the page at this time. * * Waiting for the lock may lead to deadlock in the * reclaim path. */ if (!trylock_page(page)) { put_page(page); - goto leave; + goto move_back; } ret = split_huge_page(page); unlock_page(page); put_page(page); - /* If split failed leave the inode on the list */ + /* If split failed move the inode on the list back to shrinklist */ if (ret) - goto leave; + goto move_back; split++; drop: list_del_init(&info->shrinklist); - removed++; -leave: + goto put; +move_back: + /* + * Make sure the inode is either on the global list or deleted + * from any local list before iput() since it could be deleted + * in another thread once we put the inode (then the local list + * is corrupted). + */ + spin_lock(&sbinfo->shrinklist_lock); + list_move(&info->shrinklist, &sbinfo->shrinklist); + sbinfo->shrinklist_len++; + spin_unlock(&sbinfo->shrinklist_lock); +put: iput(inode); } - spin_lock(&sbinfo->shrinklist_lock); - list_splice_tail(&list, &sbinfo->shrinklist); - sbinfo->shrinklist_len -= removed; - spin_unlock(&sbinfo->shrinklist_lock); - return split; } @@ -934,6 +939,9 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, if (lend == -1) end = -1; /* unsigned, so actually very big */ + if (info->fallocend > start && info->fallocend <= end && !unfalloc) + info->fallocend = start; + pagevec_init(&pvec); index = start; while (index < end && find_lock_entries(mapping, index, end - 1, @@ -1085,7 +1093,6 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); struct shmem_inode_info *info = SHMEM_I(inode); - struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); int error; error = setattr_prepare(dentry, attr); @@ -1121,24 +1128,6 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr) if (oldsize > holebegin) unmap_mapping_range(inode->i_mapping, holebegin, 0, 1); - - /* - * Part of the huge page can be beyond i_size: subject - * to shrink under memory pressure. 
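The shrinklist rework above follows a single rule: an inode detached onto the local batch list must either be deleted from that list or moved back to the global shrinklist before iput(), because a final iput() can evict the inode and unlink it, corrupting the local list; the length counter is likewise maintained at detach/re-attach time instead of via a separate "removed" count. A minimal sketch of that pattern, with hypothetical names (my_sb, my_node, my_try_process, my_put), not the shmem code itself:

static unsigned long my_shrink(struct my_sb *sb)
{
	LIST_HEAD(batch);
	struct my_node *n, *tmp;
	unsigned long done = 0;

	/* detach a batch under the lock; account at detach time */
	spin_lock(&sb->lock);
	list_for_each_entry_safe(n, tmp, &sb->global_list, link) {
		list_move(&n->link, &batch);
		sb->list_len--;
	}
	spin_unlock(&sb->lock);

	/* process the batch without holding the lock */
	list_for_each_entry_safe(n, tmp, &batch, link) {
		if (my_try_process(n)) {
			list_del_init(&n->link);
			done++;
		} else {
			/*
			 * Re-attach before my_put(): the final put may free
			 * and unlink n, which would corrupt 'batch' if n
			 * were still queued there.
			 */
			spin_lock(&sb->lock);
			list_move(&n->link, &sb->global_list);
			sb->list_len++;
			spin_unlock(&sb->lock);
		}
		my_put(n);	/* may drop the last reference */
	}
	return done;
}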
- */ - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { - spin_lock(&sbinfo->shrinklist_lock); - /* - * _careful to defend against unlocked access to - * ->shrink_list in shmem_unused_huge_shrink() - */ - if (list_empty_careful(&info->shrinklist)) { - list_add_tail(&info->shrinklist, - &sbinfo->shrinklist); - sbinfo->shrinklist_len++; - } - spin_unlock(&sbinfo->shrinklist_lock); - } } } @@ -2714,7 +2703,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_falloc shmem_falloc; - pgoff_t start, index, end; + pgoff_t start, index, end, undo_fallocend; int error; if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) @@ -2783,7 +2772,16 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, inode->i_private = &shmem_falloc; spin_unlock(&inode->i_lock); - for (index = start; index < end; index++) { + /* + * info->fallocend is only relevant when huge pages might be + * involved: to prevent split_huge_page() freeing fallocated + * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size. + */ + undo_fallocend = info->fallocend; + if (info->fallocend < end) + info->fallocend = end; + + for (index = start; index < end; ) { struct page *page; /* @@ -2797,6 +2795,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, else error = shmem_getpage(inode, index, &page, SGP_FALLOC); if (error) { + info->fallocend = undo_fallocend; /* Remove the !PageUptodate pages we added */ if (index > start) { shmem_undo_range(inode, @@ -2806,13 +2805,26 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, goto undone; } + index++; + /* + * Here is a more important optimization than it appears: + * a second SGP_FALLOC on the same huge page will clear it, + * making it PageUptodate and un-undoable if we fail later. + */ + if (PageTransCompound(page)) { + index = round_up(index, HPAGE_PMD_NR); + /* Beware 32-bit wraparound */ + if (!index) + index--; + } + /* * Inform shmem_writepage() how far we have reached. * No need for lock or barrier: we have the page lock. 
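The PageTransCompound() skip above is plain power-of-two rounding. A tiny, runnable illustration of both the jump to the next huge-page boundary and the 32-bit wraparound guard, assuming HPAGE_PMD_NR is 512 (2 MB huge pages over 4 KB base pages):

#include <stdio.h>
#include <stdint.h>

#define HPAGE_PMD_NR 512u	/* 2 MB huge page / 4 KB base page */

/* the kernel's round_up() formula for power-of-two alignment */
static uint32_t round_up_u32(uint32_t x, uint32_t align)
{
	return ((x - 1) | (align - 1)) + 1;
}

int main(void)
{
	/* one SGP_FALLOC inside a huge page instantiates all of it, so
	 * the loop jumps straight to the next huge-page boundary */
	printf("%u\n", round_up_u32(513, HPAGE_PMD_NR));   /* 1024 */

	/* with a 32-bit page offset, rounding up from the last huge
	 * page wraps to 0; backing off by one keeps 'index < end'
	 * loops terminating, mirroring "if (!index) index--" above */
	uint32_t index = round_up_u32(UINT32_MAX - 10, HPAGE_PMD_NR);
	printf("%u\n", index);                             /* 0 */
	if (!index)
		index--;                                   /* 4294967295 */
	printf("%u\n", index);
	return 0;
}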
*/ - shmem_falloc.next++; if (!PageUptodate(page)) - shmem_falloc.nr_falloced++; + shmem_falloc.nr_falloced += index - shmem_falloc.next; + shmem_falloc.next = index; /* * If !PageUptodate, leave it that way so that freeable pages @@ -2857,6 +2869,9 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_ffree = sbinfo->free_inodes; } /* else leave those fields 0 like simple_statfs */ + + buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); + return 0; } @@ -3417,7 +3432,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) break; case Opt_nr_blocks: ctx->blocks = memparse(param->string, &rest); - if (*rest) + if (*rest || ctx->blocks > S64_MAX) goto bad_value; ctx->seen |= SHMEM_SEEN_BLOCKS; break; @@ -3538,6 +3553,7 @@ static int shmem_reconfigure(struct fs_context *fc) spin_lock(&sbinfo->stat_lock); inodes = sbinfo->max_inodes - sbinfo->free_inodes; + if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { if (!sbinfo->max_blocks) { err = "Cannot retroactively limit size"; diff --git a/mm/slab.c b/mm/slab.c index d152f910da267d3fb6b61f835ae2cd9bd74fc00d..ae84578f3fdea5075d5f6b19ce776c446cbd1bcc 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3428,6 +3428,7 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, { if (is_kfence_address(objp)) { kmemleak_free_recursive(objp, cachep->flags); + memcg_slab_free_hook(cachep, &objp, 1); __kfence_free(objp); return; } diff --git a/mm/slub.c b/mm/slub.c index 7a7b0bf82b8eb71c1762331c50beff70110324b7..98452815a066c966b5c41f2511779cdb91f1fd12 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3209,7 +3209,9 @@ static inline void free_nonslab_page(struct page *page, void *object) { unsigned int order = compound_order(page); - VM_BUG_ON_PAGE(!PageCompound(page), page); + if (WARN_ON_ONCE(!PageCompound(page))) + pr_warn_once("object pointer: 0x%p\n", object); + kfree_hook(object); mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order)); __free_pages(page, order); diff --git a/mm/swap_state.c b/mm/swap_state.c index a9e42d48312be0bdbd0e0fa400f1e752bfd4b142..149f4678106179117d8571621ac70d278e30008a 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -502,7 +502,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * __read_swap_cache_async(), which has set SWAP_HAS_CACHE * in swap_map, but not yet added its page to swap cache. 
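The cond_resched() to schedule_timeout_uninterruptible(1) change immediately below is worth spelling out: cond_resched() is a no-op unless a reschedule is already pending, so a high-priority task spinning here can starve the lower-priority owner of SWAP_HAS_CACHE indefinitely (a livelock), whereas sleeping for one jiffy forces the owner to get CPU time. Distilled shape of the retry loop, where try_claim_slot() is a hypothetical stand-in for the swapcache_prepare() step:

for (;;) {
	err = try_claim_slot(entry);
	if (err != -EEXIST)
		break;		/* claimed the slot, or a hard error */
	/*
	 * Another task owns the slot but has not published its page
	 * yet. cond_resched() would not guarantee it ever runs;
	 * sleeping one jiffy does.
	 */
	schedule_timeout_uninterruptible(1);
}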
*/ - cond_resched(); + schedule_timeout_uninterruptible(1); } /* diff --git a/mm/swapfile.c b/mm/swapfile.c index 5af6b0f770de626c8ab644563c01e8f3081c6aee..eaf483c7c83e7691297d12b818bfe20ffa1104e8 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3167,6 +3167,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) struct filename *name; struct file *swap_file = NULL; struct address_space *mapping; + struct dentry *dentry; int prio; int error; union swap_header *swap_header; @@ -3210,6 +3211,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->swap_file = swap_file; mapping = swap_file->f_mapping; + dentry = swap_file->f_path.dentry; inode = mapping->host; error = claim_swapfile(p, inode); @@ -3217,6 +3219,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) goto bad_swap; inode_lock(inode); + if (d_unlinked(dentry) || cant_mount(dentry)) { + error = -ENOENT; + goto bad_swap_unlock_inode; + } if (IS_SWAPFILE(inode)) { error = -EBUSY; goto bad_swap_unlock_inode; diff --git a/mm/usercopy.c b/mm/usercopy.c index ce83e0b137dd3f9292842d995aa5d0e8dd27e581..1f0dcff7bd23cc910b1b2481bfa35803a89ac8e8 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -296,7 +296,10 @@ static bool enable_checks __initdata = true; static int __init parse_hardened_usercopy(char *str) { - return strtobool(str, &enable_checks); + if (strtobool(str, &enable_checks)) + pr_warn("Invalid option string for hardened_usercopy: '%s'\n", + str); + return 1; } __setup("hardened_usercopy=", parse_hardened_usercopy); diff --git a/mm/util.c b/mm/util.c index d31820abadb4b6fe37a2b6397bedc1907622dcfa..67b350f4ffdc5f9e145ed27babea34c4b8b77821 100644 --- a/mm/util.c +++ b/mm/util.c @@ -587,8 +587,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) return ret; /* Don't even allow crazy sizes */ - if (WARN_ON_ONCE(size > INT_MAX)) + if (unlikely(size > INT_MAX)) { + WARN_ON_ONCE(!(flags & __GFP_NOWARN)); return NULL; + } return __vmalloc_node(size, 1, flags, node, __builtin_return_address(0)); diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 3ec1a51a6944ea0d322b7c0b3cccb8b8cc616f05..432ac5a16f2e04243aa88eac3ab309558ef28313 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -304,9 +304,9 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv) ref = priv->rings[i].intf->ref[j]; gnttab_end_foreign_access(ref, 0, 0); } - free_pages((unsigned long)priv->rings[i].data.in, - priv->rings[i].intf->ring_order - - (PAGE_SHIFT - XEN_PAGE_SHIFT)); + free_pages_exact(priv->rings[i].data.in, + 1UL << (priv->rings[i].intf->ring_order + + XEN_PAGE_SHIFT)); } gnttab_end_foreign_access(priv->rings[i].ref, 0, 0); free_page((unsigned long)priv->rings[i].intf); @@ -345,8 +345,8 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev, if (ret < 0) goto out; ring->ref = ret; - bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - order - (PAGE_SHIFT - XEN_PAGE_SHIFT)); + bytes = alloc_pages_exact(1UL << (order + XEN_PAGE_SHIFT), + GFP_KERNEL | __GFP_ZERO); if (!bytes) { ret = -ENOMEM; goto out; @@ -377,9 +377,7 @@ static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev, if (bytes) { for (i--; i >= 0; i--) gnttab_end_foreign_access(ring->intf->ref[i], 0, 0); - free_pages((unsigned long)bytes, - ring->intf->ring_order - - (PAGE_SHIFT - XEN_PAGE_SHIFT)); + free_pages_exact(bytes, 1UL << (order + XEN_PAGE_SHIFT)); } gnttab_end_foreign_access(ring->ref, 0, 0); free_page((unsigned long)ring->intf); diff --git 
a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 22278807b3f36dd4519ed39d916b460c50cfc498..5fff027f25fad2c1021f2c14e4746d79123119f6 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -77,6 +77,7 @@ static void ax25_kill_by_device(struct net_device *dev) { ax25_dev *ax25_dev; ax25_cb *s; + struct sock *sk; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return; @@ -85,13 +86,26 @@ static void ax25_kill_by_device(struct net_device *dev) again: ax25_for_each(s, &ax25_list) { if (s->ax25_dev == ax25_dev) { + sk = s->sk; + if (!sk) { + spin_unlock_bh(&ax25_list_lock); + ax25_disconnect(s, ENETUNREACH); + s->ax25_dev = NULL; + spin_lock_bh(&ax25_list_lock); + goto again; + } + sock_hold(sk); spin_unlock_bh(&ax25_list_lock); - lock_sock(s->sk); - s->ax25_dev = NULL; - release_sock(s->sk); + lock_sock(sk); ax25_disconnect(s, ENETUNREACH); + s->ax25_dev = NULL; + if (sk->sk_socket) { + dev_put(ax25_dev->dev); + ax25_dev_put(ax25_dev); + } + release_sock(sk); spin_lock_bh(&ax25_list_lock); - + sock_put(sk); /* The entry could have been deleted from the * list meanwhile and thus the next pointer is * no longer valid. Play it safe and restart @@ -355,21 +369,25 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl))) return -EFAULT; - if ((ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr)) == NULL) - return -ENODEV; - if (ax25_ctl.digi_count > AX25_MAX_DIGIS) return -EINVAL; if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL) return -EINVAL; + ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr); + if (!ax25_dev) + return -ENODEV; + digi.ndigi = ax25_ctl.digi_count; for (k = 0; k < digi.ndigi; k++) digi.calls[k] = ax25_ctl.digi_addr[k]; - if ((ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev)) == NULL) + ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev); + if (!ax25) { + ax25_dev_put(ax25_dev); return -ENOTCONN; + } switch (ax25_ctl.cmd) { case AX25_KILL: @@ -436,6 +454,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) } out_put: + ax25_dev_put(ax25_dev); ax25_cb_put(ax25); return ret; @@ -536,7 +555,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, ax25_cb *ax25; struct net_device *dev; char devname[IFNAMSIZ]; - unsigned long opt; + unsigned int opt; int res = 0; if (level != SOL_AX25) @@ -568,7 +587,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T1: - if (opt < 1 || opt > ULONG_MAX / HZ) { + if (opt < 1 || opt > UINT_MAX / HZ) { res = -EINVAL; break; } @@ -577,7 +596,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T2: - if (opt < 1 || opt > ULONG_MAX / HZ) { + if (opt < 1 || opt > UINT_MAX / HZ) { res = -EINVAL; break; } @@ -593,7 +612,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T3: - if (opt < 1 || opt > ULONG_MAX / HZ) { + if (opt < 1 || opt > UINT_MAX / HZ) { res = -EINVAL; break; } @@ -601,7 +620,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_IDLE: - if (opt > ULONG_MAX / (60 * HZ)) { + if (opt > UINT_MAX / (60 * HZ)) { res = -EINVAL; break; } @@ -961,14 +980,16 @@ static int ax25_release(struct socket *sock) { struct sock *sk = sock->sk; ax25_cb *ax25; + ax25_dev *ax25_dev; if (sk == NULL) return 0; sock_hold(sk); - sock_orphan(sk); lock_sock(sk); + sock_orphan(sk); ax25 = sk_to_ax25(sk); + ax25_dev = 
ax25->ax25_dev; if (sk->sk_type == SOCK_SEQPACKET) { switch (ax25->state) { @@ -1030,6 +1051,15 @@ static int ax25_release(struct socket *sock) sk->sk_state_change(sk); ax25_destroy_socket(ax25); } + if (ax25_dev) { + del_timer_sync(&ax25->timer); + del_timer_sync(&ax25->t1timer); + del_timer_sync(&ax25->t2timer); + del_timer_sync(&ax25->t3timer); + del_timer_sync(&ax25->idletimer); + dev_put(ax25_dev->dev); + ax25_dev_put(ax25_dev); + } sock->sk = NULL; release_sock(sk); @@ -1106,8 +1136,10 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) } } - if (ax25_dev != NULL) + if (ax25_dev) { ax25_fillin_cb(ax25, ax25_dev); + dev_hold(ax25_dev->dev); + } done: ax25_cb_add(ax25); diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index 4ac2e0847652a9426b5acf2ebc618520fbaf1751..d2e0cc67d91a72470c2f865688d3442c9d12e240 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -37,6 +37,7 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr) for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) if (ax25cmp(addr, (ax25_address *)ax25_dev->dev->dev_addr) == 0) { res = ax25_dev; + ax25_dev_hold(ax25_dev); } spin_unlock_bh(&ax25_dev_lock); @@ -56,6 +57,7 @@ void ax25_dev_device_up(struct net_device *dev) return; } + refcount_set(&ax25_dev->refcount, 1); dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; dev_hold(dev); @@ -84,6 +86,7 @@ void ax25_dev_device_up(struct net_device *dev) ax25_dev->next = ax25_dev_list; ax25_dev_list = ax25_dev; spin_unlock_bh(&ax25_dev_lock); + ax25_dev_hold(ax25_dev); ax25_register_dev_sysctl(ax25_dev); } @@ -113,9 +116,10 @@ void ax25_dev_device_down(struct net_device *dev) if ((s = ax25_dev_list) == ax25_dev) { ax25_dev_list = s->next; spin_unlock_bh(&ax25_dev_lock); + ax25_dev_put(ax25_dev); dev->ax25_ptr = NULL; dev_put(dev); - kfree(ax25_dev); + ax25_dev_put(ax25_dev); return; } @@ -123,9 +127,10 @@ void ax25_dev_device_down(struct net_device *dev) if (s->next == ax25_dev) { s->next = ax25_dev->next; spin_unlock_bh(&ax25_dev_lock); + ax25_dev_put(ax25_dev); dev->ax25_ptr = NULL; dev_put(dev); - kfree(ax25_dev); + ax25_dev_put(ax25_dev); return; } @@ -133,6 +138,7 @@ void ax25_dev_device_down(struct net_device *dev) } spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; + ax25_dev_put(ax25_dev); } int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) @@ -144,20 +150,32 @@ int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) switch (cmd) { case SIOCAX25ADDFWD: - if ((fwd_dev = ax25_addr_ax25dev(&fwd->port_to)) == NULL) + fwd_dev = ax25_addr_ax25dev(&fwd->port_to); + if (!fwd_dev) { + ax25_dev_put(ax25_dev); return -EINVAL; - if (ax25_dev->forward != NULL) + } + if (ax25_dev->forward) { + ax25_dev_put(fwd_dev); + ax25_dev_put(ax25_dev); return -EINVAL; + } ax25_dev->forward = fwd_dev->dev; + ax25_dev_put(fwd_dev); + ax25_dev_put(ax25_dev); break; case SIOCAX25DELFWD: - if (ax25_dev->forward == NULL) + if (!ax25_dev->forward) { + ax25_dev_put(ax25_dev); return -EINVAL; + } ax25_dev->forward = NULL; + ax25_dev_put(ax25_dev); break; default: + ax25_dev_put(ax25_dev); return -EINVAL; } diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index b40e0bce67ead7d1dd36f435aa51bb9c53fa0e19..dc2168d2a32a9e4bcea055a8df49424530f988c2 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ -75,11 +75,13 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) ax25_dev *ax25_dev; int i; - if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL) - return -EINVAL; if 
(route->digi_count > AX25_MAX_DIGIS) return -EINVAL; + ax25_dev = ax25_addr_ax25dev(&route->port_addr); + if (!ax25_dev) + return -EINVAL; + write_lock_bh(&ax25_route_lock); ax25_rt = ax25_route_list; @@ -91,6 +93,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) if (route->digi_count != 0) { if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return -ENOMEM; } ax25_rt->digipeat->lastrepeat = -1; @@ -101,6 +104,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) } } write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return 0; } ax25_rt = ax25_rt->next; @@ -108,6 +112,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return -ENOMEM; } @@ -120,6 +125,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); kfree(ax25_rt); + ax25_dev_put(ax25_dev); return -ENOMEM; } ax25_rt->digipeat->lastrepeat = -1; @@ -132,6 +138,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) ax25_rt->next = ax25_route_list; ax25_route_list = ax25_rt; write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return 0; } @@ -173,6 +180,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route) } } write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return 0; } @@ -215,6 +223,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option) out: write_unlock_bh(&ax25_route_lock); + ax25_dev_put(ax25_dev); return err; } diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c index 15ab812c4fe4b29cfdec023ddeadd7f45bd77e1a..3a476e4f6cd0b3f82836104c6119d99a7e795d6d 100644 --- a/net/ax25/ax25_subr.c +++ b/net/ax25/ax25_subr.c @@ -261,12 +261,20 @@ void ax25_disconnect(ax25_cb *ax25, int reason) { ax25_clear_queues(ax25); - if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY)) - ax25_stop_heartbeat(ax25); - ax25_stop_t1timer(ax25); - ax25_stop_t2timer(ax25); - ax25_stop_t3timer(ax25); - ax25_stop_idletimer(ax25); + if (reason == ENETUNREACH) { + del_timer_sync(&ax25->timer); + del_timer_sync(&ax25->t1timer); + del_timer_sync(&ax25->t2timer); + del_timer_sync(&ax25->t3timer); + del_timer_sync(&ax25->idletimer); + } else { + if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY)) + ax25_stop_heartbeat(ax25); + ax25_stop_t1timer(ax25); + ax25_stop_t2timer(ax25); + ax25_stop_t3timer(ax25); + ax25_stop_idletimer(ax25); + } ax25->state = AX25_STATE_0; diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ee9cead7654502cf5f9b3db1de7870c88babd46d..986f707e7d973a6a545a73951ae1c8163b4574ca 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -164,6 +164,9 @@ static void batadv_backbone_gw_release(struct kref *ref) */ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw) { + if (!backbone_gw) + return; + kref_put(&backbone_gw->refcount, batadv_backbone_gw_release); } @@ -199,6 +202,9 @@ static void batadv_claim_release(struct kref *ref) */ static void batadv_claim_put(struct batadv_bla_claim *claim) { + if (!claim) + return; + kref_put(&claim->refcount, batadv_claim_release); } diff --git a/net/batman-adv/distributed-arp-table.c 
b/net/batman-adv/distributed-arp-table.c index 0e6e53e9b5f35b48cdfff8b70d4f1cd81e044d41..338e4e9c33b8a40414ac90cd872ad56728e682d4 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -128,6 +128,9 @@ static void batadv_dat_entry_release(struct kref *ref) */ static void batadv_dat_entry_put(struct batadv_dat_entry *dat_entry) { + if (!dat_entry) + return; + kref_put(&dat_entry->refcount, batadv_dat_entry_release); } diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index ef3f85b576c4c7cc6b0a7d2c2c3e987b53218291..62f6f13f89ffda04511a3d17dfe709759a7dedc8 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -60,7 +60,7 @@ * after rcu grace period * @ref: kref pointer of the gw_node */ -static void batadv_gw_node_release(struct kref *ref) +void batadv_gw_node_release(struct kref *ref) { struct batadv_gw_node *gw_node; @@ -70,16 +70,6 @@ static void batadv_gw_node_release(struct kref *ref) kfree_rcu(gw_node, rcu); } -/** - * batadv_gw_node_put() - decrement the gw_node refcounter and possibly release - * it - * @gw_node: gateway node to free - */ -void batadv_gw_node_put(struct batadv_gw_node *gw_node) -{ - kref_put(&gw_node->refcount, batadv_gw_node_release); -} - /** * batadv_gw_get_selected_gw_node() - Get currently selected gateway * @bat_priv: the bat priv with all the soft interface information diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 88b5dba843547f714d4c05677958aa2a66d3c496..c5b1de586fde0b2322724cea2eab21d7306ca1a3 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h @@ -9,6 +9,7 @@ #include "main.h" +#include #include #include #include @@ -28,7 +29,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, void batadv_gw_node_delete(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node); void batadv_gw_node_free(struct batadv_priv *bat_priv); -void batadv_gw_node_put(struct batadv_gw_node *gw_node); +void batadv_gw_node_release(struct kref *ref); struct batadv_gw_node * batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv); int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); @@ -40,4 +41,17 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node); +/** + * batadv_gw_node_put() - decrement the gw_node refcounter and possibly release + * it + * @gw_node: gateway node to free + */ +static inline void batadv_gw_node_put(struct batadv_gw_node *gw_node) +{ + if (!gw_node) + return; + + kref_put(&gw_node->refcount, batadv_gw_node_release); +} + #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 33904595fc56a1e512edd5f217ff602f5a9744fe..fe0898a9b4e82963652da8a32615ea2030e620f9 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -151,22 +151,25 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) struct net *net = dev_net(net_dev); struct net_device *parent_dev; struct net *parent_net; + int iflink; bool ret; /* check if this is a batman-adv mesh interface */ if (batadv_softif_is_valid(net_dev)) return true; - /* no more parents..stop recursion */ - if (dev_get_iflink(net_dev) == 0 || - dev_get_iflink(net_dev) == net_dev->ifindex) + iflink = dev_get_iflink(net_dev); + if (iflink == 0) return false; parent_net = 
batadv_getlink_net(net_dev, net); + /* iflink to itself, most likely physical device */ + if (net == parent_net && iflink == net_dev->ifindex) + return false; + /* recurse over the parent device */ - parent_dev = __dev_get_by_index((struct net *)parent_net, - dev_get_iflink(net_dev)); + parent_dev = __dev_get_by_index((struct net *)parent_net, iflink); /* if we got a NULL parent_dev there is something broken.. */ if (!parent_dev) { pr_err("Cannot find parent device\n"); @@ -216,14 +219,15 @@ static struct net_device *batadv_get_real_netdevice(struct net_device *netdev) struct net_device *real_netdev = NULL; struct net *real_net; struct net *net; - int ifindex; + int iflink; ASSERT_RTNL(); if (!netdev) return NULL; - if (netdev->ifindex == dev_get_iflink(netdev)) { + iflink = dev_get_iflink(netdev); + if (iflink == 0) { dev_hold(netdev); return netdev; } @@ -233,9 +237,16 @@ static struct net_device *batadv_get_real_netdevice(struct net_device *netdev) goto out; net = dev_net(hard_iface->soft_iface); - ifindex = dev_get_iflink(netdev); real_net = batadv_getlink_net(netdev, net); - real_netdev = dev_get_by_index(real_net, ifindex); + + /* iflink to itself, most likely physical device */ + if (net == real_net && netdev->ifindex == iflink) { + real_netdev = netdev; + dev_hold(real_netdev); + goto out; + } + + real_netdev = dev_get_by_index(real_net, iflink); out: if (hard_iface) diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h index b1855d9d0b062e446b1e637e5d5fa86a2c1ead54..ba5850cfb2774958ce2cd07fc0c6f97ba3721153 100644 --- a/net/batman-adv/hard-interface.h +++ b/net/batman-adv/hard-interface.h @@ -113,6 +113,9 @@ int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing, */ static inline void batadv_hardif_put(struct batadv_hard_iface *hard_iface) { + if (!hard_iface) + return; + kref_put(&hard_iface->refcount, batadv_hardif_release); } diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 139894ca788b95f5e5af57123d318625a1cbb7a5..c8a341cd652c74a4b9ded015d049e922f347fbde 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -136,7 +136,7 @@ static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev) { struct inet6_dev *in6_dev = __in6_dev_get(dev); - if (in6_dev && in6_dev->cnf.mc_forwarding) + if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding)) return BATADV_NO_FLAGS; else return BATADV_MCAST_WANT_NO_RTR6; diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index c7a55647b520e87d560bdcb29a644eef30f2855c..121459704b0693e7597e90c7389034f66a2d13ea 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -1361,21 +1361,21 @@ static const struct genl_small_ops batadv_netlink_ops[] = { { .cmd = BATADV_CMD_TP_METER, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = batadv_netlink_tp_meter_start, .internal_flags = BATADV_FLAG_NEED_MESH, }, { .cmd = BATADV_CMD_TP_METER_CANCEL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = batadv_netlink_tp_meter_cancel, .internal_flags = BATADV_FLAG_NEED_MESH, }, { .cmd = BATADV_CMD_GET_ROUTING_ALGOS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_algo_dump, }, { @@ -1390,68 +1390,68 @@ static const struct genl_small_ops batadv_netlink_ops[] = { { .cmd = 
BATADV_CMD_GET_TRANSTABLE_LOCAL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_tt_local_dump, }, { .cmd = BATADV_CMD_GET_TRANSTABLE_GLOBAL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_tt_global_dump, }, { .cmd = BATADV_CMD_GET_ORIGINATORS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_orig_dump, }, { .cmd = BATADV_CMD_GET_NEIGHBORS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_hardif_neigh_dump, }, { .cmd = BATADV_CMD_GET_GATEWAYS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_gw_dump, }, { .cmd = BATADV_CMD_GET_BLA_CLAIM, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_bla_claim_dump, }, { .cmd = BATADV_CMD_GET_BLA_BACKBONE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_bla_backbone_dump, }, { .cmd = BATADV_CMD_GET_DAT_CACHE, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_dat_cache_dump, }, { .cmd = BATADV_CMD_GET_MCAST_FLAGS, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .dumpit = batadv_mcast_flags_dump, }, { .cmd = BATADV_CMD_SET_MESH, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = batadv_netlink_set_mesh, .internal_flags = BATADV_FLAG_NEED_MESH, }, { .cmd = BATADV_CMD_SET_HARDIF, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = batadv_netlink_set_hardif, .internal_flags = BATADV_FLAG_NEED_MESH | BATADV_FLAG_NEED_HARDIF, @@ -1467,7 +1467,7 @@ static const struct genl_small_ops batadv_netlink_ops[] = { { .cmd = BATADV_CMD_SET_VLAN, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = batadv_netlink_set_vlan, .internal_flags = BATADV_FLAG_NEED_MESH | BATADV_FLAG_NEED_VLAN, diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 35b3e03c07774b1095b290938c49a224a0c9f34e..1481b803956892049c90a568078d21eee7a313d9 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -222,6 +222,9 @@ static void batadv_nc_node_release(struct kref *ref) */ static void batadv_nc_node_put(struct batadv_nc_node *nc_node) { + if (!nc_node) + return; + kref_put(&nc_node->refcount, batadv_nc_node_release); } @@ -246,6 +249,9 @@ static void batadv_nc_path_release(struct kref *ref) */ static void batadv_nc_path_put(struct batadv_nc_path *nc_path) { + if (!nc_path) + return; + kref_put(&nc_path->refcount, batadv_nc_path_release); } diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 805d8969bdfbc7f3012b6539bdefcee50e806aa4..2d38a09459bb53c8094a0895741efbdb9fb02b1b 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -178,7 +178,7 @@ 
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, * and queue for free after rcu grace period * @ref: kref pointer of the originator-vlan object */ -static void batadv_orig_node_vlan_release(struct kref *ref) +void batadv_orig_node_vlan_release(struct kref *ref) { struct batadv_orig_node_vlan *orig_vlan; @@ -187,16 +187,6 @@ static void batadv_orig_node_vlan_release(struct kref *ref) kfree_rcu(orig_vlan, rcu); } -/** - * batadv_orig_node_vlan_put() - decrement the refcounter and possibly release - * the originator-vlan object - * @orig_vlan: the originator-vlan object to release - */ -void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan) -{ - kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release); -} - /** * batadv_originator_init() - Initialize all originator structures * @bat_priv: the bat priv with all the soft interface information @@ -232,7 +222,7 @@ int batadv_originator_init(struct batadv_priv *bat_priv) * free after rcu grace period * @ref: kref pointer of the neigh_ifinfo */ -static void batadv_neigh_ifinfo_release(struct kref *ref) +void batadv_neigh_ifinfo_release(struct kref *ref) { struct batadv_neigh_ifinfo *neigh_ifinfo; @@ -244,22 +234,12 @@ static void batadv_neigh_ifinfo_release(struct kref *ref) kfree_rcu(neigh_ifinfo, rcu); } -/** - * batadv_neigh_ifinfo_put() - decrement the refcounter and possibly release - * the neigh_ifinfo - * @neigh_ifinfo: the neigh_ifinfo object to release - */ -void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo) -{ - kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release); -} - /** * batadv_hardif_neigh_release() - release hardif neigh node from lists and * queue for free after rcu grace period * @ref: kref pointer of the neigh_node */ -static void batadv_hardif_neigh_release(struct kref *ref) +void batadv_hardif_neigh_release(struct kref *ref) { struct batadv_hardif_neigh_node *hardif_neigh; @@ -274,22 +254,12 @@ static void batadv_hardif_neigh_release(struct kref *ref) kfree_rcu(hardif_neigh, rcu); } -/** - * batadv_hardif_neigh_put() - decrement the hardif neighbors refcounter - * and possibly release it - * @hardif_neigh: hardif neigh neighbor to free - */ -void batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh) -{ - kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release); -} - /** * batadv_neigh_node_release() - release neigh_node from lists and queue for * free after rcu grace period * @ref: kref pointer of the neigh_node */ -static void batadv_neigh_node_release(struct kref *ref) +void batadv_neigh_node_release(struct kref *ref) { struct hlist_node *node_tmp; struct batadv_neigh_node *neigh_node; @@ -309,16 +279,6 @@ static void batadv_neigh_node_release(struct kref *ref) kfree_rcu(neigh_node, rcu); } -/** - * batadv_neigh_node_put() - decrement the neighbors refcounter and possibly - * release it - * @neigh_node: neigh neighbor to free - */ -void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node) -{ - kref_put(&neigh_node->refcount, batadv_neigh_node_release); -} - /** * batadv_orig_router_get() - router to the originator depending on iface * @orig_node: the orig node for the router @@ -851,7 +811,7 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb) * free after rcu grace period * @ref: kref pointer of the orig_ifinfo */ -static void batadv_orig_ifinfo_release(struct kref *ref) +void batadv_orig_ifinfo_release(struct kref *ref) { struct batadv_orig_ifinfo *orig_ifinfo; struct batadv_neigh_node *router; @@ 
-869,16 +829,6 @@ static void batadv_orig_ifinfo_release(struct kref *ref) kfree_rcu(orig_ifinfo, rcu); } -/** - * batadv_orig_ifinfo_put() - decrement the refcounter and possibly release - * the orig_ifinfo - * @orig_ifinfo: the orig_ifinfo object to release - */ -void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo) -{ - kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release); -} - /** * batadv_orig_node_free_rcu() - free the orig_node * @rcu: rcu pointer of the orig_node @@ -902,7 +852,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu) * free after rcu grace period * @ref: kref pointer of the orig_node */ -static void batadv_orig_node_release(struct kref *ref) +void batadv_orig_node_release(struct kref *ref) { struct hlist_node *node_tmp; struct batadv_neigh_node *neigh_node; @@ -948,16 +898,6 @@ static void batadv_orig_node_release(struct kref *ref) call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu); } -/** - * batadv_orig_node_put() - decrement the orig node refcounter and possibly - * release it - * @orig_node: the orig node to free - */ -void batadv_orig_node_put(struct batadv_orig_node *orig_node) -{ - kref_put(&orig_node->refcount, batadv_orig_node_release); -} - /** * batadv_originator_free() - Free all originator structures * @bat_priv: the bat priv with all the soft interface information diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 7bc01c138b3ab8bc473c91423d7a777817ce82f4..3b824a79743a252c47f2a5480ac7a633cff33555 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -21,19 +22,18 @@ bool batadv_compare_orig(const struct hlist_node *node, const void *data2); int batadv_originator_init(struct batadv_priv *bat_priv); void batadv_originator_free(struct batadv_priv *bat_priv); void batadv_purge_orig_ref(struct batadv_priv *bat_priv); -void batadv_orig_node_put(struct batadv_orig_node *orig_node); +void batadv_orig_node_release(struct kref *ref); struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, const u8 *addr); struct batadv_hardif_neigh_node * batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface, const u8 *neigh_addr); -void -batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh); +void batadv_hardif_neigh_release(struct kref *ref); struct batadv_neigh_node * batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node, struct batadv_hard_iface *hard_iface, const u8 *neigh_addr); -void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node); +void batadv_neigh_node_release(struct kref *ref); struct batadv_neigh_node * batadv_orig_router_get(struct batadv_orig_node *orig_node, const struct batadv_hard_iface *if_outgoing); @@ -43,7 +43,7 @@ batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh, struct batadv_neigh_ifinfo * batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh, struct batadv_hard_iface *if_outgoing); -void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo); +void batadv_neigh_ifinfo_release(struct kref *ref); int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb); int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset); @@ -54,7 +54,7 @@ batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node, struct batadv_orig_ifinfo * batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_outgoing); -void batadv_orig_ifinfo_put(struct 
batadv_orig_ifinfo *orig_ifinfo); +void batadv_orig_ifinfo_release(struct kref *ref); int batadv_orig_seq_print_text(struct seq_file *seq, void *offset); int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb); @@ -65,7 +65,7 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node, struct batadv_orig_node_vlan * batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node, unsigned short vid); -void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan); +void batadv_orig_node_vlan_release(struct kref *ref); /** * batadv_choose_orig() - Return the index of the orig entry in the hash table @@ -86,4 +86,86 @@ static inline u32 batadv_choose_orig(const void *data, u32 size) struct batadv_orig_node * batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data); +/** + * batadv_orig_node_vlan_put() - decrement the refcounter and possibly release + * the originator-vlan object + * @orig_vlan: the originator-vlan object to release + */ +static inline void +batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan) +{ + if (!orig_vlan) + return; + + kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release); +} + +/** + * batadv_neigh_ifinfo_put() - decrement the refcounter and possibly release + * the neigh_ifinfo + * @neigh_ifinfo: the neigh_ifinfo object to release + */ +static inline void +batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo) +{ + if (!neigh_ifinfo) + return; + + kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release); +} + +/** + * batadv_hardif_neigh_put() - decrement the hardif neighbors refcounter + * and possibly release it + * @hardif_neigh: hardif neigh neighbor to free + */ +static inline void +batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh) +{ + if (!hardif_neigh) + return; + + kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release); +} + +/** + * batadv_neigh_node_put() - decrement the neighbors refcounter and possibly + * release it + * @neigh_node: neigh neighbor to free + */ +static inline void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node) +{ + if (!neigh_node) + return; + + kref_put(&neigh_node->refcount, batadv_neigh_node_release); +} + +/** + * batadv_orig_ifinfo_put() - decrement the refcounter and possibly release + * the orig_ifinfo + * @orig_ifinfo: the orig_ifinfo object to release + */ +static inline void +batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo) +{ + if (!orig_ifinfo) + return; + + kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release); +} + +/** + * batadv_orig_node_put() - decrement the orig node refcounter and possibly + * release it + * @orig_node: the orig node to free + */ +static inline void batadv_orig_node_put(struct batadv_orig_node *orig_node) +{ + if (!orig_node) + return; + + kref_put(&orig_node->refcount, batadv_orig_node_release); +} + #endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */ diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 7496047b318a412e51d33849bd22f4d59357b5c2..8f7c778255fba74f51e9f84ad094c5beb492b0f9 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -512,7 +512,7 @@ void batadv_interface_rx(struct net_device *soft_iface, * after rcu grace period * @ref: kref pointer of the vlan object */ -static void batadv_softif_vlan_release(struct kref *ref) +void batadv_softif_vlan_release(struct kref *ref) { struct batadv_softif_vlan *vlan; @@ -525,19 +525,6 @@ static void batadv_softif_vlan_release(struct kref *ref) 
kfree_rcu(vlan, rcu); } -/** - * batadv_softif_vlan_put() - decrease the vlan object refcounter and - * possibly release it - * @vlan: the vlan object to release - */ -void batadv_softif_vlan_put(struct batadv_softif_vlan *vlan) -{ - if (!vlan) - return; - - kref_put(&vlan->refcount, batadv_softif_vlan_release); -} - /** * batadv_softif_vlan_get() - get the vlan object for a specific vid * @bat_priv: the bat priv with all the soft interface information diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index 534e08d6ad919e3e244732d6646b4b48e11d5e6b..53aba17b90688b7efc478dde5a1b2d92ecf0b003 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h @@ -9,6 +9,7 @@ #include "main.h" +#include #include #include #include @@ -24,8 +25,21 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface); bool batadv_softif_is_valid(const struct net_device *net_dev); extern struct rtnl_link_ops batadv_link_ops; int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid); -void batadv_softif_vlan_put(struct batadv_softif_vlan *softif_vlan); +void batadv_softif_vlan_release(struct kref *ref); struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv, unsigned short vid); +/** + * batadv_softif_vlan_put() - decrease the vlan object refcounter and + * possibly release it + * @vlan: the vlan object to release + */ +static inline void batadv_softif_vlan_put(struct batadv_softif_vlan *vlan) +{ + if (!vlan) + return; + + kref_put(&vlan->refcount, batadv_softif_vlan_release); +} + #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */ diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index db7e3774825b5f5871b23c0e4fa5ce7afb2accd6..00d62a6c5e0efb619e18789271d86c69c395bfb4 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -357,6 +357,9 @@ static void batadv_tp_vars_release(struct kref *ref) */ static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars) { + if (!tp_vars) + return; + kref_put(&tp_vars->refcount, batadv_tp_vars_release); } diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index de946ea8f13c8c96d5153c62b68e480c2bbcade6..5f990a2061072b8f7e024efacf9460250e079e5e 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -248,6 +248,9 @@ static void batadv_tt_local_entry_release(struct kref *ref) static void batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry) { + if (!tt_local_entry) + return; + kref_put(&tt_local_entry->common.refcount, batadv_tt_local_entry_release); } @@ -271,7 +274,7 @@ static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu) * queue for free after rcu grace period * @ref: kref pointer of the nc_node */ -static void batadv_tt_global_entry_release(struct kref *ref) +void batadv_tt_global_entry_release(struct kref *ref) { struct batadv_tt_global_entry *tt_global_entry; @@ -283,17 +286,6 @@ static void batadv_tt_global_entry_release(struct kref *ref) call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu); } -/** - * batadv_tt_global_entry_put() - decrement the tt_global_entry refcounter and - * possibly release it - * @tt_global_entry: tt_global_entry to be free'd - */ -void batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry) -{ - kref_put(&tt_global_entry->common.refcount, - batadv_tt_global_entry_release); -} - /** * batadv_tt_global_hash_count() - count the number of orig entries * @bat_priv: the bat priv with all 
the soft interface information @@ -453,6 +445,9 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref) static void batadv_tt_orig_list_entry_put(struct batadv_tt_orig_list_entry *orig_entry) { + if (!orig_entry) + return; + kref_put(&orig_entry->refcount, batadv_tt_orig_list_entry_release); } @@ -2818,6 +2813,9 @@ static void batadv_tt_req_node_release(struct kref *ref) */ static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node) { + if (!tt_req_node) + return; + kref_put(&tt_req_node->refcount, batadv_tt_req_node_release); } diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index b24d35b9226a1f28be08e374264fd15130d53cdc..63cc8fd3ff66aea16fcecf9f2adbff03dc4ef63d 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -9,6 +9,7 @@ #include "main.h" +#include #include #include #include @@ -31,7 +32,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, struct batadv_tt_global_entry * batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid); -void batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry); +void batadv_tt_global_entry_release(struct kref *ref); int batadv_tt_global_hash_count(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid); struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv, @@ -58,4 +59,19 @@ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv, int batadv_tt_cache_init(void); void batadv_tt_cache_destroy(void); +/** + * batadv_tt_global_entry_put() - decrement the tt_global_entry refcounter and + * possibly release it + * @tt_global_entry: tt_global_entry to be free'd + */ +static inline void +batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry) +{ + if (!tt_global_entry) + return; + + kref_put(&tt_global_entry->common.refcount, + batadv_tt_global_entry_release); +} + #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index 6a23a566cde17b1ea80bec83a30e975496e8ea2c..99fc48efde5431c8231ebd7ac661f559cf09e94d 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c @@ -50,6 +50,9 @@ static void batadv_tvlv_handler_release(struct kref *ref) */ static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler) { + if (!tvlv_handler) + return; + kref_put(&tvlv_handler->refcount, batadv_tvlv_handler_release); } @@ -106,6 +109,9 @@ static void batadv_tvlv_container_release(struct kref *ref) */ static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv) { + if (!tvlv) + return; + kref_put(&tvlv->refcount, batadv_tvlv_container_release); } diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 0a2d78e811cf5bbea7d735bc345a1ddc9a46cbf8..83eb84e8e688f3bcab98c99758ee99ae7f85dc46 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -501,9 +501,7 @@ static int __init cmtp_init(void) { BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); - cmtp_init_sockets(); - - return 0; + return cmtp_init_sockets(); } static void __exit cmtp_exit(void) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 1c5a0a60292d2ae96c54553f34cf5c2261e24292..ecd2ffcf2ba284e499dc7ab525d5fa3930c965ca 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -508,7 +508,9 @@ static void le_conn_timeout(struct work_struct *work) if (conn->role == HCI_ROLE_SLAVE) { /* Disable LE Advertising */ le_disable_advertising(hdev); + 
hci_dev_lock(hdev); hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT); + hci_dev_unlock(hdev); return; } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 2ad66f64879f1863e3f441b288deb82e673c5310..2e7998bad133bc45ca5e88c2b5f9d079c5a9ac4e 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3810,6 +3810,7 @@ int hci_register_dev(struct hci_dev *hdev) return id; err_wqueue: + debugfs_remove_recursive(hdev->debugfs); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); err: diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 9f52145bb7b7639e5c221778188126f8143ef870..e926e80d9731b163e918970fba4feb097b195ac5 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5061,8 +5061,9 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, hci_dev_lock(hdev); hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); - if (hcon) { + if (hcon && hcon->type == AMP_LINK) { hcon->state = BT_CLOSED; + hci_disconn_cfm(hcon, ev->reason); hci_conn_del(hcon); } @@ -5661,7 +5662,13 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) struct hci_ev_le_advertising_info *ev = ptr; s8 rssi; - if (ev->length <= HCI_MAX_AD_LENGTH) { + if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) { + bt_dev_err(hdev, "Malicious advertising data."); + break; + } + + if (ev->length <= HCI_MAX_AD_LENGTH && + ev->data + ev->length <= skb_tail_pointer(skb)) { rssi = ev->data[ev->length]; process_adv_report(hdev, ev->evt_type, &ev->bdaddr, ev->bdaddr_type, NULL, 0, rssi, diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 1a94ed2f8a4f8ebe72a706f01f9472b67b3e92e6..d965b7c66bd628b760e28d51ad441deb4a6a8f0b 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -2118,7 +2118,7 @@ int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance) /* Set duration per instance since controller is responsible for * scheduling it. */ - if (adv_instance && adv_instance->duration) { + if (adv_instance && adv_instance->timeout) { u16 duration = adv_instance->timeout * MSEC_PER_SEC; /* Time = N * 10 ms */ diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 0ddbc415ce156f948ce8ccd3727c953736072ff0..012c1a0abda8c3fb0ed6c441779f0eefda2584e9 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1438,6 +1438,7 @@ static void l2cap_ecred_connect(struct l2cap_chan *chan) l2cap_ecred_init(chan, 0); + memset(&data, 0, sizeof(data)); data.pdu.req.psm = chan->psm; data.pdu.req.mtu = cpu_to_le16(chan->imtu); data.pdu.req.mps = cpu_to_le16(chan->mps); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 160c016a5dfb9eb7842cabc7e8fb2d5c0914d497..d2c67852059921bfd188eee1a4a79736f9415323 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -161,7 +161,11 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) break; } - if (chan->psm && bdaddr_type_is_le(chan->src_type)) + /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and + * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set. 
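The two bounds checks added to hci_le_adv_report_evt() above are the standard defence when walking variable-length records out of an skb: validate the fixed header against the tail pointer first, then the declared payload, before dereferencing either. A self-contained analogue in plain C (hypothetical adv_info mirroring the event layout; slightly stricter than the kernel check in that it also reserves the trailing RSSI byte):

#include <stdint.h>
#include <stddef.h>

struct adv_info {		/* mirrors hci_ev_le_advertising_info */
	uint8_t evt_type;
	uint8_t bdaddr_type;
	uint8_t bdaddr[6];
	uint8_t length;
	uint8_t data[];		/* 'length' bytes, then one RSSI byte */
} __attribute__((packed));

static int parse_reports(const uint8_t *buf, size_t len, unsigned int num)
{
	const uint8_t *ptr = buf;
	const uint8_t *tail = buf + len;

	while (num--) {
		const struct adv_info *ev = (const struct adv_info *)ptr;

		if ((size_t)(tail - ptr) < sizeof(*ev))
			return -1;	/* truncated header */
		if ((size_t)(tail - ev->data) < (size_t)ev->length + 1)
			return -1;	/* payload + RSSI would overrun */

		/* int8_t rssi = (int8_t)ev->data[ev->length]; ... */
		ptr = ev->data + ev->length + 1;
	}
	return 0;
}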
+ */ + if (chan->psm && bdaddr_type_is_le(chan->src_type) && + chan->mode != L2CAP_MODE_EXT_FLOWCTL) chan->mode = L2CAP_MODE_LE_FLOWCTL; chan->state = BT_BOUND; @@ -172,6 +176,21 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) return err; } +static void l2cap_sock_init_pid(struct sock *sk) +{ + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + + /* Only L2CAP_MODE_EXT_FLOWCTL ever need to access the PID in order to + * group the channels being requested. + */ + if (chan->mode != L2CAP_MODE_EXT_FLOWCTL) + return; + + spin_lock(&sk->sk_peer_lock); + sk->sk_peer_pid = get_pid(task_tgid(current)); + spin_unlock(&sk->sk_peer_lock); +} + static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { @@ -240,9 +259,15 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, return -EINVAL; } - if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode) + /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and + * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set. + */ + if (chan->psm && bdaddr_type_is_le(chan->src_type) && + chan->mode != L2CAP_MODE_EXT_FLOWCTL) chan->mode = L2CAP_MODE_LE_FLOWCTL; + l2cap_sock_init_pid(sk); + err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), &la.l2_bdaddr, la.l2_bdaddr_type); if (err) @@ -298,6 +323,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) goto done; } + l2cap_sock_init_pid(sk); + sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; @@ -876,6 +903,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, struct l2cap_conn *conn; int len, err = 0; u32 opt; + u16 mtu; + u8 mode; BT_DBG("sk %p", sk); @@ -1058,16 +1087,16 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u16))) { + if (copy_from_sockptr(&mtu, optval, sizeof(u16))) { err = -EFAULT; break; } if (chan->mode == L2CAP_MODE_EXT_FLOWCTL && sk->sk_state == BT_CONNECTED) - err = l2cap_chan_reconfigure(chan, opt); + err = l2cap_chan_reconfigure(chan, mtu); else - chan->imtu = opt; + chan->imtu = mtu; break; @@ -1089,14 +1118,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u8))) { + if (copy_from_sockptr(&mode, optval, sizeof(u8))) { err = -EFAULT; break; } - BT_DBG("opt %u", opt); + BT_DBG("mode %u", mode); - err = l2cap_set_mode(chan, opt); + err = l2cap_set_mode(chan, mode); if (err) break; diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 8edfb98ae1d583665f5396b5110a3875e0518561..68c0d0f928908e44ade334aa9931279190be8a13 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -743,6 +743,9 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu) mtu = nf_bridge->frag_max_size; + nf_bridge_update_protocol(skb); + nf_bridge_push_encap_header(skb); + if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) { nf_bridge_info_free(skb); return br_dev_queue_push_xmit(net, sk, skb); @@ -760,8 +763,6 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; - nf_bridge_update_protocol(skb); - data = this_cpu_ptr(&brnf_frag_data_storage); if (skb_vlan_tag_present(skb)) { @@ -789,8 +790,6 @@ static int br_nf_dev_queue_xmit(struct net *net, struct 
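/*
 * A sketch of the l2cap setsockopt fix above: copy each option into a
 * local variable whose size matches the copy, instead of funnelling u16
 * and u8 options through one larger u32 scratch variable. Helper name is
 * illustrative.
 */
#include <linux/errno.h>
#include <linux/sockptr.h>
#include <linux/types.h>

static int read_u16_opt(sockptr_t optval, unsigned int optlen, u16 *out)
{
	if (optlen < sizeof(*out))
		return -EINVAL;

	return copy_from_sockptr(out, optval, sizeof(*out)) ? -EFAULT : 0;
}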
sock *sk, struct sk_buff IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; - nf_bridge_update_protocol(skb); - data = this_cpu_ptr(&brnf_frag_data_storage); data->encap_size = nf_bridge_encap_header_len(skb); data->size = ETH_HLEN + data->encap_size; diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 08c77418c687b75e1ca303922314b010397296fc..852f4b54e8811a275ca3df6d261c5a9c481b6db9 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -543,10 +543,10 @@ static bool __allowed_ingress(const struct net_bridge *br, if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) { if (*state == BR_STATE_FORWARDING) { *state = br_vlan_get_pvid_state(vg); - return br_vlan_state_allowed(*state, true); - } else { - return true; + if (!br_vlan_state_allowed(*state, true)) + goto drop; } + return true; } } v = br_vlan_find(vg, *vid); @@ -1873,7 +1873,8 @@ static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb) goto out_err; } err = br_vlan_dump_dev(dev, skb, cb, dump_flags); - if (err && err != -EMSGSIZE) + /* if the dump completed without an error we return 0 here */ + if (err != -EMSGSIZE) goto out_err; } else { for_each_netdev_rcu(net, dev) { diff --git a/net/can/isotp.c b/net/can/isotp.c index 8ee580538d876a85504232803fee47d233b5a56e..9a4a9c5a9f24c3fe24debcdc5d083db131354a42 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include #include @@ -119,8 +120,8 @@ enum { }; struct tpcon { - int idx; - int len; + unsigned int idx; + unsigned int len; u32 state; u8 bs; u8 sn; @@ -140,11 +141,13 @@ struct isotp_sock { struct can_isotp_options opt; struct can_isotp_fc_options rxfc, txfc; struct can_isotp_ll_options ll; + u32 frame_txtime; u32 force_tx_stmin; u32 force_rx_stmin; struct tpcon rx, tx; struct list_head notifier; wait_queue_head_t wait; + spinlock_t rx_lock; /* protect single thread state machine */ }; static LIST_HEAD(isotp_notifier_list); @@ -358,7 +361,7 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae) so->tx_gap = ktime_set(0, 0); /* add transmission time for CAN frame N_As */ - so->tx_gap = ktime_add_ns(so->tx_gap, so->opt.frame_txtime); + so->tx_gap = ktime_add_ns(so->tx_gap, so->frame_txtime); /* add waiting time for consecutive frames N_Cs */ if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN) so->tx_gap = ktime_add_ns(so->tx_gap, @@ -615,11 +618,17 @@ static void isotp_rcv(struct sk_buff *skb, void *data) n_pci_type = cf->data[ae] & 0xF0; + /* Make sure the state changes and data structures stay consistent at + * CAN frame reception time. This locking is not needed in real world + * use cases but the inconsistency can be triggered with syzkaller. 
+ */ + spin_lock(&so->rx_lock); + if (so->opt.flags & CAN_ISOTP_HALF_DUPLEX) { /* check rx/tx path half duplex expectations */ if ((so->tx.state != ISOTP_IDLE && n_pci_type != N_PCI_FC) || (so->rx.state != ISOTP_IDLE && n_pci_type == N_PCI_FC)) - return; + goto out_unlock; } switch (n_pci_type) { @@ -668,6 +677,9 @@ static void isotp_rcv(struct sk_buff *skb, void *data) isotp_rcv_cf(sk, cf, ae, skb); break; } + +out_unlock: + spin_unlock(&so->rx_lock); } static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so, @@ -874,24 +886,34 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (!size || size > MAX_MSG_LENGTH) { err = -EINVAL; - goto err_out; + goto err_out_drop; + } + + /* take care of a potential SF_DL ESC offset for TX_DL > 8 */ + off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0; + + /* does the given data fit into a single frame for SF_BROADCAST? */ + if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) && + (size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) { + err = -EINVAL; + goto err_out_drop; } err = memcpy_from_msg(so->tx.buf, msg, size); if (err < 0) - goto err_out; + goto err_out_drop; dev = dev_get_by_index(sock_net(sk), so->ifindex); if (!dev) { err = -ENXIO; - goto err_out; + goto err_out_drop; } skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv), msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) { dev_put(dev); - goto err_out; + goto err_out_drop; } can_skb_reserve(skb); @@ -904,9 +926,6 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) cf = (struct canfd_frame *)skb->data; skb_put_zero(skb, so->ll.mtu); - /* take care of a potential SF_DL ESC offset for TX_DL > 8 */ - off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0; - /* check for single frame transmission depending on TX_DL */ if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) { /* The message size generally fits into a SingleFrame - good. 
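/*
 * A sketch of the locking the isotp_rcv() hunk above introduces: one
 * per-socket spinlock held for the whole frame dissection so concurrent
 * frames cannot observe a half-updated rx/tx state machine. Struct and
 * states are illustrative stand-ins.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct tp_sock {
	spinlock_t rx_lock;	/* protects the state machine below */
	u32 rx_state;		/* 0 == idle */
};

static void tp_rcv(struct tp_sock *so, u32 event)
{
	spin_lock(&so->rx_lock);	/* rx runs in softirq context */

	if (so->rx_state && !event)
		goto out_unlock;	/* unexpected frame: drop it */

	so->rx_state = event;

out_unlock:
	spin_unlock(&so->rx_lock);
}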
@@ -956,7 +975,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (err) { pr_notice_once("can-isotp: %s: can_send_ret %d\n", __func__, err); - goto err_out; + goto err_out_drop; } if (wait_tx_done) { @@ -969,6 +988,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) return size; +err_out_drop: + /* drop this PDU and unlock a potential wait queue */ + old_state = ISOTP_IDLE; err_out: so->tx.state = old_state; if (so->tx.state == ISOTP_IDLE) @@ -982,26 +1004,29 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, { struct sock *sk = sock->sk; struct sk_buff *skb; - int err = 0; - int noblock; + struct isotp_sock *so = isotp_sk(sk); + int noblock = flags & MSG_DONTWAIT; + int ret = 0; - noblock = flags & MSG_DONTWAIT; - flags &= ~MSG_DONTWAIT; + if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK)) + return -EINVAL; + + if (!so->bound) + return -EADDRNOTAVAIL; - skb = skb_recv_datagram(sk, flags, noblock, &err); + flags &= ~MSG_DONTWAIT; + skb = skb_recv_datagram(sk, flags, noblock, &ret); if (!skb) - return err; + return ret; if (size < skb->len) msg->msg_flags |= MSG_TRUNC; else size = skb->len; - err = memcpy_to_msg(msg, skb->data, size); - if (err < 0) { - skb_free_datagram(sk, skb); - return err; - } + ret = memcpy_to_msg(msg, skb->data, size); + if (ret < 0) + goto out_err; sock_recv_timestamp(msg, sk, skb); @@ -1011,9 +1036,13 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, memcpy(msg->msg_name, skb->cb, msg->msg_namelen); } + /* set length of return value */ + ret = (flags & MSG_TRUNC) ? skb->len : size; + +out_err: skb_free_datagram(sk, skb); - return size; + return ret; } static int isotp_release(struct socket *sock) @@ -1043,7 +1072,7 @@ static int isotp_release(struct socket *sock) lock_sock(sk); /* remove current filters & unregister */ - if (so->bound) { + if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) { if (so->ifindex) { struct net_device *dev; @@ -1081,27 +1110,44 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) struct net *net = sock_net(sk); int ifindex; struct net_device *dev; + canid_t tx_id, rx_id; int err = 0; int notify_enetdown = 0; + int do_rx_reg = 1; if (len < ISOTP_MIN_NAMELEN) return -EINVAL; - if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id) - return -EADDRNOTAVAIL; + /* sanitize tx/rx CAN identifiers */ + tx_id = addr->can_addr.tp.tx_id; + if (tx_id & CAN_EFF_FLAG) + tx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK); + else + tx_id &= CAN_SFF_MASK; - if ((addr->can_addr.tp.rx_id | addr->can_addr.tp.tx_id) & - (CAN_ERR_FLAG | CAN_RTR_FLAG)) - return -EADDRNOTAVAIL; + rx_id = addr->can_addr.tp.rx_id; + if (rx_id & CAN_EFF_FLAG) + rx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK); + else + rx_id &= CAN_SFF_MASK; if (!addr->can_ifindex) return -ENODEV; lock_sock(sk); + /* do not register frame reception for functional addressing */ + if (so->opt.flags & CAN_ISOTP_SF_BROADCAST) + do_rx_reg = 0; + + /* do not validate rx address for functional addressing */ + if (do_rx_reg && rx_id == tx_id) { + err = -EADDRNOTAVAIL; + goto out; + } + if (so->bound && addr->can_ifindex == so->ifindex && - addr->can_addr.tp.rx_id == so->rxid && - addr->can_addr.tp.tx_id == so->txid) + rx_id == so->rxid && tx_id == so->txid) goto out; dev = dev_get_by_index(net, addr->can_ifindex); @@ -1124,13 +1170,13 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) ifindex = dev->ifindex; - can_rx_register(net, 
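/*
 * A sketch of the error unwinding built up in isotp_sendmsg() above:
 * failures taken after the tx state machine was claimed jump to a label
 * that resets the state and wakes any waiter before the common exit.
 * Skeleton only; names are illustrative.
 */
#include <linux/errno.h>
#include <linux/wait.h>

struct tx_ctx {
	unsigned int state;	/* 0 == idle */
	wait_queue_head_t wait;	/* init_waitqueue_head() at socket setup */
};

static int tx_pdu(struct tx_ctx *tx, size_t size, size_t max)
{
	int err = 0;

	if (!size || size > max) {
		err = -EINVAL;
		goto err_out_drop;
	}

	/* ... build and queue the frame here ... */
	return 0;

err_out_drop:
	tx->state = 0;				/* drop this PDU */
	wake_up_interruptible(&tx->wait);	/* release a potential waiter */
	return err;
}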
dev, addr->can_addr.tp.rx_id, - SINGLE_MASK(addr->can_addr.tp.rx_id), isotp_rcv, sk, - "isotp", sk); + if (do_rx_reg) + can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id), + isotp_rcv, sk, "isotp", sk); dev_put(dev); - if (so->bound) { + if (so->bound && do_rx_reg) { /* unregister old filter */ if (so->ifindex) { dev = dev_get_by_index(net, so->ifindex); @@ -1145,8 +1191,8 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) /* switch to new settings */ so->ifindex = ifindex; - so->rxid = addr->can_addr.tp.rx_id; - so->txid = addr->can_addr.tp.tx_id; + so->rxid = rx_id; + so->txid = tx_id; so->bound = 1; out: @@ -1179,16 +1225,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer) return ISOTP_MIN_NAMELEN; } -static int isotp_setsockopt(struct socket *sock, int level, int optname, +static int isotp_setsockopt_locked(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct isotp_sock *so = isotp_sk(sk); int ret = 0; - if (level != SOL_CAN_ISOTP) - return -EINVAL; - if (so->bound) return -EISCONN; @@ -1203,6 +1246,14 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname, /* no separate rx_ext_address is given => use ext_address */ if (!(so->opt.flags & CAN_ISOTP_RX_EXT_ADDR)) so->opt.rx_ext_address = so->opt.ext_address; + + /* check for frame_txtime changes (0 => no changes) */ + if (so->opt.frame_txtime) { + if (so->opt.frame_txtime == CAN_ISOTP_FRAME_TXTIME_ZERO) + so->frame_txtime = 0; + else + so->frame_txtime = so->opt.frame_txtime; + } break; case CAN_ISOTP_RECV_FC: @@ -1263,6 +1314,22 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname, return ret; } +static int isotp_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) + +{ + struct sock *sk = sock->sk; + int ret; + + if (level != SOL_CAN_ISOTP) + return -EINVAL; + + lock_sock(sk); + ret = isotp_setsockopt_locked(sock, level, optname, optval, optlen); + release_sock(sk); + return ret; +} + static int isotp_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { @@ -1330,7 +1397,7 @@ static void isotp_notify(struct isotp_sock *so, unsigned long msg, case NETDEV_UNREGISTER: lock_sock(sk); /* remove current filters & unregister */ - if (so->bound) + if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) can_rx_unregister(dev_net(dev), dev, so->rxid, SINGLE_MASK(so->rxid), isotp_rcv, sk); @@ -1388,6 +1455,7 @@ static int isotp_init(struct sock *sk) so->opt.rxpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT; so->opt.txpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT; so->opt.frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME; + so->frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME; so->rxfc.bs = CAN_ISOTP_DEFAULT_RECV_BS; so->rxfc.stmin = CAN_ISOTP_DEFAULT_RECV_STMIN; so->rxfc.wftmax = CAN_ISOTP_DEFAULT_RECV_WFTMAX; @@ -1407,6 +1475,7 @@ static int isotp_init(struct sock *sk) so->txtimer.function = isotp_tx_timer_handler; init_waitqueue_head(&so->wait); + spin_lock_init(&so->rx_lock); spin_lock(&isotp_notifier_lock); list_add_tail(&so->notifier, &isotp_notifier_list); diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index c907f0dc7f87ae4698b9891d1b4fe008d04c71af..5b61e99b8d631a4d418a1d650f920fc2e05c4420 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -858,7 +858,7 @@ static struct bpf_iter_reg bpf_sk_storage_map_reg_info = { { offsetof(struct 
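/*
 * A sketch of the identifier sanitizing added to isotp_bind() above: for
 * extended frames keep the EFF flag plus the 29 address bits, otherwise
 * keep only the 11 standard bits, so reserved flag bits can never reach
 * the filter setup.
 */
#include <linux/can.h>

static canid_t sanitize_can_id(canid_t id)
{
	if (id & CAN_EFF_FLAG)
		return id & (CAN_EFF_FLAG | CAN_EFF_MASK);

	return id & CAN_SFF_MASK;
}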
bpf_iter__bpf_sk_storage_map, sk), PTR_TO_BTF_ID_OR_NULL }, { offsetof(struct bpf_iter__bpf_sk_storage_map, value), - PTR_TO_RDWR_BUF_OR_NULL }, + PTR_TO_BUF | PTR_MAYBE_NULL }, }, .seq_info = &iter_seq_info, }; diff --git a/net/core/dev.c b/net/core/dev.c index 738632e046537098a69fc3896ad6783f351ef961..12089c484b304b25be98bebb8922c79c6098671d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1648,6 +1648,7 @@ void dev_close_many(struct list_head *head, bool unlink) call_netdevice_notifiers(NETDEV_DOWN, dev); if (unlink) list_del_init(&dev->close_list); + cond_resched(); } } EXPORT_SYMBOL(dev_close_many); @@ -4621,9 +4622,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, struct netdev_rx_queue *rxqueue; void *orig_data, *orig_data_end; u32 metalen, act = XDP_DROP; + bool orig_bcast, orig_host; __be16 orig_eth_type; struct ethhdr *eth; - bool orig_bcast; int hlen, off; u32 mac_len; @@ -4670,6 +4671,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, orig_data_end = xdp->data_end; orig_data = xdp->data; eth = (struct ethhdr *)xdp->data; + orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr); orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest); orig_eth_type = eth->h_proto; @@ -4700,8 +4702,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, /* check if XDP changed eth hdr such SKB needs update */ eth = (struct ethhdr *)xdp->data; if ((orig_eth_type != eth->h_proto) || + (orig_host != ether_addr_equal_64bits(eth->h_dest, + skb->dev->dev_addr)) || (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) { __skb_push(skb, ETH_HLEN); + skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, skb->dev); } @@ -9339,6 +9344,12 @@ static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, goto out_unlock; } old_prog = link->prog; + if (old_prog->type != new_prog->type || + old_prog->expected_attach_type != new_prog->expected_attach_type) { + err = -EINVAL; + goto out_unlock; + } + if (old_prog == new_prog) { /* no-op, don't disturb drivers */ bpf_prog_put(new_prog); @@ -9581,6 +9592,7 @@ static void rollback_registered_many(struct list_head *head) /* Remove XPS queueing entries */ netif_reset_xps_queues_gt(dev, 0); #endif + cond_resched(); } synchronize_net(); @@ -10244,8 +10256,8 @@ EXPORT_SYMBOL(netdev_refcnt_read); #define WAIT_REFS_MIN_MSECS 1 #define WAIT_REFS_MAX_MSECS 250 /** - * netdev_wait_allrefs - wait until all references are gone. - * @dev: target net_device + * netdev_wait_allrefs_any - wait until all references are gone. + * @list: list of net_devices to wait on * * This is called when unregistering network devices. * @@ -10255,37 +10267,42 @@ EXPORT_SYMBOL(netdev_refcnt_read); * We can get stuck here if buggy protocols don't correctly * call dev_put. 
*/ -static void netdev_wait_allrefs(struct net_device *dev) +static struct net_device *netdev_wait_allrefs_any(struct list_head *list) { unsigned long rebroadcast_time, warning_time; - int wait = 0, refcnt; - - linkwatch_forget_dev(dev); + struct net_device *dev; + int wait = 0; rebroadcast_time = warning_time = jiffies; - refcnt = netdev_refcnt_read(dev); - while (refcnt != 0) { + list_for_each_entry(dev, list, todo_list) + if (netdev_refcnt_read(dev) == 0) + return dev; + + while (true) { if (time_after(jiffies, rebroadcast_time + 1 * HZ)) { rtnl_lock(); /* Rebroadcast unregister notification */ - call_netdevice_notifiers(NETDEV_UNREGISTER, dev); + list_for_each_entry(dev, list, todo_list) + call_netdevice_notifiers(NETDEV_UNREGISTER, dev); __rtnl_unlock(); rcu_barrier(); rtnl_lock(); - if (test_bit(__LINK_STATE_LINKWATCH_PENDING, - &dev->state)) { - /* We must not have linkwatch events - * pending on unregister. If this - * happens, we simply run the queue - * unscheduled, resulting in a noop - * for this device. - */ - linkwatch_run_queue(); - } + list_for_each_entry(dev, list, todo_list) + if (test_bit(__LINK_STATE_LINKWATCH_PENDING, + &dev->state)) { + /* We must not have linkwatch events + * pending on unregister. If this + * happens, we simply run the queue + * unscheduled, resulting in a noop + * for this device. + */ + linkwatch_run_queue(); + break; + } __rtnl_unlock(); @@ -10300,11 +10317,14 @@ static void netdev_wait_allrefs(struct net_device *dev) wait = min(wait << 1, WAIT_REFS_MAX_MSECS); } - refcnt = netdev_refcnt_read(dev); + list_for_each_entry(dev, list, todo_list) + if (netdev_refcnt_read(dev) == 0) + return dev; - if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) { - pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", - dev->name, refcnt); + if (time_after(jiffies, warning_time + 10 * HZ)) { + list_for_each_entry(dev, list, todo_list) + pr_emerg("unregister_netdevice: waiting for %s to become free. 
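/*
 * A sketch of the backoff used above while waiting for netdev refcounts
 * to drop: flush RCU callbacks once, then sleep with a doubling delay
 * capped at a maximum; the constants mirror the WAIT_REFS_* values in
 * the hunk.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

#define MY_WAIT_MIN_MSECS	1
#define MY_WAIT_MAX_MSECS	250

static void refs_backoff(int *wait)
{
	if (!*wait) {
		rcu_barrier();	/* let pending callbacks drop references */
		*wait = MY_WAIT_MIN_MSECS;
	} else {
		msleep(*wait);
		*wait = min(*wait << 1, MY_WAIT_MAX_MSECS);
	}
}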
Usage count = %d\n", + dev->name, netdev_refcnt_read(dev)); warning_time = jiffies; } } @@ -10336,6 +10356,7 @@ static void netdev_wait_allrefs(struct net_device *dev) */ void netdev_run_todo(void) { + struct net_device *dev, *tmp; struct list_head list; #ifdef CONFIG_LOCKDEP struct list_head unlink_list; @@ -10356,26 +10377,24 @@ void netdev_run_todo(void) __rtnl_unlock(); - /* Wait for rcu callbacks to finish before next phase */ if (!list_empty(&list)) rcu_barrier(); - while (!list_empty(&list)) { - struct net_device *dev - = list_first_entry(&list, struct net_device, todo_list); - list_del(&dev->todo_list); - + list_for_each_entry_safe(dev, tmp, &list, todo_list) { if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { - pr_err("network todo '%s' but state %d\n", - dev->name, dev->reg_state); - dump_stack(); + netdev_WARN(dev, "run_todo but not unregistering\n"); + list_del(&dev->todo_list); continue; } dev->reg_state = NETREG_UNREGISTERED; + linkwatch_forget_dev(dev); + } - netdev_wait_allrefs(dev); + while (!list_empty(&list)) { + dev = netdev_wait_allrefs_any(&list); + list_del(&dev->todo_list); /* paranoia */ BUG_ON(netdev_refcnt_read(dev)); diff --git a/net/core/devlink.c b/net/core/devlink.c index 442b67c044a9f9589744bae185f76ce1f1d89753..646d90f63dafc72c4a5bd6bcfda276677dec98c7 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -7852,8 +7852,6 @@ static const struct genl_small_ops devlink_nl_ops[] = { GENL_DONT_VALIDATE_DUMP_STRICT, .dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | - DEVLINK_NL_FLAG_NO_LOCK, }, { .cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR, diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index db65ce62b625ad375cba872ae2f2b31454baca81..ed9dd17f9348c284070fc0cdec5583fcc5533f8d 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -280,13 +280,17 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi, rcu_read_lock(); list_for_each_entry_rcu(new_stat, &hw_stats_list, list) { + struct net_device *dev; + /* * only add a note to our monitor buffer if: * 1) this is the dev we received on * 2) its after the last_rx delta * 3) our rx_dropped count has gone up */ - if ((new_stat->dev == napi->dev) && + /* Paired with WRITE_ONCE() in dropmon_net_event() */ + dev = READ_ONCE(new_stat->dev); + if ((dev == napi->dev) && (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) && (napi->dev->stats.rx_dropped != new_stat->last_drop_val)) { trace_drop_common(NULL, NULL); @@ -1574,7 +1578,10 @@ static int dropmon_net_event(struct notifier_block *ev_block, mutex_lock(&net_dm_mutex); list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) { if (new_stat->dev == dev) { - new_stat->dev = NULL; + + /* Paired with READ_ONCE() in trace_napi_poll_hit() */ + WRITE_ONCE(new_stat->dev, NULL); + if (trace_state == TRACE_OFF) { list_del_rcu(&new_stat->list); kfree_rcu(new_stat, rcu); diff --git a/net/core/filter.c b/net/core/filter.c index 42d15942ce907ba2e4e26432aaa2f028434a6220..933fdf6e6a900f3110f1ba5ebb6156cbf8c01a14 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1712,7 +1712,7 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg4_type = ARG_CONST_SIZE, .arg5_type = ARG_ANYTHING, }; @@ -2020,9 +2020,9 @@ static const struct bpf_func_proto 
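/*
 * A sketch of the annotation pairing added in drop_monitor above: the
 * writer publishes the update with WRITE_ONCE() and the lockless RCU
 * reader samples the pointer exactly once with READ_ONCE(), so the
 * compiler can neither tear nor reload the access. Types illustrative.
 */
#include <linux/compiler.h>
#include <linux/types.h>

struct net_device;

struct stat_entry {
	struct net_device *dev;
};

static bool entry_matches(struct stat_entry *e, struct net_device *dev)
{
	/* paired with WRITE_ONCE() in entry_clear() */
	return READ_ONCE(e->dev) == dev;
}

static void entry_clear(struct stat_entry *e)
{
	/* paired with READ_ONCE() in entry_matches() */
	WRITE_ONCE(e->dev, NULL);
}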
bpf_csum_diff_proto = { .gpl_only = false, .pkt_access = true, .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_MEM_OR_NULL, + .arg1_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE_OR_ZERO, - .arg3_type = ARG_PTR_TO_MEM_OR_NULL, + .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, .arg4_type = ARG_CONST_SIZE_OR_ZERO, .arg5_type = ARG_ANYTHING, }; @@ -2560,7 +2560,7 @@ static const struct bpf_func_proto bpf_redirect_neigh_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, - .arg2_type = ARG_PTR_TO_MEM_OR_NULL, + .arg2_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, }; @@ -2730,6 +2730,9 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, if (unlikely(flags)) return -EINVAL; + if (unlikely(len == 0)) + return 0; + /* First find the starting scatterlist element */ i = msg->sg.start; do { @@ -4177,7 +4180,7 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; @@ -4191,7 +4194,7 @@ const struct bpf_func_proto bpf_skb_output_proto = { .arg1_btf_id = &bpf_skb_output_btf_ids[0], .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; @@ -4374,7 +4377,7 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; @@ -4400,7 +4403,7 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, }; @@ -4570,7 +4573,7 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; @@ -4584,7 +4587,7 @@ const struct bpf_func_proto bpf_xdp_output_proto = { .arg1_btf_id = &bpf_xdp_output_btf_ids[0], .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; @@ -4711,12 +4714,14 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, switch (optname) { case SO_RCVBUF: val = min_t(u32, val, sysctl_rmem_max); + val = min_t(int, val, INT_MAX / 2); sk->sk_userlocks |= SOCK_RCVBUF_LOCK; WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF)); break; case SO_SNDBUF: val = min_t(u32, val, sysctl_wmem_max); + val = min_t(int, val, INT_MAX / 2); sk->sk_userlocks |= SOCK_SNDBUF_LOCK; WRITE_ONCE(sk->sk_sndbuf, max_t(int, val * 2, SOCK_MIN_SNDBUF)); @@ -5000,10 +5005,80 @@ static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE, }; +BPF_CALL_1(bpf_get_sockops_uid_gid, struct bpf_sock_ops_kern *, 
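/*
 * A sketch of the overflow guard added in _bpf_setsockopt() above: the
 * requested size is clamped by the sysctl limit and additionally to
 * INT_MAX / 2, so the doubled value stored in the int-sized buffer field
 * cannot wrap negative. Helper shape is illustrative.
 */
#include <linux/kernel.h>

static int clamp_bufsize(u32 val, u32 sysctl_max, int floor)
{
	val = min_t(u32, val, sysctl_max);
	val = min_t(int, val, INT_MAX / 2);	/* keep val * 2 positive */

	return max_t(int, val * 2, floor);
}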
bpf_sock) +{ + struct sock *sk = bpf_sock->sk; + kuid_t uid; + kgid_t gid; + + if (!sk || !sk_fullsock(sk)) + return -EINVAL; + + uid = sock_net_uid(sock_net(sk), sk); + gid = sock_net_gid(sock_net(sk), sk); + + return ((u64)from_kgid_munged(sock_net(sk)->user_ns, gid)) << 32 | + from_kuid_munged(sock_net(sk)->user_ns, uid); +} + +static const struct bpf_func_proto bpf_get_sockops_uid_gid_proto = { + .func = bpf_get_sockops_uid_gid, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + +#include +#include + +bpf_getorigdst_opt_func bpf_getorigdst_opt; +EXPORT_SYMBOL(bpf_getorigdst_opt); + +BPF_CALL_4(bpf_sk_original_addr, struct bpf_sock_ops_kern *, bpf_sock, + int, optname, char *, optval, int, optlen) +{ + struct sock *sk = bpf_sock->sk; + int ret = -EINVAL; + + if (!sk_fullsock(sk)) + goto err_clear; + + if (optname != BPF_SO_ORIGINAL_DST && optname != BPF_SO_REPLY_SRC) + goto err_clear; + + if (!bpf_getorigdst_opt) + goto err_clear; +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + if (optname == BPF_SO_ORIGINAL_DST) + ret = bpf_getorigdst_opt(sk, optname, optval, &optlen, + IP_CT_DIR_ORIGINAL); + else if (optname == BPF_SO_REPLY_SRC) + ret = bpf_getorigdst_opt(sk, optname, optval, &optlen, + IP_CT_DIR_REPLY); + if (ret < 0) + goto err_clear; + + return 0; +#endif +err_clear: + memset(optval, 0, optlen); + return ret; +} + +static const struct bpf_func_proto bpf_sk_original_addr_proto = { + .func = bpf_sk_original_addr, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_PTR_TO_UNINIT_MEM, + .arg4_type = ARG_CONST_SIZE, +}; + BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx, int, level, int, optname, char *, optval, int, optlen) { @@ -5034,7 +5109,7 @@ static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = { .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE, }; @@ -5209,7 +5284,7 @@ static const struct bpf_func_proto bpf_bind_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, }; @@ -5671,7 +5746,7 @@ static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg4_type = ARG_CONST_SIZE }; @@ -5681,7 +5756,7 @@ static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg4_type = ARG_CONST_SIZE }; @@ -5724,7 +5799,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg4_type = ARG_CONST_SIZE }; @@ -5812,7 +5887,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg4_type = ARG_CONST_SIZE }; @@ -6037,7 +6112,7 @@ static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = { .pkt_access = true, .ret_type = 
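/*
 * A sketch of the err_clear convention used by the BPF helper above: on
 * any failure the output buffer supplied by the program is zeroed before
 * returning, so stale kernel memory is never exposed. The verifier
 * guarantees optval/optlen describe writable program memory; the helper
 * shape is illustrative.
 */
#include <linux/errno.h>
#include <linux/string.h>

static int fill_opt(const void *src, int src_len, char *optval, int optlen)
{
	if (!src || src_len > optlen)
		goto err_clear;

	memcpy(optval, src, src_len);
	return 0;

err_clear:
	memset(optval, 0, optlen);
	return -EINVAL;
}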
RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, @@ -6056,7 +6131,7 @@ static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = { .pkt_access = true, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, @@ -6075,7 +6150,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { .pkt_access = true, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, @@ -6112,7 +6187,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = { .pkt_access = true, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, @@ -6135,7 +6210,7 @@ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = { .pkt_access = true, .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, @@ -6158,7 +6233,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = { .pkt_access = true, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, @@ -6177,7 +6252,7 @@ static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = { .gpl_only = false, .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, @@ -6196,7 +6271,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = { .gpl_only = false, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, @@ -6215,7 +6290,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = { .gpl_only = false, .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, @@ -6487,24 +6562,33 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len if (!th->ack || th->rst || th->syn) return -ENOENT; + if (unlikely(iph_len < sizeof(struct iphdr))) + return -EINVAL; + if (tcp_synq_no_recent_overflow(sk)) return -ENOENT; cookie = ntohl(th->ack_seq) - 1; - switch (sk->sk_family) { - case AF_INET: - if (unlikely(iph_len < sizeof(struct iphdr))) + /* Both struct iphdr and struct ipv6hdr have the version field at the + * same offset so we can cast to the shorter header (struct iphdr). 
+ */ + switch (((struct iphdr *)iph)->version) { + case 4: + if (sk->sk_family == AF_INET6 && ipv6_only_sock(sk)) return -EINVAL; ret = __cookie_v4_check((struct iphdr *)iph, th, cookie); break; #if IS_BUILTIN(CONFIG_IPV6) - case AF_INET6: + case 6: if (unlikely(iph_len < sizeof(struct ipv6hdr))) return -EINVAL; + if (sk->sk_family != AF_INET6) + return -EINVAL; + ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie); break; #endif /* CONFIG_IPV6 */ @@ -6528,9 +6612,9 @@ static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = { .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE, }; @@ -6597,9 +6681,9 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = { .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, - .arg4_type = ARG_PTR_TO_MEM, + .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE, }; @@ -6828,7 +6912,7 @@ static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; @@ -7274,6 +7358,10 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; + case BPF_FUNC_get_sockops_uid_gid: + return &bpf_get_sockops_uid_gid_proto; + case BPF_FUNC_sk_original_addr: + return &bpf_sk_original_addr_proto; #ifdef CONFIG_INET case BPF_FUNC_load_hdr_opt: return &bpf_sock_ops_load_hdr_opt_proto; @@ -7666,6 +7754,7 @@ static bool __sock_filter_check_attach_type(int off, case bpf_ctx_range(struct bpf_sock, src_ip4): switch (attach_type) { case BPF_CGROUP_INET4_POST_BIND: + case BPF_CGROUP_INET_SOCK_RELEASE: goto read_only; default: return false; @@ -7681,6 +7770,7 @@ static bool __sock_filter_check_attach_type(int off, switch (attach_type) { case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: + case BPF_CGROUP_INET_SOCK_RELEASE: goto read_only; default: return false; @@ -7708,6 +7798,7 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); + int field_size; if (off < 0 || off >= sizeof(struct bpf_sock)) return false; @@ -7719,7 +7810,6 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, case offsetof(struct bpf_sock, family): case offsetof(struct bpf_sock, type): case offsetof(struct bpf_sock, protocol): - case offsetof(struct bpf_sock, dst_port): case offsetof(struct bpf_sock, src_port): case offsetof(struct bpf_sock, rx_queue_mapping): case bpf_ctx_range(struct bpf_sock, src_ip4): @@ -7728,6 +7818,14 @@ bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): bpf_ctx_record_field_size(info, size_default); return bpf_ctx_narrow_access_ok(off, size, size_default); + case bpf_ctx_range(struct bpf_sock, dst_port): + field_size = size == size_default ? 
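/*
 * A sketch of the dispatch change in bpf_tcp_check_syncookie() above:
 * struct iphdr and struct ipv6hdr carry the 4-bit version field at the
 * same offset, so the packet header itself, not sk->sk_family, selects
 * the address family (a dual-stack v6 socket can legitimately carry v4).
 */
#include <linux/errno.h>
#include <linux/ip.h>

static int iph_version(const void *iph, u32 iph_len)
{
	/* struct iphdr is the shorter of the two headers */
	if (iph_len < sizeof(struct iphdr))
		return -EINVAL;

	return ((const struct iphdr *)iph)->version;	/* 4 or 6 */
}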
+ size_default : sizeof_field(struct bpf_sock, dst_port); + bpf_ctx_record_field_size(info, field_size); + return bpf_ctx_narrow_access_ok(off, size, field_size); + case offsetofend(struct bpf_sock, dst_port) ... + offsetof(struct bpf_sock, dst_ip4) - 1: + return false; } return size == size_default; @@ -7923,9 +8021,9 @@ void bpf_warn_invalid_xdp_action(u32 act) { const u32 act_max = XDP_REDIRECT; - WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n", - act > act_max ? "Illegal" : "Driver unsupported", - act); + pr_warn_once("%s XDP return value %u, expect packet loss!\n", + act > act_max ? "Illegal" : "Driver unsupported", + act); } EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index c714e6a9dad4c91150a7dbd380f66ca82a3b81cd..eadb696360b486c0165c0b4906b5ea76c3430d40 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -193,12 +193,23 @@ static const struct seq_operations softnet_seq_ops = { .show = softnet_seq_show, }; -static void *ptype_get_idx(loff_t pos) +static void *ptype_get_idx(struct seq_file *seq, loff_t pos) { + struct list_head *ptype_list = NULL; struct packet_type *pt = NULL; + struct net_device *dev; loff_t i = 0; int t; + for_each_netdev_rcu(seq_file_net(seq), dev) { + ptype_list = &dev->ptype_all; + list_for_each_entry_rcu(pt, ptype_list, list) { + if (i == pos) + return pt; + ++i; + } + } + list_for_each_entry_rcu(pt, &ptype_all, list) { if (i == pos) return pt; @@ -219,22 +230,40 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); - return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN; + return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct net_device *dev; struct packet_type *pt; struct list_head *nxt; int hash; ++*pos; if (v == SEQ_START_TOKEN) - return ptype_get_idx(0); + return ptype_get_idx(seq, 0); pt = v; nxt = pt->list.next; + if (pt->dev) { + if (nxt != &pt->dev->ptype_all) + goto found; + + dev = pt->dev; + for_each_netdev_continue_rcu(seq_file_net(seq), dev) { + if (!list_empty(&dev->ptype_all)) { + nxt = dev->ptype_all.next; + goto found; + } + } + + nxt = ptype_all.next; + goto ptype_all; + } + if (pt->type == htons(ETH_P_ALL)) { +ptype_all: if (nxt != &ptype_all) goto found; hash = 0; @@ -263,7 +292,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v) if (v == SEQ_START_TOKEN) seq_puts(seq, "Type Device Function\n"); - else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) { + else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && + (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) { if (pt->type == htons(ETH_P_ALL)) seq_puts(seq, "ALL "); else diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index af591236010557823492f8ae043970344516b0ee..989b3f7ee85f40341a9549d424f7a4b262acbacc 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -213,7 +213,7 @@ static ssize_t speed_show(struct device *dev, if (!rtnl_trylock()) return restart_syscall(); - if (netif_running(netdev)) { + if (netif_running(netdev) && netif_device_present(netdev)) { struct ethtool_link_ksettings cmd; if (!__ethtool_get_link_ksettings(netdev, &cmd)) @@ -1804,6 +1804,9 @@ static void remove_queue_kobjects(struct net_device *dev) net_rx_queue_update_kobjects(dev, real_rx, 0); netdev_queue_update_kobjects(dev, real_tx, 0); + + dev->real_num_rx_queues = 0; + dev->real_num_tx_queues = 0; #ifdef 
CONFIG_SYSFS kset_unregister(dev->queues_kset); #endif diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index ac852db83de9fb973a7962c6b4fb7b6cb3d6652b..cbff7d94b993ec941197c99af3118350e7c95445 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -183,8 +183,10 @@ static void ops_exit_list(const struct pernet_operations *ops, { struct net *net; if (ops->exit) { - list_for_each_entry(net, net_exit_list, exit_list) + list_for_each_entry(net, net_exit_list, exit_list) { ops->exit(net); + cond_resched(); + } } if (ops->exit_batch) ops->exit_batch(net_exit_list); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index e08b506163f5f588c33078f2d9852aa2ec405bef..3c9c2d6e3b92e4fbd50e765c67f8fca5f6dfec0a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1705,6 +1705,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, { struct ifinfomsg *ifm; struct nlmsghdr *nlh; + struct Qdisc *qdisc; ASSERT_RTNL(); nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); @@ -1722,6 +1723,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid)) goto nla_put_failure; + qdisc = rtnl_dereference(dev->qdisc); if (nla_put_string(skb, IFLA_IFNAME, dev->name) || nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) || nla_put_u8(skb, IFLA_OPERSTATE, @@ -1740,8 +1742,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, #endif put_master_ifindex(skb, dev) || nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || - (dev->qdisc && - nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) || + (qdisc && + nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) || nla_put_ifalias(skb, dev) || nla_put_u32(skb, IFLA_CARRIER_CHANGES, atomic_read(&dev->carrier_up_count) + @@ -3238,8 +3240,8 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; unsigned char name_assign_type = NET_NAME_USER; struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; - const struct rtnl_link_ops *m_ops = NULL; - struct net_device *master_dev = NULL; + const struct rtnl_link_ops *m_ops; + struct net_device *master_dev; struct net *net = sock_net(skb->sk); const struct rtnl_link_ops *ops; struct nlattr *tb[IFLA_MAX + 1]; @@ -3277,6 +3279,8 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, else dev = NULL; + master_dev = NULL; + m_ops = NULL; if (dev) { master_dev = netdev_master_upper_dev_get(dev); if (master_dev) @@ -3611,13 +3615,24 @@ static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr, bool *changed, struct netlink_ext_ack *extack) { char *alt_ifname; + size_t size; int err; err = nla_validate(attr, attr->nla_len, IFLA_MAX, ifla_policy, extack); if (err) return err; - alt_ifname = nla_strdup(attr, GFP_KERNEL); + if (cmd == RTM_NEWLINKPROP) { + size = rtnl_prop_list_size(dev); + size += nla_total_size(ALTIFNAMSIZ); + if (size >= U16_MAX) { + NL_SET_ERR_MSG(extack, + "effective property list too long"); + return -EINVAL; + } + } + + alt_ifname = nla_strdup(attr, GFP_KERNEL_ACCOUNT); if (!alt_ifname) return -ENOMEM; diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index b5bc680d475536de6da68a9a8815691cf81176a6..444cce0184c3722b4585920145bf459e29dfedfe 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -94,7 +94,7 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, } EXPORT_SYMBOL(secure_tcpv6_seq); -u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, +u64 
secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport) { const struct { @@ -142,7 +142,7 @@ u32 secure_tcp_seq(__be32 saddr, __be32 daddr, } EXPORT_SYMBOL_GPL(secure_tcp_seq); -u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) +u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) { net_secret_init(); return siphash_3u32((__force u32)saddr, (__force u32)daddr, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 16c74a81b7bf67ce5a8794bb32bae525d84ced78..379c426f8d656390ca28e2e84f4d49b4aa09e83a 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2156,7 +2156,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta) /* Free pulled out fragments. */ while ((list = skb_shinfo(skb)->frag_list) != insp) { skb_shinfo(skb)->frag_list = list->next; - kfree_skb(list); + consume_skb(list); } /* And insert new clone at head. */ if (clone) { @@ -2478,9 +2478,32 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, } EXPORT_SYMBOL_GPL(skb_splice_bits); -/* Send skb data on a socket. Socket must be locked. */ -int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, - int len) +static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg, + struct kvec *vec, size_t num, size_t size) +{ + struct socket *sock = sk->sk_socket; + + if (!sock) + return -EINVAL; + return kernel_sendmsg(sock, msg, vec, num, size); +} + +static int sendpage_unlocked(struct sock *sk, struct page *page, int offset, + size_t size, int flags) +{ + struct socket *sock = sk->sk_socket; + + if (!sock) + return -EINVAL; + return kernel_sendpage(sock, page, offset, size, flags); +} + +typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg, + struct kvec *vec, size_t num, size_t size); +typedef int (*sendpage_func)(struct sock *sk, struct page *page, int offset, + size_t size, int flags); +static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, + int len, sendmsg_func sendmsg, sendpage_func sendpage) { unsigned int orig_len = len; struct sk_buff *head = skb; @@ -2500,7 +2523,8 @@ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, memset(&msg, 0, sizeof(msg)); msg.msg_flags = MSG_DONTWAIT; - ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen); + ret = INDIRECT_CALL_2(sendmsg, kernel_sendmsg_locked, + sendmsg_unlocked, sk, &msg, &kv, 1, slen); if (ret <= 0) goto error; @@ -2531,9 +2555,11 @@ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, slen = min_t(size_t, len, skb_frag_size(frag) - offset); while (slen) { - ret = kernel_sendpage_locked(sk, skb_frag_page(frag), - skb_frag_off(frag) + offset, - slen, MSG_DONTWAIT); + ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked, + sendpage_unlocked, sk, + skb_frag_page(frag), + skb_frag_off(frag) + offset, + slen, MSG_DONTWAIT); if (ret <= 0) goto error; @@ -2565,8 +2591,23 @@ int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, error: return orig_len == len ? ret : orig_len - len; } + +/* Send skb data on a socket. Socket must be locked. */ +int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, + int len) +{ + return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked, + kernel_sendpage_locked); +} EXPORT_SYMBOL_GPL(skb_send_sock_locked); +/* Send skb data on a socket. Socket must be unlocked. 
*/ +int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) +{ + return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, + sendpage_unlocked); +} + /** * skb_store_bits - store bits from kernel buffer to skb * @skb: destination buffer @@ -3695,6 +3736,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, list_skb = list_skb->next; err = 0; + delta_truesize += nskb->truesize; if (skb_shared(nskb)) { tmp = skb_clone(nskb, GFP_ATOMIC); if (tmp) { @@ -3719,7 +3761,6 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, tail = nskb; delta_len += nskb->len; - delta_truesize += nskb->truesize; skb_push(nskb, -skb_network_offset(nskb) + offset); @@ -5196,11 +5237,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, if (skb_cloned(to)) return false; - /* The page pool signature of struct page will eventually figure out - * which pages can be recycled or not but for now let's prohibit slab - * allocated and page_pool allocated SKBs from being coalesced. + /* In general, avoid mixing slab allocated and page_pool allocated + * pages within the same SKB. However when @to is not pp_recycle and + * @from is cloned, we can transition frag pages from page_pool to + * reference counted. + * + * On the other hand, don't allow coalescing two pp_recycle SKBs if + * @from is cloned, in case the SKB is using page_pool fragment + * references (PP_FLAG_PAGE_FRAG). Since we only take full page + * references for cloned SKBs at the moment that would result in + * inconsistent reference counts. */ - if (to->pp_recycle != from->pp_recycle) + if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from))) return false; if (len <= skb_tailroom(to)) { @@ -6056,7 +6104,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb, /* Free pulled out fragments. */ while ((list = shinfo->frag_list) != insp) { shinfo->frag_list = list->next; - kfree_skb(list); + consume_skb(list); } /* And insert new clone at head. 
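/*
 * A sketch of the factoring used for skb_send_sock() above: one worker
 * takes the send operation as a parameter and thin wrappers pass either
 * the locked or the unlocked variant; INDIRECT_CALL_2() keeps the two
 * known cases as direct calls. Stand-in functions, not the kernel
 * helpers.
 */
#include <linux/indirect_call_wrapper.h>
#include <linux/types.h>

struct sock;

typedef int (*send_fn)(struct sock *sk, const void *buf, size_t len);

static int send_locked(struct sock *sk, const void *buf, size_t len)
{
	return 0;	/* stand-in for the kernel_*_locked() path */
}

static int send_unlocked(struct sock *sk, const void *buf, size_t len)
{
	return 0;	/* stand-in for the plain kernel_*() path */
}

static int do_send(struct sock *sk, const void *buf, size_t len,
		   send_fn send)
{
	return INDIRECT_CALL_2(send, send_locked, send_unlocked,
			       sk, buf, len);
}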
*/ if (clone) { diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 4ee4fe4368474b0c5362218c356c708f8098d091..9dec3d35af791ca20298887139ae4d6c45dbd997 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -27,6 +27,7 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, int elem_first_coalesce) { struct page_frag *pfrag = sk_page_frag(sk); + u32 osize = msg->sg.size; int ret = 0; len -= msg->sg.size; @@ -35,13 +36,17 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, u32 orig_offset; int use, i; - if (!sk_page_frag_refill(sk, pfrag)) - return -ENOMEM; + if (!sk_page_frag_refill(sk, pfrag)) { + ret = -ENOMEM; + goto msg_trim; + } orig_offset = pfrag->offset; use = min_t(int, len, pfrag->size - orig_offset); - if (!sk_wmem_schedule(sk, use)) - return -ENOMEM; + if (!sk_wmem_schedule(sk, use)) { + ret = -ENOMEM; + goto msg_trim; + } i = msg->sg.end; sk_msg_iter_var_prev(i); @@ -71,6 +76,10 @@ int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, } return ret; + +msg_trim: + sk_msg_trim(sk, msg, osize); + return ret; } EXPORT_SYMBOL_GPL(sk_msg_alloc); @@ -504,29 +513,47 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, if (!ingress) { if (!sock_writeable(psock->sk)) return -EAGAIN; - return skb_send_sock_locked(psock->sk, skb, off, len); + return skb_send_sock(psock->sk, skb, off, len); } return sk_psock_skb_ingress(psock, skb); } +static void sk_psock_skb_state(struct sk_psock *psock, + struct sk_psock_work_state *state, + struct sk_buff *skb, + int len, int off) +{ + spin_lock_bh(&psock->ingress_lock); + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { + state->skb = skb; + state->len = len; + state->off = off; + } else { + sock_drop(psock->sk, skb); + } + spin_unlock_bh(&psock->ingress_lock); +} + static void sk_psock_backlog(struct work_struct *work) { struct sk_psock *psock = container_of(work, struct sk_psock, work); struct sk_psock_work_state *state = &psock->work_state; - struct sk_buff *skb; + struct sk_buff *skb = NULL; bool ingress; u32 len, off; int ret; - /* Lock sock to avoid losing sk_socket during loop. */ - lock_sock(psock->sk); - if (state->skb) { + mutex_lock(&psock->work_mutex); + if (unlikely(state->skb)) { + spin_lock_bh(&psock->ingress_lock); skb = state->skb; len = state->len; off = state->off; state->skb = NULL; - goto start; + spin_unlock_bh(&psock->ingress_lock); } + if (skb) + goto start; while ((skb = skb_dequeue(&psock->ingress_skb))) { len = skb->len; @@ -535,14 +562,13 @@ static void sk_psock_backlog(struct work_struct *work) ingress = tcp_skb_bpf_ingress(skb); do { ret = -EIO; - if (likely(psock->sk->sk_socket)) + if (!sock_flag(psock->sk, SOCK_DEAD)) ret = sk_psock_handle_skb(psock, skb, off, len, ingress); if (ret <= 0) { if (ret == -EAGAIN) { - state->skb = skb; - state->len = len; - state->off = off; + sk_psock_skb_state(psock, state, skb, + len, off); goto end; } /* Hard errors break pipe and stop xmit. 
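/*
 * A sketch of the rollback added to sk_msg_alloc() above: snapshot the
 * message size on entry and, when charging or page refill fails part way
 * through, trim back to that size so no partially accounted memory is
 * left behind. try_charge_append() is a hypothetical stand-in for the
 * allocation loop.
 */
#include <linux/skmsg.h>

int try_charge_append(struct sock *sk, struct sk_msg *msg, int len);

static int msg_grow(struct sock *sk, struct sk_msg *msg, int len)
{
	u32 osize = msg->sg.size;	/* snapshot for rollback */
	int ret;

	ret = try_charge_append(sk, msg, len);
	if (ret < 0)
		sk_msg_trim(sk, msg, osize);	/* undo partial progress */

	return ret;
}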
*/ @@ -559,7 +585,7 @@ static void sk_psock_backlog(struct work_struct *work) kfree_skb(skb); } end: - release_sock(psock->sk); + mutex_unlock(&psock->work_mutex); } struct sk_psock *sk_psock_init(struct sock *sk, int node) @@ -597,7 +623,9 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node) spin_lock_init(&psock->link_lock); INIT_WORK(&psock->work, sk_psock_backlog); + mutex_init(&psock->work_mutex); INIT_LIST_HEAD(&psock->ingress_msg); + spin_lock_init(&psock->ingress_lock); skb_queue_head_init(&psock->ingress_skb); sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED); @@ -631,14 +659,25 @@ void __sk_psock_purge_ingress_msg(struct sk_psock *psock) list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) { list_del(&msg->list); + atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc); sk_msg_free(psock->sk, msg); kfree(msg); } } -static void sk_psock_zap_ingress(struct sk_psock *psock) +static void __sk_psock_zap_ingress(struct sk_psock *psock) { - __skb_queue_purge(&psock->ingress_skb); + struct sk_buff *skb; + + while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) { + tcp_skb_bpf_redirect_clear(skb); + sock_drop(psock->sk, skb); + } + kfree_skb(psock->work_state.skb); + /* We null the skb here to ensure that calls to sk_psock_backlog + * do not pick up the free'd skb. + */ + psock->work_state.skb = NULL; __sk_psock_purge_ingress_msg(psock); } @@ -652,6 +691,18 @@ static void sk_psock_link_destroy(struct sk_psock *psock) } } +void sk_psock_stop(struct sk_psock *psock, bool wait) +{ + spin_lock_bh(&psock->ingress_lock); + sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); + sk_psock_cork_free(psock); + __sk_psock_zap_ingress(psock); + spin_unlock_bh(&psock->ingress_lock); + + if (wait) + cancel_work_sync(&psock->work); +} + static void sk_psock_destroy_deferred(struct work_struct *gc) { struct sk_psock *psock = container_of(gc, struct sk_psock, gc); @@ -663,12 +714,12 @@ static void sk_psock_destroy_deferred(struct work_struct *gc) strp_done(&psock->parser.strp); cancel_work_sync(&psock->work); + mutex_destroy(&psock->work_mutex); psock_progs_drop(&psock->progs); sk_psock_link_destroy(psock); sk_psock_cork_free(psock); - sk_psock_zap_ingress(psock); if (psock->sk_redir) sock_put(psock->sk_redir); @@ -686,9 +737,6 @@ static void sk_psock_destroy(struct rcu_head *rcu) void sk_psock_drop(struct sock *sk, struct sk_psock *psock) { - sk_psock_cork_free(psock); - sk_psock_zap_ingress(psock); - write_lock_bh(&sk->sk_callback_lock); sk_psock_restore_proto(sk, psock); rcu_assign_sk_user_data(sk, NULL); @@ -697,8 +745,8 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock) else if (psock->progs.skb_verdict) sk_psock_stop_verdict(sk, psock); write_unlock_bh(&sk->sk_callback_lock); - sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); + sk_psock_stop(psock, false); call_rcu(&psock->rcu, sk_psock_destroy); } EXPORT_SYMBOL_GPL(sk_psock_drop); @@ -783,14 +831,20 @@ static int sk_psock_skb_redirect(struct sk_buff *skb) * error that caused the pipe to break. We can't send a packet on * a socket that is in this state so we drop the skb. 
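/*
 * A sketch of the deferral logic added around the psock backlog above:
 * an skb that must be retried is parked in the work state only while TX
 * is still enabled; once teardown has cleared that bit it is dropped
 * instead, with the ingress lock serializing both paths. Illustrative
 * types.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct backlog_state {
	spinlock_t lock;	/* serializes save vs. teardown */
	bool tx_enabled;
	struct sk_buff *deferred;
};

static void save_or_drop(struct backlog_state *st, struct sk_buff *skb)
{
	spin_lock_bh(&st->lock);
	if (st->tx_enabled)
		st->deferred = skb;	/* retry on the next work run */
	else
		kfree_skb(skb);		/* teardown in progress */
	spin_unlock_bh(&st->lock);
}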
*/ - if (!psock_other || sock_flag(sk_other, SOCK_DEAD) || - !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) { + if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) { + kfree_skb(skb); + return -EIO; + } + spin_lock_bh(&psock_other->ingress_lock); + if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) { + spin_unlock_bh(&psock_other->ingress_lock); kfree_skb(skb); return -EIO; } skb_queue_tail(&psock_other->ingress_skb, skb); schedule_work(&psock_other->work); + spin_unlock_bh(&psock_other->ingress_lock); return 0; } @@ -856,8 +910,17 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, err = sk_psock_skb_ingress_self(psock, skb); } if (err < 0) { - skb_queue_tail(&psock->ingress_skb, skb); - schedule_work(&psock->work); + spin_lock_bh(&psock->ingress_lock); + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { + skb_queue_tail(&psock->ingress_skb, skb); + schedule_work(&psock->work); + err = 0; + } + spin_unlock_bh(&psock->ingress_lock); + if (err < 0) { + tcp_skb_bpf_redirect_clear(skb); + goto out_free; + } } break; case __SK_REDIRECT: @@ -947,7 +1010,7 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb, struct sk_psock *psock; struct bpf_prog *prog; int ret = __SK_DROP; - int len = skb->len; + int len = orig_len; /* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */ skb = skb_clone(skb, GFP_ATOMIC); diff --git a/net/core/sock.c b/net/core/sock.c index 6d9af4ef93d7a00ef553328d17b99310e717138d..2fa8863caee0fe33839832cb68081c70e9c197f5 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2445,7 +2445,6 @@ static void sk_leave_memory_pressure(struct sock *sk) } } -#define SKB_FRAG_PAGE_ORDER get_order(32768) DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); /** @@ -2986,9 +2985,11 @@ void sock_init_data(struct socket *sock, struct sock *sk) RCU_INIT_POINTER(sk->sk_wq, &sock->wq); sock->sk = sk; sk->sk_uid = SOCK_INODE(sock)->i_uid; + sk->sk_gid = SOCK_INODE(sock)->i_gid; } else { RCU_INIT_POINTER(sk->sk_wq, NULL); sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0); + sk->sk_gid = make_kgid(sock_net(sk)->user_ns, 0); } rwlock_init(&sk->sk_callback_lock); diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 4ea5bc65848f29f3b9417e9d75244b264dcb8cda..aea06310b2675a5383d87a990a14151824703470 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -1549,7 +1549,7 @@ void sock_map_close(struct sock *sk, long timeout) lock_sock(sk); rcu_read_lock(); - psock = sk_psock(sk); + psock = sk_psock_get(sk); if (unlikely(!psock)) { rcu_read_unlock(); release_sock(sk); @@ -1559,6 +1559,8 @@ void sock_map_close(struct sock *sk, long timeout) saved_close = psock->saved_close; sock_map_remove_links(sk, psock); rcu_read_unlock(); + sk_psock_stop(psock, true); + sk_psock_put(sk, psock); release_sock(sk); saved_close(sk, timeout); } @@ -1608,7 +1610,7 @@ static struct bpf_iter_reg sock_map_iter_reg = { .ctx_arg_info_size = 2, .ctx_arg_info = { { offsetof(struct bpf_iter__sockmap, key), - PTR_TO_RDONLY_BUF_OR_NULL }, + PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY }, { offsetof(struct bpf_iter__sockmap, sk), PTR_TO_BTF_ID_OR_NULL }, }, diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index a352ce4f878a374e078f95b38b11631295f4bc35..2535d3dfb92c8a8fc3a53bdc3d8bff37eb875dc1 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -2063,10 +2063,54 @@ u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev) } EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask); +static void dcbnl_flush_dev(struct 
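/*
 * A sketch of the lifetime fix in sock_map_close() above: instead of a
 * bare RCU-protected pointer, take a real reference (which only succeeds
 * while the object is still live) so it can be stopped and put after the
 * RCU section ends. Illustrative type, same idiom as sk_psock_get().
 */
#include <linux/refcount.h>

struct obj {
	refcount_t ref;
};

static struct obj *obj_get(struct obj *o)
{
	/* called under rcu_read_lock(); fail if a free is already underway */
	if (o && !refcount_inc_not_zero(&o->ref))
		return NULL;

	return o;
}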
net_device *dev) +{ + struct dcb_app_type *itr, *tmp; + + spin_lock_bh(&dcb_lock); + + list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) { + if (itr->ifindex == dev->ifindex) { + list_del(&itr->list); + kfree(itr); + } + } + + spin_unlock_bh(&dcb_lock); +} + +static int dcbnl_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + switch (event) { + case NETDEV_UNREGISTER: + if (!dev->dcbnl_ops) + return NOTIFY_DONE; + + dcbnl_flush_dev(dev); + + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +static struct notifier_block dcbnl_nb __read_mostly = { + .notifier_call = dcbnl_netdevice_event, +}; + static int __init dcbnl_init(void) { + int err; + INIT_LIST_HEAD(&dcb_app_list); + err = register_netdevice_notifier(&dcbnl_nb); + if (err) + return err; + rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0); rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0); diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 71c8ef7d40870b3321daba2620b43ee43c134186..f543fca6dfcbfd718eca7fcc430b6691afc13a6f 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -766,6 +766,7 @@ static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn) struct net_device *master; master = of_find_net_device_by_node(ethernet); + of_node_put(ethernet); if (!master) return -EPROBE_DEFER; diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index b34e4f827e75668f0406993cc84daa9f14966237..a493965f157f289c4afd3ac5d8f8c58ef088a66f 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -1441,7 +1441,7 @@ static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; @@ -1634,7 +1634,7 @@ static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; @@ -1812,7 +1812,7 @@ static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; @@ -1988,7 +1988,7 @@ static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ab3e34241f98e5c22bcdbaa9b304315a4323e697..911ad595dbb944f31a5708f86568a3e840f88965 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -158,7 +158,7 @@ void inet_sock_destruct(struct sock *sk) kfree(rcu_dereference_protected(inet->inet_opt, 1)); dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); - dst_release(sk->sk_rx_dst); + dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1)); sk_refcnt_debug_dec(sk); } EXPORT_SYMBOL(inet_sock_destruct); @@ -1375,8 +1375,11 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, } ops = rcu_dereference(inet_offloads[proto]); - if (likely(ops && ops->callbacks.gso_segment)) + if (likely(ops && ops->callbacks.gso_segment)) { segs = ops->callbacks.gso_segment(skb, features); + if (!segs) + skb->network_header = 
skb_mac_header(skb) + nhoff - skb->head; + } if (IS_ERR_OR_NULL(segs)) goto out; @@ -1666,12 +1669,6 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family, } EXPORT_SYMBOL_GPL(inet_ctl_sock_create); -u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt) -{ - return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt); -} -EXPORT_SYMBOL_GPL(snmp_get_cpu_field); - unsigned long snmp_fold_field(void __percpu *mib, int offt) { unsigned long res = 0; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 922dd73e57406e95f000be2a76fcc8de27bd06d0..83a47998c4b186e52e1a19e00308f8c1afb108ad 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1116,13 +1116,18 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev) return err; } -static int arp_invalidate(struct net_device *dev, __be32 ip) +int arp_invalidate(struct net_device *dev, __be32 ip, bool force) { struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev); int err = -ENXIO; struct neigh_table *tbl = &arp_tbl; if (neigh) { + if ((neigh->nud_state & NUD_VALID) && !force) { + neigh_release(neigh); + return 0; + } + if (neigh->nud_state & ~NUD_NOARP) err = neigh_update(neigh, NULL, NUD_FAILED, NEIGH_UPDATE_F_OVERRIDE| @@ -1169,7 +1174,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r, if (!dev) return -EINVAL; } - return arp_invalidate(dev, ip); + return arp_invalidate(dev, ip, true); } /* diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index ed9857b2875dcfbd302f96711396419cc83d10eb..9aae82145bc16d957f1ca6b98e3bbe410de3b56c 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -448,6 +448,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * struct page *page; struct sk_buff *trailer; int tailen = esp->tailen; + unsigned int allocsz; /* this is non-NULL only with TCP/UDP Encapsulation */ if (x->encap) { @@ -457,6 +458,10 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * return err; } + allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES); + if (allocsz > ESP_SKB_FRAG_MAXSIZE) + goto cow; + if (!skb_cloned(skb)) { if (tailen <= skb_tailroom(skb)) { nfrags = 1; @@ -673,7 +678,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); u32 padto; - padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached)); + padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached)); if (skb->len < padto) esp.tfclen = padto - skb->len; } diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 5aa7344dbec7fd80e8bab542e675d26ea423ad16..3450c9ba2728c7a1f5ac021fa88db37ff0cd977b 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -160,6 +160,9 @@ static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x, skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; } + if (proto == IPPROTO_IPV6) + skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4; + __skb_pull(skb, skb_transport_offset(skb)); ops = rcu_dereference(inet_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 917ea953dfad8f1d8a3d579de906b7109c79e2ae..0df4594b49c7831264f999fbd24c8ba87aaeea1a 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1112,9 +1112,11 @@ void fib_add_ifaddr(struct in_ifaddr *ifa) return; /* Add broadcast address, if it is explicitly assigned. 
*/ - if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF)) + if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF)) { fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim, 0); + arp_invalidate(dev, ifa->ifa_broadcast, false); + } if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) && (prefix != addr || ifa->ifa_prefixlen < 32)) { @@ -1130,6 +1132,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa) prim, 0); fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask, 32, prim, 0); + arp_invalidate(dev, prefix | ~mask, false); } } } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index ab6a8f35d369ddd948992b83d1ebf9d88ba2b4cb..c8c7b76c3b2e2430050bf520f54214367f5d811c 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -251,7 +252,6 @@ void free_fib_info(struct fib_info *fi) pr_warn("Freeing alive fib_info %p\n", fi); return; } - fib_info_cnt--; call_rcu(&fi->rcu, free_fib_info_rcu); } @@ -262,6 +262,10 @@ void fib_release_info(struct fib_info *fi) spin_lock_bh(&fib_info_lock); if (fi && --fi->fib_treeref == 0) { hlist_del(&fi->fib_hash); + + /* Paired with READ_ONCE() in fib_create_info(). */ + WRITE_ONCE(fib_info_cnt, fib_info_cnt - 1); + if (fi->fib_prefsrc) hlist_del(&fi->fib_lhash); if (fi->nh) { @@ -318,11 +322,15 @@ static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi) static inline unsigned int fib_devindex_hashfn(unsigned int val) { - unsigned int mask = DEVINDEX_HASHSIZE - 1; + return hash_32(val, DEVINDEX_HASHBITS); +} + +static struct hlist_head * +fib_info_devhash_bucket(const struct net_device *dev) +{ + u32 val = net_hash_mix(dev_net(dev)) ^ dev->ifindex; - return (val ^ - (val >> DEVINDEX_HASHBITS) ^ - (val >> (DEVINDEX_HASHBITS * 2))) & mask; + return &fib_info_devhash[fib_devindex_hashfn(val)]; } static unsigned int fib_info_hashfn_1(int init_val, u8 protocol, u8 scope, @@ -432,12 +440,11 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev) { struct hlist_head *head; struct fib_nh *nh; - unsigned int hash; spin_lock(&fib_info_lock); - hash = fib_devindex_hashfn(dev->ifindex); - head = &fib_info_devhash[hash]; + head = fib_info_devhash_bucket(dev); + hlist_for_each_entry(nh, head, nh_hash) { if (nh->fib_nh_dev == dev && nh->fib_nh_gw4 == gw && @@ -881,8 +888,13 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi, } if (cfg->fc_oif || cfg->fc_gw_family) { - struct fib_nh *nh = fib_info_nh(fi, 0); + struct fib_nh *nh; + + /* cannot match on nexthop object attributes */ + if (fi->nh) + return 1; + nh = fib_info_nh(fi, 0); if (cfg->fc_encap) { if (fib_encap_match(net, cfg->fc_encap_type, cfg->fc_encap, nh, cfg, extack)) @@ -1431,7 +1443,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg, #endif err = -ENOBUFS; - if (fib_info_cnt >= fib_info_hash_size) { + + /* Paired with WRITE_ONCE() in fib_release_info() */ + if (READ_ONCE(fib_info_cnt) >= fib_info_hash_size) { unsigned int new_size = fib_info_hash_size << 1; struct hlist_head *new_info_hash; struct hlist_head *new_laddrhash; @@ -1463,7 +1477,6 @@ struct fib_info *fib_create_info(struct fib_config *cfg, return ERR_PTR(err); } - fib_info_cnt++; fi->fib_net = net; fi->fib_protocol = cfg->fc_protocol; fi->fib_scope = cfg->fc_scope; @@ -1590,6 +1603,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg, fi->fib_treeref++; refcount_set(&fi->fib_clntref, 1); spin_lock_bh(&fib_info_lock); + fib_info_cnt++; 
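Annotation: the fib_info_cnt conversion above is the stock annotated-race pattern: all writes stay serialized by fib_info_lock, while the one lockless reader in fib_create_info() tolerates a stale value but must not observe a torn one. The skeleton of the pattern, with hypothetical names:

static DEFINE_SPINLOCK(obj_lock);
static unsigned int obj_cnt;		/* written only under obj_lock */

static void obj_release(void)
{
	spin_lock_bh(&obj_lock);
	/* Paired with READ_ONCE() in obj_table_full(). */
	WRITE_ONCE(obj_cnt, obj_cnt - 1);
	spin_unlock_bh(&obj_lock);
}

static bool obj_table_full(unsigned int hash_size)
{
	/* Lockless: a stale answer is acceptable, a torn load is not. */
	return READ_ONCE(obj_cnt) >= hash_size;
}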
hlist_add_head(&fi->fib_hash, &fib_info_hash[fib_info_hashfn(fi)]); if (fi->fib_prefsrc) { @@ -1603,12 +1617,10 @@ struct fib_info *fib_create_info(struct fib_config *cfg, } else { change_nexthops(fi) { struct hlist_head *head; - unsigned int hash; if (!nexthop_nh->fib_nh_dev) continue; - hash = fib_devindex_hashfn(nexthop_nh->fib_nh_dev->ifindex); - head = &fib_info_devhash[hash]; + head = fib_info_devhash_bucket(nexthop_nh->fib_nh_dev); hlist_add_head(&nexthop_nh->nh_hash, head); } endfor_nexthops(fi) } @@ -1958,8 +1970,7 @@ void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig) void fib_sync_mtu(struct net_device *dev, u32 orig_mtu) { - unsigned int hash = fib_devindex_hashfn(dev->ifindex); - struct hlist_head *head = &fib_info_devhash[hash]; + struct hlist_head *head = fib_info_devhash_bucket(dev); struct fib_nh *nh; hlist_for_each_entry(nh, head, nh_hash) { @@ -1978,12 +1989,11 @@ void fib_sync_mtu(struct net_device *dev, u32 orig_mtu) */ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force) { - int ret = 0; - int scope = RT_SCOPE_NOWHERE; + struct hlist_head *head = fib_info_devhash_bucket(dev); struct fib_info *prev_fi = NULL; - unsigned int hash = fib_devindex_hashfn(dev->ifindex); - struct hlist_head *head = &fib_info_devhash[hash]; + int scope = RT_SCOPE_NOWHERE; struct fib_nh *nh; + int ret = 0; if (force) scope = -1; @@ -2128,7 +2138,6 @@ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res) int fib_sync_up(struct net_device *dev, unsigned char nh_flags) { struct fib_info *prev_fi; - unsigned int hash; struct hlist_head *head; struct fib_nh *nh; int ret; @@ -2144,8 +2153,7 @@ int fib_sync_up(struct net_device *dev, unsigned char nh_flags) } prev_fi = NULL; - hash = fib_devindex_hashfn(dev->ifindex); - head = &fib_info_devhash[hash]; + head = fib_info_devhash_bucket(dev); ret = 0; hlist_for_each_entry(nh, head, nh_hash) { diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 10d31733297d7358b55b45d64bc447039365df1d..e0e8a65d561ec6553970af7ac8948db515f774dc 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -204,9 +204,9 @@ void inet_frag_kill(struct inet_frag_queue *fq) /* The RCU read lock provides a memory barrier * guaranteeing that if fqdir->dead is false then * the hash table destruction will not start until - * after we unlock. Paired with inet_frags_exit_net(). + * after we unlock. Paired with fqdir_pre_exit(). */ - if (!fqdir->dead) { + if (!READ_ONCE(fqdir->dead)) { rhashtable_remove_fast(&fqdir->rhashtable, &fq->node, fqdir->f->rhash_params); refcount_dec(&fq->refcnt); @@ -321,9 +321,11 @@ static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir, /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */ struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key) { + /* This pairs with WRITE_ONCE() in fqdir_pre_exit(). 
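For context: the writer half of this pairing lives in fqdir_pre_exit(). In the corresponding upstream change it looks roughly like the following (reconstructed here for illustration, not part of this diff):

void fqdir_pre_exit(struct fqdir *fqdir)
{
	/* Prevent creation of new frag queues: pairs with the
	 * READ_ONCE(fqdir->high_thresh) in inet_frag_find().
	 */
	WRITE_ONCE(fqdir->high_thresh, 0);

	/* Pairs with READ_ONCE(fqdir->dead) in inet_frag_kill()
	 * and in expiry timers such as ip_expire().
	 */
	WRITE_ONCE(fqdir->dead, true);
}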
*/ + long high_thresh = READ_ONCE(fqdir->high_thresh); struct inet_frag_queue *fq = NULL, *prev; - if (!fqdir->high_thresh || frag_mem_limit(fqdir) > fqdir->high_thresh) + if (!high_thresh || frag_mem_limit(fqdir) > high_thresh) return NULL; rcu_read_lock(); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index e093847c334da59f789509aba9184a58e20951f8..2bb9ded807ee1b98e42bad71a00c9c9073b80298 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -504,7 +504,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; } -static u32 inet_sk_port_offset(const struct sock *sk) +static u64 inet_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); @@ -637,7 +637,9 @@ int __inet_hash(struct sock *sk, struct sock *osk) int err = 0; if (sk->sk_state != TCP_LISTEN) { + local_bh_disable(); inet_ehash_nolisten(sk, osk, NULL); + local_bh_enable(); return 0; } WARN_ON(!sk_unhashed(sk)); @@ -669,50 +671,71 @@ int inet_hash(struct sock *sk) { int err = 0; - if (sk->sk_state != TCP_CLOSE) { - local_bh_disable(); + if (sk->sk_state != TCP_CLOSE) err = __inet_hash(sk, NULL); - local_bh_enable(); - } return err; } EXPORT_SYMBOL_GPL(inet_hash); -void inet_unhash(struct sock *sk) +static void __inet_unhash(struct sock *sk, struct inet_listen_hashbucket *ilb) { - struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; - struct inet_listen_hashbucket *ilb = NULL; - spinlock_t *lock; - if (sk_unhashed(sk)) return; - if (sk->sk_state == TCP_LISTEN) { - ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; - lock = &ilb->lock; - } else { - lock = inet_ehash_lockp(hashinfo, sk->sk_hash); - } - spin_lock_bh(lock); - if (sk_unhashed(sk)) - goto unlock; - if (rcu_access_pointer(sk->sk_reuseport_cb)) reuseport_detach_sock(sk); if (ilb) { + struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; + inet_unhash2(hashinfo, sk); ilb->count--; } __sk_nulls_del_node_init_rcu(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); -unlock: - spin_unlock_bh(lock); +} + +void inet_unhash(struct sock *sk) +{ + struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; + + if (sk_unhashed(sk)) + return; + + if (sk->sk_state == TCP_LISTEN) { + struct inet_listen_hashbucket *ilb; + + ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)]; + /* Don't disable bottom halves while acquiring the lock to + * avoid circular locking dependency on PREEMPT_RT. + */ + spin_lock(&ilb->lock); + __inet_unhash(sk, ilb); + spin_unlock(&ilb->lock); + } else { + spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash); + + spin_lock_bh(lock); + __inet_unhash(sk, NULL); + spin_unlock_bh(lock); + } } EXPORT_SYMBOL_GPL(inet_unhash); +/* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm + * Note that we use 32bit integers (vs RFC 'short integers') + * because 2^16 is not a multiple of num_ephemeral and this + * property might be used by clever attacker. + * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though + * attacks were since demonstrated, thus we use 65536 instead to really + * give more isolation and privacy, at the expense of 256kB of kernel + * memory. 
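Side note: the double-hash scheme described in the comment above is easy to model outside the kernel. A minimal userspace sketch; the hash, table size and constants are stand-ins, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define PERTURB_SHIFT 8			/* the kernel uses 16 */
static uint32_t table_perturb[1 << PERTURB_SHIFT];

static uint32_t fold(uint64_t v)	/* stand-in for hash_32() */
{
	return (uint32_t)((v * 0x9e3779b97f4a7c15ULL) >> (64 - PERTURB_SHIFT));
}

/* tuple_key models the secret-keyed per-destination port_offset */
static uint16_t pick_port(uint64_t tuple_key, uint16_t low, uint32_t remaining)
{
	uint32_t index = fold(tuple_key);
	uint32_t offset = table_perturb[index] + (uint32_t)tuple_key;

	table_perturb[index] += 2;	/* like the old 'hint += i + 2' */
	return low + (offset % remaining);
}

int main(void)
{
	/* The same tuple walks the range; other tuples perturb their
	 * own bucket instead of one global hint.
	 */
	for (int i = 0; i < 4; i++)
		printf("%u\n", (unsigned)pick_port(0x0123456789abcdefULL,
						   32768, 28232));
	return 0;
}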
+ */ +#define INET_TABLE_PERTURB_SHIFT 16 +static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT]; + int __inet_hash_connect(struct inet_timewait_death_row *death_row, - struct sock *sk, u32 port_offset, + struct sock *sk, u64 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **)) { @@ -724,8 +747,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct inet_bind_bucket *tb; u32 remaining, offset; int ret, i, low, high; - static u32 hint; int l3mdev; + u32 index; if (port) { head = &hinfo->bhash[inet_bhashfn(net, port, @@ -752,7 +775,11 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, if (likely(remaining > 1)) remaining &= ~1U; - offset = (hint + port_offset) % remaining; + net_get_random_once(table_perturb, sizeof(table_perturb)); + index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT); + + offset = READ_ONCE(table_perturb[index]) + port_offset; + offset %= remaining; /* In first pass we try ports of @low parity. * inet_csk_get_port() does the opposite choice. */ @@ -806,7 +833,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; ok: - hint += i + 2; + WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2); /* Head lock still held and bh's disabled */ inet_bind_hash(sk, tb, port); @@ -829,7 +856,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { - u32 port_offset = 0; + u64 port_offset = 0; if (!inet_sk(sk)->inet_num) port_offset = inet_sk_port_offset(sk); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index cfeb8890f94ee95b2246ba98beb338a33fc45d1d..fad803d2d711ef0d97f7150f0f710a35ac822946 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -144,7 +144,8 @@ static void ip_expire(struct timer_list *t) rcu_read_lock(); - if (qp->q.fqdir->dead) + /* Paired with WRITE_ONCE() in fqdir_pre_exit(). */ + if (READ_ONCE(qp->q.fqdir->dead)) goto out_rcu_unlock; spin_lock(&qp->q.lock); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index a9cc05043fa47618e13480fe062aadc10692c646..e4504dd510c6d741ae450990184737cc18a58a52 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -599,8 +599,9 @@ static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) key = &info->key; ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src, - tunnel_id_to_key32(key->tun_id), key->tos, 0, - skb->mark, skb_get_hash(skb)); + tunnel_id_to_key32(key->tun_id), + key->tos & ~INET_ECN_MASK, 0, skb->mark, + skb_get_hash(skb)); rt = ip_route_output_key(dev_net(dev), &fl4); if (IS_ERR(rt)) return PTR_ERR(rt); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 10d4cde31c6bf517938f404c7597188ca9cb4c6b..5e48b3d3a00db61f457956607397307da0fe3900 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -162,12 +162,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); iph->saddr = saddr; iph->protocol = sk->sk_protocol; - if (ip_dont_fragment(sk, &rt->dst)) { + /* Do not bother generating IPID for small packets (eg SYNACK) */ + if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) { iph->frag_off = htons(IP_DF); iph->id = 0; } else { iph->frag_off = 0; - __ip_select_ident(net, iph, 1); + /* TCP packets here are SYNACK with fat IPv4/TCP options. 
+ * Avoid using the hashed IP ident generator. + */ + if (sk->sk_protocol == IPPROTO_TCP) + iph->id = (__force __be16)prandom_u32(); + else + __ip_select_ident(net, iph, 1); } if (opt && opt->opt.optlen) { @@ -614,18 +621,6 @@ void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph, } EXPORT_SYMBOL(ip_fraglist_init); -static void ip_fraglist_ipcb_prepare(struct sk_buff *skb, - struct ip_fraglist_iter *iter) -{ - struct sk_buff *to = iter->frag; - - /* Copy the flags to each fragment. */ - IPCB(to)->flags = IPCB(skb)->flags; - - if (iter->offset == 0) - ip_options_fragment(to); -} - void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter) { unsigned int hlen = iter->hlen; @@ -671,7 +666,7 @@ void ip_frag_init(struct sk_buff *skb, unsigned int hlen, EXPORT_SYMBOL(ip_frag_init); static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to, - bool first_frag, struct ip_frag_state *state) + bool first_frag) { /* Copy the flags to each fragment. */ IPCB(to)->flags = IPCB(from)->flags; @@ -850,8 +845,20 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, /* Prepare header of the next frame, * before previous one went down. */ if (iter.frag) { - ip_fraglist_ipcb_prepare(skb, &iter); + bool first_frag = (iter.offset == 0); + + IPCB(iter.frag)->flags = IPCB(skb)->flags; ip_fraglist_prepare(skb, &iter); + if (first_frag && IPCB(skb)->opt.optlen) { + /* ipcb->opt is not populated for frags + * coming from __ip_make_skb(), + * ip_options_fragment() needs optlen + */ + IPCB(iter.frag)->opt.optlen = + IPCB(skb)->opt.optlen; + ip_options_fragment(iter.frag); + ip_send_check(iter.iph); + } } skb->tstamp = tstamp; @@ -905,7 +912,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, err = PTR_ERR(skb2); goto fail; } - ip_frag_ipcb(skb, skb2, first_frag, &state); + ip_frag_ipcb(skb, skb2, first_frag); /* * Put this fragment into the sending queue. 
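Annotation: the reason the first-fragment branch above must copy optlen before calling ip_options_fragment() is that fragments produced via __ip_make_skb() carry an unpopulated IPCB option block, and ip_options_fragment() walks exactly optlen bytes, NOPing every option whose copied bit is clear so it does not repeat in later fragments. A simplified schematic of that walk (illustrative, not the kernel implementation):

static void nop_uncopied(u8 *opt, int optlen)
{
	int i = 0;

	while (i < optlen) {
		u8 type = opt[i];
		int len;

		if (type == IPOPT_END)
			return;
		if (type == IPOPT_NOOP) {
			i++;
			continue;
		}
		len = opt[i + 1];
		if (len < 2 || len > optlen - i)
			return;		/* corrupt option list */
		if (!IPOPT_COPIED(type))
			memset(opt + i, IPOPT_NOOP, len);
		i += len;
	}
}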
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 939792a3881461275b9c7fae5b3a5e0881a59584..be1976536f1c00e0b8e525207c79004628b6f8fc 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -261,7 +261,9 @@ static int __net_init ipmr_rules_init(struct net *net) return 0; err2: + rtnl_lock(); ipmr_free_table(mrt); + rtnl_unlock(); err1: fib_rules_unregister(ops); return err; diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index a8b980ad11d4e1ca225dea411bb8a4af7c995ca3..1088564d4dbcb2490f25008a7872e9c7a378222e 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -505,8 +505,11 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par) if (IS_ERR(config)) return PTR_ERR(config); } - } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN)) + } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN)) { + clusterip_config_entry_put(config); + clusterip_config_put(config); return -EINVAL; + } ret = nf_ct_netns_get(par->net, par->family); if (ret < 0) { diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 8ce8b7300b9d3eaa47a56096a43498518176598a..e60ca03543a536b2b9a32522d87b93f94ea5b02b 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -172,16 +172,22 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) struct sock *sk = NULL; struct inet_sock *isk; struct hlist_nulls_node *hnode; - int dif = skb->dev->ifindex; + int dif, sdif; if (skb->protocol == htons(ETH_P_IP)) { + dif = inet_iif(skb); + sdif = inet_sdif(skb); pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n", (int)ident, &ip_hdr(skb)->daddr, dif); #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6)) { + dif = inet6_iif(skb); + sdif = inet6_sdif(skb); pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n", (int)ident, &ipv6_hdr(skb)->daddr, dif); #endif + } else { + return NULL; } read_lock_bh(&ping_table.lock); @@ -220,7 +226,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) continue; } - if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) + if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif && + sk->sk_bound_dev_if != sdif) continue; sock_hold(sk); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 7d26e0f8bdaeb5f334e77deb5966a976db3d3ff4..5d95f80314f95521eb645f55d7cda33fe894f848 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -721,6 +721,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) int ret = -EINVAL; int chk_addr_ret; + lock_sock(sk); if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in)) goto out; @@ -740,7 +741,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_saddr = 0; /* Use device */ sk_dst_reset(sk); ret = 0; -out: return ret; +out: + release_sock(sk); + return ret; } /* diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ce787c38679384866687d3522f406df71d502983..c72d0de8bf71432d6bce6b73ec73db7e00060ff2 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -529,6 +529,15 @@ void __ip_select_ident(struct net *net, struct iphdr *iph, int segs) } EXPORT_SYMBOL(__ip_select_ident); +static void ip_rt_fix_tos(struct flowi4 *fl4) +{ + __u8 tos = RT_FL_TOS(fl4); + + fl4->flowi4_tos = tos & IPTOS_RT_MASK; + fl4->flowi4_scope = tos & RTO_ONLINK ? 
+ RT_SCOPE_LINK : RT_SCOPE_UNIVERSE; +} + static void __build_flow_key(const struct net *net, struct flowi4 *fl4, const struct sock *sk, const struct iphdr *iph, @@ -853,6 +862,7 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf rt = (struct rtable *) dst; __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0); + ip_rt_fix_tos(&fl4); __ip_do_redirect(rt, skb, &fl4, true); } @@ -1077,6 +1087,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct flowi4 fl4; ip_rt_build_flow_key(&fl4, sk, skb); + ip_rt_fix_tos(&fl4); /* Don't make lookup fail for bridged encapsulations */ if (skb && netif_is_any_bridge_port(skb->dev)) @@ -1151,6 +1162,8 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) goto out; new = true; + } else { + ip_rt_fix_tos(&fl4); } __ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu); @@ -2524,7 +2537,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res, struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, const struct sk_buff *skb) { - __u8 tos = RT_FL_TOS(fl4); struct fib_result res = { .type = RTN_UNSPEC, .fi = NULL, @@ -2534,9 +2546,7 @@ struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4, struct rtable *rth; fl4->flowi4_iif = LOOPBACK_IFINDEX; - fl4->flowi4_tos = tos & IPTOS_RT_MASK; - fl4->flowi4_scope = ((tos & RTO_ONLINK) ? - RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); + ip_rt_fix_tos(fl4); rcu_read_lock(); rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index fcd792816756784b558d28512feaceaa5ae3ae88..4d5280780a8e1eaacf214328dc1c50af3563fca4 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1656,11 +1656,13 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, if (!copied) copied = used; break; - } else if (used <= len) { - seq += used; - copied += used; - offset += used; } + if (WARN_ON_ONCE(used > len)) + used = len; + seq += used; + copied += used; + offset += used; + /* If recv_actor drops the lock (e.g. 
TCP splice * receive) the skb pointer might be invalid when * getting here: tcp_collapse might have deleted it @@ -2814,8 +2816,7 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); - dst_release(sk->sk_rx_dst); - sk->sk_rx_dst = NULL; + dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); tcp_saved_syn_free(tp); tp->compressed_ack = 0; tp->segs_in = 0; diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 6b745ce4108c8786631bc57e704e7753d0d70356..afeaf35194de6a4deb62e70add700af2a03bdc35 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -18,9 +18,7 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg_rx; int i, copied = 0; - msg_rx = list_first_entry_or_null(&psock->ingress_msg, - struct sk_msg, list); - + msg_rx = sk_psock_peek_msg(psock); while (copied != len) { struct scatterlist *sge; @@ -45,8 +43,10 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, if (likely(!peek)) { sge->offset += copy; sge->length -= copy; - if (!msg_rx->skb) + if (!msg_rx->skb) { + atomic_sub(copy, &sk->sk_rmem_alloc); sk_mem_uncharge(sk, copy); + } msg_rx->sg.size -= copy; if (!sge->length) { @@ -68,22 +68,18 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock, } while (i != msg_rx->sg.end); if (unlikely(peek)) { - if (msg_rx == list_last_entry(&psock->ingress_msg, - struct sk_msg, list)) + msg_rx = sk_psock_next_msg(psock, msg_rx); + if (!msg_rx) break; - msg_rx = list_next_entry(msg_rx, list); continue; } msg_rx->sg.start = i; if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) { - list_del(&msg_rx->list); - if (msg_rx->skb) - consume_skb(msg_rx->skb); - kfree(msg_rx); + msg_rx = sk_psock_dequeue_msg(psock); + kfree_sk_msg(msg_rx); } - msg_rx = list_first_entry_or_null(&psock->ingress_msg, - struct sk_msg, list); + msg_rx = sk_psock_peek_msg(psock); } return copied; @@ -104,6 +100,11 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, return -ENOMEM; lock_sock(sk); + if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { + kfree(tmp); + release_sock(sk); + return -EAGAIN; + } tmp->sg.start = msg->sg.start; i = msg->sg.start; do { @@ -133,6 +134,7 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, if (!ret) { msg->sg.start = i; sk_psock_queue_msg(psock, tmp); + atomic_add(tmp->sg.size, &sk->sk_rmem_alloc); sk_psock_data_ready(sk, psock); } else { sk_msg_free(sk, tmp); @@ -218,10 +220,9 @@ int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, struct sk_psock *psock = sk_psock_get(sk); int ret; - if (unlikely(!psock)) { - sk_msg_free(sk, msg); - return 0; - } + if (unlikely(!psock)) + return -EPIPE; + ret = ingress ? 
bpf_tcp_ingress(sk, psock, msg, bytes, flags) : tcp_bpf_push_locked(sk, msg, bytes, flags, false); sk_psock_put(sk, psock); @@ -371,7 +372,7 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, cork = true; psock->cork = NULL; } - sk_msg_return(sk, msg, tosend); + sk_msg_return(sk, msg, msg->sg.size); release_sock(sk); ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags); @@ -411,8 +412,11 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, } if (msg && msg->sg.data[msg->sg.start].page_link && - msg->sg.data[msg->sg.start].length) + msg->sg.data[msg->sg.start].length) { + if (eval == __SK_REDIRECT) + sk_mem_charge(sk, msg->sg.size); goto more_data; + } } return ret; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 711bf63dc026d9cfe62a84a193b4e804950b796e..6f4ac0f10f57fb58db992ac39afa8e83b365f5cb 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1620,6 +1620,8 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, (mss != tcp_skb_seglen(skb))) goto out; + if (!tcp_skb_can_collapse(prev, skb)) + goto out; len = skb->len; pcount = tcp_skb_pcount(skb); if (tcp_skb_shift(prev, skb, pcount, len)) @@ -5742,7 +5744,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) trace_tcp_probe(sk, skb); tcp_mstamp_refresh(tp); - if (unlikely(!sk->sk_rx_dst)) + if (unlikely(!rcu_access_pointer(sk->sk_rx_dst))) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* * Header prediction. diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ebfeeeadd47ce6552ec12e23baafa00476fb0c6f..078f3a5d65b3e1b7193150183d4cce51d8857e05 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1670,15 +1670,18 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) struct sock *rsk; if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ - struct dst_entry *dst = sk->sk_rx_dst; + struct dst_entry *dst; + + dst = rcu_dereference_protected(sk->sk_rx_dst, + lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || !dst->ops->check(dst, 0)) { + RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); - sk->sk_rx_dst = NULL; } } tcp_rcv_established(sk, skb); @@ -1753,7 +1756,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { - struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); + struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); @@ -2160,7 +2163,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) struct dst_entry *dst = skb_dst(skb); if (dst && dst_hold_safe(dst)) { - sk->sk_rx_dst = dst; + rcu_assign_pointer(sk->sk_rx_dst, dst); inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; } } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index da8bcdd90715298caa0e315e80abb51041640ac2..68b066d2fe0aa0d76554a4c708e82ce0cc7d66b7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3785,6 +3785,7 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) */ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) { + struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_request *fo = tp->fastopen_req; int space, err = 0; @@ -3799,8 +3800,10 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) * private TCP options. 
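Reviewer note: taken together, the tcp_bpf and skmsg hunks in this series turn sk_rmem_alloc into a real bound on BPF-redirected ingress data: bytes are charged when an sk_msg is queued, uncharged as __tcp_bpf_recvmsg() copies them out, and uncharged again when the queue is purged. Hypothetical helpers stating that contract:

/* Hypothetical helpers: every queueing path must charge, and both
 * copy-out and purge must uncharge the same bytes, otherwise the
 * -EAGAIN admission check stops bounding memory.
 */
static int psock_rmem_charge(struct sock *sk, u32 bytes)
{
	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return -EAGAIN;		/* treat the queue as full */
	atomic_add(bytes, &sk->sk_rmem_alloc);
	return 0;
}

static void psock_rmem_uncharge(struct sock *sk, u32 bytes)
{
	atomic_sub(bytes, &sk->sk_rmem_alloc);
}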
The cost is reduced data space in SYN :( */ tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); + /* Sync mss_cache after updating the mss_clamp */ + tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); - space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - + space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) - MAX_TCP_OPTION_SPACE; space = min_t(size_t, space, fo->size); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 8cca8bda061231f333fa7e43d4bf7a15656e62b7..763adffb84d68f52234cfcb2086b18783e4a25b5 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -598,6 +598,12 @@ void udp_encap_enable(void) } EXPORT_SYMBOL(udp_encap_enable); +void udp_encap_disable(void) +{ + static_branch_dec(&udp_encap_needed_key); +} +EXPORT_SYMBOL(udp_encap_disable); + /* Handler for tunnels with arbitrary destination ports: no socket lookup, go * through error handlers in encapsulations looking for a match. */ @@ -2186,7 +2192,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old; if (dst_hold_safe(dst)) { - old = xchg(&sk->sk_rx_dst, dst); + old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst); dst_release(old); return old != dst; } @@ -2376,7 +2382,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, struct dst_entry *dst = skb_dst(skb); int ret; - if (unlikely(sk->sk_rx_dst != dst)) + if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) udp_sk_rx_dst_set(sk, dst); ret = udp_unicast_rcv_skb(sk, skb, uh); @@ -2535,7 +2541,7 @@ int udp_v4_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = READ_ONCE(sk->sk_rx_dst); + dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c index b91003538d87a03855df9bd35b751017d37fe031..bc3a043a5d5c7635b7acabe76bb19f7ad59d8db9 100644 --- a/net/ipv4/udp_tunnel_nic.c +++ b/net/ipv4/udp_tunnel_nic.c @@ -846,7 +846,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn) list_for_each_entry(node, &info->shared->devices, list) if (node->dev == dev) break; - if (node->dev != dev) + if (list_entry_is_head(node, &info->shared->devices, list)) return; list_del(&node->list); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 29526937077b3471e640c510d5793b38e452e7f9..4e24f3f7595cd52387ac35443c79a3fb6cc550f0 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -368,7 +368,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) ASSERT_RTNL(); - if (dev->mtu < IPV6_MIN_MTU) + if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev) return ERR_PTR(-EINVAL); ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL); @@ -404,12 +404,13 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) return ERR_PTR(err); } - if (snmp6_register_dev(ndev) < 0) { - netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n", - __func__, dev->name); - goto err_release; + if (dev != blackhole_netdev) { + if (snmp6_register_dev(ndev) < 0) { + netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n", + __func__, dev->name); + goto err_release; + } } - /* One reference from device. 
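Side note: the udp_tunnel_nic hunk above fixes a classic list_for_each_entry() pitfall: when the loop runs to completion the cursor is not NULL, it aliases the container of the list head, so the old 'node->dev != dev' test dereferenced garbage. A reduced example of the safe idioms:

struct item {
	struct list_head list;
	int id;
};

static struct item *find_item(struct list_head *head, int id)
{
	struct item *it;

	list_for_each_entry(it, head, list)
		if (it->id == id)
			return it;	/* cursor is only valid on a match */

	/* Past the loop 'it' points at the head, not an element: either
	 * return NULL here, or have callers test
	 * list_entry_is_head(it, head, list) as the fix above does.
	 */
	return NULL;
}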
*/ refcount_set(&ndev->refcnt, 1); @@ -440,25 +441,28 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) ipv6_mc_init_dev(ndev); ndev->tstamp = jiffies; - err = addrconf_sysctl_register(ndev); - if (err) { - ipv6_mc_destroy_dev(ndev); - snmp6_unregister_dev(ndev); - goto err_release; + if (dev != blackhole_netdev) { + err = addrconf_sysctl_register(ndev); + if (err) { + ipv6_mc_destroy_dev(ndev); + snmp6_unregister_dev(ndev); + goto err_release; + } } /* protected by rtnl_lock */ rcu_assign_pointer(dev->ip6_ptr, ndev); - /* Join interface-local all-node multicast group */ - ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes); - - /* Join all-node multicast group */ - ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); + if (dev != blackhole_netdev) { + /* Join interface-local all-node multicast group */ + ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes); - /* Join all-router multicast group if forwarding is set */ - if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST)) - ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); + /* Join all-node multicast group */ + ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes); + /* Join all-router multicast group if forwarding is set */ + if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST)) + ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters); + } return ndev; err_release: @@ -542,7 +546,7 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex, #ifdef CONFIG_IPV6_MROUTE if ((all || type == NETCONFA_MC_FORWARDING) && nla_put_s32(skb, NETCONFA_MC_FORWARDING, - devconf->mc_forwarding) < 0) + atomic_read(&devconf->mc_forwarding)) < 0) goto nla_put_failure; #endif if ((all || type == NETCONFA_PROXY_NEIGH) && @@ -2577,7 +2581,7 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, __u32 valid_lft, u32 prefered_lft) { struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1); - int create = 0; + int create = 0, update_lft = 0; if (!ifp && valid_lft) { int max_addresses = in6_dev->cnf.max_addresses; @@ -2621,19 +2625,32 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev, unsigned long now; u32 stored_lft; - /* Update lifetime (RFC4862 5.5.3 e) - * We deviate from RFC4862 by honoring all Valid Lifetimes to - * improve the reaction of SLAAC to renumbering events - * (draft-gont-6man-slaac-renum-06, Section 4.2) - */ + /* update lifetime (RFC2462 5.5.3 e) */ spin_lock_bh(&ifp->lock); now = jiffies; if (ifp->valid_lft > (now - ifp->tstamp) / HZ) stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ; else stored_lft = 0; - if (!create && stored_lft) { + const u32 minimum_lft = min_t(u32, + stored_lft, MIN_VALID_LIFETIME); + valid_lft = max(valid_lft, minimum_lft); + + /* RFC4862 Section 5.5.3e: + * "Note that the preferred lifetime of the + * corresponding address is always reset to + * the Preferred Lifetime in the received + * Prefix Information option, regardless of + * whether the valid lifetime is also reset or + * ignored." + * + * So we should always update prefered_lft here. 
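Annotation: the lifetime handling restored above is RFC 4862's defence against forged RAs deprecating addresses: a received Valid Lifetime may only lower the stored one as far as MIN_VALID_LIFETIME (two hours in the kernel). The clamp in isolation:

static u32 clamp_valid_lft(u32 advertised, u32 stored)
{
	/* An off-path forgery cannot push the lifetime below
	 * min(stored, 2h), so it cannot instantly kill the address.
	 */
	return max(advertised, min_t(u32, stored, MIN_VALID_LIFETIME));
}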
+ */ + update_lft = 1; + } + + if (update_lft) { ifp->valid_lft = valid_lft; ifp->prefered_lft = prefered_lft; ifp->tstamp = now; @@ -3699,6 +3716,7 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister) struct inet6_dev *idev; struct inet6_ifaddr *ifa, *tmp; bool keep_addr = false; + bool was_ready; int state, i; ASSERT_RTNL(); @@ -3764,7 +3782,10 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister) addrconf_del_rs_timer(idev); - /* Step 2: clear flags for stateless addrconf */ + /* Step 2: clear flags for stateless addrconf, repeated down + * detection + */ + was_ready = idev->if_flags & IF_READY; if (!unregister) idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); @@ -3838,7 +3859,7 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister) if (unregister) { ipv6_ac_destroy_dev(idev); ipv6_mc_destroy_dev(idev); - } else { + } else if (was_ready) { ipv6_mc_down(idev); } @@ -4962,6 +4983,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) goto error; + spin_lock_bh(&ifa->lock); if (!((ifa->flags&IFA_F_PERMANENT) && (ifa->prefered_lft == INFINITY_LIFE_TIME))) { preferred = ifa->prefered_lft; @@ -4983,6 +5005,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, preferred = INFINITY_LIFE_TIME; valid = INFINITY_LIFE_TIME; } + spin_unlock_bh(&ifa->lock); if (!ipv6_addr_any(&ifa->peer_addr)) { if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 || @@ -5496,7 +5519,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE - array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; + array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding); #endif array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6; array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad; @@ -7115,26 +7138,8 @@ int __init addrconf_init(void) goto out_nowq; } - /* The addrconf netdev notifier requires that loopback_dev - * has it's ipv6 private information allocated and setup - * before it can bring up and give link-local addresses - * to other devices which are up. - * - * Unfortunately, loopback_dev is not necessarily the first - * entry in the global dev_base list of net devices. In fact, - * it is likely to be the very last entry on that list. - * So this causes the notifier registry below to try and - * give link-local addresses to all devices besides loopback_dev - * first, then loopback_dev, which cases all the non-loopback_dev - * devices to fail to get a link-local address. - * - * So, as a temporary fix, allocate the ipv6 structure for - * loopback_dev first by hand. - * Longer term, all of the dependencies ipv6 has upon the loopback - * device and it being up should be removed. - */ rtnl_lock(); - idev = ipv6_add_dev(init_net.loopback_dev); + idev = ipv6_add_dev(blackhole_netdev); rtnl_unlock(); if (IS_ERR(idev)) { err = PTR_ERR(idev); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index f141a441490a801d96b5060175e23db438c50d53..9c9ab6b4aec058d6a2d19c640d876935eea23fe5 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -439,11 +439,13 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; + const struct proto *prot; int err = 0; /* If the socket has its own bind function then use it. 
*/ - if (sk->sk_prot->bind) - return sk->sk_prot->bind(sk, uaddr, addr_len); + prot = READ_ONCE(sk->sk_prot); + if (prot->bind) + return prot->bind(sk, uaddr, addr_len); if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; @@ -551,6 +553,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) void __user *argp = (void __user *)arg; struct sock *sk = sock->sk; struct net *net = sock_net(sk); + const struct proto *prot; switch (cmd) { case SIOCADDRT: @@ -568,9 +571,11 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCSIFDSTADDR: return addrconf_set_dstaddr(net, argp); default: - if (!sk->sk_prot->ioctl) + /* IPV6_ADDRFORM can change sk->sk_prot under us. */ + prot = READ_ONCE(sk->sk_prot); + if (!prot->ioctl) return -ENOIOCTLCMD; - return sk->sk_prot->ioctl(sk, cmd, arg); + return prot->ioctl(sk, cmd, arg); } /*NOTREACHED*/ return 0; @@ -632,11 +637,14 @@ INDIRECT_CALLABLE_DECLARE(int udpv6_sendmsg(struct sock *, struct msghdr *, int inet6_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; + const struct proto *prot; if (unlikely(inet_send_prepare(sk))) return -EAGAIN; - return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udpv6_sendmsg, + /* IPV6_ADDRFORM can change sk->sk_prot under us. */ + prot = READ_ONCE(sk->sk_prot); + return INDIRECT_CALL_2(prot->sendmsg, tcp_sendmsg, udpv6_sendmsg, sk, msg, size); } @@ -646,13 +654,16 @@ int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; + const struct proto *prot; int addr_len = 0; int err; if (likely(!(flags & MSG_ERRQUEUE))) sock_rps_record_flow(sk); - err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udpv6_recvmsg, + /* IPV6_ADDRFORM can change sk->sk_prot under us. 
*/ + prot = READ_ONCE(sk->sk_prot); + err = INDIRECT_CALL_2(prot->recvmsg, tcp_recvmsg, udpv6_recvmsg, sk, msg, size, flags & MSG_DONTWAIT, flags & ~MSG_DONTWAIT, &addr_len); if (err >= 0) diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 7f2ffc7b1f75a64c5dbfc930870ea2829d4f6fcb..20c7bef6829e1fc89188545c9f4d7f33c7937a0b 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -483,6 +483,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info struct page *page; struct sk_buff *trailer; int tailen = esp->tailen; + unsigned int allocsz; if (x->encap) { int err = esp6_output_encap(x, skb, esp); @@ -491,6 +492,10 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info return err; } + allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES); + if (allocsz > ESP_SKB_FRAG_MAXSIZE) + goto cow; + if (!skb_cloned(skb)) { if (tailen <= skb_tailroom(skb)) { nfrags = 1; @@ -708,7 +713,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb); u32 padto; - padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached)); + padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached)); if (skb->len < padto) esp.tfclen = padto - skb->len; } @@ -808,8 +813,7 @@ int esp6_input_done2(struct sk_buff *skb, int err) struct tcphdr *th; offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); - - if (offset < 0) { + if (offset == -1) { err = -EINVAL; goto out; } diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 4af56affaafd436fbd35ade87ffd2b7c8e6d4d91..1c3f02d05d2bfdaad4e915a1dca29c9d71b310bf 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -198,6 +198,9 @@ static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x, ipv6_skip_exthdr(skb, 0, &proto, &frag); } + if (proto == IPPROTO_IPIP) + skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6; + __skb_pull(skb, skb_transport_offset(skb)); ops = rcu_dereference(inet6_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index a1ac0e3d8c60c52fa48d4bccb9d1cd4fadb46fa3..47447f0241df604bc657acbd9ce37008b49bacf0 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -610,7 +610,11 @@ int ila_xlat_init_net(struct net *net) if (err) return err; - rhashtable_init(&ilan->xlat.rhash_table, &rht_params); + err = rhashtable_init(&ilan->xlat.rhash_table, &rht_params); + if (err) { + free_bucket_spinlocks(ilan->xlat.locks); + return err; + } return 0; } diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 67c9114835c84864a3353c6f1c16853ea62a21c5..40203255ed88b90e235e0ed4a2b1a6d7d01a8681 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -308,7 +308,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, return -EADDRNOTAVAIL; } -static u32 inet6_sk_port_offset(const struct sock *sk) +static u64 inet6_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); @@ -320,7 +320,7 @@ static u32 inet6_sk_port_offset(const struct sock *sk) int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { - u32 port_offset = 0; + u64 port_offset = 0; if (!inet_sk(sk)->inet_num) port_offset = inet6_sk_port_offset(sk); @@ -333,11 +333,8 @@ int inet6_hash(struct sock *sk) { int err = 0; - if (sk->sk_state != TCP_CLOSE) { - local_bh_disable(); + if (sk->sk_state != TCP_CLOSE) err = __inet_hash(sk, NULL); - 
local_bh_enable(); - } return err; } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index de8b80c8977c32e1d0a27014578b82ec869bf3d8..3a6754699ad046f9e2dfb3baa31bec95dc508b09 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -110,7 +110,7 @@ void fib6_update_sernum(struct net *net, struct fib6_info *f6i) fn = rcu_dereference_protected(f6i->fib6_node, lockdep_is_held(&f6i->fib6_table->tb6_lock)); if (fn) - fn->fn_sernum = fib6_new_sernum(net); + WRITE_ONCE(fn->fn_sernum, fib6_new_sernum(net)); } /* @@ -588,12 +588,13 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb, spin_unlock_bh(&table->tb6_lock); if (res > 0) { cb->args[4] = 1; - cb->args[5] = w->root->fn_sernum; + cb->args[5] = READ_ONCE(w->root->fn_sernum); } } else { - if (cb->args[5] != w->root->fn_sernum) { + int sernum = READ_ONCE(w->root->fn_sernum); + if (cb->args[5] != sernum) { /* Begin at the root if the tree changed */ - cb->args[5] = w->root->fn_sernum; + cb->args[5] = sernum; w->state = FWS_INIT; w->node = w->root; w->skip = w->count; @@ -1343,7 +1344,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt, /* paired with smp_rmb() in rt6_get_cookie_safe() */ smp_wmb(); while (fn) { - fn->fn_sernum = sernum; + WRITE_ONCE(fn->fn_sernum, sernum); fn = rcu_dereference_protected(fn->parent, lockdep_is_held(&rt->fib6_table->tb6_lock)); } @@ -2172,8 +2173,8 @@ static int fib6_clean_node(struct fib6_walker *w) }; if (c->sernum != FIB6_NO_SERNUM_CHANGE && - w->node->fn_sernum != c->sernum) - w->node->fn_sernum = c->sernum; + READ_ONCE(w->node->fn_sernum) != c->sernum) + WRITE_ONCE(w->node->fn_sernum, c->sernum); if (!c->func) { WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE); @@ -2537,7 +2538,7 @@ static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter, iter->w.state = FWS_INIT; iter->w.node = iter->w.root; iter->w.args = iter; - iter->sernum = iter->w.root->fn_sernum; + iter->sernum = READ_ONCE(iter->w.root->fn_sernum); INIT_LIST_HEAD(&iter->w.lh); fib6_walker_link(net, &iter->w); } @@ -2565,8 +2566,10 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl, static void ipv6_route_check_sernum(struct ipv6_route_iter *iter) { - if (iter->sernum != iter->w.root->fn_sernum) { - iter->sernum = iter->w.root->fn_sernum; + int sernum = READ_ONCE(iter->w.root->fn_sernum); + + if (iter->sernum != sernum) { + iter->sernum = sernum; iter->w.state = FWS_INIT; iter->w.node = iter->w.root; WARN_ON(iter->w.skip); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index aa673a6a7e4328d03e89527ed3daaccd69c39084..ceb85c67ce3952b7142eeec29bff46a7eaf5217b 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -450,8 +450,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, err = -EINVAL; goto done; } - if (fl_shared_exclusive(fl) || fl->opt) + if (fl_shared_exclusive(fl) || fl->opt) { + WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1); static_branch_deferred_inc(&ipv6_flowlabel_exclusive); + } return fl; done: diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 09fa49bbf617db53eb04fd9d739276fc3a14c53d..9a0263f2523237058ec316d4cf007a9917460681 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -755,6 +755,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb, fl6->daddr = key->u.ipv6.dst; fl6->flowlabel = key->label; fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL); + fl6->fl6_gre_key = tunnel_id_to_key32(key->tun_id); dsfield = key->tos; flags = key->tun_flags & @@ -990,6 +991,7 @@ 
static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, fl6.daddr = key->u.ipv6.dst; fl6.flowlabel = key->label; fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL); + fl6.fl6_gre_key = tunnel_id_to_key32(key->tun_id); dsfield = key->tos; if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT)) @@ -1098,6 +1100,7 @@ static void ip6gre_tnl_link_config_common(struct ip6_tnl *t) fl6->flowi6_oif = p->link; fl6->flowlabel = 0; fl6->flowi6_proto = IPPROTO_GRE; + fl6->fl6_gre_key = t->parms.o_key; if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; @@ -1543,7 +1546,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev) static struct inet6_protocol ip6gre_protocol __read_mostly = { .handler = gre_rcv, .err_handler = ip6gre_err, - .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, + .flags = INET6_PROTO_FINAL, }; static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head) diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 06d60662717d1672dfeb9631cc21f5d6f1c92ab3..15ea3d082534d2e6616e3eaecc6623f2b453c12a 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -509,7 +509,7 @@ int ip6_mc_input(struct sk_buff *skb) /* * IPv6 multicast router mode is now supported ;) */ - if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && + if (atomic_read(&dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding) && !(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index a80f90bf3ae7dc1aec904fd93b3d8e8c87a926e4..15c8eef1ef443854e207bfaa238cb7aa321fedb9 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -113,6 +113,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, if (likely(ops && ops->callbacks.gso_segment)) { skb_reset_transport_header(skb); segs = ops->callbacks.gso_segment(skb, features); + if (!segs) + skb->network_header = skb_mac_header(skb) + nhoff - skb->head; } if (IS_ERR_OR_NULL(segs)) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 54cabf1c2ae1500ff9c03a7c2c230e07dcf409c7..05e19e5d65140276f2060cf1851845b19fb4d6f3 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -508,7 +508,7 @@ int ip6_forward(struct sk_buff *skb) goto drop; if (!net->ipv6.devconf_all->disable_policy && - !idev->cnf.disable_policy && + (!idev || !idev->cnf.disable_policy) && !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); goto drop; @@ -1432,8 +1432,6 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, if (np->frag_size) mtu = np->frag_size; } - if (mtu < IPV6_MIN_MTU) - return -EINVAL; cork->base.fragsize = mtu; cork->base.gso_size = ipc6->gso_size; cork->base.tx_flags = 0; @@ -1495,8 +1493,6 @@ static int __ip6_append_data(struct sock *sk, fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + (opt ? opt->opt_nflen : 0); - maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - - sizeof(struct frag_hdr); headersize = sizeof(struct ipv6hdr) + (opt ? 
opt->opt_flen + opt->opt_nflen : 0) + @@ -1504,6 +1500,13 @@ static int __ip6_append_data(struct sock *sk, sizeof(struct frag_hdr) : 0) + rt->rt6i_nfheader_len; + if (mtu <= fragheaderlen || + ((mtu - fragheaderlen) & ~7) + fragheaderlen <= sizeof(struct frag_hdr)) + goto emsgsize; + + maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - + sizeof(struct frag_hdr); + /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit * the first fragment */ diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 08441f06afd482066fa1381f8f0d1fb436a1d73d..3a2741569b84764797021a975345db6a687e1fc7 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1066,14 +1066,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t, if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false, 0, IFA_F_TENTATIVE))) - pr_warn("%s xmit: Local address not yet configured!\n", - p->name); + pr_warn_ratelimited("%s xmit: Local address not yet configured!\n", + p->name); else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) && !ipv6_addr_is_multicast(raddr) && unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev, true, 0, IFA_F_TENTATIVE))) - pr_warn("%s xmit: Routing loop! Remote address found on this node!\n", - p->name); + pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n", + p->name); else ret = 1; rcu_read_unlock(); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 06b0d2c329b94b25f2945b220347c5d9defdc72e..5f0ac47acc74ba014589d2a44de2f38fd145d636 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -248,7 +248,9 @@ static int __net_init ip6mr_rules_init(struct net *net) return 0; err2: + rtnl_lock(); ip6mr_free_table(mrt); + rtnl_unlock(); err1: fib_rules_unregister(ops); return err; @@ -738,7 +740,7 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify, in6_dev = __in6_dev_get(dev); if (in6_dev) { - in6_dev->cnf.mc_forwarding--; + atomic_dec(&in6_dev->cnf.mc_forwarding); inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, dev->ifindex, &in6_dev->cnf); @@ -906,7 +908,7 @@ static int mif6_add(struct net *net, struct mr_table *mrt, in6_dev = __in6_dev_get(dev); if (in6_dev) { - in6_dev->cnf.mc_forwarding++; + atomic_inc(&in6_dev->cnf.mc_forwarding); inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, dev->ifindex, &in6_dev->cnf); @@ -1556,7 +1558,7 @@ static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk) } else { rcu_assign_pointer(mrt->mroute_sk, sk); sock_set_flag(sk, SOCK_RCU_FREE); - net->ipv6.devconf_all->mc_forwarding++; + atomic_inc(&net->ipv6.devconf_all->mc_forwarding); } write_unlock_bh(&mrt_lock); @@ -1589,7 +1591,7 @@ int ip6mr_sk_done(struct sock *sk) * so the RCU grace period before sk freeing * is guaranteed by sk_destruct() */ - net->ipv6.devconf_all->mc_forwarding--; + atomic_dec(&net->ipv6.devconf_all->mc_forwarding); write_unlock_bh(&mrt_lock); inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING, diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 43a894bf9a1be9ba2c31b5f8d3446a6a8dcf6387..1ae33a882b9a34bcd0aed97311e29d1c855c6608 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -473,7 +473,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, &tcp_prot, 1); local_bh_enable(); - sk->sk_prot = &tcp_prot; + /* Paired with READ_ONCE(sk->sk_prot) in net/ipv6/af_inet6.c */ + WRITE_ONCE(sk->sk_prot, &tcp_prot); 
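For context, both halves of the sk_prot annotation side by side: IPV6_ADDRFORM rewrites sk->sk_prot on a live socket with the socket lock held, while the af_inet6 entry points changed earlier in this diff read it locklessly; WRITE_ONCE()/READ_ONCE() keep the pointer update single-copy atomic:

/* Writer, do_ipv6_setsockopt() above (socket locked): */
WRITE_ONCE(sk->sk_prot, &tcp_prot);

/* Readers, e.g. inet6_bind()/inet6_ioctl()/inet6_sendmsg(): */
const struct proto *prot = READ_ONCE(sk->sk_prot);

if (prot->bind)
	return prot->bind(sk, uaddr, addr_len);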
icsk->icsk_af_ops = &ipv4_specific; sk->sk_socket->ops = &inet_stream_ops; sk->sk_family = PF_INET; @@ -487,7 +488,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, sock_prot_inuse_add(net, sk->sk_prot, -1); sock_prot_inuse_add(net, prot, 1); local_bh_enable(); - sk->sk_prot = prot; + /* Paired with READ_ONCE(sk->sk_prot) in net/ipv6/af_inet6.c */ + WRITE_ONCE(sk->sk_prot, prot); sk->sk_socket->ops = &inet_dgram_ops; sk->sk_family = PF_INET; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 654bf4ca61260ba6650ac714e0a1d729571556d6..a44ad9637e8acbc0f76033a678fd19b8bf745d33 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -156,14 +156,10 @@ void rt6_uncached_list_del(struct rt6_info *rt) } } -static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) +static void rt6_uncached_list_flush_dev(struct net_device *dev) { - struct net_device *loopback_dev = net->loopback_dev; int cpu; - if (dev == loopback_dev) - return; - for_each_possible_cpu(cpu) { struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); struct rt6_info *rt; @@ -174,7 +170,7 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) struct net_device *rt_dev = rt->dst.dev; if (rt_idev->dev == dev) { - rt->rt6i_idev = in6_dev_get(loopback_dev); + rt->rt6i_idev = in6_dev_get(blackhole_netdev); in6_dev_put(rt_idev); } @@ -372,13 +368,12 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, { struct rt6_info *rt = (struct rt6_info *)dst; struct inet6_dev *idev = rt->rt6i_idev; - struct net_device *loopback_dev = - dev_net(dev)->loopback_dev; - if (idev && idev->dev != loopback_dev) { - struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev); - if (loopback_idev) { - rt->rt6i_idev = loopback_idev; + if (idev && idev->dev != blackhole_netdev) { + struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev); + + if (blackhole_idev) { + rt->rt6i_idev = blackhole_idev; in6_dev_put(idev); } } @@ -2674,7 +2669,7 @@ static void ip6_link_failure(struct sk_buff *skb) if (from) { fn = rcu_dereference(from->fib6_node); if (fn && (rt->rt6i_flags & RTF_DEFAULT)) - fn->fn_sernum = -1; + WRITE_ONCE(fn->fn_sernum, -1); } } rcu_read_unlock(); @@ -4398,7 +4393,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes) struct inet6_dev *idev; int type; - if (netif_is_l3_master(skb->dev) && + if (netif_is_l3_master(skb->dev) || dst->dev == net->loopback_dev) idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif)); else @@ -4799,7 +4794,7 @@ void rt6_sync_down_dev(struct net_device *dev, unsigned long event) void rt6_disable_ip(struct net_device *dev, unsigned long event) { rt6_sync_down_dev(dev, event); - rt6_uncached_list_flush_dev(dev_net(dev), dev); + rt6_uncached_list_flush_dev(dev); neigh_ifdown(&nd_tbl, dev); } diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index d2f8138e5a73a5391963659be446f58f50d9d78e..bff6bc75cfb75b57c6e972cdb418d40f569aea51 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@ -377,7 +377,11 @@ static int __net_init seg6_net_init(struct net *net) net->ipv6.seg6_data = sdata; #ifdef CONFIG_IPV6_SEG6_HMAC - seg6_hmac_net_init(net); + if (seg6_hmac_net_init(net)) { + kfree(rcu_dereference_raw(sdata->tun_src)); + kfree(sdata); + return -ENOMEM; + }; #endif return 0; @@ -391,7 +395,7 @@ static void __net_exit seg6_net_exit(struct net *net) seg6_hmac_net_exit(net); #endif - kfree(sdata->tun_src); + kfree(rcu_dereference_raw(sdata->tun_src)); kfree(sdata); } diff 
--git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 687d95dce08522bebf6d0c3b0473cb8eb3f2a17d..29bc4e7c3046e2cc2a395ccf2cb452428df72790 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -405,9 +405,7 @@ int __net_init seg6_hmac_net_init(struct net *net) { struct seg6_pernet_data *sdata = seg6_pernet(net); - rhashtable_init(&sdata->hmac_infos, &rht_params); - - return 0; + return rhashtable_init(&sdata->hmac_infos, &rht_params); } EXPORT_SYMBOL(seg6_hmac_net_init); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index df33145b876c69eb0c16d2be0ec6441c71a30e8e..b87b04526e651bac97a18bb26087928b7d1f9619 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -107,7 +107,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) if (dst && dst_hold_safe(dst)) { const struct rt6_info *rt = (const struct rt6_info *)dst; - sk->sk_rx_dst = dst; + rcu_assign_pointer(sk->sk_rx_dst, dst); inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); } @@ -1482,15 +1482,18 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ - struct dst_entry *dst = sk->sk_rx_dst; + struct dst_entry *dst; + + dst = rcu_dereference_protected(sk->sk_rx_dst, + lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || dst->ops->check(dst, np->rx_dst_cookie) == NULL) { + RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); - sk->sk_rx_dst = NULL; } } @@ -1842,7 +1845,7 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { - struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); + struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 503706397839ce460951e523bd6a69e28c6dd153..e378ebee4a4dad5053282ec66d23e74f35373dbb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -940,7 +940,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, struct dst_entry *dst = skb_dst(skb); int ret; - if (unlikely(sk->sk_rx_dst != dst)) + if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst)) udp6_sk_rx_dst_set(sk, dst); if (!uh->check && !udp_sk(sk)->no_check6_rx) { @@ -1054,7 +1054,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb) skb->sk = sk; skb->destructor = sock_efree; - dst = READ_ONCE(sk->sk_rx_dst); + dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); @@ -1609,8 +1609,10 @@ void udpv6_destroy_sock(struct sock *sk) if (encap_destroy) encap_destroy(sk); } - if (up->encap_enabled) + if (up->encap_enabled) { static_branch_dec(&udpv6_encap_needed_key); + udp_encap_disable(); + } } inet6_destroy_sock(sk); diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 6abb45a671994128d618e82f192ec5d943e29185..ee349c2438782315b129ace5511b320af6e7b828 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -52,6 +52,19 @@ static int __xfrm6_output_finish(struct net *net, struct sock *sk, struct sk_buf return xfrm_output(sk, skb); } +static int xfrm6_noneed_fragment(struct sk_buff *skb) +{ + struct frag_hdr *fh; + u8 prevhdr = ipv6_hdr(skb)->nexthdr; + + if (prevhdr != NEXTHDR_FRAGMENT) + return 0; + fh = (struct frag_hdr *)(skb->data + 
sizeof(struct ipv6hdr)); + if (fh->nexthdr == NEXTHDR_ESP || fh->nexthdr == NEXTHDR_AUTH) + return 1; + return 0; +} + static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); @@ -80,6 +93,9 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb) xfrm6_local_rxpmtu(skb, mtu); kfree_skb(skb); return -EMSGSIZE; + } else if (toobig && xfrm6_noneed_fragment(skb)) { + skb->ignore_df = 1; + goto skip_frag; } else if (!skb->ignore_df && toobig && skb->sk) { xfrm_local_error(skb, mtu); kfree_skb(skb); diff --git a/net/key/af_key.c b/net/key/af_key.c index ef9b4ac03e7b74e5ddad0ca2d3d337583fc25fee..bd9b5c573b5a4dd328d44af315a9efd5ab51bcc2 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1703,7 +1703,7 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad xfrm_probe_algs(); - supp_skb = compose_sadb_supported(hdr, GFP_KERNEL); + supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO); if (!supp_skb) { if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC) pfk->registered &= ~(1 << hdr->sadb_msg_satype); @@ -2627,7 +2627,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb, } return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i, - kma ? &k : NULL, net, NULL); + kma ? &k : NULL, net, NULL, 0); out: return err; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index ac5cadd02cfa8f6ee937ee91252a71a750d447ee..99a37c411323edcf59733486a07a8a1e1ce6b3a6 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -276,6 +276,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) { struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); + struct net_device *dev = NULL; struct llc_sap *sap; int rc = -EINVAL; @@ -287,14 +288,14 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) goto out; rc = -ENODEV; if (sk->sk_bound_dev_if) { - llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); - if (llc->dev && addr->sllc_arphrd != llc->dev->type) { - dev_put(llc->dev); - llc->dev = NULL; + dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); + if (dev && addr->sllc_arphrd != dev->type) { + dev_put(dev); + dev = NULL; } } else - llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd); - if (!llc->dev) + dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd); + if (!dev) goto out; rc = -EUSERS; llc->laddr.lsap = llc_ui_autoport(); @@ -304,6 +305,11 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) sap = llc_sap_open(llc->laddr.lsap, NULL); if (!sap) goto out; + + /* Note: We do not expect errors from this point.
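* The reference held on @dev is transferred to llc->dev below and
* @dev is cleared, so the dev_put(dev) at the common exit label is a
* no-op on success and still drops the reference on every error path.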
*/ + llc->dev = dev; + dev = NULL; + memcpy(llc->laddr.mac, llc->dev->dev_addr, IFHWADDRLEN); memcpy(&llc->addr, addr, sizeof(llc->addr)); /* assign new connection to its SAP */ @@ -311,6 +317,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) sock_reset_flag(sk, SOCK_ZAPPED); rc = 0; out: + dev_put(dev); return rc; } @@ -333,6 +340,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); + struct net_device *dev = NULL; struct llc_sap *sap; int rc = -EINVAL; @@ -348,25 +356,26 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) rc = -ENODEV; rcu_read_lock(); if (sk->sk_bound_dev_if) { - llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); - if (llc->dev) { + dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); + if (dev) { if (is_zero_ether_addr(addr->sllc_mac)) - memcpy(addr->sllc_mac, llc->dev->dev_addr, + memcpy(addr->sllc_mac, dev->dev_addr, IFHWADDRLEN); - if (addr->sllc_arphrd != llc->dev->type || + if (addr->sllc_arphrd != dev->type || !ether_addr_equal(addr->sllc_mac, - llc->dev->dev_addr)) { + dev->dev_addr)) { rc = -EINVAL; - llc->dev = NULL; + dev = NULL; } } - } else - llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd, + } else { + dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd, addr->sllc_mac); - if (llc->dev) - dev_hold(llc->dev); + } + if (dev) + dev_hold(dev); rcu_read_unlock(); - if (!llc->dev) + if (!dev) goto out; if (!addr->sllc_sap) { rc = -EUSERS; @@ -399,6 +408,11 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) goto out_put; } } + + /* Note: We do not expect errors from this point. 
*/ + llc->dev = dev; + dev = NULL; + llc->laddr.lsap = addr->sllc_sap; memcpy(llc->laddr.mac, addr->sllc_mac, IFHWADDRLEN); memcpy(&llc->addr, addr, sizeof(llc->addr)); @@ -409,6 +423,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) out_put: llc_sap_put(sap); out: + dev_put(dev); release_sock(sk); return rc; } diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 190f300d8923c8392df58aba7e8e66eccc1e9038..4b4ab1961068fcbcc170d5eb960eb03a344a9058 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -9,7 +9,7 @@ * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2007-2010, Intel Corporation * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2021 Intel Corporation + * Copyright (C) 2018 - 2022 Intel Corporation */ #include <linux/ieee80211.h> @@ -626,6 +626,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, return -EINVAL; } + if (test_sta_flag(sta, WLAN_STA_MFP) && + !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { + ht_dbg(sdata, + "MFP STA not authorized - deny BA session request %pM tid %d\n", + sta->sta.addr, tid); + return -EINVAL; + } + /* * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a * member of an IBSS, and has no other existing Block Ack agreement diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d46ed4cbe771757b48b15090d33a17a8d4b83096..8010967a68741447924990329b686f7e853a6ec7 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2076,14 +2076,12 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, const struct mesh_setup *setup) { u8 *new_ie; - const u8 *old_ie; struct ieee80211_sub_if_data *sdata = container_of(ifmsh, struct ieee80211_sub_if_data, u.mesh); int i; /* allocate information elements */ new_ie = NULL; - old_ie = ifmsh->ie; if (setup->ie_len) { new_ie = kmemdup(setup->ie, setup->ie_len, @@ -2093,7 +2091,6 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, } ifmsh->ie_len = setup->ie_len; ifmsh->ie = new_ie; - kfree(old_ie); /* now copy the rest of the setup parameters */ ifmsh->mesh_id_len = setup->mesh_id_len; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 7f2be08b72a56a80be12d822a75c66420af38591..fe8f586886b414e22d4cebce25d13d20c69b3d6d 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -374,7 +374,7 @@ struct ieee80211_mgd_auth_data { u8 key[WLAN_KEY_LEN_WEP104]; u8 key_len, key_idx; - bool done; + bool done, waiting; bool peer_confirmed; bool timeout_started; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 778bf262418b546dad1f797a3a8ee77f539db319..0dba353d3f8fe16c786f7cc383778e6a0b8a60f7 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -37,6 +37,7 @@ #define IEEE80211_AUTH_TIMEOUT_SAE (HZ * 2) #define IEEE80211_AUTH_MAX_TRIES 3 #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) +#define IEEE80211_AUTH_WAIT_SAE_RETRY (HZ * 2) #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) #define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2) #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) @@ -2999,8 +3000,15 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, (status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED || (auth_transaction == 1 && (status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT || - status_code == WLAN_STATUS_SAE_PK)))) + status_code == WLAN_STATUS_SAE_PK)))) { + /* waiting for userspace now */ + ifmgd->auth_data->waiting = true; + ifmgd->auth_data->timeout = + jiffies + IEEE80211_AUTH_WAIT_SAE_RETRY; + ifmgd->auth_data->timeout_started = true; + run_again(sdata, 
ifmgd->auth_data->timeout); return; + } sdata_info(sdata, "%pM denied authentication (status %d)\n", mgmt->sa, status_code); @@ -4526,10 +4534,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) if (ifmgd->auth_data && ifmgd->auth_data->timeout_started && time_after(jiffies, ifmgd->auth_data->timeout)) { - if (ifmgd->auth_data->done) { + if (ifmgd->auth_data->done || ifmgd->auth_data->waiting) { /* - * ok ... we waited for assoc but userspace didn't, - * so let's just kill the auth data + * ok ... we waited for assoc or continuation but + * userspace didn't do it, so kill the auth data */ ieee80211_destroy_auth_data(sdata, false); } else if (ieee80211_auth(sdata)) { diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6a24431b9009502c54b51e5a239435fcdae37883..1e7614abd947de8fab8b3bfc1c11cabd8bbd10e3 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2910,13 +2910,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) ether_addr_equal(sdata->vif.addr, hdr->addr3)) return RX_CONTINUE; - ac = ieee80211_select_queue_80211(sdata, skb, hdr); + ac = ieee802_1d_to_ac[skb->priority]; q = sdata->vif.hw_queue[ac]; if (ieee80211_queue_stopped(&local->hw, q)) { IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); return RX_DROP_MONITOR; } - skb_set_queue_mapping(skb, q); + skb_set_queue_mapping(skb, ac); if (!--mesh_hdr->ttl) { if (!is_multicast_ether_addr(hdr->addr1)) @@ -4800,7 +4800,7 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, goto drop; break; case RX_ENC_VHT: - if (WARN_ONCE(status->rate_idx > 9 || + if (WARN_ONCE(status->rate_idx > 11 || !status->nss || status->nss > 8, "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 63d032191e6269acff9e48b51b6c0e09a411ceb1..60332fdb6dd4429fb1e22a645476b3954d9f04e3 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -406,14 +406,15 @@ static int __nf_register_net_hook(struct net *net, int pf, p = nf_entry_dereference(*pp); new_hooks = nf_hook_entries_grow(p, reg); - if (!IS_ERR(new_hooks)) + if (!IS_ERR(new_hooks)) { + hooks_validate(new_hooks); rcu_assign_pointer(*pp, new_hooks); + } mutex_unlock(&nf_hook_mutex); if (IS_ERR(new_hooks)) return PTR_ERR(new_hooks); - hooks_validate(new_hooks); #ifdef CONFIG_NETFILTER_INGRESS if (nf_ingress_hook(reg, pf)) net_inc_ingress_queue(); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f4cf26b606f92ba9fc01ce8b72660707c326188c..8369af0c50eab3a1773de0d385d2d72b1aa72302 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1832,15 +1832,17 @@ nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state) pr_debug("nf_conntrack_in: Can't track with proto module\n"); nf_conntrack_put(&ct->ct_general); skb->_nfct = 0; - NF_CT_STAT_INC_ATOMIC(state->net, invalid); - if (ret == -NF_DROP) - NF_CT_STAT_INC_ATOMIC(state->net, drop); /* Special case: TCP tracker reports an attempt to reopen a * closed/aborted connection. We have to go back and create a * fresh conntrack. 
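* Note the ordering: the -NF_REPEAT case bails out below before the
* invalid/drop counters are bumped, so a packet that merely needs
* re-evaluation is no longer accounted as invalid.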
*/ if (ret == -NF_REPEAT) goto repeat; + + NF_CT_STAT_INC_ATOMIC(state->net, invalid); + if (ret == -NF_DROP) + NF_CT_STAT_INC_ATOMIC(state->net, drop); + ret = -ret; goto out; } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index c6bcc28ae33876601f735237e8c515de490ebb0c..eeeaa34b3e7b5d5cecaf24554fa8e51641a89abe 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2283,7 +2283,8 @@ ctnetlink_create_conntrack(struct net *net, if (helper->from_nlattr) helper->from_nlattr(helpinfo, ct); - /* not in hash table yet so not strictly necessary */ + /* disable helper auto-assignment for this entry */ + ct->status |= IPS_HELPER; RCU_INIT_POINTER(help->helper, helper); } } else { diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 71892822bbf5de2a55230c5aea4a9ac009f6c1da..dd1fff72c736d163876ab915f83b230e549995e4 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -292,6 +292,67 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len) return -ENOENT; } +static int +bpf_getorigdst_impl(struct sock *sk, int optval, void *user, int *len, int dir) +{ + const struct inet_sock *inet = inet_sk(sk); + const struct nf_conntrack_tuple_hash *h; + struct nf_conntrack_tuple tuple; + + memset(&tuple, 0, sizeof(tuple)); + + tuple.src.u3.ip = inet->inet_rcv_saddr; + tuple.src.u.tcp.port = inet->inet_sport; + tuple.dst.u3.ip = inet->inet_daddr; + tuple.dst.u.tcp.port = inet->inet_dport; + tuple.src.l3num = PF_INET; + tuple.dst.protonum = sk->sk_protocol; + + /* We only do TCP and SCTP at the moment: is there a better way? */ + if (tuple.dst.protonum != IPPROTO_TCP && + tuple.dst.protonum != IPPROTO_SCTP) { + pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n"); + return -ENOPROTOOPT; + } + + if ((unsigned int)*len < sizeof(struct sockaddr_in)) { + pr_debug("SO_ORIGINAL_DST: len %d not %zu\n", + *len, sizeof(struct sockaddr_in)); + return -EINVAL; + } + + h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple); + if (h) { + struct sockaddr_in sin; + struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); + + sin.sin_family = AF_INET; + if (dir == IP_CT_DIR_REPLY) { + sin.sin_port = ct->tuplehash[IP_CT_DIR_REPLY] + .tuple.src.u.tcp.port; + sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_REPLY] + .tuple.src.u3.ip; + } else { + sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL] + .tuple.dst.u.tcp.port; + sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL] + .tuple.dst.u3.ip; + } + memset(sin.sin_zero, 0, sizeof(sin.sin_zero)); + + pr_debug("SO_ORIGINAL_DST: %pI4 %u\n", + &sin.sin_addr.s_addr, ntohs(sin.sin_port)); + nf_ct_put(ct); + + memcpy(user, &sin, sizeof(sin)); + return 0; + } + pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n", + &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port), + &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port)); + return -ENOENT; +} + static struct nf_sockopt_ops so_getorigdst = { .pf = PF_INET, .get_optmin = SO_ORIGINAL_DST, @@ -656,6 +717,8 @@ int nf_conntrack_proto_init(void) goto cleanup_sockopt; #endif + bpf_getorigdst_opt = bpf_getorigdst_impl; + return ret; #if IS_ENABLED(CONFIG_IPV6) @@ -667,6 +730,8 @@ int nf_conntrack_proto_init(void) void nf_conntrack_proto_fini(void) { + bpf_getorigdst_opt = NULL; + nf_unregister_sockopt(&so_getorigdst); #if IS_ENABLED(CONFIG_IPV6) nf_unregister_sockopt(&so_getorigdst6); diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c 
index 810cca24b399019862f185a65d064aa56aa8a21c..7626f3e1c70a774968676d1111ee6376284afc17 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -489,6 +489,15 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct, pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir); ct->proto.sctp.vtag[!dir] = ih->init_tag; + + /* don't renew timeout on init retransmit so + * port reuse by client or NAT middlebox cannot + * keep entry alive indefinitely (incl. nat info). + */ + if (new_state == SCTP_CONNTRACK_CLOSED && + old_state == SCTP_CONNTRACK_CLOSED && + nf_ct_is_confirmed(ct)) + ignore = true; } ct->proto.sctp.state = new_state; diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index c8fb2187ad4b2df200273acdbdf2163c831de09b..3f785bdfa942d7149bf65573ca17c3fe3ce2164a 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -354,8 +354,8 @@ static void tcp_options(const struct sk_buff *skb, length, buff); BUG_ON(ptr == NULL); - state->td_scale = - state->flags = 0; + state->td_scale = 0; + state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL; while (length > 0) { int opcode=*ptr++; @@ -840,6 +840,16 @@ static bool nf_conntrack_tcp_established(const struct nf_conn *ct) test_bit(IPS_ASSURED_BIT, &ct->status); } +static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state) +{ + state->td_end = 0; + state->td_maxend = 0; + state->td_maxwin = 0; + state->td_maxack = 0; + state->td_scale = 0; + state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL; +} + /* Returns verdict for packet, or -1 for invalid. */ int nf_conntrack_tcp_packet(struct nf_conn *ct, struct sk_buff *skb, @@ -946,8 +956,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK; ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags = ct->proto.tcp.last_flags; - memset(&ct->proto.tcp.seen[dir], 0, - sizeof(struct ip_ct_tcp_state)); + nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]); break; } ct->proto.tcp.last_index = index; diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index bbd1209694b89bb1164c39ce581f54b24f61e8d4..bb8607ff94bc7a46751724d530efc58d325cdde0 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -46,6 +46,15 @@ void nf_unregister_queue_handler(struct net *net) } EXPORT_SYMBOL(nf_unregister_queue_handler); +static void nf_queue_sock_put(struct sock *sk) +{ +#ifdef CONFIG_INET + sock_gen_put(sk); +#else + sock_put(sk); +#endif +} + static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) { struct nf_hook_state *state = &entry->state; @@ -56,7 +65,7 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) if (state->out) dev_put(state->out); if (state->sk) - sock_put(state->sk); + nf_queue_sock_put(state->sk); #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (entry->physin) @@ -91,16 +100,17 @@ static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry) } /* Bump dev refs so they don't vanish while packet is out */ -void nf_queue_entry_get_refs(struct nf_queue_entry *entry) +bool nf_queue_entry_get_refs(struct nf_queue_entry *entry) { struct nf_hook_state *state = &entry->state; + if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt)) + return false; + if (state->in) dev_hold(state->in); if (state->out) dev_hold(state->out); - if (state->sk) - sock_hold(state->sk); #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) if (entry->physin) @@ -108,6 +118,7 @@ void nf_queue_entry_get_refs(struct 
nf_queue_entry *entry) if (entry->physout) dev_hold(entry->physout); #endif + return true; } EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs); @@ -178,6 +189,18 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, break; } + if (skb_sk_is_prefetched(skb)) { + struct sock *sk = skb->sk; + + if (!sk_is_refcounted(sk)) { + if (!refcount_inc_not_zero(&sk->sk_refcnt)) + return -ENOTCONN; + + /* drop refcount on skb_orphan */ + skb->destructor = sock_edemux; + } + } + entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC); if (!entry) return -ENOMEM; @@ -196,7 +219,10 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, __nf_queue_entry_init_physdevs(entry); - nf_queue_entry_get_refs(entry); + if (!nf_queue_entry_get_refs(entry)) { + kfree(entry); + return -ENOTCONN; + } switch (entry->state.pf) { case AF_INET: diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index b781ba97c474e3216e85bf95c4505358297ec1d3..560a93aad5b648ea2431e464981a42c4b38eaeaa 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2679,27 +2679,31 @@ static struct nft_expr *nft_expr_init(const struct nft_ctx *ctx, err = nf_tables_expr_parse(ctx, nla, &info); if (err < 0) - goto err1; + goto err_expr_parse; + + err = -EOPNOTSUPP; + if (!(info.ops->type->flags & NFT_EXPR_STATEFUL)) + goto err_expr_stateful; err = -ENOMEM; expr = kzalloc(info.ops->size, GFP_KERNEL); if (expr == NULL) - goto err2; + goto err_expr_stateful; err = nf_tables_newexpr(ctx, &info, expr); if (err < 0) - goto err3; + goto err_expr_new; return expr; -err3: +err_expr_new: kfree(expr); -err2: +err_expr_stateful: owner = info.ops->type->owner; if (info.ops->type->release_ops) info.ops->type->release_ops(info.ops); module_put(owner); -err1: +err_expr_parse: return ERR_PTR(err); } @@ -4047,6 +4051,9 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr, u32 len; int err; + if (desc->field_count >= ARRAY_SIZE(desc->field_len)) + return -E2BIG; + err = nla_parse_nested_deprecated(tb, NFTA_SET_FIELD_MAX, attr, nft_concat_policy, NULL); if (err < 0) @@ -4056,9 +4063,8 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr, return -EINVAL; len = ntohl(nla_get_be32(tb[NFTA_SET_FIELD_LEN])); - - if (len * BITS_PER_BYTE / 32 > NFT_REG32_COUNT) - return -E2BIG; + if (!len || len > U8_MAX) + return -EINVAL; desc->field_len[desc->field_count++] = len; @@ -4069,7 +4075,8 @@ static int nft_set_desc_concat(struct nft_set_desc *desc, const struct nlattr *nla) { struct nlattr *attr; - int rem, err; + u32 num_regs = 0; + int rem, err, i; nla_for_each_nested(attr, nla, rem) { if (nla_type(attr) != NFTA_LIST_ELEM) @@ -4080,6 +4087,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc, return err; } + for (i = 0; i < desc->field_count; i++) + num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32)); + + if (num_regs > NFT_REG32_COUNT) + return -E2BIG; + return 0; } @@ -4867,13 +4880,20 @@ static int nft_setelem_parse_data(struct nft_ctx *ctx, struct nft_set *set, struct nft_data *data, struct nlattr *attr) { + u32 dtype; int err; err = nft_data_init(ctx, data, NFT_DATA_VALUE_MAXLEN, desc, attr); if (err < 0) return err; - if (desc->type != NFT_DATA_VERDICT && desc->len != set->dlen) { + if (set->dtype == NFT_DATA_VERDICT) + dtype = NFT_DATA_VERDICT; + else + dtype = NFT_DATA_VALUE; + + if (dtype != desc->type || + set->dlen != desc->len) { nft_data_release(data, desc->type); return -EINVAL; } @@ -5055,9 +5075,6 @@ struct nft_expr 
*nft_set_elem_expr_alloc(const struct nft_ctx *ctx, return expr; err = -EOPNOTSUPP; - if (!(expr->ops->type->flags & NFT_EXPR_STATEFUL)) - goto err_set_elem_expr; - if (expr->ops->type->flags & NFT_EXPR_GC) { if (set->flags & NFT_SET_TIMEOUT) goto err_set_elem_expr; @@ -5924,12 +5941,15 @@ static int nf_tables_updobj(const struct nft_ctx *ctx, { struct nft_object *newobj; struct nft_trans *trans; - int err; + int err = -ENOMEM; + + if (!try_module_get(type->owner)) + return -ENOENT; trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ, sizeof(struct nft_trans_obj)); if (!trans) - return -ENOMEM; + goto err_trans; newobj = nft_obj_init(ctx, type, attr); if (IS_ERR(newobj)) { @@ -5946,6 +5966,8 @@ static int nf_tables_updobj(const struct nft_ctx *ctx, err_free_trans: kfree(trans); +err_trans: + module_put(type->owner); return err; } @@ -7555,7 +7577,7 @@ static void nft_obj_commit_update(struct nft_trans *trans) if (obj->ops->update) obj->ops->update(obj, newobj); - kfree(newobj); + nft_obj_destroy(&trans->ctx, newobj); } static void nft_commit_release(struct nft_trans *trans) @@ -8202,7 +8224,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) break; case NFT_MSG_NEWOBJ: if (nft_trans_obj_update(trans)) { - kfree(nft_trans_obj_newobj(trans)); + nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans)); nft_trans_destroy(trans); } else { trans->ctx.table->use--; diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index dbc2e945c98ebce86dd498a093cd693d28030d79..a61b5bf5aa0fbb1b059ef1c5ca94f94d48ecb6a2 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -162,7 +162,7 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv) struct nft_rule *const *rules; const struct nft_rule *rule; const struct nft_expr *expr, *last; - struct nft_regs regs; + struct nft_regs regs = {}; unsigned int stackptr = 0; struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE]; bool genbit = READ_ONCE(net->nft.gencursor); diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index b0358f30947ea80b6aa715bd515d652538ee785e..1640da5c50776189b8cfc354eb5e6d09ccc50363 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -712,9 +712,15 @@ static struct nf_queue_entry * nf_queue_entry_dup(struct nf_queue_entry *e) { struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC); - if (entry) - nf_queue_entry_get_refs(entry); - return entry; + + if (!entry) + return NULL; + + if (nf_queue_entry_get_refs(entry)) + return entry; + + kfree(entry); + return NULL; } #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 1ebee25de67721934531095848d358c9895f0620..6a8495bd08bb2585af8cf0ff7defbe3cfd6cceae 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -502,6 +502,9 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt, struct sk_buff *skb, unsigned int *l4csum_offset) { + if (pkt->xt.fragoff) + return -1; + switch (pkt->tprot) { case IPPROTO_TCP: *l4csum_offset = offsetof(struct tcphdr, check); diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 2d73f265b12c9bbbe2b2b12b117333729ae073a2..f67c4436c5d31f38fd59310a744437db7ff981f3 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -1290,6 +1290,11 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old) if (!new->scratch_aligned) goto out_scratch; #endif + 
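/* Clear the per-cpu scratch pointers before the realloc: if
 * pipapo_realloc_scratch() fails part-way through, the error path
 * can then kfree() each entry without touching uninitialised memory.
 */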
for_each_possible_cpu(i) + *per_cpu_ptr(new->scratch, i) = NULL; + + if (pipapo_realloc_scratch(new, old->bsize_max)) + goto out_scratch_realloc; rcu_head_init(&new->rcu); @@ -1334,6 +1339,9 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old) kvfree(dst->lt); dst--; } +out_scratch_realloc: + for_each_possible_cpu(i) + kfree(*per_cpu_ptr(new->scratch, i)); #ifdef NFT_PIPAPO_ALIGN free_percpu(new->scratch_aligned); #endif diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c index 4fda8b3f176265b445e075fa51ddd0eb14c5ebe6..59c4dfaf2ea1fc97cade542fae9901fa89742217 100644 --- a/net/netfilter/nft_synproxy.c +++ b/net/netfilter/nft_synproxy.c @@ -191,8 +191,10 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx, if (err) goto nf_ct_failure; err = nf_synproxy_ipv6_init(snet, ctx->net); - if (err) + if (err) { + nf_synproxy_ipv4_fini(snet, ctx->net); goto nf_ct_failure; + } break; } diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 5e1239cef000588dff7963e2194ece26cadcf414..91b35b7c80d824e2793e98142e19ee59b0d93e26 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -885,6 +885,8 @@ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, unsigned char bitmask; unsigned char byte; + if (offset >= bitmap_len) + return -1; byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index e55af5c078ac096985c842484a8a81e42bffadda..f37916156ca523556c0266ad50acc2e9036ba0a1 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -149,6 +149,8 @@ static const struct rhashtable_params netlink_rhashtable_params; static inline u32 netlink_group_mask(u32 group) { + if (group > 32) + return 0; return group ? 
1 << (group - 1) : 0; } diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index eef0e3f2f25b0972debee403b424891094ef4dea..e5c8a295e64066971869b83ece1daee60a7f2ddc 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -298,7 +298,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); - unsigned long opt; + unsigned int opt; if (level != SOL_NETROM) return -ENOPROTOOPT; @@ -306,18 +306,18 @@ static int nr_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(unsigned int)) return -EINVAL; - if (copy_from_sockptr(&opt, optval, sizeof(unsigned long))) + if (copy_from_sockptr(&opt, optval, sizeof(opt))) return -EFAULT; switch (optname) { case NETROM_T1: - if (opt < 1 || opt > ULONG_MAX / HZ) + if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; nr->t1 = opt * HZ; return 0; case NETROM_T2: - if (opt < 1 || opt > ULONG_MAX / HZ) + if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; nr->t2 = opt * HZ; return 0; @@ -329,13 +329,13 @@ static int nr_setsockopt(struct socket *sock, int level, int optname, return 0; case NETROM_T4: - if (opt < 1 || opt > ULONG_MAX / HZ) + if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; nr->t4 = opt * HZ; return 0; case NETROM_IDLE: - if (opt > ULONG_MAX / (60 * HZ)) + if (opt > UINT_MAX / (60 * HZ)) return -EINVAL; nr->idle = opt * 60 * HZ; return 0; diff --git a/net/nfc/core.c b/net/nfc/core.c index 6800470dd6df7a0eb216fa3dc078294946366e30..3b2983813ff13a4d911a388390e7b080c5b8f421 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -38,7 +38,7 @@ int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -94,7 +94,7 @@ int nfc_dev_up(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -142,7 +142,7 @@ int nfc_dev_down(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -206,7 +206,7 @@ int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -245,7 +245,7 @@ int nfc_stop_poll(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -290,7 +290,7 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -334,7 +334,7 @@ int nfc_dep_link_down(struct nfc_dev *dev) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -400,7 +400,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -446,7 +446,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -493,7 +493,7 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, device_lock(&dev->dev); - if 
(!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; kfree_skb(skb); goto error; @@ -550,7 +550,7 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -599,7 +599,7 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx) device_lock(&dev->dev); - if (!device_is_registered(&dev->dev)) { + if (dev->shutting_down) { rc = -ENODEV; goto error; } @@ -1126,6 +1126,7 @@ int nfc_register_device(struct nfc_dev *dev) dev->rfkill = NULL; } } + dev->shutting_down = false; device_unlock(&dev->dev); rc = nfc_genl_device_added(dev); @@ -1158,12 +1159,10 @@ void nfc_unregister_device(struct nfc_dev *dev) rfkill_unregister(dev->rfkill); rfkill_destroy(dev->rfkill); } + dev->shutting_down = true; device_unlock(&dev->dev); if (dev->ops->check_presence) { - device_lock(&dev->dev); - dev->shutting_down = true; - device_unlock(&dev->dev); del_timer_sync(&dev->check_pres_timer); cancel_work_sync(&dev->check_pres_work); } diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 6cfd30fc07985e69fb68e84b71f3c9fa64aa62b2..0b93a17b9f11f53395c1a2e6f1ebddb3720bb180 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -789,6 +789,11 @@ static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg, lock_sock(sk); + if (!llcp_sock->local) { + release_sock(sk); + return -ENODEV; + } + if (sk->sk_type == SOCK_DGRAM) { DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, addr, msg->msg_name); diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index e38719e2ee582e538ef6672ff1a9cb315bb2b2ac..2cfff70f70e062db8be50df2542bc8f3596c93e8 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -548,6 +548,10 @@ static int nci_close_device(struct nci_dev *ndev) mutex_lock(&ndev->req_lock); if (!test_and_clear_bit(NCI_UP, &ndev->flags)) { + /* Need to flush the cmd wq in case + * there is a queued/running cmd_work + */ + flush_workqueue(ndev->cmd_wq); del_timer_sync(&ndev->cmd_timer); del_timer_sync(&ndev->data_timer); mutex_unlock(&ndev->req_lock); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 72d1e29e28b290b4cb8904acda08abd05ae055f6..f5c816a18b9c1dd8dbd7abe7e21ac1c143e47fb7 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1244,7 +1244,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, struct sk_buff *msg; void *hdr; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!msg) return -ENOMEM; @@ -1260,7 +1260,7 @@ int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name, genlmsg_end(msg, hdr); - genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); return 0; diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index fc487f9812fc554003b8d6c8c3b36a7ea3d760df..6d8d700216662ea633a524b6b14a3c2a13c56d8b 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -422,12 +422,43 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, memcpy(addr, new_addr, sizeof(__be32[4])); } -static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) +static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask) { + u8 old_ipv6_tclass = ipv6_get_dsfield(nh); + + ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask); + + if (skb->ip_summed == CHECKSUM_COMPLETE) + csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 
12), + (__force __wsum)(ipv6_tclass << 12)); + + ipv6_change_dsfield(nh, ~mask, ipv6_tclass); +} + +static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask) +{ + u32 ofl; + + ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2]; + fl = OVS_MASKED(ofl, fl, mask); + /* Bits 21-24 are always unmasked, so this retains their values. */ - OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16)); - OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8)); - OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask); + nh->flow_lbl[0] = (u8)(fl >> 16); + nh->flow_lbl[1] = (u8)(fl >> 8); + nh->flow_lbl[2] = (u8)fl; + + if (skb->ip_summed == CHECKSUM_COMPLETE) + csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl)); +} + +static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask) +{ + new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask); + + if (skb->ip_summed == CHECKSUM_COMPLETE) + csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8), + (__force __wsum)(new_ttl << 8)); + nh->hop_limit = new_ttl; } static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl, @@ -545,18 +576,17 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, } } if (mask->ipv6_tclass) { - ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass); + set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass); flow_key->ip.tos = ipv6_get_dsfield(nh); } if (mask->ipv6_label) { - set_ipv6_fl(nh, ntohl(key->ipv6_label), + set_ipv6_fl(skb, nh, ntohl(key->ipv6_label), ntohl(mask->ipv6_label)); flow_key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); } if (mask->ipv6_hlimit) { - OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit, - mask->ipv6_hlimit); + set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit); flow_key->ip.ttl = nh->hop_limit; } return 0; @@ -1014,7 +1044,7 @@ static int clone(struct datapath *dp, struct sk_buff *skb, int rem = nla_len(attr); bool dont_clone_flow_key; - /* The first action is always 'OVS_CLONE_ATTR_ARG'. */ + /* The first action is always 'OVS_CLONE_ATTR_EXEC'. 
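* That attribute carries the dont_clone_flow_key flag; the real list
* of actions only starts at the attribute following it.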
*/ clone_arg = nla_data(attr); dont_clone_flow_key = nla_get_u32(clone_arg); actions = nla_next(clone_arg, &rem); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index a11b558813c10783977a52cbb00d901182072a21..7ff98d39ec942ccf00f1bfdb01e9aad54b7a35f0 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -730,6 +730,57 @@ static bool skb_nfct_cached(struct net *net, } #if IS_ENABLED(CONFIG_NF_NAT) +static void ovs_nat_update_key(struct sw_flow_key *key, + const struct sk_buff *skb, + enum nf_nat_manip_type maniptype) +{ + if (maniptype == NF_NAT_MANIP_SRC) { + __be16 src; + + key->ct_state |= OVS_CS_F_SRC_NAT; + if (key->eth.type == htons(ETH_P_IP)) + key->ipv4.addr.src = ip_hdr(skb)->saddr; + else if (key->eth.type == htons(ETH_P_IPV6)) + memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr, + sizeof(key->ipv6.addr.src)); + else + return; + + if (key->ip.proto == IPPROTO_UDP) + src = udp_hdr(skb)->source; + else if (key->ip.proto == IPPROTO_TCP) + src = tcp_hdr(skb)->source; + else if (key->ip.proto == IPPROTO_SCTP) + src = sctp_hdr(skb)->source; + else + return; + + key->tp.src = src; + } else { + __be16 dst; + + key->ct_state |= OVS_CS_F_DST_NAT; + if (key->eth.type == htons(ETH_P_IP)) + key->ipv4.addr.dst = ip_hdr(skb)->daddr; + else if (key->eth.type == htons(ETH_P_IPV6)) + memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr, + sizeof(key->ipv6.addr.dst)); + else + return; + + if (key->ip.proto == IPPROTO_UDP) + dst = udp_hdr(skb)->dest; + else if (key->ip.proto == IPPROTO_TCP) + dst = tcp_hdr(skb)->dest; + else if (key->ip.proto == IPPROTO_SCTP) + dst = sctp_hdr(skb)->dest; + else + return; + + key->tp.dst = dst; + } +} + /* Modelled after nf_nat_ipv[46]_fn(). * range is only used for new, uninitialized NAT state. * Returns either NF_ACCEPT or NF_DROP. 
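* The sw_flow_key parameter added below lets this helper refresh the
* flow key itself as soon as a NAT operation succeeds, rather than
* leaving the update to the caller.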
@@ -737,7 +788,7 @@ static bool skb_nfct_cached(struct net *net, static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, const struct nf_nat_range2 *range, - enum nf_nat_manip_type maniptype) + enum nf_nat_manip_type maniptype, struct sw_flow_key *key) { int hooknum, nh_off, err = NF_ACCEPT; @@ -810,58 +861,11 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, skb_push(skb, nh_off); skb_postpush_rcsum(skb, skb->data, nh_off); - return err; -} - -static void ovs_nat_update_key(struct sw_flow_key *key, - const struct sk_buff *skb, - enum nf_nat_manip_type maniptype) -{ - if (maniptype == NF_NAT_MANIP_SRC) { - __be16 src; - - key->ct_state |= OVS_CS_F_SRC_NAT; - if (key->eth.type == htons(ETH_P_IP)) - key->ipv4.addr.src = ip_hdr(skb)->saddr; - else if (key->eth.type == htons(ETH_P_IPV6)) - memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr, - sizeof(key->ipv6.addr.src)); - else - return; - - if (key->ip.proto == IPPROTO_UDP) - src = udp_hdr(skb)->source; - else if (key->ip.proto == IPPROTO_TCP) - src = tcp_hdr(skb)->source; - else if (key->ip.proto == IPPROTO_SCTP) - src = sctp_hdr(skb)->source; - else - return; - - key->tp.src = src; - } else { - __be16 dst; - - key->ct_state |= OVS_CS_F_DST_NAT; - if (key->eth.type == htons(ETH_P_IP)) - key->ipv4.addr.dst = ip_hdr(skb)->daddr; - else if (key->eth.type == htons(ETH_P_IPV6)) - memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr, - sizeof(key->ipv6.addr.dst)); - else - return; - - if (key->ip.proto == IPPROTO_UDP) - dst = udp_hdr(skb)->dest; - else if (key->ip.proto == IPPROTO_TCP) - dst = tcp_hdr(skb)->dest; - else if (key->ip.proto == IPPROTO_SCTP) - dst = sctp_hdr(skb)->dest; - else - return; + /* Update the flow key if NAT successful. */ + if (err == NF_ACCEPT) + ovs_nat_update_key(key, skb, maniptype); - key->tp.dst = dst; - } + return err; } /* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */ @@ -903,7 +907,7 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, } else { return NF_ACCEPT; /* Connection is not NATed. */ } - err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype); + err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype, key); if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) { if (ct->status & IPS_SRC_NAT) { @@ -913,17 +917,13 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, maniptype = NF_NAT_MANIP_SRC; err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, - maniptype); + maniptype, key); } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL, - NF_NAT_MANIP_SRC); + NF_NAT_MANIP_SRC, key); } } - /* Mark NAT done if successful and update the flow key. 
*/ - if (err == NF_ACCEPT) - ovs_nat_update_key(key, skb, maniptype); - return err; } #else /* !CONFIG_NF_NAT */ diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 4c5c2331e7648fb84c26bc791a8de914e0336e05..98a7e6f64ab0b07894db02030a11cb0535191747 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2201,8 +2201,8 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey, icmpv6_key->icmpv6_type = ntohs(output->tp.src); icmpv6_key->icmpv6_code = ntohs(output->tp.dst); - if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || - icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { + if (swkey->tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) || + swkey->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { struct ovs_key_nd *nd_key; nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key)); @@ -2288,6 +2288,62 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size) return sfa; } +static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len); + +static void ovs_nla_free_check_pkt_len_action(const struct nlattr *action) +{ + const struct nlattr *a; + int rem; + + nla_for_each_nested(a, action, rem) { + switch (nla_type(a)) { + case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL: + case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER: + ovs_nla_free_nested_actions(nla_data(a), nla_len(a)); + break; + } + } +} + +static void ovs_nla_free_clone_action(const struct nlattr *action) +{ + const struct nlattr *a = nla_data(action); + int rem = nla_len(action); + + switch (nla_type(a)) { + case OVS_CLONE_ATTR_EXEC: + /* The real list of actions follows this attribute. */ + a = nla_next(a, &rem); + ovs_nla_free_nested_actions(a, rem); + break; + } +} + +static void ovs_nla_free_dec_ttl_action(const struct nlattr *action) +{ + const struct nlattr *a = nla_data(action); + + switch (nla_type(a)) { + case OVS_DEC_TTL_ATTR_ACTION: + ovs_nla_free_nested_actions(nla_data(a), nla_len(a)); + break; + } +} + +static void ovs_nla_free_sample_action(const struct nlattr *action) +{ + const struct nlattr *a = nla_data(action); + int rem = nla_len(action); + + switch (nla_type(a)) { + case OVS_SAMPLE_ATTR_ARG: + /* The real list of actions follows this attribute. */ + a = nla_next(a, &rem); + ovs_nla_free_nested_actions(a, rem); + break; + } +} + static void ovs_nla_free_set_action(const struct nlattr *a) { const struct nlattr *ovs_key = nla_data(a); @@ -2301,25 +2357,54 @@ static void ovs_nla_free_set_action(const struct nlattr *a) } } -void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts) +static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len) { const struct nlattr *a; int rem; - if (!sf_acts) + /* Whenever new actions are added, the need to update this + * function should be considered. 
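* The BUILD_BUG_ON below pins the action count at its current value,
* so introducing a new action type breaks the build until this free
* routine has been audited for it.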
+ */ + BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 23); + + if (!actions) return; - nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) { + nla_for_each_attr(a, actions, len, rem) { switch (nla_type(a)) { - case OVS_ACTION_ATTR_SET: - ovs_nla_free_set_action(a); + case OVS_ACTION_ATTR_CHECK_PKT_LEN: + ovs_nla_free_check_pkt_len_action(a); + break; + + case OVS_ACTION_ATTR_CLONE: + ovs_nla_free_clone_action(a); break; + case OVS_ACTION_ATTR_CT: ovs_ct_free_action(a); break; + + case OVS_ACTION_ATTR_DEC_TTL: + ovs_nla_free_dec_ttl_action(a); + break; + + case OVS_ACTION_ATTR_SAMPLE: + ovs_nla_free_sample_action(a); + break; + + case OVS_ACTION_ATTR_SET: + ovs_nla_free_set_action(a); + break; } } +} + +void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts) +{ + if (!sf_acts) + return; + ovs_nla_free_nested_actions(sf_acts->actions, sf_acts->actions_len); kfree(sf_acts); } @@ -3419,7 +3504,9 @@ static int clone_action_to_attr(const struct nlattr *attr, if (!start) return -EMSGSIZE; - err = ovs_nla_put_actions(nla_data(attr), rem, skb); + /* Skipping the OVS_CLONE_ATTR_EXEC that is always the first attribute. */ + attr = nla_next(nla_data(attr), &rem); + err = ovs_nla_put_actions(attr, rem, skb); if (err) nla_nest_cancel(skb, start); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f78097aa403a8bea058145a29829392b7015661e..d0c95d7dd292d89f0de3b36816ff927e0ffdacfb 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1735,6 +1735,7 @@ static int fanout_add(struct sock *sk, struct fanout_args *args) match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; + match->prot_hook.af_packet_net = read_pnet(&match->net); match->prot_hook.id_match = match_fanout_group; match->max_num_members = args->max_num_members; list_add(&match->list, &fanout_list); @@ -1749,7 +1750,10 @@ static int fanout_add(struct sock *sk, struct fanout_args *args) err = -ENOSPC; if (refcount_read(&match->sk_ref) < match->max_num_members) { __dev_remove_pack(&po->prot_hook); - po->fanout = match; + + /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */ + WRITE_ONCE(po->fanout, match); + po->rollover = rollover; rollover = NULL; refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); @@ -2274,8 +2278,11 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, copy_skb = skb_get(skb); skb_head = skb->data; } - if (copy_skb) + if (copy_skb) { + memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0, + sizeof(PACKET_SKB_CB(copy_skb)->sa.ll)); skb_set_owner_r(copy_skb, sk); + } } snaplen = po->rx_ring.frame_size - macoff; if ((int)snaplen < 0) { @@ -3323,6 +3330,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, po->prot_hook.func = packet_rcv_spkt; po->prot_hook.af_packet_priv = sk; + po->prot_hook.af_packet_net = sock_net(sk); if (proto) { po->prot_hook.type = proto; @@ -3429,6 +3437,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { + const size_t max_len = min(sizeof(skb->cb), + sizeof(struct sockaddr_storage)); int copy_len; /* If the address length field is there to be filled @@ -3451,6 +3461,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, msg->msg_namelen = sizeof(struct sockaddr_ll); } } + if (WARN_ON_ONCE(copy_len > max_len)) { + copy_len = max_len; + msg->msg_namelen = copy_len; + } memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len); } 
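/*
 * Why the max_len clamp above matters (sketch; field sizes are the
 * usual ones, not guaranteed): PACKET_SKB_CB(skb)->sa lives in the
 * fixed-size skb->cb[] control block, while the length reported for
 * a link-layer address is derived from the device address length:
 *
 *	copy_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
 *
 * so an oversized value could otherwise push the memcpy() past the
 * end of cb[].  The bound is therefore
 *
 *	max_len = min(sizeof(skb->cb), sizeof(struct sockaddr_storage));
 */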
@@ -3904,7 +3918,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, } case PACKET_FANOUT_DATA: { - if (!po->fanout) + /* Paired with the WRITE_ONCE() in fanout_add() */ + if (!READ_ONCE(po->fanout)) return -EINVAL; return fanout_set_data(po, optval, optlen); diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c index b3138fc2e552ea4e2a1ee23d140093b951ca07da..f06ddbed3fed6396b4ea510a29bb9f8025cfb35d 100644 --- a/net/rose/rose_timer.c +++ b/net/rose/rose_timer.c @@ -31,89 +31,89 @@ static void rose_idletimer_expiry(struct timer_list *); void rose_start_heartbeat(struct sock *sk) { - del_timer(&sk->sk_timer); + sk_stop_timer(sk, &sk->sk_timer); sk->sk_timer.function = rose_heartbeat_expiry; sk->sk_timer.expires = jiffies + 5 * HZ; - add_timer(&sk->sk_timer); + sk_reset_timer(sk, &sk->sk_timer, sk->sk_timer.expires); } void rose_start_t1timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); - del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t1; - add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_t2timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); - del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t2; - add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_t3timer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); - del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->t3; - add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_hbtimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); - del_timer(&rose->timer); + sk_stop_timer(sk, &rose->timer); rose->timer.function = rose_timer_expiry; rose->timer.expires = jiffies + rose->hb; - add_timer(&rose->timer); + sk_reset_timer(sk, &rose->timer, rose->timer.expires); } void rose_start_idletimer(struct sock *sk) { struct rose_sock *rose = rose_sk(sk); - del_timer(&rose->idletimer); + sk_stop_timer(sk, &rose->idletimer); if (rose->idle > 0) { rose->idletimer.function = rose_idletimer_expiry; rose->idletimer.expires = jiffies + rose->idle; - add_timer(&rose->idletimer); + sk_reset_timer(sk, &rose->idletimer, rose->idletimer.expires); } } void rose_stop_heartbeat(struct sock *sk) { - del_timer(&sk->sk_timer); + sk_stop_timer(sk, &sk->sk_timer); } void rose_stop_timer(struct sock *sk) { - del_timer(&rose_sk(sk)->timer); + sk_stop_timer(sk, &rose_sk(sk)->timer); } void rose_stop_idletimer(struct sock *sk) { - del_timer(&rose_sk(sk)->idletimer); + sk_stop_timer(sk, &rose_sk(sk)->idletimer); } static void rose_heartbeat_expiry(struct timer_list *t) @@ -130,6 +130,7 @@ static void rose_heartbeat_expiry(struct timer_list *t) (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { bh_unlock_sock(sk); rose_destroy_socket(sk); + sock_put(sk); return; } break; @@ -152,6 +153,7 @@ static void rose_heartbeat_expiry(struct timer_list *t) rose_start_heartbeat(sk); bh_unlock_sock(sk); + sock_put(sk); } static void rose_timer_expiry(struct timer_list *t) @@ -181,6 +183,7 @@ static void rose_timer_expiry(struct timer_list *t) break; } bh_unlock_sock(sk); + sock_put(sk); } static void rose_idletimer_expiry(struct timer_list *t) @@ -205,4 +208,5 @@ static void rose_idletimer_expiry(struct 
timer_list *t) sock_set_flag(sk, SOCK_DEAD); } bh_unlock_sock(sk); + sock_put(sk); } diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index dce48162f6c274116f25ff6fa04b54dd7835f372..3bad9f5f9102395eac52d393f497602bab6fac69 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -760,14 +760,12 @@ void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool, enum rxrpc_propose_ack_trace); void rxrpc_process_call(struct work_struct *); -static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call, - unsigned long expire_at, - unsigned long now, - enum rxrpc_timer_trace why) -{ - trace_rxrpc_timer(call, why, now); - timer_reduce(&call->timer, expire_at); -} +void rxrpc_reduce_call_timer(struct rxrpc_call *call, + unsigned long expire_at, + unsigned long now, + enum rxrpc_timer_trace why); + +void rxrpc_delete_call_timer(struct rxrpc_call *call); /* * call_object.c @@ -791,6 +789,7 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *); bool __rxrpc_queue_call(struct rxrpc_call *); bool rxrpc_queue_call(struct rxrpc_call *); void rxrpc_see_call(struct rxrpc_call *); +bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op); void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace); void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace); void rxrpc_cleanup_call(struct rxrpc_call *); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index 6be2672a65eabebd5b8fc28d82ed9b3c58542005..22e05de5d1ca96fccb667a0a287e0042fc5be6e9 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -157,7 +157,7 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call) static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) { struct sk_buff *skb; - unsigned long resend_at, rto_j; + unsigned long resend_at; rxrpc_seq_t cursor, seq, top; ktime_t now, max_age, oldest, ack_ts; int ix; @@ -165,10 +165,8 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); - rto_j = call->peer->rto_j; - now = ktime_get_real(); - max_age = ktime_sub(now, jiffies_to_usecs(rto_j)); + max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j)); spin_lock_bh(&call->lock); @@ -213,7 +211,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) } resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest))); - resend_at += jiffies + rto_j; + resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, retrans); WRITE_ONCE(call->resend_at, resend_at); if (unacked) @@ -312,7 +310,7 @@ void rxrpc_process_call(struct work_struct *work) } if (call->state == RXRPC_CALL_COMPLETE) { - del_timer_sync(&call->timer); + rxrpc_delete_call_timer(call); goto out_put; } diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 4eb91d958a48d3f89c17bcf1d43eadc57a748d70..043508fd8d8a5d3a57d8a805093769b0be1f3b06 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -53,10 +53,30 @@ static void rxrpc_call_timer_expired(struct timer_list *t) if (call->state < RXRPC_CALL_COMPLETE) { trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies); - rxrpc_queue_call(call); + __rxrpc_queue_call(call); + } else { + rxrpc_put_call(call, rxrpc_call_put); + } +} + +void rxrpc_reduce_call_timer(struct rxrpc_call *call, + unsigned long expire_at, + unsigned long now, + enum rxrpc_timer_trace why) +{ + if (rxrpc_try_get_call(call, rxrpc_call_got_timer)) { + trace_rxrpc_timer(call, why, now); + if (timer_reduce(&call->timer, expire_at)) + 
rxrpc_put_call(call, rxrpc_call_put_notimer); } } +void rxrpc_delete_call_timer(struct rxrpc_call *call) +{ + if (del_timer_sync(&call->timer)) + rxrpc_put_call(call, rxrpc_call_put_timer); +} + static struct lock_class_key rxrpc_call_user_mutex_lock_class_key; /* @@ -463,6 +483,17 @@ void rxrpc_see_call(struct rxrpc_call *call) } } +bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op) +{ + const void *here = __builtin_return_address(0); + int n = atomic_fetch_add_unless(&call->usage, 1, 0); + + if (n == 0) + return false; + trace_rxrpc_call(call->debug_id, op, n, here, NULL); + return true; +} + /* * Note the addition of a ref on a call. */ @@ -510,8 +541,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) spin_unlock_bh(&call->lock); rxrpc_put_call_slot(call); - - del_timer_sync(&call->timer); + rxrpc_delete_call_timer(call); /* Make sure we don't get any more notifications */ write_lock_bh(&rx->recvmsg_lock); @@ -618,6 +648,8 @@ static void rxrpc_destroy_call(struct work_struct *work) struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor); struct rxrpc_net *rxnet = call->rxnet; + rxrpc_delete_call_timer(call); + rxrpc_put_connection(call->conn); rxrpc_put_peer(call->peer); kfree(call->rxtx_buffer); @@ -652,8 +684,6 @@ void rxrpc_cleanup_call(struct rxrpc_call *call) memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); - del_timer_sync(&call->timer); - ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c index 25bbc4cc8b1359f7b895f181dad227de088ed31d..f15d6942da45306e4fa15399473044281dcbfed9 100644 --- a/net/rxrpc/net_ns.c +++ b/net/rxrpc/net_ns.c @@ -113,8 +113,8 @@ static __net_exit void rxrpc_exit_net(struct net *net) struct rxrpc_net *rxnet = rxrpc_net(net); rxnet->live = false; - del_timer_sync(&rxnet->peer_keepalive_timer); cancel_work_sync(&rxnet->peer_keepalive_work); + del_timer_sync(&rxnet->peer_keepalive_timer); rxrpc_destroy_all_calls(rxnet); rxrpc_destroy_all_connections(rxnet); rxrpc_destroy_all_peers(rxnet); diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 10f2bf2e9068abc15ef4ece90e7d6652194b4f77..a45c83f22236e2648c2fa4f97ebebb088361a071 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -468,7 +468,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, if (call->peer->rtt_count > 1) { unsigned long nowj = jiffies, ack_lost_at; - ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans); + ack_lost_at = rxrpc_get_rto_backoff(call->peer, false); ack_lost_at += nowj; WRITE_ONCE(call->ack_lost_at, ack_lost_at); rxrpc_reduce_call_timer(call, ack_lost_at, nowj, diff --git a/net/sched/act_api.c b/net/sched/act_api.c index f613299ca7f0a668cc2c7579a4f0515e16af1d78..7b29aa1a3ce9a20ba165fb1030628d83c064a3bb 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -680,15 +680,24 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, restart_act_graph: for (i = 0; i < nr_actions; i++) { const struct tc_action *a = actions[i]; + int repeat_ttl; if (jmp_prgcnt > 0) { jmp_prgcnt -= 1; continue; } + + repeat_ttl = 32; repeat: ret = a->ops->act(skb, a, res); - if (ret == TC_ACT_REPEAT) - goto repeat; /* we need a ttl - JHS */ + + if (unlikely(ret == TC_ACT_REPEAT)) { + if (--repeat_ttl != 0) + goto repeat; + /* suspicious opcode, stop pipeline */ + net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n"); + return TC_ACT_OK; + } if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) 
{ jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK; diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 812c3c70a53a04029ccf11b78d0c59d877fb2180..825b3e9b55f7e23900e55af586b45e826e1f5fbb 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -514,11 +514,6 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p, struct nf_conn *ct; u8 dir; - /* Previously seen or loopback */ - ct = nf_ct_get(skb, &ctinfo); - if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED) - return false; - switch (family) { case NFPROTO_IPV4: if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph)) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index cb1331b3574512932890bef65df1e1deacf5f88d..b8ffb7e4f696c26bd518e85a5c97b6981c082d34 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1045,7 +1045,7 @@ static int __tcf_qdisc_find(struct net *net, struct Qdisc **q, /* Find qdisc */ if (!*parent) { - *q = dev->qdisc; + *q = rcu_dereference(dev->qdisc); *parent = (*q)->handle; } else { *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent)); @@ -1656,10 +1656,10 @@ static int tcf_chain_tp_insert(struct tcf_chain *chain, if (chain->flushing) return -EAGAIN; + RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); if (*chain_info->pprev == chain->filter_chain) tcf_chain0_head_change(chain, tp); tcf_proto_get(tp); - RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); rcu_assign_pointer(*chain_info->pprev, tp); return 0; @@ -1954,9 +1954,9 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, bool prio_allocate; u32 parent; u32 chain_index; - struct Qdisc *q = NULL; + struct Qdisc *q; struct tcf_chain_info chain_info; - struct tcf_chain *chain = NULL; + struct tcf_chain *chain; struct tcf_block *block; struct tcf_proto *tp; unsigned long cl; @@ -1984,6 +1984,8 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, tp = NULL; cl = 0; block = NULL; + q = NULL; + chain = NULL; if (prio == 0) { /* If no priority is provided by the user, @@ -2589,7 +2591,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) parent = tcm->tcm_parent; if (!parent) - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); else q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); if (!q) @@ -2804,8 +2806,8 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, struct tcmsg *t; u32 parent; u32 chain_index; - struct Qdisc *q = NULL; - struct tcf_chain *chain = NULL; + struct Qdisc *q; + struct tcf_chain *chain; struct tcf_block *block; unsigned long cl; int err; @@ -2815,6 +2817,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, return -EPERM; replay: + q = NULL; err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); if (err < 0) @@ -2974,7 +2977,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) parent = tcm->tcm_parent; if (!parent) { - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); parent = q->handle; } else { q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 54209a18d7fec2ee70664b365a2909fd913c6e01..b61db335c49d15cb357e8f27eb2f2f6fdf17e014 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -386,14 +386,19 @@ static int u32_init(struct tcf_proto *tp) return 0; } -static int u32_destroy_key(struct tc_u_knode *n, bool free_pf) +static void __u32_destroy_key(struct tc_u_knode *n) { struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); tcf_exts_destroy(&n->exts); - 
tcf_exts_put_net(&n->exts); if (ht && --ht->refcnt == 0) kfree(ht); + kfree(n); +} + +static void u32_destroy_key(struct tc_u_knode *n, bool free_pf) +{ + tcf_exts_put_net(&n->exts); #ifdef CONFIG_CLS_U32_PERF if (free_pf) free_percpu(n->pf); @@ -402,8 +407,7 @@ static int u32_destroy_key(struct tc_u_knode *n, bool free_pf) if (free_pf) free_percpu(n->pcpu_success); #endif - kfree(n); - return 0; + __u32_destroy_key(n); } /* u32_delete_key_rcu should be called when free'ing a copied @@ -898,13 +902,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, tca[TCA_RATE], ovr, extack); if (err) { - u32_destroy_key(new, false); + __u32_destroy_key(new); return err; } err = u32_replace_hw_knode(tp, new, flags, extack); if (err) { - u32_destroy_key(new, false); + __u32_destroy_key(new); return err; } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 7b24582a8a1643044a98e51badec7e14b529ba42..6e18aa41778285e7fa97e606f46cfc67e0f03236 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -301,7 +301,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) if (!handle) return NULL; - q = qdisc_match_from_root(dev->qdisc, handle); + q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle); if (q) goto out; @@ -320,7 +320,7 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle) if (!handle) return NULL; - q = qdisc_match_from_root(dev->qdisc, handle); + q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle); if (q) goto out; @@ -1082,10 +1082,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, skip: if (!ingress) { notify_and_destroy(net, skb, n, classid, - dev->qdisc, new); + rtnl_dereference(dev->qdisc), new); if (new && !new->ops->attach) qdisc_refcount_inc(new); - dev->qdisc = new ? : &noop_qdisc; + rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc); if (new && new->ops->attach) new->ops->attach(new); @@ -1204,7 +1204,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev, err = -ENOENT; if (!ops) { - NL_SET_ERR_MSG(extack, "Specified qdisc not found"); + NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown"); goto err_out; } @@ -1460,7 +1460,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); } if (!q) { NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device"); @@ -1549,7 +1549,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, q = dev_ingress_queue(dev)->qdisc_sleeping; } } else { - q = dev->qdisc; + q = rtnl_dereference(dev->qdisc); } /* It may be default qdisc, ignore it */ @@ -1771,7 +1771,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) s_q_idx = 0; q_idx = 0; - if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx, + if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc), + skb, cb, &q_idx, s_q_idx, true, tca[TCA_DUMP_INVISIBLE]) < 0) goto done; @@ -2047,7 +2048,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, } else if (qid1) { qid = qid1; } else if (qid == 0) - qid = dev->qdisc->handle; + qid = rtnl_dereference(dev->qdisc)->handle; /* Now qid is genuine qdisc handle consistent * both with parent and child. @@ -2058,7 +2059,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, portid = TC_H_MAKE(qid, portid); } else { if (qid == 0) - qid = dev->qdisc->handle; + qid = rtnl_dereference(dev->qdisc)->handle; } /* OK. 
Locate qdisc */ @@ -2219,7 +2220,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) s_t = cb->args[0]; t = 0; - if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0) + if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc), + skb, tcm, cb, &t, s_t, true) < 0) goto done; dev_queue = dev_ingress_queue(dev); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 6a9c1a39874a0f239797a65941d5591d509c675d..5d5391adb667cb508aaf3d7eec0a5efb990c52e9 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -1088,30 +1088,33 @@ static void attach_default_qdiscs(struct net_device *dev) if (!netif_is_multiqueue(dev) || dev->priv_flags & IFF_NO_QUEUE) { netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); - dev->qdisc = txq->qdisc_sleeping; - qdisc_refcount_inc(dev->qdisc); + qdisc = txq->qdisc_sleeping; + rcu_assign_pointer(dev->qdisc, qdisc); + qdisc_refcount_inc(qdisc); } else { qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL); if (qdisc) { - dev->qdisc = qdisc; + rcu_assign_pointer(dev->qdisc, qdisc); qdisc->ops->attach(qdisc); } } + qdisc = rtnl_dereference(dev->qdisc); /* Detect default qdisc setup/init failed and fallback to "noqueue" */ - if (dev->qdisc == &noop_qdisc) { + if (qdisc == &noop_qdisc) { netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n", default_qdisc_ops->id, noqueue_qdisc_ops.id); dev->priv_flags |= IFF_NO_QUEUE; netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); - dev->qdisc = txq->qdisc_sleeping; - qdisc_refcount_inc(dev->qdisc); + qdisc = txq->qdisc_sleeping; + rcu_assign_pointer(dev->qdisc, qdisc); + qdisc_refcount_inc(qdisc); dev->priv_flags ^= IFF_NO_QUEUE; } #ifdef CONFIG_NET_SCHED - if (dev->qdisc != &noop_qdisc) - qdisc_hash_add(dev->qdisc, false); + if (qdisc != &noop_qdisc) + qdisc_hash_add(qdisc, false); #endif } @@ -1141,7 +1144,7 @@ void dev_activate(struct net_device *dev) * and noqueue_qdisc for virtual interfaces */ - if (dev->qdisc == &noop_qdisc) + if (rtnl_dereference(dev->qdisc) == &noop_qdisc) attach_default_qdiscs(dev); if (!netif_carrier_ok(dev)) @@ -1306,7 +1309,7 @@ static int qdisc_change_tx_queue_len(struct net_device *dev, void dev_qdisc_change_real_num_tx(struct net_device *dev, unsigned int new_real_tx) { - struct Qdisc *qdisc = dev->qdisc; + struct Qdisc *qdisc = rtnl_dereference(dev->qdisc); if (qdisc->ops->change_real_num_tx) qdisc->ops->change_real_num_tx(qdisc, new_real_tx); @@ -1346,7 +1349,7 @@ static void dev_init_scheduler_queue(struct net_device *dev, void dev_init_scheduler(struct net_device *dev) { - dev->qdisc = &noop_qdisc; + rcu_assign_pointer(dev->qdisc, &noop_qdisc); netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); if (dev_ingress_queue(dev)) dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); @@ -1374,8 +1377,8 @@ void dev_shutdown(struct net_device *dev) netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); if (dev_ingress_queue(dev)) shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); - qdisc_put(dev->qdisc); - dev->qdisc = &noop_qdisc; + qdisc_put(rtnl_dereference(dev->qdisc)); + rcu_assign_pointer(dev->qdisc, &noop_qdisc); WARN_ON(timer_pending(&dev->watchdog_timer)); } @@ -1386,6 +1389,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, { memset(r, 0, sizeof(*r)); r->overhead = conf->overhead; + r->mpu = conf->mpu; r->rate_bytes_ps = max_t(u64, conf->rate, rate64); r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); r->mult = 1; 
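The qdisc hunks above (sch_api.c, cls_api.c, sch_generic.c) turn dev->qdisc into an RCU-managed pointer: writers publish it with rcu_assign_pointer() under RTNL, and readers use rtnl_dereference() or rcu_dereference() depending on whether they hold RTNL or only the RCU read lock. In memory-ordering terms the publish is a release store and the read is a dependency-ordered load. A rough userspace analogue using C11 atomics, assuming a single writer and leaving out grace periods and reclamation entirely (all names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a qdisc. */
struct qdisc { unsigned int handle; };

static struct qdisc noop_qdisc = { .handle = 0 };

/* The published pointer, analogous to dev->qdisc. */
static _Atomic(struct qdisc *) root_qdisc = &noop_qdisc;

/* Like rcu_assign_pointer(): the release store orders the object's
 * initialisation before the pointer becomes visible to readers. */
static void publish_qdisc(struct qdisc *q)
{
	atomic_store_explicit(&root_qdisc, q, memory_order_release);
}

/* Like rcu_dereference(): readers obtain a pointer whose pointee is
 * guaranteed initialised (acquire is a conservative stand-in for the
 * kernel's dependency ordering). */
static struct qdisc *read_qdisc(void)
{
	return atomic_load_explicit(&root_qdisc, memory_order_acquire);
}

int main(void)
{
	struct qdisc *q = malloc(sizeof(*q));

	if (!q)
		return 1;
	q->handle = 0x8001;	/* fully initialise... */
	publish_qdisc(q);	/* ...then publish */
	printf("root qdisc handle: %#x\n", read_qdisc()->handle);
	free(q);
	return 0;
}

The release store is what guarantees that a reader who observes the new pointer also observes the fully initialised qdisc behind it; the grace-period machinery this sketch omits is what makes freeing the replaced object safe in the kernel.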
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 806babdd838d21e2a3256c4337991116913e6e77..eca525791013e63d55a3dd2949188cf445dcfd1d 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -427,7 +427,8 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (unlikely(!child)) return qdisc_drop(skb, sch, to_free); - if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) { + /* sk_flags are only safe to use on full sockets. */ + if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) { if (!is_valid_interval(skb, sch)) return qdisc_drop(skb, sch, to_free); } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { diff --git a/net/sctp/diag.c b/net/sctp/diag.c index babadd6720a2be1f4d7c5c0840c677f588a01023..68ff82ff49a3dc0373b1992618f606cf694a93d0 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@ -61,10 +61,6 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r, r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX; r->idiag_retrans = asoc->rtx_data_chunks; r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies); - } else { - r->idiag_timer = 0; - r->idiag_retrans = 0; - r->idiag_expires = 0; } } @@ -144,13 +140,14 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc, r = nlmsg_data(nlh); BUG_ON(!sk_fullsock(sk)); + r->idiag_timer = 0; + r->idiag_retrans = 0; + r->idiag_expires = 0; if (asoc) { inet_diag_msg_sctpasoc_fill(r, sk, asoc); } else { inet_diag_msg_common_fill(r, sk); r->idiag_state = sk->sk_state; - r->idiag_timer = 0; - r->idiag_retrans = 0; } if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin)) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0a9e2c7d8e5f535485f1bc1d319237f0fc06fc1f..e9b4ea3d934fa6ab32b0b0e0a9b3c2bd8b348ed1 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5518,7 +5518,7 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) * Set the daddr and initialize id to something more random and also * copy over any ip options. 
*/ - sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); + sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk); sp->pf->copy_ip_options(sk, sock->sk); /* Populate the fields of the newsk from the oldsk and migrate the diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 99b902e410c49d16f560dd6bf61c957db9ad8ef7..4f16d406ad8ea9e1716ee26ca826c13c2cf876df 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -180,7 +180,7 @@ static int smc_release(struct socket *sock) { struct sock *sk = sock->sk; struct smc_sock *smc; - int rc = 0; + int old_state, rc = 0; if (!sk) goto out; @@ -188,8 +188,10 @@ static int smc_release(struct socket *sock) sock_hold(sk); /* sock_put below */ smc = smc_sk(sk); + old_state = sk->sk_state; + /* cleanup for a dangling non-blocking connect */ - if (smc->connect_nonblock && sk->sk_state == SMC_INIT) + if (smc->connect_nonblock && old_state == SMC_INIT) tcp_abort(smc->clcsock->sk, ECONNABORTED); if (cancel_work_sync(&smc->connect_work)) @@ -203,6 +205,10 @@ static int smc_release(struct socket *sock) else lock_sock(sk); + if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE && + !smc->use_fallback) + smc_close_active_abort(smc); + rc = __smc_release(smc); /* detach socket */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 2a22dc85951eee320457b3cfb84f3f247799c754..ef2fd28999bafcc693a9f279968d5383a3400bbe 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -662,8 +662,8 @@ void smc_conn_free(struct smc_connection *conn) cancel_work_sync(&conn->abort_work); } if (!list_empty(&lgr->list)) { - smc_lgr_unregister_conn(conn); smc_buf_unuse(conn, lgr); /* allow buffer reuse */ + smc_lgr_unregister_conn(conn); } if (!lgr->conns_num) @@ -1002,16 +1002,11 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd) /* Called when an SMCR device is removed or the smc module is unloaded. * If smcibdev is given, all SMCR link groups using this device are terminated. * If smcibdev is NULL, all SMCR link groups are terminated. - * - * We must wait here for QPs been destroyed before we destroy the CQs, - * or we won't received any CQEs and cdc_pend_tx_wr cannot reach 0 thus - * smc_sock cannot be released. 
*/ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) { struct smc_link_group *lgr, *lg; LIST_HEAD(lgr_free_list); - LIST_HEAD(lgr_linkdown_list); int i; spin_lock_bh(&smc_lgr_list.lock); @@ -1023,7 +1018,7 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) { for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { if (lgr->lnk[i].smcibdev == smcibdev) - list_move_tail(&lgr->list, &lgr_linkdown_list); + smcr_link_down_cond_sched(&lgr->lnk[i]); } } } @@ -1035,16 +1030,6 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev) __smc_lgr_terminate(lgr, false); } - list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) { - for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) { - if (lgr->lnk[i].smcibdev == smcibdev) { - mutex_lock(&lgr->llc_conf_mutex); - smcr_link_down_cond(&lgr->lnk[i]); - mutex_unlock(&lgr->llc_conf_mutex); - } - } - } - if (smcibdev) { if (atomic_read(&smcibdev->lnk_cnt)) wait_event(smcibdev->lnks_deleted, @@ -1331,7 +1316,8 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini) (ini->smcd_version == SMC_V2 || lgr->vlan_id == ini->vlan_id) && (role == SMC_CLNT || ini->is_smcd || - lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) { + (lgr->conns_num < SMC_RMBS_PER_LGR_MAX && + !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) { /* link group found */ ini->first_contact_local = 0; conn->lgr = lgr; @@ -1440,7 +1426,7 @@ static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize, */ static inline int smc_rmb_wnd_update_limit(int rmbe_size) { - return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2); + return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2); } /* map an rmb buf to a link */ diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index f3c18b991d35c42442b332fbe280e976a8fd6517..30bae60d626c63b1b37d7f4d919c4b184ba5acf4 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -112,7 +112,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) pnettable = &sn->pnettable; /* remove table entry */ - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (!pnet_name || @@ -130,7 +130,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name) rc = 0; } } - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); /* if this is not the initial namespace, stop here */ if (net != &init_net) @@ -191,7 +191,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev) sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev && !strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) { @@ -205,7 +205,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev) break; } } - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } @@ -223,7 +223,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev) sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) { dev_put(pnetelem->ndev); @@ -236,7 +236,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev) break; } } - write_unlock(&pnettable->lock); + 
mutex_unlock(&pnettable->lock); return rc; } @@ -310,8 +310,9 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name) list_for_each_entry(ibdev, &smc_ib_devices.list, list) { if (!strncmp(ibdev->ibdev->name, ib_name, sizeof(ibdev->ibdev->name)) || - !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name, - IB_DEVICE_NAME_MAX - 1)) { + (ibdev->ibdev->dev.parent && + !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name, + IB_DEVICE_NAME_MAX - 1))) { goto out; } } @@ -371,7 +372,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, rc = -EEXIST; new_netdev = true; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_ETH && !strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) { @@ -381,9 +382,9 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net, } if (new_netdev) { list_add_tail(&new_pe->list, &pnettable->pnetlist); - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); } else { - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); kfree(new_pe); goto out_put; } @@ -445,7 +446,7 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name, new_pe->ib_port = ib_port; new_ibdev = true; - write_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { @@ -455,9 +456,9 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name, } if (new_ibdev) { list_add_tail(&new_pe->list, &pnettable->pnetlist); - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); } else { - write_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); kfree(new_pe); } return (new_ibdev) ? 
0 : -EEXIST; @@ -602,7 +603,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid, pnettable = &sn->pnettable; /* dump pnettable entries */ - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid)) continue; @@ -617,7 +618,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid, break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return idx; } @@ -859,7 +860,7 @@ int smc_pnet_net_init(struct net *net) struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev; INIT_LIST_HEAD(&pnettable->pnetlist); - rwlock_init(&pnettable->lock); + mutex_init(&pnettable->lock); INIT_LIST_HEAD(&pnetids_ndev->list); rwlock_init(&pnetids_ndev->lock); @@ -939,7 +940,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, sn = net_generic(net, smc_net_id); pnettable = &sn->pnettable; - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(pnetelem, &pnettable->pnetlist, list) { if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) { /* get pnetid of netdev device */ @@ -948,7 +949,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev, break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } @@ -1129,7 +1130,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port) sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) && @@ -1139,7 +1140,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port) break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } @@ -1158,7 +1159,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev) sn = net_generic(&init_net, smc_net_id); pnettable = &sn->pnettable; - read_lock(&pnettable->lock); + mutex_lock(&pnettable->lock); list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) { if (tmp_pe->type == SMC_PNET_IB && !strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) { @@ -1167,7 +1168,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev) break; } } - read_unlock(&pnettable->lock); + mutex_unlock(&pnettable->lock); return rc; } diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h index 14039272f7e4263a3d86f29d0c4f048731af387f..80a88eea494918663f0263b9577c0ca49dcf1542 100644 --- a/net/smc/smc_pnet.h +++ b/net/smc/smc_pnet.h @@ -29,7 +29,7 @@ struct smc_link_group; * @pnetlist: List of PNETIDs */ struct smc_pnettable { - rwlock_t lock; + struct mutex lock; struct list_head pnetlist; }; diff --git a/net/socket.c b/net/socket.c index d52c265ad449b473dea408fb5a2ceb5bad9f6fa8..7d84c289e5ae7b096393cee1dca2ce29937efb94 100644 --- a/net/socket.c +++ b/net/socket.c @@ -543,10 +543,12 @@ static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr) if (!err && (iattr->ia_valid & ATTR_UID)) { struct socket *sock = SOCKET_I(d_inode(dentry)); - if (sock->sk) + if (sock->sk) { sock->sk->sk_uid = iattr->ia_uid; - else + sock->sk->sk_gid = iattr->ia_gid; + } else { err = -ENOENT; + } } return err; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 84c8a534029c9ea1056252f8420dd8d0fe64bed2..c5af31312e0cf59d36dcd47287df7b77d86f9220 100644 --- a/net/sunrpc/clnt.c +++ 
b/net/sunrpc/clnt.c @@ -2175,6 +2175,7 @@ call_transmit_status(struct rpc_task *task) * socket just returned a connection error, * then hold onto the transport lock. */ + case -ENOMEM: case -ENOBUFS: rpc_delay(task, HZ>>2); fallthrough; @@ -2258,6 +2259,7 @@ call_bc_transmit_status(struct rpc_task *task) case -ENOTCONN: case -EPIPE: break; + case -ENOMEM: case -ENOBUFS: rpc_delay(task, HZ>>2); fallthrough; @@ -2340,6 +2342,11 @@ call_status(struct rpc_task *task) case -EPIPE: case -EAGAIN: break; + case -ENFILE: + case -ENOBUFS: + case -ENOMEM: + rpc_delay(task, HZ>>2); + break; case -EIO: /* shutdown or soft timeout */ goto out_exit; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index eadc0ede928c320a7afae33e1821d7e1ce63f246..5f854ffbab925b20b49516a35740168c4a3701e0 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -599,9 +599,9 @@ static int __rpc_rmdir(struct inode *dir, struct dentry *dentry) dget(dentry); ret = simple_rmdir(dir, dentry); + d_drop(dentry); if (!ret) fsnotify_rmdir(dir, dentry); - d_delete(dentry); dput(dentry); return ret; } @@ -612,9 +612,9 @@ static int __rpc_unlink(struct inode *dir, struct dentry *dentry) dget(dentry); ret = simple_unlink(dir, dentry); + d_drop(dentry); if (!ret) fsnotify_unlink(dir, dentry); - d_delete(dentry); dput(dentry); return ret; } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index c045f63d11fa649bbc317906775faf0dbf56f42f..f0f55fbd13752903030e70685aa7c81386e1fbd4 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -186,11 +186,6 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, /* * Add new request to wait queue. - * - * Swapper tasks always get inserted at the head of the queue. - * This should avoid many nasty memory deadlocks and hopefully - * improve overall performance. - * Everyone else gets appended to the queue to ensure proper FIFO behavior. 
 */ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task, @@ -199,8 +194,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, INIT_LIST_HEAD(&task->u.tk_wait.timer_list); if (RPC_IS_PRIORITY(queue)) __rpc_add_wait_queue_priority(queue, task, queue_priority); - else if (RPC_IS_SWAPPER(task)) - list_add(&task->u.tk_wait.list, &queue->tasks[0]); else list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); task->tk_waitqueue = queue; @@ -1012,8 +1005,10 @@ int rpc_malloc(struct rpc_task *task) struct rpc_buffer *buf; gfp_t gfp = GFP_NOFS; + if (RPC_IS_ASYNC(task)) + gfp = GFP_NOWAIT | __GFP_NOWARN; if (RPC_IS_SWAPPER(task)) - gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN; + gfp |= __GFP_MEMALLOC; size += sizeof(struct rpc_buffer); if (size <= RPC_BUFFER_MAXSIZE) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index eba1714bf09ab33d0c00e734d702e4d326ac3831..6d5bb8bfed38b32ab80e64086d0c269cd2d491cc 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1091,7 +1091,9 @@ static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg, int flags, ret; *sentp = 0; - xdr_alloc_bvec(xdr, GFP_KERNEL); + ret = xdr_alloc_bvec(xdr, GFP_KERNEL); + if (ret < 0) + return ret; msg->msg_flags = MSG_MORE; ret = kernel_sendmsg(sock, msg, &rm, 1, rm.iov_len); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index cdf5cc67a005f988d8be05d15ba1d1b063d7664a..55b0c2b7493354abf87dda62a634eca84aa061ce 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -907,12 +907,7 @@ void xprt_connect(struct rpc_task *task) if (!xprt_lock_write(xprt, task)) return; - if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) { - trace_xprt_disconnect_cleanup(xprt); - xprt->ops->close(xprt); - } - - if (!xprt_connected(xprt)) { + if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) { task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie; rpc_sleep_on_timeout(&xprt->pending, task, NULL, xprt_request_timeout(task->tk_rqstp)); @@ -1332,17 +1327,6 @@ xprt_request_enqueue_transmit(struct rpc_task *task) INIT_LIST_HEAD(&req->rq_xmit2); goto out; } - } else if (RPC_IS_SWAPPER(task)) { - list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { - if (pos->rq_cong || pos->rq_bytes_sent) - continue; - if (RPC_IS_SWAPPER(pos->rq_task)) - continue; - /* Note: req is added _before_ pos */ - list_add_tail(&req->rq_xmit, &pos->rq_xmit); - INIT_LIST_HEAD(&req->rq_xmit2); - goto out; - } } else if (!req->rq_seqno) { list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { if (pos->rq_task->tk_owner != task->tk_owner) @@ -1661,12 +1645,15 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt) { struct rpc_rqst *req = ERR_PTR(-EAGAIN); + gfp_t gfp_mask = GFP_KERNEL; if (xprt->num_reqs >= xprt->max_reqs) goto out; ++xprt->num_reqs; spin_unlock(&xprt->reserve_lock); - req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS); + if (current->flags & PF_WQ_WORKER) + gfp_mask |= __GFP_NORETRY | __GFP_NOWARN; + req = kzalloc(sizeof(*req), gfp_mask); spin_lock(&xprt->reserve_lock); if (req != NULL) goto out; @@ -2059,7 +2046,14 @@ static void xprt_destroy(struct rpc_xprt *xprt) */ wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); + /* + * xprt_schedule_autodisconnect() can run after XPRT_LOCKED + * is cleared. We use ->transport_lock to ensure the mod_timer() + * can only run *before* del_timer_sync(), never after. 
+ */ + spin_lock(&xprt->transport_lock); del_timer_sync(&xprt->timer); + spin_unlock(&xprt->transport_lock); /* * Destroy sockets etc from the system workqueue so they can diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 8e2368a0c2a2970bfd079afc8bd46ab381146fe4..9cf10cfb85c6513209bca8aa05f3de15d7786081 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -519,7 +519,7 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) return; out_sleep: - task->tk_status = -EAGAIN; + task->tk_status = -ENOMEM; xprt_add_backlog(xprt, task); } @@ -572,8 +572,10 @@ xprt_rdma_allocate(struct rpc_task *task) gfp_t flags; flags = RPCRDMA_DEF_GFP; + if (RPC_IS_ASYNC(task)) + flags = GFP_NOWAIT | __GFP_NOWARN; if (RPC_IS_SWAPPER(task)) - flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN; + flags |= __GFP_MEMALLOC; if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize, flags)) diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 25554260a5931541288c2abb72184fa44e847462..dcc1992b14d76a9ecdfc5affa12ccb3a8258eec9 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -449,6 +449,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) IB_POLL_WORKQUEUE); if (IS_ERR(ep->re_attr.send_cq)) { rc = PTR_ERR(ep->re_attr.send_cq); + ep->re_attr.send_cq = NULL; goto out_destroy; } @@ -457,6 +458,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) IB_POLL_WORKQUEUE); if (IS_ERR(ep->re_attr.recv_cq)) { rc = PTR_ERR(ep->re_attr.recv_cq); + ep->re_attr.recv_cq = NULL; goto out_destroy; } ep->re_receive_count = 0; @@ -495,6 +497,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt) ep->re_pd = ib_alloc_pd(device, 0); if (IS_ERR(ep->re_pd)) { rc = PTR_ERR(ep->re_pd); + ep->re_pd = NULL; goto out_destroy; } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 16c7758e7bf30fafe667db49920264a502155c42..f57ccf5ae0f25c2dced9869c1a9e76cc8861bcb0 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -754,12 +754,12 @@ xs_stream_start_connect(struct sock_xprt *transport) /** * xs_nospace - handle transmit was incomplete * @req: pointer to RPC request + * @transport: pointer to struct sock_xprt * */ -static int xs_nospace(struct rpc_rqst *req) +static int xs_nospace(struct rpc_rqst *req, struct sock_xprt *transport) { - struct rpc_xprt *xprt = req->rq_xprt; - struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); + struct rpc_xprt *xprt = &transport->xprt; struct sock *sk = transport->inet; int ret = -EAGAIN; @@ -770,25 +770,49 @@ static int xs_nospace(struct rpc_rqst *req) /* Don't race with disconnect */ if (xprt_connected(xprt)) { + struct socket_wq *wq; + + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); + rcu_read_unlock(); + /* wait for more buffer space */ + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; xprt_wait_for_buffer_space(xprt); } else ret = -ENOTCONN; spin_unlock(&xprt->transport_lock); + return ret; +} - /* Race breaker in case memory is freed before above code is called */ - if (ret == -EAGAIN) { - struct socket_wq *wq; +static int xs_sock_nospace(struct rpc_rqst *req) +{ + struct sock_xprt *transport = + container_of(req->rq_xprt, struct sock_xprt, xprt); + struct sock *sk = transport->inet; + int ret = -EAGAIN; - rcu_read_lock(); - wq = rcu_dereference(sk->sk_wq); - set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); - rcu_read_unlock(); + 
lock_sock(sk); + if (!sock_writeable(sk)) + ret = xs_nospace(req, transport); + release_sock(sk); + return ret; +} - sk->sk_write_space(sk); - } +static int xs_stream_nospace(struct rpc_rqst *req) +{ + struct sock_xprt *transport = + container_of(req->rq_xprt, struct sock_xprt, xprt); + struct sock *sk = transport->inet; + int ret = -EAGAIN; + + lock_sock(sk); + if (!sk_stream_memory_free(sk)) + ret = xs_nospace(req, transport); + release_sock(sk); return ret; } @@ -847,7 +871,7 @@ static int xs_local_send_request(struct rpc_rqst *req) /* Close the stream if the previous transmission was incomplete */ if (xs_send_request_was_aborted(transport, req)) { - xs_close(xprt); + xprt_force_disconnect(xprt); return -ENOTCONN; } @@ -878,14 +902,14 @@ static int xs_local_send_request(struct rpc_rqst *req) case -ENOBUFS: break; case -EAGAIN: - status = xs_nospace(req); + status = xs_stream_nospace(req); break; default: dprintk("RPC: sendmsg returned unrecognized error %d\n", -status); fallthrough; case -EPIPE: - xs_close(xprt); + xprt_force_disconnect(xprt); status = -ENOTCONN; } @@ -954,7 +978,7 @@ static int xs_udp_send_request(struct rpc_rqst *req) /* Should we call xs_close() here? */ break; case -EAGAIN: - status = xs_nospace(req); + status = xs_sock_nospace(req); break; case -ENETUNREACH: case -ENOBUFS: @@ -1069,7 +1093,7 @@ static int xs_tcp_send_request(struct rpc_rqst *req) /* Should we call xs_close() here? */ break; case -EAGAIN: - status = xs_nospace(req); + status = xs_stream_nospace(req); break; case -ECONNRESET: case -ECONNREFUSED: @@ -1167,6 +1191,16 @@ static void xs_reset_transport(struct sock_xprt *transport) if (sk == NULL) return; + /* + * Make sure we're calling this in a context from which it is safe + * to call __fput_sync(). In practice that means rpciod and the + * system workqueue. 
+ */ + if (!(current->flags & PF_WQ_WORKER)) { + WARN_ON_ONCE(1); + set_bit(XPRT_CLOSE_WAIT, &xprt->state); + return; + } if (atomic_read(&transport->xprt.swapper)) sk_clear_memalloc(sk); @@ -1190,7 +1224,7 @@ static void xs_reset_transport(struct sock_xprt *transport) mutex_unlock(&transport->recv_mutex); trace_rpc_socket_close(xprt, sock); - fput(filp); + __fput_sync(filp); xprt_disconnect_done(xprt); } diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 12e535b43d887bac125cd925db0c3cbf376f8cce..6911f1cab2063fb5dd3842301c04b91d66c4ba1c 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -342,16 +342,18 @@ static int tipc_enable_bearer(struct net *net, const char *name, goto rejected; } - test_and_set_bit_lock(0, &b->up); - rcu_assign_pointer(tn->bearer_list[bearer_id], b); - if (skb) - tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); - + /* Create monitoring data before accepting activate messages */ if (tipc_mon_create(net, bearer_id)) { bearer_disable(net, b); + kfree_skb(skb); return -ENOMEM; } + test_and_set_bit_lock(0, &b->up); + rcu_assign_pointer(tn->bearer_list[bearer_id], b); + if (skb) + tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); + pr_info("Enabled bearer <%s>, priority %u\n", name, prio); return res; diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index d8a2f424786fcc4a2162b888e0f43597d4e05a6f..6f91b9a306dc39e8ce91977c5485948b2ff7cde7 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -2280,7 +2280,7 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr) struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; struct tipc_aead_key *skey = NULL; u16 key_gen = msg_key_gen(hdr); - u16 size = msg_data_sz(hdr); + u32 size = msg_data_sz(hdr); u8 *data = msg_data(hdr); unsigned int keylen; diff --git a/net/tipc/link.c b/net/tipc/link.c index fb835a3822f49d5d509b5b73f552d61328884197..7a353ff6284486de8d752bc3a476272b3e83668b 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -2245,6 +2245,11 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, break; case STATE_MSG: + /* Validate Gap ACK blocks, drop if invalid */ + glen = tipc_get_gap_ack_blks(&ga, l, hdr, true); + if (glen > dlen) + break; + l->rcv_nxt_state = msg_seqno(hdr) + 1; /* Update own tolerance if peer indicates a non-zero value */ @@ -2270,10 +2275,6 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, break; } - /* Receive Gap ACK blocks from peer if any */ - glen = tipc_get_gap_ack_blks(&ga, l, hdr, true); - if(glen > dlen) - break; tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr, &l->mon_state, l->bearer_id); diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index fe4edce459ad4a2d657bc854f5668e5defcd9b8d..a757fe28bcb5f064858c82cee9171cf4bd78e7ae 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -315,7 +315,7 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, pr_warn_ratelimited("Failed to remove binding %u,%u from %x\n", type, lower, node); } else { - pr_warn("Unrecognized name table message received\n"); + pr_warn_ratelimited("Unknown name table message received\n"); } return false; } diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index f6a6acef42235e170dfd1806e657022ce75bb093..54c5328f492d2e7bc3870a624cc0236e159168eb 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -931,7 +931,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg, list_for_each_entry(p, &sr->all_publ, all_publ) if (p->key == *last_key) 
break; - if (p->key != *last_key) + if (list_entry_is_head(p, &sr->all_publ, all_publ)) return -EPIPE; } else { p = list_first_entry(&sr->all_publ, diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ce957ee5383c4c8907645a3c5012935fd6c9640d..42283dc6c5b7c724cb40120050e6e9f90fd53730 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2846,7 +2846,8 @@ static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list) /* Try again later if dest link is congested */ if (tsk->cong_link_cnt) { - sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100)); + sk_reset_timer(sk, &sk->sk_timer, + jiffies + msecs_to_jiffies(100)); return; } /* Prepare SYN for retransmit */ @@ -3743,7 +3744,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb, if (p->key == *last_publ) break; } - if (p->key != *last_publ) { + if (list_entry_is_head(p, &tsk->publications, binding_sock)) { /* We never set seq or call nl_dump_check_consistent() * this means that setting prev_seq here will cause the * consistence check to fail in the netlink callback diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 5a8f1c65ce80ae2446329b77bf7cf6487339cd40..8cbd95630c442de1df2b67b995f7e9eb0d8681f0 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -328,7 +328,7 @@ static int tls_device_record_close(struct sock *sk, /* fill prepend */ tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]), record->len - prot->overhead_size, - record_type, prot->version); + record_type); return ret; } @@ -483,11 +483,13 @@ static int tls_push_data(struct sock *sk, copy = min_t(size_t, size, (pfrag->size - pfrag->offset)); copy = min_t(size_t, copy, (max_open_record_len - record->len)); - rc = tls_device_copy_data(page_address(pfrag->page) + - pfrag->offset, copy, msg_iter); - if (rc) - goto handle_error; - tls_append_frag(record, pfrag, copy); + if (copy) { + rc = tls_device_copy_data(page_address(pfrag->page) + + pfrag->offset, copy, msg_iter); + if (rc) + goto handle_error; + tls_append_frag(record, pfrag, copy); + } size -= copy; if (!size) { @@ -1010,7 +1012,7 @@ static void tls_device_attach(struct tls_context *ctx, struct sock *sk, int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) { - u16 nonce_size, tag_size, iv_size, rec_seq_size; + u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size; struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_prot_info *prot = &tls_ctx->prot_info; struct tls_record_info *start_marker_record; @@ -1051,6 +1053,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE; iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv; rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE; + salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE; rec_seq = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq; break; @@ -1071,6 +1074,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) prot->tag_size = tag_size; prot->overhead_size = prot->prepend_size + prot->tag_size; prot->iv_size = iv_size; + prot->salt_size = salt_size; ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL); if (!ctx->tx.iv) { diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 0d40016bf69e034bfa0b7cd3d5a52641b7d7eef0..24226254c8207e6906b46335fcc5687d77485ce3 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -49,7 +49,8 @@ static int tls_enc_record(struct aead_request *aead_req, struct crypto_aead *aead, char *aad, 
char *iv, __be64 rcd_sn, struct scatter_walk *in, - struct scatter_walk *out, int *in_len) + struct scatter_walk *out, int *in_len, + struct tls_prot_info *prot) { unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE]; struct scatterlist sg_in[3]; @@ -73,8 +74,7 @@ static int tls_enc_record(struct aead_request *aead_req, len -= TLS_CIPHER_AES_GCM_128_IV_SIZE; tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE, - (char *)&rcd_sn, sizeof(rcd_sn), buf[0], - TLS_1_2_VERSION); + (char *)&rcd_sn, buf[0], prot); memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE, TLS_CIPHER_AES_GCM_128_IV_SIZE); @@ -140,7 +140,7 @@ static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead, static int tls_enc_records(struct aead_request *aead_req, struct crypto_aead *aead, struct scatterlist *sg_in, struct scatterlist *sg_out, char *aad, char *iv, - u64 rcd_sn, int len) + u64 rcd_sn, int len, struct tls_prot_info *prot) { struct scatter_walk out, in; int rc; @@ -150,7 +150,7 @@ static int tls_enc_records(struct aead_request *aead_req, do { rc = tls_enc_record(aead_req, aead, aad, iv, - cpu_to_be64(rcd_sn), &in, &out, &len); + cpu_to_be64(rcd_sn), &in, &out, &len, prot); rcd_sn++; } while (rc == 0 && len); @@ -348,7 +348,8 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx, payload_len, sync_size, dummy_buf); if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv, - rcd_sn, sync_size + payload_len) < 0) + rcd_sn, sync_size + payload_len, + &tls_ctx->prot_info) < 0) goto free_nskb; complete_skb(nskb, skb, tcp_payload_offset); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 58d22d6b86ae6f04d4903032fda3b837c3a2157c..29c7503e4b281bb652adf634cbbed6565e278a55 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -423,6 +423,46 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, rc = -EFAULT; break; } + case TLS_CIPHER_SM4_GCM: { + struct tls12_crypto_info_sm4_gcm *sm4_gcm_info = + container_of(crypto_info, + struct tls12_crypto_info_sm4_gcm, info); + + if (len != sizeof(*sm4_gcm_info)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(sm4_gcm_info->iv, + cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE, + TLS_CIPHER_SM4_GCM_IV_SIZE); + memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq, + TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info))) + rc = -EFAULT; + break; + } + case TLS_CIPHER_SM4_CCM: { + struct tls12_crypto_info_sm4_ccm *sm4_ccm_info = + container_of(crypto_info, + struct tls12_crypto_info_sm4_ccm, info); + + if (len != sizeof(*sm4_ccm_info)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(sm4_ccm_info->iv, + cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE, + TLS_CIPHER_SM4_CCM_IV_SIZE); + memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq, + TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info))) + rc = -EFAULT; + break; + } default: rc = -EINVAL; } @@ -523,6 +563,12 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, case TLS_CIPHER_AES_CCM_128: optsize = sizeof(struct tls12_crypto_info_aes_ccm_128); break; + case TLS_CIPHER_SM4_GCM: + optsize = sizeof(struct tls12_crypto_info_sm4_gcm); + break; + case TLS_CIPHER_SM4_CCM: + optsize = sizeof(struct tls12_crypto_info_sm4_ccm); + break; default: rc = -EINVAL; goto err_crypto_info; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 
8cd011ea9fbb851769bf1db4196a99ffe9e11585..3ee8aa7ec04dc16d0d9bd83921d62db665b749ab 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -507,15 +507,21 @@ static int tls_do_encryption(struct sock *sk, int rc, iv_offset = 0; /* For CCM based ciphers, first byte of IV is a constant */ - if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) { + switch (prot->cipher_type) { + case TLS_CIPHER_AES_CCM_128: rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE; iv_offset = 1; + break; + case TLS_CIPHER_SM4_CCM: + rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE; + iv_offset = 1; + break; } memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv, prot->iv_size + prot->salt_size); - xor_iv_with_seq(prot->version, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq); + xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq); sge->offset += prot->prepend_size; sge->length -= prot->prepend_size; @@ -758,14 +764,13 @@ static int tls_push_record(struct sock *sk, int flags, sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]); tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size, - tls_ctx->tx.rec_seq, prot->rec_seq_size, - record_type, prot->version); + tls_ctx->tx.rec_seq, record_type, prot); tls_fill_prepend(tls_ctx, page_address(sg_page(&msg_en->sg.data[i])) + msg_en->sg.data[i].offset, msg_pl->sg.size + prot->tail_size, - record_type, prot->version); + record_type); tls_ctx->pending_open_record_frags = false; @@ -1467,10 +1472,16 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, aad = (u8 *)(sgout + n_sgout); iv = aad + prot->aad_size; - /* For CCM based ciphers, first byte of nonce+iv is always '2' */ - if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) { - iv[0] = 2; + /* For CCM based ciphers, first byte of nonce+iv is a constant */ + switch (prot->cipher_type) { + case TLS_CIPHER_AES_CCM_128: + iv[0] = TLS_AES_CCM_IV_B0_BYTE; iv_offset = 1; + break; + case TLS_CIPHER_SM4_CCM: + iv[0] = TLS_SM4_CCM_IV_B0_BYTE; + iv_offset = 1; + break; } /* Prepare IV */ @@ -1483,17 +1494,16 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, } if (prot->version == TLS_1_3_VERSION) memcpy(iv + iv_offset, tls_ctx->rx.iv, - crypto_aead_ivsize(ctx->aead_recv)); + prot->iv_size + prot->salt_size); else memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size); - xor_iv_with_seq(prot->version, iv + iv_offset, tls_ctx->rx.rec_seq); + xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq); /* Prepare AAD */ tls_make_aad(aad, rxm->full_len - prot->overhead_size + prot->tail_size, - tls_ctx->rx.rec_seq, prot->rec_seq_size, - ctx->control, prot->version); + tls_ctx->rx.rec_seq, ctx->control, prot); /* Prepare sgin */ sg_init_table(sgin, n_sgin); @@ -1994,6 +2004,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct sock *sk = sock->sk; struct sk_buff *skb; ssize_t copied = 0; + bool from_queue; int err = 0; long timeo; int chunk; @@ -2003,14 +2014,20 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK); - skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err); - if (!skb) - goto splice_read_end; + from_queue = !skb_queue_empty(&ctx->rx_list); + if (from_queue) { + skb = __skb_dequeue(&ctx->rx_list); + } else { + skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, + &err); + if (!skb) + goto splice_read_end; - err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false); - if (err < 0) { - tls_err_abort(sk, -EBADMSG); - goto splice_read_end; + err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, 
false); + if (err < 0) { + tls_err_abort(sk, -EBADMSG); + goto splice_read_end; + } } /* splice does not support reading control messages */ @@ -2026,8 +2043,17 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, if (copied < 0) goto splice_read_end; - if (likely(!(flags & MSG_PEEK))) - tls_sw_advance_skb(sk, skb, copied); + if (!from_queue) { + ctx->recv_pkt = NULL; + __strp_unpause(&ctx->strp); + } + if (chunk < rxm->full_len) { + __skb_queue_head(&ctx->rx_list, skb); + rxm->offset += len; + rxm->full_len -= len; + } else { + consume_skb(skb); + } splice_read_end: release_sock(sk); @@ -2415,6 +2441,40 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) cipher_name = "ccm(aes)"; break; } + case TLS_CIPHER_SM4_GCM: { + struct tls12_crypto_info_sm4_gcm *sm4_gcm_info; + + sm4_gcm_info = (void *)crypto_info; + nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE; + tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE; + iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE; + iv = sm4_gcm_info->iv; + rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE; + rec_seq = sm4_gcm_info->rec_seq; + keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE; + key = sm4_gcm_info->key; + salt = sm4_gcm_info->salt; + salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE; + cipher_name = "gcm(sm4)"; + break; + } + case TLS_CIPHER_SM4_CCM: { + struct tls12_crypto_info_sm4_ccm *sm4_ccm_info; + + sm4_ccm_info = (void *)crypto_info; + nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE; + tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE; + iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE; + iv = sm4_ccm_info->iv; + rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE; + rec_seq = sm4_ccm_info->rec_seq; + keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE; + key = sm4_ccm_info->key; + salt = sm4_ccm_info->salt; + salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE; + cipher_name = "ccm(sm4)"; + break; + } default: rc = -EINVAL; goto free_priv; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index b7edca89e0ba94b3d990db90e41fd44479260518..de3a1ffac26dd38203e3ab343a94b9e8cf366b80 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -116,24 +116,64 @@ #include "scm.h" +spinlock_t unix_table_locks[2 * UNIX_HASH_SIZE]; +EXPORT_SYMBOL_GPL(unix_table_locks); struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE]; EXPORT_SYMBOL_GPL(unix_socket_table); -DEFINE_SPINLOCK(unix_table_lock); -EXPORT_SYMBOL_GPL(unix_table_lock); static atomic_long_t unix_nr_socks; +/* SMP locking strategy: + * hash table is protected with spinlock unix_table_locks + * each socket state is protected by separate spin lock. 
+ */ -static struct hlist_head *unix_sockets_unbound(void *addr) +static unsigned int unix_unbound_hash(struct sock *sk) { - unsigned long hash = (unsigned long)addr; + unsigned long hash = (unsigned long)sk; hash ^= hash >> 16; hash ^= hash >> 8; - hash %= UNIX_HASH_SIZE; - return &unix_socket_table[UNIX_HASH_SIZE + hash]; + hash ^= sk->sk_type; + + return UNIX_HASH_SIZE + (hash & (UNIX_HASH_SIZE - 1)); +} + +static unsigned int unix_bsd_hash(struct inode *i) +{ + return i->i_ino & (UNIX_HASH_SIZE - 1); +} + +static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr, + int addr_len, int type) +{ + __wsum csum = csum_partial(sunaddr, addr_len, 0); + unsigned int hash; + + hash = (__force unsigned int)csum_fold(csum); + hash ^= hash >> 8; + hash ^= type; + + return hash & (UNIX_HASH_SIZE - 1); +} + +static void unix_table_double_lock(unsigned int hash1, unsigned int hash2) +{ + /* hash1 and hash2 is never the same because + * one is between 0 and UNIX_HASH_SIZE - 1, and + * another is between UNIX_HASH_SIZE and UNIX_HASH_SIZE * 2. + */ + if (hash1 > hash2) + swap(hash1, hash2); + + spin_lock(&unix_table_locks[hash1]); + spin_lock_nested(&unix_table_locks[hash2], SINGLE_DEPTH_NESTING); } -#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE) +static void unix_table_double_unlock(unsigned int hash1, unsigned int hash2) +{ + spin_unlock(&unix_table_locks[hash1]); + spin_unlock(&unix_table_locks[hash2]); +} #ifdef CONFIG_SECURITY_NETWORK static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) @@ -163,20 +203,6 @@ static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) } #endif /* CONFIG_SECURITY_NETWORK */ -/* - * SMP locking strategy: - * hash table is protected with spinlock unix_table_lock - * each socket state is protected by separate spin lock. - */ - -static inline unsigned int unix_hash_fold(__wsum n) -{ - unsigned int hash = (__force unsigned int)csum_fold(n); - - hash ^= hash>>8; - return hash&(UNIX_HASH_SIZE-1); -} - #define unix_peer(sk) (unix_sk(sk)->peer) static inline int unix_our_peer(struct sock *sk, struct sock *osk) @@ -213,6 +239,22 @@ struct sock *unix_peer_get(struct sock *s) } EXPORT_SYMBOL_GPL(unix_peer_get); +static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr, + int addr_len) +{ + struct unix_address *addr; + + addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL); + if (!addr) + return NULL; + + refcount_set(&addr->refcnt, 1); + addr->len = addr_len; + memcpy(addr->name, sunaddr, addr_len); + + return addr; +} + static inline void unix_release_addr(struct unix_address *addr) { if (refcount_dec_and_test(&addr->refcnt)) @@ -226,29 +268,29 @@ static inline void unix_release_addr(struct unix_address *addr) * - if started by zero, it is abstract name. */ -static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp) +static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len) { - *hashp = 0; - - if (len <= sizeof(short) || len > sizeof(*sunaddr)) + if (addr_len <= offsetof(struct sockaddr_un, sun_path) || + addr_len > sizeof(*sunaddr)) return -EINVAL; - if (!sunaddr || sunaddr->sun_family != AF_UNIX) + + if (sunaddr->sun_family != AF_UNIX) return -EINVAL; - if (sunaddr->sun_path[0]) { - /* - * This may look like an off by one error but it is a bit more - * subtle. 108 is the longest valid AF_UNIX path for a binding. - * sun_path[108] doesn't as such exist. 
However in kernel space - * we are guaranteed that it is a valid memory location in our - * kernel address buffer. - */ - ((char *)sunaddr)[len] = 0; - len = strlen(sunaddr->sun_path)+1+sizeof(short); - return len; - } - *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0)); - return len; + return 0; +} + +static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len) +{ + /* This may look like an off by one error but it is a bit more + * subtle. 108 is the longest valid AF_UNIX path for a binding. + * sun_path[108] doesn't as such exist. However in kernel space + * we are guaranteed that it is a valid memory location in our + * kernel address buffer because syscall functions always pass + * a pointer of struct sockaddr_storage which has a bigger buffer + * than 108. + */ + ((char *)sunaddr)[addr_len] = 0; } static void __unix_remove_socket(struct sock *sk) @@ -256,33 +298,43 @@ static void __unix_remove_socket(struct sock *sk) sk_del_node_init(sk); } -static void __unix_insert_socket(struct hlist_head *list, struct sock *sk) +static void __unix_insert_socket(struct sock *sk) { WARN_ON(!sk_unhashed(sk)); - sk_add_node(sk, list); + sk_add_node(sk, &unix_socket_table[sk->sk_hash]); } -static inline void unix_remove_socket(struct sock *sk) +static void __unix_set_addr_hash(struct sock *sk, struct unix_address *addr, + unsigned int hash) { - spin_lock(&unix_table_lock); __unix_remove_socket(sk); - spin_unlock(&unix_table_lock); + smp_store_release(&unix_sk(sk)->addr, addr); + + sk->sk_hash = hash; + __unix_insert_socket(sk); +} + +static void unix_remove_socket(struct sock *sk) +{ + spin_lock(&unix_table_locks[sk->sk_hash]); + __unix_remove_socket(sk); + spin_unlock(&unix_table_locks[sk->sk_hash]); } -static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk) +static void unix_insert_unbound_socket(struct sock *sk) { - spin_lock(&unix_table_lock); - __unix_insert_socket(list, sk); - spin_unlock(&unix_table_lock); + spin_lock(&unix_table_locks[sk->sk_hash]); + __unix_insert_socket(sk); + spin_unlock(&unix_table_locks[sk->sk_hash]); } static struct sock *__unix_find_socket_byname(struct net *net, struct sockaddr_un *sunname, - int len, int type, unsigned int hash) + int len, unsigned int hash) { struct sock *s; - sk_for_each(s, &unix_socket_table[hash ^ type]) { + sk_for_each(s, &unix_socket_table[hash]) { struct unix_sock *u = unix_sk(s); if (!net_eq(sock_net(s), net)) @@ -297,37 +349,35 @@ static struct sock *__unix_find_socket_byname(struct net *net, static inline struct sock *unix_find_socket_byname(struct net *net, struct sockaddr_un *sunname, - int len, int type, - unsigned int hash) + int len, unsigned int hash) { struct sock *s; - spin_lock(&unix_table_lock); - s = __unix_find_socket_byname(net, sunname, len, type, hash); + spin_lock(&unix_table_locks[hash]); + s = __unix_find_socket_byname(net, sunname, len, hash); if (s) sock_hold(s); - spin_unlock(&unix_table_lock); + spin_unlock(&unix_table_locks[hash]); return s; } static struct sock *unix_find_socket_byinode(struct inode *i) { + unsigned int hash = unix_bsd_hash(i); struct sock *s; - spin_lock(&unix_table_lock); - sk_for_each(s, - &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { + spin_lock(&unix_table_locks[hash]); + sk_for_each(s, &unix_socket_table[hash]) { struct dentry *dentry = unix_sk(s)->path.dentry; if (dentry && d_backing_inode(dentry) == i) { sock_hold(s); - goto found; + spin_unlock(&unix_table_locks[hash]); + return s; } } - s = NULL; -found: - spin_unlock(&unix_table_lock); - return 
s; + spin_unlock(&unix_table_locks[hash]); + return NULL; } /* Support code for asymmetrically connected dgram sockets @@ -800,19 +850,26 @@ static struct proto unix_proto = { static struct sock *unix_create1(struct net *net, struct socket *sock, int kern) { - struct sock *sk = NULL; struct unix_sock *u; + struct sock *sk; + int err; atomic_long_inc(&unix_nr_socks); - if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) - goto out; + if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) { + err = -ENFILE; + goto err; + } sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern); - if (!sk) - goto out; + + if (!sk) { + err = -ENOMEM; + goto err; + } sock_init_data(sock, sk); + sk->sk_hash = unix_unbound_hash(sk); sk->sk_allocation = GFP_KERNEL_ACCOUNT; sk->sk_write_space = unix_write_space; sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; @@ -828,21 +885,24 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern) init_waitqueue_head(&u->peer_wait); init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); memset(&u->scm_stat, 0, sizeof(struct scm_stat)); - unix_insert_socket(unix_sockets_unbound(sk), sk); -out: - if (sk == NULL) - atomic_long_dec(&unix_nr_socks); - else { - local_bh_disable(); - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); - local_bh_enable(); - } + unix_insert_unbound_socket(sk); + + local_bh_disable(); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); + local_bh_enable(); + return sk; + +err: + atomic_long_dec(&unix_nr_socks); + return ERR_PTR(err); } static int unix_create(struct net *net, struct socket *sock, int protocol, int kern) { + struct sock *sk; + if (protocol && protocol != PF_UNIX) return -EPROTONOSUPPORT; @@ -869,7 +929,11 @@ static int unix_create(struct net *net, struct socket *sock, int protocol, return -ESOCKTNOSUPPORT; } - return unix_create1(net, sock, kern) ? 
0 : -ENOMEM; + sk = unix_create1(net, sock, kern); + if (IS_ERR(sk)) + return PTR_ERR(sk); + + return 0; } static int unix_release(struct socket *sock) @@ -885,15 +949,90 @@ static int unix_release(struct socket *sock) return 0; } -static int unix_autobind(struct socket *sock) +static struct sock *unix_find_bsd(struct net *net, struct sockaddr_un *sunaddr, + int addr_len, int type) { - struct sock *sk = sock->sk; - struct net *net = sock_net(sk); + struct inode *inode; + struct path path; + struct sock *sk; + int err; + + unix_mkname_bsd(sunaddr, addr_len); + err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path); + if (err) + goto fail; + + err = path_permission(&path, MAY_WRITE); + if (err) + goto path_put; + + err = -ECONNREFUSED; + inode = d_backing_inode(path.dentry); + if (!S_ISSOCK(inode->i_mode)) + goto path_put; + + sk = unix_find_socket_byinode(inode); + if (!sk) + goto path_put; + + err = -EPROTOTYPE; + if (sk->sk_type == type) + touch_atime(&path); + else + goto sock_put; + + path_put(&path); + + return sk; + +sock_put: + sock_put(sk); +path_put: + path_put(&path); +fail: + return ERR_PTR(err); +} + +static struct sock *unix_find_abstract(struct net *net, + struct sockaddr_un *sunaddr, + int addr_len, int type) +{ + unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type); + struct dentry *dentry; + struct sock *sk; + + sk = unix_find_socket_byname(net, sunaddr, addr_len, hash); + if (!sk) + return ERR_PTR(-ECONNREFUSED); + + dentry = unix_sk(sk)->path.dentry; + if (dentry) + touch_atime(&unix_sk(sk)->path); + + return sk; +} + +static struct sock *unix_find_other(struct net *net, + struct sockaddr_un *sunaddr, + int addr_len, int type) +{ + struct sock *sk; + + if (sunaddr->sun_path[0]) + sk = unix_find_bsd(net, sunaddr, addr_len, type); + else + sk = unix_find_abstract(net, sunaddr, addr_len, type); + + return sk; +} + +static int unix_autobind(struct sock *sk) +{ + unsigned int new_hash, old_hash = sk->sk_hash; struct unix_sock *u = unix_sk(sk); - static u32 ordernum = 1; struct unix_address *addr; + u32 lastnum, ordernum; int err; - unsigned int retries = 0; err = mutex_lock_interruptible(&u->bindlock); if (err) @@ -903,220 +1042,180 @@ static int unix_autobind(struct socket *sock) goto out; err = -ENOMEM; - addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL); + addr = kzalloc(sizeof(*addr) + + offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL); if (!addr) goto out; + addr->len = offsetof(struct sockaddr_un, sun_path) + 6; addr->name->sun_family = AF_UNIX; refcount_set(&addr->refcnt, 1); + ordernum = prandom_u32(); + lastnum = ordernum & 0xFFFFF; retry: - addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short); - addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0)); + ordernum = (ordernum + 1) & 0xFFFFF; + sprintf(addr->name->sun_path + 1, "%05x", ordernum); - spin_lock(&unix_table_lock); - ordernum = (ordernum+1)&0xFFFFF; + new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type); + unix_table_double_lock(old_hash, new_hash); - if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, - addr->hash)) { - spin_unlock(&unix_table_lock); - /* - * __unix_find_socket_byname() may take long time if many names + if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len, + new_hash)) { + unix_table_double_unlock(old_hash, new_hash); + + /* __unix_find_socket_byname() may take long time if many names * are already in use. */ cond_resched(); - /* Give up if all names seems to be in use. 
*/ - if (retries++ == 0xFFFFF) { + + if (ordernum == lastnum) { + /* Give up if all names seems to be in use. */ err = -ENOSPC; - kfree(addr); + unix_release_addr(addr); goto out; } + goto retry; } - addr->hash ^= sk->sk_type; - __unix_remove_socket(sk); - smp_store_release(&u->addr, addr); - __unix_insert_socket(&unix_socket_table[addr->hash], sk); - spin_unlock(&unix_table_lock); + __unix_set_addr_hash(sk, addr, new_hash); + unix_table_double_unlock(old_hash, new_hash); err = 0; out: mutex_unlock(&u->bindlock); return err; } -static struct sock *unix_find_other(struct net *net, - struct sockaddr_un *sunname, int len, - int type, unsigned int hash, int *error) +static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr, + int addr_len) { - struct sock *u; - struct path path; - int err = 0; - - if (sunname->sun_path[0]) { - struct inode *inode; - err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path); - if (err) - goto fail; - inode = d_backing_inode(path.dentry); - err = inode_permission(inode, MAY_WRITE); - if (err) - goto put_fail; - - err = -ECONNREFUSED; - if (!S_ISSOCK(inode->i_mode)) - goto put_fail; - u = unix_find_socket_byinode(inode); - if (!u) - goto put_fail; - - if (u->sk_type == type) - touch_atime(&path); + umode_t mode = S_IFSOCK | + (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask()); + unsigned int new_hash, old_hash = sk->sk_hash; + struct unix_sock *u = unix_sk(sk); + struct unix_address *addr; + struct dentry *dentry; + struct path parent; + int err; - path_put(&path); + unix_mkname_bsd(sunaddr, addr_len); + addr_len = strlen(sunaddr->sun_path) + + offsetof(struct sockaddr_un, sun_path) + 1; - err = -EPROTOTYPE; - if (u->sk_type != type) { - sock_put(u); - goto fail; - } - } else { - err = -ECONNREFUSED; - u = unix_find_socket_byname(net, sunname, len, type, hash); - if (u) { - struct dentry *dentry; - dentry = unix_sk(u)->path.dentry; - if (dentry) - touch_atime(&unix_sk(u)->path); - } else - goto fail; - } - return u; - -put_fail: - path_put(&path); -fail: - *error = err; - return NULL; -} + addr = unix_create_addr(sunaddr, addr_len); + if (!addr) + return -ENOMEM; -static int unix_mknod(const char *sun_path, umode_t mode, struct path *res) -{ - struct dentry *dentry; - struct path path; - int err = 0; /* * Get the parent directory, calculate the hash for last * component. */ - dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0); - err = PTR_ERR(dentry); - if (IS_ERR(dentry)) - return err; + dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0); + if (IS_ERR(dentry)) { + err = PTR_ERR(dentry); + goto out; + } /* * All right, let's create it. 
*/ - err = security_path_mknod(&path, dentry, mode, 0); - if (!err) { - err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0); - if (!err) { - res->mnt = mntget(path.mnt); - res->dentry = dget(dentry); - } - } - done_path_create(&path, dentry); - return err; + err = security_path_mknod(&parent, dentry, mode, 0); + if (!err) + err = vfs_mknod(d_inode(parent.dentry), dentry, mode, 0); + if (err) + goto out_path; + err = mutex_lock_interruptible(&u->bindlock); + if (err) + goto out_unlink; + if (u->addr) + goto out_unlock; + + new_hash = unix_bsd_hash(d_backing_inode(dentry)); + unix_table_double_lock(old_hash, new_hash); + u->path.mnt = mntget(parent.mnt); + u->path.dentry = dget(dentry); + __unix_set_addr_hash(sk, addr, new_hash); + unix_table_double_unlock(old_hash, new_hash); + mutex_unlock(&u->bindlock); + done_path_create(&parent, dentry); + return 0; + +out_unlock: + mutex_unlock(&u->bindlock); + err = -EINVAL; +out_unlink: + /* failed after successful mknod? unlink what we'd created... */ + vfs_unlink(d_inode(parent.dentry), dentry, NULL); +out_path: + done_path_create(&parent, dentry); +out: + unix_release_addr(addr); + return err == -EEXIST ? -EADDRINUSE : err; } -static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr, + int addr_len) { - struct sock *sk = sock->sk; - struct net *net = sock_net(sk); + unsigned int new_hash, old_hash = sk->sk_hash; struct unix_sock *u = unix_sk(sk); - struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; - char *sun_path = sunaddr->sun_path; - int err; - unsigned int hash; struct unix_address *addr; - struct hlist_head *list; - struct path path = { }; - - err = -EINVAL; - if (addr_len < offsetofend(struct sockaddr_un, sun_family) || - sunaddr->sun_family != AF_UNIX) - goto out; + int err; - if (addr_len == sizeof(short)) { - err = unix_autobind(sock); - goto out; - } + addr = unix_create_addr(sunaddr, addr_len); + if (!addr) + return -ENOMEM; - err = unix_mkname(sunaddr, addr_len, &hash); - if (err < 0) + err = mutex_lock_interruptible(&u->bindlock); + if (err) goto out; - addr_len = err; - if (sun_path[0]) { - umode_t mode = S_IFSOCK | - (SOCK_INODE(sock)->i_mode & ~current_umask()); - err = unix_mknod(sun_path, mode, &path); - if (err) { - if (err == -EEXIST) - err = -EADDRINUSE; - goto out; - } + if (u->addr) { + err = -EINVAL; + goto out_mutex; } - err = mutex_lock_interruptible(&u->bindlock); - if (err) - goto out_put; + new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type); + unix_table_double_lock(old_hash, new_hash); - err = -EINVAL; - if (u->addr) - goto out_up; + if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len, + new_hash)) + goto out_spin; - err = -ENOMEM; - addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL); - if (!addr) - goto out_up; - - memcpy(addr->name, sunaddr, addr_len); - addr->len = addr_len; - addr->hash = hash ^ sk->sk_type; - refcount_set(&addr->refcnt, 1); + __unix_set_addr_hash(sk, addr, new_hash); + unix_table_double_unlock(old_hash, new_hash); + mutex_unlock(&u->bindlock); + return 0; - if (sun_path[0]) { - addr->hash = UNIX_HASH_SIZE; - hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1); - spin_lock(&unix_table_lock); - u->path = path; - list = &unix_socket_table[hash]; - } else { - spin_lock(&unix_table_lock); - err = -EADDRINUSE; - if (__unix_find_socket_byname(net, sunaddr, addr_len, - sk->sk_type, hash)) { - unix_release_addr(addr); - goto out_unlock; - } +out_spin: + 
unix_table_double_unlock(old_hash, new_hash); + err = -EADDRINUSE; +out_mutex: + mutex_unlock(&u->bindlock); +out: + unix_release_addr(addr); + return err; +} - list = &unix_socket_table[addr->hash]; - } +static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) +{ + struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; + struct sock *sk = sock->sk; + int err; - err = 0; - __unix_remove_socket(sk); - smp_store_release(&u->addr, addr); - __unix_insert_socket(list, sk); + if (addr_len == offsetof(struct sockaddr_un, sun_path) && + sunaddr->sun_family == AF_UNIX) + return unix_autobind(sk); -out_unlock: - spin_unlock(&unix_table_lock); -out_up: - mutex_unlock(&u->bindlock); -out_put: + err = unix_validate_addr(sunaddr, addr_len); if (err) - path_put(&path); -out: + return err; + + if (sunaddr->sun_path[0]) + err = unix_bind_bsd(sk, sunaddr, addr_len); + else + err = unix_bind_abstract(sk, sunaddr, addr_len); + return err; } @@ -1152,7 +1251,6 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, struct net *net = sock_net(sk); struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; struct sock *other; - unsigned int hash; int err; err = -EINVAL; @@ -1160,19 +1258,23 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, goto out; if (addr->sa_family != AF_UNSPEC) { - err = unix_mkname(sunaddr, alen, &hash); - if (err < 0) + err = unix_validate_addr(sunaddr, alen); + if (err) goto out; - alen = err; if (test_bit(SOCK_PASSCRED, &sock->flags) && - !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0) - goto out; + !unix_sk(sk)->addr) { + err = unix_autobind(sk); + if (err) + goto out; + } restart: - other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err); - if (!other) + other = unix_find_other(net, sunaddr, alen, sock->type); + if (IS_ERR(other)) { + err = PTR_ERR(other); goto out; + } unix_state_double_lock(sk, other); @@ -1257,19 +1359,19 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, struct sock *newsk = NULL; struct sock *other = NULL; struct sk_buff *skb = NULL; - unsigned int hash; int st; int err; long timeo; - err = unix_mkname(sunaddr, addr_len, &hash); - if (err < 0) + err = unix_validate_addr(sunaddr, addr_len); + if (err) goto out; - addr_len = err; - if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && - (err = unix_autobind(sock)) != 0) - goto out; + if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) { + err = unix_autobind(sk); + if (err) + goto out; + } timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); @@ -1278,12 +1380,15 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, we will have to recheck all again in any case. */ - err = -ENOMEM; - /* create new sock for complete connection */ newsk = unix_create1(sock_net(sk), NULL, 0); - if (newsk == NULL) + if (IS_ERR(newsk)) { + err = PTR_ERR(newsk); + newsk = NULL; goto out; + } + + err = -ENOMEM; /* Allocate skb for sending to listening sock */ skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL); @@ -1292,9 +1397,12 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, restart: /* Find listening sock. 
*/ - other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err); - if (!other) + other = unix_find_other(net, sunaddr, addr_len, sk->sk_type); + if (IS_ERR(other)) { + err = PTR_ERR(other); + other = NULL; goto out; + } /* Latch state of peer */ unix_state_lock(other); @@ -1382,9 +1490,9 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, * * The contents of *(otheru->addr) and otheru->path * are seen fully set up here, since we have found - * otheru in hash under unix_table_lock. Insertion + * otheru in hash under unix_table_locks. Insertion * into the hash chain we'd found it in had been done - * in an earlier critical area protected by unix_table_lock, + * in an earlier critical area protected by unix_table_locks, * the same one where we'd set *(otheru->addr) contents, * as well as otheru->path and otheru->addr itself. * @@ -1533,7 +1641,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) if (!addr) { sunaddr->sun_family = AF_UNIX; sunaddr->sun_path[0] = 0; - err = sizeof(short); + err = offsetof(struct sockaddr_un, sun_path); } else { err = addr->len; memcpy(sunaddr, addr->name, addr->len); @@ -1689,9 +1797,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, struct unix_sock *u = unix_sk(sk); DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name); struct sock *other = NULL; - int namelen = 0; /* fake GCC */ int err; - unsigned int hash; struct sk_buff *skb; long timeo; struct scm_cookie scm; @@ -1708,10 +1814,9 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, goto out; if (msg->msg_namelen) { - err = unix_mkname(sunaddr, msg->msg_namelen, &hash); - if (err < 0) + err = unix_validate_addr(sunaddr, msg->msg_namelen); + if (err) goto out; - namelen = err; } else { sunaddr = NULL; err = -ENOTCONN; @@ -1720,9 +1825,11 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, goto out; } - if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr - && (err = unix_autobind(sock)) != 0) - goto out; + if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) { + err = unix_autobind(sk); + if (err) + goto out; + } err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) @@ -1762,10 +1869,13 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, if (sunaddr == NULL) goto out_free; - other = unix_find_other(net, sunaddr, namelen, sk->sk_type, - hash, &err); - if (other == NULL) + other = unix_find_other(net, sunaddr, msg->msg_namelen, + sk->sk_type); + if (IS_ERR(other)) { + err = PTR_ERR(other); + other = NULL; goto out_free; + } } if (sk_filter(other, skb) < 0) { @@ -2811,7 +2921,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock, #define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1) #define get_bucket(x) ((x) >> BUCKET_SPACE) -#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1)) +#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1)) #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos) @@ -2835,7 +2945,7 @@ static struct sock *unix_next_socket(struct seq_file *seq, struct sock *sk, loff_t *pos) { - unsigned long bucket; + unsigned long bucket = get_bucket(*pos); while (sk > (struct sock *)SEQ_START_TOKEN) { sk = sk_next(sk); @@ -2846,12 +2956,13 @@ static struct sock *unix_next_socket(struct seq_file *seq, } do { + spin_lock(&unix_table_locks[bucket]); sk = unix_from_bucket(seq, pos); if (sk) return sk; next_bucket: - bucket = 
get_bucket(*pos) + 1; + spin_unlock(&unix_table_locks[bucket++]); *pos = set_bucket_offset(bucket, 1); } while (bucket < ARRAY_SIZE(unix_socket_table)); @@ -2859,10 +2970,7 @@ static struct sock *unix_next_socket(struct seq_file *seq, } static void *unix_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(unix_table_lock) { - spin_lock(&unix_table_lock); - if (!*pos) return SEQ_START_TOKEN; @@ -2879,9 +2987,11 @@ static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos) } static void unix_seq_stop(struct seq_file *seq, void *v) - __releases(unix_table_lock) { - spin_unlock(&unix_table_lock); + struct sock *sk = v; + + if (sk) + spin_unlock(&unix_table_locks[sk->sk_hash]); } static int unix_seq_show(struct seq_file *seq, void *v) @@ -2906,15 +3016,16 @@ static int unix_seq_show(struct seq_file *seq, void *v) (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING), sock_i_ino(s)); - if (u->addr) { // under unix_table_lock here + if (u->addr) { // under unix_table_locks here int i, len; seq_putc(seq, ' '); i = 0; - len = u->addr->len - sizeof(short); - if (!UNIX_ABSTRACT(s)) + len = u->addr->len - + offsetof(struct sockaddr_un, sun_path); + if (u->addr->name->sun_path[0]) { len--; - else { + } else { seq_putc(seq, '@'); i++; } @@ -2977,10 +3088,13 @@ static struct pernet_operations unix_net_ops = { static int __init af_unix_init(void) { - int rc = -1; + int i, rc = -1; BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb)); + for (i = 0; i < 2 * UNIX_HASH_SIZE; i++) + spin_lock_init(&unix_table_locks[i]); + rc = proto_register(&unix_proto, 1); if (rc != 0) { pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__); diff --git a/net/unix/diag.c b/net/unix/diag.c index 9ff64f9df1f3bbfed3cb2d4834d53006126db20f..c522ab94d0c8d18163992b4ea7a0255e23092b73 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -13,13 +13,14 @@ static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) { - /* might or might not have unix_table_lock */ + /* might or might not have unix_table_locks */ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); if (!addr) return 0; - return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short), + return nla_put(nlskb, UNIX_DIAG_NAME, + addr->len - offsetof(struct sockaddr_un, sun_path), addr->name->sun_path); } @@ -203,13 +204,13 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) s_slot = cb->args[0]; num = s_num = cb->args[1]; - spin_lock(&unix_table_lock); for (slot = s_slot; slot < ARRAY_SIZE(unix_socket_table); s_num = 0, slot++) { struct sock *sk; num = 0; + spin_lock(&unix_table_locks[slot]); sk_for_each(sk, &unix_socket_table[slot]) { if (!net_eq(sock_net(sk), net)) continue; @@ -220,14 +221,16 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) if (sk_diag_dump(sk, skb, req, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - NLM_F_MULTI) < 0) + NLM_F_MULTI) < 0) { + spin_unlock(&unix_table_locks[slot]); goto done; + } next: num++; } + spin_unlock(&unix_table_locks[slot]); } done: - spin_unlock(&unix_table_lock); cb->args[0] = slot; cb->args[1] = num; @@ -236,21 +239,19 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) static struct sock *unix_lookup_by_ino(unsigned int ino) { - int i; struct sock *sk; + int i; - spin_lock(&unix_table_lock); for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) { + spin_lock(&unix_table_locks[i]); sk_for_each(sk, &unix_socket_table[i]) if (ino == sock_i_ino(sk)) { 
sock_hold(sk); - spin_unlock(&unix_table_lock); - + spin_unlock(&unix_table_locks[i]); return sk; } + spin_unlock(&unix_table_locks[i]); } - - spin_unlock(&unix_table_lock); return NULL; } diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 12e2ddaf887f204a091f157905f270046fc384a6..d45d5366115a769b21bfc1db5a67f7d53c3fa9b8 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -192,8 +192,11 @@ void wait_for_unix_gc(void) { /* If number of inflight sockets is insane, * force a garbage collect right now. + * Paired with the WRITE_ONCE() in unix_inflight(), + * unix_notinflight() and gc_in_progress(). */ - if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) + if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC && + !READ_ONCE(gc_in_progress)) unix_gc(); wait_event(unix_gc_wait, gc_in_progress == false); } @@ -213,7 +216,9 @@ void unix_gc(void) if (gc_in_progress) goto out; - gc_in_progress = true; + /* Paired with READ_ONCE() in wait_for_unix_gc(). */ + WRITE_ONCE(gc_in_progress, true); + /* First, select candidates for garbage collection. Only * in-flight sockets are considered, and from those only ones * which don't have any external reference. @@ -299,7 +304,10 @@ void unix_gc(void) /* All candidates should have been detached by now. */ BUG_ON(!list_empty(&gc_candidates)); - gc_in_progress = false; + + /* Paired with READ_ONCE() in wait_for_unix_gc(). */ + WRITE_ONCE(gc_in_progress, false); + wake_up(&unix_gc_wait); out: diff --git a/net/unix/scm.c b/net/unix/scm.c index 052ae709ce2899e74ebb005d8886e42ccbf8b849..aa27a02478dc1a7e4022f77e6ea7ac55f40b95c7 100644 --- a/net/unix/scm.c +++ b/net/unix/scm.c @@ -60,7 +60,8 @@ void unix_inflight(struct user_struct *user, struct file *fp) } else { BUG_ON(list_empty(&u->link)); } - unix_tot_inflight++; + /* Paired with READ_ONCE() in wait_for_unix_gc() */ + WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1); } user->unix_inflight++; spin_unlock(&unix_gc_lock); @@ -80,7 +81,8 @@ void unix_notinflight(struct user_struct *user, struct file *fp) if (atomic_long_dec_and_test(&u->inflight)) list_del_init(&u->link); - unix_tot_inflight--; + /* Paired with READ_ONCE() in wait_for_unix_gc() */ + WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1); } user->unix_inflight--; spin_unlock(&unix_gc_lock); diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 7fe36dbcbe1875bc64e0a4326e37884e8be81d20..c59806253a65abc62dfe763b9c123db77f5484bb 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -333,7 +333,8 @@ void vsock_remove_sock(struct vsock_sock *vsk) } EXPORT_SYMBOL_GPL(vsock_remove_sock); -void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) +void vsock_for_each_connected_socket(struct vsock_transport *transport, + void (*fn)(struct sock *sk)) { int i; @@ -342,8 +343,12 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { struct vsock_sock *vsk; list_for_each_entry(vsk, &vsock_connected_table[i], - connected_table) + connected_table) { + if (vsk->transport != transport) + continue; + fn(sk_vsock(vsk)); + } } spin_unlock_bh(&vsock_table_lock); @@ -1357,6 +1362,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? 
TCP_CLOSING : TCP_CLOSE; sock->state = SS_UNCONNECTED; vsock_transport_cancel_pkt(vsk); + vsock_remove_connected(vsk); goto out_wait; } else if (timeout == 0) { err = -ETIMEDOUT; diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 3a056f8affd1d20265cb256bb2b6eeddf8e09310..e131121533ad93d718a826f4a634aa12ce2aa476 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c @@ -24,6 +24,7 @@ static struct workqueue_struct *virtio_vsock_workqueue; static struct virtio_vsock __rcu *the_virtio_vsock; static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */ +static struct virtio_transport virtio_transport; /* forward declaration */ struct virtio_vsock { struct virtio_device *vdev; @@ -383,7 +384,8 @@ static void virtio_vsock_event_handle(struct virtio_vsock *vsock, switch (le32_to_cpu(event->id)) { case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET: virtio_vsock_update_guest_cid(vsock); - vsock_for_each_connected_socket(virtio_vsock_reset_sock); + vsock_for_each_connected_socket(&virtio_transport.transport, + virtio_vsock_reset_sock); break; } } @@ -635,7 +637,8 @@ static void virtio_vsock_remove(struct virtio_device *vdev) synchronize_rcu(); /* Reset all connected sockets when the device disappear */ - vsock_for_each_connected_socket(virtio_vsock_reset_sock); + vsock_for_each_connected_socket(&virtio_transport.transport, + virtio_vsock_reset_sock); /* Stop all work handlers to make sure no one is accessing the device, * so we can safely call vdev->config->reset(). diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 1c9ecb18b8e644db60264354918a0ad59f12cf05..a9ca95a0fcdda6f1b39b2787efd3f2cea5b14762 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -75,6 +75,8 @@ static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID; static int PROTOCOL_OVERRIDE = -1; +static struct vsock_transport vmci_transport; /* forward declaration */ + /* Helper function to convert from a VMCI error code to a VSock error code. 
*/ static s32 vmci_transport_error_to_vsock_error(s32 vmci_error) @@ -882,7 +884,8 @@ static void vmci_transport_qp_resumed_cb(u32 sub_id, const struct vmci_event_data *e_data, void *client_data) { - vsock_for_each_connected_socket(vmci_transport_handle_detach); + vsock_for_each_connected_socket(&vmci_transport, + vmci_transport_handle_detach); } static void vmci_transport_recv_pkt_work(struct work_struct *work) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8fb0478888fb29de7fe602727df86ddf79e06e61..12f44ad4e0d8ebbfcaf3d4500db073fbb0c32753 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -475,7 +475,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { .len = IEEE80211_MAX_MESH_ID_LEN }, [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT, - [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 }, + /* allow 3 for NUL-termination, we used to declare this NLA_STRING */ + [NL80211_ATTR_REG_ALPHA2] = NLA_POLICY_RANGE(NLA_BINARY, 2, 3), [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED }, [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 }, @@ -12930,6 +12931,9 @@ static int handle_nan_filter(struct nlattr *attr_filter, i = 0; nla_for_each_nested(attr, attr_filter, rem) { filter[i].filter = nla_memdup(attr, GFP_KERNEL); + if (!filter[i].filter) + goto err; + filter[i].len = nla_len(attr); i++; } @@ -12942,6 +12946,15 @@ static int handle_nan_filter(struct nlattr *attr_filter, } return 0; + +err: + i = 0; + nla_for_each_nested(attr, attr_filter, rem) { + kfree(filter[i].filter); + i++; + } + kfree(filter); + return -ENOMEM; } static int nl80211_nan_add_func(struct sk_buff *skb, @@ -17115,7 +17128,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev, wdev->chandef = *chandef; wdev->preset_chandef = *chandef; - if (wdev->iftype == NL80211_IFTYPE_STATION && + if ((wdev->iftype == NL80211_IFTYPE_STATION || + wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) && !WARN_ON(!wdev->current_bss)) cfg80211_update_assoc_bss_entry(wdev, chandef->chan); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index fd614a5a00b42093ae00def95f548384ba3aa3cc..6dc9b7e22b71dc88d907e6113f35f6dc8a946c8a 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -702,8 +702,12 @@ static bool cfg80211_find_ssid_match(struct cfg80211_colocated_ap *ap, for (i = 0; i < request->n_ssids; i++) { /* wildcard ssid in the scan request */ - if (!request->ssids[i].ssid_len) + if (!request->ssids[i].ssid_len) { + if (ap->multi_bss && !ap->transmitted_bssid) + continue; + return true; + } if (ap->ssid_len && ap->ssid_len == request->ssids[i].ssid_len) { @@ -830,6 +834,9 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev) !cfg80211_find_ssid_match(ap, request)) continue; + if (!request->n_ssids && ap->multi_bss && !ap->transmitted_bssid) + continue; + cfg80211_scan_req_add_chan(request, chan, true); memcpy(scan_6ghz_params->bssid, ap->bssid, ETH_ALEN); scan_6ghz_params->short_ssid = ap->short_ssid; @@ -1961,11 +1968,13 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, /* this is a nontransmitting bss, we need to add it to * transmitting bss' list if it is not there */ + spin_lock_bh(&rdev->bss_lock); if (cfg80211_add_nontrans_list(non_tx_data->tx_bss, &res->pub)) { if (__cfg80211_unlink_bss(rdev, res)) rdev->bss_generation++; } + spin_unlock_bh(&rdev->bss_lock); } trace_cfg80211_return_bss(&res->pub); diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 03ed170b8125e5038e1714acd9b0995e26b3cb9d..d231d4620c38f8c63aa938ec5f92a3d21c79e3b2 
100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1775,10 +1775,15 @@ void x25_kill_by_neigh(struct x25_neigh *nb) write_lock_bh(&x25_list_lock); - sk_for_each(s, &x25_list) - if (x25_sk(s)->neighbour == nb) + sk_for_each(s, &x25_list) { + if (x25_sk(s)->neighbour == nb) { + write_unlock_bh(&x25_list_lock); + lock_sock(s); x25_disconnect(s, ENETUNREACH, 0, 0); - + release_sock(s); + write_lock_bh(&x25_list_lock); + } + } write_unlock_bh(&x25_list_lock); /* Remove any related forwards */ diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index e843b0d9e2a61c16551be51f69bc441ccad4f921..c255aac6b816b4911b435ae76cd3377b88f2d9dd 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -223,6 +223,9 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, if (x->encap || x->tfcpad) return -EINVAL; + if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND)) + return -EINVAL; + dev = dev_get_by_index(net, xuo->ifindex); if (!dev) { if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) { @@ -261,7 +264,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, xso->dev = dev; xso->real_dev = dev; xso->num_exthdrs = 1; - xso->flags = xuo->flags; + /* Don't forward bit that is not implemented */ + xso->flags = xuo->flags & ~XFRM_OFFLOAD_IPV6; err = dev->xfrmdev_ops->xdo_dev_state_add(x); if (err) { diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index e9ce23343f5cac44703f5394fc0f8c2baee82e49..da518b4ca84c6cad59903be1fcc0cd6c4dab1846 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -303,7 +303,10 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; - icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + if (skb->len > 1280) + icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); + else + goto xmit; } else { if (!(ip_hdr(skb)->frag_off & htons(IP_DF))) goto xmit; @@ -643,11 +646,16 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev, struct netlink_ext_ack *extack) { struct net *net = dev_net(dev); - struct xfrm_if_parms p; + struct xfrm_if_parms p = {}; struct xfrm_if *xi; int err; xfrmi_netlink_parms(data, &p); + if (!p.if_id) { + NL_SET_ERR_MSG(extack, "if_id must be non zero"); + return -EINVAL; + } + xi = xfrmi_locate(net, &p); if (xi) return -EEXIST; @@ -672,9 +680,14 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[], { struct xfrm_if *xi = netdev_priv(dev); struct net *net = xi->net; - struct xfrm_if_parms p; + struct xfrm_if_parms p = {}; xfrmi_netlink_parms(data, &p); + if (!p.if_id) { + NL_SET_ERR_MSG(extack, "if_id must be non zero"); + return -EINVAL; + } + xi = xfrmi_locate(net, &p); if (!xi) { xi = netdev_priv(dev); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 3a9831c05ec71c7733be2b1742a82115a1a34cdf..3d0ffd9270041d2840e96670575cbb407dcf7a46 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -31,8 +31,10 @@ #include #include #include +#include #include #include +#include #if IS_ENABLED(CONFIG_IPV6_MIP6) #include #endif @@ -3293,7 +3295,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse) fl4->flowi4_proto = iph->protocol; fl4->daddr = reverse ? iph->saddr : iph->daddr; fl4->saddr = reverse ? 
iph->daddr : iph->saddr; - fl4->flowi4_tos = iph->tos; + fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK; if (!ip_is_fragment(iph)) { switch (iph->protocol) { @@ -3455,6 +3457,26 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse) } fl6->flowi6_proto = nexthdr; return; + case IPPROTO_GRE: + if (!onlyproto && + (nh + offset + 12 < skb->data || + pskb_may_pull(skb, nh + offset + 12 - skb->data))) { + struct gre_base_hdr *gre_hdr; + __be32 *gre_key; + + nh = skb_network_header(skb); + gre_hdr = (struct gre_base_hdr *)(nh + offset); + gre_key = (__be32 *)(gre_hdr + 1); + + if (gre_hdr->flags & GRE_KEY) { + if (gre_hdr->flags & GRE_CSUM) + gre_key++; + fl6->fl6_gre_key = *gre_key; + } + } + fl6->flowi6_proto = nexthdr; + return; + #if IS_ENABLED(CONFIG_IPV6_MIP6) case IPPROTO_MH: offset += ipv6_optlen(exthdr); @@ -4265,7 +4287,7 @@ static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, } static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel, - u8 dir, u8 type, struct net *net) + u8 dir, u8 type, struct net *net, u32 if_id) { struct xfrm_policy *pol, *ret = NULL; struct hlist_head *chain; @@ -4274,7 +4296,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector * spin_lock_bh(&net->xfrm.xfrm_policy_lock); chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir); hlist_for_each_entry(pol, chain, bydst) { - if (xfrm_migrate_selector_match(sel, &pol->selector) && + if ((if_id == 0 || pol->if_id == if_id) && + xfrm_migrate_selector_match(sel, &pol->selector) && pol->type == type) { ret = pol; priority = ret->priority; @@ -4286,7 +4309,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector * if ((pol->priority >= priority) && ret) break; - if (xfrm_migrate_selector_match(sel, &pol->selector) && + if ((if_id == 0 || pol->if_id == if_id) && + xfrm_migrate_selector_match(sel, &pol->selector) && pol->type == type) { ret = pol; break; @@ -4402,7 +4426,7 @@ static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate) int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_migrate, struct xfrm_kmaddress *k, struct net *net, - struct xfrm_encap_tmpl *encap) + struct xfrm_encap_tmpl *encap, u32 if_id) { int i, err, nx_cur = 0, nx_new = 0; struct xfrm_policy *pol = NULL; @@ -4421,14 +4445,14 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, } /* Stage 1 - find policy */ - if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { + if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) { err = -ENOENT; goto out; } /* Stage 2 - find and update state(s) */ for (i = 0, mp = m; i < num_migrate; i++, mp++) { - if ((x = xfrm_migrate_state_find(mp, net))) { + if ((x = xfrm_migrate_state_find(mp, net, if_id))) { x_cur[nx_cur] = x; nx_cur++; xc = xfrm_state_migrate(x, mp, encap); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index c158e70e8ae1020eb63dd09b632dd38a4037138a..3d75a4f103601cb23d270d633975662d7f0b29fa 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1542,9 +1542,6 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, memcpy(&x->mark, &orig->mark, sizeof(x->mark)); memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark)); - if (xfrm_init_state(x) < 0) - goto error; - x->props.flags = orig->props.flags; x->props.extra_flags = orig->props.extra_flags; @@ -1566,7 +1563,8 @@ static struct xfrm_state 
*xfrm_state_clone(struct xfrm_state *orig, return NULL; } -struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net) +struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net, + u32 if_id) { unsigned int h; struct xfrm_state *x = NULL; @@ -1582,6 +1580,8 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n continue; if (m->reqid && x->props.reqid != m->reqid) continue; + if (if_id != 0 && x->if_id != if_id) + continue; if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr, m->old_family) || !xfrm_addr_equal(&x->props.saddr, &m->old_saddr, @@ -1597,6 +1597,8 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n if (x->props.mode != m->mode || x->id.proto != m->proto) continue; + if (if_id != 0 && x->if_id != if_id) + continue; if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr, m->old_family) || !xfrm_addr_equal(&x->props.saddr, &m->old_saddr, @@ -1623,6 +1625,11 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x, if (!xc) return NULL; + xc->props.family = m->new_family; + + if (xfrm_init_state(xc) < 0) + goto error; + memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr)); memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr)); @@ -2516,7 +2523,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x) } EXPORT_SYMBOL(xfrm_state_delete_tunnel); -u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu) +u32 xfrm_state_mtu(struct xfrm_state *x, int mtu) { const struct xfrm_type *type = READ_ONCE(x->type); struct crypto_aead *aead; @@ -2547,17 +2554,7 @@ u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu) return ((mtu - x->props.header_len - crypto_aead_authsize(aead) - net_adj) & ~(blksize - 1)) + net_adj - 2; } -EXPORT_SYMBOL_GPL(__xfrm_state_mtu); - -u32 xfrm_state_mtu(struct xfrm_state *x, int mtu) -{ - mtu = __xfrm_state_mtu(x, mtu); - - if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU) - return IPV6_MIN_MTU; - - return mtu; -} +EXPORT_SYMBOL_GPL(xfrm_state_mtu); int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload) { diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 6f97665b632ede2263c9ddcd1c07351e523be079..4a307106f98f49716b81205f8cd415be289ed9d7 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2423,6 +2423,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, int n = 0; struct net *net = sock_net(skb->sk); struct xfrm_encap_tmpl *encap = NULL; + u32 if_id = 0; if (attrs[XFRMA_MIGRATE] == NULL) return -EINVAL; @@ -2447,7 +2448,10 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, return 0; } - err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap); + if (attrs[XFRMA_IF_ID]) + if_id = nla_get_u32(attrs[XFRMA_IF_ID]); + + err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, if_id); kfree(encap); @@ -2898,7 +2902,7 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x) if (x->props.extra_flags) l += nla_total_size(sizeof(x->props.extra_flags)); if (x->xso.dev) - l += nla_total_size(sizeof(x->xso)); + l += nla_total_size(sizeof(struct xfrm_user_offload)); if (x->props.smark.v | x->props.smark.m) { l += nla_total_size(sizeof(x->props.smark.v)); l += nla_total_size(sizeof(x->props.smark.m)); diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index 2e4508a6cb3a7c4dec89a924c7188a2ab332ec2d..cf5b0a8952254bf86adbc5496fbfbe45976a3f4c 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -1520,14 +1520,15 
@@ int main(int argc, char **argv) setlocale(LC_ALL, ""); + prev_time = get_nsecs(); + start_time = prev_time; + if (!opt_quiet) { ret = pthread_create(&pt, NULL, poller, NULL); if (ret) exit_with_error(ret); } - prev_time = get_nsecs(); - start_time = prev_time; if (opt_bench == BENCH_RXDROP) rx_drop_all(); diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index 6baee1200615d53ec7d783108a456825b385d5e0..23d3967786b9ff952f4b6fed4cdaf9c46d5cbf1c 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -51,6 +51,7 @@ KBUILD_CFLAGS += -Wno-sign-compare KBUILD_CFLAGS += -Wno-format-zero-length KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast) KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare +KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access) endif endif diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan index 9716dab06bc7a9f6012555f8b2d4e8e29e472b6d..2156e18391a3f93db931d267b51e47371e9f6f2b 100644 --- a/scripts/Makefile.ubsan +++ b/scripts/Makefile.ubsan @@ -23,7 +23,6 @@ ifdef CONFIG_UBSAN_MISC CFLAGS_UBSAN += $(call cc-option, -fsanitize=integer-divide-by-zero) CFLAGS_UBSAN += $(call cc-option, -fsanitize=unreachable) CFLAGS_UBSAN += $(call cc-option, -fsanitize=signed-integer-overflow) - CFLAGS_UBSAN += $(call cc-option, -fsanitize=object-size) CFLAGS_UBSAN += $(call cc-option, -fsanitize=bool) CFLAGS_UBSAN += $(call cc-option, -fsanitize=enum) endif diff --git a/scripts/check-kabi b/scripts/check-kabi index e3ec97cebffaa5b3696f6855976c94cca98fb41f..b1b55d512e618e02705e061cc3e922f207bf9798 100755 --- a/scripts/check-kabi +++ b/scripts/check-kabi @@ -42,7 +42,7 @@ def load_symvers(symvers,filename): break if in_line == "\n": continue - checksum,symbol,directory,type = string.split(in_line) + checksum,symbol,directory,type,namespace = string.split(in_line, sep='\t') symvers[symbol] = in_line[0:-1] @@ -57,7 +57,7 @@ def load_kabi(kabi,filename): break if in_line == "\n": continue - checksum,symbol,directory,type = string.split(in_line) + checksum,symbol,directory,type,namespace = string.split(in_line, sep='\t') kabi[symbol] = in_line[0:-1] @@ -70,9 +70,9 @@ def check_kabi(symvers,kabi): moved_symbols=[] for symbol in kabi: - abi_hash,abi_sym,abi_dir,abi_type = string.split(kabi[symbol]) + abi_hash,abi_sym,abi_dir,abi_type,namespace = string.split(kabi[symbol], sep='\t') if symvers.has_key(symbol): - sym_hash,sym_sym,sym_dir,sym_type = string.split(symvers[symbol]) + sym_hash,sym_sym,sym_dir,sym_type,namespace = string.split(symvers[symbol], sep='\t') if abi_hash != sym_hash: fail=1 changed_symbols.append(symbol) diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile index 4852bf44e913e25b4e69c4b066c068d2254a13d4..f1d201782346fa84f616b7b15824fc2273e98479 100644 --- a/scripts/dtc/Makefile +++ b/scripts/dtc/Makefile @@ -22,7 +22,7 @@ dtc-objs += yamltree.o # To include installed in a non-default path HOSTCFLAGS_yamltree.o := $(shell pkg-config --cflags yaml-0.1) # To link libyaml installed in a non-default path -HOSTLDLIBS_dtc := $(shell pkg-config yaml-0.1 --libs) +HOSTLDLIBS_dtc := $(shell pkg-config --libs yaml-0.1) endif # Generated files need one more search path to include headers in source tree diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff index d3422ee15e300bc76af3b15adc682aacf2d4e1ee..f2bbde4bba86bc70b27bb5a992a262c1997db811 100755 --- a/scripts/dtc/dtx_diff +++ b/scripts/dtc/dtx_diff @@ -59,12 +59,8 @@ Otherwise DTx is treated as a dts source file (aka .dts). 
or '/include/' to be processed. If DTx_1 and DTx_2 are in different architectures, then this script - may not work since \${ARCH} is part of the include path. Two possible - workarounds: - - `basename $0` \\ - <(ARCH=arch_of_dtx_1 `basename $0` DTx_1) \\ - <(ARCH=arch_of_dtx_2 `basename $0` DTx_2) + may not work since \${ARCH} is part of the include path. The following + workaround can be used: `basename $0` ARCH=arch_of_dtx_1 DTx_1 >tmp_dtx_1.dts `basename $0` ARCH=arch_of_dtx_2 DTx_2 >tmp_dtx_2.dts diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c index cbe1d6c4b1a51757a7cb7bced388bcdf57e308fa..c84bef1d2895510a8a15bdba6fb6aa7c04af86ad 100644 --- a/scripts/gcc-plugins/latent_entropy_plugin.c +++ b/scripts/gcc-plugins/latent_entropy_plugin.c @@ -86,25 +86,31 @@ static struct plugin_info latent_entropy_plugin_info = { .help = "disable\tturn off latent entropy instrumentation\n", }; -static unsigned HOST_WIDE_INT seed; -/* - * get_random_seed() (this is a GCC function) generates the seed. - * This is a simple random generator without any cryptographic security because - * the entropy doesn't come from here. - */ +static unsigned HOST_WIDE_INT deterministic_seed; +static unsigned HOST_WIDE_INT rnd_buf[32]; +static size_t rnd_idx = ARRAY_SIZE(rnd_buf); +static int urandom_fd = -1; + static unsigned HOST_WIDE_INT get_random_const(void) { - unsigned int i; - unsigned HOST_WIDE_INT ret = 0; - - for (i = 0; i < 8 * sizeof(ret); i++) { - ret = (ret << 1) | (seed & 1); - seed >>= 1; - if (ret & 1) - seed ^= 0xD800000000000000ULL; + if (deterministic_seed) { + unsigned HOST_WIDE_INT w = deterministic_seed; + w ^= w << 13; + w ^= w >> 7; + w ^= w << 17; + deterministic_seed = w; + return deterministic_seed; } - return ret; + if (urandom_fd < 0) { + urandom_fd = open("/dev/urandom", O_RDONLY); + gcc_assert(urandom_fd >= 0); + } + if (rnd_idx >= ARRAY_SIZE(rnd_buf)) { + gcc_assert(read(urandom_fd, rnd_buf, sizeof(rnd_buf)) == sizeof(rnd_buf)); + rnd_idx = 0; + } + return rnd_buf[rnd_idx++]; } static tree tree_get_random_const(tree type) @@ -549,8 +555,6 @@ static void latent_entropy_start_unit(void *gcc_data __unused, tree type, id; int quals; - seed = get_random_seed(false); - if (in_lto_p) return; @@ -585,6 +589,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info, const struct plugin_argument * const argv = plugin_info->argv; int i; + /* + * Call get_random_seed() with noinit=true, so that this returns + * 0 in the case where no seed has been passed via -frandom-seed. 
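+ * A non-zero deterministic_seed makes get_random_const() step a
+ * reproducible xorshift64 sequence; otherwise entropy is read from
+ * /dev/urandom in rnd_buf-sized batches.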
+ */ + deterministic_seed = get_random_seed(true); + static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = { { .base = &latent_entropy_decl, diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c index 48e141e07956261e74fdb5b2f56983c285614e36..dacd697ffd3830b5aa32eb93535f7341ace59859 100644 --- a/scripts/gcc-plugins/stackleak_plugin.c +++ b/scripts/gcc-plugins/stackleak_plugin.c @@ -431,6 +431,23 @@ static unsigned int stackleak_cleanup_execute(void) return 0; } +/* + * STRING_CST may or may not be NUL terminated: + * https://gcc.gnu.org/onlinedocs/gccint/Constant-expressions.html + */ +static inline bool string_equal(tree node, const char *string, int length) +{ + if (TREE_STRING_LENGTH(node) < length) + return false; + if (TREE_STRING_LENGTH(node) > length + 1) + return false; + if (TREE_STRING_LENGTH(node) == length + 1 && + TREE_STRING_POINTER(node)[length] != '\0') + return false; + return !memcmp(TREE_STRING_POINTER(node), string, length); +} +#define STRING_EQUAL(node, str) string_equal(node, str, strlen(str)) + static bool stackleak_gate(void) { tree section; @@ -440,13 +457,13 @@ static bool stackleak_gate(void) if (section && TREE_VALUE(section)) { section = TREE_VALUE(TREE_VALUE(section)); - if (!strncmp(TREE_STRING_POINTER(section), ".init.text", 10)) + if (STRING_EQUAL(section, ".init.text")) return false; - if (!strncmp(TREE_STRING_POINTER(section), ".devinit.text", 13)) + if (STRING_EQUAL(section, ".devinit.text")) return false; - if (!strncmp(TREE_STRING_POINTER(section), ".cpuinit.text", 13)) + if (STRING_EQUAL(section, ".cpuinit.text")) return false; - if (!strncmp(TREE_STRING_POINTER(section), ".meminit.text", 13)) + if (STRING_EQUAL(section, ".meminit.text")) return false; } diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index a39d93e3c6ae8d34f617b835d8ebcf5900cbad89..867b06c6d27976b6458dc37e80234ce6bf0dbb2c 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -968,14 +968,19 @@ static int conf_write_dep(const char *name) static int conf_touch_deps(void) { - const char *name; + const char *name, *tmp; struct symbol *sym; int res, i; - strcpy(depfile_path, "include/config/"); - depfile_prefix_len = strlen(depfile_path); - name = conf_get_autoconfig_name(); + tmp = strrchr(name, '/'); + depfile_prefix_len = tmp ? 
tmp - name + 1 : 0; + if (depfile_prefix_len + 1 > sizeof(depfile_path)) + return -1; + + strncpy(depfile_path, name, depfile_prefix_len); + depfile_path[depfile_prefix_len] = 0; + conf_read_simple(name, S_DEF_AUTO); sym_calc_value(modules_sym); diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c index 0590f86df6e40cfd073100904f3ec5cfbe650f5f..748da578b418c4acb53f454e17e2370d2050ceb6 100644 --- a/scripts/kconfig/preprocess.c +++ b/scripts/kconfig/preprocess.c @@ -141,7 +141,7 @@ static char *do_lineno(int argc, char *argv[]) static char *do_shell(int argc, char *argv[]) { FILE *p; - char buf[256]; + char buf[4096]; char *cmd; size_t nread; int i; diff --git a/scripts/module.lds.S b/scripts/module.lds.S index 088a5a2c446d24058ec116ffe2273acb168ac6a0..c86ce15bca16a7e052fbe07d56cc5e1a8cf90a02 100644 --- a/scripts/module.lds.S +++ b/scripts/module.lds.S @@ -24,6 +24,32 @@ SECTIONS { .init_array 0 : ALIGN(8) { *(SORT(.init_array.*)) *(.init_array) } __jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) } + + __patchable_function_entries : { *(__patchable_function_entries) } + +#ifdef CONFIG_LTO_CLANG + /* + * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and + * -ffunction-sections, which increases the size of the final module. + * Merge the split sections in the final binary. + */ + .bss : { + *(.bss .bss.[0-9a-zA-Z_]*) + *(.bss..L*) + } + + .data : { + *(.data .data.[0-9a-zA-Z_]*) + *(.data..L*) + } + + .rodata : { + *(.rodata .rodata.[0-9a-zA-Z_]*) + *(.rodata..L*) + } + + .text : { *(.text .text.[0-9a-zA-Z_]*) } +#endif } /* bring in arch-specific sections */ diff --git a/scripts/sign-file.c b/scripts/sign-file.c index fbd34b8e8f578aba003533348c907899779fb62c..acc9e5f2eb04a3e9445beda94ecaf313361fe5ab 100644 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -206,6 +206,28 @@ static X509 *read_x509(const char *x509_name) return x509; } +#if defined(EVP_PKEY_SM2) +static int pkey_is_sm2(EVP_PKEY *pkey) +{ + EC_KEY *eckey = NULL; + + const EC_GROUP *group = NULL; + + if (pkey == NULL || EVP_PKEY_id(pkey) != EVP_PKEY_EC) + return 0; + + eckey = EVP_PKEY_get0_EC_KEY(pkey); + if (eckey == NULL) + return 0; + + group = EC_KEY_get0_group(eckey); + if (group == NULL) + return 0; + + return EC_GROUP_get_curve_name(group) == NID_sm2; +} +#endif + int main(int argc, char **argv) { struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; @@ -220,6 +242,10 @@ int main(int argc, char **argv) unsigned int use_signed_attrs; const EVP_MD *digest_algo; EVP_PKEY *private_key; +#if defined(EVP_PKEY_SM2) + EVP_PKEY *public_key; +#endif + #ifndef USE_PKCS7 CMS_ContentInfo *cms = NULL; unsigned int use_keyid = 0; @@ -303,6 +329,16 @@ int main(int argc, char **argv) digest_algo = EVP_get_digestbyname(hash_algo); ERR(!digest_algo, "EVP_get_digestbyname"); +#if defined(EVP_PKEY_SM2) + if (pkey_is_sm2(private_key)) + EVP_PKEY_set_alias_type(private_key, EVP_PKEY_SM2); + + public_key = X509_get0_pubkey(x509); + ERR(!public_key, "X509_get0_pubkey"); + if (pkey_is_sm2(public_key)) + EVP_PKEY_set_alias_type(public_key, EVP_PKEY_SM2); +#endif + #ifndef USE_PKCS7 /* Load the signature message from the digest buffer. 
*/ cms = CMS_sign(NULL, NULL, NULL, NULL, diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install index 828a8615a9181ccdb4a9883df20ea58c9762984d..8fcea769d44f50dcf3a85054d33871cc2898c911 100755 --- a/scripts/sphinx-pre-install +++ b/scripts/sphinx-pre-install @@ -76,6 +76,7 @@ my %texlive = ( 'ucs.sty' => 'texlive-ucs', 'upquote.sty' => 'texlive-upquote', 'wrapfig.sty' => 'texlive-wrapfig', + 'ctexhook.sty' => 'texlive-ctex', ); # @@ -370,6 +371,9 @@ sub give_debian_hints() ); if ($pdf) { + check_missing_file(["/usr/share/texlive/texmf-dist/tex/latex/ctex/ctexhook.sty"], + "texlive-lang-chinese", 2); + check_missing_file(["/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"], "fonts-dejavu", 2); diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index f3dd2641d29af1b3bb96bb22dc12109fa785f16d..cddfc0e43a808b918c7768bf23b19e2f914da72c 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -70,7 +70,7 @@ static int __init evm_set_param(char *str) else pr_err("invalid \"%s\" mode", str); - return 0; + return 1; } __setup("evm=", evm_set_param); diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index ecdeab23f0227be272d946499d8e457f16274b1f..f1bc3e201bd83d76a91f6d872f854380fc38ec34 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c @@ -389,19 +389,21 @@ static ssize_t ima_write_data(struct file *file, const char __user *buf, goto out_free; data[datalen] = '\0'; - for (i = 0; data[i] != '\n' && data[i] != '\0'; i++) { - if (iscntrl(data[i])) { - pr_err_once("invalid path (control characters are not allowed)\n"); - result = -EINVAL; - goto out_free; - } - } result = mutex_lock_interruptible(&ima_write_mutex); if (result < 0) goto out_free; if (data[0] == '/') { + for (i = 0; data[i] != '\n' && data[i] != '\0'; i++) { + if (iscntrl(data[i])) { + pr_err_once("invalid path (control characters are not allowed)\n"); + result = -EINVAL; + mutex_unlock(&ima_write_mutex); + goto out_free; + } + } + result = ima_read_file(data, dentry); } else if (dentry == ima_policy) { if (ima_appraise & IMA_APPRAISE_POLICY) { @@ -635,12 +637,12 @@ int __init ima_fs_init(void) securityfs_remove(digest_list_data_del); securityfs_remove(digest_list_data); securityfs_remove(digests_count); + securityfs_remove(ima_policy); securityfs_remove(violations); securityfs_remove(runtime_measurements_count); securityfs_remove(ascii_runtime_measurements); securityfs_remove(binary_runtime_measurements); securityfs_remove(ima_symlink); securityfs_remove(ima_dir); - securityfs_remove(ima_policy); return -1; } diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index 8e288da665034a4f5a1826a4851cc9f1c1dbe33a..5a06050729a74d4288143b07c50492ef61cc8ab5 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -1726,6 +1726,14 @@ int ima_policy_show(struct seq_file *m, void *v) rcu_read_lock(); + /* Do not print rules with inactive LSM labels */ + for (i = 0; i < MAX_LSM_RULES; i++) { + if (entry->lsm[i].args_p && !entry->lsm[i].rule) { + rcu_read_unlock(); + return 0; + } + } + if (entry->action & MEASURE) seq_puts(m, pt(Opt_measure)); if (entry->action & DONT_MEASURE) diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c index 0d4c698d2707e0281469f60d288c2f4049c867cb..ab6f5c87cd0316dee6ceb1d67af3ddcaff9fae80 100644 --- a/security/integrity/ima/ima_template.c +++ 
b/security/integrity/ima/ima_template.c @@ -27,6 +27,7 @@ static struct ima_template_desc builtin_templates[] = { static LIST_HEAD(defined_templates); static DEFINE_SPINLOCK(template_list); +static int template_setup_done; static const struct ima_template_field supported_fields[] = { {.field_id = "d", .field_init = ima_eventdigest_init, @@ -82,10 +83,11 @@ static int __init ima_template_setup(char *str) struct ima_template_desc *template_desc; int template_len = strlen(str); - if (ima_template) + if (template_setup_done) return 1; - ima_init_template_list(); + if (!ima_template) + ima_init_template_list(); /* * Verify that a template with the supplied name exists. @@ -109,6 +111,7 @@ static int __init ima_template_setup(char *str) } ima_template = template_desc; + template_setup_done = 1; return 1; } __setup("ima_template=", ima_template_setup); @@ -117,7 +120,7 @@ static int __init ima_template_fmt_setup(char *str) { int num_templates = ARRAY_SIZE(builtin_templates); - if (ima_template) + if (template_setup_done) return 1; if (template_desc_init_fields(str, NULL, NULL) < 0) { @@ -128,6 +131,7 @@ static int __init ima_template_fmt_setup(char *str) builtin_templates[num_templates - 1].fmt = str; ima_template = builtin_templates + num_templates - 1; + template_setup_done = 1; return 1; } diff --git a/security/integrity/integrity_audit.c b/security/integrity/integrity_audit.c index 29220056207f4e51e83afda6965816d7032d5f6e..0ec5e4c22cb2a1066c2b897776ead6d3db72635c 100644 --- a/security/integrity/integrity_audit.c +++ b/security/integrity/integrity_audit.c @@ -45,6 +45,8 @@ void integrity_audit_message(int audit_msgno, struct inode *inode, return; ab = audit_log_start(audit_context(), GFP_KERNEL, audit_msgno); + if (!ab) + return; audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u", task_pid_nr(current), from_kuid(&init_user_ns, current_uid()), diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c index 931d8dfb4a7f42172a934236d046d19de4130c86..63e5c646f76207959d8b3485a4733829be0c26dd 100644 --- a/security/keys/keyctl_pkey.c +++ b/security/keys/keyctl_pkey.c @@ -135,15 +135,23 @@ static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_par switch (op) { case KEYCTL_PKEY_ENCRYPT: + if (uparams.in_len > info.max_dec_size || + uparams.out_len > info.max_enc_size) + return -EINVAL; + break; case KEYCTL_PKEY_DECRYPT: if (uparams.in_len > info.max_enc_size || uparams.out_len > info.max_dec_size) return -EINVAL; break; case KEYCTL_PKEY_SIGN: + if (uparams.in_len > info.max_data_size || + uparams.out_len > info.max_sig_size) + return -EINVAL; + break; case KEYCTL_PKEY_VERIFY: - if (uparams.in_len > info.max_sig_size || - uparams.out_len > info.max_data_size) + if (uparams.in_len > info.max_data_size || + uparams.in2_len > info.max_sig_size) return -EINVAL; break; default: @@ -151,7 +159,7 @@ static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_par } params->in_len = uparams.in_len; - params->out_len = uparams.out_len; + params->out_len = uparams.out_len; /* Note: same as in2_len */ return 0; } diff --git a/security/security.c b/security/security.c index 4fb58543eeb9ba1e38795a7941cace0098b62b02..926e035f9978cb018771f6efc6601645a4f3b568 100644 --- a/security/security.c +++ b/security/security.c @@ -59,10 +59,12 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = { [LOCKDOWN_DEBUGFS] = "debugfs access", [LOCKDOWN_XMON_WR] = "xmon write access", [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM", + 
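/* An illustrative aside, not part of the patch: the strings in this table
 * feed lockdown denial messages of the form "Lockdown: <comm>: <reason> is
 * restricted; see man kernel_lockdown.7". The mode currently enforced can
 * be read from securityfs; a minimal userspace sketch, assuming
 * CONFIG_SECURITY_LOCKDOWN_LSM=y and securityfs mounted at
 * /sys/kernel/security:
 */
#include <stdio.h>

int main(void)
{
	char buf[128];
	FILE *f = fopen("/sys/kernel/security/lockdown", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))	/* e.g. "[none] integrity confidentiality" */
		fputs(buf, stdout);
	fclose(f);
	return 0;
}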
[LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM", [LOCKDOWN_INTEGRITY_MAX] = "integrity", [LOCKDOWN_KCORE] = "/proc/kcore access", [LOCKDOWN_KPROBES] = "use of kprobes", [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM", + [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM", [LOCKDOWN_PERF] = "unsafe use of perf", [LOCKDOWN_TRACEFS] = "use of tracefs", [LOCKDOWN_XMON_RW] = "xmon read and write access", @@ -860,9 +862,22 @@ int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc) return call_int_hook(fs_context_dup, 0, fc, src_fc); } -int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) +int security_fs_context_parse_param(struct fs_context *fc, + struct fs_parameter *param) { - return call_int_hook(fs_context_parse_param, -ENOPARAM, fc, param); + struct security_hook_list *hp; + int trc; + int rc = -ENOPARAM; + + hlist_for_each_entry(hp, &security_hook_heads.fs_context_parse_param, + list) { + trc = hp->hook.fs_context_parse_param(fc, param); + if (trc == 0) + rc = 0; + else if (trc != -ENOPARAM) + return trc; + } + return rc; } int security_sb_alloc(struct super_block *sb) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index ff2191ae53528d80a0890aa9f03989663559feaa..8c901ae05dd845b7df58379459a697a32eb065b8 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -947,18 +947,22 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb, static int selinux_add_opt(int token, const char *s, void **mnt_opts) { struct selinux_mnt_opts *opts = *mnt_opts; + bool is_alloc_opts = false; if (token == Opt_seclabel) /* eaten and completely ignored */ return 0; + if (!s) + return -ENOMEM; + if (!opts) { opts = kzalloc(sizeof(struct selinux_mnt_opts), GFP_KERNEL); if (!opts) return -ENOMEM; *mnt_opts = opts; + is_alloc_opts = true; } - if (!s) - return -ENOMEM; + switch (token) { case Opt_context: if (opts->context || opts->defcontext) @@ -983,6 +987,10 @@ static int selinux_add_opt(int token, const char *s, void **mnt_opts) } return 0; Einval: + if (is_alloc_opts) { + kfree(opts); + *mnt_opts = NULL; + } pr_warn(SEL_MOUNT_FAIL_MSG); return -EINVAL; } @@ -2812,10 +2820,9 @@ static int selinux_fs_context_parse_param(struct fs_context *fc, return opt; rc = selinux_add_opt(opt, param->string, &fc->security); - if (!rc) { + if (!rc) param->string = NULL; - rc = 1; - } + return rc; } @@ -3640,6 +3647,12 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd, CAP_OPT_NONE, true); break; + case FIOCLEX: + case FIONCLEX: + if (!selinux_policycap_ioctl_skip_cloexec()) + error = ioctl_has_perm(cred, file, FILE__IOCTL, (u16) cmd); + break; + /* default case assumes that the command will go * to the file's ioctl() function. 
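   (An illustrative aside, not part of the patch: FIOCLEX/FIONCLEX merely
   toggle the close-on-exec flag, so the branch above lets policies opt out
   of checking them via the ioctl_skip_cloexec capability declared in the
   policycap changes that follow. Whether a loaded policy enables it can be
   read from selinuxfs; a minimal sketch, assuming selinuxfs is mounted at
   /sys/fs/selinux: */

#include <stdio.h>

int main(void)
{
	int val = 0;
	FILE *f = fopen("/sys/fs/selinux/policy_capabilities/ioctl_skip_cloexec", "r");

	if (!f)
		return 1;	/* capability not known to this kernel/policy */
	if (fscanf(f, "%d", &val) == 1)
		printf("ioctl_skip_cloexec: %d\n", val);
	fclose(f);
	return 0;
}

/* End of aside; the patch's comment resumes: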
*/ diff --git a/security/selinux/include/policycap.h b/security/selinux/include/policycap.h index 2ec038efbb03cc53d4e2c5780641f213dadd9894..a9e572ca4fd96d00ea26c917c25f6c91d552475f 100644 --- a/security/selinux/include/policycap.h +++ b/security/selinux/include/policycap.h @@ -11,6 +11,7 @@ enum { POLICYDB_CAPABILITY_CGROUPSECLABEL, POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION, POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS, + POLICYDB_CAPABILITY_IOCTL_SKIP_CLOEXEC, __POLICYDB_CAPABILITY_MAX }; #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1) diff --git a/security/selinux/include/policycap_names.h b/security/selinux/include/policycap_names.h index b89289f092c938ede5c28b293cfb3f51e7d3979c..ebd64afe1defd13dfb20fd9f9fd05c54f61f218f 100644 --- a/security/selinux/include/policycap_names.h +++ b/security/selinux/include/policycap_names.h @@ -12,7 +12,8 @@ const char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX] = { "always_check_network", "cgroup_seclabel", "nnp_nosuid_transition", - "genfs_seclabel_symlinks" + "genfs_seclabel_symlinks", + "ioctl_skip_cloexec" }; #endif /* _SELINUX_POLICYCAP_NAMES_H_ */ diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 63ca6e79daeb996723177b2cf842146fecd49594..1521460a97d4ee9359fa65ae1215e3994deec189 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h @@ -219,6 +219,13 @@ static inline bool selinux_policycap_genfs_seclabel_symlinks(void) return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]); } +static inline bool selinux_policycap_ioctl_skip_cloexec(void) +{ + struct selinux_state *state = &selinux_state; + + return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_IOCTL_SKIP_CLOEXEC]); +} + struct selinux_policy_convert_data; struct selinux_load_state { diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 2b745ae8cb9814a5862534c93f9da482072d1136..d893c2280f595e084cab2089593cd1410eb8b167 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -2124,6 +2124,8 @@ static int sel_fill_super(struct super_block *sb, struct fs_context *fc) } ret = sel_make_avc_files(dentry); + if (ret) + goto err; dentry = sel_make_dir(sb->s_root, "ss", &fsi->last_ino); if (IS_ERR(dentry)) { diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c index 1ef74c085f2b0f288ca689097e38ce03be6090c7..865611127357ee83b8016b65f26dd36791a18727 100644 --- a/security/selinux/ss/conditional.c +++ b/security/selinux/ss/conditional.c @@ -152,6 +152,8 @@ static void cond_list_destroy(struct policydb *p) for (i = 0; i < p->cond_list_len; i++) cond_node_destroy(&p->cond_list[i]); kfree(p->cond_list); + p->cond_list = NULL; + p->cond_list_len = 0; } void cond_policydb_destroy(struct policydb *p) @@ -440,7 +442,6 @@ int cond_read_list(struct policydb *p, void *fp) return 0; err: cond_list_destroy(p); - p->cond_list = NULL; return rc; } diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 7314196185d15f4f357cc2553d860a69af6fe80e..00e95f8bd7c73ff05a2105e902b547540754f8cb 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -346,7 +346,7 @@ int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x, int rc; struct xfrm_sec_ctx *ctx; char *ctx_str = NULL; - int str_len; + u32 str_len; if (!polsec) return 0; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 5c90b9fa4d405c69e9876e6a5b35e8d0cd9c477c..b36b8668f1f4a3f9e80a142f153892f500f87dd4 100644 --- 
a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2506,7 +2506,7 @@ static int smk_ipv6_check(struct smack_known *subject, #ifdef CONFIG_AUDIT smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); ad.a.u.net->family = PF_INET6; - ad.a.u.net->dport = ntohs(address->sin6_port); + ad.a.u.net->dport = address->sin6_port; if (act == SMK_RECEIVING) ad.a.u.net->v6info.saddr = address->sin6_addr; else diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c index 3445ae6fd4794eaf430384ec63622004bc93f8fb..363b65be87ab768ed4993df1e5a361892958b3ab 100644 --- a/security/tomoyo/load_policy.c +++ b/security/tomoyo/load_policy.c @@ -24,7 +24,7 @@ static const char *tomoyo_loader; static int __init tomoyo_loader_setup(char *str) { tomoyo_loader = str; - return 0; + return 1; } __setup("TOMOYO_loader=", tomoyo_loader_setup); @@ -64,7 +64,7 @@ static const char *tomoyo_trigger; static int __init tomoyo_trigger_setup(char *str) { tomoyo_trigger = str; - return 0; + return 1; } __setup("TOMOYO_trigger=", tomoyo_trigger_setup); diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c index a0996c47e58fee5b72da40163162ab895770758a..b326a5f5f0d53096a7109fa837c9fac32ed68373 100644 --- a/sound/arm/aaci.c +++ b/sound/arm/aaci.c @@ -1055,7 +1055,7 @@ static int aaci_probe(struct amba_device *dev, return ret; } -static int aaci_remove(struct amba_device *dev) +static void aaci_remove(struct amba_device *dev) { struct snd_card *card = amba_get_drvdata(dev); @@ -1066,8 +1066,6 @@ static int aaci_remove(struct amba_device *dev) snd_card_free(card); amba_release_regions(dev); } - - return 0; } static struct amba_id aaci_ids[] = { diff --git a/sound/core/jack.c b/sound/core/jack.c index d6502dff247a817063dd1945d976c1b47e14a6f5..dc2e06ae2414960beec0a04a1c0042e13407440f 100644 --- a/sound/core/jack.c +++ b/sound/core/jack.c @@ -54,10 +54,13 @@ static int snd_jack_dev_free(struct snd_device *device) struct snd_card *card = device->card; struct snd_jack_kctl *jack_kctl, *tmp_jack_kctl; + down_write(&card->controls_rwsem); list_for_each_entry_safe(jack_kctl, tmp_jack_kctl, &jack->kctl_list, list) { list_del_init(&jack_kctl->list); snd_ctl_remove(card, jack_kctl->kctl); } + up_write(&card->controls_rwsem); + if (jack->private_free) jack->private_free(jack); diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 77727a69c3c4ea1f0bf97ef068f5bde2d746a59b..f88de74da1eb36d889886b9637264719b21bb182 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -774,6 +774,11 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream, if (oss_period_size < 16) return -EINVAL; + + /* don't allocate too large period; 1MB period must be enough */ + if (oss_period_size > 1024 * 1024) + return -ENOMEM; + runtime->oss.period_bytes = oss_period_size; runtime->oss.period_frames = 1; runtime->oss.periods = oss_periods; @@ -1042,10 +1047,9 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) goto failure; } #endif - oss_period_size *= oss_frame_size; - - oss_buffer_size = oss_period_size * runtime->oss.periods; - if (oss_buffer_size < 0) { + oss_period_size = array_size(oss_period_size, oss_frame_size); + oss_buffer_size = array_size(oss_period_size, runtime->oss.periods); + if (oss_buffer_size <= 0) { err = -EINVAL; goto failure; } @@ -2056,7 +2060,7 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr int err, cmd; #ifdef OSS_DEBUG - pcm_dbg(substream->pcm, "pcm_oss: trigger = 0x%x\n", trigger); + 
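/* An illustrative aside, not part of the patch: the 1 MB cap above and the
 * array_size()/array3_size() conversions nearby guard multiplications fed
 * by user-controlled values; the kernel helpers saturate to SIZE_MAX on
 * overflow so the result fails the later bounds check instead of wrapping.
 * A standalone sketch of the same idea via the GCC/Clang
 * __builtin_mul_overflow() primitive (sat_mul3() is a hypothetical
 * stand-in for array3_size()):
 */
#include <stdint.h>
#include <stddef.h>

static size_t sat_mul3(size_t a, size_t b, size_t c)
{
	size_t ab, abc;

	if (__builtin_mul_overflow(a, b, &ab) ||
	    __builtin_mul_overflow(ab, c, &abc))
		return SIZE_MAX;	/* saturate; the caller's limit check rejects it */
	return abc;
}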
pr_debug("pcm_oss: trigger = 0x%x\n", trigger); #endif psubstream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c index d5ca161d588c504fa79257a300a958313b98c74d..1e2d1b35c1946cdc768d4b453833c5b4e0b90778 100644 --- a/sound/core/oss/pcm_plugin.c +++ b/sound/core/oss/pcm_plugin.c @@ -61,7 +61,10 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t } if ((width = snd_pcm_format_physical_width(format->format)) < 0) return width; - size = frames * format->channels * width; + size = array3_size(frames, format->channels, width); + /* check for too large period size once again */ + if (size > 1024 * 1024) + return -ENOMEM; if (snd_BUG_ON(size % 8)) return -ENXIO; size /= 8; diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 41cbdac5b1cfaa6e93fc0d992727c04dc070f1c4..59d222446d77738b25af374aa15dab527767a2e2 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c @@ -810,7 +810,11 @@ EXPORT_SYMBOL(snd_pcm_new_internal); static void free_chmap(struct snd_pcm_str *pstr) { if (pstr->chmap_kctl) { - snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl); + struct snd_card *card = pstr->pcm->card; + + down_write(&card->controls_rwsem); + snd_ctl_remove(card, pstr->chmap_kctl); + up_write(&card->controls_rwsem); pstr->chmap_kctl = NULL; } } @@ -965,6 +969,8 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, init_waitqueue_head(&runtime->tsleep); runtime->status->state = SNDRV_PCM_STATE_OPEN; + mutex_init(&runtime->buffer_mutex); + atomic_set(&runtime->buffer_accessing, 0); substream->runtime = runtime; substream->private_data = pcm->private_data; @@ -998,6 +1004,7 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream) } else { substream->runtime = NULL; } + mutex_destroy(&runtime->buffer_mutex); kfree(runtime); put_pid(substream->pid); substream->pid = NULL; diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 5e04c4b9e02399cd1cd697dbadd1a23ceb8a7da6..289f52af15b96b16a94b3572fb73eee720fafc8b 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -2221,10 +2221,15 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, err = -EINVAL; goto _end_unlock; } + if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) { + err = -EBUSY; + goto _end_unlock; + } snd_pcm_stream_unlock_irq(substream); err = writer(substream, appl_ofs, data, offset, frames, transfer); snd_pcm_stream_lock_irq(substream); + atomic_dec(&runtime->buffer_accessing); if (err < 0) goto _end_unlock; err = pcm_accessible_state(runtime); diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c index 4f03ba8ed0ae597ca01547c811f3e6058e8efb87..a9a0d74f31656c976c1102aa3978179b7b6e6c66 100644 --- a/sound/core/pcm_memory.c +++ b/sound/core/pcm_memory.c @@ -164,19 +164,20 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry, size_t size; struct snd_dma_buffer new_dmab; + mutex_lock(&substream->pcm->open_mutex); if (substream->runtime) { buffer->error = -EBUSY; - return; + goto unlock; } if (!snd_info_get_line(buffer, line, sizeof(line))) { snd_info_get_str(str, line, sizeof(str)); size = simple_strtoul(str, NULL, 10) * 1024; if ((size != 0 && size < 8192) || size > substream->dma_max) { buffer->error = -EINVAL; - return; + goto unlock; } if (substream->dma_buffer.bytes == size) - return; + goto unlock; memset(&new_dmab, 0, sizeof(new_dmab)); new_dmab.dev = substream->dma_buffer.dev; if (size > 0) { @@ -185,7 +186,7 @@ static void 
snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry, substream->dma_buffer.dev.dev, size, &new_dmab) < 0) { buffer->error = -ENOMEM; - return; + goto unlock; } substream->buffer_bytes_max = size; } else { @@ -197,6 +198,8 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry, } else { buffer->error = -EINVAL; } + unlock: + mutex_unlock(&substream->pcm->open_mutex); } static inline void preallocate_info_init(struct snd_pcm_substream *substream) diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c index 257d412eac5ddb618074e662ad2b0346e19c12e1..30f0f96e0000440d23d0e2bf651fba4b45f59d84 100644 --- a/sound/core/pcm_misc.c +++ b/sound/core/pcm_misc.c @@ -429,7 +429,7 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int return 0; width = pcm_formats[(INT)format].phys; /* physical width */ pat = pcm_formats[(INT)format].silence; - if (! width) + if (!width || !pat) return -EINVAL; /* signed or 1 byte data */ if (pcm_formats[(INT)format].signd == 1 || width <= 8) { diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index c5ef5182fcf1907676bca812d544a3624ffb1fa0..6cc7c2a9fe732de3c32392428d05e95fff7f9b51 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -667,6 +667,30 @@ static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm, return 0; } +/* acquire buffer_mutex; if it's in r/w operation, return -EBUSY, otherwise + * block the further r/w operations + */ +static int snd_pcm_buffer_access_lock(struct snd_pcm_runtime *runtime) +{ + if (!atomic_dec_unless_positive(&runtime->buffer_accessing)) + return -EBUSY; + mutex_lock(&runtime->buffer_mutex); + return 0; /* keep buffer_mutex, unlocked by below */ +} + +/* release buffer_mutex and clear r/w access flag */ +static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime) +{ + mutex_unlock(&runtime->buffer_mutex); + atomic_inc(&runtime->buffer_accessing); +} + +#if IS_ENABLED(CONFIG_SND_PCM_OSS) +#define is_oss_stream(substream) ((substream)->oss.oss) +#else +#define is_oss_stream(substream) false +#endif + static int snd_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { @@ -678,22 +702,25 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; + err = snd_pcm_buffer_access_lock(runtime); + if (err < 0) + return err; snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_OPEN: case SNDRV_PCM_STATE_SETUP: case SNDRV_PCM_STATE_PREPARED: + if (!is_oss_stream(substream) && + atomic_read(&substream->mmap_count)) + err = -EBADFD; break; default: - snd_pcm_stream_unlock_irq(substream); - return -EBADFD; + err = -EBADFD; + break; } snd_pcm_stream_unlock_irq(substream); -#if IS_ENABLED(CONFIG_SND_PCM_OSS) - if (!substream->oss.oss) -#endif - if (atomic_read(&substream->mmap_count)) - return -EBADFD; + if (err) + goto unlock; snd_pcm_sync_stop(substream, true); @@ -780,16 +807,21 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, if ((usecs = period_to_usecs(runtime)) >= 0) cpu_latency_qos_add_request(&substream->latency_pm_qos_req, usecs); - return 0; + err = 0; _error: - /* hardware might be unusable from this time, - so we force application to retry to set - the correct hardware parameter settings */ - snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); - if (substream->ops->hw_free != NULL) - substream->ops->hw_free(substream); - if 
(substream->managed_buffer_alloc) - snd_pcm_lib_free_pages(substream); + if (err) { + /* hardware might be unusable from this time, + * so we force application to retry to set + * the correct hardware parameter settings + */ + snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); + if (substream->ops->hw_free != NULL) + substream->ops->hw_free(substream); + if (substream->managed_buffer_alloc) + snd_pcm_lib_free_pages(substream); + } + unlock: + snd_pcm_buffer_access_unlock(runtime); return err; } @@ -829,26 +861,33 @@ static int do_hw_free(struct snd_pcm_substream *substream) static int snd_pcm_hw_free(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime; - int result; + int result = 0; if (PCM_RUNTIME_CHECK(substream)) return -ENXIO; runtime = substream->runtime; + result = snd_pcm_buffer_access_lock(runtime); + if (result < 0) + return result; snd_pcm_stream_lock_irq(substream); switch (runtime->status->state) { case SNDRV_PCM_STATE_SETUP: case SNDRV_PCM_STATE_PREPARED: + if (atomic_read(&substream->mmap_count)) + result = -EBADFD; break; default: - snd_pcm_stream_unlock_irq(substream); - return -EBADFD; + result = -EBADFD; + break; } snd_pcm_stream_unlock_irq(substream); - if (atomic_read(&substream->mmap_count)) - return -EBADFD; + if (result) + goto unlock; result = do_hw_free(substream); snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN); cpu_latency_qos_remove_request(&substream->latency_pm_qos_req); + unlock: + snd_pcm_buffer_access_unlock(runtime); return result; } @@ -1154,15 +1193,17 @@ struct action_ops { static int snd_pcm_action_group(const struct action_ops *ops, struct snd_pcm_substream *substream, snd_pcm_state_t state, - bool do_lock) + bool stream_lock) { struct snd_pcm_substream *s = NULL; struct snd_pcm_substream *s1; int res = 0, depth = 1; snd_pcm_group_for_each_entry(s, substream) { - if (do_lock && s != substream) { - if (s->pcm->nonatomic) + if (s != substream) { + if (!stream_lock) + mutex_lock_nested(&s->runtime->buffer_mutex, depth); + else if (s->pcm->nonatomic) mutex_lock_nested(&s->self_group.mutex, depth); else spin_lock_nested(&s->self_group.lock, depth); @@ -1190,18 +1231,18 @@ static int snd_pcm_action_group(const struct action_ops *ops, ops->post_action(s, state); } _unlock: - if (do_lock) { - /* unlock streams */ - snd_pcm_group_for_each_entry(s1, substream) { - if (s1 != substream) { - if (s1->pcm->nonatomic) - mutex_unlock(&s1->self_group.mutex); - else - spin_unlock(&s1->self_group.lock); - } - if (s1 == s) /* end */ - break; + /* unlock streams */ + snd_pcm_group_for_each_entry(s1, substream) { + if (s1 != substream) { + if (!stream_lock) + mutex_unlock(&s1->runtime->buffer_mutex); + else if (s1->pcm->nonatomic) + mutex_unlock(&s1->self_group.mutex); + else + spin_unlock(&s1->self_group.lock); } + if (s1 == s) /* end */ + break; } return res; } @@ -1331,10 +1372,15 @@ static int snd_pcm_action_nonatomic(const struct action_ops *ops, /* Guarantee the group members won't change during non-atomic action */ down_read(&snd_pcm_link_rwsem); + res = snd_pcm_buffer_access_lock(substream->runtime); + if (res < 0) + goto unlock; if (snd_pcm_stream_linked(substream)) res = snd_pcm_action_group(ops, substream, state, false); else res = snd_pcm_action_single(ops, substream, state); + snd_pcm_buffer_access_unlock(substream->runtime); + unlock: up_read(&snd_pcm_link_rwsem); return res; } @@ -1829,11 +1875,13 @@ static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int err = snd_pcm_ops_ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL); if 
(err < 0) return err; + snd_pcm_stream_lock_irq(substream); runtime->hw_ptr_base = 0; runtime->hw_ptr_interrupt = runtime->status->hw_ptr - runtime->status->hw_ptr % runtime->period_size; runtime->silence_start = runtime->status->hw_ptr; runtime->silence_filled = 0; + snd_pcm_stream_unlock_irq(substream); return 0; } @@ -1841,10 +1889,12 @@ static void snd_pcm_post_reset(struct snd_pcm_substream *substream, snd_pcm_state_t state) { struct snd_pcm_runtime *runtime = substream->runtime; + snd_pcm_stream_lock_irq(substream); runtime->control->appl_ptr = runtime->status->hw_ptr; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && runtime->silence_size > 0) snd_pcm_playback_silence(substream, ULONG_MAX); + snd_pcm_stream_unlock_irq(substream); } static const struct action_ops snd_pcm_action_reset = { diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c index 71a6ea62c3be7c8a0a1885da0b32b2b527ef8644..4ff0b927230c2f5ee4efe89046b61e68d0880eb9 100644 --- a/sound/core/seq/seq_queue.c +++ b/sound/core/seq/seq_queue.c @@ -234,12 +234,15 @@ struct snd_seq_queue *snd_seq_queue_find_name(char *name) /* -------------------------------------------------------- */ +#define MAX_CELL_PROCESSES_IN_QUEUE 1000 + void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) { unsigned long flags; struct snd_seq_event_cell *cell; snd_seq_tick_time_t cur_tick; snd_seq_real_time_t cur_time; + int processed = 0; if (q == NULL) return; @@ -262,6 +265,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) if (!cell) break; snd_seq_dispatch_event(cell, atomic, hop); + if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE) + goto out; /* the rest processed at the next batch */ } /* Process time queue... */ @@ -271,14 +276,19 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) if (!cell) break; snd_seq_dispatch_event(cell, atomic, hop); + if (++processed >= MAX_CELL_PROCESSES_IN_QUEUE) + goto out; /* the rest processed at the next batch */ } + out: /* free lock */ spin_lock_irqsave(&q->check_lock, flags); if (q->check_again) { q->check_again = 0; - spin_unlock_irqrestore(&q->check_lock, flags); - goto __again; + if (processed < MAX_CELL_PROCESSES_IN_QUEUE) { + spin_unlock_irqrestore(&q->check_lock, flags); + goto __again; + } } q->check_blocked = 0; spin_unlock_irqrestore(&q->check_lock, flags); diff --git a/sound/firewire/fcp.c b/sound/firewire/fcp.c index bbfbebf4affbc20964e52c5553381facbd7f4118..df44dd5dc4b229785e3dac955105e4da27dbcccf 100644 --- a/sound/firewire/fcp.c +++ b/sound/firewire/fcp.c @@ -240,9 +240,7 @@ int fcp_avc_transaction(struct fw_unit *unit, t.response_match_bytes = response_match_bytes; t.state = STATE_PENDING; init_waitqueue_head(&t.wait); - - if (*(const u8 *)command == 0x00 || *(const u8 *)command == 0x03) - t.deferrable = true; + t.deferrable = (*(const u8 *)command == 0x00 || *(const u8 *)command == 0x03); spin_lock_irq(&transactions_lock); list_add_tail(&t.list, &transactions); diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c index fa3c39cff5f854a88b602feb832fa174624f3a84..9ee3a312c6793248e7752bf8b3691b4b43fd5dce 100644 --- a/sound/isa/cs423x/cs4236.c +++ b/sound/isa/cs423x/cs4236.c @@ -544,7 +544,7 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev, static int dev; int err; struct snd_card *card; - struct pnp_dev *cdev; + struct pnp_dev *cdev, *iter; char cid[PNP_ID_LEN]; if (pnp_device_is_isapnp(pdev)) @@ -560,9 +560,11 @@ static int snd_cs423x_pnpbios_detect(struct pnp_dev *pdev, strcpy(cid, 
pdev->id[0].id); cid[5] = '1'; cdev = NULL; - list_for_each_entry(cdev, &(pdev->protocol->devices), protocol_list) { - if (!strcmp(cdev->id[0].id, cid)) + list_for_each_entry(iter, &(pdev->protocol->devices), protocol_list) { + if (!strcmp(iter->id[0].id, cid)) { + cdev = iter; break; + } } err = snd_cs423x_card_new(&pdev->dev, dev, &card); if (err < 0) diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index 012a7ee849e8aad424e82752c73250b5d0a0d845..963731cf0d8c880f6c5ba5c9235dad01ca040ac5 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c @@ -938,8 +938,8 @@ static int snd_ac97_ad18xx_pcm_get_volume(struct snd_kcontrol *kcontrol, struct int codec = kcontrol->private_value & 3; mutex_lock(&ac97->page_mutex); - ucontrol->value.integer.value[0] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 0) & 31); - ucontrol->value.integer.value[1] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 8) & 31); + ucontrol->value.integer.value[0] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 8) & 31); + ucontrol->value.integer.value[1] = 31 - ((ac97->spec.ad18xx.pcmreg[codec] >> 0) & 31); mutex_unlock(&ac97->page_mutex); return 0; } diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c index 7363d61eaec23fc6754d5d39dbf8717dfc9d1d89..120dd8b33ac81ca825dc11748e3202f12b47adbb 100644 --- a/sound/pci/cmipci.c +++ b/sound/pci/cmipci.c @@ -302,7 +302,6 @@ MODULE_PARM_DESC(joystick_port, "Joystick port address."); #define CM_MICGAINZ 0x01 /* mic boost */ #define CM_MICGAINZ_SHIFT 0 -#define CM_REG_MIXER3 0x24 #define CM_REG_AUX_VOL 0x26 #define CM_VAUXL_MASK 0xf0 #define CM_VAUXR_MASK 0x0f @@ -3291,7 +3290,7 @@ static void snd_cmipci_remove(struct pci_dev *pci) */ static const unsigned char saved_regs[] = { CM_REG_FUNCTRL1, CM_REG_CHFORMAT, CM_REG_LEGACY_CTRL, CM_REG_MISC_CTRL, - CM_REG_MIXER0, CM_REG_MIXER1, CM_REG_MIXER2, CM_REG_MIXER3, CM_REG_PLL, + CM_REG_MIXER0, CM_REG_MIXER1, CM_REG_MIXER2, CM_REG_AUX_VOL, CM_REG_PLL, CM_REG_CH0_FRAME1, CM_REG_CH0_FRAME2, CM_REG_CH1_FRAME1, CM_REG_CH1_FRAME2, CM_REG_EXT_MISC, CM_REG_INT_STATUS, CM_REG_INT_HLDCLR, CM_REG_FUNCTRL0, diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 6dece719be669c5f1034d684d08e04d41ddbbd5b..39281106477ebf2df18b9ecd273c272076bc3783 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -1727,8 +1727,11 @@ void snd_hda_ctls_clear(struct hda_codec *codec) { int i; struct hda_nid_item *items = codec->mixers.list; + + down_write(&codec->card->controls_rwsem); for (i = 0; i < codec->mixers.used; i++) snd_ctl_remove(codec->card, items[i].kctl); + up_write(&codec->card->controls_rwsem); snd_array_free(&codec->mixers); snd_array_free(&codec->nids); } diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 323df011b94a3de5188ea3b1f1d4a3975b3657d9..8ee3be7bbd24e7676231eed11ede07ee4bdebd23 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -91,6 +91,12 @@ static void snd_hda_gen_spec_free(struct hda_gen_spec *spec) free_kctls(spec); snd_array_free(&spec->paths); snd_array_free(&spec->loopback_list); +#ifdef CONFIG_SND_HDA_GENERIC_LEDS + if (spec->led_cdevs[LED_AUDIO_MUTE]) + led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MUTE]); + if (spec->led_cdevs[LED_AUDIO_MICMUTE]) + led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MICMUTE]); +#endif } /* @@ -3911,7 +3917,10 @@ static int create_mute_led_cdev(struct hda_codec *codec, enum led_brightness), bool micmute) { + struct hda_gen_spec *spec = codec->spec; struct led_classdev *cdev; + int 
idx = micmute ? LED_AUDIO_MICMUTE : LED_AUDIO_MUTE; + int err; cdev = devm_kzalloc(&codec->core.dev, sizeof(*cdev), GFP_KERNEL); if (!cdev) @@ -3921,10 +3930,14 @@ static int create_mute_led_cdev(struct hda_codec *codec, cdev->max_brightness = 1; cdev->default_trigger = micmute ? "audio-micmute" : "audio-mute"; cdev->brightness_set_blocking = callback; - cdev->brightness = ledtrig_audio_get(micmute ? LED_AUDIO_MICMUTE : LED_AUDIO_MUTE); + cdev->brightness = ledtrig_audio_get(idx); cdev->flags = LED_CORE_SUSPENDRESUME; - return devm_led_classdev_register(&codec->core.dev, cdev); + err = led_classdev_register(&codec->core.dev, cdev); + if (err < 0) + return err; + spec->led_cdevs[idx] = cdev; + return 0; } static void vmaster_update_mute_led(void *private_data, int enabled) diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index 0886bc81f40be64901b50d6db1a21145891dc810..578faa9adcdcdba43563efddd5fc9e40856c6cb7 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h @@ -305,6 +305,9 @@ struct hda_gen_spec { struct hda_jack_callback *cb); void (*mic_autoswitch_hook)(struct hda_codec *codec, struct hda_jack_callback *cb); + + /* leds */ + struct led_classdev *led_cdevs[NUM_AUDIO_LEDS]; }; /* values for add_stereo_mix_input flag */ diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 3b2dda486f2a3541aa6eb004c80f170fc52c5c1f..6e69c76eddcbadce190796f7a43b7262b36cd680 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1681,6 +1681,7 @@ static const struct snd_pci_quirk probe_mask_list[] = { /* forced codec slots */ SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103), SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103), + SND_PCI_QUIRK(0x1558, 0x0351, "Schenker Dock 15", 0x105), /* WinFast VP200 H (Teradici) user reported broken communication */ SND_PCI_QUIRK(0x3a21, 0x040d, "WinFast VP200 H", 0x101), {} @@ -1877,8 +1878,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, assign_position_fix(chip, check_position_fix(chip, position_fix[dev])); - check_probe_mask(chip, dev); - if (single_cmd < 0) /* allow fallback to single_cmd at errors */ chip->fallback_to_single_cmd = 1; else /* explicitly set to single_cmd or not */ @@ -1906,6 +1905,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, chip->bus.core.needs_damn_long_delay = 1; } + check_probe_mask(chip, dev); + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { dev_err(card->dev, "Error creating device [card]!\n"); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index fbfdfcefeb0b623a8446681315e0c74d4e2e1b0e..de710e6dd56b8754075ec2afbd8df8226ce6cfee 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -1608,6 +1608,7 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin, struct hda_codec *codec = per_pin->codec; struct hdmi_spec *spec = codec->spec; struct hdmi_eld *eld = &spec->temp_eld; + struct device *dev = hda_codec_dev(codec); hda_nid_t pin_nid = per_pin->pin_nid; int dev_id = per_pin->dev_id; /* @@ -1621,8 +1622,13 @@ static void hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin, int present; int ret; +#ifdef CONFIG_PM + if (dev->power.runtime_status == RPM_SUSPENDING) + return; +#endif + ret = snd_hda_power_up_pm(codec); - if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) + if (ret < 0 && pm_runtime_suspended(dev)) goto out; present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id); diff --git 
a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 14ce48f1a8e47e406bd9d4474e76d459d5dfff07..11d653190e6eac3a2cd5c1b171af1cb08b0363b7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -97,6 +97,7 @@ struct alc_spec { unsigned int gpio_mic_led_mask; struct alc_coef_led mute_led_coef; struct alc_coef_led mic_led_coef; + struct mutex coef_mutex; hda_nid_t headset_mic_pin; hda_nid_t headphone_mic_pin; @@ -133,8 +134,24 @@ struct alc_spec { * COEF access helper functions */ -static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, - unsigned int coef_idx) +static void coef_mutex_lock(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + + snd_hda_power_up_pm(codec); + mutex_lock(&spec->coef_mutex); +} + +static void coef_mutex_unlock(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + + mutex_unlock(&spec->coef_mutex); + snd_hda_power_down_pm(codec); +} + +static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx) { unsigned int val; @@ -143,28 +160,56 @@ static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, return val; } +static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx) +{ + unsigned int val; + + coef_mutex_lock(codec); + val = __alc_read_coefex_idx(codec, nid, coef_idx); + coef_mutex_unlock(codec); + return val; +} + #define alc_read_coef_idx(codec, coef_idx) \ alc_read_coefex_idx(codec, 0x20, coef_idx) -static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, - unsigned int coef_idx, unsigned int coef_val) +static void __alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx, unsigned int coef_val) { snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, coef_idx); snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PROC_COEF, coef_val); } +static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx, unsigned int coef_val) +{ + coef_mutex_lock(codec); + __alc_write_coefex_idx(codec, nid, coef_idx, coef_val); + coef_mutex_unlock(codec); +} + #define alc_write_coef_idx(codec, coef_idx, coef_val) \ alc_write_coefex_idx(codec, 0x20, coef_idx, coef_val) +static void __alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx, unsigned int mask, + unsigned int bits_set) +{ + unsigned int val = __alc_read_coefex_idx(codec, nid, coef_idx); + + if (val != -1) + __alc_write_coefex_idx(codec, nid, coef_idx, + (val & ~mask) | bits_set); +} + static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid, unsigned int coef_idx, unsigned int mask, unsigned int bits_set) { - unsigned int val = alc_read_coefex_idx(codec, nid, coef_idx); - - if (val != -1) - alc_write_coefex_idx(codec, nid, coef_idx, - (val & ~mask) | bits_set); + coef_mutex_lock(codec); + __alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set); + coef_mutex_unlock(codec); } #define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \ @@ -197,13 +242,15 @@ struct coef_fw { static void alc_process_coef_fw(struct hda_codec *codec, const struct coef_fw *fw) { + coef_mutex_lock(codec); for (; fw->nid; fw++) { if (fw->mask == (unsigned short)-1) - alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val); + __alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val); else - alc_update_coefex_idx(codec, fw->nid, fw->idx, - fw->mask, fw->val); + __alc_update_coefex_idx(codec, fw->nid, fw->idx, + fw->mask, fw->val); } + 
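/* An illustrative aside, not part of the patch: the COEF accessors above
 * are split into bare __helpers that assume coef_mutex is held and public
 * wrappers that take it (plus a PM reference), so a batch such as
 * alc_process_coef_fw() can hold the lock once across a whole sequence. A
 * minimal standalone sketch of that shape with pthreads:
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int regs[16];

static void __reg_write(int idx, int val)	/* lock assumed held */
{
	regs[idx] = val;
}

static void reg_write(int idx, int val)		/* single access */
{
	pthread_mutex_lock(&lock);
	__reg_write(idx, val);
	pthread_mutex_unlock(&lock);
}

static void reg_write_batch(const int (*fw)[2], int n)	/* one lock, many writes */
{
	pthread_mutex_lock(&lock);
	while (n-- > 0)
		__reg_write(fw[n][0], fw[n][1]);
	pthread_mutex_unlock(&lock);
}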
coef_mutex_unlock(codec); } /* @@ -1160,6 +1207,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid) codec->spdif_status_reset = 1; codec->forced_resume = 1; codec->patch_ops = alc_patch_ops; + mutex_init(&spec->coef_mutex); err = alc_codec_rename_from_preset(codec); if (err < 0) { @@ -1936,6 +1984,7 @@ enum { ALC887_FIXUP_ASUS_BASS, ALC887_FIXUP_BASS_CHMAP, ALC1220_FIXUP_GB_DUAL_CODECS, + ALC1220_FIXUP_GB_X570, ALC1220_FIXUP_CLEVO_P950, ALC1220_FIXUP_CLEVO_PB51ED, ALC1220_FIXUP_CLEVO_PB51ED_PINS, @@ -2125,6 +2174,30 @@ static void alc1220_fixup_gb_dual_codecs(struct hda_codec *codec, } } +static void alc1220_fixup_gb_x570(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + static const hda_nid_t conn1[] = { 0x0c }; + static const struct coef_fw gb_x570_coefs[] = { + WRITE_COEF(0x07, 0x03c0), + WRITE_COEF(0x1a, 0x01c1), + WRITE_COEF(0x1b, 0x0202), + WRITE_COEF(0x43, 0x3005), + {} + }; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn1), conn1); + snd_hda_override_conn_list(codec, 0x1b, ARRAY_SIZE(conn1), conn1); + break; + case HDA_FIXUP_ACT_INIT: + alc_process_coef_fw(codec, gb_x570_coefs); + break; + } +} + static void alc1220_fixup_clevo_p950(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -2427,6 +2500,10 @@ static const struct hda_fixup alc882_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc1220_fixup_gb_dual_codecs, }, + [ALC1220_FIXUP_GB_X570] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc1220_fixup_gb_x570, + }, [ALC1220_FIXUP_CLEVO_P950] = { .type = HDA_FIXUP_FUNC, .v.func = alc1220_fixup_clevo_p950, @@ -2529,8 +2606,9 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD), SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), - SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950), - SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570), + SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_GB_X570), + SND_PCI_QUIRK(0x1458, 0xa0d5, "Gigabyte X570S Aorus Master", ALC1220_FIXUP_GB_X570), SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950), @@ -2548,6 +2626,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS), + SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS), SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS), @@ -2605,6 +2684,7 @@ static const struct hda_model_fixup alc882_fixup_models[] = { {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"}, {.id = ALC887_FIXUP_ASUS_BASS, .name = "asus-bass"}, {.id = 
ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"}, + {.id = ALC1220_FIXUP_GB_X570, .name = "gb-x570"}, {.id = ALC1220_FIXUP_CLEVO_P950, .name = "clevo-p950"}, {} }; @@ -3536,8 +3616,8 @@ static void alc256_shutup(struct hda_codec *codec) /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly * when booting with headset plugged. So skip setting it for the codec alc257 */ - if (spec->codec_variant != ALC269_TYPE_ALC257 && - spec->codec_variant != ALC269_TYPE_ALC256) + if (codec->core.vendor_id != 0x10ec0236 && + codec->core.vendor_id != 0x10ec0257) alc_update_coef_idx(codec, 0x46, 0, 3 << 12); if (!spec->no_shutup_pins) @@ -6683,6 +6763,7 @@ enum { ALC236_FIXUP_HP_MUTE_LED, ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, + ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, ALC295_FIXUP_ASUS_MIC_NO_PRESENCE, ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS, ALC269VC_FIXUP_ACER_HEADSET_MIC, @@ -6729,6 +6810,8 @@ enum { ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE, ALC233_FIXUP_NO_AUDIO_JACK, ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME, + ALC285_FIXUP_LEGION_Y9000X_SPEAKERS, + ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE, }; static const struct hda_fixup alc269_fixups[] = { @@ -8002,6 +8085,14 @@ static const struct hda_fixup alc269_fixups[] = { { } }, }, + [ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x20, AC_VERB_SET_COEF_INDEX, 0x08}, + { 0x20, AC_VERB_SET_PROC_COEF, 0x2fcf}, + { } + }, + }, [ALC295_FIXUP_ASUS_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -8319,6 +8410,18 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, }, + [ALC285_FIXUP_LEGION_Y9000X_SPEAKERS] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_ideapad_s740_coef, + .chained = true, + .chain_id = ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE, + }, + [ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc287_fixup_legion_15imhg05_speakers, + .chained = true, + .chain_id = ALC269_FIXUP_THINKPAD_ACPI, + }, [ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = { .type = HDA_FIXUP_VERBS, //.v.verbs = legion_15imhg05_coefs, @@ -8708,6 +8811,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), @@ -8740,6 +8845,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8), SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), + SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), 
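/* An illustrative aside, not part of the patch: each SND_PCI_QUIRK() entry
 * in tables like this one maps a PCI subsystem vendor/device pair to a
 * fixup ID, and the driver resolves it at probe time with
 * snd_pci_quirk_lookup(). A simplified model of that search (the real
 * helper also supports masked and wildcard device matches):
 */
struct quirk {
	unsigned short subvendor, subdevice;
	const char *name;
	int fixup;
};

static int quirk_lookup(const struct quirk *tbl,
			unsigned short sv, unsigned short sd)
{
	for (; tbl->name; tbl++)	/* table ends with an empty entry */
		if (tbl->subvendor == sv && tbl->subdevice == sd)
			return tbl->fixup;
	return -1;	/* no quirk: fall back to BIOS defaults */
}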
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), @@ -8790,6 +8896,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[5|7][0-9]RZ[Q]", ALC269_FIXUP_DMIC), SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME), SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), @@ -8857,13 +8965,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), + SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), + SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), + SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x383d, "Legion Y9000X 2019", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP), - SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), + SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), - SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), @@ -8883,6 +8995,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), + SND_PCI_QUIRK(0x17aa, 0x508b, "Thinkpad X12 Gen 1", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), @@ -9076,6 +9189,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"}, {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"}, {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = 
"alc298-samsung-headphone"}, + {.id = ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc256-samsung-headphone"}, {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"}, {.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"}, {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"}, @@ -10741,6 +10855,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), + SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE), SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50), SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50), diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c index 6a63e8797a0b6b5ecd58076dd1cd751dd81a1afd..97533412ce11e8457d7513e8b804ffbaf9dd69e7 100644 --- a/sound/soc/atmel/atmel_ssc_dai.c +++ b/sound/soc/atmel/atmel_ssc_dai.c @@ -280,7 +280,10 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream, /* Enable PMC peripheral clock for this SSC */ pr_debug("atmel_ssc_dai: Starting clock\n"); - clk_enable(ssc_p->ssc->clk); + ret = clk_enable(ssc_p->ssc->clk); + if (ret) + return ret; + ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk); /* Reset the SSC unless initialized to keep it in a clean state */ diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c index ed1f69b570244f7078cc2c31a987a21c6153b901..8a55d59a6c2aa97bc7680287479d0a1c9dd2859d 100644 --- a/sound/soc/atmel/sam9g20_wm8731.c +++ b/sound/soc/atmel/sam9g20_wm8731.c @@ -214,6 +214,7 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev) cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0); if (!cpu_np) { dev_err(&pdev->dev, "dai and pcm info missing\n"); + of_node_put(codec_np); return -EINVAL; } at91sam9g20ek_dai.cpus->of_node = cpu_np; diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c index 9fbc3c1113cc5ecb2bb6bb7b3ad6e73ddb2ec04c..529604a06c53207303cf76e968e7f6a31a2d132e 100644 --- a/sound/soc/atmel/sam9x5_wm8731.c +++ b/sound/soc/atmel/sam9x5_wm8731.c @@ -142,7 +142,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev) if (!cpu_np) { dev_err(&pdev->dev, "atmel,ssc-controller node missing\n"); ret = -EINVAL; - goto out; + goto out_put_codec_np; } dai->cpus->of_node = cpu_np; dai->platforms->of_node = cpu_np; @@ -153,13 +153,10 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev) if (ret != 0) { dev_err(&pdev->dev, "Failed to set SSC %d for audio: %d\n", ret, priv->ssc_id); - goto out; + goto out_put_cpu_np; } - of_node_put(codec_np); - of_node_put(cpu_np); - - ret = snd_soc_register_card(card); + ret = devm_snd_soc_register_card(&pdev->dev, card); if (ret) { dev_err(&pdev->dev, "Platform device allocation failed\n"); goto out_put_audio; @@ -167,10 +164,14 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "%s ok\n", __func__); - return ret; + goto out_put_cpu_np; out_put_audio: atmel_ssc_put_audio(priv->ssc_id); +out_put_cpu_np: + of_node_put(cpu_np); +out_put_codec_np: + of_node_put(codec_np); out: return ret; } @@ -180,7 +181,6 @@ static int sam9x5_wm8731_driver_remove(struct platform_device *pdev) struct snd_soc_card *card = platform_get_drvdata(pdev); struct 
sam9x5_drvdata *priv = card->drvdata; - snd_soc_unregister_card(card); atmel_ssc_put_audio(priv->ssc_id); return 0; diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 34c6dd04b85a3c9e2619fa1d27a210bf2e484a8f..52c89a6f54e9a6b53e6ea41d88fd5cf8700e220d 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -659,6 +659,7 @@ config SND_SOC_CS4349 config SND_SOC_CS47L15 tristate + depends on MFD_CS47L15 config SND_SOC_CS47L24 tristate @@ -666,15 +667,19 @@ config SND_SOC_CS47L24 config SND_SOC_CS47L35 tristate + depends on MFD_CS47L35 config SND_SOC_CS47L85 tristate + depends on MFD_CS47L85 config SND_SOC_CS47L90 tristate + depends on MFD_CS47L90 config SND_SOC_CS47L92 tristate + depends on MFD_CS47L92 # Cirrus Logic Quad-Channel ADC config SND_SOC_CS53L30 diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c index c0425e3707d9c2d91c56370f7e62d9ed74493ef1..a3597137fee3e40eda5d52d9bca9a31d9725a814 100644 --- a/sound/soc/codecs/cpcap.c +++ b/sound/soc/codecs/cpcap.c @@ -1544,6 +1544,8 @@ static int cpcap_codec_probe(struct platform_device *pdev) { struct device_node *codec_node = of_get_child_by_name(pdev->dev.parent->of_node, "audio-codec"); + if (!codec_node) + return -ENODEV; pdev->dev.of_node = codec_node; diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c index d76be44f46b406df325c9cda1d6384a99642dbce..36b9e4fab099b971c9f6d4d131f0f86ec40a40b4 100644 --- a/sound/soc/codecs/cs4265.c +++ b/sound/soc/codecs/cs4265.c @@ -150,7 +150,6 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = { SOC_SINGLE("E to F Buffer Disable Switch", CS4265_SPDIF_CTL1, 6, 1, 0), SOC_ENUM("C Data Access", cam_mode_enum), - SOC_SINGLE("SPDIF Switch", CS4265_SPDIF_CTL2, 5, 1, 1), SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2, 3, 1, 0), SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum), @@ -186,7 +185,7 @@ static const struct snd_soc_dapm_widget cs4265_dapm_widgets[] = { SND_SOC_DAPM_SWITCH("Loopback", SND_SOC_NOPM, 0, 0, &loopback_ctl), - SND_SOC_DAPM_SWITCH("SPDIF", SND_SOC_NOPM, 0, 0, + SND_SOC_DAPM_SWITCH("SPDIF", CS4265_SPDIF_CTL2, 5, 1, &spdif_switch), SND_SOC_DAPM_SWITCH("DAC", CS4265_PWRCTL, 1, 1, &dac_switch), diff --git a/sound/soc/codecs/max9759.c b/sound/soc/codecs/max9759.c index 00e9d4fd1651fe570a49dc89a85b7ad37bd25126..0c261335c8a16cbb680738e41484b1a2a91545a9 100644 --- a/sound/soc/codecs/max9759.c +++ b/sound/soc/codecs/max9759.c @@ -64,7 +64,8 @@ static int speaker_gain_control_put(struct snd_kcontrol *kcontrol, struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol); struct max9759 *priv = snd_soc_component_get_drvdata(c); - if (ucontrol->value.integer.value[0] > 3) + if (ucontrol->value.integer.value[0] < 0 || + ucontrol->value.integer.value[0] > 3) return -EINVAL; priv->gain = ucontrol->value.integer.value[0]; diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index 3ddd822240e3aa05fc8381f1aebf11be933fb723..971b8360b5b1be05a73afa6c412332c6092b1c42 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -1221,8 +1221,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) } irq = platform_get_irq_byname(pdev, "mbhc_switch_int"); - if (irq < 0) - return irq; + if (irq < 0) { + ret = irq; + goto err_disable_clk; + } ret = devm_request_threaded_irq(dev, irq, NULL, pm8916_mbhc_switch_irq_handler, @@ -1234,8 +1236,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) if 
(priv->mbhc_btn_enabled) { irq = platform_get_irq_byname(pdev, "mbhc_but_press_det"); - if (irq < 0) - return irq; + if (irq < 0) { + ret = irq; + goto err_disable_clk; + } ret = devm_request_threaded_irq(dev, irq, NULL, mbhc_btn_press_irq_handler, @@ -1246,8 +1250,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) dev_err(dev, "cannot request mbhc button press irq\n"); irq = platform_get_irq_byname(pdev, "mbhc_but_rel_det"); - if (irq < 0) - return irq; + if (irq < 0) { + ret = irq; + goto err_disable_clk; + } ret = devm_request_threaded_irq(dev, irq, NULL, mbhc_btn_release_irq_handler, @@ -1264,6 +1270,10 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev) return devm_snd_soc_register_component(dev, &pm8916_wcd_analog, pm8916_wcd_analog_dai, ARRAY_SIZE(pm8916_wcd_analog_dai)); + +err_disable_clk: + clk_disable_unprepare(priv->mclk); + return ret; } static int pm8916_wcd_analog_spmi_remove(struct platform_device *pdev) diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c index fcc10c8bc625951c9ffb12699cf05f3fb6f4b50b..9ad7fc0baf072678b40063b96fa8450738b06d70 100644 --- a/sound/soc/codecs/msm8916-wcd-digital.c +++ b/sound/soc/codecs/msm8916-wcd-digital.c @@ -1201,7 +1201,7 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev) ret = clk_prepare_enable(priv->mclk); if (ret < 0) { dev_err(dev, "failed to enable mclk %d\n", ret); - return ret; + goto err_clk; } dev_set_drvdata(dev, priv); @@ -1209,6 +1209,9 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev) return devm_snd_soc_register_component(dev, &msm8916_wcd_digital, msm8916_wcd_digital_dai, ARRAY_SIZE(msm8916_wcd_digital_dai)); +err_clk: + clk_disable_unprepare(priv->ahbclk); + return ret; } static int msm8916_wcd_digital_remove(struct platform_device *pdev) diff --git a/sound/soc/codecs/mt6358.c b/sound/soc/codecs/mt6358.c index 1f39d5998cf67518807f04fd30ddd7306af7132c..456d9b24d0249e9bad4ced42de6d82ab0677dd4c 100644 --- a/sound/soc/codecs/mt6358.c +++ b/sound/soc/codecs/mt6358.c @@ -107,6 +107,7 @@ int mt6358_set_mtkaif_protocol(struct snd_soc_component *cmpnt, priv->mtkaif_protocol = mtkaif_protocol; return 0; } +EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_protocol); static void playback_gpio_set(struct mt6358_priv *priv) { @@ -273,6 +274,7 @@ int mt6358_mtkaif_calibration_enable(struct snd_soc_component *cmpnt) 1 << RG_AUD_PAD_TOP_DAT_MISO_LOOPBACK_SFT); return 0; } +EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_enable); int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt) { @@ -296,6 +298,7 @@ int mt6358_mtkaif_calibration_disable(struct snd_soc_component *cmpnt) capture_gpio_reset(priv); return 0; } +EXPORT_SYMBOL_GPL(mt6358_mtkaif_calibration_disable); int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt, int phase_1, int phase_2) @@ -310,6 +313,7 @@ int mt6358_set_mtkaif_calibration_phase(struct snd_soc_component *cmpnt, phase_2 << RG_AUD_PAD_TOP_PHASE_MODE2_SFT); return 0; } +EXPORT_SYMBOL_GPL(mt6358_set_mtkaif_calibration_phase); /* dl pga gain */ enum { diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c index 619fb9a031e39a8a03e5134d7b8006e88292618a..4423e61bf1abf611380a2ffbff14427a0f8f6e06 100644 --- a/sound/soc/codecs/rt5663.c +++ b/sound/soc/codecs/rt5663.c @@ -3461,6 +3461,7 @@ static void rt5663_calibrate(struct rt5663_priv *rt5663) static int rt5663_parse_dp(struct rt5663_priv *rt5663, struct device *dev) { int table_size; + int ret; 
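/*
 * Annotation (not part of the patch): the msm8916 hunks above all apply the
 * same probe() unwind rule -- a clock turned on with clk_prepare_enable() is
 * not devm-managed, so every error path reached after that point has to
 * release it by hand while preserving the original error code. A minimal
 * sketch of the shape these fixes converge on; the names are illustrative,
 * not taken from the driver:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv = foo_get_priv(pdev);	// illustrative
 *		int irq, ret;
 *
 *		ret = clk_prepare_enable(priv->mclk);
 *		if (ret)
 *			return ret;		// nothing enabled yet
 *
 *		irq = platform_get_irq_byname(pdev, "foo_irq");
 *		if (irq < 0) {
 *			ret = irq;		// keep the errno
 *			goto err_disable_clk;	// balance the enable
 *		}
 *
 *		return 0;
 *
 *	err_disable_clk:
 *		clk_disable_unprepare(priv->mclk);
 *		return ret;
 *	}
 */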
device_property_read_u32(dev, "realtek,dc_offset_l_manual", &rt5663->pdata.dc_offset_l_manual); @@ -3477,9 +3478,13 @@ static int rt5663_parse_dp(struct rt5663_priv *rt5663, struct device *dev) table_size = sizeof(struct impedance_mapping_table) * rt5663->pdata.impedance_sensing_num; rt5663->imp_table = devm_kzalloc(dev, table_size, GFP_KERNEL); - device_property_read_u32_array(dev, + if (!rt5663->imp_table) + return -ENOMEM; + ret = device_property_read_u32_array(dev, "realtek,impedance_sensing_table", (u32 *)rt5663->imp_table, table_size); + if (ret) + return ret; } return 0; @@ -3504,8 +3509,11 @@ static int rt5663_i2c_probe(struct i2c_client *i2c, if (pdata) rt5663->pdata = *pdata; - else - rt5663_parse_dp(rt5663, &i2c->dev); + else { + ret = rt5663_parse_dp(rt5663, &i2c->dev); + if (ret) + return ret; + } for (i = 0; i < ARRAY_SIZE(rt5663->supplies); i++) rt5663->supplies[i].supply = rt5663_supply_names[i]; diff --git a/sound/soc/codecs/rt5668.c b/sound/soc/codecs/rt5668.c index bc69adc9c8b707bb8c3e292e35cd7a9ec474e093..e625df57c69e5a99d2195d259a4ed939e455bd1b 100644 --- a/sound/soc/codecs/rt5668.c +++ b/sound/soc/codecs/rt5668.c @@ -1022,11 +1022,13 @@ static void rt5668_jack_detect_handler(struct work_struct *work) container_of(work, struct rt5668_priv, jack_detect_work.work); int val, btn_type; - while (!rt5668->component) - usleep_range(10000, 15000); - - while (!rt5668->component->card->instantiated) - usleep_range(10000, 15000); + if (!rt5668->component || !rt5668->component->card || + !rt5668->component->card->instantiated) { + /* card not yet ready, try later */ + mod_delayed_work(system_power_efficient_wq, + &rt5668->jack_detect_work, msecs_to_jiffies(15)); + return; + } mutex_lock(&rt5668->calibrate_mutex); diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index aaef76cc151fa7bf3e8bf09c2ebc99efba6771ba..113ed00ddf1e5839574ed53c251a148ca0fad55c 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -1081,11 +1081,13 @@ void rt5682_jack_detect_handler(struct work_struct *work) container_of(work, struct rt5682_priv, jack_detect_work.work); int val, btn_type; - while (!rt5682->component) - usleep_range(10000, 15000); - - while (!rt5682->component->card->instantiated) - usleep_range(10000, 15000); + if (!rt5682->component || !rt5682->component->card || + !rt5682->component->card->instantiated) { + /* card not yet ready, try later */ + mod_delayed_work(system_power_efficient_wq, + &rt5682->jack_detect_work, msecs_to_jiffies(15)); + return; + } mutex_lock(&rt5682->calibrate_mutex); diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c index 61c3238bc265678c099e988a5bf216c66465d39b..315fd9d971c8c3661b0acfb5ca2c86e05a1ee678 100644 --- a/sound/soc/codecs/tas2770.c +++ b/sound/soc/codecs/tas2770.c @@ -38,10 +38,12 @@ static void tas2770_reset(struct tas2770_priv *tas2770) gpiod_set_value_cansleep(tas2770->reset_gpio, 0); msleep(20); gpiod_set_value_cansleep(tas2770->reset_gpio, 1); + usleep_range(1000, 2000); } snd_soc_component_write(tas2770->component, TAS2770_SW_RST, TAS2770_RST); + usleep_range(1000, 2000); } static int tas2770_set_bias_level(struct snd_soc_component *component, @@ -110,6 +112,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component) if (tas2770->sdz_gpio) { gpiod_set_value_cansleep(tas2770->sdz_gpio, 1); + usleep_range(1000, 2000); } else { ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL, TAS2770_PWR_CTRL_MASK, @@ -510,8 +513,10 @@ static int tas2770_codec_probe(struct 
snd_soc_component *component) tas2770->component = component; - if (tas2770->sdz_gpio) + if (tas2770->sdz_gpio) { gpiod_set_value_cansleep(tas2770->sdz_gpio, 1); + usleep_range(1000, 2000); + } tas2770_reset(tas2770); diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c index 01df3f4e045a9e18bcd5a7a396348d2fcd776c6b..8540ac230d0eda3115736cc608789c4412d1ea7f 100644 --- a/sound/soc/codecs/wcd934x.c +++ b/sound/soc/codecs/wcd934x.c @@ -2522,13 +2522,16 @@ static int wcd934x_rx_hph_mode_put(struct snd_kcontrol *kc, mode_val = ucontrol->value.enumerated.item[0]; + if (mode_val == wcd->hph_mode) + return 0; + if (mode_val == 0) { dev_err(wcd->dev, "Invalid HPH Mode, default to ClSH HiFi\n"); mode_val = CLS_H_LOHIFI; } wcd->hph_mode = mode_val; - return 0; + return 1; } static int slim_rx_mux_get(struct snd_kcontrol *kc, @@ -5044,6 +5047,7 @@ static int wcd934x_codec_parse_data(struct wcd934x_codec *wcd) } wcd->sidev = of_slim_get_device(wcd->sdev->ctrl, ifc_dev_np); + of_node_put(ifc_dev_np); if (!wcd->sidev) { dev_err(dev, "Unable to get SLIM Interface device\n"); return -EINVAL; diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c index a6aa212fa0c89faa2865755cf68da635226f27a6..ec5d997725b9c10f3c875972aef7870af53547b2 100644 --- a/sound/soc/codecs/wm8350.c +++ b/sound/soc/codecs/wm8350.c @@ -1536,18 +1536,38 @@ static int wm8350_component_probe(struct snd_soc_component *component) wm8350_clear_bits(wm8350, WM8350_JACK_DETECT, WM8350_JDL_ENA | WM8350_JDR_ENA); - wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, wm8350_hpl_jack_handler, 0, "Left jack detect", priv); - wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R, + if (ret != 0) + goto err; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R, wm8350_hpr_jack_handler, 0, "Right jack detect", priv); - wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICSCD, + if (ret != 0) + goto free_jck_det_l; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICSCD, wm8350_mic_handler, 0, "Microphone short", priv); - wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICD, + if (ret != 0) + goto free_jck_det_r; + + ret = wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_MICD, wm8350_mic_handler, 0, "Microphone detect", priv); + if (ret != 0) + goto free_micscd; return 0; + +free_micscd: + wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_MICSCD, priv); +free_jck_det_r: + wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R, priv); +free_jck_det_l: + wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, priv); +err: + return ret; } static void wm8350_component_remove(struct snd_soc_component *component) diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c index fd4160289faca7b44b8f6feb13d60bf8b4f4c491..36da0f01571a111f899fc12c60bab3cde08efde6 100644 --- a/sound/soc/dwc/dwc-i2s.c +++ b/sound/soc/dwc/dwc-i2s.c @@ -403,9 +403,13 @@ static int dw_i2s_runtime_suspend(struct device *dev) static int dw_i2s_runtime_resume(struct device *dev) { struct dw_i2s_dev *dw_dev = dev_get_drvdata(dev); + int ret; - if (dw_dev->capability & DW_I2S_MASTER) - clk_enable(dw_dev->clk); + if (dw_dev->capability & DW_I2S_MASTER) { + ret = clk_enable(dw_dev->clk); + if (ret) + return ret; + } return 0; } @@ -422,10 +426,13 @@ static int dw_i2s_resume(struct snd_soc_component *component) { struct dw_i2s_dev *dev = snd_soc_component_get_drvdata(component); struct snd_soc_dai *dai; - int stream; + int stream, ret; - if (dev->capability & DW_I2S_MASTER) - clk_enable(dev->clk); + if 
(dev->capability & DW_I2S_MASTER) { + ret = clk_enable(dev->clk); + if (ret) + return ret; + } for_each_component_dais(component, dai) { for_each_pcm_streams(stream) diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c index 02c81d2e34ad00f5437a46d5f264679552507a24..5e3c71f025f4531faf9df246b538b8414029423d 100644 --- a/sound/soc/fsl/fsl_asrc.c +++ b/sound/soc/fsl/fsl_asrc.c @@ -19,6 +19,7 @@ #include "fsl_asrc.h" #define IDEAL_RATIO_DECIMAL_DEPTH 26 +#define DIVIDER_NUM 64 #define pair_err(fmt, ...) \ dev_err(&asrc->pdev->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__) @@ -101,6 +102,55 @@ static unsigned char clk_map_imx8qxp[2][ASRC_CLK_MAP_LEN] = { }, }; +/* + * According to RM, the divider range is 1 ~ 8, + * prescaler is power of 2 from 1 ~ 128. + */ +static int asrc_clk_divider[DIVIDER_NUM] = { + 1, 2, 4, 8, 16, 32, 64, 128, /* divider = 1 */ + 2, 4, 8, 16, 32, 64, 128, 256, /* divider = 2 */ + 3, 6, 12, 24, 48, 96, 192, 384, /* divider = 3 */ + 4, 8, 16, 32, 64, 128, 256, 512, /* divider = 4 */ + 5, 10, 20, 40, 80, 160, 320, 640, /* divider = 5 */ + 6, 12, 24, 48, 96, 192, 384, 768, /* divider = 6 */ + 7, 14, 28, 56, 112, 224, 448, 896, /* divider = 7 */ + 8, 16, 32, 64, 128, 256, 512, 1024, /* divider = 8 */ +}; + +/* + * Check if the divider is available for internal ratio mode + */ +static bool fsl_asrc_divider_avail(int clk_rate, int rate, int *div) +{ + u32 rem, i; + u64 n; + + if (div) + *div = 0; + + if (clk_rate == 0 || rate == 0) + return false; + + n = clk_rate; + rem = do_div(n, rate); + + if (div) + *div = n; + + if (rem != 0) + return false; + + for (i = 0; i < DIVIDER_NUM; i++) { + if (n == asrc_clk_divider[i]) + break; + } + + if (i == DIVIDER_NUM) + return false; + + return true; +} + /** * fsl_asrc_sel_proc - Select the pre-processing and post-processing options * @inrate: input sample rate @@ -330,12 +380,12 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate) enum asrc_word_width input_word_width; enum asrc_word_width output_word_width; u32 inrate, outrate, indiv, outdiv; - u32 clk_index[2], div[2], rem[2]; + u32 clk_index[2], div[2]; u64 clk_rate; int in, out, channels; int pre_proc, post_proc; struct clk *clk; - bool ideal; + bool ideal, div_avail; if (!config) { pair_err("invalid pair config\n"); @@ -415,8 +465,7 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate) clk = asrc_priv->asrck_clk[clk_index[ideal ? OUT : IN]]; clk_rate = clk_get_rate(clk); - rem[IN] = do_div(clk_rate, inrate); - div[IN] = (u32)clk_rate; + div_avail = fsl_asrc_divider_avail(clk_rate, inrate, &div[IN]); /* * The divider range is [1, 1024], defined by the hardware. For non- @@ -425,7 +474,7 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate) * only result in different converting speeds. So remainder does not * matter, as long as we keep the divider within its valid range. */ - if (div[IN] == 0 || (!ideal && (div[IN] > 1024 || rem[IN] != 0))) { + if (div[IN] == 0 || (!ideal && !div_avail)) { pair_err("failed to support input sample rate %dHz by asrck_%x\n", inrate, clk_index[ideal ? 
OUT : IN]); return -EINVAL; @@ -436,13 +485,12 @@ static int fsl_asrc_config_pair(struct fsl_asrc_pair *pair, bool use_ideal_rate) clk = asrc_priv->asrck_clk[clk_index[OUT]]; clk_rate = clk_get_rate(clk); if (ideal && use_ideal_rate) - rem[OUT] = do_div(clk_rate, IDEAL_RATIO_RATE); + div_avail = fsl_asrc_divider_avail(clk_rate, IDEAL_RATIO_RATE, &div[OUT]); else - rem[OUT] = do_div(clk_rate, outrate); - div[OUT] = clk_rate; + div_avail = fsl_asrc_divider_avail(clk_rate, outrate, &div[OUT]); /* Output divider has the same limitation as the input one */ - if (div[OUT] == 0 || (!ideal && (div[OUT] > 1024 || rem[OUT] != 0))) { + if (div[OUT] == 0 || (!ideal && !div_avail)) { pair_err("failed to support output sample rate %dHz by asrck_%x\n", outrate, clk_index[OUT]); return -EINVAL; @@ -621,8 +669,7 @@ static void fsl_asrc_select_clk(struct fsl_asrc_priv *asrc_priv, clk_index = asrc_priv->clk_map[j][i]; clk_rate = clk_get_rate(asrc_priv->asrck_clk[clk_index]); /* Only match a perfect clock source with no remainder */ - if (clk_rate != 0 && (clk_rate / rate[j]) <= 1024 && - (clk_rate % rate[j]) == 0) + if (fsl_asrc_divider_avail(clk_rate, rate[j], NULL)) break; } diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c index 69aeb0e71844d9f614e156f4e8a0cbcdb41d728f..0d4efbed41dab6cac2e413f332fa4d1dae55b7bf 100644 --- a/sound/soc/fsl/fsl_mqs.c +++ b/sound/soc/fsl/fsl_mqs.c @@ -337,4 +337,4 @@ module_platform_driver(fsl_mqs_driver); MODULE_AUTHOR("Shengjiu Wang "); MODULE_DESCRIPTION("MQS codec driver"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform: fsl-mqs"); +MODULE_ALIAS("platform:fsl-mqs"); diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c index 15bcb0f38ec9e89f5a6c9f161b8d00572ff4e390..d01e8d516df1f8da072031f5cdf77fc2fff76f50 100644 --- a/sound/soc/fsl/fsl_spdif.c +++ b/sound/soc/fsl/fsl_spdif.c @@ -544,6 +544,8 @@ static void fsl_spdif_shutdown(struct snd_pcm_substream *substream, mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK | SCR_TXSEL_MASK | SCR_USRC_SEL_MASK | SCR_TXFIFO_FSEL_MASK; + /* Disable TX clock */ + regmap_update_bits(regmap, REG_SPDIF_STC, STC_TXCLK_ALL_EN_MASK, 0); } else { scr = SCR_RXFIFO_OFF | SCR_RXFIFO_CTL_ZERO; mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK| diff --git a/sound/soc/fsl/imx-es8328.c b/sound/soc/fsl/imx-es8328.c index fad1eb6253d53e66aa7646337fc0de34c5745f78..9e602c3456196c5a86ddfb4abf084b577da4ee91 100644 --- a/sound/soc/fsl/imx-es8328.c +++ b/sound/soc/fsl/imx-es8328.c @@ -87,6 +87,7 @@ static int imx_es8328_probe(struct platform_device *pdev) if (int_port > MUX_PORT_MAX || int_port == 0) { dev_err(dev, "mux-int-port: hardware only has %d mux ports\n", MUX_PORT_MAX); + ret = -EINVAL; goto fail; } diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c index af3c3b90c0acab59c88d0050838edcc23a848c0b..83b4a22bf15ac0b8883f3144c55f34d5959f6012 100644 --- a/sound/soc/fsl/pcm030-audio-fabric.c +++ b/sound/soc/fsl/pcm030-audio-fabric.c @@ -93,16 +93,21 @@ static int pcm030_fabric_probe(struct platform_device *op) dev_err(&op->dev, "platform_device_alloc() failed\n"); ret = platform_device_add(pdata->codec_device); - if (ret) + if (ret) { dev_err(&op->dev, "platform_device_add() failed: %d\n", ret); + platform_device_put(pdata->codec_device); + } ret = snd_soc_register_card(card); - if (ret) + if (ret) { dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret); + platform_device_del(pdata->codec_device); + platform_device_put(pdata->codec_device); + } platform_set_drvdata(op, 
pdata); - return ret; + } static int pcm030_fabric_remove(struct platform_device *op) diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c index 6cada4c1e283befe3c52848bda0340f082696a88..d0d79f47bfdd50401099c9ce2a0ab9aba3817eb4 100644 --- a/sound/soc/generic/simple-card-utils.c +++ b/sound/soc/generic/simple-card-utils.c @@ -255,7 +255,7 @@ int asoc_simple_hw_params(struct snd_pcm_substream *substream, struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->num); unsigned int mclk, mclk_fs = 0; - int ret = 0; + int ret; if (dai_props->mclk_fs) mclk_fs = dai_props->mclk_fs; diff --git a/sound/soc/intel/catpt/dsp.c b/sound/soc/intel/catpt/dsp.c index 9e807b94173219c687758db9b5510bd9d423beab..38a92bbc1ed5682c72db8825ee34101560348fa3 100644 --- a/sound/soc/intel/catpt/dsp.c +++ b/sound/soc/intel/catpt/dsp.c @@ -65,6 +65,7 @@ static int catpt_dma_memcpy(struct catpt_dev *cdev, struct dma_chan *chan, { struct dma_async_tx_descriptor *desc; enum dma_status status; + int ret; desc = dmaengine_prep_dma_memcpy(chan, dst_addr, src_addr, size, DMA_CTRL_ACK); @@ -77,13 +78,22 @@ static int catpt_dma_memcpy(struct catpt_dev *cdev, struct dma_chan *chan, catpt_updatel_shim(cdev, HMDC, CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id), CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id)); - dmaengine_submit(desc); + + ret = dma_submit_error(dmaengine_submit(desc)); + if (ret) { + dev_err(cdev->dev, "submit tx failed: %d\n", ret); + goto clear_hdda; + } + status = dma_wait_for_async_tx(desc); + ret = (status == DMA_COMPLETE) ? 0 : -EPROTO; + +clear_hdda: /* regardless of status, disable access to HOST memory in demand mode */ catpt_updatel_shim(cdev, HMDC, CATPT_HMDC_HDDA(CATPT_DMA_DEVID, chan->chan_id), 0); - return (status == DMA_COMPLETE) ? 
0 : -EPROTO; + return ret; } int catpt_dma_memcpy_todsp(struct catpt_dev *cdev, struct dma_chan *chan, diff --git a/sound/soc/mediatek/mt8173/mt8173-max98090.c b/sound/soc/mediatek/mt8173/mt8173-max98090.c index fc94314bfc02ffd40dd9863fe29dc80419f5e04b..3bdd4931316cd0e624e327e2372d609eb738af68 100644 --- a/sound/soc/mediatek/mt8173/mt8173-max98090.c +++ b/sound/soc/mediatek/mt8173/mt8173-max98090.c @@ -180,6 +180,9 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev) if (ret) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); + + of_node_put(codec_node); + of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c index 0f28dc2217c091e174f8fbe132d51dac25c03ba7..390da5bf727ebefedec2c9d92c9b5fa1f963e733 100644 --- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c +++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c @@ -218,6 +218,8 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev) if (ret) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); + + of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c index 077c6ee0678067c3ac2a2a6836a4b40625c9f5e4..c8e4e85e105752ac8db257a3d768181c10178103 100644 --- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c +++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c @@ -285,6 +285,8 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev) if (ret) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); + + of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c index c28ebf891cb05f77b50e88869dc75424edaad51e..e168d31f44459cf75cf242c81bf847a698d2b4fc 100644 --- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c +++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c @@ -323,6 +323,8 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev) if (ret) dev_err(&pdev->dev, "%s snd_soc_register_card fail %d\n", __func__, ret); + + of_node_put(platform_node); return ret; } diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c index 20d31b69a5c00bf74354ea60aff898377fb3942d..9cc0f26b08fbcb296d0c657bb006ef241bf6afc5 100644 --- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c +++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c @@ -787,7 +787,11 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev) return ret; } - return devm_snd_soc_register_card(&pdev->dev, card); + ret = devm_snd_soc_register_card(&pdev->dev, card); + + of_node_put(platform_node); + of_node_put(hdmi_codec); + return ret; } #ifdef CONFIG_OF diff --git a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c index 79ba2f2d845223b4603f3ca3862f8ba41ed1dc88..14ce8b93597f3521fe7071aadf3f78a2aaef97ad 100644 --- a/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c +++ b/sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c @@ -720,7 +720,12 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev) __func__, ret); } - return devm_snd_soc_register_card(&pdev->dev, card); + ret = devm_snd_soc_register_card(&pdev->dev, card); + + of_node_put(platform_node); + of_node_put(ec_codec); + of_node_put(hdmi_codec); + return ret; } #ifdef 
CONFIG_OF diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c index 07f8cf9980e31afe73d3b695e7dd2f8f3f1a3de2..f2eda81985e27fd151a5ce7430c9f2a2ea516396 100644 --- a/sound/soc/mxs/mxs-saif.c +++ b/sound/soc/mxs/mxs-saif.c @@ -455,7 +455,10 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream, * basic clock which should be fast enough for the internal * logic. */ - clk_enable(saif->clk); + ret = clk_enable(saif->clk); + if (ret) + return ret; + ret = clk_set_rate(saif->clk, 24000000); clk_disable(saif->clk); if (ret) diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c index a6407f4388de7de3fde2ebb8cb9ec032f6c09eaa..fb721bc499496b18b68057b467fa9ae14082503f 100644 --- a/sound/soc/mxs/mxs-sgtl5000.c +++ b/sound/soc/mxs/mxs-sgtl5000.c @@ -118,6 +118,9 @@ static int mxs_sgtl5000_probe(struct platform_device *pdev) codec_np = of_parse_phandle(np, "audio-codec", 0); if (!saif_np[0] || !saif_np[1] || !codec_np) { dev_err(&pdev->dev, "phandle missing or invalid\n"); + of_node_put(codec_np); + of_node_put(saif_np[0]); + of_node_put(saif_np[1]); return -EINVAL; } diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c index fa84ec695b5251903e661cc05567e5796a1c27a9..785baf98f9da25387a568cb0f1efd2b70c3cd3b5 100644 --- a/sound/soc/rockchip/rockchip_i2s.c +++ b/sound/soc/rockchip/rockchip_i2s.c @@ -624,20 +624,23 @@ static int rockchip_i2s_probe(struct platform_device *pdev) i2s->mclk = devm_clk_get(&pdev->dev, "i2s_clk"); if (IS_ERR(i2s->mclk)) { dev_err(&pdev->dev, "Can't retrieve i2s master clock\n"); - return PTR_ERR(i2s->mclk); + ret = PTR_ERR(i2s->mclk); + goto err_clk; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(regs)) - return PTR_ERR(regs); + regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(regs)) { + ret = PTR_ERR(regs); + goto err_clk; + } i2s->regmap = devm_regmap_init_mmio(&pdev->dev, regs, &rockchip_i2s_regmap_config); if (IS_ERR(i2s->regmap)) { dev_err(&pdev->dev, "Failed to initialise managed register map\n"); - return PTR_ERR(i2s->regmap); + ret = PTR_ERR(i2s->regmap); + goto err_clk; } i2s->playback_dma_data.addr = res->start + I2S_TXDR; @@ -696,7 +699,8 @@ static int rockchip_i2s_probe(struct platform_device *pdev) i2s_runtime_suspend(&pdev->dev); err_pm_disable: pm_runtime_disable(&pdev->dev); - +err_clk: + clk_disable_unprepare(i2s->hclk); return ret; } diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c index 66bcc2f97544bfecf0f905c363a85c37fe3a34a1..c3f1b054e2389c5e233258541d614d759c20a795 100644 --- a/sound/soc/samsung/idma.c +++ b/sound/soc/samsung/idma.c @@ -360,6 +360,8 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream) buf->addr = idma.lp_tx_addr; buf->bytes = idma_hardware.buffer_bytes_max; buf->area = (unsigned char * __force)ioremap(buf->addr, buf->bytes); + if (!buf->area) + return -ENOMEM; return 0; } diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c index 3c574792231bc5c376d201838f497bd3cb063744..0fa72907d5bf14658be2e23ec08fcec358a2b483 100644 --- a/sound/soc/sh/fsi.c +++ b/sound/soc/sh/fsi.c @@ -816,14 +816,27 @@ static int fsi_clk_enable(struct device *dev, return ret; } - clk_enable(clock->xck); - clk_enable(clock->ick); - clk_enable(clock->div); + ret = clk_enable(clock->xck); + if (ret) + goto err; + ret = clk_enable(clock->ick); + if (ret) + goto disable_xck; + ret = clk_enable(clock->div); + if (ret) + goto disable_ick; clock->count++; } return ret; + 
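/*
 * Annotation (not part of the patch): the error labels added below unwind in
 * strict reverse order of the clk_enable() calls above and deliberately fall
 * through into one another, so each failure point releases exactly the
 * clocks that were already enabled:
 *
 *	xck ok, ick fails         -> goto disable_xck (xck is disabled)
 *	xck and ick ok, div fails -> goto disable_ick (ick is disabled,
 *	                             then falls through and disables xck)
 *
 * This is the conventional kernel pattern for staged resource acquisition.
 */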
+disable_ick: + clk_disable(clock->ick); +disable_xck: + clk_disable(clock->xck); +err: + return ret; } static int fsi_clk_disable(struct device *dev, diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index 3a6a60215e815cf1535828759d86a6667a416712..d0f3ff8edd904e2b9765c939c219786412dd9163 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c @@ -766,6 +766,11 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) return -EINVAL; } + if (!codec_dai) { + dev_err(rtd->card->dev, "Missing codec\n"); + return -EINVAL; + } + /* check client and interface hw capabilities */ if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_PLAYBACK) && snd_soc_dai_stream_valid(cpu_dai, SNDRV_PCM_STREAM_PLAYBACK)) diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 13329659686468e35d81b36284cce8ce79f01e17..a6d6d10cd471bd3b7e92a941634d239e194651ff 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3020,7 +3020,7 @@ int snd_soc_get_dai_name(struct of_phandle_args *args, for_each_component(pos) { component_of_node = soc_component_to_node(pos); - if (component_of_node != args->np) + if (component_of_node != args->np || !pos->num_dai) continue; ret = snd_soc_component_of_xlate_dai_name(pos, args, dai_name); diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index 9ef80a48707eb6583ff51ec278c1a5b035970ba1..0d100b4e43f7ef6489cbc4a12ff7de91bde2f7dd 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -83,10 +83,10 @@ static int dmaengine_pcm_hw_params(struct snd_soc_component *component, memset(&slave_config, 0, sizeof(slave_config)); - if (!pcm->config) - prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config; - else + if (pcm->config && pcm->config->prepare_slave_config) prepare_slave_config = pcm->config->prepare_slave_config; + else + prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config; if (prepare_slave_config) { ret = prepare_slave_config(substream, params, &slave_config); diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index 10f48827bb0e0fee2a6a7d7565d97114f4f06963..2bc9fa6a34b8faf645ef36dcce95d1a38778671c 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -308,7 +308,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, unsigned int sign_bit = mc->sign_bit; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; - int err; + int err, ret; bool type_2r = false; unsigned int val2 = 0; unsigned int val, val_mask; @@ -316,13 +316,27 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, if (sign_bit) mask = BIT(sign_bit + 1) - 1; - val = ((ucontrol->value.integer.value[0] + min) & mask); + val = ucontrol->value.integer.value[0]; + if (mc->platform_max && ((int)val + min) > mc->platform_max) + return -EINVAL; + if (val > max - min) + return -EINVAL; + if (val < 0) + return -EINVAL; + val = (val + min) & mask; if (invert) val = max - val; val_mask = mask << shift; val = val << shift; if (snd_soc_volsw_is_stereo(mc)) { - val2 = ((ucontrol->value.integer.value[1] + min) & mask); + val2 = ucontrol->value.integer.value[1]; + if (mc->platform_max && ((int)val2 + min) > mc->platform_max) + return -EINVAL; + if (val2 > max - min) + return -EINVAL; + if (val2 < 0) + return -EINVAL; + val2 = (val2 + min) & mask; if (invert) val2 = max - val2; if (reg == reg2) { @@ -336,12 +350,18 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, err = snd_soc_component_update_bits(component, reg, val_mask, val); if (err < 
0) return err; + ret = err; - if (type_2r) + if (type_2r) { err = snd_soc_component_update_bits(component, reg2, val_mask, - val2); + val2); + /* Don't discard any error code or drop change flag */ + if (ret == 0 || err < 0) { + ret = err; + } + } - return err; + return ret; } EXPORT_SYMBOL_GPL(snd_soc_put_volsw); @@ -409,8 +429,15 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, int err = 0; unsigned int val, val_mask, val2 = 0; + val = ucontrol->value.integer.value[0]; + if (mc->platform_max && val > mc->platform_max) + return -EINVAL; + if (val > max - min) + return -EINVAL; + if (val < 0) + return -EINVAL; val_mask = mask << shift; - val = (ucontrol->value.integer.value[0] + min) & mask; + val = (val + min) & mask; val = val << shift; err = snd_soc_component_update_bits(component, reg, val_mask, val); @@ -483,7 +510,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; unsigned int val, val_mask; - int ret; + int err, ret; if (invert) val = (max - ucontrol->value.integer.value[0]) & mask; @@ -492,9 +519,10 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, val_mask = mask << shift; val = val << shift; - ret = snd_soc_component_update_bits(component, reg, val_mask, val); - if (ret < 0) - return ret; + err = snd_soc_component_update_bits(component, reg, val_mask, val); + if (err < 0) + return err; + ret = err; if (snd_soc_volsw_is_stereo(mc)) { if (invert) @@ -504,8 +532,12 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, val_mask = mask << shift; val = val << shift; - ret = snd_soc_component_update_bits(component, rreg, val_mask, + err = snd_soc_component_update_bits(component, rreg, val_mask, val); + /* Don't discard any error code or drop change flag */ + if (ret == 0 || err < 0) { + ret = err; + } } return ret; @@ -859,6 +891,8 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol, unsigned int i, regval, regmask; int err; + if (val < mc->min || val > mc->max) + return -EINVAL; if (invert) val = max - val; val &= mask; diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 4d24ac255d2532a64def67a2b04a9011a05ef087..23a5f9a52da0fddf8ea0b95d3910811893deee70 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c @@ -578,7 +578,8 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr, if (le32_to_cpu(hdr->ops.info) == SND_SOC_TPLG_CTL_BYTES && k->iface & SNDRV_CTL_ELEM_IFACE_MIXER - && k->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE + && (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_READ + || k->access & SNDRV_CTL_ELEM_ACCESS_TLV_WRITE) && k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { struct soc_bytes_ext *sbe; struct snd_soc_tplg_bytes_control *be; diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c index cb822d9537678d0e9e9972bd47375054cfdefaa0..6943c05273ae7b7068cc33c7682b3d0860598ec2 100644 --- a/sound/soc/sof/imx/imx8m.c +++ b/sound/soc/sof/imx/imx8m.c @@ -191,6 +191,7 @@ static int imx8m_probe(struct snd_sof_dev *sdev) } ret = of_address_to_resource(res_node, 0, &res); + of_node_put(res_node); if (ret) { dev_err(&pdev->dev, "failed to get reserved region address\n"); goto exit_pdev_unregister; diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c index 2707a16c6a4d3db863fe6d322f705f468fdde48d..347636a80b48765109274604b6d9e775775a7162 100644 --- a/sound/soc/sof/intel/hda-loader.c +++ b/sound/soc/sof/intel/hda-loader.c @@ -47,7 +47,7 @@ static struct hdac_ext_stream 
*cl_stream_prepare(struct snd_sof_dev *sdev, unsig ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab); if (ret < 0) { dev_err(sdev->dev, "error: memory alloc failed: %x\n", ret); - goto error; + goto out_put; } hstream->period_bytes = 0;/* initialize period_bytes */ @@ -58,22 +58,23 @@ static struct hdac_ext_stream *cl_stream_prepare(struct snd_sof_dev *sdev, unsig ret = hda_dsp_iccmax_stream_hw_params(sdev, dsp_stream, dmab, NULL); if (ret < 0) { dev_err(sdev->dev, "error: iccmax stream prepare failed: %x\n", ret); - goto error; + goto out_free; } } else { ret = hda_dsp_stream_hw_params(sdev, dsp_stream, dmab, NULL); if (ret < 0) { dev_err(sdev->dev, "error: hdac prepare failed: %x\n", ret); - goto error; + goto out_free; } hda_dsp_stream_spib_config(sdev, dsp_stream, HDA_DSP_SPIB_ENABLE, size); } return dsp_stream; -error: - hda_dsp_stream_put(sdev, direction, hstream->stream_tag); +out_free: snd_dma_free_pages(dmab); +out_put: + hda_dsp_stream_put(sdev, direction, hstream->stream_tag); return ERR_PTR(ret); } diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c index 2ed92c990b97c1c0d663b9a4c9b023507db580f2..dd9013c4766491af0980e74020c4dcfb4d4d17a2 100644 --- a/sound/soc/sti/uniperif_player.c +++ b/sound/soc/sti/uniperif_player.c @@ -91,7 +91,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id) SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(player); /* Stop the player */ - snd_pcm_stop_xrun(player->substream); + snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN); } ret = IRQ_HANDLED; @@ -105,7 +105,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id) SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player); /* Stop the player */ - snd_pcm_stop_xrun(player->substream); + snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN); ret = IRQ_HANDLED; } @@ -138,7 +138,7 @@ static irqreturn_t uni_player_irq_handler(int irq, void *dev_id) dev_err(player->dev, "Underflow recovery failed\n"); /* Stop the player */ - snd_pcm_stop_xrun(player->substream); + snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN); ret = IRQ_HANDLED; } diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c index 136059331211d2c37da46dff52e332534f95bb39..065c5f0d1f5f00c65715ccde98360816ddbf67a9 100644 --- a/sound/soc/sti/uniperif_reader.c +++ b/sound/soc/sti/uniperif_reader.c @@ -65,7 +65,7 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id) if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) { dev_err(reader->dev, "FIFO error detected\n"); - snd_pcm_stop_xrun(reader->substream); + snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN); ret = IRQ_HANDLED; } diff --git a/sound/soc/ti/davinci-i2s.c b/sound/soc/ti/davinci-i2s.c index dd34504c09ba8178ab5fff4882a3735df46181f6..4895bcee1f5578c24bf5da64505317f74b536d3f 100644 --- a/sound/soc/ti/davinci-i2s.c +++ b/sound/soc/ti/davinci-i2s.c @@ -708,7 +708,9 @@ static int davinci_i2s_probe(struct platform_device *pdev) dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) return -ENODEV; - clk_enable(dev->clk); + ret = clk_enable(dev->clk); + if (ret) + goto err_put_clk; dev->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, dev); @@ -730,6 +732,7 @@ static int davinci_i2s_probe(struct platform_device *pdev) snd_soc_unregister_component(&pdev->dev); err_release_clk: clk_disable(dev->clk); +err_put_clk: clk_put(dev->clk); return ret; } diff --git a/sound/soc/uniphier/Kconfig b/sound/soc/uniphier/Kconfig index 
aa3592ee1358b06466dd9a05017142681b9669cc..ddfa6424c656bde0df43e24f80d7739c6fb6c94e 100644 --- a/sound/soc/uniphier/Kconfig +++ b/sound/soc/uniphier/Kconfig @@ -23,7 +23,6 @@ config SND_SOC_UNIPHIER_LD11 tristate "UniPhier LD11/LD20 Device Driver" depends on SND_SOC_UNIPHIER select SND_SOC_UNIPHIER_AIO - select SND_SOC_UNIPHIER_AIO_DMA help This adds ASoC driver for Socionext UniPhier LD11/LD20 input and output that can be used with other codecs. @@ -34,7 +33,6 @@ config SND_SOC_UNIPHIER_PXS2 tristate "UniPhier PXs2 Device Driver" depends on SND_SOC_UNIPHIER select SND_SOC_UNIPHIER_AIO - select SND_SOC_UNIPHIER_AIO_DMA help This adds ASoC driver for Socionext UniPhier PXs2 input and output that can be used with other codecs. diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c index 91afea9d5de6787906ac5e817ba3406488fa3f58..5c4158069a5a893ebf7cfec6d7a6249c353c68df 100644 --- a/sound/soc/xilinx/xlnx_formatter_pcm.c +++ b/sound/soc/xilinx/xlnx_formatter_pcm.c @@ -37,6 +37,7 @@ #define XLNX_AUD_XFER_COUNT 0x28 #define XLNX_AUD_CH_STS_START 0x2C #define XLNX_BYTES_PER_CH 0x44 +#define XLNX_AUD_ALIGN_BYTES 64 #define AUD_STS_IOC_IRQ_MASK BIT(31) #define AUD_STS_CH_STS_MASK BIT(29) @@ -83,6 +84,7 @@ struct xlnx_pcm_drv_data { struct snd_pcm_substream *play_stream; struct snd_pcm_substream *capture_stream; struct clk *axi_clk; + unsigned int sysclk; }; /* @@ -313,6 +315,15 @@ static irqreturn_t xlnx_s2mm_irq_handler(int irq, void *arg) return IRQ_NONE; } +static int xlnx_formatter_set_sysclk(struct snd_soc_component *component, + int clk_id, int source, unsigned int freq, int dir) +{ + struct xlnx_pcm_drv_data *adata = dev_get_drvdata(component->dev); + + adata->sysclk = freq; + return 0; +} + static int xlnx_formatter_pcm_open(struct snd_soc_component *component, struct snd_pcm_substream *substream) { @@ -368,12 +379,32 @@ static int xlnx_formatter_pcm_open(struct snd_soc_component *component, snd_soc_set_runtime_hwparams(substream, &xlnx_pcm_hardware); runtime->private_data = stream_data; - /* Resize the period size divisible by 64 */ + /* Resize the period bytes as divisible by 64 */ + err = snd_pcm_hw_constraint_step(runtime, 0, + SNDRV_PCM_HW_PARAM_PERIOD_BYTES, + XLNX_AUD_ALIGN_BYTES); + if (err) { + dev_err(component->dev, + "Unable to set constraint on period bytes\n"); + return err; + } + + /* Resize the buffer bytes as divisible by 64 */ err = snd_pcm_hw_constraint_step(runtime, 0, - SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64); + SNDRV_PCM_HW_PARAM_BUFFER_BYTES, + XLNX_AUD_ALIGN_BYTES); if (err) { dev_err(component->dev, - "unable to set constraint on period bytes\n"); + "Unable to set constraint on buffer bytes\n"); + return err; + } + + /* Set periods as integer multiple */ + err = snd_pcm_hw_constraint_integer(runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + if (err < 0) { + dev_err(component->dev, + "Unable to set constraint on periods to be integer\n"); return err; } @@ -429,11 +460,25 @@ static int xlnx_formatter_pcm_hw_params(struct snd_soc_component *component, u64 size; struct snd_pcm_runtime *runtime = substream->runtime; struct xlnx_pcm_stream_param *stream_data = runtime->private_data; + struct xlnx_pcm_drv_data *adata = dev_get_drvdata(component->dev); active_ch = params_channels(params); if (active_ch > stream_data->ch_limit) return -EINVAL; + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && + adata->sysclk) { + unsigned int mclk_fs = adata->sysclk / params_rate(params); + + if (adata->sysclk % params_rate(params) != 0) { + 
dev_warn(component->dev, "sysclk %u not divisible by rate %u\n", + adata->sysclk, params_rate(params)); + return -EINVAL; + } + + writel(mclk_fs, stream_data->mmio + XLNX_AUD_FS_MULTIPLIER); + } + if (substream->stream == SNDRV_PCM_STREAM_CAPTURE && stream_data->xfer_mode == AES_TO_PCM) { val = readl(stream_data->mmio + XLNX_AUD_STS); @@ -531,6 +576,7 @@ static int xlnx_formatter_pcm_new(struct snd_soc_component *component, static const struct snd_soc_component_driver xlnx_asoc_component = { .name = DRV_NAME, + .set_sysclk = xlnx_formatter_set_sysclk, .open = xlnx_formatter_pcm_open, .close = xlnx_formatter_pcm_close, .hw_params = xlnx_formatter_pcm_hw_params, diff --git a/sound/spi/at73c213.c b/sound/spi/at73c213.c index 76c0e37a838cf430f1fdec54b07368c11a05c010..8a2da6b1012eb97a004f0ac20d244bbc8f8cc390 100644 --- a/sound/spi/at73c213.c +++ b/sound/spi/at73c213.c @@ -218,7 +218,9 @@ static int snd_at73c213_pcm_open(struct snd_pcm_substream *substream) runtime->hw = snd_at73c213_playback_hw; chip->substream = substream; - clk_enable(chip->ssc->clk); + err = clk_enable(chip->ssc->clk); + if (err) + return err; return 0; } @@ -776,7 +778,9 @@ static int snd_at73c213_chip_init(struct snd_at73c213 *chip) goto out; /* Enable DAC master clock. */ - clk_enable(chip->board->dac_clk); + retval = clk_enable(chip->board->dac_clk); + if (retval) + goto out; /* Initialize at73c213 on SPI bus. */ retval = snd_at73c213_write_reg(chip, DAC_RST, 0x04); @@ -889,7 +893,9 @@ static int snd_at73c213_dev_init(struct snd_card *card, chip->card = card; chip->irq = -1; - clk_enable(chip->ssc->clk); + retval = clk_enable(chip->ssc->clk); + if (retval) + return retval; retval = request_irq(irq, snd_at73c213_interrupt, 0, "at73c213", chip); if (retval) { @@ -1008,7 +1014,9 @@ static int snd_at73c213_remove(struct spi_device *spi) int retval; /* Stop playback. 
*/ - clk_enable(chip->ssc->clk); + retval = clk_enable(chip->ssc->clk); + if (retval) + goto out; ssc_writel(chip->ssc->regs, CR, SSC_BIT(CR_TXDIS)); clk_disable(chip->ssc->clk); @@ -1088,9 +1096,16 @@ static int snd_at73c213_resume(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); struct snd_at73c213 *chip = card->private_data; + int retval; - clk_enable(chip->board->dac_clk); - clk_enable(chip->ssc->clk); + retval = clk_enable(chip->board->dac_clk); + if (retval) + return retval; + retval = clk_enable(chip->ssc->clk); + if (retval) { + clk_disable(chip->board->dac_clk); + return retval; + } ssc_writel(chip->ssc->regs, CR, SSC_BIT(CR_TXEN)); return 0; diff --git a/sound/usb/format.c b/sound/usb/format.c index 4693384db069509a994c1b16909198ffd9109b74..e8a63ea2189d1728c20b7439ddbdbc9506b06108 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -365,7 +365,7 @@ static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip, for (rate = min; rate <= max; rate += res) { /* Filter out invalid rates on Presonus Studio 1810c */ - if (chip->usb_id == USB_ID(0x0194f, 0x010c) && + if (chip->usb_id == USB_ID(0x194f, 0x010c) && !s1810c_valid_sample_rate(fp, rate)) goto skip_rate; diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index 8f6823df944fffe5af6e91b83a6fb799d0445a2f..81ace832d7e42da21935ef4e2c4c06dbcf17eeb7 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -542,6 +542,16 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = { .id = USB_ID(0x25c4, 0x0003), .map = scms_usb3318_map, }, + { + /* Corsair Virtuoso SE Latest (wired mode) */ + .id = USB_ID(0x1b1c, 0x0a3f), + .map = corsair_virtuoso_map, + }, + { + /* Corsair Virtuoso SE Latest (wireless mode) */ + .id = USB_ID(0x1b1c, 0x0a40), + .map = corsair_virtuoso_map, + }, { .id = USB_ID(0x30be, 0x0101), /* Schiit Hel */ .ignore_ctl_error = 1, diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 8297117f4766e79b0d84431dad3aef2582efe35f..99f2203bf51f140c3ddfd13ab9277a48edea1341 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -3033,7 +3033,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer) err = snd_rme_controls_create(mixer); break; - case USB_ID(0x0194f, 0x010c): /* Presonus Studio 1810c */ + case USB_ID(0x194f, 0x010c): /* Presonus Studio 1810c */ err = snd_sc1810_init_mixer(mixer); break; case USB_ID(0x2a39, 0x3fb0): /* RME Babyface Pro FS */ @@ -3135,9 +3135,10 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, if (unitid == 7 && cval->control == UAC_FU_VOLUME) snd_dragonfly_quirk_db_scale(mixer, cval, kctl); break; - /* lowest playback value is muted on C-Media devices */ - case USB_ID(0x0d8c, 0x000c): - case USB_ID(0x0d8c, 0x0014): + /* lowest playback value is muted on some devices */ + case USB_ID(0x0d8c, 0x000c): /* C-Media */ + case USB_ID(0x0d8c, 0x0014): /* C-Media */ + case USB_ID(0x19f7, 0x0003): /* RODE NT-USB */ if (strstr(kctl->id.name, "Playback")) cval->min_mute = 1; break; diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 949c6d129f2a952494c443d0c43a2db61274fd96..aabd3a10ec5b46981f0edd2e3996e79779bea655 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -84,7 +84,7 @@ * combination. 
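(Annotation, not part of the patch: the hunk below switches this Creative
entry from USB_DEVICE() to USB_AUDIO_DEVICE(). Both match on vendor and
product ID, but USB_AUDIO_DEVICE() additionally requires the matched
interface to be of the USB audio class, so on a composite device the quirk
no longer binds the non-audio interfaces.)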
*/ { - USB_DEVICE(0x041e, 0x4095), + USB_AUDIO_DEVICE(0x041e, 0x4095), .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { .ifnum = QUIRK_ANY_INTERFACE, .type = QUIRK_COMPOSITE, diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 75d4d317b34b6f238af8e72573ef5e4c03078d16..6333a2ecb848aa4600b1c89d67a3c9ab545fb650 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1310,7 +1310,7 @@ int snd_usb_apply_interface_quirk(struct snd_usb_audio *chip, if (chip->usb_id == USB_ID(0x0763, 0x2012)) return fasttrackpro_skip_setting_quirk(chip, iface, altno); /* presonus studio 1810c: skip altsets incompatible with device_setup */ - if (chip->usb_id == USB_ID(0x0194f, 0x010c)) + if (chip->usb_id == USB_ID(0x194f, 0x010c)) return s1810c_skip_setting_quirk(chip, iface, altno); diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c index 9f9fcd2749f224d8ec09e8fae677991f60cc0c9f..dbaa43ffbbd2dca3001a9aec772a6e4ade9a369e 100644 --- a/sound/x86/intel_hdmi_audio.c +++ b/sound/x86/intel_hdmi_audio.c @@ -1276,7 +1276,7 @@ static int had_pcm_mmap(struct snd_pcm_substream *substream, { vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, - substream->dma_buffer.addr >> PAGE_SHIFT, + substream->runtime->dma_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); } diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index dad350d42ecfbf3063c64138375e81591729b092..a7b5c5efcf3b0b5fe5c1b186c4795748662b891c 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -204,7 +204,7 @@ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */ -#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */ +#define X86_FEATURE_RETPOLINE_LFENCE ( 7*32+13) /* "" Use LFENCEs for Spectre variant 2 */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ #define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */ @@ -417,5 +417,6 @@ #define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ +#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index c36a083c8ec0acf3d3629edbf6c095dbdc5242ec..7b9259868243a95eadd4bc6d5cfec58ebea3a900 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -114,6 +114,30 @@ * Not susceptible to * TSX Async Abort (TAA) vulnerabilities. */ +#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /* + * Not susceptible to SBDR and SSDP + * variants of Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FBSDP_NO BIT(14) /* + * Not susceptible to FBSDP variant of + * Processor MMIO stale data + * vulnerabilities. 
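(Annotation, not part of the patch: these ARCH_CAP_* defines mirror
enumeration bits of the IA32_ARCH_CAPABILITIES MSR. The kernel reads them to
decide which variants of the Processor MMIO Stale Data issue a CPU is immune
to, while FB_CLEAR and FB_CLEAR_CTRL below advertise whether VERW-based fill
buffer clearing works and whether it can be toggled via
MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS].)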
+ */ +#define ARCH_CAP_PSDP_NO BIT(15) /* + * Not susceptible to PSDP variant of + * Processor MMIO stale data + * vulnerabilities. + */ +#define ARCH_CAP_FB_CLEAR BIT(17) /* + * VERW clears CPU fill buffer + * even on MDS_NO CPUs. + */ +#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /* + * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS] + * bit available to control VERW + * behavior. + */ #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* @@ -131,6 +155,7 @@ /* SRBDS support */ #define MSR_IA32_MCU_OPT_CTRL 0x00000123 #define RNGDS_MITG_DIS BIT(0) +#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile index f33cb02de95cf1da1e9a783208d65314ad523ac7..3601b1d1974caac3373459398baf1283f34f5d0a 100644 --- a/tools/bpf/bpftool/Documentation/Makefile +++ b/tools/bpf/bpftool/Documentation/Makefile @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only include ../../../scripts/Makefile.include -include ../../../scripts/utilities.mak INSTALL ?= install RM ?= rm -f diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index d566bced135eebf34e8b92faab699cfa2f34889e..802c4a11340055dc8157cbb250cfa20d397f3b32 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only include ../../scripts/Makefile.include -include ../../scripts/utilities.mak ifeq ($(srctree),) srctree := $(patsubst %/,%,$(dir $(CURDIR))) diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index c58a135dc355e68e82de8c00363acf3c95f3757f..1854d6b978604013b2decac6b983d7468dfe345c 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -396,6 +396,8 @@ int main(int argc, char **argv) }; int opt, ret; + setlinebuf(stdout); + last_do_help = do_help; pretty_output = false; json_output = false; diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile index bb9fa8de7e625a559f8edfa58272f082ec32c58a..af9f9d3534c96540feed44bafae1dcde25fb44cb 100644 --- a/tools/bpf/resolve_btfids/Makefile +++ b/tools/bpf/resolve_btfids/Makefile @@ -9,7 +9,11 @@ ifeq ($(V),1) msg = else Q = @ - msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))"; + ifeq ($(silent),1) + msg = + else + msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))"; + endif MAKEFLAGS=--no-print-directory endif diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index d45ec15ac3836cfed33ce6a40401a831c575088a..db3417ff8c5fedb07fcb49432e1d0641f4f1fc50 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -214,9 +214,16 @@ strip-libs = $(filter-out -l%,$(1)) PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null) PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS)) PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) -PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` +PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null) FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) +ifeq ($(CC_NO_CLANG), 0) + PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS)) + PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS)) + PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS)) + FLAGS_PERL_EMBED += -Wno-compound-token-split-by-macro +endif + $(OUTPUT)test-libperl.bin: $(BUILD) 
$(FLAGS_PERL_EMBED) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 2551e9b71167b3526bcf692bb29cc00658f502ea..b8cecb66d28b7f236a1afc3436f8ce315c27500b 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -422,16 +422,22 @@ struct stat { }) /* startup code */ +/* + * x86-64 System V ABI mandates: + * 1) %rsp must be 16-byte aligned right before the function call. + * 2) The deepest stack frame should be zero (the %rbp). + * + */ asm(".section .text\n" ".global _start\n" "_start:\n" "pop %rdi\n" // argc (first arg, %rdi) "mov %rsp, %rsi\n" // argv[] (second arg, %rsi) "lea 8(%rsi,%rdi,8),%rdx\n" // then a NULL then envp (third arg, %rdx) - "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned when - "sub $8, %rsp\n" // entering the callee + "xor %ebp, %ebp\n" // zero the stack frame + "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned before call "call main\n" // main() returns the status code, we'll exit with it. - "movzb %al, %rdi\n" // retrieve exit code from 8 lower bits + "mov %eax, %edi\n" // retrieve exit code (32 bit) "mov $60, %rax\n" // NR_exit == 60 "syscall\n" // really exit "hlt\n" // ensure it does not return @@ -600,20 +606,28 @@ struct sys_stat_struct { }) /* startup code */ +/* + * i386 System V ABI mandates: + * 1) last pushed argument must be 16-byte aligned. + * 2) The deepest stack frame should be set to zero + * + */ asm(".section .text\n" ".global _start\n" "_start:\n" "pop %eax\n" // argc (first arg, %eax) "mov %esp, %ebx\n" // argv[] (second arg, %ebx) "lea 4(%ebx,%eax,4),%ecx\n" // then a NULL then envp (third arg, %ecx) - "and $-16, %esp\n" // x86 ABI : esp must be 16-byte aligned when + "xor %ebp, %ebp\n" // zero the stack frame + "and $-16, %esp\n" // x86 ABI : esp must be 16-byte aligned before + "sub $4, %esp\n" // the call instruction (args are aligned) "push %ecx\n" // push all registers on the stack so that we "push %ebx\n" // support both regparm and plain stack modes "push %eax\n" "call main\n" // main() returns the status code in %eax - "movzbl %al, %ebx\n" // retrieve exit code from lower 8 bits - "movl $1, %eax\n" // NR_exit == 1 - "int $0x80\n" // exit now + "mov %eax, %ebx\n" // retrieve exit code (32-bit int) + "movl $1, %eax\n" // NR_exit == 1 + "int $0x80\n" // exit now "hlt\n" // ensure it does not ""); @@ -797,7 +811,6 @@ asm(".section .text\n" "and %r3, %r1, $-8\n" // AAPCS : sp must be 8-byte aligned in the "mov %sp, %r3\n" // callee, an bl doesn't push (lr=pc) "bl main\n" // main() returns the status code, we'll exit with it. - "and %r0, %r0, $0xff\n" // limit exit code to 8 bits "movs r7, $1\n" // NR_exit == 1 "svc $0x00\n" ""); @@ -994,7 +1007,6 @@ asm(".section .text\n" "add x2, x2, x1\n" // + argv "and sp, x1, -16\n" // sp must be 16-byte aligned in the callee "bl main\n" // main() returns the status code, we'll exit with it. - "and x0, x0, 0xff\n" // limit exit code to 8 bits "mov x8, 93\n" // NR_exit == 93 "svc #0\n" ""); @@ -1199,7 +1211,7 @@ asm(".section .text\n" "addiu $sp,$sp,-16\n" // the callee expects to save a0..a3 there! "jal main\n" // main() returns the status code, we'll exit with it. 
"nop\n" // delayed slot - "and $a0, $v0, 0xff\n" // limit exit code to 8 bits + "move $a0, $v0\n" // retrieve 32-bit exit code from v0 "li $v0, 4001\n" // NR_exit == 4001 "syscall\n" ".end __start\n" @@ -1397,7 +1409,6 @@ asm(".section .text\n" "add a2,a2,a1\n" // + argv "andi sp,a1,-16\n" // sp must be 16-byte aligned "call main\n" // main() returns the status code, we'll exit with it. - "andi a0, a0, 0xff\n" // limit exit code to 8 bits "li a7, 93\n" // NR_exit == 93 "ecall\n" ""); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 00afbbc130eea02e65ede4da81e31de5dceb9a70..a44cb51558254c7bc1dcd9f4c55b17f88c177e40 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1490,8 +1490,8 @@ union bpf_attr { * Return * The return value depends on the result of the test, and can be: * - * * 0, if current task belongs to the cgroup2. - * * 1, if current task does not belong to the cgroup2. + * * 1, if current task belongs to the cgroup2. + * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) @@ -3742,6 +3742,19 @@ union bpf_attr { * Return * The helper returns **TC_ACT_REDIRECT** on success or * **TC_ACT_SHOT** on error. + * + * u64 bpf_get_sockops_uid_gid(void *sockops) + * Description + * Get sock's uid and gid + * Return + * A 64-bit integer containing the current GID and UID, and + * created as such: *current_gid* **<< 32 \|** *current_uid*. + * + * int bpf_sk_original_addr(void *bpf_socket, int optname, char *optval, int optlen) + * Description + * Get Ipv4 origdst or replysrc. Works with IPv4. + * Return + * 0 on success, or a negative error in case of failure. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3900,6 +3913,8 @@ union bpf_attr { FN(per_cpu_ptr), \ FN(this_cpu_ptr), \ FN(redirect_peer), \ + FN(get_sockops_uid_gid), \ + FN(sk_original_addr), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 154b75fc1373efab8c64de0775372e917c89e6f7..f2a353bba25f4ea56efddd2de3b691cdde0eab3b 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -147,7 +147,7 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \ sort -u | wc -l) VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \ sed 's/\[.*\]//' | \ - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}' | \ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) CMD_TARGETS = $(LIB_TARGET) $(PC_FILE) @@ -216,7 +216,7 @@ check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT) sort -u > $(OUTPUT)libbpf_global_syms.tmp; \ readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \ sed 's/\[.*\]//' | \ - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}'| \ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \ sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \ diff -u $(OUTPUT)libbpf_global_syms.tmp \ diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 0911aea4cdbe5c68406b5bc9c15ec58c37058c94..bd22853be4a6b6c4ad710d5f870467235ab01571 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -1416,6 +1416,11 @@ static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id, if (s->name_resolved) return *cached_name ? 
*cached_name : orig_name; + if (btf_is_fwd(t) || (btf_is_enum(t) && btf_vlen(t) == 0)) { + s->name_resolved = 1; + return orig_name; + } + dup_cnt = btf_dump_name_dups(d, name_map, orig_name); if (dup_cnt > 1) { const size_t max_len = 256; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index b337d6f29098bfda5a4f1732ce022d0413fa3f2e..c9f5eef6d3d80502f6ca23e4c6902a448f0c8925 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8320,6 +8320,7 @@ static const struct bpf_sec_def section_defs[] = { .attach_fn = attach_tp), SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT, .attach_fn = attach_raw_tp), + BPF_PROG_SEC("raw_tracepoint.w/", BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE), SEC_DEF("raw_tp/", RAW_TRACEPOINT, .attach_fn = attach_raw_tp), SEC_DEF("tp_btf/", TRACING, @@ -10923,6 +10924,9 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) { + if (!s) + return; + if (s->progs) bpf_object__detach_skeleton(s); if (s->obj) diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 3028f932e10c072a60fd517086aa4fe1dabfe259..c4390ef98b1929e410275fbbd6d02072df57caf5 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -895,12 +895,23 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, int xsk_umem__delete(struct xsk_umem *umem) { + struct xdp_mmap_offsets off; + int err; + if (!umem) return 0; if (umem->refcount) return -EBUSY; + err = xsk_get_mmap_offsets(umem->fd, &off); + if (!err && umem->fill_save && umem->comp_save) { + munmap(umem->fill_save->ring - off.fr.desc, + off.fr.desc + umem->config.fill_size * sizeof(__u64)); + munmap(umem->comp_save->ring - off.cr.desc, + off.cr.desc + umem->config.comp_size * sizeof(__u64)); + } + close(umem->fd); free(umem); diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h index 794a375dad3601e26f870edfa75b3458f1341aa8..b2aec04fce8f674e61eddf6e4820b738f4d4c7e3 100644 --- a/tools/lib/subcmd/subcmd-util.h +++ b/tools/lib/subcmd/subcmd-util.h @@ -50,15 +50,8 @@ static NORETURN inline void die(const char *err, ...) static inline void *xrealloc(void *ptr, size_t size) { void *ret = realloc(ptr, size); - if (!ret && !size) - ret = realloc(ptr, 1); - if (!ret) { - ret = realloc(ptr, size); - if (!ret && !size) - ret = realloc(ptr, 1); - if (!ret) - die("Out of memory, realloc failed"); - } + if (!ret) + die("Out of memory, realloc failed"); return ret; } diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt index c81d72e3eecf44dbffbba26ae15bdac8f2ed0ca9..da49c3d26316499181cd2eb0217ed435f1518ff4 100644 --- a/tools/perf/Documentation/perf-c2c.txt +++ b/tools/perf/Documentation/perf-c2c.txt @@ -109,7 +109,8 @@ REPORT OPTIONS -d:: --display:: - Switch to HITM type (rmt, lcl) to display and sort on. Total HITMs as default. + Switch to HITM type (rmt, lcl) or all load cache hit (all) to display + and sort on. Total HITMs as default. 
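
(Editor's aside: for readers following the new -d option value, a hypothetical session might look like the following; the workload name is a placeholder, and the report layout follows the 'all' columns documented in the hunks below:

	perf c2c record -- ./workload
	perf c2c report -d all --stdio

With "-d all" the report is sorted on total load-hit counts rather than HITMs, which the later builtin-c2c.c hunks wire up through the tot_ld_hit dimension.)
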
--stitch-lbr:: Show callgraph with stitched LBRs, which may have more complete @@ -174,12 +175,18 @@ For each cacheline in the 1) list we display following data: Cacheline - cacheline address (hex number) - Rmt/Lcl Hitm + Rmt/Lcl Hitm (For display with HITM types) - cacheline percentage of all Remote/Local HITM accesses - LLC Load Hitm - Total, LclHitm, RmtHitm + LLC Load Hitm - Total, LclHitm, RmtHitm (For display with HITM types) - count of Total/Local/Remote load HITMs + LD Hit Pct (For display 'all') + - cacheline percentage of all load hit accesses + + LD Hit Total (For display 'all') + - sum of all load hit accesses + Total records - sum of all cachelines accesses @@ -207,9 +214,12 @@ For each cacheline in the 1) list we display following data: For each offset in the 2) list we display following data: - HITM - Rmt, Lcl + HITM - Rmt, Lcl (For display with HITM types) - % of Remote/Local HITM accesses for given offset within cacheline + Load Refs - Hit, Miss (For display 'all') + - % of load accesses that hit/missed cache for given offset within cacheline + Store Refs - L1 Hit, L1 Miss - % of store accesses that hit/missed L1 for given offset within cacheline @@ -249,7 +259,8 @@ The 'Node' field displays nodes that accesses given cacheline offset. Its output comes in 3 flavors: - node IDs separated by ',' - node IDs with stats for each ID, in following format: - Node{cpus %hitms %stores} + Node{cpus %hitms %stores} (For display with HITM types) + Node{cpus %loads %stores} (For display with "all") - node IDs with list of affected CPUs in following format: Node{cpu list} diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 99266f1da1b2c57dc0297609610a6302ee36e07b..78700c7ec7df4df73820d6914f13cf194a2e0a87 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -261,6 +261,9 @@ ifdef PYTHON_CONFIG PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null) FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) + ifeq ($(CC_NO_CLANG), 0) + PYTHON_EMBED_CCOPTS := $(filter-out -ffat-lto-objects, $(PYTHON_EMBED_CCOPTS)) + endif endif FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS) @@ -773,6 +776,9 @@ else LDFLAGS += $(PERL_EMBED_LDFLAGS) EXTLIBS += $(PERL_EMBED_LIBADD) CFLAGS += -DHAVE_LIBPERL_SUPPORT + ifeq ($(CC_NO_CLANG), 0) + CFLAGS += -Wno-compound-token-split-by-macro + endif $(call detected,CONFIG_LIBPERL) endif endif diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c index f3aa096476065d30dd54b689f5b8c259fc17bdb7..3c0d919a73f451228bc40170a31895a9fba1796f 100644 --- a/tools/perf/arch/arm64/util/arm-spe.c +++ b/tools/perf/arch/arm64/util/arm-spe.c @@ -239,6 +239,12 @@ static int arm_spe_recording_options(struct auxtrace_record *itr, arm_spe_set_timestamp(itr, arm_spe_evsel); } + /* + * Set this only so that perf report knows that SPE generates memory info. It has no effect + * on the opening of the event or the SPE data produced. 
+ */ + evsel__set_sample_bit(arm_spe_evsel, DATA_SRC); + /* Add dummy event to keep tracking */ err = parse_events(evlist, "dummy:u", NULL); if (err) diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index d247f9878948080432e6fb94ed73521c050fb10c..a1d66b0bda84dae24e3de3d538901c0a2ce4634b 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -113,13 +113,15 @@ enum { DISPLAY_LCL, DISPLAY_RMT, DISPLAY_TOT, + DISPLAY_ALL, DISPLAY_MAX, }; static const char *display_str[DISPLAY_MAX] = { - [DISPLAY_LCL] = "Local", - [DISPLAY_RMT] = "Remote", - [DISPLAY_TOT] = "Total", + [DISPLAY_LCL] = "Local HITMs", + [DISPLAY_RMT] = "Remote HITMs", + [DISPLAY_TOT] = "Total HITMs", + [DISPLAY_ALL] = "All Load Access", }; static const struct option c2c_options[] = { @@ -615,6 +617,83 @@ tot_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused, return tot_hitm_left - tot_hitm_right; } +#define TOT_LD_HIT(stats) \ + ((stats)->ld_fbhit + \ + (stats)->ld_l1hit + \ + (stats)->ld_l2hit + \ + (stats)->ld_llchit + \ + (stats)->lcl_hitm + \ + (stats)->rmt_hitm + \ + (stats)->rmt_hit) + +#define TOT_LD_MISS(stats) \ + ((stats)->lcl_dram + \ + (stats)->rmt_dram) + +static int tot_ld_hit_entry(struct perf_hpp_fmt *fmt, + struct perf_hpp *hpp, + struct hist_entry *he) +{ + struct c2c_hist_entry *c2c_he; + int width = c2c_width(fmt, hpp, he->hists); + unsigned int tot_hit; + + c2c_he = container_of(he, struct c2c_hist_entry, he); + tot_hit = TOT_LD_HIT(&c2c_he->stats); + + return scnprintf(hpp->buf, hpp->size, "%*u", width, tot_hit); +} + +static int64_t tot_ld_hit_cmp(struct perf_hpp_fmt *fmt __maybe_unused, + struct hist_entry *left, + struct hist_entry *right) +{ + struct c2c_hist_entry *c2c_left; + struct c2c_hist_entry *c2c_right; + uint64_t tot_hit_left; + uint64_t tot_hit_right; + + c2c_left = container_of(left, struct c2c_hist_entry, he); + c2c_right = container_of(right, struct c2c_hist_entry, he); + + tot_hit_left = TOT_LD_HIT(&c2c_left->stats); + tot_hit_right = TOT_LD_HIT(&c2c_right->stats); + + return tot_hit_left - tot_hit_right; +} + +static int tot_ld_miss_entry(struct perf_hpp_fmt *fmt, + struct perf_hpp *hpp, + struct hist_entry *he) +{ + struct c2c_hist_entry *c2c_he; + int width = c2c_width(fmt, hpp, he->hists); + unsigned int tot_miss; + + c2c_he = container_of(he, struct c2c_hist_entry, he); + tot_miss = TOT_LD_MISS(&c2c_he->stats); + + return scnprintf(hpp->buf, hpp->size, "%*u", width, tot_miss); +} + +static int64_t tot_ld_miss_cmp(struct perf_hpp_fmt *fmt __maybe_unused, + struct hist_entry *left, + struct hist_entry *right) +{ + struct c2c_hist_entry *c2c_left; + struct c2c_hist_entry *c2c_right; + uint64_t tot_miss_left; + uint64_t tot_miss_right; + + c2c_left = container_of(left, struct c2c_hist_entry, he); + c2c_right = container_of(right, struct c2c_hist_entry, he); + + tot_miss_left = TOT_LD_MISS(&c2c_left->stats); + tot_miss_right = TOT_LD_MISS(&c2c_right->stats); + + return tot_miss_left - tot_miss_right; +} + #define STAT_FN_ENTRY(__f) \ static int \ __f ## _entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, \ @@ -806,6 +885,11 @@ static double percent_hitm(struct c2c_hist_entry *c2c_he) case DISPLAY_TOT: st = stats->tot_hitm; tot = total->tot_hitm; + break; + case DISPLAY_ALL: + ui__warning("Calculate hitm percent for display 'all';\n" + "should never happen!\n"); + break; default: break; } @@ -860,6 +944,58 @@ percent_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused, return per_left - per_right; } +static double percent_tot_ld_hit(struct c2c_hist_entry 
*c2c_he) +{ + struct c2c_hists *hists; + int tot = 0, st = 0; + + hists = container_of(c2c_he->he.hists, struct c2c_hists, hists); + + st = TOT_LD_HIT(&c2c_he->stats); + tot = TOT_LD_HIT(&hists->stats); + + return tot ? (double) st * 100 / tot : 0; +} + +static int +percent_tot_ld_hit_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he) +{ + struct c2c_hist_entry *c2c_he; + int width = c2c_width(fmt, hpp, he->hists); + char buf[10]; + double per; + + c2c_he = container_of(he, struct c2c_hist_entry, he); + per = percent_tot_ld_hit(c2c_he); + return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per)); +} + +static int +percent_tot_ld_hit_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he) +{ + return percent_color(fmt, hpp, he, percent_tot_ld_hit); +} + +static int64_t +percent_tot_ld_hit_cmp(struct perf_hpp_fmt *fmt __maybe_unused, + struct hist_entry *left, struct hist_entry *right) +{ + struct c2c_hist_entry *c2c_left; + struct c2c_hist_entry *c2c_right; + double per_left; + double per_right; + + c2c_left = container_of(left, struct c2c_hist_entry, he); + c2c_right = container_of(right, struct c2c_hist_entry, he); + + per_left = percent_tot_ld_hit(c2c_left); + per_right = percent_tot_ld_hit(c2c_right); + + return per_left - per_right; +} + static struct c2c_stats *he_stats(struct hist_entry *he) { struct c2c_hist_entry *c2c_he; @@ -959,6 +1095,110 @@ percent_lcl_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused, return per_left - per_right; } +static double percent_ld_hit(struct c2c_hist_entry *c2c_he) +{ + struct c2c_hists *hists; + int tot, st; + + hists = container_of(c2c_he->he.hists, struct c2c_hists, hists); + + st = TOT_LD_HIT(&c2c_he->stats); + tot = TOT_LD_HIT(&hists->stats); + + return percent(st, tot); +} + +static int +percent_ld_hit_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he) +{ + struct c2c_hist_entry *c2c_he; + int width = c2c_width(fmt, hpp, he->hists); + char buf[10]; + double per; + + c2c_he = container_of(he, struct c2c_hist_entry, he); + per = percent_ld_hit(c2c_he); + return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per)); +} + +static int +percent_ld_hit_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he) +{ + return percent_color(fmt, hpp, he, percent_ld_hit); +} + +static int64_t +percent_ld_hit_cmp(struct perf_hpp_fmt *fmt __maybe_unused, + struct hist_entry *left, struct hist_entry *right) +{ + struct c2c_hist_entry *c2c_left; + struct c2c_hist_entry *c2c_right; + double per_left; + double per_right; + + c2c_left = container_of(left, struct c2c_hist_entry, he); + c2c_right = container_of(right, struct c2c_hist_entry, he); + + per_left = percent_ld_hit(c2c_left); + per_right = percent_ld_hit(c2c_right); + + return per_left - per_right; +} + +static double percent_ld_miss(struct c2c_hist_entry *c2c_he) +{ + struct c2c_hists *hists; + int tot, st; + + hists = container_of(c2c_he->he.hists, struct c2c_hists, hists); + + st = TOT_LD_MISS(&c2c_he->stats); + tot = TOT_LD_MISS(&hists->stats); + + return percent(st, tot); +} + +static int +percent_ld_miss_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he) +{ + struct c2c_hist_entry *c2c_he; + int width = c2c_width(fmt, hpp, he->hists); + char buf[10]; + double per; + + c2c_he = container_of(he, struct c2c_hist_entry, he); + per = percent_ld_miss(c2c_he); + return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per)); +} + +static int 
+percent_ld_miss_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, + struct hist_entry *he) +{ + return percent_color(fmt, hpp, he, percent_ld_miss); +} + +static int64_t +percent_ld_miss_cmp(struct perf_hpp_fmt *fmt __maybe_unused, + struct hist_entry *left, struct hist_entry *right) +{ + struct c2c_hist_entry *c2c_left; + struct c2c_hist_entry *c2c_right; + double per_left; + double per_right; + + c2c_left = container_of(left, struct c2c_hist_entry, he); + c2c_right = container_of(right, struct c2c_hist_entry, he); + + per_left = percent_ld_miss(c2c_left); + per_right = percent_ld_miss(c2c_right); + + return per_left - per_right; +} + static int percent_stores_l1hit_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he) @@ -1117,6 +1357,10 @@ node_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp, ret = display_metrics(hpp, stats->tot_hitm, c2c_he->stats.tot_hitm); break; + case DISPLAY_ALL: + ret = display_metrics(hpp, TOT_LD_HIT(stats), + TOT_LD_HIT(&c2c_he->stats)); + break; default: break; } @@ -1331,6 +1575,22 @@ static struct c2c_dimension dim_cl_rmt_hitm = { .width = 7, }; +static struct c2c_dimension dim_cl_tot_ld_hit = { + .header = HEADER_SPAN("--- Load ---", "Hit", 1), + .name = "cl_tot_ld_hit", + .cmp = tot_ld_hit_cmp, + .entry = tot_ld_hit_entry, + .width = 7, +}; + +static struct c2c_dimension dim_cl_tot_ld_miss = { + .header = HEADER_SPAN_LOW("Miss"), + .name = "cl_tot_ld_miss", + .cmp = tot_ld_miss_cmp, + .entry = tot_ld_miss_entry, + .width = 7, +}; + static struct c2c_dimension dim_cl_lcl_hitm = { .header = HEADER_SPAN_LOW("Lcl"), .name = "cl_lcl_hitm", @@ -1419,6 +1679,14 @@ static struct c2c_dimension dim_ld_rmthit = { .width = 8, }; +static struct c2c_dimension dim_tot_ld_hit = { + .header = HEADER_BOTH("Load Hit", "Total"), + .name = "tot_ld_hit", + .cmp = tot_ld_hit_cmp, + .entry = tot_ld_hit_entry, + .width = 8, +}; + static struct c2c_dimension dim_tot_recs = { .header = HEADER_BOTH("Total", "records"), .name = "tot_recs", @@ -1439,6 +1707,7 @@ static struct c2c_header percent_hitm_header[] = { [DISPLAY_LCL] = HEADER_BOTH("Lcl", "Hitm"), [DISPLAY_RMT] = HEADER_BOTH("Rmt", "Hitm"), [DISPLAY_TOT] = HEADER_BOTH("Tot", "Hitm"), + [DISPLAY_ALL] = HEADER_BOTH("LLC", "Hit"), }; static struct c2c_dimension dim_percent_hitm = { @@ -1467,6 +1736,33 @@ static struct c2c_dimension dim_percent_lcl_hitm = { .width = 7, }; +static struct c2c_dimension dim_percent_tot_ld_hit = { + .header = HEADER_BOTH("Load Hit", "Pct"), + .name = "percent_tot_ld_hit", + .cmp = percent_tot_ld_hit_cmp, + .entry = percent_tot_ld_hit_entry, + .color = percent_tot_ld_hit_color, + .width = 8, +}; + +static struct c2c_dimension dim_percent_ld_hit = { + .header = HEADER_SPAN("-- Load Refs --", "Hit", 1), + .name = "percent_ld_hit", + .cmp = percent_ld_hit_cmp, + .entry = percent_ld_hit_entry, + .color = percent_ld_hit_color, + .width = 7, +}; + +static struct c2c_dimension dim_percent_ld_miss = { + .header = HEADER_SPAN_LOW("Miss"), + .name = "percent_ld_miss", + .cmp = percent_ld_miss_cmp, + .entry = percent_ld_miss_entry, + .color = percent_ld_miss_color, + .width = 7, +}; + static struct c2c_dimension dim_percent_stores_l1hit = { .header = HEADER_SPAN("-- Store Refs --", "L1 Hit", 1), .name = "percent_stores_l1hit", @@ -1526,12 +1822,6 @@ static struct c2c_dimension dim_dso = { .se = &sort_dso, }; -static struct c2c_header header_node[3] = { - HEADER_LOW("Node"), - HEADER_LOW("Node{cpus %hitms %stores}"), - HEADER_LOW("Node{cpu list}"), -}; - static 
struct c2c_dimension dim_node = { .name = "node", .cmp = empty_cmp, @@ -1612,6 +1902,8 @@ static struct c2c_dimension *dimensions[] = { &dim_rmt_hitm, &dim_cl_lcl_hitm, &dim_cl_rmt_hitm, + &dim_cl_tot_ld_hit, + &dim_cl_tot_ld_miss, &dim_tot_stores, &dim_stores_l1hit, &dim_stores_l1miss, @@ -1622,11 +1914,15 @@ static struct c2c_dimension *dimensions[] = { &dim_ld_l2hit, &dim_ld_llchit, &dim_ld_rmthit, + &dim_tot_ld_hit, &dim_tot_recs, &dim_tot_loads, &dim_percent_hitm, &dim_percent_rmt_hitm, &dim_percent_lcl_hitm, + &dim_percent_ld_hit, + &dim_percent_ld_miss, + &dim_percent_tot_ld_hit, &dim_percent_stores_l1hit, &dim_percent_stores_l1miss, &dim_dram_lcl, @@ -1888,6 +2184,10 @@ static bool he__display(struct hist_entry *he, struct c2c_stats *stats) he->filtered = filter_display(c2c_he->stats.tot_hitm, stats->tot_hitm); break; + case DISPLAY_ALL: + he->filtered = filter_display(TOT_LD_HIT(&c2c_he->stats), + TOT_LD_HIT(stats)); + break; default: break; } @@ -1916,6 +2216,9 @@ static inline bool is_valid_hist_entry(struct hist_entry *he) case DISPLAY_TOT: has_record = !!c2c_he->stats.tot_hitm; break; + case DISPLAY_ALL: + has_record = !!TOT_LD_HIT(&c2c_he->stats); + break; default: break; } @@ -2004,9 +2307,33 @@ static int resort_cl_cb(struct hist_entry *he, void *arg __maybe_unused) return 0; } +static struct c2c_header header_node_0 = HEADER_LOW("Node"); +static struct c2c_header header_node_1_hitms_stores = + HEADER_LOW("Node{cpus %hitms %stores}"); +static struct c2c_header header_node_1_loads_stores = + HEADER_LOW("Node{cpus %loads %stores}"); +static struct c2c_header header_node_2 = HEADER_LOW("Node{cpu list}"); + static void setup_nodes_header(void) { - dim_node.header = header_node[c2c.node_info]; + switch (c2c.node_info) { + case 0: + dim_node.header = header_node_0; + break; + case 1: + if (c2c.display == DISPLAY_ALL) + dim_node.header = header_node_1_loads_stores; + else + dim_node.header = header_node_1_hitms_stores; + break; + case 2: + dim_node.header = header_node_2; + break; + default: + break; + } + + return; } static int setup_nodes(struct perf_session *session) @@ -2076,11 +2403,13 @@ static int resort_shared_cl_cb(struct hist_entry *he, void *arg __maybe_unused) struct c2c_hist_entry *c2c_he; c2c_he = container_of(he, struct c2c_hist_entry, he); - if (HAS_HITMS(c2c_he)) { + if (c2c.display == DISPLAY_ALL && TOT_LD_HIT(&c2c_he->stats)) { + c2c.shared_clines++; + c2c_add_stats(&c2c.shared_clines_stats, &c2c_he->stats); + } else if (HAS_HITMS(c2c_he)) { c2c.shared_clines++; c2c_add_stats(&c2c.shared_clines_stats, &c2c_he->stats); } - return 0; } @@ -2201,12 +2530,21 @@ static void print_pareto(FILE *out) int ret; const char *cl_output; - cl_output = "cl_num," - "cl_rmt_hitm," - "cl_lcl_hitm," - "cl_stores_l1hit," - "cl_stores_l1miss," - "dcacheline"; + if (c2c.display == DISPLAY_TOT || c2c.display == DISPLAY_LCL || + c2c.display == DISPLAY_RMT) + cl_output = "cl_num," + "cl_rmt_hitm," + "cl_lcl_hitm," + "cl_stores_l1hit," + "cl_stores_l1miss," + "dcacheline"; + else /* c2c.display == DISPLAY_ALL */ + cl_output = "cl_num," + "cl_tot_ld_hit," + "cl_tot_ld_miss," + "cl_stores_l1hit," + "cl_stores_l1miss," + "dcacheline"; perf_hpp_list__init(&hpp_list); ret = hpp_list__parse(&hpp_list, cl_output, NULL); @@ -2242,7 +2580,7 @@ static void print_c2c_info(FILE *out, struct perf_session *session) fprintf(out, "%-36s: %s\n", first ? 
" Events" : "", evsel__name(evsel)); first = false; } - fprintf(out, " Cachelines sort on : %s HITMs\n", + fprintf(out, " Cachelines sort on : %s\n", display_str[c2c.display]); fprintf(out, " Cacheline data grouping : %s\n", c2c.cl_sort); } @@ -2399,7 +2737,7 @@ static int perf_c2c_browser__title(struct hist_browser *browser, { scnprintf(bf, size, "Shared Data Cache Line Table " - "(%lu entries, sorted on %s HITMs)", + "(%lu entries, sorted on %s)", browser->nr_non_filtered_entries, display_str[c2c.display]); return 0; @@ -2605,6 +2943,8 @@ static int setup_display(const char *str) c2c.display = DISPLAY_RMT; else if (!strcmp(display, "lcl")) c2c.display = DISPLAY_LCL; + else if (!strcmp(display, "all")) + c2c.display = DISPLAY_ALL; else { pr_err("failed: unknown display type: %s\n", str); return -1; @@ -2651,10 +2991,12 @@ static int build_cl_output(char *cl_sort, bool no_source) } if (asprintf(&c2c.cl_output, - "%s%s%s%s%s%s%s%s%s%s", + "%s%s%s%s%s%s%s%s%s%s%s", c2c.use_stdio ? "cl_num_empty," : "", - "percent_rmt_hitm," - "percent_lcl_hitm," + c2c.display == DISPLAY_ALL ? "percent_ld_hit," + "percent_ld_miss," : + "percent_rmt_hitm," + "percent_lcl_hitm,", "percent_stores_l1hit," "percent_stores_l1miss," "offset,offset_node,dcacheline_count,", @@ -2683,6 +3025,7 @@ static int build_cl_output(char *cl_sort, bool no_source) static int setup_coalesce(const char *coalesce, bool no_source) { const char *c = coalesce ?: coalesce_default; + const char *sort_str = NULL; if (asprintf(&c2c.cl_sort, "offset,%s", c) < 0) return -ENOMEM; @@ -2690,12 +3033,16 @@ static int setup_coalesce(const char *coalesce, bool no_source) if (build_cl_output(c2c.cl_sort, no_source)) return -1; - if (asprintf(&c2c.cl_resort, "offset,%s", - c2c.display == DISPLAY_TOT ? - "tot_hitm" : - c2c.display == DISPLAY_RMT ? 
- "rmt_hitm,lcl_hitm" : - "lcl_hitm,rmt_hitm") < 0) + if (c2c.display == DISPLAY_TOT) + sort_str = "tot_hitm"; + else if (c2c.display == DISPLAY_RMT) + sort_str = "rmt_hitm,lcl_hitm"; + else if (c2c.display == DISPLAY_LCL) + sort_str = "lcl_hitm,rmt_hitm"; + else if (c2c.display == DISPLAY_ALL) + sort_str = "tot_ld_hit"; + + if (asprintf(&c2c.cl_resort, "offset,%s", sort_str) < 0) return -ENOMEM; pr_debug("coalesce sort fields: %s\n", c2c.cl_sort); @@ -2830,20 +3177,37 @@ static int perf_c2c__report(int argc, const char **argv) goto out_mem2node; } - output_str = "cl_idx," - "dcacheline," - "dcacheline_node," - "dcacheline_count," - "percent_hitm," - "tot_hitm,lcl_hitm,rmt_hitm," - "tot_recs," - "tot_loads," - "tot_stores," - "stores_l1hit,stores_l1miss," - "ld_fbhit,ld_l1hit,ld_l2hit," - "ld_lclhit,lcl_hitm," - "ld_rmthit,rmt_hitm," - "dram_lcl,dram_rmt"; + if (c2c.display == DISPLAY_TOT || c2c.display == DISPLAY_LCL || + c2c.display == DISPLAY_RMT) + output_str = "cl_idx," + "dcacheline," + "dcacheline_node," + "dcacheline_count," + "percent_hitm," + "tot_hitm,lcl_hitm,rmt_hitm," + "tot_recs," + "tot_loads," + "tot_stores," + "stores_l1hit,stores_l1miss," + "ld_fbhit,ld_l1hit,ld_l2hit," + "ld_lclhit,lcl_hitm," + "ld_rmthit,rmt_hitm," + "dram_lcl,dram_rmt"; + else /* c2c.display == DISPLAY_ALL */ + output_str = "cl_idx," + "dcacheline," + "dcacheline_node," + "dcacheline_count," + "percent_tot_ld_hit," + "tot_ld_hit," + "tot_recs," + "tot_loads," + "tot_stores," + "stores_l1hit,stores_l1miss," + "ld_fbhit,ld_l1hit,ld_l2hit," + "ld_lclhit,lcl_hitm," + "ld_rmthit,rmt_hitm," + "dram_lcl,dram_rmt"; if (c2c.display == DISPLAY_TOT) sort_str = "tot_hitm"; @@ -2851,6 +3215,8 @@ static int perf_c2c__report(int argc, const char **argv) sort_str = "rmt_hitm"; else if (c2c.display == DISPLAY_LCL) sort_str = "lcl_hitm"; + else if (c2c.display == DISPLAY_ALL) + sort_str = "tot_ld_hit"; c2c_hists__reinit(&c2c.hists, output_str, sort_str); diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index d3b5f5faf8c1417eb22aa58aa6a969794083fcdf..02e5774cabb6edc9c6c7e22a5271ab18736cd3e7 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -670,7 +670,7 @@ static void create_tasks(struct perf_sched *sched) err = pthread_attr_init(&attr); BUG_ON(err); err = pthread_attr_setstacksize(&attr, - (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); + (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN)); BUG_ON(err); err = pthread_mutex_lock(&sched->start_work_mutex); BUG_ON(err); diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 27f94b0bb8747c3c3b72609fe9d875785cd69dc7..505e2a2f1872bbab43db7fdbbc3da7ec6fc4e38f 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -433,7 +433,7 @@ void pthread__unblock_sigwinch(void) static int libperf_print(enum libperf_print_level level, const char *fmt, va_list ap) { - return eprintf(level, verbose, fmt, ap); + return veprintf(level, verbose, fmt, ap); } int main(int argc, const char **argv) diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c index 0374adcb223c75c418b7b9bb51e0c7215e012160..ac99c0764bee83a87a3f25f8367694bdb0bd3be0 100644 --- a/tools/perf/util/bpf-loader.c +++ b/tools/perf/util/bpf-loader.c @@ -1215,9 +1215,10 @@ bpf__obj_config_map(struct bpf_object *obj, pr_debug("ERROR: Invalid map config option '%s'\n", map_opt); err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT; out: - free(map_name); if (!err) *key_scan_pos += strlen(map_opt); + + free(map_name); return err; } diff --git a/tools/perf/util/data.c 
b/tools/perf/util/data.c index bcb494dc816a0b47aa1884ba82d7a85d2e176b19..48754083791d8d35594a942809b189a727997689 100644 --- a/tools/perf/util/data.c +++ b/tools/perf/util/data.c @@ -44,10 +44,6 @@ int perf_data__create_dir(struct perf_data *data, int nr) if (!files) return -ENOMEM; - data->dir.version = PERF_DIR_VERSION; - data->dir.files = files; - data->dir.nr = nr; - for (i = 0; i < nr; i++) { struct perf_data_file *file = &files[i]; @@ -62,6 +58,9 @@ int perf_data__create_dir(struct perf_data *data, int nr) file->fd = ret; } + data->dir.version = PERF_DIR_VERSION; + data->dir.files = files; + data->dir.nr = nr; return 0; out_err: diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 5cda5565777a0bfca8f319df7f5a7a79fb0dc4cf..0af163abaa62b87f3d5a4fb841f64e8f84be394d 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -145,7 +145,7 @@ static int trace_event_printer(enum binary_printer_ops op, break; case BINARY_PRINT_CHAR_DATA: printed += color_fprintf(fp, color, "%c", - isprint(ch) ? ch : '.'); + isprint(ch) && isascii(ch) ? ch : '.'); break; case BINARY_PRINT_CHAR_PAD: printed += color_fprintf(fp, color, " "); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 1cad6051d8b0892c3f04213d93b0f871bd5bea20..1a1cbd16d76d475b3ef06c2455faab6a13a09fb3 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1014,6 +1014,17 @@ struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evs return found_term; } +static void evsel__set_default_freq_period(struct record_opts *opts, + struct perf_event_attr *attr) +{ + if (opts->freq) { + attr->freq = 1; + attr->sample_freq = opts->freq; + } else { + attr->sample_period = opts->default_interval; + } +} + /* * The enable_on_exec/disabled value strategy: * @@ -1080,14 +1091,12 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts, * We default some events to have a default interval. But keep * it a weak assumption overridable by the user. */ - if (!attr->sample_period) { - if (opts->freq) { - attr->freq = 1; - attr->sample_freq = opts->freq; - } else { - attr->sample_period = opts->default_interval; - } - } + if ((evsel->is_libpfm_event && !attr->sample_period) || + (!evsel->is_libpfm_event && (!attr->sample_period || + opts->user_freq != UINT_MAX || + opts->user_interval != ULLONG_MAX))) + evsel__set_default_freq_period(opts, attr); + /* * If attr->freq was set (here or earlier), ask for period * to be sampled. 
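
(Editor's aside: the reworked condition in the evsel.c hunk above is dense; the sketch below is a simplified restatement, not the tool's code. Field names mirror the hunk, and UINT_MAX/ULLONG_MAX are assumed to be the "unset" sentinels record_opts uses for user_freq/user_interval:

	#include <limits.h>
	#include <stdbool.h>

	/* should evsel__config() apply the default freq/period? */
	static bool wants_default_freq_period(bool is_libpfm_event,
					      unsigned long long sample_period,
					      unsigned int user_freq,
					      unsigned long long user_interval)
	{
		if (is_libpfm_event)
			/* libpfm events keep a period encoded in the event itself */
			return sample_period == 0;

		/* other events: also let an explicit -F/-c from the user win
		 * over a period some earlier parsing may have filled in */
		return sample_period == 0 ||
		       user_freq != UINT_MAX ||
		       user_interval != ULLONG_MAX;
	}

When this predicate holds, the code falls into evsel__set_default_freq_period(), which picks frequency vs. period exactly as the helper added at the top of the hunk does.)
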
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index d0408320c4347711acc2903669d6447f3a56b984..db3c85bb4fd0bf0d04c99324acd93314f69e597f 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1446,7 +1446,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, bool use_uncore_alias; LIST_HEAD(config_terms); - if (verbose > 1) { + pmu = parse_state->fake_pmu ?: perf_pmu__find(name); + + if (verbose > 1 && !(pmu && pmu->selectable)) { fprintf(stderr, "Attempting to add event pmu '%s' with '", name); if (head_config) { @@ -1459,7 +1461,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, fprintf(stderr, "' that may result in non-fatal errors\n"); } - pmu = parse_state->fake_pmu ?: perf_pmu__find(name); if (!pmu) { char *err_str; diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 07db6cfad65b988e3c77642bba4d05c8cff3b268..d103084fcd56cbf5db6df63b305a716a12aea3dc 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -3035,6 +3035,9 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev, for (j = 0; j < num_matched_functions; j++) { sym = syms[j]; + if (sym->type != STT_FUNC) + continue; + /* There can be duplicated symbols in the map */ for (i = 0; i < j; i++) if (sym->start == syms[i]->start) { diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 9dddec19a494eda39ce6bdb6cd4e57a734e87177..354e1e04a26627e9b831ddb24f3a92306e003165 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -2056,6 +2056,7 @@ prefetch_event(char *buf, u64 head, size_t mmap_size, bool needs_swap, union perf_event *error) { union perf_event *event; + u16 event_size; /* * Ensure we have enough space remaining to read @@ -2068,15 +2069,23 @@ prefetch_event(char *buf, u64 head, size_t mmap_size, if (needs_swap) perf_event_header__bswap(&event->header); - if (head + event->header.size <= mmap_size) + event_size = event->header.size; + if (head + event_size <= mmap_size) return event; /* We're not fetching the event so swap back again */ if (needs_swap) perf_event_header__bswap(&event->header); - pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:" - " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size); + /* Check if the event fits into the next mmapped buf. */ + if (event_size <= mmap_size - head % page_size) { + /* Remap buf and fetch again. */ + return NULL; + } + + /* Invalid input. Event size should never exceed mmap_size. 
*/ + pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:" + " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size); return error; } diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index c5e3e9a68162d784287f8837c2c802808df517a0..b670469a8124f909a6000daa21d90fa198d3e744 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -1,12 +1,14 @@ -from os import getenv +from os import getenv, path from subprocess import Popen, PIPE from re import sub cc = getenv("CC") cc_is_clang = b"clang version" in Popen([cc.split()[0], "-v"], stderr=PIPE).stderr.readline() +src_feature_tests = getenv('srctree') + '/tools/build/feature' def clang_has_option(option): - return [o for o in Popen([cc, option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ] + cc_output = Popen([cc, option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines() + return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ] if cc_is_clang: from distutils.sysconfig import get_config_vars @@ -23,6 +25,8 @@ if cc_is_clang: vars[var] = sub("-fstack-protector-strong", "", vars[var]) if not clang_has_option("-fno-semantic-interposition"): vars[var] = sub("-fno-semantic-interposition", "", vars[var]) + if not clang_has_option("-ffat-lto-objects"): + vars[var] = sub("-ffat-lto-objects", "", vars[var]) from distutils.core import setup, Extension diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index a963b5b8eb72409152596d6b33f0d961aeb1aa40..96fe9c1af33644b71393dffb16d1c5c86a0a5342 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -555,15 +555,16 @@ static void collect_all_aliases(struct perf_stat_config *config, struct evsel *c alias = list_prepare_entry(counter, &(evlist->core.entries), core.node); list_for_each_entry_continue (alias, &evlist->core.entries, core.node) { - if (strcmp(evsel__name(alias), evsel__name(counter)) || - alias->scale != counter->scale || - alias->cgrp != counter->cgrp || - strcmp(alias->unit, counter->unit) || - evsel__is_clock(alias) != evsel__is_clock(counter) || - !strcmp(alias->pmu_name, counter->pmu_name)) - break; - alias->merged_stat = true; - cb(config, alias, data, false); + /* Merge events with the same name, etc. but on different PMUs. 
*/ + if (!strcmp(evsel__name(alias), evsel__name(counter)) && + alias->scale == counter->scale && + alias->cgrp == counter->cgrp && + !strcmp(alias->unit, counter->unit) && + evsel__is_clock(alias) == evsel__is_clock(counter) && + strcmp(alias->pmu_name, counter->pmu_name)) { + alias->merged_stat = true; + cb(config, alias, data, false); + } } } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 4d569ad7db02dd9a3b823503f2e8434b226eb911..3609da7cce0abb781889f39887dbf4b2a73915e0 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -231,7 +231,7 @@ void symbols__fixup_end(struct rb_root_cached *symbols) prev = curr; curr = rb_entry(nd, struct symbol, rb_node); - if (prev->end == prev->start && prev->end != curr->start) + if (prev->end == prev->start || prev->end != curr->start) arch__symbols__fixup_end(prev, curr); } diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 1d915553336082fd178f9ec15f1b206c2077d3f7..6fabcc2f739bf8d2712a88e4e8c3655f29aaf9be 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -38,6 +38,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test test_netcnt test_tcpnotify_user test_sysctl \ test_progs-no_alu32 \ test_current_pid_tgid_new_ns +TEST_GEN_PROGS += file_read_pattern # Also test bpf-gcc, if present ifneq ($(BPF_GCC),) diff --git a/tools/testing/selftests/bpf/file_read_pattern.c b/tools/testing/selftests/bpf/file_read_pattern.c new file mode 100644 index 0000000000000000000000000000000000000000..81e3a49f04246d3fe44100f9fa3d48d2ef846c0f --- /dev/null +++ b/tools/testing/selftests/bpf/file_read_pattern.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021. 
Huawei Technologies Co., Ltd */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_rlimit.h" + +#define READ_TP_NAME "fs_file_read" +#define RELEASE_TP_NAME "fs_file_release" + +int main(int argc, char *argv[]) +{ + const char *name = "./file_read_pattern_prog.o"; + struct bpf_object *obj; + const char *prog_name; + struct bpf_program *prog; + int unused; + int err; + int read_fd; + int release_fd; + + err = bpf_prog_load(name, BPF_PROG_TYPE_UNSPEC, &obj, &unused); + if (err) { + printf("Failed to load program\n"); + return err; + } + + prog_name = "raw_tracepoint.w/" READ_TP_NAME; + prog = bpf_object__find_program_by_title(obj, prog_name); + if (!prog) { + printf("no prog %s\n", prog_name); + err = -EINVAL; + goto out; + } + + read_fd = bpf_raw_tracepoint_open(READ_TP_NAME, bpf_program__fd(prog)); + if (read_fd < 0) { + err = -errno; + printf("Failed to attach raw tracepoint %s\n", READ_TP_NAME); + goto out; + } + + prog_name = "raw_tracepoint/" RELEASE_TP_NAME; + prog = bpf_object__find_program_by_title(obj, prog_name); + if (!prog) { + printf("no prog %s\n", prog_name); + err = -EINVAL; + goto out; + } + + release_fd = bpf_raw_tracepoint_open(RELEASE_TP_NAME, + bpf_program__fd(prog)); + if (release_fd < 0) { + err = -errno; + printf("Failed to attach raw tracepoint %s\n", RELEASE_TP_NAME); + goto out; + } + + pause(); + + close(release_fd); + close(read_fd); +out: + bpf_object__close(obj); + return err; +} diff --git a/tools/testing/selftests/bpf/prog_tests/d_path.c b/tools/testing/selftests/bpf/prog_tests/d_path.c index 0a577a248d34a587dc52eb2dc4b7a5fa5bed11d6..85f1386a97e6b65cb168263fbc584cd1ed120c94 100644 --- a/tools/testing/selftests/bpf/prog_tests/d_path.c +++ b/tools/testing/selftests/bpf/prog_tests/d_path.c @@ -9,6 +9,8 @@ #define MAX_FILES 7 #include "test_d_path.skel.h" +#include "test_d_path_check_rdonly_mem.skel.h" +#include "test_d_path_check_types.skel.h" static int duration; @@ -99,7 +101,7 @@ static int trigger_fstat_events(pid_t pid) return ret; } -void test_d_path(void) +static void test_d_path_basic(void) { struct test_d_path__bss *bss; struct test_d_path *skel; @@ -155,3 +157,35 @@ void test_d_path(void) cleanup: test_d_path__destroy(skel); } + +static void test_d_path_check_rdonly_mem(void) +{ + struct test_d_path_check_rdonly_mem *skel; + + skel = test_d_path_check_rdonly_mem__open_and_load(); + CHECK(skel, "skel_open", "unexpected_load_overwriting_rdonly_mem"); + + test_d_path_check_rdonly_mem__destroy(skel); +} + +static void test_d_path_check_types(void) +{ + struct test_d_path_check_types *skel; + + skel = test_d_path_check_types__open_and_load(); + CHECK(skel, "skel_open", "unexpected_load_passing_wrong_type"); + + test_d_path_check_types__destroy(skel); +} + +void test_d_path(void) +{ + if (test__start_subtest("basic")) + test_d_path_basic(); + + if (test__start_subtest("check_rdonly_mem")) + test_d_path_check_rdonly_mem(); + + if (test__start_subtest("check_alloc_mem")) + test_d_path_check_types(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c index b58b775d19f3f9f217c27671ec2c06af9212d323..97f38d4f6a2637dbedc2d89dc6331b71f9a259cb 100644 --- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c @@ -6,6 +6,7 @@ #include #include "test_ksyms_btf.skel.h" #include "test_ksyms_btf_null_check.skel.h" +#include "test_ksyms_btf_write_check.skel.h" static int duration; @@ -81,6 +82,16 
@@ static void test_null_check(void) test_ksyms_btf_null_check__destroy(skel); } +static void test_write_check(void) +{ + struct test_ksyms_btf_write_check *skel; + + skel = test_ksyms_btf_write_check__open_and_load(); + CHECK(skel, "skel_open", "unexpected load of a prog writing to ksym memory\n"); + + test_ksyms_btf_write_check__destroy(skel); +} + void test_ksyms_btf(void) { int percpu_datasec; @@ -106,4 +117,7 @@ void test_ksyms_btf(void) if (test__start_subtest("null_check")) test_null_check(); + + if (test__start_subtest("write_check")) + test_write_check(); } diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index fafeddaad6a998e33ea99d95be5440fd6ed32558..23915be6172d63bda9e969c6b9b6f079f50f8d25 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -105,4 +105,6 @@ void test_skb_ctx(void) "ctx_out_mark", "skb->mark == %u, expected %d\n", skb.mark, 10); + + bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/progs/file_read_pattern_prog.c b/tools/testing/selftests/bpf/progs/file_read_pattern_prog.c new file mode 100644 index 0000000000000000000000000000000000000000..cd2dcd7c64bb13a162997f3436b4235b42ca8e11 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/file_read_pattern_prog.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021. Huawei Technologies Co., Ltd */ +#include +#include +#include + +#include + +#ifndef __always_inline +#define __always_inline inline __attribute__((always_inline)) +#endif + +/* Need to keep consistent with definitions in include/linux/fs.h */ +#define FMODE_CTL_RANDOM 0x1 +#define FMODE_CTL_WILLNEED 0x2 + +struct fs_file_read_ctx { + const unsigned char *name; + unsigned int f_ctl_mode; + unsigned int rsvd; + /* clear from f_ctl_mode */ + unsigned int clr_f_ctl_mode; + /* set into f_ctl_mode */ + unsigned int set_f_ctl_mode; + unsigned long key; + /* file size */ + long long i_size; + /* previous page index */ + long long prev_index; + /* current page index */ + long long index; +}; + +struct fs_file_read_args { + struct fs_file_read_ctx *ctx; + int version; +}; + +struct fs_file_release_args { + void *inode; + void *filp; +}; + +struct file_rd_hist { + __u64 last_nsec; + __u32 seq_nr; + __u32 tot_nr; +}; + +struct bpf_map_def SEC("maps") htab = { + .type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(long), + .value_size = sizeof(struct file_rd_hist), + .max_entries = 10000, +}; + +static __always_inline bool is_expected_file(void *name) +{ + char prefix[5]; + int err; + + err = bpf_probe_read_str(&prefix, sizeof(prefix), name); + if (err <= 0) + return false; + return !strncmp(prefix, "blk_", 4); +} + +SEC("raw_tracepoint.w/fs_file_read") +int fs_file_read(struct fs_file_read_args *args) +{ + const char fmt[] = "elapsed %llu, seq %u, tot %u\n"; + struct fs_file_read_ctx *rd_ctx = args->ctx; + struct file_rd_hist *hist; + struct file_rd_hist new_hist; + __u64 key; + __u64 now; + bool first; + + if (!is_expected_file((void *)rd_ctx->name)) + return 0; + + if (rd_ctx->i_size <= (4 << 20)) { + rd_ctx->set_f_ctl_mode = FMODE_CTL_WILLNEED; + return 0; + } + + first = false; + now = bpf_ktime_get_ns(); + key = rd_ctx->key; + hist = bpf_map_lookup_elem(&htab, &key); + if (!hist) { + __builtin_memset(&new_hist, 0, sizeof(new_hist)); + new_hist.last_nsec = now; + first = true; + hist = &new_hist; + } + + if (rd_ctx->index >= rd_ctx->prev_index && + rd_ctx->index - rd_ctx->prev_index <= 1) + 
hist->seq_nr += 1; + hist->tot_nr += 1; + + bpf_trace_printk(fmt, sizeof(fmt), now - hist->last_nsec, + hist->seq_nr, hist->tot_nr); + + if (first) { + bpf_map_update_elem(&htab, &key, hist, 0); + return 0; + } + + /* 500ms or 10 read */ + if (now - hist->last_nsec >= 500000000ULL || hist->tot_nr >= 10) { + if (hist->tot_nr >= 10) { + if (hist->seq_nr <= hist->tot_nr * 3 / 10) + rd_ctx->set_f_ctl_mode = FMODE_CTL_RANDOM; + else if (hist->seq_nr >= hist->tot_nr * 7 / 10) + rd_ctx->clr_f_ctl_mode = FMODE_CTL_RANDOM; + } + + hist->last_nsec = now; + hist->tot_nr = 0; + hist->seq_nr = 0; + } + + return 0; +} + +SEC("raw_tracepoint/fs_file_release") +int fs_file_release(struct fs_file_release_args *args) +{ + __u64 key = (unsigned long)args->filp; + void *value; + + value = bpf_map_lookup_elem(&htab, &key); + if (value) + bpf_map_delete_elem(&htab, &key); + + return 0; +} + +char _license[] SEC("license") = "GPL"; +__u32 _version SEC("version") = 1; diff --git a/tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c b/tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c new file mode 100644 index 0000000000000000000000000000000000000000..27c27cff6a3aaa7f50a2cabfdcf059df63c3ce6f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Google */ + +#include "vmlinux.h" +#include +#include + +extern const int bpf_prog_active __ksym; + +SEC("fentry/security_inode_getattr") +int BPF_PROG(d_path_check_rdonly_mem, struct path *path, struct kstat *stat, + __u32 request_mask, unsigned int query_flags) +{ + void *active; + __u32 cpu; + + cpu = bpf_get_smp_processor_id(); + active = (void *)bpf_per_cpu_ptr(&bpf_prog_active, cpu); + if (active) { + /* FAIL here! 'active' points to readonly memory. bpf helpers + * that update its arguments can not write into it. + */ + bpf_d_path(path, active, sizeof(int)); + } + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_d_path_check_types.c b/tools/testing/selftests/bpf/progs/test_d_path_check_types.c new file mode 100644 index 0000000000000000000000000000000000000000..7e02b7361307c0c91838b6b01c1ace94319ab4f5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_d_path_check_types.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include +#include + +extern const int bpf_prog_active __ksym; + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 1 << 12); +} ringbuf SEC(".maps"); + +SEC("fentry/security_inode_getattr") +int BPF_PROG(d_path_check_rdonly_mem, struct path *path, struct kstat *stat, + __u32 request_mask, unsigned int query_flags) +{ + void *active; + u32 cpu; + + cpu = bpf_get_smp_processor_id(); + active = (void *)bpf_per_cpu_ptr(&bpf_prog_active, cpu); + if (active) { + /* FAIL here! 'active' points to 'regular' memory. It + * cannot be submitted to ring buffer. 
+ */ + bpf_ringbuf_submit(active, 0); + } + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c new file mode 100644 index 0000000000000000000000000000000000000000..2180c41cd890f26c6f245b9b06563ad1f0e8b69a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 Google */ + +#include "vmlinux.h" + +#include + +extern const int bpf_prog_active __ksym; /* int type global var. */ + +SEC("raw_tp/sys_enter") +int handler(const void *ctx) +{ + int *active; + __u32 cpu; + + cpu = bpf_get_smp_processor_id(); + active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu); + if (active) { + /* Kernel memory obtained from bpf_{per,this}_cpu_ptr + * is read-only, should _not_ pass verification. + */ + /* WRITE_ONCE */ + *(volatile int *)active = -1; + } + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c index 81b57b9aaaeae2e68d81db22410594dc02fc6060..7967348b11af69efbf6de34515903983b79d0a1e 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c @@ -113,7 +113,7 @@ static void tpcpy(struct bpf_tcp_sock *dst, #define RET_LOG() ({ \ linum = __LINE__; \ - bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_NOEXIST); \ + bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_ANY); \ return CG_OK; \ }) diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h index 1858435de7aaf91a3f2ef12e1c8a74e9ee934c4e..5cb90ca29218643a08fe5edd832f0675dfaee38e 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h @@ -235,7 +235,7 @@ SEC("sk_msg1") int bpf_prog4(struct sk_msg_md *msg) { int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5; - int *start, *end, *start_push, *end_push, *start_pop, *pop; + int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0; bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); if (bytes) @@ -249,8 +249,11 @@ int bpf_prog4(struct sk_msg_md *msg) bpf_msg_pull_data(msg, *start, *end, 0); start_push = bpf_map_lookup_elem(&sock_bytes, &two); end_push = bpf_map_lookup_elem(&sock_bytes, &three); - if (start_push && end_push) - bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (start_push && end_push) { + err = bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (err) + return SK_DROP; + } start_pop = bpf_map_lookup_elem(&sock_bytes, &four); pop = bpf_map_lookup_elem(&sock_bytes, &five); if (start_pop && pop) @@ -263,6 +266,7 @@ int bpf_prog6(struct sk_msg_md *msg) { int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0; int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f; + int err = 0; __u64 flags = 0; bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); @@ -279,8 +283,11 @@ int bpf_prog6(struct sk_msg_md *msg) start_push = bpf_map_lookup_elem(&sock_bytes, &two); end_push = bpf_map_lookup_elem(&sock_bytes, &three); - if (start_push && end_push) - bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (start_push && end_push) { + err = bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (err) + return SK_DROP; + } start_pop = 
bpf_map_lookup_elem(&sock_bytes, &four); pop = bpf_map_lookup_elem(&sock_bytes, &five); @@ -338,7 +345,7 @@ SEC("sk_msg5") int bpf_prog10(struct sk_msg_md *msg) { int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop; - int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5; + int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0; bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero); if (bytes) @@ -352,8 +359,11 @@ int bpf_prog10(struct sk_msg_md *msg) bpf_msg_pull_data(msg, *start, *end, 0); start_push = bpf_map_lookup_elem(&sock_bytes, &two); end_push = bpf_map_lookup_elem(&sock_bytes, &three); - if (start_push && end_push) - bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (start_push && end_push) { + err = bpf_msg_push_data(msg, *start_push, *end_push, 0); + if (err) + return SK_PASS; + } start_pop = bpf_map_lookup_elem(&sock_bytes, &four); pop = bpf_map_lookup_elem(&sock_bytes, &five); if (start_pop && pop) diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh index ec4e15948e40631cdb063a5d700310796e3386de..5252b91f48a18c7d632f47cf7140a2282bbf38bb 100755 --- a/tools/testing/selftests/bpf/test_lirc_mode2.sh +++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh @@ -3,6 +3,7 @@ # Kselftest framework requirement - SKIP code is 4. ksft_skip=4 +ret=$ksft_skip msg="skip all tests:" if [ $UID != 0 ]; then @@ -25,7 +26,7 @@ do fi done -if [ -n $LIRCDEV ]; +if [ -n "$LIRCDEV" ]; then TYPE=lirc_mode2 ./test_lirc_mode2_user $LIRCDEV $INPUTDEV @@ -36,3 +37,5 @@ then echo -e ${GREEN}"PASS: $TYPE"${NC} fi fi + +exit $ret diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh index b497bb85b667f78f0bfa393e5ffaca0766b345f0..6c69c42b1d607075646d1db87804dafd65055a04 100755 --- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh +++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh @@ -120,6 +120,14 @@ setup() ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0 ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0 + # disable IPv6 DAD because it sometimes takes too long and fails tests + ip netns exec ${NS1} sysctl -wq net.ipv6.conf.all.accept_dad=0 + ip netns exec ${NS2} sysctl -wq net.ipv6.conf.all.accept_dad=0 + ip netns exec ${NS3} sysctl -wq net.ipv6.conf.all.accept_dad=0 + ip netns exec ${NS1} sysctl -wq net.ipv6.conf.default.accept_dad=0 + ip netns exec ${NS2} sysctl -wq net.ipv6.conf.default.accept_dad=0 + ip netns exec ${NS3} sysctl -wq net.ipv6.conf.default.accept_dad=0 + ip link add veth1 type veth peer name veth2 ip link add veth3 type veth peer name veth4 ip link add veth5 type veth peer name veth6 @@ -289,7 +297,7 @@ test_ping() ip netns exec ${NS1} ping -c 1 -W 1 -I veth1 ${IPv4_DST} 2>&1 > /dev/null RET=$? elif [ "${PROTO}" == "IPv6" ] ; then - ip netns exec ${NS1} ping6 -c 1 -W 6 -I veth1 ${IPv6_DST} 2>&1 > /dev/null + ip netns exec ${NS1} ping6 -c 1 -W 1 -I veth1 ${IPv6_DST} 2>&1 > /dev/null RET=$? 
else echo " test_ping: unknown PROTO: ${PROTO}" diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index a4c55fcb0e7b1606f821444bdaff7249a2405d5e..0fc813235575de01020ae623c9d6b25ac06ba5dc 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -50,7 +50,7 @@ #define MAX_INSNS BPF_MAXINSNS #define MAX_TEST_INSNS 1000000 #define MAX_FIXUPS 8 -#define MAX_NR_MAPS 20 +#define MAX_NR_MAPS 21 #define MAX_TEST_RUNS 8 #define POINTER_VALUE 0xcafe4all #define TEST_DATA_LEN 64 @@ -87,6 +87,7 @@ struct bpf_test { int fixup_sk_storage_map[MAX_FIXUPS]; int fixup_map_event_output[MAX_FIXUPS]; int fixup_map_reuseport_array[MAX_FIXUPS]; + int fixup_map_ringbuf[MAX_FIXUPS]; const char *errstr; const char *errstr_unpriv; uint32_t insn_processed; @@ -640,6 +641,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, int *fixup_sk_storage_map = test->fixup_sk_storage_map; int *fixup_map_event_output = test->fixup_map_event_output; int *fixup_map_reuseport_array = test->fixup_map_reuseport_array; + int *fixup_map_ringbuf = test->fixup_map_ringbuf; if (test->fill_helper) { test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn)); @@ -817,6 +819,14 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, fixup_map_reuseport_array++; } while (*fixup_map_reuseport_array); } + if (*fixup_map_ringbuf) { + map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0, + 0, 4096); + do { + prog[*fixup_map_ringbuf].imm = map_fds[20]; + fixup_map_ringbuf++; + } while (*fixup_map_ringbuf); + } } struct libcap { diff --git a/tools/testing/selftests/bpf/verifier/ringbuf.c b/tools/testing/selftests/bpf/verifier/ringbuf.c new file mode 100644 index 0000000000000000000000000000000000000000..b64d33e4833c8582fbbf8d5de8fbb640f4352a22 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ringbuf.c @@ -0,0 +1,95 @@ +{ + "ringbuf: invalid reservation offset 1", + .insns = { + /* reserve 8 byte ringbuf memory */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve), + /* store a pointer to the reserved memory in R6 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + /* check whether the reservation was successful */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + /* spill R6(mem) into the stack */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), + /* fill it back in R7 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8), + /* should be able to access *(R7) = 0 */ + BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0), + /* submit the reserved ringbuf memory */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + /* add invalid offset to reserved ringbuf memory */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xcafe), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_ringbuf = { 1 }, + .result = REJECT, + .errstr = "dereference of modified alloc_mem ptr R1", +}, +{ + "ringbuf: invalid reservation offset 2", + .insns = { + /* reserve 8 byte ringbuf memory */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve), + /* store a pointer to the reserved memory in R6 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + /* check 
whether the reservation was successful */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + /* spill R6(mem) into the stack */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), + /* fill it back in R7 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8), + /* add invalid offset to reserved ringbuf memory */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 0xcafe), + /* should be able to access *(R7) = 0 */ + BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0), + /* submit the reserved ringbuf memory */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_ringbuf = { 1 }, + .result = REJECT, + .errstr = "R7 min value is outside of the allowed memory range", +}, +{ + "ringbuf: check passing rb mem to helpers", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + /* reserve 8 byte ringbuf memory */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), + /* check whether the reservation was successful */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + /* pass allocated ring buffer memory to fib lookup */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, 8), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_fib_lookup), + /* submit the ringbuf memory */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_ringbuf = { 2 }, + .prog_type = BPF_PROG_TYPE_XDP, + .result = ACCEPT, +}, diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c index 45d43bf82f269190bc51ab29f7032e909103dca0..0b943897aaf6c136a8fea40a3804042b28c44e37 100644 --- a/tools/testing/selftests/bpf/verifier/spill_fill.c +++ b/tools/testing/selftests/bpf/verifier/spill_fill.c @@ -28,6 +28,36 @@ .result = ACCEPT, .result_unpriv = ACCEPT, }, +{ + "check valid spill/fill, ptr to mem", + .insns = { + /* reserve 8 byte ringbuf memory */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve), + /* store a pointer to the reserved memory in R6 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + /* check whether the reservation was successful */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + /* spill R6(mem) into the stack */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), + /* fill it back in R7 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8), + /* should be able to access *(R7) = 0 */ + BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0), + /* submit the reserved ringbuf memory */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_ringbuf = { 1 }, + .result = ACCEPT, + .result_unpriv = ACCEPT, +}, { "check corrupted spill/fill", .insns = { diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index 05853b0b883181fa4dc784166f2828fb17cbbedf..962ced827513d2951733346805393a5c472ac158 100644 --- 
a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -17,6 +17,7 @@ #include "cgroup_util.h" #include "../clone3/clone3_selftests.h" +/* Returns read len on success, or -errno on failure. */ static ssize_t read_text(const char *path, char *buf, size_t max_len) { ssize_t len; @@ -24,35 +25,29 @@ static ssize_t read_text(const char *path, char *buf, size_t max_len) fd = open(path, O_RDONLY); if (fd < 0) - return fd; + return -errno; len = read(fd, buf, max_len - 1); - if (len < 0) - goto out; - buf[len] = 0; -out: + if (len >= 0) + buf[len] = 0; + close(fd); - return len; + return len < 0 ? -errno : len; } +/* Returns written len on success, or -errno on failure. */ static ssize_t write_text(const char *path, char *buf, ssize_t len) { int fd; fd = open(path, O_WRONLY | O_APPEND); if (fd < 0) - return fd; + return -errno; len = write(fd, buf, len); - if (len < 0) { - close(fd); - return len; - } - close(fd); - - return len; + return len < 0 ? -errno : len; } char *cg_name(const char *root, const char *name) @@ -85,16 +80,16 @@ char *cg_control(const char *cgroup, const char *control) return ret; } +/* Returns 0 on success, or -errno on failure. */ int cg_read(const char *cgroup, const char *control, char *buf, size_t len) { char path[PATH_MAX]; + ssize_t ret; snprintf(path, sizeof(path), "%s/%s", cgroup, control); - if (read_text(path, buf, len) >= 0) - return 0; - - return -1; + ret = read_text(path, buf, len); + return ret >= 0 ? 0 : ret; } int cg_read_strcmp(const char *cgroup, const char *control, @@ -175,17 +170,15 @@ long cg_read_lc(const char *cgroup, const char *control) return cnt; } +/* Returns 0 on success, or -errno on failure. */ int cg_write(const char *cgroup, const char *control, char *buf) { char path[PATH_MAX]; - ssize_t len = strlen(buf); + ssize_t len = strlen(buf), ret; snprintf(path, sizeof(path), "%s/%s", cgroup, control); - - if (write_text(path, buf, len) == len) - return 0; - - return -1; + ret = write_text(path, buf, len); + return ret == len ? 0 : ret; } int cg_find_unified_root(char *root, size_t len) @@ -219,7 +212,7 @@ int cg_find_unified_root(char *root, size_t len) int cg_create(const char *cgroup) { - return mkdir(cgroup, 0644); + return mkdir(cgroup, 0755); } int cg_wait_for_proc_count(const char *cgroup, int count) @@ -337,13 +330,13 @@ pid_t clone_into_cgroup(int cgroup_fd) #ifdef CLONE_ARGS_SIZE_VER2 pid_t pid; - struct clone_args args = { + struct __clone_args args = { .flags = CLONE_INTO_CGROUP, .exit_signal = SIGCHLD, .cgroup = cgroup_fd, }; - pid = sys_clone3(&args, sizeof(struct clone_args)); + pid = sys_clone3(&args, sizeof(struct __clone_args)); /* * Verify that this is a genuine test failure: * ENOSYS -> clone3() not available @@ -539,7 +532,8 @@ ssize_t proc_read_text(int pid, bool thread, const char *item, char *buf, size_t else snprintf(path, sizeof(path), "/proc/%d/%s", pid, item); - return read_text(path, buf, size); + size = read_text(path, buf, size); + return size < 0 ? 
-1 : size; } int proc_read_strstr(int pid, bool thread, const char *item, const char *needle) diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c index 3df648c378765653348791d7f4e53d20b6ee3eb5..60012350306317df8de1261fd93763d191f322db 100644 --- a/tools/testing/selftests/cgroup/test_core.c +++ b/tools/testing/selftests/cgroup/test_core.c @@ -1,11 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#define _GNU_SOURCE #include +#include #include #include #include #include #include +#include #include #include #include @@ -674,6 +677,166 @@ static int test_cgcore_thread_migration(const char *root) return ret; } +/* + * cgroup migration permission check should be performed based on the + * credentials at the time of open instead of write. + */ +static int test_cgcore_lesser_euid_open(const char *root) +{ + const uid_t test_euid = 65534; /* usually nobody, any !root is fine */ + int ret = KSFT_FAIL; + char *cg_test_a = NULL, *cg_test_b = NULL; + char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL; + int cg_test_b_procs_fd = -1; + uid_t saved_uid; + + cg_test_a = cg_name(root, "cg_test_a"); + cg_test_b = cg_name(root, "cg_test_b"); + + if (!cg_test_a || !cg_test_b) + goto cleanup; + + cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs"); + cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs"); + + if (!cg_test_a_procs || !cg_test_b_procs) + goto cleanup; + + if (cg_create(cg_test_a) || cg_create(cg_test_b)) + goto cleanup; + + if (cg_enter_current(cg_test_a)) + goto cleanup; + + if (chown(cg_test_a_procs, test_euid, -1) || + chown(cg_test_b_procs, test_euid, -1)) + goto cleanup; + + saved_uid = geteuid(); + if (seteuid(test_euid)) + goto cleanup; + + cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR); + + if (seteuid(saved_uid)) + goto cleanup; + + if (cg_test_b_procs_fd < 0) + goto cleanup; + + if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + cg_enter_current(root); + if (cg_test_b_procs_fd >= 0) + close(cg_test_b_procs_fd); + if (cg_test_b) + cg_destroy(cg_test_b); + if (cg_test_a) + cg_destroy(cg_test_a); + free(cg_test_b_procs); + free(cg_test_a_procs); + free(cg_test_b); + free(cg_test_a); + return ret; +} + +struct lesser_ns_open_thread_arg { + const char *path; + int fd; + int err; +}; + +static int lesser_ns_open_thread_fn(void *arg) +{ + struct lesser_ns_open_thread_arg *targ = arg; + + targ->fd = open(targ->path, O_RDWR); + targ->err = errno; + return 0; +} + +/* + * cgroup migration permission check should be performed based on the cgroup + * namespace at the time of open instead of write. 
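+ * + * Concretely: the child below is cloned with CLONE_NEWCGROUP | CLONE_FILES | + * CLONE_VM, so the fd it opens on cg_test_b's cgroup.procs is shared with + * the parent but bound to the child's fresh cgroup namespace. Once the + * parent has moved itself to cg_test_a, writing "0" to that fd is expected + * to fail with ENOENT, because the migration is resolved against the + * namespace captured at open time.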
+ */ +static int test_cgcore_lesser_ns_open(const char *root) +{ + static char stack[65536]; + const uid_t test_euid = 65534; /* usually nobody, any !root is fine */ + int ret = KSFT_FAIL; + char *cg_test_a = NULL, *cg_test_b = NULL; + char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL; + int cg_test_b_procs_fd = -1; + struct lesser_ns_open_thread_arg targ = { .fd = -1 }; + pid_t pid; + int status; + + cg_test_a = cg_name(root, "cg_test_a"); + cg_test_b = cg_name(root, "cg_test_b"); + + if (!cg_test_a || !cg_test_b) + goto cleanup; + + cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs"); + cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs"); + + if (!cg_test_a_procs || !cg_test_b_procs) + goto cleanup; + + if (cg_create(cg_test_a) || cg_create(cg_test_b)) + goto cleanup; + + if (cg_enter_current(cg_test_b)) + goto cleanup; + + if (chown(cg_test_a_procs, test_euid, -1) || + chown(cg_test_b_procs, test_euid, -1)) + goto cleanup; + + targ.path = cg_test_b_procs; + pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack), + CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD, + &targ); + if (pid < 0) + goto cleanup; + + if (waitpid(pid, &status, 0) < 0) + goto cleanup; + + if (!WIFEXITED(status)) + goto cleanup; + + cg_test_b_procs_fd = targ.fd; + if (cg_test_b_procs_fd < 0) + goto cleanup; + + if (cg_enter_current(cg_test_a)) + goto cleanup; + + if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + cg_enter_current(root); + if (cg_test_b_procs_fd >= 0) + close(cg_test_b_procs_fd); + if (cg_test_b) + cg_destroy(cg_test_b); + if (cg_test_a) + cg_destroy(cg_test_a); + free(cg_test_b_procs); + free(cg_test_a_procs); + free(cg_test_b); + free(cg_test_a); + return ret; +} + #define T(x) { x, #x } struct corecg_test { int (*fn)(const char *root); @@ -689,6 +852,8 @@ struct corecg_test { T(test_cgcore_proc_migration), T(test_cgcore_thread_migration), T(test_cgcore_destroy), + T(test_cgcore_lesser_euid_open), + T(test_cgcore_lesser_ns_open), }; #undef T diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c index c19a97dd02d4968fd25d1ede46d507fd23a19108..94e16e383bcf879ea73bb257cbfa67466a8b5e52 100644 --- a/tools/testing/selftests/cgroup/test_memcontrol.c +++ b/tools/testing/selftests/cgroup/test_memcontrol.c @@ -210,13 +210,17 @@ static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg) static int alloc_anon_noexit(const char *cgroup, void *arg) { int ppid = getppid(); + size_t size = (unsigned long)arg; + char *buf, *ptr; - if (alloc_anon(cgroup, arg)) - return -1; + buf = malloc(size); + for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE) + *ptr = 0; while (getppid() == ppid) sleep(1); + free(buf); return 0; } @@ -679,6 +683,111 @@ static int test_memcg_max(const char *root) return ret; } +/* + * This test checks that memory.reclaim reclaims the given + * amount of memory (from both anon and file, if possible). 
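+ * + * The interface is a plain byte count written to the control file; the + * reclaim loop below boils down to this sketch: + * + *	snprintf(buf, sizeof(buf), "%ld", to_reclaim); + *	err = cg_write(memcg, "memory.reclaim", buf); + * + * where 0 means at least to_reclaim bytes were reclaimed and -EAGAIN means + * the kernel could not reclaim the full amount, so the test retries.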
+ */ +static int test_memcg_reclaim(const char *root) +{ + int ret = KSFT_FAIL, fd, retries; + char *memcg; + long current, expected_usage, to_reclaim; + char buf[64]; + + memcg = cg_name(root, "memcg_test"); + if (!memcg) + goto cleanup; + + if (cg_create(memcg)) + goto cleanup; + + current = cg_read_long(memcg, "memory.current"); + if (current != 0) + goto cleanup; + + fd = get_temp_fd(); + if (fd < 0) + goto cleanup; + + cg_run_nowait(memcg, alloc_pagecache_50M_noexit, (void *)(long)fd); + + /* + * If swap is enabled, try to reclaim from both anon and file, else try + * to reclaim from file only. + */ + if (is_swap_enabled()) { + cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(50)); + expected_usage = MB(100); + } else + expected_usage = MB(50); + + /* + * Wait until current usage reaches the expected usage (or we run out of + * retries). + */ + retries = 5; + while (!values_close(cg_read_long(memcg, "memory.current"), + expected_usage, 10)) { + if (retries--) { + sleep(1); + continue; + } else { + fprintf(stderr, + "failed to allocate %ld for memcg reclaim test\n", + expected_usage); + goto cleanup; + } + } + + /* + * Reclaim until current reaches 30M, this makes sure we hit both anon + * and file if swap is enabled. + */ + retries = 5; + while (true) { + int err; + + current = cg_read_long(memcg, "memory.current"); + to_reclaim = current - MB(30); + + /* + * We only keep looping if we get EAGAIN, which means we could + * not reclaim the full amount. + */ + if (to_reclaim <= 0) + goto cleanup; + + + snprintf(buf, sizeof(buf), "%ld", to_reclaim); + err = cg_write(memcg, "memory.reclaim", buf); + if (!err) { + /* + * If writing succeeds, then the written amount should have been + * fully reclaimed (and maybe more). + */ + current = cg_read_long(memcg, "memory.current"); + if (!values_close(current, MB(30), 3) && current > MB(30)) + goto cleanup; + break; + } + + /* The kernel could not reclaim the full amount, try again. */ + if (err == -EAGAIN && retries--) + continue; + + /* We got an unexpected error or ran out of retries. */ + goto cleanup; + } + + ret = KSFT_PASS; +cleanup: + cg_destroy(memcg); + free(memcg); + close(fd); + + return ret; +} + static int alloc_anon_50M_check_swap(const char *cgroup, void *arg) { long mem_max = (long)arg; @@ -1181,6 +1290,7 @@ struct memcg_test { T(test_memcg_low), T(test_memcg_high), T(test_memcg_max), + T(test_memcg_reclaim), T(test_memcg_oom_events), T(test_memcg_swap_max), T(test_memcg_sock), diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c index 42be3b9258301e87823edfe7284142fca5d49158..cd4582129c7d69c1e45b6a3acbd7a5f2c6c07e67 100644 --- a/tools/testing/selftests/clone3/clone3.c +++ b/tools/testing/selftests/clone3/clone3.c @@ -52,6 +52,12 @@ static int call_clone3(uint64_t flags, size_t size, enum test_mode test_mode) size = sizeof(struct __clone_args); switch (test_mode) { + case CLONE3_ARGS_NO_TEST: + /* + * Uses default 'flags' and 'SIGCHLD' + * assignment. 
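+ * That is, args reaches clone3() exactly as initialized + * earlier in this function (sketch): + * + *	args.flags = flags; + *	args.exit_signal = SIGCHLD; + * + * so this case exercises the unmodified path.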
+ */ + break; case CLONE3_ARGS_ALL_0: args.flags = 0; args.exit_signal = 0; @@ -120,8 +126,6 @@ static void test_clone3(uint64_t flags, size_t size, int expected, int main(int argc, char *argv[]) { - pid_t pid; - uid_t uid = getuid(); ksft_print_header(); diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh index 3e3e06ea5703cd93e73f619f4c39ffe16115eb80..86e787895f78b19300677529884f01e6eeb65cab 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh @@ -60,7 +60,8 @@ __tc_police_test() tc_police_rules_create $count $should_fail - offload_count=$(tc filter show dev $swp1 ingress | grep in_hw | wc -l) + offload_count=$(tc -j filter show dev $swp1 ingress | + jq "[.[] | select(.options.in_hw == true)] | length") ((offload_count == count)) check_err_fail $should_fail $? "tc police offload count" } diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile index dd61118df66edbcde89f92a2626d2e9186122fda..2d7fca446c7f7b418f1be5580ce42f1c205978f2 100644 --- a/tools/testing/selftests/exec/Makefile +++ b/tools/testing/selftests/exec/Makefile @@ -3,9 +3,9 @@ CFLAGS = -Wall CFLAGS += -Wno-nonnull CFLAGS += -D_GNU_SOURCE -TEST_PROGS := binfmt_script non-regular -TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 -TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir pipe +TEST_PROGS := binfmt_script +TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular +TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir # Makefile is a run-time dependency, since it's accessed by the execveat test TEST_FILES := Makefile diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc b/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc index 98166fa3eb91ccbbef854d9ad8c5c5a605b594f9..34fb89b0c61fa35bcc55c81ee2cbdd8f274af492 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc @@ -1,6 +1,6 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 -# description: Kprobe dynamic event - adding and removing +# description: Kprobe profile # requires: kprobe_events ! 
grep -q 'myevent' kprobe_profile diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile index 12631f0076a10274dff566228b05748e9a217be0..11e157d7533b8fbaf5ac691b3df7057e0ff558c1 100644 --- a/tools/testing/selftests/futex/Makefile +++ b/tools/testing/selftests/futex/Makefile @@ -11,7 +11,7 @@ all: @for DIR in $(SUBDIRS); do \ BUILD_TARGET=$(OUTPUT)/$$DIR; \ mkdir $$BUILD_TARGET -p; \ - make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ + $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ if [ -e $$DIR/$(TEST_PROGS) ]; then \ rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \ fi \ @@ -32,6 +32,6 @@ override define CLEAN @for DIR in $(SUBDIRS); do \ BUILD_TARGET=$(OUTPUT)/$$DIR; \ mkdir $$BUILD_TARGET -p; \ - make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ + $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ done endef diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h index edce85420d1934e3892f5ffe1d5c6d06c0df593f..3e7b2e521cde435333b0c1e5fafbc77c0f2f7dc1 100644 --- a/tools/testing/selftests/kselftest_harness.h +++ b/tools/testing/selftests/kselftest_harness.h @@ -871,7 +871,8 @@ static void __timeout_handler(int sig, siginfo_t *info, void *ucontext) } t->timed_out = true; - kill(t->pid, SIGKILL); + // signal process group + kill(-(t->pid), SIGKILL); } void __wait_for_test(struct __test_metadata *t) @@ -965,7 +966,7 @@ void __run_test(struct __fixture_metadata *f, t->passed = 1; t->skip = 0; t->trigger = 0; - t->step = 0; + t->step = 1; t->no_print = 0; memset(t->results->reason, 0, sizeof(t->results->reason)); @@ -981,6 +982,7 @@ void __run_test(struct __fixture_metadata *f, ksft_print_msg("ERROR SPAWNING TEST CHILD\n"); t->passed = 0; } else if (t->pid == 0) { + setpgrp(); t->fn(t, variant); if (t->skip) _exit(255); diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c index 334a7eea200428b07d8964555783f9959d028aa0..fba322d1c67a1714d74e84d545087cc0aec955c6 100644 --- a/tools/testing/selftests/memfd/memfd_test.c +++ b/tools/testing/selftests/memfd/memfd_test.c @@ -455,6 +455,7 @@ static void mfd_fail_write(int fd) printf("mmap()+mprotect() didn't fail as expected\n"); abort(); } + munmap(p, mfd_def_size); } /* verify PUNCH_HOLE fails */ diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c index 5a1e85ff5d32a4a54fe5578e655692c15750f454..2cf6f2f277ab8a22951e57f906a4880f5f236480 100644 --- a/tools/testing/selftests/mincore/mincore_selftest.c +++ b/tools/testing/selftests/mincore/mincore_selftest.c @@ -208,15 +208,21 @@ TEST(check_file_mmap) errno = 0; fd = open(".", O_TMPFILE | O_RDWR, 0600); - ASSERT_NE(-1, fd) { - TH_LOG("Can't create temporary file: %s", - strerror(errno)); + if (fd < 0) { + ASSERT_EQ(errno, EOPNOTSUPP) { + TH_LOG("Can't create temporary file: %s", + strerror(errno)); + } + SKIP(goto out_free, "O_TMPFILE not supported by filesystem."); } errno = 0; retval = fallocate(fd, 0, 0, FILE_SIZE); - ASSERT_EQ(0, retval) { - TH_LOG("Error allocating space for the temporary file: %s", - strerror(errno)); + if (retval) { + ASSERT_EQ(errno, EOPNOTSUPP) { + TH_LOG("Error allocating space for the temporary file: %s", + strerror(errno)); + } + SKIP(goto out_close, "fallocate not supported by filesystem."); } /* @@ -272,7 +278,9 @@ TEST(check_file_mmap) } munmap(addr, FILE_SIZE); +out_close: close(fd); +out_free: free(vec); } diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c 
b/tools/testing/selftests/mqueue/mq_perf_tests.c index b019e0b8221c7c0bf565d163e141d9e69d37014f..84fda3b490735faa7d3daeb05e58deca9f865f5f 100644 --- a/tools/testing/selftests/mqueue/mq_perf_tests.c +++ b/tools/testing/selftests/mqueue/mq_perf_tests.c @@ -180,6 +180,9 @@ void shutdown(int exit_val, char *err_cause, int line_no) if (in_shutdown++) return; + /* Free the cpu_set allocated using CPU_ALLOC in main function */ + CPU_FREE(cpu_set); + for (i = 0; i < num_cpus_to_pin; i++) if (cpu_threads[i]) { pthread_kill(cpu_threads[i], SIGUSR1); @@ -551,6 +554,12 @@ int main(int argc, char *argv[]) perror("sysconf(_SC_NPROCESSORS_ONLN)"); exit(1); } + + if (getuid() != 0) + ksft_exit_skip("Not running as root, but almost all tests " + "require root in order to modify\nsystem settings. " + "Exiting.\n"); + cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN)); cpu_set = CPU_ALLOC(cpus_online); if (cpu_set == NULL) { @@ -589,7 +598,7 @@ int main(int argc, char *argv[]) cpu_set)) { fprintf(stderr, "Any given CPU may " "only be given once.\n"); - exit(1); + goto err_code; } else CPU_SET_S(cpus_to_pin[cpu], cpu_set_size, cpu_set); @@ -607,7 +616,7 @@ int main(int argc, char *argv[]) queue_path = malloc(strlen(option) + 2); if (!queue_path) { perror("malloc()"); - exit(1); + goto err_code; } queue_path[0] = '/'; queue_path[1] = 0; @@ -622,17 +631,12 @@ int main(int argc, char *argv[]) fprintf(stderr, "Must pass at least one CPU to continuous " "mode.\n"); poptPrintUsage(popt_context, stderr, 0); - exit(1); + goto err_code; } else if (!continuous_mode) { num_cpus_to_pin = 1; cpus_to_pin[0] = cpus_online - 1; } - if (getuid() != 0) - ksft_exit_skip("Not running as root, but almost all tests " - "require root in order to modify\nsystem settings. " - "Exiting.\n"); - max_msgs = fopen(MAX_MSGS, "r+"); max_msgsize = fopen(MAX_MSGSIZE, "r+"); if (!max_msgs) @@ -740,4 +744,9 @@ int main(int argc, char *argv[]) sleep(1); } shutdown(0, "", 0); + +err_code: + CPU_FREE(cpu_set); + exit(1); + } diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 3367fb5f2feff5bd161d3512362705e20d6c0395..3253fdc780d62909b659229b37b237a2893fa4d6 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -799,7 +799,6 @@ setup_ovs_bridge() { setup() { [ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip - cleanup for arg do eval setup_${arg} || { echo " ${arg} not supported"; return 1; } done @@ -810,7 +809,7 @@ trace() { for arg do [ "${ns_cmd}" = "" ] && ns_cmd="${arg}" && continue - ${ns_cmd} tcpdump -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null & + ${ns_cmd} tcpdump --immediate-mode -s 0 -i "${arg}" -w "${name}_${arg}.pcap" 2> /dev/null & tcpdump_pids="${tcpdump_pids} $!" ns_cmd= done @@ -1636,6 +1635,10 @@ run_test() { unset IFS + # Since cleanup() relies on variables modified by this subshell, it + # has to run in this context. 
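+ # The EXIT trap also fires when this subshell returns early from a + # failed check, so interfaces and namespaces are torn down on every + # exit path, not only on success.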
+ trap cleanup EXIT + if [ "$VERBOSE" = "1" ]; then printf "\n##########################################################################\n\n" fi diff --git a/tools/testing/selftests/net/test_vxlan_under_vrf.sh b/tools/testing/selftests/net/test_vxlan_under_vrf.sh index 09f9ed92cbe4c8b6e1837698aa273b2f7848145b..a44b9aca7427234346b58a2477324e3bcddfb684 100755 --- a/tools/testing/selftests/net/test_vxlan_under_vrf.sh +++ b/tools/testing/selftests/net/test_vxlan_under_vrf.sh @@ -118,11 +118,11 @@ echo "[ OK ]" # Move the underlay to a non-default VRF ip -netns hv-1 link set veth0 vrf vrf-underlay -ip -netns hv-1 link set veth0 down -ip -netns hv-1 link set veth0 up +ip -netns hv-1 link set vxlan0 down +ip -netns hv-1 link set vxlan0 up ip -netns hv-2 link set veth0 vrf vrf-underlay -ip -netns hv-2 link set veth0 down -ip -netns hv-2 link set veth0 up +ip -netns hv-2 link set vxlan0 down +ip -netns hv-2 link set vxlan0 up echo -n "Check VM connectivity through VXLAN (underlay in a VRF) " ip netns exec vm-1 ping -c 1 -W 1 10.0.0.2 &> /dev/null || (echo "[FAIL]"; false) diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index b599f1fa99b55aba5d359163f35a9fc32713e5ee..3d39a735443121ced08ae5fe327326556909831e 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -25,26 +25,80 @@ #define TLS_PAYLOAD_MAX_LEN 16384 #define SOL_TLS 282 -FIXTURE(tls_basic) -{ - int fd, cfd; - bool notls; +struct tls_crypto_info_keys { + union { + struct tls12_crypto_info_aes_gcm_128 aes128; + struct tls12_crypto_info_sm4_gcm sm4gcm; + struct tls12_crypto_info_sm4_ccm sm4ccm; + struct tls12_crypto_info_aes_ccm_128 aesccm128; + struct tls12_crypto_info_aes_gcm_256 aesgcm256; + }; + size_t len; }; -FIXTURE_SETUP(tls_basic) +static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type, + struct tls_crypto_info_keys *tls12) +{ + memset(tls12, 0, sizeof(*tls12)); + + switch (cipher_type) { + case TLS_CIPHER_AES_GCM_128: + tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_128); + tls12->aes128.info.version = tls_version; + tls12->aes128.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_SM4_GCM: + tls12->len = sizeof(struct tls12_crypto_info_sm4_gcm); + tls12->sm4gcm.info.version = tls_version; + tls12->sm4gcm.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_SM4_CCM: + tls12->len = sizeof(struct tls12_crypto_info_sm4_ccm); + tls12->sm4ccm.info.version = tls_version; + tls12->sm4ccm.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_AES_CCM_128: + tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128); + tls12->aesccm128.info.version = tls_version; + tls12->aesccm128.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_AES_GCM_256: + tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256); + tls12->aesgcm256.info.version = tls_version; + tls12->aesgcm256.info.cipher_type = cipher_type; + break; + default: + break; + } +} + +static void memrnd(void *s, size_t n) +{ + int *dword = s; + char *byte; + + for (; n >= 4; n -= 4) + *dword++ = rand(); + byte = (void *)dword; + while (n--) + *byte++ = rand(); +} + +static void ulp_sock_pair(struct __test_metadata *_metadata, + int *fd, int *cfd, bool *notls) { struct sockaddr_in addr; socklen_t len; int sfd, ret; - self->notls = false; + *notls = false; len = sizeof(addr); addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl(INADDR_ANY); addr.sin_port = 0; - self->fd = socket(AF_INET, SOCK_STREAM, 0); + *fd = socket(AF_INET, SOCK_STREAM, 0); sfd = socket(AF_INET, 
SOCK_STREAM, 0); ret = bind(sfd, &addr, sizeof(addr)); @@ -55,26 +109,96 @@ FIXTURE_SETUP(tls_basic) ret = getsockname(sfd, &addr, &len); ASSERT_EQ(ret, 0); - ret = connect(self->fd, &addr, sizeof(addr)); + ret = connect(*fd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); - self->cfd = accept(sfd, &addr, &len); - ASSERT_GE(self->cfd, 0); + *cfd = accept(sfd, &addr, &len); + ASSERT_GE(*cfd, 0); close(sfd); - ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + ret = setsockopt(*fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); if (ret != 0) { ASSERT_EQ(errno, ENOENT); - self->notls = true; + *notls = true; printf("Failure setting TCP_ULP, testing without tls\n"); return; } - ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + ret = setsockopt(*cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); ASSERT_EQ(ret, 0); } +/* Produce a basic cmsg */ +static int tls_send_cmsg(int fd, unsigned char record_type, + void *data, size_t len, int flags) +{ + char cbuf[CMSG_SPACE(sizeof(char))]; + int cmsg_len = sizeof(char); + struct cmsghdr *cmsg; + struct msghdr msg; + struct iovec vec; + + vec.iov_base = data; + vec.iov_len = len; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_TLS; + /* test sending non-record types. */ + cmsg->cmsg_type = TLS_SET_RECORD_TYPE; + cmsg->cmsg_len = CMSG_LEN(cmsg_len); + *CMSG_DATA(cmsg) = record_type; + msg.msg_controllen = cmsg->cmsg_len; + + return sendmsg(fd, &msg, flags); +} + +static int tls_recv_cmsg(struct __test_metadata *_metadata, + int fd, unsigned char record_type, + void *data, size_t len, int flags) +{ + char cbuf[CMSG_SPACE(sizeof(char))]; + struct cmsghdr *cmsg; + unsigned char ctype; + struct msghdr msg; + struct iovec vec; + int n; + + vec.iov_base = data; + vec.iov_len = len; + memset(&msg, 0, sizeof(struct msghdr)); + msg.msg_iov = &vec; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + + n = recvmsg(fd, &msg, flags); + + cmsg = CMSG_FIRSTHDR(&msg); + EXPECT_NE(cmsg, NULL); + EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); + EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); + ctype = *((unsigned char *)CMSG_DATA(cmsg)); + EXPECT_EQ(ctype, record_type); + + return n; +} + +FIXTURE(tls_basic) +{ + int fd, cfd; + bool notls; +}; + +FIXTURE_SETUP(tls_basic) +{ + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); +} + FIXTURE_TEARDOWN(tls_basic) { close(self->fd); @@ -103,77 +227,76 @@ FIXTURE(tls) FIXTURE_VARIANT(tls) { - unsigned int tls_version; + uint16_t tls_version; + uint16_t cipher_type; }; -FIXTURE_VARIANT_ADD(tls, 12) +FIXTURE_VARIANT_ADD(tls, 12_aes_gcm) { .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_128, }; -FIXTURE_VARIANT_ADD(tls, 13) +FIXTURE_VARIANT_ADD(tls, 13_aes_gcm) { .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_128, }; -FIXTURE_SETUP(tls) +FIXTURE_VARIANT_ADD(tls, 13_sm4_gcm) { - struct tls12_crypto_info_aes_gcm_128 tls12; - struct sockaddr_in addr; - socklen_t len; - int sfd, ret; - - self->notls = false; - len = sizeof(addr); - - memset(&tls12, 0, sizeof(tls12)); - tls12.info.version = variant->tls_version; - tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128; + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_SM4_GCM, +}; - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_ANY); - addr.sin_port = 0; +FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm) +{ 
+ .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_SM4_CCM, +}; - self->fd = socket(AF_INET, SOCK_STREAM, 0); - sfd = socket(AF_INET, SOCK_STREAM, 0); +FIXTURE_VARIANT_ADD(tls, 12_aes_ccm) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; - ret = bind(sfd, &addr, sizeof(addr)); - ASSERT_EQ(ret, 0); - ret = listen(sfd, 10); - ASSERT_EQ(ret, 0); +FIXTURE_VARIANT_ADD(tls, 13_aes_ccm) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; - ret = getsockname(sfd, &addr, &len); - ASSERT_EQ(ret, 0); +FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; - ret = connect(self->fd, &addr, sizeof(addr)); - ASSERT_EQ(ret, 0); +FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; - ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); - if (ret != 0) { - self->notls = true; - printf("Failure setting TCP_ULP, testing without tls\n"); - } +FIXTURE_SETUP(tls) +{ + struct tls_crypto_info_keys tls12; + int ret; - if (!self->notls) { - ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, - sizeof(tls12)); - ASSERT_EQ(ret, 0); - } + tls_crypto_info_init(variant->tls_version, variant->cipher_type, + &tls12); - self->cfd = accept(sfd, &addr, &len); - ASSERT_GE(self->cfd, 0); + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); - if (!self->notls) { - ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", - sizeof("tls")); - ASSERT_EQ(ret, 0); + if (self->notls) + return; - ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, - sizeof(tls12)); - ASSERT_EQ(ret, 0); - } + ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); - close(sfd); + ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); } FIXTURE_TEARDOWN(tls) @@ -277,6 +400,8 @@ TEST_F(tls, recv_max) char recv_mem[TLS_PAYLOAD_MAX_LEN]; char buf[TLS_PAYLOAD_MAX_LEN]; + memrnd(buf, sizeof(buf)); + EXPECT_GE(send(self->fd, buf, send_len, 0), 0); EXPECT_NE(recv(self->cfd, recv_mem, send_len, 0), -1); EXPECT_EQ(memcmp(buf, recv_mem, send_len), 0); @@ -387,8 +512,9 @@ TEST_F(tls, sendmsg_large) EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len); } - while (recvs++ < sends) + while (recvs++ < sends) { EXPECT_NE(recv(self->fd, mem, send_len, 0), -1); + } free(mem); } @@ -531,6 +657,101 @@ TEST_F(tls, splice_to_pipe) EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); } +TEST_F(tls, splice_cmsg_to_pipe) +{ + char *test_str = "test_read"; + char record_type = 100; + int send_len = 10; + char buf[10]; + int p[2]; + + if (self->notls) + SKIP(return, "no TLS support"); + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1); + EXPECT_EQ(errno, EINVAL); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(errno, EIO); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, splice_dec_cmsg_to_pipe) +{ + char *test_str = "test_read"; + char record_type = 100; + int send_len = 10; + char buf[10]; + int p[2]; + + if (self->notls) + SKIP(return, "no TLS support"); + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); + EXPECT_EQ(errno, EIO); + 
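+ /* + * The plain recv() above already hit the control record and failed + * with EIO; the splice() below must likewise refuse it (EINVAL), + * leaving the record to be drained by the control-message read that + * follows. + */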
EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1); + EXPECT_EQ(errno, EINVAL); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); + EXPECT_EQ(memcmp(test_str, buf, send_len), 0); +} + +TEST_F(tls, recv_and_splice) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int half = send_len / 2; + int p[2]; + + ASSERT_GE(pipe(p), 0); + EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len); + /* Recv half of the record, splice the other half */ + EXPECT_EQ(recv(self->cfd, mem_recv, half, MSG_WAITALL), half); + EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, half, SPLICE_F_NONBLOCK), + half); + EXPECT_EQ(read(p[0], &mem_recv[half], half), half); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + +TEST_F(tls, peek_and_splice) +{ + int send_len = TLS_PAYLOAD_MAX_LEN; + char mem_send[TLS_PAYLOAD_MAX_LEN]; + char mem_recv[TLS_PAYLOAD_MAX_LEN]; + int chunk = TLS_PAYLOAD_MAX_LEN / 4; + int n, i, p[2]; + + memrnd(mem_send, sizeof(mem_send)); + + ASSERT_GE(pipe(p), 0); + for (i = 0; i < 4; i++) + EXPECT_EQ(send(self->fd, &mem_send[chunk * i], chunk, 0), + chunk); + + EXPECT_EQ(recv(self->cfd, mem_recv, chunk * 5 / 2, + MSG_WAITALL | MSG_PEEK), + chunk * 5 / 2); + EXPECT_EQ(memcmp(mem_send, mem_recv, chunk * 5 / 2), 0); + + n = 0; + while (n < send_len) { + i = splice(self->cfd, NULL, p[1], NULL, send_len - n, 0); + EXPECT_GT(i, 0); + n += i; + } + EXPECT_EQ(n, send_len); + EXPECT_EQ(read(p[0], mem_recv, send_len), send_len); + EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0); +} + TEST_F(tls, recvmsg_single) { char const *test_str = "test_recvmsg_single"; @@ -557,6 +778,8 @@ TEST_F(tls, recvmsg_single_max) struct iovec vec; struct msghdr hdr; + memrnd(send_mem, sizeof(send_mem)); + EXPECT_EQ(send(self->fd, send_mem, send_len, 0), send_len); vec.iov_base = (char *)recv_mem; vec.iov_len = TLS_PAYLOAD_MAX_LEN; @@ -579,6 +802,8 @@ TEST_F(tls, recvmsg_multiple) struct msghdr hdr; int i; + memrnd(buf, sizeof(buf)); + EXPECT_EQ(send(self->fd, buf, send_len, 0), send_len); for (i = 0; i < msg_iovlen; i++) { iov_base[i] = (char *)malloc(iov_len); @@ -603,6 +828,8 @@ TEST_F(tls, single_send_multiple_recv) char send_mem[TLS_PAYLOAD_MAX_LEN * 2]; char recv_mem[TLS_PAYLOAD_MAX_LEN * 2]; + memrnd(send_mem, sizeof(send_mem)); + EXPECT_GE(send(self->fd, send_mem, total_len, 0), 0); memset(recv_mem, 0, total_len); @@ -803,18 +1030,17 @@ TEST_F(tls, bidir) int ret; if (!self->notls) { - struct tls12_crypto_info_aes_gcm_128 tls12; + struct tls_crypto_info_keys tls12; - memset(&tls12, 0, sizeof(tls12)); - tls12.info.version = variant->tls_version; - tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128; + tls_crypto_info_init(variant->tls_version, variant->cipher_type, + &tls12); ret = setsockopt(self->fd, SOL_TLS, TLS_RX, &tls12, - sizeof(tls12)); + tls12.len); ASSERT_EQ(ret, 0); ret = setsockopt(self->cfd, SOL_TLS, TLS_TX, &tls12, - sizeof(tls12)); + tls12.len); ASSERT_EQ(ret, 0); } @@ -1109,60 +1335,30 @@ TEST_F(tls, mutliproc_sendpage_writers) TEST_F(tls, control_msg) { - if (self->notls) - return; - - char cbuf[CMSG_SPACE(sizeof(char))]; - char const *test_str = "test_read"; - int cmsg_len = sizeof(char); + char *test_str = "test_read"; char record_type = 100; - struct cmsghdr *cmsg; - struct msghdr msg; int send_len = 10; - struct iovec vec; char buf[10]; - vec.iov_base = (char *)test_str; - vec.iov_len = 10; - memset(&msg, 0, sizeof(struct msghdr)); - msg.msg_iov = &vec; -
msg.msg_iovlen = 1; - msg.msg_control = cbuf; - msg.msg_controllen = sizeof(cbuf); - cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_level = SOL_TLS; - /* test sending non-record types. */ - cmsg->cmsg_type = TLS_SET_RECORD_TYPE; - cmsg->cmsg_len = CMSG_LEN(cmsg_len); - *CMSG_DATA(cmsg) = record_type; - msg.msg_controllen = cmsg->cmsg_len; + if (self->notls) + SKIP(return, "no TLS support"); - EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len); + EXPECT_EQ(tls_send_cmsg(self->fd, record_type, test_str, send_len, 0), + send_len); /* Should fail because we didn't provide a control message */ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1); - vec.iov_base = buf; - EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL | MSG_PEEK), send_len); - - cmsg = CMSG_FIRSTHDR(&msg); - EXPECT_NE(cmsg, NULL); - EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); - EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); - record_type = *((unsigned char *)CMSG_DATA(cmsg)); - EXPECT_EQ(record_type, 100); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL | MSG_PEEK), + send_len); EXPECT_EQ(memcmp(buf, test_str, send_len), 0); /* Recv the message again without MSG_PEEK */ - record_type = 0; memset(buf, 0, sizeof(buf)); - EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len); - cmsg = CMSG_FIRSTHDR(&msg); - EXPECT_NE(cmsg, NULL); - EXPECT_EQ(cmsg->cmsg_level, SOL_TLS); - EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE); - record_type = *((unsigned char *)CMSG_DATA(cmsg)); - EXPECT_EQ(record_type, 100); + EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type, + buf, sizeof(buf), MSG_WAITALL), + send_len); EXPECT_EQ(memcmp(buf, test_str, send_len), 0); } @@ -1217,6 +1413,160 @@ TEST_F(tls, shutdown_reuse) EXPECT_EQ(errno, EISCONN); } +FIXTURE(tls_err) +{ + int fd, cfd; + int fd2, cfd2; + bool notls; +}; + +FIXTURE_VARIANT(tls_err) +{ + uint16_t tls_version; +}; + +FIXTURE_VARIANT_ADD(tls_err, 12_aes_gcm) +{ + .tls_version = TLS_1_2_VERSION, +}; + +FIXTURE_VARIANT_ADD(tls_err, 13_aes_gcm) +{ + .tls_version = TLS_1_3_VERSION, +}; + +FIXTURE_SETUP(tls_err) +{ + struct tls_crypto_info_keys tls12; + int ret; + + tls_crypto_info_init(variant->tls_version, TLS_CIPHER_AES_GCM_128, + &tls12); + + ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls); + ulp_sock_pair(_metadata, &self->fd2, &self->cfd2, &self->notls); + if (self->notls) + return; + + ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); + + ret = setsockopt(self->cfd2, SOL_TLS, TLS_RX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); +} + +FIXTURE_TEARDOWN(tls_err) +{ + close(self->fd); + close(self->cfd); + close(self->fd2); + close(self->cfd2); +} + +TEST_F(tls_err, bad_rec) +{ + char buf[64]; + + if (self->notls) + SKIP(return, "no TLS support"); + + memset(buf, 0x55, sizeof(buf)); + EXPECT_EQ(send(self->fd2, buf, sizeof(buf), 0), sizeof(buf)); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EMSGSIZE); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), MSG_DONTWAIT), -1); + EXPECT_EQ(errno, EAGAIN); +} + +TEST_F(tls_err, bad_auth) +{ + char buf[128]; + int n; + + if (self->notls) + SKIP(return, "no TLS support"); + + memrnd(buf, sizeof(buf) / 2); + EXPECT_EQ(send(self->fd, buf, sizeof(buf) / 2, 0), sizeof(buf) / 2); + n = recv(self->cfd, buf, sizeof(buf), 0); + EXPECT_GT(n, sizeof(buf) / 2); + + buf[n - 1]++; + + EXPECT_EQ(send(self->fd2, buf, n, 0), n); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), 
-1); + EXPECT_EQ(errno, EBADMSG); +} + +TEST_F(tls_err, bad_in_large_read) +{ + char txt[3][64]; + char cip[3][128]; + char buf[3 * 128]; + int i, n; + + if (self->notls) + SKIP(return, "no TLS support"); + + /* Put 3 records in the sockets */ + for (i = 0; i < 3; i++) { + memrnd(txt[i], sizeof(txt[i])); + EXPECT_EQ(send(self->fd, txt[i], sizeof(txt[i]), 0), + sizeof(txt[i])); + n = recv(self->cfd, cip[i], sizeof(cip[i]), 0); + EXPECT_GT(n, sizeof(txt[i])); + /* Break the third message */ + if (i == 2) + cip[2][n - 1]++; + EXPECT_EQ(send(self->fd2, cip[i], n, 0), n); + } + + /* We should be able to receive the first two messages */ + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt[0]) * 2); + EXPECT_EQ(memcmp(buf, txt[0], sizeof(txt[0])), 0); + EXPECT_EQ(memcmp(buf + sizeof(txt[0]), txt[1], sizeof(txt[1])), 0); + /* Third message is bad */ + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); +} + +TEST_F(tls_err, bad_cmsg) +{ + char *test_str = "test_read"; + int send_len = 10; + char cip[128]; + char buf[128]; + char txt[64]; + int n; + + if (self->notls) + SKIP(return, "no TLS support"); + + /* Queue up one data record */ + memrnd(txt, sizeof(txt)); + EXPECT_EQ(send(self->fd, txt, sizeof(txt), 0), sizeof(txt)); + n = recv(self->cfd, cip, sizeof(cip), 0); + EXPECT_GT(n, sizeof(txt)); + EXPECT_EQ(send(self->fd2, cip, n, 0), n); + + EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10); + n = recv(self->cfd, cip, sizeof(cip), 0); + cip[n - 1]++; /* Break it */ + EXPECT_GT(n, send_len); + EXPECT_EQ(send(self->fd2, cip, n, 0), n); + + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt)); + EXPECT_EQ(memcmp(buf, txt, sizeof(txt)), 0); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); + EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1); + EXPECT_EQ(errno, EBADMSG); +} + TEST(non_established) { struct tls12_crypto_info_aes_gcm_256 tls12; struct sockaddr_in addr; @@ -1271,64 +1621,82 @@ TEST(non_established) { TEST(keysizes) { struct tls12_crypto_info_aes_gcm_256 tls12; - struct sockaddr_in addr; - int sfd, ret, fd, cfd; - socklen_t len; + int ret, fd, cfd; bool notls; - notls = false; - len = sizeof(addr); - memset(&tls12, 0, sizeof(tls12)); tls12.info.version = TLS_1_2_VERSION; tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256; - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_ANY); - addr.sin_port = 0; + ulp_sock_pair(_metadata, &fd, &cfd, &notls); - fd = socket(AF_INET, SOCK_STREAM, 0); - sfd = socket(AF_INET, SOCK_STREAM, 0); + if (!notls) { + ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, + sizeof(tls12)); + EXPECT_EQ(ret, 0); + + ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, + sizeof(tls12)); + EXPECT_EQ(ret, 0); + } + + close(fd); + close(cfd); +} + +TEST(tls_v6ops) { + struct tls_crypto_info_keys tls12; + struct sockaddr_in6 addr, addr2; + int sfd, ret, fd; + socklen_t len, len2; + + tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_128, &tls12); + + addr.sin6_family = AF_INET6; + addr.sin6_addr = in6addr_any; + addr.sin6_port = 0; + + fd = socket(AF_INET6, SOCK_STREAM, 0); + sfd = socket(AF_INET6, SOCK_STREAM, 0); + ret = bind(sfd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); + ret = listen(sfd, 10); ASSERT_EQ(ret, 0); + + len = sizeof(addr); ret = getsockname(sfd, &addr, &len); ASSERT_EQ(ret, 0); + ret = connect(fd, &addr, sizeof(addr)); ASSERT_EQ(ret, 0); + + len = sizeof(addr); + ret =
getsockname(fd, &addr, &len); + ASSERT_EQ(ret, 0); + ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); - if (ret != 0) { - notls = true; - printf("Failure setting TCP_ULP, testing without tls\n"); + if (ret) { + ASSERT_EQ(errno, ENOENT); + SKIP(return, "no TLS support"); } + ASSERT_EQ(ret, 0); - if (!notls) { - ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, - sizeof(tls12)); - EXPECT_EQ(ret, 0); - } + ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); - cfd = accept(sfd, &addr, &len); - ASSERT_GE(cfd, 0); + ret = setsockopt(fd, SOL_TLS, TLS_RX, &tls12, tls12.len); + ASSERT_EQ(ret, 0); - if (!notls) { - ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", - sizeof("tls")); - EXPECT_EQ(ret, 0); + len2 = sizeof(addr2); + ret = getsockname(fd, &addr2, &len2); + ASSERT_EQ(ret, 0); - ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, - sizeof(tls12)); - EXPECT_EQ(ret, 0); - } + EXPECT_EQ(len2, len); + EXPECT_EQ(memcmp(&addr, &addr2, len), 0); - close(sfd); close(fd); - close(cfd); + close(sfd); } TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh index 5a4938d6dcf25a3f8137be799091f4f0d16ad7fd..b5eef5ffb58e5f8981085e483409c259929546fc 100755 --- a/tools/testing/selftests/netfilter/nft_concat_range.sh +++ b/tools/testing/selftests/netfilter/nft_concat_range.sh @@ -27,7 +27,7 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto net_port_mac_proto_net" # Reported bugs, also described by TYPE_ variables below -BUGS="flush_remove_add" +BUGS="flush_remove_add reload" # List of possible paths to pktgen script from kernel tree for performance tests PKTGEN_SCRIPT_PATHS=" @@ -337,6 +337,23 @@ TYPE_flush_remove_add=" display Add two elements, flush, re-add " +TYPE_reload=" +display net,mac with reload +type_spec ipv4_addr . ether_addr +chain_spec ip daddr . 
ether saddr +dst addr4 +src mac +start 1 +count 1 +src_delta 2000 +tools sendip nc bash +proto udp + +race_repeat 0 + +perf_duration 0 +" + # Set template for all tests, types and rules are filled in depending on test set_template=' flush ruleset @@ -1455,6 +1472,59 @@ test_bug_flush_remove_add() { nft flush ruleset } +# - add ranged element, check that packets match it +# - reload the set, check packets still match +test_bug_reload() { + setup veth send_"${proto}" set || return ${KSELFTEST_SKIP} + rstart=${start} + + range_size=1 + for i in $(seq "${start}" $((start + count))); do + end=$((start + range_size)) + + # Avoid negative or zero-sized port ranges + if [ $((end / 65534)) -gt $((start / 65534)) ]; then + start=${end} + end=$((end + 1)) + fi + srcstart=$((start + src_delta)) + srcend=$((end + src_delta)) + + add "$(format)" || return 1 + range_size=$((range_size + 1)) + start=$((end + range_size)) + done + + # check that the kernel does allocate the pcpu scratch map + # for a reload with no element add/delete + ( echo flush set inet filter test ; + nft list set inet filter test ) | nft -f - + + start=${rstart} + range_size=1 + + for i in $(seq "${start}" $((start + count))); do + end=$((start + range_size)) + + # Avoid negative or zero-sized port ranges + if [ $((end / 65534)) -gt $((start / 65534)) ]; then + start=${end} + end=$((end + 1)) + fi + srcstart=$((start + src_delta)) + srcend=$((end + src_delta)) + + for j in $(seq ${start} $((range_size / 2 + 1)) ${end}); do + send_match "${j}" $((j + src_delta)) || return 1 + done + + range_size=$((range_size + 1)) + start=$((end + range_size)) + done + + nft flush ruleset +} + test_reported_issues() { eval test_bug_"${subtest}" } @@ -1513,4 +1583,4 @@ for name in ${TESTS}; do done done -[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP} +[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP} || exit 0 diff --git a/tools/testing/selftests/openat2/Makefile b/tools/testing/selftests/openat2/Makefile index 4b93b1417b8626043f220a0ecae479bd82191165..843ba56d8e49ecf35777214fff8690b4f6c688e7 100644 --- a/tools/testing/selftests/openat2/Makefile +++ b/tools/testing/selftests/openat2/Makefile @@ -5,4 +5,4 @@ TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test include ../lib.mk -$(TEST_GEN_PROGS): helpers.c +$(TEST_GEN_PROGS): helpers.c helpers.h diff --git a/tools/testing/selftests/openat2/helpers.h b/tools/testing/selftests/openat2/helpers.h index a6ea27344db2db4aac7b2e7a676dedb4283a84ab..7056340b9339e9f4159999c2797d15f5ddf6d3fd 100644 --- a/tools/testing/selftests/openat2/helpers.h +++ b/tools/testing/selftests/openat2/helpers.h @@ -9,6 +9,7 @@ #define _GNU_SOURCE #include +#include #include #include #include "../kselftest.h" @@ -62,11 +63,12 @@ bool needs_openat2(const struct open_how *how); (similar to chroot(2)). */ #endif /* RESOLVE_IN_ROOT */ -#define E_func(func, ...) \ - do { \ - if (func(__VA_ARGS__) < 0) \ - ksft_exit_fail_msg("%s:%d %s failed\n", \ - __FILE__, __LINE__, #func);\ +#define E_func(func, ...) \ + do { \ + errno = 0; \ + if (func(__VA_ARGS__) < 0) \ + ksft_exit_fail_msg("%s:%d %s failed - errno:%d\n", \ + __FILE__, __LINE__, #func, errno); \ } while (0) #define E_asprintf(...)
E_func(asprintf, __VA_ARGS__) diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c index b386367c606b1d9a3fecc4ca359006468e051412..453152b58e7f0098cc28de2bc479fd7631e76496 100644 --- a/tools/testing/selftests/openat2/openat2_test.c +++ b/tools/testing/selftests/openat2/openat2_test.c @@ -244,6 +244,16 @@ void test_openat2_flags(void) unlink(path); fd = sys_openat2(AT_FDCWD, path, &test->how); + if (fd < 0 && fd == -EOPNOTSUPP) { + /* + * Skip the testcase if it failed because not supported + * by FS. (e.g. a valid O_TMPFILE combination on NFS) + */ + ksft_test_result_skip("openat2 with %s fails with %d (%s)\n", + test->name, fd, strerror(-fd)); + goto next; + } + if (test->err >= 0) failed = (fd < 0); else @@ -288,7 +298,7 @@ void test_openat2_flags(void) else resultfn("openat2 with %s fails with %d (%s)\n", test->name, test->err, strerror(-test->err)); - +next: free(fdpath); fflush(stdout); } diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h index 01f8d3c0cf2cb844ecc233e5cc1f6b7fa1a42c0f..6922d6417e1cf06094c10627e0bd491d292d6fd2 100644 --- a/tools/testing/selftests/pidfd/pidfd.h +++ b/tools/testing/selftests/pidfd/pidfd.h @@ -68,7 +68,7 @@ #define PIDFD_SKIP 3 #define PIDFD_XFAIL 4 -int wait_for_pid(pid_t pid) +static inline int wait_for_pid(pid_t pid) { int status, ret; @@ -78,13 +78,20 @@ int wait_for_pid(pid_t pid) if (errno == EINTR) goto again; + ksft_print_msg("waitpid returned -1, errno=%d\n", errno); return -1; } - if (!WIFEXITED(status)) + if (!WIFEXITED(status)) { + ksft_print_msg( + "waitpid !WIFEXITED, WIFSIGNALED=%d, WTERMSIG=%d\n", + WIFSIGNALED(status), WTERMSIG(status)); return -1; + } - return WEXITSTATUS(status); + ret = WEXITSTATUS(status); + ksft_print_msg("waitpid WEXITSTATUS=%d\n", ret); + return ret; } static inline int sys_pidfd_open(pid_t pid, unsigned int flags) diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c index 22558524f71c34da86e9e0dacc5a73c523254e72..3fd8e903118f532d43a2ac251ec71531f19ca161 100644 --- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c +++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "pidfd.h" #include "../kselftest.h" @@ -80,7 +81,10 @@ static inline int error_check(struct error *err, const char *test_name) return err->code; } +#define CHILD_STACK_SIZE 8192 + struct child { + char *stack; pid_t pid; int fd; }; @@ -89,17 +93,22 @@ static struct child clone_newns(int (*fn)(void *), void *args, struct error *err) { static int flags = CLONE_PIDFD | CLONE_NEWPID | CLONE_NEWNS | SIGCHLD; - size_t stack_size = 1024; - char *stack[1024] = { 0 }; struct child ret; if (!(flags & CLONE_NEWUSER) && geteuid() != 0) flags |= CLONE_NEWUSER; + ret.stack = mmap(NULL, CHILD_STACK_SIZE, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); + if (ret.stack == MAP_FAILED) { + error_set(err, -1, "mmap of stack failed (errno %d)", errno); + return ret; + } + #ifdef __ia64__ - ret.pid = __clone2(fn, stack, stack_size, flags, args, &ret.fd); + ret.pid = __clone2(fn, ret.stack, CHILD_STACK_SIZE, flags, args, &ret.fd); #else - ret.pid = clone(fn, stack + stack_size, flags, args, &ret.fd); + ret.pid = clone(fn, ret.stack + CHILD_STACK_SIZE, flags, args, &ret.fd); #endif if (ret.pid < 0) { @@ -129,6 +138,11 @@ static inline int child_join(struct child *child, struct error *err) else if (r > 0) error_set(err, r, 
"child %d reported: %d", child->pid, r); + if (munmap(child->stack, CHILD_STACK_SIZE)) { + error_set(err, -1, "munmap of child stack failed (errno %d)", errno); + r = -1; + } + return r; } diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c index 529eb700ac26a39833a4e2c4c25b4caeb57af272..9a2d64901d591f877a7f9db3989df3cb01dce893 100644 --- a/tools/testing/selftests/pidfd/pidfd_test.c +++ b/tools/testing/selftests/pidfd/pidfd_test.c @@ -441,7 +441,6 @@ static void test_pidfd_poll_exec(int use_waitpid) { int pid, pidfd = 0; int status, ret; - pthread_t t1; time_t prog_start = time(NULL); const char *test_name = "pidfd_poll check for premature notification on child thread exec"; @@ -500,13 +499,14 @@ static int child_poll_leader_exit_test(void *args) */ *child_exit_secs = time(NULL); syscall(SYS_exit, 0); + /* Never reached, but appeases compiler thinking we should return. */ + exit(0); } static void test_pidfd_poll_leader_exit(int use_waitpid) { int pid, pidfd = 0; - int status, ret; - time_t prog_start = time(NULL); + int status, ret = 0; const char *test_name = "pidfd_poll check for premature notification on non-empty" "group leader exit"; diff --git a/tools/testing/selftests/pidfd/pidfd_wait.c b/tools/testing/selftests/pidfd/pidfd_wait.c index be2943f072f6088ba77df89ba2b7c942b9534465..17999e082aa715525f013f194b278e1bdf6786ad 100644 --- a/tools/testing/selftests/pidfd/pidfd_wait.c +++ b/tools/testing/selftests/pidfd/pidfd_wait.c @@ -39,7 +39,7 @@ static int sys_waitid(int which, pid_t pid, siginfo_t *info, int options, TEST(wait_simple) { - int pidfd = -1, status = 0; + int pidfd = -1; pid_t parent_tid = -1; struct clone_args args = { .parent_tid = ptr_to_u64(&parent_tid), @@ -47,7 +47,6 @@ TEST(wait_simple) .flags = CLONE_PIDFD | CLONE_PARENT_SETTID, .exit_signal = SIGCHLD, }; - int ret; pid_t pid; siginfo_t info = { .si_signo = 0, @@ -88,7 +87,7 @@ TEST(wait_simple) TEST(wait_states) { - int pidfd = -1, status = 0; + int pidfd = -1; pid_t parent_tid = -1; struct clone_args args = { .parent_tid = ptr_to_u64(&parent_tid), diff --git a/tools/testing/selftests/powerpc/security/spectre_v2.c b/tools/testing/selftests/powerpc/security/spectre_v2.c index adc2b7294e5fddb54150bd797b04efc5825b3c3d..83647b8277e7dfa2efa7a45e9bb327dd30a02208 100644 --- a/tools/testing/selftests/powerpc/security/spectre_v2.c +++ b/tools/testing/selftests/powerpc/security/spectre_v2.c @@ -193,7 +193,7 @@ int spectre_v2_test(void) * We are not vulnerable and reporting otherwise, so * missing such a mismatch is safe. 
*/ - if (state == VULNERABLE) + if (miss_percent > 95) return 4; return 1; diff --git a/tools/testing/selftests/rtc/settings b/tools/testing/selftests/rtc/settings index ba4d85f74cd6b9a671652d2610ac08bbd386a577..a953c96aa16e1e814867b33d06e894ffb664bb1b 100644 --- a/tools/testing/selftests/rtc/settings +++ b/tools/testing/selftests/rtc/settings @@ -1 +1 @@ -timeout=90 +timeout=180 diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile index 0ebfe8b0e147fa9bd4d3bd895774393cf0d32c28..585f7a0c10cbea67d418366c94b3415cf9e00bfe 100644 --- a/tools/testing/selftests/seccomp/Makefile +++ b/tools/testing/selftests/seccomp/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -CFLAGS += -Wl,-no-as-needed -Wall +CFLAGS += -Wl,-no-as-needed -Wall -isystem ../../../../usr/include/ LDFLAGS += -lpthread TEST_GEN_PROGS := seccomp_bpf seccomp_benchmark diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index b2ed35f244f9207b53320436630c337bf02eb418..b150cc837177a1227dc036f9c2fd1fa552a2d4cc 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -41,9 +41,9 @@ TEST_GEN_FILES += userfaultfd TEST_GEN_FILES += khugepaged ifeq ($(MACHINE),x86_64) -CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_32bit_program.c -m32) -CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_64bit_program.c) -CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_program.c -no-pie) +CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_program.c -m32) +CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_64bit_program.c) +CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_program.c -no-pie) TARGETS := protection_keys BINARIES_32 := $(TARGETS:%=%_32) diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c index c9404ef9698e2b2d96c96178dc64529cc4014470..426dccc08f90675192fd5e96f3de089e7f4ba6ce 100644 --- a/tools/testing/selftests/vm/hmm-tests.c +++ b/tools/testing/selftests/vm/hmm-tests.c @@ -1242,6 +1242,48 @@ TEST_F(hmm, anon_teardown) } } +/* + * Test memory snapshot without faulting in pages accessed by the device. + */ +TEST_F(hmm, mixedmap) +{ + struct hmm_buffer *buffer; + unsigned long npages; + unsigned long size; + unsigned char *m; + int ret; + + npages = 1; + size = npages << self->page_shift; + + buffer = malloc(sizeof(*buffer)); + ASSERT_NE(buffer, NULL); + + buffer->fd = -1; + buffer->size = size; + buffer->mirror = malloc(npages); + ASSERT_NE(buffer->mirror, NULL); + + + /* Reserve a range of addresses. */ + buffer->ptr = mmap(NULL, size, + PROT_READ | PROT_WRITE, + MAP_PRIVATE, + self->fd, 0); + ASSERT_NE(buffer->ptr, MAP_FAILED); + + /* Simulate a device snapshotting CPU pagetables. */ + ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages); + ASSERT_EQ(ret, 0); + ASSERT_EQ(buffer->cpages, npages); + + /* Check what the device saw. */ + m = buffer->mirror; + ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ); + + hmm_buffer_free(buffer); +} + /* * Test memory snapshot without faulting in pages accessed by the device. 
*/ diff --git a/tools/testing/selftests/vm/map_fixed_noreplace.c b/tools/testing/selftests/vm/map_fixed_noreplace.c index d91bde511268667ab183b98bdd1fa99258173df9..eed44322d1a635e6405bd6bc79eacf434a76697d 100644 --- a/tools/testing/selftests/vm/map_fixed_noreplace.c +++ b/tools/testing/selftests/vm/map_fixed_noreplace.c @@ -17,9 +17,6 @@ #define MAP_FIXED_NOREPLACE 0x100000 #endif -#define BASE_ADDRESS (256ul * 1024 * 1024) - - static void dump_maps(void) { char cmd[32]; @@ -28,18 +25,46 @@ static void dump_maps(void) system(cmd); } +static unsigned long find_base_addr(unsigned long size) +{ + void *addr; + unsigned long flags; + + flags = MAP_PRIVATE | MAP_ANONYMOUS; + addr = mmap(NULL, size, PROT_NONE, flags, -1, 0); + if (addr == MAP_FAILED) { + printf("Error: couldn't map the space we need for the test\n"); + return 0; + } + + if (munmap(addr, size) != 0) { + printf("Error: couldn't unmap the space we need for the test\n"); + return 0; + } + return (unsigned long)addr; +} + int main(void) { + unsigned long base_addr; unsigned long flags, addr, size, page_size; char *p; page_size = sysconf(_SC_PAGE_SIZE); + // Let's find a base addr that is free before we start the tests + size = 5 * page_size; + base_addr = find_base_addr(size); + if (!base_addr) { + printf("Error: couldn't map the space we need for the test\n"); + return 1; + } + flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE; // Check we can map all the areas we need below errno = 0; - addr = BASE_ADDRESS; + addr = base_addr; size = 5 * page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); @@ -60,7 +85,7 @@ int main(void) printf("unmap() successful\n"); errno = 0; - addr = BASE_ADDRESS + page_size; + addr = base_addr + page_size; size = 3 * page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -80,7 +105,7 @@ int main(void) * +4 | free | new */ errno = 0; - addr = BASE_ADDRESS; + addr = base_addr; size = 5 * page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -101,7 +126,7 @@ int main(void) * +4 | free | */ errno = 0; - addr = BASE_ADDRESS + (2 * page_size); + addr = base_addr + (2 * page_size); size = page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -121,7 +146,7 @@ int main(void) * +4 | free | new */ errno = 0; - addr = BASE_ADDRESS + (3 * page_size); + addr = base_addr + (3 * page_size); size = 2 * page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -141,7 +166,7 @@ int main(void) * +4 | free | */ errno = 0; - addr = BASE_ADDRESS; + addr = base_addr; size = 2 * page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -161,7 +186,7 @@ int main(void) * +4 | free | */ errno = 0; - addr = BASE_ADDRESS; + addr = base_addr; size = page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -181,7 +206,7 @@ int main(void) * +4 | free | new */ errno = 0; - addr = BASE_ADDRESS + (4 * page_size); + addr = base_addr + (4 * page_size); size = page_size; p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0); printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p); @@ -192,7 +217,7 @@
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index d418ca5f903997a97acc62bbfdbd7463418843c1..034245ea397f6ce032067a5e8e1f7a0e6f9f9fae 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -46,6 +46,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index 458ca0209dcf4342358330831aae3c1c1dbbab84..9c4c75f06396ceb1e21cdd1b2bdaf944f0a0ccb5 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -6,9 +6,9 @@ include ../lib.mk
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
 UNAME_M := $(shell uname -m)
-CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
-CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
-CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh $(CC) trivial_program.c -no-pie)
+CAN_BUILD_I386 := $(shell ./check_cc.sh "$(CC)" trivial_32bit_program.c -m32)
+CAN_BUILD_X86_64 := $(shell ./check_cc.sh "$(CC)" trivial_64bit_program.c)
+CAN_BUILD_WITH_NOPIE := $(shell ./check_cc.sh "$(CC)" trivial_program.c -no-pie)
 
 TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
 			check_initial_reg_state sigreturn iopl ioperm \
diff --git a/tools/testing/selftests/x86/check_cc.sh b/tools/testing/selftests/x86/check_cc.sh
index 3e2089c8cf54967fdd375ec151a15827e104fc53..8c669c0d662ee275f27132d2fcab174660ab4c81 100755
--- a/tools/testing/selftests/x86/check_cc.sh
+++ b/tools/testing/selftests/x86/check_cc.sh
@@ -7,7 +7,7 @@ CC="$1"
 TESTPROG="$2"
 shift 2
 
-if "$CC" -o /dev/null "$TESTPROG" -O0 "$@" 2>/dev/null; then
+if [ -n "$CC" ] && $CC -o /dev/null "$TESTPROG" -O0 "$@" 2>/dev/null; then
 	echo 1
 else
 	echo 0
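
The check_cc.sh change works together with the quoted "$(CC)" in the two Makefiles above: a multi-word compiler such as CC="ccache gcc" now reaches the script as a single argument, and the script deliberately expands $CC unquoted so the shell word-splits it back into a command plus its argument (the new -n test keeps an empty CC from degenerating into a bare `-o` invocation). The old quoted form asked the shell to execute one program literally named "ccache gcc". A small illustration of that failure mode; the compiler string here is hypothetical:

  /*
   * Sketch: why `"$CC"` breaks for CC="ccache gcc". Passing the whole
   * string as the program name makes the PATH lookup search for a single
   * file called "ccache gcc", which does not exist, so exec fails ENOENT.
   */
  #include <errno.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
  	execlp("ccache gcc", "ccache gcc", "--version", (char *)NULL);
  	/* Only reached if the exec failed. */
  	printf("exec of \"ccache gcc\": %s\n", strerror(errno));
  	return 1;
  }
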
diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
index 232e958ec454756501f2caa8eaf2133067fe10ac..b0b91d9b0dc2143cf689c9d42be8410057790215 100755
--- a/tools/testing/selftests/zram/zram.sh
+++ b/tools/testing/selftests/zram/zram.sh
@@ -2,9 +2,6 @@
 # SPDX-License-Identifier: GPL-2.0
 TCID="zram.sh"
 
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
 . ./zram_lib.sh
 
 run_zram () {
@@ -18,14 +15,4 @@ echo ""
 
 check_prereqs
 
-# check zram module exists
-MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
-if [ -f $MODULE_PATH ]; then
-	run_zram
-elif [ -b /dev/zram0 ]; then
-	run_zram
-else
-	echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
-	echo "$TCID : CONFIG_ZRAM is not set"
-	exit $ksft_skip
-fi
+run_zram
diff --git a/tools/testing/selftests/zram/zram01.sh b/tools/testing/selftests/zram/zram01.sh
index 114863d9fb8768c906732d7ce780e3f6ea73e44c..8f4affe34f3e4c1d379d96515506e048954be073 100755
--- a/tools/testing/selftests/zram/zram01.sh
+++ b/tools/testing/selftests/zram/zram01.sh
@@ -33,9 +33,7 @@ zram_algs="lzo"
 
 zram_fill_fs()
 {
-	local mem_free0=$(free -m | awk 'NR==2 {print $4}')
-
-	for i in $(seq 0 $(($dev_num - 1))); do
+	for i in $(seq $dev_start $dev_end); do
 		echo "fill zram$i..."
 		local b=0
 		while [ true ]; do
@@ -45,29 +43,17 @@ zram_fill_fs()
 			b=$(($b + 1))
 		done
 		echo "zram$i can be filled with '$b' KB"
-	done
 
-	local mem_free1=$(free -m | awk 'NR==2 {print $4}')
-	local used_mem=$(($mem_free0 - $mem_free1))
+		local mem_used_total=`awk '{print $3}' "/sys/block/zram$i/mm_stat"`
+		local v=$((100 * 1024 * $b / $mem_used_total))
+		if [ "$v" -lt 100 ]; then
+			echo "FAIL compression ratio: 0.$v:1"
+			ERR_CODE=-1
+			return
+		fi
 
-	local total_size=0
-	for sm in $zram_sizes; do
-		local s=$(echo $sm | sed 's/M//')
-		total_size=$(($total_size + $s))
+		echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK"
 	done
-
-	echo "zram used ${used_mem}M, zram disk sizes ${total_size}M"
-
-	local v=$((100 * $total_size / $used_mem))
-
-	if [ "$v" -lt 100 ]; then
-		echo "FAIL compression ratio: 0.$v:1"
-		ERR_CODE=-1
-		zram_cleanup
-		return
-	fi
-
-	echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK"
 }
 
 check_prereqs
@@ -81,7 +67,6 @@ zram_mount
 zram_fill_fs
 zram_cleanup
-zram_unload
 
 if [ $ERR_CODE -ne 0 ]; then
 	echo "$TCID : [FAIL]"
diff --git a/tools/testing/selftests/zram/zram02.sh b/tools/testing/selftests/zram/zram02.sh
index e83b404807c09a7cf2a3cdefb9ec4eb746319a36..2418b0c4ed136b3b603ade62cd2838ff61af38b8 100755
--- a/tools/testing/selftests/zram/zram02.sh
+++ b/tools/testing/selftests/zram/zram02.sh
@@ -36,7 +36,6 @@ zram_set_memlimit
 zram_makeswap
 zram_swapoff
 zram_cleanup
-zram_unload
 
 if [ $ERR_CODE -ne 0 ]; then
 	echo "$TCID : [FAIL]"
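
zram01.sh now derives the compression ratio per device from /sys/block/zram$i/mm_stat, whose third field is mem_used_total in bytes, instead of diffing `free -m` samples, which picked up unrelated system-wide memory churn. With b KiB written, the check computes v = 100 * 1024 * b / mem_used_total and fails when v < 100, i.e. when the ratio is worse than 1:1. The same arithmetic as a standalone sketch; the device path and the mm_stat field order are assumed per the zram documentation:

  /*
   * Sketch: the compression-ratio check from zram_fill_fs(), in C.
   * mm_stat fields (orig_data_size, compr_data_size, mem_used_total) are
   * assumed per Documentation/admin-guide/blockdev/zram.rst.
   */
  #include <stdio.h>

  int main(void)
  {
  	unsigned long long orig, compr, mem_used_total;
  	unsigned long long written_kib = 25600;	/* e.g. KiB written by dd */
  	FILE *f = fopen("/sys/block/zram0/mm_stat", "r");

  	if (!f || fscanf(f, "%llu %llu %llu", &orig, &compr,
  			 &mem_used_total) != 3) {
  		perror("mm_stat");
  		return 1;
  	}
  	fclose(f);

  	if (!mem_used_total) {
  		fprintf(stderr, "mm_stat reports zero bytes used\n");
  		return 1;
  	}

  	/* v = 100 * 1024 * b / mem_used_total, as in the shell version. */
  	unsigned long long v = 100ULL * 1024 * written_kib / mem_used_total;

  	printf("compression ratio: %llu.%02llu:1 (%s)\n",
  	       v / 100, v % 100, v >= 100 ? "OK" : "FAIL");
  	return 0;
  }
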
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
index 6f872f266fd1151b6771f55147d265468a3ad74d..21ec1966de76ca4047fe50eea82015bc03986c98 100755
--- a/tools/testing/selftests/zram/zram_lib.sh
+++ b/tools/testing/selftests/zram/zram_lib.sh
@@ -5,12 +5,17 @@
 # Author: Alexey Kodanev 
 # Modified: Naresh Kamboju 
 
-MODULE=0
 dev_makeswap=-1
 dev_mounted=-1
-
+dev_start=0
+dev_end=-1
+module_load=-1
+sys_control=-1
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
+kernel_version=`uname -r | cut -d'.' -f1,2`
+kernel_major=${kernel_version%.*}
+kernel_minor=${kernel_version#*.}
 
 trap INT
 
@@ -25,68 +30,104 @@ check_prereqs()
 	fi
 }
 
+kernel_gte()
+{
+	major=${1%.*}
+	minor=${1#*.}
+
+	if [ $kernel_major -gt $major ]; then
+		return 0
+	elif [[ $kernel_major -eq $major && $kernel_minor -ge $minor ]]; then
+		return 0
+	fi
+
+	return 1
+}
+
 zram_cleanup()
 {
 	echo "zram cleanup"
 	local i=
-	for i in $(seq 0 $dev_makeswap); do
+	for i in $(seq $dev_start $dev_makeswap); do
 		swapoff /dev/zram$i
 	done
 
-	for i in $(seq 0 $dev_mounted); do
+	for i in $(seq $dev_start $dev_mounted); do
 		umount /dev/zram$i
 	done
 
-	for i in $(seq 0 $(($dev_num - 1))); do
+	for i in $(seq $dev_start $dev_end); do
 		echo 1 > /sys/block/zram${i}/reset
 		rm -rf zram$i
 	done
-}
 
+	if [ $sys_control -eq 1 ]; then
+		for i in $(seq $dev_start $dev_end); do
+			echo $i > /sys/class/zram-control/hot_remove
+		done
+	fi
 
-zram_unload()
-{
-	if [ $MODULE -ne 0 ] ; then
-		echo "zram rmmod zram"
+	if [ $module_load -eq 1 ]; then
 		rmmod zram > /dev/null 2>&1
 	fi
 }
 
 zram_load()
 {
-	# check zram module exists
-	MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
-	if [ -f $MODULE_PATH ]; then
-		MODULE=1
-		echo "create '$dev_num' zram device(s)"
-		modprobe zram num_devices=$dev_num
-		if [ $? -ne 0 ]; then
-			echo "failed to insert zram module"
-			exit 1
-		fi
-
-		dev_num_created=$(ls /dev/zram* | wc -w)
+	echo "create '$dev_num' zram device(s)"
+
+	# zram module loaded, new kernel
+	if [ -d "/sys/class/zram-control" ]; then
+		echo "zram modules already loaded, kernel supports" \
+			"zram-control interface"
+		dev_start=$(ls /dev/zram* | wc -w)
+		dev_end=$(($dev_start + $dev_num - 1))
+		sys_control=1
+
+		for i in $(seq $dev_start $dev_end); do
+			cat /sys/class/zram-control/hot_add > /dev/null
+		done
+
+		echo "all zram devices (/dev/zram$dev_start~$dev_end" \
+			"successfully created"
+		return 0
+	fi
 
-		if [ "$dev_num_created" -ne "$dev_num" ]; then
-			echo "unexpected num of devices: $dev_num_created"
-			ERR_CODE=-1
+	# detect old kernel or built-in
+	modprobe zram num_devices=$dev_num
+	if [ ! -d "/sys/class/zram-control" ]; then
+		if grep -q '^zram' /proc/modules; then
+			rmmod zram > /dev/null 2>&1
+			if [ $? -ne 0 ]; then
+				echo "zram module is being used on old kernel" \
+					"without zram-control interface"
+				exit $ksft_skip
+			fi
 		else
-			echo "zram load module successful"
+			echo "test needs CONFIG_ZRAM=m on old kernel without" \
+				"zram-control interface"
+			exit $ksft_skip
 		fi
-	elif [ -b /dev/zram0 ]; then
-		echo "/dev/zram0 device file found: OK"
-	else
-		echo "ERROR: No zram.ko module or no /dev/zram0 device found"
-		echo "$TCID : CONFIG_ZRAM is not set"
-		exit 1
+		modprobe zram num_devices=$dev_num
 	fi
+
+	module_load=1
+	dev_end=$(($dev_num - 1))
+	echo "all zram devices (/dev/zram0~$dev_end) successfully created"
 }
 
 zram_max_streams()
 {
 	echo "set max_comp_streams to zram device(s)"
-	local i=0
 
+	kernel_gte 4.7
+	if [ $? -eq 0 ]; then
+		echo "The device attribute max_comp_streams was"\
+			"deprecated in 4.7"
+		return 0
+	fi
+
+	local i=$dev_start
 	for max_s in $zram_max_streams; do
 		local sys_path="/sys/block/zram${i}/max_comp_streams"
 		echo $max_s > $sys_path || \
@@ -98,7 +139,7 @@ zram_max_streams()
 			echo "FAIL can't set max_streams '$max_s', get $max_stream"
 
 		i=$(($i + 1))
-		echo "$sys_path = '$max_streams' ($i/$dev_num)"
+		echo "$sys_path = '$max_streams'"
 	done
 
 	echo "zram max streams: OK"
@@ -108,15 +149,16 @@ zram_compress_alg()
 {
 	echo "test that we can set compression algorithm"
 
-	local algs=$(cat /sys/block/zram0/comp_algorithm)
+	local i=$dev_start
+	local algs=$(cat /sys/block/zram${i}/comp_algorithm)
 	echo "supported algs: $algs"
-	local i=0
+
 	for alg in $zram_algs; do
 		local sys_path="/sys/block/zram${i}/comp_algorithm"
 		echo "$alg" > $sys_path || \
 			echo "FAIL can't set '$alg' to $sys_path"
 		i=$(($i + 1))
-		echo "$sys_path = '$alg' ($i/$dev_num)"
+		echo "$sys_path = '$alg'"
 	done
 
 	echo "zram set compression algorithm: OK"
@@ -125,14 +167,14 @@ zram_compress_alg()
 zram_set_disksizes()
 {
 	echo "set disk size to zram device(s)"
-	local i=0
+	local i=$dev_start
 	for ds in $zram_sizes; do
 		local sys_path="/sys/block/zram${i}/disksize"
 		echo "$ds" > $sys_path || \
 			echo "FAIL can't set '$ds' to $sys_path"
 
 		i=$(($i + 1))
-		echo "$sys_path = '$ds' ($i/$dev_num)"
+		echo "$sys_path = '$ds'"
 	done
 
 	echo "zram set disksizes: OK"
@@ -142,14 +184,14 @@ zram_set_memlimit()
 {
 	echo "set memory limit to zram device(s)"
 
-	local i=0
+	local i=$dev_start
 	for ds in $zram_mem_limits; do
 		local sys_path="/sys/block/zram${i}/mem_limit"
 		echo "$ds" > $sys_path || \
 			echo "FAIL can't set '$ds' to $sys_path"
 
 		i=$(($i + 1))
-		echo "$sys_path = '$ds' ($i/$dev_num)"
+		echo "$sys_path = '$ds'"
 	done
 
 	echo "zram set memory limit: OK"
@@ -158,8 +200,8 @@ zram_makeswap()
 {
 	echo "make swap with zram device(s)"
-	local i=0
-	for i in $(seq 0 $(($dev_num - 1))); do
+	local i=$dev_start
+	for i in $(seq $dev_start $dev_end); do
 		mkswap /dev/zram$i > err.log 2>&1
 		if [ $? -ne 0 ]; then
 			cat err.log
@@ -182,7 +224,7 @@ zram_makeswap()
 zram_swapoff()
 {
 	local i=
-	for i in $(seq 0 $dev_makeswap); do
+	for i in $(seq $dev_start $dev_end); do
 		swapoff /dev/zram$i > err.log 2>&1
 		if [ $? -ne 0 ]; then
 			cat err.log
@@ -196,7 +238,7 @@ zram_swapoff()
 
 zram_makefs()
 {
-	local i=0
+	local i=$dev_start
 	for fs in $zram_filesystems; do
 		# if requested fs not supported default it to ext2
 		which mkfs.$fs > /dev/null 2>&1 || fs=ext2
@@ -215,7 +257,7 @@ zram_makefs()
 zram_mount()
 {
 	local i=0
-	for i in $(seq 0 $(($dev_num - 1))); do
+	for i in $(seq $dev_start $dev_end); do
 		echo "mount /dev/zram$i"
 		mkdir zram$i
 		mount /dev/zram$i zram$i > /dev/null || \
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index cb3f29c09aff3522b6225f1710d71e27097de9a2..23f142af544ad796146361cc81ce3315d303f42e 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -130,6 +130,7 @@ static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
 	memset(dev, 0, sizeof *dev);
 	dev->vdev.features = features;
 	INIT_LIST_HEAD(&dev->vdev.vqs);
+	spin_lock_init(&dev->vdev.vqs_list_lock);
 	dev->buf_size = 1024;
 	dev->buf = malloc(dev->buf_size);
 	assert(dev->buf);
diff --git a/usr/include/Makefile b/usr/include/Makefile
index f6b3c85d900ede51844ca37a4ccd7d3817d23c0e..703a255cddc6358dc14f80f300abee8b9387ada4 100644
--- a/usr/include/Makefile
+++ b/usr/include/Makefile
@@ -34,7 +34,6 @@ no-header-test += linux/hdlc/ioctl.h
 no-header-test += linux/ivtv.h
 no-header-test += linux/kexec.h
 no-header-test += linux/matroxfb.h
-no-header-test += linux/nfc.h
 no-header-test += linux/omap3isp.h
 no-header-test += linux/omapfb.h
 no-header-test += linux/patchkey.h
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index c2323c27a28b52dd14c6405dcbc8d54b1356efe0..518cd8dc390e2d73a21aeac626865cb5e1a9fccf 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -451,8 +451,8 @@ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
 	idx = srcu_read_lock(&kvm->irq_srcu);
 	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
 	if (gsi != -1)
-		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
-					 link)
+		hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
+					  link, srcu_read_lock_held(&kvm->irq_srcu))
 			if (kian->gsi == gsi) {
 				srcu_read_unlock(&kvm->irq_srcu, idx);
 				return true;
@@ -468,8 +468,8 @@ void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
 {
 	struct kvm_irq_ack_notifier *kian;
 
-	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
-				 link)
+	hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
+				  link, srcu_read_lock_held(&kvm->irq_srcu))
 		if (kian->gsi == gsi)
 			kian->irq_acked(kian);
 }
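
The two eventfd.c hunks are a lockdep-correctness fix rather than a rename: readers of kvm->irq_ack_notifier_list hold kvm->irq_srcu, not plain rcu_read_lock(), so hlist_for_each_entry_rcu() can trip the RCU-lockdep check. hlist_for_each_entry_srcu() takes an explicit "reader lock held" condition and thereby documents and verifies the real protection. A kernel-style sketch of the pattern (illustrative names, builds only in-kernel):

  /* Sketch: SRCU-protected hlist traversal; illustrative, not from the patch. */
  #include <linux/types.h>
  #include <linux/srcu.h>
  #include <linux/rculist.h>

  struct item {
  	int key;
  	struct hlist_node link;
  };

  static HLIST_HEAD(items);
  DEFINE_STATIC_SRCU(items_srcu);

  static bool lookup(int key)
  {
  	struct item *it;
  	int idx = srcu_read_lock(&items_srcu);

  	/* The last argument tells lockdep which lock readers really hold. */
  	hlist_for_each_entry_srcu(it, &items, link,
  				  srcu_read_lock_held(&items_srcu))
  		if (it->key == key) {
  			srcu_read_unlock(&items_srcu, idx);
  			return true;
  		}

  	srcu_read_unlock(&items_srcu, idx);
  	return false;
  }
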
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9ad4d8a2649343e9b9b9e35838e9b64ffe80d5e2..a5c107bbf022fd066a8842b7e31895ed8fd9d84a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -114,6 +114,8 @@ EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
 static int kvm_debugfs_num_entries;
 static const struct file_operations stat_fops_per_vm;
 
+static struct file_operations kvm_chardev_ops;
+
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 			   unsigned long arg);
 #ifdef CONFIG_KVM_COMPAT
@@ -162,6 +164,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 {
 }
 
+__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+}
+
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
 {
 	/*
@@ -338,6 +344,12 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
 
+static void kvm_flush_shadow_all(struct kvm *kvm)
+{
+	kvm_arch_flush_shadow_all(kvm);
+	kvm_arch_guest_memory_reclaimed(kvm);
+}
+
 void kvm_reload_remote_mmus(struct kvm *kvm)
 {
 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
@@ -492,6 +504,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
+	kvm_arch_guest_memory_reclaimed(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 
 	return 0;
@@ -595,7 +608,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -823,6 +836,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
 
 	preempt_notifier_inc();
 
+	/*
+	 * When the fd passed to this ioctl() is opened it pins the module,
+	 * but try_module_get() also prevents getting a reference if the module
+	 * is in MODULE_STATE_GOING (e.g. if someone ran "rmmod --wait").
+	 */
+	if (!try_module_get(kvm_chardev_ops.owner)) {
+		r = -ENODEV;
+		goto out_err;
+	}
+
 	return kvm;
 
 out_err:
@@ -892,7 +915,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 #else
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
@@ -904,6 +927,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	preempt_notifier_dec();
 	hardware_disable_all();
 	mmdrop(mm);
+	module_put(kvm_chardev_ops.owner);
 }
 
 void kvm_get_kvm(struct kvm *kvm)
@@ -1233,6 +1257,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 		 * - kvm_is_visible_gfn (mmu_check_root)
 		 */
 		kvm_arch_flush_shadow_memslot(kvm, slot);
+		kvm_arch_guest_memory_reclaimed(kvm);
 	}
 
 	r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
@@ -1699,7 +1724,6 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 {
 	return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_memslot);
 
 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
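
The kvm_main.c changes bundle two independent fixes: every kvm_arch_flush_shadow_all() call site is funneled through the new kvm_flush_shadow_all() so that the __weak kvm_arch_guest_memory_reclaimed() arch hook (a no-op unless an architecture overrides it) runs wherever guest memory is unmapped or reclaimed, and each VM now takes a reference on the kvm module at creation and drops it in kvm_destroy_vm(), so the module cannot reach its exit path while a VM object is still alive. The pinning pattern in isolation, as a kernel-style sketch (illustrative names, not the patch's code):

  /* Sketch: pin a module for the lifetime of an object created via its
   * chardev; illustrative only, builds only in-kernel. */
  #include <linux/module.h>
  #include <linux/errno.h>
  #include <linux/fs.h>

  static struct file_operations my_chardev_ops = {
  	.owner = THIS_MODULE,
  };

  static int my_object_create(void)
  {
  	/* Fails if the module is already MODULE_STATE_GOING. */
  	if (!try_module_get(my_chardev_ops.owner))
  		return -ENODEV;
  	/* ... the object now pins the module ... */
  	return 0;
  }

  static void my_object_destroy(void)
  {
  	/* ... tear the object down first, then drop the pin ... */
  	module_put(my_chardev_ops.owner);
  }
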